diff -Naurp linux-2.4.20-wolk4.8-fullkernel/Documentation/Configure.help linux-2.4.20-wolk4.9-fullkernel/Documentation/Configure.help --- linux-2.4.20-wolk4.8-fullkernel/Documentation/Configure.help 2003-08-25 18:27:01.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/Documentation/Configure.help 2003-08-26 17:59:06.000000000 +0200 @@ -6438,6 +6438,8 @@ CONFIG_SCHED_SERVER max_sleep_avg = 2 * HZ; starvation_limit = 2 * HZ; + vm.bdflush = 50 500 0 0 500 3000 80 50 0 (if HZ == 100) + If unsure or you don't select this, the Server Scheduler will be used. If you select none of the Scheduler Tweaks, the Server Scheduler will be used. @@ -6462,8 +6464,6 @@ CONFIG_SCHED_DESKTOP max_sleep_avg = 2 * HZ; starvation_limit = 2 * HZ; - vm.bdflush = 50 500 0 0 500 3000 80 50 0 (if HZ == 100) - vm.bdflush = 30 500 0 0 500 3000 80 30 0 (if HZ == 100) If unsure or you don't select this, the Server Scheduler will @@ -11026,6 +11026,54 @@ CONFIG_SCSI_MEGARAID2 say M here and read . The module will be called megaraid2.o. +iSCSI support (SCSI-over-Network) (driver 3.1.x series) +CONFIG_SCSI_ISCSI + The Linux iSCSI driver acts as an iSCSI protocol initiator to transport + SCSI requests and responses over an IP network between the client and + an iSCSI-enabled target device such as a Cisco SN 5428-2 storage router. + The iSCSI protocol is an IETF-defined protocol for IP storage. For more + information about the iSCSI protocol, refer to the IETF standards for IP + storage at http://www.ietf.org. + + Architecturally, the iSCSI driver combines with the client TCP/IP stack, + network drivers, and NICs to provide the same functions as a SCSI adapter + driver with an HBA. + + To attach to storage, you must also have an iSCSI-capable device connected + to your network. The iSCSI device may be on the same LAN as your Linux host, + or the iSCSI traffic may be routed using normal IP routing methods. 
+ + The daemon and the kernel driver are available under the terms of the GNU + General Public License. + + For further informations, please read: http://linux-iscsi.sourceforge.net/ + + If unsure, say N. + +iSCSI support (SCSI-over-Network) (driver 3.4.x series) +CONFIG_SCSI_ISCSI_NEW + The Linux iSCSI driver acts as an iSCSI protocol initiator to transport + SCSI requests and responses over an IP network between the client and + an iSCSI-enabled target device such as a Cisco SN 5428-2 storage router. + The iSCSI protocol is an IETF-defined protocol for IP storage. For more + information about the iSCSI protocol, refer to the IETF standards for IP + storage at http://www.ietf.org. + + Architecturally, the iSCSI driver combines with the client TCP/IP stack, + network drivers, and NICs to provide the same functions as a SCSI adapter + driver with an HBA. + + To attach to storage, you must also have an iSCSI-capable device connected + to your network. The iSCSI device may be on the same LAN as your Linux host, + or the iSCSI traffic may be routed using normal IP routing methods. + + The daemon and the kernel driver are available under the terms of the GNU + General Public License. + + For further informations, please read: http://linux-iscsi.sourceforge.net/ + + If unsure, say N. + Intel/ICP (former GDT SCSI Disk Array) RAID Controller support CONFIG_SCSI_GDTH Formerly called GDT SCSI Disk Array Controller Support. @@ -12865,6 +12913,19 @@ CONFIG_NET_CLS_U32 If unsure, say N. +Layer7 classifier +CONFIG_NET_CLS_LAYER7 + Say Y if you want to be able to classify connetions (and their + packets) based on application layer data. This is necessary if + you wish to classify applications such as peer-to-peer filesharing + systems that do not always use the same port. Say N if unsure. + + This code is also available as a module called cls_layer7 ( = code + which can be inserted in and removed from the running kernel + whenever you want). 
If you want to compile it as a module, say M + here and read . + + Special RSVP classifier CONFIG_NET_CLS_RSVP The Resource Reservation Protocol (RSVP) permits end systems to @@ -21988,6 +22049,26 @@ CONFIG_IBMASM servers and are also available as PCI adapter cards. This driver supports both configurations. +Dazuko support +CONFIG_DAZUKO + This driver allows file access control for 3rd-party applications. + An application communicates with Dazuko through a device. To find + the major device number to create the node you can do: + + > grep dazuko /proc/devices + 254 dazuko + > mknod -m 666 /dev/dazuko c 254 0 + + For security reasons, Dazuko will only interact with applications + running as root. For more information about dazuko please go to + the project homepage: http://www.dazuko.org/. + + This code can only be compiled as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called dazuko.o. + + If unsure, say N. + MTRR (Memory Type Range Register) support CONFIG_MTRR On Intel P6 family processors (Pentium Pro, Pentium II and later) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/Documentation/kernel-parameters.txt linux-2.4.20-wolk4.9-fullkernel/Documentation/kernel-parameters.txt --- linux-2.4.20-wolk4.8-fullkernel/Documentation/kernel-parameters.txt 2003-08-25 18:26:31.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/Documentation/kernel-parameters.txt 2003-08-25 20:35:58.000000000 +0200 @@ -640,6 +640,11 @@ running once the system is up. the kernel using a special protocol. See linux/Documentation/i386/boot.txt for information. + vm_reserve=nn[KM] + [KNL,BOOT,IA-32] force use of a specific amount of + virtual memory for vmalloc and ioremap allocations + minimum 32 MB maximum 800 MB, default 128MB. 
+ vmhalt= [KNL,S390] vmpoff= [KNL,S390] diff -Naurp linux-2.4.20-wolk4.8-fullkernel/Documentation/networking/sk98lin.txt linux-2.4.20-wolk4.9-fullkernel/Documentation/networking/sk98lin.txt --- linux-2.4.20-wolk4.8-fullkernel/Documentation/networking/sk98lin.txt 2003-08-25 18:26:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/Documentation/networking/sk98lin.txt 2003-08-29 10:55:43.000000000 +0200 @@ -2,9 +2,9 @@ All rights reserved =========================================================================== -sk98lin.txt created 18-Jun-2003 +sk98lin.txt created 26-Aug-2003 -Readme File for sk98lin v6.12 +Readme File for sk98lin v6.17 Marvell Yukon/SysKonnect SK-98xx Gigabit Ethernet Adapter family driver for LINUX This file contains @@ -19,7 +19,6 @@ This file contains 5 Large Frame Support 6 VLAN and Link Aggregation Support (IEEE 802.1, 802.1q, 802.3ad) 7 Troubleshooting - 8 History =========================================================================== @@ -76,8 +75,8 @@ follows: To integrate the driver permanently into the kernel, proceed as follows: 1. Select the menu "Network device support" and then "Ethernet(1000Mbit)" -2. Mark "Marvell Yukon/SysKonnect SK-98xx/SK-95xx Gigabit Ethernet Adapter - support" with (*) +2. Mark "Marvell Yukon Chipset / SysKonnect SK-98xx family support" + with (*) 3. Build a new kernel when the configuration of the above options is finished. 4. Install the new kernel. @@ -88,8 +87,8 @@ To use the driver as a module, proceed a 1. Enable 'loadable module support' in the kernel. 2. For automatic driver start, enable the 'Kernel module loader'. 3. Select the menu "Network device support" and then "Ethernet(1000Mbit)" -4. Mark "Marvell Yukon/SysKonnect SK-98xx/SK-95xx Gigabit Ethernet Adapter - support" with (M) +4. Mark "Marvell Yukon Chipset / SysKonnect SK-98xx family support" + with (M) 5. Execute the command "make modules". 6. Execute the command "make modules_install". The appropiate modules will be installed. 
@@ -201,7 +200,7 @@ You also want to set DuplexCapabilities to FULL, and on the second adapter to HALF. Then, you must enter: - modprobe sk98lin AutoNeg=On,Off DupCap=Full,Half + modprobe sk98lin AutoNeg_A=On,Off DupCap_A=Full,Half NOTE: The number of adapters that can be configured this way is limited in the driver (file skge.c, constant SK_MAX_CARD_PARAM). @@ -259,9 +258,9 @@ This parameter can be used to set the fl port reports during auto-negotiation. It can be set for each port individually. Possible modes: - -- Sym = Symetric: both link partners are allowed to send + -- Sym = Symmetric: both link partners are allowed to send PAUSE frames - -- SymOrRem = SymetricOrRemote: both or only remote partner + -- SymOrRem = SymmetricOrRemote: both or only remote partner are allowed to send PAUSE frames -- LocSend = LocalSend: only local link partner is allowed to send PAUSE frames @@ -286,6 +285,35 @@ with this parameter. 4.2 Adapter Parameters ----------------------- +Connection Type (SK-98xx V2.0 copper adapters only) +--------------- +Parameter: ConType +Values: Auto, 100FD, 100HD, 10FD, 10HD +Default: Auto + +The parameter 'ConType' is a combination of all five per-port parameters +within one single parameter. This simplifies the configuration of both ports +of an adapter card! The different values of this variable reflect the most +meaningful combinations of port parameters. + +The following table shows the values of 'ConType' and the corresponding +combinations of the per-port parameters: + + ConType | DupCap AutoNeg FlowCtrl Role Speed + ----------+------------------------------------------------------ + Auto | Both On SymOrRem Auto Auto + 100FD | Full Off None Auto (ignored) 100 + 100HD | Half Off None Auto (ignored) 100 + 10FD | Full Off None Auto (ignored) 10 + 10HD | Half Off None Auto (ignored) 10 + +Stating any other port parameter together with this 'ConType' variable +will result in a merged configuration of those settings. 
This due to +the fact, that the per-port parameters (e.g. Speed_? ) have a higher +priority than the combined variable 'ConType'. + +NOTE: This parameter is always used on both ports of the adapter card. + Interrupt Moderation -------------------- Parameter: Moderation @@ -517,7 +545,7 @@ Problem: Upon driver start, the followi Nr: 0xcc Msg: SkGeInitPort() cannot init running ports" Reason: You are using a driver compiled for single processor machines - on a multiprocessor machine with SMP (Symetric MultiProcessor) + on a multiprocessor machine with SMP (Symmetric MultiProcessor) kernel. Solution: Configure your kernel appropriately and recompile the kernel or the modules. @@ -535,185 +563,6 @@ information is available: - Driver version *** -8 History -========== - -VERSION 6.11 (In-Kernel version) -New Features: -- Support for Kernel 2.5/2.6 -- Support for new IO-control MIB data structure -- New SkOsGetTime function -Problems fixed: -- Fix: Race condition with broken LM80 chip -- Fix: Common modules update (#10803, #10768, #10767) -- Fix: Dim, ProcFS, Isr, Module Support changes for Kernel 2.5/2.6 -Known limitations: -- None - -VERSION 6.10 -New Features: -- none -Problems fixed: -- Fix: Race condition with padded frames -Known limitations: -- None - -VERSION 6.09 -New Features: -- none -Problems fixed: -- Fix: Disabled HW Error IRQ on 32-bit Yukon if sensor IRQ occurs -- Fix: Delay race condition with some server machines -Known limitations: -- None - -VERSION 6.08 -New Features: -- Add: Dynamic Interrupt moderation -- Add: Blink mode verification -- Fix: CSUM changes -Problems fixed: -- Fix: CSUM changes -Known limitations: -- None - -VERSION 6.04 - 6.07 -New Features: -- Common modules update -Problems fixed: -- none -Known limitations: -- None - -VERSION 6.03 -New Features: -- Common modules update -Problems fixed: -- Remove useless init_module/cleanup_module forward declarations -Known limitations: -- None - -VERSION 6.02 (In-Kernel version) -New Features: 
-- Common modules update -Problems fixed: -- Boot message cleanup -Known limitations: -- None - -VERSION 6.00 (In-Kernel version) -New Features: -- Support for SK-98xx V2.0 adapters -- Support for gmac -- Support for kernel 2.4.x and kernel 2.2.x -- Zerocopy support for kernel 2.4.x with sendfile() -- Support for scatter-gather functionality with sendfile() -- Speed support for SK-98xx V2.0 adapters -- New ProcFs entries -- New module parameters -Problems fixed: -- ProcFS initialization -- csum packet error -- Ierror/crc counter error (#10767) -- rx_too_long counter error (#10751) -Known limitations: -- None - -VERSION 4.11 -New Features: -- none -Problems fixed: -- Error statistic counter fix (#10620) -- RLMT-Fixes (#10659, #10639, #10650) -- LM80 sensor initialization fix (#10623) -- SK-CSUM memory fixes (#10610). -Known limitations: -- None - -VERSION 4.10 -New Features: -- New ProcFs entries -Problems fixed: -- Corrected some printk's -Known limitations: -- None - -VERSION 4.09 -New Features: -- IFF_RUNNING support (link status) -- New ProcFs entries -Problems fixed: -- too long counters -- too short counters -- Kernel error compilation -Known limitations: -- None - -VERSION 4.06 (In-Kernel version) -Problems fixed: -- MTU init problems - -VERSION 4.04 -Problems fixed: -- removed VLAN error messages - -VERSION 4.02 (In-Kernel version) -New Features: -- Add Kernel 2.4 changes -Known limitations: -- None - -VERSION 4.01 (In-Kernel version) -Problems fixed: -- Full statistics support for DualNet mode -Known limitations: -- None - -VERSION 4.00 (In-Kernel version) -Problems fixed: -- Memory leak found -New Features: -- Proc filesystem integration -- DualNet functionality integrated -- Rlmt networks added -Known limitations: -- statistics partially incorrect in DualNet mode - -VERSION 3.04 (In-Kernel version) -Problems fixed: -- Driver start failed on UltraSPARC -- Rx checksum calculation for big endian machines did not work -- Jumbo frames were counted as 
input-errors in netstat - -VERSION 3.03 (Standalone version) -Problems fixed: -- Compilation did not find script "printver.sh" if "." not in PATH -Known limitations: -- None - -VERSION 3.02 (In-Kernel version) -Problems fixed: -- None -New Features: -- Integration in Linux kernel source (2.2.14 and 2.3.29) -Known limitations: -- None - -VERSION 3.01 -Problems fixed: -- None -New Features: -- Full source release -Known limitations: -- None - -VERSION 3.00 -Problems fixed: -- None -New Features: -- Support for 1000Base-T adapters (SK-9821 and SK-9822) -Known limitations: -- None ***End of Readme File*** diff -Naurp linux-2.4.20-wolk4.8-fullkernel/Documentation/sysrq.txt linux-2.4.20-wolk4.9-fullkernel/Documentation/sysrq.txt --- linux-2.4.20-wolk4.8-fullkernel/Documentation/sysrq.txt 2002-08-03 02:39:42.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/Documentation/sysrq.txt 2003-08-25 23:41:21.000000000 +0200 @@ -36,6 +36,10 @@ On PowerPC - Press 'ALT - Print Screen ( On other - If you know of the key combos for other architectures, please let me know so I can add them to this section. +On all - write a character to /proc/sysrq-trigger. eg: + + echo t > /proc/sysrq-trigger + * What are the 'command' keys? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'r' - Turns off keyboard raw mode and sets it to XLATE. 
diff -Naurp linux-2.4.20-wolk4.8-fullkernel/MAINTAINERS linux-2.4.20-wolk4.9-fullkernel/MAINTAINERS --- linux-2.4.20-wolk4.8-fullkernel/MAINTAINERS 2003-08-25 18:27:01.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/MAINTAINERS 2003-08-25 20:35:28.000000000 +0200 @@ -668,8 +668,8 @@ W: http://www.nyx.net/~arobinso S: Maintained HFS FILESYSTEM -P: Oliver Neukum -M: oliver@neukum.org +P: Roman Zippel +M: zippel@linux-m68k.org L: linux-kernel@vger.kernel.org S: Maintained diff -Naurp linux-2.4.20-wolk4.8-fullkernel/Makefile linux-2.4.20-wolk4.9-fullkernel/Makefile --- linux-2.4.20-wolk4.8-fullkernel/Makefile 2003-08-25 18:27:12.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/Makefile 2003-08-25 20:36:20.000000000 +0200 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 4 SUBLEVEL = 20 -EXTRAVERSION = -wolk4.8s +EXTRAVERSION = -wolk4.9s KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/Rules.make linux-2.4.20-wolk4.9-fullkernel/Rules.make --- linux-2.4.20-wolk4.8-fullkernel/Rules.make 2003-08-25 18:24:30.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/Rules.make 2003-08-25 20:36:21.000000000 +0200 @@ -312,10 +312,6 @@ ifneq ($(wildcard .depend),) include .depend endif -ifneq ($(wildcard $(TOPDIR)/.hdepend),) -include $(TOPDIR)/.hdepend -endif - # # Find files whose flags have changed and force recompilation. 
# For safety, this works in the converse direction: diff -Naurp linux-2.4.20-wolk4.8-fullkernel/VERSION linux-2.4.20-wolk4.9-fullkernel/VERSION --- linux-2.4.20-wolk4.8-fullkernel/VERSION 2003-08-25 18:27:12.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/VERSION 2003-08-25 20:36:17.000000000 +0200 @@ -1 +1 @@ -WOLK v4.8s "Server Edition" FINAL, based on 2.4.20 +WOLK v4.9s "Server Edition" FINAL, based on 2.4.20 diff -Naurp linux-2.4.20-wolk4.8-fullkernel/WOLK-CHANGELOG linux-2.4.20-wolk4.9-fullkernel/WOLK-CHANGELOG --- linux-2.4.20-wolk4.8-fullkernel/WOLK-CHANGELOG 2003-08-25 18:27:12.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/WOLK-CHANGELOG 2003-08-28 13:03:26.000000000 +0200 @@ -1,3 +1,38 @@ +Changelog from v4.8s -> v4.9s +----------------------------- +o added: Dazuko v1.2.1 +o added: missing iSCSI Configure help entries and informations +o added: sysctl to control ipfrag_secret_interval +o added: WEB-DAV Linux File System support (davfs2) v0.2.4 +o added: Application Layer 7 Packet Classifier v0.1.4 +o fixed: hfsplus unresolved symbols +o fixed: rsbac v1.2.2 compilation errors :-( +o fixed: Mhwahahhaha! 
A missing 'generic_unplug_device' in IDE code +o fixed: irq/bh races +o fixed: two missing Intel x86 cache defines (now Pentium-M should be ok) +o fixed: use Jenkins hash for fragment reassembly handling +o fixed: lru queue for ip_fragment evictor +o fixed: hardcoded vmalloc reserve size: now we have a boot parameter + See: Documentation/kernel-parameters.txt : vm_reserve +o fixed: RMAP: zeromap_pmd_range +o fixed: RMAP: treat database shared memory segments with the + same swapout priority as anonymous pages, this helps + database performance under some loads +o fixed: RMAP: do all page->flags updates atomically, to avoid + race conditions +o fixed: some bogus 'file->f_flags' +o fixed: more unshare_files() fixes +o fixed: speedup 'make dep' again a bit +o fixed: loop handling of sector size ioctl +o fixed: asm constraint bug in arch/i386/kernel/pci-pc.c +o updated: IBM ServeRAID v6.10.24 +o updated: Broadcom BCM4400 driver v2.0.5 +o updated: Broadcom BCM5700 driver v6.2.17 +o updated: SysKonnect SK-98xx driver v6.17 +o updated: iSCSI support (SCSI-over-Network) v3.4.0.3 +o updated: XFS v1.3.0 Final + + Changelog from v4.7s -> v4.8s ----------------------------- o fixed: new i2c merge broke matroxfb, tvmixer, bttv and DXR3 @@ -922,7 +957,7 @@ o added: SCSI: Tekram DC395/U/UW an o added: SCSI: Single Driver Qlogic FC/SCSI support o added: SCSI: Qlogic QLA 2XXX v6 FC SCSI support o added: SCSI: Adaptec AIC79xx support -o added: SCSI: iSCSI support (scsi-over-network) +o added: SCSI: iSCSI support (SCSI-over-Network) v3.1.0.3 o added: FTP file system support o added: ALSA v.0.9.0-rc6 (yezz, you wanted it no? ;) o fixed: Low-Latency vs. 
ext2|ext3|reiserfs|$whatever fs segfault bug diff -Naurp linux-2.4.20-wolk4.8-fullkernel/WOLK-README linux-2.4.20-wolk4.9-fullkernel/WOLK-README --- linux-2.4.20-wolk4.8-fullkernel/WOLK-README 2003-08-25 18:27:12.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/WOLK-README 2003-08-25 17:53:33.000000000 +0200 @@ -1,4 +1,4 @@ -Kernel - patched - WOLK v4.8s - Base: Linux kernel 2.4.20 +Kernel - patched - WOLK v4.9s - Base: Linux kernel 2.4.20 located at http://sf.net/projects/wolk by Marc-Christian Petersen -------------------------------------------------------------------------- diff -Naurp linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/bluesmoke.c linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/bluesmoke.c --- linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/bluesmoke.c 2003-08-25 18:26:39.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/bluesmoke.c 2003-08-29 12:07:38.000000000 +0200 @@ -190,7 +190,7 @@ static void __init intel_mcheck_init(str wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); } set_in_cr4(X86_CR4_MCE); - printk(KERN_INFO "CPU#%d: Intel machine check reporting enabled on CPU#%d.\n", + printk(KERN_INFO "CPU#%d: Intel machine check reporting enabled.\n", smp_processor_id()); done=1; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/irq.c linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/irq.c --- linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/irq.c 2003-08-25 18:24:31.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/irq.c 2003-08-25 20:35:57.000000000 +0200 @@ -275,8 +275,7 @@ static inline void wait_on_irq(int cpu) * already executing in one.. */ if (!irqs_running()) - if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock)) - break; + break; /* Duh, we have to loop. 
Release the lock to avoid deadlocks */ clear_bit(0,&global_irq_lock); @@ -312,6 +311,7 @@ static inline void wait_on_irq(int cpu) */ void synchronize_irq(void) { + smp_mb(); /* Sync with irq_enter() */ if (irqs_running()) { /* Stupid approach */ cli(); @@ -343,6 +343,10 @@ static inline void get_irqlock(int cpu) */ wait_on_irq(cpu); + /* bh is disallowed inside irqlock. */ + if (!local_bh_count(cpu)) + spin_lock(&global_bh_lock); + /* * Ok, finally.. */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/nmi.c linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/nmi.c --- linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/nmi.c 2003-08-25 18:24:31.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/nmi.c 2003-08-29 12:07:39.000000000 +0200 @@ -167,9 +167,15 @@ static void disable_apic_nmi_watchdog(vo case X86_VENDOR_INTEL: switch (boot_cpu_data.x86) { case 6: + if (boot_cpu_data.x86_model > 0xd) + break; + wrmsr(MSR_P6_EVNTSEL0, 0, 0); break; case 15: + if (boot_cpu_data.x86_model > 0x3) + break; + wrmsr(MSR_P4_IQ_CCCR0, 0, 0); wrmsr(MSR_P4_CRU_ESCR0, 0, 0); break; @@ -305,9 +311,19 @@ void __pminit setup_apic_nmi_watchdog (v case X86_VENDOR_INTEL: switch (boot_cpu_data.x86) { case 6: + if (boot_cpu_data.x86_model > 0xd) { + printk (KERN_INFO "Performance Counter support for this CPU model not yet added.\n"); + return; + } + setup_p6_watchdog(); break; case 15: + if (boot_cpu_data.x86_model > 0x3) { + printk (KERN_INFO "Performance Counter support for this CPU model not yet added.\n"); + return; + } + if (!setup_p4_watchdog()) return; break; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/pci-pc.c linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/pci-pc.c --- linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/pci-pc.c 2003-08-25 18:26:39.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/pci-pc.c 2003-08-26 17:59:06.000000000 +0200 @@ -1025,7 +1025,8 @@ struct irq_routing_table * __devinit pci "xor %%ah, %%ah\n" "1:" 
: "=a" (ret), - "=b" (map) + "=b" (map), + "+m" (opt) : "0" (PCIBIOS_GET_ROUTING_OPTIONS), "1" (0), "D" ((long) &opt), diff -Naurp linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/setup.c linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/setup.c --- linux-2.4.20-wolk4.8-fullkernel/arch/i386/kernel/setup.c 2003-08-25 18:27:01.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/arch/i386/kernel/setup.c 2003-08-29 12:07:39.000000000 +0200 @@ -147,6 +147,10 @@ unsigned int mca_pentium_flag; /* For PCI or other memory-mapped resources */ unsigned long pci_mem_start = 0x10000000; +/* reserved mapping space for vmalloc and ioremap */ +unsigned long vmalloc_reserve = __VMALLOC_RESERVE_DEFAULT; +static unsigned long vm_reserve __initdata = -1; + /* user-defined highmem size */ static unsigned int highmem_pages __initdata = -1; @@ -875,6 +879,14 @@ static void __init parse_cmdline_early ( */ else if (!memcmp(from, "highmem=", 8)) highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT; + /* + * vm_reserve=size forces to reserve 'size' bytes for vmalloc and + * ioremap areas minimum is 32 MB maximum is 800 MB + * the default without vm_reserve depends on the total amount of + * memory the minimum default is 128 MB. 
+ */ + else if (!memcmp(from, "vm_reserve=", 11)) + vm_reserve = memparse(from+11, &from); nextchar: c = *(from++); if (!c) @@ -1052,7 +1064,28 @@ static unsigned long __init setup_memory start_pfn = PFN_UP(__pa(&_end)); find_max_pfn(); + + /* + * calculate the default size of vmalloc/ioremap area + * overwrite with the value of the vm_reserve= option + * if set + */ + if (max_pfn >= PFN_UP(KERNEL_MAXMEM - __VMALLOC_RESERVE_DEFAULT)) + vmalloc_reserve = __VMALLOC_RESERVE_DEFAULT; + else + vmalloc_reserve = KERNEL_MAXMEM - PFN_PHYS(max_pfn); + if (vm_reserve != -1) { + if (vm_reserve < __VMALLOC_RESERVE_MIN) + vm_reserve = __VMALLOC_RESERVE_MIN; + if (vm_reserve > __VMALLOC_RESERVE_MAX) + vm_reserve = __VMALLOC_RESERVE_MAX; + vmalloc_reserve = vm_reserve; + } + + printk(KERN_NOTICE "%ldMB vmalloc/ioremap area available.\n", + vmalloc_reserve>>20); + max_low_pfn = find_max_low_pfn(); #ifdef CONFIG_HIGHMEM @@ -2411,6 +2444,8 @@ static struct _cache_table cache_table[] { 0x83, LVL_2, 512 }, { 0x84, LVL_2, 1024 }, { 0x85, LVL_2, 2048 }, + { 0x86, LVL_2, 512 }, + { 0x87, LVL_2, 1024 }, { 0x00, 0, 0} }; @@ -2928,6 +2963,10 @@ void __init identify_cpu(struct cpuinfo_ c->x86_model += ((tfms >> 16) & 0xF) << 4; } c->x86_mask = tfms & 15; + if (c->x86 == 0xf) { + c->x86 += (tfms >> 20) & 0xff; + c->x86_model += ((tfms >> 16) & 0xf) << 4; + } } else { /* Have CPUID level 0 only - unheard of */ c->x86 = 4; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/block/ll_rw_blk.c linux-2.4.20-wolk4.9-fullkernel/drivers/block/ll_rw_blk.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/block/ll_rw_blk.c 2003-08-25 18:27:02.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/block/ll_rw_blk.c 2003-08-26 17:59:06.000000000 +0200 @@ -446,11 +446,6 @@ static inline void __generic_unplug_devi q->plugged = 0; if (!list_empty(&q->queue_head)) { - next = blkdev_entry_next_request(&q->queue_head); - - if (next == q->last_merge) - q->last_merge = NULL; - /* we don't want merges later on to 
come in * and significantly increase the amount of * work during an unplug, it can lead to high @@ -463,6 +458,12 @@ static inline void __generic_unplug_devi rq = blkdev_entry_prev_request(&q->queue_head), rq->elevator_sequence = 0; } + + next = blkdev_entry_next_request(&q->queue_head); + + if (next == q->last_merge) + q->last_merge = NULL; + q->request_fn(q); } } @@ -700,8 +701,8 @@ void blk_init_queue(request_queue_t * q, q->plugged = 0; q->full = 0; q->can_throttle = 0; - q->last_merge = NULL; q->low_latency = 0; + q->last_merge = NULL; /* * These booleans describe the queue properties. We set the @@ -1199,7 +1200,7 @@ static int __make_request(request_queue_ rw_ahead = 0; /* normal case; gets changed below for READA */ switch (rw) { case READA: -#ifndef CONFIG_SMP /* bread() misinterprets failed READA attempts as IO errors on SMP */ +#if 0 /* bread() misinterprets failed READA attempts as IO errors on SMP */ rw_ahead = 1; #endif rw = READ; /* drop into READ */ @@ -1320,7 +1321,7 @@ get_rq: * See description above __get_request_wait() */ if (rw_ahead) { - if (q->rq.count < q->batch_requests || blk_oversized_queue_batch(q)) { + if (q->full || q->rq.count < q->batch_requests || blk_oversized_queue_batch(q)) { spin_unlock_irq(q->queue_lock); goto end_io; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/block/loop.c linux-2.4.20-wolk4.9-fullkernel/drivers/block/loop.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/block/loop.c 2003-08-25 18:24:37.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/block/loop.c 2003-08-25 20:35:57.000000000 +0200 @@ -1151,6 +1151,7 @@ static int lo_ioctl(struct inode * inode break; case BLKBSZGET: case BLKBSZSET: + case BLKSSZGET: err = blk_ioctl(inode->i_rdev, cmd, arg); break; default: diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/char/Config.in linux-2.4.20-wolk4.9-fullkernel/drivers/char/Config.in --- linux-2.4.20-wolk4.8-fullkernel/drivers/char/Config.in 2003-08-25 18:27:02.000000000 +0200 +++ 
linux-2.4.20-wolk4.9-fullkernel/drivers/char/Config.in 2003-08-26 17:59:06.000000000 +0200 @@ -344,4 +344,6 @@ fi source drivers/char/ibmasm/Config.in +dep_tristate 'Dazuko support' CONFIG_DAZUKO m + endmenu diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/char/Makefile linux-2.4.20-wolk4.9-fullkernel/drivers/char/Makefile --- linux-2.4.20-wolk4.8-fullkernel/drivers/char/Makefile 2003-08-25 18:27:02.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/char/Makefile 2003-08-25 20:35:57.000000000 +0200 @@ -217,6 +217,7 @@ subdir-$(CONFIG_IBMASM) += ibmasm obj-$(CONFIG_ATIXL_BUSMOUSE) += atixlmouse.o obj-$(CONFIG_LOGIBUSMOUSE) += logibusmouse.o obj-$(CONFIG_PRINTER) += lp.o +obj-$(CONFIG_DAZUKO) += dazuko.o ifeq ($(CONFIG_INPUT),y) obj-y += joystick/js.o diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/char/dazuko.c linux-2.4.20-wolk4.9-fullkernel/drivers/char/dazuko.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/char/dazuko.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/char/dazuko.c 2003-08-25 20:35:57.000000000 +0200 @@ -0,0 +1,2578 @@ +/* Dazuko. Allow file access control for 3rd-party applications. + Copyright (C) 2002,2003 H+BEDV Datentechnik GmbH + Written by Martin Ritter + John Ogness + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+*/ + +#if CONFIG_MODVERSIONS==1 +#define MODVERSIONS +#include +#endif + +#include +#include + +#ifdef MODULE +#include +#endif + +#ifndef KERNEL_VERSION +#define KERNEL_VERSION(a,b,c) ((a)*65536+(b)*256+(c)) +#endif + +#ifdef DEBUG +#define DPRINTK(x) printk x +#else +#define DPRINTK(x) +#endif + +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_DEVFS_FS +#include +#endif + +#ifdef CONFIG_SMP +#ifndef __SMP__ +#define __SMP__ +#endif +#endif + +#ifdef __SMP__ +#include +#endif +#include + +#define NUM_SLOT_LISTS 5 +#define NUM_SLOTS 25 + +#define SCAN_ON_OPEN (access_mask & ON_OPEN) +#define SCAN_ON_CLOSE (access_mask & ON_CLOSE) +#define SCAN_ON_EXEC (access_mask & ON_EXEC) +#define SCAN_ON_CLOSE_MODIFIED (access_mask & ON_CLOSE_MODIFIED) + +#define FREE 0 /* the daemon is not ready */ +#define READY 1 /* a daemon waits for something to do */ +#define WAITING 2 /* a request is waiting to be served */ +#define WORKING 3 /* daemon is currently in action */ +#define DONE 4 /* daemon response is available */ + +#define BROKEN 5 /* invalid state (interrupt from ready,waiting) */ + +#ifdef HIDDEN_SCT +void **sys_call_table; +void **get_sct(); +extern asmlinkage long sys_close(unsigned int fd); +#else +extern void *sys_call_table[]; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) +int dazuko_device_read(struct file *file, char *buffer, size_t length, loff_t *pos); +#else +ssize_t dazuko_device_read(struct file *file, char *buffer, size_t length, loff_t *pos); +#endif +int dazuko_device_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long param); +int dazuko_device_open(struct inode *inode, struct file *file); +int dazuko_device_release(struct inode *inode, struct file *file); + +struct path_t +{ + /* A node in a linked list of paths. Used + * for the include and exclude lists. 
*/ + + struct path_t *next; + int len; + char path[1]; /* this MUST be at the end of the struct */ +}; + +struct hash_t +{ + /* A node in a linked list of filenames. + * Used for the list of files to be + * scanned on close. */ + + struct hash_t *next; + struct file *file; + int dirty; + int namelen; + char name[1]; /* this MUST be at the end of the struct */ +}; + +struct slot_t +{ + /* A representation of a daemon. It holds + * all information about the daemon, the + * file that is scanned, and the state of + * the scanning process. */ + + int id; + int pid; /* pid of our daemon */ + int state; + int response; + int event; + int o_flags; + int o_mode; + int kuid; /* user id of the kernel process */ + int kpid; /* process id of the kernel process */ + int filenamelength; /* not including terminator */ + char *filename; + struct semaphore mutex; +}; + +struct slot_list_container_t +{ + struct slot_list_t *slot_list; + struct semaphore mutex; +}; + +struct slot_list_t +{ + atomic_t use_count; + struct slot_t slots[NUM_SLOTS]; + char reg_name[1]; /* this MUST be at the end of the struct */ +}; + +struct dazuko_file_struct +{ + /* A structure designed for simple and + * intelligent memory management when + * doing filename lookups in the kernel. */ + + const char *user_filename; /* userspace filename */ + int should_scan; /* already know we need to scan? 
*/ + int filename_length; /* length of filename */ + char *filename; /* kernelspace filename */ + int putname_filename; /* flag to clean up filename */ + int full_filename_length; /* length of filename */ + char *full_filename; /* kernelspace filename with full path */ + int free_full_filename; /* flag to clean up full_filename */ + struct dentry *dentry; /* used to get inode */ + int dput_dentry; /* flag to clean up dentry */ + char *buffer; /* used to get full path */ + int free_page_buffer; /* flag to clean up buffer */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + struct nameidata nd; /* used to get full path */ + int path_release_nd; /* flag to clean up nd */ + struct vfsmount *vfsmount; /* used to get full path */ + int mntput_vfsmount; /* flag to clean up vfsmount */ +#endif +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) +static struct vfsmount *orig_rootmnt = NULL; +#endif + +static struct dentry *orig_root = NULL; +static char access_mask = 7; +static struct slot_list_container_t slot_lists[NUM_SLOT_LISTS]; +static struct path_t *incl_paths = NULL; +static struct path_t *excl_paths = NULL; +static struct hash_t *hash = NULL; +static int dev_major = -1; +static rwlock_t lock_hash; +static rwlock_t lock_lists; +static atomic_t active; + +#if defined(ON_OPEN_SUPPORT) || defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) +static asmlinkage long (*original_sys_open)(const char *filename, int flags, int mode); +#endif +#if defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) +static asmlinkage long (*original_sys_close)(unsigned int fd); +#endif +#ifdef ON_CLOSE_MODIFIED_SUPPORT +static asmlinkage ssize_t (*original_sys_write)(unsigned int fd, char *buf, unsigned int count); +#endif +#ifdef ON_EXEC_SUPPORT +static asmlinkage int (*original_sys_execve)(struct pt_regs regs); +#endif + +static struct file_operations fops = { + read: dazuko_device_read, /* read */ + ioctl: dazuko_device_ioctl, /* ioctl */ + open: 
dazuko_device_open, /* open */ + release: dazuko_device_release, /* release */ + }; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + +static DECLARE_WAIT_QUEUE_HEAD(wait_kernel_waiting_for_free_slot); +static DECLARE_WAIT_QUEUE_HEAD(wait_daemon_waiting_for_work); +static DECLARE_WAIT_QUEUE_HEAD(wait_kernel_waiting_while_daemon_works); +static DECLARE_WAIT_QUEUE_HEAD(wait_daemon_waiting_for_free); + +#else + +static struct wait_queue *wait_kernel_waiting_for_free_slot; +static struct wait_queue *wait_daemon_waiting_for_work; +static struct wait_queue *wait_kernel_waiting_while_daemon_works; +static struct wait_queue *wait_daemon_waiting_for_free; + +/* The following code is taken directly from Linux in the file: + include/linux/sched.h */ + +#ifndef __wait_event_interruptible +#define __wait_event_interruptible(wq, condition, ret) \ +do { \ + struct wait_queue __wait; \ + \ + __wait.task = current; \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + current->state = TASK_INTERRUPTIBLE; \ + mb(); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + schedule(); \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) +#endif + +#ifndef wait_event_interruptible +#define wait_event_interruptible(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __wait_event_interruptible(wq, condition, __ret);\ + __ret; \ +}) +#endif + +#endif + +static inline void dazuko_bzero(void *p, int len) +{ + /* "zero out" len bytes starting with p */ + + char *ptr = (char *)p; + + while (len--) + *ptr++ = 0; +} + +static inline int dazuko_slot_state(struct slot_t *s) +{ + int state; + +/* DOWN */ + if (down_interruptible(&(s->mutex)) != 0) + return -EINTR; + + state = s->state; + + up(&(s->mutex)); +/* UP */ + + return state; +} + +static inline int __dazuko_change_slot_state(struct slot_t *s, int from_state, int to_state) +{ + /* Make a predicted state transition. 
We fail if it + * is an unpredicted change. We can ALWAYS go to the + * to_state if it is the same as from_state. Not SMP safe! */ + + if (to_state != from_state) + { + /* make sure this is a predicted transition and there + * is a daemon on this slot (pid != 0)*/ + if (s->state != from_state || !s->pid) + return 0; + } + + s->state = to_state; + + /* handle appropriate wake_up's for basic + * state changes */ + + if (to_state == READY) + { + wake_up(&wait_kernel_waiting_for_free_slot); + } + else if (to_state == FREE) + { + wake_up(&wait_kernel_waiting_while_daemon_works); + wake_up(&wait_daemon_waiting_for_free); + } + + return 1; +} + +static int dazuko_change_slot_state(struct slot_t *s, int from_state, int to_state, int release) +{ + /* SMP safe version of __dazuko_change_slot_state(). + * This should only be used if we haven't + * already aquired slot.mutex. Use this function + * with CAUTION, since the mutex may or may not + * be released depending on the return value AND + * on the value of the "release" argument. */ + + int success; + + /* if we are interrupted, report the state as unpredicted */ +/* DOWN */ + if (down_interruptible(&(s->mutex)) != 0) + return 0; + + success = __dazuko_change_slot_state(s, from_state, to_state); + + /* the mutex is released if the state change was + * unpredicted or if the called wants it released */ + if (!success || release) + up(&(s->mutex)); +/* UP */ + return success; +} + +static struct slot_t * _dazuko_find_slot(int pid, int release, struct slot_list_t *sl) +{ + /* Find the first slot with the same given + * pid number. SMP safe. Use this function + * with CAUTION, since the mutex may or may not + * be released depending on the return value AND + * on the value of the "release" argument. 
*/ + + int i; + struct slot_t *s = NULL; + + if (sl == NULL) + { + printk("dazuko: invalid slot_list given (bug!)\n"); + return NULL; + } + + for (i=0 ; islots[i]); +/* DOWN */ + /* if we are interrupted, we say that no + * slot was found */ + if (down_interruptible(&(s->mutex)) != 0) + return NULL; + + if (s->pid == pid) + { + /* we release the mutex only if the + * called wanted us to */ + if (release) + up(&(s->mutex)); +/* UP */ + return s; + } + + up(&(s->mutex)); +/* UP */ + } + + return NULL; +} + +static struct slot_t * dazuko_find_slot_and_slotlist(int pid, int release, struct slot_list_t *slist, struct slot_list_t **sl_result) +{ + struct slot_t *s; + int i; + struct slot_list_t *sl; + + if (slist == NULL) + { + for (i=0 ; ipath, fs_path, fs_len) != 0) + { + kfree(newitem); + return -EFAULT; + } + + newitem->path[fs_len] = 0; + + while (newitem->path[fs_len] == 0) + { + fs_len--; + if (fs_len == 0) + break; + } + + newitem->len = fs_len; + + /* we want only absolute paths */ + if (newitem->path[0] != '/') + { + kfree(newitem); + return -EINVAL; + } + + /* check if this path already exists in the list */ + for (tmp=*list ; tmp ; tmp=tmp->next) + { + if (newitem->len == tmp->len) + { + if (memcmp(newitem->path, tmp->path, tmp->len) == 0) + { + /* we already have this path */ + + kfree(newitem); + + return 0; + } + } + } + + DPRINTK(("dazuko: adding %s %s\n", (list == &incl_paths) ? "incl" : "excl", newitem->path)); + + /* add path_t to head of linked list */ +/* LOCK */ + write_lock(&lock_lists); + newitem->next = *list; + *list = newitem; + write_unlock(&lock_lists); +/* UNLOCK */ + + return 0; +} + +static void dazuko_remove_all_hash(void) +{ + /* Empty the hash linked list. 
*/ + + struct hash_t *tmp; + +/* LOCK */ + write_lock(&lock_hash); + while (hash) + { + tmp = hash; + hash = hash->next; + + kfree(tmp); + } + write_unlock(&lock_hash); +/* UNLOCK */ +} + +static void dazuko_remove_all_paths(void) +{ + /* Empty both include and exclude path_t + * linked lists. */ + + struct path_t *tmp; + +/* LOCK */ + write_lock(&lock_lists); + + /* empty include paths list */ + while (incl_paths) + { + tmp = incl_paths; + incl_paths = incl_paths->next; + + DPRINTK(("dazuko: removing incl %s\n", tmp->path)); + + kfree(tmp); + } + + /* empty exclude paths list */ + while (excl_paths) + { + tmp = excl_paths; + excl_paths = excl_paths->next; + + DPRINTK(("dazuko: removing excl %s\n", tmp->path)); + + kfree(tmp); + } + + write_unlock(&lock_lists); +/* UNLOCK */ +} + +int dazuko_device_release(struct inode *inode, struct file *file) +{ + /* We unregister the daemon by finding the + * slot with the same slot->pid as the the + * current process id, the daemon. */ + + struct slot_t *s; + struct slot_list_t *sl; + + DPRINTK(("dazuko: dazuko_device_release() [%d]\n", current->pid)); + + /* non-root daemons are ignored */ + if (current->uid != 0) + return 0; + + /* find our slot and hold the mutex + * if we find it */ +/* DOWN? */ + s = dazuko_find_slot_and_slotlist(current->pid, 0, NULL, &sl); + + if (s == NULL) + { + printk("dazuko: daemon %d had no slot (possible bug)\n", current->pid); + return 0; + } + +/* DOWN */ + + /* clearing the pid makes the slot available */ + s->pid = 0; + + /* reset slot state */ + __dazuko_change_slot_state(s, FREE, FREE); + + atomic_dec(&(sl->use_count)); + + up(&(s->mutex)); +/* UP */ + + /* active should always be positive here, but + * let's check just to be sure. 
;) */ + if (atomic_read(&active) > 0) + { + /* active and the kernel usage counter + * should always reflect how many daemons + * are active */ + +#ifdef MODULE + MOD_DEC_USE_COUNT; +#endif + atomic_dec(&active); + } + else + { + printk("dazuko: active count error (possible bug)\n"); + } + + /* Wake up any kernel processes that are + * waiting for an available slot. Remove + * all the include and exclude paths + * if there are no more daemons */ + + if (atomic_read(&active) == 0) + { + /* clear out include and exclude paths */ + /* are we sure we want to do this? */ + dazuko_remove_all_paths(); + + /* clear out hash nodes */ + dazuko_remove_all_hash(); + } + + wake_up(&wait_kernel_waiting_for_free_slot); + wake_up(&wait_kernel_waiting_while_daemon_works); + + return 0; +} + +int dazuko_device_open(struct inode *inode, struct file *file) +{ + DPRINTK(("dazuko: dazuko_device_open() [%d]\n", current->pid)); + + return 0; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) +int dazuko_device_read(struct file *file, char *buffer, size_t length, loff_t *pos) +#else +ssize_t dazuko_device_read(struct file *file, char *buffer, size_t length, loff_t *pos) +#endif +{ + /* Reading from the dazuko device simply + * returns the device number. This is to + * help out the daemon. 
*/ + + char tmp[20]; + size_t dev_major_len; + + DPRINTK(("dazuko: dazuko_device_read() [%d]\n", current->pid)); + + /* non-root daemons are ignored */ + if (current->uid != 0) + return 0; + + if (dev_major < 0) + return -ENODEV; + + /* print dev_major to a string + * and get length (with terminator) */ + dazuko_bzero(tmp, sizeof(tmp)); + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,8) + dev_major_len = snprintf(tmp, sizeof(tmp), "%d", dev_major) + 1; + #else + dev_major_len = sprintf(tmp, "%d", dev_major) + 1; + #endif + + if (tmp[sizeof(tmp)-1] != 0) + { + printk("dazuko: failing device_read, device number overflow for dameon %d (dev_major=%d)\n", current->pid, dev_major); + return -EFAULT; + } + + if (length < dev_major_len) + return -EINVAL; + + /* copy dev_major string to userspace */ + if (copy_to_user(buffer, tmp, dev_major_len) != 0) + return -EFAULT; + + return dev_major_len; +} + +static int dazuko_register_daemon(const char *reg_name, int string_length) +{ + const char *p1; + char *p2; + struct slot_t *s; + struct slot_list_t *sl; + int i; + + DPRINTK(("dazuko: dazuko_register_daemon() [%d]\n", current->pid)); + + if (reg_name == NULL) + return -EPERM; + + /* Find the slot_list with the matching name. */ + + for (i=0 ; ireg_name; + + while (*p1 == *p2) + { + if (*p1 == 0) + break; + + p1++; + p2++; + } + + if (*p1 == *p2) + break; + } + } + + if (i == NUM_SLOT_LISTS) + { + /* There is no slot_list with this name. We + * need to make one. 
*/ + + sl = (struct slot_list_t *)kmalloc(sizeof(struct slot_list_t) + string_length, GFP_KERNEL); + if (!sl) + return -EFAULT; + + dazuko_bzero(sl, sizeof(struct slot_list_t) + string_length); + atomic_set(&(sl->use_count), 0); + + p1 = reg_name; + p2 = sl->reg_name; + + while (*p1) + { + *p2 = *p1; + + p1++; + p2++; + } + *p2 = 0; + + /* give each slot a unique id */ + for (i=0 ; islots[i].id = i; + #ifdef init_MUTEX + init_MUTEX(&(sl->slots[i].mutex)); + #else + sema_init(&(sl->slots[i].mutex), 1); + #endif + } + + /* we need to find an empty slot */ + for (i=0 ; ipid = current->pid; + + atomic_inc(&(sl->use_count)); + + /* the daemon is registered, but not yet + * ready to receive files */ + __dazuko_change_slot_state(s, FREE, FREE); + + DPRINTK(("dazuko: slot[%d] assigned to daemon %d\n", s->id, current->pid)); + + up(&(s->mutex)); +/* UP */ + + return 0; +} + +int dazuko_device_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long param) +{ + /* A daemon uses this function to interact with + * the kernel. A daemon can set scanning parameters, + * give scanning response, and get filenames to scan. */ + + int error; + int len; + int opt; + struct access_t *u_dazuko; + struct slot_t *s; + char *reg_name; + int i; + + /* non-root daemons are ignored */ + if (current->uid != 0) + return 0; + + /* A macro is used to translate the cmd argument. + * This keeps it compatible across various Linux + * platforms. */ + switch (_IOC_NR(cmd)) + { + case IOCTL_GET_AN_ACCESS: + /* The daemon is requesting a filename of a file + * to scan. This code will wait until a filename + * is available, or until we should be killed. 
+ * (killing is done if any errors occur as well + * as when the user kills us) */ + + error = verify_area(VERIFY_WRITE, (void *)param, sizeof(struct access_t)); + if (error) + return error; + + u_dazuko = (struct access_t *)param; + +tryagain: + /* find our slot */ + s = dazuko_find_slot(current->pid, 1, NULL); + + if (s == NULL) + { + i = dazuko_register_daemon("_COMPAT", 7); + if (i != 0) + { + printk("dazuko: unregistered daemon %d attempted to get access\n", current->pid); + return -ESRCH; + } + + s = dazuko_find_slot(current->pid, 1, NULL); + if (s == NULL) + { + printk("dazuko: unregistered daemon %d attempted to get access\n", current->pid); + return -ESRCH; + } + + printk("dazuko: warning: daemon %d is using a deprecated protocol\n", current->pid); + } + + /* the daemon is now ready to receive a file */ + dazuko_change_slot_state(s, READY, READY, 1); + + if (wait_event_interruptible(wait_daemon_waiting_for_work, dazuko_slot_state(s) != READY) != 0) + { + /* The user has issued an interrupt. + * Return an error. The daemon should + * unregister itself. */ + + DPRINTK(("dazuko: daemon %d killed while waiting for work\n", current->pid)); + + if (dazuko_change_slot_state(s, READY, BROKEN, 1) || dazuko_change_slot_state(s, WAITING, BROKEN, 1)) + { + wake_up(&wait_kernel_waiting_for_free_slot); + wake_up(&wait_kernel_waiting_while_daemon_works); + } + + return -EINTR; + } + + /* slot SHOULD now be in WAITING state */ + + /* we will be writing data to the slot, so + * we need to lock it */ +/* DOWN */ + if (down_interruptible(&(s->mutex)) != 0) + { + return -EINTR; + } + + if (!__dazuko_change_slot_state(s, WAITING, WORKING)) + { + /* State transition error. Try again., */ + + up(&(s->mutex)); +/* UP */ + goto tryagain; + } + + /* Slot IS in WORKING state. Copy all the + * necessary information to userspace structure. 
*/ + + if (copy_to_user(u_dazuko->filename, s->filename, s->filenamelength+1) != 0) + { + up(&(s->mutex)); +/* UP */ + return -EFAULT; + } + + if (copy_to_user(&(u_dazuko->event), &(s->event), sizeof(int)) != 0) + { + up(&(s->mutex)); +/* UP */ + return -EFAULT; + } + + if (copy_to_user(&(u_dazuko->o_flags), &(s->o_flags), sizeof(int)) != 0) + { + up(&(s->mutex)); +/* UP */ + return -EFAULT; + } + + if (copy_to_user(&(u_dazuko->o_mode), &(s->o_mode), sizeof(int)) != 0) + { + up(&(s->mutex)); +/* UP */ + return -EFAULT; + } + + if (copy_to_user(&(u_dazuko->uid), &(s->kuid), sizeof(int)) != 0) + { + up(&(s->mutex)); +/* UP */ + return -EFAULT; + } + + if (copy_to_user(&(u_dazuko->pid), &(s->kpid), sizeof(int)) != 0) + { + up(&(s->mutex)); +/* UP */ + return -EFAULT; + } + + up(&(s->mutex)); +/* UP */ + return 0; /* no error */ + + case IOCTL_RETURN_ACCESS: + /* The daemon has finished scanning a file + * and has the response to give. The daemon's + * slot should be in the WORKING state. */ + + error = verify_area(VERIFY_READ, (void *)param, sizeof(struct access_t)); + if (error) + { + return error; + } + + u_dazuko = (struct access_t *)param; + + /* find our slot */ + s = dazuko_find_slot(current->pid, 1, NULL); + + if (s == NULL) + { + /* It appears the kernel isn't interested + * in us or our response. It gave our slot away! */ + + DPRINTK(("dazuko: daemon %d unexpectedly lost slot\n", current->pid)); + + return -EPERM; + } + + /* we will be writing into the slot, so we + * need to lock it */ +/* DOWN */ + if (down_interruptible(&(s->mutex)) != 0) + { + return -EINTR; + } + + if (!__dazuko_change_slot_state(s, WORKING, DONE)) + { + /* The slot is in the wrong state. We will + * assume the kernel has cancelled the file + * access. 
*/ + + DPRINTK(("dazuko: response from daemon %d on slot[%d] not needed\n", current->pid, s->id)); + + up(&(s->mutex)); +/* UP */ + return 0; + } + + /* copy the response into the slot */ + if (copy_from_user(&(s->response), &(u_dazuko->deny), sizeof(int)) != 0) + { + up(&(s->mutex)); +/* UP */ + return -EFAULT; + } + + up(&(s->mutex)); +/* UP */ + + /* wake up any kernel processes that are + * waiting for responses */ + wake_up(&wait_kernel_waiting_while_daemon_works); + + if (wait_event_interruptible(wait_daemon_waiting_for_free, dazuko_slot_state(s) != DONE) != 0) + { + /* The user has issued an interrupt. + * Return an error. The daemon should + * unregister itself. */ + + DPRINTK(("dazuko: daemon %d killed while waiting for response acknowledgement\n", current->pid)); + + return -EINTR; + } + + return 0; + + case IOCTL_SET_OPTION: + /* The daemon wants to set a configuration + * option in the kernel. */ + + error = verify_area(VERIFY_READ, (void *)param, 2*sizeof(int)); + if (error) + return error; + + /* copy option type from userspace */ + if (copy_from_user(&opt, (int *)param, sizeof(int)) != 0) + return -EPERM; + + param += sizeof(int); + + /* copy path length from userspace */ + if (copy_from_user(&len, (int *)param, sizeof(int)) != 0) + return -EPERM; + + /* sanity check */ + if (len < 0 || len > 1024) + return -EPERM; + + param += sizeof(int); + + error = verify_area(VERIFY_READ, (void *)param, len); + if (error) + return error; + + /* make sure we are already registered + * (or that we don't register twice) */ + + /* find our slot */ + s = dazuko_find_slot(current->pid, 1, NULL); + + if (opt == REGISTER) + { + if (s != NULL) + { + /* We are already registered! 
*/ + + printk("dazuko: daemon %d already assigned to slot[%d]\n", current->pid, s->id); + + return -EPERM; + } + } + else + { + if (s == NULL) + { + i = dazuko_register_daemon("_COMPAT", 7); + if (i != 0) + { + printk("dazuko: unregistered daemon %d attempted to get access\n", current->pid); + return -EPERM; + } + + s = dazuko_find_slot(current->pid, 1, NULL); + if (s == NULL) + { + printk("dazuko: unregistered daemon %d attempted to get access\n", current->pid); + return -EPERM; + } + + printk("dazuko: warning: daemon %d is using a deprecated protocol\n", current->pid); + } + } + + /* check option type and take the appropriate action */ + switch (opt) + { + case SET_ACCESS_MASK: + if (copy_from_user(&access_mask, (char *)param, sizeof(char)) != 0) + return -EFAULT; + break; + + case ADD_INCLUDE_PATH: + dazuko_insert_path_fs(&incl_paths, (char *)param, len); + break; + + case ADD_EXCLUDE_PATH: + dazuko_insert_path_fs(&excl_paths, (char *)param, len); + break; + + case REGISTER: + /* We register the daemon by finding an + * unused slot (slot->pid=0) and setting + * the slot->pid to the proccess id of + * the current proccess, the daemon. */ + + reg_name = (char *)kmalloc(len + 1, GFP_KERNEL); + if (!reg_name) + return -EFAULT; + + /* We must copy the reg_name from userspace to kernelspace. 
*/ + + if (copy_from_user(reg_name, (char *)param, len) != 0) + { + kfree(reg_name); + return -EFAULT; + } + + reg_name[len] = 0; + + i = dazuko_register_daemon(reg_name, len); + kfree(reg_name); + + if (i != 0) + return i; + + break; + + case REMOVE_ALL_PATHS: + dazuko_remove_all_paths(); + break; + + default: + printk("dazuko: daemon %d requested unknown set %d (possible bug)\n", current->pid, opt); + break; + } + break; + default: + printk("dazuko: daemon %d requested unknown device_ioctl %d (possible bug)\n", current->pid, _IOC_NR(cmd)); + break; + } + + return 0; +} + +static struct slot_t * dazuko_get_and_hold_ready_slot(struct slot_list_t *sl) +{ + /* This is a simple search to find a + * slot whose state is READY. This means + * it is able to accept work. If a slot + * is found, the slot.mutex is held so + * it can be filled with work by the caller. + * It is the responsibility of the caller + * to RELEASE THE MUTEX. */ + + int i; + struct slot_t *s; + + for (i=0 ; islots[i]); +/* DOWN? */ + if (dazuko_change_slot_state(s, READY, WAITING, 0)) + { +/* DOWN */ + return s; + } + } + + /* we didn't find a slot that is ready for work */ + + return NULL; +} + +static int dazuko_run_daemon_on_slotlist(int event, char *filename, int filenamelength, int o_flags, int o_mode, struct slot_list_t *sl) +{ + /* This is the main function called by the kernel + * to work with a daemon. */ + + int rc; + int pid; + struct slot_t *s; + +begin: + /* we initialize the slot value because + * we cannot guarentee that it will be + * assigned a new value BEFORE !active + * is checked */ + s = NULL; + + /* wait for a slot to become ready */ + if (wait_event_interruptible(wait_kernel_waiting_for_free_slot, ((s = dazuko_get_and_hold_ready_slot(sl)) != NULL) || (atomic_read(&active) == 0) || (atomic_read(&(sl->use_count)) == 0)) != 0) + { + /* The kernel process was killed while + * waiting for a slot to become ready. + * This is fine. 
*/ + + DPRINTK(("dazuko: kernel process %d killed while waiting for free slot\n", current->pid)); + + return -1; /* user interrupted */ + } + + /* Make sure we have a slot. We may have + * gotten past the last wait because we + * are no longer active. */ + + if (s == NULL) + { + /* We were no longer active. We don't + * need to initiate a daemon. This also + * means we never acquired the lock. */ + + return 0; /* allow access */ + } + +/* DOWN */ + + /* the slot is already locked at this point */ + + /* grab the daemon's pid */ + pid = s->pid; + + /* At this point we have a locked slot. It IS + * sitting in the WAITING state, waiting for + * us to give it some work. */ + + /* set up the slot to do work */ + s->filename = filename; + s->event = event; + s->response = 0; + s->kuid = current->uid; + s->kpid = current->pid; + s->o_flags = o_flags; + s->o_mode = o_mode; + s->filenamelength = filenamelength; + + /* we are done modifying the slot */ + up(&(s->mutex)); +/* UP */ + + /* wake up any daemons waiting for work */ + wake_up(&wait_daemon_waiting_for_work); + + /* wait until the daemon is finished with the slot */ + if (wait_event_interruptible(wait_kernel_waiting_while_daemon_works, dazuko_slot_state(s) != WAITING && dazuko_slot_state(s) != WORKING) != 0) + { + /* The kernel process was killed while + * waiting for a daemon to process the file. + * This is fine. */ + + DPRINTK(("dazuko: kernel process %d killed while waiting for daemon response\n", current->pid)); + + /* change the slot's state to let the + * daemon know we are not interested + * in a response */ + dazuko_change_slot_state(s, FREE, FREE, 1); + + return -1; /* user interrupted */ + } + + /* we are working with the slot, so + * we need to lock it */ +/* DOWN */ + if (down_interruptible(&(s->mutex)) != 0) + { + return -1; /* user interrupted */ + } + + /* make sure this is the right daemon */ + if (s->pid != pid) + { + /* This is a different daemon than + * the one we assigned work to. 
+ * We need to scan again. */ + up(&(s->mutex)); +/* UP */ + goto begin; + } + + /* The slot should now be in the DONE state. */ + if (!__dazuko_change_slot_state(s, DONE, FREE)) + { + /* The daemon was killed while scanning. + * We need to scan again. */ + + up(&(s->mutex)); +/* UP */ + goto begin; + } + + /* grab the response */ + rc = s->response; + + up(&(s->mutex)); +/* UP */ + + /* CONGRATULATIONS! You successfully completed a full state cycle! */ + + return rc; +} + +static int dazuko_run_daemon(int event, char *filename, int filenamelength, int o_flags, int o_mode) +{ + struct slot_list_t *sl; + int i; + int rc = 0; + int error; + + for (i=0 ; i 0) + { + /* this daemon wants access blocked */ + rc = 1; + } + } + } + + return rc; +} + +static inline int dazuko_is_our_daemon(void) +{ + /* Check if the current process is one + * of the daemons. */ + + return (dazuko_find_slot(current->pid, 1, NULL) != NULL); +} + +static int dazuko_is_selected(char *filename, int len) +{ + /* Check if the given filename (with path) is + * under our include directories but not under + * the exclude directories. */ + + struct path_t *path; + + /* If we are interrupted here, we will report that + * this file is not selected. This will make the + * kernel allow normal access. Is this dangerous? */ +/* LOCK */ + read_lock(&lock_lists); + + /* check if filename is under our include paths */ + for (path=incl_paths ; path ; path=path->next) + { + /* the include item must be at least as long as the given filename */ + if (path->len < len) + { + /* the include item should match the beginning of the given filename */ + if (memcmp(path->path, filename, path->len) == 0) + break; + } + } + + /* If we didn't find a path, it isn't in our + * include directories. It can't be one of + * the selected files to scan. 
*/ + if (!path) + { + read_unlock(&lock_lists); +/* UNLOCK */ + return 0; + } + + /* check if filename is under our exclude paths */ + for (path=excl_paths ; path ; path=path->next) + { + /* the exclude item must be at least as long as the given filename */ + if (path->len < len) + { + /* the exclude item should match the beginning of the given filename */ + if (memcmp(path->path,filename,path->len) == 0) + break; + } + } + + read_unlock(&lock_lists); +/* UNLOCK */ + + /* If we got a path, then we are supposed + * to exclude this file for scanning. */ + if (path) + return 0; + + /* if we made it this far, it is a selected file to scan */ + + return 1; +} + +#if defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) +static int dazuko_add_hash(struct file *file, char *filename, int len) +{ + /* Add the given file and filename to the linked list + * of files to scan once they are closed. */ + + struct hash_t *h; + + /* create a new hash_t structure making room for name also */ + h = (struct hash_t *)kmalloc(sizeof(struct hash_t) + len, GFP_KERNEL); + if (!h) + return -EFAULT; + + /* fill in structure items */ + + h->file = file; + h->dirty = 0; + h->namelen = len; + memcpy(h->name, filename, len); + h->name[len] = 0; + + /* add the new hash_t item to the head of the + * hast_t linked list */ + +/* LOCK */ + write_lock(&lock_hash); + h->next = hash; + hash = h; + write_unlock(&lock_hash); +/* UNLOCK */ + return 0; +} +#endif + +#ifdef ON_CLOSE_MODIFIED_SUPPORT +/* Code based on code from: Swade 12/08/02: Move dirty to end of list */ +static void dazuko_mark_hash_dirty(struct file *file) +{ + struct hash_t *h = NULL; + struct hash_t *entry = NULL; + struct hash_t *prev = NULL; + struct hash_t *prev_entry = NULL; + +/* LOCK */ + write_lock(&lock_hash); + + for (h=hash ; h ; h=h->next) + { + /* not found if hit first dirty entry */ + if (h->dirty) + { + entry = NULL; + break; + } + + /* since these are file* and not + * strings, we can compare them + * directly 
*/ + if (h->file == file) + { + prev_entry = prev; + entry = h; + break; + } + + prev = h; + } + + if (entry) + { + if (!entry->dirty) + { + /* mark as dirty */ + entry->dirty = 1; + + /* If we already are last entry or next + * entry dirty, we don't need to move */ + + if (entry->next) + { + if (!entry->next->dirty) + { + for (h=entry->next ; h ; h=h->next) + { + if (h->dirty) + break; + + prev = h; + } + + /* remove from current position */ + if (prev_entry) + prev_entry->next = entry->next; + else + hash = entry->next; + + if (prev == NULL) + { + /* insert as first item */ + entry->next = hash; + hash = entry; + } + else if (h) + { + /* insert before h (after prev) */ + entry->next = prev->next; + prev->next = entry; + } + else + { + /* insert as last item (after prev) */ + entry->next = NULL; + prev->next = entry; + } + } + } + } + } + + write_unlock(&lock_hash); +/* UNLOCK */ + +} +#endif + +#if defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) +static struct hash_t *dazuko_get_hash(struct file *file) +{ + /* Find the given file within our list + * and then remove it from the list and + * return it. */ + + struct hash_t *prev; + struct hash_t *cur; + +/* LOCK */ + write_lock(&lock_hash); + + prev = NULL; + cur = hash; + while (cur) + { + /* since these are file* and not + * strings, we can compare them + * directly */ + if (cur->file == file) + { + /* remove the item from the list */ + if (!prev) + hash = cur->next; + else + prev->next = cur->next; + break; + } + + prev = cur; + cur = cur->next; + } + + write_unlock(&lock_hash); +/* UNLOCK */ + + return cur; +} +#endif + +static inline int dazuko_get_filename_length(char *filename) +{ + /* Get the length of the filename. There is + * currently a DAZUKO_FILENAME_MAX_LENGTH maximum size restriction + * on filenames. 
:( */ + + int len; + + for (len=0 ; lenputname_filename) + { + /* grab filename from filename cache */ + kfs->filename = (char *)getname(kfs->user_filename); + + /* make sure it is a valid name */ + if (IS_ERR(kfs->filename)) + return 0; + + /* the name will need to be put back */ + kfs->putname_filename = 1; + } + + /* get filename length and make sure it isn't too long */ + kfs->filename_length = dazuko_get_filename_length(kfs->filename); + if (kfs->filename_length == DAZUKO_FILENAME_MAX_LENGTH) + return 0; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + dazuko_bzero(&(kfs->nd), sizeof(struct nameidata)); + + /* initialize nameidata structure for finding file data */ + if (!path_init(kfs->filename, LOOKUP_FOLLOW | LOOKUP_POSITIVE, &(kfs->nd))) + return 0; + + if (!kfs->path_release_nd) + { + /* find file data and fill it in nameidata structure */ + if (path_walk(kfs->filename, &(kfs->nd))) /* !=0 -> error */ + return 0; + + /* the nameidata will need to be released */ + kfs->path_release_nd = 1; + } + + /* get a local copy of the dentry to make kernel version + * compatibility code eaiser to read */ + + /* make sure we don't already have a dentry */ + if (!kfs->dput_dentry) + { + kfs->dentry = dget(kfs->nd.dentry); + + /* the dentry will need to be put back */ + kfs->dput_dentry = 1; + } + } + #else + { + if (!kfs->dput_dentry) + { + kfs->dentry = lookup_dentry(kfs->filename, NULL, 1); + if (IS_ERR(kfs->dentry)) + return 0; + + /* the dentry will need to be put back */ + kfs->dput_dentry = 1; + } + } + #endif + + /* check if this file has no inode */ + if (kfs->dentry->d_inode == NULL) + return 0; + + /* if we made it this far, we got the inode */ + + return 1; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) +static char * __d_path(struct dentry *dentry, struct dentry *root, char *buffer, int buflen) +{ + /* Copy of d_path from linux/dcache.c but using + * a given root instead of the current root. 
*/ + + char * end = buffer+buflen; + char * retval; + + *--end = '\0'; + buflen--; + if (dentry->d_parent != dentry && list_empty(&dentry->d_hash)) { + buflen -= 10; + end -= 10; + memcpy(end, " (deleted)", 10); + } + + /* Get '/' right */ + retval = end-1; + *retval = '/'; + + for (;;) { + struct dentry * parent; + int namelen; + + if (dentry == root) + break; + dentry = dentry->d_covers; + parent = dentry->d_parent; + if (dentry == parent) + break; + namelen = dentry->d_name.len; + buflen -= namelen + 1; + if (buflen < 0) + break; + end -= namelen; + memcpy(end, dentry->d_name.name, namelen); + *--end = '/'; + retval = end; + dentry = parent; + } + return retval; +} +#endif + +static int dazuko_get_full_filename(struct dazuko_file_struct *kfs) +{ + /* Get the filename with the full path appended + * to the beginning. */ + + char *temp; + struct dentry *root; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + struct vfsmount *rootmnt; + #endif + + /* check if we need to allocate a buffer */ + if (!kfs->free_page_buffer) + { + /* get pre-requisites for d_path function */ + kfs->buffer = (char *)__get_free_page(GFP_USER); + + /* the buffer will need to be freed */ + kfs->free_page_buffer = 1; + } + + root = dget(orig_root); + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + /* make sure we don't already have a vfsmount */ + if (!kfs->mntput_vfsmount) + { + kfs->vfsmount = mntget(kfs->nd.mnt); + + /* the vfsmount will need to be put back */ + kfs->mntput_vfsmount = 1; + } + + /* build new filename with path included, using temp */ + + rootmnt = mntget(orig_rootmnt); + + spin_lock(&dcache_lock); + temp = __d_path(kfs->dentry, kfs->vfsmount, root, rootmnt, kfs->buffer, PAGE_SIZE); + spin_unlock(&dcache_lock); + + mntput(rootmnt); + } + #else + { + /* build new filename with path included, using temp */ + + temp = __d_path(kfs->dentry, root, kfs->buffer, PAGE_SIZE); + } + #endif + + dput(root); + + /* make sure we really got a new filename */ + if (!temp) + 
return 0; + + /* make sure we don't already have a full_filename */ + if (!kfs->free_full_filename) + { + /* get new filename length and make sure it isn't too long */ + kfs->full_filename_length = dazuko_get_filename_length(temp); + if (kfs->full_filename_length == DAZUKO_FILENAME_MAX_LENGTH) + return 0; + + kfs->full_filename = (char *)kmalloc(kfs->full_filename_length + 1, GFP_KERNEL); + + /* the char array will need to be freed */ + kfs->free_full_filename = 1; + + memcpy(kfs->full_filename, temp, kfs->full_filename_length + 1); + } + + /* we have a filename with the full path */ + + return 1; +} + +static void dazuko_file_struct_critical_cleanup(struct dazuko_file_struct *kfs) +{ + /* Delete all the flagged structures from the + * given dazuko_file_struct and reset all critical + * values back to 0. */ + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + if (kfs->mntput_vfsmount) + { + mntput(kfs->vfsmount); + kfs->mntput_vfsmount = 0; + } + } + #endif + + if (kfs->free_page_buffer) + { + free_page((unsigned long)kfs->buffer); + kfs->free_page_buffer = 0; + } + + if (kfs->dput_dentry) + { + dput(kfs->dentry); + kfs->dput_dentry = 0; + } + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + if (kfs->path_release_nd) + { + path_release(&(kfs->nd)); + kfs->path_release_nd = 0; + } + } + #endif + + if (kfs->putname_filename) + { + putname(kfs->filename); + kfs->putname_filename = 0; + } +} + +static void dazuko_file_struct_cleanup(struct dazuko_file_struct *kfs) +{ + kfs->should_scan = 0; + + dazuko_file_struct_critical_cleanup(kfs); + + if (kfs->free_full_filename) + { + kfree(kfs->full_filename); + kfs->free_full_filename = 0; + } +} + +static int dazuko_should_scan(struct dazuko_file_struct *kfs) +{ + /* Check if we are supposed to scan this file. + * This checks for all the correct file types, + * permissions, and if it is within the desired + * paths to scan. 
*/ + + int success = 0; + + /* check if we already know we scan this file */ + if (kfs->should_scan) + { + return 1; + } + + /* make sure we can get an inode */ + if (dazuko_get_dentry(kfs)) + { + /* make sure we have a regular file */ + if (S_ISREG(kfs->dentry->d_inode->i_mode)) + { + /* make sure the file is readable */ + if (permission(kfs->dentry->d_inode, MAY_READ) == 0) + { + /* make sure we can get the full path */ + if (dazuko_get_full_filename(kfs)) + { + /* check if the filename is within our include + * directories but not our exclude directories */ + + if (dazuko_is_selected(kfs->full_filename, kfs->full_filename_length)) + { + /* If we made it this far, we are supposed + * to scan this file. We mark it so that + * any further immediate inquiries don't have + * to do all this work all over again. */ + + kfs->should_scan = 1; + + success = 1; + } + } + } + } + } + + dazuko_file_struct_critical_cleanup(kfs); + + return success; +} + +#ifdef ON_EXEC_SUPPORT +asmlinkage int dazuko_sys_execve(struct pt_regs regs) +{ + /* The kernel wants to execute the given file. + * Because the given structure contains stack + * address information, we can't simply call + * the default standard execve. Instead we + * have to manually inline the standard execve + * call. 
*/ + + struct dazuko_file_struct kfs; + char *filename; + int error = 0; + + /* check if we are supposed to do scanning */ + if ((atomic_read(&active) == 0) || !SCAN_ON_EXEC) + goto standard; + + /* start with a clean dazuko_file_struct */ + dazuko_bzero(&kfs, sizeof(struct dazuko_file_struct)); + + kfs.user_filename = (char *)regs.ebx; + + /* make sure we should scan this file */ + if (dazuko_should_scan(&kfs)) + error = dazuko_run_daemon(ON_EXEC, kfs.full_filename, kfs.full_filename_length, 0, 0); + + dazuko_file_struct_cleanup(&kfs); + + if (error > 0) + { + /* virus found and not cleaned */ + + return -EPERM; + } + else if (error < 0) + { + /* user interrupted */ + + return -EINTR; + } + + /* call the standard execve function */ + + /* We cannot simply call the original version of execvc + * because the parameter contains stack information and + * the call will push the execvc call onto a new stack + * level and seg fault. :( */ + +standard: + /* The following code only works on i386 machines. 
+ * It is directly copied from Linux in the file: + * arch/i386/kernel/process.c */ + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + filename = getname((char *) regs.ebx); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, ®s); + if (error == 0) + current->ptrace &= ~PT_DTRACE; + putname(filename); +out: + return error; + } + #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,20) + { + #ifdef __SMP__ + lock_kernel(); + #endif + filename = getname((char *) regs.ebx); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, ®s); + if (error == 0) + current->ptrace &= ~PT_DTRACE; + putname(filename); +out: + #ifdef __SMP__ + unlock_kernel(); + #endif + return error; + } + #else + { + #ifdef __SMP__ + lock_kernel(); + #endif + filename = getname((char *) regs.ebx); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, ®s); + if (error == 0) + current->flags &= ~PF_DTRACE; + putname(filename); +out: + #ifdef __SMP__ + unlock_kernel(); + #endif + return error; + } + #endif + + +} +#endif + +#if defined(ON_OPEN_SUPPORT) || defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) +asmlinkage long dazuko_sys_open(const char *filename, int flags, int mode) +{ + /* The kernel wants to open the given filename + * with the given flags and mode. The dazuko_file_struct + * is used to handle the tricky job of cleaning + * up the many pieces of memory that may or may + * not be allocated. */ + + struct dazuko_file_struct kfs; + int error = 0; + int fd; + + /* Check if we are supposed to do scanning. Even + * if we don't scan on open, we need to keep going + * if we are supposed to scan on close. 
*/ + if ((atomic_read(&active) == 0) || filename == NULL || !(SCAN_ON_OPEN | SCAN_ON_CLOSE | SCAN_ON_CLOSE_MODIFIED)) + { + return original_sys_open(filename, flags, mode); + } + + /* do not scan if it is our scan daemon + * opening the file */ + if (dazuko_is_our_daemon()) + { + return original_sys_open(filename, flags, mode); + } + + /* start with a clean dazuko_file_struct */ + dazuko_bzero(&kfs, sizeof(struct dazuko_file_struct)); + + kfs.user_filename = filename; + + /* make sure we are supposed to scan files on open and + * that we aren't truncating this file on open (truncating + * the file will delete any contents, so no worry for viruses. */ + if (SCAN_ON_OPEN) + { + /* make sure we should scan this file */ + if (dazuko_should_scan(&kfs)) + { + error = dazuko_run_daemon(ON_OPEN, kfs.full_filename, kfs.full_filename_length, flags, mode); + } + } + + if (error > 0) + { + /* virus found and not cleaned */ + + fd = -EPERM; + } + else if (error < 0) + { + /* user interrupted */ + + fd = -EINTR; + } + else + { + /* call the standard open function */ + fd = original_sys_open(filename, flags, mode); + + /* if the file was opened and we are interested + * in scanning on close, add this file to our hash_t list */ + + if ((atomic_read(&active) != 0) && fd > 0 && fd < NR_OPEN) + { + if (SCAN_ON_CLOSE || (SCAN_ON_CLOSE_MODIFIED && (flags & (O_RDWR | O_WRONLY)))) + { + /* make sure we should scan this file */ + if (dazuko_should_scan(&kfs)) + { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + read_lock(¤t->files->file_lock); + } + #endif + + dazuko_add_hash(current->files->fd[fd], kfs.full_filename, kfs.full_filename_length); + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + read_unlock(¤t->files->file_lock); + } + #endif + } + } + } + } + + dazuko_file_struct_cleanup(&kfs); + + return fd; +} +#endif + +#if defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) +asmlinkage long dazuko_sys_close(unsigned int fd) +{ + /* The kernel wants to close 
the given file + * descriptor. */ + + int error; + struct hash_t *h = NULL; + struct file *file = NULL; + + /* do not scan if it is our scan daemon + * closing the file */ + if (dazuko_is_our_daemon()) + { + return original_sys_close(fd); + } + + /* If it is a valid file descriptor, see if it is + * in our list of files to scan on close. If it is, + * it will be removed from the list also. */ + + if ((atomic_read(&active) != 0) && (SCAN_ON_CLOSE || SCAN_ON_CLOSE_MODIFIED) && fd > 0 && fd < NR_OPEN) + { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + read_lock(¤t->files->file_lock); + } + #endif + + /* grab the file* for possible later use */ + file = current->files->fd[fd]; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + read_unlock(¤t->files->file_lock); + } + #endif + } + + /* call the standard close function */ + error = original_sys_close(fd); + + if (!error && (atomic_read(&active) != 0) && (SCAN_ON_CLOSE || SCAN_ON_CLOSE_MODIFIED) && fd > 0 && fd < NR_OPEN) + { + /* find hash entry and remove it from list */ + h = dazuko_get_hash(file); + + /* if we found the file in our list and the file was + * successfully closed, we need to scan it */ + if (h) + { + /* determine if we are scanning on close and/or close_modified */ + + /* note that modified has priority over just close */ + + if (SCAN_ON_CLOSE_MODIFIED && h->dirty) + dazuko_run_daemon(ON_CLOSE_MODIFIED, h->name, h->namelen, 0, 0); + else if (SCAN_ON_CLOSE) + dazuko_run_daemon(ON_CLOSE, h->name, h->namelen, 0, 0); + + /* clean up the hash_t structure */ + kfree(h); + } + } + + return error; +} +#endif + +#ifdef ON_CLOSE_MODIFIED_SUPPORT +asmlinkage ssize_t dazuko_sys_write(unsigned int fd, char *buf, unsigned int count) +{ + /* The kernel wants to write to the given file + * descriptor. 
*/ + + int num; + struct file *file = NULL; + + /* do not track if it is our scan daemon + * writing the file */ + if (dazuko_is_our_daemon()) + { + return original_sys_write(fd, buf, count); + } + + /* Check if this file is in our list of files to + * be cleaned on close. It will not be removed. + * We only have to do this if we are scanning on + * close.*/ + + if ((atomic_read(&active) != 0) && SCAN_ON_CLOSE_MODIFIED && fd > 0 && fd < NR_OPEN) + { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + read_lock(¤t->files->file_lock); + } + #endif + + /* Grab a copy of the file* "just in case*. */ + file = current->files->fd[fd]; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + read_unlock(¤t->files->file_lock); + } + #endif + } + + /* call the standard write function */ + num = original_sys_write(fd, buf, count); + + /* if we actually wrote something and we found the + * file in our list, set it as dirty */ + + if (num > 0 && file) + { + /* Swade 4/24/02: Move to end of clean list */ + dazuko_mark_hash_dirty(file); + } + + return num; +} +#endif + +#ifdef HIDDEN_SCT +static void** dazuko_get_sct() +{ + unsigned long ptr; + extern int loops_per_jiffy; + unsigned long *p; + + for (ptr=(unsigned long)&loops_per_jiffy ; ptr<(unsigned long)&boot_cpu_data ; ptr+=sizeof(void *)) + { + p = (unsigned long *)ptr; + if (p[6] == (unsigned long)sys_close) + { + return (void **)p; + } + } + + return NULL; +} +#endif + +int __init dazuko_init(void) +{ + /* Called insmod when inserting the module. 
*/ + + int i; + +#ifdef HIDDEN_SCT + sys_call_table = dazuko_get_sct(); + if (sys_call_table == NULL) + { + printk("dazuko: panic (sys_call_table == NULL)\n"); + return -1; + } +#endif + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + rwlock_init(&lock_hash); + rwlock_init(&lock_lists); + } + #else + { + lock_hash = RW_LOCK_UNLOCKED; + lock_lists = RW_LOCK_UNLOCKED; + } + #endif + + dazuko_bzero(&slot_lists, sizeof(slot_lists)); + for (i=0 ; ifs == NULL) + { + printk("dazuko: panic (current->fs == NULL)\n"); + return -1; + } + if (current->fs->root == NULL) + { + printk("dazuko: panic (current->root == NULL)\n"); + return -1; + } + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + if (current->fs->rootmnt == NULL) + { + printk("dazuko: panic (current->rootmnt == NULL)\n"); + return -1; + } + } + #endif + + /* register the dazuko device */ +#ifdef CONFIG_DEVFS_FS + dev_major = devfs_register_chrdev(0, DEVICE_NAME, &fops); + devfs_register(NULL, DEVICE_NAME, DEVFS_FL_DEFAULT, + dev_major, 0, S_IFCHR | S_IRUSR | S_IWUSR, + &fops, NULL); +#else + dev_major = register_chrdev(0, DEVICE_NAME, &fops); +#endif + if (dev_major < 0) + { + printk("dazuko: unable to register device chrdev, err=%d\n", dev_major); + return dev_major; + } + + /* Grab the current root. This is assumed to be the real. + * If it is not the real root, we could have problems + * looking up filenames. */ + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + { + read_lock(¤t->fs->lock); + orig_rootmnt = current->fs->rootmnt; + } + #endif + + orig_root = current->fs->root; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + read_unlock(¤t->fs->lock); + #endif + + /* do a file syncronization on all devices (IMPORTANT!) 
and replace system calls */ + #ifdef __SMP__ + lock_kernel(); + #endif + + fsync_dev(0); + +#if defined(ON_OPEN_SUPPORT) || defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) + /* replace the system call entries with our entries */ + DPRINTK(("dazuko: hooked sys_open\n")); + original_sys_open = sys_call_table[__NR_open]; + sys_call_table[__NR_open] = dazuko_sys_open; +#endif + +#if defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) + DPRINTK(("dazuko: hooked sys_close\n")); + original_sys_close = sys_call_table[__NR_close]; + sys_call_table[__NR_close] = dazuko_sys_close; +#endif + +#ifdef ON_CLOSE_MODIFIED_SUPPORT + DPRINTK(("dazuko: hooked sys_write\n")); + original_sys_write = sys_call_table[__NR_write]; + sys_call_table[__NR_write] = dazuko_sys_write; +#endif + +#ifdef ON_EXEC_SUPPORT + DPRINTK(("dazuko: hooked sys_execve\n")); + original_sys_execve = sys_call_table[__NR_execve]; + sys_call_table[__NR_execve] = dazuko_sys_execve; +#endif + + #ifdef __SMP__ + unlock_kernel(); + #endif + /* done syncing and replacing */ + + /* initialization complete */ + + printk("dazuko: loaded, version=%s, dev_major=%d\n", VERSION, dev_major); + + return 0; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) +void __exit dazuko_exit(void) +#else +void dazuko_exit(void) +#endif +{ + /* Called by rmmod when removing the module. */ + + int error; + int i; + + dazuko_remove_all_paths(); + dazuko_remove_all_hash(); + + /* do a file syncronization on all devices (IMPORTANT!) 
and replace system calls */ + #ifdef __SMP__ + lock_kernel(); + #endif + + fsync_dev(0); + + /* check if it is still our entries in the sytem call table */ +#if defined(ON_OPEN_SUPPORT) || defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) + if (sys_call_table[__NR_open] != dazuko_sys_open) + printk("dazuko: open system call not correct (system may be left in an unstable state!)\n"); +#endif +#if defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) + if (sys_call_table[__NR_close] != dazuko_sys_close) + printk("dazuko: close system call not correct (system may be left in an unstable state!)\n"); +#endif +#ifdef ON_CLOSE_MODIFIED_SUPPORT + if (sys_call_table[__NR_write] != dazuko_sys_write) + printk("dazuko: write system call not correct (system may be left in an unstable state!)\n"); +#endif +#ifdef ON_EXEC_SUPPORT + if (sys_call_table[__NR_execve] != dazuko_sys_execve) + printk("dazuko: execve system call not correct (system may be left in an unstable state!)\n"); +#endif + + /* return original system calls (we HOPE no one has played with the table) */ + +#if defined(ON_OPEN_SUPPORT) || defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) + sys_call_table[__NR_open] = original_sys_open; +#endif +#if defined(ON_CLOSE_SUPPORT) || defined(ON_CLOSE_MODIFIED_SUPPORT) + sys_call_table[__NR_close] = original_sys_close; +#endif +#ifdef ON_CLOSE_MODIFIED_SUPPORT + sys_call_table[__NR_write] = original_sys_write; +#endif +#ifdef ON_EXEC_SUPPORT + sys_call_table[__NR_execve] = original_sys_execve; +#endif + + #ifdef __SMP__ + unlock_kernel(); + #endif + /* done syncing and replacing */ + +#ifdef CONFIG_DEVFS_FS + error = devfs_unregister_chrdev(dev_major, DEVICE_NAME); + devfs_unregister(devfs_find_handle(NULL, DEVICE_NAME, dev_major, 0, DEVFS_SPECIAL_CHR, 0)); +#else + error = unregister_chrdev(dev_major, DEVICE_NAME); +#endif + if (error < 0) + { + printk("dazuko: error unregistering chrdev, err=%d\n", error); + } + + for (i=0 ; 
iuse_count)) != 0) + printk("dazuko: slot_list count for daemon %d was not 0 (possible bug)\n", current->pid); + + kfree(slot_lists[i].slot_list); + slot_lists[i].slot_list = NULL; + } + } + + DPRINTK(("dazuko: module unloaded\n")); +} + +#ifdef MODULE + +int init_module(void) +{ + return dazuko_init(); +} + +void cleanup_module(void) +{ + dazuko_exit(); +} + +MODULE_AUTHOR("H+BEDV Datentechnik GmbH "); +MODULE_DESCRIPTION("allow 3rd-party file access control"); +#ifdef MODULE_LICENSE +MODULE_LICENSE("GPL"); +#else +static const char __module_license[] __attribute__((section(".modinfo"))) = "license=GPL"; +#endif + +EXPORT_NO_SYMBOLS; + +#else + +module_init(dazuko_init); +module_exit(dazuko_exit); +/* module_init(int dazuko_init(void)); */ +/* module_exit(void dazuko_exit(void)); */ + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/char/keyboard.c linux-2.4.20-wolk4.9-fullkernel/drivers/char/keyboard.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/char/keyboard.c 2003-08-25 18:26:45.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/char/keyboard.c 2003-08-25 23:43:26.000000000 +0200 @@ -94,13 +94,11 @@ static int npadch = -1; /* -1 or numbe static unsigned char diacr; static char rep; /* flag telling character repeat */ struct kbd_struct kbd_table[MAX_NR_CONSOLES]; -struct tty_struct **ttytab; +static struct tty_struct **ttytab; static struct kbd_struct * kbd = kbd_table; static struct tty_struct * tty; static unsigned char prev_scancode; -EXPORT_SYMBOL(ttytab); - void compute_shiftstate(void); typedef void (*k_hand)(unsigned char value, char up_flag); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/ide/ide.c linux-2.4.20-wolk4.9-fullkernel/drivers/ide/ide.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/ide/ide.c 2003-08-25 18:26:32.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/ide/ide.c 2003-08-26 17:59:07.000000000 +0200 @@ -1774,8 +1774,9 @@ int ide_do_drive_cmd (ide_drive_t *drive ide_do_request(hwgroup, 0); 
spin_unlock_irqrestore(&io_request_lock, flags); if (action == ide_wait) { - wait_for_completion(&wait); /* wait for it to be serviced */ - return rq->errors ? -EIO : 0; /* return -EIO if errors */ + generic_unplug_device(&drive->queue); /* make sure IO is not suspended */ + wait_for_completion(&wait); /* wait for it to be serviced */ + return rq->errors ? -EIO : 0; /* return -EIO if errors */ } return 0; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/md/lvm-fs.c linux-2.4.20-wolk4.9-fullkernel/drivers/md/lvm-fs.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/md/lvm-fs.c 2003-08-25 18:24:42.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/md/lvm-fs.c 2003-08-26 17:59:08.000000000 +0200 @@ -591,6 +591,13 @@ static int _proc_read_global(char *page, buf = NULL; return 0; } + if (buf + pos < page) { + /* Work around stupid hack in proc_file_read. */ + sz = sz - pos >= count ? count : sz - pos; + memcpy(page, &buf[pos], sz); + *start = page; + return sz; + } *start = &buf[pos]; if (sz - pos < count) return sz - pos; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/b57um.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/b57um.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/b57um.c 2003-08-25 18:26:45.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/b57um.c 2003-08-25 20:31:19.000000000 +0200 @@ -12,8 +12,8 @@ char bcm5700_driver[] = "bcm5700"; -char bcm5700_version[] = "6.2.11"; -char bcm5700_date[] = "(05/16/03)"; +char bcm5700_version[] = "6.2.17"; +char bcm5700_date[] = "(07/14/03)"; #define B57UM #include "mm.h" @@ -173,10 +173,21 @@ struct pci_device_id { #endif +#if (LINUX_VERSION_CODE < 0x020411) +#ifndef __devexit_p +#define __devexit_p(x) x +#endif +#endif + #ifndef MODULE_LICENSE #define MODULE_LICENSE(license) #endif +#ifndef IRQ_RETVAL +typedef void irqreturn_t; +#define IRQ_RETVAL(x) +#endif + #if (LINUX_VERSION_CODE < 0x02032a) static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, 
dma_addr_t *dma_handle) @@ -339,7 +350,7 @@ STATIC int bcm5700_open(struct net_devic STATIC void bcm5700_timer(unsigned long data); STATIC void bcm5700_reset(struct net_device *dev); STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev); -STATIC void bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs); +STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs); #ifdef BCM_TASKLET STATIC void bcm5700_tasklet(unsigned long data); #endif @@ -377,8 +388,7 @@ STATIC void poll_bcm5700(struct net_devi static struct net_device *root_tigon3_dev = NULL; typedef enum { - BCM5700VIGIL = 0, - BCM5700A6, + BCM5700A6 = 0, BCM5700T6, BCM5700A9, BCM5700T9, @@ -433,7 +443,6 @@ typedef enum { static struct { char *name; } board_info[] __devinitdata = { - { "Broadcom Vigil B5700 1000Base-T" }, { "Broadcom BCM5700 1000Base-T" }, { "Broadcom BCM5700 1000Base-SX" }, { "Broadcom BCM5700 1000Base-SX" }, @@ -449,7 +458,7 @@ static struct { { "Broadcom BCM5702 1000Base-T" }, { "Broadcom BCM5703 1000Base-T" }, { "Broadcom BCM5703 1000Base-SX" }, - { "Broadcom Arbuckle B5703 1000Base-SX" }, + { "Broadcom B5703 1000Base-SX" }, { "3Com 3C996 10/100/1000 Server NIC" }, { "3Com 3C996 10/100/1000 Server NIC" }, { "3Com 3C996 Gigabit Fiber-SX Server NIC" }, @@ -486,7 +495,6 @@ static struct { }; static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = { - {0x14e4, 0x1644, 0x1014, 0x0277, 0, 0, BCM5700VIGIL }, {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 }, {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 }, {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 }, @@ -801,14 +809,22 @@ bcm5700_init_one(struct pci_dev *pdev, else printk("Copper "); } - else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) - printk("Broadcom BCM5704 Integrated Copper "); + else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) { + printk("Broadcom BCM5704 Integrated "); + if (pDevice->EnableTbi) + printk("SerDes "); + else + 
printk("Copper "); + } else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID) printk("Broadcom BCM5705 Integrated Copper "); else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID) printk("Broadcom BCM8002 SerDes "); else if (pDevice->EnableTbi) { - if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) { + if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) { + printk("Broadcom BCM5703 Integrated SerDes "); + } + else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) { printk("Broadcom BCM5704 Integrated SerDes "); } else { @@ -932,8 +948,13 @@ bcm5700_open(struct net_device *dev) #endif #if INCLUDE_TBI_SUPPORT - if(pDevice->PollTbiLink) - pUmDevice->poll_tbi_expiry = HZ / pUmDevice->timer_interval; + if (pDevice->EnableTbi && pDevice->PollTbiLink) { + pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval; + if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) { + pUmDevice->poll_tbi_interval /= 4; + } + pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval; + } #endif pUmDevice->asf_heartbeat = (120 * HZ) / pUmDevice->timer_interval; @@ -1020,7 +1041,7 @@ bcm5700_timer(unsigned long data) } #if INCLUDE_TBI_SUPPORT - if(pDevice->PollTbiLink && (--pUmDevice->poll_tbi_expiry == 0)) { + if(pDevice->PollTbiLink && (--pUmDevice->poll_tbi_expiry <= 0)) { BCM5700_PHY_LOCK(pUmDevice, flags); value32 = REG_RD(pDevice, MacCtrl.Status); if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) && @@ -1035,7 +1056,7 @@ bcm5700_timer(unsigned long data) LM_SetupPhy(pDevice); } BCM5700_PHY_UNLOCK(pUmDevice, flags); - pUmDevice->poll_tbi_expiry = HZ / pUmDevice->timer_interval; + pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval; } #endif @@ -1496,7 +1517,7 @@ bcm5700_poll(struct net_device *dev, int } #endif /* BCM_NAPI_RXPOLL */ -STATIC void +STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs) { struct net_device *dev = (struct net_device *)dev_instance; @@ -1507,13 +1528,17 @@ 
bcm5700_interrupt(int irq, void *dev_ins #ifdef BCM_TASKLET int repl_buf_count; #endif + unsigned int handled = 1; - if (!pDevice->InitDone) - return; + if (!pDevice->InitDone) { + handled = 0; + return IRQ_RETVAL(handled); + } if (atomic_read(&pUmDevice->intr_sem)) { MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1); - return; + handled = 0; + return IRQ_RETVAL(handled); } bcm5700_intr_lock(pUmDevice); @@ -1522,7 +1547,8 @@ bcm5700_interrupt(int irq, void *dev_ins "processor %d.\n", dev->name, hard_smp_processor_id()); bcm5700_intr_unlock(pUmDevice); - return; + handled = 0; + return IRQ_RETVAL(handled); } if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) || @@ -1593,6 +1619,9 @@ bcm5700_interrupt(int irq, void *dev_ins } } } + else { + handled = 0; + } #ifdef BCM_TASKLET repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container); if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) || @@ -1635,7 +1664,7 @@ bcm5700_interrupt(int irq, void *dev_ins pUmDevice->tx_queued = 0; netif_wake_queue(dev); } - return; + return IRQ_RETVAL(handled); } @@ -1839,7 +1868,8 @@ static int netdev_ethtool_ioctl(struct n BCM_EEDUMP_LEN(&info, SEEPROM_CHIP_SIZE); } else { - BCM_EEDUMP_LEN(&info, NVRAM_ADDRESS_MASK + 1); + /* Limit to 128K - no NIC has more than that */ + BCM_EEDUMP_LEN(&info, 0x20000); } #endif if (copy_to_user(useraddr, &info, sizeof(info))) @@ -2115,6 +2145,11 @@ static int netdev_ethtool_ioctl(struct n if (eeprom.offset >= 0x20000) return -EFAULT; + /* maximum data limited */ + /* to read more, call again with a different offset */ + if (eeprom.len > 0x400) + eeprom.len = 0x400; + if (eeprom.len > 64) { buf = kmalloc(eeprom.len, GFP_KERNEL); if (!buf) @@ -2159,13 +2194,13 @@ static int netdev_ethtool_ioctl(struct n if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) return -EFAULT; - if ((eeprom.offset & 3) || (eeprom.offset >= 0x10000)) + if ((eeprom.offset & 3) || (eeprom.offset >= 0x20000)) return -EFAULT; - if (((eeprom.offset + 
eeprom.len) >= 0x10000) || + if (((eeprom.offset + eeprom.len) >= 0x20000) || (eeprom.len & 3)) { - eeprom.len = 0x10000 - eeprom.offset; + eeprom.len = 0x20000 - eeprom.offset; } useraddr += offsetof(struct ethtool_eeprom, data); @@ -2824,7 +2859,7 @@ static struct pci_driver bcm5700_pci_dri name: bcm5700_driver, id_table: bcm5700_pci_tbl, probe: bcm5700_init_one, - remove: bcm5700_remove_one, + remove: __devexit_p(bcm5700_remove_one), suspend: bcm5700_suspend, resume: bcm5700_resume, }; @@ -3135,6 +3170,10 @@ MM_GetConfig(PLM_DEVICE_BLOCK pDevice) if (T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) { pDevice->UseTaggedStatus = TRUE; pUmDevice->timer_interval = HZ; + if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) && + pDevice->EnableTbi) { + pUmDevice->timer_interval = HZ/4; + } } else { pUmDevice->timer_interval = HZ/10; @@ -3255,11 +3294,12 @@ MM_GetConfig(PLM_DEVICE_BLOCK pDevice) #endif #if INCLUDE_TBI_SUPPORT pDevice->PollTbiLink = TRUE; - if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) + if (((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) || + (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) && pDevice->EnableTbi) { - /* just poll since we have hardware autoneg. */ - pDevice->IgnoreTbiLinkChange = TRUE; + /* just poll since we have hardware autoneg. 
in 5704 */ + pDevice->NoTbiInterrupt = TRUE; } #endif bcm5700_validate_param_range(dev, &scatter_gather[index], diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/mm.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/mm.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/mm.h 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/mm.h 2003-08-25 20:31:19.000000000 +0200 @@ -295,6 +295,7 @@ typedef struct _UM_DEVICE_BLOCK { int timer_interval; int adaptive_expiry; int crc_counter_expiry; + int poll_tbi_interval; int poll_tbi_expiry; int asf_heartbeat; int tx_full; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/tigon3.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/tigon3.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/tigon3.c 2003-08-25 18:26:45.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/tigon3.c 2003-08-25 20:31:19.000000000 +0200 @@ -1256,6 +1256,7 @@ PLM_DEVICE_BLOCK pDevice) #if INCLUDE_TBI_SUPPORT pDevice->PollTbiLink = BAD_DEFAULT_VALUE; pDevice->IgnoreTbiLinkChange = FALSE; + pDevice->NoTbiInterrupt = FALSE; #endif #if INCLUDE_TCP_SEG_SUPPORT pDevice->LargeSendMaxSize = T3_TCP_SEG_MAX_OFFLOAD_SIZE; @@ -1650,6 +1651,13 @@ PLM_DEVICE_BLOCK pDevice) { pDevice->PollTbiLink = FALSE; } + if (pDevice->PollTbiLink == TRUE) + { + if (pDevice->NoTbiInterrupt == TRUE) /* Always poll */ + { + pDevice->IgnoreTbiLinkChange = TRUE; + } + } } else { @@ -2509,7 +2517,7 @@ restart_reset: (pDevice->SubsystemId << 16) | pDevice->SubsystemVendorId); /* Clear the statistics block. 
*/ - for(j = 0x0300; j < 0x0b00; j++) + for(j = 0x0300; j < 0x0b00; j = j + 4) { MEM_WR_OFFSET(pDevice, j, 0); } @@ -4971,6 +4979,7 @@ LM_SetupFiberPhy( LM_UINT32 Cnt; LM_UINT32 j, k; LM_UINT32 MacStatus, RemotePhyAd, LocalPhyAd; + LM_FLOW_CONTROL PreviousFlowControl = pDevice->FlowControl; if (pDevice->MacLoopBack) { @@ -4979,6 +4988,20 @@ LM_SetupFiberPhy( return LM_STATUS_SUCCESS; } + if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5704) && + (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) && pDevice->InitDone) + { + MacStatus = REG_RD(pDevice, MacCtrl.Status); + if ((MacStatus & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DETECTED | + MAC_STATUS_CFG_CHANGED | MAC_STATUS_RECEIVING_CFG)) + == (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DETECTED)) + { + + REG_WR(pDevice, MacCtrl.Status, MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED); + return LM_STATUS_SUCCESS; + } + } pDevice->MacMode &= ~(MAC_MODE_HALF_DUPLEX | MAC_MODE_PORT_MODE_MASK); /* Initialize the send_config register. */ @@ -5285,7 +5308,7 @@ LM_SetupFiberPhy( { pDevice->IgnoreTbiLinkChange = TRUE; } - else + else if (pDevice->NoTbiInterrupt == FALSE) { pDevice->IgnoreTbiLinkChange = FALSE; } @@ -5356,7 +5379,10 @@ LM_SetupFiberPhy( } /* Indicate link status. */ - if (pDevice->LinkStatus != CurrentLinkStatus) { + if ((pDevice->LinkStatus != CurrentLinkStatus) || + ((CurrentLinkStatus == LM_STATUS_LINK_ACTIVE) && + (PreviousFlowControl != pDevice->FlowControl))) + { pDevice->LinkStatus = CurrentLinkStatus; MM_IndicateStatus(pDevice, CurrentLinkStatus); } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/tigon3.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/tigon3.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm/tigon3.h 2003-08-25 18:26:45.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm/tigon3.h 2003-08-25 20:31:19.000000000 +0200 @@ -3219,6 +3219,7 @@ typedef struct _LM_DEVICE_BLOCK { /* Autoneg state info. 
*/ AN_STATE_INFO AnInfo; LM_UINT32 PollTbiLink; + LM_UINT32 NoTbiInterrupt; LM_UINT32 IgnoreTbiLinkChange; #endif #ifdef BCM_NAPI_RXPOLL diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44.h 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44.h 2003-08-25 20:31:22.000000000 +0200 @@ -273,6 +273,10 @@ typedef volatile struct { #define BCMENET_BACK_DOOR_ADDR 0xa0 #define BCMENET_BACK_DOOR_DATA 0xa4 +#define BCMENET_PMC 0x42 +#define BCMENET_PMCSR 0x44 +#define ENABLE_PCICONFIG_PME 0x8100 + /* cpp contortions to concatenate w/arg prescan */ #ifndef PAD #define _PADLINE(line) pad ## line @@ -602,6 +606,8 @@ typedef volatile struct _bcmenettregs { LM_UINT32 PAD[2]; LM_UINT32 biststatus; LM_UINT32 wakeuplength; +#define DISABLE_32_PATMATCH 0x80800000 +#define DISABLE_3210_PATMATCH 0x80808080 LM_UINT32 PAD[3]; /* Interrupt Control */ @@ -923,6 +929,13 @@ typedef struct _LM_PACKET { } u; } LM_PACKET; +#ifdef BCM_WOL + +#define BCMENET_PMPSIZE 0x80 +#define BCMENET_PMMSIZE 0x10 + +#endif + typedef struct _LM_DEVICE_BLOCK { /* Memory view. */ @@ -938,13 +951,12 @@ typedef struct _LM_DEVICE_BLOCK LM_RX_PACKET_Q RxPacketFreeQ; LM_RX_PACKET_Q RxPacketReceivedQ; LM_TX_PACKET_Q TxPacketFreeQ; - LM_TX_PACKET_Q TxPacketActiveQ; LM_TX_PACKET_Q TxPacketXmittedQ; LM_PACKET *RxPacketArr[DMAMAXRINGSZ / sizeof(dmadd_t)]; LM_PACKET *TxPacketArr[DMAMAXRINGSZ / sizeof(dmadd_t)]; - atomic_t SendDescLeft; + MM_ATOMIC_T SendDescLeft; /* Current node address. 
*/ LM_UINT8 NodeAddress[6]; @@ -1070,8 +1082,9 @@ typedef struct _LM_DEVICE_BLOCK LM_COUNTER rx_pause_pkts; LM_COUNTER rx_nonpause_pkts; - char PartNo[32]; - +#ifdef BCM_WOL + LM_WAKE_UP_MODE WakeUpMode; +#endif } LM_DEVICE_BLOCK; /******************************************************************************/ @@ -1079,16 +1092,16 @@ typedef struct _LM_DEVICE_BLOCK /******************************************************************************/ #define REG_RD(pDevice, OffsetName) \ - __raw_readl(&((pDevice)->pMemView->OffsetName)) + MM_MEMREADL(&((pDevice)->pMemView->OffsetName)) #define REG_WR(pDevice, OffsetName, Value32) \ - (void) __raw_writel(Value32, &((pDevice)->pMemView->OffsetName)) + (void) MM_MEMWRITEL(&((pDevice)->pMemView->OffsetName), Value32) #define REG_RD_OFFSET(pDevice, Offset) \ - __raw_readl(((LM_UINT8 *) (pDevice)->pMemView + Offset)) + MM_MEMREADL(((LM_UINT8 *) (pDevice)->pMemView + Offset)) #define REG_WR_OFFSET(pDevice, Offset, Value32) \ - __raw_writel(Value32, ((LM_UINT8 *) (pDevice)->pMemView + Offset)) + MM_MEMWRITEL(((LM_UINT8 *) (pDevice)->pMemView + Offset), Value32) #define REG_OR(pDevice, OffsetName, Value32) \ REG_WR(pDevice, OffsetName, REG_RD(pDevice, OffsetName) | Value32) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44lm.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44lm.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44lm.c 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44lm.c 2003-08-25 20:31:22.000000000 +0200 @@ -40,6 +40,10 @@ void b44_LM_sb_core_reset(LM_DEVICE_BLOC LM_UINT32 b44_LM_sb_coreid(LM_DEVICE_BLOCK *pDevice); LM_UINT32 b44_LM_sb_corerev(LM_DEVICE_BLOCK *pDevice); LM_UINT32 b44_LM_sb_iscoreup(LM_DEVICE_BLOCK *pDevice); +#ifdef BCM_WOL +static void b44_LM_ftwrite(LM_DEVICE_BLOCK *pDevice, LM_UINT32 *b, + LM_UINT32 nbytes, LM_UINT32 ftaddr); +#endif #define BCM4710_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) 
*/ #define BCM4710_ENUM 0x18000000 /* Beginning of core enumeration space */ @@ -52,6 +56,10 @@ struct sbmap bcm4402[] = { {SBID_REG_PCI, 0, 0x18002000} }; +#ifdef B44_DEBUG +int b44_reset_count = 0; +#endif + /******************************************************************************/ /* External functions. */ /******************************************************************************/ @@ -101,7 +109,7 @@ b44_LM_QueueRxPackets(PLM_DEVICE_BLOCK p } /* while */ pDevice->rxout = rxout; - wmb(); + MM_WMB(); REG_WR(pDevice, dmaregs.rcvptr, rxout * sizeof(dmadd_t)); return LM_STATUS_SUCCESS; @@ -115,20 +123,20 @@ b44_LM_QueueRxPackets(PLM_DEVICE_BLOCK p /******************************************************************************/ STATIC LM_STATUS b44_LM_EepromReadBlock(PLM_DEVICE_BLOCK pDevice, - LM_UINT32 offset, LM_UINT16 *pData, LM_UINT32 size) + LM_UINT32 offset, LM_UINT32 *pData, LM_UINT32 size) { int off, nw; // LM_UINT8 chk8; int i; - LM_UINT16 *buf; + LM_UINT32 *buf; off = offset; - nw = ROUNDUP(size, 2); - buf = (LM_UINT16 *) pData; + nw = ROUNDUP(size, 4); + buf = (LM_UINT32 *) pData; /* read the sprom */ - for (i = 0; i < nw; i += 2) - buf[i/2] = REG_RD_OFFSET(pDevice, 4096 + off + i); + for (i = 0; i < nw; i += 4) + buf[i/4] = REG_RD_OFFSET(pDevice, 4096 + off + i); return LM_STATUS_SUCCESS; } /* b44_LM_EepromRead */ @@ -146,7 +154,8 @@ LM_STATUS b44_LM_GetAdapterInfo( PLM_DEVICE_BLOCK pDevice) { - LM_UINT8 eprom[128]; + LM_UINT32 eprom_dw[32]; + LM_UINT8 *eprom = (LM_UINT8 *) eprom_dw; LM_STATUS Status; LM_UINT32 Value32; @@ -185,7 +194,7 @@ PLM_DEVICE_BLOCK pDevice) /* Initialize the memory view pointer. 
*/ pDevice->pMemView = (bcmenetregs_t *) pDevice->pMappedMemBase; - b44_LM_EepromReadBlock(pDevice, 0, (LM_UINT16 *) eprom, sizeof(eprom)); + b44_LM_EepromReadBlock(pDevice, 0, eprom_dw, sizeof(eprom_dw)); if (eprom[126] != 1) return LM_STATUS_FAILURE; @@ -213,12 +222,23 @@ PLM_DEVICE_BLOCK pDevice) pDevice->intmask = DEF_INTMASK; pDevice->LinkStatus = LM_STATUS_LINK_DOWN; +#ifdef BCM_WOL + pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE; +#endif + /* Change driver parameters. */ Status = b44_MM_GetConfig(pDevice); if(Status != LM_STATUS_SUCCESS) { return Status; } + +#if 0 + /* Calling SetupPhy will cause target aborts if the chip has not */ + /* been reset */ + b44_LM_SetupPhy(pDevice); +#endif + return LM_STATUS_SUCCESS; } /* LM_GetAdapterInfo */ @@ -247,7 +267,6 @@ b44_LM_InitializeAdapter(PLM_DEVICE_BLOC MAX_RX_PACKET_DESC_COUNT); QQ_InitQueue(&pDevice->TxPacketFreeQ.Container,MAX_TX_PACKET_DESC_COUNT); - QQ_InitQueue(&pDevice->TxPacketActiveQ.Container,MAX_TX_PACKET_DESC_COUNT); QQ_InitQueue(&pDevice->TxPacketXmittedQ.Container,MAX_TX_PACKET_DESC_COUNT); /* Allocate memory for packet descriptors. */ @@ -310,7 +329,7 @@ b44_LM_InitializeAdapter(PLM_DEVICE_BLOC MemPhy += pDevice->MaxRxPacketDescCnt * sizeof(dmadd_t); /* Initialize the hardware. 
*/ - Status = b44_LM_ResetAdapter(pDevice); + Status = b44_LM_ResetAdapter(pDevice, TRUE); if(Status != LM_STATUS_SUCCESS) { return Status; @@ -326,6 +345,15 @@ b44_LM_InitializeAdapter(PLM_DEVICE_BLOC LM_STATUS b44_LM_DisableChip(PLM_DEVICE_BLOCK pDevice) { + + /* disable emac */ + REG_WR(pDevice, enetcontrol, EC_ED); + SPINWAIT((REG_RD(pDevice, enetcontrol) & EC_ED), 200); + + REG_WR(pDevice, dmaregs.xmtcontrol, 0); + REG_WR(pDevice, dmaregs.rcvcontrol, 0); + b44_MM_Wait(10); + return LM_STATUS_SUCCESS; } @@ -337,19 +365,18 @@ b44_LM_DisableChip(PLM_DEVICE_BLOCK pDev /* LM_STATUS_SUCCESS */ /******************************************************************************/ LM_STATUS -b44_LM_ResetAdapter( -PLM_DEVICE_BLOCK pDevice) +b44_LM_ResetAdapter(PLM_DEVICE_BLOCK pDevice, LM_BOOL full) { /* Disable interrupt. */ - b44_LM_DisableInterrupt(pDevice); - - /* Disable transmit and receive DMA engines. Abort all pending requests. */ - if(pDevice->InitDone) + if (pDevice->InitDone) { - b44_LM_Abort(pDevice); + b44_LM_DisableInterrupt(pDevice); } + /* Disable transmit and receive DMA engines. Abort all pending requests. 
*/ + b44_LM_Abort(pDevice); + pDevice->ShuttingDown = FALSE; ASSERT(b44_LM_sb_coreid(pDevice) == SB_ENET); @@ -402,27 +429,37 @@ PLM_DEVICE_BLOCK pDevice) /* set tx watermark */ REG_WR(pDevice, txwatermark, 56); - /* initialize the tx and rx dma channels */ - /* clear tx descriptor ring */ - memset((void*)pDevice->pTxDesc, 0, (pDevice->TxPacketDescCnt * - sizeof(dmadd_t))); - - REG_WR(pDevice, dmaregs.xmtcontrol, XC_XE); - REG_WR(pDevice, dmaregs.xmtaddr, (pDevice->TxDescPhy + - pDevice->ddoffset)); - - /* clear rx descriptor ring */ - memset((void*)pDevice->pRxDesc, 0, (pDevice->MaxRxPacketDescCnt * - sizeof(dmadd_t))); + if (full) + { + /* initialize the tx and rx dma channels */ + /* clear tx descriptor ring */ + memset((void*)pDevice->pTxDesc, 0, (pDevice->TxPacketDescCnt * + sizeof(dmadd_t))); + + REG_WR(pDevice, dmaregs.xmtcontrol, XC_XE); + REG_WR(pDevice, dmaregs.xmtaddr, (pDevice->TxDescPhy + + pDevice->ddoffset)); + + /* clear rx descriptor ring */ + memset((void*)pDevice->pRxDesc, 0, (pDevice->MaxRxPacketDescCnt * + sizeof(dmadd_t))); - REG_WR(pDevice, dmaregs.rcvcontrol, ((pDevice->rxoffset << - RC_RO_SHIFT) | RC_RE)); + REG_WR(pDevice, dmaregs.rcvcontrol, ((pDevice->rxoffset << + RC_RO_SHIFT) | RC_RE)); - REG_WR(pDevice, dmaregs.rcvaddr, (pDevice->RxDescPhy + - pDevice->ddoffset)); + REG_WR(pDevice, dmaregs.rcvaddr, (pDevice->RxDescPhy + + pDevice->ddoffset)); - /* Queue Rx packet buffers. */ - b44_LM_QueueRxPackets(pDevice); + /* Queue Rx packet buffers. 
*/ + b44_LM_QueueRxPackets(pDevice); + + MM_ATOMIC_SET(&pDevice->SendDescLeft, pDevice->TxPacketDescCnt - 1); + } + else + { + REG_WR(pDevice, dmaregs.rcvcontrol, ((pDevice->rxoffset << + RC_RO_SHIFT) | RC_RE)); + } /* turn on the emac */ REG_OR(pDevice, enetcontrol, EC_EE); @@ -513,14 +550,13 @@ b44_LM_SendPacket(PLM_DEVICE_BLOCK pDevi } - atomic_sub(pPacket->u.Tx.FragCount, &pDevice->SendDescLeft); + MM_ATOMIC_SUB(&pDevice->SendDescLeft, pPacket->u.Tx.FragCount); pDevice->txout = txout; - wmb(); + MM_WMB(); REG_WR(pDevice, dmaregs.xmtptr, (txout * sizeof(dmadd_t))); - REG_WR(pDevice, dmaregs.xmtptr, (txout * sizeof(dmadd_t))); return LM_STATUS_SUCCESS; } @@ -613,25 +649,25 @@ b44_LM_Abort( PLM_DEVICE_BLOCK pDevice) { PLM_PACKET pPacket; - LM_UINT32 rxin; + LM_UINT32 rxin, txin, txdmask; - b44_LM_DisableInterrupt(pDevice); - - b44_LM_DisableChip(pDevice); - - /* Abort packets that have already queued to go out. */ - pPacket = (PLM_PACKET) QQ_PopHead(&pDevice->TxPacketActiveQ.Container); - while(pPacket) + if (!pDevice->InitDone) { + return LM_STATUS_SUCCESS; + } - pPacket->PacketStatus = LM_STATUS_TRANSMIT_ABORTED; - - atomic_add(pPacket->u.Tx.FragCount, &pDevice->SendDescLeft); + b44_LM_DisableInterrupt(pDevice); - QQ_PushTail(&pDevice->TxPacketXmittedQ.Container, pPacket); + b44_LM_DisableChip(pDevice); - pPacket = (PLM_PACKET) - QQ_PopHead(&pDevice->TxPacketActiveQ.Container); + txdmask = pDevice->TxPacketDescCnt - 1; + for (txin = pDevice->txin; txin != pDevice->txout; + txin = (txin + 1) & txdmask) + { + if ((pPacket = pDevice->TxPacketArr[txin])) { + QQ_PushTail(&pDevice->TxPacketXmittedQ.Container, pPacket); + pDevice->TxPacketArr[txin] = 0; + } } if(!pDevice->ShuttingDown) @@ -724,6 +760,7 @@ b44_LM_ResetChip(LM_DEVICE_BLOCK *pDevic ((pDevice->coreunit == 0)? 
SBIV_ENET0: SBIV_ENET1)); /* power on reset: reset the enet core */ b44_LM_sb_core_reset(pDevice); + goto chipinreset; } @@ -735,12 +772,11 @@ b44_LM_ResetChip(LM_DEVICE_BLOCK *pDevic /* disable emac */ REG_WR(pDevice, enetcontrol, EC_ED); - SPINWAIT((REG_RD(pDevice, enetcontrol) & EC_ED), 100); + SPINWAIT((REG_RD(pDevice, enetcontrol) & EC_ED), 200); /* reset the dma engines */ REG_WR(pDevice, dmaregs.xmtcontrol, 0); pDevice->txin = pDevice->txout = 0; - atomic_set(&pDevice->SendDescLeft, pDevice->TxPacketDescCnt - 1); if (REG_RD(pDevice, dmaregs.rcvstatus) & RS_RE_MASK) { /* wait until channel is idle or stopped */ @@ -751,6 +787,9 @@ b44_LM_ResetChip(LM_DEVICE_BLOCK *pDevic REG_WR(pDevice, dmaregs.rcvcontrol, 0); pDevice->rxin = pDevice->rxout = 0; + REG_WR(pDevice, enetcontrol, EC_ES); + SPINWAIT((REG_RD(pDevice, enetcontrol) & EC_ES), 200); + b44_LM_sb_core_reset(pDevice); chipinreset: @@ -857,7 +896,7 @@ b44_LM_ServiceRxInterrupt(LM_DEVICE_BLOC goto rx_err; } rxh = (bcmenetrxh_t *) pPacket->u.Rx.pRxBufferVirt; - len = cpu_to_le16(rxh->len); + len = MM_SWAP_LE16(rxh->len); if (len > (pPacket->u.Rx.RxBufferSize - pDevice->rxoffset)) { pPacket->PacketStatus = LM_STATUS_FAILURE; skiplen = len - (pPacket->u.Rx.RxBufferSize - @@ -869,7 +908,7 @@ b44_LM_ServiceRxInterrupt(LM_DEVICE_BLOC if (len == 0) { while ((len == 0) && (i < 5)) { b44_MM_Wait(2); - len = cpu_to_le16(rxh->len); + len = MM_SWAP_LE16(rxh->len); i++; } if (len == 0) { @@ -878,7 +917,7 @@ b44_LM_ServiceRxInterrupt(LM_DEVICE_BLOC goto rx_err; } } - if (cpu_to_le16(rxh->flags) & RXF_ERRORS) { + if (MM_SWAP_LE16(rxh->flags) & RXF_ERRORS) { pPacket->PacketStatus = LM_STATUS_FAILURE; } else { @@ -910,8 +949,8 @@ b44_LM_ServiceTxInterrupt(LM_DEVICE_BLOC QQ_PushTail(&pDevice->TxPacketXmittedQ.Container, pPacket); pDevice->TxPacketArr[txin] = 0; - atomic_add(pPacket->u.Tx.FragCount, - &pDevice->SendDescLeft); + MM_ATOMIC_ADD(&pDevice->SendDescLeft, + pPacket->u.Tx.FragCount); } } pDevice->txin = curr; @@ 
-952,7 +991,11 @@ b44_LM_ServiceInterrupts(PLM_DEVICE_BLOC REG_WR(pDevice, gptimer, 0); } if (intstatus & I_ERRORS) { - b44_LM_ResetAdapter(pDevice); +#ifdef B44_DEBUG + b44_reset_count++; +#endif + b44_LM_ResetAdapter(pDevice, TRUE); + b44_LM_EnableInterrupt(pDevice); } if (!QQ_Empty(&pDevice->RxPacketReceivedQ.Container)) { b44_MM_IndicateRxPackets(pDevice); @@ -1277,7 +1320,7 @@ b44_LM_ResetPhy(LM_DEVICE_BLOCK *pDevice b44_MM_Wait(100); b44_LM_ReadPhy(pDevice, 0, &value32); if (value32 & PHY_CTRL_PHY_RESET) { - printf(KERN_ALERT "Phy reset not complete\n"); + printf("Phy reset not complete\n"); } return LM_STATUS_SUCCESS; } @@ -1460,6 +1503,8 @@ b44_LM_PollLink(LM_DEVICE_BLOCK *pDevice LM_UINT32 LocalAdv, RemoteAdv; b44_LM_ReadPhy(pDevice, 1, &status); + b44_LM_ReadPhy(pDevice, 1, &status); + b44_LM_ReadPhy(pDevice, 24, &aux); /* check for bad mdio read */ @@ -1763,3 +1808,113 @@ b44_LM_sb_iscoreup(LM_DEVICE_BLOCK *pDev (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK); } +#ifdef BCM_WOL + +/* Program patterns on the chip */ +static void +b44_LM_pmprog(LM_DEVICE_BLOCK *pDevice) +{ + LM_UINT32 wfl; + int plen0, plen1, max, i, j; + LM_UINT8 wol_pattern[BCMENET_PMPSIZE]; + LM_UINT8 wol_mask[BCMENET_PMMSIZE]; + + /* program the chip with wakeup patterns, masks, and lengths */ + + if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_NONE) { + wfl = DISABLE_3210_PATMATCH; + REG_WR(pDevice, wakeuplength, wfl); + } + else if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET) { + /* UDP magic packet pattern */ + memset(wol_pattern, 0, BCMENET_PMPSIZE); + memset(wol_pattern + 42, 0xff, 6); /* sync pattern */ + max = ETHERNET_ADDRESS_SIZE; + for (i = 0; i < 14; ++i) { + if (i == 13) + max = 2; + for (j = 0; j < max; ++j) { + wol_pattern[42 + 6 + + (i * ETHERNET_ADDRESS_SIZE) + j] = + pDevice->NodeAddress[j]; + } + } + memset(wol_mask, 0, BCMENET_PMMSIZE); + wol_mask[5] = 0xfc; + memset(wol_mask + 6, 0xff, 10); + plen0 = BCMENET_PMPSIZE - 1; + + b44_LM_ftwrite(pDevice, 
(LM_UINT32 *)wol_pattern, + BCMENET_PMPSIZE, BCMENET_PMPBASE); + + b44_LM_ftwrite(pDevice, (LM_UINT32 *)wol_mask, BCMENET_PMMSIZE, + BCMENET_PMMBASE); + + /* raw ethernet II magic packet pattern */ + memset(wol_pattern, 0, BCMENET_PMPSIZE); + memset(wol_pattern + 14, 0xff, 6); /* sync pattern */ + max = ETHERNET_ADDRESS_SIZE; + for (i = 0; i < 16; ++i) { + for (j = 0; j < max; ++j) { + wol_pattern[14 + 6 + + (i * ETHERNET_ADDRESS_SIZE) + j] = + pDevice->NodeAddress[j]; + } + } + memset(wol_mask, 0, BCMENET_PMMSIZE); + wol_mask[2] = 0xf0; + memset(wol_mask + 3, 0xff, 11); + wol_mask[14] = 0xf; + plen1 = 14 + 6 + 96 - 1; + + b44_LM_ftwrite(pDevice, (LM_UINT32 *)wol_pattern, + BCMENET_PMPSIZE, BCMENET_PMPBASE + BCMENET_PMPSIZE); + + b44_LM_ftwrite(pDevice, (LM_UINT32 *)wol_mask, BCMENET_PMMSIZE, + BCMENET_PMMBASE + BCMENET_PMMSIZE); + + /* set this pattern's length: one less than the real length */ + wfl = plen0 | (plen1 << 8) | DISABLE_32_PATMATCH; + + REG_WR(pDevice, wakeuplength, wfl); + } + +} + +LM_VOID +b44_LM_pmset(LM_DEVICE_BLOCK *pDevice) +{ + LM_UINT16 Value16; + + b44_LM_Halt(pDevice); + + /* now turn on just enough of the chip to receive and match patterns */ + b44_LM_ResetAdapter(pDevice, FALSE); + + /* program patterns */ + b44_LM_pmprog(pDevice); + + /* enable chip wakeup pattern matching */ + REG_OR(pDevice, devcontrol, DC_PM); + /* enable sonics bus PME */ + REG_OR(pDevice, sbconfig.sbtmstatelow, SBTML_PE); + + b44_MM_ReadConfig16(pDevice, BCMENET_PMCSR, &Value16); + b44_MM_WriteConfig16(pDevice, BCMENET_PMCSR, + Value16 | ENABLE_PCICONFIG_PME); +} + + +static void +b44_LM_ftwrite(LM_DEVICE_BLOCK *pDevice, LM_UINT32 *b, LM_UINT32 nbytes, + LM_UINT32 ftaddr) +{ + LM_UINT32 i; + + for (i = 0; i < nbytes; i += sizeof(LM_UINT32)) { + REG_WR(pDevice, enetftaddr, ftaddr + i); + REG_WR(pDevice, enetftdata, b[i / sizeof(LM_UINT32)]); + } +} + +#endif /* BCM_WOL */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44lm.h 
linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44lm.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44lm.h 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44lm.h 2003-08-25 20:31:22.000000000 +0200 @@ -336,7 +336,7 @@ typedef struct _LM_PACKET *PLM_PACKET; LM_STATUS b44_LM_GetAdapterInfo(PLM_DEVICE_BLOCK pDevice); LM_STATUS b44_LM_InitializeAdapter(PLM_DEVICE_BLOCK pDevice); -LM_STATUS b44_LM_ResetAdapter(PLM_DEVICE_BLOCK pDevice); +LM_STATUS b44_LM_ResetAdapter(PLM_DEVICE_BLOCK pDevice, LM_BOOL full); LM_STATUS b44_LM_DisableInterrupt(PLM_DEVICE_BLOCK pDevice); LM_STATUS b44_LM_EnableInterrupt(PLM_DEVICE_BLOCK pDevice); LM_STATUS b44_LM_SendPacket(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket); @@ -368,7 +368,9 @@ LM_STATUS b44_LM_NvramWriteBlock(PLM_DEV LM_UINT32 *pData, LM_UINT32 Size); void b44_LM_PollLink(PLM_DEVICE_BLOCK pDevice); LM_STATUS b44_LM_StatsUpdate(PLM_DEVICE_BLOCK pDevice); - +#ifdef BCM_WOL +LM_VOID b44_LM_pmset(PLM_DEVICE_BLOCK pDevice); +#endif /******************************************************************************/ /* These are the OS specific functions called by LMAC. 
*/ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44mm.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44mm.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44mm.h 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44mm.h 2003-08-25 20:31:22.000000000 +0200 @@ -68,15 +68,34 @@ #define BCM_PROC_FS 1 #endif +#define BCM_WOL 1 + #ifdef __BIG_ENDIAN #define BIG_ENDIAN_HOST 1 #endif +#define MM_SWAP_LE16(x) cpu_to_le16(x) + #if (LINUX_VERSION_CODE < 0x020327) #define __raw_readl readl #define __raw_writel writel #endif +#define MM_MEMWRITEL(ptr, val) __raw_writel(val, ptr) +#define MM_MEMREADL(ptr) __raw_readl(ptr) + +typedef atomic_t MM_ATOMIC_T; + +#define MM_ATOMIC_SET(ptr, val) atomic_set(ptr, val) +#define MM_ATOMIC_READ(ptr) atomic_read(ptr) +#define MM_ATOMIC_INC(ptr) atomic_inc(ptr) +#define MM_ATOMIC_ADD(ptr, val) atomic_add(val, ptr) +#define MM_ATOMIC_DEC(ptr) atomic_dec(ptr) +#define MM_ATOMIC_SUB(ptr, val) atomic_sub(val, ptr) + +#define MM_MB() mb() +#define MM_WMB() wmb() + #include "b44lm.h" #include "b44queue.h" #include "b44.h" @@ -222,13 +241,10 @@ typedef struct _UM_DEVICE_BLOCK { int tx_queued; int line_speed; /* in Mbps, 0 if link is down */ UM_RX_PACKET_Q rx_out_of_buf_q; - int rx_out_of_buf; int rx_buf_repl_thresh; int rx_buf_repl_panic_thresh; - int rx_buf_align; struct timer_list timer; - int do_global_lock; - spinlock_t global_lock; + spinlock_t phy_lock; volatile unsigned long interrupt; atomic_t intr_sem; int tasklet_pending; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44proc.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44proc.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44proc.c 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44proc.c 2003-08-25 20:31:22.000000000 +0200 @@ -22,6 +22,10 @@ static struct proc_dir_entry *bcm4400_pr extern char bcm4400_driver[], bcm4400_version[]; 
+#ifdef B44_DEBUG +extern int b44_reset_count; +#endif + static char *na_str = "n/a"; static char *pause_str = "pause "; static char *asym_pause_str = "asym_pause "; @@ -154,7 +158,6 @@ bcm4400_read_pfs(char *page, char **star pDevice->NodeAddress[0], pDevice->NodeAddress[1], pDevice->NodeAddress[2], pDevice->NodeAddress[3], pDevice->NodeAddress[4], pDevice->NodeAddress[5]); - len += sprintf(page+len, "Part_Number\t\t\t%s\n\n", pDevice->PartNo); len += sprintf(page+len, "Link\t\t\t\t%s\n", (pUmDevice->opened == 0) ? "unknown" : @@ -243,6 +246,16 @@ bcm4400_read_pfs(char *page, char **star len += sprintf(page+len, "Rx_Desc_Count\t\t\t%u\n", pDevice->RxPacketDescCnt); +#ifdef B44_DEBUG + len += sprintf(page+len, "Intr_Sem\t\t\t%u\n", + atomic_read(&pUmDevice->intr_sem)); + len += sprintf(page+len, "Int_Status\t\t\t%x\n", + REG_RD(pDevice, intstatus)); + len += sprintf(page+len, "Int_Mask\t\t\t%x\n", + REG_RD(pDevice, intmask)); + len += sprintf(page+len, "Reset_Count\t\t\t%u\n", b44_reset_count); +#endif + *eof = 1; return len; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44queue.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44queue.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44queue.h 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44queue.h 2003-08-25 20:31:22.000000000 +0200 @@ -45,7 +45,7 @@ typedef struct { unsigned int Head; unsigned int Tail; unsigned int Size; - atomic_t EntryCnt; + MM_ATOMIC_T EntryCnt; PQQ_ENTRY Array[1]; } QQ_CONTAINER, *PQQ_CONTAINER; @@ -91,7 +91,7 @@ unsigned int QueueSize) { pQueue->Head = 0; pQueue->Tail = 0; pQueue->Size = QueueSize+1; - atomic_set(&pQueue->EntryCnt, 0); + MM_ATOMIC_SET(&pQueue->EntryCnt, 0); } /* QQ_InitQueue */ @@ -147,7 +147,7 @@ PQQ_CONTAINER pQueue) { __inline static unsigned int QQ_GetEntryCnt( PQQ_CONTAINER pQueue) { - return atomic_read(&pQueue->EntryCnt); + return MM_ATOMIC_READ(&pQueue->EntryCnt); } /* QQ_GetEntryCnt 
*/ @@ -174,9 +174,9 @@ PQQ_ENTRY pEntry) { #endif /* QQ_NO_OVERFLOW_CHECK */ pQueue->Array[pQueue->Head] = pEntry; - wmb(); + MM_WMB(); pQueue->Head = Head; - atomic_inc(&pQueue->EntryCnt); + MM_ATOMIC_INC(&pQueue->EntryCnt); return -1; } /* QQ_PushHead */ @@ -209,9 +209,9 @@ PQQ_ENTRY pEntry) { #endif /* QQ_NO_OVERFLOW_CHECK */ pQueue->Array[Tail] = pEntry; - wmb(); + MM_WMB(); pQueue->Tail = Tail; - atomic_inc(&pQueue->EntryCnt); + MM_ATOMIC_INC(&pQueue->EntryCnt); return -1; } /* QQ_PushTail */ @@ -243,9 +243,9 @@ PQQ_CONTAINER pQueue) { Head--; Entry = pQueue->Array[Head]; - mb(); + MM_MB(); pQueue->Head = Head; - atomic_dec(&pQueue->EntryCnt); + MM_ATOMIC_DEC(&pQueue->EntryCnt); return Entry; } /* QQ_PopHead */ @@ -272,9 +272,9 @@ PQQ_CONTAINER pQueue) { #endif /* QQ_NO_UNDERFLOW_CHECK */ Entry = pQueue->Array[Tail]; - mb(); + MM_MB(); pQueue->Tail = (Tail + 1) % pQueue->Size; - atomic_dec(&pQueue->EntryCnt); + MM_ATOMIC_DEC(&pQueue->EntryCnt); return Entry; } /* QQ_PopTail */ @@ -291,7 +291,7 @@ QQ_GetHead( PQQ_CONTAINER pQueue, unsigned int Idx) { - if(Idx >= atomic_read(&pQueue->EntryCnt)) + if(Idx >= MM_ATOMIC_READ(&pQueue->EntryCnt)) { return (PQQ_ENTRY) 0; } @@ -321,7 +321,7 @@ QQ_GetTail( PQQ_CONTAINER pQueue, unsigned int Idx) { - if(Idx >= atomic_read(&pQueue->EntryCnt)) + if(Idx >= MM_ATOMIC_READ(&pQueue->EntryCnt)) { return (PQQ_ENTRY) 0; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44um.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44um.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/bcm4400/b44um.c 2003-08-25 18:24:43.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/bcm4400/b44um.c 2003-08-25 20:31:22.000000000 +0200 @@ -8,12 +8,26 @@ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation, located in the file LICENSE. 
*/ /* */ +/* Change Log */ +/* 2.0.5 (07/02/03) */ +/* - Added __devexit_p to bcm4400_remove_one */ +/* - Changed Makefile to properly choose between kgcc/gcc */ +/* 2.0.4 (06/26/03) */ +/* - More Changes to fix the target abort problem. */ +/* 2.0.3 (06/25/03) */ +/* - Fixed target abort problem. */ +/* 2.0.0 (03/25/03) */ +/* - Fixed a crash problem under heavy traffic caused by reset and tasklet */ +/* running at the same time. */ +/* 1.0.3 (2/25/03) */ +/* - Fixed various problems related to reset. */ +/* - Added magic packet WOL. */ /******************************************************************************/ char bcm4400_driver[] = "bcm4400"; -char bcm4400_version[] = "1.0.1"; -char bcm4400_date[] = "(08/26/02)"; +char bcm4400_version[] = "2.0.5"; +char bcm4400_date[] = "(07/02/03)"; #define B44UM #include "b44mm.h" @@ -43,6 +57,9 @@ static unsigned int rx_pkt_desc_cnt[MAX_ RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT, RX_DESC_CNT }; +#ifdef BCM_WOL +static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; +#endif /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. 
*/ @@ -112,6 +129,12 @@ struct pci_device_id { #define MODULE_DEVICE_TABLE(pci, pci_tbl) #endif + +#if (LINUX_VERSION_CODE < 0x020411) +#ifndef __devexit_p +#define __devexit_p(x) x +#endif +#endif #ifndef MODULE_LICENSE #define MODULE_LICENSE(license) @@ -158,6 +181,16 @@ pci_set_dma_mask(struct pci_dev *dev, dm #define pci_release_regions(pdev) #endif +#define BCM4400_PHY_LOCK(pUmDevice, flags) \ +{ \ + spin_lock_irqsave(&(pUmDevice)->phy_lock, flags); \ +} + +#define BCM4400_PHY_UNLOCK(pUmDevice, flags) \ +{ \ + spin_unlock_irqrestore(&(pUmDevice)->phy_lock, flags); \ +} + void bcm4400_intr_off(PUM_DEVICE_BLOCK pUmDevice) { @@ -188,6 +221,9 @@ MODULE_PARM(tx_flow_control, "1-" __MODU MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i"); MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i"); MODULE_PARM(rx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i"); +#ifdef BCM_WOL +MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i"); +#endif #endif #define RUN_AT(x) (jiffies + (x)) @@ -216,6 +252,7 @@ STATIC void bcm4400_set_rx_mode(struct n STATIC int bcm4400_set_mac_addr(struct net_device *dev, void *p); STATIC int bcm4400_rxfill(PUM_DEVICE_BLOCK pUmDevice); STATIC int bcm4400_freemem(struct net_device *dev); +STATIC void bcm4400_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice); /* A list of all installed bcm4400 devices. 
*/ @@ -285,6 +322,8 @@ static int __devinit bcm4400_init_board( goto err_out; } + spin_lock_init(&pUmDevice->phy_lock); + pUmDevice->dev = dev; pUmDevice->pdev = pdev; pUmDevice->mem_list_num = 0; @@ -300,7 +339,6 @@ static int __devinit bcm4400_init_board( goto err_out_unmap; } - pUmDevice->rx_buf_align = 2; dev->mem_start = pci_resource_start(pdev, 0); dev->mem_end = dev->mem_start + sizeof(bcmenetregs_t) + 128; dev->irq = pdev->irq; @@ -480,6 +518,7 @@ bcm4400_open(struct net_device *dev) } + STATIC void bcm4400_timer(unsigned long data) { @@ -519,6 +558,7 @@ bcm4400_timer(unsigned long data) if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) > pUmDevice->rx_buf_repl_panic_thresh) { /* Generate interrupt and let isr allocate buffers */ + REG_WR(pDevice, gptimer, 2); } if (pUmDevice->link_interval == 0) { @@ -545,7 +585,7 @@ bcm4400_tx_timeout(struct net_device *de netif_stop_queue(dev); bcm4400_intr_off(pUmDevice); - b44_LM_ResetAdapter(pDevice); + b44_LM_ResetAdapter(pDevice, TRUE); if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) { b44_LM_SetMacAddress(pDevice, dev->dev_addr); } @@ -705,19 +745,50 @@ bcm4400_close(struct net_device *dev) // tasklet_disable(&pUmDevice->tasklet); tasklet_kill(&pUmDevice->tasklet); #endif + b44_LM_Halt(pDevice); pDevice->InitDone = 0; + bcm4400_free_remaining_rx_bufs(pUmDevice); del_timer(&pUmDevice->timer); free_irq(dev->irq, dev); #if (LINUX_VERSION_CODE < 0x020300) MOD_DEC_USE_COUNT; #endif +#ifdef BCM_WOL + if (pDevice->WakeUpMode != LM_WAKE_UP_MODE_NONE) { + b44_LM_pmset(pDevice); + } +#endif + bcm4400_freemem(dev); return 0; } +STATIC void +bcm4400_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice) +{ + LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev; + UM_PACKET *pUmPacket; + struct sk_buff *skb; + int cnt, i; + + cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container); + for (i = 0; i < cnt; i++) { + if ((pUmPacket = + QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) + != 0) { + + if ((skb = 
pUmPacket->skbuff)) + dev_kfree_skb(skb); + pUmPacket->skbuff = 0; + QQ_PushTail(&pDevice->RxPacketFreeQ.Container, + pUmPacket); + } + } +} + STATIC int bcm4400_freemem(struct net_device *dev) { @@ -776,6 +847,7 @@ static int netdev_ethtool_ioctl(struct n struct ethtool_cmd ethcmd; PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv; PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice; + unsigned long flags; if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) return -EFAULT; @@ -891,9 +963,49 @@ static int netdev_ethtool_ioctl(struct n LM_DUPLEX_MODE_HALF; } } + BCM4400_PHY_LOCK(pUmDevice, flags); b44_LM_SetupPhy(pDevice); + BCM4400_PHY_UNLOCK(pUmDevice, flags); return 0; } +#ifdef ETHTOOL_GWOL +#ifdef BCM_WOL + case ETHTOOL_GWOL: { + struct ethtool_wolinfo wol = {ETHTOOL_GWOL}; + + wol.supported = WAKE_MAGIC; + if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET) + { + wol.wolopts = WAKE_MAGIC; + } + else { + wol.wolopts = 0; + } + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; + } + case ETHTOOL_SWOL: { + struct ethtool_wolinfo wol; + + if(!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + if ((wol.wolopts & ~WAKE_MAGIC) != 0) { + return -EINVAL; + } + if (wol.wolopts & WAKE_MAGIC) { + pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET; + } + else { + pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE; + } + return 0; + } +#endif +#endif #ifdef ETHTOOL_GLINK case ETHTOOL_GLINK: { struct ethtool_value edata = {ETHTOOL_GLINK}; @@ -916,10 +1028,12 @@ static int netdev_ethtool_ioctl(struct n if (pDevice->DisableAutoNeg) { return -EINVAL; } + BCM4400_PHY_LOCK(pUmDevice, flags); b44_LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl); b44_LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl | PHY_CTRL_AUTO_NEG_ENABLE | PHY_CTRL_RESTART_AUTO_NEG); + BCM4400_PHY_UNLOCK(pUmDevice, flags); return 0; } #endif @@ -937,19 +1051,36 @@ STATIC int bcm4400_ioctl(struct net_devi PLM_DEVICE_BLOCK 
pDevice = (PLM_DEVICE_BLOCK) pUmDevice; u16 *data = (u16 *)&rq->ifr_data; u32 value; + unsigned long flags; switch(cmd) { +#ifdef SIOCGMIIPHY + case SIOCGMIIPHY: +#endif case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */ data[0] = pDevice->PhyAddr; + +#ifdef SIOCGMIIREG + case SIOCGMIIREG: +#endif case SIOCDEVPRIVATE+1: /* Read the specified MII register. */ + BCM4400_PHY_LOCK(pUmDevice, flags); b44_LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *) &value); + BCM4400_PHY_UNLOCK(pUmDevice, flags); data[3] = value & 0xffff; return 0; + +#ifdef SIOCSMIIREG + case SIOCSMIIREG: +#endif case SIOCDEVPRIVATE+2: /* Write the specified MII register */ if (!capable(CAP_NET_ADMIN)) return -EPERM; + BCM4400_PHY_LOCK(pUmDevice, flags); b44_LM_WritePhy(pDevice, data[1] & 0x1f, data[2]); + BCM4400_PHY_UNLOCK(pUmDevice, flags); return 0; + #ifdef SIOCETHTOOL case SIOCETHTOOL: return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data); @@ -1106,6 +1237,7 @@ static void bcm4400_suspend (struct pci_ /* Disable interrupts, stop Tx and Rx. 
*/ b44_LM_Halt(pDevice); + bcm4400_free_remaining_rx_bufs(pUmDevice); /* pci_power_off(pdev, -1);*/ #if (LINUX_VERSION_CODE >= 0x020406) @@ -1148,7 +1280,7 @@ static struct pci_driver bcm4400_pci_dri name: bcm4400_driver, id_table: bcm4400_pci_tbl, probe: bcm4400_init_one, - remove: bcm4400_remove_one, + remove: __devexit_p(bcm4400_remove_one), suspend: bcm4400_suspend, resume: bcm4400_resume, }; @@ -1311,17 +1443,9 @@ b44_MM_InitializeUmPackets(PLM_DEVICE_BL skb_reserve(skb, pDevice->rxoffset); QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket); } - if (1) { - /* reallocate buffers in the ISR */ - pUmDevice->rx_buf_repl_thresh = 0; - pUmDevice->rx_buf_repl_panic_thresh = 0; - } - else { - pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 4; - pUmDevice->rx_buf_repl_panic_thresh = - pDevice->RxPacketDescCnt / 2; + pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 4; + pUmDevice->rx_buf_repl_panic_thresh = pDevice->RxPacketDescCnt * 3 / 4; - } return LM_STATUS_SUCCESS; } @@ -1411,6 +1535,11 @@ b44_MM_GetConfig(PLM_DEVICE_BLOCK pDevic } pDevice->RxPacketDescCnt = rx_pkt_desc_cnt[index]; +#ifdef BCM_WOL + if (enable_wol[index]) { + pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET; + } +#endif return LM_STATUS_SUCCESS; } @@ -1486,6 +1615,9 @@ bcm4400_rxfill(PUM_DEVICE_BLOCK pUmDevic int queue_rx = 0; int ret = 0; + if (!pUmDevice->opened) + return ret; + while ((pUmPacket = (PUM_PACKET) QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) { pPacket = (PLM_PACKET) pUmPacket; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/ppp_generic.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/ppp_generic.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/ppp_generic.c 2003-08-25 18:26:32.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/ppp_generic.c 2003-08-26 17:59:08.000000000 +0200 @@ -122,6 +122,7 @@ struct ppp { spinlock_t rlock; /* lock for receive side 58 */ spinlock_t wlock; /* lock for transmit side 5c */ int mru; /* max 
receive unit 60 */ + int mru_alloc; /* MAX(1500,MRU) for dev_alloc_skb() */ unsigned int flags; /* control bits 64 */ unsigned int xstate; /* transmit state bits 68 */ unsigned int rstate; /* receive state bits 6c */ @@ -573,6 +574,10 @@ static int ppp_ioctl(struct inode *inode if (get_user(val, (int *) arg)) break; ppp->mru = val; + if (ppp->mru > ppp->mru_alloc) + ppp->mru_alloc = ppp->mru; + else if (ppp->mru < PPP_MRU) + ppp->mru_alloc = PPP_MRU; /* adjust to minimum */ err = 0; break; @@ -1563,14 +1568,15 @@ ppp_decompress_frame(struct ppp *ppp, st int len; if (proto == PPP_COMP) { - ns = dev_alloc_skb(ppp->mru + PPP_HDRLEN); + ns = dev_alloc_skb(ppp->mru_alloc + PPP_HDRLEN); if (ns == 0) { printk(KERN_ERR "ppp_decompress_frame: no memory\n"); goto err; } /* the decompressor still expects the A/C bytes in the hdr */ len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, - skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN); + skb->len + 2, ns->data, + ppp->mru_alloc + PPP_HDRLEN); if (len < 0) { /* Pass the compressed frame to pppd as an error indication. */ @@ -2276,6 +2282,7 @@ ppp_create_interface(int unit, int *retp /* Initialize the new ppp unit */ ppp->file.index = unit; ppp->mru = PPP_MRU; + ppp->mru_alloc = PPP_MRU; init_ppp_file(&ppp->file, INTERFACE); ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ for (i = 0; i < NUM_NP; ++i) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/ppp_mppe.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/ppp_mppe.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/ppp_mppe.c 2003-08-25 18:24:47.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/ppp_mppe.c 2003-08-26 17:59:08.000000000 +0200 @@ -530,6 +530,15 @@ mppe_decompress(void *arg, unsigned char return DECOMP_ERROR; } + /* Make sure we have enough room to decrypt the packet. */ + if (osize < isize - MPPE_OVHD - 2) { + printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! 
" + "(have: %d need: %d)\n", state->unit, + osize, isize - MPPE_OVHD - 2); + return DECOMP_ERROR; + } + osize = isize - MPPE_OVHD - 2; + /* Check the sequence number. */ seq = MPPE_CCOUNT_FROM_PACKET(ibuf); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/Makefile linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/Makefile --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/Makefile 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/Makefile 2003-08-29 10:55:43.000000000 +0200 @@ -7,6 +7,7 @@ # Standalone driver params # SKPARAM += -DSK_KERNEL_24 # SKPARAM += -DSK_KERNEL_24_26 +# SKPARAM += -DSK_KERNEL_26 # SKPARAM += -DSK_KERNEL_22_24 O_TARGET := sk98lin.o diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skdrv1st.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skdrv1st.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skdrv1st.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skdrv1st.h 2003-08-29 10:55:43.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skdrv1st.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.14 $ - * Date: $Date: 2003/06/03 14:36:32 $ + * Version: $Revision: 1.1 $ + * Date: $Date: 2003/07/21 07:22:43 $ * Purpose: First header file for driver and all other modules * ******************************************************************************/ @@ -26,6 +26,12 @@ * History: * * $Log: skdrv1st.h,v $ + * Revision 1.1 2003/07/21 07:22:43 rroesler + * Fix: Re-Enter after CVS crash + * + * Revision 1.15 2003/07/17 14:54:09 rroesler + * Fix: Corrected SK_PNMI_READ macros to copy right amount of bytes + * * Revision 1.14 2003/06/03 14:36:32 mlindner * Add: Additions for SK_SLIM * @@ -118,8 +124,8 @@ typedef struct s_AC SK_AC; #define SK_PNMI_STORE_U32(p,v) memcpy((char*)(p),(char*)&(v),4) #define SK_PNMI_STORE_U64(p,v) memcpy((char*)(p),(char*)&(v),8) #define SK_PNMI_READ_U16(p,v) 
memcpy((char*)&(v),(char*)(p),2) -#define SK_PNMI_READ_U32(p,v) memcpy((char*)&(v),(char*)(p),2) -#define SK_PNMI_READ_U64(p,v) memcpy((char*)&(v),(char*)(p),2) +#define SK_PNMI_READ_U32(p,v) memcpy((char*)&(v),(char*)(p),4) +#define SK_PNMI_READ_U64(p,v) memcpy((char*)&(v),(char*)(p),8) #define SkCsCalculateChecksum(p,l) ((~ip_compute_csum(p, l)) & 0xffff) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skdrv2nd.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skdrv2nd.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skdrv2nd.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skdrv2nd.h 2003-08-29 10:55:43.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skdrv2nd.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.18 $ - * Date: $Date: 2003/06/12 07:54:14 $ + * Version: $Revision: 1.3 $ + * Date: $Date: 2003/08/12 16:51:18 $ * Purpose: Second header file for driver and all other modules * ******************************************************************************/ @@ -26,6 +26,19 @@ * History: * * $Log: skdrv2nd.h,v $ + * Revision 1.3 2003/08/12 16:51:18 mlindner + * Fix: UDP and TCP Proto checks + * Fix: UDP header offset + * + * Revision 1.2 2003/08/07 10:50:54 mlindner + * Add: Speed and HW-Csum support for Yukon Lite chipset + * + * Revision 1.1 2003/07/21 07:25:29 rroesler + * Fix: Re-Enter after CVS crash + * + * Revision 1.19 2003/07/07 09:53:10 rroesler + * Fix: Removed proprietary RxTx defines and used the ones from skgehw.h instead + * * Revision 1.18 2003/06/12 07:54:14 mlindner * Fix: Changed Descriptor Alignment to 64 Byte * @@ -285,12 +298,41 @@ struct s_IOCTL { ** Interim definition of SK_DRV_TIMER placed in this file until ** common modules have boon finallized */ -#define SK_DRV_TIMER 11 +#define SK_DRV_TIMER 11 #define SK_DRV_MODERATION_TIMER 1 #define SK_DRV_MODERATION_TIMER_LENGTH 1000000 /* 1 second */ #define SK_DRV_RX_CLEANUP_TIMER 2 #define 
SK_DRV_RX_CLEANUP_TIMER_LENGTH 1000000 /* 100 millisecs */ +/* +** Definitions regarding transmitting frames +** any calculating any checksum. +*/ +#define C_LEN_ETHERMAC_HEADER_DEST_ADDR 6 +#define C_LEN_ETHERMAC_HEADER_SRC_ADDR 6 +#define C_LEN_ETHERMAC_HEADER_LENTYPE 2 +#define C_LEN_ETHERMAC_HEADER ( (C_LEN_ETHERMAC_HEADER_DEST_ADDR) + \ + (C_LEN_ETHERMAC_HEADER_SRC_ADDR) + \ + (C_LEN_ETHERMAC_HEADER_LENTYPE) ) + +#define C_LEN_ETHERMTU_MINSIZE 46 +#define C_LEN_ETHERMTU_MAXSIZE_STD 1500 +#define C_LEN_ETHERMTU_MAXSIZE_JUMBO 9000 + +#define C_LEN_ETHERNET_MINSIZE ( (C_LEN_ETHERMAC_HEADER) + \ + (C_LEN_ETHERMTU_MINSIZE) ) + +#define C_OFFSET_IPHEADER C_LEN_ETHERMAC_HEADER +#define C_OFFSET_IPHEADER_IPPROTO 9 +#define C_OFFSET_TCPHEADER_TCPCS 16 +#define C_OFFSET_UDPHEADER_UDPCS 6 + +#define C_OFFSET_IPPROTO ( (C_LEN_ETHERMAC_HEADER) + \ + (C_OFFSET_IPHEADER_IPPROTO) ) + +#define C_PROTO_ID_UDP 17 /* refer to RFC 790 or Stevens' */ +#define C_PROTO_ID_TCP 6 /* TCP/IP illustrated for details */ + /* TX and RX descriptors *****************************************************/ typedef struct s_RxD RXD; /* the receive descriptor */ @@ -324,160 +366,42 @@ struct s_TxD { struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */ }; +/* Used interrupt bits in the interrupts source register *********************/ -/* definition of flags in descriptor control field */ -#define RX_CTRL_OWN_BMU UINT32_C(0x80000000) -#define RX_CTRL_STF UINT32_C(0x40000000) -#define RX_CTRL_EOF UINT32_C(0x20000000) -#define RX_CTRL_EOB_IRQ UINT32_C(0x10000000) -#define RX_CTRL_EOF_IRQ UINT32_C(0x08000000) -#define RX_CTRL_DEV_NULL UINT32_C(0x04000000) -#define RX_CTRL_STAT_VALID UINT32_C(0x02000000) -#define RX_CTRL_TIME_VALID UINT32_C(0x01000000) -#define RX_CTRL_CHECK_DEFAULT UINT32_C(0x00550000) -#define RX_CTRL_CHECK_CSUM UINT32_C(0x00560000) -#define RX_CTRL_LEN_MASK UINT32_C(0x0000FFFF) - -#define TX_CTRL_OWN_BMU UINT32_C(0x80000000) -#define TX_CTRL_STF UINT32_C(0x40000000) -#define 
TX_CTRL_EOF UINT32_C(0x20000000) -#define TX_CTRL_EOB_IRQ UINT32_C(0x10000000) -#define TX_CTRL_EOF_IRQ UINT32_C(0x08000000) -#define TX_CTRL_ST_FWD UINT32_C(0x04000000) -#define TX_CTRL_DISAB_CRC UINT32_C(0x02000000) -#define TX_CTRL_SOFTWARE UINT32_C(0x01000000) -#define TX_CTRL_CHECK_DEFAULT UINT32_C(0x00550000) -#define TX_CTRL_CHECK_CSUM UINT32_C(0x00560000) -#define TX_CTRL_LEN_MASK UINT32_C(0x0000FFFF) - - - -/* The offsets of registers in the TX and RX queue control io area ***********/ - -#define RX_Q_BUF_CTRL_CNT 0x00 -#define RX_Q_NEXT_DESCR_LOW 0x04 -#define RX_Q_BUF_ADDR_LOW 0x08 -#define RX_Q_BUF_ADDR_HIGH 0x0c -#define RX_Q_FRAME_STAT 0x10 -#define RX_Q_TIME_STAMP 0x14 -#define RX_Q_CSUM_1_2 0x18 -#define RX_Q_CSUM_START_1_2 0x1c -#define RX_Q_CUR_DESCR_LOW 0x20 -#define RX_Q_DESCR_HIGH 0x24 -#define RX_Q_CUR_ADDR_LOW 0x28 -#define RX_Q_CUR_ADDR_HIGH 0x2c -#define RX_Q_CUR_BYTE_CNT 0x30 -#define RX_Q_CTRL 0x34 -#define RX_Q_FLAG 0x38 -#define RX_Q_TEST1 0x3c -#define RX_Q_TEST2 0x40 -#define RX_Q_TEST3 0x44 - -#define TX_Q_BUF_CTRL_CNT 0x00 -#define TX_Q_NEXT_DESCR_LOW 0x04 -#define TX_Q_BUF_ADDR_LOW 0x08 -#define TX_Q_BUF_ADDR_HIGH 0x0c -#define TX_Q_FRAME_STAT 0x10 -#define TX_Q_CSUM_START 0x14 -#define TX_Q_CSUM_START_POS 0x18 -#define TX_Q_RESERVED 0x1c -#define TX_Q_CUR_DESCR_LOW 0x20 -#define TX_Q_DESCR_HIGH 0x24 -#define TX_Q_CUR_ADDR_LOW 0x28 -#define TX_Q_CUR_ADDR_HIGH 0x2c -#define TX_Q_CUR_BYTE_CNT 0x30 -#define TX_Q_CTRL 0x34 -#define TX_Q_FLAG 0x38 -#define TX_Q_TEST1 0x3c -#define TX_Q_TEST2 0x40 -#define TX_Q_TEST3 0x44 - -/* definition of flags in the queue control field */ -#define RX_Q_CTRL_POLL_ON 0x00000080 -#define RX_Q_CTRL_POLL_OFF 0x00000040 -#define RX_Q_CTRL_STOP 0x00000020 -#define RX_Q_CTRL_START 0x00000010 -#define RX_Q_CTRL_CLR_I_PAR 0x00000008 -#define RX_Q_CTRL_CLR_I_EOB 0x00000004 -#define RX_Q_CTRL_CLR_I_EOF 0x00000002 -#define RX_Q_CTRL_CLR_I_ERR 0x00000001 - -#define TX_Q_CTRL_POLL_ON 0x00000080 -#define 
TX_Q_CTRL_POLL_OFF 0x00000040 -#define TX_Q_CTRL_STOP 0x00000020 -#define TX_Q_CTRL_START 0x00000010 -#define TX_Q_CTRL_CLR_I_EOB 0x00000004 -#define TX_Q_CTRL_CLR_I_EOF 0x00000002 -#define TX_Q_CTRL_CLR_I_ERR 0x00000001 - - -/* Interrupt bits in the interrupts source register **************************/ -#define IRQ_HW_ERROR 0x80000000 -#define IRQ_RESERVED 0x40000000 -#define IRQ_PKT_TOUT_RX1 0x20000000 -#define IRQ_PKT_TOUT_RX2 0x10000000 -#define IRQ_PKT_TOUT_TX1 0x08000000 -#define IRQ_PKT_TOUT_TX2 0x04000000 -#define IRQ_I2C_READY 0x02000000 -#define IRQ_SW 0x01000000 -#define IRQ_EXTERNAL_REG 0x00800000 -#define IRQ_TIMER 0x00400000 -#define IRQ_MAC1 0x00200000 -#define IRQ_LINK_SYNC_C_M1 0x00100000 -#define IRQ_MAC2 0x00080000 -#define IRQ_LINK_SYNC_C_M2 0x00040000 -#define IRQ_EOB_RX1 0x00020000 -#define IRQ_EOF_RX1 0x00010000 -#define IRQ_CHK_RX1 0x00008000 -#define IRQ_EOB_RX2 0x00004000 -#define IRQ_EOF_RX2 0x00002000 -#define IRQ_CHK_RX2 0x00001000 -#define IRQ_EOB_SY_TX1 0x00000800 -#define IRQ_EOF_SY_TX1 0x00000400 -#define IRQ_CHK_SY_TX1 0x00000200 -#define IRQ_EOB_AS_TX1 0x00000100 -#define IRQ_EOF_AS_TX1 0x00000080 -#define IRQ_CHK_AS_TX1 0x00000040 -#define IRQ_EOB_SY_TX2 0x00000020 -#define IRQ_EOF_SY_TX2 0x00000010 -#define IRQ_CHK_SY_TX2 0x00000008 -#define IRQ_EOB_AS_TX2 0x00000004 -#define IRQ_EOF_AS_TX2 0x00000002 -#define IRQ_CHK_AS_TX2 0x00000001 - -#define DRIVER_IRQS (IRQ_SW | IRQ_EOF_RX1 | IRQ_EOF_RX2 | \ - IRQ_EOF_SY_TX1 | IRQ_EOF_AS_TX1 | \ - IRQ_EOF_SY_TX2 | IRQ_EOF_AS_TX2) - -#define SPECIAL_IRQS (IRQ_HW_ERROR | IRQ_PKT_TOUT_RX1 | IRQ_PKT_TOUT_RX2 | \ - IRQ_PKT_TOUT_TX1 | IRQ_PKT_TOUT_TX2 | \ - IRQ_I2C_READY | IRQ_EXTERNAL_REG | IRQ_TIMER | \ - IRQ_MAC1 | IRQ_LINK_SYNC_C_M1 | \ - IRQ_MAC2 | IRQ_LINK_SYNC_C_M2 | \ - IRQ_CHK_RX1 | IRQ_CHK_RX2 | \ - IRQ_CHK_SY_TX1 | IRQ_CHK_AS_TX1 | \ - IRQ_CHK_SY_TX2 | IRQ_CHK_AS_TX2) - -#define IRQ_MASK (IRQ_SW | IRQ_EOB_RX1 | IRQ_EOF_RX1 | \ - IRQ_EOB_RX2 | IRQ_EOF_RX2 | \ - IRQ_EOB_SY_TX1 | 
IRQ_EOF_SY_TX1 | \ - IRQ_EOB_AS_TX1 | IRQ_EOF_AS_TX1 | \ - IRQ_EOB_SY_TX2 | IRQ_EOF_SY_TX2 | \ - IRQ_EOB_AS_TX2 | IRQ_EOF_AS_TX2 | \ - IRQ_HW_ERROR | IRQ_PKT_TOUT_RX1 | IRQ_PKT_TOUT_RX2 | \ - IRQ_PKT_TOUT_TX1 | IRQ_PKT_TOUT_TX2 | \ - IRQ_I2C_READY | IRQ_EXTERNAL_REG | IRQ_TIMER | \ - IRQ_MAC1 | \ - IRQ_MAC2 | \ - IRQ_CHK_RX1 | IRQ_CHK_RX2 | \ - IRQ_CHK_SY_TX1 | IRQ_CHK_AS_TX1 | \ - IRQ_CHK_SY_TX2 | IRQ_CHK_AS_TX2) +#define DRIVER_IRQS ((IS_IRQ_SW) | \ + (IS_R1_F) |(IS_R2_F) | \ + (IS_XS1_F) |(IS_XA1_F) | \ + (IS_XS2_F) |(IS_XA2_F)) + +#define SPECIAL_IRQS ((IS_HW_ERR) |(IS_I2C_READY) | \ + (IS_EXT_REG) |(IS_TIMINT) | \ + (IS_PA_TO_RX1) |(IS_PA_TO_RX2) | \ + (IS_PA_TO_TX1) |(IS_PA_TO_TX2) | \ + (IS_MAC1) |(IS_LNK_SYNC_M1)| \ + (IS_MAC2) |(IS_LNK_SYNC_M2)| \ + (IS_R1_C) |(IS_R2_C) | \ + (IS_XS1_C) |(IS_XA1_C) | \ + (IS_XS2_C) |(IS_XA2_C)) + +#define IRQ_MASK ((IS_IRQ_SW) | \ + (IS_R1_B) |(IS_R1_F) |(IS_R2_B) |(IS_R2_F) | \ + (IS_XS1_B) |(IS_XS1_F) |(IS_XA1_B)|(IS_XA1_F)| \ + (IS_XS2_B) |(IS_XS2_F) |(IS_XA2_B)|(IS_XA2_F)| \ + (IS_HW_ERR) |(IS_I2C_READY)| \ + (IS_EXT_REG) |(IS_TIMINT) | \ + (IS_PA_TO_RX1) |(IS_PA_TO_RX2)| \ + (IS_PA_TO_TX1) |(IS_PA_TO_TX2)| \ + (IS_MAC1) |(IS_MAC2) | \ + (IS_R1_C) |(IS_R2_C) | \ + (IS_XS1_C) |(IS_XA1_C) | \ + (IS_XS2_C) |(IS_XA2_C)) -#define IRQ_HWE_MASK 0x00000FFF /* enable all HW irqs */ +#define IRQ_HWE_MASK (IS_ERR_MSK) /* enable all HW irqs */ typedef struct s_DevNet DEV_NET; struct s_DevNet { + struct proc_dir_entry *proc; int PortNr; int NetNr; int Mtu; @@ -516,16 +440,18 @@ struct s_RxPort { int PortIndex; /* index number of port (0 or 1) */ }; -#define IRQ_EOF_AS_TX ((IRQ_EOF_AS_TX1) | (IRQ_EOF_AS_TX2)) -#define IRQ_EOF_SY_TX ((IRQ_EOF_SY_TX1) | (IRQ_EOF_SY_TX2)) -#define IRQ_MASK_TX_ONLY ((IRQ_EOF_AS_TX) | (IRQ_EOF_SY_TX)) -#define IRQ_MASK_RX_ONLY ((IRQ_EOF_RX1) | (IRQ_EOF_RX2)) +/* Definitions needed for interrupt moderation *******************************/ + +#define IRQ_EOF_AS_TX ((IS_XA1_F) | (IS_XA2_F)) +#define 
IRQ_EOF_SY_TX ((IS_XS1_F) | (IS_XS2_F)) +#define IRQ_MASK_TX_ONLY ((IRQ_EOF_AS_TX)| (IRQ_EOF_SY_TX)) +#define IRQ_MASK_RX_ONLY ((IS_R1_F) | (IS_R2_F)) #define IRQ_MASK_SP_ONLY (SPECIAL_IRQS) -#define IRQ_MASK_TX_RX ((IRQ_MASK_TX_ONLY) | (IRQ_MASK_RX_ONLY)) -#define IRQ_MASK_SP_RX ((SPECIAL_IRQS) | (IRQ_MASK_RX_ONLY)) -#define IRQ_MASK_SP_TX ((SPECIAL_IRQS) | (IRQ_MASK_TX_ONLY)) -#define IRQ_MASK_RX_TX_SP ((SPECIAL_IRQS) | (IRQ_MASK_TX_RX)) - +#define IRQ_MASK_TX_RX ((IRQ_MASK_TX_ONLY)| (IRQ_MASK_RX_ONLY)) +#define IRQ_MASK_SP_RX ((SPECIAL_IRQS) | (IRQ_MASK_RX_ONLY)) +#define IRQ_MASK_SP_TX ((SPECIAL_IRQS) | (IRQ_MASK_TX_ONLY)) +#define IRQ_MASK_RX_TX_SP ((SPECIAL_IRQS) | (IRQ_MASK_TX_RX)) + #define C_INT_MOD_NONE 1 #define C_INT_MOD_STATIC 2 #define C_INT_MOD_DYNAMIC 4 @@ -636,6 +562,10 @@ struct s_AC { /* Only for tests */ int PortUp; int PortDown; + int ChipsetType; /* Chipset family type + * 0 == Genesis family support + * 1 == Yukon family support + */ }; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgedrv.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgedrv.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgedrv.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgedrv.h 2003-08-29 10:55:43.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skgedrv.h * Project: Gigabit Ethernet Adapters, Common Modules - * Version: $Revision: 1.9 $ - * Date: $Date: 2003/05/13 17:24:21 $ + * Version: $Revision: 1.10 $ + * Date: $Date: 2003/07/04 12:25:01 $ * Purpose: Interface with the driver * ******************************************************************************/ @@ -27,6 +27,9 @@ * History: * * $Log: skgedrv.h,v $ + * Revision 1.10 2003/07/04 12:25:01 rschmidt + * Added event SK_DRV_DOWNSHIFT_DET for Downshift 4-Pair / 2-Pair + * * Revision 1.9 2003/05/13 17:24:21 mkarl * Added events SK_DRV_LINK_UP and SK_DRV_LINK_DOWN for drivers not using * RLMT (SK_NO_RLMT). 
@@ -85,4 +88,5 @@ #define SK_DRV_LINK_UP 12 /* Link Up event for driver */ #define SK_DRV_LINK_DOWN 13 /* Link Down event for driver */ #endif -#endif /* __INC_SKGEDRV_H_ */ +#define SK_DRV_DOWNSHIFT_DET 14 /* Downshift 4-Pair / 2-Pair (YUKON only) */ +#endif /* __INC_SKGEDRV_H_ */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgehw.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgehw.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgehw.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgehw.h 2003-08-29 10:55:43.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skgehw.h * Project: Gigabit Ethernet Adapters, Common Modules - * Version: $Revision: 1.52 $ - * Date: $Date: 2003/05/13 17:16:36 $ + * Version: $Revision: 1.53 $ + * Date: $Date: 2003/07/04 12:39:01 $ * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family * ******************************************************************************/ @@ -26,6 +26,10 @@ * * History: * $Log: skgehw.h,v $ + * Revision 1.53 2003/07/04 12:39:01 rschmidt + * Added SK_FAR to pointers in XM_IN32() and GM_IN32() macros (for PXE) + * Editorial changes + * * Revision 1.52 2003/05/13 17:16:36 mkarl * Added SK_FAR for PXE. * Editorial changes. @@ -449,7 +453,7 @@ extern "C" { #define PCI_MEM32BIT (0L<<1) /* Base addr anywhere in 32 Bit range */ #define PCI_MEM1M (1L<<1) /* Base addr below 1 MegaByte */ #define PCI_MEM64BIT (2L<<1) /* Base addr anywhere in 64 Bit range */ -#define PCI_MEMSPACE BIT_0 /* Memory Space Indic. */ +#define PCI_MEMSPACE BIT_0 /* Memory Space Indicator */ /* PCI_BASE_2ND 32 bit 2nd Base address */ #define PCI_IOBASE 0xffffff00L /* Bit 31.. 
8: I/O Base address */ @@ -458,8 +462,8 @@ extern "C" { #define PCI_IOSPACE BIT_0 /* I/O Space Indicator */ /* PCI_BASE_ROM 32 bit Expansion ROM Base Address */ -#define PCI_ROMBASE 0xfffe0000L /* Bit 31..17: ROM BASE address (1st)*/ -#define PCI_ROMBASZ (0x1cL<<14) /* Bit 16..14: Treat as BASE or SIZE */ +#define PCI_ROMBASE_MSK 0xfffe0000L /* Bit 31..17: ROM Base address */ +#define PCI_ROMBASE_SIZ (0x1cL<<14) /* Bit 16..14: Treat as Base or Size */ #define PCI_ROMSIZE (0x38L<<11) /* Bit 13..11: ROM Size Requirements */ /* Bit 10.. 1: reserved */ #define PCI_ROMEN BIT_0 /* Address Decode enable */ @@ -467,7 +471,7 @@ extern "C" { /* Device Dependent Region */ /* PCI_OUR_REG_1 32 bit Our Register 1 */ /* Bit 31..29: reserved */ -#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode */ +#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode (YUKON only) */ #define PCI_TEST_CAL BIT_27 /* Test PCI buffer calib. (YUKON only) */ #define PCI_EN_CAL BIT_26 /* Enable PCI buffer calib. (YUKON only) */ #define PCI_VIO BIT_25 /* PCI I/O Voltage, 0 = 3.3V, 1 = 5V */ @@ -1053,7 +1057,7 @@ extern "C" { /* B0_IMSK 32 bit Interrupt Mask Register */ /* B0_SP_ISRC 32 bit Special Interrupt Source Reg */ /* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ -#define IS_ALL_MSK 0xbfffffffL /* All Interrupt bits */ +#define IS_ALL_MSK 0xbfffffffUL /* All Interrupt bits */ #define IS_HW_ERR BIT_31 /* Interrupt HW Error */ /* Bit 30: reserved */ #define IS_PA_TO_RX1 BIT_29 /* Packet Arb Timeout Rx1 */ @@ -1195,16 +1199,16 @@ extern "C" { /* B2_GP_IO 32 bit General Purpose I/O Register */ /* Bit 31..26: reserved */ -#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=I/1=O */ -#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=I/1=O */ -#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=I/1=O */ -#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=I/1=O */ -#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=I/1=O */ -#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=I/1=O */ -#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=I/1=O */ -#define GP_DIR_2 BIT_18 /* 
IO_2 direct, 0=I/1=O */ -#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=I/1=O */ -#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=I/1=O */ +#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=In/1=Out */ +#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=In/1=Out */ +#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=In/1=Out */ +#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=In/1=Out */ +#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=In/1=Out */ +#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=In/1=Out */ +#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=In/1=Out */ +#define GP_DIR_2 BIT_18 /* IO_2 direct, 0=In/1=Out */ +#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=In/1=Out */ +#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=In/1=Out */ /* Bit 15..10: reserved */ #define GP_IO_9 BIT_9 /* IO_9 pin */ #define GP_IO_8 BIT_8 /* IO_8 pin */ @@ -1354,7 +1358,7 @@ extern "C" { /* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ /* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ /* Bit 31..24: reserved */ -#define TXA_MAX_VAL 0x00ffffffL /* Bit 23.. 0: Max TXA Timer/Cnt Val */ +#define TXA_MAX_VAL 0x00ffffffUL/* Bit 23.. 
0: Max TXA Timer/Cnt Val */ /* TXA_CTRL 8 bit Tx Arbiter Control Register */ #define TXA_ENA_FSYNC BIT_7S /* Enable force of sync Tx queue */ @@ -1796,7 +1800,7 @@ extern "C" { WOL_CTL_DIS_LINK_CHG_UNIT | \ WOL_CTL_DIS_PATTERN_UNIT | \ WOL_CTL_DIS_MAGIC_PKT_UNIT) - + /* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ #define WOL_CTL_PATT_ENA(x) (BIT_0 << (x)) @@ -1840,7 +1844,7 @@ typedef struct s_HwRxd { SK_U32 RxAdrHi; /* Physical Rx Buffer Address upper dword */ SK_U32 RxStat; /* Receive Frame Status Word */ SK_U32 RxTiSt; /* Receive Time Stamp (from XMAC on GENESIS) */ -#ifndef SK_USE_REV_DESC +#ifndef SK_USE_REV_DESC SK_U16 RxTcpSum1; /* TCP Checksum 1 */ SK_U16 RxTcpSum2; /* TCP Checksum 2 */ SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ @@ -1895,20 +1899,9 @@ typedef struct s_HwRxd { * (see XMR_FS bits) */ -/* other defines *************************************************************/ - -/* - * FlashProm specification - */ -#define MAX_PAGES 0x20000L /* Every byte has a single page */ -#define MAX_FADDR 1 /* 1 byte per page */ -#define SKFDDI_PSZ 8 /* address PROM size */ - /* macros ********************************************************************/ -/* - * Receive and Transmit Queues - */ +/* Receive and Transmit Queues */ #define Q_R1 0x0000 /* Receive Queue 1 */ #define Q_R2 0x0080 /* Receive Queue 2 */ #define Q_XS1 0x0200 /* Synchronous Transmit Queue 1 */ @@ -1921,7 +1914,7 @@ typedef struct s_HwRxd { * * Use this macro to access the Receive and Transmit Queue Registers. * - * para: + * para: * Queue Queue to access. * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 * Offs Queue register offset. @@ -1936,7 +1929,7 @@ typedef struct s_HwRxd { * * Use this macro to access the RAM Buffer Registers. * - * para: + * para: * Queue Queue to access. * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 * Offs Queue register offset. 
@@ -1947,9 +1940,7 @@ typedef struct s_HwRxd { #define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs)) -/* - * MAC Related Registers - */ +/* MAC Related Registers */ #define MAC_1 0 /* belongs to the port near the slot */ #define MAC_2 1 /* belongs to the port far away from the slot */ @@ -1958,7 +1949,7 @@ typedef struct s_HwRxd { * * Use this macro to access a MAC Related Registers inside the ASIC. * - * para: + * para: * Mac MAC to access. * Values: MAC_1, MAC_2 * Offs MAC register offset. @@ -2010,9 +2001,9 @@ typedef struct s_HwRxd { #define XM_IN32(IoC, Mac, Reg, pVal) { \ SK_IN16((IoC), XMA((Mac), (Reg)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_LO]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \ SK_IN16((IoC), XMA((Mac), (Reg+2)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_HI]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \ } #define XM_OUT32(IoC, Mac, Reg, Val) { \ @@ -2118,9 +2109,9 @@ typedef struct s_HwRxd { #define GM_IN32(IoC, Mac, Reg, pVal) { \ SK_IN16((IoC), GMA((Mac), (Reg)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_LO]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \ SK_IN16((IoC), GMA((Mac), (Reg+4)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_HI]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \ } #define GM_OUT32(IoC, Mac, Reg, Val) { \ @@ -2215,7 +2206,7 @@ typedef struct s_HwRxd { #define PHY_ADDR_BCOM (1<<8) #define PHY_ADDR_LONE (3<<8) #define PHY_ADDR_NAT (0<<8) - + /* GPHY address (bits 15..11 of SMI control reg) */ #define PHY_ADDR_MARV 0 @@ -2225,7 +2216,7 @@ typedef struct s_HwRxd { * PHY_READ() read a 16 bit value from the PHY * PHY_WRITE() write a 16 bit value to the PHY * - * para: + * para: * IoC I/O context needed for SK I/O macros * pPort Pointer to port struct for PhyAddr * Mac XMAC to access values: MAC_1 or MAC_2 @@ -2316,12 +2307,12 @@ typedef struct s_HwRxd { * #define SK_IN8(pAC, Addr, pVal) ...\ * *pVal = (SK_U8)inp(SK_HW_ADDR(pAC->Hw.Iop, Addr))) */ 
-#ifdef SK_MEM_MAPPED_IO +#ifdef SK_MEM_MAPPED_IO #define SK_HW_ADDR(Base, Addr) ((Base) + (Addr)) -#else /* SK_MEM_MAPPED_IO */ +#else /* SK_MEM_MAPPED_IO */ #define SK_HW_ADDR(Base, Addr) \ ((Base) + (((Addr) & 0x7f) | (((Addr) >> 7 > 0) ? 0x80 : 0))) -#endif /* SK_MEM_MAPPED_IO */ +#endif /* SK_MEM_MAPPED_IO */ #define SZ_LONG (sizeof(SK_U32)) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgeinit.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgeinit.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgeinit.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgeinit.h 2003-08-29 10:55:43.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skgeinit.h * Project: Gigabit Ethernet Adapters, Common Modules - * Version: $Revision: 1.80 $ - * Date: $Date: 2003/05/28 15:25:30 $ + * Version: $Revision: 1.81 $ + * Date: $Date: 2003/07/04 12:30:38 $ * Purpose: Structures and prototypes for the GE Init Module * ******************************************************************************/ @@ -27,94 +27,98 @@ * History: * * $Log: skgeinit.h,v $ + * Revision 1.81 2003/07/04 12:30:38 rschmidt + * Added SK_FAR to pointers in MAC statistic functions (for PXE) + * Editorial changes + * * Revision 1.80 2003/05/28 15:25:30 rschmidt * Added SK_FAR to pointers in MAC/PHY read functions (for PXE) * Minor changes to avoid LINT warnings * Editorial changes - * + * * Revision 1.79 2003/05/06 12:02:33 rschmidt * Added entry GIYukon in s_GeInit structure * Editorial changes - * + * * Revision 1.78 2003/04/28 08:59:57 rschmidt * Added entries GIValIrqMask and GITimeStampCnt in s_GeInit structure - * + * * Revision 1.77 2003/04/08 16:27:02 rschmidt * Added entry GILedBlinkCtrl in s_GeInit structure * Added defines for LED Blink Control - * + * * Revision 1.76 2003/03/31 07:21:01 mkarl * Added PGmANegAdv to SK_GEPORT. * Corrected Copyright. 
- * + * * Revision 1.75 2003/02/05 13:36:39 rschmidt * Added define SK_FACT_78 for YUKON's Host Clock of 78.12 MHz * Editorial changes - * + * * Revision 1.74 2003/01/28 09:39:16 rschmidt * Added entry GIYukonLite in s_GeInit structure * Editorial changes - * + * * Revision 1.73 2002/11/15 12:47:25 rschmidt * Replaced error message SKERR_HWI_E024 for Cable Diagnostic with * Rx queue error in SkGeStopPort(). - * + * * Revision 1.72 2002/11/12 17:08:35 rschmidt * Added entries for Cable Diagnostic to Port structure * Added entries GIPciSlot64 and GIPciClock66 in s_GeInit structure * Added error message for Cable Diagnostic * Added prototypes for SkGmCableDiagStatus() * Editorial changes - * + * * Revision 1.71 2002/10/21 11:26:10 mkarl * Changed interface of SkGeInitAssignRamToQueues(). - * + * * Revision 1.70 2002/10/14 08:21:32 rschmidt * Changed type of GICopperType, GIVauxAvail to SK_BOOL * Added entry PRxOverCnt to Port structure * Added entry GIYukon32Bit in s_GeInit structure * Editorial changes - * + * * Revision 1.69 2002/10/09 16:57:15 mkarl * Added some constants and macros for SkGeInitAssignRamToQueues(). - * + * * Revision 1.68 2002/09/12 08:58:51 rwahl * Retrieve counters needed for XMAC errata workarounds directly because * PNMI returns corrected counter values (e.g. #10620). 
- * + * * Revision 1.67 2002/08/16 14:40:30 rschmidt * Added entries GIGenesis and GICopperType in s_GeInit structure * Added prototypes for SkMacHashing() * Editorial changes - * + * * Revision 1.66 2002/08/12 13:27:21 rschmidt * Added defines for Link speed capabilities * Added entry PLinkSpeedCap to Port structure * Added entry GIVauxAvail in s_GeInit structure * Added prototypes for SkMacPromiscMode() * Editorial changes - * + * * Revision 1.65 2002/08/08 15:46:18 rschmidt * Added define SK_PHY_ACC_TO for PHY access timeout * Added define SK_XM_RX_HI_WM for XMAC Rx High Watermark * Added define SK_MIN_TXQ_SIZE for Min RAM Buffer Tx Queue Size * Added entry PhyId1 to Port structure - * + * * Revision 1.64 2002/07/23 16:02:56 rschmidt * Added entry GIWolOffs in s_GeInit struct (HW-Bug in YUKON 1st rev.) * Added prototypes for: SkGePhyRead(), SkGePhyWrite() - * + * * Revision 1.63 2002/07/18 08:17:38 rwahl * Corrected definitions for SK_LSPEED_xxx & SK_LSPEED_STAT_xxx. - * + * * Revision 1.62 2002/07/17 18:21:55 rwahl * Added SK_LSPEED_INDETERMINATED define. - * + * * Revision 1.61 2002/07/17 17:16:03 rwahl * - MacType now member of GIni struct. * - Struct alignment to 32bit. * - Editorial change. - * + * * Revision 1.60 2002/07/15 18:23:39 rwahl * Added GeMacFunc to GE Init structure. * Added prototypes for SkXmUpdateStats(), SkGmUpdateStats(), @@ -122,19 +126,19 @@ * SkGmResetCounter(), SkXmOverflowStatus(), SkGmOverflowStatus(). * Added defines for current link speed state. * Added ERRMSG defintions for MacUpdateStat() & MacStatistics(). - * + * * Revision 1.59 2002/07/15 15:40:22 rschmidt * Added entry PLinkSpeedUsed to Port structure * Editorial changes - * + * * Revision 1.58 2002/06/10 09:36:30 rschmidt * Editorial changes. 
- * + * * Revision 1.57 2002/06/05 08:18:00 rschmidt * Corrected alignment in Port Structure * Added new prototypes for GMAC * Editorial changes - * + * * Revision 1.56 2002/04/25 11:38:12 rschmidt * Added defines for Link speed values * Added defines for Loopback parameters for MAC and PHY @@ -149,150 +153,150 @@ * SkXmPhyRead(), SkXmPhyRead(), SkGmPhyWrite(), SkGmPhyWrite(); * Removed prototypes for static functions in SkXmac2.c * Editorial changes - * + * * Revision 1.55 2002/02/26 15:24:53 rwahl * Fix: no link with manual configuration (#10673). The previous fix for * #10639 was removed. So for RLMT mode = CLS the RLMT may switch to * misconfigured port. It should not occur for the other RLMT modes. - * + * * Revision 1.54 2002/01/18 16:52:52 rwahl * Editorial corrections. - * + * * Revision 1.53 2001/11/20 09:19:58 rwahl * Reworked bugfix #10639 (no dependency to RLMT mode). - * + * * Revision 1.52 2001/10/26 07:52:23 afischer * Port switching bug in `check local link` mode - * + * * Revision 1.51 2001/02/09 12:26:38 cgoos * Inserted #ifdef DIAG for half duplex workaround timer. - * + * * Revision 1.50 2001/02/07 07:56:40 rassmann * Corrected copyright. - * + * * Revision 1.49 2001/01/31 15:32:18 gklug * fix: problem with autosensing an SR8800 switch * add: counter for autoneg timeouts - * + * * Revision 1.48 2000/11/09 11:30:10 rassmann * WA: Waiting after releasing reset until BCom chip is accessible. - * + * * Revision 1.47 2000/10/18 12:22:40 cgoos * Added workaround for half duplex hangup. - * + * * Revision 1.46 2000/08/10 11:28:00 rassmann * Editorial changes. * Preserving 32-bit alignment in structs for the adapter context. - * + * * Revision 1.45 1999/11/22 13:56:19 cgoos * Changed license header to GPL. - * + * * Revision 1.44 1999/10/26 07:34:15 malthoff * The define SK_LNK_ON has been lost in v1.41. - * + * * Revision 1.43 1999/10/06 09:30:16 cgoos * Changed SK_XM_THR_JUMBO. 
- * + * * Revision 1.42 1999/09/16 12:58:26 cgoos * Changed SK_LED_STANDY macro to be independent of HW link sync. - * + * * Revision 1.41 1999/07/30 06:56:14 malthoff * Correct comment for SK_MS_STAT_UNSET. - * + * * Revision 1.40 1999/05/27 13:38:46 cgoos * Added SK_BMU_TX_WM. * Made SK_BMU_TX_WM and SK_BMU_RX_WM user-definable. * Changed XMAC Tx treshold to max. values. - * + * * Revision 1.39 1999/05/20 14:35:26 malthoff * Remove prototypes for SkGeLinkLED(). - * + * * Revision 1.38 1999/05/19 11:59:12 cgoos * Added SK_MS_CAP_INDETERMINATED define. - * + * * Revision 1.37 1999/05/19 07:32:33 cgoos * Changes for 1000Base-T. * LED-defines for HWAC_LINK_LED macro. - * + * * Revision 1.36 1999/04/08 14:00:24 gklug * add:Port struct field PLinkResCt - * + * * Revision 1.35 1999/03/25 07:43:07 malthoff * Add error string for SKERR_HWI_E018MSG. - * + * * Revision 1.34 1999/03/12 16:25:57 malthoff * Remove PPollRxD and PPollTxD. * Add SKERR_HWI_E017MSG. and SK_DPOLL_MAX. - * + * * Revision 1.33 1999/03/12 13:34:41 malthoff * Add Autonegotiation error codes. * Change defines for parameter Mode in SkXmSetRxCmd(). * Replace __STDC__ by SK_KR_PROTO. - * + * * Revision 1.32 1999/01/25 14:40:20 mhaveman * Added new return states for the virtual management port if multiple * ports are active but differently configured. - * + * * Revision 1.31 1998/12/11 15:17:02 gklug * add: Link partnet autoneg states : Unknown Manual and Auto-negotiation - * + * * Revision 1.30 1998/12/07 12:17:04 gklug * add: Link Partner auto-negotiation flag - * + * * Revision 1.29 1998/12/01 10:54:42 gklug * add: variables for XMAC Errata - * + * * Revision 1.28 1998/12/01 10:14:15 gklug * add: PIsave saves the Interrupt status word - * + * * Revision 1.27 1998/11/26 15:24:52 mhaveman * Added link status states SK_LMODE_STAT_AUTOHALF and * SK_LMODE_STAT_AUTOFULL which are used by PNMI. 
- * + * * Revision 1.26 1998/11/26 14:53:01 gklug * add:autoNeg Timeout variable - * + * * Revision 1.25 1998/11/26 08:58:50 gklug * add: Link Mode configuration (AUTO Sense mode) - * + * * Revision 1.24 1998/11/24 13:30:27 gklug * add: PCheckPar to port struct - * + * * Revision 1.23 1998/11/18 13:23:26 malthoff * Add SK_PKT_TO_MAX. - * + * * Revision 1.22 1998/11/18 13:19:54 gklug * add: PPrevShorts and PLinkBroken to port struct for WA XMAC Errata #C1 * * Revision 1.21 1998/10/26 08:02:57 malthoff * Add GIRamOffs. - * + * * Revision 1.20 1998/10/19 07:28:37 malthoff * Add prototype for SkGeInitRamIface(). - * + * * Revision 1.19 1998/10/14 14:47:48 malthoff * SK_TIMER should not be defined for Diagnostics. * Add SKERR_HWI_E015MSG and SKERR_HWI_E016MSG. - * + * * Revision 1.18 1998/10/14 14:00:03 gklug * add: timer to port struct for workaround of Errata #2 - * + * * Revision 1.17 1998/10/14 11:23:09 malthoff * Add prototype for SkXmAutoNegDone(). * Fix SkXmSetRxCmd() prototype statement. * * Revision 1.16 1998/10/14 05:42:29 gklug * add: HWLinkUp flag to Port struct - * + * * Revision 1.15 1998/10/09 08:26:33 malthoff * Rename SK_RB_ULPP_B to SK_RB_LLPP_B. - * + * * Revision 1.14 1998/10/09 07:11:13 malthoff * bug fix: SK_FACT_53 is 85 not 117. * Rework time out init values. * Add GIPortUsage and corresponding defines. * Add some error log messages. - * + * * Revision 1.13 1998/10/06 14:13:14 malthoff * Add prototype for SkGeLoadLnkSyncCnt(). 
* @@ -358,10 +362,10 @@ extern "C" { /* defines ********************************************************************/ #define SK_TEST_VAL 0x11335577UL - + /* modifying Link LED behaviour (used with SkGeLinkLED()) */ #define SK_LNK_OFF LED_OFF -#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) +#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) #define SK_LNK_BLINK (LED_ON | LED_BLK_ON | LED_SYNC_ON) #define SK_LNK_PERM (LED_ON | LED_BLK_OFF | LED_SYNC_ON) #define SK_LNK_TST (LED_ON | LED_BLK_ON | LED_SYNC_OFF) @@ -572,7 +576,7 @@ extern "C" { #define SK_LENERR_OK_ON (1<<4) /* Don't chk fr for in range len error */ #define SK_LENERR_OK_OFF (1<<5) /* Check frames for in range len error */ #define SK_BIG_PK_OK_ON (1<<6) /* Don't set Rx Error bit for big frames */ -#define SK_BIG_PK_OK_OFF (1<<7) /* Set Rx Error bit for big frames */ +#define SK_BIG_PK_OK_OFF (1<<7) /* Set Rx Error bit for big frames */ #define SK_SELF_RX_ON (1<<8) /* Enable Rx of own packets */ #define SK_SELF_RX_OFF (1<<9) /* Disable Rx of own packets */ @@ -617,10 +621,10 @@ extern "C" { typedef struct s_GeMacFunc { int (*pFnMacUpdateStats)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); int (*pFnMacStatistic)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, - SK_U16 StatAddr, SK_U32 *pVal); + SK_U16 StatAddr, SK_U32 SK_FAR *pVal); int (*pFnMacResetCounter)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); int (*pFnMacOverflow)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, - SK_U16 IStatus, SK_U64 *pVal); + SK_U16 IStatus, SK_U64 SK_FAR *pVal); } SK_GEMACFUNC; /* @@ -631,7 +635,7 @@ typedef struct s_GePort { SK_TIMER PWaTimer; /* Workaround Timer */ SK_TIMER HalfDupChkTimer; #endif /* SK_DIAG */ - SK_U32 PPrevShorts; /* Previous short Counter checking */ + SK_U32 PPrevShorts; /* Previous Short Counter checking */ SK_U32 PPrevFcs; /* Previous FCS Error Counter checking */ SK_U64 PPrevRx; /* Previous RxOk Counter checking */ SK_U64 PRxLim; /* Previous RxOk Counter checking */ @@ -676,7 +680,7 @@ typedef 
struct s_GePort { SK_U8 PMSCap; /* Master/Slave Capabilities */ SK_U8 PMSMode; /* Master/Slave Mode */ SK_U8 PMSStatus; /* Master/Slave Status */ - SK_U8 PAutoNegFail; /* Auto-negotiation fail flag */ + SK_BOOL PAutoNegFail; /* Auto-negotiation fail flag */ SK_U8 PLipaAutoNeg; /* Auto-negotiation possible with Link Partner */ SK_U8 PCableLen; /* Cable Length */ SK_U8 PMdiPairLen[4]; /* MDI[0..3] Pair Length */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgesirq.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgesirq.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skgesirq.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skgesirq.h 2003-08-29 10:55:43.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skgesirq.h * Project: Gigabit Ethernet Adapters, Common Modules - * Version: $Revision: 1.29 $ - * Date: $Date: 2003/05/28 15:14:49 $ + * Version: $Revision: 1.30 $ + * Date: $Date: 2003/07/04 12:34:13 $ * Purpose: SK specific Gigabit Ethernet special IRQ functions * ******************************************************************************/ @@ -26,6 +26,9 @@ * * History: * $Log: skgesirq.h,v $ + * Revision 1.30 2003/07/04 12:34:13 rschmidt + * Added SKERR_SIRQ_E025 for Downshift detected (Yukon-Copper) + * * Revision 1.29 2003/05/28 15:14:49 rschmidt * Moved defines for return codes of SkGePortCheckUp() to header file. * Minor changes to avoid LINT warnings. 
@@ -202,6 +205,8 @@ #define SKERR_SIRQ_E023MSG "Auto-negotiation error" #define SKERR_SIRQ_E024 (SKERR_SIRQ_E023+1) #define SKERR_SIRQ_E024MSG "FIFO overflow error" +#define SKERR_SIRQ_E025 (SKERR_SIRQ_E024+1) +#define SKERR_SIRQ_E025MSG "2 Pair Downshift detected" extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/sktypes.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/sktypes.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/sktypes.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/sktypes.h 2003-08-29 10:55:43.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: sktypes.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.3 $ - * Date: $Date: 2003/02/25 14:16:40 $ + * Version: $Revision: 1.1 $ + * Date: $Date: 2003/07/21 07:26:01 $ * Purpose: Define data types for Linux * ******************************************************************************/ @@ -26,6 +26,9 @@ * History: * * $Log: sktypes.h,v $ + * Revision 1.1 2003/07/21 07:26:01 rroesler + * Fix: Re-Enter after CVS crash + * * Revision 1.3 2003/02/25 14:16:40 mlindner * Fix: Copyright statement * diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skversion.h linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skversion.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/h/skversion.h 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/h/skversion.h 2003-08-29 10:55:44.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: version.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.4 $ - * Date: $Date: 2003/02/25 14:16:40 $ + * Version: $Revision: 1.2 $ + * Date: $Date: 2003/08/13 12:01:01 $ * Purpose: SK specific Error log support * 
******************************************************************************/ @@ -25,6 +25,12 @@ * * History: * $Log: skversion.h,v $ + * Revision 1.2 2003/08/13 12:01:01 mlindner + * Add: Changes for Lint + * + * Revision 1.1 2003/07/24 09:29:56 rroesler + * Fix: Re-Enter after CVS crash + * * Revision 1.4 2003/02/25 14:16:40 mlindner * Fix: Copyright statement * @@ -42,13 +48,15 @@ ******************************************************************************/ +#ifdef lint static const char SysKonnectFileId[] = "@(#) (C) SysKonnect GmbH."; static const char SysKonnectBuildNumber[] = - "@(#)SK-BUILD: 6.12 PL: 01"; + "@(#)SK-BUILD: 6.17 PL: 01"; +#endif /* !defined(lint) */ -#define BOOT_STRING "sk98lin: Network Device Driver v6.12\n" \ +#define BOOT_STRING "sk98lin: Network Device Driver v6.17\n" \ "(C)Copyright 1999-2003 Marvell(R)." -#define VER_STRING "6.12" +#define VER_STRING "6.17" diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skdim.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skdim.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skdim.c 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skdim.c 2003-08-29 10:55:44.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skdim.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.3 $ - * Date: $Date: 2003/06/10 09:16:40 $ + * Version: $Revision: 1.2 $ + * Date: $Date: 2003/08/21 12:35:05 $ * Purpose: All functions to maintain interrupt moderation * ******************************************************************************/ @@ -26,6 +26,15 @@ * History: * * $Log: skdim.c,v $ + * Revision 1.2 2003/08/21 12:35:05 mlindner + * Fix: Corrected CPU detection and compile errors on single CPU machines + * + * Revision 1.1 2003/07/18 13:39:55 rroesler + * Fix: Re-enter after CVS crash + * + * Revision 1.4 2003/07/07 09:45:47 rroesler + * Fix: Compiler warnings corrected + * * Revision 1.3 2003/06/10 09:16:40 rroesler * Adapt 
GetCurrentSystemLoad() to NOT access the kernels * kstat-structure in kernel 2.5/2.6. This must be done @@ -53,7 +62,7 @@ #ifndef lint static const char SysKonnectFileId[] = - "@(#) $Id: skdim.c,v 1.3 2003/06/10 09:16:40 rroesler Exp $ (C) SysKonnect."; + "@(#) $Id: skdim.c,v 1.2 2003/08/21 12:35:05 mlindner Exp $ (C) SysKonnect."; #endif #define __SKADDR_C @@ -309,9 +318,15 @@ GetCurrentSystemLoad(SK_AC *pAC) { unsigned int TotalTime = 0; unsigned int UsedTime = 0; unsigned int SystemLoad = 0; +#ifdef CONFIG_SMP + unsigned int SKNumCpus = smp_num_cpus; +#else + unsigned int SKNumCpus = 1; +#endif + unsigned int NbrCpu = 0; - for (NbrCpu = 0; NbrCpu < smp_num_cpus; NbrCpu++) { + for (NbrCpu = 0; NbrCpu < SKNumCpus; NbrCpu++) { UserTime = UserTime + kstat.per_cpu_user[NbrCpu]; NiceTime = NiceTime + kstat.per_cpu_nice[NbrCpu]; SystemTime = SystemTime + kstat.per_cpu_system[NbrCpu]; @@ -319,7 +334,7 @@ GetCurrentSystemLoad(SK_AC *pAC) { UsedTime = UserTime + NiceTime + SystemTime; - IdleTime = jif * smp_num_cpus - UsedTime; + IdleTime = jif * SKNumCpus - UsedTime; TotalTime = UsedTime + IdleTime; SystemLoad = ( 100 * (UsedTime - M_DIMINFO.PrevUsedTime) ) / diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skge.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skge.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skge.c 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skge.c 2003-08-29 10:55:44.000000000 +0200 @@ -1,10 +1,9 @@ - /****************************************************************************** * * Name: skge.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.58 $ - * Date: $Date: 2003/06/17 07:14:29 $ + * Version: $Revision: 1.11 $ + * Date: $Date: 2003/08/26 16:05:19 $ * Purpose: The main driver source module * ******************************************************************************/ @@ -57,6 +56,62 @@ * History: * * $Log: skge.c,v $ + * Revision 1.11 
2003/08/26 16:05:19 mlindner + * Fix: Compiler warnings (void *) + * + * Revision 1.10 2003/08/25 09:24:08 mlindner + * Add: Dynamic Interrupt Moderation (DIM) port up message + * + * Revision 1.9 2003/08/21 14:09:43 mlindner + * Fix: Disable Half Duplex with Gigabit-Speed (Yukon). Enable Full Duplex. + * + * Revision 1.8 2003/08/19 15:09:18 mlindner + * Fix: Ignore ConType parameter if empty value + * + * Revision 1.7 2003/08/13 12:00:35 mlindner + * Fix: Removed useless defines + * + * Revision 1.6 2003/08/12 16:49:41 mlindner + * Fix: UDP and TCP HW-CSum calculation (Kernel 2.5/2.6) + * Fix: UDP and TCP Proto checks + * Fix: Build without ProcFS + * Fix: Kernel 2.6 editorial changes + * + * Revision 1.5 2003/08/07 12:25:07 mlindner + * Fix: ConType parameter check and error detection + * Fix: Insert various fixes applied to the kernel tree + * + * Revision 1.4 2003/08/07 10:50:21 mlindner + * Add: Speed and HW-Csum support for Yukon Lite chipset + * + * Revision 1.3 2003/08/06 11:24:08 mlindner + * Add: Kernel updates + * + * Revision 1.2 2003/07/21 08:28:47 rroesler + * Fix: Handle padded bytes using skb_put() + * + * Revision 1.63 2003/07/15 09:26:23 rroesler + * Fix: Removed memory leak when sending short padded frames + * + * Revision 1.62 2003/07/09 11:11:16 rroesler + * Fix: Call of ReceiveIrq() performed with parameter SK_FALSE in + * order not to hang the system with multiple spinlocks + * + * Revision 1.61 2003/07/08 07:32:41 rroesler + * Fix: Correct Kernel-version + * + * Revision 1.60 2003/07/07 15:42:30 rroesler + * Fix: Removed function pci_present() for 2.5/2.6 kernels (deprecated) + * Fix: Corrected warning in GetConfiguration() + * + * Revision 1.59 2003/07/07 09:44:32 rroesler + * Add: HW checksumming on kernel 2.5/2.6 + * Add: padding of short frames (<60 bytes) with 0x00 instead of 0xaa + * Add: ConType parameter combining multiple other parameters into one + * Fix: Corrected bugreport #10721 (warning when changing MTU size) + * Fix: Removed 
obsolete function SetQueueSize() + * Fix: Function ChangeMtuSize() returns new MTU size in kernel 2.5/2.6 + * * Revision 1.58 2003/06/17 07:14:29 mlindner * Add: Disable checksum functionality * Fix: Unload module (Kernel 2.5) @@ -386,7 +441,10 @@ #include #include + +#ifdef CONFIG_PROC_FS #include +#endif #include "h/skdrv1st.h" #include "h/skdrv2nd.h" @@ -438,6 +496,7 @@ // #define ROLE_A {"Auto", } // #define ROLE_B {"Auto", } // #define PREF_PORT {"A", } +// #define CON_TYPE {"Auto", } // #define RLMT_MODE {"CheckLinkState", } #define DEV_KFREE_SKB(skb) dev_kfree_skb(skb) @@ -496,30 +555,23 @@ static void StartDrvCleanupTimer(SK_AC * static void StopDrvCleanupTimer(SK_AC *pAC); static int XmitFrameSG(SK_AC*, TX_PORT*, struct sk_buff*); - -/******************************************************************************* - * - * Obsolete Function Prototypes - * - ******************************************************************************/ -#if 0 -static void SetQueueSizes(SK_AC *pAC); -#endif - /******************************************************************************* * * Extern Function Prototypes * ******************************************************************************/ +#ifdef CONFIG_PROC_FS static const char SK_Root_Dir_entry[] = "sk98lin"; static struct proc_dir_entry *pSkRootDir; -extern int sk_proc_read(char *buffer, - char **buffer_location, - off_t offset, - int buffer_length, - int *eof, - void *data); + +extern int sk_proc_read( char *buffer, + char **buffer_location, + off_t offset, + int buffer_length, + int *eof, + void *data); +#endif extern void SkDimEnableModerationIfNeeded(SK_AC *pAC); extern void SkDimDisplayModerationSettings(SK_AC *pAC); @@ -531,7 +583,6 @@ static void DumpMsg(struct sk_buff*, cha static void DumpData(char*, int); static void DumpLong(char*, int); #endif -void dump_frag( SK_U8 *data, int length); /* global variables *********************************************************/ static const char *BootString = 
BOOT_STRING; @@ -543,7 +594,10 @@ static uintptr_t TxQueueAddr[SK_MAX_MACS static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480}; +#ifdef CONFIG_PROC_FS static struct proc_dir_entry *pSkRootDir; +#endif + /***************************************************************************** @@ -561,27 +615,29 @@ static struct proc_dir_entry *pSkRootDir */ static int __init skge_probe (void) { - int proc_root_initialized = 0; int boards_found = 0; int vendor_flag = SK_FALSE; SK_AC *pAC; DEV_NET *pNet = NULL; - struct proc_dir_entry *pProcFile; struct pci_dev *pdev = NULL; unsigned long base_address; struct SK_NET_DEVICE *dev = NULL; SK_BOOL DeviceFound = SK_FALSE; SK_BOOL BootStringCount = SK_FALSE; +#ifdef CONFIG_PROC_FS + int proc_root_initialized = 0; + struct proc_dir_entry *pProcFile; +#endif if (probed) return -ENODEV; probed++; - if (!pci_present()) /* is PCI support present? */ + if (!pci_present()) { /* is PCI support present? */ return -ENODEV; + } - while((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) - { + while((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) { dev = NULL; pNet = NULL; @@ -591,12 +647,6 @@ static int __init skge_probe (void) if (!vendor_flag) continue; -/* if ((pdev->vendor != PCI_VENDOR_ID_SYSKONNECT) && - ((pdev->device != PCI_DEVICE_ID_SYSKONNECT_GE) || - (pdev->device != PCI_DEVICE_ID_SYSKONNECT_YU))){ - continue; - } -*/ /* Configure DMA attributes. */ if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL) && pci_set_dma_mask(pdev, (u64) 0xffffffff)) @@ -658,7 +708,7 @@ static int __init skge_probe (void) #ifdef SK_ZEROCOPY #ifdef USE_SK_TX_CHECKSUM - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + if (pAC->ChipsetType) { /* Use only if yukon hardware */ /* SK and ZEROCOPY - fly baby... */ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; @@ -712,6 +762,7 @@ static int __init skge_probe (void) (caddr_t) &pAC->Addr.Net[0].CurrentMacAddress, 6); /* First adapter... 
Create proc and print message */ +#ifdef CONFIG_PROC_FS if (!DeviceFound) { DeviceFound = SK_TRUE; SK_MEMCPY(&SK_Root_Dir_entry, BootString, @@ -724,11 +775,8 @@ static int __init skge_probe (void) pSkRootDir->owner = THIS_MODULE; proc_root_initialized = 1; } - } - - /* Create proc file */ pProcFile = create_proc_entry(dev->name, S_IFREG | S_IXUSR | S_IWGRP | S_IROTH, @@ -741,13 +789,15 @@ static int __init skge_probe (void) pProcFile->size = sizeof(dev->name + 1); pProcFile->data = (void *)pProcFile; pProcFile->owner = THIS_MODULE; +#endif pNet->PortNr = 0; pNet->NetNr = 0; + #ifdef SK_ZEROCOPY #ifdef USE_SK_TX_CHECKSUM - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + if (pAC->ChipsetType) { /* SG and ZEROCOPY - fly baby... */ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; } @@ -784,13 +834,14 @@ static int __init skge_probe (void) #ifdef SK_ZEROCOPY #ifdef USE_SK_TX_CHECKSUM - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + if (pAC->ChipsetType) { /* SG and ZEROCOPY - fly baby... */ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; } #endif #endif +#ifdef CONFIG_PROC_FS pProcFile = create_proc_entry(dev->name, S_IFREG | S_IXUSR | S_IWGRP | S_IROTH, pSkRootDir); @@ -802,6 +853,7 @@ static int __init skge_probe (void) pProcFile->size = sizeof(dev->name + 1); pProcFile->data = (void *)pProcFile; pProcFile->owner = THIS_MODULE; +#endif memcpy((caddr_t) &dev->dev_addr, (caddr_t) &pAC->Addr.Net[1].CurrentMacAddress, 6); @@ -882,6 +934,7 @@ MODULE_PARM(FlowCtrl_A, "1-" __MODULE_ST MODULE_PARM(FlowCtrl_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(Role_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(Role_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(ConType, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(PrefPort, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(RlmtMode, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); /* not used, just there because every driver should have them: */ @@ -955,6 +1008,12 @@ static char 
*Role_B[SK_MAX_CARD_PARAM] = static char *Role_B[SK_MAX_CARD_PARAM] = {"", }; #endif +#ifdef CON_TYPE +static char *ConType[SK_MAX_CARD_PARAM] = CON_TYPE; +#else +static char *ConType[SK_MAX_CARD_PARAM] = {"", }; +#endif + #ifdef PREF_PORT static char *PrefPort[SK_MAX_CARD_PARAM] = PREF_PORT; #else @@ -1073,8 +1132,10 @@ SK_EVPARA EvPara; SkGeRootDev = next; } +#ifdef CONFIG_PROC_FS /* clear proc-dir */ remove_proc_entry(pSkRootDir->name, proc_net); +#endif } /* skge_cleanup_module */ @@ -1158,6 +1219,13 @@ SK_BOOL DualNet; SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO); SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO); + /* Set chipset type support */ + pAC->ChipsetType = 0; + if ((pAC->GIni.GIChipId == CHIP_ID_YUKON) || + (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE)) { + pAC->ChipsetType = 1; + } + GetConfiguration(pAC); if (pAC->RlmtNets == 2) { pAC->GIni.GIPortUsage = SK_MUL_LINK; @@ -1196,9 +1264,6 @@ SK_BOOL DualNet; pAC->CsOfs = (pAC->CsOfs2 << 16) | pAC->CsOfs1; BoardInitMem(pAC); -#if 0 - SetQueueSizes(pAC); -#else /* tschilling: New common function with minimum size check. 
*/ DualNet = SK_FALSE; if (pAC->RlmtNets == 2) { @@ -1213,7 +1278,6 @@ SK_BOOL DualNet; printk("SkGeInitAssignRamToQueues failed.\n"); return(-EAGAIN); } -#endif /* Print adapter specific string from vpd */ ProductStr(pAC); @@ -1475,24 +1539,22 @@ int PortIndex) /* index of the port for ("PortReInitBmu ")); /* set address of first descriptor of ring in BMU */ - SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ - TX_Q_CUR_DESCR_LOW, + SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_L, (uint32_t)(((caddr_t) (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) - pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing + pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) & 0xFFFFFFFF)); - SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ - TX_Q_DESCR_HIGH, + SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_H, (uint32_t)(((caddr_t) (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) - pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing + pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) >> 32)); - SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+RX_Q_CUR_DESCR_LOW, + SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_L, (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) - pAC->RxPort[PortIndex].pRxDescrRing + pAC->RxPort[PortIndex].VRxDescrRing) & 0xFFFFFFFF)); - SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+RX_Q_DESCR_HIGH, + SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_H, (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) - pAC->RxPort[PortIndex].pRxDescrRing + pAC->RxPort[PortIndex].VRxDescrRing) >> 32)); @@ -1531,20 +1593,20 @@ SK_U32 IntSrc; /* interrupts source re while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) { #if 0 /* software irq currently not used */ - if (IntSrc & IRQ_SW) { + if (IntSrc & IS_IRQ_SW) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("Software IRQ\n")); } #endif - if (IntSrc & IRQ_EOF_RX1) { + if (IntSrc & IS_R1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX1 
IRQ\n")); ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); SK_PNMI_CNT_RX_INTR(pAC, 0); } - if (IntSrc & IRQ_EOF_RX2) { + if (IntSrc & IS_R2_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX2 IRQ\n")); @@ -1552,7 +1614,7 @@ SK_U32 IntSrc; /* interrupts source re SK_PNMI_CNT_RX_INTR(pAC, 1); } #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) { + if (IntSrc & IS_XA1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX1 IRQ\n")); @@ -1561,7 +1623,7 @@ SK_U32 IntSrc; /* interrupts source re FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); } - if (IntSrc & IRQ_EOF_AS_TX2) { + if (IntSrc & IS_XA2_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX2 IRQ\n")); @@ -1571,7 +1633,7 @@ SK_U32 IntSrc; /* interrupts source re spin_unlock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); } #if 0 /* only if sync. queues used */ - if (IntSrc & IRQ_EOF_SY_TX1) { + if (IntSrc & IS_XS1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX1 IRQ\n")); @@ -1581,7 +1643,7 @@ SK_U32 IntSrc; /* interrupts source re spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); ClearTxIrq(pAC, 0, TX_PRIO_HIGH); } - if (IntSrc & IRQ_EOF_SY_TX2) { + if (IntSrc & IS_XS2_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX2 IRQ\n")); @@ -1595,14 +1657,14 @@ SK_U32 IntSrc; /* interrupts source re #endif /* do all IO at once */ - if (IntSrc & IRQ_EOF_RX1) + if (IntSrc & IS_R1_F) ClearAndStartRx(pAC, 0); - if (IntSrc & IRQ_EOF_RX2) + if (IntSrc & IS_R2_F) ClearAndStartRx(pAC, 1); #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) + if (IntSrc & IS_XA1_F) ClearTxIrq(pAC, 0, TX_PRIO_LOW); - if (IntSrc & IRQ_EOF_AS_TX2) + if (IntSrc & IS_XA2_F) ClearTxIrq(pAC, 1, TX_PRIO_LOW); #endif SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc); @@ -1680,13 +1742,13 @@ SK_U32 IntSrc; /* interrupts 
source re while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) { #if 0 /* software irq currently not used */ - if (IntSrc & IRQ_SW) { + if (IntSrc & IS_IRQ_SW) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("Software IRQ\n")); } #endif - if (IntSrc & IRQ_EOF_RX1) { + if (IntSrc & IS_R1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX1 IRQ\n")); @@ -1694,7 +1756,7 @@ SK_U32 IntSrc; /* interrupts source re SK_PNMI_CNT_RX_INTR(pAC, 0); } #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) { + if (IntSrc & IS_XA1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX1 IRQ\n")); @@ -1704,7 +1766,7 @@ SK_U32 IntSrc; /* interrupts source re spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); } #if 0 /* only if sync. queues used */ - if (IntSrc & IRQ_EOF_SY_TX1) { + if (IntSrc & IS_XS1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX1 IRQ\n")); @@ -1718,10 +1780,10 @@ SK_U32 IntSrc; /* interrupts source re #endif /* do all IO at once */ - if (IntSrc & IRQ_EOF_RX1) + if (IntSrc & IS_R1_F) ClearAndStartRx(pAC, 0); #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) + if (IntSrc & IS_XA1_F) ClearTxIrq(pAC, 0, TX_PRIO_LOW); #endif SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc); @@ -2062,24 +2124,29 @@ int Rc; /* return code of XmitFrame */ * < 0 - on failure: other problems ( -> return failure to upper layers) */ static int XmitFrame( -SK_AC *pAC, /* pointer to adapter context */ +SK_AC *pAC, /* pointer to adapter context */ TX_PORT *pTxPort, /* pointer to struct of port to send to */ -struct sk_buff *pMessage) /* pointer to send-message */ +struct sk_buff *pMessage) /* pointer to send-message */ { -TXD *pTxd, *pOldTxd; /* the rxd to fill */ -unsigned long Flags; -SK_U64 PhysAddr; -int BytesSend; + TXD *pTxd; /* the rxd to fill */ + TXD *pOldTxd; + unsigned long Flags; + SK_U64 PhysAddr; + int Protocol; + int IpHeaderLength; + 
int BytesSend = pMessage->len; - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, - ("X")); + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X")); spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); #ifndef USE_TX_COMPLETE FreeTxDescriptors(pAC, pTxPort); #endif if (pTxPort->TxdRingFree == 0) { - /* no enough free descriptors in ring at the moment */ + /* + ** no enough free descriptors in ring at the moment. + ** Maybe free'ing some old one help? + */ FreeTxDescriptors(pAC, pTxPort); if (pTxPort->TxdRingFree == 0) { spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); @@ -2087,58 +2154,104 @@ int BytesSend; SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("XmitFrame failed\n")); - /* this message can not be sent now */ - /* Because tbusy seems to be set, the message should not be freed here */ - /* It will be used by the scheduler of the ethernet handler */ + /* + ** the desired message can not be sent + ** Because tbusy seems to be set, the message + ** should not be freed here. It will be used + ** by the scheduler of the ethernet handler + */ return (-1); } } - /* advance head counter behind descriptor needed for this frame */ + + /* + ** If the passed socket buffer is of smaller MTU-size than 60, + ** copy everything into new buffer and fill all bytes between + ** the original packet end and the new packet end of 60 with 0x00. + ** This is to resolve faulty padding by the HW with 0xaa bytes. + */ + if (BytesSend < C_LEN_ETHERNET_MINSIZE) { + skb_put(pMessage, (C_LEN_ETHERNET_MINSIZE-BytesSend)); + memset( ((int *)(pMessage->data))+BytesSend, + 0, C_LEN_ETHERNET_MINSIZE-BytesSend); + } + + /* + ** advance head counter behind descriptor needed for this frame, + ** so that needed descriptor is reserved from that on. 
The next + ** action will be to add the passed buffer to the TX-descriptor + */ pTxd = pTxPort->pTxdRingHead; pTxPort->pTxdRingHead = pTxd->pNextTxd; pTxPort->TxdRingFree--; - /* the needed descriptor is reserved now */ - - /* - * everything allocated ok, so add buffer to descriptor - */ #ifdef SK_DUMP_TX DumpMsg(pMessage, "XmitFrame"); #endif - /* set up descriptor and CONTROL dword */ + /* + ** First step is to map the data to be sent via the adapter onto + ** the DMA memory. Kernel 2.2 uses virt_to_bus(), but kernels 2.4 + ** and 2.6 need to use pci_map_page() for that mapping. + */ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, - virt_to_page(pMessage->data), - ((unsigned long) pMessage->data & - ~PAGE_MASK), - pMessage->len, - PCI_DMA_TODEVICE); - pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + virt_to_page(pMessage->data), + ((unsigned long) pMessage->data & ~PAGE_MASK), + pMessage->len, + PCI_DMA_TODEVICE); + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - pTxd->pMBuf = pMessage; - pTxd->TBControl = TX_CTRL_OWN_BMU | TX_CTRL_STF | - TX_CTRL_CHECK_DEFAULT | TX_CTRL_SOFTWARE | + pTxd->pMBuf = pMessage; + + if (pMessage->ip_summed == CHECKSUM_HW) { + Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff); + if ((Protocol == C_PROTO_ID_UDP) && (pAC->GIni.GIChipRev != 0)) { + pTxd->TBControl = BMU_UDP_CHECK; + } else { + pTxd->TBControl = BMU_TCP_CHECK ; + } + + IpHeaderLength = (SK_U8)pMessage->data[C_OFFSET_IPHEADER]; + IpHeaderLength = (IpHeaderLength & 0xf) * 4; + pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */ + pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength + + (Protocol == C_PROTO_ID_UDP ? 
+ C_OFFSET_UDPHEADER_UDPCS : + C_OFFSET_TCPHEADER_TCPCS); + pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength; + + pTxd->TBControl |= BMU_OWN | BMU_STF | + BMU_SW | BMU_EOF | #ifdef USE_TX_COMPLETE - TX_CTRL_EOF | TX_CTRL_EOF_IRQ | pMessage->len; -#else - TX_CTRL_EOF | pMessage->len; + BMU_IRQ_EOF | #endif + pMessage->len; + } else { + pTxd->TBControl = BMU_OWN | BMU_STF | BMU_CHECK | + BMU_SW | BMU_EOF | +#ifdef USE_TX_COMPLETE + BMU_IRQ_EOF | +#endif + pMessage->len; + } + /* + ** If previous descriptor already done, give TX start cmd + */ pOldTxd = xchg(&pTxPort->pTxdRingPrev, pTxd); - if ((pOldTxd->TBControl & TX_CTRL_OWN_BMU) == 0) { - /* previous descriptor already done, so give tx start cmd */ - /* StartTx(pAC, pTxPort->HwAddr); */ - SK_OUT8(pTxPort->HwAddr, TX_Q_CTRL, TX_Q_CTRL_START); + if ((pOldTxd->TBControl & BMU_OWN) == 0) { + SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START); } - BytesSend = pMessage->len; + /* + ** after releasing the lock, the skb may immediately be free'd + */ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); - /* after releasing the lock, the skb may be immidiately freed */ - if (pTxPort->TxdRingFree != 0) + if (pTxPort->TxdRingFree != 0) { return (BytesSend); - else + } else { return (0); + } } /* XmitFrame */ @@ -2158,21 +2271,21 @@ int BytesSend; * < 0 - on failure: other problems ( -> return failure to upper layers) */ static int XmitFrameSG( -SK_AC *pAC, /* pointer to adapter context */ -TX_PORT *pTxPort, /* pointer to struct of port to send to */ -struct sk_buff *pMessage) /* pointer to send-message */ +SK_AC *pAC, /* pointer to adapter context */ +TX_PORT *pTxPort, /* pointer to struct of port to send to */ +struct sk_buff *pMessage) /* pointer to send-message */ { - int i; - int BytesSend; - int hlength; - int protocol; - skb_frag_t *sk_frag; - TXD *pTxd; - TXD *pTxdFst; - TXD *pTxdLst; - SK_U64 PhysAddr; - unsigned long Flags; + TXD *pTxd; + TXD *pTxdFst; + TXD *pTxdLst; + int CurrFrag; + int BytesSend; + int 
IpHeaderLength; + int Protocol; + skb_frag_t *sk_frag; + SK_U64 PhysAddr; + unsigned long Flags; spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); #ifndef USE_TX_COMPLETE @@ -2191,114 +2304,120 @@ struct sk_buff *pMessage) /* pointer to } } - - pTxd = pTxPort->pTxdRingHead; - pTxdFst = pTxd; - pTxdLst = pTxd; + pTxd = pTxPort->pTxdRingHead; + pTxdFst = pTxd; + pTxdLst = pTxd; BytesSend = 0; - protocol = 0; + Protocol = 0; - /* map first fragment (header) */ + /* + ** Map the first fragment (header) into the DMA-space + */ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, virt_to_page(pMessage->data), ((unsigned long) pMessage->data & ~PAGE_MASK), skb_headlen(pMessage), PCI_DMA_TODEVICE); - pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - /* HW checksum? */ + /* + ** Does the HW need to evaluate checksum for TCP or UDP packets? + */ if (pMessage->ip_summed == CHECKSUM_HW) { - pTxd->TBControl = TX_CTRL_STF | - TX_CTRL_ST_FWD | - skb_headlen(pMessage); - - /* We have to use the opcode for tcp here because the opcode for - udp is not working in the hardware yet (revision 2.0)*/ - protocol = ((SK_U8)pMessage->data[23] & 0xf); - if ((protocol == 17) && (pAC->GIni.GIChipRev != 0)) - pTxd->TBControl |= BMU_UDP_CHECK; - else + pTxd->TBControl = BMU_STF | BMU_STFWD | skb_headlen(pMessage); + /* + ** We have to use the opcode for tcp here, because the + ** opcode for udp is not working in the hardware yet + ** (Revision 2.0) + */ + Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff); + if ((Protocol == C_PROTO_ID_UDP) && (pAC->GIni.GIChipRev != 0)) { + pTxd->TBControl |= BMU_UDP_CHECK; + } else { pTxd->TBControl |= BMU_TCP_CHECK ; + } - hlength = ((SK_U8)pMessage->data[14] & 0xf) * 4; + IpHeaderLength = ((SK_U8)pMessage->data[C_OFFSET_IPHEADER] & 0xf)*4; pTxd->TcpSumOfs = 0; /* PH-Checksum already claculated */ - pTxd->TcpSumSt = 14+hlength+16; - pTxd->TcpSumWr = 
14+hlength; - + pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength + + (Protocol == C_PROTO_ID_UDP ? + C_OFFSET_UDPHEADER_UDPCS : + C_OFFSET_TCPHEADER_TCPCS); + pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength; } else { - pTxd->TBControl = TX_CTRL_CHECK_DEFAULT | - TX_CTRL_SOFTWARE | - TX_CTRL_STF | - skb_headlen(pMessage); + pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_STF | + skb_headlen(pMessage); } pTxd = pTxd->pNextTxd; pTxPort->TxdRingFree--; BytesSend += skb_headlen(pMessage); - - /* Map SG fragments */ - for (i = 0; i < skb_shinfo(pMessage)->nr_frags; i++) { - sk_frag = &skb_shinfo(pMessage)->frags[i]; - - /* we already have the proper value in entry */ + /* + ** Browse over all SG fragments and map each of them into the DMA space + */ + for (CurrFrag = 0; CurrFrag < skb_shinfo(pMessage)->nr_frags; CurrFrag++) { + sk_frag = &skb_shinfo(pMessage)->frags[CurrFrag]; + /* + ** we already have the proper value in entry + */ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, sk_frag->page, sk_frag->page_offset, sk_frag->size, PCI_DMA_TODEVICE); - pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - pTxd->pMBuf = pMessage; + pTxd->pMBuf = pMessage; - /* HW checksum */ + /* + ** Does the HW need to evaluate checksum for TCP or UDP packets? 
+ */ if (pMessage->ip_summed == CHECKSUM_HW) { - pTxd->TBControl = TX_CTRL_OWN_BMU | - TX_CTRL_SOFTWARE | - TX_CTRL_ST_FWD; - - /* We have to use the opcode for tcp here because the opcode for - udp is not working in the hardware yet (revision 2.0)*/ - if ((protocol == 17) && (pAC->GIni.GIChipRev != 0)) + pTxd->TBControl = BMU_OWN | BMU_SW | BMU_STFWD; + /* + ** We have to use the opcode for tcp here because the + ** opcode for udp is not working in the hardware yet + ** (revision 2.0) + */ + if ( (Protocol == C_PROTO_ID_UDP) && + (pAC->GIni.GIChipRev != 0) ) { pTxd->TBControl |= BMU_UDP_CHECK ; - else + } else { pTxd->TBControl |= BMU_TCP_CHECK ; - + } } else { - pTxd->TBControl = TX_CTRL_CHECK_DEFAULT | - TX_CTRL_SOFTWARE | - TX_CTRL_OWN_BMU; + pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_OWN; } - /* Last fragment */ - if( (i+1) == skb_shinfo(pMessage)->nr_frags ) { + /* + ** Do we have the last fragment? + */ + if( (CurrFrag+1) == skb_shinfo(pMessage)->nr_frags ) { #ifdef USE_TX_COMPLETE - pTxd->TBControl |= TX_CTRL_EOF | - TX_CTRL_EOF_IRQ | - sk_frag->size; + pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF | sk_frag->size; #else - pTxd->TBControl |= TX_CTRL_EOF | - sk_frag->size; + pTxd->TBControl |= BMU_EOF | sk_frag->size; #endif - pTxdFst->TBControl |= TX_CTRL_OWN_BMU | - TX_CTRL_SOFTWARE; + pTxdFst->TBControl |= BMU_OWN | BMU_SW; } else { pTxd->TBControl |= sk_frag->size; } pTxdLst = pTxd; - pTxd = pTxd->pNextTxd; + pTxd = pTxd->pNextTxd; pTxPort->TxdRingFree--; BytesSend += sk_frag->size; } - if ((pTxPort->pTxdRingPrev->TBControl & TX_CTRL_OWN_BMU) == 0) { - /* previous descriptor already done, so give tx start cmd */ - /* StartTx(pAC, pTxPort->HwAddr); */ - SK_OUT8(pTxPort->HwAddr, TX_Q_CTRL, TX_Q_CTRL_START); + /* + ** If previous descriptor already done, give TX start cmd + */ + if ((pTxPort->pTxdRingPrev->TBControl & BMU_OWN) == 0) { + SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START); } pTxPort->pTxdRingPrev = pTxdLst; @@ -2306,28 +2425,13 @@ struct sk_buff 
*pMessage) /* pointer to spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); - if (pTxPort->TxdRingFree > 0) + if (pTxPort->TxdRingFree > 0) { return (BytesSend); - else + } else { return (0); + } } - -void dump_frag( SK_U8 *data, int length) -{ - int i; - - printk("Length: %d\n", length); - for( i=0; i < length; i++ ) { - printk(" %02x", (SK_U8)*(data + i) ); - if( !((i+1) % 20) ) - printk("\n"); - } - printk("\n\n"); - -} - - /***************************************************************************** * * FreeTxDescriptors - release descriptors from the descriptor ring @@ -2356,25 +2460,25 @@ SK_U32 Control; /* TBControl field of de SK_U64 PhysAddr; /* address of DMA mapping */ pNewTail = pTxPort->pTxdRingTail; - pTxd = pNewTail; + pTxd = pNewTail; /* - * loop forever; exits if TX_CTRL_SOFTWARE bit not set in start frame - * or TX_CTRL_OWN_BMU bit set in any frame - */ + ** loop forever; exits if BMU_SW bit not set in start frame + ** or BMU_OWN bit set in any frame + */ while (1) { Control = pTxd->TBControl; - if ((Control & TX_CTRL_SOFTWARE) == 0) { + if ((Control & BMU_SW) == 0) { /* - * software controllable bit is set in first - * fragment when given to BMU. Not set means that - * this fragment was never sent or is already - * freed ( -> ring completely free now). - */ + ** software controllable bit is set in first + ** fragment when given to BMU. Not set means that + ** this fragment was never sent or is already + ** freed ( -> ring completely free now). 
+ */ pTxPort->pTxdRingTail = pTxd; netif_wake_queue(pAC->dev[pTxPort->PortIndex]); return; } - if (Control & TX_CTRL_OWN_BMU) { + if (Control & BMU_OWN) { pTxPort->pTxdRingTail = pTxd; if (pTxPort->TxdRingFree > 0) { netif_wake_queue(pAC->dev[pTxPort->PortIndex]); @@ -2382,18 +2486,22 @@ SK_U64 PhysAddr; /* address of DMA mappi return; } - /* release the DMA mapping */ + /* + ** release the DMA mapping, because until not unmapped + ** this buffer is considered being under control of the + ** adapter card! + */ PhysAddr = ((SK_U64) pTxd->VDataHigh) << (SK_U64) 32; PhysAddr |= (SK_U64) pTxd->VDataLow; pci_unmap_page(pAC->PciDev, PhysAddr, pTxd->pMBuf->len, PCI_DMA_TODEVICE); - if (Control & TX_CTRL_EOF) + if (Control & BMU_EOF) DEV_KFREE_SKB_ANY(pTxd->pMBuf); /* free message */ pTxPort->TxdRingFree++; - pTxd->TBControl &= ~TX_CTRL_SOFTWARE; + pTxd->TBControl &= ~BMU_SW; pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */ } /* while(forever) */ } /* FreeTxDescriptors */ @@ -2472,11 +2580,15 @@ SK_U64 PhysAddr; /* physical address of ~PAGE_MASK), pAC->RxBufSize - 2, PCI_DMA_FROMDEVICE); - pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + + pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pRxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - pRxd->pMBuf = pMsgBlock; - pRxd->RBControl = RX_CTRL_OWN_BMU | RX_CTRL_STF | - RX_CTRL_EOF_IRQ | RX_CTRL_CHECK_CSUM | Length; + pRxd->pMBuf = pMsgBlock; + pRxd->RBControl = BMU_OWN | + BMU_STF | + BMU_IRQ_EOF | + BMU_TCP_CHECK | + Length; return (SK_TRUE); } /* FillRxDescriptor */ @@ -2507,15 +2619,18 @@ SK_U16 Length; /* data fragment length pRxPort->pRxdRingTail = pRxd->pNextRxd; pRxPort->RxdRingFree--; Length = pAC->RxBufSize; - pRxd->VDataLow = PhysLow; + + pRxd->VDataLow = PhysLow; pRxd->VDataHigh = PhysHigh; - pRxd->pMBuf = pMsg; - pRxd->RBControl = RX_CTRL_OWN_BMU | RX_CTRL_STF | - RX_CTRL_EOF_IRQ | RX_CTRL_CHECK_CSUM | Length; + pRxd->pMBuf = pMsg; + pRxd->RBControl = BMU_OWN | + BMU_STF | + BMU_IRQ_EOF | + 
BMU_TCP_CHECK | + Length; return; } /* ReQueueRxBuffer */ - /***************************************************************************** * * ReceiveIrq - handle a receive IRQ @@ -2557,7 +2672,7 @@ int Result; SK_U64 PhysAddr; rx_start: - /* do forever; exit if RX_CTRL_OWN_BMU found */ + /* do forever; exit if BMU_OWN found */ for ( pRxd = pRxPort->pRxdRingHead ; pRxPort->RxdRingFree < pAC->RxDescrPerRing ; pRxd = pRxd->pNextRxd, @@ -2577,7 +2692,7 @@ rx_start: Control = pRxd->RBControl; /* check if this descriptor is ready */ - if ((Control & RX_CTRL_OWN_BMU) != 0) { + if ((Control & BMU_OWN) != 0) { /* this descriptor is not yet ready */ /* This is the usual end of the loop */ /* We don't need to start the ring again */ @@ -2587,14 +2702,13 @@ rx_start: pAC->DynIrqModInfo.NbrProcessedDescr++; /* get length of frame and check it */ - FrameLength = Control & RX_CTRL_LEN_MASK; + FrameLength = Control & BMU_BBC; if (FrameLength > pAC->RxBufSize) { goto rx_failed; } /* check for STF and EOF */ - if ((Control & (RX_CTRL_STF | RX_CTRL_EOF)) != - (RX_CTRL_STF | RX_CTRL_EOF)) { + if ((Control & (BMU_STF | BMU_EOF)) != (BMU_STF | BMU_EOF)) { goto rx_failed; } @@ -2648,8 +2762,7 @@ rx_start: pRxPort->RxdRingFree)); /* DumpMsg(pMsg, "Rx"); */ - if ((Control & RX_CTRL_STAT_VALID) != RX_CTRL_STAT_VALID || - (IsBadFrame)) { + if ((Control & BMU_STAT_VAL) != BMU_STAT_VAL || (IsBadFrame)) { #if 0 (FrameStat & (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0) { #endif @@ -2736,7 +2849,7 @@ rx_start: /* Frame not padded => TCP offload! 
*/ if ((((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) && (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) || - (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { + (pAC->ChipsetType)) { Result = SkCsGetReceiveInfo(pAC, &pMsg->data[14], Csum1, Csum2, pRxPort->PortIndex); @@ -2917,8 +3030,9 @@ static void ClearAndStartRx( SK_AC *pAC, /* pointer to the adapter context */ int PortIndex) /* index of the receive port (XMAC) */ { - SK_OUT8(pAC->IoBase, RxQueueAddr[PortIndex]+RX_Q_CTRL, - RX_Q_CTRL_START | RX_Q_CTRL_CLR_I_EOF); + SK_OUT8(pAC->IoBase, + RxQueueAddr[PortIndex]+Q_CSR, + CSR_START | CSR_IRQ_CL_F); } /* ClearAndStartRx */ @@ -2937,8 +3051,9 @@ SK_AC *pAC, /* pointer to the adapter c int PortIndex, /* index of the transmit port (XMAC) */ int Prio) /* priority or normal queue */ { - SK_OUT8(pAC->IoBase, TxQueueAddr[PortIndex][Prio]+TX_Q_CTRL, - TX_Q_CTRL_CLR_I_EOF); + SK_OUT8(pAC->IoBase, + TxQueueAddr[PortIndex][Prio]+Q_CSR, + CSR_IRQ_CL_F); } /* ClearTxIrq */ @@ -2977,7 +3092,7 @@ SK_U64 PhysAddr; DEV_KFREE_SKB(pRxd->pMBuf); pRxd->pMBuf = NULL; } - pRxd->RBControl &= RX_CTRL_OWN_BMU; + pRxd->RBControl &= BMU_OWN; pRxd = pRxd->pNextRxd; pRxPort->RxdRingFree++; } while (pRxd != pRxPort->pRxdRingTail); @@ -2985,7 +3100,6 @@ SK_U64 PhysAddr; spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags); } /* ClearRxRing */ - /***************************************************************************** * * ClearTxRing - remove all buffers from the transmit ring @@ -3010,109 +3124,13 @@ unsigned long Flags; spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); pTxd = pTxPort->pTxdRingHead; for (i=0; iTxDescrPerRing; i++) { - pTxd->TBControl &= ~TX_CTRL_OWN_BMU; + pTxd->TBControl &= ~BMU_OWN; pTxd = pTxd->pNextTxd; } FreeTxDescriptors(pAC, pTxPort); spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); } /* ClearTxRing */ - -#if 0 -/***************************************************************************** - * - * SetQueueSizes - configure the sizes of rx and tx queues - * - * Description: - * 
This function assigns the sizes for active and passive port - * to the appropriate HWinit structure variables. - * The passive port(s) get standard values, all remaining RAM - * is given to the active port. - * The queue sizes are in kbyte and must be multiple of 8. - * The limits for the number of buffers filled into the rx rings - * is also set in this routine. - * - * Returns: - * none - */ -static void SetQueueSizes( -SK_AC *pAC) /* pointer to the adapter context */ -{ -int StandbyRam; /* adapter RAM used for a standby port */ -int RemainingRam; /* adapter RAM available for the active port */ -int RxRam; /* RAM used for the active port receive queue */ -int i; /* loop counter */ - -if (pAC->RlmtNets == 1) { - StandbyRam = SK_RLMT_STANDBY_QRXSIZE + SK_RLMT_STANDBY_QXASIZE + - SK_RLMT_STANDBY_QXSSIZE; - RemainingRam = pAC->GIni.GIRamSize - - (pAC->GIni.GIMacsFound-1) * StandbyRam; - for (i=0; iGIni.GIMacsFound; i++) { - pAC->GIni.GP[i].PRxQSize = SK_RLMT_STANDBY_QRXSIZE; - pAC->GIni.GP[i].PXSQSize = SK_RLMT_STANDBY_QXSSIZE; - pAC->GIni.GP[i].PXAQSize = SK_RLMT_STANDBY_QXASIZE; - } - RxRam = (RemainingRam * 8 / 10) & ~7; - pAC->GIni.GP[pAC->ActivePort].PRxQSize = RxRam; - pAC->GIni.GP[pAC->ActivePort].PXSQSize = 0; - pAC->GIni.GP[pAC->ActivePort].PXAQSize = - (RemainingRam - RxRam) & ~7; - pAC->RxQueueSize = RxRam; - pAC->TxSQueueSize = 0; - pAC->TxAQueueSize = (RemainingRam - RxRam) & ~7; - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, - ("queue sizes settings - rx:%d txA:%d txS:%d\n", - pAC->RxQueueSize,pAC->TxAQueueSize, pAC->TxSQueueSize)); -} else { - RemainingRam = pAC->GIni.GIRamSize/pAC->GIni.GIMacsFound; - RxRam = (RemainingRam * 8 / 10) & ~7; - for (i=0; iGIni.GIMacsFound; i++) { - pAC->GIni.GP[i].PRxQSize = RxRam; - pAC->GIni.GP[i].PXSQSize = 0; - pAC->GIni.GP[i].PXAQSize = (RemainingRam - RxRam) & ~7; - } - - pAC->RxQueueSize = RxRam; - pAC->TxSQueueSize = 0; - pAC->TxAQueueSize = (RemainingRam - RxRam) & ~7; -} - for (i=0; 
iRxPort[i].RxFillLimit = pAC->RxDescrPerRing; - } - - if (pAC->RlmtNets == 2) { - for (i=0; iGIni.GIMacsFound; i++) { - pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - 100; - } - } else { - for (i=0; iGIni.GIMacsFound; i++) { - pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - 100; - } - /* - * Do not set the Limit to 0, because this could cause - * wrap around with ReQueue'ed buffers (a buffer could - * be requeued in the same position, made accessable to - * the hardware, and the hardware could change its - * contents! - */ - pAC->RxPort[pAC->ActivePort].RxFillLimit = 1; - } - -#ifdef DEBUG - for (i=0; iGIni.GIMacsFound; i++) { - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, - ("i: %d, RxQSize: %d, PXSQsize: %d, PXAQSize: %d\n", - i, - pAC->GIni.GP[i].PRxQSize, - pAC->GIni.GP[i].PXSQSize, - pAC->GIni.GP[i].PXAQSize)); - } -#endif -} /* SetQueueSizes */ -#endif - - /***************************************************************************** * * SkGeSetMacAddr - Set the hardware MAC address @@ -3257,7 +3275,7 @@ SK_EVPARA EvPara; ("SkGeChangeMtu starts now...\n")); pNet = (DEV_NET*) dev->priv; - pAC = pNet->pAC; + pAC = pNet->pAC; if ((NewMtu < 68) || (NewMtu > SK_JUMBO_MTU)) { return -EINVAL; @@ -3269,37 +3287,40 @@ SK_EVPARA EvPara; pNet->Mtu = NewMtu; pOtherNet = (DEV_NET*)pAC->dev[1 - pNet->NetNr]->priv; - if ((pOtherNet->Mtu > 1500) && (NewMtu <= 1500) && (pOtherNet->Up==1)) { + if ((pOtherNet->Mtu>1500) && (NewMtu<=1500) && (pOtherNet->Up==1)) { return(0); } - EvPara.Para32[0] = pNet->NetNr; - EvPara.Para32[1] = -1; - pAC->RxBufSize = NewMtu + 32; dev->mtu = NewMtu; SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("New MTU: %d\n", NewMtu)); - /* prevent reconfiguration while changing the MTU */ - - /* disable interrupts */ + /* + ** Prevent any reconfiguration while changing the MTU + ** by disabling any interrupts + */ SK_OUT32(pAC->IoBase, B0_IMSK, 0); spin_lock_irqsave(&pAC->SlowPathLock, Flags); - /* Found more than one port */ - 
if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - /* Stop both ports */ - EvPara.Para32[0] = 0; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); - EvPara.Para32[0] = 1; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + /* + ** Notify RLMT that any ports are to be stopped + */ + EvPara.Para32[0] = 0; + EvPara.Para32[1] = -1; + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + EvPara.Para32[0] = 1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); } else { SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); } + /* + ** After calling the SkEventDispatcher(), RLMT is aware about + ** the stopped ports -> configuration can take place! + */ SkEventDispatcher(pAC, pAC->IoBase); for (i=0; iGIni.GIMacsFound; i++) { @@ -3310,59 +3331,59 @@ SK_EVPARA EvPara; } /* - * adjust number of rx buffers allocated - */ + ** Depending on the desired MTU size change, a different number of + ** RX buffers need to be allocated + */ if (NewMtu > 1500) { - /* use less rx buffers */ - for (i=0; iGIni.GIMacsFound; i++) { - /* Found more than one port */ - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 100; - } else { - if (i == pAC->ActivePort) - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 100; - else - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 10; - } + /* + ** Use less rx buffers + */ + for (i=0; iGIni.GIMacsFound; i++) { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } else { + if (i == pAC->ActivePort) { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } else { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 10); + } } - } - else { - /* use normal amount of rx buffers */ - for (i=0; iGIni.GIMacsFound; i++) { - /* Found more than one port */ - 
if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - pAC->RxPort[i].RxFillLimit = 1; - } else { - if (i == pAC->ActivePort) - pAC->RxPort[i].RxFillLimit = 1; - else - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 100; - } + } + } else { + /* + ** Use the normal amount of rx buffers + */ + for (i=0; iGIni.GIMacsFound; i++) { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->RxPort[i].RxFillLimit = 1; + } else { + if (i == pAC->ActivePort) { + pAC->RxPort[i].RxFillLimit = 1; + } else { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } } + } } SkGeDeInit(pAC, pAC->IoBase); /* - * enable/disable hardware support for long frames - */ + ** enable/disable hardware support for long frames + */ if (NewMtu > 1500) { -// pAC->JumboActivated = SK_TRUE; /* is never set back !!! */ +// pAC->JumboActivated = SK_TRUE; /* is never set back !!! */ pAC->GIni.GIPortUsage = SK_JUMBO_LINK; - } - else { - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - pAC->GIni.GIPortUsage = SK_MUL_LINK; - } else { - pAC->GIni.GIPortUsage = SK_RED_LINK; - } + } else { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->GIni.GIPortUsage = SK_MUL_LINK; + } else { + pAC->GIni.GIPortUsage = SK_RED_LINK; + } } SkGeInit( pAC, pAC->IoBase, SK_INIT_IO); @@ -3374,9 +3395,9 @@ SK_EVPARA EvPara; SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO); /* - * tschilling: - * Speed and others are set back to default in level 1 init! - */ + ** tschilling: + ** Speed and others are set back to default in level 1 init! 
+ */ GetConfiguration(pAC); SkGeInit( pAC, pAC->IoBase, SK_INIT_RUN); @@ -3388,21 +3409,21 @@ SK_EVPARA EvPara; SkTimerInit(pAC, pAC->IoBase, SK_INIT_RUN); /* - * clear and reinit the rx rings here - */ - + ** clear and reinit the rx rings here + */ for (i=0; iGIni.GIMacsFound; i++) { ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE); ClearRxRing(pAC, &pAC->RxPort[i]); FillRxRing(pAC, &pAC->RxPort[i]); - /* Enable transmit descriptor polling. */ + /* + ** Enable transmit descriptor polling + */ SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE); FillRxRing(pAC, &pAC->RxPort[i]); }; SkGeYellowLED(pAC, pAC->IoBase, 1); - SkDimEnableModerationIfNeeded(pAC); SkDimDisplayModerationSettings(pAC); @@ -3411,32 +3432,30 @@ SK_EVPARA EvPara; spin_unlock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock); } - /* enable Interrupts */ + /* + ** Enable Interrupts again + */ SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK); SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); SkEventDispatcher(pAC, pAC->IoBase); - /* Found more than one port */ - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - /* Start both ports */ - EvPara.Para32[0] = pAC->RlmtNets; - EvPara.Para32[1] = -1; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, - EvPara); - + /* + ** Notify RLMT about the changing and restarting one (or more) ports + */ + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + EvPara.Para32[0] = pAC->RlmtNets; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, EvPara); + EvPara.Para32[0] = pNet->PortNr; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); - EvPara.Para32[1] = -1; - EvPara.Para32[0] = pNet->PortNr; + if (pOtherNet->Up) { + EvPara.Para32[0] = pOtherNet->PortNr; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); - - if (pOtherNet->Up) { - EvPara.Para32[0] = pOtherNet->PortNr; - SkEventQueue(pAC, SKGE_RLMT, - SK_RLMT_START, EvPara); - } + } } else { 
SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); } @@ -3444,7 +3463,20 @@ SK_EVPARA EvPara; SkEventDispatcher(pAC, pAC->IoBase); spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + /* + ** While testing this driver with latest kernel 2.5 (2.5.70), it + ** seems as if upper layers have a problem to handle a successful + ** return value of '0'. If such a zero is returned, the complete + ** system hangs for several minutes (!), which is in acceptable. + ** + ** Currently it is not clear, what the exact reason for this problem + ** is. The implemented workaround for 2.5 is to return the desired + ** new MTU size if all needed changes for the new MTU size where + ** performed. In kernels 2.2 and 2.4, a zero value is returned, + ** which indicates the successful change of the mtu-size. + */ return 0; + } /* SkGeChangeMtu */ @@ -3678,12 +3710,19 @@ static void GetConfiguration( SK_AC *pAC) /* pointer to the adapter context structure */ { SK_I32 Port; /* preferred port */ -int LinkSpeed; /* Link speed */ -int AutoNeg; /* auto negotiation off (0) or on (1) */ -int DuplexCap; /* duplex capabilities (0=both, 1=full, 2=half */ -int MSMode; /* master / slave mode selection */ SK_BOOL AutoSet; SK_BOOL DupSet; +int LinkSpeed = SK_LSPEED_AUTO; /* Link speed */ +int AutoNeg = 1; /* autoneg off (0) or on (1) */ +int DuplexCap = 0; /* 0=both,1=full,2=half */ +int FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; /* FlowControl */ +int MSMode = SK_MS_MODE_AUTO; /* master/slave mode */ + +SK_BOOL IsConTypeDefined = SK_TRUE; +SK_BOOL IsLinkSpeedDefined = SK_TRUE; +SK_BOOL IsFlowCtrlDefined = SK_TRUE; +SK_BOOL IsRoleDefined = SK_TRUE; +SK_BOOL IsModeDefined = SK_TRUE; /* * The two parameters AutoNeg. and DuplexCap. map to one configuration * parameter. 
The mapping is described by this table: @@ -3697,44 +3736,143 @@ SK_BOOL DupSet; * Sense | AutoSense | AutoSense | AutoSense | */ int Capabilities[3][3] = - { { -1, SK_LMODE_FULL, SK_LMODE_HALF}, - {SK_LMODE_AUTOBOTH, SK_LMODE_AUTOFULL, SK_LMODE_AUTOHALF}, + { { -1, SK_LMODE_FULL , SK_LMODE_HALF }, + {SK_LMODE_AUTOBOTH , SK_LMODE_AUTOFULL , SK_LMODE_AUTOHALF }, {SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE} }; + #define DC_BOTH 0 #define DC_FULL 1 #define DC_HALF 2 #define AN_OFF 0 #define AN_ON 1 #define AN_SENS 2 +#define M_CurrPort pAC->GIni.GP[Port] + + + /* + ** Set the default values first for both ports! + */ + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO; + } + + /* + ** Check merged parameter ConType. If it has not been used, + ** verify any other parameter (e.g. AutoNeg) and use default values. + ** + ** Stating both ConType and other lowlevel link parameters is also + ** possible. If this is the case, the passed ConType-parameter is + ** overwritten by the lowlevel link parameter. + ** + ** The following settings are used for a merged ConType-parameter: + ** + ** ConType DupCap AutoNeg FlowCtrl Role Speed + ** ------- ------ ------- -------- ---------- ----- + ** Auto Both On SymOrRem Auto Auto + ** 100FD Full Off None 100 + ** 100HD Half Off None 100 + ** 10FD Full Off None 10 + ** 10HD Half Off None 10 + ** + ** This ConType parameter is used for all ports of the adapter! + */ + if ( (ConType != NULL) && + (pAC->Index < SK_MAX_CARD_PARAM) && + (ConType[pAC->Index] != NULL) ) { + + /* Check chipset family */ + if ((!pAC->ChipsetType) && + (strcmp(ConType[pAC->Index],"Auto")!=0) && + (strcmp(ConType[pAC->Index],"")!=0)) { + /* Set the speed parameter back */ + printk("%s: Illegal value \"%s\" " + "for ConType." 
+ " Using Auto.\n", + pAC->dev[0]->name, + ConType[pAC->Index]); + + sprintf(ConType[pAC->Index], "Auto"); + } + + if (strcmp(ConType[pAC->Index],"")==0) { + IsConTypeDefined = SK_FALSE; /* No ConType defined */ + } else if (strcmp(ConType[pAC->Index],"Auto")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO; + } + } else if (strcmp(ConType[pAC->Index],"100FD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS; + } + } else if (strcmp(ConType[pAC->Index],"100HD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS; + } + } else if (strcmp(ConType[pAC->Index],"10FD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS; + } + } else if (strcmp(ConType[pAC->Index],"10HD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS; + } + } else { + printk("%s: Illegal value \"%s\" for ConType\n", + pAC->dev[0]->name, ConType[pAC->Index]); + IsConTypeDefined = SK_FALSE; /* Wrong ConType defined */ + } + } else { + IsConTypeDefined = SK_FALSE; /* No ConType defined */ + } - /* settings for port A */ - /* settings link speed */ 
- LinkSpeed = SK_LSPEED_AUTO; /* default: do auto select */ + /* + ** Parse any parameter settings for port A: + ** a) any LinkSpeed stated? + */ if (Speed_A != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Speed_A[pAC->Index],"")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_A[pAC->Index],"Auto")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_A[pAC->Index],"10")==0) { - LinkSpeed = SK_LSPEED_10MBPS; - } - else if (strcmp(Speed_A[pAC->Index],"100")==0) { - LinkSpeed = SK_LSPEED_100MBPS; - } - else if (strcmp(Speed_A[pAC->Index],"1000")==0) { - LinkSpeed = SK_LSPEED_1000MBPS; + IsLinkSpeedDefined = SK_FALSE; + } else if (strcmp(Speed_A[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } else if (strcmp(Speed_A[pAC->Index],"10")==0) { + LinkSpeed = SK_LSPEED_10MBPS; + } else if (strcmp(Speed_A[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } else if (strcmp(Speed_A[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } else { + printk("%s: Illegal value \"%s\" for Speed_A\n", + pAC->dev[0]->name, Speed_A[pAC->Index]); + IsLinkSpeedDefined = SK_FALSE; } - else printk("%s: Illegal value for Speed_A\n", - pAC->dev[0]->name); + } else { + IsLinkSpeedDefined = SK_FALSE; } - /* Check speed parameter */ - /* Only copper type adapter and GE V2 cards */ - if (((pAC->GIni.GIChipId != CHIP_ID_YUKON) || - (pAC->GIni.GICopperType != SK_TRUE)) && + /* + ** Check speed parameter: + ** Only copper type adapter and GE V2 cards + */ + if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) && ((LinkSpeed != SK_LSPEED_AUTO) && (LinkSpeed != SK_LSPEED_1000MBPS))) { printk("%s: Illegal value for Speed_A. " @@ -3742,63 +3880,82 @@ int Capabilities[3][3] = "speed 1000\n", pAC->dev[0]->name); LinkSpeed = SK_LSPEED_1000MBPS; } - pAC->GIni.GP[0].PLinkSpeed = LinkSpeed; - - /* Autonegotiation */ + + /* + ** Decide whether to set new config value if somethig valid has + ** been received. 
+ */ + if (IsLinkSpeedDefined) { + pAC->GIni.GP[0].PLinkSpeed = LinkSpeed; + } + + /* + ** b) Any Autonegotiation and DuplexCapabilities set? + ** Please note that both belong together... + */ AutoNeg = AN_ON; /* tschilling: Default: Autonegotiation on! */ AutoSet = SK_FALSE; if (AutoNeg_A != NULL && pAC->IndexIndex] != NULL) { AutoSet = SK_TRUE; if (strcmp(AutoNeg_A[pAC->Index],"")==0) { - AutoSet = SK_FALSE; - } - else if (strcmp(AutoNeg_A[pAC->Index],"On")==0) { - AutoNeg = AN_ON; - } - else if (strcmp(AutoNeg_A[pAC->Index],"Off")==0) { - AutoNeg = AN_OFF; - } - else if (strcmp(AutoNeg_A[pAC->Index],"Sense")==0) { - AutoNeg = AN_SENS; + AutoSet = SK_FALSE; + } else if (strcmp(AutoNeg_A[pAC->Index],"On")==0) { + AutoNeg = AN_ON; + } else if (strcmp(AutoNeg_A[pAC->Index],"Off")==0) { + AutoNeg = AN_OFF; + } else if (strcmp(AutoNeg_A[pAC->Index],"Sense")==0) { + AutoNeg = AN_SENS; + } else { + printk("%s: Illegal value \"%s\" for AutoNeg_A\n", + pAC->dev[0]->name, AutoNeg_A[pAC->Index]); } - else printk("%s: Illegal value for AutoNeg_A\n", - pAC->dev[0]->name); } DuplexCap = DC_BOTH; - DupSet = SK_FALSE; + DupSet = SK_FALSE; if (DupCap_A != NULL && pAC->IndexIndex] != NULL) { DupSet = SK_TRUE; if (strcmp(DupCap_A[pAC->Index],"")==0) { - DupSet = SK_FALSE; - } - else if (strcmp(DupCap_A[pAC->Index],"Both")==0) { - DuplexCap = DC_BOTH; - } - else if (strcmp(DupCap_A[pAC->Index],"Full")==0) { - DuplexCap = DC_FULL; - } - else if (strcmp(DupCap_A[pAC->Index],"Half")==0) { - DuplexCap = DC_HALF; + DupSet = SK_FALSE; + } else if (strcmp(DupCap_A[pAC->Index],"Both")==0) { + DuplexCap = DC_BOTH; + } else if (strcmp(DupCap_A[pAC->Index],"Full")==0) { + DuplexCap = DC_FULL; + } else if (strcmp(DupCap_A[pAC->Index],"Half")==0) { + DuplexCap = DC_HALF; + } else { + printk("%s: Illegal value \"%s\" for DupCap_A\n", + pAC->dev[0]->name, DupCap_A[pAC->Index]); } - else printk("%s: Illegal value for DupCap_A\n", - pAC->dev[0]->name); } - /* check for illegal combinations */ - if 
(AutoSet && AutoNeg==AN_SENS && DupSet) { + /* + ** Check for illegal combinations + */ + if ((LinkSpeed = SK_LSPEED_1000MBPS) && + ((DuplexCap == SK_LMODE_STAT_AUTOHALF) || + (DuplexCap == SK_LMODE_STAT_HALF)) && + (pAC->ChipsetType)) { + printk("%s: Half Duplex not possible with Gigabit speed!\n" + " Using Full Duplex.\n", + pAC->dev[0]->name); + DuplexCap = DC_FULL; + } + + if ( AutoSet && AutoNeg==AN_SENS && DupSet) { printk("%s, Port A: DuplexCapabilities" " ignored using Sense mode\n", pAC->dev[0]->name); } + if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){ printk("%s, Port A: Illegal combination" " of values AutoNeg. and DuplexCap.\n Using " "Full Duplex\n", pAC->dev[0]->name); - DuplexCap = DC_FULL; } + if (AutoSet && AutoNeg==AN_OFF && !DupSet) { DuplexCap = DC_FULL; } @@ -3811,89 +3968,110 @@ int Capabilities[3][3] = AutoNeg = AN_ON; } - /* set the desired mode */ - pAC->GIni.GP[0].PLinkModeConf = - Capabilities[AutoNeg][DuplexCap]; + /* + ** set the desired mode + */ + if (AutoSet || DupSet) { + pAC->GIni.GP[0].PLinkModeConf = Capabilities[AutoNeg][DuplexCap]; + } - pAC->GIni.GP[0].PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + /* + ** c) Any Flowcontrol-parameter set? 
+ */ if (FlowCtrl_A != NULL && pAC->IndexIndex] != NULL) { if (strcmp(FlowCtrl_A[pAC->Index],"") == 0) { + IsFlowCtrlDefined = SK_FALSE; + } else if (strcmp(FlowCtrl_A[pAC->Index],"SymOrRem") == 0) { + FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; + } else if (strcmp(FlowCtrl_A[pAC->Index],"Sym")==0) { + FlowCtrl = SK_FLOW_MODE_SYMMETRIC; + } else if (strcmp(FlowCtrl_A[pAC->Index],"LocSend")==0) { + FlowCtrl = SK_FLOW_MODE_LOC_SEND; + } else if (strcmp(FlowCtrl_A[pAC->Index],"None")==0) { + FlowCtrl = SK_FLOW_MODE_NONE; + } else { + printk("%s: Illegal value \"%s\" for FlowCtrl_A\n", + pAC->dev[0]->name, FlowCtrl_A[pAC->Index]); + IsFlowCtrlDefined = SK_FALSE; } - else if (strcmp(FlowCtrl_A[pAC->Index],"SymOrRem") == 0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_SYM_OR_REM; - } - else if (strcmp(FlowCtrl_A[pAC->Index],"Sym")==0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_SYMMETRIC; - } - else if (strcmp(FlowCtrl_A[pAC->Index],"LocSend")==0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_LOC_SEND; - } - else if (strcmp(FlowCtrl_A[pAC->Index],"None")==0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_NONE; - } - else printk("Illegal value for FlowCtrl_A\n"); + } else { + IsFlowCtrlDefined = SK_FALSE; } - if (AutoNeg==AN_OFF && pAC->GIni.GP[0].PFlowCtrlMode!= - SK_FLOW_MODE_NONE) { + + if (IsFlowCtrlDefined) { + if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) { printk("%s, Port A: FlowControl" " impossible without AutoNegotiation," " disabled\n", pAC->dev[0]->name); - pAC->GIni.GP[0].PFlowCtrlMode = SK_FLOW_MODE_NONE; + FlowCtrl = SK_FLOW_MODE_NONE; + } + pAC->GIni.GP[0].PFlowCtrlMode = FlowCtrl; } - MSMode = SK_MS_MODE_AUTO; /* default: do auto select */ + /* + ** d) What is with the RoleParameter? 
+ */ if (Role_A != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Role_A[pAC->Index],"")==0) { + IsRoleDefined = SK_FALSE; + } else if (strcmp(Role_A[pAC->Index],"Auto")==0) { + MSMode = SK_MS_MODE_AUTO; + } else if (strcmp(Role_A[pAC->Index],"Master")==0) { + MSMode = SK_MS_MODE_MASTER; + } else if (strcmp(Role_A[pAC->Index],"Slave")==0) { + MSMode = SK_MS_MODE_SLAVE; + } else { + printk("%s: Illegal value \"%s\" for Role_A\n", + pAC->dev[0]->name, Role_A[pAC->Index]); + IsRoleDefined = SK_FALSE; } - else if (strcmp(Role_A[pAC->Index],"Auto")==0) { - MSMode = SK_MS_MODE_AUTO; - } - else if (strcmp(Role_A[pAC->Index],"Master")==0) { - MSMode = SK_MS_MODE_MASTER; - } - else if (strcmp(Role_A[pAC->Index],"Slave")==0) { - MSMode = SK_MS_MODE_SLAVE; - } - else printk("%s: Illegal value for Role_A\n", - pAC->dev[0]->name); + } else { + IsRoleDefined = SK_FALSE; + } + + if (IsRoleDefined == SK_TRUE) { + pAC->GIni.GP[0].PMSMode = MSMode; } - pAC->GIni.GP[0].PMSMode = MSMode; + - /* settings for port B */ - /* settings link speed */ - LinkSpeed = SK_LSPEED_AUTO; /* default: do auto select */ + /* + ** Parse any parameter settings for port B: + ** a) any LinkSpeed stated? 
+ */ + IsConTypeDefined = SK_TRUE; + IsLinkSpeedDefined = SK_TRUE; + IsFlowCtrlDefined = SK_TRUE; + IsModeDefined = SK_TRUE; + if (Speed_B != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Speed_B[pAC->Index],"")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_B[pAC->Index],"Auto")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_B[pAC->Index],"10")==0) { - LinkSpeed = SK_LSPEED_10MBPS; - } - else if (strcmp(Speed_B[pAC->Index],"100")==0) { - LinkSpeed = SK_LSPEED_100MBPS; - } - else if (strcmp(Speed_B[pAC->Index],"1000")==0) { - LinkSpeed = SK_LSPEED_1000MBPS; + IsLinkSpeedDefined = SK_FALSE; + } else if (strcmp(Speed_B[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } else if (strcmp(Speed_B[pAC->Index],"10")==0) { + LinkSpeed = SK_LSPEED_10MBPS; + } else if (strcmp(Speed_B[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } else if (strcmp(Speed_B[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } else { + printk("%s: Illegal value \"%s\" for Speed_B\n", + pAC->dev[1]->name, Speed_B[pAC->Index]); + IsLinkSpeedDefined = SK_FALSE; } - else printk("%s: Illegal value for Speed_B\n", - pAC->dev[1]->name); + } else { + IsLinkSpeedDefined = SK_FALSE; } - /* Check speed parameter */ - /* Only copper type adapter and GE V2 cards */ - if (((pAC->GIni.GIChipId != CHIP_ID_YUKON) || - (pAC->GIni.GICopperType != SK_TRUE)) && + /* + ** Check speed parameter: + ** Only copper type adapter and GE V2 cards + */ + if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) && ((LinkSpeed != SK_LSPEED_AUTO) && (LinkSpeed != SK_LSPEED_1000MBPS))) { printk("%s: Illegal value for Speed_B. " @@ -3901,61 +4079,83 @@ int Capabilities[3][3] = "speed 1000\n", pAC->dev[1]->name); LinkSpeed = SK_LSPEED_1000MBPS; } - pAC->GIni.GP[1].PLinkSpeed = LinkSpeed; - /* Auto negotiation */ + /* + ** Decide whether to set new config value if somethig valid has + ** been received. 
+ */ + if (IsLinkSpeedDefined) { + pAC->GIni.GP[1].PLinkSpeed = LinkSpeed; + } + + /* + ** b) Any Autonegotiation and DuplexCapabilities set? + ** Please note that both belong together... + */ AutoNeg = AN_SENS; /* default: do auto Sense */ AutoSet = SK_FALSE; if (AutoNeg_B != NULL && pAC->IndexIndex] != NULL) { AutoSet = SK_TRUE; if (strcmp(AutoNeg_B[pAC->Index],"")==0) { - AutoSet = SK_FALSE; - } - else if (strcmp(AutoNeg_B[pAC->Index],"On")==0) { - AutoNeg = AN_ON; - } - else if (strcmp(AutoNeg_B[pAC->Index],"Off")==0) { - AutoNeg = AN_OFF; - } - else if (strcmp(AutoNeg_B[pAC->Index],"Sense")==0) { - AutoNeg = AN_SENS; + AutoSet = SK_FALSE; + } else if (strcmp(AutoNeg_B[pAC->Index],"On")==0) { + AutoNeg = AN_ON; + } else if (strcmp(AutoNeg_B[pAC->Index],"Off")==0) { + AutoNeg = AN_OFF; + } else if (strcmp(AutoNeg_B[pAC->Index],"Sense")==0) { + AutoNeg = AN_SENS; + } else { + printk("%s: Illegal value \"%s\" for AutoNeg_B\n", + pAC->dev[0]->name, AutoNeg_B[pAC->Index]); } - else printk("Illegal value for AutoNeg_B\n"); } DuplexCap = DC_BOTH; - DupSet = SK_FALSE; + DupSet = SK_FALSE; if (DupCap_B != NULL && pAC->IndexIndex] != NULL) { DupSet = SK_TRUE; if (strcmp(DupCap_B[pAC->Index],"")==0) { - DupSet = SK_FALSE; - } - else if (strcmp(DupCap_B[pAC->Index],"Both")==0) { - DuplexCap = DC_BOTH; - } - else if (strcmp(DupCap_B[pAC->Index],"Full")==0) { - DuplexCap = DC_FULL; - } - else if (strcmp(DupCap_B[pAC->Index],"Half")==0) { - DuplexCap = DC_HALF; + DupSet = SK_FALSE; + } else if (strcmp(DupCap_B[pAC->Index],"Both")==0) { + DuplexCap = DC_BOTH; + } else if (strcmp(DupCap_B[pAC->Index],"Full")==0) { + DuplexCap = DC_FULL; + } else if (strcmp(DupCap_B[pAC->Index],"Half")==0) { + DuplexCap = DC_HALF; + } else { + printk("%s: Illegal value \"%s\" for DupCap_B\n", + pAC->dev[0]->name, DupCap_B[pAC->Index]); } - else printk("Illegal value for DupCap_B\n"); } + - /* check for illegal combinations */ + /* + ** Check for illegal combinations + */ + if ((LinkSpeed = 
SK_LSPEED_1000MBPS) && + ((DuplexCap == SK_LMODE_STAT_AUTOHALF) || + (DuplexCap == SK_LMODE_STAT_HALF)) && + (pAC->ChipsetType)) { + printk("%s: Half Duplex not possible with Gigabit speed!\n" + " Using Full Duplex.\n", + pAC->dev[1]->name); + DuplexCap = DC_FULL; + } + if (AutoSet && AutoNeg==AN_SENS && DupSet) { printk("%s, Port B: DuplexCapabilities" " ignored using Sense mode\n", pAC->dev[1]->name); } + if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){ printk("%s, Port B: Illegal combination" " of values AutoNeg. and DuplexCap.\n Using " "Full Duplex\n", pAC->dev[1]->name); - DuplexCap = DC_FULL; } + if (AutoSet && AutoNeg==AN_OFF && !DupSet) { DuplexCap = DC_FULL; } @@ -3968,90 +4168,103 @@ int Capabilities[3][3] = AutoNeg = AN_ON; } - /* set the desired mode */ - pAC->GIni.GP[1].PLinkModeConf = - Capabilities[AutoNeg][DuplexCap]; + /* + ** set the desired mode + */ + if (AutoSet || DupSet) { + pAC->GIni.GP[1].PLinkModeConf = Capabilities[AutoNeg][DuplexCap]; + } - pAC->GIni.GP[1].PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + /* + ** c) Any FlowCtrl parameter set? 
+ */ if (FlowCtrl_B != NULL && pAC->IndexIndex] != NULL) { if (strcmp(FlowCtrl_B[pAC->Index],"") == 0) { + IsFlowCtrlDefined = SK_FALSE; + } else if (strcmp(FlowCtrl_B[pAC->Index],"SymOrRem") == 0) { + FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; + } else if (strcmp(FlowCtrl_B[pAC->Index],"Sym")==0) { + FlowCtrl = SK_FLOW_MODE_SYMMETRIC; + } else if (strcmp(FlowCtrl_B[pAC->Index],"LocSend")==0) { + FlowCtrl = SK_FLOW_MODE_LOC_SEND; + } else if (strcmp(FlowCtrl_B[pAC->Index],"None")==0) { + FlowCtrl = SK_FLOW_MODE_NONE; + } else { + printk("%s: Illegal value \"%s\" for FlowCtrl_B\n", + pAC->dev[0]->name, FlowCtrl_B[pAC->Index]); + IsFlowCtrlDefined = SK_FALSE; } - else if (strcmp(FlowCtrl_B[pAC->Index],"SymOrRem") == 0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_SYM_OR_REM; - } - else if (strcmp(FlowCtrl_B[pAC->Index],"Sym")==0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_SYMMETRIC; - } - else if (strcmp(FlowCtrl_B[pAC->Index],"LocSend")==0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_LOC_SEND; - } - else if (strcmp(FlowCtrl_B[pAC->Index],"None")==0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_NONE; - } - else printk("Illegal value for FlowCtrl_B\n"); + } else { + IsFlowCtrlDefined = SK_FALSE; } - if (AutoNeg==AN_OFF && pAC->GIni.GP[1].PFlowCtrlMode!= - SK_FLOW_MODE_NONE) { + + if (IsFlowCtrlDefined) { + if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) { printk("%s, Port B: FlowControl" " impossible without AutoNegotiation," " disabled\n", pAC->dev[1]->name); - pAC->GIni.GP[1].PFlowCtrlMode = SK_FLOW_MODE_NONE; + FlowCtrl = SK_FLOW_MODE_NONE; + } + pAC->GIni.GP[1].PFlowCtrlMode = FlowCtrl; } - MSMode = SK_MS_MODE_AUTO; /* default: do auto select */ + /* + ** d) What is the RoleParameter? 
+ */ if (Role_B != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Role_B[pAC->Index],"")==0) { + IsRoleDefined = SK_FALSE; + } else if (strcmp(Role_B[pAC->Index],"Auto")==0) { + MSMode = SK_MS_MODE_AUTO; + } else if (strcmp(Role_B[pAC->Index],"Master")==0) { + MSMode = SK_MS_MODE_MASTER; + } else if (strcmp(Role_B[pAC->Index],"Slave")==0) { + MSMode = SK_MS_MODE_SLAVE; + } else { + printk("%s: Illegal value \"%s\" for Role_B\n", + pAC->dev[1]->name, Role_B[pAC->Index]); + IsRoleDefined = SK_FALSE; } - else if (strcmp(Role_B[pAC->Index],"Auto")==0) { - MSMode = SK_MS_MODE_AUTO; - } - else if (strcmp(Role_B[pAC->Index],"Master")==0) { - MSMode = SK_MS_MODE_MASTER; - } - else if (strcmp(Role_B[pAC->Index],"Slave")==0) { - MSMode = SK_MS_MODE_SLAVE; - } - else printk("%s: Illegal value for Role_B\n", - pAC->dev[1]->name); + } else { + IsRoleDefined = SK_FALSE; + } + + if (IsRoleDefined) { + pAC->GIni.GP[1].PMSMode = MSMode; } - pAC->GIni.GP[1].PMSMode = MSMode; - - /* settings for both ports */ + /* + ** Evaluate settings for both ports + */ pAC->ActivePort = 0; if (PrefPort != NULL && pAC->IndexIndex] != NULL) { if (strcmp(PrefPort[pAC->Index],"") == 0) { /* Auto */ - pAC->ActivePort = 0; - pAC->Rlmt.Net[0].Preference = -1; /* auto */ - pAC->Rlmt.Net[0].PrefPort = 0; - } - else if (strcmp(PrefPort[pAC->Index],"A") == 0) { - /* - * do not set ActivePort here, thus a port - * switch is issued after net up. - */ - Port = 0; - pAC->Rlmt.Net[0].Preference = Port; - pAC->Rlmt.Net[0].PrefPort = Port; - } - else if (strcmp(PrefPort[pAC->Index],"B") == 0) { - /* - * do not set ActivePort here, thus a port - * switch is issued after net up. - */ - Port = 1; - pAC->Rlmt.Net[0].Preference = Port; - pAC->Rlmt.Net[0].PrefPort = Port; + pAC->ActivePort = 0; + pAC->Rlmt.Net[0].Preference = -1; /* auto */ + pAC->Rlmt.Net[0].PrefPort = 0; + } else if (strcmp(PrefPort[pAC->Index],"A") == 0) { + /* + ** do not set ActivePort here, thus a port + ** switch is issued after net up. 
+ */ + Port = 0; + pAC->Rlmt.Net[0].Preference = Port; + pAC->Rlmt.Net[0].PrefPort = Port; + } else if (strcmp(PrefPort[pAC->Index],"B") == 0) { + /* + ** do not set ActivePort here, thus a port + ** switch is issued after net up. + */ + Port = 1; + pAC->Rlmt.Net[0].Preference = Port; + pAC->Rlmt.Net[0].PrefPort = Port; + } else { + printk("%s: Illegal value \"%s\" for PrefPort\n", + pAC->dev[0]->name, PrefPort[pAC->Index]); } - else printk("%s: Illegal value for PrefPort\n", - pAC->dev[0]->name); } pAC->RlmtNets = 1; @@ -4060,31 +4273,26 @@ int Capabilities[3][3] = RlmtMode[pAC->Index] != NULL) { if (strcmp(RlmtMode[pAC->Index], "") == 0) { pAC->RlmtMode = 0; - } - else if (strcmp(RlmtMode[pAC->Index], "CheckLinkState") == 0) { + } else if (strcmp(RlmtMode[pAC->Index], "CheckLinkState") == 0) { pAC->RlmtMode = SK_RLMT_CHECK_LINK; - } - else if (strcmp(RlmtMode[pAC->Index], "CheckLocalPort") == 0) { - pAC->RlmtMode = SK_RLMT_CHECK_LINK | - SK_RLMT_CHECK_LOC_LINK; - } - else if (strcmp(RlmtMode[pAC->Index], "CheckSeg") == 0) { + } else if (strcmp(RlmtMode[pAC->Index], "CheckLocalPort") == 0) { pAC->RlmtMode = SK_RLMT_CHECK_LINK | - SK_RLMT_CHECK_LOC_LINK | - SK_RLMT_CHECK_SEG; - } - else if ((strcmp(RlmtMode[pAC->Index], "DualNet") == 0) && + SK_RLMT_CHECK_LOC_LINK; + } else if (strcmp(RlmtMode[pAC->Index], "CheckSeg") == 0) { + pAC->RlmtMode = SK_RLMT_CHECK_LINK | + SK_RLMT_CHECK_LOC_LINK | + SK_RLMT_CHECK_SEG; + } else if ((strcmp(RlmtMode[pAC->Index], "DualNet") == 0) && (pAC->GIni.GIMacsFound == 2)) { - pAC->RlmtMode = SK_RLMT_CHECK_LINK; - pAC->RlmtNets = 2; - } - else { - printk("%s: Illegal value for" - " RlmtMode, using default\n", pAC->dev[0]->name); + pAC->RlmtMode = SK_RLMT_CHECK_LINK; + pAC->RlmtNets = 2; + } else { + printk("%s: Illegal value \"%s\" for" + " RlmtMode, using default\n", + pAC->dev[0]->name, RlmtMode[pAC->Index]); pAC->RlmtMode = 0; } - } - else { + } else { pAC->RlmtMode = 0; } @@ -4642,9 +4850,23 @@ SK_BOOL DualNet; printk(" role: 
???\n"); } } - + + /* + Display dim (dynamic interrupt moderation) + informations + */ + if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC) + printk(" irq moderation: static (%d ints/sec)\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC) + printk(" irq moderation: dynamic (%d ints/sec)\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + else + printk(" irq moderation: disabled\n"); + + #ifdef SK_ZEROCOPY - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) + if (pAC->ChipsetType) #ifdef USE_SK_TX_CHECKSUM printk(" scatter-gather: enabled\n"); #else @@ -4796,9 +5018,9 @@ SK_BOOL DualNet; */ StartDrvCleanupTimer(pAC); if (pAC->GIni.GIMacsFound == 2) { - ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE); + ReceiveIrq(pAC, &pAC->RxPort[1], SK_FALSE); } - ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + ReceiveIrq(pAC, &pAC->RxPort[0], SK_FALSE); } else { printk("Expiration of unknown timer\n"); } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skgepnmi.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skgepnmi.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skgepnmi.c 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skgepnmi.c 2003-08-29 10:55:44.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skgepnmi.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.108 $ - * Date: $Date: 2003/05/27 07:10:11 $ + * Version: $Revision: 1.109 $ + * Date: $Date: 2003/07/17 14:15:24 $ * Purpose: Private Network Management Interface * ****************************************************************************/ @@ -27,9 +27,21 @@ * History: * * $Log: skgepnmi.c,v $ + * Revision 1.109 2003/07/17 14:15:24 tschilli + * Bug in SkPnmiGenIoctl() fixed. + * * Revision 1.108 2003/05/27 07:10:11 tschilli * Bug in SkPnmiGenIoctl() fixed. * + * Revision 1.107 2003/05/23 13:01:10 tschilli + * Code for DIAG support added (#define SK_DIAG_SUPPORT). 
+ * Code for generic PNMI IOCTL support added. The new function + * SkPnmiGenIoctl() is used for this purpose. + * Handling of OID_SKGE_BOARDLEVEL added. + * Incorrect buffer size handling of OID_SKGE_MTU during GET action fixed. + * Return code handling in PowerManagement() fixed. + * Editorial changes. + * * Revision 1.106 2003/04/10 14:47:31 rschmidt * Fixed handling for OID_GEN_RCV_OK and OID_GEN_XMIT_OK for YUKON's GMAC * in GetPhysStatVal(). @@ -459,7 +471,7 @@ #ifndef _lint static const char SysKonnectFileId[] = - "@(#) $Id: skgepnmi.c,v 1.108 2003/05/27 07:10:11 tschilli Exp $ (C) Marvell."; + "@(#) $Id: skgepnmi.c,v 1.109 2003/07/17 14:15:24 tschilli Exp $ (C) Marvell."; #endif /* !_lint */ #include "h/skdrv1st.h" @@ -8436,32 +8448,41 @@ int HeaderLength; /* Length of desired ReturnCode = SK_PNMI_ERR_GENERAL; - Mode = *((SK_I32 *) pBuf); - Oid = *((SK_U32 *) ((char *) pBuf + sizeof(SK_I32))); + SK_MEMCPY(&Mode, pBuf, sizeof(SK_I32)); + SK_MEMCPY(&Oid, (char *) pBuf + sizeof(SK_I32), sizeof(SK_U32)); HeaderLength = sizeof(SK_I32) + sizeof(SK_U32); *pLen = *pLen - HeaderLength; - SK_MEMCPY((char *) pBuf, (char *) pBuf + HeaderLength, *pLen); + SK_MEMCPY((char *) pBuf + sizeof(SK_I32), (char *) pBuf + HeaderLength, *pLen); switch(Mode) { case SK_GET_SINGLE_VAR: - ReturnCode = SkPnmiGetVar(pAC, IoC, Oid, pBuf, pLen, - ((SK_U32) (-1)), NetIndex); - break; - case SK_SET_SINGLE_VAR: - ReturnCode = SkPnmiSetVar(pAC, IoC, Oid, pBuf, pLen, - ((SK_U32) (-1)), NetIndex); + ReturnCode = SkPnmiGetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); break; case SK_PRESET_SINGLE_VAR: - ReturnCode = SkPnmiPreSetVar(pAC, IoC, Oid, pBuf, pLen, - ((SK_U32) (-1)), NetIndex); + ReturnCode = SkPnmiPreSetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); + break; 
+ case SK_SET_SINGLE_VAR: + ReturnCode = SkPnmiSetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); break; case SK_GET_FULL_MIB: ReturnCode = SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex); break; - case SK_SET_FULL_MIB: + case SK_PRESET_FULL_MIB: ReturnCode = SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex); break; - case SK_PRESET_FULL_MIB: + case SK_SET_FULL_MIB: ReturnCode = SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex); break; default: diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skgesirq.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skgesirq.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skgesirq.c 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skgesirq.c 2003-08-29 10:55:44.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skgesirq.c * Project: Gigabit Ethernet Adapters, Common Modules - * Version: $Revision: 1.90 $ - * Date: $Date: 2003/05/28 15:35:45 $ + * Version: $Revision: 1.91 $ + * Date: $Date: 2003/07/04 12:46:22 $ * Purpose: Special IRQ module * ******************************************************************************/ @@ -27,6 +27,12 @@ * History: * * $Log: skgesirq.c,v $ + * Revision 1.91 2003/07/04 12:46:22 rschmidt + * Added debug messages in SkGePortCheckUpGmac(). + * Added error log message and new driver event SK_DRV_DOWNSHIFT_DET + * for Downshift detection (Yukon-Copper). + * Editorial changes. + * * Revision 1.90 2003/05/28 15:35:45 rschmidt * Added parameter AutoNeg in all SkGePortCheckUp...() to save code. * Added setting for AutoNeg only once in SkGePortCheckUp(). 
@@ -404,7 +410,7 @@ #if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "@(#) $Id: skgesirq.c,v 1.90 2003/05/28 15:35:45 rschmidt Exp $ (C) Marvell."; + "@(#) $Id: skgesirq.c,v 1.91 2003/07/04 12:46:22 rschmidt Exp $ (C) Marvell."; #endif #include "h/skdrv1st.h" /* Driver Specific Definitions */ @@ -707,7 +713,8 @@ int Port) /* Port Index of the port fai if (pAC->GIni.GIYukon) { /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), - (SK_U8)((pAC->GIni.GIChipRev == 0) ? GMF_CLI_TX_FC : GMF_CLI_TX_PE)); + (SK_U8)((pAC->GIni.GIChipId == CHIP_ID_YUKON && + pAC->GIni.GIChipRev == 0) ? GMF_CLI_TX_FC : GMF_CLI_TX_PE)); } #endif /* YUKON */ @@ -1357,6 +1364,8 @@ int Port) /* Which port should be chec SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */ int Rtv; /* Return value */ + Rtv = SK_HW_PS_NONE; + pPrt = &pAC->GIni.GP[Port]; if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { @@ -1385,8 +1394,6 @@ int Port) /* Which port should be chec Rtv = SkGePortCheckUpNat(pAC, IoC, Port, AutoNeg); break; #endif /* OTHER_PHY */ - default: - Rtv = SK_HW_PS_NONE; } } #endif /* GENESIS */ @@ -1533,6 +1540,7 @@ SK_BOOL AutoNeg) /* Is Auto-negotiation } else { SkXmAutoNegLipaXmac(pAC, IoC, Port, Isrc); + if (SkGePortCheckShorts(pAC, IoC, Port) == SK_HW_PS_RESTART) { return(SK_HW_PS_RESTART); } @@ -1870,6 +1878,7 @@ SK_BOOL AutoNeg) /* Is Auto-negotiation /* Error */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Master/Slave Fault port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; pPrt->PMSStatus = SK_MS_STAT_FAULT; @@ -1884,7 +1893,7 @@ SK_BOOL AutoNeg) /* Is Auto-negotiation SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNeg: %d, PhyStat: 0x%04X\n", AutoNeg, PhyStat)); + ("Port %d, ResAb: 0x%04X\n", Port, ResAb)); if (AutoNeg) { if ((PhyStat & PHY_ST_AN_OVER) != 0) { @@ -1975,23 +1984,25 @@ SK_BOOL 
AutoNeg) /* Is Auto-negotiation { SK_GEPORT *pPrt; /* GIni Port struct pointer */ int Done; - SK_U16 Isrc; /* Interrupt source */ - SK_U16 PhyStat; /* Phy Status */ - SK_U16 PhySpecStat;/* Phy Specific Status */ + SK_U16 PhyIsrc; /* PHY Interrupt source */ + SK_U16 PhyStat; /* PPY Status */ + SK_U16 PhySpecStat;/* PHY Specific Status */ SK_U16 ResAb; /* Master/Slave resolution */ + SK_EVPARA Para; pPrt = &pAC->GIni.GP[Port]; /* Read PHY Interrupt Status */ - SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &Isrc); + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &PhyIsrc); - if ((Isrc & PHY_M_IS_AN_COMPL) != 0) { - /* TBD */ + if ((PhyIsrc & PHY_M_IS_AN_COMPL) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Negotiation Completed, PhyIsrc: 0x%04X\n", PhyIsrc)); } - if ((Isrc & PHY_M_IS_DOWNSH_DET) != 0) { + if ((PhyIsrc & PHY_M_IS_LSP_CHANGE) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Downshift detected port %d\n", Port)); + ("Link Speed Changed, PhyIsrc: 0x%04X\n", PhyIsrc)); } if (pPrt->PHWLinkUp) { @@ -2012,6 +2023,7 @@ SK_BOOL AutoNeg) /* Is Auto-negotiation /* Error */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Master/Slave Fault port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; pPrt->PMSStatus = SK_MS_STAT_FAULT; @@ -2028,6 +2040,18 @@ SK_BOOL AutoNeg) /* Is Auto-negotiation return(SK_HW_PS_NONE); } + if ((PhySpecStat & PHY_M_PS_DOWNS_STAT) != 0 || + (PhyIsrc & PHY_M_IS_DOWNSH_DET) != 0) { + /* Downshift detected */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E025, SKERR_SIRQ_E025MSG); + + Para.Para64 = Port; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_DOWNSHIFT_DET, Para); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Downshift detected, PhyIsrc: 0x%04X\n", PhyIsrc)); + } + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? 
SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; @@ -2372,7 +2396,7 @@ SK_EVPARA Para) /* Event specific Param } Val8 = (SK_U8)Para.Para32[1]; if (pPrt->PMSMode != Val8) { - /* Set New link mode */ + /* Set New Role (Master/Slave) mode */ pPrt->PMSMode = Val8; /* Restart Port */ @@ -2463,7 +2487,7 @@ SK_U16 IStatus) /* Interrupt Status */ if ((IStatus & PHY_B_IS_PSE) != 0) { /* Incorrectable pair swap error */ - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E022, + SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E022, SKERR_SIRQ_E022MSG); } @@ -2531,14 +2555,11 @@ SK_U16 IStatus) /* Interrupt Status */ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E023, SKERR_SIRQ_E023MSG); } - if ((IStatus & PHY_M_IS_LSP_CHANGE) != 0) { - /* TBD */ - } - if ((IStatus & PHY_M_IS_FIFO_ERROR) != 0) { /* FIFO Overflow/Underrun Error */ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E024, SKERR_SIRQ_E024MSG); } + } /* SkPhyIsrGmac */ #endif /* YUKON */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skproc.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skproc.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skproc.c 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skproc.c 2003-08-29 10:55:44.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skproc.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.6 $ - * Date: $Date: 2003/05/26 12:58:53 $ + * Version: $Revision: 1.2 $ + * Date: $Date: 2003/08/12 16:45:29 $ * Purpose: Funktions to display statictic data * ******************************************************************************/ @@ -28,6 +28,19 @@ * History: * * $Log: skproc.c,v $ + * Revision 1.2 2003/08/12 16:45:29 mlindner + * Add: Removed SkNumber and SkDoDiv + * Add: Counter output as (unsigned long long) + * + * Revision 1.1 2003/07/18 13:39:57 rroesler + * Fix: Re-enter after CVS crash + * + * Revision 1.8 2003/06/27 14:41:42 rroesler + * Corrected compiler-warning kernel 2.2 + * + * 
Revision 1.7 2003/06/27 12:09:51 rroesler + * corrected minor edits + * * Revision 1.6 2003/05/26 12:58:53 mlindner * Add: Support for Kernel 2.5/2.6 * @@ -80,22 +93,9 @@ #include "h/skdrv1st.h" #include "h/skdrv2nd.h" -#define ZEROPAD 1 /* pad with zero */ -#define SIGN 2 /* unsigned/signed long */ -#define PLUS 4 /* show plus */ -#define SPACE 8 /* space if plus */ -#define LEFT 16 /* left justified */ -#define SPECIALX 32 /* 0x */ -#define LARGE 64 - -extern struct net_device *SkGeRootDev; -extern char * SkNumber( - char * str, - long long num, - int base, - int size, - int precision, - int type); + + extern struct net_device *SkGeRootDev; + int sk_proc_read(char *buffer, char **buffer_location, @@ -130,7 +130,6 @@ void *data) int i; DEV_NET *pNet; SK_AC *pAC; - char test_buf[100]; char sens_msg[50]; unsigned long Flags; unsigned int Size; @@ -240,13 +239,11 @@ void *data) "\nReceive statistics\n\n"); len += sprintf(buffer + len, - "Received bytes %s\n", - SkNumber(test_buf, pPnmiStat->StatRxOctetsOkCts, - 10,0,-1,0)); - len += sprintf(buffer + len, - "Received packets %s\n", - SkNumber(test_buf, pPnmiStat->StatRxOkCts, - 10,0,-1,0)); + "Received bytes %Ld\n", + (unsigned long long) pPnmiStat->StatRxOctetsOkCts); + len += sprintf(buffer + len, + "Received packets %Ld\n", + (unsigned long long) pPnmiStat->StatRxOkCts); #if 0 if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC && pAC->HWRevision < 12) { @@ -260,71 +257,56 @@ void *data) pPnmiStat->StatRxTooLongCts; len += sprintf(buffer + len, - "Receive errors %s\n", - SkNumber(test_buf, pPnmiStruct->InErrorsCts, - 10,0,-1,0)); - len += sprintf(buffer + len, - "Receive droped %s\n", - SkNumber(test_buf, pPnmiStruct->RxNoBufCts, - 10,0,-1,0)); - len += sprintf(buffer + len, - "Received multicast %s\n", - SkNumber(test_buf, pPnmiStat->StatRxMulticastOkCts, - 10,0,-1,0)); + "Receive errors %Ld\n", + (unsigned long long) pPnmiStruct->InErrorsCts); + len += sprintf(buffer + len, + "Receive dropped %Ld\n", + (unsigned long 
long) pPnmiStruct->RxNoBufCts); + len += sprintf(buffer + len, + "Received multicast %Ld\n", + (unsigned long long) pPnmiStat->StatRxMulticastOkCts); len += sprintf(buffer + len, "Receive error types\n"); len += sprintf(buffer + len, - " length %s\n", - SkNumber(test_buf, pPnmiStat->StatRxRuntCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " buffer overflow %s\n", - SkNumber(test_buf, pPnmiStat->StatRxFifoOverflowCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " bad crc %s\n", - SkNumber(test_buf, pPnmiStat->StatRxFcsCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " framing %s\n", - SkNumber(test_buf, pPnmiStat->StatRxFramingCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " missed frames %s\n", - SkNumber(test_buf, pPnmiStat->StatRxMissedCts, - 10, 0, -1, 0)); + " length %Ld\n", + (unsigned long long) pPnmiStat->StatRxRuntCts); + len += sprintf(buffer + len, + " buffer overflow %Ld\n", + (unsigned long long) pPnmiStat->StatRxFifoOverflowCts); + len += sprintf(buffer + len, + " bad crc %Ld\n", + (unsigned long long) pPnmiStat->StatRxFcsCts); + len += sprintf(buffer + len, + " framing %Ld\n", + (unsigned long long) pPnmiStat->StatRxFramingCts); + len += sprintf(buffer + len, + " missed frames %Ld\n", + (unsigned long long) pPnmiStat->StatRxMissedCts); if (pNet->Mtu > 1500) pPnmiStat->StatRxTooLongCts = 0; len += sprintf(buffer + len, - " too long %s\n", - SkNumber(test_buf, pPnmiStat->StatRxTooLongCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " carrier extension %s\n", - SkNumber(test_buf, pPnmiStat->StatRxCextCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " too short %s\n", - SkNumber(test_buf, pPnmiStat->StatRxShortsCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " symbol %s\n", - SkNumber(test_buf, pPnmiStat->StatRxSymbolCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " LLC MAC size %s\n", - SkNumber(test_buf, pPnmiStat->StatRxIRLengthCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " 
carrier event %s\n", - SkNumber(test_buf, pPnmiStat->StatRxCarrierCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " jabber %s\n", - SkNumber(test_buf, pPnmiStat->StatRxJabberCts, - 10, 0, -1, 0)); + " too long %Ld\n", + (unsigned long long) pPnmiStat->StatRxTooLongCts); + len += sprintf(buffer + len, + " carrier extension %Ld\n", + (unsigned long long) pPnmiStat->StatRxCextCts); + len += sprintf(buffer + len, + " too short %Ld\n", + (unsigned long long) pPnmiStat->StatRxShortsCts); + len += sprintf(buffer + len, + " symbol %Ld\n", + (unsigned long long) pPnmiStat->StatRxSymbolCts); + len += sprintf(buffer + len, + " LLC MAC size %Ld\n", + (unsigned long long) pPnmiStat->StatRxIRLengthCts); + len += sprintf(buffer + len, + " carrier event %Ld\n", + (unsigned long long) pPnmiStat->StatRxCarrierCts); + len += sprintf(buffer + len, + " jabber %Ld\n", + (unsigned long long) pPnmiStat->StatRxJabberCts); /*Transmit statistics */ @@ -332,42 +314,34 @@ void *data) "\nTransmit statistics\n\n"); len += sprintf(buffer + len, - "Transmited bytes %s\n", - SkNumber(test_buf, pPnmiStat->StatTxOctetsOkCts, - 10,0,-1,0)); - len += sprintf(buffer + len, - "Transmited packets %s\n", - SkNumber(test_buf, pPnmiStat->StatTxOkCts, - 10,0,-1,0)); - len += sprintf(buffer + len, - "Transmit errors %s\n", - SkNumber(test_buf, pPnmiStat->StatTxSingleCollisionCts, - 10,0,-1,0)); - len += sprintf(buffer + len, - "Transmit dropped %s\n", - SkNumber(test_buf, pPnmiStruct->TxNoBufCts, - 10,0,-1,0)); - len += sprintf(buffer + len, - "Transmit collisions %s\n", - SkNumber(test_buf, pPnmiStat->StatTxSingleCollisionCts, - 10,0,-1,0)); + "Transmited bytes %Ld\n", + (unsigned long long) pPnmiStat->StatTxOctetsOkCts); + len += sprintf(buffer + len, + "Transmited packets %Ld\n", + (unsigned long long) pPnmiStat->StatTxOkCts); + len += sprintf(buffer + len, + "Transmit errors %Ld\n", + (unsigned long long) pPnmiStat->StatTxSingleCollisionCts); + len += sprintf(buffer + len, + "Transmit dropped 
%Ld\n", + (unsigned long long) pPnmiStruct->TxNoBufCts); + len += sprintf(buffer + len, + "Transmit collisions %Ld\n", + (unsigned long long) pPnmiStat->StatTxSingleCollisionCts); len += sprintf(buffer + len, "Transmit error types\n"); len += sprintf(buffer + len, " excessive collision %ld\n", pAC->stats.tx_aborted_errors); len += sprintf(buffer + len, - " carrier %s\n", - SkNumber(test_buf, pPnmiStat->StatTxCarrierCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " fifo underrun %s\n", - SkNumber(test_buf, pPnmiStat->StatTxFifoUnderrunCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " heartbeat %s\n", - SkNumber(test_buf, pPnmiStat->StatTxCarrierCts, - 10, 0, -1, 0)); + " carrier %Ld\n", + (unsigned long long) pPnmiStat->StatTxCarrierCts); + len += sprintf(buffer + len, + " fifo underrun %Ld\n", + (unsigned long long) pPnmiStat->StatTxFifoUnderrunCts); + len += sprintf(buffer + len, + " heartbeat %Ld\n", + (unsigned long long) pPnmiStat->StatTxCarrierCts); len += sprintf(buffer + len, " window %ld\n", pAC->stats.tx_window_errors); @@ -389,139 +363,3 @@ void *data) } - - - -/***************************************************************************** - * - * SkDoDiv - convert 64bit number - * - * Description: - * This function "converts" a long long number. 
- * - * Returns: - * remainder of division - */ -static long SkDoDiv (long long Dividend, int Divisor, long long *pErg) -{ - long Rest; - long long Ergebnis; - long Akku; - - - Akku = Dividend >> 32; - - Ergebnis = ((long long) (Akku / Divisor)) << 32; - Rest = Akku % Divisor ; - - Akku = Rest << 16; - Akku |= ((Dividend & 0xFFFF0000) >> 16); - - - Ergebnis += ((long long) (Akku / Divisor)) << 16; - Rest = Akku % Divisor ; - - Akku = Rest << 16; - Akku |= (Dividend & 0xFFFF); - - Ergebnis += (Akku / Divisor); - Rest = Akku % Divisor ; - - *pErg = Ergebnis; - return (Rest); -} - - -#if 0 -#define do_div(n,base) ({ \ -long long __res; \ -__res = ((unsigned long long) n) % (unsigned) base; \ -n = ((unsigned long long) n) / (unsigned) base; \ -__res; }) - -#endif - - -/***************************************************************************** - * - * SkNumber - Print results - * - * Description: - * This function converts a long long number into a string. - * - * Returns: - * number as string - */ -char * SkNumber(char * str, long long num, int base, int size, int precision - ,int type) -{ - char c,sign,tmp[66], *strorg = str; - const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; - int i; - - if (type & LARGE) - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - if (type & LEFT) - type &= ~ZEROPAD; - if (base < 2 || base > 36) - return 0; - c = (type & ZEROPAD) ? 
'0' : ' '; - sign = 0; - if (type & SIGN) { - if (num < 0) { - sign = '-'; - num = -num; - size--; - } else if (type & PLUS) { - sign = '+'; - size--; - } else if (type & SPACE) { - sign = ' '; - size--; - } - } - if (type & SPECIALX) { - if (base == 16) - size -= 2; - else if (base == 8) - size--; - } - i = 0; - if (num == 0) - tmp[i++]='0'; - else while (num != 0) - tmp[i++] = digits[SkDoDiv(num,base, &num)]; - - if (i > precision) - precision = i; - size -= precision; - if (!(type&(ZEROPAD+LEFT))) - while(size-->0) - *str++ = ' '; - if (sign) - *str++ = sign; - if (type & SPECIALX) { - if (base==8) - *str++ = '0'; - else if (base==16) { - *str++ = '0'; - *str++ = digits[33]; - } - } - if (!(type & LEFT)) - while (size-- > 0) - *str++ = c; - while (i < precision--) - *str++ = '0'; - while (i-- > 0) - *str++ = tmp[i]; - while (size-- > 0) - *str++ = ' '; - - str[0] = '\0'; - - return strorg; -} - - - diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skxmac2.c linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skxmac2.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/net/sk98lin/skxmac2.c 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/net/sk98lin/skxmac2.c 2003-08-29 10:55:44.000000000 +0200 @@ -2,8 +2,8 @@ * * Name: skxmac2.c * Project: Gigabit Ethernet Adapters, Common Modules - * Version: $Revision: 1.97 $ - * Date: $Date: 2003/05/28 15:53:47 $ + * Version: $Revision: 1.99 $ + * Date: $Date: 2003/07/11 12:19:33 $ * Purpose: Contains functions to initialize the MACs and PHYs * ******************************************************************************/ @@ -27,6 +27,17 @@ * History: * * $Log: skxmac2.c,v $ + * Revision 1.99 2003/07/11 12:19:33 rschmidt + * Reduced init values for Master & Slave downshift counters to + * minimum values. + * Editorial changes. + * + * Revision 1.98 2003/07/04 12:53:56 rschmidt + * Changed setting of downshift feature in SkGmInitPhyMarv(). 
+ * Enabled downshift feature only for para 'Speed' set to 'Auto'. + * Changed init values for Master & Slave downshift counters. + * Editorial changes. + * * Revision 1.97 2003/05/28 15:53:47 rschmidt * Removed setting of Yukon PHY's 'force link good' in loopback mode. * Replaced call pFnMacOverflow() with SkXmOverflowStatus() resp. @@ -464,7 +475,7 @@ typedef struct s_PhyHack { #if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "@(#) $Id: skxmac2.c,v 1.97 2003/05/28 15:53:47 rschmidt Exp $ (C) Marvell."; + "@(#) $Id: skxmac2.c,v 1.99 2003/07/11 12:19:33 rschmidt Exp $ (C) Marvell."; #endif #ifdef GENESIS @@ -515,7 +526,7 @@ SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* I/O Context */ int Port, /* Port Index (MAC_1 + n) */ int PhyReg, /* Register Address (Offset) */ -SK_U16 SK_FAR *pVal) /* Pointer to Value */ +SK_U16 SK_FAR *pVal) /* Pointer to Value */ { SK_U16 Mmu; SK_GEPORT *pPrt; @@ -1979,7 +1990,7 @@ int Port) /* Port Index (MAC_1 + n) */ #ifdef WA_DEV_16 /* WA for deviation #16 */ - if (pAC->GIni.GIChipRev == 0) { + if (pAC->GIni.GIChipId == CHIP_ID_YUKON && pAC->GIni.GIChipRev == 0) { /* swap the address bytes */ SWord = ((SWord & 0xff00) >> 8) | ((SWord & 0x00ff) << 8); @@ -2369,7 +2380,7 @@ SK_BOOL DoLoop) /* Should a Phy LoopBac /* Write AutoNeg Advertisement Register */ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, Ctrl3); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Auto-Neg. Adv. Reg=0x%04X\n", Ctrl3)); + ("Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3)); if (DoLoop) { /* Set the Phy Loopback bit, too */ @@ -2438,6 +2449,10 @@ SK_BOOL DoLoop) /* Should a Phy LoopBac AutoNeg = SK_TRUE; } + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyMarv: Port %d, auto-negotiation %s\n", + Port, AutoNeg ? 
"ON" : "OFF")); + #ifdef VCPU VCPUprintf(0, "SkGmInitPhyMarv(), Port=%u, DoLoop=%u\n", Port, DoLoop); @@ -2448,15 +2463,15 @@ SK_BOOL DoLoop) /* Should a Phy LoopBac SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_MAC_POW_UP); } - else { + else if (AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_AUTO) { /* Read Ext. PHY Specific Control */ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl); ExtPhyCtrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | PHY_M_EC_MAC_S_MSK); - ExtPhyCtrl |= PHY_M_EC_M_DSC(1) | PHY_M_EC_S_DSC(1) | - PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + ExtPhyCtrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) | + PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, ExtPhyCtrl); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, @@ -2488,8 +2503,6 @@ SK_BOOL DoLoop) /* Should a Phy LoopBac /* Auto-negotiation ? */ if (!AutoNeg) { - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("InitPhyMarv: no auto-negotiation Port %d\n", Port)); if (pPrt->PLinkMode == SK_LMODE_FULL) { /* Set Full Duplex Mode */ @@ -2526,9 +2539,6 @@ SK_BOOL DoLoop) /* Should a Phy LoopBac */ } else { - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("InitPhyMarv: with auto-negotiation Port %d\n", Port)); - PhyCtrl |= PHY_CT_ANE; if (pAC->GIni.GICopperType) { @@ -2640,7 +2650,7 @@ SK_BOOL DoLoop) /* Should a Phy LoopBac */ /* Program PHY register 30 as 16'h0708 for simulation speed up */ - SkGmPhyWrite(pAC, IoC, Port, 30, 0x0708); + SkGmPhyWrite(pAC, IoC, Port, 30, 0x0700 /* 0x0708 */); VCpuWait(2000); @@ -2878,8 +2888,7 @@ SK_BOOL DoLoop) /* Should a Phy LoopBac /* Write AutoNeg Advertisement Register */ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_AUNE_ADV, Ctrl3); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Auto-Neg. Adv. 
Reg=0x%04X\n", Ctrl3)); - + ("Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3)); if (DoLoop) { /* Set the Phy Loopback bit, too */ @@ -2991,7 +3000,7 @@ int Port) /* Port Index (MAC_1 + n) */ SK_U16 LPAb; /* Link Partner Ability */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegDoneXmac, Port %d\n",Port)); + ("AutoNegDoneXmac, Port %d\n", Port)); pPrt = &pAC->GIni.GP[Port]; @@ -3211,7 +3220,7 @@ int Port) /* Port Index (MAC_1 + n) */ /* Check Speed & Duplex resolved */ if ((AuxStat & PHY_M_PS_SPDUP_RES) == 0) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegFail: Speed & Duplex not resolved Port %d\n", Port)); + ("AutoNegFail: Speed & Duplex not resolved, Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; return(SK_AND_DUP_CAP); @@ -3284,7 +3293,7 @@ int Port) /* Port Index (MAC_1 + n) */ SK_U16 QuickStat; /* Auxiliary Status */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegDoneLone, Port %d\n",Port)); + ("AutoNegDoneLone, Port %d\n", Port)); pPrt = &pAC->GIni.GP[Port]; /* Get PHY parameters */ @@ -3408,6 +3417,8 @@ int Port) /* Port Index (MAC_1 + n) */ SK_GEPORT *pPrt; int Rtv; + Rtv = SK_AND_OK; + pPrt = &pAC->GIni.GP[Port]; #ifdef GENESIS @@ -4311,7 +4322,7 @@ SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ unsigned int Port, /* Port Index (MAC_1 + n) */ SK_U16 StatAddr, /* MIB counter base address */ -SK_U32 SK_FAR *pVal) /* ptr to return statistic value */ +SK_U32 SK_FAR *pVal) /* ptr to return statistic value */ { if ((StatAddr < XM_TXF_OK) || (StatAddr > XM_RXF_MAX_SZ)) { @@ -4373,7 +4384,7 @@ int SkXmOverflowStatus( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ unsigned int Port, /* Port Index (MAC_1 + n) */ -SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U16 IStatus, /* Interupt Status from MAC */ SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */ { SK_U64 Status; /* Overflow status */ @@ -4518,7 +4529,7 @@ int SkGmOverflowStatus( SK_AC 
*pAC, /* adapter context */ SK_IOC IoC, /* IO context */ unsigned int Port, /* Port Index (MAC_1 + n) */ -SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U16 IStatus, /* Interupt Status from MAC */ SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */ { SK_U64 Status; /* Overflow status */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/Config.in linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/Config.in --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/Config.in 2003-08-25 18:26:32.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/Config.in 2003-08-26 17:59:08.000000000 +0200 @@ -67,7 +67,8 @@ dep_tristate 'Always IN2000 SCSI support dep_tristate 'AM53/79C974 PCI SCSI support' CONFIG_SCSI_AM53C974 $CONFIG_SCSI $CONFIG_PCI dep_tristate 'LSI MegaRAID support (driver 1.18 series)' CONFIG_SCSI_MEGARAID $CONFIG_SCSI dep_tristate 'LSI MegaRAID support (driver 2.00 series)' CONFIG_SCSI_MEGARAID2 $CONFIG_SCSI -dep_tristate 'iSCSI support (scsi-over-network)' CONFIG_SCSI_ISCSI $CONFIG_SCSI m +dep_tristate 'iSCSI support (SCSI-over-Network) (driver 3.1.x series)' CONFIG_SCSI_ISCSI $CONFIG_SCSI m +dep_tristate 'iSCSI support (SCSI-over-Network) (driver 3.4.x series)' CONFIG_SCSI_ISCSI_NEW $CONFIG_SCSI m dep_tristate 'BusLogic SCSI support' CONFIG_SCSI_BUSLOGIC $CONFIG_SCSI if [ "$CONFIG_SCSI_BUSLOGIC" != "n" ]; then diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/Makefile linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/Makefile --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/Makefile 2003-08-25 18:26:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/Makefile 2003-08-26 17:59:08.000000000 +0200 @@ -32,6 +32,7 @@ subdir-$(CONFIG_SCSI_QLOGIC_QLA2XXX) + subdir-$(CONFIG_SCSI_NEWISP) += isp subdir-$(CONFIG_PCMCIA) += pcmcia subdir-$(CONFIG_SCSI_ISCSI) += iscsi +subdir-$(CONFIG_SCSI_ISCSI_NEW) += iscsi-new obj-$(CONFIG_SCSI) += scsi_mod.o diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/ips.c 
linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/ips.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/ips.c 2003-08-25 18:24:52.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/ips.c 2003-08-26 00:05:08.000000000 +0200 @@ -5,8 +5,8 @@ /* Jack Hammer, Adaptec, Inc. */ /* David Jeffery, Adaptec, Inc. */ /* */ -/* Copyright (C) 2000 IBM Corporation */ -/* Copyright (C) 2003 Adaptec, Inc. */ +/* Copyright (C) 2000 IBM Corporation */ +/* Copyright (C) 2002,2003 Adaptec, Inc. */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ @@ -83,7 +83,7 @@ /* 2.3.18 and later */ /* - Sync with other changes from the 2.3 kernels */ /* 4.00.06 - Fix timeout with initial FFDC command */ -/* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig */ +/* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig */ /* 4.10.00 - Add support for ServeRAID 4M/4L */ /* 4.10.13 - Fix for dynamic unload and proc file system */ /* 4.20.03 - Rename version to coincide with new release schedules */ @@ -130,6 +130,7 @@ /* 5.10.15 - remove unused code (sem, macros, etc.) 
*/ /* 5.30.00 - use __devexit_p() */ /* 6.00.00 - Add 6x Adapters and Battery Flash */ +/* 6.10.00 - Remove 1G Addressing Limitations */ /*****************************************************************************/ /* @@ -150,7 +151,7 @@ * nommap - Don't use memory mapped I/O * ioctlsize - Initial size of the IOCTL buffer */ - + #include #include #include @@ -182,75 +183,35 @@ #include #include -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - #include - #include -#else - #include -#endif +#include +#include #include #ifdef MODULE - static char *ips = NULL; - MODULE_PARM(ips, "s"); +static char *ips = NULL; +MODULE_PARM(ips, "s"); #endif /* * DRIVER_VER */ -#define IPS_VERSION_HIGH "6.00" -#define IPS_VERSION_LOW ".26 " - - -#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) -struct proc_dir_entry proc_scsi_ips = { - 0, - 3, "ips", - S_IFDIR | S_IRUGO | S_IXUGO, 2 -}; -#endif +#define IPS_VERSION_HIGH "6.10" +#define IPS_VERSION_LOW ".24 " -#if !defined(__i386__) && !defined(__ia64__) - #error "This driver has only been tested on the x86/ia64 platforms" -#endif - -#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0) - #error "This driver only works with kernel 2.2.0 and later" -#elif LINUX_VERSION_CODE <= LinuxVersionCode(2,3,18) - #define dma_addr_t uint32_t - - static inline void *pci_alloc_consistent(struct pci_dev *dev,int size, - dma_addr_t *dmahandle) { - void * ptr = kmalloc(size, GFP_ATOMIC); - if(ptr){ - *dmahandle = (uint32_t)virt_to_bus(ptr); - } - return ptr; - } - - #define pci_free_consistent(a,size,address,dmahandle) kfree(address) - - #define pci_map_sg(a,b,n,z) (n) - #define pci_unmap_sg(a,b,c,d) - #define pci_map_single(a,b,c,d) ((uint32_t)virt_to_bus(b)) - #define pci_unmap_single(a,b,c,d) - #ifndef sg_dma_address - #define sg_dma_address(x) ((uint32_t)virt_to_bus((x)->address)) - #define sg_dma_len(x) ((x)->length) - #endif - #define pci_unregister_driver(x) +#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__) +#error "This 
driver has only been tested on the x86/ia64 platforms" #endif #if LINUX_VERSION_CODE <= LinuxVersionCode(2,5,0) - #define IPS_SG_ADDRESS(sg) ((sg)->address) - #define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags) - #define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags) +#define IPS_SG_ADDRESS(sg) ((sg)->address) +#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags) +#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags) #else - #define IPS_SG_ADDRESS(sg) (page_address((sg)->page) ? \ +#define IPS_SG_ADDRESS(sg) (page_address((sg)->page) ? \ page_address((sg)->page)+(sg)->offset : 0) - #define IPS_LOCK_SAVE(lock,flags) spin_lock(lock) - #define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock(lock) +#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0) +#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0) #endif #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ @@ -259,218 +220,144 @@ struct proc_dir_entry proc_scsi_ips = { scsi_to_pci_dma_dir(scb->scsi_cmd->sc_data_direction)) #ifdef IPS_DEBUG - #define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n"); - #define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n"); - #define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v); +#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n"); +#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n"); +#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v); #else - #define METHOD_TRACE(s, i) - #define DEBUG(i, s) - #define DEBUG_VAR(i, s, v...) +#define METHOD_TRACE(s, i) +#define DEBUG(i, s) +#define DEBUG_VAR(i, s, v...) 
#endif /* * global variables */ -static const char ips_name[] = "ips"; -static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */ -static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */ -static unsigned int ips_next_controller = 0; -static unsigned int ips_num_controllers = 0; -static unsigned int ips_released_controllers = 0; -static int ips_cmd_timeout = 60; -static int ips_reset_timeout = 60 * 5; -static int ips_force_memio = 1; /* Always use Memory Mapped I/O */ -static int ips_force_i2o = 1; /* Always use I2O command delivery */ -static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */ -static int ips_cd_boot = 0; /* Booting from Manager CD */ -static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */ -static long ips_FlashDataInUse = 0; /* CD Boot - Flash Data In Use Flag */ -static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */ +static const char ips_name[] = "ips"; +static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */ +static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */ +static unsigned int ips_next_controller; +static unsigned int ips_num_controllers; +static unsigned int ips_released_controllers; +static int ips_cmd_timeout = 60; +static int ips_reset_timeout = 60 * 5; +static int ips_force_memio = 1; /* Always use Memory Mapped I/O */ +static int ips_force_i2o = 1; /* Always use I2O command delivery */ +static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */ +static int ips_cd_boot; /* Booting from Manager CD */ +static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */ +static long ips_FlashDataInUse; /* CD Boot - Flash Data In Use Flag */ +static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */ +static Scsi_Host_Template ips_driver_template = IPS; -IPS_DEFINE_COMPAT_TABLE( Compatable ); /* Version Compatability Table */ 
+IPS_DEFINE_COMPAT_TABLE(Compatable); /* Version Compatability Table */ - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) /* This table describes all ServeRAID Adapters */ - static struct pci_device_id ips_pci_table[] __devinitdata = { - { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - /* This table describes only Anaconda Family Adapters */ - static struct pci_device_id ips_pci_table_anaconda[] __devinitdata = { - { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - /* This table describes only Sarasota ( ServeRAID 5i ) Adapters */ - static struct pci_device_id ips_pci_table_5i[] __devinitdata = { - { 0x1014, 0x01BD, PCI_ANY_ID, 0x259, 0, 0 }, - { 0x1014, 0x01BD, PCI_ANY_ID, 0x258, 0, 0 }, - { 0, } - }; - - /* This table describes only Sebring ( ServeRAID 6i ) Adapters */ - static struct pci_device_id ips_pci_table_6i[] __devinitdata = { - { 0x9005, 0x0250, PCI_ANY_ID, 0x28C, 0, 0 }, - { 0, } - }; - - /* This table describes all i960 ( 4M, 4Mx, 4L, 4Lx ) Adapters */ - static struct pci_device_id ips_pci_table_i960[] __devinitdata = { - { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - /* This table describes all Adaptec ( 6M ) Adapters */ - static struct pci_device_id ips_pci_table_adaptec[] __devinitdata = { - { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - MODULE_DEVICE_TABLE( pci, ips_pci_table ); - - static char ips_hot_plug_name[] = "ips"; - - static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent); - static void ips_remove_device(struct pci_dev *pci_dev); - - struct pci_driver ips_pci_driver = { - name: ips_hot_plug_name, - id_table: ips_pci_table, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_anaconda = { - name: ips_hot_plug_name, - id_table: ips_pci_table_anaconda, - probe: 
ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_5i = { - name: ips_hot_plug_name, - id_table: ips_pci_table_5i, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_6i = { - name: ips_hot_plug_name, - id_table: ips_pci_table_6i, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_i960 = { - name: ips_hot_plug_name, - id_table: ips_pci_table_i960, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_adaptec = { - name: ips_hot_plug_name, - id_table: ips_pci_table_adaptec, - probe: ips_insert_device, - remove: ips_remove_device, - }; +static struct pci_device_id ips_pci_table[] __devinitdata = { + {0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, + {0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, + {0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, + {0,} +}; -#endif +MODULE_DEVICE_TABLE(pci, ips_pci_table); + +static char ips_hot_plug_name[] = "ips"; + +static int __devinit ips_insert_device(struct pci_dev *pci_dev, + const struct pci_device_id *ent); +static void ips_remove_device(struct pci_dev *pci_dev); + +struct pci_driver ips_pci_driver = { + .name = ips_hot_plug_name, + .id_table = ips_pci_table, + .probe = ips_insert_device, + .remove = ips_remove_device, +}; /* * Necessary forward function protoypes */ static int ips_halt(struct notifier_block *nb, ulong event, void *buf); -#define MAX_ADAPTER_NAME 11 +#define MAX_ADAPTER_NAME 15 static char ips_adapter_name[][30] = { - "ServeRAID", - "ServeRAID II", - "ServeRAID on motherboard", - "ServeRAID on motherboard", - "ServeRAID 3H", - "ServeRAID 3L", - "ServeRAID 4H", - "ServeRAID 4M", - "ServeRAID 4L", - "ServeRAID 4Mx", - "ServeRAID 4Lx", - "ServeRAID 5i", - "ServeRAID 5i", - "ServeRAID 6M", - "ServeRAID 6i" + "ServeRAID", + "ServeRAID II", + "ServeRAID on motherboard", + "ServeRAID on motherboard", + "ServeRAID 3H", + "ServeRAID 3L", + 
"ServeRAID 4H", + "ServeRAID 4M", + "ServeRAID 4L", + "ServeRAID 4Mx", + "ServeRAID 4Lx", + "ServeRAID 5i", + "ServeRAID 5i", + "ServeRAID 6M", + "ServeRAID 6i" }; -/* Init State 0 means we're only looking for a device to provide us the BIOS Adapter Ordering Table */ -/* Init State 1 is when we are actually enumerating the devices. */ -static int InitState; -/* IF BIOS wants to tell us the enumeration order, it puts a table in NVRAM Page 5 */ -static uint8_t AdapterOrder[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - static struct notifier_block ips_notifier = { - ips_halt, NULL, 0 + ips_halt, NULL, 0 }; /* * Direction table */ static char ips_command_direction[] = { -IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, -IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, -IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT, -IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, -IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN, -IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, -IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, -IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE, -IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, -IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_UNK, 
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE, -IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT, -IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE, -IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, 
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK + IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, + IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT, + IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN, + IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE, + IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, + IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, 
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE, + IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE, + IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK 
}; /* @@ -480,9 +367,9 @@ int ips_detect(Scsi_Host_Template *); int ips_release(struct Scsi_Host *); int ips_eh_abort(Scsi_Cmnd *); int ips_eh_reset(Scsi_Cmnd *); -int ips_queue(Scsi_Cmnd *, void (*) (Scsi_Cmnd *)); +int ips_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); int ips_biosparam(Disk *, kdev_t, int *); -const char * ips_info(struct Scsi_Host *); +const char *ips_info(struct Scsi_Host *); void do_ipsintr(int, void *, struct pt_regs *); static int ips_hainit(ips_ha_t *); static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *); @@ -527,10 +414,9 @@ static int ips_verify_bios_memio(ips_ha_ static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *); -static void ips_free_flash_copperhead(ips_ha_t *ha); +static void ips_free_flash_copperhead(ips_ha_t * ha); static void ips_get_bios_version(ips_ha_t *, int); static void ips_identify_controller(ips_ha_t *); -static void ips_select_queue_depth(struct Scsi_Host *, Scsi_Device *); static void ips_chkstatus(ips_ha_t *, IPS_STATUS *); static void ips_enable_int_copperhead(ips_ha_t *); static void ips_enable_int_copperhead_memio(ips_ha_t *); @@ -553,40 +439,43 @@ static void ips_ffdc_time(ips_ha_t *); static uint32_t ips_statupd_copperhead(ips_ha_t *); static uint32_t ips_statupd_copperhead_memio(ips_ha_t *); static uint32_t ips_statupd_morpheus(ips_ha_t *); -static ips_scb_t * ips_getscb(ips_ha_t *); +static ips_scb_t *ips_getscb(ips_ha_t *); static inline void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *); static inline void ips_putq_scb_tail(ips_scb_queue_t *, ips_scb_t *); static inline void ips_putq_wait_head(ips_wait_queue_t *, Scsi_Cmnd *); static inline void ips_putq_wait_tail(ips_wait_queue_t *, Scsi_Cmnd *); -static inline void ips_putq_copp_head(ips_copp_queue_t *, ips_copp_wait_item_t *); -static inline void 
ips_putq_copp_tail(ips_copp_queue_t *, ips_copp_wait_item_t *); -static inline ips_scb_t * ips_removeq_scb_head(ips_scb_queue_t *); -static inline ips_scb_t * ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); -static inline Scsi_Cmnd * ips_removeq_wait_head(ips_wait_queue_t *); -static inline Scsi_Cmnd * ips_removeq_wait(ips_wait_queue_t *, Scsi_Cmnd *); -static inline ips_copp_wait_item_t * ips_removeq_copp(ips_copp_queue_t *, ips_copp_wait_item_t *); -static inline ips_copp_wait_item_t * ips_removeq_copp_head(ips_copp_queue_t *); +static inline void ips_putq_copp_head(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static inline void ips_putq_copp_tail(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static inline ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *); +static inline ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); +static inline Scsi_Cmnd *ips_removeq_wait_head(ips_wait_queue_t *); +static inline Scsi_Cmnd *ips_removeq_wait(ips_wait_queue_t *, Scsi_Cmnd *); +static inline ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static inline ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *); static int ips_is_passthru(Scsi_Cmnd *); static int ips_make_passthru(ips_ha_t *, Scsi_Cmnd *, ips_scb_t *, int); static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *); +static void ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, + unsigned int count); +static void ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned int count); -int ips_proc_info(char *, char **, off_t, int, int, int); +int ips_proc_info(char *, char **, off_t, int, int, int); static int ips_host_info(ips_ha_t *, char *, off_t, int); static void copy_mem_info(IPS_INFOSTR *, char *, int); static int copy_info(IPS_INFOSTR *, char *, ...); -static int ips_get_version_info(ips_ha_t *ha, IPS_VERSION_DATA *Buffer, int intr ); -static void ips_version_check(ips_ha_t *ha, int 
intr); -static int ips_abort_init(ips_ha_t *ha, struct Scsi_Host *sh, int index); -static int ips_init_phase2( int index ); - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) -static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr ); -#else -static int ips_init_oldphase1(Scsi_Host_Template *SHT); -#endif +static int ips_get_version_info(ips_ha_t * ha, IPS_VERSION_DATA * Buffer, + int intr); +static void ips_version_check(ips_ha_t * ha, int intr); +static int ips_abort_init(ips_ha_t * ha, int index); +static int ips_init_phase2(int index); +static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr); +static int ips_register_scsi(int index); /*--------------------------------------------------------------------------*/ /* Exported Functions */ /*--------------------------------------------------------------------------*/ @@ -600,92 +489,52 @@ static int ips_init_oldphase1(Scsi_Host_ /* setup parameters to the driver */ /* */ /****************************************************************************/ -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) static int -ips_setup(char *ips_str) { -#else -void -ips_setup(char *ips_str, int *dummy) { -#endif - - int i; - char *key; - char *value; - IPS_OPTION options[] = { - {"noi2o", &ips_force_i2o, 0}, - {"nommap", &ips_force_memio, 0}, - {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE}, - {"cdboot", &ips_cd_boot, 0}, - {"maxcmds", &MaxLiteCmds, 32}, - }; - - /* Don't use strtok() anymore ( if 2.4 Kernel or beyond ) */ -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - /* Search for value */ - while ((key = strsep(&ips_str, ",."))) { - if (!*key) - continue; - value = strchr(key, ':'); - if (value) - *value++ = '\0'; - /* - * We now have key/value pairs. 
- * Update the variables - */ - for (i = 0; i < (sizeof(options) / sizeof(options[0])); i++) { - if (strnicmp(key, options[i].option_name, strlen(options[i].option_name)) == 0) { - if (value) - *options[i].option_flag = simple_strtoul(value, NULL, 0); - else - *options[i].option_flag = options[i].option_value; - break; - } - } - } - - return (1); - -#else - - char *p; - char tokens[3] = {',', '.', 0}; - - for (key = strtok(ips_str, tokens); key; key = strtok(NULL, tokens)) { - p = key; - - /* Search for value */ - while ((p) && (*p != ':')) - p++; - - if (p) { - *p = '\0'; - value = p+1; - } else - value = NULL; - - /* - * We now have key/value pairs. - * Update the variables - */ - for (i = 0; i < (sizeof(options) / sizeof(options[0])); i++) { - if (strnicmp(key, options[i].option_name, strlen(ips_str)) == 0) { - if (value) - *options[i].option_flag = simple_strtoul(value, NULL, 0); - else - *options[i].option_flag = options[i].option_value; - - break; - } - } - } +ips_setup(char *ips_str) +{ -#endif + int i; + char *key; + char *value; + IPS_OPTION options[] = { + {"noi2o", &ips_force_i2o, 0}, + {"nommap", &ips_force_memio, 0}, + {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE}, + {"cdboot", &ips_cd_boot, 0}, + {"maxcmds", &MaxLiteCmds, 32}, + }; + + /* Don't use strtok() anymore ( if 2.4 Kernel or beyond ) */ + /* Search for value */ + while ((key = strsep(&ips_str, ",."))) { + if (!*key) + continue; + value = strchr(key, ':'); + if (value) + *value++ = '\0'; + /* + * We now have key/value pairs. 
+ * Update the variables + */ + for (i = 0; i < (sizeof (options) / sizeof (options[0])); i++) { + if (strnicmp + (key, options[i].option_name, + strlen(options[i].option_name)) == 0) { + if (value) + *options[i].option_flag = + simple_strtoul(value, NULL, 0); + else + *options[i].option_flag = + options[i].option_value; + break; + } + } + } + return (1); } -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) __setup("ips=", ips_setup); -#endif /****************************************************************************/ /* */ @@ -699,682 +548,97 @@ __setup("ips=", ips_setup); /* */ /****************************************************************************/ int -ips_detect(Scsi_Host_Template *SHT) { -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - int i; -#endif +ips_detect(Scsi_Host_Template * SHT) +{ + int i; - METHOD_TRACE("ips_detect", 1); + METHOD_TRACE("ips_detect", 1); #ifdef MODULE - if (ips) -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - ips_setup(ips); -#else - ips_setup(ips, NULL); -#endif -#endif - - /* If Booting from the Manager CD, Allocate a large Flash */ - /* Buffer ( so we won't need to allocate one for each adapter ). 
*/ - if ( ips_cd_boot ) { - ips_FlashData = ( char * ) __get_free_pages( GFP_KERNEL, 7 ); - if (ips_FlashData == NULL) { - /* The validity of this pointer is checked in ips_make_passthru() before it is used */ - printk( KERN_WARNING "ERROR: Can't Allocate Large Buffer for Flashing\n" ); - } - } - /* initalize number of controllers */ - ips_num_controllers = 0; - ips_next_controller = 0; - ips_released_controllers = 0; - - if (!pci_present()) - return (0); - -/**********************************************************************************/ -/* For Kernel Versions 2.4 or greater, use new PCI ( Hot Pluggable ) architecture */ -/**********************************************************************************/ - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - #if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) - spin_unlock_irq(&io_request_lock); - #endif - SHT->proc_info = ips_proc_info; - SHT->proc_name = "ips"; - - /* There are several special cases ( which are too complicated to enumerate here ) where, due */ - /* to System BIOS rules, the adapters must be enumerated in a certain order. If ServeRAID */ - /* BIOS tells us the order, then we will follow it. The first pass at init is simply to be */ - /* able to communicate with the first adapter to see if BIOS is telling us the order. */ - /* This does not apply to ia64 EFI BIOS. 
*/ - -#if !defined(__ia64__) - InitState = 0; - pci_module_init(&ips_pci_driver); /* Look for Any Adapter, to fill in the Adapter Order Table */ + if (ips) + ips_setup(ips); #endif - InitState = 1; - - if ( AdapterOrder[0] ) { - /* BIOS has dictated the order that we should enumerate Adapters */ - for ( i = 1; i <= AdapterOrder[0]; i++ ) { - switch (AdapterOrder[i]) { - case 'M': - pci_module_init(&ips_pci_driver_adaptec); /* Ask for Adaptec Adapters */ - break; - case 'S': - pci_module_init(&ips_pci_driver_5i); /* Ask for 5i Adapters */ - pci_module_init(&ips_pci_driver_6i); /* Ask for 6i Adapters */ - break; - case 'N': - pci_module_init(&ips_pci_driver_i960); /* Ask for i960 Adapters */ - break; - case 'A': - pci_module_init(&ips_pci_driver_anaconda); /* Ask for Anaconda Family Adapters */ - break; - default: - i = AdapterOrder[0] + 1; /* Premature End of List - Ensure Loop Ends */ - break; - } - } - } - else { - /* No Adapter Order Table from BIOS, so sort things the old-fashioned way */ - - /* By definition, an Internal ( 5i or 6i ) Adapter MUST be enumerated first */ - /* or the server may not boot properly. The adapters must be enumerated in */ - /* exactly the same order as BIOS for the machine to come up properly. */ - /* NOTE: There will never be both a 5i and a 6i in the same machine. 
*/ - - pci_module_init(&ips_pci_driver_5i); /* Ask for 5i Adapters First */ - if (ips_num_controllers) { /* If there is a 5i Adapter */ - pci_module_init(&ips_pci_driver_i960); /* Get all i960's next */ - } - else { - pci_module_init(&ips_pci_driver_6i); /* Ask if any 6i Adapters */ - if (ips_num_controllers) /* If there is a 6i Adapter */ - pci_module_init(&ips_pci_driver_adaptec); /* Get all Adaptecs next */ - } - - pci_module_init(&ips_pci_driver); /* Get all remaining Adapters */ - /* ( in normal BUS order ) */ - } - - #if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) - spin_lock_irq(&io_request_lock); - #endif - if (ips_num_controllers > 0) - register_reboot_notifier(&ips_notifier); - - return (ips_num_controllers); -#else - InitState = 1; - SHT->proc_info = ips_proc_info; - SHT->proc_dir = &proc_scsi_ips; - return ips_init_oldphase1(SHT); -#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) */ - -} - -#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) - -/***********************************************************************************/ -/* Sort the Device Structures */ -/* Devices are sorted by groups ( type ) and then PCI address within each group */ -/* This also results in the same ordering that results when using the 2.4 kernel */ -/* architecture for initialization. 
*/ -/***********************************************************************************/ - -static void -ips_sort_controllers(struct pci_dev *dev[]) { - struct pci_dev *tempdev[IPS_MAX_ADAPTERS]; - struct pci_dev *lowestdev; - int i, j; - int temp_index = 0; /* Index into tempdev[] array */ - int lowIndex = 0; - int newlowflag = 0; /* Flag to indicate when a new low address has been found */ - uint16_t subdevice_id; - unsigned char BusNumber; - - /* Clear the Temporary Dev Structure */ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) - tempdev[i] = NULL; - - /* The Outer Loop goes thru each Adapter Type Supported */ - for (j = 0; j < IPS_MAX_ADAPTER_TYPES; j++) { - lowestdev = NULL; - /* The Inner Loop Checks each Device still in the List and */ - /* Finds the lowset adapter left ( by PCI boot order ) */ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) { - if (dev[i]) { - if (lowestdev == NULL) { /* If this is the first one found, it must be the lowest ! */ - lowestdev = dev[i]; - lowIndex = i; - } - - /* If you find a Sarasota ( 5i ), it must always be treated as the first adapter */ - if (dev[i]->device == IPS_DEVICEID_MORPHEUS) { - pci_read_config_word(dev[i], PCI_SUBSYSTEM_ID, &subdevice_id); - if ((subdevice_id == IPS_SUBDEVICEID_5I1) || - (subdevice_id == IPS_SUBDEVICEID_5I2)) { - lowestdev = dev[i]; - lowIndex = i; - break; - } - } - - /* If you find a Sebring ( 6i ), it must always be treated as the first adapter */ - if (dev[i]->device == IPS_DEVICEID_MARCO) { - pci_read_config_word(dev[i], PCI_SUBSYSTEM_ID, &subdevice_id); - if (subdevice_id == IPS_SUBDEVICEID_6I) { - lowestdev = dev[i]; - lowIndex = i; - break; - } - } - - /* Determine if this device is at a lower PCI address than the current lowest device */ - newlowflag = 0; - - if (dev[i]->device == IPS_DEVICEID_MARCO) /* System BIOS adds 1 to Marco Bus Number */ - BusNumber = ( dev[i]->bus->number ) - 1; /* because of Bridge Chip */ - else - BusNumber = dev[i]->bus->number; - - if (BusNumber < 
lowestdev->bus->number) /* IF a lower BUS # */ - newlowflag = i; - - if ((BusNumber == lowestdev->bus->number) && /* If Same Bus #, but a lower device # */ - (dev[i]->devfn < lowestdev->devfn)) - newlowflag = i; - - if ( newlowflag ) { - lowestdev = dev[i]; - lowIndex = i; - } - } - } - - if (lowestdev) { /* If we found another adapter */ - tempdev[temp_index] = lowestdev; /* Add it in the list */ - dev[lowIndex] = NULL; /* Null it out so we don't find it again */ - temp_index++; - /* Now get all the adapters that are the same type as the low one . */ - /* They will already be in order, so they don't need any further sorting.*/ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) { - if (dev[i]) { - if (dev[i]->device == lowestdev->device) { - tempdev[temp_index] = dev[i]; /* Add the same type adapter to the list */ - temp_index++; - dev[i] = NULL; /* Null it out so we don't find it again */ - } - } - } - } - } - - /* Copy the Sorted Adapter Pointers ( tempdev[] ) to the Original Structure */ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) - dev[i] = tempdev[i]; + /* If Booting from the Manager CD, Allocate a large Flash */ + /* Buffer ( so we won't need to allocate one for each adapter ). 
*/ + if (ips_cd_boot) { + ips_FlashData = (char *) __get_free_pages(IPS_INIT_GFP, 7); + if (ips_FlashData == NULL) { + /* The validity of this pointer is checked in ips_make_passthru() before it is used */ + printk(KERN_WARNING + "ERROR: Can't Allocate Large Buffer for Flashing\n"); + } + } + + SHT->proc_info = ips_proc_info; + SHT->proc_name = "ips"; + + for (i = 0; i < ips_num_controllers; i++) { + if (ips_register_scsi(i)) + ips_free(ips_ha[i]); + ips_released_controllers++; + } + return (ips_num_controllers); } /****************************************************************************/ -/* Detect and initialize the driver for 2.2 kernels */ -/* */ -/* NOTE: this routine is called under the io_request_lock spinlock */ -/****************************************************************************/ -static int ips_init_oldphase1(Scsi_Host_Template *SHT){ - struct Scsi_Host *sh; - ips_ha_t *ha; - uint32_t io_addr; - uint32_t mem_addr; - uint32_t io_len; - uint32_t mem_len; - uint16_t planer; - uint8_t revision_id; - uint8_t bus; - uint8_t func; - uint8_t irq; - uint16_t subdevice_id; - int i; - int j; - uint32_t count; - char *ioremap_ptr; - char *mem_ptr; - struct pci_dev *dev[IPS_MAX_ADAPTERS]; - dma_addr_t dma_address; - uint32_t currbar; - uint32_t maskbar; - uint8_t barnum; - uint32_t IsDead; - - METHOD_TRACE("ips_init_oldphase1", 1); - - for ( i = 0; i < IPS_MAX_ADAPTERS; i++ ) - dev[i] = NULL; - - /* Find all the adapters that we support and save them in the dev[] structure */ - i = 0; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_MORPHEUS, NULL); - while ( dev[i] ) { - i++; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_MORPHEUS, dev[i-1]); - } - - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_COPPERHEAD, NULL); - while ( dev[i] ) { - i++; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_COPPERHEAD, dev[i-1]); - } - - dev[i] = pci_find_device(IPS_VENDORID_ADAPTEC, IPS_DEVICEID_MARCO, NULL); - while ( dev[i] ) { - 
i++; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_MARCO, dev[i-1]); - } - - /* Sort the Adapters */ - if ( dev[0] ) - ips_sort_controllers( dev ); - else - return (0); - - /* Now scan and Initialize the controllers */ - for ( i = 0; i < IPS_MAX_ADAPTERS; i++ ) { - if (!dev[i]) - break; - - if (ips_next_controller >= IPS_MAX_ADAPTERS) - break; - - /* stuff that we get in dev */ - irq = dev[i]->irq; - bus = dev[i]->bus->number; - func = dev[i]->devfn; - - /* Init MEM/IO addresses to 0 */ - mem_addr = 0; - io_addr = 0; - mem_len = 0; - io_len = 0; - - for (j = 0; j < 2; j++) { - if (!dev[i]->base_address[j]) - break; - - if ((dev[i]->base_address[j] & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { - barnum = PCI_BASE_ADDRESS_0 + (j * 4); - io_addr = dev[i]->base_address[j] & PCI_BASE_ADDRESS_IO_MASK; - - /* Get Size */ - pci_read_config_dword(dev[i], barnum, &currbar); - pci_write_config_dword(dev[i], barnum, ~0); - pci_read_config_dword(dev[i], barnum, &maskbar); - pci_write_config_dword(dev[i], barnum, currbar); - - io_len = ~(maskbar & PCI_BASE_ADDRESS_IO_MASK) + 1; - } else { - barnum = PCI_BASE_ADDRESS_0 + (j * 4); - mem_addr = dev[i]->base_address[j] & PCI_BASE_ADDRESS_MEM_MASK; - - /* Get Size */ - pci_read_config_dword(dev[i], barnum, &currbar); - pci_write_config_dword(dev[i], barnum, ~0); - pci_read_config_dword(dev[i], barnum, &maskbar); - pci_write_config_dword(dev[i], barnum, currbar); - - mem_len = ~(maskbar & PCI_BASE_ADDRESS_MEM_MASK) + 1; - } - } - - /* setup memory mapped area (if applicable) */ - if (mem_addr) { - uint32_t base; - uint32_t offs; - - DEBUG_VAR(1, "(%s%d) detect, Memory region %x, size: %d", - ips_name, ips_next_controller, mem_addr, mem_len); - - base = mem_addr & PAGE_MASK; - offs = mem_addr - base; - - ioremap_ptr = ioremap(base, PAGE_SIZE); - mem_ptr = ioremap_ptr + offs; - } else { - ioremap_ptr = NULL; - mem_ptr = NULL; - } - - /* setup I/O mapped area (if applicable) */ - if (io_addr) { - DEBUG_VAR(1, 
"(%s%d) detect, IO region %x, size: %d", - ips_name, ips_next_controller, io_addr, io_len); - - if (check_region(io_addr, io_len)) { - /* Couldn't allocate io space */ - printk(KERN_WARNING "(%s%d) couldn't allocate IO space %x len %d.\n", - ips_name, ips_next_controller, io_addr, io_len); - - ips_next_controller++; - - continue; - } - - request_region(io_addr, io_len, "ips"); - } - - /* get planer status */ - if (pci_read_config_word(dev[i], 0x04, &planer)) { - printk(KERN_WARNING "(%s%d) can't get planer status.\n", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - /* check to see if an onboard planer controller is disabled */ - if (!(planer & 0x000C)) { - - DEBUG_VAR(1, "(%s%d) detect, Onboard controller disabled by BIOS", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - DEBUG_VAR(1, "(%s%d) detect bus %d, func %x, irq %d, io %x, mem: %x, ptr: %p", - ips_name, ips_next_controller, bus, func, irq, io_addr, mem_addr, mem_ptr); - - /* get the revision ID */ - if (pci_read_config_byte(dev[i], PCI_REVISION_ID, &revision_id)) { - printk(KERN_WARNING "(%s%d) can't get revision id.\n", - ips_name, ips_next_controller); - - ips_next_controller++; - continue; - } - - /* get the subdevice id */ - if (pci_read_config_word(dev[i], PCI_SUBSYSTEM_ID, &subdevice_id)) { - printk(KERN_WARNING "(%s%d) can't get subdevice id.\n", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - /* found a controller */ - sh = scsi_register(SHT, sizeof(ips_ha_t)); - - if (sh == NULL) { - printk(KERN_WARNING "(%s%d) Unable to register controller with SCSI subsystem - skipping controller\n", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - ha = IPS_HA(sh); - memset(ha, 0, sizeof(ips_ha_t)); - - ips_sh[ips_next_controller] = sh; - ips_ha[ips_next_controller] = ha; - ips_num_controllers++; - ha->active = 1; - - ha->enq = kmalloc(sizeof(IPS_ENQ), GFP_KERNEL); - - if 
(!ha->enq) { - printk(KERN_WARNING "(%s%d) Unable to allocate host inquiry structure - skipping contoller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ha->adapt = pci_alloc_consistent(dev[i], sizeof(IPS_ADAPTER) + sizeof(IPS_IO_CMD), &dma_address); - - if (!ha->adapt) { - printk(KERN_WARNING "(%s%d) Unable to allocate host adapt and dummy structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - ha->adapt->hw_status_start = dma_address; - ha->dummy = (void *)ha->adapt + 1; - - ha->conf = kmalloc(sizeof(IPS_CONF), GFP_KERNEL); - - if (!ha->conf) { - printk(KERN_WARNING "(%s%d) Unable to allocate host conf structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ha->nvram = kmalloc(sizeof(IPS_NVRAM_P5), GFP_KERNEL); - - if (!ha->nvram) { - printk(KERN_WARNING "(%s%d) Unable to allocate host nvram structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ha->subsys = kmalloc(sizeof(IPS_SUBSYS), GFP_KERNEL); - - if (!ha->subsys) { - printk(KERN_WARNING "(%s%d) Unable to allocate host subsystem structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - for (count = PAGE_SIZE, ha->ioctl_order = 0; - count < ips_ioctlsize; - ha->ioctl_order++, count <<= 1); - - ha->ioctl_data = (char *) __get_free_pages(GFP_KERNEL, ha->ioctl_order); - ha->ioctl_datasize = count; - - if (!ha->ioctl_data) { - printk(KERN_WARNING "(%s%d) Unable to allocate ioctl 
data\n", - ips_name, ips_next_controller); - - ha->ioctl_data = NULL; - ha->ioctl_order = 0; - ha->ioctl_datasize = 0; - } - - /* Store away needed values for later use */ - sh->io_port = io_addr; - sh->n_io_port = io_addr ? 255 : 0; - sh->unique_id = (io_addr) ? io_addr : mem_addr; - sh->irq = irq; - sh->select_queue_depths = ips_select_queue_depth; - sh->sg_tablesize = sh->hostt->sg_tablesize; - sh->can_queue = sh->hostt->can_queue; - sh->cmd_per_lun = sh->hostt->cmd_per_lun; - sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; - sh->use_clustering = sh->hostt->use_clustering; - - sh->wish_block = FALSE; - - /* Store info in HA structure */ - ha->irq = irq; - ha->io_addr = io_addr; - ha->io_len = io_len; - ha->mem_addr = mem_addr; - ha->mem_len = mem_len; - ha->mem_ptr = mem_ptr; - ha->ioremap_ptr = ioremap_ptr; - ha->host_num = ips_next_controller; - ha->revision_id = revision_id; - ha->slot_num = PCI_SLOT(dev[i]->devfn); - ha->device_id = dev[i]->device; - ha->subdevice_id = subdevice_id; - ha->pcidev = dev[i]; - - /* - * Setup Functions - */ - ips_setup_funclist(ha); - - /* If Morpheus appears dead, reset it */ - if ( ( IPS_IS_MORPHEUS( ha ) ) || ( IPS_IS_MARCO( ha ) ) ) { - IsDead = readl( ha->mem_ptr + IPS_REG_I960_MSG1 ); - if ( IsDead == 0xDEADBEEF ) { - ips_reset_morpheus( ha ); - } - } - - /* - * Initialize the card if it isn't already - */ - - if (!(*ha->func.isinit)(ha)) { - if (!(*ha->func.init)(ha)) { - /* - * Initialization failed - */ - printk(KERN_WARNING "(%s%d) unable to initialize controller - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - } - - /* install the interrupt handler */ - if (request_irq(irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { - printk(KERN_WARNING "(%s%d) unable to install interrupt handler - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - 
ips_next_controller++; - ips_num_controllers--; - - continue; - } - - /* - * Allocate a temporary SCB for initialization - */ - ha->max_cmds = 1; - if (!ips_allocatescbs(ha)) { - /* couldn't allocate a temp SCB */ - printk(KERN_WARNING "(%s%d) unable to allocate CCBs - skipping contoller\n", - ips_name, ips_next_controller); - free_irq(ha->irq, ha); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ips_next_controller++; - } - - /* - * Do Phase 2 Initialization - * Controller init - */ - for (i = 0; i < ips_next_controller; i++) { - - if (ips_ha[i] == 0) { - printk(KERN_WARNING "(%s%d) ignoring bad controller\n", ips_name, i); - continue; - } - - if (ips_init_phase2(i) != SUCCESS) - ips_num_controllers--; - - } - - if (ips_num_controllers > 0) - register_reboot_notifier(&ips_notifier); - - return (ips_num_controllers); -} -#endif - -/****************************************************************************/ /* configure the function pointers to use the functions that will work */ /* with the found version of the adapter */ /****************************************************************************/ -static void ips_setup_funclist(ips_ha_t *ha){ +static void +ips_setup_funclist(ips_ha_t * ha) +{ - /* - * Setup Functions - */ - if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { - /* morpheus / marco / sebring */ - ha->func.isintr = ips_isintr_morpheus; - ha->func.isinit = ips_isinit_morpheus; - ha->func.issue = ips_issue_i2o_memio; - ha->func.init = ips_init_morpheus; - ha->func.statupd = ips_statupd_morpheus; - ha->func.reset = ips_reset_morpheus; - ha->func.intr = ips_intr_morpheus; - ha->func.enableint = ips_enable_int_morpheus; - } else if (IPS_USE_MEMIO(ha)) { - /* copperhead w/MEMIO */ - ha->func.isintr = ips_isintr_copperhead_memio; - ha->func.isinit = ips_isinit_copperhead_memio; - ha->func.init = ips_init_copperhead_memio; - ha->func.statupd = ips_statupd_copperhead_memio; - 
ha->func.statinit = ips_statinit_memio; - ha->func.reset = ips_reset_copperhead_memio; - ha->func.intr = ips_intr_copperhead; - ha->func.erasebios = ips_erase_bios_memio; - ha->func.programbios = ips_program_bios_memio; - ha->func.verifybios = ips_verify_bios_memio; - ha->func.enableint = ips_enable_int_copperhead_memio; - if (IPS_USE_I2O_DELIVER(ha)) - ha->func.issue = ips_issue_i2o_memio; - else - ha->func.issue = ips_issue_copperhead_memio; - } else { - /* copperhead */ - ha->func.isintr = ips_isintr_copperhead; - ha->func.isinit = ips_isinit_copperhead; - ha->func.init = ips_init_copperhead; - ha->func.statupd = ips_statupd_copperhead; - ha->func.statinit = ips_statinit; - ha->func.reset = ips_reset_copperhead; - ha->func.intr = ips_intr_copperhead; - ha->func.erasebios = ips_erase_bios; - ha->func.programbios = ips_program_bios; - ha->func.verifybios = ips_verify_bios; - ha->func.enableint = ips_enable_int_copperhead; - - if (IPS_USE_I2O_DELIVER(ha)) - ha->func.issue = ips_issue_i2o; - else - ha->func.issue = ips_issue_copperhead; - } + /* + * Setup Functions + */ + if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { + /* morpheus / marco / sebring */ + ha->func.isintr = ips_isintr_morpheus; + ha->func.isinit = ips_isinit_morpheus; + ha->func.issue = ips_issue_i2o_memio; + ha->func.init = ips_init_morpheus; + ha->func.statupd = ips_statupd_morpheus; + ha->func.reset = ips_reset_morpheus; + ha->func.intr = ips_intr_morpheus; + ha->func.enableint = ips_enable_int_morpheus; + } else if (IPS_USE_MEMIO(ha)) { + /* copperhead w/MEMIO */ + ha->func.isintr = ips_isintr_copperhead_memio; + ha->func.isinit = ips_isinit_copperhead_memio; + ha->func.init = ips_init_copperhead_memio; + ha->func.statupd = ips_statupd_copperhead_memio; + ha->func.statinit = ips_statinit_memio; + ha->func.reset = ips_reset_copperhead_memio; + ha->func.intr = ips_intr_copperhead; + ha->func.erasebios = ips_erase_bios_memio; + ha->func.programbios = ips_program_bios_memio; + ha->func.verifybios = 
ips_verify_bios_memio; + ha->func.enableint = ips_enable_int_copperhead_memio; + if (IPS_USE_I2O_DELIVER(ha)) + ha->func.issue = ips_issue_i2o_memio; + else + ha->func.issue = ips_issue_copperhead_memio; + } else { + /* copperhead */ + ha->func.isintr = ips_isintr_copperhead; + ha->func.isinit = ips_isinit_copperhead; + ha->func.init = ips_init_copperhead; + ha->func.statupd = ips_statupd_copperhead; + ha->func.statinit = ips_statinit; + ha->func.reset = ips_reset_copperhead; + ha->func.intr = ips_intr_copperhead; + ha->func.erasebios = ips_erase_bios; + ha->func.programbios = ips_program_bios; + ha->func.verifybios = ips_verify_bios; + ha->func.enableint = ips_enable_int_copperhead; + + if (IPS_USE_I2O_DELIVER(ha)) + ha->func.issue = ips_issue_i2o; + else + ha->func.issue = ips_issue_copperhead; + } } /****************************************************************************/ @@ -1387,79 +651,72 @@ static void ips_setup_funclist(ips_ha_t /* */ /****************************************************************************/ int -ips_release(struct Scsi_Host *sh) { - ips_scb_t *scb; - ips_ha_t *ha; - int i; - - METHOD_TRACE("ips_release", 1); - - for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++); - - if (i == IPS_MAX_ADAPTERS) { - printk(KERN_WARNING "(%s) release, invalid Scsi_Host pointer.\n", - ips_name); -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - BUG(); -#endif - return (FALSE); - } +ips_release(struct Scsi_Host *sh) +{ + ips_scb_t *scb; + ips_ha_t *ha; + int i; - ha = IPS_HA(sh); + METHOD_TRACE("ips_release", 1); - if (!ha) - return (FALSE); + for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ; - /* flush the cache on the controller */ - scb = &ha->scbs[ha->max_cmds-1]; + if (i == IPS_MAX_ADAPTERS) { + printk(KERN_WARNING + "(%s) release, invalid Scsi_Host pointer.\n", ips_name); + BUG(); + return (FALSE); + } - ips_init_scb(ha, scb); + ha = IPS_HA(sh); - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FLUSH; + if (!ha) + 
return (FALSE); - scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; - scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flush_cache.state = IPS_NORM_STATE; - scb->cmd.flush_cache.reserved = 0; - scb->cmd.flush_cache.reserved2 = 0; - scb->cmd.flush_cache.reserved3 = 0; - scb->cmd.flush_cache.reserved4 = 0; + /* flush the cache on the controller */ + scb = &ha->scbs[ha->max_cmds - 1]; - if (InitState != 0) /* If Not just Searching for the Adapter Order Table */ - printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, ha->host_num); + ips_init_scb(ha, scb); - /* send command */ - if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) - printk(KERN_NOTICE "(%s%d) Incomplete Flush.\n", ips_name, ha->host_num); + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; - if (InitState != 0) /* If Not just Searching for the Adapter Order Table */ - printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", ips_name, ha->host_num); + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; - ips_sh[i] = NULL; - ips_ha[i] = NULL; + printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, ha->host_num); - /* free extra memory */ - ips_free(ha); + /* send command */ + if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) + printk(KERN_NOTICE "(%s%d) Incomplete Flush.\n", ips_name, + ha->host_num); - /* Free I/O Region */ - if (ha->io_addr) - release_region(ha->io_addr, ha->io_len); + printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", ips_name, + ha->host_num); - /* free IRQ */ - free_irq(ha->irq, ha); + ips_sh[i] = NULL; + ips_ha[i] = NULL; - scsi_unregister(sh); + /* free extra memory */ + ips_free(ha); - if (InitState != 0) { - ips_released_controllers++; - if (ips_num_controllers 
== ips_released_controllers){ - unregister_reboot_notifier(&ips_notifier); - pci_unregister_driver(ha->pcidev->driver); - } - } + /* Free I/O Region */ + if (ha->io_addr) + release_region(ha->io_addr, ha->io_len); - return (FALSE); + /* free IRQ */ + free_irq(ha->irq, ha); + + scsi_unregister(sh); + + ips_released_controllers++; + + return (FALSE); } /****************************************************************************/ @@ -1472,50 +729,54 @@ ips_release(struct Scsi_Host *sh) { /* */ /****************************************************************************/ static int -ips_halt(struct notifier_block *nb, ulong event, void *buf) { - ips_scb_t *scb; - ips_ha_t *ha; - int i; - - if ((event != SYS_RESTART) && (event != SYS_HALT) && - (event != SYS_POWER_OFF)) - return (NOTIFY_DONE); - - for (i = 0; i < ips_next_controller; i++) { - ha = (ips_ha_t *) ips_ha[i]; - - if (!ha) - continue; - - if (!ha->active) - continue; - - /* flush the cache on the controller */ - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FLUSH; - - scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; - scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flush_cache.state = IPS_NORM_STATE; - scb->cmd.flush_cache.reserved = 0; - scb->cmd.flush_cache.reserved2 = 0; - scb->cmd.flush_cache.reserved3 = 0; - scb->cmd.flush_cache.reserved4 = 0; - - printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, ha->host_num); - - /* send command */ - if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) - printk(KERN_NOTICE "(%s%d) Incomplete Flush.\n", ips_name, ha->host_num); - else - printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", ips_name, ha->host_num); - } +ips_halt(struct notifier_block *nb, ulong event, void *buf) +{ + ips_scb_t *scb; + ips_ha_t *ha; + int i; + + if ((event != SYS_RESTART) && (event != SYS_HALT) && + (event != SYS_POWER_OFF)) return (NOTIFY_DONE); + + for (i = 0; i < 
ips_next_controller; i++) { + ha = (ips_ha_t *) ips_ha[i]; + + if (!ha) + continue; + + if (!ha->active) + continue; + + /* flush the cache on the controller */ + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, + ha->host_num); + + /* send command */ + if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == + IPS_FAILURE) printk(KERN_NOTICE + "(%s%d) Incomplete Flush.\n", ips_name, + ha->host_num); + else + printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", + ips_name, ha->host_num); + } - return (NOTIFY_OK); + return (NOTIFY_OK); } /****************************************************************************/ @@ -1528,50 +789,51 @@ ips_halt(struct notifier_block *nb, ulon /* Note: this routine is called under the io_request_lock */ /****************************************************************************/ int -ips_eh_abort(Scsi_Cmnd *SC) { - ips_ha_t *ha; - ips_copp_wait_item_t *item; - int ret; - - METHOD_TRACE("ips_eh_abort", 1); - - if (!SC) - return (FAILED); - - ha = (ips_ha_t *) SC->host->hostdata; - - if (!ha) - return (FAILED); - - if (!ha->active) - return (FAILED); - - if (SC->serial_number != SC->serial_number_at_timeout) { - /* HMM, looks like a bogus command */ - DEBUG(1, "Abort called with bogus scsi command"); - - return (FAILED); - } - - /* See if the command is on the copp queue */ - item = ha->copp_waitlist.head; - while ((item) && (item->scsi_cmd != SC)) - item = item->next; - - if (item) { - /* Found it */ - ips_removeq_copp(&ha->copp_waitlist, item); - ret = (SUCCESS); - - /* See if the command 
is on the wait queue */ - } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) { - /* command not sent yet */ - ret = (SUCCESS); - } else { - /* command must have already been sent */ - ret = (FAILED); - } - return ret; +ips_eh_abort(Scsi_Cmnd * SC) +{ + ips_ha_t *ha; + ips_copp_wait_item_t *item; + int ret; + + METHOD_TRACE("ips_eh_abort", 1); + + if (!SC) + return (FAILED); + + ha = (ips_ha_t *) SC->host->hostdata; + + if (!ha) + return (FAILED); + + if (!ha->active) + return (FAILED); + + if (SC->serial_number != SC->serial_number_at_timeout) { + /* HMM, looks like a bogus command */ + DEBUG(1, "Abort called with bogus scsi command"); + + return (FAILED); + } + + /* See if the command is on the copp queue */ + item = ha->copp_waitlist.head; + while ((item) && (item->scsi_cmd != SC)) + item = item->next; + + if (item) { + /* Found it */ + ips_removeq_copp(&ha->copp_waitlist, item); + ret = (SUCCESS); + + /* See if the command is on the wait queue */ + } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) { + /* command not sent yet */ + ret = (SUCCESS); + } else { + /* command must have already been sent */ + ret = (FAILED); + } + return ret; } /****************************************************************************/ @@ -1586,191 +848,194 @@ ips_eh_abort(Scsi_Cmnd *SC) { /* */ /****************************************************************************/ int -ips_eh_reset(Scsi_Cmnd *SC) { - int ret; - int i; - ips_ha_t *ha; - ips_scb_t *scb; - ips_copp_wait_item_t *item; +ips_eh_reset(Scsi_Cmnd * SC) +{ + int ret; + int i; + ips_ha_t *ha; + ips_scb_t *scb; + ips_copp_wait_item_t *item; - METHOD_TRACE("ips_eh_reset", 1); + METHOD_TRACE("ips_eh_reset", 1); #ifdef NO_IPS_RESET - return (FAILED); + return (FAILED); #else - if (!SC) { - DEBUG(1, "Reset called with NULL scsi command"); + if (!SC) { + DEBUG(1, "Reset called with NULL scsi command"); - return (FAILED); - } + return (FAILED); + } - ha = (ips_ha_t *) SC->host->hostdata; + ha = (ips_ha_t *) 
SC->host->hostdata; - if (!ha) { - DEBUG(1, "Reset called with NULL ha struct"); - - return (FAILED); - } - - if (!ha->active) - return (FAILED); - - /* See if the command is on the copp queue */ - item = ha->copp_waitlist.head; - while ((item) && (item->scsi_cmd != SC)) - item = item->next; - - if (item) { - /* Found it */ - ips_removeq_copp(&ha->copp_waitlist, item); - return (SUCCESS); - } - - /* See if the command is on the wait queue */ - if (ips_removeq_wait(&ha->scb_waitlist, SC)) { - /* command not sent yet */ - return (SUCCESS); - } - - /* An explanation for the casual observer: */ - /* Part of the function of a RAID controller is automatic error */ - /* detection and recovery. As such, the only problem that physically */ - /* resetting an adapter will ever fix is when, for some reason, */ - /* the driver is not successfully communicating with the adapter. */ - /* Therefore, we will attempt to flush this adapter. If that succeeds, */ - /* then there's no real purpose in a physical reset. This will complete */ - /* much faster and avoids any problems that might be caused by a */ - /* physical reset ( such as having to fail all the outstanding I/O's ). 
*/ - - if (ha->ioctl_reset == 0) { /* IF Not an IOCTL Requested Reset */ - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FLUSH; - - scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; - scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flush_cache.state = IPS_NORM_STATE; - scb->cmd.flush_cache.reserved = 0; - scb->cmd.flush_cache.reserved2 = 0; - scb->cmd.flush_cache.reserved3 = 0; - scb->cmd.flush_cache.reserved4 = 0; - - /* Attempt the flush command */ - ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL); - if (ret == IPS_SUCCESS) { - printk(KERN_NOTICE "(%s%d) Reset Request - Flushed Cache\n", ips_name, ha->host_num); - return (SUCCESS); - } - } - - /* Either we can't communicate with the adapter or it's an IOCTL request */ - /* from a utility. A physical reset is needed at this point. */ - - ha->ioctl_reset = 0; /* Reset the IOCTL Requested Reset Flag */ - - /* - * command must have already been sent - * reset the controller - */ - printk(KERN_NOTICE "(%s%d) Resetting controller.\n", - ips_name, ha->host_num); - ret = (*ha->func.reset)(ha); - - if (!ret) { - Scsi_Cmnd *scsi_cmd; - - printk(KERN_NOTICE - "(%s%d) Controller reset failed - controller now offline.\n", - ips_name, ha->host_num); - - /* Now fail all of the active commands */ - DEBUG_VAR(1, "(%s%d) Failing active commands", - ips_name, ha->host_num); - - while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); - } - - /* Now fail all of the pending commands */ - DEBUG_VAR(1, "(%s%d) Failing pending commands", - ips_name, ha->host_num); - - while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { - scsi_cmd->result = DID_ERROR; - scsi_cmd->scsi_done(scsi_cmd); - } - - ha->active = FALSE; - return (FAILED); - } - - if (!ips_clear_adapter(ha, IPS_INTR_IORL)) { - Scsi_Cmnd *scsi_cmd; - - 
printk(KERN_NOTICE - "(%s%d) Controller reset failed - controller now offline.\n", - ips_name, ha->host_num); - - /* Now fail all of the active commands */ - DEBUG_VAR(1, "(%s%d) Failing active commands", - ips_name, ha->host_num); - - while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); - } - - /* Now fail all of the pending commands */ - DEBUG_VAR(1, "(%s%d) Failing pending commands", - ips_name, ha->host_num); - - while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { - scsi_cmd->result = DID_ERROR << 16; - scsi_cmd->scsi_done(scsi_cmd); - } - - ha->active = FALSE; - return (FAILED); - } - - /* FFDC */ - if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) { - struct timeval tv; - - do_gettimeofday(&tv); - ha->last_ffdc = tv.tv_sec; - ha->reset_count++; - ips_ffdc_reset(ha, IPS_INTR_IORL); - } - - /* Now fail all of the active commands */ - DEBUG_VAR(1, "(%s%d) Failing active commands", - ips_name, ha->host_num); - - while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { - scb->scsi_cmd->result = (DID_RESET << 16) | (SUGGEST_RETRY << 24); - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); - } - - /* Reset DCDB active command bits */ - for (i = 1; i < ha->nbus; i++) - ha->dcdb_active[i-1] = 0; + if (!ha) { + DEBUG(1, "Reset called with NULL ha struct"); + + return (FAILED); + } + + if (!ha->active) + return (FAILED); + + /* See if the command is on the copp queue */ + item = ha->copp_waitlist.head; + while ((item) && (item->scsi_cmd != SC)) + item = item->next; + + if (item) { + /* Found it */ + ips_removeq_copp(&ha->copp_waitlist, item); + return (SUCCESS); + } + + /* See if the command is on the wait queue */ + if (ips_removeq_wait(&ha->scb_waitlist, SC)) { + /* command not sent yet */ + return (SUCCESS); + } + + /* An explanation for the casual observer: */ + /* Part of the function of a RAID controller is automatic 
error */ + /* detection and recovery. As such, the only problem that physically */ + /* resetting an adapter will ever fix is when, for some reason, */ + /* the driver is not successfully communicating with the adapter. */ + /* Therefore, we will attempt to flush this adapter. If that succeeds, */ + /* then there's no real purpose in a physical reset. This will complete */ + /* much faster and avoids any problems that might be caused by a */ + /* physical reset ( such as having to fail all the outstanding I/O's ). */ + + if (ha->ioctl_reset == 0) { /* IF Not an IOCTL Requested Reset */ + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + /* Attempt the flush command */ + ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL); + if (ret == IPS_SUCCESS) { + printk(KERN_NOTICE + "(%s%d) Reset Request - Flushed Cache\n", + ips_name, ha->host_num); + return (SUCCESS); + } + } + + /* Either we can't communicate with the adapter or it's an IOCTL request */ + /* from a utility. A physical reset is needed at this point. 
*/ + + ha->ioctl_reset = 0; /* Reset the IOCTL Requested Reset Flag */ + + /* + * command must have already been sent + * reset the controller + */ + printk(KERN_NOTICE "(%s%d) Resetting controller.\n", + ips_name, ha->host_num); + ret = (*ha->func.reset) (ha); + + if (!ret) { + Scsi_Cmnd *scsi_cmd; + + printk(KERN_NOTICE + "(%s%d) Controller reset failed - controller now offline.\n", + ips_name, ha->host_num); + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", + ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Now fail all of the pending commands */ + DEBUG_VAR(1, "(%s%d) Failing pending commands", + ips_name, ha->host_num); + + while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { + scsi_cmd->result = DID_ERROR; + scsi_cmd->scsi_done(scsi_cmd); + } + + ha->active = FALSE; + return (FAILED); + } + + if (!ips_clear_adapter(ha, IPS_INTR_IORL)) { + Scsi_Cmnd *scsi_cmd; + + printk(KERN_NOTICE + "(%s%d) Controller reset failed - controller now offline.\n", + ips_name, ha->host_num); + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", + ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Now fail all of the pending commands */ + DEBUG_VAR(1, "(%s%d) Failing pending commands", + ips_name, ha->host_num); + + while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { + scsi_cmd->result = DID_ERROR << 16; + scsi_cmd->scsi_done(scsi_cmd); + } + + ha->active = FALSE; + return (FAILED); + } + + /* FFDC */ + if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) { + struct timeval tv; + + do_gettimeofday(&tv); + ha->last_ffdc = tv.tv_sec; + ha->reset_count++; + 
ips_ffdc_reset(ha, IPS_INTR_IORL); + } + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = + (DID_RESET << 16) | (SUGGEST_RETRY << 24); + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Reset DCDB active command bits */ + for (i = 1; i < ha->nbus; i++) + ha->dcdb_active[i - 1] = 0; - /* Reset the number of active IOCTLs */ - ha->num_ioctl = 0; + /* Reset the number of active IOCTLs */ + ha->num_ioctl = 0; - ips_next(ha, IPS_INTR_IORL); + ips_next(ha, IPS_INTR_IORL); - return (SUCCESS); -#endif /* NO_IPS_RESET */ + return (SUCCESS); +#endif /* NO_IPS_RESET */ } @@ -1787,103 +1052,92 @@ ips_eh_reset(Scsi_Cmnd *SC) { /* */ /****************************************************************************/ int -ips_queue(Scsi_Cmnd *SC, void (*done) (Scsi_Cmnd *)) { - ips_ha_t *ha; - ips_passthru_t *pt; - - METHOD_TRACE("ips_queue", 1); - - ha = (ips_ha_t *) SC->host->hostdata; - - if (!ha) - return (1); - - if (!ha->active) - return (DID_ERROR); - - if (ips_is_passthru(SC)) { - if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) { - SC->result = DID_BUS_BUSY << 16; - done(SC); - - return (0); - } - } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) { - SC->result = DID_BUS_BUSY << 16; - done(SC); - - return (0); - } - - SC->scsi_done = done; - - DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)", - ips_name, - ha->host_num, - SC->cmnd[0], - SC->channel, - SC->target, - SC->lun); - - /* Check for command to initiator IDs */ - if ((SC->channel > 0) && (SC->target == ha->ha_id[SC->channel])) { - SC->result = DID_NO_CONNECT << 16; - done(SC); - - return (0); - } - - if (ips_is_passthru(SC)) { - - ips_copp_wait_item_t *scratch; - - /* A Reset IOCTL is only sent by the boot CD in extreme cases. 
*/ - /* There can never be any system activity ( network or disk ), but check */ - /* anyway just as a good practice. */ - pt = (ips_passthru_t *) SC->request_buffer; - if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) && - (pt->CoppCP.cmd.reset.adapter_flag == 1)) { - if (ha->scb_activelist.count != 0) { - SC->result = DID_BUS_BUSY << 16; - done(SC); - return (0); - } - ha->ioctl_reset = 1; /* This reset request is from an IOCTL */ - ips_eh_reset(SC); - SC->result = DID_OK << 16; - SC->scsi_done(SC); - return (0); - } - - /* allocate space for the scribble */ - scratch = kmalloc(sizeof(ips_copp_wait_item_t), GFP_ATOMIC); - - if (!scratch) { - SC->result = DID_ERROR << 16; - done(SC); - - return (0); - } - - scratch->scsi_cmd = SC; - scratch->next = NULL; - - ips_putq_copp_tail(&ha->copp_waitlist, scratch); - } - else { - ips_putq_wait_tail(&ha->scb_waitlist, SC); - } - - ips_next(ha, IPS_INTR_IORL); - - /* If We were using the CD Boot Flash Buffer, Restore the Old Values */ - if ( ips_FlashData == ha->ioctl_data ) { - ha->ioctl_data = ha->flash_data; - ha->ioctl_order = ha->flash_order; - ha->ioctl_datasize = ha->flash_datasize; - ips_FlashDataInUse = 0; - } - return (0); +ips_queue(Scsi_Cmnd * SC, void (*done) (Scsi_Cmnd *)) +{ + ips_ha_t *ha; + ips_passthru_t *pt; + + METHOD_TRACE("ips_queue", 1); + + ha = (ips_ha_t *) SC->host->hostdata; + + if (!ha) + return (1); + + if (!ha->active) + return (DID_ERROR); + + if (ips_is_passthru(SC)) { + if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + + return (0); + } + } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + + return (0); + } + + SC->scsi_done = done; + + DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)", + ips_name, + ha->host_num, SC->cmnd[0], SC->channel, SC->target, SC->lun); + + /* Check for command to initiator IDs */ + if ((SC->channel > 0) && (SC->target == ha->ha_id[SC->channel])) { + 
SC->result = DID_NO_CONNECT << 16; + done(SC); + + return (0); + } + + if (ips_is_passthru(SC)) { + + ips_copp_wait_item_t *scratch; + + /* A Reset IOCTL is only sent by the boot CD in extreme cases. */ + /* There can never be any system activity ( network or disk ), but check */ + /* anyway just as a good practice. */ + pt = (ips_passthru_t *) SC->request_buffer; + if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) && + (pt->CoppCP.cmd.reset.adapter_flag == 1)) { + if (ha->scb_activelist.count != 0) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + return (0); + } + ha->ioctl_reset = 1; /* This reset request is from an IOCTL */ + ips_eh_reset(SC); + SC->result = DID_OK << 16; + SC->scsi_done(SC); + return (0); + } + + /* allocate space for the scribble */ + scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC); + + if (!scratch) { + SC->result = DID_ERROR << 16; + done(SC); + + return (0); + } + + scratch->scsi_cmd = SC; + scratch->next = NULL; + + ips_putq_copp_tail(&ha->copp_waitlist, scratch); + } else { + ips_putq_wait_tail(&ha->scb_waitlist, SC); + } + + ips_next(ha, IPS_INTR_IORL); + + return (0); } /****************************************************************************/ @@ -1896,48 +1150,49 @@ ips_queue(Scsi_Cmnd *SC, void (*done) (S /* */ /****************************************************************************/ int -ips_biosparam(Disk *disk, kdev_t dev, int geom[]) { - ips_ha_t *ha; - int heads; - int sectors; - int cylinders; - - METHOD_TRACE("ips_biosparam", 1); - - ha = (ips_ha_t *) disk->device->host->hostdata; - - if (!ha) - /* ?!?! host adater info invalid */ - return (0); - - if (!ha->active) - return (0); - - if (!ips_read_adapter_status(ha, IPS_INTR_ON)) - /* ?!?! 
Enquiry command failed */ - return (0); - - if ((disk->capacity > 0x400000) && - ((ha->enq->ucMiscFlag & 0x8) == 0)) { - heads = IPS_NORM_HEADS; - sectors = IPS_NORM_SECTORS; - } else { - heads = IPS_COMP_HEADS; - sectors = IPS_COMP_SECTORS; - } - - cylinders = disk->capacity / (heads * sectors); - - DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d", - heads, sectors, cylinders); - - geom[0] = heads; - geom[1] = sectors; - geom[2] = cylinders; +ips_biosparam(Disk * disk, kdev_t dev, int geom[]) +{ + ips_ha_t *ha; + int heads; + int sectors; + int cylinders; + + METHOD_TRACE("ips_biosparam", 1); - return (0); + ha = (ips_ha_t *) disk->device->host->hostdata; + + if (!ha) + /* ?!?! host adater info invalid */ + return (0); + + if (!ha->active) + return (0); + + if (!ips_read_adapter_status(ha, IPS_INTR_ON)) + /* ?!?! Enquiry command failed */ + return (0); + + if ((disk->capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) { + heads = IPS_NORM_HEADS; + sectors = IPS_NORM_SECTORS; + } else { + heads = IPS_COMP_HEADS; + sectors = IPS_COMP_SECTORS; + } + + cylinders = disk->capacity / (heads * sectors); + + DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d", + heads, sectors, cylinders); + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return (0); } +#if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) /****************************************************************************/ /* */ /* Routine Name: ips_select_queue_depth */ @@ -1948,39 +1203,67 @@ ips_biosparam(Disk *disk, kdev_t dev, in /* */ /****************************************************************************/ static void -ips_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs) { - Scsi_Device *device; - ips_ha_t *ha; - int count = 0; - int min; - - ha = IPS_HA(host); - min = ha->max_cmds / 4; - - for (device = scsi_devs; device; device = device->next) { - if (device->host == host) { - if ((device->channel == 0) && (device->type == 0)) - 
count++; - } - } - - for (device = scsi_devs; device; device = device->next) { - if (device->host == host) { - if ((device->channel == 0) && (device->type == 0)) { - device->queue_depth = ( ha->max_cmds - 1 ) / count; - if (device->queue_depth < min) - device->queue_depth = min; - } - else { - device->queue_depth = 2; - } - - if (device->queue_depth < 2) - device->queue_depth = 2; - } - } +ips_select_queue_depth(struct Scsi_Host *host, Scsi_Device * scsi_devs) +{ + Scsi_Device *device; + ips_ha_t *ha; + int count = 0; + int min; + + ha = IPS_HA(host); + min = ha->max_cmds / 4; + + for (device = scsi_devs; device; device = device->next) { + if (device->host == host) { + if ((device->channel == 0) && (device->type == 0)) + count++; + } + } + + for (device = scsi_devs; device; device = device->next) { + if (device->host == host) { + if ((device->channel == 0) && (device->type == 0)) { + device->queue_depth = + (ha->max_cmds - 1) / count; + if (device->queue_depth < min) + device->queue_depth = min; + } else { + device->queue_depth = 2; + } + + if (device->queue_depth < 2) + device->queue_depth = 2; + } + } } +#else +/****************************************************************************/ +/* */ +/* Routine Name: ips_slave_configure */ +/* */ +/* Routine Description: */ +/* */ +/* Set queue depths on devices once scan is complete */ +/* */ +/****************************************************************************/ +int +ips_slave_configure(Scsi_Device * SDptr) +{ + ips_ha_t *ha; + int min; + + ha = IPS_HA(SDptr->host); + if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) { + min = ha->max_cmds / 2; + if (ha->enq->ucLogDriveCount <= 2) + min = ha->max_cmds - 1; + scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min); + } + return 0; +} +#endif + /****************************************************************************/ /* */ /* Routine Name: do_ipsintr */ @@ -1991,30 +1274,37 @@ ips_select_queue_depth(struct Scsi_Host /* */ 
/****************************************************************************/ void -do_ipsintr(int irq, void *dev_id, struct pt_regs *regs) { - ips_ha_t *ha; - unsigned long cpu_flags; - struct Scsi_Host *host; - - METHOD_TRACE("do_ipsintr", 2); - - ha = (ips_ha_t *) dev_id; - if (!ha) - return; - host = ips_sh[ha->host_num]; - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - - if (!ha->active) { - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); - return; - } +do_ipsintr(int irq, void *dev_id, struct pt_regs *regs) +{ + ips_ha_t *ha; + unsigned long cpu_flags; + struct Scsi_Host *host; + + METHOD_TRACE("do_ipsintr", 2); + + ha = (ips_ha_t *) dev_id; + if (!ha) + return; + host = ips_sh[ha->host_num]; + /* interrupt during initialization */ + if (!host) { + (*ha->func.intr) (ha); + return; + } + + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + + if (!ha->active) { + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + return; + } - (*ha->func.intr)(ha); + (*ha->func.intr) (ha); - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); - /* start the next command */ - ips_next(ha, IPS_INTR_ON); + /* start the next command */ + ips_next(ha, IPS_INTR_ON); } /****************************************************************************/ @@ -2029,54 +1319,55 @@ do_ipsintr(int irq, void *dev_id, struct /* */ /****************************************************************************/ void -ips_intr_copperhead(ips_ha_t *ha) { - ips_stat_t *sp; - ips_scb_t *scb; - IPS_STATUS cstatus; - int intrstatus; - - METHOD_TRACE("ips_intr", 2); - - if (!ha) - return; - - if (!ha->active) - return; - - intrstatus = (*ha->func.isintr)(ha); - - if (!intrstatus) { - /* - * Unexpected/Shared interrupt - */ - - return; - } - - while (TRUE) { - sp = &ha->sp; - - intrstatus = (*ha->func.isintr)(ha); - - if (!intrstatus) - break; - else - cstatus.value = (*ha->func.statupd)(ha); - - if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { - /* Spurious Interupt 
? */ - continue; - } - - ips_chkstatus(ha, &cstatus); - scb = (ips_scb_t *) sp->scb_addr; - - /* - * use the callback function to finish things up - * NOTE: interrupts are OFF for this - */ - (*scb->callback) (ha, scb); - } /* end while */ +ips_intr_copperhead(ips_ha_t * ha) +{ + ips_stat_t *sp; + ips_scb_t *scb; + IPS_STATUS cstatus; + int intrstatus; + + METHOD_TRACE("ips_intr", 2); + + if (!ha) + return; + + if (!ha->active) + return; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) { + /* + * Unexpected/Shared interrupt + */ + + return; + } + + while (TRUE) { + sp = &ha->sp; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) + break; + else + cstatus.value = (*ha->func.statupd) (ha); + + if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { + /* Spurious Interupt ? */ + continue; + } + + ips_chkstatus(ha, &cstatus); + scb = (ips_scb_t *) sp->scb_addr; + + /* + * use the callback function to finish things up + * NOTE: interrupts are OFF for this + */ + (*scb->callback) (ha, scb); + } /* end while */ } /****************************************************************************/ @@ -2091,60 +1382,62 @@ ips_intr_copperhead(ips_ha_t *ha) { /* */ /****************************************************************************/ void -ips_intr_morpheus(ips_ha_t *ha) { - ips_stat_t *sp; - ips_scb_t *scb; - IPS_STATUS cstatus; - int intrstatus; +ips_intr_morpheus(ips_ha_t * ha) +{ + ips_stat_t *sp; + ips_scb_t *scb; + IPS_STATUS cstatus; + int intrstatus; - METHOD_TRACE("ips_intr_morpheus", 2); + METHOD_TRACE("ips_intr_morpheus", 2); - if (!ha) - return; + if (!ha) + return; - if (!ha->active) - return; + if (!ha->active) + return; - intrstatus = (*ha->func.isintr)(ha); + intrstatus = (*ha->func.isintr) (ha); - if (!intrstatus) { - /* - * Unexpected/Shared interrupt - */ + if (!intrstatus) { + /* + * Unexpected/Shared interrupt + */ - return; - } + return; + } - while (TRUE) { - sp = &ha->sp; + while (TRUE) { + sp = &ha->sp; - intrstatus = 
(*ha->func.isintr)(ha); + intrstatus = (*ha->func.isintr) (ha); - if (!intrstatus) - break; - else - cstatus.value = (*ha->func.statupd)(ha); + if (!intrstatus) + break; + else + cstatus.value = (*ha->func.statupd) (ha); - if (cstatus.value == 0xffffffff) - /* No more to process */ - break; + if (cstatus.value == 0xffffffff) + /* No more to process */ + break; - if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { - printk(KERN_WARNING "(%s%d) Spurious interrupt; no ccb.\n", - ips_name, ha->host_num); + if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { + printk(KERN_WARNING + "(%s%d) Spurious interrupt; no ccb.\n", ips_name, + ha->host_num); - continue; - } + continue; + } - ips_chkstatus(ha, &cstatus); - scb = (ips_scb_t *) sp->scb_addr; + ips_chkstatus(ha, &cstatus); + scb = (ips_scb_t *) sp->scb_addr; - /* - * use the callback function to finish things up - * NOTE: interrupts are OFF for this - */ - (*scb->callback) (ha, scb); - } /* end while */ + /* + * use the callback function to finish things up + * NOTE: interrupts are OFF for this + */ + (*scb->callback) (ha, scb); + } /* end while */ } /****************************************************************************/ @@ -2157,31 +1450,32 @@ ips_intr_morpheus(ips_ha_t *ha) { /* */ /****************************************************************************/ const char * -ips_info(struct Scsi_Host *SH) { - static char buffer[256]; - char *bp; - ips_ha_t *ha; +ips_info(struct Scsi_Host *SH) +{ + static char buffer[256]; + char *bp; + ips_ha_t *ha; - METHOD_TRACE("ips_info", 1); + METHOD_TRACE("ips_info", 1); - ha = IPS_HA(SH); + ha = IPS_HA(SH); - if (!ha) - return (NULL); + if (!ha) + return (NULL); - bp = &buffer[0]; - memset(bp, 0, sizeof(buffer)); + bp = &buffer[0]; + memset(bp, 0, sizeof (buffer)); - sprintf(bp, "%s%s%s", "IBM PCI ServeRAID ", IPS_VERSION_HIGH, IPS_VERSION_LOW ); + sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ", + IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT); - if 
(ha->ad_type > 0 && - ha->ad_type <= MAX_ADAPTER_NAME) { - strcat(bp, " <"); - strcat(bp, ips_adapter_name[ha->ad_type-1]); - strcat(bp, ">"); - } + if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) { + strcat(bp, " <"); + strcat(bp, ips_adapter_name[ha->ad_type - 1]); + strcat(bp, ">"); + } - return (bp); + return (bp); } /****************************************************************************/ @@ -2195,38 +1489,39 @@ ips_info(struct Scsi_Host *SH) { /****************************************************************************/ int ips_proc_info(char *buffer, char **start, off_t offset, - int length, int hostno, int func) { - int i; - int ret; - ips_ha_t *ha = NULL; - - METHOD_TRACE("ips_proc_info", 1); - - /* Find our host structure */ - for (i = 0; i < ips_next_controller; i++) { - if (ips_sh[i]) { - if (ips_sh[i]->host_no == hostno) { - ha = (ips_ha_t *) ips_sh[i]->hostdata; - break; - } - } - } - - if (!ha) - return (-EINVAL); - - if (func) { - /* write */ - return (0); - } else { - /* read */ - if (start) - *start = buffer; + int length, int hostno, int func) +{ + int i; + int ret; + ips_ha_t *ha = NULL; + + METHOD_TRACE("ips_proc_info", 1); + + /* Find our host structure */ + for (i = 0; i < ips_next_controller; i++) { + if (ips_sh[i]) { + if (ips_sh[i]->host_no == hostno) { + ha = (ips_ha_t *) ips_sh[i]->hostdata; + break; + } + } + } + + if (!ha) + return (-EINVAL); + + if (func) { + /* write */ + return (0); + } else { + /* read */ + if (start) + *start = buffer; - ret = ips_host_info(ha, buffer, offset, length); + ret = ips_host_info(ha, buffer, offset, length); - return (ret); - } + return (ret); + } } /*--------------------------------------------------------------------------*/ @@ -2243,32 +1538,65 @@ ips_proc_info(char *buffer, char **start /* */ /****************************************************************************/ static int -ips_is_passthru(Scsi_Cmnd *SC) { - METHOD_TRACE("ips_is_passthru", 1); +ips_is_passthru(Scsi_Cmnd * SC) 
+{ + METHOD_TRACE("ips_is_passthru", 1); - if (!SC) - return (0); + if (!SC) + return (0); + + if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) && + (SC->channel == 0) && + (SC->target == IPS_ADAPTER_ID) && + (SC->lun == 0) && SC->request_buffer) { + if ((!SC->use_sg) && SC->request_bufflen && + (((char *) SC->request_buffer)[0] == 'C') && + (((char *) SC->request_buffer)[1] == 'O') && + (((char *) SC->request_buffer)[2] == 'P') && + (((char *) SC->request_buffer)[3] == 'P')) + return 1; + else if (SC->use_sg) { + struct scatterlist *sg = SC->request_buffer; + char *buffer = IPS_SG_ADDRESS(sg); + if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && + buffer[2] == 'P' && buffer[3] == 'P') + return 1; + } + } + return 0; +} - if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) && - (SC->channel == 0) && - (SC->target == IPS_ADAPTER_ID) && - (SC->lun == 0) && - SC->request_buffer){ - if((!SC->use_sg) && SC->request_bufflen && - (((char *) SC->request_buffer)[0] == 'C') && - (((char *) SC->request_buffer)[1] == 'O') && - (((char *) SC->request_buffer)[2] == 'P') && - (((char *) SC->request_buffer)[3] == 'P')) - return 1; - else if(SC->use_sg){ - struct scatterlist *sg = SC->request_buffer; - char *buffer = IPS_SG_ADDRESS(sg); - if(buffer && buffer[0] == 'C' && buffer[1] == 'O' && - buffer[2] == 'P' && buffer[3] == 'P') - return 1; - } - } - return 0; +/****************************************************************************/ +/* */ +/* Routine Name: ips_alloc_passthru_buffer */ +/* */ +/* Routine Description: */ +/* allocate a buffer large enough for the ioctl data if the ioctl buffer */ +/* is too small or doesn't exist */ +/****************************************************************************/ +static int +ips_alloc_passthru_buffer(ips_ha_t * ha, int length) +{ + void *bigger_buf; + int count; + int order; + + if (ha->ioctl_data && length <= (PAGE_SIZE << ha->ioctl_order)) + return 0; + /* there is no buffer or it's not big enough, allocate a new one */ + for (count = 
PAGE_SIZE, order = 0; + count < length; order++, count <<= 1) ; + bigger_buf = (void *) __get_free_pages(IPS_ATOMIC_GFP, order); + if (bigger_buf) { + /* free the old memory */ + free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order); + /* use the new memory */ + ha->ioctl_data = (char *) bigger_buf; + ha->ioctl_order = order; + } else { + return -1; + } + return 0; } /****************************************************************************/ @@ -2281,127 +1609,96 @@ ips_is_passthru(Scsi_Cmnd *SC) { /* */ /****************************************************************************/ static int -ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) { - ips_passthru_t *pt; - char *buffer; - int length = 0; - - METHOD_TRACE("ips_make_passthru", 1); - - if(!SC->use_sg){ - buffer = SC->request_buffer; - length = SC->request_bufflen; - }else{ - struct scatterlist *sg = SC->request_buffer; - int i; - for(i = 0; i < SC->use_sg; i++) - length += sg[i].length; - - if (length < sizeof(ips_passthru_t)) { - /* wrong size */ - DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", - ips_name, ha->host_num); - return (IPS_FAILURE); - }else if(!ha->ioctl_data || length > (PAGE_SIZE << ha->ioctl_order)){ - void *bigger_buf; - int count; - int order; - /* try to allocate a bigger buffer */ - for (count = PAGE_SIZE, order = 0; - count < length; - order++, count <<= 1); - bigger_buf = (void *) __get_free_pages(GFP_ATOMIC, order); - if (bigger_buf) { - /* free the old memory */ - free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order); - /* use the new memory */ - ha->ioctl_data = (char *) bigger_buf; - ha->ioctl_order = order; - ha->ioctl_datasize = count; - } else { - pt = (ips_passthru_t*)IPS_SG_ADDRESS(sg); - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0x00; - SC->result = DID_ERROR << 16; - return (IPS_FAILURE); - } - } - ha->ioctl_datasize = length; - length = 0; - for(i = 0; i < SC->use_sg; i++){ - memcpy(&ha->ioctl_data[length], 
IPS_SG_ADDRESS(&sg[i]), sg[i].length); - length += sg[i].length; - } - pt = (ips_passthru_t *)ha->ioctl_data; - buffer = ha->ioctl_data; - } - if (!length || !buffer) { - /* no data */ - DEBUG_VAR(1, "(%s%d) No passthru structure", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } - if (length < sizeof(ips_passthru_t)) { - /* wrong size */ - DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } - pt = (ips_passthru_t*) buffer; - /* - * Some notes about the passthru interface used - * - * IF the scsi op_code == 0x0d then we assume - * that the data came along with/goes with the - * packet we received from the sg driver. In this - * case the CmdBSize field of the pt structure is - * used for the size of the buffer. - */ - - switch (pt->CoppCmd) { - case IPS_NUMCTRLS: - memcpy(buffer + sizeof(ips_passthru_t), - &ips_num_controllers, sizeof(int)); - SC->result = DID_OK << 16; - - return (IPS_SUCCESS_IMM); - - case IPS_CTRLINFO: - memcpy(buffer + sizeof(ips_passthru_t), - ha, sizeof(ips_ha_t)); - SC->result = DID_OK << 16; - - return (IPS_SUCCESS_IMM); - - case IPS_COPPUSRCMD: - case IPS_COPPIOCCMD: - if (SC->cmnd[0] == IPS_IOCTL_COMMAND) { - if (length < (sizeof(ips_passthru_t) + pt->CmdBSize)) { - /* wrong size */ - DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } - - if(ha->device_id == IPS_DEVICEID_COPPERHEAD && - pt->CoppCP.cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW) - return ips_flash_copperhead(ha, pt, scb); - - if (ips_usrcmd(ha, pt, scb)) - return (IPS_SUCCESS); - else - return (IPS_FAILURE); - } - - break; +ips_make_passthru(ips_ha_t * ha, Scsi_Cmnd * SC, ips_scb_t * scb, int intr) +{ + ips_passthru_t *pt; + int length = 0; + int ret; + + METHOD_TRACE("ips_make_passthru", 1); + + if (!SC->use_sg) { + length = SC->request_bufflen; + } else { + struct scatterlist *sg = SC->request_buffer; + int i; + for (i = 0; i < SC->use_sg; i++) + length 
+= sg[i].length; + } + if (length < sizeof (ips_passthru_t)) { + /* wrong size */ + DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", + ips_name, ha->host_num); + return (IPS_FAILURE); + } + if (ips_alloc_passthru_buffer(ha, length)) { + /* allocation failure! If ha->ioctl_data exists, use it to return + some error codes. Return a failed command to the scsi layer. */ + if (ha->ioctl_data) { + pt = (ips_passthru_t *) ha->ioctl_data; + ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t)); + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t)); + } + return IPS_FAILURE; + } + ha->ioctl_datasize = length; + + ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize); + pt = (ips_passthru_t *) ha->ioctl_data; + + /* + * Some notes about the passthru interface used + * + * IF the scsi op_code == 0x0d then we assume + * that the data came along with/goes with the + * packet we received from the sg driver. In this + * case the CmdBSize field of the pt structure is + * used for the size of the buffer. 
+ */ + + switch (pt->CoppCmd) { + case IPS_NUMCTRLS: + memcpy(ha->ioctl_data + sizeof (ips_passthru_t), + &ips_num_controllers, sizeof (int)); + ips_scmd_buf_write(SC, ha->ioctl_data, + sizeof (ips_passthru_t) + sizeof (int)); + SC->result = DID_OK << 16; + + return (IPS_SUCCESS_IMM); + + case IPS_COPPUSRCMD: + case IPS_COPPIOCCMD: + if (SC->cmnd[0] == IPS_IOCTL_COMMAND) { + if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) { + /* wrong size */ + DEBUG_VAR(1, + "(%s%d) Passthru structure wrong size", + ips_name, ha->host_num); + + return (IPS_FAILURE); + } + + if (ha->device_id == IPS_DEVICEID_COPPERHEAD && + pt->CoppCP.cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW) { + ret = ips_flash_copperhead(ha, pt, scb); + ips_scmd_buf_write(SC, ha->ioctl_data, + sizeof (ips_passthru_t)); + return ret; + } + if (ips_usrcmd(ha, pt, scb)) + return (IPS_SUCCESS); + else + return (IPS_FAILURE); + } - } /* end switch */ + break; - return (IPS_FAILURE); - } + } /* end switch */ + + return (IPS_FAILURE); +} /****************************************************************************/ /* Routine Name: ips_flash_copperhead */ @@ -2409,62 +1706,65 @@ ips_make_passthru(ips_ha_t *ha, Scsi_Cmn /* Flash the BIOS/FW on a Copperhead style controller */ /****************************************************************************/ static int -ips_flash_copperhead(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb){ - int datasize, count; +ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + int datasize, count; - /* Trombone is the only copperhead that can do packet flash, but only - * for firmware. No one said it had to make sence. 
*/ - if(IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE){ - if(ips_usrcmd(ha, pt, scb)) - return IPS_SUCCESS; - else - return IPS_FAILURE; - } - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0; - scb->scsi_cmd->result = DID_OK <<16; - /* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */ - /* avoid allocating a huge buffer per adapter ( which can fail ). */ - if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS){ - pt->BasicStatus = 0; - return ips_flash_bios(ha, pt, scb); - }else if(pt->CoppCP.cmd.flashfw.packet_num == 0){ - if(ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){ - ha->flash_data = ips_FlashData; - ha->flash_order = 7; - ha->flash_datasize = 0; - }else if(!ha->flash_data){ - datasize = pt->CoppCP.cmd.flashfw.total_packets * - pt->CoppCP.cmd.flashfw.count; - for (count = PAGE_SIZE, ha->flash_order = 0; count < datasize; - ha->flash_order++, count <<= 1); - ha->flash_data = (char *)__get_free_pages(GFP_ATOMIC, ha->flash_order); - ha->flash_datasize = 0; - }else - return IPS_FAILURE; - }else{ - if(pt->CoppCP.cmd.flashfw.count + ha->flash_datasize > - (PAGE_SIZE << ha->flash_order)){ - ips_free_flash_copperhead(ha); - printk(KERN_WARNING "failed size sanity check\n"); - return IPS_FAILURE; - } - } - if(!ha->flash_data) - return IPS_FAILURE; - pt->BasicStatus = 0; - memcpy(&ha->flash_data[ha->flash_datasize], pt + 1, - pt->CoppCP.cmd.flashfw.count); - ha->flash_datasize += pt->CoppCP.cmd.flashfw.count; - if(pt->CoppCP.cmd.flashfw.packet_num == - pt->CoppCP.cmd.flashfw.total_packets - 1){ - if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE) - return ips_flash_bios(ha, pt, scb); - else if(pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) - return ips_flash_firmware(ha, pt, scb); - } - return IPS_SUCCESS_IMM; + /* Trombone is the only copperhead that can do packet flash, but only + * for firmware. No one said it had to make sence. 
*/ + if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) { + if (ips_usrcmd(ha, pt, scb)) + return IPS_SUCCESS; + else + return IPS_FAILURE; + } + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0; + scb->scsi_cmd->result = DID_OK << 16; + /* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */ + /* avoid allocating a huge buffer per adapter ( which can fail ). */ + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { + pt->BasicStatus = 0; + return ips_flash_bios(ha, pt, scb); + } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) { + if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)) { + ha->flash_data = ips_FlashData; + ha->flash_order = 7; + ha->flash_datasize = 0; + } else if (!ha->flash_data) { + datasize = pt->CoppCP.cmd.flashfw.total_packets * + pt->CoppCP.cmd.flashfw.count; + for (count = PAGE_SIZE, ha->flash_order = 0; + count < datasize; ha->flash_order++, count <<= 1) ; + ha->flash_data = + (char *) __get_free_pages(IPS_ATOMIC_GFP, + ha->flash_order); + ha->flash_datasize = 0; + } else + return IPS_FAILURE; + } else { + if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize > + (PAGE_SIZE << ha->flash_order)) { + ips_free_flash_copperhead(ha); + printk(KERN_WARNING "failed size sanity check\n"); + return IPS_FAILURE; + } + } + if (!ha->flash_data) + return IPS_FAILURE; + pt->BasicStatus = 0; + memcpy(&ha->flash_data[ha->flash_datasize], pt + 1, + pt->CoppCP.cmd.flashfw.count); + ha->flash_datasize += pt->CoppCP.cmd.flashfw.count; + if (pt->CoppCP.cmd.flashfw.packet_num == + pt->CoppCP.cmd.flashfw.total_packets - 1) { + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE) + return ips_flash_bios(ha, pt, scb); + else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) + return ips_flash_firmware(ha, pt, scb); + } + return IPS_SUCCESS_IMM; } /****************************************************************************/ @@ -2473,46 +1773,95 @@ 
ips_flash_copperhead(ips_ha_t *ha, ips_p /* flashes the bios of a copperhead adapter */ /****************************************************************************/ static int -ips_flash_bios(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){ +ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ - if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS){ - if ((!ha->func.programbios) || (!ha->func.erasebios) || - (!ha->func.verifybios)) - goto error; - if((*ha->func.erasebios)(ha)){ - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to erase flash", - ips_name, ha->host_num); - goto error; - }else if ((*ha->func.programbios)(ha, ha->flash_data + IPS_BIOS_HEADER, - ha->flash_datasize - IPS_BIOS_HEADER, 0 )) { - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to flash", - ips_name, ha->host_num); - goto error; - }else if ((*ha->func.verifybios)(ha, ha->flash_data + IPS_BIOS_HEADER, - ha->flash_datasize - IPS_BIOS_HEADER, 0 )) { - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to verify flash", - ips_name, ha->host_num); - goto error; - } - ips_free_flash_copperhead(ha); - return IPS_SUCCESS_IMM; - }else if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS){ - if(!ha->func.erasebios) - goto error; - if((*ha->func.erasebios)(ha)){ - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to erase flash", - ips_name, ha->host_num); - goto error; - } - return IPS_SUCCESS_IMM; - } -error: - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0x00; - ips_free_flash_copperhead(ha); - return IPS_FAILURE; + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) { + if ((!ha->func.programbios) || (!ha->func.erasebios) || + (!ha->func.verifybios)) goto error; + if ((*ha->func.erasebios) (ha)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to erase flash", + ips_name, ha->host_num); + goto error; + } else + if 
((*ha->func.programbios) + (ha, ha->flash_data + IPS_BIOS_HEADER, + ha->flash_datasize - IPS_BIOS_HEADER, 0)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to flash", + ips_name, ha->host_num); + goto error; + } else + if ((*ha->func.verifybios) + (ha, ha->flash_data + IPS_BIOS_HEADER, + ha->flash_datasize - IPS_BIOS_HEADER, 0)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to verify flash", + ips_name, ha->host_num); + goto error; + } + ips_free_flash_copperhead(ha); + return IPS_SUCCESS_IMM; + } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { + if (!ha->func.erasebios) + goto error; + if ((*ha->func.erasebios) (ha)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to erase flash", + ips_name, ha->host_num); + goto error; + } + return IPS_SUCCESS_IMM; + } + error: + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_free_flash_copperhead(ha); + return IPS_FAILURE; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_fill_scb_sg_single */ +/* */ +/* Routine Description: */ +/* Fill in a single scb sg_list element from an address */ +/* return a -1 if a breakup occured */ +/****************************************************************************/ +static inline int +ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr, + ips_scb_t * scb, int indx, unsigned int e_len) +{ + + int ret_val = 0; + + if ((scb->data_len + e_len) > ha->max_xfer) { + e_len = ha->max_xfer - scb->data_len; + scb->breakup = indx; + ++scb->sg_break; + ret_val = -1; + } else { + scb->breakup = 0; + scb->sg_break = 0; + } + if (IPS_USE_ENH_SGLIST(ha)) { + scb->sg_list.enh_list[indx].address_lo = + cpu_to_le32(pci_dma_lo32(busaddr)); + scb->sg_list.enh_list[indx].address_hi = + cpu_to_le32(pci_dma_hi32(busaddr)); + scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len); + } else { + scb->sg_list.std_list[indx].address = + 
cpu_to_le32(pci_dma_lo32(busaddr)); + scb->sg_list.std_list[indx].length = cpu_to_le32(e_len); + } + + ++scb->sg_len; + scb->data_len += e_len; + return ret_val; } /****************************************************************************/ @@ -2521,49 +1870,51 @@ error: /* flashes the firmware of a copperhead adapter */ /****************************************************************************/ static int -ips_flash_firmware(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){ - IPS_SG_LIST *sg_list; - uint32_t cmd_busaddr; - - if(pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW ){ - memset(&pt->CoppCP.cmd, 0, sizeof(IPS_HOST_COMMAND)); - pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD; - pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize); - }else{ - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0x00; - ips_free_flash_copperhead(ha); - return IPS_FAILURE; - } - /* Save the S/G list pointer so it doesn't get clobbered */ - sg_list = scb->sg_list; - cmd_busaddr = scb->scb_busaddr; - /* copy in the CP */ - memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof(IPS_IOCTL_CMD)); - /* FIX stuff that might be wrong */ - scb->sg_list = sg_list; - scb->scb_busaddr = cmd_busaddr; - scb->bus = scb->scsi_cmd->channel; - scb->target_id = scb->scsi_cmd->target; - scb->lun = scb->scsi_cmd->lun; - scb->sg_len = 0; - scb->data_len = 0; - scb->flags = 0; - scb->op_code = 0; - scb->callback = ipsintr_done; - scb->timeout = ips_cmd_timeout; - - scb->data_len = ha->flash_datasize; - scb->data_busaddr = pci_map_single(ha->pcidev, ha->flash_data, scb->data_len, - IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flashfw.buffer_addr = scb->data_busaddr; - if (pt->TimeOut) - scb->timeout = pt->TimeOut; - scb->scsi_cmd->result = DID_OK <<16; - return IPS_SUCCESS; +ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + 
uint32_t cmd_busaddr; + + if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) { + memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND)); + pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD; + pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize); + } else { + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_free_flash_copperhead(ha); + return IPS_FAILURE; + } + /* Save the S/G list pointer so it doesn't get clobbered */ + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + /* copy in the CP */ + memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); + /* FIX stuff that might be wrong */ + scb->sg_list.list = sg_list.list; + scb->scb_busaddr = cmd_busaddr; + scb->bus = scb->scsi_cmd->channel; + scb->target_id = scb->scsi_cmd->target; + scb->lun = scb->scsi_cmd->lun; + scb->sg_len = 0; + scb->data_len = 0; + scb->flags = 0; + scb->op_code = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + + scb->data_len = ha->flash_datasize; + scb->data_busaddr = + pci_map_single(ha->pcidev, ha->flash_data, scb->data_len, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr); + if (pt->TimeOut) + scb->timeout = pt->TimeOut; + scb->scsi_cmd->result = DID_OK << 16; + return IPS_SUCCESS; } /****************************************************************************/ @@ -2572,12 +1923,13 @@ ips_flash_firmware(ips_ha_t * ha, ips_pa /* release the memory resources used to hold the flash image */ /****************************************************************************/ static void -ips_free_flash_copperhead(ips_ha_t *ha){ - if(ha->flash_data == ips_FlashData) - test_and_clear_bit(0, &ips_FlashDataInUse); - else if(ha->flash_data) - free_pages((unsigned long)ha->flash_data, ha->flash_order); - ha->flash_data = NULL; 
+ips_free_flash_copperhead(ips_ha_t * ha) +{ + if (ha->flash_data == ips_FlashData) + test_and_clear_bit(0, &ips_FlashDataInUse); + else if (ha->flash_data) + free_pages((unsigned long) ha->flash_data, ha->flash_order); + ha->flash_data = NULL; } /****************************************************************************/ @@ -2590,93 +1942,87 @@ ips_free_flash_copperhead(ips_ha_t *ha){ /* */ /****************************************************************************/ static int -ips_usrcmd(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb) { - IPS_SG_LIST *sg_list; - uint32_t cmd_busaddr; - - METHOD_TRACE("ips_usrcmd", 1); - - if ((!scb) || (!pt) || (!ha)) - return (0); - - /* Save the S/G list pointer so it doesn't get clobbered */ - sg_list = scb->sg_list; - cmd_busaddr = scb->scb_busaddr; - /* copy in the CP */ - memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof(IPS_IOCTL_CMD)); - memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof(IPS_DCDB_TABLE)); - - /* FIX stuff that might be wrong */ - scb->sg_list = sg_list; - scb->scb_busaddr = cmd_busaddr; - scb->bus = scb->scsi_cmd->channel; - scb->target_id = scb->scsi_cmd->target; - scb->lun = scb->scsi_cmd->lun; - scb->sg_len = 0; - scb->data_len = 0; - scb->flags = 0; - scb->op_code = 0; - scb->callback = ipsintr_done; - scb->timeout = ips_cmd_timeout; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - - /* we don't support DCDB/READ/WRITE Scatter Gather */ - if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) || - (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) || - (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG)) - return (0); - - if (pt->CmdBSize) { - if(!scb->scsi_cmd->use_sg){ - scb->data_len = pt->CmdBSize; - scb->data_busaddr = pci_map_single(ha->pcidev, - scb->scsi_cmd->request_buffer + - sizeof(ips_passthru_t), - pt->CmdBSize, - IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - } else { - scb->data_len = pt->CmdBSize; - scb->data_busaddr = pci_map_single(ha->pcidev, - ha->ioctl_data + - 
sizeof(ips_passthru_t), - pt->CmdBSize, - IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - } - } else { - scb->data_busaddr = 0L; - } - - if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) - scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + - (unsigned long)&scb->dcdb - - (unsigned long)scb); - - if (pt->CmdBSize) { - if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) - scb->dcdb.buffer_pointer = cpu_to_le32(scb->data_busaddr); - else - scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); - } - - /* set timeouts */ - if (pt->TimeOut) { - scb->timeout = pt->TimeOut; - - if (pt->TimeOut <= 10) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; - else if (pt->TimeOut <= 60) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; - else - scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; - } +ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr; + + METHOD_TRACE("ips_usrcmd", 1); + + if ((!scb) || (!pt) || (!ha)) + return (0); + + /* Save the S/G list pointer so it doesn't get clobbered */ + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + /* copy in the CP */ + memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); + memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE)); + + /* FIX stuff that might be wrong */ + scb->sg_list.list = sg_list.list; + scb->scb_busaddr = cmd_busaddr; + scb->bus = scb->scsi_cmd->channel; + scb->target_id = scb->scsi_cmd->target; + scb->lun = scb->scsi_cmd->lun; + scb->sg_len = 0; + scb->data_len = 0; + scb->flags = 0; + scb->op_code = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + + /* we don't support DCDB/READ/WRITE Scatter Gather */ + if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) || + (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) || + (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG)) + return (0); + + if (pt->CmdBSize) { + scb->data_len = pt->CmdBSize; + scb->data_busaddr = 
pci_map_single(ha->pcidev, + ha->ioctl_data + + sizeof (ips_passthru_t), + pt->CmdBSize, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + } else { + scb->data_busaddr = 0L; + } + + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) + scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + + (unsigned long) &scb-> + dcdb - + (unsigned long) scb); + + if (pt->CmdBSize) { + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->data_busaddr); + else + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } + + /* set timeouts */ + if (pt->TimeOut) { + scb->timeout = pt->TimeOut; + + if (pt->TimeOut <= 10) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; + else if (pt->TimeOut <= 60) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; + else + scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; + } - /* assume success */ - scb->scsi_cmd->result = DID_OK << 16; + /* assume success */ + scb->scsi_cmd->result = DID_OK << 16; - /* success */ - return (1); + /* success */ + return (1); } /****************************************************************************/ @@ -2689,43 +2035,34 @@ ips_usrcmd(ips_ha_t *ha, ips_passthru_t /* */ /****************************************************************************/ static void -ips_cleanup_passthru(ips_ha_t *ha, ips_scb_t *scb) { - ips_passthru_t *pt; +ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb) +{ + ips_passthru_t *pt; + + METHOD_TRACE("ips_cleanup_passthru", 1); + + if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) { + DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru", + ips_name, ha->host_num); - METHOD_TRACE("ips_cleanup_passthru", 1); + return; + } + pt = (ips_passthru_t *) ha->ioctl_data; - if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) { - DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru", - ips_name, ha->host_num); - - return ; - } - if(!scb->scsi_cmd->use_sg) - pt = (ips_passthru_t *) scb->scsi_cmd->request_buffer; - else - pt = 
(ips_passthru_t *) ha->ioctl_data; - - /* Copy data back to the user */ - if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) /* Copy DCDB Back to Caller's Area */ - memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof(IPS_DCDB_TABLE)); - - pt->BasicStatus = scb->basic_status; - pt->ExtendedStatus = scb->extended_status; - pt->AdapterType = ha->ad_type; - - if(ha->device_id == IPS_DEVICEID_COPPERHEAD && - (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD || - scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW)) - ips_free_flash_copperhead(ha); - - if(scb->scsi_cmd->use_sg){ - int i, length = 0; - struct scatterlist *sg = scb->scsi_cmd->request_buffer; - for(i = 0; i < scb->scsi_cmd->use_sg; i++){ - memcpy(IPS_SG_ADDRESS(&sg[i]), &ha->ioctl_data[length], sg[i].length); - length += sg[i].length; - } - } + /* Copy data back to the user */ + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) /* Copy DCDB Back to Caller's Area */ + memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE)); + + pt->BasicStatus = scb->basic_status; + pt->ExtendedStatus = scb->extended_status; + pt->AdapterType = ha->ad_type; + + if (ha->device_id == IPS_DEVICEID_COPPERHEAD && + (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD || + scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW)) + ips_free_flash_copperhead(ha); + + ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize); } /****************************************************************************/ @@ -2738,75 +2075,88 @@ ips_cleanup_passthru(ips_ha_t *ha, ips_s /* */ /****************************************************************************/ static int -ips_host_info(ips_ha_t *ha, char *ptr, off_t offset, int len) { - IPS_INFOSTR info; +ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len) +{ + IPS_INFOSTR info; - METHOD_TRACE("ips_host_info", 1); + METHOD_TRACE("ips_host_info", 1); - info.buffer = ptr; - info.length = len; - info.offset = offset; - info.pos = 0; - info.localpos = 0; - - copy_info(&info, "\nIBM ServeRAID General 
Information:\n\n"); - - if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && - (le16_to_cpu(ha->nvram->adapter_type) != 0)) - copy_info(&info, "\tController Type : %s\n", ips_adapter_name[ha->ad_type-1]); - else - copy_info(&info, "\tController Type : Unknown\n"); - - if (ha->io_addr) - copy_info(&info, "\tIO region : 0x%lx (%d bytes)\n", - ha->io_addr, ha->io_len); - - if (ha->mem_addr) { - copy_info(&info, "\tMemory region : 0x%lx (%d bytes)\n", - ha->mem_addr, ha->mem_len); - copy_info(&info, "\tShared memory address : 0x%lx\n", ha->mem_ptr); - } - - copy_info(&info, "\tIRQ number : %d\n", ha->irq); - - if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) - copy_info(&info, "\tBIOS Version : %c%c%c%c%c%c%c%c\n", - ha->nvram->bios_high[0], ha->nvram->bios_high[1], - ha->nvram->bios_high[2], ha->nvram->bios_high[3], - ha->nvram->bios_low[0], ha->nvram->bios_low[1], - ha->nvram->bios_low[2], ha->nvram->bios_low[3]); - - copy_info(&info, "\tFirmware Version : %c%c%c%c%c%c%c%c\n", - ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], - ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], - ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], - ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); - - copy_info(&info, "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", - ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], - ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], - ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], - ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); - - copy_info(&info, "\tDriver Version : %s%s\n", - IPS_VERSION_HIGH, IPS_VERSION_LOW); - - copy_info(&info, "\tMax Physical Devices : %d\n", - ha->enq->ucMaxPhysicalDevices); - copy_info(&info, "\tMax Active Commands : %d\n", - ha->max_cmds); - copy_info(&info, "\tCurrent Queued Commands : %d\n", - ha->scb_waitlist.count); - copy_info(&info, "\tCurrent Active Commands : %d\n", - ha->scb_activelist.count - ha->num_ioctl); - copy_info(&info, "\tCurrent 
Queued PT Commands : %d\n", - ha->copp_waitlist.count); - copy_info(&info, "\tCurrent Active PT Commands : %d\n", - ha->num_ioctl); + info.buffer = ptr; + info.length = len; + info.offset = offset; + info.pos = 0; + info.localpos = 0; + + copy_info(&info, "\nIBM ServeRAID General Information:\n\n"); + + if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && + (le16_to_cpu(ha->nvram->adapter_type) != 0)) + copy_info(&info, "\tController Type : %s\n", + ips_adapter_name[ha->ad_type - 1]); + else + copy_info(&info, + "\tController Type : Unknown\n"); + + if (ha->io_addr) + copy_info(&info, + "\tIO region : 0x%lx (%d bytes)\n", + ha->io_addr, ha->io_len); + + if (ha->mem_addr) { + copy_info(&info, + "\tMemory region : 0x%lx (%d bytes)\n", + ha->mem_addr, ha->mem_len); + copy_info(&info, + "\tShared memory address : 0x%lx\n", + ha->mem_ptr); + } + + copy_info(&info, "\tIRQ number : %d\n", ha->irq); + + if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) + copy_info(&info, + "\tBIOS Version : %c%c%c%c%c%c%c%c\n", + ha->nvram->bios_high[0], ha->nvram->bios_high[1], + ha->nvram->bios_high[2], ha->nvram->bios_high[3], + ha->nvram->bios_low[0], ha->nvram->bios_low[1], + ha->nvram->bios_low[2], ha->nvram->bios_low[3]); + + copy_info(&info, + "\tFirmware Version : %c%c%c%c%c%c%c%c\n", + ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], + ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], + ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], + ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); + + copy_info(&info, + "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", + ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], + ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], + ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], + ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); + + copy_info(&info, "\tDriver Version : %s%s\n", + IPS_VERSION_HIGH, IPS_VERSION_LOW); + + copy_info(&info, "\tDriver Build : %d\n", + IPS_BUILD_IDENT); + 
+ copy_info(&info, "\tMax Physical Devices : %d\n", + ha->enq->ucMaxPhysicalDevices); + copy_info(&info, "\tMax Active Commands : %d\n", + ha->max_cmds); + copy_info(&info, "\tCurrent Queued Commands : %d\n", + ha->scb_waitlist.count); + copy_info(&info, "\tCurrent Active Commands : %d\n", + ha->scb_activelist.count - ha->num_ioctl); + copy_info(&info, "\tCurrent Queued PT Commands : %d\n", + ha->copp_waitlist.count); + copy_info(&info, "\tCurrent Active PT Commands : %d\n", + ha->num_ioctl); - copy_info(&info, "\n"); + copy_info(&info, "\n"); - return (info.localpos); + return (info.localpos); } /****************************************************************************/ @@ -2819,28 +2169,29 @@ ips_host_info(ips_ha_t *ha, char *ptr, o /* */ /****************************************************************************/ static void -copy_mem_info(IPS_INFOSTR *info, char *data, int len) { - METHOD_TRACE("copy_mem_info", 1); +copy_mem_info(IPS_INFOSTR * info, char *data, int len) +{ + METHOD_TRACE("copy_mem_info", 1); - if (info->pos + len < info->offset) { - info->pos += len; - return; - } - - if (info->pos < info->offset) { - data += (info->offset - info->pos); - len -= (info->offset - info->pos); - info->pos += (info->offset - info->pos); - } - - if (info->localpos + len > info->length) - len = info->length - info->localpos; - - if (len > 0) { - memcpy(info->buffer + info->localpos, data, len); - info->pos += len; - info->localpos += len; - } + if (info->pos + len < info->offset) { + info->pos += len; + return; + } + + if (info->pos < info->offset) { + data += (info->offset - info->pos); + len -= (info->offset - info->pos); + info->pos += (info->offset - info->pos); + } + + if (info->localpos + len > info->length) + len = info->length - info->localpos; + + if (len > 0) { + memcpy(info->buffer + info->localpos, data, len); + info->pos += len; + info->localpos += len; + } } /****************************************************************************/ @@ -2853,20 
+2204,21 @@ copy_mem_info(IPS_INFOSTR *info, char *d /* */ /****************************************************************************/ static int -copy_info(IPS_INFOSTR *info, char *fmt, ...) { - va_list args; - char buf[128]; - int len; +copy_info(IPS_INFOSTR * info, char *fmt, ...) +{ + va_list args; + char buf[128]; + int len; - METHOD_TRACE("copy_info", 1); + METHOD_TRACE("copy_info", 1); - va_start(args, fmt); - len = vsprintf(buf, fmt, args); - va_end(args); + va_start(args, fmt); + len = vsprintf(buf, fmt, args); + va_end(args); - copy_mem_info(info, buf, len); + copy_mem_info(info, buf, len); - return (len); + return (len); } /****************************************************************************/ @@ -2879,71 +2231,73 @@ copy_info(IPS_INFOSTR *info, char *fmt, /* */ /****************************************************************************/ static void -ips_identify_controller(ips_ha_t *ha) { - METHOD_TRACE("ips_identify_controller", 1); +ips_identify_controller(ips_ha_t * ha) +{ + METHOD_TRACE("ips_identify_controller", 1); - switch (ha->device_id) { - case IPS_DEVICEID_COPPERHEAD: - if (ha->revision_id <= IPS_REVID_SERVERAID) { - ha->ad_type = IPS_ADTYPE_SERVERAID; - } else if (ha->revision_id == IPS_REVID_SERVERAID2) { - ha->ad_type = IPS_ADTYPE_SERVERAID2; - } else if (ha->revision_id == IPS_REVID_NAVAJO) { - ha->ad_type = IPS_ADTYPE_NAVAJO; - } else if ((ha->revision_id == IPS_REVID_SERVERAID2) && (ha->slot_num == 0)) { - ha->ad_type = IPS_ADTYPE_KIOWA; - } else if ((ha->revision_id >= IPS_REVID_CLARINETP1) && - (ha->revision_id <= IPS_REVID_CLARINETP3)) { - if (ha->enq->ucMaxPhysicalDevices == 15) - ha->ad_type = IPS_ADTYPE_SERVERAID3L; - else - ha->ad_type = IPS_ADTYPE_SERVERAID3; - } else if ((ha->revision_id >= IPS_REVID_TROMBONE32) && - (ha->revision_id <= IPS_REVID_TROMBONE64)) { - ha->ad_type = IPS_ADTYPE_SERVERAID4H; - } - break; - - case IPS_DEVICEID_MORPHEUS: - switch (ha->subdevice_id) { - case IPS_SUBDEVICEID_4L: - ha->ad_type = 
IPS_ADTYPE_SERVERAID4L; - break; - - case IPS_SUBDEVICEID_4M: - ha->ad_type = IPS_ADTYPE_SERVERAID4M; - break; - - case IPS_SUBDEVICEID_4MX: - ha->ad_type = IPS_ADTYPE_SERVERAID4MX; - break; - - case IPS_SUBDEVICEID_4LX: - ha->ad_type = IPS_ADTYPE_SERVERAID4LX; - break; - - case IPS_SUBDEVICEID_5I2: - ha->ad_type = IPS_ADTYPE_SERVERAID5I2; - break; - - case IPS_SUBDEVICEID_5I1: - ha->ad_type = IPS_ADTYPE_SERVERAID5I1; - break; - } - - break; - - case IPS_DEVICEID_MARCO: - switch (ha->subdevice_id) { - case IPS_SUBDEVICEID_6M: - ha->ad_type = IPS_ADTYPE_SERVERAID6M; - break; - case IPS_SUBDEVICEID_6I: - ha->ad_type = IPS_ADTYPE_SERVERAID6I; - break; - } - break; - } + switch (ha->device_id) { + case IPS_DEVICEID_COPPERHEAD: + if (ha->revision_id <= IPS_REVID_SERVERAID) { + ha->ad_type = IPS_ADTYPE_SERVERAID; + } else if (ha->revision_id == IPS_REVID_SERVERAID2) { + ha->ad_type = IPS_ADTYPE_SERVERAID2; + } else if (ha->revision_id == IPS_REVID_NAVAJO) { + ha->ad_type = IPS_ADTYPE_NAVAJO; + } else if ((ha->revision_id == IPS_REVID_SERVERAID2) + && (ha->slot_num == 0)) { + ha->ad_type = IPS_ADTYPE_KIOWA; + } else if ((ha->revision_id >= IPS_REVID_CLARINETP1) && + (ha->revision_id <= IPS_REVID_CLARINETP3)) { + if (ha->enq->ucMaxPhysicalDevices == 15) + ha->ad_type = IPS_ADTYPE_SERVERAID3L; + else + ha->ad_type = IPS_ADTYPE_SERVERAID3; + } else if ((ha->revision_id >= IPS_REVID_TROMBONE32) && + (ha->revision_id <= IPS_REVID_TROMBONE64)) { + ha->ad_type = IPS_ADTYPE_SERVERAID4H; + } + break; + + case IPS_DEVICEID_MORPHEUS: + switch (ha->subdevice_id) { + case IPS_SUBDEVICEID_4L: + ha->ad_type = IPS_ADTYPE_SERVERAID4L; + break; + + case IPS_SUBDEVICEID_4M: + ha->ad_type = IPS_ADTYPE_SERVERAID4M; + break; + + case IPS_SUBDEVICEID_4MX: + ha->ad_type = IPS_ADTYPE_SERVERAID4MX; + break; + + case IPS_SUBDEVICEID_4LX: + ha->ad_type = IPS_ADTYPE_SERVERAID4LX; + break; + + case IPS_SUBDEVICEID_5I2: + ha->ad_type = IPS_ADTYPE_SERVERAID5I2; + break; + + case IPS_SUBDEVICEID_5I1: + 
ha->ad_type = IPS_ADTYPE_SERVERAID5I1; + break; + } + + break; + + case IPS_DEVICEID_MARCO: + switch (ha->subdevice_id) { + case IPS_SUBDEVICEID_6M: + ha->ad_type = IPS_ADTYPE_SERVERAID6M; + break; + case IPS_SUBDEVICEID_6I: + ha->ad_type = IPS_ADTYPE_SERVERAID6I; + break; + } + break; + } } /****************************************************************************/ @@ -2956,158 +2310,164 @@ ips_identify_controller(ips_ha_t *ha) { /* */ /****************************************************************************/ static void -ips_get_bios_version(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - uint8_t major; - uint8_t minor; - uint8_t subminor; - uint8_t *buffer; - char hexDigits[] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; - - METHOD_TRACE("ips_get_bios_version", 1); - - major = 0; - minor = 0; - - strncpy(ha->bios_version, " ?", 8); - - if (ha->device_id == IPS_DEVICEID_COPPERHEAD) { - if (IPS_USE_MEMIO(ha)) { - /* Memory Mapped I/O */ - - /* test 1st byte */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) - return; - - writel(1, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) - return; - - /* Get Major version */ - writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - major = readb(ha->mem_ptr + IPS_REG_FLDP); - - /* Get Minor version */ - writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - minor = readb(ha->mem_ptr + IPS_REG_FLDP); - - /* Get SubMinor version */ - writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - subminor = readb(ha->mem_ptr + IPS_REG_FLDP); - - } else { - /* Programmed I/O */ - - /* test 1st 
byte */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) - return ; - - outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) - return ; - - /* Get Major version */ - outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - major = inb(ha->io_addr + IPS_REG_FLDP); - - /* Get Minor version */ - outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - minor = inb(ha->io_addr + IPS_REG_FLDP); - - /* Get SubMinor version */ - outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - subminor = inb(ha->io_addr + IPS_REG_FLDP); - - } - } else { - /* Morpheus Family - Send Command to the card */ - - buffer = kmalloc(0x1000, GFP_ATOMIC); - if (!buffer) - return; - - memset(buffer, 0, 0x1000); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_RW_BIOSFW; - - scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW; - scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flashfw.type = 1; - scb->cmd.flashfw.direction = 0; - scb->cmd.flashfw.count = cpu_to_le32(0x800); - scb->cmd.flashfw.total_packets = 1; - scb->cmd.flashfw.packet_num = 0; - scb->data_len = 0x1000; - scb->data_busaddr = pci_map_single(ha->pcidev, buffer, scb->data_len, - IPS_DMA_DIR(scb)); - scb->cmd.flashfw.buffer_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* issue the command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { - /* Error occurred */ - 
kfree(buffer); - - return; - } - - if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) { - major = buffer[0x1ff + 0xC0]; /* Offset 0x1ff after the header (0xc0) */ - minor = buffer[0x1fe + 0xC0]; /* Offset 0x1fe after the header (0xc0) */ - subminor = buffer[0x1fd + 0xC0]; /* Offset 0x1fd after the header (0xc0) */ - } else { - return; - } - - kfree(buffer); - } - - ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4]; - ha->bios_version[1] = '.'; - ha->bios_version[2] = hexDigits[major & 0x0F]; - ha->bios_version[3] = hexDigits[subminor]; - ha->bios_version[4] = '.'; - ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4]; - ha->bios_version[6] = hexDigits[minor & 0x0F]; - ha->bios_version[7] = 0; +ips_get_bios_version(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + uint8_t major; + uint8_t minor; + uint8_t subminor; + uint8_t *buffer; + char hexDigits[] = + { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', + 'D', 'E', 'F' }; + + METHOD_TRACE("ips_get_bios_version", 1); + + major = 0; + minor = 0; + + strncpy(ha->bios_version, " ?", 8); + + if (ha->device_id == IPS_DEVICEID_COPPERHEAD) { + if (IPS_USE_MEMIO(ha)) { + /* Memory Mapped I/O */ + + /* test 1st byte */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) + return; + + writel(1, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) + return; + + /* Get Major version */ + writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + major = readb(ha->mem_ptr + IPS_REG_FLDP); + + /* Get Minor version */ + writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + minor = readb(ha->mem_ptr + IPS_REG_FLDP); + + /* Get SubMinor version */ + writel(0x1FD, 
ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + subminor = readb(ha->mem_ptr + IPS_REG_FLDP); + + } else { + /* Programmed I/O */ + + /* test 1st byte */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) + return; + + outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) + return; + + /* Get Major version */ + outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + major = inb(ha->io_addr + IPS_REG_FLDP); + + /* Get Minor version */ + outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + minor = inb(ha->io_addr + IPS_REG_FLDP); + + /* Get SubMinor version */ + outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + subminor = inb(ha->io_addr + IPS_REG_FLDP); + + } + } else { + /* Morpheus Family - Send Command to the card */ + + buffer = kmalloc(0x1000, IPS_ATOMIC_GFP); + if (!buffer) + return; + + memset(buffer, 0, 0x1000); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_RW_BIOSFW; + + scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW; + scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flashfw.type = 1; + scb->cmd.flashfw.direction = 0; + scb->cmd.flashfw.count = cpu_to_le32(0x800); + scb->cmd.flashfw.total_packets = 1; + scb->cmd.flashfw.packet_num = 0; + scb->data_len = 0x1000; + scb->data_busaddr = + pci_map_single(ha->pcidev, buffer, scb->data_len, + IPS_DMA_DIR(scb)); + scb->cmd.flashfw.buffer_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* issue 
the command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { + /* Error occurred */ + kfree(buffer); + + return; + } + + if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) { + major = buffer[0x1ff + 0xC0]; /* Offset 0x1ff after the header (0xc0) */ + minor = buffer[0x1fe + 0xC0]; /* Offset 0x1fe after the header (0xc0) */ + subminor = buffer[0x1fd + 0xC0]; /* Offset 0x1fd after the header (0xc0) */ + } else { + kfree(buffer); + return; + } + + kfree(buffer); + } + + ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4]; + ha->bios_version[1] = '.'; + ha->bios_version[2] = hexDigits[major & 0x0F]; + ha->bios_version[3] = hexDigits[subminor]; + ha->bios_version[4] = '.'; + ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4]; + ha->bios_version[6] = hexDigits[minor & 0x0F]; + ha->bios_version[7] = 0; } /****************************************************************************/ @@ -3122,129 +2482,134 @@ ips_get_bios_version(ips_ha_t *ha, int i /* */ /****************************************************************************/ static int -ips_hainit(ips_ha_t *ha) { - int i; - struct timeval tv; - - METHOD_TRACE("ips_hainit", 1); - - if (!ha) - return (0); - - if (ha->func.statinit) - (*ha->func.statinit)(ha); - - if (ha->func.enableint) - (*ha->func.enableint)(ha); - - /* Send FFDC */ - ha->reset_count = 1; - do_gettimeofday(&tv); - ha->last_ffdc = tv.tv_sec; - ips_ffdc_reset(ha, IPS_INTR_IORL); - - if (!ips_read_config(ha, IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to read config from controller.\n", - ips_name, ha->host_num); - - return (0); - } /* end if */ - - if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to read controller status.\n", - ips_name, ha->host_num); - - return (0); - } - - /* Identify this controller */ - ips_identify_controller(ha); - - if (!ips_read_subsystem_parameters(ha, 
IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to read subsystem parameters.\n", - ips_name, ha->host_num); - - return (0); - } - - /* write nvram user page 5 */ - if (!ips_write_driver_status(ha, IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to write driver info to controller.\n", - ips_name, ha->host_num); - - return (0); - } - - /* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */ - if ( (ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1) ) - ips_clear_adapter(ha, IPS_INTR_IORL); - - /* set limits on SID, LUN, BUS */ - ha->ntargets = IPS_MAX_TARGETS + 1; - ha->nlun = 1; - ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1; - - switch (ha->conf->logical_drive[0].ucStripeSize) { - case 4: - ha->max_xfer = 0x10000; - break; - - case 5: - ha->max_xfer = 0x20000; - break; - - case 6: - ha->max_xfer = 0x40000; - break; - - case 7: - default: - ha->max_xfer = 0x80000; - break; - } - - /* setup max concurrent commands */ - if (le32_to_cpu(ha->subsys->param[4]) & 0x1) { - /* Use the new method */ - ha->max_cmds = ha->enq->ucConcurrentCmdCount; - } else { - /* use the old method */ - switch (ha->conf->logical_drive[0].ucStripeSize) { - case 4: - ha->max_cmds = 32; - break; - - case 5: - ha->max_cmds = 16; - break; - - case 6: - ha->max_cmds = 8; - break; - - case 7: - default: - ha->max_cmds = 4; - break; - } - } - - /* Limit the Active Commands on a Lite Adapter */ - if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) || - (ha->ad_type == IPS_ADTYPE_SERVERAID4L) || - (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) { - if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds)) - ha->max_cmds = MaxLiteCmds; - } - - /* set controller IDs */ - ha->ha_id[0] = IPS_ADAPTER_ID; - for (i = 1; i < ha->nbus; i++) { - ha->ha_id[i] = ha->conf->init_id[i-1] & 0x1f; - ha->dcdb_active[i-1] = 0; - } +ips_hainit(ips_ha_t * ha) +{ + int i; + struct timeval tv; + + METHOD_TRACE("ips_hainit", 1); - return (1); + if (!ha) + return (0); + + 
if (ha->func.statinit) + (*ha->func.statinit) (ha); + + if (ha->func.enableint) + (*ha->func.enableint) (ha); + + /* Send FFDC */ + ha->reset_count = 1; + do_gettimeofday(&tv); + ha->last_ffdc = tv.tv_sec; + ips_ffdc_reset(ha, IPS_INTR_IORL); + + if (!ips_read_config(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to read config from controller.\n", + ips_name, ha->host_num); + + return (0); + } + /* end if */ + if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to read controller status.\n", ips_name, + ha->host_num); + + return (0); + } + + /* Identify this controller */ + ips_identify_controller(ha); + + if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to read subsystem parameters.\n", + ips_name, ha->host_num); + + return (0); + } + + /* write nvram user page 5 */ + if (!ips_write_driver_status(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to write driver info to controller.\n", + ips_name, ha->host_num); + + return (0); + } + + /* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */ + if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1)) + ips_clear_adapter(ha, IPS_INTR_IORL); + + /* set limits on SID, LUN, BUS */ + ha->ntargets = IPS_MAX_TARGETS + 1; + ha->nlun = 1; + ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1; + + switch (ha->conf->logical_drive[0].ucStripeSize) { + case 4: + ha->max_xfer = 0x10000; + break; + + case 5: + ha->max_xfer = 0x20000; + break; + + case 6: + ha->max_xfer = 0x40000; + break; + + case 7: + default: + ha->max_xfer = 0x80000; + break; + } + + /* setup max concurrent commands */ + if (le32_to_cpu(ha->subsys->param[4]) & 0x1) { + /* Use the new method */ + ha->max_cmds = ha->enq->ucConcurrentCmdCount; + } else { + /* use the old method */ + switch (ha->conf->logical_drive[0].ucStripeSize) { + case 4: + ha->max_cmds = 32; + break; + + case 5: + ha->max_cmds = 
16; + break; + + case 6: + ha->max_cmds = 8; + break; + + case 7: + default: + ha->max_cmds = 4; + break; + } + } + + /* Limit the Active Commands on a Lite Adapter */ + if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) || + (ha->ad_type == IPS_ADTYPE_SERVERAID4L) || + (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) { + if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds)) + ha->max_cmds = MaxLiteCmds; + } + + /* set controller IDs */ + ha->ha_id[0] = IPS_ADAPTER_ID; + for (i = 1; i < ha->nbus; i++) { + ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f; + ha->dcdb_active[i - 1] = 0; + } + + return (1); } /****************************************************************************/ @@ -3257,275 +2622,238 @@ ips_hainit(ips_ha_t *ha) { /* */ /****************************************************************************/ static void -ips_next(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - Scsi_Cmnd *SC; - Scsi_Cmnd *p; - Scsi_Cmnd *q; - ips_copp_wait_item_t *item; - int ret; - unsigned long cpu_flags = 0; - struct Scsi_Host *host; - METHOD_TRACE("ips_next", 1); - - if (!ha) - return ; - host = ips_sh[ha->host_num]; - /* - * Block access to the queue function so - * this command won't time out - */ - if(intr == IPS_INTR_ON) - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - - if ((ha->subsys->param[3] & 0x300000) && ( ha->scb_activelist.count == 0 )) { - struct timeval tv; - - do_gettimeofday(&tv); - - if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) { - ha->last_ffdc = tv.tv_sec; - ips_ffdc_time(ha); - } - } - - /* - * Send passthru commands - * These have priority over normal I/O - * but shouldn't affect performance too much - * since we limit the number that can be active - * on the card at any one time - */ - while ((ha->num_ioctl < IPS_MAX_IOCTL) && - (ha->copp_waitlist.head) && - (scb = ips_getscb(ha))) { - - item = ips_removeq_copp_head(&ha->copp_waitlist); - ha->num_ioctl++; - if(intr == IPS_INTR_ON) - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); - scb->scsi_cmd = item->scsi_cmd; - 
kfree(item); - - ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr); - - if(intr == IPS_INTR_ON) - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - switch (ret) { - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_OK << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ - - if (ret != IPS_SUCCESS) { - ha->num_ioctl--; - continue; - } - - ret = ips_send_cmd(ha, scb); - - if (ret == IPS_SUCCESS) - ips_putq_scb_head(&ha->scb_activelist, scb); - else - ha->num_ioctl--; - - switch(ret) { - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - } - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ - - } - - - /* - * Send "Normal" I/O commands - */ - - p = ha->scb_waitlist.head; - while ((p) && (scb = ips_getscb(ha))) { - if ((p->channel > 0) && (ha->dcdb_active[p->channel-1] & (1 << p->target))) { - ips_freescb(ha, scb); - p = (Scsi_Cmnd *) p->host_scribble; - continue; - } - - q = p; - SC = ips_removeq_wait(&ha->scb_waitlist, q); - - if(intr == IPS_INTR_ON) - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */ - - SC->result = DID_OK; - SC->host_scribble = NULL; - - memset(SC->sense_buffer, 0, sizeof(SC->sense_buffer)); - - scb->target_id = SC->target; - scb->lun = SC->lun; - scb->bus = SC->channel; - scb->scsi_cmd = SC; - scb->breakup = 0; - scb->data_len = 0; - scb->callback = ipsintr_done; - scb->timeout = ips_cmd_timeout; - memset(&scb->cmd, 0, 16); - - /* copy in the CDB */ - memcpy(scb->cdb, SC->cmnd, SC->cmd_len); - - /* Now handle the data buffer */ - if (SC->use_sg) { - struct scatterlist *sg; - int i; - - sg = SC->request_buffer; - scb->sg_count 
= pci_map_sg(ha->pcidev, sg, SC->use_sg, - scsi_to_pci_dma_dir(SC->sc_data_direction)); - scb->flags |= IPS_SCB_MAP_SG; - if (scb->sg_count == 1) { - if (sg_dma_len(sg) > ha->max_xfer) { - scb->breakup = 1; - scb->data_len = ha->max_xfer; - } else - scb->data_len = sg_dma_len(sg); - - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = sg_dma_address(sg); - scb->sg_len = 0; - } else { - /* Check for the first Element being bigger than MAX_XFER */ - if (sg_dma_len(&sg[0]) > ha->max_xfer) { - scb->sg_list[0].address = cpu_to_le32(sg_dma_address(&sg[0])); - scb->sg_list[0].length = ha->max_xfer; - scb->data_len = ha->max_xfer; - scb->breakup = 0; - scb->sg_break=1; - scb->sg_len = 1; - } else { - for (i = 0; i < scb->sg_count; i++) { - scb->sg_list[i].address = cpu_to_le32(sg_dma_address(&sg[i])); - scb->sg_list[i].length = cpu_to_le32(sg_dma_len(&sg[i])); - - if (scb->data_len + sg_dma_len(&sg[i]) > ha->max_xfer) { - /* - * Data Breakup required - */ - scb->breakup = i; - break; - } - - scb->data_len += sg_dma_len(&sg[i]); - } - - if (!scb->breakup) - scb->sg_len = scb->sg_count; - else - scb->sg_len = scb->breakup; - } - - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = scb->sg_busaddr; - } - } else { - if (SC->request_bufflen) { - if (SC->request_bufflen > ha->max_xfer) { - /* - * Data breakup required - */ - scb->breakup = 1; - scb->data_len = ha->max_xfer; - } else { - scb->data_len = SC->request_bufflen; - } - - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = pci_map_single(ha->pcidev, SC->request_buffer, - scb->data_len, - scsi_to_pci_dma_dir(SC->sc_data_direction)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->sg_len = 0; - } else { - scb->data_busaddr = 0L; - scb->sg_len = 0; - scb->data_len = 0; - scb->dcdb.transfer_length = 0; - } - - } - - scb->dcdb.cmd_attribute = ips_command_direction[scb->scsi_cmd->cmnd[0]]; - - if (!scb->dcdb.cmd_attribute & 0x3) - scb->dcdb.transfer_length = 0; - - if (scb->data_len >= 
IPS_MAX_XFER) { - scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; - scb->dcdb.transfer_length = 0; - } - if(intr == IPS_INTR_ON) - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - - ret = ips_send_cmd(ha, scb); - - switch(ret) { - case IPS_SUCCESS: - ips_putq_scb_head(&ha->scb_activelist, scb); - break; - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - if (scb->bus) - ha->dcdb_active[scb->bus-1] &= ~(1 << scb->target_id); - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - if (scb->scsi_cmd) - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - - if (scb->bus) - ha->dcdb_active[scb->bus-1] &= ~(1 << scb->target_id); - - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ +ips_next(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + Scsi_Cmnd *SC; + Scsi_Cmnd *p; + Scsi_Cmnd *q; + ips_copp_wait_item_t *item; + int ret; + unsigned long cpu_flags = 0; + struct Scsi_Host *host; + METHOD_TRACE("ips_next", 1); + + if (!ha) + return; + host = ips_sh[ha->host_num]; + /* + * Block access to the queue function so + * this command won't time out + */ + if (intr == IPS_INTR_ON) + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + + if ((ha->subsys->param[3] & 0x300000) + && (ha->scb_activelist.count == 0)) { + struct timeval tv; + + do_gettimeofday(&tv); + + if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) { + ha->last_ffdc = tv.tv_sec; + ips_ffdc_time(ha); + } + } + + /* + * Send passthru commands + * These have priority over normal I/O + * but shouldn't affect performance too much + * since we limit the number that can be active + * on the card at any one time + */ + while ((ha->num_ioctl < IPS_MAX_IOCTL) && + (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) { + + item = ips_removeq_copp_head(&ha->copp_waitlist); + ha->num_ioctl++; + if (intr == IPS_INTR_ON) + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + scb->scsi_cmd = item->scsi_cmd; + kfree(item); + + ret = ips_make_passthru(ha, 
scb->scsi_cmd, scb, intr); + + if (intr == IPS_INTR_ON) + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_OK << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + if (ret != IPS_SUCCESS) { + ha->num_ioctl--; + continue; + } + + ret = ips_send_cmd(ha, scb); + + if (ret == IPS_SUCCESS) + ips_putq_scb_head(&ha->scb_activelist, scb); + else + ha->num_ioctl--; + + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + } + + /* + * Send "Normal" I/O commands + */ + + p = ha->scb_waitlist.head; + while ((p) && (scb = ips_getscb(ha))) { + if ((p->channel > 0) + && (ha->dcdb_active[p->channel - 1] & (1 << p->target))) { + ips_freescb(ha, scb); + p = (Scsi_Cmnd *) p->host_scribble; + continue; + } + + q = p; + SC = ips_removeq_wait(&ha->scb_waitlist, q); + + if (intr == IPS_INTR_ON) + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */ + + SC->result = DID_OK; + SC->host_scribble = NULL; + + memset(SC->sense_buffer, 0, sizeof (SC->sense_buffer)); + + scb->target_id = SC->target; + scb->lun = SC->lun; + scb->bus = SC->channel; + scb->scsi_cmd = SC; + scb->breakup = 0; + scb->data_len = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + memset(&scb->cmd, 0, 16); + + /* copy in the CDB */ + memcpy(scb->cdb, SC->cmnd, SC->cmd_len); + + /* Now handle the data buffer */ + if (SC->use_sg) { + struct scatterlist *sg; + int i; + + sg = SC->request_buffer; + scb->sg_count = pci_map_sg(ha->pcidev, sg, 
SC->use_sg, + scsi_to_pci_dma_dir(SC-> + sc_data_direction)); + scb->flags |= IPS_SCB_MAP_SG; + for (i = 0; i < scb->sg_count; i++) { + if (ips_fill_scb_sg_single + (ha, sg_dma_address(&sg[i]), scb, i, + sg_dma_len(&sg[i])) < 0) + break; + } + scb->dcdb.transfer_length = scb->data_len; + } else { + if (SC->request_bufflen) { + scb->data_busaddr = + pci_map_single(ha->pcidev, + SC->request_buffer, + SC->request_bufflen, + scsi_to_pci_dma_dir(SC-> + sc_data_direction)); + scb->flags |= IPS_SCB_MAP_SINGLE; + ips_fill_scb_sg_single(ha, scb->data_busaddr, + scb, 0, + SC->request_bufflen); + scb->dcdb.transfer_length = scb->data_len; + } else { + scb->data_busaddr = 0L; + scb->sg_len = 0; + scb->data_len = 0; + scb->dcdb.transfer_length = 0; + } + + } + + scb->dcdb.cmd_attribute = + ips_command_direction[scb->scsi_cmd->cmnd[0]]; + + if (!(scb->dcdb.cmd_attribute & 0x3)) + scb->dcdb.transfer_length = 0; + + if (scb->data_len >= IPS_MAX_XFER) { + scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; + scb->dcdb.transfer_length = 0; + } + if (intr == IPS_INTR_ON) + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + + ret = ips_send_cmd(ha, scb); + + switch (ret) { + case IPS_SUCCESS: + ips_putq_scb_head(&ha->scb_activelist, scb); + break; + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + if (scb->bus) + ha->dcdb_active[scb->bus - 1] &= + ~(1 << scb->target_id); + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + + if (scb->bus) + ha->dcdb_active[scb->bus - 1] &= + ~(1 << scb->target_id); + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ - p = (Scsi_Cmnd *) p->host_scribble; + p = (Scsi_Cmnd *) p->host_scribble; - } /* end while */ + } /* end while */ - if(intr == IPS_INTR_ON) - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + if (intr == IPS_INTR_ON) + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); } 
/****************************************************************************/ @@ -3540,19 +2868,20 @@ ips_next(ips_ha_t *ha, int intr) { /* */ /****************************************************************************/ static inline void -ips_putq_scb_head(ips_scb_queue_t *queue, ips_scb_t *item) { - METHOD_TRACE("ips_putq_scb_head", 1); +ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item) +{ + METHOD_TRACE("ips_putq_scb_head", 1); - if (!item) - return ; + if (!item) + return; - item->q_next = queue->head; - queue->head = item; + item->q_next = queue->head; + queue->head = item; - if (!queue->tail) - queue->tail = item; + if (!queue->tail) + queue->tail = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3567,23 +2896,24 @@ ips_putq_scb_head(ips_scb_queue_t *queue /* */ /****************************************************************************/ static inline void -ips_putq_scb_tail(ips_scb_queue_t *queue, ips_scb_t *item) { - METHOD_TRACE("ips_putq_scb_tail", 1); +ips_putq_scb_tail(ips_scb_queue_t * queue, ips_scb_t * item) +{ + METHOD_TRACE("ips_putq_scb_tail", 1); - if (!item) - return ; + if (!item) + return; - item->q_next = NULL; + item->q_next = NULL; - if (queue->tail) - queue->tail->q_next = item; + if (queue->tail) + queue->tail->q_next = item; - queue->tail = item; + queue->tail = item; - if (!queue->head) - queue->head = item; + if (!queue->head) + queue->head = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3598,26 +2928,27 @@ ips_putq_scb_tail(ips_scb_queue_t *queue /* */ /****************************************************************************/ static inline ips_scb_t * -ips_removeq_scb_head(ips_scb_queue_t *queue) { - ips_scb_t *item; +ips_removeq_scb_head(ips_scb_queue_t * queue) +{ + ips_scb_t *item; - METHOD_TRACE("ips_removeq_scb_head", 1); + 
METHOD_TRACE("ips_removeq_scb_head", 1); - item = queue->head; + item = queue->head; - if (!item) { - return (NULL); - } + if (!item) { + return (NULL); + } - queue->head = item->q_next; - item->q_next = NULL; + queue->head = item->q_next; + item->q_next = NULL; - if (queue->tail == item) - queue->tail = NULL; + if (queue->tail == item) + queue->tail = NULL; - queue->count--; + queue->count--; - return (item); + return (item); } /****************************************************************************/ @@ -3632,37 +2963,38 @@ ips_removeq_scb_head(ips_scb_queue_t *qu /* */ /****************************************************************************/ static inline ips_scb_t * -ips_removeq_scb(ips_scb_queue_t *queue, ips_scb_t *item) { - ips_scb_t *p; +ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item) +{ + ips_scb_t *p; - METHOD_TRACE("ips_removeq_scb", 1); + METHOD_TRACE("ips_removeq_scb", 1); - if (!item) - return (NULL); + if (!item) + return (NULL); - if (item == queue->head) { - return (ips_removeq_scb_head(queue)); - } + if (item == queue->head) { + return (ips_removeq_scb_head(queue)); + } - p = queue->head; + p = queue->head; - while ((p) && (item != p->q_next)) - p = p->q_next; + while ((p) && (item != p->q_next)) + p = p->q_next; - if (p) { - /* found a match */ - p->q_next = item->q_next; + if (p) { + /* found a match */ + p->q_next = item->q_next; - if (!item->q_next) - queue->tail = p; + if (!item->q_next) + queue->tail = p; - item->q_next = NULL; - queue->count--; + item->q_next = NULL; + queue->count--; - return (item); - } + return (item); + } - return (NULL); + return (NULL); } /****************************************************************************/ @@ -3677,19 +3009,20 @@ ips_removeq_scb(ips_scb_queue_t *queue, /* */ /****************************************************************************/ static inline void -ips_putq_wait_head(ips_wait_queue_t *queue, Scsi_Cmnd *item) { - METHOD_TRACE("ips_putq_wait_head", 1); 
+ips_putq_wait_head(ips_wait_queue_t * queue, Scsi_Cmnd * item) +{ + METHOD_TRACE("ips_putq_wait_head", 1); - if (!item) - return ; + if (!item) + return; - item->host_scribble = (char *) queue->head; - queue->head = item; + item->host_scribble = (char *) queue->head; + queue->head = item; - if (!queue->tail) - queue->tail = item; + if (!queue->tail) + queue->tail = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3704,23 +3037,24 @@ ips_putq_wait_head(ips_wait_queue_t *que /* */ /****************************************************************************/ static inline void -ips_putq_wait_tail(ips_wait_queue_t *queue, Scsi_Cmnd *item) { - METHOD_TRACE("ips_putq_wait_tail", 1); +ips_putq_wait_tail(ips_wait_queue_t * queue, Scsi_Cmnd * item) +{ + METHOD_TRACE("ips_putq_wait_tail", 1); - if (!item) - return ; + if (!item) + return; - item->host_scribble = NULL; + item->host_scribble = NULL; - if (queue->tail) - queue->tail->host_scribble = (char *)item; + if (queue->tail) + queue->tail->host_scribble = (char *) item; - queue->tail = item; + queue->tail = item; - if (!queue->head) - queue->head = item; + if (!queue->head) + queue->head = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3735,26 +3069,27 @@ ips_putq_wait_tail(ips_wait_queue_t *que /* */ /****************************************************************************/ static inline Scsi_Cmnd * -ips_removeq_wait_head(ips_wait_queue_t *queue) { - Scsi_Cmnd *item; +ips_removeq_wait_head(ips_wait_queue_t * queue) +{ + Scsi_Cmnd *item; - METHOD_TRACE("ips_removeq_wait_head", 1); + METHOD_TRACE("ips_removeq_wait_head", 1); - item = queue->head; + item = queue->head; - if (!item) { - return (NULL); - } + if (!item) { + return (NULL); + } - queue->head = (Scsi_Cmnd *) item->host_scribble; - item->host_scribble = NULL; + queue->head = (Scsi_Cmnd *) 
item->host_scribble; + item->host_scribble = NULL; - if (queue->tail == item) - queue->tail = NULL; + if (queue->tail == item) + queue->tail = NULL; - queue->count--; + queue->count--; - return (item); + return (item); } /****************************************************************************/ @@ -3769,37 +3104,38 @@ ips_removeq_wait_head(ips_wait_queue_t * /* */ /****************************************************************************/ static inline Scsi_Cmnd * -ips_removeq_wait(ips_wait_queue_t *queue, Scsi_Cmnd *item) { - Scsi_Cmnd *p; +ips_removeq_wait(ips_wait_queue_t * queue, Scsi_Cmnd * item) +{ + Scsi_Cmnd *p; - METHOD_TRACE("ips_removeq_wait", 1); + METHOD_TRACE("ips_removeq_wait", 1); - if (!item) - return (NULL); + if (!item) + return (NULL); - if (item == queue->head) { - return (ips_removeq_wait_head(queue)); - } + if (item == queue->head) { + return (ips_removeq_wait_head(queue)); + } - p = queue->head; + p = queue->head; - while ((p) && (item != (Scsi_Cmnd *) p->host_scribble)) - p = (Scsi_Cmnd *) p->host_scribble; + while ((p) && (item != (Scsi_Cmnd *) p->host_scribble)) + p = (Scsi_Cmnd *) p->host_scribble; - if (p) { - /* found a match */ - p->host_scribble = item->host_scribble; + if (p) { + /* found a match */ + p->host_scribble = item->host_scribble; - if (!item->host_scribble) - queue->tail = p; + if (!item->host_scribble) + queue->tail = p; - item->host_scribble = NULL; - queue->count--; + item->host_scribble = NULL; + queue->count--; - return (item); - } + return (item); + } - return (NULL); + return (NULL); } /****************************************************************************/ @@ -3814,19 +3150,20 @@ ips_removeq_wait(ips_wait_queue_t *queue /* */ /****************************************************************************/ static inline void -ips_putq_copp_head(ips_copp_queue_t *queue, ips_copp_wait_item_t *item) { - METHOD_TRACE("ips_putq_copp_head", 1); +ips_putq_copp_head(ips_copp_queue_t * queue, ips_copp_wait_item_t 
* item) +{ + METHOD_TRACE("ips_putq_copp_head", 1); - if (!item) - return ; + if (!item) + return; - item->next = queue->head; - queue->head = item; + item->next = queue->head; + queue->head = item; - if (!queue->tail) - queue->tail = item; + if (!queue->tail) + queue->tail = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3841,23 +3178,24 @@ ips_putq_copp_head(ips_copp_queue_t *que /* */ /****************************************************************************/ static inline void -ips_putq_copp_tail(ips_copp_queue_t *queue, ips_copp_wait_item_t *item) { - METHOD_TRACE("ips_putq_copp_tail", 1); +ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) +{ + METHOD_TRACE("ips_putq_copp_tail", 1); - if (!item) - return ; + if (!item) + return; - item->next = NULL; + item->next = NULL; - if (queue->tail) - queue->tail->next = item; + if (queue->tail) + queue->tail->next = item; - queue->tail = item; + queue->tail = item; - if (!queue->head) - queue->head = item; + if (!queue->head) + queue->head = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3872,26 +3210,27 @@ ips_putq_copp_tail(ips_copp_queue_t *que /* */ /****************************************************************************/ static inline ips_copp_wait_item_t * -ips_removeq_copp_head(ips_copp_queue_t *queue) { - ips_copp_wait_item_t *item; +ips_removeq_copp_head(ips_copp_queue_t * queue) +{ + ips_copp_wait_item_t *item; - METHOD_TRACE("ips_removeq_copp_head", 1); + METHOD_TRACE("ips_removeq_copp_head", 1); - item = queue->head; + item = queue->head; - if (!item) { - return (NULL); - } + if (!item) { + return (NULL); + } - queue->head = item->next; - item->next = NULL; + queue->head = item->next; + item->next = NULL; - if (queue->tail == item) - queue->tail = NULL; + if (queue->tail == item) + queue->tail = NULL; - queue->count--; 
+ queue->count--; - return (item); + return (item); } /****************************************************************************/ @@ -3906,37 +3245,38 @@ ips_removeq_copp_head(ips_copp_queue_t * /* */ /****************************************************************************/ static inline ips_copp_wait_item_t * -ips_removeq_copp(ips_copp_queue_t *queue, ips_copp_wait_item_t *item) { - ips_copp_wait_item_t *p; +ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) +{ + ips_copp_wait_item_t *p; - METHOD_TRACE("ips_removeq_copp", 1); + METHOD_TRACE("ips_removeq_copp", 1); - if (!item) - return (NULL); + if (!item) + return (NULL); - if (item == queue->head) { - return (ips_removeq_copp_head(queue)); - } + if (item == queue->head) { + return (ips_removeq_copp_head(queue)); + } - p = queue->head; + p = queue->head; - while ((p) && (item != p->next)) - p = p->next; + while ((p) && (item != p->next)) + p = p->next; - if (p) { - /* found a match */ - p->next = item->next; + if (p) { + /* found a match */ + p->next = item->next; - if (!item->next) - queue->tail = p; + if (!item->next) + queue->tail = p; - item->next = NULL; - queue->count--; + item->next = NULL; + queue->count--; - return (item); - } + return (item); + } - return (NULL); + return (NULL); } /****************************************************************************/ @@ -3949,16 +3289,16 @@ ips_removeq_copp(ips_copp_queue_t *queue /* */ /****************************************************************************/ static void -ipsintr_blocking(ips_ha_t *ha, ips_scb_t *scb) { - METHOD_TRACE("ipsintr_blocking", 2); +ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ipsintr_blocking", 2); - ips_freescb(ha, scb); - if ((ha->waitflag == TRUE) && - (ha->cmd_in_progress == scb->cdb[0])) { - ha->waitflag = FALSE; + ips_freescb(ha, scb); + if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) { + ha->waitflag = FALSE; - return ; - } + return; + } } 
/****************************************************************************/ @@ -3971,25 +3311,27 @@ ipsintr_blocking(ips_ha_t *ha, ips_scb_t /* */ /****************************************************************************/ static void -ipsintr_done(ips_ha_t *ha, ips_scb_t *scb) { - METHOD_TRACE("ipsintr_done", 2); +ipsintr_done(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ipsintr_done", 2); - if (!scb) { - printk(KERN_WARNING "(%s%d) Spurious interrupt; scb NULL.\n", - ips_name, ha->host_num); + if (!scb) { + printk(KERN_WARNING "(%s%d) Spurious interrupt; scb NULL.\n", + ips_name, ha->host_num); - return ; - } + return; + } - if (scb->scsi_cmd == NULL) { - /* unexpected interrupt */ - printk(KERN_WARNING "(%s%d) Spurious interrupt; scsi_cmd not set.\n", - ips_name, ha->host_num); + if (scb->scsi_cmd == NULL) { + /* unexpected interrupt */ + printk(KERN_WARNING + "(%s%d) Spurious interrupt; scsi_cmd not set.\n", + ips_name, ha->host_num); - return; - } + return; + } - ips_done(ha, scb); + ips_done(ha, scb); } /****************************************************************************/ @@ -4002,180 +3344,118 @@ ipsintr_done(ips_ha_t *ha, ips_scb_t *sc /* ASSUMED to be called form within the request lock */ /****************************************************************************/ static void -ips_done(ips_ha_t *ha, ips_scb_t *scb) { - int ret; +ips_done(ips_ha_t * ha, ips_scb_t * scb) +{ + int ret; - METHOD_TRACE("ips_done", 1); + METHOD_TRACE("ips_done", 1); - if (!scb) - return ; + if (!scb) + return; - if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) { - ips_cleanup_passthru(ha, scb); - ha->num_ioctl--; - } else { - /* - * Check to see if this command had too much - * data and had to be broke up. If so, queue - * the rest of the data and continue. 
- */ - if ((scb->breakup) || (scb->sg_break)) { - /* we had a data breakup */ - uint8_t bk_save; - - bk_save = scb->breakup; - scb->breakup = 0; - - if (scb->sg_count) { - /* S/G request */ - struct scatterlist *sg; - int i; - - sg = scb->scsi_cmd->request_buffer; - - if (scb->sg_count == 1) { - if (sg_dma_len(sg) - (bk_save * ha->max_xfer) > ha->max_xfer) { - /* Further breakup required */ - scb->data_len = ha->max_xfer; - scb->data_busaddr = sg_dma_address(sg) + (bk_save * ha->max_xfer); - scb->breakup = bk_save + 1; - } else { - scb->data_len = sg_dma_len(sg) - (bk_save * ha->max_xfer); - scb->data_busaddr = sg_dma_address(sg) + (bk_save * ha->max_xfer); - } - - scb->dcdb.transfer_length = scb->data_len; - scb->sg_len = 0; - } else { - /* We're here because there was MORE than one s/g unit. */ - /* bk_save points to which sg unit to look at */ - /* sg_break points to how far through this unit we are */ - /* NOTE: We will not move from one sg to another here, */ - /* just finish the one we are in. 
Not the most */ - /* efficient, but it keeps it from getting too hacky */ - - /* IF sg_break is non-zero, then just work on this current sg piece, */ - /* pointed to by bk_save */ - if (scb->sg_break) { - scb->sg_len = 1; - scb->sg_list[0].address = sg_dma_address(&sg[bk_save]) - + ha->max_xfer*scb->sg_break; - if (ha->max_xfer > sg_dma_len(&sg[bk_save]) - ha->max_xfer * scb->sg_break) - scb->sg_list[0].length = sg_dma_len(&sg[bk_save]) - ha->max_xfer * scb->sg_break; - else - scb->sg_list[0].length = ha->max_xfer; - scb->sg_break++; /* MUST GO HERE for math below to work */ - scb->data_len = scb->sg_list[0].length;; - - if (sg_dma_len(&sg[bk_save]) <= ha->max_xfer * scb->sg_break ) { - scb->sg_break = 0; /* No more work in this unit */ - if (( bk_save + 1 ) >= scb->sg_count) - scb->breakup = 0; - else - scb->breakup = bk_save + 1; - } - } else { - /* ( sg_break == 0 ), so this is our first look at a new sg piece */ - if (sg_dma_len(&sg[bk_save]) > ha->max_xfer) { - scb->sg_list[0].address = sg_dma_address(&sg[bk_save]); - scb->sg_list[0].length = ha->max_xfer; - scb->breakup = bk_save; - scb->sg_break = 1; - scb->data_len = ha->max_xfer; - scb->sg_len = 1; - } else { - /* OK, the next sg is a short one, so loop until full */ - scb->data_len = 0; - scb->sg_len = 0; - scb->sg_break = 0; - /* We're only doing full units here */ - for (i = bk_save; i < scb->sg_count; i++) { - scb->sg_list[i - bk_save].address = sg_dma_address(&sg[i]); - scb->sg_list[i - bk_save].length = cpu_to_le32(sg_dma_len(&sg[i])); - if (scb->data_len + sg_dma_len(&sg[i]) > ha->max_xfer) { - scb->breakup = i; /* sneaky, if not more work, than breakup is 0 */ - break; - } - scb->data_len += sg_dma_len(&sg[i]); - scb->sg_len++; /* only if we didn't get too big */ - } - } - } - - /* Also, we need to be sure we don't queue work ( breakup != 0 ) - if no more sg units for next time */ - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = scb->sg_busaddr; - } - - } else { - /* Non S/G 
Request */ - pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, - IPS_DMA_DIR(scb)); - if ((scb->scsi_cmd->request_bufflen - (bk_save * ha->max_xfer)) > ha->max_xfer) { - /* Further breakup required */ - scb->data_len = ha->max_xfer; - scb->data_busaddr = pci_map_single(ha->pcidev, - scb->scsi_cmd->request_buffer + - (bk_save * ha->max_xfer), - scb->data_len, IPS_DMA_DIR(scb)); - scb->breakup = bk_save + 1; - } else { - scb->data_len = scb->scsi_cmd->request_bufflen - (bk_save * ha->max_xfer); - scb->data_busaddr = pci_map_single(ha->pcidev, - scb->scsi_cmd->request_buffer + - (bk_save * ha->max_xfer), - scb->data_len, IPS_DMA_DIR(scb)); - } - - scb->dcdb.transfer_length = scb->data_len; - scb->sg_len = 0; - } - - scb->dcdb.cmd_attribute |= ips_command_direction[scb->scsi_cmd->cmnd[0]]; - - if (!scb->dcdb.cmd_attribute & 0x3) - scb->dcdb.transfer_length = 0; - - if (scb->data_len >= IPS_MAX_XFER) { - scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; - scb->dcdb.transfer_length = 0; - } - - ret = ips_send_cmd(ha, scb); - - switch(ret) { - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ - - return ; - } - } /* end if passthru */ - - if (scb->bus) { - ha->dcdb_active[scb->bus-1] &= ~(1 << scb->target_id); - } + if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) { + ips_cleanup_passthru(ha, scb); + ha->num_ioctl--; + } else { + /* + * Check to see if this command had too much + * data and had to be broke up. If so, queue + * the rest of the data and continue. 
+ */ + if ((scb->breakup) || (scb->sg_break)) { + /* we had a data breakup */ + scb->data_len = 0; + + if (scb->sg_count) { + /* S/G request */ + struct scatterlist *sg; + int ips_sg_index = 0; + int sg_dma_index; + + sg = scb->scsi_cmd->request_buffer; + + /* Spin forward to last dma chunk */ + sg_dma_index = scb->breakup; + + /* Take care of possible partial on last chunk */ + ips_fill_scb_sg_single(ha, + sg_dma_address(&sg + [sg_dma_index]), + scb, ips_sg_index++, + sg_dma_len(&sg + [sg_dma_index])); + + for (; sg_dma_index < scb->sg_count; + sg_dma_index++) { + if (ips_fill_scb_sg_single + (ha, + sg_dma_address(&sg[sg_dma_index]), + scb, ips_sg_index++, + sg_dma_len(&sg[sg_dma_index])) < 0) + break; + + } + + } else { + /* Non S/G Request */ + (void) ips_fill_scb_sg_single(ha, + scb-> + data_busaddr + + (scb->sg_break * + ha->max_xfer), + scb, 0, + scb->scsi_cmd-> + request_bufflen - + (scb->sg_break * + ha->max_xfer)); + } + + scb->dcdb.transfer_length = scb->data_len; + scb->dcdb.cmd_attribute |= + ips_command_direction[scb->scsi_cmd->cmnd[0]]; + + if (!(scb->dcdb.cmd_attribute & 0x3)) + scb->dcdb.transfer_length = 0; + + if (scb->data_len >= IPS_MAX_XFER) { + scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; + scb->dcdb.transfer_length = 0; + } + + ret = ips_send_cmd(ha, scb); + + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + return; + } + } /* end if passthru */ + + if (scb->bus) { + ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id); + } - scb->scsi_cmd->scsi_done(scb->scsi_cmd); + scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); + ips_freescb(ha, scb); } 
/****************************************************************************/ @@ -4188,118 +3468,130 @@ ips_done(ips_ha_t *ha, ips_scb_t *scb) { /* */ /****************************************************************************/ static int -ips_map_status(ips_ha_t *ha, ips_scb_t *scb, ips_stat_t *sp) { - int errcode; - int device_error; - uint32_t transfer_len; - IPS_DCDB_TABLE_TAPE *tapeDCDB; - - METHOD_TRACE("ips_map_status", 1); - - if (scb->bus) { - DEBUG_VAR(2, "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x", - ips_name, - ha->host_num, - scb->scsi_cmd->channel, - scb->scsi_cmd->target, - scb->scsi_cmd->lun, - scb->basic_status, - scb->extended_status, - scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0, - scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0, - scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0); - } - - /* default driver error */ - errcode = DID_ERROR; - device_error = 0; - - switch (scb->basic_status & IPS_GSC_STATUS_MASK) { - case IPS_CMD_TIMEOUT: - errcode = DID_TIME_OUT; - break; - - case IPS_INVAL_OPCO: - case IPS_INVAL_CMD_BLK: - case IPS_INVAL_PARM_BLK: - case IPS_LD_ERROR: - case IPS_CMD_CMPLT_WERROR: - break; - - case IPS_PHYS_DRV_ERROR: - switch (scb->extended_status) { - case IPS_ERR_SEL_TO: - if (scb->bus) - errcode = DID_NO_CONNECT; - - break; - - case IPS_ERR_OU_RUN: - if ( ( scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB ) || - ( scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG ) ) { - tapeDCDB = ( IPS_DCDB_TABLE_TAPE * ) &scb->dcdb; - transfer_len = tapeDCDB->transfer_length; - } else { - transfer_len = ( uint32_t ) scb->dcdb.transfer_length; - } - - if ((scb->bus) && (transfer_len < scb->data_len)) { - /* Underrun - set default to no error */ - errcode = DID_OK; - - /* Restrict access to physical DASD */ - if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && - ((((char *) scb->scsi_cmd->buffer)[0] & 0x1f) == TYPE_DISK)) { - /* underflow 
-- no error */ - /* restrict access to physical DASD */ - errcode = DID_TIME_OUT; - break; - } - } else - errcode = DID_ERROR; - - break; - - case IPS_ERR_RECOVERY: - /* don't fail recovered errors */ - if (scb->bus) - errcode = DID_OK; - - break; - - case IPS_ERR_HOST_RESET: - case IPS_ERR_DEV_RESET: - errcode = DID_RESET; - break; - - case IPS_ERR_CKCOND: - if (scb->bus) { - if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) || - (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)) { - tapeDCDB = (IPS_DCDB_TABLE_TAPE *) &scb->dcdb; - memcpy(scb->scsi_cmd->sense_buffer, tapeDCDB->sense_info, - sizeof(scb->scsi_cmd->sense_buffer)); - } else { - memcpy(scb->scsi_cmd->sense_buffer, scb->dcdb.sense_info, - sizeof(scb->scsi_cmd->sense_buffer)); - } - device_error = 2; /* check condition */ - } - - errcode = DID_OK; - - break; - - default: - errcode = DID_ERROR; - break; +ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp) +{ + int errcode; + int device_error; + uint32_t transfer_len; + IPS_DCDB_TABLE_TAPE *tapeDCDB; + + METHOD_TRACE("ips_map_status", 1); + + if (scb->bus) { + DEBUG_VAR(2, + "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x", + ips_name, ha->host_num, scb->scsi_cmd->channel, + scb->scsi_cmd->target, scb->scsi_cmd->lun, + scb->basic_status, scb->extended_status, + scb->extended_status == + IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0, + scb->extended_status == + IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0, + scb->extended_status == + IPS_ERR_CKCOND ? 
scb->dcdb.sense_info[13] : 0); + } + + /* default driver error */ + errcode = DID_ERROR; + device_error = 0; + + switch (scb->basic_status & IPS_GSC_STATUS_MASK) { + case IPS_CMD_TIMEOUT: + errcode = DID_TIME_OUT; + break; + + case IPS_INVAL_OPCO: + case IPS_INVAL_CMD_BLK: + case IPS_INVAL_PARM_BLK: + case IPS_LD_ERROR: + case IPS_CMD_CMPLT_WERROR: + break; + + case IPS_PHYS_DRV_ERROR: + switch (scb->extended_status) { + case IPS_ERR_SEL_TO: + if (scb->bus) + errcode = DID_NO_CONNECT; + + break; + + case IPS_ERR_OU_RUN: + if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) || + (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)) { + tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; + transfer_len = tapeDCDB->transfer_length; + } else { + transfer_len = + (uint32_t) scb->dcdb.transfer_length; + } + + if ((scb->bus) && (transfer_len < scb->data_len)) { + /* Underrun - set default to no error */ + errcode = DID_OK; + + /* Restrict access to physical DASD */ + if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && + ((((char + *) scb->scsi_cmd->buffer)[0] & 0x1f) == + TYPE_DISK)) { + /* underflow -- no error */ + /* restrict access to physical DASD */ + errcode = DID_TIME_OUT; + break; + } + } else + errcode = DID_ERROR; + + break; + + case IPS_ERR_RECOVERY: + /* don't fail recovered errors */ + if (scb->bus) + errcode = DID_OK; + + break; + + case IPS_ERR_HOST_RESET: + case IPS_ERR_DEV_RESET: + errcode = DID_RESET; + break; + + case IPS_ERR_CKCOND: + if (scb->bus) { + if ( + (scb->cmd.dcdb.op_code == + IPS_CMD_EXTENDED_DCDB) + || (scb->cmd.dcdb.op_code == + IPS_CMD_EXTENDED_DCDB_SG)) { + tapeDCDB = + (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; + memcpy(scb->scsi_cmd->sense_buffer, + tapeDCDB->sense_info, + sizeof (scb->scsi_cmd-> + sense_buffer)); + } else { + memcpy(scb->scsi_cmd->sense_buffer, + scb->dcdb.sense_info, + sizeof (scb->scsi_cmd-> + sense_buffer)); + } + device_error = 2; /* check condition */ + } + + errcode = DID_OK; + + break; + + default: + errcode = DID_ERROR; + 
break; - } /* end switch */ - } /* end switch */ + } /* end switch */ + } /* end switch */ - scb->scsi_cmd->result = device_error | (errcode << 16); + scb->scsi_cmd->result = device_error | (errcode << 16); - return (1); + return (1); } /****************************************************************************/ @@ -4314,25 +3606,90 @@ ips_map_status(ips_ha_t *ha, ips_scb_t * /* actually need to wait. */ /****************************************************************************/ static int -ips_send_wait(ips_ha_t *ha, ips_scb_t *scb, int timeout, int intr) { - int ret; +ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr) +{ + int ret; - METHOD_TRACE("ips_send_wait", 1); + METHOD_TRACE("ips_send_wait", 1); - if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */ - ha->waitflag = TRUE; - ha->cmd_in_progress = scb->cdb[0]; - } - scb->callback = ipsintr_blocking; - ret = ips_send_cmd(ha, scb); + if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */ + ha->waitflag = TRUE; + ha->cmd_in_progress = scb->cdb[0]; + } + scb->callback = ipsintr_blocking; + ret = ips_send_cmd(ha, scb); - if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM)) - return (ret); + if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM)) + return (ret); - if (intr != IPS_FFDC) /* Don't Wait around if this is a Time Stamp */ - ret = ips_wait(ha, timeout, intr); + if (intr != IPS_FFDC) /* Don't Wait around if this is a Time Stamp */ + ret = ips_wait(ha, timeout, intr); - return (ret); + return (ret); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_scmd_buf_write */ +/* */ +/* Routine Description: */ +/* Write data to Scsi_Cmnd request_buffer at proper offsets */ +/****************************************************************************/ +static void +ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, unsigned + int count) +{ + if (scmd->use_sg) { + int i; + unsigned int min_cnt, 
xfer_cnt; + char *cdata = (char *) data; + struct scatterlist *sg = scmd->request_buffer; + for (i = 0, xfer_cnt = 0; + (i < scmd->use_sg) && (xfer_cnt < count); i++) { + if (!IPS_SG_ADDRESS(&sg[i])) + return; + min_cnt = min(count - xfer_cnt, sg[i].length); + memcpy(IPS_SG_ADDRESS(&sg[i]), &cdata[xfer_cnt], + min_cnt); + xfer_cnt += min_cnt; + } + + } else { + unsigned int min_cnt = min(count, scmd->request_bufflen); + memcpy(scmd->request_buffer, data, min_cnt); + } +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_scmd_buf_read */ +/* */ +/* Routine Description: */ +/* Copy data from a Scsi_Cmnd to a new, linear buffer */ +/****************************************************************************/ +static void +ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned + int count) +{ + if (scmd->use_sg) { + int i; + unsigned int min_cnt, xfer_cnt; + char *cdata = (char *) data; + struct scatterlist *sg = scmd->request_buffer; + for (i = 0, xfer_cnt = 0; + (i < scmd->use_sg) && (xfer_cnt < count); i++) { + if (!IPS_SG_ADDRESS(&sg[i])) + return; + min_cnt = min(count - xfer_cnt, sg[i].length); + memcpy(&cdata[xfer_cnt], IPS_SG_ADDRESS(&sg[i]), + min_cnt); + xfer_cnt += min_cnt; + } + + } else { + unsigned int min_cnt = min(count, scmd->request_bufflen); + memcpy(data, scmd->request_buffer, min_cnt); + } } /****************************************************************************/ @@ -4345,288 +3702,402 @@ ips_send_wait(ips_ha_t *ha, ips_scb_t *s /* */ /****************************************************************************/ static int -ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) { - int ret; - char *sp; - int device_error; - IPS_DCDB_TABLE_TAPE *tapeDCDB; - int TimeOut; - - METHOD_TRACE("ips_send_cmd", 1); - - ret = IPS_SUCCESS; - - if (!scb->scsi_cmd) { - /* internal command */ - - if (scb->bus > 0) { - /* Controller commands can't be issued */ - /* to real devices -- fail them */ - if 
((ha->waitflag == TRUE) && - (ha->cmd_in_progress == scb->cdb[0])) { - ha->waitflag = FALSE; - } - - return (1); - } - } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) { - /* command to logical bus -- interpret */ - if(ha->scb_waitlist.count + ha->scb_activelist.count > 32) - mod_timer(&scb->scsi_cmd->eh_timeout, jiffies + 120 * HZ); - ret = IPS_SUCCESS_IMM; - - switch (scb->scsi_cmd->cmnd[0]) { - case ALLOW_MEDIUM_REMOVAL: - case REZERO_UNIT: - case ERASE: - case WRITE_FILEMARKS: - case SPACE: - scb->scsi_cmd->result = DID_ERROR << 16; - break; - - case START_STOP: - scb->scsi_cmd->result = DID_OK << 16; - - case TEST_UNIT_READY: - case INQUIRY: - scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; - scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.logical_info.reserved = 0; - scb->cmd.logical_info.reserved2 = 0; - scb->data_len = sizeof(ha->adapt->logical_drive_info); - scb->data_busaddr = pci_map_single(ha->pcidev, - &ha->adapt->logical_drive_info, - scb->data_len, IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->cmd.logical_info.buffer_addr = scb->data_busaddr; - ret = IPS_SUCCESS; - break; - - case REQUEST_SENSE: - ips_reqsen(ha, scb); - scb->scsi_cmd->result = DID_OK << 16; - break; - - case READ_6: - case WRITE_6: - if (!scb->sg_len) { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE; - } else { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_6) ? 
IPS_CMD_READ_SG : IPS_CMD_WRITE_SG; - } - - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.log_drv = scb->target_id; - scb->cmd.basic_io.sg_count = scb->sg_len; - scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); - - if (scb->cmd.basic_io.lba) - scb->cmd.basic_io.lba = cpu_to_le32(le32_to_cpu(scb->cmd.basic_io.lba) + - le16_to_cpu(scb->cmd.basic_io.sector_count)); - else - scb->cmd.basic_io.lba = (((scb->scsi_cmd->cmnd[1] & 0x1f) << 16) | - (scb->scsi_cmd->cmnd[2] << 8) | - (scb->scsi_cmd->cmnd[3])); - - scb->cmd.basic_io.sector_count = cpu_to_le16(scb->data_len / IPS_BLKSIZE); - - if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) - scb->cmd.basic_io.sector_count = cpu_to_le16(256); - - scb->cmd.basic_io.reserved = 0; - ret = IPS_SUCCESS; - break; - - case READ_10: - case WRITE_10: - if (!scb->sg_len) { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE; - } else { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_10) ? 
IPS_CMD_READ_SG : IPS_CMD_WRITE_SG; - } - - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.log_drv = scb->target_id; - scb->cmd.basic_io.sg_count = scb->sg_len; - scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); - - if (scb->cmd.basic_io.lba) - scb->cmd.basic_io.lba = cpu_to_le32(le32_to_cpu(scb->cmd.basic_io.lba) + - le16_to_cpu(scb->cmd.basic_io.sector_count)); - else - scb->cmd.basic_io.lba = ((scb->scsi_cmd->cmnd[2] << 24) | - (scb->scsi_cmd->cmnd[3] << 16) | - (scb->scsi_cmd->cmnd[4] << 8) | - scb->scsi_cmd->cmnd[5]); - - scb->cmd.basic_io.sector_count = cpu_to_le16(scb->data_len / IPS_BLKSIZE); - - scb->cmd.basic_io.reserved = 0; - - if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) { - /* - * This is a null condition - * we don't have to do anything - * so just return - */ - scb->scsi_cmd->result = DID_OK << 16; - } else - ret = IPS_SUCCESS; - - break; - - case RESERVE: - case RELEASE: - scb->scsi_cmd->result = DID_OK << 16; - break; - - case MODE_SENSE: - scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->data_len = sizeof(*ha->enq); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - ret = IPS_SUCCESS; - break; - - case READ_CAPACITY: - scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; - scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.logical_info.reserved = 0; - scb->cmd.logical_info.reserved2 = 0; - scb->cmd.logical_info.reserved3 = 0; - scb->data_len = sizeof(ha->adapt->logical_drive_info); - scb->data_busaddr = pci_map_single(ha->pcidev, - &ha->adapt->logical_drive_info, - scb->data_len, IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->cmd.logical_info.buffer_addr = scb->data_busaddr; - ret = IPS_SUCCESS; - break; - - case SEND_DIAGNOSTIC: - case REASSIGN_BLOCKS: - case FORMAT_UNIT: - 
case SEEK_10: - case VERIFY: - case READ_DEFECT_DATA: - case READ_BUFFER: - case WRITE_BUFFER: - scb->scsi_cmd->result = DID_OK << 16; - break; - - default: - /* Set the Return Info to appear like the Command was */ - /* attempted, a Check Condition occurred, and Sense */ - /* Data indicating an Invalid CDB OpCode is returned. */ - sp = (char *) scb->scsi_cmd->sense_buffer; - memset(sp, 0, sizeof(scb->scsi_cmd->sense_buffer)); - - sp[0] = 0x70; /* Error Code */ - sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. */ - sp[7] = 0x0A; /* Additional Sense Length */ - sp[12] = 0x20; /* ASC = Invalid OpCode */ - sp[13] = 0x00; /* ASCQ */ - - device_error = 2; /* Indicate Check Condition */ - scb->scsi_cmd->result = device_error | (DID_OK << 16); - break; - } /* end switch */ - } /* end if */ - - if (ret == IPS_SUCCESS_IMM) - return (ret); - - /* setup DCDB */ - if (scb->bus > 0) { - if (!scb->sg_len) - scb->cmd.dcdb.op_code = IPS_CMD_DCDB; - else - scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG; - - /* If we already know the Device is Not there, no need to attempt a Command */ - /* This also protects an NT FailOver Controller from getting CDB's sent to it */ - if ( ha->conf->dev[scb->bus-1][scb->target_id].ucState == 0 ) { - scb->scsi_cmd->result = DID_NO_CONNECT << 16; - return (IPS_SUCCESS_IMM); - } - - ha->dcdb_active[scb->bus-1] |= (1 << scb->target_id); - scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + - (unsigned long)&scb->dcdb - - (unsigned long)scb); - scb->cmd.dcdb.reserved = 0; - scb->cmd.dcdb.reserved2 = 0; - scb->cmd.dcdb.reserved3 = 0; - - TimeOut = scb->scsi_cmd->timeout_per_command; - - if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ - if (!scb->sg_len) - scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB; - else - scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB_SG; - - tapeDCDB = (IPS_DCDB_TABLE_TAPE *) &scb->dcdb; /* Use Same Data Area as Old DCDB Struct */ - 
tapeDCDB->device_address = ((scb->bus - 1) << 4) | scb->target_id; - tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED; - tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K; /* Always Turn OFF 64K Size Flag */ - - if (TimeOut) { - if (TimeOut < ( 10 * HZ )) - tapeDCDB->cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ - else if (TimeOut < (60 * HZ)) - tapeDCDB->cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ - else if (TimeOut < (1200 * HZ)) - tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ - } - - tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len; - tapeDCDB->reserved_for_LUN = 0; - tapeDCDB->transfer_length = scb->data_len; - tapeDCDB->buffer_pointer = cpu_to_le32(scb->data_busaddr); - tapeDCDB->sg_count = scb->sg_len; - tapeDCDB->sense_length = sizeof(tapeDCDB->sense_info); - tapeDCDB->scsi_status = 0; - tapeDCDB->reserved = 0; - memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd, scb->scsi_cmd->cmd_len); - } else { - scb->dcdb.device_address = ((scb->bus - 1) << 4) | scb->target_id; - scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED; - - if (TimeOut) { - if (TimeOut < (10 * HZ)) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ - else if (TimeOut < (60 * HZ)) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ - else if (TimeOut < (1200 * HZ)) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ - } - - scb->dcdb.transfer_length = scb->data_len; - if ( scb->dcdb.cmd_attribute & IPS_TRANSFER64K ) - scb->dcdb.transfer_length = 0; - scb->dcdb.buffer_pointer = cpu_to_le32(scb->data_busaddr); - scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len; - scb->dcdb.sense_length = sizeof(scb->dcdb.sense_info); - scb->dcdb.sg_count = scb->sg_len; - scb->dcdb.reserved = 0; - memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd, scb->scsi_cmd->cmd_len); - scb->dcdb.scsi_status = 0; - scb->dcdb.reserved2[0] = 0; - scb->dcdb.reserved2[1] = 0; - scb->dcdb.reserved2[2] = 0; - } - } 
+ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) +{ + int ret; + char *sp; + int device_error; + IPS_DCDB_TABLE_TAPE *tapeDCDB; + int TimeOut; + + METHOD_TRACE("ips_send_cmd", 1); + + ret = IPS_SUCCESS; + + if (!scb->scsi_cmd) { + /* internal command */ + + if (scb->bus > 0) { + /* Controller commands can't be issued */ + /* to real devices -- fail them */ + if ((ha->waitflag == TRUE) && + (ha->cmd_in_progress == scb->cdb[0])) { + ha->waitflag = FALSE; + } + + return (1); + } + } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) { + /* command to logical bus -- interpret */ + ret = IPS_SUCCESS_IMM; + + switch (scb->scsi_cmd->cmnd[0]) { + case ALLOW_MEDIUM_REMOVAL: + case REZERO_UNIT: + case ERASE: + case WRITE_FILEMARKS: + case SPACE: + scb->scsi_cmd->result = DID_ERROR << 16; + break; + + case START_STOP: + scb->scsi_cmd->result = DID_OK << 16; + + case TEST_UNIT_READY: + case INQUIRY: + if (scb->target_id == IPS_ADAPTER_ID) { + /* + * Either we have a TUR + * or we have a SCSI inquiry + */ + if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY) + scb->scsi_cmd->result = DID_OK << 16; + + if (scb->scsi_cmd->cmnd[0] == INQUIRY) { + IPS_SCSI_INQ_DATA inquiry; + + memset(&inquiry, 0, + sizeof (IPS_SCSI_INQ_DATA)); + + inquiry.DeviceType = + IPS_SCSI_INQ_TYPE_PROCESSOR; + inquiry.DeviceTypeQualifier = + IPS_SCSI_INQ_LU_CONNECTED; + inquiry.Version = IPS_SCSI_INQ_REV2; + inquiry.ResponseDataFormat = + IPS_SCSI_INQ_RD_REV2; + inquiry.AdditionalLength = 31; + inquiry.Flags[0] = + IPS_SCSI_INQ_Address16; + inquiry.Flags[1] = + IPS_SCSI_INQ_WBus16 | + IPS_SCSI_INQ_Sync; + strncpy(inquiry.VendorId, "IBM ", + 8); + strncpy(inquiry.ProductId, + "SERVERAID ", 16); + strncpy(inquiry.ProductRevisionLevel, + "1.00", 4); + + ips_scmd_buf_write(scb->scsi_cmd, + &inquiry, + sizeof (inquiry)); + + scb->scsi_cmd->result = DID_OK << 16; + } + } else { + scb->cmd.logical_info.op_code = + IPS_CMD_GET_LD_INFO; + scb->cmd.logical_info.command_id = + IPS_COMMAND_ID(ha, scb); + 
scb->cmd.logical_info.reserved = 0; + scb->cmd.logical_info.reserved2 = 0; + scb->data_len = + sizeof (ha->adapt->logical_drive_info); + scb->data_busaddr = + pci_map_single(ha->pcidev, + &ha->adapt-> + logical_drive_info, + scb->data_len, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + scb->cmd.logical_info.buffer_addr = + scb->data_busaddr; + ret = IPS_SUCCESS; + } + + break; + + case REQUEST_SENSE: + ips_reqsen(ha, scb); + scb->scsi_cmd->result = DID_OK << 16; + break; + + case READ_6: + case WRITE_6: + if (!scb->sg_len) { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE; + scb->cmd.basic_io.enhanced_sg = 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } else { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_6) ? IPS_CMD_READ_SG : + IPS_CMD_WRITE_SG; + scb->cmd.basic_io.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->sg_busaddr); + } + + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.log_drv = scb->target_id; + scb->cmd.basic_io.sg_count = scb->sg_len; + + if (scb->cmd.basic_io.lba) + scb->cmd.basic_io.lba = + cpu_to_le32(le32_to_cpu + (scb->cmd.basic_io.lba) + + le16_to_cpu(scb->cmd.basic_io. + sector_count)); + else + scb->cmd.basic_io.lba = + (((scb-> + scsi_cmd->cmnd[1] & 0x1f) << 16) | (scb-> + scsi_cmd-> + cmnd + [2] + << 8) + | (scb->scsi_cmd->cmnd[3])); + + scb->cmd.basic_io.sector_count = + cpu_to_le16(scb->data_len / IPS_BLKSIZE); + + if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) + scb->cmd.basic_io.sector_count = + cpu_to_le16(256); + + ret = IPS_SUCCESS; + break; + + case READ_10: + case WRITE_10: + if (!scb->sg_len) { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_10) ? 
IPS_CMD_READ : IPS_CMD_WRITE; + scb->cmd.basic_io.enhanced_sg = 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } else { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_10) ? IPS_CMD_READ_SG : + IPS_CMD_WRITE_SG; + scb->cmd.basic_io.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->sg_busaddr); + } + + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.log_drv = scb->target_id; + scb->cmd.basic_io.sg_count = scb->sg_len; + + if (scb->cmd.basic_io.lba) + scb->cmd.basic_io.lba = + cpu_to_le32(le32_to_cpu + (scb->cmd.basic_io.lba) + + le16_to_cpu(scb->cmd.basic_io. + sector_count)); + else + scb->cmd.basic_io.lba = + ((scb-> + scsi_cmd->cmnd[2] << 24) | (scb-> + scsi_cmd-> + cmnd[3] << 16) + | (scb->scsi_cmd->cmnd[4] << 8) | scb-> + scsi_cmd->cmnd[5]); + + scb->cmd.basic_io.sector_count = + cpu_to_le16(scb->data_len / IPS_BLKSIZE); + + if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) { + /* + * This is a null condition + * we don't have to do anything + * so just return + */ + scb->scsi_cmd->result = DID_OK << 16; + } else + ret = IPS_SUCCESS; + + break; + + case RESERVE: + case RELEASE: + scb->scsi_cmd->result = DID_OK << 16; + break; + + case MODE_SENSE: + scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.enhanced_sg = 0; + scb->data_len = sizeof (*ha->enq); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, + scb->data_len, + IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + ret = IPS_SUCCESS; + break; + + case READ_CAPACITY: + scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; + scb->cmd.logical_info.command_id = + IPS_COMMAND_ID(ha, scb); + scb->cmd.logical_info.reserved = 0; + scb->cmd.logical_info.reserved2 = 0; + 
scb->cmd.logical_info.reserved3 = 0; + scb->data_len = sizeof (ha->adapt->logical_drive_info); + scb->data_busaddr = pci_map_single(ha->pcidev, + &ha->adapt-> + logical_drive_info, + scb->data_len, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + scb->cmd.logical_info.buffer_addr = scb->data_busaddr; + ret = IPS_SUCCESS; + break; + + case SEND_DIAGNOSTIC: + case REASSIGN_BLOCKS: + case FORMAT_UNIT: + case SEEK_10: + case VERIFY: + case READ_DEFECT_DATA: + case READ_BUFFER: + case WRITE_BUFFER: + scb->scsi_cmd->result = DID_OK << 16; + break; + + default: + /* Set the Return Info to appear like the Command was */ + /* attempted, a Check Condition occurred, and Sense */ + /* Data indicating an Invalid CDB OpCode is returned. */ + sp = (char *) scb->scsi_cmd->sense_buffer; + memset(sp, 0, sizeof (scb->scsi_cmd->sense_buffer)); + + sp[0] = 0x70; /* Error Code */ + sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. */ + sp[7] = 0x0A; /* Additional Sense Length */ + sp[12] = 0x20; /* ASC = Invalid OpCode */ + sp[13] = 0x00; /* ASCQ */ + + device_error = 2; /* Indicate Check Condition */ + scb->scsi_cmd->result = device_error | (DID_OK << 16); + break; + } /* end switch */ + } + /* end if */ + if (ret == IPS_SUCCESS_IMM) + return (ret); + + /* setup DCDB */ + if (scb->bus > 0) { + + /* If we already know the Device is Not there, no need to attempt a Command */ + /* This also protects an NT FailOver Controller from getting CDB's sent to it */ + if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) { + scb->scsi_cmd->result = DID_NO_CONNECT << 16; + return (IPS_SUCCESS_IMM); + } + + ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id); + scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + + (unsigned long) &scb-> + dcdb - + (unsigned long) scb); + scb->cmd.dcdb.reserved = 0; + scb->cmd.dcdb.reserved2 = 0; + scb->cmd.dcdb.reserved3 = 0; + scb->cmd.dcdb.segment_4G = 0; + 
scb->cmd.dcdb.enhanced_sg = 0; + + TimeOut = scb->scsi_cmd->timeout_per_command; + + if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ + if (!scb->sg_len) { + scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB; + } else { + scb->cmd.dcdb.op_code = + IPS_CMD_EXTENDED_DCDB_SG; + scb->cmd.dcdb.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + } + + tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; /* Use Same Data Area as Old DCDB Struct */ + tapeDCDB->device_address = + ((scb->bus - 1) << 4) | scb->target_id; + tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED; + tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K; /* Always Turn OFF 64K Size Flag */ + + if (TimeOut) { + if (TimeOut < (10 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ + else if (TimeOut < (60 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ + else if (TimeOut < (1200 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ + } + + tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len; + tapeDCDB->reserved_for_LUN = 0; + tapeDCDB->transfer_length = scb->data_len; + if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG) + tapeDCDB->buffer_pointer = + cpu_to_le32(scb->sg_busaddr); + else + tapeDCDB->buffer_pointer = + cpu_to_le32(scb->data_busaddr); + tapeDCDB->sg_count = scb->sg_len; + tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info); + tapeDCDB->scsi_status = 0; + tapeDCDB->reserved = 0; + memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd, + scb->scsi_cmd->cmd_len); + } else { + if (!scb->sg_len) { + scb->cmd.dcdb.op_code = IPS_CMD_DCDB; + } else { + scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG; + scb->cmd.dcdb.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 
0xFF : 0; + } + + scb->dcdb.device_address = + ((scb->bus - 1) << 4) | scb->target_id; + scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED; + + if (TimeOut) { + if (TimeOut < (10 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ + else if (TimeOut < (60 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ + else if (TimeOut < (1200 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ + } + + scb->dcdb.transfer_length = scb->data_len; + if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K) + scb->dcdb.transfer_length = 0; + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG) + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->sg_busaddr); + else + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->data_busaddr); + scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len; + scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info); + scb->dcdb.sg_count = scb->sg_len; + scb->dcdb.reserved = 0; + memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd, + scb->scsi_cmd->cmd_len); + scb->dcdb.scsi_status = 0; + scb->dcdb.reserved2[0] = 0; + scb->dcdb.reserved2[1] = 0; + scb->dcdb.reserved2[2] = 0; + } + } - return ((*ha->func.issue)(ha, scb)); + return ((*ha->func.issue) (ha, scb)); } /****************************************************************************/ @@ -4639,169 +4110,151 @@ ips_send_cmd(ips_ha_t *ha, ips_scb_t *sc /* Assumed to be called with the HA lock */ /****************************************************************************/ static void -ips_chkstatus(ips_ha_t *ha, IPS_STATUS *pstatus) { - ips_scb_t *scb; - ips_stat_t *sp; - uint8_t basic_status; - uint8_t ext_status; - int errcode; - - METHOD_TRACE("ips_chkstatus", 1); - - scb = &ha->scbs[pstatus->fields.command_id]; - scb->basic_status = basic_status = pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK; - scb->extended_status = ext_status = pstatus->fields.extended_status; - - sp = &ha->sp; - sp->residue_len = 0; - sp->scb_addr = (void *) scb; - - /* Remove 
the item from the active queue */ - ips_removeq_scb(&ha->scb_activelist, scb); - - if (!scb->scsi_cmd) - /* internal commands are handled in do_ipsintr */ - return ; - - DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - - if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) - /* passthru - just returns the raw result */ - return ; - - errcode = DID_OK; - - if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) || - ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) { - - if (scb->bus == 0) { - if ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR) { - DEBUG_VAR(1, "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", - ips_name, ha->host_num, - scb->cmd.basic_io.op_code, basic_status, ext_status); - } - - switch (scb->scsi_cmd->cmnd[0]) { - case ALLOW_MEDIUM_REMOVAL: - case REZERO_UNIT: - case ERASE: - case WRITE_FILEMARKS: - case SPACE: - errcode = DID_ERROR; - break; - - case START_STOP: - break; - - case TEST_UNIT_READY: - if (scb->target_id == IPS_ADAPTER_ID) - break; - - if (!ips_online(ha, scb)) { - errcode = DID_TIME_OUT; - } - break; - - case INQUIRY: - if (scb->target_id == IPS_ADAPTER_ID) { - IPS_SCSI_INQ_DATA inquiry; - - memset(&inquiry, 0, sizeof(IPS_SCSI_INQ_DATA)); - - inquiry.DeviceType = IPS_SCSI_INQ_TYPE_PROCESSOR; - inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; - inquiry.Version = IPS_SCSI_INQ_REV2; - inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; - inquiry.AdditionalLength = 31; - inquiry.Flags[0] = IPS_SCSI_INQ_Address16; - inquiry.Flags[1] = IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync; - strncpy(inquiry.VendorId, "IBM ", 8); - strncpy(inquiry.ProductId, "SERVERAID ", 16); - strncpy(inquiry.ProductRevisionLevel, "1.00", 4); - - memcpy(scb->scsi_cmd->request_buffer, &inquiry, scb->scsi_cmd->request_bufflen); - - scb->scsi_cmd->result = DID_OK << 
16; - break; - } - - if (ips_online(ha, scb)) { - ips_inquiry(ha, scb); - } else { - errcode = DID_TIME_OUT; - } - break; - - case REQUEST_SENSE: - ips_reqsen(ha, scb); - break; - - case READ_6: - case WRITE_6: - case READ_10: - case WRITE_10: - case RESERVE: - case RELEASE: - break; - - case MODE_SENSE: - if (!ips_online(ha, scb) || !ips_msense(ha, scb)) { - errcode = DID_ERROR; - } - break; - - case READ_CAPACITY: - if (ips_online(ha, scb)) - ips_rdcap(ha, scb); - else { - errcode = DID_TIME_OUT; - } - break; - - case SEND_DIAGNOSTIC: - case REASSIGN_BLOCKS: - break; - - case FORMAT_UNIT: - errcode = DID_ERROR; - break; - - case SEEK_10: - case VERIFY: - case READ_DEFECT_DATA: - case READ_BUFFER: - case WRITE_BUFFER: - break; - - default: - errcode = DID_ERROR; - } /* end switch */ - - scb->scsi_cmd->result = errcode << 16; - } else { /* bus == 0 */ - /* restrict access to physical drives */ - if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && - ((((char *) scb->scsi_cmd->buffer)[0] & 0x1f) == TYPE_DISK)) { - - scb->scsi_cmd->result = DID_TIME_OUT << 16; - } - } /* else */ - } else { /* recovered error / success */ - if (scb->bus == 0) { - DEBUG_VAR(1, "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", - ips_name, ha->host_num, - scb->cmd.basic_io.op_code, basic_status, ext_status); - } +ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus) +{ + ips_scb_t *scb; + ips_stat_t *sp; + uint8_t basic_status; + uint8_t ext_status; + int errcode; + + METHOD_TRACE("ips_chkstatus", 1); + + scb = &ha->scbs[pstatus->fields.command_id]; + scb->basic_status = basic_status = + pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK; + scb->extended_status = ext_status = pstatus->fields.extended_status; + + sp = &ha->sp; + sp->residue_len = 0; + sp->scb_addr = (void *) scb; + + /* Remove the item from the active queue */ + ips_removeq_scb(&ha->scb_activelist, scb); + + if (!scb->scsi_cmd) + /* internal commands are handled in do_ipsintr */ + return; + + DEBUG_VAR(2, 
"(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + + if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) + /* passthru - just returns the raw result */ + return; + + errcode = DID_OK; + + if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) || + ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) { + + if (scb->bus == 0) { + if ((basic_status & IPS_GSC_STATUS_MASK) == + IPS_CMD_RECOVERED_ERROR) { + DEBUG_VAR(1, + "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", + ips_name, ha->host_num, + scb->cmd.basic_io.op_code, + basic_status, ext_status); + } + + switch (scb->scsi_cmd->cmnd[0]) { + case ALLOW_MEDIUM_REMOVAL: + case REZERO_UNIT: + case ERASE: + case WRITE_FILEMARKS: + case SPACE: + errcode = DID_ERROR; + break; + + case START_STOP: + break; + + case TEST_UNIT_READY: + if (!ips_online(ha, scb)) { + errcode = DID_TIME_OUT; + } + break; + + case INQUIRY: + if (ips_online(ha, scb)) { + ips_inquiry(ha, scb); + } else { + errcode = DID_TIME_OUT; + } + break; + + case REQUEST_SENSE: + ips_reqsen(ha, scb); + break; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case RESERVE: + case RELEASE: + break; + + case MODE_SENSE: + if (!ips_online(ha, scb) + || !ips_msense(ha, scb)) { + errcode = DID_ERROR; + } + break; + + case READ_CAPACITY: + if (ips_online(ha, scb)) + ips_rdcap(ha, scb); + else { + errcode = DID_TIME_OUT; + } + break; + + case SEND_DIAGNOSTIC: + case REASSIGN_BLOCKS: + break; + + case FORMAT_UNIT: + errcode = DID_ERROR; + break; + + case SEEK_10: + case VERIFY: + case READ_DEFECT_DATA: + case READ_BUFFER: + case WRITE_BUFFER: + break; + + default: + errcode = DID_ERROR; + } /* end switch */ + + scb->scsi_cmd->result = errcode << 16; + } else { /* bus == 0 */ + /* restrict access to physical drives */ + if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && + ((((char *) 
scb->scsi_cmd->buffer)[0] & 0x1f) == + TYPE_DISK)) { + + scb->scsi_cmd->result = DID_TIME_OUT << 16; + } + } /* else */ + } else { /* recovered error / success */ + if (scb->bus == 0) { + DEBUG_VAR(1, + "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", + ips_name, ha->host_num, + scb->cmd.basic_io.op_code, basic_status, + ext_status); + } - ips_map_status(ha, scb, sp); - } /* else */ + ips_map_status(ha, scb, sp); + } /* else */ } /****************************************************************************/ @@ -4814,25 +4267,30 @@ ips_chkstatus(ips_ha_t *ha, IPS_STATUS * /* */ /****************************************************************************/ static int -ips_online(ips_ha_t *ha, ips_scb_t *scb) { - METHOD_TRACE("ips_online", 1); - - if (scb->target_id >= IPS_MAX_LD) - return (0); +ips_online(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ips_online", 1); - if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) { - memset(&ha->adapt->logical_drive_info, 0, sizeof(ha->adapt->logical_drive_info)); + if (scb->target_id >= IPS_MAX_LD) + return (0); - return (0); - } - - if (ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_OFFLINE && - ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_FREE && - ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_CRS && - ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_SYS) - return (1); - else - return (0); + if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) { + memset(&ha->adapt->logical_drive_info, 0, + sizeof (ha->adapt->logical_drive_info)); + + return (0); + } + + if (ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_OFFLINE + && ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_FREE + && ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_CRS + && ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_SYS) return 
(1); + else + return (0); } /****************************************************************************/ @@ -4845,27 +4303,29 @@ ips_online(ips_ha_t *ha, ips_scb_t *scb) /* */ /****************************************************************************/ static int -ips_inquiry(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SCSI_INQ_DATA inquiry; +ips_inquiry(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_INQ_DATA inquiry; - METHOD_TRACE("ips_inquiry", 1); + METHOD_TRACE("ips_inquiry", 1); - memset(&inquiry, 0, sizeof(IPS_SCSI_INQ_DATA)); + memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA)); - inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD; - inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; - inquiry.Version = IPS_SCSI_INQ_REV2; - inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; - inquiry.AdditionalLength = 31; - inquiry.Flags[0] = IPS_SCSI_INQ_Address16; - inquiry.Flags[1] = IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync; - strncpy(inquiry.VendorId, "IBM ", 8); - strncpy(inquiry.ProductId, "SERVERAID ", 16); - strncpy(inquiry.ProductRevisionLevel, "1.00", 4); + inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD; + inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; + inquiry.Version = IPS_SCSI_INQ_REV2; + inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; + inquiry.AdditionalLength = 31; + inquiry.Flags[0] = IPS_SCSI_INQ_Address16; + inquiry.Flags[1] = + IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue; + strncpy(inquiry.VendorId, "IBM ", 8); + strncpy(inquiry.ProductId, "SERVERAID ", 16); + strncpy(inquiry.ProductRevisionLevel, "1.00", 4); - memcpy(scb->scsi_cmd->request_buffer, &inquiry, scb->scsi_cmd->request_bufflen); + ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry)); - return (1); + return (1); } /****************************************************************************/ @@ -4878,20 +4338,24 @@ ips_inquiry(ips_ha_t *ha, ips_scb_t *scb /* */ /****************************************************************************/ static int 
-ips_rdcap(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SCSI_CAPACITY *cap; +ips_rdcap(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_CAPACITY cap; - METHOD_TRACE("ips_rdcap", 1); + METHOD_TRACE("ips_rdcap", 1); - if (scb->scsi_cmd->bufflen < 8) - return (0); + if (scb->scsi_cmd->bufflen < 8) + return (0); - cap = (IPS_SCSI_CAPACITY *) scb->scsi_cmd->request_buffer; + cap.lba = + cpu_to_be32(le32_to_cpu + (ha->adapt->logical_drive_info. + drive_info[scb->target_id].sector_count) - 1); + cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE); - cap->lba = cpu_to_be32(le32_to_cpu(ha->adapt->logical_drive_info.drive_info[scb->target_id].sector_count) - 1); - cap->len = cpu_to_be32((uint32_t) IPS_BLKSIZE); + ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap)); - return (1); + return (1); } /****************************************************************************/ @@ -4904,72 +4368,78 @@ ips_rdcap(ips_ha_t *ha, ips_scb_t *scb) /* */ /****************************************************************************/ static int -ips_msense(ips_ha_t *ha, ips_scb_t *scb) { - uint16_t heads; - uint16_t sectors; - uint32_t cylinders; - IPS_SCSI_MODE_PAGE_DATA mdata; - - METHOD_TRACE("ips_msense", 1); - - if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 && - (ha->enq->ucMiscFlag & 0x8) == 0) { - heads = IPS_NORM_HEADS; - sectors = IPS_NORM_SECTORS; - } else { - heads = IPS_COMP_HEADS; - sectors = IPS_COMP_SECTORS; - } - - cylinders = (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) - 1) / (heads * sectors); - - memset(&mdata, 0, sizeof(IPS_SCSI_MODE_PAGE_DATA)); - - mdata.hdr.BlockDescLength = 8; - - switch (scb->scsi_cmd->cmnd[2] & 0x3f) { - case 0x03: /* page 3 */ - mdata.pdata.pg3.PageCode = 3; - mdata.pdata.pg3.PageLength = sizeof(IPS_SCSI_MODE_PAGE3); - mdata.hdr.DataLength = 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength; - mdata.pdata.pg3.TracksPerZone = 0; - mdata.pdata.pg3.AltSectorsPerZone = 0; - mdata.pdata.pg3.AltTracksPerZone = 0; - 
mdata.pdata.pg3.AltTracksPerVolume = 0; - mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors); - mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE); - mdata.pdata.pg3.Interleave = cpu_to_be16(1); - mdata.pdata.pg3.TrackSkew = 0; - mdata.pdata.pg3.CylinderSkew = 0; - mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector; - break; - - case 0x4: - mdata.pdata.pg4.PageCode = 4; - mdata.pdata.pg4.PageLength = sizeof(IPS_SCSI_MODE_PAGE4); - mdata.hdr.DataLength = 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength; - mdata.pdata.pg4.CylindersHigh = cpu_to_be16((cylinders >> 8) & 0xFFFF); - mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF); - mdata.pdata.pg4.Heads = heads; - mdata.pdata.pg4.WritePrecompHigh = 0; - mdata.pdata.pg4.WritePrecompLow = 0; - mdata.pdata.pg4.ReducedWriteCurrentHigh = 0; - mdata.pdata.pg4.ReducedWriteCurrentLow = 0; - mdata.pdata.pg4.StepRate = cpu_to_be16(1); - mdata.pdata.pg4.LandingZoneHigh = 0; - mdata.pdata.pg4.LandingZoneLow = 0; - mdata.pdata.pg4.flags = 0; - mdata.pdata.pg4.RotationalOffset = 0; - mdata.pdata.pg4.MediumRotationRate = 0; - break; - - default: - return (0); - } /* end switch */ +ips_msense(ips_ha_t * ha, ips_scb_t * scb) +{ + uint16_t heads; + uint16_t sectors; + uint32_t cylinders; + IPS_SCSI_MODE_PAGE_DATA mdata; + + METHOD_TRACE("ips_msense", 1); + + if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 && + (ha->enq->ucMiscFlag & 0x8) == 0) { + heads = IPS_NORM_HEADS; + sectors = IPS_NORM_SECTORS; + } else { + heads = IPS_COMP_HEADS; + sectors = IPS_COMP_SECTORS; + } + + cylinders = + (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) - + 1) / (heads * sectors); + + memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA)); + + mdata.hdr.BlockDescLength = 8; + + switch (scb->scsi_cmd->cmnd[2] & 0x3f) { + case 0x03: /* page 3 */ + mdata.pdata.pg3.PageCode = 3; + mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3); + mdata.hdr.DataLength = + 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength; 
+ mdata.pdata.pg3.TracksPerZone = 0; + mdata.pdata.pg3.AltSectorsPerZone = 0; + mdata.pdata.pg3.AltTracksPerZone = 0; + mdata.pdata.pg3.AltTracksPerVolume = 0; + mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors); + mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE); + mdata.pdata.pg3.Interleave = cpu_to_be16(1); + mdata.pdata.pg3.TrackSkew = 0; + mdata.pdata.pg3.CylinderSkew = 0; + mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector; + break; + + case 0x4: + mdata.pdata.pg4.PageCode = 4; + mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4); + mdata.hdr.DataLength = + 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength; + mdata.pdata.pg4.CylindersHigh = + cpu_to_be16((cylinders >> 8) & 0xFFFF); + mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF); + mdata.pdata.pg4.Heads = heads; + mdata.pdata.pg4.WritePrecompHigh = 0; + mdata.pdata.pg4.WritePrecompLow = 0; + mdata.pdata.pg4.ReducedWriteCurrentHigh = 0; + mdata.pdata.pg4.ReducedWriteCurrentLow = 0; + mdata.pdata.pg4.StepRate = cpu_to_be16(1); + mdata.pdata.pg4.LandingZoneHigh = 0; + mdata.pdata.pg4.LandingZoneLow = 0; + mdata.pdata.pg4.flags = 0; + mdata.pdata.pg4.RotationalOffset = 0; + mdata.pdata.pg4.MediumRotationRate = 0; + break; + + default: + return (0); + } /* end switch */ - memcpy(scb->scsi_cmd->request_buffer, &mdata, scb->scsi_cmd->request_bufflen); + ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata)); - return (1); + return (1); } /****************************************************************************/ @@ -4982,21 +4452,23 @@ ips_msense(ips_ha_t *ha, ips_scb_t *scb) /* */ /****************************************************************************/ static int -ips_reqsen(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SCSI_REQSEN reqsen; +ips_reqsen(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_REQSEN reqsen; - METHOD_TRACE("ips_reqsen", 1); + METHOD_TRACE("ips_reqsen", 1); - memset(&reqsen, 0, sizeof(IPS_SCSI_REQSEN)); + memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN)); - 
reqsen.ResponseCode = IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR; - reqsen.AdditionalLength = 10; - reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE; - reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE; + reqsen.ResponseCode = + IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR; + reqsen.AdditionalLength = 10; + reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE; + reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE; - memcpy(scb->scsi_cmd->request_buffer, &reqsen, scb->scsi_cmd->request_bufflen); + ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen)); - return (1); + return (1); } /****************************************************************************/ @@ -5009,60 +4481,63 @@ ips_reqsen(ips_ha_t *ha, ips_scb_t *scb) /* */ /****************************************************************************/ static void -ips_free(ips_ha_t *ha) { +ips_free(ips_ha_t * ha) +{ - METHOD_TRACE("ips_free", 1); + METHOD_TRACE("ips_free", 1); - if (ha) { - if (ha->enq) { - kfree(ha->enq); - ha->enq = NULL; - } - - if (ha->conf) { - kfree(ha->conf); - ha->conf = NULL; - } - - if (ha->adapt) { - pci_free_consistent(ha->pcidev,sizeof(IPS_ADAPTER)+ sizeof(IPS_IO_CMD), - ha->adapt, ha->adapt->hw_status_start); - ha->adapt = NULL; - } - - if (ha->nvram) { - kfree(ha->nvram); - ha->nvram = NULL; - } - - if (ha->subsys) { - kfree(ha->subsys); - ha->subsys = NULL; - } - - if (ha->ioctl_data) { - free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order); - ha->ioctl_data = NULL; - ha->ioctl_datasize = 0; - ha->ioctl_order = 0; - } - ips_deallocatescbs(ha, ha->max_cmds); - - /* free memory mapped (if applicable) */ - if (ha->mem_ptr) { - iounmap(ha->ioremap_ptr); - ha->ioremap_ptr = NULL; - ha->mem_ptr = NULL; - } - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - if (ha->mem_addr) - release_mem_region(ha->mem_addr, ha->mem_len); -#endif - ha->mem_addr = 0; + if (ha) { + if (ha->enq) { + kfree(ha->enq); + ha->enq = NULL; + } + + if 
(ha->conf) { + kfree(ha->conf); + ha->conf = NULL; + } + + if (ha->adapt) { + pci_free_consistent(ha->pcidev, + sizeof (IPS_ADAPTER) + + sizeof (IPS_IO_CMD), ha->adapt, + ha->adapt->hw_status_start); + ha->adapt = NULL; + } + + if (ha->nvram) { + kfree(ha->nvram); + ha->nvram = NULL; + } + + if (ha->subsys) { + kfree(ha->subsys); + ha->subsys = NULL; + } + + if (ha->ioctl_data) { + free_pages((unsigned long) ha->ioctl_data, + ha->ioctl_order); + ha->ioctl_data = NULL; + ha->ioctl_datasize = 0; + ha->ioctl_order = 0; + } + ips_deallocatescbs(ha, ha->max_cmds); + + /* free memory mapped (if applicable) */ + if (ha->mem_ptr) { + iounmap(ha->ioremap_ptr); + ha->ioremap_ptr = NULL; + ha->mem_ptr = NULL; + } + + if (ha->mem_addr) + release_mem_region(ha->mem_addr, ha->mem_len); + ha->mem_addr = 0; - } + } } + /****************************************************************************/ /* */ /* Routine Name: ips_deallocatescbs */ @@ -5073,15 +4548,18 @@ ips_free(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_deallocatescbs(ips_ha_t *ha, int cmds) { - if (ha->scbs) { - pci_free_consistent(ha->pcidev,sizeof(IPS_SG_LIST) * IPS_MAX_SG * - cmds, ha->scbs->sg_list, ha->scbs->sg_busaddr); - pci_free_consistent(ha->pcidev, sizeof(ips_scb_t) * cmds, - ha->scbs, ha->scbs->scb_busaddr); - ha->scbs = NULL; - } /* end if */ -return 1; +ips_deallocatescbs(ips_ha_t * ha, int cmds) +{ + if (ha->scbs) { + pci_free_consistent(ha->pcidev, + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds, + ha->scbs->sg_list.list, + ha->scbs->sg_busaddr); + pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds, + ha->scbs, ha->scbs->scb_busaddr); + ha->scbs = NULL; + } /* end if */ + return 1; } /****************************************************************************/ @@ -5094,44 +4572,59 @@ return 1; /* */ /****************************************************************************/ static int -ips_allocatescbs(ips_ha_t *ha) { - 
ips_scb_t *scb_p; - IPS_SG_LIST* ips_sg; - int i; - dma_addr_t command_dma, sg_dma; - - METHOD_TRACE("ips_allocatescbs", 1); - - /* Allocate memory for the SCBs */ - ha->scbs = pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof(ips_scb_t), - &command_dma); - if (ha->scbs == NULL) - return 0; - ips_sg = pci_alloc_consistent(ha->pcidev, sizeof(IPS_SG_LIST) * IPS_MAX_SG * - ha->max_cmds, &sg_dma); - if(ips_sg == NULL){ - pci_free_consistent(ha->pcidev,ha->max_cmds * sizeof(ips_scb_t),ha->scbs, command_dma); - return 0; - } - - memset(ha->scbs, 0, ha->max_cmds * sizeof(ips_scb_t)); - - for (i = 0; i < ha->max_cmds; i++) { - scb_p = &ha->scbs[i]; - scb_p->scb_busaddr = command_dma + sizeof(ips_scb_t) * i; - /* set up S/G list */ - scb_p->sg_list = ips_sg + i * IPS_MAX_SG; - scb_p->sg_busaddr = sg_dma + sizeof(IPS_SG_LIST) * IPS_MAX_SG * i; - - /* add to the free list */ - if (i < ha->max_cmds - 1) { - scb_p->q_next = ha->scb_freelist; - ha->scb_freelist = scb_p; - } - } +ips_allocatescbs(ips_ha_t * ha) +{ + ips_scb_t *scb_p; + IPS_SG_LIST ips_sg; + int i; + dma_addr_t command_dma, sg_dma; + + METHOD_TRACE("ips_allocatescbs", 1); + + /* Allocate memory for the SCBs */ + ha->scbs = + pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t), + &command_dma); + if (ha->scbs == NULL) + return 0; + ips_sg.list = + pci_alloc_consistent(ha->pcidev, + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * + ha->max_cmds, &sg_dma); + if (ips_sg.list == NULL) { + pci_free_consistent(ha->pcidev, + ha->max_cmds * sizeof (ips_scb_t), ha->scbs, + command_dma); + return 0; + } + + memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t)); + + for (i = 0; i < ha->max_cmds; i++) { + scb_p = &ha->scbs[i]; + scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i; + /* set up S/G list */ + if (IPS_USE_ENH_SGLIST(ha)) { + scb_p->sg_list.enh_list = + ips_sg.enh_list + i * IPS_MAX_SG; + scb_p->sg_busaddr = + sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; + } else { + scb_p->sg_list.std_list = + 
ips_sg.std_list + i * IPS_MAX_SG; + scb_p->sg_busaddr = + sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; + } + + /* add to the free list */ + if (i < ha->max_cmds - 1) { + scb_p->q_next = ha->scb_freelist; + ha->scb_freelist = scb_p; + } + } - /* success */ - return (1); + /* success */ + return (1); } /****************************************************************************/ @@ -5144,36 +4637,37 @@ ips_allocatescbs(ips_ha_t *ha) { /* */ /****************************************************************************/ static void -ips_init_scb(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SG_LIST *sg_list; - uint32_t cmd_busaddr, sg_busaddr; - METHOD_TRACE("ips_init_scb", 1); - - if (scb == NULL) - return ; - - sg_list = scb->sg_list; - cmd_busaddr = scb->scb_busaddr; - sg_busaddr = scb->sg_busaddr; - /* zero fill */ - memset(scb, 0, sizeof(ips_scb_t)); - memset(ha->dummy, 0, sizeof(IPS_IO_CMD)); - - /* Initialize dummy command bucket */ - ha->dummy->op_code = 0xFF; - ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start - + sizeof(IPS_ADAPTER)); - ha->dummy->command_id = IPS_MAX_CMDS; - - /* set bus address of scb */ - scb->scb_busaddr = cmd_busaddr; - scb->sg_busaddr = sg_busaddr; - scb->sg_list = sg_list; - - /* Neptune Fix */ - scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE); - scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start - + sizeof(IPS_ADAPTER)); +ips_init_scb(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr, sg_busaddr; + METHOD_TRACE("ips_init_scb", 1); + + if (scb == NULL) + return; + + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + sg_busaddr = scb->sg_busaddr; + /* zero fill */ + memset(scb, 0, sizeof (ips_scb_t)); + memset(ha->dummy, 0, sizeof (IPS_IO_CMD)); + + /* Initialize dummy command bucket */ + ha->dummy->op_code = 0xFF; + ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start + + sizeof (IPS_ADAPTER)); + ha->dummy->command_id = IPS_MAX_CMDS; + + /* set bus 
address of scb */ + scb->scb_busaddr = cmd_busaddr; + scb->sg_busaddr = sg_busaddr; + scb->sg_list.list = sg_list.list; + + /* Neptune Fix */ + scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE); + scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start + + sizeof (IPS_ADAPTER)); } /****************************************************************************/ @@ -5188,22 +4682,23 @@ ips_init_scb(ips_ha_t *ha, ips_scb_t *sc /* */ /****************************************************************************/ static ips_scb_t * -ips_getscb(ips_ha_t *ha) { - ips_scb_t *scb; +ips_getscb(ips_ha_t * ha) +{ + ips_scb_t *scb; - METHOD_TRACE("ips_getscb", 1); + METHOD_TRACE("ips_getscb", 1); - if ((scb = ha->scb_freelist) == NULL) { + if ((scb = ha->scb_freelist) == NULL) { - return (NULL); - } + return (NULL); + } - ha->scb_freelist = scb->q_next; - scb->q_next = NULL; + ha->scb_freelist = scb->q_next; + scb->q_next = NULL; - ips_init_scb(ha, scb); + ips_init_scb(ha, scb); - return (scb); + return (scb); } /****************************************************************************/ @@ -5218,22 +4713,22 @@ ips_getscb(ips_ha_t *ha) { /* */ /****************************************************************************/ static void -ips_freescb(ips_ha_t *ha, ips_scb_t *scb) { +ips_freescb(ips_ha_t * ha, ips_scb_t * scb) +{ - METHOD_TRACE("ips_freescb", 1); - if(scb->flags & IPS_SCB_MAP_SG) - pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer, - scb->scsi_cmd->use_sg, - IPS_DMA_DIR(scb)); - else if(scb->flags & IPS_SCB_MAP_SINGLE) - pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, - IPS_DMA_DIR(scb)); - - /* check to make sure this is not our "special" scb */ - if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) { - scb->q_next = ha->scb_freelist; - ha->scb_freelist = scb; - } + METHOD_TRACE("ips_freescb", 1); + if (scb->flags & IPS_SCB_MAP_SG) + pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer, + scb->scsi_cmd->use_sg, 
IPS_DMA_DIR(scb)); + else if (scb->flags & IPS_SCB_MAP_SINGLE) + pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, + IPS_DMA_DIR(scb)); + + /* check to make sure this is not our "special" scb */ + if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) { + scb->q_next = ha->scb_freelist; + ha->scb_freelist = scb; + } } /****************************************************************************/ @@ -5246,19 +4741,20 @@ ips_freescb(ips_ha_t *ha, ips_scb_t *scb /* */ /****************************************************************************/ static int -ips_isinit_copperhead(ips_ha_t *ha) { - uint8_t scpr; - uint8_t isr; - - METHOD_TRACE("ips_isinit_copperhead", 1); - - isr = inb(ha->io_addr + IPS_REG_HISR); - scpr = inb(ha->io_addr + IPS_REG_SCPR); - - if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) - return (0); - else - return (1); +ips_isinit_copperhead(ips_ha_t * ha) +{ + uint8_t scpr; + uint8_t isr; + + METHOD_TRACE("ips_isinit_copperhead", 1); + + isr = inb(ha->io_addr + IPS_REG_HISR); + scpr = inb(ha->io_addr + IPS_REG_SCPR); + + if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) + return (0); + else + return (1); } /****************************************************************************/ @@ -5271,19 +4767,20 @@ ips_isinit_copperhead(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_isinit_copperhead_memio(ips_ha_t *ha) { - uint8_t isr=0; - uint8_t scpr; - - METHOD_TRACE("ips_is_init_copperhead_memio", 1); - - isr = readb(ha->mem_ptr + IPS_REG_HISR); - scpr = readb(ha->mem_ptr + IPS_REG_SCPR); - - if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) - return (0); - else - return (1); +ips_isinit_copperhead_memio(ips_ha_t * ha) +{ + uint8_t isr = 0; + uint8_t scpr; + + METHOD_TRACE("ips_is_init_copperhead_memio", 1); + + isr = readb(ha->mem_ptr + IPS_REG_HISR); + scpr = readb(ha->mem_ptr + IPS_REG_SCPR); + + if (((isr & IPS_BIT_EI) == 0) && 
((scpr & IPS_BIT_EBM) == 0)) + return (0); + else + return (1); } /****************************************************************************/ @@ -5296,21 +4793,22 @@ ips_isinit_copperhead_memio(ips_ha_t *ha /* */ /****************************************************************************/ static int -ips_isinit_morpheus(ips_ha_t *ha) { - uint32_t post; - uint32_t bits; - - METHOD_TRACE("ips_is_init_morpheus", 1); - - post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); - bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - - if (post == 0) - return (0); - else if (bits & 0x3) - return (0); - else - return (1); +ips_isinit_morpheus(ips_ha_t * ha) +{ + uint32_t post; + uint32_t bits; + + METHOD_TRACE("ips_is_init_morpheus", 1); + + post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (post == 0) + return (0); + else if (bits & 0x3) + return (0); + else + return (1); } /****************************************************************************/ @@ -5322,10 +4820,12 @@ ips_isinit_morpheus(ips_ha_t *ha) { /* */ /****************************************************************************/ static void -ips_enable_int_copperhead(ips_ha_t *ha) { - METHOD_TRACE("ips_enable_int_copperhead", 1); +ips_enable_int_copperhead(ips_ha_t * ha) +{ + METHOD_TRACE("ips_enable_int_copperhead", 1); - outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI); + outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI); + inb(ha->io_addr + IPS_REG_HISR); // Ensure PCI Posting Completes } /****************************************************************************/ @@ -5337,10 +4837,12 @@ ips_enable_int_copperhead(ips_ha_t *ha) /* */ /****************************************************************************/ static void -ips_enable_int_copperhead_memio(ips_ha_t *ha) { - METHOD_TRACE("ips_enable_int_copperhead_memio", 1); +ips_enable_int_copperhead_memio(ips_ha_t * ha) +{ + METHOD_TRACE("ips_enable_int_copperhead_memio", 1); - writeb(IPS_BIT_EI, ha->mem_ptr + 
IPS_REG_HISR); + writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); + readb(ha->mem_ptr + IPS_REG_HISR); // Ensure PCI Posting Completes } /****************************************************************************/ @@ -5352,14 +4854,16 @@ ips_enable_int_copperhead_memio(ips_ha_t /* */ /****************************************************************************/ static void -ips_enable_int_morpheus(ips_ha_t *ha) { - uint32_t Oimr; +ips_enable_int_morpheus(ips_ha_t * ha) +{ + uint32_t Oimr; - METHOD_TRACE("ips_enable_int_morpheus", 1); + METHOD_TRACE("ips_enable_int_morpheus", 1); - Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); - Oimr &= ~0x08; - writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr &= ~0x08; + writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + readl(ha->mem_ptr + IPS_REG_I960_OIMR); // Ensure PCI Posting Completes } /****************************************************************************/ @@ -5372,86 +4876,88 @@ ips_enable_int_morpheus(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_init_copperhead(ips_ha_t *ha) { - uint8_t Isr; - uint8_t Cbsp; - uint8_t PostByte[IPS_MAX_POST_BYTES]; - uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; - int i, j; - - METHOD_TRACE("ips_init_copperhead", 1); - - for (i = 0; i < IPS_MAX_POST_BYTES; i++) { - for (j = 0; j < 45; j++) { - Isr = inb(ha->io_addr + IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 45) - /* error occurred */ - return (0); - - PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR); - outb(Isr, ha->io_addr + IPS_REG_HISR); - } - - if (PostByte[0] < IPS_GOOD_POST_STATUS) { - printk(KERN_WARNING "(%s%d) reset controller fails (post status %x %x).\n", - ips_name, ha->host_num, PostByte[0], PostByte[1]); - - return (0); - } - - for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { - for (j = 0; j < 240; j++) { - Isr = inb(ha->io_addr + 
IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 240) - /* error occurred */ - return (0); - - ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR); - outb(Isr, ha->io_addr + IPS_REG_HISR); - } - - for (i = 0; i < 240; i++) { - Cbsp = inb(ha->io_addr + IPS_REG_CBSP); - - if ((Cbsp & IPS_BIT_OP) == 0) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 240) - /* reset failed */ - return (0); - - /* setup CCCR */ - outl(cpu_to_le32(0x1010), ha->io_addr + IPS_REG_CCCR); - - /* Enable busmastering */ - outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); - - if (ha->revision_id == IPS_REVID_TROMBONE64) - /* fix for anaconda64 */ - outl(0, ha->io_addr + IPS_REG_NDAE); +ips_init_copperhead(ips_ha_t * ha) +{ + uint8_t Isr; + uint8_t Cbsp; + uint8_t PostByte[IPS_MAX_POST_BYTES]; + uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; + int i, j; + + METHOD_TRACE("ips_init_copperhead", 1); + + for (i = 0; i < IPS_MAX_POST_BYTES; i++) { + for (j = 0; j < 45; j++) { + Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 45) + /* error occurred */ + return (0); + + PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR); + outb(Isr, ha->io_addr + IPS_REG_HISR); + } + + if (PostByte[0] < IPS_GOOD_POST_STATUS) { + printk(KERN_WARNING + "(%s%d) reset controller fails (post status %x %x).\n", + ips_name, ha->host_num, PostByte[0], PostByte[1]); + + return (0); + } + + for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { + for (j = 0; j < 240; j++) { + Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 240) + /* error occurred */ + return (0); + + ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR); + outb(Isr, ha->io_addr + IPS_REG_HISR); + } + + for (i = 0; i < 240; i++) { + Cbsp = inb(ha->io_addr + IPS_REG_CBSP); + + if ((Cbsp & IPS_BIT_OP) == 0) + 
break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) + /* reset failed */ + return (0); + + /* setup CCCR */ + outl(cpu_to_le32(0x1010), ha->io_addr + IPS_REG_CCCR); + + /* Enable busmastering */ + outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); + + if (ha->revision_id == IPS_REVID_TROMBONE64) + /* fix for anaconda64 */ + outl(0, ha->io_addr + IPS_REG_NDAE); - /* Enable interrupts */ - outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR); + /* Enable interrupts */ + outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR); - return (1); + return (1); } /****************************************************************************/ @@ -5464,87 +4970,89 @@ ips_init_copperhead(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_init_copperhead_memio(ips_ha_t *ha) { - uint8_t Isr=0; - uint8_t Cbsp; - uint8_t PostByte[IPS_MAX_POST_BYTES]; - uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; - int i, j; - - METHOD_TRACE("ips_init_copperhead_memio", 1); - - for (i = 0; i < IPS_MAX_POST_BYTES; i++) { - for (j = 0; j < 45; j++) { - Isr = readb(ha->mem_ptr + IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 45) - /* error occurred */ - return (0); - - PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); - writeb(Isr, ha->mem_ptr + IPS_REG_HISR); - } - - if (PostByte[0] < IPS_GOOD_POST_STATUS) { - printk(KERN_WARNING "(%s%d) reset controller fails (post status %x %x).\n", - ips_name, ha->host_num, PostByte[0], PostByte[1]); - - return (0); - } - - for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { - for (j = 0; j < 240; j++) { - Isr = readb(ha->mem_ptr + IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 240) - /* error occurred */ - return (0); - - ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); - writeb(Isr, ha->mem_ptr + IPS_REG_HISR); - } - - for (i = 0; i < 240; i++) { - Cbsp = 
readb(ha->mem_ptr + IPS_REG_CBSP); - - if ((Cbsp & IPS_BIT_OP) == 0) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 240) - /* error occurred */ - return (0); - - /* setup CCCR */ - writel(0x1010, ha->mem_ptr + IPS_REG_CCCR); - - /* Enable busmastering */ - writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR); - - if (ha->revision_id == IPS_REVID_TROMBONE64) - /* fix for anaconda64 */ - writel(0, ha->mem_ptr + IPS_REG_NDAE); +ips_init_copperhead_memio(ips_ha_t * ha) +{ + uint8_t Isr = 0; + uint8_t Cbsp; + uint8_t PostByte[IPS_MAX_POST_BYTES]; + uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; + int i, j; + + METHOD_TRACE("ips_init_copperhead_memio", 1); + + for (i = 0; i < IPS_MAX_POST_BYTES; i++) { + for (j = 0; j < 45; j++) { + Isr = readb(ha->mem_ptr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 45) + /* error occurred */ + return (0); + + PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + if (PostByte[0] < IPS_GOOD_POST_STATUS) { + printk(KERN_WARNING + "(%s%d) reset controller fails (post status %x %x).\n", + ips_name, ha->host_num, PostByte[0], PostByte[1]); + + return (0); + } + + for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { + for (j = 0; j < 240; j++) { + Isr = readb(ha->mem_ptr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 240) + /* error occurred */ + return (0); + + ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + for (i = 0; i < 240; i++) { + Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP); + + if ((Cbsp & IPS_BIT_OP) == 0) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) + /* error occurred */ + return (0); + + /* setup CCCR */ + writel(0x1010, ha->mem_ptr + IPS_REG_CCCR); + + /* Enable busmastering */ + writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR); + + if 
(ha->revision_id == IPS_REVID_TROMBONE64) + /* fix for anaconda64 */ + writel(0, ha->mem_ptr + IPS_REG_NDAE); - /* Enable interrupts */ - writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); + /* Enable interrupts */ + writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); - /* if we get here then everything went OK */ - return (1); + /* if we get here then everything went OK */ + return (1); } /****************************************************************************/ @@ -5557,109 +5065,112 @@ ips_init_copperhead_memio(ips_ha_t *ha) /* */ /****************************************************************************/ static int -ips_init_morpheus(ips_ha_t *ha) { - uint32_t Post; - uint32_t Config; - uint32_t Isr; - uint32_t Oimr; - int i; - - METHOD_TRACE("ips_init_morpheus", 1); - - /* Wait up to 45 secs for Post */ - for (i = 0; i < 45; i++) { - Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - - if (Isr & IPS_BIT_I960_MSG0I) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 45) { - /* error occurred */ - printk(KERN_WARNING "(%s%d) timeout waiting for post.\n", - ips_name, ha->host_num); - - return (0); - } - - Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); - - if (Post == 0x4F00) { /* If Flashing the Battery PIC */ - printk(KERN_WARNING "Flashing Battery PIC, Please wait ...\n" ); - - /* Clear the interrupt bit */ - Isr = (uint32_t) IPS_BIT_I960_MSG0I; - writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); - - for (i = 0; i < 120; i++) { /* Wait Up to 2 Min. 
for Completion */ - Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); - if (Post != 0x4F00) - break; - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 120) { - printk(KERN_WARNING "(%s%d) timeout waiting for Battery PIC Flash\n", - ips_name, ha->host_num); - return (0); - } - - } - - /* Clear the interrupt bit */ - Isr = (uint32_t) IPS_BIT_I960_MSG0I; - writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); - - if (Post < (IPS_GOOD_POST_STATUS << 8)) { - printk(KERN_WARNING "(%s%d) reset controller fails (post status %x).\n", - ips_name, ha->host_num, Post); - - return (0); - } - - /* Wait up to 240 secs for config bytes */ - for (i = 0; i < 240; i++) { - Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - - if (Isr & IPS_BIT_I960_MSG1I) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 240) { - /* error occurred */ - printk(KERN_WARNING "(%s%d) timeout waiting for config.\n", - ips_name, ha->host_num); - - return (0); - } - - Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1); - - /* Clear interrupt bit */ - Isr = (uint32_t) IPS_BIT_I960_MSG1I; - writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); - - /* Turn on the interrupts */ - Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); - Oimr &= ~0x8; - writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); - - /* if we get here then everything went OK */ - - /* Since we did a RESET, an EraseStripeLock may be needed */ - if (Post == 0xEF10) { - if ( (Config == 0x000F) || (Config == 0x0009) ) - ha->requires_esl = 1; - } +ips_init_morpheus(ips_ha_t * ha) +{ + uint32_t Post; + uint32_t Config; + uint32_t Isr; + uint32_t Oimr; + int i; + + METHOD_TRACE("ips_init_morpheus", 1); + + /* Wait up to 45 secs for Post */ + for (i = 0; i < 45; i++) { + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Isr & IPS_BIT_I960_MSG0I) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 45) { + /* error occurred */ + printk(KERN_WARNING "(%s%d) timeout waiting for post.\n", + ips_name, ha->host_num); + + 
return (0); + } + + Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + + if (Post == 0x4F00) { /* If Flashing the Battery PIC */ + printk(KERN_WARNING "Flashing Battery PIC, Please wait ...\n"); + + /* Clear the interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG0I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + for (i = 0; i < 120; i++) { /* Wait Up to 2 Min. for Completion */ + Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + if (Post != 0x4F00) + break; + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 120) { + printk(KERN_WARNING + "(%s%d) timeout waiting for Battery PIC Flash\n", + ips_name, ha->host_num); + return (0); + } + + } + + /* Clear the interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG0I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Post < (IPS_GOOD_POST_STATUS << 8)) { + printk(KERN_WARNING + "(%s%d) reset controller fails (post status %x).\n", + ips_name, ha->host_num, Post); + + return (0); + } + + /* Wait up to 240 secs for config bytes */ + for (i = 0; i < 240; i++) { + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Isr & IPS_BIT_I960_MSG1I) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) { + /* error occurred */ + printk(KERN_WARNING "(%s%d) timeout waiting for config.\n", + ips_name, ha->host_num); + + return (0); + } + + Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1); + + /* Clear interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG1I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + /* Turn on the interrupts */ + Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr &= ~0x8; + writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + + /* if we get here then everything went OK */ + + /* Since we did a RESET, an EraseStripeLock may be needed */ + if (Post == 0xEF10) { + if ((Config == 0x000F) || (Config == 0x0009)) + ha->requires_esl = 1; + } - return (1); + return (1); } /****************************************************************************/ @@ -5672,38 +5183,39 @@ 
ips_init_morpheus(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_reset_copperhead(ips_ha_t *ha) { - int reset_counter; +ips_reset_copperhead(ips_ha_t * ha) +{ + int reset_counter; - METHOD_TRACE("ips_reset_copperhead", 1); + METHOD_TRACE("ips_reset_copperhead", 1); - DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d", - ips_name, ha->host_num, ha->io_addr, ha->irq); + DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d", + ips_name, ha->host_num, ha->io_addr, ha->irq); - reset_counter = 0; + reset_counter = 0; - while (reset_counter < 2) { - reset_counter++; + while (reset_counter < 2) { + reset_counter++; - outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); + outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - outb(0, ha->io_addr + IPS_REG_SCPR); + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - if ((*ha->func.init)(ha)) - break; - else if (reset_counter >= 2) { + outb(0, ha->io_addr + IPS_REG_SCPR); - return (0); - } - } + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - return (1); + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { + + return (0); + } + } + + return (1); } /****************************************************************************/ @@ -5716,38 +5228,39 @@ ips_reset_copperhead(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_reset_copperhead_memio(ips_ha_t *ha) { - int reset_counter; +ips_reset_copperhead_memio(ips_ha_t * ha) +{ + int reset_counter; - METHOD_TRACE("ips_reset_copperhead_memio", 1); + METHOD_TRACE("ips_reset_copperhead_memio", 1); - DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d", - ips_name, ha->host_num, ha->mem_addr, ha->irq); + DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d", + ips_name, 
ha->host_num, ha->mem_addr, ha->irq); - reset_counter = 0; + reset_counter = 0; - while (reset_counter < 2) { - reset_counter++; + while (reset_counter < 2) { + reset_counter++; - writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); + writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - writeb(0, ha->mem_ptr + IPS_REG_SCPR); + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - if ((*ha->func.init)(ha)) - break; - else if (reset_counter >= 2) { + writeb(0, ha->mem_ptr + IPS_REG_SCPR); - return (0); - } - } + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - return (1); + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { + + return (0); + } + } + + return (1); } /****************************************************************************/ @@ -5760,37 +5273,38 @@ ips_reset_copperhead_memio(ips_ha_t *ha) /* */ /****************************************************************************/ static int -ips_reset_morpheus(ips_ha_t *ha) { - int reset_counter; - uint8_t junk; +ips_reset_morpheus(ips_ha_t * ha) +{ + int reset_counter; + uint8_t junk; + + METHOD_TRACE("ips_reset_morpheus", 1); - METHOD_TRACE("ips_reset_morpheus", 1); + DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d", + ips_name, ha->host_num, ha->mem_addr, ha->irq); - DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d", - ips_name, ha->host_num, ha->mem_addr, ha->irq); + reset_counter = 0; - reset_counter = 0; + while (reset_counter < 2) { + reset_counter++; - while (reset_counter < 2) { - reset_counter++; + writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); - writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); + /* Delay for 5 Seconds */ + MDELAY(5 * IPS_ONE_SEC); - /* Delay for 5 Seconds */ - MDELAY(5 * IPS_ONE_SEC); - - /* Do a PCI config read to wait for adapter */ - pci_read_config_byte(ha->pcidev, 4, &junk); + /* Do a PCI config read to wait for adapter */ + 
pci_read_config_byte(ha->pcidev, 4, &junk); - if ((*ha->func.init)(ha)) - break; - else if (reset_counter >= 2) { + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { - return (0); - } - } + return (0); + } + } - return (1); + return (1); } /****************************************************************************/ @@ -5803,22 +5317,25 @@ ips_reset_morpheus(ips_ha_t *ha) { /* */ /****************************************************************************/ static void -ips_statinit(ips_ha_t *ha) { - uint32_t phys_status_start; +ips_statinit(ips_ha_t * ha) +{ + uint32_t phys_status_start; - METHOD_TRACE("ips_statinit", 1); + METHOD_TRACE("ips_statinit", 1); - ha->adapt->p_status_start = ha->adapt->status; - ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; - ha->adapt->p_status_tail = ha->adapt->status; - - phys_status_start = ha->adapt->hw_status_start; - outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQSR); - outl(cpu_to_le32(phys_status_start + IPS_STATUS_Q_SIZE), ha->io_addr + IPS_REG_SQER); - outl(cpu_to_le32(phys_status_start + IPS_STATUS_SIZE), ha->io_addr + IPS_REG_SQHR); - outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQTR); + ha->adapt->p_status_start = ha->adapt->status; + ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; + ha->adapt->p_status_tail = ha->adapt->status; + + phys_status_start = ha->adapt->hw_status_start; + outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQSR); + outl(cpu_to_le32(phys_status_start + IPS_STATUS_Q_SIZE), + ha->io_addr + IPS_REG_SQER); + outl(cpu_to_le32(phys_status_start + IPS_STATUS_SIZE), + ha->io_addr + IPS_REG_SQHR); + outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQTR); - ha->adapt->hw_status_tail = phys_status_start; + ha->adapt->hw_status_tail = phys_status_start; } /****************************************************************************/ @@ -5831,22 +5348,24 @@ ips_statinit(ips_ha_t *ha) { /* */ 
/****************************************************************************/ static void -ips_statinit_memio(ips_ha_t *ha) { - uint32_t phys_status_start; +ips_statinit_memio(ips_ha_t * ha) +{ + uint32_t phys_status_start; - METHOD_TRACE("ips_statinit_memio", 1); + METHOD_TRACE("ips_statinit_memio", 1); - ha->adapt->p_status_start = ha->adapt->status; - ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; - ha->adapt->p_status_tail = ha->adapt->status; - - phys_status_start = ha->adapt->hw_status_start; - writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR); - writel(phys_status_start + IPS_STATUS_Q_SIZE, ha->mem_ptr + IPS_REG_SQER); - writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR); - writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR); + ha->adapt->p_status_start = ha->adapt->status; + ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; + ha->adapt->p_status_tail = ha->adapt->status; + + phys_status_start = ha->adapt->hw_status_start; + writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR); + writel(phys_status_start + IPS_STATUS_Q_SIZE, + ha->mem_ptr + IPS_REG_SQER); + writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR); + writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR); - ha->adapt->hw_status_tail = phys_status_start; + ha->adapt->hw_status_tail = phys_status_start; } /****************************************************************************/ @@ -5859,20 +5378,22 @@ ips_statinit_memio(ips_ha_t *ha) { /* */ /****************************************************************************/ static uint32_t -ips_statupd_copperhead(ips_ha_t *ha) { - METHOD_TRACE("ips_statupd_copperhead", 1); +ips_statupd_copperhead(ips_ha_t * ha) +{ + METHOD_TRACE("ips_statupd_copperhead", 1); - if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { - ha->adapt->p_status_tail++; - ha->adapt->hw_status_tail += sizeof(IPS_STATUS); - } else { - ha->adapt->p_status_tail = ha->adapt->p_status_start; - 
ha->adapt->hw_status_tail = ha->adapt->hw_status_start; - } + if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { + ha->adapt->p_status_tail++; + ha->adapt->hw_status_tail += sizeof (IPS_STATUS); + } else { + ha->adapt->p_status_tail = ha->adapt->p_status_start; + ha->adapt->hw_status_tail = ha->adapt->hw_status_start; + } - outl(cpu_to_le32(ha->adapt->hw_status_tail), ha->io_addr + IPS_REG_SQTR); + outl(cpu_to_le32(ha->adapt->hw_status_tail), + ha->io_addr + IPS_REG_SQTR); - return (ha->adapt->p_status_tail->value); + return (ha->adapt->p_status_tail->value); } /****************************************************************************/ @@ -5885,20 +5406,21 @@ ips_statupd_copperhead(ips_ha_t *ha) { /* */ /****************************************************************************/ static uint32_t -ips_statupd_copperhead_memio(ips_ha_t *ha) { - METHOD_TRACE("ips_statupd_copperhead_memio", 1); +ips_statupd_copperhead_memio(ips_ha_t * ha) +{ + METHOD_TRACE("ips_statupd_copperhead_memio", 1); - if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { - ha->adapt->p_status_tail++; - ha->adapt->hw_status_tail += sizeof(IPS_STATUS); - } else { - ha->adapt->p_status_tail = ha->adapt->p_status_start; - ha->adapt->hw_status_tail = ha->adapt->hw_status_start; - } + if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { + ha->adapt->p_status_tail++; + ha->adapt->hw_status_tail += sizeof (IPS_STATUS); + } else { + ha->adapt->p_status_tail = ha->adapt->p_status_start; + ha->adapt->hw_status_tail = ha->adapt->hw_status_start; + } - writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR); + writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR); - return (ha->adapt->p_status_tail->value); + return (ha->adapt->p_status_tail->value); } /****************************************************************************/ @@ -5911,14 +5433,15 @@ ips_statupd_copperhead_memio(ips_ha_t *h /* */ 
/****************************************************************************/ static uint32_t -ips_statupd_morpheus(ips_ha_t *ha) { - uint32_t val; +ips_statupd_morpheus(ips_ha_t * ha) +{ + uint32_t val; - METHOD_TRACE("ips_statupd_morpheus", 1); + METHOD_TRACE("ips_statupd_morpheus", 1); - val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ); + val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ); - return (val); + return (val); } /****************************************************************************/ @@ -5931,50 +5454,49 @@ ips_statupd_morpheus(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_issue_copperhead(ips_ha_t *ha, ips_scb_t *scb) { - uint32_t TimeOut; - uint32_t val; - - METHOD_TRACE("ips_issue_copperhead", 1); - - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } - - TimeOut = 0; - - while ((val = le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) { - udelay(1000); - - if (++TimeOut >= IPS_SEM_TIMEOUT) { - if (!(val & IPS_BIT_START_STOP)) - break; - - printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", - ips_name, ha->host_num, val); - printk(KERN_WARNING "(%s%d) ips_issue semaphore chk timeout.\n", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } /* end if */ - } /* end while */ +ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb) +{ + uint32_t TimeOut; + uint32_t val; - outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_CCSAR); - outw(cpu_to_le32(IPS_BIT_START_CMD), ha->io_addr + IPS_REG_CCCR); + METHOD_TRACE("ips_issue_copperhead", 1); - return (IPS_SUCCESS); + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + 
scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + TimeOut = 0; + + while ((val = le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & + IPS_BIT_SEM) { + udelay(1000); + + if (++TimeOut >= IPS_SEM_TIMEOUT) { + if (!(val & IPS_BIT_START_STOP)) + break; + + printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", + ips_name, ha->host_num, val); + printk(KERN_WARNING + "(%s%d) ips_issue semaphore chk timeout.\n", + ips_name, ha->host_num); + + return (IPS_FAILURE); + } /* end if */ + } /* end while */ + + outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_CCSAR); + outw(cpu_to_le32(IPS_BIT_START_CMD), ha->io_addr + IPS_REG_CCCR); + + return (IPS_SUCCESS); } /****************************************************************************/ @@ -5987,50 +5509,48 @@ ips_issue_copperhead(ips_ha_t *ha, ips_s /* */ /****************************************************************************/ static int -ips_issue_copperhead_memio(ips_ha_t *ha, ips_scb_t *scb) { - uint32_t TimeOut; - uint32_t val; - - METHOD_TRACE("ips_issue_copperhead_memio", 1); - - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } - - TimeOut = 0; - - while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) { - udelay(1000); - - if (++TimeOut >= IPS_SEM_TIMEOUT) { - if (!(val & IPS_BIT_START_STOP)) - break; - - printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", - ips_name, ha->host_num, val); - printk(KERN_WARNING "(%s%d) ips_issue semaphore chk timeout.\n", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } /* end if */ - } /* end while */ 
+ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb) +{ + uint32_t TimeOut; + uint32_t val; - writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR); - writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR); + METHOD_TRACE("ips_issue_copperhead_memio", 1); - return (IPS_SUCCESS); + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + TimeOut = 0; + + while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) { + udelay(1000); + + if (++TimeOut >= IPS_SEM_TIMEOUT) { + if (!(val & IPS_BIT_START_STOP)) + break; + + printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", + ips_name, ha->host_num, val); + printk(KERN_WARNING + "(%s%d) ips_issue semaphore chk timeout.\n", + ips_name, ha->host_num); + + return (IPS_FAILURE); + } /* end if */ + } /* end while */ + + writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR); + writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR); + + return (IPS_SUCCESS); } /****************************************************************************/ @@ -6043,29 +5563,26 @@ ips_issue_copperhead_memio(ips_ha_t *ha, /* */ /****************************************************************************/ static int -ips_issue_i2o(ips_ha_t *ha, ips_scb_t *scb) { +ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb) +{ - METHOD_TRACE("ips_issue_i2o", 1); + METHOD_TRACE("ips_issue_i2o", 1); - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X 
id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } - outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_I2O_INMSGQ); + outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_I2O_INMSGQ); - return (IPS_SUCCESS); + return (IPS_SUCCESS); } /****************************************************************************/ @@ -6078,29 +5595,26 @@ ips_issue_i2o(ips_ha_t *ha, ips_scb_t *s /* */ /****************************************************************************/ static int -ips_issue_i2o_memio(ips_ha_t *ha, ips_scb_t *scb) { +ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb) +{ - METHOD_TRACE("ips_issue_i2o_memio", 1); + METHOD_TRACE("ips_issue_i2o_memio", 1); - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } - writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ); + writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ); - return (IPS_SUCCESS); + return (IPS_SUCCESS); } /****************************************************************************/ @@ -6113,26 +5627,27 @@ ips_issue_i2o_memio(ips_ha_t *ha, ips_sc /* */ /****************************************************************************/ static int -ips_isintr_copperhead(ips_ha_t *ha) { 
- uint8_t Isr; +ips_isintr_copperhead(ips_ha_t * ha) +{ + uint8_t Isr; + + METHOD_TRACE("ips_isintr_copperhead", 2); - METHOD_TRACE("ips_isintr_copperhead", 2); + Isr = inb(ha->io_addr + IPS_REG_HISR); - Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr == 0xFF) + /* ?!?! Nothing really there */ + return (0); - if (Isr == 0xFF) - /* ?!?! Nothing really there */ - return (0); - - if (Isr & IPS_BIT_SCE) - return (1); - else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { - /* status queue overflow or GHI */ - /* just clear the interrupt */ - outb(Isr, ha->io_addr + IPS_REG_HISR); - } + if (Isr & IPS_BIT_SCE) + return (1); + else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { + /* status queue overflow or GHI */ + /* just clear the interrupt */ + outb(Isr, ha->io_addr + IPS_REG_HISR); + } - return (0); + return (0); } /****************************************************************************/ @@ -6145,26 +5660,27 @@ ips_isintr_copperhead(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_isintr_copperhead_memio(ips_ha_t *ha) { - uint8_t Isr; +ips_isintr_copperhead_memio(ips_ha_t * ha) +{ + uint8_t Isr; - METHOD_TRACE("ips_isintr_memio", 2); + METHOD_TRACE("ips_isintr_memio", 2); - Isr = readb(ha->mem_ptr + IPS_REG_HISR); + Isr = readb(ha->mem_ptr + IPS_REG_HISR); - if (Isr == 0xFF) - /* ?!?! Nothing really there */ - return (0); - - if (Isr & IPS_BIT_SCE) - return (1); - else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { - /* status queue overflow or GHI */ - /* just clear the interrupt */ - writeb(Isr, ha->mem_ptr + IPS_REG_HISR); - } + if (Isr == 0xFF) + /* ?!?! 
Nothing really there */ + return (0); - return (0); + if (Isr & IPS_BIT_SCE) + return (1); + else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { + /* status queue overflow or GHI */ + /* just clear the interrupt */ + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + return (0); } /****************************************************************************/ @@ -6177,17 +5693,18 @@ ips_isintr_copperhead_memio(ips_ha_t *ha /* */ /****************************************************************************/ static int -ips_isintr_morpheus(ips_ha_t *ha) { - uint32_t Isr; +ips_isintr_morpheus(ips_ha_t * ha) +{ + uint32_t Isr; - METHOD_TRACE("ips_isintr_morpheus", 2); + METHOD_TRACE("ips_isintr_morpheus", 2); - Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - if (Isr & IPS_BIT_I2O_OPQI) - return (1); - else - return (0); + if (Isr & IPS_BIT_I2O_OPQI) + return (1); + else + return (0); } /****************************************************************************/ @@ -6200,51 +5717,52 @@ ips_isintr_morpheus(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_wait(ips_ha_t *ha, int time, int intr) { - int ret; - int done; - - METHOD_TRACE("ips_wait", 1); - - ret = IPS_FAILURE; - done = FALSE; - - time *= IPS_ONE_SEC; /* convert seconds */ - - while ((time > 0) && (!done)) { - if (intr == IPS_INTR_ON) { - if (ha->waitflag == FALSE) { - ret = IPS_SUCCESS; - done = TRUE; - break; - } - } else if (intr == IPS_INTR_IORL) { - if (ha->waitflag == FALSE) { - /* - * controller generated an interrupt to - * acknowledge completion of the command - * and ips_intr() has serviced the interrupt. - */ - ret = IPS_SUCCESS; - done = TRUE; - break; - } - - /* - * NOTE: we already have the io_request_lock so - * even if we get an interrupt it won't get serviced - * until after we finish. 
- */ - - (*ha->func.intr)(ha); - } - - /* This looks like a very evil loop, but it only does this during start-up */ - udelay(1000); - time--; - } +ips_wait(ips_ha_t * ha, int time, int intr) +{ + int ret; + int done; + + METHOD_TRACE("ips_wait", 1); - return (ret); + ret = IPS_FAILURE; + done = FALSE; + + time *= IPS_ONE_SEC; /* convert seconds */ + + while ((time > 0) && (!done)) { + if (intr == IPS_INTR_ON) { + if (ha->waitflag == FALSE) { + ret = IPS_SUCCESS; + done = TRUE; + break; + } + } else if (intr == IPS_INTR_IORL) { + if (ha->waitflag == FALSE) { + /* + * controller generated an interrupt to + * acknowledge completion of the command + * and ips_intr() has serviced the interrupt. + */ + ret = IPS_SUCCESS; + done = TRUE; + break; + } + + /* + * NOTE: we already have the io_request_lock so + * even if we get an interrupt it won't get serviced + * until after we finish. + */ + + (*ha->func.intr) (ha); + } + + /* This looks like a very evil loop, but it only does this during start-up */ + udelay(1000); + time--; + } + + return (ret); } /****************************************************************************/ @@ -6257,61 +5775,59 @@ ips_wait(ips_ha_t *ha, int time, int int /* */ /****************************************************************************/ static int -ips_write_driver_status(ips_ha_t *ha, int intr) { - METHOD_TRACE("ips_write_driver_status", 1); - - if (!ips_readwrite_page5(ha, FALSE, intr)) { - printk(KERN_WARNING "(%s%d) unable to read NVRAM page 5.\n", - ips_name, ha->host_num); - - return (0); - } - - /* check to make sure the page has a valid */ - /* signature */ - if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) { - DEBUG_VAR(1, "(%s%d) NVRAM page 5 has an invalid signature: %X.", - ips_name, ha->host_num, ha->nvram->signature); - ha->nvram->signature = IPS_NVRAM_P5_SIG; - } - - DEBUG_VAR(2, "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.", - ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type), - 
ha->nvram->adapter_slot, - ha->nvram->bios_high[0], ha->nvram->bios_high[1], - ha->nvram->bios_high[2], ha->nvram->bios_high[3], - ha->nvram->bios_low[0], ha->nvram->bios_low[1], - ha->nvram->bios_low[2], ha->nvram->bios_low[3]); - - ips_get_bios_version(ha, intr); - - /* change values (as needed) */ - ha->nvram->operating_system = IPS_OS_LINUX; - ha->nvram->adapter_type = ha->ad_type; - strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4); - strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4); - strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4); - strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4); - - ips_version_check(ha, intr); /* Check BIOS/FW/Driver Versions */ - - /* Save the First Copy of the Adapter Order that BIOS put in Page 5 */ - if ( (InitState == 0) && (AdapterOrder[0] == 0) ) - strncpy((char *) AdapterOrder, (char *) ha->nvram->adapter_order, sizeof(AdapterOrder) ); - - /* now update the page */ - if (!ips_readwrite_page5(ha, TRUE, intr)) { - printk(KERN_WARNING "(%s%d) unable to write NVRAM page 5.\n", - ips_name, ha->host_num); +ips_write_driver_status(ips_ha_t * ha, int intr) +{ + METHOD_TRACE("ips_write_driver_status", 1); - return (0); - } + if (!ips_readwrite_page5(ha, FALSE, intr)) { + printk(KERN_WARNING "(%s%d) unable to read NVRAM page 5.\n", + ips_name, ha->host_num); + + return (0); + } + + /* check to make sure the page has a valid */ + /* signature */ + if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) { + DEBUG_VAR(1, + "(%s%d) NVRAM page 5 has an invalid signature: %X.", + ips_name, ha->host_num, ha->nvram->signature); + ha->nvram->signature = IPS_NVRAM_P5_SIG; + } + + DEBUG_VAR(2, + "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.", + ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type), + ha->nvram->adapter_slot, ha->nvram->bios_high[0], + ha->nvram->bios_high[1], ha->nvram->bios_high[2], + ha->nvram->bios_high[3], ha->nvram->bios_low[0], + ha->nvram->bios_low[1], 
ha->nvram->bios_low[2], + ha->nvram->bios_low[3]); + + ips_get_bios_version(ha, intr); + + /* change values (as needed) */ + ha->nvram->operating_system = IPS_OS_LINUX; + ha->nvram->adapter_type = ha->ad_type; + strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4); + strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4); + strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4); + strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4); + + ips_version_check(ha, intr); /* Check BIOS/FW/Driver Versions */ + + /* now update the page */ + if (!ips_readwrite_page5(ha, TRUE, intr)) { + printk(KERN_WARNING "(%s%d) unable to write NVRAM page 5.\n", + ips_name, ha->host_num); - /* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */ - ha->slot_num = ha->nvram->adapter_slot; + return (0); + } + /* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */ + ha->slot_num = ha->nvram->adapter_slot; - return (1); + return (1); } /****************************************************************************/ @@ -6324,39 +5840,40 @@ ips_write_driver_status(ips_ha_t *ha, in /* */ /****************************************************************************/ static int -ips_read_adapter_status(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_read_adapter_status", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_ENQUIRY; - - scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.sg_count = 0; - scb->cmd.basic_io.lba = 0; - scb->cmd.basic_io.sector_count = 0; - scb->cmd.basic_io.log_drv = 0; - scb->cmd.basic_io.reserved = 0; - scb->data_len = sizeof(*ha->enq); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, scb->data_len, - IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = scb->data_busaddr; - scb->flags |= 
IPS_SCB_MAP_SINGLE; - - /* send command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); +ips_read_adapter_status(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_read_adapter_status", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; - return (1); + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_ENQUIRY; + + scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.sg_count = 0; + scb->cmd.basic_io.lba = 0; + scb->cmd.basic_io.sector_count = 0; + scb->cmd.basic_io.log_drv = 0; + scb->data_len = sizeof (*ha->enq); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, scb->data_len, + IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* send command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + return (1); } /****************************************************************************/ @@ -6369,39 +5886,40 @@ ips_read_adapter_status(ips_ha_t *ha, in /* */ /****************************************************************************/ static int -ips_read_subsystem_parameters(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_read_subsystem_parameters", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_GET_SUBSYS; - - scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.sg_count = 0; - scb->cmd.basic_io.lba = 0; - scb->cmd.basic_io.sector_count = 0; - scb->cmd.basic_io.log_drv = 0; - scb->cmd.basic_io.reserved = 0; - scb->data_len = 
sizeof(*ha->subsys); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->subsys, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* send command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); +ips_read_subsystem_parameters(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; - return (1); + METHOD_TRACE("ips_read_subsystem_parameters", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_GET_SUBSYS; + + scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.sg_count = 0; + scb->cmd.basic_io.lba = 0; + scb->cmd.basic_io.sector_count = 0; + scb->cmd.basic_io.log_drv = 0; + scb->data_len = sizeof (*ha->subsys); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->subsys, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* send command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + return (1); } /****************************************************************************/ @@ -6414,51 +5932,53 @@ ips_read_subsystem_parameters(ips_ha_t * /* */ /****************************************************************************/ static int -ips_read_config(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int i; - int ret; - - METHOD_TRACE("ips_read_config", 1); - - /* set defaults for initiator IDs */ - for (i = 0; i < 4; i++) - ha->conf->init_id[i] = 7; - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_READ_CONF; - - scb->cmd.basic_io.op_code 
= IPS_CMD_READ_CONF; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->data_len = sizeof(*ha->conf); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->conf, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* send command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { - - memset(ha->conf, 0, sizeof(IPS_CONF)); - - /* reset initiator IDs */ - for (i = 0; i < 4; i++) - ha->conf->init_id[i] = 7; - - /* Allow Completed with Errors, so JCRM can access the Adapter to fix the problems */ - if ((scb->basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_CMPLT_WERROR) - return (1); - - return (0); - } +ips_read_config(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int i; + int ret; + + METHOD_TRACE("ips_read_config", 1); - return (1); + /* set defaults for initiator IDs */ + for (i = 0; i < 4; i++) + ha->conf->init_id[i] = 7; + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_READ_CONF; + + scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->data_len = sizeof (*ha->conf); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->conf, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* send command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { + + memset(ha->conf, 0, sizeof (IPS_CONF)); + + /* reset initiator IDs */ + for (i = 0; i < 4; i++) + ha->conf->init_id[i] = 7; + + /* Allow Completed with Errors, so JCRM can access the Adapter to fix the problems */ + if ((scb->basic_status & IPS_GSC_STATUS_MASK) == + IPS_CMD_CMPLT_WERROR) return (1); + + 
return (0); + } + + return (1); } /****************************************************************************/ @@ -6471,42 +5991,44 @@ ips_read_config(ips_ha_t *ha, int intr) /* */ /****************************************************************************/ static int -ips_readwrite_page5(ips_ha_t *ha, int write, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_readwrite_page5", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE; - - scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE; - scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.nvram.page = 5; - scb->cmd.nvram.write = write; - scb->cmd.nvram.reserved = 0; - scb->cmd.nvram.reserved2 = 0; - scb->data_len = sizeof(*ha->nvram); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->nvram, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.nvram.buffer_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* issue the command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { +ips_readwrite_page5(ips_ha_t * ha, int write, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_readwrite_page5", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; - memset(ha->nvram, 0, sizeof(IPS_NVRAM_P5)); + ips_init_scb(ha, scb); - return (0); - } + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE; + + scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE; + scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.nvram.page = 5; + scb->cmd.nvram.write = write; + scb->cmd.nvram.reserved = 0; + scb->cmd.nvram.reserved2 = 0; + scb->data_len = sizeof (*ha->nvram); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->nvram, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.nvram.buffer_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* issue the command */ + if 
( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { - return (1); + memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5)); + + return (0); + } + + return (1); } /****************************************************************************/ @@ -6519,54 +6041,57 @@ ips_readwrite_page5(ips_ha_t *ha, int wr /* */ /****************************************************************************/ static int -ips_clear_adapter(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_clear_adapter", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_reset_timeout; - scb->cdb[0] = IPS_CMD_CONFIG_SYNC; - - scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC; - scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.config_sync.channel = 0; - scb->cmd.config_sync.source_target = IPS_POCL; - scb->cmd.config_sync.reserved = 0; - scb->cmd.config_sync.reserved2 = 0; - scb->cmd.config_sync.reserved3 = 0; - - /* issue command */ - if (((ret = ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); - - /* send unlock stripe command */ - ips_init_scb(ha, scb); - - scb->cdb[0] = IPS_CMD_ERROR_TABLE; - scb->timeout = ips_reset_timeout; - - scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE; - scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.unlock_stripe.log_drv = 0; - scb->cmd.unlock_stripe.control = IPS_CSL; - scb->cmd.unlock_stripe.reserved = 0; - scb->cmd.unlock_stripe.reserved2 = 0; - scb->cmd.unlock_stripe.reserved3 = 0; - - /* issue command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); +ips_clear_adapter(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int 
ret; + + METHOD_TRACE("ips_clear_adapter", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_reset_timeout; + scb->cdb[0] = IPS_CMD_CONFIG_SYNC; + + scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC; + scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.config_sync.channel = 0; + scb->cmd.config_sync.source_target = IPS_POCL; + scb->cmd.config_sync.reserved = 0; + scb->cmd.config_sync.reserved2 = 0; + scb->cmd.config_sync.reserved3 = 0; + + /* issue command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_reset_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + /* send unlock stripe command */ + ips_init_scb(ha, scb); + + scb->cdb[0] = IPS_CMD_ERROR_TABLE; + scb->timeout = ips_reset_timeout; + + scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE; + scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.unlock_stripe.log_drv = 0; + scb->cmd.unlock_stripe.control = IPS_CSL; + scb->cmd.unlock_stripe.reserved = 0; + scb->cmd.unlock_stripe.reserved2 = 0; + scb->cmd.unlock_stripe.reserved3 = 0; + + /* issue command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); - return (1); + return (1); } /****************************************************************************/ @@ -6579,27 +6104,28 @@ ips_clear_adapter(ips_ha_t *ha, int intr /* */ /****************************************************************************/ static void -ips_ffdc_reset(ips_ha_t *ha, int intr) { - ips_scb_t *scb; +ips_ffdc_reset(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; - METHOD_TRACE("ips_ffdc_reset", 1); + METHOD_TRACE("ips_ffdc_reset", 1); - scb = &ha->scbs[ha->max_cmds-1]; + scb = &ha->scbs[ha->max_cmds - 1]; - ips_init_scb(ha, scb); + ips_init_scb(ha, scb); - scb->timeout = ips_cmd_timeout; - 
scb->cdb[0] = IPS_CMD_FFDC; - scb->cmd.ffdc.op_code = IPS_CMD_FFDC; - scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.ffdc.reset_count = ha->reset_count; - scb->cmd.ffdc.reset_type = 0x80; + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FFDC; + scb->cmd.ffdc.op_code = IPS_CMD_FFDC; + scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.ffdc.reset_count = ha->reset_count; + scb->cmd.ffdc.reset_type = 0x80; - /* convert time to what the card wants */ - ips_fix_ffdc_time(ha, scb, ha->last_ffdc); + /* convert time to what the card wants */ + ips_fix_ffdc_time(ha, scb, ha->last_ffdc); - /* issue command */ - ips_send_wait(ha, scb, ips_cmd_timeout, intr); + /* issue command */ + ips_send_wait(ha, scb, ips_cmd_timeout, intr); } /****************************************************************************/ @@ -6612,30 +6138,30 @@ ips_ffdc_reset(ips_ha_t *ha, int intr) { /* */ /****************************************************************************/ static void -ips_ffdc_time(ips_ha_t *ha) { - ips_scb_t *scb; +ips_ffdc_time(ips_ha_t * ha) +{ + ips_scb_t *scb; - METHOD_TRACE("ips_ffdc_time", 1); + METHOD_TRACE("ips_ffdc_time", 1); - DEBUG_VAR(1, "(%s%d) Sending time update.", - ips_name, ha->host_num); + DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num); - scb = &ha->scbs[ha->max_cmds-1]; + scb = &ha->scbs[ha->max_cmds - 1]; - ips_init_scb(ha, scb); + ips_init_scb(ha, scb); - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FFDC; - scb->cmd.ffdc.op_code = IPS_CMD_FFDC; - scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.ffdc.reset_count = 0; - scb->cmd.ffdc.reset_type = 0x80; + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FFDC; + scb->cmd.ffdc.op_code = IPS_CMD_FFDC; + scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.ffdc.reset_count = 0; + scb->cmd.ffdc.reset_type = 0x80; - /* convert time to what the card wants */ - ips_fix_ffdc_time(ha, scb, ha->last_ffdc); + /* convert 
time to what the card wants */ + ips_fix_ffdc_time(ha, scb, ha->last_ffdc); - /* issue command */ - ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC); + /* issue command */ + ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC); } /****************************************************************************/ @@ -6647,57 +6173,59 @@ ips_ffdc_time(ips_ha_t *ha) { /* */ /****************************************************************************/ static void -ips_fix_ffdc_time(ips_ha_t *ha, ips_scb_t *scb, time_t current_time) { - long days; - long rem; - int i; - int year; - int yleap; - int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR }; - int month_lengths[12][2] = { {31, 31}, - {28, 29}, - {31, 31}, - {30, 30}, - {31, 31}, - {30, 30}, - {31, 31}, - {31, 31}, - {30, 30}, - {31, 31}, - {30, 30}, - {31, 31} }; - - METHOD_TRACE("ips_fix_ffdc_time", 1); - - days = current_time / IPS_SECS_DAY; - rem = current_time % IPS_SECS_DAY; - - scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR); - rem = rem % IPS_SECS_HOUR; - scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN); - scb->cmd.ffdc.second = (rem % IPS_SECS_MIN); - - year = IPS_EPOCH_YEAR; - while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) { - int newy; - - newy = year + (days / IPS_DAYS_NORMAL_YEAR); - if (days < 0) - --newy; - days -= (newy - year) * IPS_DAYS_NORMAL_YEAR + - IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) - - IPS_NUM_LEAP_YEARS_THROUGH(year - 1); - year = newy; - } +ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time) +{ + long days; + long rem; + int i; + int year; + int yleap; + int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR }; + int month_lengths[12][2] = { {31, 31}, + {28, 29}, + {31, 31}, + {30, 30}, + {31, 31}, + {30, 30}, + {31, 31}, + {31, 31}, + {30, 30}, + {31, 31}, + {30, 30}, + {31, 31} + }; + + METHOD_TRACE("ips_fix_ffdc_time", 1); + + days = current_time / IPS_SECS_DAY; + rem = current_time % IPS_SECS_DAY; + + scb->cmd.ffdc.hour = (rem / 
IPS_SECS_HOUR); + rem = rem % IPS_SECS_HOUR; + scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN); + scb->cmd.ffdc.second = (rem % IPS_SECS_MIN); + + year = IPS_EPOCH_YEAR; + while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) { + int newy; + + newy = year + (days / IPS_DAYS_NORMAL_YEAR); + if (days < 0) + --newy; + days -= (newy - year) * IPS_DAYS_NORMAL_YEAR + + IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) - + IPS_NUM_LEAP_YEARS_THROUGH(year - 1); + year = newy; + } - scb->cmd.ffdc.yearH = year / 100; - scb->cmd.ffdc.yearL = year % 100; + scb->cmd.ffdc.yearH = year / 100; + scb->cmd.ffdc.yearL = year % 100; - for (i = 0; days >= month_lengths[i][yleap]; ++i) - days -= month_lengths[i][yleap]; + for (i = 0; days >= month_lengths[i][yleap]; ++i) + days -= month_lengths[i][yleap]; - scb->cmd.ffdc.month = i + 1; - scb->cmd.ffdc.day = days + 1; + scb->cmd.ffdc.month = i + 1; + scb->cmd.ffdc.day = days + 1; } /**************************************************************************** @@ -6713,106 +6241,107 @@ ips_fix_ffdc_time(ips_ha_t *ha, ips_scb_ /* */ /****************************************************************************/ static int -ips_erase_bios(ips_ha_t *ha) { - int timeout; - uint8_t status=0; - - METHOD_TRACE("ips_erase_bios", 1); - - status = 0; - - /* Clear the status register */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0x50, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Setup */ - outb(0x20, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Confirm */ - outb(0xD0, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Status */ - outb(0x70, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - timeout = 80000; /* 80 
seconds */ - - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - outl(0, ha->io_addr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = inb(ha->io_addr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - /* check for timeout */ - if (timeout <= 0) { - /* timeout */ - - /* try to suspend the erase */ - outb(0xB0, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait for 10 seconds */ - timeout = 10000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - outl(0, ha->io_addr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = inb(ha->io_addr + IPS_REG_FLDP); - - if (status & 0xC0) - break; - - MDELAY(1); - timeout--; - } - - return (1); - } - - /* check for valid VPP */ - if (status & 0x08) - /* VPP failure */ - return (1); - - /* check for succesful flash */ - if (status & 0x30) - /* sequence error */ - return (1); - - /* Otherwise, we were successful */ - /* clear status */ - outb(0x50, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* enable reads */ - outb(0xFF, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_erase_bios(ips_ha_t * ha) +{ + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_erase_bios", 1); + + status = 0; - return (0); + /* Clear the status register */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0x50, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Setup */ + outb(0x20, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Confirm */ + outb(0xD0, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Status */ + 
outb(0x70, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + timeout = 80000; /* 80 seconds */ + + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + /* check for timeout */ + if (timeout <= 0) { + /* timeout */ + + /* try to suspend the erase */ + outb(0xB0, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait for 10 seconds */ + timeout = 10000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0xC0) + break; + + MDELAY(1); + timeout--; + } + + return (1); + } + + /* check for valid VPP */ + if (status & 0x08) + /* VPP failure */ + return (1); + + /* check for succesful flash */ + if (status & 0x30) + /* sequence error */ + return (1); + + /* Otherwise, we were successful */ + /* clear status */ + outb(0x50, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* enable reads */ + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (0); } /****************************************************************************/ @@ -6824,106 +6353,107 @@ ips_erase_bios(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_erase_bios_memio(ips_ha_t *ha) { - int timeout; - uint8_t status; - - METHOD_TRACE("ips_erase_bios_memio", 1); - - status = 0; - - /* Clear the status register */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0x50, ha->mem_ptr + 
IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Setup */ - writeb(0x20, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Confirm */ - writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Status */ - writeb(0x70, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - timeout = 80000; /* 80 seconds */ - - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - writel(0, ha->mem_ptr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = readb(ha->mem_ptr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - /* check for timeout */ - if (timeout <= 0) { - /* timeout */ - - /* try to suspend the erase */ - writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait for 10 seconds */ - timeout = 10000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - writel(0, ha->mem_ptr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = readb(ha->mem_ptr + IPS_REG_FLDP); - - if (status & 0xC0) - break; - - MDELAY(1); - timeout--; - } - - return (1); - } - - /* check for valid VPP */ - if (status & 0x08) - /* VPP failure */ - return (1); - - /* check for succesful flash */ - if (status & 0x30) - /* sequence error */ - return (1); - - /* Otherwise, we were successful */ - /* clear status */ - writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* enable reads */ - writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_erase_bios_memio(ips_ha_t * ha) +{ + int timeout; + uint8_t status; + + METHOD_TRACE("ips_erase_bios_memio", 1); + + status = 0; - return (0); + /* 
Clear the status register */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Setup */ + writeb(0x20, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Confirm */ + writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Status */ + writeb(0x70, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + timeout = 80000; /* 80 seconds */ + + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + /* check for timeout */ + if (timeout <= 0) { + /* timeout */ + + /* try to suspend the erase */ + writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait for 10 seconds */ + timeout = 10000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0xC0) + break; + + MDELAY(1); + timeout--; + } + + return (1); + } + + /* check for valid VPP */ + if (status & 0x08) + /* VPP failure */ + return (1); + + /* check for succesful flash */ + if (status & 0x30) + /* sequence error */ + return (1); + + /* Otherwise, we were successful */ + /* clear status */ + writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* enable reads */ + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + 
udelay(25); /* 25 us */ + + return (0); } /****************************************************************************/ @@ -6935,84 +6465,86 @@ ips_erase_bios_memio(ips_ha_t *ha) { /* */ /****************************************************************************/ static int -ips_program_bios(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - int i; - int timeout; - uint8_t status=0; - - METHOD_TRACE("ips_program_bios", 1); - - status = 0; - - for (i = 0; i < buffersize; i++) { - /* write a byte */ - outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0x40, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(buffer[i], ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait up to one second */ - timeout = 1000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - outl(0, ha->io_addr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = inb(ha->io_addr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - if (timeout == 0) { - /* timeout error */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0xFF, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - - /* check the status */ - if (status & 0x18) { - /* programming error */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0xFF, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - } /* end for */ - - /* Enable reading */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0xFF, 
ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + int i; + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_program_bios", 1); + + status = 0; + + for (i = 0; i < buffersize; i++) { + /* write a byte */ + outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0x40, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(buffer[i], ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait up to one second */ + timeout = 1000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + if (timeout == 0) { + /* timeout error */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + + /* check the status */ + if (status & 0x18) { + /* programming error */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + } /* end for */ + + /* Enable reading */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ - return (0); + return (0); } 
/****************************************************************************/ @@ -7024,84 +6556,86 @@ ips_program_bios(ips_ha_t *ha, char *buf /* */ /****************************************************************************/ static int -ips_program_bios_memio(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - int i; - int timeout; - uint8_t status=0; - - METHOD_TRACE("ips_program_bios_memio", 1); - - status = 0; - - for (i = 0; i < buffersize; i++) { - /* write a byte */ - writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0x40, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait up to one second */ - timeout = 1000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - writel(0, ha->mem_ptr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = readb(ha->mem_ptr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - if (timeout == 0) { - /* timeout error */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - - /* check the status */ - if (status & 0x18) { - /* programming error */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - } /* end for */ - - /* Enable reading */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0xFF, ha->mem_ptr + 
IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + int i; + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_program_bios_memio", 1); + + status = 0; + + for (i = 0; i < buffersize; i++) { + /* write a byte */ + writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0x40, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait up to one second */ + timeout = 1000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + if (timeout == 0) { + /* timeout error */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + + /* check the status */ + if (status & 0x18) { + /* programming error */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + } /* end for */ + + /* Enable reading */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ - return (0); + return (0); } 
/****************************************************************************/ @@ -7113,42 +6647,44 @@ ips_program_bios_memio(ips_ha_t *ha, cha /* */ /****************************************************************************/ static int -ips_verify_bios(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - uint8_t checksum; - int i; - - METHOD_TRACE("ips_verify_bios", 1); - - /* test 1st byte */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) - return (1); - - outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) - return (1); - - checksum = 0xff; - for (i = 2; i < buffersize; i++) { - - outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP); - } - - if (checksum != 0) - /* failure */ - return (1); - else - /* success */ - return (0); +ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + uint8_t checksum; + int i; + + METHOD_TRACE("ips_verify_bios", 1); + + /* test 1st byte */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) + return (1); + + outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) + return (1); + + checksum = 0xff; + for (i = 2; i < buffersize; i++) { + + outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP); + } + + if (checksum != 0) + /* failure */ + return 
(1); + else + /* success */ + return (0); } /****************************************************************************/ @@ -7160,42 +6696,45 @@ ips_verify_bios(ips_ha_t *ha, char *buff /* */ /****************************************************************************/ static int -ips_verify_bios_memio(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - uint8_t checksum; - int i; - - METHOD_TRACE("ips_verify_bios_memio", 1); - - /* test 1st byte */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) - return (1); - - writel(1, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) - return (1); - - checksum = 0xff; - for (i = 2; i < buffersize; i++) { - - writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - checksum = (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP); - } - - if (checksum != 0) - /* failure */ - return (1); - else - /* success */ - return (0); +ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + uint8_t checksum; + int i; + + METHOD_TRACE("ips_verify_bios_memio", 1); + + /* test 1st byte */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) + return (1); + + writel(1, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) + return (1); + + checksum = 0xff; + for (i = 2; i < buffersize; i++) { + + writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + checksum = + (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP); + } + + if 
(checksum != 0) + /* failure */ + return (1); + else + /* success */ + return (0); } /*---------------------------------------------------------------------------*/ @@ -7208,77 +6747,82 @@ ips_verify_bios_memio(ips_ha_t *ha, char /* Data is available. */ /* */ /*---------------------------------------------------------------------------*/ -static void ips_version_check(ips_ha_t *ha, int intr) { - IPS_VERSION_DATA VersionInfo; - uint8_t FirmwareVersion[ IPS_COMPAT_ID_LENGTH + 1 ]; - uint8_t BiosVersion[ IPS_COMPAT_ID_LENGTH + 1]; - int MatchError; - int rc; - char BiosString[10]; - char FirmwareString[10]; - - METHOD_TRACE("ips_version_check", 1); - - memset(FirmwareVersion, 0, IPS_COMPAT_ID_LENGTH + 1); - memset(BiosVersion, 0, IPS_COMPAT_ID_LENGTH + 1); - - /* Get the Compatible BIOS Version from NVRAM Page 5 */ - memcpy(BiosVersion, ha->nvram->BiosCompatibilityID, IPS_COMPAT_ID_LENGTH); - - rc = IPS_FAILURE; - if (ha->subsys->param[4] & IPS_GET_VERSION_SUPPORT) /* If Versioning is Supported */ - { - /* Get the Version Info with a Get Version Command */ - rc = ips_get_version_info(ha, &VersionInfo, intr); - if (rc == IPS_SUCCESS) - memcpy(FirmwareVersion, VersionInfo.compatibilityId, IPS_COMPAT_ID_LENGTH); - } - - if (rc != IPS_SUCCESS) /* If Data Not Obtainable from a GetVersion Command */ - { - /* Get the Firmware Version from Enquiry Data */ - memcpy(FirmwareVersion, ha->enq->CodeBlkVersion, IPS_COMPAT_ID_LENGTH); - } - - /* printk(KERN_WARNING "Adapter's BIOS Version = %s\n", BiosVersion); */ - /* printk(KERN_WARNING "BIOS Compatible Version = %s\n", IPS_COMPAT_BIOS); */ - /* printk(KERN_WARNING "Adapter's Firmware Version = %s\n", FirmwareVersion); */ - /* printk(KERN_WARNING "Firmware Compatible Version = %s \n", Compatable[ ha->nvram->adapter_type ]); */ - - MatchError = 0; - - if (strncmp(FirmwareVersion, Compatable[ ha->nvram->adapter_type ], IPS_COMPAT_ID_LENGTH) != 0) - MatchError = 1; - - if (strncmp(BiosVersion, IPS_COMPAT_BIOS, IPS_COMPAT_ID_LENGTH) 
!= 0) - MatchError = 1; - - ha->nvram->versioning = 1; /* Indicate the Driver Supports Versioning */ - - if (MatchError) - { - ha->nvram->version_mismatch = 1; - if (ips_cd_boot == 0) - { - strncpy(&BiosString[0], ha->nvram->bios_high, 4); - strncpy(&BiosString[4], ha->nvram->bios_low, 4); - BiosString[8] = 0; - - strncpy(&FirmwareString[0], ha->enq->CodeBlkVersion, 8); - FirmwareString[8] = 0; - - printk(KERN_WARNING "Warning ! ! ! ServeRAID Version Mismatch\n"); - printk(KERN_WARNING "Bios = %s, Firmware = %s, Device Driver = %s%s\n", - BiosString, FirmwareString, IPS_VERSION_HIGH, IPS_VERSION_LOW ); - printk(KERN_WARNING "These levels should match to avoid possible compatibility problems.\n" ); - } - } - else - { - ha->nvram->version_mismatch = 0; - } +static void +ips_version_check(ips_ha_t * ha, int intr) +{ + IPS_VERSION_DATA VersionInfo; + uint8_t FirmwareVersion[IPS_COMPAT_ID_LENGTH + 1]; + uint8_t BiosVersion[IPS_COMPAT_ID_LENGTH + 1]; + int MatchError; + int rc; + char BiosString[10]; + char FirmwareString[10]; + + METHOD_TRACE("ips_version_check", 1); + + memset(FirmwareVersion, 0, IPS_COMPAT_ID_LENGTH + 1); + memset(BiosVersion, 0, IPS_COMPAT_ID_LENGTH + 1); + + /* Get the Compatible BIOS Version from NVRAM Page 5 */ + memcpy(BiosVersion, ha->nvram->BiosCompatibilityID, + IPS_COMPAT_ID_LENGTH); + + rc = IPS_FAILURE; + if (ha->subsys->param[4] & IPS_GET_VERSION_SUPPORT) { /* If Versioning is Supported */ + /* Get the Version Info with a Get Version Command */ + rc = ips_get_version_info(ha, &VersionInfo, intr); + if (rc == IPS_SUCCESS) + memcpy(FirmwareVersion, VersionInfo.compatibilityId, + IPS_COMPAT_ID_LENGTH); + } + + if (rc != IPS_SUCCESS) { /* If Data Not Obtainable from a GetVersion Command */ + /* Get the Firmware Version from Enquiry Data */ + memcpy(FirmwareVersion, ha->enq->CodeBlkVersion, + IPS_COMPAT_ID_LENGTH); + } + + /* printk(KERN_WARNING "Adapter's BIOS Version = %s\n", BiosVersion); */ + /* printk(KERN_WARNING "BIOS Compatible Version 
= %s\n", IPS_COMPAT_BIOS); */ + /* printk(KERN_WARNING "Adapter's Firmware Version = %s\n", FirmwareVersion); */ + /* printk(KERN_WARNING "Firmware Compatible Version = %s \n", Compatable[ ha->nvram->adapter_type ]); */ + + MatchError = 0; + + if (strncmp + (FirmwareVersion, Compatable[ha->nvram->adapter_type], + IPS_COMPAT_ID_LENGTH) != 0) + MatchError = 1; + + if (strncmp(BiosVersion, IPS_COMPAT_BIOS, IPS_COMPAT_ID_LENGTH) != 0) + MatchError = 1; + + ha->nvram->versioning = 1; /* Indicate the Driver Supports Versioning */ + + if (MatchError) { + ha->nvram->version_mismatch = 1; + if (ips_cd_boot == 0) { + strncpy(&BiosString[0], ha->nvram->bios_high, 4); + strncpy(&BiosString[4], ha->nvram->bios_low, 4); + BiosString[8] = 0; + + strncpy(&FirmwareString[0], ha->enq->CodeBlkVersion, 8); + FirmwareString[8] = 0; + + printk(KERN_WARNING + "Warning ! ! ! ServeRAID Version Mismatch\n"); + printk(KERN_WARNING + "Bios = %s, Firmware = %s, Device Driver = %s%s\n", + BiosString, FirmwareString, IPS_VERSION_HIGH, + IPS_VERSION_LOW); + printk(KERN_WARNING + "These levels should match to avoid possible compatibility problems.\n"); + } + } else { + ha->nvram->version_mismatch = 0; + } - return; + return; } /*---------------------------------------------------------------------------*/ @@ -7290,52 +6834,227 @@ static void ips_version_check(ips_ha_t * /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int ips_get_version_info(ips_ha_t *ha, IPS_VERSION_DATA *Buffer, int intr ) { - ips_scb_t *scb; - int rc; - - METHOD_TRACE("ips_get_version_info", 1); - - memset(Buffer, 0, sizeof(IPS_VERSION_DATA)); - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_GET_VERSION_INFO; - scb->cmd.version_info.op_code = IPS_CMD_GET_VERSION_INFO; - scb->cmd.version_info.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.version_info.reserved 
= 0; - scb->cmd.version_info.count = sizeof( IPS_VERSION_DATA); - scb->cmd.version_info.reserved2 = 0; - scb->data_len = sizeof(*Buffer); - scb->data_busaddr = pci_map_single(ha->pcidev, Buffer, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.version_info.buffer_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* issue command */ - rc = ips_send_wait(ha, scb, ips_cmd_timeout, intr); - return( rc ); -} - - - -#if defined (MODULE) || (LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0)) -static Scsi_Host_Template driver_template = IPS; -#include "scsi_module.c" -#endif +static int +ips_get_version_info(ips_ha_t * ha, IPS_VERSION_DATA * Buffer, int intr) +{ + ips_scb_t *scb; + int rc; + + METHOD_TRACE("ips_get_version_info", 1); + + memset(Buffer, 0, sizeof (IPS_VERSION_DATA)); + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); -static int ips_abort_init(ips_ha_t *ha, struct Scsi_Host *sh, int index){ - ha->active = 0; - ips_free(ha); - scsi_unregister(sh); - ips_ha[index] = 0; - ips_sh[index] = 0; - return -1; + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_GET_VERSION_INFO; + scb->cmd.version_info.op_code = IPS_CMD_GET_VERSION_INFO; + scb->cmd.version_info.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.version_info.reserved = 0; + scb->cmd.version_info.count = sizeof (IPS_VERSION_DATA); + scb->cmd.version_info.reserved2 = 0; + scb->data_len = sizeof (*Buffer); + scb->data_busaddr = pci_map_single(ha->pcidev, Buffer, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.version_info.buffer_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* issue command */ + rc = ips_send_wait(ha, scb, ips_cmd_timeout, intr); + return (rc); } -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) +/****************************************************************************/ +/* */ +/* Routine Name: ips_abort_init */ +/* */ +/* Routine Description: */ +/* cleanup routine for a failed adapter initialization */ 
+/****************************************************************************/ +static int +ips_abort_init(ips_ha_t * ha, int index) +{ + ha->active = 0; + ips_free(ha); + ips_ha[index] = 0; + ips_sh[index] = 0; + return -1; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_shift_controllers */ +/* */ +/* Routine Description: */ +/* helper function for ordering adapters */ +/****************************************************************************/ +static void +ips_shift_controllers(int lowindex, int highindex) +{ + ips_ha_t *ha_sav = ips_ha[highindex]; + struct Scsi_Host *sh_sav = ips_sh[highindex]; + int i; + + for (i = highindex; i > lowindex; i--) { + ips_ha[i] = ips_ha[i - 1]; + ips_sh[i] = ips_sh[i - 1]; + ips_ha[i]->host_num = i; + } + ha_sav->host_num = lowindex; + ips_ha[lowindex] = ha_sav; + ips_sh[lowindex] = sh_sav; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_order_controllers */ +/* */ +/* Routine Description: */ +/* place controllers is the "proper" boot order */ +/****************************************************************************/ +static void +ips_order_controllers(void) +{ + int i, j, tmp, position = 0; + IPS_NVRAM_P5 *nvram; + if (!ips_ha[0]) + return; + nvram = ips_ha[0]->nvram; + + if (nvram->adapter_order[0]) { + for (i = 1; i <= nvram->adapter_order[0]; i++) { + for (j = position; j < ips_num_controllers; j++) { + switch (ips_ha[j]->ad_type) { + case IPS_ADTYPE_SERVERAID6M: + if (nvram->adapter_order[i] == 'M') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID4L: + case IPS_ADTYPE_SERVERAID4M: + case IPS_ADTYPE_SERVERAID4MX: + case IPS_ADTYPE_SERVERAID4LX: + if (nvram->adapter_order[i] == 'N') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID6I: + case IPS_ADTYPE_SERVERAID5I2: + case 
IPS_ADTYPE_SERVERAID5I1: + if (nvram->adapter_order[i] == 'S') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID: + case IPS_ADTYPE_SERVERAID2: + case IPS_ADTYPE_NAVAJO: + case IPS_ADTYPE_KIOWA: + case IPS_ADTYPE_SERVERAID3L: + case IPS_ADTYPE_SERVERAID3: + case IPS_ADTYPE_SERVERAID4H: + if (nvram->adapter_order[i] == 'A') { + ips_shift_controllers(position, + j); + position++; + } + break; + default: + break; + } + } + } + /* if adapter_order[0], then ordering is complete */ + return; + } + /* old bios, use older ordering */ + tmp = 0; + for (i = position; i < ips_num_controllers; i++) { + if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) { + ips_shift_controllers(position, i); + position++; + tmp = 1; + } + } + /* if there were no 5I cards, then don't do any extra ordering */ + if (!tmp) + return; + for (i = position; i < ips_num_controllers; i++) { + if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) { + ips_shift_controllers(position, i); + position++; + } + } + + return; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_register_scsi */ +/* */ +/* Routine Description: */ +/* perform any registration and setup with the scsi layer */ +/****************************************************************************/ +static int +ips_register_scsi(int index) +{ + struct Scsi_Host *sh; + ips_ha_t *ha, *oldha; + sh = scsi_register(&ips_driver_template, sizeof (ips_ha_t)); + if (!sh) { + printk(KERN_WARNING + "Unable to register controller with SCSI subsystem\n"); + return -1; + } + oldha = ips_ha[index]; + ha = IPS_HA(sh); + memcpy(ha, oldha, sizeof (ips_ha_t)); + free_irq(oldha->irq, oldha); + /* Install the interrupt handler with the new ha */ + if 
(request_irq(ha->irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { + printk(KERN_WARNING "Unable to install interrupt handler\n"); + scsi_unregister(sh); + return -1; + } + + kfree(oldha); + ips_sh[index] = sh; + ips_ha[index] = ha; + scsi_set_pci_device(sh, ha->pcidev); + + /* Store away needed values for later use */ + sh->io_port = ha->io_addr; + sh->n_io_port = ha->io_addr ? 255 : 0; + sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr; + sh->irq = ha->irq; + sh->sg_tablesize = sh->hostt->sg_tablesize; + sh->can_queue = sh->hostt->can_queue; + sh->cmd_per_lun = sh->hostt->cmd_per_lun; + sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; + sh->use_clustering = sh->hostt->use_clustering; + +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,7) + sh->max_sectors = 128; +#endif + + sh->max_id = ha->ntargets; + sh->max_lun = ha->nlun; + sh->max_channel = ha->nbus - 1; + sh->can_queue = ha->max_cmds - 1; + + return 0; +} /*---------------------------------------------------------------------------*/ /* Routine Name: ips_remove_device */ @@ -7343,24 +7062,64 @@ static int ips_abort_init(ips_ha_t *ha, /* Routine Description: */ /* Remove one Adapter ( Hot Plugging ) */ /*---------------------------------------------------------------------------*/ -static void ips_remove_device(struct pci_dev *pci_dev) +static void +ips_remove_device(struct pci_dev *pci_dev) { - int i; - struct Scsi_Host *sh; - ips_ha_t *ha; - - for (i = 0; i < IPS_MAX_ADAPTERS; i++) { - ha = ips_ha[i]; - if (ha) { - if ( (pci_dev->bus->number == ha->pcidev->bus->number) && - (pci_dev->devfn == ha->pcidev->devfn)) { - sh = ips_sh[i]; - ips_release(sh); - } - } - } + int i; + struct Scsi_Host *sh; + ips_ha_t *ha; + + for (i = 0; i < IPS_MAX_ADAPTERS; i++) { + ha = ips_ha[i]; + if (ha) { + if ((pci_dev->bus->number == ha->pcidev->bus->number) && + (pci_dev->devfn == ha->pcidev->devfn)) { + sh = ips_sh[i]; + ips_release(sh); + } + } + } } 
+/****************************************************************************/ +/* */ +/* Routine Name: ips_module_init */ +/* */ +/* Routine Description: */ +/* function called on module load */ +/****************************************************************************/ +static int __init +ips_module_init(void) +{ + if (pci_module_init(&ips_pci_driver) < 0) + return -ENODEV; + ips_driver_template.module = THIS_MODULE; + ips_order_controllers(); + if (scsi_register_module(MODULE_SCSI_HA, &ips_driver_template)) { + pci_unregister_driver(&ips_pci_driver); + return -ENODEV; + } + register_reboot_notifier(&ips_notifier); + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_module_exit */ +/* */ +/* Routine Description: */ +/* function called on module unload */ +/****************************************************************************/ +static void __exit +ips_module_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &ips_driver_template); + pci_unregister_driver(&ips_pci_driver); + unregister_reboot_notifier(&ips_notifier); +} + +module_init(ips_module_init); +module_exit(ips_module_exit); /*---------------------------------------------------------------------------*/ /* Routine Name: ips_insert_device */ @@ -7371,40 +7130,28 @@ static void ips_remove_device(struct pci /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent) +static int __devinit +ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent) { - int index; - int rc; + int index; + int rc; - METHOD_TRACE("ips_insert_device", 1); + METHOD_TRACE("ips_insert_device", 1); - /* If we're still in Init State 0, and we've already found the Adapter */ - /* Ordering Table, there is no reason to continue. 
*/ - if ( (InitState == 0) && (AdapterOrder[0]) ) - return -1; - - if (pci_enable_device(pci_dev)) + if (pci_enable_device(pci_dev)) return -1; - rc = ips_init_phase1(pci_dev, &index); - if (rc == SUCCESS) - rc = ips_init_phase2(index); - - /* If we're in Init State 0, we're done with the device for now. */ - /* Release the device and don't count it. */ - if ( InitState == 0 ) { - ips_remove_device(pci_dev); - return -1; - } + rc = ips_init_phase1(pci_dev, &index); + if (rc == SUCCESS) + rc = ips_init_phase2(index); - if (rc == SUCCESS) - ips_num_controllers++; + if (rc == SUCCESS) + ips_num_controllers++; - ips_next_controller = ips_num_controllers; - return rc; + ips_next_controller = ips_num_controllers; + return rc; } - /*---------------------------------------------------------------------------*/ /* Routine Name: ips_init_phase1 */ /* */ @@ -7414,248 +7161,232 @@ static int __devinit ips_insert_device(s /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr ) -{ - struct Scsi_Host *sh; - ips_ha_t *ha; - uint32_t io_addr; - uint32_t mem_addr; - uint32_t io_len; - uint32_t mem_len; - uint8_t revision_id; - uint8_t bus; - uint8_t func; - uint8_t irq; - uint16_t subdevice_id; - int j; - int index; - uint32_t count; - dma_addr_t dma_address; - char *ioremap_ptr; - char *mem_ptr; - uint32_t IsDead - - METHOD_TRACE("ips_init_phase1", 1); - index = IPS_MAX_ADAPTERS; - for (j = 0; j < IPS_MAX_ADAPTERS; j++) { - if (ips_ha[j] ==0) { - index = j; - break; - } - } - - if (index >= IPS_MAX_ADAPTERS) - return -1; - - /* stuff that we get in dev */ - irq = pci_dev->irq; - bus = pci_dev->bus->number; - func = pci_dev->devfn; - - /* Init MEM/IO addresses to 0 */ - mem_addr = 0; - io_addr = 0; - mem_len = 0; - io_len = 0; - - for (j = 0; j < 2; j++) { - if (!pci_resource_start(pci_dev, j)) - break; - - if 
(pci_resource_flags(pci_dev, j) & IORESOURCE_IO) { - io_addr = pci_resource_start(pci_dev, j); - io_len = pci_resource_len(pci_dev, j); - } else { - mem_addr = pci_resource_start(pci_dev, j); - mem_len = pci_resource_len(pci_dev, j); - } - } - - /* setup memory mapped area (if applicable) */ - if (mem_addr) { - uint32_t base; - uint32_t offs; - - if (check_mem_region(mem_addr, mem_len)) { - printk(KERN_WARNING "Couldn't allocate IO Memory space %x len %d.\n", mem_addr, mem_len); - return -1; - } - - request_mem_region(mem_addr, mem_len, "ips"); - base = mem_addr & PAGE_MASK; - offs = mem_addr - base; - ioremap_ptr = ioremap(base, PAGE_SIZE); - mem_ptr = ioremap_ptr + offs; - } else { - ioremap_ptr = NULL; - mem_ptr = NULL; - } - - /* setup I/O mapped area (if applicable) */ - if (io_addr) { - if (check_region(io_addr, io_len)) { - printk(KERN_WARNING "Couldn't allocate IO space %x len %d.\n", io_addr, io_len); - return -1; - } - request_region(io_addr, io_len, "ips"); - } - - /* get the revision ID */ - if (pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id)) { - printk(KERN_WARNING "Can't get revision id.\n" ); - return -1; - } - - subdevice_id = pci_dev->subsystem_device; - - /* found a controller */ - sh = scsi_register(&driver_template, sizeof(ips_ha_t)); -#if LINUX_VERSION_CODE > LinuxVersionCode(2,5,0) - pci_set_dma_mask(pci_dev, (u64)0xffffffff); - scsi_set_pci_device(sh, pci_dev); -#endif - if (sh == NULL) { - printk(KERN_WARNING "Unable to register controller with SCSI subsystem\n" ); - return -1; - } - - ha = IPS_HA(sh); - memset(ha, 0, sizeof(ips_ha_t)); - - ips_sh[index] = sh; - ips_ha[index] = ha; - ha->active = 1; - - ha->enq = kmalloc(sizeof(IPS_ENQ), GFP_KERNEL); - - if (!ha->enq) { - printk(KERN_WARNING "Unable to allocate host inquiry structure\n" ); - return ips_abort_init(ha, sh, index); - } - - ha->adapt = pci_alloc_consistent(pci_dev, sizeof(IPS_ADAPTER) + - sizeof(IPS_IO_CMD), &dma_address); - if (!ha->adapt) { - printk(KERN_WARNING 
"Unable to allocate host adapt & dummy structures\n"); - return ips_abort_init(ha, sh, index); - } - ha->adapt->hw_status_start = dma_address; - ha->dummy = (void *)(ha->adapt + 1); - - ha->conf = kmalloc(sizeof(IPS_CONF), GFP_KERNEL); - - if (!ha->conf) { - printk(KERN_WARNING "Unable to allocate host conf structure\n" ); - return ips_abort_init(ha, sh, index); - } - - ha->nvram = kmalloc(sizeof(IPS_NVRAM_P5), GFP_KERNEL); - - if (!ha->nvram) { - printk(KERN_WARNING "Unable to allocate host NVRAM structure\n" ); - return ips_abort_init(ha, sh, index); - } - - ha->subsys = kmalloc(sizeof(IPS_SUBSYS), GFP_KERNEL); - - if (!ha->subsys) { - printk(KERN_WARNING "Unable to allocate host subsystem structure\n" ); - return ips_abort_init(ha, sh, index); - } - - for (count = PAGE_SIZE, ha->ioctl_order = 0; - count < ips_ioctlsize; - ha->ioctl_order++, count <<= 1); - - ha->ioctl_data = (char *) __get_free_pages(GFP_KERNEL, ha->ioctl_order); - ha->ioctl_datasize = count; - - if (!ha->ioctl_data) { - printk(KERN_WARNING "Unable to allocate IOCTL data\n" ); - ha->ioctl_data = NULL; - ha->ioctl_order = 0; - ha->ioctl_datasize = 0; - } - - /* Store away needed values for later use */ - sh->io_port = io_addr; - sh->n_io_port = io_addr ? 255 : 0; - sh->unique_id = (io_addr) ? 
io_addr : mem_addr; - sh->irq = irq; - sh->select_queue_depths = ips_select_queue_depth; - sh->sg_tablesize = sh->hostt->sg_tablesize; - sh->can_queue = sh->hostt->can_queue; - sh->cmd_per_lun = sh->hostt->cmd_per_lun; - sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; - sh->use_clustering = sh->hostt->use_clustering; +static int +ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr) +{ + ips_ha_t *ha; + uint32_t io_addr; + uint32_t mem_addr; + uint32_t io_len; + uint32_t mem_len; + uint8_t revision_id; + uint8_t bus; + uint8_t func; + uint8_t irq; + uint16_t subdevice_id; + int j; + int index; + uint32_t count; + dma_addr_t dma_address; + char *ioremap_ptr; + char *mem_ptr; + uint32_t IsDead; + + METHOD_TRACE("ips_init_phase1", 1); + index = IPS_MAX_ADAPTERS; + for (j = 0; j < IPS_MAX_ADAPTERS; j++) { + if (ips_ha[j] == 0) { + index = j; + break; + } + } -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,7) - sh->max_sectors = 128; -#endif + if (index >= IPS_MAX_ADAPTERS) + return -1; - /* Store info in HA structure */ - ha->irq = irq; - ha->io_addr = io_addr; - ha->io_len = io_len; - ha->mem_addr = mem_addr; - ha->mem_len = mem_len; - ha->mem_ptr = mem_ptr; - ha->ioremap_ptr = ioremap_ptr; - ha->host_num = ( uint32_t) index; - ha->revision_id = revision_id; - ha->slot_num = PCI_SLOT(pci_dev->devfn); - ha->device_id = pci_dev->device; - ha->subdevice_id = subdevice_id; - ha->pcidev = pci_dev; - - /* - * Setup Functions - */ - ips_setup_funclist(ha); - - if ( ( IPS_IS_MORPHEUS( ha ) ) || ( IPS_IS_MARCO( ha ) ) ) { - /* If Morpheus appears dead, reset it */ - IsDead = readl( ha->mem_ptr + IPS_REG_I960_MSG1 ); - if ( IsDead == 0xDEADBEEF ) { - ips_reset_morpheus( ha ); - } - } - - /* - * Initialize the card if it isn't already - */ - - if (!(*ha->func.isinit)(ha)) { - if (!(*ha->func.init)(ha)) { - /* - * Initialization failed - */ - printk(KERN_WARNING "Unable to initialize controller\n" ); - return ips_abort_init(ha, sh, index); - } - } - - /* Install the 
interrupt handler */ - if (request_irq(irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { - printk(KERN_WARNING "Unable to install interrupt handler\n" ); - return ips_abort_init(ha, sh, index); - } - - /* - * Allocate a temporary SCB for initialization - */ - ha->max_cmds = 1; - if (!ips_allocatescbs(ha)) { - printk(KERN_WARNING "Unable to allocate a CCB\n" ); - free_irq(ha->irq, ha); - return ips_abort_init(ha, sh, index); - } + /* stuff that we get in dev */ + irq = pci_dev->irq; + bus = pci_dev->bus->number; + func = pci_dev->devfn; + + /* Init MEM/IO addresses to 0 */ + mem_addr = 0; + io_addr = 0; + mem_len = 0; + io_len = 0; + + for (j = 0; j < 2; j++) { + if (!pci_resource_start(pci_dev, j)) + break; + + if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) { + io_addr = pci_resource_start(pci_dev, j); + io_len = pci_resource_len(pci_dev, j); + } else { + mem_addr = pci_resource_start(pci_dev, j); + mem_len = pci_resource_len(pci_dev, j); + } + } + + /* setup memory mapped area (if applicable) */ + if (mem_addr) { + uint32_t base; + uint32_t offs; + + if (!request_mem_region(mem_addr, mem_len, "ips")) { + printk(KERN_WARNING + "Couldn't allocate IO Memory space %x len %d.\n", + mem_addr, mem_len); + return -1; + } + + base = mem_addr & PAGE_MASK; + offs = mem_addr - base; + ioremap_ptr = ioremap(base, PAGE_SIZE); + mem_ptr = ioremap_ptr + offs; + } else { + ioremap_ptr = NULL; + mem_ptr = NULL; + } + + /* setup I/O mapped area (if applicable) */ + if (io_addr) { + if (!request_region(io_addr, io_len, "ips")) { + printk(KERN_WARNING + "Couldn't allocate IO space %x len %d.\n", + io_addr, io_len); + return -1; + } + } + + /* get the revision ID */ + if (pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id)) { + printk(KERN_WARNING "Can't get revision id.\n"); + return -1; + } - *indexPtr = index; - return SUCCESS; -} + subdevice_id = pci_dev->subsystem_device; -#endif + /* found a controller */ + ha = kmalloc(sizeof (ips_ha_t), GFP_KERNEL); + if (ha == NULL) { 
+ printk(KERN_WARNING "Unable to allocate temporary ha struct\n"); + return -1; + } + + memset(ha, 0, sizeof (ips_ha_t)); + + ips_sh[index] = NULL; + ips_ha[index] = ha; + ha->active = 1; + + /* Store info in HA structure */ + ha->irq = irq; + ha->io_addr = io_addr; + ha->io_len = io_len; + ha->mem_addr = mem_addr; + ha->mem_len = mem_len; + ha->mem_ptr = mem_ptr; + ha->ioremap_ptr = ioremap_ptr; + ha->host_num = (uint32_t) index; + ha->revision_id = revision_id; + ha->slot_num = PCI_SLOT(pci_dev->devfn); + ha->device_id = pci_dev->device; + ha->subdevice_id = subdevice_id; + ha->pcidev = pci_dev; + + /* + * Set the pci_dev's dma_mask. Not all adapters support 64bit + * addressing so don't enable it if the adapter can't support + * it! Also, don't use 64bit addressing if dma addresses + * are guaranteed to be < 4G. + */ + if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) && + !pci_set_dma_mask(ha->pcidev, (u64) 0xffffffffffffffff)) { + (ha)->flags |= IPS_HA_ENH_SG; + } else { + if (pci_set_dma_mask(ha->pcidev, (u64) 0xffffffff) != 0) { + printk(KERN_WARNING "Unable to set DMA Mask\n"); + return ips_abort_init(ha, index); + } + } + + ha->enq = kmalloc(sizeof (IPS_ENQ), IPS_INIT_GFP); + + if (!ha->enq) { + printk(KERN_WARNING + "Unable to allocate host inquiry structure\n"); + return ips_abort_init(ha, index); + } + + ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) + + sizeof (IPS_IO_CMD), &dma_address); + if (!ha->adapt) { + printk(KERN_WARNING + "Unable to allocate host adapt & dummy structures\n"); + return ips_abort_init(ha, index); + } + ha->adapt->hw_status_start = dma_address; + ha->dummy = (void *) (ha->adapt + 1); + + ha->conf = kmalloc(sizeof (IPS_CONF), IPS_INIT_GFP); + + if (!ha->conf) { + printk(KERN_WARNING "Unable to allocate host conf structure\n"); + return ips_abort_init(ha, index); + } + + ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), IPS_INIT_GFP); + + if (!ha->nvram) { + printk(KERN_WARNING + "Unable to allocate host NVRAM structure\n"); 
+ return ips_abort_init(ha, index); + } + + ha->subsys = kmalloc(sizeof (IPS_SUBSYS), IPS_INIT_GFP); + + if (!ha->subsys) { + printk(KERN_WARNING + "Unable to allocate host subsystem structure\n"); + return ips_abort_init(ha, index); + } + + for (count = PAGE_SIZE, ha->ioctl_order = 0; + count < ips_ioctlsize; ha->ioctl_order++, count <<= 1) ; + + ha->ioctl_data = + (char *) __get_free_pages(IPS_INIT_GFP, ha->ioctl_order); + ha->ioctl_datasize = count; + + if (!ha->ioctl_data) { + printk(KERN_WARNING "Unable to allocate IOCTL data\n"); + ha->ioctl_data = NULL; + ha->ioctl_order = 0; + ha->ioctl_datasize = 0; + } + + /* + * Setup Functions + */ + ips_setup_funclist(ha); + + if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) { + /* If Morpheus appears dead, reset it */ + IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1); + if (IsDead == 0xDEADBEEF) { + ips_reset_morpheus(ha); + } + } + + /* + * Initialize the card if it isn't already + */ + + if (!(*ha->func.isinit) (ha)) { + if (!(*ha->func.init) (ha)) { + /* + * Initialization failed + */ + printk(KERN_WARNING + "Unable to initialize controller\n"); + return ips_abort_init(ha, index); + } + } + + *indexPtr = index; + return SUCCESS; +} /*---------------------------------------------------------------------------*/ /* Routine Name: ips_init_phase2 */ @@ -7666,46 +7397,52 @@ static int ips_init_phase1( struct pci_d /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int ips_init_phase2( int index ) -{ - struct Scsi_Host *sh; - ips_ha_t *ha; - - ha = ips_ha[index]; - sh = ips_sh[index]; - - METHOD_TRACE("ips_init_phase2", 1); - if (!ha->active) { - scsi_unregister(sh); - ips_ha[index] = NULL; - ips_sh[index] = NULL; - return -1;; - } - - if (!ips_hainit(ha)) { - printk(KERN_WARNING "Unable to initialize controller\n" ); - free_irq(ha->irq, ha); - return ips_abort_init(ha, sh, index); - } - /* Free the temporary SCB */ - 
ips_deallocatescbs(ha, 1); - - /* allocate CCBs */ - if (!ips_allocatescbs(ha)) { - printk(KERN_WARNING "Unable to allocate CCBs\n" ); - free_irq(ha->irq, ha); - return ips_abort_init(ha, sh, index); - } - - /* finish setting values */ - sh->max_id = ha->ntargets; - sh->max_lun = ha->nlun; - sh->max_channel = ha->nbus - 1; - sh->can_queue = ha->max_cmds-1; +static int +ips_init_phase2(int index) +{ + ips_ha_t *ha; - return SUCCESS; -} + ha = ips_ha[index]; + + METHOD_TRACE("ips_init_phase2", 1); + if (!ha->active) { + ips_ha[index] = NULL; + return -1; + } + + /* Install the interrupt handler */ + if (request_irq(ha->irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { + printk(KERN_WARNING "Unable to install interrupt handler\n"); + return ips_abort_init(ha, index); + } + + /* + * Allocate a temporary SCB for initialization + */ + ha->max_cmds = 1; + if (!ips_allocatescbs(ha)) { + printk(KERN_WARNING "Unable to allocate a CCB\n"); + free_irq(ha->irq, ha); + return ips_abort_init(ha, index); + } + + if (!ips_hainit(ha)) { + printk(KERN_WARNING "Unable to initialize controller\n"); + free_irq(ha->irq, ha); + return ips_abort_init(ha, index); + } + /* Free the temporary SCB */ + ips_deallocatescbs(ha, 1); + + /* allocate CCBs */ + if (!ips_allocatescbs(ha)) { + printk(KERN_WARNING "Unable to allocate CCBs\n"); + free_irq(ha->irq, ha); + return ips_abort_init(ha, index); + } + return SUCCESS; +} #if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,9) MODULE_LICENSE("GPL"); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/ips.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/ips.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/ips.h 2003-08-25 18:24:52.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/ips.h 2003-08-25 20:31:12.000000000 +0200 @@ -69,6 +69,13 @@ #define LinuxVersionCode(x,y,z) (((x)<<16)+((y)<<8)+(z)) #endif + #if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,20) || defined CONFIG_HIGHIO + #define IPS_HIGHIO + #define IPS_HIGHMEM_IO .highmem_io = 1, 
+ #else + #define IPS_HIGHMEM_IO + #endif + #define IPS_HA(x) ((ips_ha_t *) x->hostdata) #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs) #define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \ @@ -86,10 +93,42 @@ ((IPS_IS_TROMBONE(ha) || IPS_IS_CLARINET(ha)) && \ (ips_force_memio))) ? 1 : 0) + #define IPS_HAS_ENH_SGLIST(ha) (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) + #define IPS_USE_ENH_SGLIST(ha) ((ha)->flags & IPS_HA_ENH_SG) + #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \ + sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST)) + + #if LINUX_VERSION_CODE < LinuxVersionCode(2,4,4) + #define pci_set_dma_mask(dev,mask) ( mask > 0xffffffff ? 1:0 ) + #define scsi_set_pci_device(sh,dev) (0) + #endif + #ifndef MDELAY #define MDELAY mdelay #endif - + + #ifndef min + #define min(x,y) ((x) < (y) ? x : y) + #endif + + #define pci_dma_lo32(a) (a & 0xffffffff) + + #if (BITS_PER_LONG > 32) || (defined CONFIG_HIGHMEM64G && defined IPS_HIGHIO) + #define IPS_ENABLE_DMA64 (1) + #define pci_dma_hi32(a) (a >> 32) + #else + #define IPS_ENABLE_DMA64 (0) + #define pci_dma_hi32(a) (0) + #endif + + #if defined(__ia64__) + #define IPS_ATOMIC_GFP (GFP_DMA | GFP_ATOMIC) + #define IPS_INIT_GFP GFP_DMA + #else + #define IPS_ATOMIC_GFP GFP_ATOMIC + #define IPS_INIT_GFP GFP_KERNEL + #endif + /* * Adapter address map equates */ @@ -354,6 +393,12 @@ #define IPS_SCSI_MP3_AllocateSurface 0x08 /* + * HA Flags + */ + + #define IPS_HA_ENH_SG 0x1 + + /* * SCB Flags */ #define IPS_SCB_MAP_SG 0x00008 @@ -390,93 +435,45 @@ /* * Scsi_Host Template */ -#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) - #define IPS { \ - next : NULL, \ - module : NULL, \ - proc_info : NULL, \ - proc_dir : NULL, \ - name : NULL, \ - detect : ips_detect, \ - release : ips_release, \ - info : ips_info, \ - command : NULL, \ - queuecommand : ips_queue, \ - eh_strategy_handler : NULL, \ - eh_abort_handler : ips_eh_abort, \ - eh_device_reset_handler : NULL, \ - eh_bus_reset_handler : NULL, \ - 
eh_host_reset_handler : ips_eh_reset, \ - abort : NULL, \ - reset : NULL, \ - slave_attach : NULL, \ - bios_param : ips_biosparam, \ - can_queue : 0, \ - this_id: -1, \ - sg_tablesize : IPS_MAX_SG, \ - cmd_per_lun: 16, \ - present : 0, \ - unchecked_isa_dma : 0, \ - use_clustering : ENABLE_CLUSTERING, \ - use_new_eh_code : 1 \ -} -#elif LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) - #define IPS { \ - next : NULL, \ - module : NULL, \ - proc_info : NULL, \ - name : NULL, \ - detect : ips_detect, \ - release : ips_release, \ - info : ips_info, \ - command : NULL, \ - queuecommand : ips_queue, \ - eh_strategy_handler : NULL, \ - eh_abort_handler : ips_eh_abort, \ - eh_device_reset_handler : NULL, \ - eh_bus_reset_handler : NULL, \ - eh_host_reset_handler : ips_eh_reset, \ - abort : NULL, \ - reset : NULL, \ - slave_attach : NULL, \ - bios_param : ips_biosparam, \ - can_queue : 0, \ - this_id: -1, \ - sg_tablesize : IPS_MAX_SG, \ - cmd_per_lun: 16, \ - present : 0, \ - unchecked_isa_dma : 0, \ - use_clustering : ENABLE_CLUSTERING, \ - use_new_eh_code : 1 \ +#if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) + static void ips_select_queue_depth(struct Scsi_Host *, Scsi_Device *); +#define IPS { \ + .detect = ips_detect, \ + .release = ips_release, \ + .info = ips_info, \ + .queuecommand = ips_queue, \ + .eh_abort_handler = ips_eh_abort, \ + .eh_host_reset_handler = ips_eh_reset, \ + .bios_param = ips_biosparam,\ + .select_queue_depths = ips_select_queue_depth, \ + .can_queue = 0, \ + .this_id = -1, \ + .sg_tablesize = IPS_MAX_SG, \ + .cmd_per_lun = 16, \ + .present = 0, \ + .unchecked_isa_dma = 0, \ + .use_clustering = ENABLE_CLUSTERING,\ + .use_new_eh_code = 1, \ + IPS_HIGHMEM_IO \ } #else - #define IPS { \ - next : NULL, \ - module : NULL, \ - proc_info : NULL, \ - name : NULL, \ - detect : ips_detect, \ - release : ips_release, \ - info : ips_info, \ - command : NULL, \ - queuecommand : ips_queue, \ - eh_strategy_handler : NULL, \ - eh_abort_handler : ips_eh_abort, \ - 
eh_device_reset_handler : NULL, \ - eh_bus_reset_handler : NULL, \ - eh_host_reset_handler : ips_eh_reset, \ - abort : NULL, \ - reset : NULL, \ - slave_attach : NULL, \ - bios_param : ips_biosparam, \ - can_queue : 0, \ - this_id: -1, \ - sg_tablesize : IPS_MAX_SG, \ - cmd_per_lun: 16, \ - present : 0, \ - unchecked_isa_dma : 0, \ - use_clustering : ENABLE_CLUSTERING, \ - highmem_io : 1 \ +#define IPS { \ + .detect = ips_detect, \ + .release = ips_release, \ + .info = ips_info, \ + .queuecommand = ips_queue, \ + .eh_abort_handler = ips_eh_abort, \ + .eh_host_reset_handler = ips_eh_reset, \ + .slave_configure = ips_slave_configure, \ + .bios_param = ips_biosparam, \ + .can_queue = 0, \ + .this_id = -1, \ + .sg_tablesize = IPS_MAX_SG, \ + .cmd_per_lun = 3, \ + .present = 0, \ + .unchecked_isa_dma = 0, \ + .use_clustering = ENABLE_CLUSTERING, \ + .highmem_io = 1 \ } #endif @@ -491,7 +488,8 @@ typedef struct { uint32_t lba; uint32_t sg_addr; uint16_t sector_count; - uint16_t reserved; + uint8_t segment_4G; + uint8_t enhanced_sg; uint32_t ccsar; uint32_t cccr; } IPS_IO_CMD, *PIPS_IO_CMD; @@ -542,7 +540,9 @@ typedef struct { uint16_t reserved; uint32_t reserved2; uint32_t dcdb_address; - uint32_t reserved3; + uint16_t reserved3; + uint8_t segment_4G; + uint8_t enhanced_sg; uint32_t ccsar; uint32_t cccr; } IPS_DCDB_CMD, *PIPS_DCDB_CMD; @@ -986,7 +986,20 @@ typedef struct { typedef struct ips_sglist { uint32_t address; uint32_t length; -} IPS_SG_LIST, *PIPS_SG_LIST; +} IPS_STD_SG_LIST; + +typedef struct ips_enh_sglist { + uint32_t address_lo; + uint32_t address_hi; + uint32_t length; + uint32_t reserved; +} IPS_ENH_SG_LIST; + +typedef union { + void *list; + IPS_STD_SG_LIST *std_list; + IPS_ENH_SG_LIST *enh_list; +} IPS_SG_LIST; typedef struct _IPS_INFOSTR { char *buffer; @@ -1086,6 +1099,7 @@ typedef struct ips_ha { char *ioctl_data; /* IOCTL data area */ uint32_t ioctl_datasize; /* IOCTL data size */ uint32_t cmd_in_progress; /* Current command in progress*/ + int 
flags; /* */ uint8_t waitflag; /* are we waiting for cmd */ uint8_t active; int ioctl_reset; /* IOCTL Requested Reset Flag */ @@ -1133,7 +1147,7 @@ typedef struct ips_scb { uint32_t sg_len; uint32_t flags; uint32_t op_code; - IPS_SG_LIST *sg_list; + IPS_SG_LIST sg_list; Scsi_Cmnd *scsi_cmd; struct ips_scb *q_next; ips_scb_callback callback; @@ -1194,11 +1208,13 @@ typedef struct { #define IPS_VER_MAJOR 6 #define IPS_VER_MAJOR_STRING "6" -#define IPS_VER_MINOR 00 -#define IPS_VER_MINOR_STRING "00" -#define IPS_VER_BUILD 26 -#define IPS_VER_BUILD_STRING "26" -#define IPS_VER_STRING "6.00.26" +#define IPS_VER_MINOR 10 +#define IPS_VER_MINOR_STRING "10" +#define IPS_VER_BUILD 24 +#define IPS_VER_BUILD_STRING "24" +#define IPS_VER_STRING "6.10.24" +#define IPS_RELEASE_ID 0x00010000 +#define IPS_BUILD_IDENT 1250 #define IPS_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2003. All Rights Reserved." #define IPS_ADAPTECCOPYRIGHT_STRING "(c) Copyright Adaptec, Inc. 2002 to present. All Rights Reserved." #define IPS_NT_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2003." 
@@ -1207,31 +1223,33 @@ typedef struct { #define IPS_VER_SERVERAID1 "2.25.01" #define IPS_VER_SERVERAID2 "2.88.13" #define IPS_VER_NAVAJO "2.88.13" -#define IPS_VER_SERVERAID3 "6.00.26" -#define IPS_VER_SERVERAID4H "6.00.26" -#define IPS_VER_SERVERAID4MLx "6.00.26" -#define IPS_VER_SARASOTA "6.00.26" -#define IPS_VER_MARCO "6.00.26" +#define IPS_VER_SERVERAID3 "6.10.24" +#define IPS_VER_SERVERAID4H "6.10.24" +#define IPS_VER_SERVERAID4MLx "6.10.24" +#define IPS_VER_SARASOTA "6.10.24" +#define IPS_VER_MARCO "6.10.24" +#define IPS_VER_SEBRING "6.10.24" /* Compatability IDs for various adapters */ #define IPS_COMPAT_UNKNOWN "" -#define IPS_COMPAT_CURRENT "MR600" +#define IPS_COMPAT_CURRENT "SB610" #define IPS_COMPAT_SERVERAID1 "2.25.01" #define IPS_COMPAT_SERVERAID2 "2.88.13" #define IPS_COMPAT_NAVAJO "2.88.13" #define IPS_COMPAT_KIOWA "2.88.13" -#define IPS_COMPAT_SERVERAID3H "MR600" -#define IPS_COMPAT_SERVERAID3L "MR600" -#define IPS_COMPAT_SERVERAID4H "MR600" -#define IPS_COMPAT_SERVERAID4M "MR600" -#define IPS_COMPAT_SERVERAID4L "MR600" -#define IPS_COMPAT_SERVERAID4Mx "MR600" -#define IPS_COMPAT_SERVERAID4Lx "MR600" -#define IPS_COMPAT_SARASOTA "MR600" -#define IPS_COMPAT_MARCO "MR600" -#define IPS_COMPAT_BIOS "MR600" +#define IPS_COMPAT_SERVERAID3H "SB610" +#define IPS_COMPAT_SERVERAID3L "SB610" +#define IPS_COMPAT_SERVERAID4H "SB610" +#define IPS_COMPAT_SERVERAID4M "SB610" +#define IPS_COMPAT_SERVERAID4L "SB610" +#define IPS_COMPAT_SERVERAID4Mx "SB610" +#define IPS_COMPAT_SERVERAID4Lx "SB610" +#define IPS_COMPAT_SARASOTA "SB610" +#define IPS_COMPAT_MARCO "SB610" +#define IPS_COMPAT_SEBRING "SB610" +#define IPS_COMPAT_BIOS "SB610" -#define IPS_COMPAT_MAX_ADAPTER_TYPE 15 +#define IPS_COMPAT_MAX_ADAPTER_TYPE 16 #define IPS_COMPAT_ID_LENGTH 8 #define IPS_DEFINE_COMPAT_TABLE(tablename) \ @@ -1250,7 +1268,8 @@ typedef struct { IPS_COMPAT_SERVERAID4Lx, \ IPS_COMPAT_SARASOTA, /* one-channel variety of SARASOTA */ \ IPS_COMPAT_SARASOTA, /* two-channel variety of 
SARASOTA */ \ - IPS_COMPAT_MARCO \ + IPS_COMPAT_MARCO, \ + IPS_COMPAT_SEBRING \ } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/ips_old.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/ips_old.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/ips_old.c 2003-08-25 18:24:52.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/ips_old.c 2003-08-25 20:35:12.000000000 +0200 @@ -276,6 +276,7 @@ IPS_DEFINE_COMPAT_TABLE( Compatable ); static struct pci_device_id ips_pci_table[] __devinitdata = { { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, { 0, } }; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/Makefile linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/Makefile --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/Makefile 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,18 @@ +# +# Makefile for the Cisco iSCSI client +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definitions are now in the main makefile... 
+ +O_TARGET := iscsi-new_module.o + +EXTRA_CFLAGS := -I../../scsi -DLINUX + +obj-y := iscsi.o iscsi-probe.o iscsi-login.o iscsiAuthClient.o iscsiAuthClientGlue.o md5.o iscsi-crc.o + +obj-m := $(O_TARGET) + +include $(TOPDIR)/Rules.make diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/config.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/config.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/config.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/config.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,16 @@ +/* + * We need to determine byte ordering for the md5 code + */ +#ifndef BROKEN_CONFIG_H +#define BROKEN_CONFIG_H + +#include +#ifdef __BIG_ENDIAN +#define WORDS_BIGENDIAN +#endif + +typedef u32 UWORD32; +typedef u32 UINT32; +typedef u8 UINT8; +typedef u16 UINT16; +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-common.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-common.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-common.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-common.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,57 @@ +#ifndef ISCSI_COMMON_H_ +#define ISCSI_COMMON_H_ + +/* + * iSCSI connection daemon + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-common.h,v 1.8 2002/11/08 19:04:03 smferris Exp $ + * + * include for common info needed by both the daemon and kernel module + * + */ + +#define ISCSI_MAX_HBAS 1 + +/* these are limited by the packing of the GET_IDLUN ioctl */ +#define ISCSI_MAX_CHANNELS_PER_HBA 256 +#define ISCSI_MAX_TARGET_IDS_PER_BUS 256 +#define ISCSI_MAX_LUNS_PER_TARGET 256 + +/* iSCSI bus numbers are a 1:1 mapping of the Linux HBA/channel combos onto + * non-negative integers, so that we don't have to care what number + * the OS assigns to each HBA, and we don't care if they're non-contiguous. + * We use the ordering of each HBA in the iSCSI kernel module's hba_list, + * and number the channels on each HBA sequentially. + */ +#define ISCSI_MAX_BUS_IDS (ISCSI_MAX_HBAS * ISCSI_MAX_CHANNELS_PER_HBA) + +/* compatibility names */ +#define ISCSI_MAX_TARGETS ISCSI_MAX_TARGET_IDS_PER_BUS +#define ISCSI_MAX_LUN ISCSI_MAX_LUNS_PER_TARGET + +/* max base dir path for the tree of device symlinks */ +#define LINK_PATH_MAX 255 + +#ifndef __cplusplus +typedef enum boolean { + false= 0, + true = 1 +} bool; +#endif + + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.c 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,141 @@ +#include "iscsi-platform.h" +#include "iscsi-crc.h" + +/* + * This is the CRC-32C table + * Generated with: + * width = 32 bits + * poly = 0x1EDC6F41 + * reflect input bytes = true + * reflect output bytes = true + */ + +uint32_t iscsi_crc32c_table[256] = +{ + 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, + 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, + 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL, + 
0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L, + 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL, + 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, + 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, + 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL, + 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL, + 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L, + 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, + 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, + 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L, + 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL, + 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL, + 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, + 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, + 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L, + 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L, + 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L, + 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, + 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, + 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L, + 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L, + 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L, + 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, + 0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, + 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L, + 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L, + 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L, + 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, + 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, + 0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL, + 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L, + 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L, + 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, + 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, + 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL, + 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL, + 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L, + 
0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, + 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, + 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL, + 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L, + 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL, + 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, + 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, + 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL, + 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L, + 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL, + 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, + 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, + 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL, + 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L, + 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L, + 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, + 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, + 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L, + 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L, + 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL, + 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, + 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, + 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL, + 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L +}; + +/* + * Steps through buffer one byte at at time, calculates reflected crc using table. 
+ */ + +uint32_t iscsi_crc32c(void *address, unsigned long length) +{ + uint8_t *buffer = address; + uint32_t crc = 0xffffffff, result; +#ifdef WORDS_BIGENDIAN + uint8_t byte0, byte1, byte2, byte3; +#endif + + while (length--) + crc = iscsi_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^ (crc >> 8); + result = crc ^ 0xffffffff; + +#ifdef WORDS_BIGENDIAN + byte0 = (uint8_t)(result & 0xFF); + byte1 = (uint8_t)((result>>8) & 0xFF); + byte2 = (uint8_t)((result>>16) & 0xFF); + byte3 = (uint8_t)((result>>24) & 0xFF); + result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3); +#endif + + return result; +} + + +/* + * Continues stepping through buffer one byte at at time, calculates reflected crc using table. + */ +uint32_t iscsi_crc32c_continued(void *address, unsigned long length, uint32_t crc) +{ + uint8_t *buffer = address; + uint32_t result; +#ifdef WORDS_BIGENDIAN + uint8_t byte0, byte1, byte2, byte3; + + byte0 = (uint8_t)((crc>>24) & 0xFF); + byte1 = (uint8_t)((crc>>16) & 0xFF); + byte2 = (uint8_t)((crc>>8) & 0xFF); + byte3 = (uint8_t)(crc & 0xFF); + crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0); +#endif + + crc = crc ^ 0xffffffff; + while (length--) + crc = iscsi_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^ (crc >> 8); + result = crc ^ 0xffffffff; + +#ifdef WORDS_BIGENDIAN + byte0 = (uint8_t)(result & 0xFF); + byte1 = (uint8_t)((result>>8) & 0xFF); + byte2 = (uint8_t)((result>>16) & 0xFF); + byte3 = (uint8_t)((result>>24) & 0xFF); + result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3); +#endif + + return result; +} + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-crc.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,7 @@ +#ifndef ISCSI_CRC_H_ +#define ISCSI_CRC_H_ + +extern 
uint32_t iscsi_crc32c(void *address, unsigned long length); +extern uint32_t iscsi_crc32c_continued(void *address, unsigned long length, uint32_t crc); + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-io.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-io.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-io.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-io.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,43 @@ +#ifndef ISCSI_IO_H_ +#define ISCSI_IO_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2002 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsi-io.h,v 1.2 2002/09/19 20:54:29 smferris Exp $ + * + * iscsi-io.h + * + * define the PDU I/O functions needed by the login library + * + */ + +# include "iscsi-protocol.h" +# include "iscsi-session.h" +# include "iscsi-platform.h" + +extern int iscsi_connect(iscsi_session_t *session); +extern void iscsi_disconnect(iscsi_session_t *session); + +/* functions used in iscsi-login.c that must be implemented for each platform */ +extern int iscsi_send_pdu(iscsi_session_t *session, struct IscsiHdr *header, char *data, int timeout); +extern int iscsi_recv_pdu(iscsi_session_t *session, + struct IscsiHdr *header, int max_header_length, + char *data, int max_data_length, + int timeout); + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-ioctl.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-ioctl.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-ioctl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-ioctl.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,195 @@ +#ifndef ISCSI_IOCTL_H_ +#define ISCSI_IOCTL_H_ + +/* + * iSCSI connection daemon + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsi-ioctl.h,v 1.24.2.1 2003/08/08 06:14:44 asimgupta Exp $ + * + * include for ioctl calls between the daemon and the kernel module + * + */ + +#include "iscsi-protocol.h" +#include "iscsiAuthClient.h" +#include "iscsi-portal.h" + +/* + * ioctls + */ +#define ISCSI_ESTABLISH_SESSION 0x00470301 +#define ISCSI_TERMINATE_SESSION 0x00470302 +#define ISCSI_SHUTDOWN 0x00470303 +#define ISCSI_GETTRACE 0x00470304 +#define ISCSI_PROBE_LUNS 0x00470305 +#define ISCSI_RESET_PROBING 0x00470306 +#define ISCSI_DEVICE_INFO 0x00470307 +#define ISCSI_LS_TARGET_INFO 0x00470308 +#define ISCSI_LS_PORTAL_INFO 0x00470309 +#define ISCSI_SET_INBP_INFO 0x0047030a +#define ISCSI_CHECK_INBP_BOOT 0x0047030b + +#define INBP_BUF_SIZE 1024 +#define SCANAREA 1024 +#define SIGNATURE "Cisco PiXiE Dust" +#define DADDLEN 6 + +typedef struct sapiNBP +{ + uint8_t signature[20]; /* "Cisco PiXiE Dust" */ + uint32_t targetipaddr; // iSCSI target IPv4 address + uint32_t myipmask; // lan netmask + uint32_t ripaddr; // gateway IPv4 address + uint8_t tgtethaddr[DADDLEN]; // target ethernet address + uint8_t structVersion; // version number of this struct + uint8_t pad1; // pad for windows driver + uint16_t tcpport; // tcp port to use + uint16_t slun; // boot disk lun + uint8_t targetstring[256]; // boot disk target + uint32_t ntbootdd_routine; + uint32_t myipaddr; // Our IPv4 address + uint8_t myethaddr[DADDLEN]; // Our ethernet address + uint8_t pad2[2]; // pad for windows driver + uint8_t bootPartitionNumber; // boot partition number of c: + uint8_t numberLocalDisks; + uint8_t nbpVersion[32]; // NBP version string +}sapiNBP_t; + +typedef struct scsi_device_info { + int max_sd_devices; + int max_sd_partitions; + int max_st_devices; + int max_sg_devices; + int max_sr_devices; +} scsi_device_info_t; + +typedef struct iscsi_session_ioctl { + uint32_t ioctl_size; + uint32_t ioctl_version; + uint32_t config_number; + int probe_luns; + int update; + uint8_t isid[6]; + int iscsi_bus; + int target_id; 
+ int probe_order; + int password_length; + char username[iscsiAuthStringMaxLength]; + unsigned char password[iscsiAuthStringMaxLength]; + int password_length_in; + char username_in[iscsiAuthStringMaxLength]; + unsigned char password_in[iscsiAuthStringMaxLength]; + unsigned char TargetName[TARGET_NAME_MAXLEN + 1]; + unsigned char InitiatorName[TARGET_NAME_MAXLEN + 1]; + unsigned char InitiatorAlias[TARGET_NAME_MAXLEN + 1]; + uint32_t lun_bitmap[8]; + char link_base_dir[LINK_PATH_MAX + 1]; + mode_t link_dir_mode; + scsi_device_info_t device_info; + int host_number; /* returned from the kernel */ + int channel; /* returned from the kernel */ + unsigned char TargetAlias[TARGET_NAME_MAXLEN + 1]; /* returned from the kernel */ + int portal_failover; + unsigned char preferred_portal[16]; + unsigned char preferred_subnet[16]; + uint32_t preferred_subnet_mask; + uint32_t num_portals; + uint32_t portal_info_size; + iscsi_portal_info_t portals[1]; /* 1 or more portals for this session to use */ +} iscsi_session_ioctl_t; + +#define ISCSI_SESSION_IOCTL_VERSION 21 + +typedef struct iscsi_terminate_session_ioctl { + uint32_t ioctl_size; + uint32_t ioctl_version; + int iscsi_bus; + int target_id; +} iscsi_terminate_session_ioctl_t; + +#define ISCSI_TERMINATE_SESSION_IOCTL_VERSION 1 + +typedef struct iscsi_probe_luns_ioctl { + uint32_t ioctl_size; + uint32_t ioctl_version; + int iscsi_bus; + int target_id; + uint32_t lun_bitmap[8]; + scsi_device_info_t device_info; +} iscsi_probe_luns_ioctl_t; + +#define ISCSI_PROBE_LUNS_IOCTL_VERSION 1 + +/* request info for a particular session by host,channel,target */ +typedef struct iscsi_get_session_info_ioctl { + uint32_t ioctl_size; + uint32_t ioctl_version; + int host; + int channel; + int target_id; + char TargetName[TARGET_NAME_MAXLEN + 1]; + char TargetAlias[TARGET_NAME_MAXLEN + 1]; + uint8_t isid[6]; + uint32_t lun_bitmap[8]; + uint8_t ip_address[16]; /* current address, may not match any portal after a temp redirect */ + int 
ip_length; /* current address, may not match any portal after a temp redirect */ + uint32_t num_portals; + uint32_t portal_info_size; + iscsi_portal_info_t portals[1]; /* 1 or more portals this session can use */ +} iscsi_get_session_info_ioctl_t; + +#define ISCSI_GET_SESSION_INFO_IOCTL_VERSION 1 + +typedef struct iscsi_portal_list { + int target_id; + iscsi_portal_info_t * portals; +} portal_list_t; + +typedef struct iscsi_ls_session { + int conn_status; + uint8_t isid[6]; + uint16_t tsid; + int InitialR2T; + int ImmediateData; + int HeaderDigest; + int DataDigest; + int FirstBurstLength; + int MaxBurstLength; + int MaxRecvDataSegmentLength; + int MaxXmitDataSegmentLength; + int login_timeout; + int auth_timeout; + int active_timeout; + int idle_timeout; + int ping_timeout; + time_t establishment_time; + int addr[4]; /* peer address */ + int port; /* peer port number */ +} iscsi_ls_session_t; + +typedef struct target_info +{ + unsigned char target_name[TARGET_NAME_MAXLEN + 1]; + unsigned char target_alias[TARGET_NAME_MAXLEN + 1]; + int host_no; + int channel; + int target_id; + int num_portals; + iscsi_ls_session_t session_data; +}target_info_t; + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-kernel.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-kernel.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-kernel.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-kernel.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,228 @@ +#ifndef ISCSI_KERNEL_H_ +#define ISCSI_KERNEL_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-kernel.h,v 1.10 2003/05/08 11:11:07 surekhap Exp $ + * + * iscsi-kernel.h + * + * hide variations in various Linux kernel versions + * + */ + +/* useful 2.4-ism */ +#ifndef set_current_state +# define set_current_state(state_value) do { current->state = state_value; mb(); } while(0) +#endif + +/* the interface to the SCSI code varies between kernels */ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,53) ) +# define HAS_NEW_SCSI_DEPTH 1 +# define HAS_NEW_DEVICE_LISTS 1 +# define HAS_SLAVE_CONFIGURE 1 +#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,44) ) +# define HAS_NEW_SLAVE_ATTACH 1 +# define HAS_CMND_REQUEST_STRUCT 1 +#else +# define HAS_SELECT_QUEUE_DEPTHS 1 +# define HAS_CMND_REQUEST_STRUCT 1 +#endif + +/* scatterlists have changed for HIGHMEM support. + * Later 2.4 kernels may have unmapped segments, and + * 2.5 kernels remove the address altogether. + */ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +# include +# define HAS_SCATTERLIST_PAGE 1 +# define HAS_SCATTERLIST_ADDRESS 0 +#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13) ) +# include +# define HAS_SCATTERLIST_PAGE 1 +# define HAS_SCATTERLIST_ADDRESS 1 +#elif defined (SCSI_HAS_HOST_LOCK) +/* Redhat Advanced Server calls itself 2.4.9, but has much newer patches in it. + * FIXME: find a better way to detect whether scatterlists have page pointers or not. 
+ */ +# include +# define HAS_SCATTERLIST_PAGE 1 +# define HAS_SCATTERLIST_ADDRESS 1 +#else +# define HAS_SCATTERLIST_PAGES 0 +# define HAS_SCATTERLIST_ADDRESS 0 +#endif + + +/* hide the wait queue differences */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,2,18) ) +typedef struct wait_queue *wait_queue_head_t; +typedef struct wait_queue wait_queue_t; +# define init_waitqueue_head(q_head_ptr) do { *(q_head_ptr) = NULL; mb(); } while (0) +# define init_waitqueue_entry(q_ptr, tsk) do { (q_ptr)->task = (tsk); mb(); } while (0) +#endif + +/* The scheduling policy name has been changed from SCHED_OTHER to + * SCHED_NORMAL in linux kernel version 2.5.39 + */ + +#if defined (SCHED_NORMAL) +#define SCHED_OTHER SCHED_NORMAL +#endif + +/* the lock we need to hold while checking pending signals */ + +/* Linux kernel version 2.5.60 onwards and Redhat 9.0 kernel 2.4.20-8 + * onwards implements NPTL ( Native Posix Thread Library ) which has + * introduced some changes to signal lock members of task structure in + * "sched.h". These changes have been taken care at few places below + * through the introduction of INIT_SIGHAND variable. 
+ */ + +#if defined(INIT_SIGHAND) +# define LOCK_SIGNALS() spin_lock_irq(¤t->sighand->siglock) +# define UNLOCK_SIGNALS() spin_unlock_irq(¤t->sighand->siglock) +#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) +# define LOCK_SIGNALS() spin_lock_irq(¤t->sig->siglock) +# define UNLOCK_SIGNALS() spin_unlock_irq(¤t->sig->siglock) +#else +# define LOCK_SIGNALS() spin_lock_irq(¤t->sigmask_lock) +# define UNLOCK_SIGNALS() spin_unlock_irq(¤t->sigmask_lock) +#endif + +/* determine if a particular signal is pending or not */ +# if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) ) +# define SIGNAL_IS_PENDING(SIG) sigismember(¤t->pending.signal, (SIG)) +# else +# define SIGNAL_IS_PENDING(SIG) sigismember(¤t->signal, (SIG)) +# endif + +# if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(INIT_SIGHAND) ) +# define RECALC_PENDING_SIGNALS recalc_sigpending() +# else +# define RECALC_PENDING_SIGNALS recalc_sigpending(current) +# endif + +/* we don't have to worry about ordering I/O and memory, just memory, + * so we can use the smp_ memory barriers. Older kernels don't have them, + * so map them to the non-SMP barriers if need be. + */ +#ifndef smp_mb +# if defined(CONFIG_SMP) || defined(__SMP__) +# define smp_mb() mb() +# else +# define smp_mb() barrier() +# endif +#endif + +#ifndef smp_wmb +# if defined(CONFIG_SMP) || defined(__SMP__) +# define smp_wmb() wmb() +# else +# define smp_wmb() barrier() +# endif +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) && !defined (SCSI_HAS_HOST_LOCK) && !defined(__clear_bit) +# define __clear_bit clear_bit +#endif + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) ) +typedef unsigned long cpu_flags_t; +#else +typedef unsigned int cpu_flags_t; +#endif + +/* kernels 2.2.16 through 2.5.21 call driver entry points with a lock + * held and interrupts off, but the lock varies. 
Hide the + * differences, and give ourselves ways of releasing the lock in our + * entry points, since we may need to call the scheduler, and can't do + * that with a spinlock held and interrupts off, so we need to release + * the lock and reenable interrupts, and then reacquire the lock + * before returning from the entry point. + */ + +/* for releasing the lock when we don't want it, but have it */ +#if defined (SCSI_HAS_HOST_LOCK) + /* Redhat Advanced Server is like 2.5, but uses a different name for the lock pointer. + * At least they gave us a define to check for. + */ +# define RELEASE_MIDLAYER_LOCK(host) spin_unlock_irq((host)->lock) +# define REACQUIRE_MIDLAYER_LOCK(host) spin_lock_irq((host)->lock) +/* for getting the lock when we need it to call done(), but don't have it */ +# define DECLARE_MIDLAYER_FLAGS cpu_flags_t midlayer_flags_ +# define LOCK_MIDLAYER_LOCK(host) spin_lock_irqsave((host)->lock, midlayer_flags_); +# define UNLOCK_MIDLAYER_LOCK(host) spin_unlock_irqrestore((host)->lock, midlayer_flags_); +#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,2) ) +# define RELEASE_MIDLAYER_LOCK(host) spin_unlock_irq((host)->host_lock) +# define REACQUIRE_MIDLAYER_LOCK(host) spin_lock_irq((host)->host_lock) +/* for getting the lock when we need it to call done(), but don't have it */ +# define DECLARE_MIDLAYER_FLAGS cpu_flags_t midlayer_flags_ +# define LOCK_MIDLAYER_LOCK(host) spin_lock_irqsave((host)->host_lock, midlayer_flags_); +# define UNLOCK_MIDLAYER_LOCK(host) spin_unlock_irqrestore((host)->host_lock, midlayer_flags_); +#else +# define RELEASE_MIDLAYER_LOCK(host) spin_unlock_irq(&io_request_lock) +# define REACQUIRE_MIDLAYER_LOCK(host) spin_lock_irq(&io_request_lock) +/* for getting the lock when we need it to call done(), but don't have it */ +# define DECLARE_MIDLAYER_FLAGS cpu_flags_t midlayer_flags_ +# define LOCK_MIDLAYER_LOCK(host) spin_lock_irqsave(&io_request_lock, midlayer_flags_); +# define UNLOCK_MIDLAYER_LOCK(host) 
spin_unlock_irqrestore(&io_request_lock, midlayer_flags_); +#endif + +/* register as a SCSI HBA with the kernel */ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,2) ) +# define REGISTER_SCSI_HOST(template) scsi_register_host((template)) +# define UNREGISTER_SCSI_HOST(template) scsi_unregister_host((template)) +#else +# define REGISTER_SCSI_HOST(template) scsi_register_module(MODULE_SCSI_HA, (template)) +# define UNREGISTER_SCSI_HOST(template) scsi_unregister_module(MODULE_SCSI_HA, (template)) +#endif + +/* we need to ensure the SCSI midlayer won't call the queuecommand() + * entry point from a bottom-half handler while a thread holding locks + * that queuecommand() will need to acquire is suspended by an interrupt. + * we don't use spin_lock_bh() on 2.4 kernels, because spin_unlock_bh() + * will run bottom-half handlers, which is bad if interrupts are turned off + * and the io_request_lock is held, since the SCSI bottom-half handler will + * try to acquire the io_request_lock again and deadlock. + */ +#define DECLARE_NOQUEUE_FLAGS cpu_flags_t noqueue_flags_ +#define SPIN_LOCK_NOQUEUE(lock) spin_lock_irqsave((lock), noqueue_flags_) +#define SPIN_UNLOCK_NOQUEUE(lock) spin_unlock_irqrestore((lock), noqueue_flags_) + +/* Linux doesn't define the SCSI opcode REPORT_LUNS yet, but we will, since we use it */ +#ifndef REPORT_LUNS +# define REPORT_LUNS 0xa0 +#endif + +#define MSECS_TO_JIFFIES(ms) (((ms)*HZ+999)/1000) + +/* Redhat Advanced Server backports a bunch of patches to older kernels. + * This makes kernel version checks unreliable, but the patches + * don't always provide good ways of doing feature tests, so + * sometimes we just check if it's Advanced Server or not. For now, + * we use one of the things we can feature check, which is currently + * unlikely to be in use outside of Advanced Server. + * FIXME: find a better way of doing kernel feature tests. 
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)) && defined(SCSI_HAS_HOST_LOCK) && !defined(REDHAT_ADVANCED_SERVER) +# define REDHAT_ADVANCED_SERVER 1 +#endif + +#endif + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-limits.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-limits.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-limits.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-limits.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,38 @@ +#ifndef ISCSI_LIMITS_H_ +#define ISCSI_LIMITS_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsi-limits.h,v 1.3 2003/01/04 00:21:33 smferris Exp $ + * + * iscsi-limits.h + * + */ + +#define ISCSI_CMDS_PER_LUN 12 +#define ISCSI_MIN_CANQUEUE 64 +#define ISCSI_MAX_CANQUEUE 64 +#define ISCSI_PREALLOCATED_TASKS 64 +#define ISCSI_MAX_SG 64 +#define ISCSI_MAX_CMD_LEN 12 +#define ISCSI_MAX_TASKS_PER_SESSION (ISCSI_CMDS_PER_LUN * ISCSI_MAX_LUN) + +/* header plus alignment plus login pdu size + pad */ +#define ISCSI_RXCTRL_SIZE ((2 * sizeof(struct IscsiHdr)) + 4096 + 4) + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-login.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-login.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-login.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-login.c 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,1045 @@ +/* + * iSCSI login library + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsi-login.c,v 1.37 2003/06/24 13:39:38 surekhap Exp $ + * + */ + +#include "iscsi-platform.h" +#include "iscsi-protocol.h" +#include "iscsi-io.h" +#include "iscsi-login.h" + +struct IscsiHdr *iscsi_align_pdu(iscsi_session_t *session, unsigned char *buffer, int buffersize) +{ + struct IscsiHdr *header; + unsigned long addr = (unsigned long)buffer; + + /* find a buffer location guaranteed to be reasonably aligned for the header */ + addr += (addr % sizeof(*header)); + header = (struct IscsiHdr *)addr; + + return header; +} + + +/* caller is assumed to be well-behaved and passing NUL terminated strings */ +int iscsi_add_text(iscsi_session_t *session, struct IscsiHdr *pdu, char *data, int max_data_length, char *param, char *value) +{ + int param_len = strlen(param); + int value_len = strlen(value); + int length = param_len + 1 + value_len + 1; /* param, separator, value, and trailing NUL */ + int pdu_length = ntoh24(pdu->dlength); + char *text = data; + char *end = data + max_data_length; + char *pdu_text; + + /* find the end of the current text */ + text += pdu_length; + pdu_text = text; + pdu_length += length; + + if (text + length >= end) { + logmsg(AS_NOTICE, "failed to add login text '%s=%s'\n", param, value); + return 0; + } + + /* param */ + iscsi_strncpy(text, param, param_len); + text += param_len; + + /* separator */ + *text++ = ISCSI_TEXT_SEPARATOR; + + /* value */ + strncpy(text, value, value_len); + text += value_len; + + /* NUL */ + *text++ = '\0'; + + /* update the length in the PDU header */ + hton24(pdu->dlength, pdu_length); + + return 1; +} + +int iscsi_find_key_value(char *param, char *pdu, char *pdu_end, char **value_start, char **value_end) +{ + char *str = param; + char *text = pdu; + char *value = NULL; + + if (value_start) + *value_start = NULL; + if (value_end) + *value_end = NULL; + + /* make sure they contain the same bytes */ + while (*str) { + if (text >= pdu_end) + return 0; + if (*text == '\0') + return 0; + if (*str != 
*text) + return 0; + str++; + text++; + } + + if ((text >= pdu_end) || (*text == '\0') || (*text != ISCSI_TEXT_SEPARATOR)) { + return 0; + } + + /* find the value */ + value = text + 1; + + /* find the end of the value */ + while ((text < pdu_end) && (*text)) + text++; + + if (value_start) + *value_start = value; + if (value_end) + *value_end = text; + + return 1; +} + +/* + * This callback may be used under certain conditions when authenticating a target, but I'm not sure what we need + * to do here. + */ +static void null_callback(void *user_handle, void *message_handle, int auth_status) +{ + debugmsg(1, "iscsi-login: null_callback(%p, %p, %d)\n", user_handle, message_handle, auth_status); +} + + +/* this assumes the text data is always NUL terminated. The caller can always arrange for that + * by using a slightly larger buffer than the max PDU size, and then appending a NUL to the PDU. + */ +iscsi_login_status_t iscsi_process_login_response(iscsi_session_t *session, struct IscsiLoginRspHdr *login_rsp_pdu, char *data, int max_data_length) +{ + IscsiAuthClient *auth_client = (session->auth_buffers && session->num_auth_buffers) ? 
+ (IscsiAuthClient *)session->auth_buffers[0].address : NULL; + int transit = login_rsp_pdu->flags & ISCSI_FLAG_LOGIN_TRANSIT; + char *text = data; + char *end; + int pdu_current_stage = 0, pdu_next_stage = 0; + + end = text + ntoh24(login_rsp_pdu->dlength) + 1; + if (end >= (data + max_data_length)) { + logmsg(AS_ERROR, "login failed, process_login_response buffer too small to guarantee NUL termination\n"); + return LOGIN_FAILED; + } + /* guarantee a trailing NUL */ + *end = '\0'; + + /* if the response status was success, sanity check the response */ + if (login_rsp_pdu->status_class == STATUS_CLASS_SUCCESS) { + /* check the active version */ + if (login_rsp_pdu->active_version != ISCSI_DRAFT20_VERSION) { + logmsg(AS_ERROR, "login version mismatch, received incompatible active iSCSI version 0x%02x, expected version 0x%02x\n", + login_rsp_pdu->active_version, ISCSI_DRAFT20_VERSION); + return LOGIN_VERSION_MISMATCH; + } + + /* make sure the current stage matches */ + pdu_current_stage = (login_rsp_pdu->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; + if (pdu_current_stage != session->current_stage) { + logmsg(AS_ERROR, "received invalid login PDU, current stage mismatch, session %d, response %d\n", + session->current_stage, pdu_current_stage); + return LOGIN_INVALID_PDU; + } + + /* make sure that we're actually advancing if the T-bit is set */ + pdu_next_stage = login_rsp_pdu->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; + if (transit && (pdu_next_stage <= session->current_stage)) { + logmsg(AS_ERROR, "received invalid login PDU, current stage %d, target wants to go to stage %d, but we want to go to stage %d\n", + session->current_stage, pdu_next_stage, session->next_stage); + return LOGIN_INVALID_PDU; + } + } + + if (session->current_stage == ISCSI_SECURITY_NEGOTIATION_STAGE) { + if (iscsiAuthClientRecvBegin(auth_client) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "login failed because authClientRecvBegin failed\n"); + return LOGIN_FAILED; + } + + if 
(iscsiAuthClientRecvTransitBit(auth_client, transit) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "login failed because authClientRecvTransitBit failed\n"); + return LOGIN_FAILED; + } + } + + /* scan the text data */ + more_text: + while (text && (text < end)) { + char *value = NULL; + char *value_end = NULL; + + /* skip any NULs separating each text key=value pair */ + while ((text < end) && (*text == '\0')) + text++; + if (text >= end) + break; + + /* handle keys appropriate for each stage */ + switch (session->current_stage) { + case ISCSI_SECURITY_NEGOTIATION_STAGE: { + /* a few keys are possible in Security stage which the + * auth code doesn't care about, but which we might + * want to see, or at least not choke on. + */ + if (iscsi_find_key_value("TargetAlias", text, end, &value, &value_end)) { + size_t size = sizeof(session->TargetAlias); + + if ((value_end - value) < size) + size = value_end - value; + + memcpy(session->TargetAlias, value, size); + session->TargetAlias[sizeof(session->TargetAlias)-1] = '\0'; + text = value_end; + } + else if (iscsi_find_key_value("TargetAddress", text, end, &value, &value_end)) { + /* if possible, change the session's ip_address and port to the new TargetAddress */ + if (session->update_address && session->update_address(session, value)) { + text = value_end; + } + else { + logmsg(AS_ERROR, "login redirection failed, can't handle redirection to %s\n", value); + return LOGIN_REDIRECTION_FAILED; + } + } + else if (iscsi_find_key_value("TargetPortalGroupTag", text, end, &value, &value_end)) { + /* We should have already obtained this via discovery. + * We've already picked an isid, so the most we can do is confirm we reached + * the portal group we were expecting to. 
+ */ + int tag = iscsi_strtoul(value, NULL, 0); + if (session->portal_group_tag >= 0) { + if (tag != session->portal_group_tag) { + logmsg(AS_ERROR, "portal group tag mismatch, expected %u, received %u\n", + session->portal_group_tag, tag); + return LOGIN_WRONG_PORTAL_GROUP; + } + } + else { + /* we now know the tag */ + session->portal_group_tag = tag; + } + text = value_end; + } + else { + /* any key we don't recognize either goes to the auth code, or we choke on it */ + int keytype = iscsiAuthKeyTypeNone; + + while (iscsiAuthClientGetNextKeyType(&keytype) == iscsiAuthStatusNoError) { + char *key = (char *)iscsiAuthClientGetKeyName(keytype); + + if (key && iscsi_find_key_value(key, text, end, &value, &value_end)) { + if (iscsiAuthClientRecvKeyValue(auth_client, keytype, value) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "login negotiation failed, can't accept %s in security stage\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + text = value_end; + goto more_text; + } + } + + logmsg(AS_ERROR, "login negotiation failed, can't accept %s in security stage\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + break; + } + case ISCSI_OP_PARMS_NEGOTIATION_STAGE: { + /* FIXME: they're making base64 an encoding option for + * all numbers in draft13, since some security + * protocols use large numbers, and it was somehow + * considered "simpler" to let them be used for any + * number anywhere. 
+ */ + if (iscsi_find_key_value("TargetAlias", text, end, &value, &value_end)) { + size_t size = sizeof(session->TargetAlias); + + if ((value_end - value) < size) + size = value_end - value; + + memcpy(session->TargetAlias, value, size); + session->TargetAlias[sizeof(session->TargetAlias)-1] = '\0'; + text = value_end; + } + else if (iscsi_find_key_value("TargetAddress", text, end, &value, &value_end)) { + if (session->update_address && session->update_address(session, value)) { + text = value_end; + } + else { + logmsg(AS_ERROR, "login redirection failed, can't handle redirection to %s\n", value); + return LOGIN_REDIRECTION_FAILED; + } + } + else if (iscsi_find_key_value("TargetPortalGroupTag", text, end, &value, &value_end)) { + /* We should have already obtained this via discovery. + * We've already picked an isid, so the most we can do is confirm we reached + * the portal group we were expecting to. + */ + int tag = iscsi_strtoul(value, NULL, 0); + if (session->portal_group_tag >= 0) { + if (tag != session->portal_group_tag) { + logmsg(AS_ERROR, "portal group tag mismatch, expected %u, received %u\n", + session->portal_group_tag, tag); + return LOGIN_WRONG_PORTAL_GROUP; + } + } + else { + /* we now know the tag */ + session->portal_group_tag = tag; + } + text = value_end; + } + else if (iscsi_find_key_value("InitialR2T", text, end, &value, &value_end)) { + if (value && (iscsi_strcmp(value, "Yes") == 0)) + session->InitialR2T = 1; + else + session->InitialR2T = 0; + text = value_end; + } + else if (iscsi_find_key_value("ImmediateData", text, end, &value, &value_end)) { + if (value && (iscsi_strcmp(value, "Yes") == 0)) + session->ImmediateData = 1; + else + session->ImmediateData = 0; + text = value_end; + } + else if (iscsi_find_key_value("MaxRecvDataSegmentLength", text, end, &value, &value_end)) { + /* FIXME: no octal */ + session->MaxXmitDataSegmentLength = iscsi_strtoul(value, NULL, 0); + text = value_end; + } + else if 
(iscsi_find_key_value("FirstBurstLength", text, end, &value, &value_end)) { + /* FIXME: no octal */ + session->FirstBurstLength = iscsi_strtoul(value, NULL, 0); + text = value_end; + } + else if (iscsi_find_key_value("MaxBurstLength", text, end, &value, &value_end)) { + /* we don't really care, since it's a limit on the target's R2Ts, but record it anwyay */ + /* FIXME: no octal, and draft20 says we MUST NOT send more than MaxBurstLength */ + session->MaxBurstLength = iscsi_strtoul(value, NULL, 0); + text = value_end; + } + else if (iscsi_find_key_value("HeaderDigest", text, end, &value, &value_end)) { + if (iscsi_strcmp(value, "None") == 0) { + if (session->HeaderDigest != ISCSI_DIGEST_CRC32C) { + session->HeaderDigest = ISCSI_DIGEST_NONE; + } + else { + logmsg(AS_ERROR, "login negotiation failed, HeaderDigest=CRC32C is required, can't accept %s\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + } + else if (iscsi_strcmp(value, "CRC32C") == 0) { + if (session->HeaderDigest != ISCSI_DIGEST_NONE) { + session->HeaderDigest = ISCSI_DIGEST_CRC32C; + } + else { + logmsg(AS_ERROR, "login negotiation failed, HeaderDigest=None is required, can't accept %s\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + } + else { + logmsg(AS_ERROR, "login negotiation failed, can't accept %s\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + text = value_end; + } + else if (iscsi_find_key_value("DataDigest", text, end, &value, &value_end)) { + if (iscsi_strcmp(value, "None") == 0) { + if (session->DataDigest != ISCSI_DIGEST_CRC32C) { + session->DataDigest = ISCSI_DIGEST_NONE; + } + else { + logmsg(AS_ERROR, "login negotiation failed, DataDigest=CRC32C is required, can't accept %s\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + } + else if (iscsi_strcmp(value, "CRC32C") == 0) { + if (session->DataDigest != ISCSI_DIGEST_NONE) { + session->DataDigest = ISCSI_DIGEST_CRC32C; + } + else { + logmsg(AS_ERROR, "login negotiation failed, DataDigest=None is required, can't accept 
%s\n",text); + return LOGIN_NEGOTIATION_FAILED; + } + } + else { + logmsg(AS_ERROR, "login negotiation failed, can't accept %s\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + text = value_end; + } + else if (iscsi_find_key_value("DefaultTime2Wait", text, end, &value, &value_end)) { + session->DefaultTime2Wait = iscsi_strtoul(value, NULL, 0); + text = value_end; + } + else if (iscsi_find_key_value("DefaultTime2Retain", text, end, &value, &value_end)) { + session->DefaultTime2Retain = iscsi_strtoul(value, NULL, 0); + text = value_end; + } + else if (iscsi_find_key_value("OFMarker", text, end, &value, &value_end)) { + /* result function is AND, target must honor our No */ + text = value_end; + } + else if (iscsi_find_key_value("OFMarkInt", text, end, &value, &value_end)) { + /* we don't do markers, so we don't care */ + text = value_end; + } + else if (iscsi_find_key_value("IFMarker", text, end, &value, &value_end)) { + /* result function is AND, target must honor our No */ + text = value_end; + } + else if (iscsi_find_key_value("IFMarkInt", text, end, &value, &value_end)) { + /* we don't do markers, so we don't care */ + text = value_end; + } + else if (iscsi_find_key_value("DataPDUInOrder", text, end, &value, &value_end)) { + if (value && iscsi_strcmp(value, "Yes") == 0) + session->DataPDUInOrder = 1; + else + session->DataPDUInOrder = 0; + text = value_end; + } + else if (iscsi_find_key_value("DataSequenceInOrder", text, end, &value, &value_end)) { + if (value && iscsi_strcmp(value, "Yes") == 0) + session->DataSequenceInOrder = 1; + else + session->DataSequenceInOrder = 0; + text = value_end; + } + else if (iscsi_find_key_value("MaxOutstandingR2T", text, end, &value, &value_end)) { + if (iscsi_strcmp(value, "1")) { + logmsg(AS_ERROR, "login negotiation failed, can't accept MaxOutstandingR2T %s\n", value); + return LOGIN_NEGOTIATION_FAILED; + } + text = value_end; + } + else if (iscsi_find_key_value("MaxConnections", text, end, &value, &value_end)) { + if 
(iscsi_strcmp(value, "1")) { + logmsg(AS_ERROR, "login negotiation failed, can't accept MaxConnections %s\n", value); + return LOGIN_NEGOTIATION_FAILED; + } + text = value_end; + } + else if (iscsi_find_key_value("ErrorRecoveryLevel", text, end, &value, &value_end)) { + if (iscsi_strcmp(value, "0")) { + logmsg(AS_ERROR, "login negotiation failed, can't accept ErrorRecovery %s\n", value); + return LOGIN_NEGOTIATION_FAILED; + } + text = value_end; + } + else if (iscsi_find_key_value("X-com.cisco.protocol", text, end, &value, &value_end)) { + if (iscsi_strcmp(value, "NotUnderstood") && + iscsi_strcmp(value, "Reject") && + iscsi_strcmp(value, "Irrelevant") && + iscsi_strcmp(value, "draft20")) + { + /* if we didn't get a compatible protocol, fail */ + logmsg(AS_ERROR, "login version mismatch, can't accept protocol %s\n", value); + return LOGIN_VERSION_MISMATCH; + } + text = value_end; + } + else if (iscsi_find_key_value("X-com.cisco.PingTimeout", text, end, &value, &value_end)) { + /* we don't really care what the target ends up using */ + text = value_end; + } + else if (iscsi_find_key_value("X-com.cisco.sendAsyncText", text,end, &value, &value_end)) { + /* we don't bother for the target response */ + text = value_end; + } + else { + /* FIXME: we may want to ignore X- keys sent by + * the target, but that would require us to have + * another PDU buffer so that we can generate a + * response while we still know what keys we + * received, so that we can reply with a + * NotUnderstood response. For now, reject logins + * with keys we don't understand. Another option is + * to silently ignore them, and see if the target has + * a problem with that. The danger there is we may + * get caught in an infinite loop where we send an empty + * PDU requesting a stage transition, and the target + * keeps sending an empty PDU denying a stage transition + * (because we haven't replied to its key yet). 
+ */ + logmsg(AS_ERROR, "login negotiation failed, couldn't recognize text %s\n", text); + return LOGIN_NEGOTIATION_FAILED; + } + break; + } + default: + return LOGIN_FAILED; + } + } + + if (session->current_stage == ISCSI_SECURITY_NEGOTIATION_STAGE) { + switch (iscsiAuthClientRecvEnd(auth_client, null_callback, (void *)session, NULL)) { + case iscsiAuthStatusContinue: + /* continue sending PDUs */ + break; + + case iscsiAuthStatusPass: + logmsg(AS_DEBUG, "authenticated by target %s\n", session->TargetName); + break; + + case iscsiAuthStatusInProgress: + /* this should only occur if we were authenticating the target, + * which we don't do yet, so treat this as an error. + */ + case iscsiAuthStatusNoError: /* treat this as an error, since we should get a different code */ + case iscsiAuthStatusError: + case iscsiAuthStatusFail: + default: { + int debug_status = 0; + + if (iscsiAuthClientGetDebugStatus(auth_client, &debug_status) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "login authentication failed with target %s, %s\n", + session->TargetName, iscsiAuthClientDebugStatusToText(debug_status)); + } + else { + logmsg(AS_ERROR, "login authentication failed with target %s\n", session->TargetName); + } + return LOGIN_AUTHENTICATION_FAILED; + } + } + } + + /* record some of the PDU fields for later use */ + session->tsid = iscsi_ntohs(login_rsp_pdu->tsid); + session->ExpCmdSn = iscsi_ntohl(login_rsp_pdu->expcmdsn); + session->MaxCmdSn = iscsi_ntohl(login_rsp_pdu->maxcmdsn); + if (login_rsp_pdu->status_class == STATUS_CLASS_SUCCESS) + session->ExpStatSn = iscsi_ntohl(login_rsp_pdu->statsn) + 1; + + if (transit) { + /* advance to the next stage */ + session->partial_response = 0; + session->current_stage = login_rsp_pdu->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; + } + else { + /* we got a partial response, don't advance, more negotiation to do */ + session->partial_response = 1; + } + + return LOGIN_OK; /* this PDU is ok, though the login process may not be done yet */ 
+} + +int iscsi_make_text_pdu(iscsi_session_t *session, struct IscsiHdr *pdu, char *data, int max_data_length) +{ + struct IscsiTextHdr *text_pdu = (struct IscsiTextHdr *)pdu; + + /* initialize the PDU header */ + memset(text_pdu, 0, sizeof(*text_pdu)); + + text_pdu->opcode = ISCSI_OP_TEXT_CMD; + text_pdu->itt = iscsi_htonl(session->itt); + text_pdu->ttt = RSVD_TASK_TAG; + text_pdu->cmdsn = iscsi_htonl(session->CmdSn++); + text_pdu->expstatsn = iscsi_htonl(session->ExpStatSn); + + return 1; +} + + +int iscsi_make_login_pdu(iscsi_session_t *session, struct IscsiHdr *pdu, char *data, int max_data_length) +{ + int transit = 0; + char value[iscsiAuthStringMaxLength]; + struct IscsiLoginHdr *login_pdu = (struct IscsiLoginHdr *)pdu; + IscsiAuthClient *auth_client = (session->auth_buffers && session->num_auth_buffers) ? + (IscsiAuthClient *)session->auth_buffers[0].address : NULL; + + /* initialize the PDU header */ + memset(login_pdu, 0, sizeof(*login_pdu)); + login_pdu->opcode = ISCSI_OP_LOGIN_CMD | ISCSI_OP_IMMEDIATE; + login_pdu->cid = 0; + memcpy(login_pdu->isid, session->isid, sizeof(session->isid)); + login_pdu->tsid = 0; + login_pdu->cmdsn = iscsi_htonl(session->CmdSn); /* don't increment on immediate */ + + login_pdu->min_version = ISCSI_DRAFT20_VERSION; + login_pdu->max_version = ISCSI_DRAFT20_VERSION; + + /* we have to send 0 until full-feature stage */ + login_pdu->expstatsn = iscsi_htonl(session->ExpStatSn); + + /* the very first Login PDU has some additional requirements, + * and we need to decide what stage to start in. 
+ */ + if (session->current_stage == ISCSI_INITIAL_LOGIN_STAGE) { + if (session->InitiatorName && session->InitiatorName[0]) { + if (!iscsi_add_text(session, pdu, data, max_data_length, "InitiatorName", session->InitiatorName)) + return 0; + } + else { + logmsg(AS_ERROR, "InitiatorName is required on the first Login PDU\n"); + return 0; + } + if (session->InitiatorAlias && session->InitiatorAlias[0]) { + if (!iscsi_add_text(session, pdu, data, max_data_length, "InitiatorAlias", session->InitiatorAlias)) + return 0; + } + + if (session->TargetName[0] != '\0') { + if (!iscsi_add_text(session, pdu, data, max_data_length, "TargetName", session->TargetName)) + return 0; + } + + if (!iscsi_add_text(session, pdu, data, max_data_length, "SessionType", + (session->type == ISCSI_SESSION_TYPE_DISCOVERY) ? "Discovery" : "Normal")) + return 0; + + if (auth_client) { + /* we're prepared to do authentication */ + session->current_stage = session->next_stage = ISCSI_SECURITY_NEGOTIATION_STAGE; + } + else { + /* can't do any authentication, skip that stage */ + session->current_stage = session->next_stage = ISCSI_OP_PARMS_NEGOTIATION_STAGE; + } + } + + /* fill in text based on the stage */ + switch (session->current_stage) { + case ISCSI_OP_PARMS_NEGOTIATION_STAGE: { + /* we always try to go from op params to full feature stage */ + session->current_stage = ISCSI_OP_PARMS_NEGOTIATION_STAGE; + session->next_stage = ISCSI_FULL_FEATURE_PHASE; + transit = 1; + + /* the terminology here may have gotten dated. a partial + * response is a login response that doesn't complete a + * login. If we haven't gotten a partial response, then + * either we shouldn't be here, or we just switched to + * this stage, and need to start offering keys. 
+ */ + if (!session->partial_response) { + /* request the desired settings the first time we are in this stage */ + switch (session->HeaderDigest) { + case ISCSI_DIGEST_NONE: + if (!iscsi_add_text(session, pdu, data, max_data_length, "HeaderDigest", "None")) + return 0; + break; + case ISCSI_DIGEST_CRC32C: + if (!iscsi_add_text(session, pdu, data, max_data_length, "HeaderDigest", "CRC32C")) + return 0; + break; + case ISCSI_DIGEST_CRC32C_NONE: + if (!iscsi_add_text(session, pdu, data, max_data_length, "HeaderDigest", "CRC32C,None")) + return 0; + break; + default: + case ISCSI_DIGEST_NONE_CRC32C: + if (!iscsi_add_text(session, pdu, data, max_data_length, "HeaderDigest", "None,CRC32C")) + return 0; + break; + } + + switch (session->DataDigest) { + case ISCSI_DIGEST_NONE: + if (!iscsi_add_text(session, pdu, data, max_data_length, "DataDigest", "None")) + return 0; + break; + case ISCSI_DIGEST_CRC32C: + if (!iscsi_add_text(session, pdu, data, max_data_length, "DataDigest", "CRC32C")) + return 0; + break; + case ISCSI_DIGEST_CRC32C_NONE: + if (!iscsi_add_text(session, pdu, data, max_data_length, "DataDigest", "CRC32C,None")) + return 0; + break; + default: + case ISCSI_DIGEST_NONE_CRC32C: + if (!iscsi_add_text(session, pdu, data, max_data_length, "DataDigest", "None,CRC32C")) + return 0; + break; + } + + iscsi_sprintf(value, "%d", session->MaxRecvDataSegmentLength); + if (!iscsi_add_text(session, pdu, data, max_data_length, "MaxRecvDataSegmentLength", value)) + return 0; + + if (session->type == ISCSI_SESSION_TYPE_NORMAL) { + /* these are only relevant for normal sessions */ + if (!iscsi_add_text(session, pdu, data, max_data_length, "InitialR2T", session->InitialR2T ? "Yes" : "No")) + return 0; + + if (!iscsi_add_text(session, pdu, data, max_data_length, "ImmediateData", session->ImmediateData ? 
"Yes" : "No")) + return 0; + + iscsi_sprintf(value, "%d", session->MaxBurstLength); + if (!iscsi_add_text(session, pdu, data, max_data_length, "MaxBurstLength", value)) + return 0; + + iscsi_sprintf(value, "%d", session->FirstBurstLength); + if (!iscsi_add_text(session, pdu, data, max_data_length, "FirstBurstLength", value)) + return 0; + + iscsi_sprintf(value, "%d", session->DefaultTime2Wait); + if (!iscsi_add_text(session, pdu, data, max_data_length, "DefaultTime2Wait", value)) + return 0; + + iscsi_sprintf(value, "%d", session->DefaultTime2Retain); + if (!iscsi_add_text(session, pdu, data, max_data_length, "DefaultTime2Retain", value)) + return 0; + + /* these we must have */ + if (!iscsi_add_text(session, pdu, data, max_data_length, "MaxOutstandingR2T", "1")) + return 0; + if (!iscsi_add_text(session, pdu, data, max_data_length, "MaxConnections", "1")) + return 0; + if (!iscsi_add_text(session, pdu, data, max_data_length, "ErrorRecoveryLevel", "0")) + return 0; + if (!iscsi_add_text(session, pdu, data, max_data_length, "IFMarker", "No")) + return 0; + if (!iscsi_add_text(session, pdu, data, max_data_length, "OFMarker", "No")) + return 0; + + /* FIXME: the caller may want different settings for these. */ + if (!iscsi_add_text(session, pdu, data, max_data_length, "DataPDUInOrder", "Yes")) + return 0; + if (!iscsi_add_text(session, pdu, data, max_data_length, "DataSequenceInOrder", "Yes")) + return 0; + } + + /* Note: 12.22 forbids vendor-specific keys on discovery sessions, so the caller + * is violating the spec if it asks for these on a discovery session. + */ + if (session->vendor_specific_keys) { + /* adjust the target's PingTimeout for normal sessions, so that it matches + * the driver's ping timeout. The network probably has the same latency in + * both directions, so the values ought to match. 
+ */ + if (session->ping_timeout >= 0) { + iscsi_sprintf(value, "%d", session->ping_timeout); + if (!iscsi_add_text(session, pdu, data, max_data_length, "X-com.cisco.PingTimeout", value)) + return 0; + } + + if (session->send_async_text >= 0) { + if (!iscsi_add_text(session, pdu, data, max_data_length, "X-com.cisco.sendAsyncText", session->send_async_text ? "Yes" : "No")) + return 0; + } + /* vendor-specific protocol specification. list of protocol level strings in order of preference + * allowable values are: draft (e.g. draft8), rfc (e.g. rfc666). + * For example: + * "X-com.cisco.protocol=draft20,draft8" requests draft 20, or 8 if 20 isn't supported. + * "X-com.cisco.protocol=draft8,draft20" requests draft 8, or 20 if 8 isn't supported. + * + * Targets that understand this key SHOULD return the protocol level they selected as + * a response to this key, though the active_version may be sufficient to distinguish + * which protocol was chosen. + * + * Note: This probably won't work unless we start in op param stage, since the security + * stage limits what keys we can send, and we'd need to have sent this on the first PDU + * of the login. Keep sending it for informational use, and so that we can sanity + * check things later if the RFC and draft20 are using the same active version number, + * but have non-trivial differences. + */ + if (!iscsi_add_text(session, pdu, data, max_data_length, "X-com.cisco.protocol", "draft20")) + return 0; + } + } + else { + /* FIXME: echo back the keys the target sent us, with the current values for those keys + * or NotUnderstood, and request the next stage. + * FIXME: make this code handle vendor-defined keys sent by the target. + * We can't handle them now because we don't have anyplace to save the key until the + * response is generated, which is what we'd need to do to send key=NotUnderstood. 
+ */ + } + break; + } + case ISCSI_SECURITY_NEGOTIATION_STAGE: { + int keytype = iscsiAuthKeyTypeNone; + int rc = iscsiAuthClientSendTransitBit(auth_client, &transit); + + /* see if we're ready for a stage change */ + if (rc == iscsiAuthStatusNoError) { + if (transit) { + /* discovery sessions can go right to full-feature phase, unless + * they want to non-standard values for the few relevant keys, + * or want to offer vendor-specific keys. + */ + if (session->type == ISCSI_SESSION_TYPE_DISCOVERY) { + if ((session->HeaderDigest != ISCSI_DIGEST_NONE) || + (session->DataDigest != ISCSI_DIGEST_NONE) || + (session->MaxRecvDataSegmentLength != DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH) || + (session->vendor_specific_keys)) + { + session->next_stage = ISCSI_OP_PARMS_NEGOTIATION_STAGE; + } + else { + session->next_stage = ISCSI_FULL_FEATURE_PHASE; + } + } + else { + session->next_stage = ISCSI_OP_PARMS_NEGOTIATION_STAGE; + } + } + else { + session->next_stage = ISCSI_SECURITY_NEGOTIATION_STAGE; + } + } + else { + return 0; + } + + /* enumerate all the keys the auth code might want to send */ + while (iscsiAuthClientGetNextKeyType(&keytype) == iscsiAuthStatusNoError) { + int present = 0; + char *key = (char *)iscsiAuthClientGetKeyName(keytype); + int key_length = key ? iscsi_strlen(key) : 0; + int pdu_length = ntoh24(pdu->dlength); + char *auth_value = data + pdu_length + key_length + 1; + unsigned int max_length = max_data_length - (pdu_length + key_length + 1); /* FIXME: check this */ + + /* add the key/value pairs the auth code wants to send directly to the PDU, + * since they could in theory be large. 
+ */ + rc = iscsiAuthClientSendKeyValue(auth_client, keytype, &present, auth_value, max_length); + if ((rc == iscsiAuthStatusNoError) && present) { + /* actually fill in the key */ + strncpy(&data[pdu_length], key, key_length); + pdu_length += key_length; + data[pdu_length] = '='; + pdu_length++; + /* adjust the PDU's data segment length to include the value and trailing NUL */ + pdu_length += iscsi_strlen(auth_value) + 1; + hton24(pdu->dlength, pdu_length); + } + } + + break; + } + case ISCSI_FULL_FEATURE_PHASE: + logmsg(AS_ERROR, "can't send login PDUs in full feature phase\n"); + return 0; + default: + logmsg(AS_ERROR, "can't send login PDUs in unknown stage %d\n", session->current_stage); + return 0; + } + + /* fill in the flags */ + login_pdu->flags = 0; + login_pdu->flags |= session->current_stage << 2; + if (transit) { + /* transit to the next stage */ + login_pdu->flags |= session->next_stage; + login_pdu->flags |= ISCSI_FLAG_LOGIN_TRANSIT; + } + else { + /* next == current */ + login_pdu->flags |= session->current_stage; + } + return 1; +} + + +/* attempt to login to the target. + * The caller must check the status class to determine if the login succeeded. + * A return of 1 does not mean the login succeeded, it just means this function + * worked, and the status class is valid info. This allows the caller to decide + * whether or not to retry logins, so that we don't have any policy logic here. 
+ */ +iscsi_login_status_t iscsi_login(iscsi_session_t *session, char *buffer, size_t bufsize, uint8_t *status_class, uint8_t *status_detail) +{ + IscsiAuthClient *auth_client = NULL; + int received_pdu = 0; + iscsi_login_status_t ret = LOGIN_FAILED; + + /* prepare the session */ + session->CmdSn = 1; + session->ExpCmdSn = 1; + session->MaxCmdSn = 1; + session->ExpStatSn = 0; + + session->current_stage = ISCSI_INITIAL_LOGIN_STAGE; + session->partial_response = 0; + + if (session->auth_buffers && session->num_auth_buffers) { + auth_client = (IscsiAuthClient *)session->auth_buffers[0].address; + + /* prepare for authentication */ + if (iscsiAuthClientInit(iscsiAuthNodeTypeInitiator, session->num_auth_buffers, session->auth_buffers) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "couldn't initialize authentication\n"); + return LOGIN_FAILED; + } + + if (iscsiAuthClientSetVersion(auth_client, iscsiAuthVersionRfc) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "couldn't set authentication version RFC\n"); + goto done; + } + + if (session->username && (iscsiAuthClientSetUsername(auth_client, session->username) != iscsiAuthStatusNoError)) { + logmsg(AS_ERROR, "couldn't set username\n"); + goto done; + } + + if (session->password && (iscsiAuthClientSetPassword(auth_client, session->password, session->password_length) != iscsiAuthStatusNoError)) { + logmsg(AS_ERROR, "couldn't set password\n"); + goto done; + } + + /* FIXME: we disable the minimum size check for now */ + if (iscsiAuthClientSetIpSec(auth_client, 1) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "couldn't set IPSec\n"); + goto done; + } + + if (iscsiAuthClientSetAuthRemote(auth_client, session->bidirectional_auth) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "couldn't set remote authentication\n"); + goto done; + } + } + + /* exchange PDUs until the login stage is complete, or an error occurs */ + do { + struct IscsiHdr pdu; + struct IscsiLoginRspHdr *login_rsp_pdu = (struct IscsiLoginRspHdr *)&pdu; + 
char *data; + int max_data_length; + int timeout = 0; + + memset(buffer, 0, bufsize); + + data = buffer; + max_data_length = bufsize; + + ret = LOGIN_FAILED; + + /* pick the appropriate timeout. If we know the target has + * responded before, and we're in the security stage, we use a + * longer timeout, since the authentication algorithms can + * take a while, especially if the target has to go talk to a + * tacacs or RADIUS server (which may or may not be + * responding). + */ + if (received_pdu && (session->current_stage == ISCSI_SECURITY_NEGOTIATION_STAGE)) + timeout = session->auth_timeout; + else + timeout = session->login_timeout; + + /* fill in the PDU header and text data based on the login stage that we're in */ + if (!iscsi_make_login_pdu(session, &pdu, data, max_data_length)) { + logmsg(AS_ERROR, "login failed, couldn't make a login PDU\n"); + ret = LOGIN_FAILED; + goto done; + } + + /* send a PDU to the target */ + if (!iscsi_send_pdu(session, &pdu, data, timeout)) { + /* FIXME: caller might want us to distinguish I/O error and timeout. + * might want to switch portals on timeouts, but not I/O errors. + */ + logmsg(AS_ERROR, "login I/O error, failed to send a PDU\n"); + ret = LOGIN_IO_ERROR; + goto done; + } + + /* read the target's response into the same buffer */ + memset(buffer, 0, bufsize); + if (!iscsi_recv_pdu(session, &pdu, sizeof(pdu), data, max_data_length, timeout)) { + /* FIXME: caller might want us to distinguish I/O error and timeout. + * might want to switch portals on timeouts, but not I/O errors. 
+ */ + logmsg(AS_ERROR, "login I/O error, failed to receive a PDU\n"); + ret = LOGIN_IO_ERROR; + goto done; + } + + received_pdu = 1; + + /* check the PDU response type */ + if (pdu.opcode == (ISCSI_OP_LOGIN_RSP | 0xC0)) { + /* it's probably a draft 8 login response, which we can't deal with */ + logmsg(AS_ERROR, "received iSCSI draft 8 login response opcode 0x%x, expected draft 20 login response 0x%2x\n", + pdu.opcode, ISCSI_OP_LOGIN_RSP); + logmsg(AS_ERROR, "please make sure that you have installed the correct driver version.\n"); + ret = LOGIN_VERSION_MISMATCH; + goto done; + } + else if (pdu.opcode != ISCSI_OP_LOGIN_RSP) { + logmsg(AS_ERROR, "received invalid PDU during login, opcode 0x%2x, expected login response opcode 0x%2x\n", + pdu.opcode, ISCSI_OP_LOGIN_RSP); + ret = LOGIN_INVALID_PDU; + goto done; + } + + /* give the caller the status class and detail from the last login response PDU received */ + if (status_class) + *status_class = login_rsp_pdu->status_class; + if (status_detail) + *status_detail = login_rsp_pdu->status_detail; + + switch (login_rsp_pdu->status_class) { + case STATUS_CLASS_SUCCESS: + /* process this response and possibly continue sending PDUs */ + ret = iscsi_process_login_response(session, login_rsp_pdu, data, max_data_length); + if (ret != LOGIN_OK) /* pass back whatever error we discovered */ + goto done; + break; + case STATUS_CLASS_REDIRECT: + /* we need to process this response to get the TargetAddress of the redirect, + * but we don't care about the return code. + * FIXME: we really only need to process a TargetAddress, but there shouldn't + * be any other keys. 
+ */ + iscsi_process_login_response(session, login_rsp_pdu, data, max_data_length); + ret = LOGIN_OK; + goto done; + case STATUS_CLASS_INITIATOR_ERR: + if (login_rsp_pdu->status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED) { + logmsg(AS_ERROR, "login failed to authenticate with target %s\n", session->TargetName); + } + ret = LOGIN_OK; + goto done; + default: + /* some sort of error, login terminated unsuccessfully, though this function did its job. + * the caller must check the status_class and status_detail and decide what to do next. + */ + ret = LOGIN_OK; + goto done; + } + + } while (session->current_stage != ISCSI_FULL_FEATURE_PHASE); + + ret = LOGIN_OK; + + done: + if (auth_client) { + if (iscsiAuthClientFinish(auth_client) != iscsiAuthStatusNoError) { + logmsg(AS_ERROR, "login failed, error finishing authClient\n"); + if (ret == LOGIN_OK) + ret = LOGIN_FAILED; + } + /* FIXME: clear the temp buffers as well? */ + } + + return ret; +} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-login.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-login.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-login.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-login.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,69 @@ +#ifndef ISCSI_LOGIN_H_ +#define ISCSI_LOGIN_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-login.h,v 1.11 2003/02/14 21:54:52 smferris Exp $ + * + * iscsi-login.h + * + * include for iSCSI login + * + */ + +#include "iscsi-platform.h" +#include "iscsi-protocol.h" +#include "iscsi-session.h" + +#define ISCSI_SESSION_TYPE_NORMAL 0 +#define ISCSI_SESSION_TYPE_DISCOVERY 1 + +/* not defined by iSCSI, but used in the login code to determine when to send the initial Login PDU */ +#define ISCSI_INITIAL_LOGIN_STAGE -1 + +#define ISCSI_TEXT_SEPARATOR '=' + +typedef enum iscsi_login_status { + LOGIN_OK = 0, /* library worked, but caller must check the status class and detail */ + LOGIN_IO_ERROR, /* PDU I/O failed, connection have been closed or reset */ + LOGIN_FAILED, /* misc. failure */ + LOGIN_VERSION_MISMATCH, /* incompatible iSCSI protocol version */ + LOGIN_NEGOTIATION_FAILED,/* didn't like a key value (or received an unknown key) */ + LOGIN_AUTHENTICATION_FAILED, /* auth code indicated failure */ + LOGIN_WRONG_PORTAL_GROUP,/* portal group tag didn't match the one required */ + LOGIN_REDIRECTION_FAILED,/* couldn't handle the redirection requested by the target */ + LOGIN_INVALID_PDU, /* received an incorrect opcode, or bogus fields in a PDU */ +} iscsi_login_status_t; + +/* implemented in iscsi-login.c for use on all platforms */ +extern struct IscsiHdr *iscsi_align_pdu(iscsi_session_t *session, unsigned char *buffer, int buffersize); +extern int iscsi_make_text_pdu(iscsi_session_t *session, struct IscsiHdr *text_pdu, char *data, int max_data_length); +extern int iscsi_make_login_pdu(iscsi_session_t *session, struct IscsiHdr *text_pdu, char *data, int max_data_length); + +extern int iscsi_add_text(iscsi_session_t *session, struct IscsiHdr *pdu, char *data, int max_data_length, char *param, char *value); + +extern iscsi_login_status_t iscsi_login(iscsi_session_t *session, char *buffer, size_t bufsize, 
uint8_t *status_class, uint8_t *status_detail); + +/* Digest types */ +#define ISCSI_DIGEST_NONE 0 +#define ISCSI_DIGEST_CRC32C 1 +#define ISCSI_DIGEST_CRC32C_NONE 2 /* offer both, prefer CRC32C */ +#define ISCSI_DIGEST_NONE_CRC32C 3 /* offer both, prefer None */ + +#endif + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-platform.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-platform.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-platform.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-platform.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,136 @@ +#ifndef ISCSI_PLATFORM_H_ +#define ISCSI_PLATFORM_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-platform.h,v 1.14 2003/04/11 10:06:38 naveenb Exp $ + * + * iscsi-platform.h + * + * abstract platform dependencies + * + */ + +#if defined(LINUX) +# ifdef __KERNEL__ +# include +# include +# include +# include +# include +# include +# include +# include +# include +# define AS_ERROR KERN_ERR +# define AS_NOTICE KERN_NOTICE +# define AS_INFO KERN_INFO +# define AS_DEBUG KERN_DEBUG +# define logmsg(level, fmt, args...) printk(level "iSCSI: session %p " fmt, session , ##args) +# define debugmsg(level, fmt, arg...) 
do { } while (0) +# define iscsi_strtoul simple_strtoul +# ifdef __BIG_ENDIAN +# define WORDS_BIGENDIAN 1 +# endif +# else +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# define AS_ERROR LOG_ERR +# define AS_NOTICE LOG_NOTICE +# define AS_INFO LOG_INFO +# define AS_DEBUG LOG_DEBUG +# define iscsi_atoi atoi +# define iscsi_inet_aton inet_aton +# define iscsi_strtoul strtoul +extern void debugmsg(int level, const char *fmt, ...); +extern void errormsg(const char *fmt, ...); +extern void logmsg(int priority, const char *fmt, ...); +# endif +/* both the kernel and userland have the normal names available */ +# define iscsi_memcmp memcmp +# define iscsi_strcmp strcmp +# define iscsi_strrchr strrchr +# define iscsi_strncmp strncmp +# define iscsi_strlen strlen +# define iscsi_strncpy strncpy +# define iscsi_sprintf sprintf +# define iscsi_isdigit isdigit +# define iscsi_isspace isspace +# define iscsi_ntohl ntohl +# define iscsi_ntohs ntohs +# define iscsi_htonl htonl +# define iscsi_htons htons + +#elif defined(SOLARIS) +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# define AS_ERROR LOG_ERR +# define AS_NOTICE LOG_NOTICE +# define AS_INFO LOG_INFO +# define AS_DEBUG LOG_DEBUG +# define iscsi_memcmp memcmp +# define iscsi_strtoul strtoul +# define iscsi_atoi atoi +# define iscsi_strcmp strcmp +# define iscsi_strrchr strrchr +# define iscsi_strncmp strncmp +# define iscsi_strlen strlen +# define iscsi_strncpy strncpy +# define iscsi_sprintf sprintf +# define iscsi_isdigit isdigit +# define iscsi_isspace isspace +# define iscsi_ntohl ntohl +# define iscsi_ntohs ntohs +# define iscsi_htonl htonl +# define iscsi_htons htons +extern int inet_aton(const char *addrp, struct in_addr *addr); +# define iscsi_inet_aton inet_aton +extern void debugmsg(int level, const char *fmt, ...); +extern void errormsg(const char *fmt, ...); 
+extern void logmsg(int priority, const char *fmt, ...); +#endif /* SOLARIS */ + + +#ifndef MIN +# define MIN(x,y) ((x) < (y) ? (x) : (y)) +#endif + +#ifndef MAX +# define MAX(x,y) ((x) >= (y) ? (x) : (y)) +#endif + + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-portal.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-portal.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-portal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-portal.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,55 @@ +#ifndef ISCSI_PORTAL_H_ +#define ISCSI_PORTAL_H_ +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-portal.h,v 1.4 2002/12/18 00:46:08 smferris Exp $ + * + * portal info structure used in ioctls and the kernel module + * + */ + +typedef struct iscsi_portal_info { + int login_timeout; + int auth_timeout; + int active_timeout; + int idle_timeout; + int ping_timeout; + int abort_timeout; + int reset_timeout; + int replacement_timeout; /* FIXME: should this be per-session rather than per-portal? */ + int min_disk_command_timeout; /* FIXME: should this be per-session rather than per-portal? */ + int max_disk_command_timeout; /* FIXME: should this be per-session rather than per-portal? 
*/ + int InitialR2T; + int ImmediateData; + int MaxRecvDataSegmentLength; + int FirstBurstLength; + int MaxBurstLength; + int DefaultTime2Wait; + int DefaultTime2Retain; + int HeaderDigest; + int DataDigest; + int ip_length; + unsigned char ip_address[16]; + int port; + int tag; + int tcp_window_size; + int type_of_service; + int preference; /* preference relative to other portals, higher is better */ +} iscsi_portal_info_t; + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.c 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,1657 @@ +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsi-probe.c,v 1.21.4.3 2003/08/19 11:24:07 krishmnc Exp $ + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) ) +# include +#else +# include +#endif +#include +#include + +/* these are from $(TOPDIR)/drivers/scsi, not $(TOPDIR)/include */ +#include +#include + +#include "iscsi-common.h" +#include "iscsi-ioctl.h" +#include "iscsi.h" + +/* LUN probing needs to be serialized across all HBA's, to keep a somewhat sane ordering */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) ) +DECLARE_MUTEX(iscsi_lun_probe_mutex); +#else +struct semaphore iscsi_lun_probe_mutex = MUTEX; +#endif +spinlock_t iscsi_lun_probe_lock = SPIN_LOCK_UNLOCKED; +static iscsi_session_t *iscsi_lun_probe_head = NULL; +static iscsi_session_t *iscsi_lun_probe_tail = NULL; +static iscsi_session_t *iscsi_currently_probing = NULL; +static volatile int iscsi_next_probe = 0; +volatile unsigned long iscsi_lun_probe_start = 0; +#if 0 +struct dirent +{ + long d_ino; /* inode number */ + off_t d_off; /* offset to next dirent */ + unsigned short d_reclen; /* length of this dirent */ + char d_name[1]; /* file name (null-terminated) */ +}; +#endif + +/* we need to make some syscalls to create and destroy the device name tree. 
*/ +static int errno = 0; +static inline _syscall2(long, mkdir, const char *, dir, int, mode); +static inline _syscall1(long, unlink, const char *, path); +static inline _syscall2(long, symlink, const char *, oldname, const char *, newname); +static inline _syscall3(int,open,const char *,file,int,flag,int,mode) +static inline _syscall1(int,close,int,fd) +static inline _syscall1(long, rmdir, const char *, path); +static inline _syscall3(int, getdents, uint, fd, struct dirent *, dirp, uint, count); + +/* caller must hold iscsi_lun_probe_lock */ +static int enqueue_lun_probe(iscsi_session_t *session) +{ + if (session->probe_next || session->probe_prev) { + DEBUG_INIT("iSCSI: session %p already queued for LUN probing\n", session); + return 0; + } + + if (iscsi_lun_probe_head) { + if (session->probe_order < iscsi_lun_probe_head->probe_order) { + /* insert before the current head */ + session->probe_prev = NULL; + session->probe_next = iscsi_lun_probe_head; + iscsi_lun_probe_head->probe_prev = session; + iscsi_lun_probe_head = session; + } + else if (session->probe_order >= iscsi_lun_probe_tail->probe_order) { + /* insert after the tail */ + session->probe_next = NULL; + session->probe_prev = iscsi_lun_probe_tail; + iscsi_lun_probe_tail->probe_next = session; + iscsi_lun_probe_tail = session; + } + else { + /* insert somewhere in the middle */ + iscsi_session_t *search = iscsi_lun_probe_head; + while (search && search->probe_next) { + if (session->probe_order < search->probe_next->probe_order) { + session->probe_next = search->probe_next; + session->probe_prev = search; + search->probe_next->probe_prev = session; + search->probe_next = session; + break; + } + search = search->probe_next; + } + } + } + else { + /* become the only session in the queue */ + session->probe_next = session->probe_prev = NULL; + iscsi_lun_probe_head = iscsi_lun_probe_tail = session; + } + return 1; +} + +/* caller must hold iscsi_lun_probe_lock */ +static void dequeue_lun_probe(iscsi_session_t 
*session) +{ + if (iscsi_currently_probing == session) { + /* the timer may have tried to start us probing just before we gave up */ + iscsi_currently_probing = NULL; + } + else { + if (iscsi_lun_probe_head == session) { + if ((iscsi_lun_probe_head = iscsi_lun_probe_head->probe_next)) + iscsi_lun_probe_head->probe_prev = NULL; + else + iscsi_lun_probe_tail = NULL; + } + else if (iscsi_lun_probe_tail == session) { + iscsi_lun_probe_tail = iscsi_lun_probe_tail->probe_prev; + iscsi_lun_probe_tail->probe_next = NULL; + } + else { + /* in the middle */ + if (session->probe_next && session->probe_prev) { + session->probe_prev->probe_next = session->probe_next; + session->probe_next->probe_prev = session->probe_prev; + } + else { + printk("iSCSI: bug - dequeue_lun_probe %p, prev %p, next %p\n", + session, session->probe_prev, session->probe_next); + } + } + } +} + +static int wait_for_probe_order(iscsi_session_t *session) +{ + spin_lock(&iscsi_lun_probe_lock); + if ((iscsi_currently_probing == session) || session->probe_next || session->probe_prev) { + /* we're already probing or queued to be probed, ignore the 2nd probe request */ + DEBUG_INIT("iSCSI: session %p to %s ignoring duplicate probe request\n", + session, session->log_name); + spin_unlock(&iscsi_lun_probe_lock); + return 0; + } + else if ((iscsi_currently_probing == NULL) && (session->probe_order <= iscsi_next_probe)) { + /* if there's no LUN being probed, and our probe_order can go now, start probing */ + DEBUG_INIT("iSCSI: session %p to %s, probe_order %d <= next %d, not waiting\n", + session, session->log_name, session->probe_order, iscsi_next_probe); + iscsi_currently_probing = session; + + /* let the timer know another session became ready for LUN probing. 
*/ + iscsi_lun_probe_start = (jiffies + (3 * HZ)); + if (iscsi_lun_probe_start == 0) + iscsi_lun_probe_start = 1; + smp_mb(); + + spin_unlock(&iscsi_lun_probe_lock); + return 1; + } + else if (enqueue_lun_probe(session)) { + /* otherwise queue up based on our probe order */ + + /* tell the timer when to start the LUN probing, to handle gaps in the probe_order */ + iscsi_lun_probe_start = (jiffies + (3 * HZ)) ? (jiffies + (3 * HZ)) : 1; + smp_mb(); + DEBUG_INIT("iSCSI: queued session %p for LUN probing, probe_order %d, probe_start at %lu\n", + session, session->probe_order, iscsi_lun_probe_start); + + spin_unlock(&iscsi_lun_probe_lock); + + /* and wait for either the timer or the currently probing session to wake us up */ + if (down_interruptible(&session->probe_sem)) { + printk("iSCSI: session %p to %s interrupted while waiting to probe LUNs\n", session, session->log_name); + /* give up and take ourselves out of the lun probing data structures */ + spin_lock(&iscsi_lun_probe_lock); + dequeue_lun_probe(session); + spin_unlock(&iscsi_lun_probe_lock); + return 0; + } + + /* give up if the session is terminating */ + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p to %s terminated while waiting to probe LUNs\n", session, session->log_name); + /* give up and take ourselves out of the lun probing data structures */ + spin_lock(&iscsi_lun_probe_lock); + dequeue_lun_probe(session); + spin_unlock(&iscsi_lun_probe_lock); + return 0; + } + +#ifdef DEBUG + /* we should be out of the queue, and in iscsi_currently_probing */ + spin_lock(&iscsi_lun_probe_lock); + if (iscsi_currently_probing != session) + printk("iSCSI: bug - currently probing should be %p, not %p\n", session, iscsi_currently_probing); + spin_unlock(&iscsi_lun_probe_lock); +#endif + DEBUG_INIT("iSCSI: wait_for_probe_order %p returning 1\n", session); + return 1; + } + + /* silently fail, since the enqueue attempt will have logged any detailed messages needed */ + 
 spin_unlock(&iscsi_lun_probe_lock); + return 0; +} + +/* caller must hold iscsi_lun_probe_lock */ +static void start_next_lun_probe(void) +{ + if (iscsi_currently_probing) { + printk("iSCSI: bug - start_next_lun_probe called while currently probing %p at %lu\n", + iscsi_currently_probing, jiffies); + } + else if (iscsi_lun_probe_head) { + /* pop one off the queue, and tell it to start probing */ + iscsi_currently_probing = iscsi_lun_probe_head; + if ((iscsi_lun_probe_head = iscsi_currently_probing->probe_next)) + iscsi_lun_probe_head->probe_prev = NULL; + else + iscsi_lun_probe_tail = NULL; + + /* it's out of the queue now */ + iscsi_currently_probing->probe_next = NULL; + iscsi_currently_probing->probe_prev = NULL; + + /* skip over any gaps in the probe order */ + if (iscsi_next_probe < iscsi_currently_probing->probe_order) { + DEBUG_INIT("iSCSI: LUN probe_order skipping from %d to %d\n", + iscsi_next_probe, iscsi_currently_probing->probe_order); + iscsi_next_probe = iscsi_currently_probing->probe_order; + smp_mb(); + } + + /* wake up the ioctl which is waiting to do a probe */ + DEBUG_INIT("iSCSI: starting LUN probe for session %p to %s\n", + iscsi_currently_probing, iscsi_currently_probing->log_name); + up(&iscsi_currently_probing->probe_sem); + } + else { + /* if there is nothing else queued, then we don't need the timer to keep checking, + * and we want to reset the probe order so that future LUN probes get queued, + * and maintain the proper relative order amongst themselves, even if the global + * order may have been lost. 
+ */ + DEBUG_INIT("iSCSI: start_next_lun_probe has nothing to start, resetting next LUN probe from %d to 0 at %lu\n", + iscsi_next_probe, jiffies); + iscsi_lun_probe_start = 0; + iscsi_next_probe = 0; + smp_mb(); + } +} + +void iscsi_possibly_start_lun_probing(void) { + spin_lock(&iscsi_lun_probe_lock); + if (iscsi_currently_probing == NULL) { + /* if we're not probing already, make sure we start */ + DEBUG_INIT("iSCSI: timer starting LUN probing at %lu\n", jiffies); + start_next_lun_probe(); + } + spin_unlock(&iscsi_lun_probe_lock); +} + +static void iscsi_probe_finished(iscsi_session_t *session) +{ + spin_lock(&iscsi_lun_probe_lock); + if (iscsi_currently_probing == session) { + iscsi_currently_probing = NULL; + DEBUG_INIT("iSCSI: session %p to %s finished probing LUNs at %lu\n", session, session->log_name, jiffies); + + /* continue through the probe order */ + if (iscsi_next_probe == session->probe_order) + iscsi_next_probe++; + + /* and possibly start another session probing */ + if (iscsi_lun_probe_head == NULL) { + /* nothing is queued, reset LUN probing */ + DEBUG_INIT("iSCSI: probe_finished has nothing to start, resetting next LUN probe from %d to 0 at %lu\n", + iscsi_next_probe, jiffies); + iscsi_next_probe = 0; + iscsi_lun_probe_start = 0; + smp_mb(); + } + else if ((iscsi_lun_probe_head->probe_order <= iscsi_next_probe) || + (iscsi_lun_probe_start && time_before_eq(iscsi_lun_probe_start, jiffies))) + { + /* next in order is up, or the timer has expired, start probing */ + start_next_lun_probe(); + } + else { + DEBUG_INIT("iSCSI: iscsi_probe_finished can't start_next_lun_probe at %lu, next %d, head %p (%d), tail %p (%d), current %p, start time %lu\n", + jiffies, iscsi_next_probe, + iscsi_lun_probe_head, iscsi_lun_probe_head ? iscsi_lun_probe_head->probe_order : -1, + iscsi_lun_probe_tail, iscsi_lun_probe_tail ? 
iscsi_lun_probe_tail->probe_order : -1, + iscsi_currently_probing, iscsi_lun_probe_start); + } + } + else { + /* should be impossible */ + printk("iSCSI: bug - session %p in iscsi_probe_finished, but currently probing %p\n", session, iscsi_currently_probing); + } + spin_unlock(&iscsi_lun_probe_lock); +} + +/* try to write to /proc/scsi/scsi */ +static int write_proc_scsi_scsi(iscsi_session_t *session, char *str) +{ + struct file *filp = NULL; + loff_t offset = 0; + int rc = 0; + mm_segment_t oldfs = get_fs(); + + set_fs( get_ds() ); + + filp = filp_open("/proc/scsi/scsi", O_WRONLY, 0); + if (IS_ERR(filp)) { + printk("iSCSI: session %p couldn't open /proc/scsi/scsi\n", session); + set_fs(oldfs); + return -ENOENT; + } + + rc = filp->f_op->write(filp, str, strlen(str), &offset); + filp_close(filp, 0); + set_fs(oldfs); + + if (rc >= 0) { + /* assume it worked, since the non-negative return codes aren't set very reliably. + * wait for 20 ms to avoid deadlocks on SMP systems. + * FIXME: figure out why the SMP systems need this wait, and fix the kernel. 
+ */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(20)); + return 1; + } + + return rc; +} + +/* caller must hold the iscsi_lun_probe_mutex */ +static int iscsi_probe_lun(iscsi_session_t *session, int lun) +{ + char str[80]; + int rc; + + if (lun >= ISCSI_MAX_LUN) + return 0; + + sprintf(str, "scsi add-single-device %d %d %d %d\n", + session->host_no, session->channel, session->target_id, lun); + str[sizeof(str) - 1] = '\0'; + + rc = write_proc_scsi_scsi(session, str); + if (rc < 0) { + /* clear the newline */ + str[strlen(str) - 1] = '\0'; + printk("iSCSI: session %p error %d writing '%s' to /proc/scsi/scsi\n", + session, rc, str); + return 0; + } + + return rc; +} + +static int iscsi_remove_lun(iscsi_session_t *session, int lun) +{ + char str[88]; + int rc = 0; + + sprintf(str, "scsi remove-single-device %d %d %d %d\n", + session->host_no, session->channel, session->target_id, lun); + str[sizeof(str) - 1] = '\0'; + + rc = write_proc_scsi_scsi(session, str); + if (rc < 0) { + /* clear the newline */ + str[strlen(str) - 1] = '\0'; + printk("iSCSI: session %p error %d writing '%s' to /proc/scsi/scsi\n", + session, rc, str); + return 0; + } + else { + /* removed it */ + clear_bit(lun, session->luns_activated); + clear_bit(lun, session->luns_detected); + return 1; + } + + return rc; +} + + +static void empty_directory(char *dir, char *data, int size) +{ + int fd; + struct dirent dent; + int rc, processed; + char *name = dir + strlen(dir); + + /* there should only be directories in the target dir */ + if ((fd = open(dir, O_DIRECTORY|O_RDONLY, 0)) >= 0) { + /* loop doing getdents, and unlinking files */ + do { + rc = getdents(fd, (struct dirent *)data, size); + DEBUG_FLOW("iSCSI: getdents %s, size %d, returned %d\n", + dir, size, rc); + processed = 0; + while (processed < rc) { + memcpy(&dent, &data[processed], sizeof(dent)); + strcpy(name, &data[processed] + offsetof(struct dirent, d_name)); + if (strcmp(name, ".") && strcmp(name, 
"..")) { + DEBUG_FLOW("iSCSI: unlink %s\n", dir); + unlink(dir); + } + processed += dent.d_reclen; + } + } while (rc > 0); + + name[0] = '\0'; + close(fd); + } +} + +void iscsi_remove_luns(iscsi_session_t *session) +{ + int l; + mm_segment_t oldfs; + char *data = session->rx_buffer; + int size = sizeof(session->rx_buffer) - 1; + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + char *bus_dir = lun_dir - 2; /* before the slash */ + char c; + + /* try to release the kernel's SCSI device structures for every LUN */ + down(&iscsi_lun_probe_mutex); + + oldfs = get_fs(); + set_fs( get_ds() ); + + for (l = 0; l < ISCSI_MAX_LUN; l++) { + if (session->target_link_dir[0] == '/') { + sprintf(lun_dir, "lun%d/", l); + + /* this assumes the session isn't using the rx_buffer right now */ + empty_directory(session->target_link_dir, data, size); + + rmdir(session->target_link_dir); + } + if (test_bit(l, session->luns_activated)) { + /* tell Linux to release the Scsi_Devices */ + iscsi_remove_lun(session, l); + } + } + + if (session->target_link_dir[0] == '/') { + /* and get rid of the target dir itself */ + *lun_dir = '\0'; + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + + /* if the bus dir is empty now, get rid of it too, but don't corrupt the session's target dir */ + while (*bus_dir != '/') + bus_dir--; + bus_dir++; /* leave the slash */ + c = *bus_dir; + *bus_dir = '\0'; + + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + *bus_dir = c; + } + + set_fs( oldfs ); + + up(&iscsi_lun_probe_mutex); +} + +void iscsi_remove_lun_complete(iscsi_session_t *session, int lun_id) +{ + mm_segment_t oldfs; + char data[sizeof(struct dirent) + 1]; + int size = sizeof(data) - 1; + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + char *bus_dir = lun_dir - 2; /* before the slash */ + char c; + + /* try to release the kernel's SCSI device 
structures for every LUN */ + down(&iscsi_lun_probe_mutex); + + oldfs = get_fs(); + set_fs( get_ds() ); + + if (session->target_link_dir[0] == '/') { + sprintf(lun_dir, "lun%d/", lun_id); + + /* this assumes the session isn't using the rx_buffer right now */ + empty_directory(session->target_link_dir, data, size); + + rmdir(session->target_link_dir); + } + if (test_bit(lun_id, session->luns_activated)) { + /* tell Linux to release the Scsi_Devices */ + iscsi_remove_lun(session, lun_id); + } + + if (session->target_link_dir[0] == '/') { + /* and get rid of the target dir itself */ + *lun_dir = '\0'; + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + + /* if the bus dir is empty now, get rid of it too, but don't corrupt the session's target dir */ + while (*bus_dir != '/') + bus_dir--; + bus_dir++; /* leave the slash */ + c = *bus_dir; + *bus_dir = '\0'; + + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + *bus_dir = c; + } + set_fs( oldfs ); + + up(&iscsi_lun_probe_mutex); +} + +/* find all dir prefixes of pathname, and make them all if they don't exist */ +static void ensure_directories_exist(char *pathname, mode_t dir_mode) +{ + char *end = pathname; + + /* skip leading slashes */ + while (end && *end && (*end == '/')) + end++; + + while (end && (*end != '\0')) { + /* if there is another slash, make the dir. + * FIXME: we ought to ignore errors when the directory exists, + * but report errors where the directory doesn't exist and + * we failed to create it. 
+ */ + while ((*end != '/') && (*end != '\0')) + end++; + + if (*end == '/') { + *end = '\0'; + mkdir(pathname, dir_mode); + *end = '/'; + end++; + } + } +} + +static int get_device_scsi_quad(char *device_name, int *host, int *channel, int *target, int *lun) +{ + int ret = 0; + u_long info[2]; + struct file *filp = NULL; + struct inode *inode = NULL; + + filp = filp_open(device_name, O_RDONLY|O_NONBLOCK, 0); + if (IS_ERR(filp)) { + return 0; + } + + memset(info, 0, sizeof(info)); + inode = filp->f_dentry->d_inode; + if (filp->f_op->ioctl(inode, filp, SCSI_IOCTL_GET_IDLUN, (unsigned long)info) == 0) { + if (target) + *target = info[0] & 0xff; + if (lun) + *lun = (info[0] >> 8) & 0xff; + if (channel) + *channel = (info[0] >> 16) & 0xff; +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) ) + /* 2.4 kernels give us all the info we need with that ioctl. */ + if (host) + *host = ((info[0] >> 24) & 0xff); + + ret = 1; +#else + /* 2.2 kernels have another ioctl to get the host number, and set the host + * above to something useless to us. + */ + memset(info, 0, sizeof(info)); + if (filp->f_op->ioctl(inode, filp, SCSI_IOCTL_GET_BUS_NUMBER, (unsigned long)info) == 0) { + if (host) + *host = info[0]; + ret = 1; + } +#endif + } + + filp_close(filp, 0); + return ret; +} + + +static void iscsi_update_disk_links(iscsi_session_t *session, int max_sd_devices, int max_sd_partitions, mode_t dir_mode) +{ + int i; + char devname[20]; + /* we've reserved enough space in session->target_link_dir so that we can use it to build pathnames */ + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + + /* FIXME: can we get the number of devices supported from the running kernel? 
*/ + for (i=0; i < max_sd_devices; i++) { + int host = -1, channel = -1, id = -1, lun = -1; + + if (i < 26) { + sprintf(devname, "/dev/sd%c", 'a' + i); + } + else { + /* double char names for disknum 26+ */ + sprintf(devname, "/dev/sd%c%c", 'a' + (i / 26) - 1, 'a' + (i % 26)); + } + + if (get_device_scsi_quad(devname, &host, &channel, &id, &lun)) { + if ((host == session->host_no) && (channel == session->channel) && (id == session->target_id)) { + char *partition = devname + strlen(devname); + char *link; + int p; + + DEBUG_INIT("iSCSI: disk device node %s = bus %d target %d LUN %d\n", devname, session->iscsi_bus, id, lun); + + /* ensure the LUN dir exists */ + sprintf(lun_dir, "lun%d/", lun); + ensure_directories_exist(session->target_link_dir, dir_mode); + + link = lun_dir + strlen(lun_dir); + + /* symlink the whole-disk device */ + strcpy(link, "disk"); + unlink(session->target_link_dir); /* remove any existing symlink */ + symlink(devname, session->target_link_dir); /* make a new symlink */ + + /* and make links for each possible disk partition as well, + * since we don't want to have to track what partitions get added or removed. + * This works just like the normal partition device nodes, which + * are always present, but may or may not be openable. + */ + for (p = 1; p <= max_sd_partitions; p++) { + sprintf(partition, "%d", p); + sprintf(link, "part%d", p); + unlink(session->target_link_dir); + symlink(devname, session->target_link_dir); + } + + } + } + } + + /* restore the session's target dir */ + *lun_dir = '\0'; +} + +static void iscsi_update_tape_links(iscsi_session_t *session, int max_st_devices, mode_t dir_mode) +{ + int i; + char devname[20]; + /* we've reserved enough space in session->target_link_dir so that we can use it to build pathnames */ + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + + /* FIXME: can we get the number of devices supported from the running kernel? 
*/ + for (i=0; i < max_st_devices; i++) { + int host = -1, channel = -1, id = -1, lun = -1; + + /* we check the no-rewind device to avoid having side-effects */ + sprintf(devname, "/dev/nst%d", i); + + if (get_device_scsi_quad(devname, &host, &channel, &id, &lun)) { + if ((host == session->host_no) && (channel == session->channel) && (id == session->target_id)) { + char *link; + + DEBUG_INIT("iSCSI: tape device node %s = bus %d target %d LUN %d\n", devname, session->iscsi_bus, id, lun); + + /* ensure the LUN dir exists */ + sprintf(lun_dir, "lun%d/", lun); + ensure_directories_exist(session->target_link_dir, dir_mode); + + link = lun_dir + strlen(lun_dir); + + /* auto-rewind nodes */ + strcpy(link, "mt"); + unlink(session->target_link_dir); /* remove any existing symlink */ + sprintf(devname, "/dev/st%d", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + + strcpy(link, "mtl"); + unlink(session->target_link_dir); /* remove any existing symlink */ + sprintf(devname, "/dev/st%dl", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + + strcpy(link, "mtm"); + unlink(session->target_link_dir); /* remove any existing symlink */ + sprintf(devname, "/dev/st%dm", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + + strcpy(link, "mta"); + unlink(session->target_link_dir); /* remove any existing symlink */ + sprintf(devname, "/dev/st%da", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + + /* no rewind nodes */ + strcpy(link, "mtn"); + unlink(session->target_link_dir); /* remove any existing symlink */ + sprintf(devname, "/dev/nst%d", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + + strcpy(link, "mtln"); + unlink(session->target_link_dir); /* remove any existing symlink */ + sprintf(devname, "/dev/nst%dl", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + + strcpy(link, "mtmn"); + unlink(session->target_link_dir); /* 
remove any existing symlink */ + sprintf(devname, "/dev/nst%dm", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + + strcpy(link, "mtan"); + unlink(session->target_link_dir); /* remove any existing symlink */ + sprintf(devname, "/dev/nst%da", i); + symlink(devname, session->target_link_dir); /* make a new symlink */ + } + } + } + + /* restore the session's target dir */ + *lun_dir = '\0'; +} + +static void iscsi_update_generic_links(iscsi_session_t *session, int max_sg_devices, mode_t dir_mode) +{ + int i; + char devname[20]; + /* we've reserved enough space in session->target_link_dir so that we can use it to build pathnames */ + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + char *link; + + /* FIXME: can we get the number of devices supported from the running kernel? */ + for (i=0; i < max_sg_devices; i++) { + int host = -1, channel = -1, id = -1, lun = -1; + + sprintf(devname, "/dev/sg%d", i); + + if (get_device_scsi_quad(devname, &host, &channel, &id, &lun)) { + if ((host == session->host_no) && (channel == session->channel) && (id == session->target_id)) { + DEBUG_INIT("iSCSI: generic device node %s = bus %d target %d LUN %d\n", devname, session->iscsi_bus, id, lun); + + /* ensure the LUN dir exists */ + sprintf(lun_dir, "lun%d/", lun); + ensure_directories_exist(session->target_link_dir, dir_mode); + + link = lun_dir + strlen(lun_dir); + + strcpy(link, "generic"); + unlink(session->target_link_dir); /* remove any existing symlink */ + symlink(devname, session->target_link_dir); /* make a new symlink */ + } + } + } + + /* restore the session's target dir */ + *lun_dir = '\0'; +} + +static void iscsi_update_cd_links(iscsi_session_t *session, int max_sr_devices, mode_t dir_mode) +{ + int i; + char devname[20]; + /* we've reserved enough space in session->target_link_dir so that we can use it to build pathnames */ + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + char *link; 
+ + /* FIXME: can we get the number of devices supported from the running kernel? */ + for (i=0; i < max_sr_devices; i++) { + int host = -1, channel = -1, id = -1, lun = -1; + + /* FIXME: the distribution may be using /dev/sr instead of /dev/scd */ + sprintf(devname, "/dev/scd%d", i); + + if (get_device_scsi_quad(devname, &host, &channel, &id, &lun)) { + if ((host == session->host_no) && (channel == session->channel) && (id == session->target_id)) { + DEBUG_INIT("iSCSI: cdrom device node %s = bus %d target %d LUN %d\n", devname, session->iscsi_bus, id, lun); + + /* ensure the LUN dir exists */ + sprintf(lun_dir, "lun%d/", lun); + ensure_directories_exist(session->target_link_dir, dir_mode); + + link = lun_dir + strlen(lun_dir); + + strcpy(link, "cd"); + unlink(session->target_link_dir); /* remove any existing symlink */ + symlink(devname, session->target_link_dir); /* make a new symlink */ + } + } + } + + /* restore the session's target dir */ + *lun_dir = '\0'; +} + +/* compute the intersection of the LUNS detected and configured, and probe each LUN */ +void iscsi_probe_luns(iscsi_session_t *session, uint32_t *lun_bitmap, scsi_device_info_t *device_info) +{ + int l; + int detected = 0; + int probed = 0; + int activated = 0; + + /* try wait for our turn to probe, to keep the device node ordering as repeatable as possible */ + DEBUG_INIT("iSCSI: session %p to %s waiting to probe LUNs at %lu, probe order %d\n", + session, session->log_name, jiffies, session->probe_order); + + if (!wait_for_probe_order(session)) { + DEBUG_INIT("iSCSI: session %p to %s couldn't probe LUNs, error waiting for probe order\n", + session, session->log_name); + return; + } + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p to %s terminated while waiting to probe LUNs\n", session, session->log_name); + goto done; + } + if (signal_pending(current)) { + printk("iSCSI: session %p ioctl killed while waiting to probe LUNs\n", session); + goto done; + } + + 
/* make sure we're the only driver process trying to add or remove LUNs */ + if (down_interruptible(&iscsi_lun_probe_mutex)) { + printk("iSCSI: session %p to %s interrupted while probing LUNs\n", session, session->log_name); + goto done; + } + + /* need to set the host's max_channel, max_id, max_lun, since we + * zero them in iscsi_detect in order to disable the scan that + * occurs during scsi_register_host. + */ + session->hba->host->max_id = ISCSI_MAX_TARGET_IDS_PER_BUS; + session->hba->host->max_lun = ISCSI_MAX_LUNS_PER_TARGET; + session->hba->host->max_channel = ISCSI_MAX_CHANNELS_PER_HBA - 1; /* convert from count to index */ + smp_mb(); + + DEBUG_INIT("iSCSI: probing LUNs for session %p to %s at %lu, probe_order %d at %lu\n", + session, session->log_name, jiffies, session->probe_order, jiffies); + for (l = 0; l < ISCSI_MAX_LUN; l++) { + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto give_up; + if (signal_pending(current)) + goto give_up; + if (test_bit(l, session->luns_detected)) { + /* Check if lun has been removed */ + if (!test_bit(l, session->luns_found)) { + if (iscsi_remove_lun(session,l) != 0 ) { + char buffer[sizeof(struct dirent) + 1],c; + mm_segment_t oldfs; + int size = sizeof(buffer) - 1; + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + char *bus_dir = lun_dir - 2; /* before the slash */ + + oldfs = get_fs(); + set_fs(get_ds()); + if (session->target_link_dir[0] == '/') { + sprintf(lun_dir, "lun%d/", l); + empty_directory(session->target_link_dir, buffer, size); + rmdir(session->target_link_dir); + } + /* If all luns on this target have been deleted. + * remove the target entry. 
+ * + */ + if (session->target_link_dir[0] == '/') { + /* and get rid of the target dir itself */ + *lun_dir = '\0'; + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + /* if the bus dir is empty now, get rid of it too, but don't corrupt the session's target dir */ + while (*bus_dir != '/') + bus_dir--; + bus_dir++; + c = *bus_dir; + *bus_dir = '\0'; + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + *bus_dir = c; + } + set_fs(oldfs); + } + } else { + detected++; + + /* if allowed and not already activated (successfully probed), probe it */ + if ((lun_bitmap[l / 32] & (1 << (l % 32))) && !test_bit(l, session->luns_activated)) { + DEBUG_FLOW("iSCSI: session %p probing LUN %d at %lu\n", session, l, jiffies); + iscsi_probe_lun(session, l); + probed++; + if (test_bit(l, session->luns_activated)) + activated++; + } + } + } else { + if (test_bit(l, session->luns_activated)) { + if (iscsi_remove_lun(session,l) != 0 ) { + char buffer[sizeof(struct dirent) + 1],c; + mm_segment_t oldfs; + int size = sizeof(buffer) - 1; + char *lun_dir = session->target_link_dir + strlen(session->target_link_dir); + char *bus_dir = lun_dir - 2; /* before the slash */ + + oldfs = get_fs(); + set_fs(get_ds()); + if (session->target_link_dir[0] == '/') { + sprintf(lun_dir, "lun%d/", l); + empty_directory(session->target_link_dir, buffer, size); + rmdir(session->target_link_dir); + } + /* If all luns on this target have been deleted. + * remove the target entry. 
+ * + */ + if (session->target_link_dir[0] == '/') { + /* and get rid of the target dir itself */ + *lun_dir = '\0'; + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + /* if the bus dir is empty now, get rid of it too, but don't corrupt the session's target dir */ + while (*bus_dir != '/') + bus_dir--; + bus_dir++; + c = *bus_dir; + *bus_dir = '\0'; + DEBUG_FLOW("iSCSI: rmdir %s\n", session->target_link_dir); + rmdir(session->target_link_dir); + *bus_dir = c; + } + set_fs(oldfs); + } + } + } + } + + if (detected == 0) { + printk("iSCSI: no LUNs detected for session %p to %s\n", session, session->log_name); + + } + else if (LOG_ENABLED(ISCSI_LOG_INIT)) { + printk("iSCSI: session %p to %s probed %d of %d LUNs detected, %d new LUNs activated\n", + session, session->log_name, probed, detected, activated); + } + + /* optionally set up a symlink tree. We do this in the kernel so that we + * can guard it with the lun_probe_mutex. The high-level SCSI drivers in Linux tend + * to crash if a device node is opened while the Scsi_Device is still being + * initialized, so we want to make sure we're not doing any probes when we open + * lots of device nodes. + */ + if (session->target_link_dir[0] == '/') { + mm_segment_t oldfs = get_fs(); + + set_fs( get_ds() ); + + /* make the target dir, so that the user can always see the target has a session, even if + * LUN probing fails to find anything or no target drivers have attached. 
+ */ + ensure_directories_exist(session->target_link_dir, session->dir_mode); + + if (device_info->max_sd_devices > 0) { + DEBUG_INIT("iSCSI: session %p updating disk links under %s\n", session, session->target_link_dir); + iscsi_update_disk_links(session, device_info->max_sd_devices, device_info->max_sd_partitions, session->dir_mode); + } + if (device_info->max_sg_devices > 0) { + DEBUG_INIT("iSCSI: session %p updating generic links under %s\n", session, session->target_link_dir); + iscsi_update_generic_links(session, device_info->max_sg_devices, session->dir_mode); + } + if (device_info->max_st_devices > 0) { + DEBUG_INIT("iSCSI: session %p updating tape links under %s\n", session, session->target_link_dir); + iscsi_update_tape_links(session, device_info->max_st_devices, session->dir_mode); + } + if (device_info->max_sr_devices > 0) { + DEBUG_INIT("iSCSI: session %p updating cdrom links under %s\n", session, session->target_link_dir); + iscsi_update_cd_links(session, device_info->max_sr_devices, session->dir_mode); + } + + set_fs(oldfs); + } + + give_up: + up(&iscsi_lun_probe_mutex); + + done: + /* clean up after wait_for_probe_order, and possibly start the next session probing */ + iscsi_probe_finished(session); +} + + +typedef struct iscsi_cmnd { + struct semaphore done_sem; + Scsi_Cmnd sc; + unsigned int bufflen; + uint8_t buffer[1]; +} iscsi_cmnd_t; + + +/* callback function for Scsi_Cmnd's generated by the iSCSI driver itself */ +void iscsi_done(Scsi_Cmnd *sc) +{ + iscsi_cmnd_t *c = (iscsi_cmnd_t *)sc->buffer; + + up(&c->done_sem); +} + +static int iscsi_do_cmnd(iscsi_session_t *session, iscsi_cmnd_t *c, unsigned int attempts_allowed) +{ + Scsi_Cmnd *sc = NULL; + int queue_attempts = 0; + + if (c->sc.host) { + DEBUG_FLOW("iSCSI: session %p iscsi_do_cmnd %p to (%u %u %u %u), Cmd 0x%02x, %u retries, buffer %p, bufflen %u\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, c->sc.cmnd[0], + attempts_allowed, c->sc.request_buffer, 
c->sc.request_bufflen); + } + else { + printk("iSCSI: session %p iscsi_do_cmnd %p, buffer %p, bufflen %u, host %p\n", + session, c, c->sc.request_buffer, c->sc.request_bufflen, c->sc.host); + return 0; + } + if (!c->sc.request_buffer) + return 0; + if (!c->sc.request_bufflen) + return 0; + + sc = &(c->sc); + sc->retries = -1; + sc->allowed = attempts_allowed; + + retry: + while (++sc->retries < sc->allowed) { + if (signal_pending(current)) + return 0; + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + return 0; + + sc->result = 0; + memset(sc->sense_buffer, 0, sizeof(sc->sense_buffer)); + memset(c->buffer, 0, c->bufflen); + + /* try to queue the command */ + queue_attempts = 0; + for (;;) { + sema_init(&c->done_sem, 0); + smp_mb(); + + if (signal_pending(current)) + return 0; + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + return 0; + + DEBUG_INIT("iSCSI: detect_luns queueing %p to session %p at %lu\n", sc, session, jiffies); + + /* give up eventually, in case the replacement timeout is in effect. + * we don't want to loop forever trying to queue to a session + * that may never accept commands. + */ + if (iscsi_queue(session, sc, iscsi_done)) { + break; + } + else if (queue_attempts++ >= 500) { + /* give up after 10 seconds */ + return 0; + } + + /* command not queued, wait a bit and try again */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(20)); + } + + DEBUG_QUEUE("iSCSI: session %p queued iscsi_cmnd %p, buffer %p, bufflen %u, scsi_done %p\n", + session, c, c->sc.request_buffer, c->sc.request_bufflen, c->sc.scsi_done); + + /* wait til either the command completes, or we get signalled. 
*/ + if (down_interruptible(&c->done_sem)) { + /* if we got signalled, squash the command and give up */ + iscsi_squash_cmnd(session, sc); + return 0; + } + + DEBUG_QUEUE("iSCSI: session %p hba %p host %p woken up by iscsi_cmnd %p, buffer %p, bufflen %u\n", + session, session->hba, session->hba->host, c, c->sc.request_buffer, c->sc.request_bufflen); + + /* the command completed, check the result and decide if it needs to be retried. */ + DEBUG_FLOW("iSCSI: session %p iscsi cmnd %p to (%u %u %u %u), Cmd 0x%02x, " + "host byte 0x%x, SCSI status 0x%x, residual %u\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, c->sc.cmnd[0], + (sc->result >> 24) & 0xFF, sc->result & 0xFF, sc->resid); + + /* check the host byte */ + switch (host_byte(sc->result)) { + case DID_OK: + /* no problems so far */ + break; + case DID_NO_CONNECT: + /* give up, we can't talk to the device */ + printk("iSCSI: session %p failing iscsi cmnd %p to (%u %u %u %u), Cmd 0x%02x, " + "host byte 0x%x, SCSI status 0x%x, residual %u\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, c->sc.cmnd[0], + (sc->result >> 24) & 0xFF, sc->result & 0xFF, sc->resid); + return 0; + case DID_ERROR: + case DID_SOFT_ERROR: + case DID_ABORT: + case DID_BUS_BUSY: + case DID_PARITY: + case DID_TIME_OUT: + case DID_RESET: + default: + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: session %p iscsi cmnd %p to (%u %u %u %u), Cmd 0x%02x, " + "host byte 0x%x, SCSI status 0x%x, residual %u\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, c->sc.cmnd[0], + (sc->result >> 24) & 0xFF, sc->result & 0xFF, sc->resid); + + /* some sort of problem, possibly retry */ + goto retry; + } + + /* check the SCSI status byte. 
Note, Linux values are right-shifted once compared to the SCSI spec */ + switch (status_byte(sc->result)) { + case GOOD: + case COMMAND_TERMINATED: +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,19) + /* make sure we got enough of a response */ + if (sc->resid && ((iscsi_expected_data_length(sc) - sc->resid) < sc->underflow)) { + /* try again */ + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: session %p iscsi cmnd %p to (%u %u %u %u), Cmd 0x%02x, " + "residual %u, retrying to get %u bytes desired\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, + c->sc.cmnd[0], sc->resid, sc->underflow); + goto retry; + } +#endif + /* all done */ + return 1; + case BUSY: /* device is busy, try again later */ + case QUEUE_FULL: /* tagged queuing device has a full queue, wait a bit and try again. */ + sc->allowed++; + if (sc->allowed > 100) { + printk("iSCSI: session %p iscsi cmnd %p to (%u %u %u %u), Cmd 0x%02x, SCSI status 0x%x, out of retries\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, c->sc.cmnd[0], + sc->result & 0xFF); + return 0; + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(20)); + goto retry; + case CONDITION_GOOD: + case INTERMEDIATE_GOOD: + case INTERMEDIATE_C_GOOD: + /* we should never get the linked command return codes */ + case RESERVATION_CONFLICT: + /* this is probably never going to happen for INQUIRY or REPORT_LUNS, but retry if it does */ + printk("iSCSI: session %p iscsi_do_cmnd %p SCSI status 0x%x at %lu, retrying\n", + session, c, sc->result & 0xFF, jiffies); + goto retry; + case CHECK_CONDITION: + /* look at the sense. 
If it's illegal request, don't bother retrying the command */ + if ((sc->sense_buffer[0] & 0x70) == 0x70) { + switch (SENSE_KEY(sc->sense_buffer)) { + case ILLEGAL_REQUEST: + printk("iSCSI: session %p iscsi cmnd %p to (%u %u %u %u), Cmd 0x%02x, illegal request\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, c->sc.cmnd[0]); + return 0; + default: + /* possibly retry */ + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: session %p iscsi cmnd %p to (%u %u %u %u), Cmd 0x%02x with sense, retrying\n", + session, c, c->sc.host->host_no, c->sc.channel, c->sc.target, c->sc.lun, + c->sc.cmnd[0]); + goto retry; + } + } + goto retry; + default: + printk("iSCSI: session %p iscsi_do_cmnd %p unexpected SCSI status 0x%x at %lu\n", + session, c, sc->result & 0xFF, jiffies); + return 0; + } + } + + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: session %p iscsi_do_cmnd %p SCSI status 0x%x, out of retries at %lu\n", + session, c, sc->result & 0xFF, jiffies); + + return 0; +} + +void send_tur(iscsi_session_t *session) +{ + iscsi_cmnd_t *c = NULL; + Scsi_Cmnd *sc = NULL; + size_t cmd_size = sizeof(iscsi_cmnd_t); + unsigned int bufflen = 255; + + cmd_size += bufflen; + + c = kmalloc(cmd_size, GFP_KERNEL); + if (!c) { + printk("iSCSI: session %p send_tur couldn't allocate a Scsi_Cmnd\n", session); + return; + } + + /* initialize */ + memset(c, 0, cmd_size); + sema_init(&c->done_sem, 0); + c->bufflen = bufflen; + DEBUG_ALLOC("iSCSI: session %p hba %p host %p allocated iscsi cmnd %p, size %d, buffer %p, bufflen %u, end %p\n", + session, session->hba, session->hba->host, c, cmd_size, c->buffer, c->bufflen, c->buffer + c->bufflen); + + /* fill in the basic required info in the Scsi_Cmnd */ + sc = &(c->sc); + sc->host = session->hba->host; + sc->channel = session->channel; + sc->target = session->target_id; + sc->lun = 0; + sc->use_sg = 0; + sc->request_buffer = c->buffer; + sc->request_bufflen = c->bufflen; + sc->scsi_done = iscsi_done; + 
sc->timeout_per_command = 30 * HZ; + sc->resid = 0; + sc->underflow = 8; + init_timer(&sc->eh_timeout); + /* save a pointer to the iscsi_cmnd in the Scsi_Cmnd, so that iscsi_done can + use it */ + + sc->buffer = (void *)c; + { + if (signal_pending(current)) { + DEBUG_INIT("iSCSI: session %p send_tur aborted by signal\n", session); + goto done; + } + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto done; + + sc->cmd_len = 6; + memset(sc->cmnd, 0, sizeof(sc->cmnd)); + sc->cmnd[0] = TEST_UNIT_READY; + sc->cmnd[1] = 0; + sc->cmnd[2] = 0; + sc->cmnd[3] = 0; + sc->cmnd[4] = 0; + sc->cmnd[5] = 0; + + smp_mb(); + if (iscsi_do_cmnd(session, c, 6)) { + } else { + printk("\niSCSI: Received a sense for a TEST UNIT READY\n"); + } + } + +done: + kfree(c); +} + +void reinitialize_disk(iscsi_session_t *session) +{ + iscsi_cmnd_t *c = NULL; + Scsi_Cmnd *sc = NULL; + size_t cmd_size = sizeof(iscsi_cmnd_t); + unsigned int bufflen = 255; + + cmd_size += bufflen; + + c = kmalloc(cmd_size, GFP_KERNEL); + if (!c) { + printk("iSCSI: session %p reinitialize_disk couldn't allocate a Scsi_Cmnd\n", session); + return; + } + + /* initialize */ + memset(c, 0, cmd_size); + sema_init(&c->done_sem, 0); + c->bufflen = bufflen; + DEBUG_ALLOC("iSCSI: session %p hba %p host %p allocated iscsi cmnd %p, size %d, buffer %p, bufflen %u, end %p\n", + session, session->hba, session->hba->host, c, cmd_size, c->buffer, c->bufflen, c->buffer + c->bufflen); + + /* fill in the basic required info in the Scsi_Cmnd */ + sc = &(c->sc); + sc->host = session->hba->host; + sc->channel = session->channel; + sc->target = session->target_id; + sc->lun = 0; + sc->use_sg = 0; + sc->request_buffer = c->buffer; + sc->request_bufflen = c->bufflen; + sc->scsi_done = iscsi_done; + sc->timeout_per_command = 30 * HZ; + sc->resid = 0; + sc->underflow = 8; + init_timer(&sc->eh_timeout); + /* save a pointer to the iscsi_cmnd in the Scsi_Cmnd, so that iscsi_done can + use it */ + + sc->buffer = (void *)c; + { + if 
(signal_pending(current)) { + DEBUG_INIT("iSCSI: session %p reinitialize_disk aborted by signal\n", session); + goto done; + } + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto done; + + sc->cmd_len = 6; + memset(sc->cmnd, 0, sizeof(sc->cmnd)); + sc->cmnd[0] = START_STOP; + sc->cmnd[1] = 0; + sc->cmnd[1] |= 1; + sc->cmnd[2] = 0; + sc->cmnd[3] = 0; + sc->cmnd[4] = 1; + sc->cmnd[5] = 0; + + smp_mb(); + if (iscsi_do_cmnd(session, c, 6)) { + } else { + printk("\niSCSI: Received a sense for a START STOP\n"); + } + } + +done: + kfree(c); +} + + +static void make_report_luns(Scsi_Cmnd *sc, uint32_t max_entries) +{ + uint32_t length = 8 + (max_entries * 8); /* 8 byte header plus 8 bytes per LUN */ + + sc->cmd_len = 10; + sc->request_bufflen = length; + sc->underflow = 8; /* need at least the length */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,19) + sc->resid = 0; +#endif + + /* CDB */ + memset(sc->cmnd, 0, sizeof(sc->cmnd)); + sc->cmnd[0] = REPORT_LUNS; + sc->cmnd[1] = 0; + sc->cmnd[2] = 0; /* either reserved or select report in various versions of SCSI-3 */ + sc->cmnd[3] = 0; + sc->cmnd[4] = 0; + sc->cmnd[5] = 0; + sc->cmnd[6] = (length >> 24) & 0xFF; + sc->cmnd[7] = (length >> 16) & 0xFF; + sc->cmnd[8] = (length >> 8) & 0xFF; + sc->cmnd[9] = (length) & 0xFF; +} + +static void make_inquiry(Scsi_Cmnd *sc, int lun0_scsi_level) +{ + sc->cmd_len = 6; + sc->request_bufflen = 255; + if (sc->lun == 0) + sc->underflow = 3; /* we need at least the peripheral code and SCSI version */ + else + sc->underflow = 1; /* we need at least the peripheral code */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,19) + sc->resid = 0; +#endif + + memset(sc->cmnd, 0, sizeof(sc->cmnd)); + sc->cmnd[0] = INQUIRY; + if (lun0_scsi_level >= 0x3) + sc->cmnd[1] = 0; /* reserved in SCSI-3 and higher */ + else + sc->cmnd[1] = (sc->lun << 5) & 0xe0; + + sc->cmnd[2] = 0; + sc->cmnd[3] = 0; + sc->cmnd[4] = 255; /* length */ + sc->cmnd[5] = 0; +} + +/* scan for LUNs */ +void 
iscsi_detect_luns(iscsi_session_t *session) +{ + int l; + iscsi_cmnd_t *c = NULL; + Scsi_Cmnd *sc = NULL; + int lun0_scsi_level = 0; + size_t cmd_size = sizeof(iscsi_cmnd_t); + unsigned int bufflen = 0; + uint32_t last_luns = 0; + uint32_t luns = 32; /* start small to avoid bugs in REPORT_LUNS handling */ + int report_luns_failed = 0; + + memset(session->luns_found,0,sizeof(session->luns_found)); + + /* need enough buffer space for replies to INQUIRY and REPORT_LUNS */ + if ((8 + (ISCSI_MAX_LUN * 8)) < 255) + bufflen = 255; + else + bufflen = (ISCSI_MAX_LUN * 8) + 8; + + cmd_size += bufflen; + + c = kmalloc(cmd_size, GFP_KERNEL); + if (!c) { + printk("iSCSI: session %p iscsi_detect_luns couldn't allocate a Scsi_Cmnd\n", session); + return; + } + + /* initialize */ + memset(c, 0, cmd_size); + sema_init(&c->done_sem, 0); + c->bufflen = bufflen; + DEBUG_ALLOC("iSCSI: session %p hba %p host %p allocated iscsi cmnd %p, size %d, buffer %p, bufflen %u, end %p\n", + session, session->hba, session->hba->host, c, cmd_size, c->buffer, c->bufflen, c->buffer + c->bufflen); + + /* fill in the basic required info in the Scsi_Cmnd */ + sc = &(c->sc); + sc->host = session->hba->host; + sc->channel = session->channel; + sc->target = session->target_id; + sc->lun = 0; + sc->use_sg = 0; + sc->request_buffer = c->buffer; + sc->request_bufflen = c->bufflen; + sc->scsi_done = iscsi_done; + sc->timeout_per_command = 30 * HZ; + init_timer(&sc->eh_timeout); + /* save a pointer to the iscsi_cmnd in the Scsi_Cmnd, so that iscsi_done can use it */ + sc->buffer = (void *)c; + + do { + if (signal_pending(current)) { + DEBUG_INIT("iSCSI: session %p detect LUNs aborted by signal\n", session); + goto done; + } + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto done; + + /* send a REPORT_LUNS to LUN 0. If it works, we know the LUNs. 
*/ + last_luns = luns; + make_report_luns(sc, luns); + smp_mb(); + if (iscsi_do_cmnd(session, c, 6)) { + uint8_t *lun_list = c->buffer + 8; + int luns_listed; + uint32_t length = 0; + + /* get the list length the target has */ + length = c->buffer[0] << 24; + length |= c->buffer[1] << 16; + length |= c->buffer[2] << 8; + length |= c->buffer[3]; + + if (length < 8) { + /* odd, assume REPORT_LUNS is broken, fall back to doing INQUIRY */ + DEBUG_INIT("iSCSI: session %p REPORT_LUNS length 0, falling back to INQUIRY\n", session); + report_luns_failed = 1; + break; + } + + /* figure out how many luns we were told about this time */ + if ((length / 8U) < luns) + luns_listed = length / 8U; + else + luns_listed = luns; + + /* loop until we run out of data, or out of buffer */ + for (l = 0; l < luns_listed; l++) { + int address_method = (lun_list[0] & 0xc0) >> 6; + int lun; + + + if (LOG_ENABLED(ISCSI_LOG_LOGIN) || LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: session %p (%u %u %u *) REPORT_LUNS[%d] = %02x %02x %02x %02x %02x %02x %02x %02x\n", + session, session->host_no, session->channel, session->target_id, l, + lun_list[0], lun_list[1], lun_list[2], lun_list[3], + lun_list[4], lun_list[5], lun_list[6], lun_list[7]); + + switch (address_method) { + case 0x0: { + /* single-level LUN if bus id is 0, else peripheral device addressing */ + lun = lun_list[1]; + set_bit(lun, session->luns_detected); + /* This is useful while checking for deleted luns */ + set_bit(lun, session->luns_found); + break; + } + case 0x1: { + /* flat-space addressing */ + lun = lun_list[1]; + set_bit(lun, session->luns_detected); + /* This is useful while checking for deleted luns */ + set_bit(lun, session->luns_found); + break; + } + case 0x2: { + /* logical unit addressing method */ + lun = lun_list[1] & 0x1F; + set_bit(lun, session->luns_detected); + /* This is useful while checking for deleted luns */ + set_bit(lun, session->luns_found); + break; + } + case 0x3: { + /* extended logical unit 
addressing method is too complicated for us to want to deal with */ + printk("iSCSI: session %p (%u %u %u *) REPORT_LUNS[%d] with extended LU address method 0x%x ignored\n", + session, session->host_no, session->channel, session->target_id, l, address_method); + break; + } + default: + printk("iSCSI: session %p (%u %u %u *) REPORT_LUNS[%d] with unknown address method 0x%x ignored\n", + session, session->host_no, session->channel, session->target_id, l, address_method); + break; + } + + /* next LUN in the list */ + lun_list += 8; + } + + /* decide how many luns to ask for on the next iteration, if there is one */ + luns = length / 8U; + if (luns > ISCSI_MAX_LUN) { + /* we only have buffer space for so many LUNs */ + luns = ISCSI_MAX_LUN; + printk("iSCSI: session %p REPORT_LUNS length %u (%u entries) truncated to %u (%u entries)\n", + session, length, (length / 8) - 1, (luns + 1) * 8U, luns); + } + + } + else { + /* REPORT_LUNS failed, fall back to doing INQUIRY */ + DEBUG_INIT("iSCSI: session %p REPORT_LUNS failed, falling back to INQUIRY\n", session); + report_luns_failed = 1; + break; + } + + } while (luns > last_luns); + + if (signal_pending(current)) { + DEBUG_INIT("iSCSI: session %p detect LUNs aborted by signal\n", session); + goto done; + } + + if (report_luns_failed) { + /* if REPORT_LUNS failed, then either it's a SCSI-2 device + * that doesn't understand the command, or it's a SCSI-3 + * device that only has one LUN and decided not to implement + * REPORT_LUNS. In either case, we're safe just probing LUNs + * 0-7 with INQUIRY, since SCSI-2 can't have more than 8 LUNs, + * and SCSI-3 should do REPORT_LUNS if it has more than 1 LUN. + */ + for (l = 0; l < 8; l++) { + sc->lun = l; + sc->request_buffer = c->buffer; + make_inquiry(sc, lun0_scsi_level); + + /* we'll make a note of the LUN when the rx thread receives the response. + * No need to do it again here. 
+ */ + if (iscsi_do_cmnd(session, c, 6)) { + /* we do need to record the SCSI level so we can build inquiries properly though */ + if (l == 0) { + lun0_scsi_level = c->buffer[2] & 0x07; + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: session %p (%u %u %u %u) is SCSI level %d\n", + session, sc->host->host_no, sc->channel, sc->target, sc->lun, lun0_scsi_level); + } + } + else { + /* just assume there's no LUN */ + } + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + break; + if (signal_pending(current)) + break; + } + } + + done: + DEBUG_ALLOC("iSCSI: session %p hba %p host %p kfree iscsi cmnd %p, bufflen %u\n", + session, session->hba, session->hba->host, c, c->bufflen); + kfree(c); +} + +int iscsi_reset_lun_probing(void) +{ + int ret = 0; + + spin_lock(&iscsi_lun_probe_lock); + if ((iscsi_currently_probing == NULL) && (iscsi_lun_probe_head == NULL)) { + /* if we're not currently probing, reset */ + DEBUG_INIT("iSCSI: reset LUN probing at %lu\n", jiffies); + iscsi_next_probe = 0; + iscsi_lun_probe_start = 0; + smp_mb(); + ret = 1; + } + else { + DEBUG_INIT("iSCSI: failed to reset LUN probing at %lu, currently probing %p, queue head %p\n", + jiffies, iscsi_currently_probing, iscsi_lun_probe_head); + } + spin_unlock(&iscsi_lun_probe_lock); + + return ret; +} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-probe.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,45 @@ +#ifndef ISCSI_PROBE_H_ +#define ISCSI_PROBE_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. 
+ * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-probe.h,v 1.2.4.1 2003/08/18 12:31:38 smhatre Exp $ + * + * iscsi-probe.h + * + * include for iSCSI kernel module LUN probing + * + */ + +/* various ioctls need these */ +extern void iscsi_detect_luns(iscsi_session_t *session); +extern void reinitialize_disk(iscsi_session_t *session); +extern void send_tur(iscsi_session_t *session); +extern void iscsi_probe_luns(iscsi_session_t *session, uint32_t *lun_bitmap, scsi_device_info_t *device_info); +extern void iscsi_remove_luns(iscsi_session_t *session); +extern void iscsi_remove_lun_complete(iscsi_session_t *session, int lun); +extern int iscsi_reset_lun_probing(void); + +/* we check the done function on commands to distinguish commands created by the driver itself */ +extern void iscsi_done(Scsi_Cmnd *sc); + +/* timer needs these to know when to start lun probing */ +extern void iscsi_possibly_start_lun_probing(void); +extern volatile unsigned long iscsi_lun_probe_start; + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-protocol.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-protocol.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-protocol.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-protocol.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,610 @@ +#ifndef 
ISCSI_PROTOCOL_H_ +#define ISCSI_PROTOCOL_H_ +/* + * iSCSI connection daemon + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-protocol.h,v 1.22 2003/01/24 00:14:52 smferris Exp $ + * + * This file sets up definitions of messages and constants used by the + * iSCSI protocol. + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(VXWORKS) +# include +#elif defined(LINUX) +# ifdef __KERNEL__ +# include +# include +# else +# include +# endif + typedef uint8_t UINT8; + typedef uint16_t UINT16; + typedef uint32_t UINT32; +#elif defined(SOLARIS) +# include +# include + typedef uint8_t UINT8; + typedef uint16_t UINT16; + typedef uint32_t UINT32; +#else + /* FIXME: probably ok for ILP32 and LP64, but not ILP64 */ + typedef unsigned char UINT8; + typedef unsigned short UINT16; + typedef unsigned int UINT32; +#endif + + +/* iSCSI listen port for incoming connections */ +#define ISCSI_LISTEN_PORT 3260 + +/* assumes a pointer to a 3-byte array */ +#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) + +/* assumes a pointer to a 3 byte array, and an integer value */ +#define hton24(p, v) {\ + p[0] = (((v) >> 16) & 0xFF); \ + p[1] = (((v) >> 8) & 0xFF); \ + p[2] = ((v) & 0xFF); \ +} + + +/* for Login min, max, active version fields */ +#define ISCSI_MIN_VERSION 0x00 +#define ISCSI_DRAFT8_VERSION 0x02 +#define 
ISCSI_DRAFT20_VERSION 0x00 +#define ISCSI_MAX_VERSION 0x02 + +/* Min. and Max. length of a PDU we can support */ +#define MIN_PDU_LENGTH (8 << 9) /* 4KB */ +#define MAX_PDU_LENGTH (0xffffffff) /* Huge */ + +/* Padding word length */ +#define PAD_WORD_LEN 4 + +/* Max. number of Key=Value pairs in a text message */ +#define MAX_KEY_VALUE_PAIRS 8192 + +/* Reserved value for initiator/target task tag */ +#define RSVD_TASK_TAG 0xffffffff + +/* maximum length for text keys/values */ +#define KEY_MAXLEN 64 +#define VALUE_MAXLEN 255 +#define TARGET_NAME_MAXLEN VALUE_MAXLEN + +#define DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH 8192 + +/* most PDU types have a final bit */ +#define ISCSI_FLAG_FINAL 0x80 + +/* iSCSI Template Message Header */ +struct IscsiHdr { + UINT8 opcode; + UINT8 flags; /* Final bit */ + UINT8 rsvd2[2]; + UINT8 hlength; /* AHSs total length */ + UINT8 dlength[3]; /* Data length */ + UINT8 lun[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT8 other[28]; +}; + +/* Opcode encoding bits */ +#define ISCSI_OP_RETRY 0x80 +#define ISCSI_OP_IMMEDIATE 0x40 +#define ISCSI_OP_RSP 0x00 /* The 2 MSB not set anymore */ +#define ISCSI_OPCODE_MASK 0x3F + +/* Client to Server Message Opcode values */ +#define ISCSI_OP_NOOP_OUT 0x00 +#define ISCSI_OP_SCSI_CMD 0x01 +#define ISCSI_OP_SCSI_TASK_MGT_MSG 0x02 +#define ISCSI_OP_LOGIN_CMD 0x03 +#define ISCSI_OP_TEXT_CMD 0x04 +#define ISCSI_OP_SCSI_DATA 0x05 +#define ISCSI_OP_LOGOUT_CMD 0x06 +#define ISCSI_OP_SNACK_CMD 0x10 + +/* Server to Client Message Opcode values */ +#define ISCSI_OP_NOOP_IN (0x20 | ISCSI_OP_RSP) +#define ISCSI_OP_SCSI_RSP (0x21 | ISCSI_OP_RSP) +#define ISCSI_OP_SCSI_TASK_MGT_RSP (0x22 | ISCSI_OP_RSP) +#define ISCSI_OP_LOGIN_RSP (0x23 | ISCSI_OP_RSP) +#define ISCSI_OP_TEXT_RSP (0x24 | ISCSI_OP_RSP) +#define ISCSI_OP_SCSI_DATA_RSP (0x25 | ISCSI_OP_RSP) +#define ISCSI_OP_LOGOUT_RSP (0x26 | ISCSI_OP_RSP) +#define ISCSI_OP_RTT_RSP (0x31 | ISCSI_OP_RSP) +#define ISCSI_OP_ASYNC_EVENT (0x32 | ISCSI_OP_RSP) +#define 
ISCSI_OP_REJECT_MSG (0x3f | ISCSI_OP_RSP) + + +/* SCSI Command Header */ +struct IscsiScsiCmdHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2; + UINT8 cmdrn; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 lun[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 data_length; + UINT32 cmdsn; + UINT32 expstatsn; + UINT8 scb[16]; /* SCSI Command Block */ + /* Additional Data (Command Dependent) */ +}; + +/* Command PDU flags */ +#define ISCSI_FLAG_CMD_READ 0x40 +#define ISCSI_FLAG_CMD_WRITE 0x20 +#define ISCSI_FLAG_CMD_ATTR_MASK 0x07 /* 3 bits */ + +/* SCSI Command Attribute values */ +#define ISCSI_ATTR_UNTAGGED 0 +#define ISCSI_ATTR_SIMPLE 1 +#define ISCSI_ATTR_ORDERED 2 +#define ISCSI_ATTR_HEAD_OF_QUEUE 3 +#define ISCSI_ATTR_ACA 4 + + +/* SCSI Response Header */ +struct IscsiScsiRspHdr { + UINT8 opcode; + UINT8 flags; + UINT8 response; + UINT8 cmd_status; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 rsvd[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 rsvd1; + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT32 expdatasn; + UINT32 bi_residual_count; + UINT32 residual_count; + /* Response or Sense Data (optional) */ +}; + +/* Command Response PDU flags */ +#define ISCSI_FLAG_CMD_BIDI_OVERFLOW 0x10 +#define ISCSI_FLAG_CMD_BIDI_UNDERFLOW 0x08 +#define ISCSI_FLAG_CMD_OVERFLOW 0x04 +#define ISCSI_FLAG_CMD_UNDERFLOW 0x02 + +/* iSCSI Status values. 
Valid if Rsp Selector bit is not set */ +#define ISCSI_STATUS_CMD_COMPLETED 0 +#define ISCSI_STATUS_TARGET_FAILURE 1 +#define ISCSI_STATUS_SUBSYS_FAILURE 2 + + +/* Asynchronous Event Header */ +struct IscsiAsyncEvtHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2[2]; + UINT8 rsvd3; + UINT8 dlength[3]; + UINT8 lun[8]; + UINT8 rsvd4[8]; + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT8 async_event; + UINT8 async_vcode; + UINT16 param1; + UINT16 param2; + UINT16 param3; + UINT8 rsvd5[4]; +}; + +/* iSCSI Event Indicator values */ +#define ASYNC_EVENT_SCSI_EVENT 0 +#define ASYNC_EVENT_REQUEST_LOGOUT 1 +#define ASYNC_EVENT_DROPPING_CONNECTION 2 +#define ASYNC_EVENT_DROPPING_ALL_CONNECTIONS 3 +#define ASYNC_EVENT_PARAM_NEGOTIATION 4 +#define ASYNC_EVENT_VENDOR_SPECIFIC 255 + + +/* NOP-Out Message */ +struct IscsiNopOutHdr { + UINT8 opcode; + UINT8 flags; + UINT16 rsvd2; + UINT8 rsvd3; + UINT8 dlength[3]; + UINT8 lun[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 ttt; /* Target Transfer Tag */ + UINT32 cmdsn; + UINT32 expstatsn; + UINT8 rsvd4[16]; +}; + +/* NOP-In Message */ +struct IscsiNopInHdr { + UINT8 opcode; + UINT8 flags; + UINT16 rsvd2; + UINT8 rsvd3; + UINT8 dlength[3]; + UINT8 lun[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 ttt; /* Target Transfer Tag */ + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT8 rsvd4[12]; +}; + +/* SCSI Task Management Message Header */ +struct IscsiScsiTaskMgtHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd1[2]; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 lun[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 rtt; /* Reference Task Tag */ + UINT32 cmdsn; + UINT32 expstatsn; + UINT32 refcmdsn; + UINT32 expdatasn; + UINT8 rsvd2[8]; +}; + +#define ISCSI_FLAG_TASK_MGMT_FUNCTION_MASK 0x7F + + +/* Function values */ +#define ISCSI_TM_FUNC_ABORT_TASK 1 +#define ISCSI_TM_FUNC_ABORT_TASK_SET 2 +#define ISCSI_TM_FUNC_CLEAR_ACA 3 +#define ISCSI_TM_FUNC_CLEAR_TASK_SET 4 +#define 
ISCSI_TM_FUNC_LOGICAL_UNIT_RESET 5 +#define ISCSI_TM_FUNC_TARGET_WARM_RESET 6 +#define ISCSI_TM_FUNC_TARGET_COLD_RESET 7 +#define ISCSI_TM_FUNC_TASK_REASSIGN 8 + + +/* SCSI Task Management Response Header */ +struct IscsiScsiTaskMgtRspHdr { + UINT8 opcode; + UINT8 flags; + UINT8 response; /* see Response values below */ + UINT8 qualifier; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 rsvd2[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 rtt; /* Reference Task Tag */ + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT8 rsvd3[12]; +}; + + +/* Response values */ +#define SCSI_TCP_TM_RESP_COMPLETE 0x00 +#define SCSI_TCP_TM_RESP_NO_TASK 0x01 +#define SCSI_TCP_TM_RESP_NO_LUN 0x02 +#define SCSI_TCP_TM_RESP_TASK_ALLEGIANT 0x03 +#define SCSI_TCP_TM_RESP_NO_FAILOVER 0x04 +#define SCSI_TCP_TM_RESP_IN_PRGRESS 0x05 +#define SCSI_TCP_TM_RESP_REJECTED 0xff + +/* Ready To Transfer Header */ +struct IscsiRttHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2[2]; + UINT8 rsvd3[12]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 ttt; /* Target Transfer Tag */ + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT32 rttsn; + UINT32 data_offset; + UINT32 data_length; +}; + + +/* SCSI Data Hdr */ +struct IscsiDataHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2[2]; + UINT8 rsvd3; + UINT8 dlength[3]; + UINT8 lun[8]; + UINT32 itt; + UINT32 ttt; + UINT32 rsvd4; + UINT32 expstatsn; + UINT32 rsvd5; + UINT32 datasn; + UINT32 offset; + UINT32 rsvd6; + /* Payload */ +}; + +/* SCSI Data Response Hdr */ +struct IscsiDataRspHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2; + UINT8 cmd_status; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 lun[8]; + UINT32 itt; + UINT32 ttt; + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT32 datasn; + UINT32 offset; + UINT32 residual_count; +}; + +/* Data Response PDU flags */ +#define ISCSI_FLAG_DATA_ACK 0x40 +#define ISCSI_FLAG_DATA_OVERFLOW 0x04 +#define ISCSI_FLAG_DATA_UNDERFLOW 0x02 +#define ISCSI_FLAG_DATA_STATUS 0x01 + + 
+/* Text Header */ +struct IscsiTextHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2[2]; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 rsvd4[8]; + UINT32 itt; + UINT32 ttt; + UINT32 cmdsn; + UINT32 expstatsn; + UINT8 rsvd5[16]; + /* Text - key=value pairs */ +}; + +#define ISCSI_FLAG_TEXT_CONTINUE 0x40 + +/* Text Response Header */ +struct IscsiTextRspHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2[2]; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 rsvd4[8]; + UINT32 itt; + UINT32 ttt; + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT8 rsvd5[12]; + /* Text Response - key:value pairs */ +}; + +/* Login Header */ +struct IscsiLoginHdr { + UINT8 opcode; + UINT8 flags; + UINT8 max_version; /* Max. version supported */ + UINT8 min_version; /* Min. version supported */ + UINT8 hlength; + UINT8 dlength[3]; + UINT8 isid[6]; /* Initiator Session ID */ + UINT16 tsid; /* Target Session ID */ + UINT32 itt; /* Initiator Task Tag */ + UINT16 cid; + UINT16 rsvd3; + UINT32 cmdsn; + UINT32 expstatsn; + UINT8 rsvd5[16]; +}; + +/* Login PDU flags */ +#define ISCSI_FLAG_LOGIN_TRANSIT 0x80 +#define ISCSI_FLAG_LOGIN_CONTINUE 0x40 +#define ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK 0x0C /* 2 bits */ +#define ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK 0x03 /* 2 bits */ + +#define ISCSI_LOGIN_CURRENT_STAGE(flags) ((flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) +#define ISCSI_LOGIN_NEXT_STAGE(flags) (flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK) + + +/* Login Response Header */ +struct IscsiLoginRspHdr { + UINT8 opcode; + UINT8 flags; + UINT8 max_version; /* Max. 
version supported */ + UINT8 active_version; /* Active version */ + UINT8 hlength; + UINT8 dlength[3]; + UINT8 isid[6]; /* Initiator Session ID */ + UINT16 tsid; /* Target Session ID */ + UINT32 itt; /* Initiator Task Tag */ + UINT32 rsvd3; + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT8 status_class; /* see Login RSP ststus classes below */ + UINT8 status_detail; /* see Login RSP Status details below */ + UINT8 rsvd4[10]; +}; + +/* Login stage (phase) codes for CSG, NSG */ +#define ISCSI_SECURITY_NEGOTIATION_STAGE 0 +#define ISCSI_OP_PARMS_NEGOTIATION_STAGE 1 +#define ISCSI_FULL_FEATURE_PHASE 3 + +/* Login Status response classes */ +#define STATUS_CLASS_SUCCESS 0x00 +#define STATUS_CLASS_REDIRECT 0x01 +#define STATUS_CLASS_INITIATOR_ERR 0x02 +#define STATUS_CLASS_TARGET_ERR 0x03 + +/* Login Status response detail codes */ +/* Class-0 (Success) */ +#define ISCSI_LOGIN_STATUS_ACCEPT 0x00 + +/* Class-1 (Redirection) */ +#define ISCSI_LOGIN_STATUS_TGT_MOVED_TEMP 0x01 +#define ISCSI_LOGIN_STATUS_TGT_MOVED_PERM 0x02 + +/* Class-2 (Initiator Error) */ +#define ISCSI_LOGIN_STATUS_INIT_ERR 0x00 +#define ISCSI_LOGIN_STATUS_AUTH_FAILED 0x01 +#define ISCSI_LOGIN_STATUS_TGT_FORBIDDEN 0x02 +#define ISCSI_LOGIN_STATUS_TGT_NOT_FOUND 0x03 +#define ISCSI_LOGIN_STATUS_TGT_REMOVED 0x04 +#define ISCSI_LOGIN_STATUS_NO_VERSION 0x05 +#define ISCSI_LOGIN_STATUS_ISID_ERROR 0x06 +#define ISCSI_LOGIN_STATUS_MISSING_FIELDS 0x07 +#define ISCSI_LOGIN_STATUS_CONN_ADD_FAILED 0x08 +#define ISCSI_LOGIN_STATUS_NO_SESSION_TYPE 0x09 +#define ISCSI_LOGIN_STATUS_NO_SESSION 0x0a +#define ISCSI_LOGIN_STATUS_INVALID_REQUEST 0x0b + +/* Class-3 (Target Error) */ +#define ISCSI_LOGIN_STATUS_TARGET_ERROR 0x00 +#define ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE 0x01 +#define ISCSI_LOGIN_STATUS_NO_RESOURCES 0x02 + +/* Logout Header */ +struct IscsiLogoutHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd1[2]; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 rsvd2[8]; + UINT32 itt; /* Initiator Task Tag */ + 
UINT16 cid; + UINT8 rsvd3[2]; + UINT32 cmdsn; + UINT32 expstatsn; + UINT8 rsvd4[16]; +}; + +/* Logout PDU flags */ +#define ISCSI_FLAG_LOGOUT_REASON_MASK 0x7F + +/* logout reason_code values */ + +#define ISCSI_LOGOUT_REASON_CLOSE_SESSION 0 +#define ISCSI_LOGOUT_REASON_CLOSE_CONNECTION 1 +#define ISCSI_LOGOUT_REASON_RECOVERY 2 +#define ISCSI_LOGOUT_REASON_AEN_REQUEST 3 + +/* Logout Response Header */ +struct IscsiLogoutRspHdr { + UINT8 opcode; + UINT8 flags; + UINT8 response; /* see Logout response values below */ + UINT8 rsvd2; + UINT8 hlength; + UINT8 dlength[3]; + UINT8 rsvd3[8]; + UINT32 itt; /* Initiator Task Tag */ + UINT32 rsvd4; + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT32 rsvd5; + UINT16 t2wait; + UINT16 t2retain; + UINT32 rsvd6; +}; + +/* logout response status values */ + +#define ISCSI_LOGOUT_SUCCESS 0 +#define ISCSI_LOGOUT_CID_NOT_FOUND 1 +#define ISCSI_LOGOUT_RECOVERY_UNSUPPORTED 2 +#define ISCSI_LOGOUT_CLEANUP_FAILED 3 + + +/* SNACK Header */ +struct IscsiSNACKHdr { + UINT8 opcode; + UINT8 flags; + UINT8 rsvd2[14]; + UINT32 itt; + UINT32 begrun; + UINT32 runlength; + UINT32 expstatsn; + UINT32 rsvd3; + UINT32 expdatasn; + UINT8 rsvd6[8]; +}; + +/* SNACK PDU flags */ +#define ISCSI_FLAG_SNACK_TYPE_MASK 0x0F /* 4 bits */ + +/* Reject Message Header */ +struct IscsiRejectRspHdr { + UINT8 opcode; + UINT8 flags; + UINT8 reason; + UINT8 rsvd2; + UINT8 rsvd3; + UINT8 dlength[3]; + UINT8 rsvd4[16]; + UINT32 statsn; + UINT32 expcmdsn; + UINT32 maxcmdsn; + UINT32 datasn; + UINT8 rsvd5[8]; + /* Text - Rejected hdr */ +}; + +/* Reason for Reject */ +#define CMD_BEFORE_LOGIN 1 +#define DATA_DIGEST_ERROR 2 +#define DATA_SNACK_REJECT 3 +#define ISCSI_PROTOCOL_ERROR 4 +#define CMD_NOT_SUPPORTED 5 +#define IMM_CMD_REJECT 6 +#define TASK_IN_PROGRESS 7 +#define INVALID_SNACK 8 +#define BOOKMARK_REJECTED 9 +#define BOOKMARK_NO_RESOURCES 10 +#define NEGOTIATION_RESET 11 + + +#ifdef __cplusplus +} +#endif + +#endif diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-session.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-session.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-session.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-session.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,459 @@ +#ifndef ISCSI_SESSION_H_ +#define ISCSI_SESSION_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-session.h,v 1.31.4.2 2003/08/18 12:31:38 smhatre Exp $ + * + * iscsi-session.h + * + * define the iSCSI session structure needed by the login library + * + */ + + +#include "iscsi-common.h" +#include "iscsiAuthClient.h" + +#if defined(LINUX) + +#if defined(__KERNEL__) + +# include +# include +# include +# include +# include +# include +# include +# if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) ) +# include +# else +# include +# endif +# include + +/* these are from $(TOPDIR)/drivers/scsi, not $(TOPDIR)/include */ +# include "scsi.h" +# include "hosts.h" + +# include "iscsi-limits.h" +# include "iscsi-kernel.h" +# include "iscsi-task.h" +# include "iscsi-portal.h" + +#define LUN_BITMAP_SIZE ((ISCSI_MAX_LUN + BITS_PER_LONG - 1) / (BITS_PER_LONG)) +#define PREVENT_HARDWARE_CRC_ERROR 1 /* To disable this, set it to zero. 
*/ +#define PREVENT_DATA_CORRUPTION 1 /* This will prevent the buffer cache + * data and raw I/O data corruption by + * internally copying each segment of + * scatter-gather list and sending one by + * one all the data segments to the TCP + * layer. + * + * If buffer cache data and raw I/O date + * never gets corrupted by the TCP stack + * then, for better preformance, disable + * it by setting it to zero. + */ + +/* used for replying to NOPs */ +typedef struct iscsi_nop_info { + struct iscsi_nop_info *next; + uint32_t ttt; + unsigned int dlength; + unsigned char lun[8]; + unsigned char data[1]; +} iscsi_nop_info_t; + +typedef struct iscsi_session { + atomic_t refcount; + int this_is_root_disk; + volatile unsigned long generation; + struct iscsi_session *next; + struct iscsi_session *prev; + struct iscsi_session *probe_next; + struct iscsi_session *probe_prev; + struct iscsi_hba *hba; + struct socket *socket; + int iscsi_bus; + unsigned int host_no; + unsigned int channel; + unsigned int target_id; + unsigned long luns_found[LUN_BITMAP_SIZE]; + unsigned long luns_detected[LUN_BITMAP_SIZE]; + unsigned long luns_activated[LUN_BITMAP_SIZE]; + unsigned long luns_unreachable[LUN_BITMAP_SIZE]; + unsigned long luns_checked[LUN_BITMAP_SIZE]; + unsigned long luns_delaying_commands[LUN_BITMAP_SIZE]; + unsigned long luns_timing_out[LUN_BITMAP_SIZE]; + unsigned long luns_needing_recovery[LUN_BITMAP_SIZE]; + unsigned long luns_delaying_recovery[LUN_BITMAP_SIZE]; + unsigned long luns_doing_recovery[LUN_BITMAP_SIZE]; + uint32_t num_luns; + int probe_order; + struct semaphore probe_sem; + int ip_length; + unsigned char ip_address[16]; + int port; + int tcp_window_size; + struct semaphore config_mutex; + uint32_t config_number; + char *username; + unsigned char *password; + int password_length; + char *username_in; + unsigned char *password_in; + int password_length_in; + unsigned char *InitiatorName; + unsigned char *InitiatorAlias; + unsigned char TargetName[TARGET_NAME_MAXLEN + 
1]; + unsigned char TargetAlias[TARGET_NAME_MAXLEN + 1]; + unsigned char *log_name; + char target_link_dir[LINK_PATH_MAX + 1 + 3 + 10 + 1 + 6 + 10 + 1 + 3 + 10 + 1 + 7 + 1]; + mode_t dir_mode; + int bidirectional_auth; + IscsiAuthClient *auth_client_block; + IscsiAuthStringBlock *auth_recv_string_block; + IscsiAuthStringBlock *auth_send_string_block; + IscsiAuthLargeBinary *auth_recv_binary_block; + IscsiAuthLargeBinary *auth_send_binary_block; + int num_auth_buffers; + IscsiAuthBufferDesc auth_buffers[5]; + spinlock_t portal_lock; + iscsi_portal_info_t *portals; + unsigned int num_portals; + int portal_failover; + unsigned int current_portal; + unsigned int requested_portal; + unsigned int fallback_portal; + unsigned char preferred_portal[16]; + unsigned char preferred_subnet[16]; + unsigned int preferred_subnet_mask; + unsigned int preferred_portal_bitmap; + unsigned int preferred_subnet_bitmap; + unsigned int tried_portal_bitmap; + unsigned int auth_failures; + int ever_established; + int commands_queued; + int (*update_address)(struct iscsi_session *session, char *address); + /* the queue of SCSI commands that we need to send on this session */ + spinlock_t scsi_cmnd_lock; + Scsi_Cmnd *retry_cmnd_head; + Scsi_Cmnd *retry_cmnd_tail; + atomic_t num_retry_cmnds; + Scsi_Cmnd *scsi_cmnd_head; + Scsi_Cmnd *scsi_cmnd_tail; + atomic_t num_cmnds; + Scsi_Cmnd *deferred_cmnd_head; + Scsi_Cmnd *deferred_cmnd_tail; + unsigned int num_deferred_cmnds; + int ignore_lun; + unsigned int ignore_completions; + unsigned int ignore_aborts; + unsigned int ignore_abort_task_sets; + unsigned int ignore_lun_resets; + unsigned int ignore_warm_resets; + unsigned int ignore_cold_resets; + int reject_lun; + unsigned int reject_aborts; + unsigned int reject_abort_task_sets; + unsigned int reject_lun_resets; + unsigned int reject_warm_resets; + unsigned int reject_cold_resets; + unsigned int fake_read_header_mismatch; + unsigned int fake_write_header_mismatch; + unsigned int 
fake_read_data_mismatch; + unsigned int fake_write_data_mismatch; + unsigned int fake_not_ready; + int fake_status_lun; + unsigned int fake_status_unreachable; + unsigned int fake_status_busy; + unsigned int fake_status_queue_full; + unsigned int fake_status_aborted; + unsigned int print_cmnds; + struct timer_list busy_task_timer; + struct timer_list busy_command_timer; + struct timer_list immediate_reject_timer; + struct timer_list retry_timer; + unsigned int num_luns_delaying_commands; + uint8_t isid[6]; + uint16_t tsid; + unsigned int CmdSn; + volatile uint32_t ExpCmdSn; + volatile uint32_t MaxCmdSn; + volatile uint32_t last_peak_window_size; + volatile uint32_t current_peak_window_size; + unsigned long window_peak_check; + int ImmediateData; + int InitialR2T; + int MaxRecvDataSegmentLength; /* the value we declare */ + int MaxXmitDataSegmentLength; /* the value declared by the target */ + int FirstBurstLength; + int MaxBurstLength; + int DataPDUInOrder; + int DataSequenceInOrder; + int DefaultTime2Wait; + int DefaultTime2Retain; + int HeaderDigest; + int DataDigest; + int type; + int current_stage; + int next_stage; + int partial_response; + int portal_group_tag; + uint32_t itt; + int ping_test_data_length; + int ping_test_rx_length; + unsigned long ping_test_start; + unsigned long ping_test_rx_start; + unsigned char *ping_test_tx_buffer; + volatile unsigned long last_rx; + volatile unsigned long last_ping; + unsigned long last_window_check; + unsigned long last_kill; + unsigned long login_phase_timer; + unsigned long window_full; + unsigned long window_closed; + int vendor_specific_keys; + int send_async_text; + int login_timeout; + int auth_timeout; + int active_timeout; + int idle_timeout; + int ping_timeout; + int abort_timeout; + int reset_timeout; + int replacement_timeout; + int min_disk_command_timeout; + int max_disk_command_timeout; + /* the following fields may have to move if we decide to implement multiple connections, + * per session, and decide 
to have threads for each connection rather than for each session. + */ + /* the queue of SCSI commands that have been sent on this session, and for which we're waiting for a reply */ + spinlock_t task_lock; + iscsi_task_t *preallocated_task; + iscsi_task_collection_t arrival_order; + iscsi_task_collection_t tx_tasks; + atomic_t num_active_tasks; + unsigned int tasks_allocated; + unsigned int tasks_freed; + iscsi_nop_info_t nop_reply; + iscsi_nop_info_t *nop_reply_head; + iscsi_nop_info_t *nop_reply_tail; + uint32_t mgmt_itt; + wait_queue_head_t tx_wait_q; + wait_queue_head_t tx_blocked_wait_q; + wait_queue_head_t login_wait_q; + volatile unsigned long control_bits; + volatile uint32_t warm_reset_itt; + volatile uint32_t cold_reset_itt; + volatile pid_t rx_pid; + volatile pid_t tx_pid; + volatile pid_t disk_init_pid; + volatile pid_t send_tur_pid; + volatile unsigned long session_drop_time; + volatile unsigned long session_established_time; + /* the following fields are per-connection, not per session, and will need to move if + * we decide to support multiple connections per session. 
+ */ + unsigned long task_mgmt_response_deadline; + unsigned long reset_response_deadline; + unsigned long logout_deadline; + unsigned long logout_response_deadline; + uint32_t logout_itt; + long time2wait; + unsigned int ExpStatSn; + struct iovec rx_iov[(ISCSI_MAX_SG+1+1)]; /* all data + pad + digest */ + struct iovec crc_rx_iov[(ISCSI_MAX_SG+1+1)]; /* all data + pad + digest for CRC calculations */ + unsigned char rx_buffer[ISCSI_RXCTRL_SIZE]; + struct iovec tx_iov[(1+1+ISCSI_MAX_SG+1+1)]; /* header + digest + all data + pad + digest */ +#if PREVENT_DATA_CORRUPTION + unsigned char *xmit_data_buffer; + unsigned int xmit_buffer_size; +#endif +} iscsi_session_t; + +/* session control bits */ +#define TX_WAKE 0 +#define TX_PING 1 /* NopOut, reply requested */ +#define TX_PING_DATA 2 /* NopOut, reply requested, with data */ +#define TX_NOP_REPLY 3 /* reply to a Nop-in from the target */ + +#define TX_SCSI_COMMAND 4 +#define TX_DATA 5 +#define TX_ABORT 6 +#define TX_LUN_RESET 7 + +#define TX_LOGOUT 8 + +#define DISK_INIT 9 +#define SEND_TUR 10 + +#define TX_THREAD_BLOCKED 12 +#define SESSION_PROBING_LUNS 15 + +#define SESSION_REPLACEMENT_TIMEDOUT 16 +#define SESSION_TASK_MGMT_TIMEDOUT 17 +#define SESSION_COMMAND_TIMEDOUT 18 +#define SESSION_TASK_TIMEDOUT 19 + +#define SESSION_ESTABLISHED 20 +#define SESSION_DROPPED 21 +#define SESSION_TASK_ALLOC_FAILED 22 +#define SESSION_RETRY_COMMANDS 23 + +#define SESSION_RESET_REQUESTED 24 +#define SESSION_RESETTING 25 +#define SESSION_RESET 26 +#define SESSION_LOGOUT_REQUESTED 27 + +#define SESSION_LOGGED_OUT 28 +#define SESSION_WINDOW_CLOSED 29 +#define SESSION_TERMINATING 30 +#define SESSION_TERMINATED 31 + + +#else + +/* daemon's session structure */ +typedef struct iscsi_session { + int socket_fd; + int login_timeout; + int auth_timeout; + int active_timeout; + int idle_timeout; + int ping_timeout; + int vendor_specific_keys; + int send_async_text; + uint32_t itt; + uint32_t CmdSn; + uint32_t ExpCmdSn; + uint32_t MaxCmdSn; + 
uint32_t ExpStatSn; + int ImmediateData; + int InitialR2T; + int MaxRecvDataSegmentLength; /* the value we declare */ + int MaxXmitDataSegmentLength; /* the value declared by the target */ + int FirstBurstLength; + int MaxBurstLength; + int DataPDUInOrder; + int DataSequenceInOrder; + int DefaultTime2Wait; + int DefaultTime2Retain; + int HeaderDigest; + int DataDigest; + int type; + int current_stage; + int next_stage; + int partial_response; + int portal_group_tag; + uint8_t isid[6]; + uint16_t tsid; + int iscsi_bus; + int target_id; + char TargetName[TARGET_NAME_MAXLEN+1]; + char TargetAlias[TARGET_NAME_MAXLEN+1]; + char *InitiatorName; + char *InitiatorAlias; + int ip_length; + uint8_t ip_address[16]; + int port; + int tcp_window_size; + int (*update_address)(struct iscsi_session *session, char *address); + IscsiAuthStringBlock auth_recv_string_block; + IscsiAuthStringBlock auth_send_string_block; + IscsiAuthLargeBinary auth_recv_binary_block; + IscsiAuthLargeBinary auth_send_binary_block; + IscsiAuthClient auth_client_block; + IscsiAuthClient *auth_client; + int num_auth_buffers; + IscsiAuthBufferDesc auth_buffers[5]; + int bidirectional_auth; + char username[iscsiAuthStringMaxLength]; + uint8_t password[iscsiAuthStringMaxLength]; + int password_length; + char username_in[iscsiAuthStringMaxLength]; + uint8_t password_in[iscsiAuthStringMaxLength]; + int password_length_in; +} iscsi_session_t; + +#endif /* __KERNEL__ */ + +#endif /* LINUX */ + +#ifdef SOLARIS + +/* daemon's session structure */ +typedef struct iscsi_session { + int socket_fd; + int login_timeout; + int auth_timeout; + int active_timeout; + int idle_timeout; + int ping_timeout; + int conn_fail_timeout; + uint32_t itt; + uint32_t CmdSn; + uint32_t ExpCmdSn; + uint32_t MaxCmdSn; + uint32_t ExpStatSn; + int ImmediateData; + int InitialR2T; + int MaxRecvDataSegmentLength; /* the value we declare */ + int MaxXmitDataSegmentLength; /* the value declared by the target */ + int FirstBurstLength; + int 
MaxBurstLength; + int DataPDUInOrder; + int DataSequenceInOrder; + int DefaultTime2Wait; + int DefaultTime2Retain; + int HeaderDigest; + int DataDigest; + int type; + int current_stage; + int next_stage; + int partial_response; + int portal_group_tag; + uint8_t isid[6]; + uint16_t tsid; + int iscsi_bus; + int target_id; + char TargetName[TARGET_NAME_MAXLEN+1]; + char TargetAlias[TARGET_NAME_MAXLEN+1]; + char *InitiatorName; + char *InitiatorAlias; + int ip_length; + uint8_t ip_address[16]; + int port; + int tcp_window_size; + int (*update_address)(struct iscsi_session *session, char *address); + IscsiAuthClient *auth_client; + int bidirectional_auth; + char username[iscsiAuthStringMaxLength]; + uint8_t password[iscsiAuthStringMaxLength]; + int password_length; + char username_in[iscsiAuthStringMaxLength]; + uint8_t password_in[iscsiAuthStringMaxLength]; + int password_length_in; +} iscsi_session_t; + + +#endif + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-task.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-task.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-task.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-task.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,84 @@ +#ifndef ISCSI_TASK_H_ +#define ISCSI_TASK_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi-task.h,v 1.4 2002/12/30 19:07:55 smferris Exp $ + * + * iscsi-task.h + * + * define the iSCSI task structure needed by the kernel module + * + */ + +#include "iscsi-kernel.h" + +struct iscsi_session; + +/* task flags */ + +#define TASK_CONTROL 1 +#define TASK_WRITE 2 +#define TASK_READ 3 + +/* internal driver state for the task */ +#define TASK_INITIAL_R2T 4 +#define TASK_PREALLOCATED 5 /* preallocated by a session, never freed to the task cache */ +#define TASK_NEEDS_RETRY 6 +#define TASK_COMPLETED 7 + +/* what type of task mgmt function to try next */ +#define TASK_TRY_ABORT 8 +#define TASK_TRY_ABORT_TASK_SET 9 +#define TASK_TRY_LUN_RESET 10 +#define TASK_TRY_WARM_RESET 11 + +#define TASK_TRY_COLD_RESET 12 + + +/* we need to check and sometimes clear all of the TRY_ bits at once */ +#define TASK_RECOVERY_MASK 0x1F00UL +#define TASK_NEEDS_RECOVERY(task) ((task)->flags & TASK_RECOVERY_MASK) + +typedef struct iscsi_task { + struct iscsi_task *order_next; + struct iscsi_task *order_prev; + struct iscsi_task *next; + struct iscsi_task *prev; + Scsi_Cmnd *scsi_cmnd; + struct iscsi_session *session; + atomic_t refcount; + uint32_t rxdata; + unsigned long flags; /* guarded by session->task_lock */ + uint32_t cmdsn; /* need to record so that aborts can set RefCmdSN properly */ + uint32_t itt; + uint32_t ttt; + uint32_t mgmt_itt; /* itt of a task mgmt command for this task */ + unsigned int data_offset; /* explicit R2T */ + int data_length; /* explicit R2T */ + unsigned int lun; + unsigned long timedout; /* separate from flags so that the flags don't need to be atomically updated */ + struct timer_list timer; /* task timer used to trigger error recovery */ +} iscsi_task_t; + +typedef struct iscsi_task_collection { + struct iscsi_task *volatile head; + struct iscsi_task *volatile tail; +} iscsi_task_collection_t; + +#endif diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-trace.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-trace.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-trace.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,102 @@ +#ifndef ISCSI_TRACE_H_ +#define ISCSI_TRACE_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsi-trace.h,v 1.10 2002/11/08 19:11:11 smferris Exp $ + * + * iscsi-trace.h + * + * include for driver trace info + * + */ + +#define ISCSI_TRACE_COUNT 10000 + +typedef struct iscsi_trace_entry { + unsigned char type; + unsigned char cmd; + unsigned char host; + unsigned char channel; + unsigned char target; + unsigned char lun; + unsigned int itt; + unsigned long data1; + unsigned long data2; + unsigned long jiffies; +} iscsi_trace_entry_t; + +typedef struct iscsi_trace_dump { + uint32_t dump_ioctl_size; + uint32_t dump_version; + uint32_t trace_entry_size; + uint32_t num_entries; + iscsi_trace_entry_t trace[1]; +} iscsi_trace_dump_t; + +#define TRACE_DUMP_VERSION 0x1 + +/* + * Trace flags + */ +#define ISCSI_TRACE_Qd 0x01 +#define ISCSI_TRACE_QFailed 0x11 +#define ISCSI_TRACE_QSessionLockFailed 0x21 +#define ISCSI_TRACE_QCmndLockFailed 0x31 + +#define ISCSI_TRACE_TxCmd 0x02 + +#define ISCSI_TRACE_RxCmd 0x03 +#define ISCSI_TRACE_RxCmdStatus 0x13 +#define ISCSI_TRACE_RxUnderflow 0x23 +#define ISCSI_TRACE_RxOverflow 0x33 +#define ISCSI_TRACE_HostUnderflow 0x43 + +#define ISCSI_TRACE_TxData 0x04 +#define ISCSI_TRACE_TxDataPDU 0x14 + +#define ISCSI_TRACE_RxData 0x05 +#define ISCSI_TRACE_RxDataCmdStatus 0x15 + +#define ISCSI_TRACE_TxAbort 0x06 +#define ISCSI_TRACE_RxAbort 0x07 +#define ISCSI_TRACE_TxAbortTaskSet 0x16 +#define ISCSI_TRACE_RxAbortTaskSet 0x17 +#define ISCSI_TRACE_TxLunReset 0x08 +#define ISCSI_TRACE_RxLunReset 0x09 +#define ISCSI_TRACE_TxWarmReset 0x18 +#define ISCSI_TRACE_RxWarmReset 0x19 +#define ISCSI_TRACE_TxColdReset 0x28 +#define ISCSI_TRACE_RxColdReset 0x29 +#define ISCSI_TRACE_CmdDone 0x0a +#define ISCSI_TRACE_CmndAborted 0x0b +#define ISCSI_TRACE_TaskAborted 0x1b +#define ISCSI_TRACE_R2T 0x0c +#define ISCSI_TRACE_TxPing 0x0d +#define ISCSI_TRACE_RxPingReply 0x0e +#define ISCSI_TRACE_RxNop 0x0f +#define ISCSI_TRACE_TxNopReply 0x1f + +#define ISCSI_TRACE_RxAsyncEvent 0xa1 + +#define ISCSI_TRACE_BadOffset 0xb1 +#define 
ISCSI_TRACE_BadRxSeg 0xb2 +#define ISCSI_TRACE_OutOfData 0xb3 +#define ISCSI_TRACE_BadTxSeg 0xb4 + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-version.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-version.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi-version.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi-version.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,51 @@ +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * + * $Id: iscsi-version.h,v 1.26.6.3 2003/08/22 10:38:53 naveenb Exp $ + * + * controls the version number printed by the iSCSI driver + * + */ + +#define DRIVER_MAJOR_VERSION 3 +#define DRIVER_MINOR_VERSION 4 +#define DRIVER_PATCH_VERSION 0 +#define DRIVER_INTERNAL_VERSION 3 + +/* DRIVER_EXTRAVERSION is intended to be customized by Linux + * distributors, similar to the kernel Makefile's EXTRAVERSION. This + * string will be appended to all version numbers displayed by the + * driver. RPMs that patch the driver are encouraged to also patch + * this string to indicate to users that the driver has been patched, + * and may behave differently than a driver tarball from SourceForge. 
+ */ + +#define DRIVER_EXTRAVERSION "" + +#define ISCSI_DATE "22-Aug-2003" + +/* Distributors may also set BUILD_STR to a string, which will be + * logged by the kernel module after it loads and displays the version + * number. It is currently used as part of the driver development + * process, to mark tarballs built by developers containing code + * not yet checked into CVS. Publically available tarballs on + * SourceForge should always have BUILD_STR set to NULL, since + * all code should be checked in prior to making a public release. + */ + +#define BUILD_STR NULL diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi.c 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,14476 @@ +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsi.c,v 1.132.2.8 2003/08/22 08:57:08 naveenb Exp $ + * + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +/* + * These header files are required for Shutdown Notification routines + */ +#include +#include +#include + +/* these are from $(TOPDIR)/drivers/scsi, not $(TOPDIR)/include */ +#include "scsi.h" +#include "hosts.h" + +/* if non-zero, do a TCP Abort when a session drops, instead of (attempting) a graceful TCP Close */ +#define TCP_ABORT_ON_DROP 0 + +#define RETRIES_BLOCK_DEVICES 1 + +/* some targets, such as the Intel Labs target on SourceForge, make + * invalid assumptions about the relateive ordering of command and + * data PDUs, but still advertise a CmdSN window greater than one + * command. When this is non-zero, we try to break ourselves in such + * a way that the target's bogus assumptions are met. No promises + * though, since we may send Nops or task mgmt PDUs at any time, + * which the broken target may still choke on. 
+ */ +#define INVALID_ORDERING_ASSUMPTIONS 0 + +/* periodically stall reading data to test data arriving after aborts have started */ +#define TEST_DELAYED_DATA 0 + +/* fake sense indicating ILLEGAL_REQUEST for all REPORT_LUNS commands */ +#define FAKE_NO_REPORT_LUNS 0 + +/* fake check conditions on the first 2 attempts for each probe command */ +#define FAKE_PROBE_CHECK_CONDITIONS 0 + +/* fake underflows on the first 4 attempts for each probe command */ +#define FAKE_PROBE_UNDERFLOW 0 + +#include "iscsi-common.h" +#include "iscsi-protocol.h" +#include "iscsi-ioctl.h" +#include "iscsi-io.h" +#include "iscsi-login.h" +#include "iscsi-trace.h" +#include "iscsi.h" +#include "iscsi-session.h" +#include "iscsi-version.h" +#include "iscsi-probe.h" +#include "iscsi-crc.h" + +/* + * IMPORTANT NOTE: to prevent deadlock, when holding multiple locks, + * the following locking order must be followed at all times: + * + * hba_list_lock - access to collection of HBA instances + * session->portal_lock - access to a session's portal info + * session->task_lock - access to a session's collections of tasks + * hba->session_lock - access to an HBA's collection of sessions + * session->scsi_cmnd_lock - access to a session's list of Scsi_Cmnds (IRQSAVE) + * io_request_lock/host_lock - mid-layer acquires before calling queuecommand, eh_*, + * we must acquire before done() callback (IRQSAVE) + * iscsi_trace_lock - for the tracing code (IRQSAVE) + * + * Note: callers not in interrupt context must locally disable/restore interrupts + * when holding locks marked (IRQSAVE) + */ + +#ifdef MODULE +MODULE_AUTHOR("Cisco Systems, Inc."); +MODULE_DESCRIPTION("iSCSI initiator"); +# if defined(MODULE_LICENSE) +MODULE_LICENSE("GPL"); +# endif +#endif + +static int iscsi_system_is_rebooting; +static int this_is_iscsi_boot; +static sapiNBP_t iscsi_inbp_info; +static char inbp_interface_name[IFNAMSIZ]; + + +/* Force tagged command queueing for all devices, regardless of whether they say they support it 
*/ +static int force_tcq = 0; +MODULE_PARM(force_tcq, "i"); +MODULE_PARM_DESC(force_tcq, "when non-zero, force tagged command queueing for all devices"); + +/* Queue depth for devices that don't support tagged command queueing. + * The driver used to use ISCSI_CMDS_PER_LUN, which was probably a bug. + * Default to 1 now, but let people who want to the old behavior set it higher. + */ +static int untagged_queue_depth = 1; +MODULE_PARM(untagged_queue_depth, "i"); +MODULE_PARM_DESC(untagged_queue_depth, "queue depth to use for devices that don't support tagged command queueing"); + +static int translate_deferred_sense = 1; +MODULE_PARM(translate_deferred_sense, "i"); +MODULE_PARM_DESC(translate_deferred_sense, "translate deferred sense data to current sense data in disk command responses"); + +static int iscsi_reap_tasks = 0; +MODULE_PARM(iscsi_reap_tasks, "i"); +MODULE_PARM_DESC(iscsi_reap_task, "when non-zero, the OS is allowed to reap pages from the iSCSI task cache"); + +#ifndef UINT32_MAX +# define UINT32_MAX 0xFFFFFFFFU +#endif + +#if PREVENT_HARDWARE_CRC_ERROR +#define HARDWARE_CRC_ERROR_RETRIES 3 +#endif + +/* We need it here for probing luns on lun change async event */ +#define MAX_SCSI_DISKS 128 +#define MAX_SCSI_DISK_PARTITIONS 15 +#define MAX_SCSI_TAPES 32 +#define MAX_SCSI_GENERICS 256 +#define MAX_SCSI_CDROMS 256 + +#define LINK_DIR "/dev/iscsi" + +static int ctl_open(struct inode *inode, struct file *file); +static int ctl_close(struct inode *inode, struct file *file); +static int ctl_ioctl(struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long arg ); +static int iscsi_inet_aton(char *asciiz, + unsigned char *ip_address, + int *ip_length); + +static int control_major; +static const char *control_name = "iscsictl"; + +static struct file_operations control_fops = { + owner: THIS_MODULE, + ioctl: ctl_ioctl, /* ioctl */ + open: ctl_open, /* open */ + release: ctl_close, /* release */ +}; + +spinlock_t iscsi_hba_list_lock = 
SPIN_LOCK_UNLOCKED; +static iscsi_hba_t *iscsi_hba_list = NULL; + +static volatile unsigned long init_module_complete = 0; +static volatile unsigned long iscsi_timer_running = 0; +static volatile pid_t iscsi_timer_pid = 0; +static volatile pid_t iscsi_lun_pid = 0; + +volatile unsigned int iscsi_log_settings = LOG_SET(ISCSI_LOG_ERR) | LOG_SET(ISCSI_LOG_RETRY) | LOG_SET(ISCSI_LOG_TIMEOUT); + + +#define is_digit(c) (((c) >= '0') && ((c) <= '9')) +#define is_hex_lower(c) (((c) >= 'a') && ((c) <= 'f')) +#define is_hex_upper(c) (((c) >= 'A') && ((c) <= 'F')) +#define is_space(c) ((c) == ' ' || (c) == '\t' || (c) == '\n' || (c) == '\0') + +#if DEBUG_TRACE +spinlock_t iscsi_trace_lock = SPIN_LOCK_UNLOCKED; +static iscsi_trace_entry_t trace_table[ISCSI_TRACE_COUNT]; +static int trace_index=0; + +# define ISCSI_TRACE(P_TYPE, P_CMND, P_TASK, P_DATA1, P_DATA2) \ + iscsi_fill_trace((P_TYPE), (P_CMND), (P_TASK), (P_DATA1), (P_DATA2)) +#else +# define ISCSI_TRACE(P_TYPE, P_CMND, P_TASK, P_DATA1, P_DATA2) +#endif + +#define MAX_PORTALS 32 /* + * 32 is the sizeof(unsigned int). + * If max portals to a target exceeds 32 then + * we need to change the preferred_portal_bitmap, + * preferred_subnet_bitmap from unsigned int to + * an array of unsigned int's. 
+ */ + +/* Returns 0 on success, non zero on error */ +static int iscsi_add_route(void) +{ + struct rtentry rt; + struct sockaddr_in *dst = (struct sockaddr_in *)&rt.rt_dst; + struct sockaddr_in *gw = (struct sockaddr_in *)&rt.rt_gateway; + char dev[IFNAMSIZ]; + char ip[16]; + int ret; + + memset((char *) &rt, 0, sizeof(struct rtentry)); + memset(ip, 0, 16); + memset(dev, 0, IFNAMSIZ); + + dst->sin_family = AF_INET; + dst->sin_addr.s_addr = INADDR_ANY; + + strcpy(ip, (char *)(&iscsi_inbp_info.ripaddr)); + strcpy(dev, inbp_interface_name); + + gw->sin_family = AF_INET; + memcpy((char *)(&(gw->sin_addr.s_addr)), (char*)ip, 4); + + rt.rt_flags = (RTF_UP | RTF_GATEWAY); + rt.rt_dev = dev; + + + DEBUG_INIT("iSCSI: Setting gateway ip as: 0x%2x %2x %2x %2x\n", ip[0], ip[1], ip[2], ip[3]); + + ret = ip_rt_ioctl(SIOCADDRT, &rt); + + if(ret != 0) { + printk("iSCSI: ERROR: ip_rt_ioctl returned with value: %d\n", ret); + } + + return ret; +} + + +/* Returns 1 on success 0 on failure */ +/* Needs to be called between set_fs() ... 
*/ +static int iscsi_ifdown (void) +{ + struct ifreq ifr; + int dev_ret = 0; + + memset(&ifr, 0, sizeof(struct ifreq)); + + printk("\niSCSI: iscsi_ifdown: Bringing down network interface %s\n", inbp_interface_name); + + strcpy(ifr.ifr_name, inbp_interface_name); + + /* + * Check if the interface is already up or not, set_fs should have already + * been done before calling this function + */ + if (dev_ioctl(SIOCGIFFLAGS ,&ifr) == 0 ) { + if ((ifr.ifr_flags & IFF_UP) != 0) { + DEBUG_INIT("\niSCSI: Interface %s has IFF_UP flag set, will bring it down ...\n", ifr.ifr_name); + /* fall through and bring down the interface */ + } else { + DEBUG_INIT("\niSCSI: Interface %s does not have IFF_UP flag set\n", ifr.ifr_name); + return 1; + } + } else { + printk("\niSCSI: ERROR in getting interface flags for interface %s\n", ifr.ifr_name); + return 0; + } + + ifr.ifr_flags &= ~(IFF_UP); + + if((dev_ret = devinet_ioctl(SIOCSIFFLAGS, (void *)&ifr)) != 0) { + printk("\niSCSI: ERROR in bringing down interface %s, return value %d\n", ifr.ifr_name, dev_ret); + return 0; + } + + return 1; +} + +/* Returns 1 on success 0 on failure */ +/* Needs to be called between set_fs() ... */ +static int iscsi_set_if_addr (void) +{ + struct ifreq ifr; + struct sockaddr sa; + struct sockaddr_in *sin = (struct sockaddr_in *)&sa; + int dev_ret = 0; + + memset(&ifr, 0, sizeof(struct ifreq)); + memset(&sa, 0, sizeof(struct sockaddr)); + + printk("\niSCSI: iscsi_set_if_addr: Bringing up network interface\n"); + + if(iscsi_inbp_info.myipaddr != 0) { + DEBUG_INIT("\nSetting ip from inbp 0x%x\n", iscsi_inbp_info.myipaddr); + memcpy((char *)(&sin->sin_addr), (char *)(&iscsi_inbp_info.myipaddr), 4); + } else { + DEBUG_INIT("\nERROR !!! Not setting ip from inbp !!!\n"); + return 0; + } + if(inbp_interface_name != NULL) { + DEBUG_INIT("\nSetting interface from inbp %s\n", inbp_interface_name); + strcpy(ifr.ifr_name, inbp_interface_name); + } else { + DEBUG_INIT("\nERROR !!! 
Not setting interface from inbp !!!\n"); + return 0; + } + + /* Check if the interface is already up or not */ + if (dev_ioctl(SIOCGIFFLAGS ,&ifr) == 0 ) { + if ((ifr.ifr_flags & IFF_UP) != 0) { + DEBUG_INIT("\nInterface %s has IFF_UP flag already set\n", ifr.ifr_name); + return 1; + } else { + DEBUG_INIT("\nInterface %s does not have IFF_UP flag set\n", ifr.ifr_name); + /* fall through and bring up the interface */ + } + } else { + printk("\niSCSI: ERROR in getting interface FLAGS for interface %s\n", ifr.ifr_name); + return 0; + } + + memset(&ifr, 0, sizeof(struct ifreq)); + /* If we came this far then inbp_interface_name should be valid */ + strcpy(ifr.ifr_name, inbp_interface_name); + + sin->sin_family = AF_INET; + sin->sin_port = 0; + + memcpy((char *) &ifr.ifr_addr, (char *) &sa, sizeof(struct sockaddr)); + + /* Bring up networking, set_fs has already been done */ + if ((dev_ret = devinet_ioctl(SIOCSIFADDR, (void *)&ifr)) != 0) { + printk("\niSCSI: ERROR in setting ip address for interface %s\n", ifr.ifr_name); + return 0; + } + + DEBUG_INIT("\niSCSI: addr_dev_ret = 0x%x\n", dev_ret); + + memset(&ifr, 0, sizeof(struct ifreq)); + + if(inbp_interface_name != NULL) { + DEBUG_INIT("\nSetting interface from inbp %s\n", inbp_interface_name); + strcpy(ifr.ifr_name, inbp_interface_name); + } else { + DEBUG_INIT("\nERROR !!! 
Not setting interface from inbp !!!\n"); + return 0; + } + + ifr.ifr_flags |= IFF_UP | IFF_BROADCAST | IFF_RUNNING | IFF_MULTICAST; + + + if((dev_ret = devinet_ioctl(SIOCSIFFLAGS, (void *)&ifr)) != 0) { + printk("\niSCSI: ERROR in setting flags for interface %s\n", ifr.ifr_name); + return 0; + } + + DEBUG_INIT("\niSCSI: flag_dev_ret = 0x%x\n", dev_ret); + + memset(&ifr, 0, sizeof(struct ifreq)); + + /* If we get this far we assume name and mask in inbp structure are valid */ + + strcpy(ifr.ifr_name, inbp_interface_name); + memcpy((char *)(&sin->sin_addr), (char *)(&iscsi_inbp_info.myipmask), 4); + memcpy((char *) &ifr.ifr_netmask, (char *) &sa, sizeof(struct sockaddr)); + + if((dev_ret = devinet_ioctl(SIOCSIFNETMASK, (void *)&ifr)) != 0) { + printk("\niSCSI: ERROR in setting network mask for interface %s\n", ifr.ifr_name); + return 0; + } + + DEBUG_INIT("\niSCSI: mask_dev_ret = 0x%x\n", dev_ret); + + while(iscsi_add_route()) { + printk("\niSCSI: set_inbp_info: iscsi_add_route failed\n"); + schedule_timeout(10 * HZ); + } + + return 1; +} + +/* become a daemon kernel thread. Some kernels provide this functionality + * already, and some even do it correctly + */ +void iscsi_daemonize(void) +{ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,44) ) + /* use the kernel's daemonize */ + daemonize(); + + /* Reparent to init now done by daemonize */ + + /* FIXME: do we really need to bump up the thread priority? 
*/ +# if defined(HAS_SET_USER_NICE) || defined(set_user_nice) + { + struct task_struct *this_task = current; + set_user_nice(this_task, -20); + } +# endif + +#else + struct task_struct *this_task = current; + + /* use the kernel's daemonize */ + daemonize(); + +# if defined(HAS_REPARENT_TO_INIT) || defined(reparent_to_init) + /* Reparent to init */ + reparent_to_init(); +# endif + + /* increase priority like the md driver does for it's kernel threads */ + this_task->policy = SCHED_OTHER; + set_user_nice(this_task, -20); + smp_mb(); +#endif +} + +#ifdef HAS_NEW_DEVICE_LISTS + +static void target_reset_occured(iscsi_session_t *session) +{ + Scsi_Device *device = NULL; + + list_for_each_entry(device, &session->hba->host->my_devices, siblings) { + if ((device->channel == session->channel) && + (device->id == session->target_id)) + { + device->was_reset = 1; + device->expecting_cc_ua = 1; + } + } +} + +static void lun_reset_occured(iscsi_session_t *session, unsigned int lun) +{ + Scsi_Device *device = NULL; + + list_for_each_entry(device, &session->hba->host->my_devices, siblings) { + if ((device->channel == session->channel) && + (device->id == session->target_id) && + (device->lun == lun)) + { + device->was_reset = 1; + device->expecting_cc_ua = 1; + } + } +} + +#else + +static void target_reset_occured(iscsi_session_t *session) +{ + Scsi_Device *device = NULL; + + /* FIXME: locking? 
*/ + for (device = session->hba->host->host_queue; device; device = device->next) { + if ((device->channel == session->channel) && + (device->id == session->target_id)) + { + device->was_reset = 1; + device->expecting_cc_ua = 1; + } + } +} + +static void lun_reset_occured(iscsi_session_t *session, unsigned int lun) +{ + Scsi_Device *device = NULL; + + for (device = session->hba->host->host_queue; device; device = device->next) { + if ((device->channel == session->channel) && + (device->id == session->target_id) && + (device->lun == lun)) + { + device->was_reset = 1; + device->expecting_cc_ua = 1; + } + } +} + +#endif + +/* determine whether a command is eligible to be retried internally. */ +static inline int internally_retryable(Scsi_Cmnd *sc) +{ + if (sc->device && (sc->device->type == TYPE_DISK)) { + switch (sc->cmnd[0]) { + case INQUIRY: + case REPORT_LUNS: + case TEST_UNIT_READY: + case READ_CAPACITY: + case START_STOP: + case MODE_SENSE: + return 0; + default: + return 1; + } + } + + return 0; +} + +/* newer kernels require long alignment for bitops. + * we assume pointers have at least as much alignment as longs, + * and use an unused pointer field to store bit flags. + */ +static inline unsigned long *device_flags(Scsi_Device *sd) +{ + unsigned long *flags = (unsigned long *)&(sd->hostdata); + + return flags; +} + +/* device flags */ +#define DEVICE_LOG_TERMINATING 0 +#define DEVICE_LOG_REPLACEMENT_TIMEDOUT 1 +#define DEVICE_LOG_NO_SESSION 2 + +/* newer kernels require long alignment for bitops. + * we assume pointers have at least as much alignment as longs, + * and use an unused pointer field to store bit flags. 
+ */ +static inline unsigned long *command_flags(Scsi_Cmnd *sc) +{ + unsigned long *flags = (unsigned long *)&(sc->SCp.buffer); + + return flags; +} + +static void useless_timeout_function(unsigned long arg) +{ + +} + +static inline void add_completion_timer(Scsi_Cmnd *sc) +{ + if (sc->scsi_done != iscsi_done) { + sc->eh_timeout.data = (unsigned long)sc; + sc->eh_timeout.expires = jiffies + sc->timeout_per_command; + sc->eh_timeout.function = useless_timeout_function; + add_timer(&sc->eh_timeout); + } +} + + +static void add_command_timer(iscsi_session_t *session, Scsi_Cmnd *sc, void (*fn)(unsigned long)) +{ + unsigned long now = jiffies; + unsigned long expires; + + if (sc->eh_timeout.function != NULL) { + DEBUG_QUEUE("iSCSI: add_command_timer %p %p deleting existing timer at %lu\n", + sc, fn, jiffies); + + del_timer_sync(&sc->eh_timeout); + } + + /* default is based on the number of retries remaining and the timeout for each */ + if ((sc->allowed > 1) && (sc->retries < sc->allowed)) + sc->eh_timeout.expires = now + ((sc->allowed - sc->retries) * sc->timeout_per_command); + else + sc->eh_timeout.expires = now + sc->timeout_per_command; + + if (sc->eh_timeout.expires == 0) + sc->eh_timeout.expires = 1; + + /* but each session may override that timeout value for certain disk commands */ + if (sc->device && (sc->device->type == TYPE_DISK)) { + if (internally_retryable(sc)) { + /* we only increase timeouts on commands that are internally retryable, + * since some commands must be completed in a reasonable amount of + * time in order for the upper layers to behave properly. 
+ */ + if ((session->min_disk_command_timeout < 0) && (session->max_disk_command_timeout <= 0)) { + /* no command timeout (infinite retries) when the min is infinite and there is no max */ + sc->eh_timeout.expires = 0; + } + else if (session->min_disk_command_timeout > 0) { + /* clamp the SCSI layer's timeout to be at least the minimum */ + expires = now + (session->min_disk_command_timeout * HZ); + if (expires > sc->eh_timeout.expires) { + /* use a longer timeout */ + if (expires) + sc->eh_timeout.expires = expires; + else + sc->eh_timeout.expires = 1; + } + } + } + + /* we always allow the timeout to be shortened, to make multipath + * drivers happy. + */ + if (session->max_disk_command_timeout > 0) { + /* clamp the SCSI layer's timeout to be at most the maximum */ + expires = now + (session->max_disk_command_timeout * HZ); + if (expires < sc->eh_timeout.expires) { + /* use a shorter timeout */ + if (expires) + sc->eh_timeout.expires = expires; + else + sc->eh_timeout.expires = 1; + } + } + } + + if (sc->eh_timeout.expires) { + DEBUG_QUEUE("iSCSI: add_command_timer %p %p adding timer at %lu, expires %lu, timeout %u, retries %u, allowed %u\n", + sc, fn, jiffies, sc->eh_timeout.expires, sc->timeout_per_command, sc->retries, sc->allowed); + add_timer(&sc->eh_timeout); + sc->eh_timeout.data = (unsigned long)sc; + sc->eh_timeout.function = fn; + } +} + +static void add_task_timer(iscsi_task_t *task, void (*fn)(unsigned long)) +{ + if (task->timer.function != NULL) { + DEBUG_QUEUE("iSCSI: add_task_timer %p %p deleting existing timer at %lu\n", + task, fn, jiffies); + + del_timer_sync(&task->timer); + } + task->timer.data = (unsigned long)task; + task->timer.expires = jiffies + task->scsi_cmnd->timeout_per_command; + task->timer.function = fn; + + DEBUG_QUEUE("iSCSI: add_task_timer %p %p adding timer at %lu, expires %lu, timeout %u\n", + task, fn, jiffies, task->timer.expires, task->scsi_cmnd->timeout_per_command); + add_timer(&task->timer); +} + +static int 
del_command_timer(Scsi_Cmnd *sc) +{ + int ret; + + DEBUG_QUEUE("iSCSI: del_command_timer %p deleting timer at %lu\n", sc, jiffies); + + ret = del_timer_sync(&sc->eh_timeout); + + sc->eh_timeout.expires = 0; + sc->eh_timeout.data = (unsigned long) NULL; + sc->eh_timeout.function = NULL; + return ret; +} + +static int del_task_timer(iscsi_task_t *task) +{ + int ret; + + DEBUG_QUEUE("iSCSI: del_task_timer %p deleting timer at %lu\n", task, jiffies); + + ret = del_timer_sync(&task->timer); + + task->timer.expires = 0; + task->timer.data = (unsigned long) NULL; + task->timer.function = NULL; + return ret; +} + + + +static void iscsi_command_times_out(unsigned long arg) +{ + Scsi_Cmnd *sc = (Scsi_Cmnd *)arg; + iscsi_session_t *session = (iscsi_session_t *)sc->SCp.ptr; + + /* we can safely use the session pointer, since during a session termination + * the rx thread will make sure all commands have been completed before it + * drops the session refcount. + */ + + if (session == NULL) + return; + + DEBUG_EH("iSCSI: session %p timer for command %p expired at %lu, retries %d, allowed %d\n", + session, sc, jiffies, sc->retries, sc->allowed); + + /* tell the tx thread that a command has timed out */ + set_bit(COMMAND_TIMEDOUT, command_flags(sc)); + smp_wmb(); + + set_bit(SESSION_COMMAND_TIMEDOUT, &session->control_bits); + smp_wmb(); + + /* wake up the tx thread to deal with the timeout */ + set_bit(TX_WAKE, &session->control_bits); + smp_mb(); + /* we can't know which wait_q the tx thread is in (if any), so wake them both */ + wake_up(&session->tx_wait_q); + wake_up(&session->login_wait_q); +} + +static void iscsi_task_times_out(unsigned long arg) +{ + iscsi_task_t *task = (iscsi_task_t *)arg; + iscsi_session_t *session = task->session; + + /* we can safely use the session pointer, since during a session termination + * the rx thread will make sure all tasks have been completed before it + * drops the session refcount. 
+ */ + if (session == NULL) + return; + + DEBUG_TIMEOUT("iSCSI: session %p timer for task %p expired at %lu\n", + session, task, jiffies); + + /* stop new tasks from being sent to this LUN (force error recovery) */ + set_bit(task->lun, session->luns_timing_out); + smp_wmb(); + + /* tell the tx thread that a task has timed out */ + set_bit(0, &task->timedout); + smp_wmb(); + + set_bit(SESSION_TASK_TIMEDOUT, &session->control_bits); + smp_wmb(); + + /* wake up the tx thread to deal with the timeout and possible error recovery */ + set_bit(TX_WAKE, &session->control_bits); + smp_mb(); + + /* we can't know which wait_q the tx thread is in (if any), so wake them both */ + wake_up(&session->tx_wait_q); + wake_up(&session->login_wait_q); +} + + +/* wake up the tx_thread without ever losing the wakeup event */ +static void wake_tx_thread(int control_bit, iscsi_session_t *session) +{ + /* tell the tx thread what to do when it wakes up. */ + set_bit(control_bit, &session->control_bits); + smp_wmb(); + + /* We make a condition variable out of a wait queue and atomic test&clear. + * May get spurious wake-ups, but no wakeups will be lost. + * this is cv_signal(). wait_event_interruptible is cv_wait(). + */ + set_bit(TX_WAKE, &session->control_bits); + smp_mb(); + + wake_up(&session->tx_wait_q); +} + +/* drop an iscsi session */ +static void iscsi_drop_session(iscsi_session_t *session) +{ + pid_t pid; + + DEBUG_INIT("iSCSI: iscsi_drop_session %p, rx %d, tx %d at %lu\n", + session, session->rx_pid, session->tx_pid, jiffies); + + set_bit(SESSION_DROPPED, &session->control_bits); /* so we know whether to abort the connection */ + session->session_drop_time = jiffies ? 
jiffies : 1; /* for replacement timeouts */ + smp_wmb(); + clear_bit(SESSION_ESTABLISHED, &session->control_bits); + smp_mb(); + + if ((pid = session->tx_pid)) + kill_proc(pid, SIGHUP, 1); + if ((pid = session->rx_pid)) + kill_proc(pid, SIGHUP, 1); + if ((pid = session->disk_init_pid)) + kill_proc(pid, SIGHUP, 1); + if ((pid = session->send_tur_pid)) + kill_proc(pid, SIGHUP, 1); +} + +/* caller must hold session->task_lock */ +static void iscsi_request_logout(iscsi_session_t *session, int logout, int logout_response) +{ + if (atomic_read(&session->num_active_tasks) == 0) { + DEBUG_INIT("iSCSI: session %p currently has no active tasks, queueing logout at %lu\n", + session, jiffies); + session->logout_response_deadline = jiffies + (logout_response * HZ); + if (session->logout_response_deadline == 0) + session->logout_response_deadline = 1; + smp_mb(); + set_bit(SESSION_LOGOUT_REQUESTED, &session->control_bits); + smp_mb(); + wake_tx_thread(TX_LOGOUT, session); + } + else { + session->logout_deadline = jiffies + (logout * HZ); + if (session->logout_deadline == 0) + session->logout_deadline = 1; + session->logout_response_deadline = session->logout_deadline + (logout_response * HZ); + if (session->logout_response_deadline == 0) + session->logout_response_deadline = 1; + smp_mb(); + set_bit(SESSION_LOGOUT_REQUESTED, &session->control_bits); + smp_mb(); + } +} + +/* Note: may acquire the task_lock */ +static void iscsi_terminate_session(iscsi_session_t *session) +{ + pid_t pid; + + if ((test_and_set_bit(SESSION_TERMINATING, &session->control_bits) == 0) && + test_bit(SESSION_ESTABLISHED, &session->control_bits)) + { + DEBUG_INIT("iSCSI: iscsi_terminate_session %p, requesting logout at %lu\n", + session, jiffies); + + /* on the first terminate request while the session is up, request a logout in the next 3 seconds */ + spin_lock(&session->task_lock); + iscsi_request_logout(session, 3, session->active_timeout); + spin_unlock(&session->task_lock); + } + else { + /* either 
we've already tried to terminate once, or the session is down. just kill everything. */ + clear_bit(SESSION_ESTABLISHED, &session->control_bits); + session->session_drop_time = jiffies ? jiffies : 1; + smp_mb(); + + DEBUG_INIT("iSCSI: iscsi_terminate_session %p, killing rx %d, tx %d at %lu\n", + session, session->rx_pid, session->tx_pid, jiffies); + + /* kill the session's threads */ + if ((pid = session->tx_pid)) + kill_proc(pid, SIGKILL, 1); + if ((pid = session->rx_pid)) + kill_proc(pid, SIGKILL, 1); + if ((pid = session->disk_init_pid)) + kill_proc(pid, SIGKILL, 1); + if ((pid = session->send_tur_pid)) + kill_proc(pid, SIGKILL, 1); + } +} + +/* if a signal is pending, deal with it, and return 1. + * Otherwise, return 0. + */ +static int iscsi_handle_signals(iscsi_session_t *session) +{ + pid_t pid; + int ret = 0; + + /* if we got SIGHUP, try to establish a replacement session. + * if we got SIGKILL, terminate this session. + */ + if (signal_pending(current)) { + LOCK_SIGNALS(); + + /* iscsi_drop_session and iscsi_terminate_session signal both + * threads, but someone logged in as root may not. So, we + * make sure whichever process gets signalled first propagates + * the signal when it looks like only one thread got + * signalled. + */ + + /* on SIGKILL, terminate the session */ + if (SIGNAL_IS_PENDING(SIGKILL)) { + + /* + * FIXME: We don't terminate the sessions if "/" is iSCSI disk + * Need to fix this for other iSCSI targets. 
+ */ + + if (!session->this_is_root_disk || iscsi_system_is_rebooting) { + if (!test_and_set_bit(SESSION_TERMINATING, &session->control_bits)) { + if ((pid = session->tx_pid) && (pid != current->pid)) { + printk("iSCSI: rx thread %d received SIGKILL, killing tx thread %d\n", current->pid, pid); + kill_proc(pid, SIGKILL, 1); + } + if ((pid = session->rx_pid) && (pid != current->pid)) { + printk("iSCSI: tx thread %d received SIGKILL, killing rx thread %d\n", current->pid, pid); + kill_proc(pid, SIGKILL, 1); + } + if ((pid = session->send_tur_pid) && (pid != current->pid)) { + printk("iSCSI: rx thread %d received SIGKILL, killing test unit ready thread %d\n", current->pid, pid); + kill_proc(pid, SIGKILL, 1); + } + if ((pid = session->disk_init_pid) && (pid != current->pid)) { + printk("iSCSI: rx thread %d received SIGKILL, killing disk init thread %d\n", current->pid, pid); + kill_proc(pid, SIGKILL, 1); + } + } + ret = 1; + } + } + /* on SIGHUP, drop the session, and try to establish a replacement session */ + if (SIGNAL_IS_PENDING(SIGHUP)) { + if (test_and_clear_bit(SESSION_ESTABLISHED, &session->control_bits)) { + if ((pid = session->tx_pid) && (pid != current->pid)) { + printk("iSCSI: rx thread %d received SIGHUP, signaling tx thread %d\n", current->pid, pid); + kill_proc(pid, SIGHUP, 1); + } + if ((pid = session->rx_pid) && (pid != current->pid)) { + printk("iSCSI: tx thread %d received SIGHUP, signaling rx thread %d\n", current->pid, pid); + kill_proc(pid, SIGHUP, 1); + } + if ((pid = session->send_tur_pid) && (pid != current->pid)) { + printk("iSCSI: rx thread %d received SIGHUP, killing test unit ready thread %d\n", current->pid, pid); + kill_proc(pid, SIGHUP, 1); + } + if ((pid = session->disk_init_pid) && (pid != current->pid)) { + printk("iSCSI: rx thread %d received SIGHUP, killing disk init thread %d\n", current->pid, pid); + kill_proc(pid, SIGHUP, 1); + } + } + ret = 1; + } + /* we don't care about any other signals */ + flush_signals(current); + 
UNLOCK_SIGNALS(); + } + + return ret; +} + +/* caller must hold the session's task_lock */ +static void trigger_error_recovery(iscsi_session_t *session, unsigned int lun) +{ + iscsi_task_t *t; + + /* stop new tasks from being sent to this LUN */ + set_bit(lun, session->luns_timing_out); + smp_wmb(); + + /* fake timeouts for all tasks to the specified LUN in order to trigger error recovery. */ + DEBUG_EH("iSCSI: session %p faking task timeouts to trigger error recovery for LUN %u at %lu\n", + session, lun, jiffies); + + for (t = session->arrival_order.head; t; t = t->order_next) { + if ((t->lun == lun) && t->scsi_cmnd && !test_bit(0, &t->timedout)) { + DEBUG_EH("iSCSI: session %p faking timeout of itt %u, task %p, LUN %u, sc %p at %lu\n", + session, t->itt, t, t->lun, t->scsi_cmnd, jiffies); + + /* make the command look like it timedout */ + del_task_timer(t); + set_bit(0, &t->timedout); + /* ensure nothing will be completed until error recovery finishes */ + set_bit(lun, session->luns_doing_recovery); + } + } + smp_mb(); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); +} + +unsigned int iscsi_expected_data_length(Scsi_Cmnd *sc) +{ + unsigned int length = 0; + + if (sc == NULL) + return 0; + + switch (sc->cmnd[0]) { + case INQUIRY: + case REQUEST_SENSE: + length = sc->cmnd[4]; + return length; + case REPORT_LUNS: + length |= sc->cmnd[6] << 24; + length |= sc->cmnd[7] << 16; + length |= sc->cmnd[8] << 8; + length |= sc->cmnd[9]; + return length; + default: + return sc->request_bufflen; + } +} + + +/* compare against 2^31 */ +#define SNA32_CHECK 2147483648UL + +/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ +static int sna_lt(uint32_t n1, uint32_t n2) +{ + return ((n1 != n2) && + (((n1 < n2) && ((n2 - n1) < SNA32_CHECK)) || ((n1 > n2) && ((n2 - n1) < SNA32_CHECK)))); +} + +/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ +static int sna_lte(uint32_t n1, uint32_t n2) +{ + return ((n1 == n2) || + (((n1 < n2) && ((n2 - n1) < SNA32_CHECK)) || 
((n1 > n2) && ((n2 - n1) < SNA32_CHECK)))); +} + +/* difference isn't really a defined operation in SNA, but we'd like it so that + * we can determine how many commands can be queued to a session. + */ +static int cmdsn_window_size(uint32_t expected, uint32_t max) +{ + if ((expected <= max) && ((max - expected) < SNA32_CHECK)) { + return (max - expected + 1); + } + else if ((expected > max) && ((expected - max) < SNA32_CHECK)) { + /* window wraps around */ + return ((UINT32_MAX - expected) + 1 + max + 1); + } + else { + /* window closed, or numbers bogus */ + return 0; + } +} + +/* remember old peak cmdsn window size, and report the largest */ +static int max_tasks_for_session(iscsi_session_t *session) +{ + if (session->ExpCmdSn == session->MaxCmdSn + 1) + /* if the window is closed, report nothing, regardless of what it was in the past */ + return 0; + else if (session->last_peak_window_size < session->current_peak_window_size) + /* window increasing, so report the current peak size */ + return MIN(session->current_peak_window_size, ISCSI_CMDS_PER_LUN * session->num_luns); + else + /* window decreasing. report the previous peak size, in case it's + * a temporary decrease caused by the commands we're sending. + * we want to keep the right number of commands queued in the driver, + * ready to go as soon as they can. 
+ */ + return MIN(session->last_peak_window_size, ISCSI_CMDS_PER_LUN * session->num_luns); +} + +/* possibly update the ExpCmdSN and MaxCmdSN, and peak window sizes */ +static void updateSN(iscsi_session_t *session, UINT32 expcmdsn, UINT32 maxcmdsn) +{ + int window_size; + + /* standard specifies this check for when to update expected and max sequence numbers */ + if (!sna_lt(maxcmdsn, expcmdsn - 1)) { + if ((expcmdsn != session->ExpCmdSn) && !sna_lt(expcmdsn, session->ExpCmdSn)) { + session->ExpCmdSn = expcmdsn; + } + if ((maxcmdsn != session->MaxCmdSn) && !sna_lt(maxcmdsn, session->MaxCmdSn)) { + + session->MaxCmdSn = maxcmdsn; + + /* look for the peak window size */ + window_size = cmdsn_window_size(expcmdsn, maxcmdsn); + if (window_size > session->current_peak_window_size) + session->current_peak_window_size = window_size; + + /* age peak window size info */ + if (time_before(session->window_peak_check + (15 * HZ), jiffies)) { + session->last_peak_window_size = session->current_peak_window_size; + session->current_peak_window_size = window_size; + session->window_peak_check = jiffies; + } + + /* memory barrier for all of that */ + smp_mb(); + + /* wake the tx thread to try sending more commands */ + wake_tx_thread(TX_SCSI_COMMAND, session); + } + + /* record whether or not the command window for this session has closed, + * so that we can ping the target periodically to ensure we eventually + * find out that the window has re-opened. 
+ */ + if (maxcmdsn == expcmdsn - 1) { + /* record how many times this happens, to see how often we're getting throttled */ + session->window_closed++; + /* prepare to poll the target to see if the window has reopened */ + session->current_peak_window_size = 0; + session->last_window_check = jiffies; + smp_wmb(); + set_bit(SESSION_WINDOW_CLOSED, &session->control_bits); + smp_mb(); + DEBUG_QUEUE("iSCSI: session %p command window closed, ExpCmdSN %u, MaxCmdSN %u at %lu\n", + session, session->ExpCmdSn, session->MaxCmdSn, jiffies); + } + else if (test_bit(SESSION_WINDOW_CLOSED, &session->control_bits)) { + DEBUG_QUEUE("iSCSI: session %p command window opened, ExpCmdSN %u, MaxCmdSN %u at %lu\n", + session, session->ExpCmdSn, session->MaxCmdSn, jiffies); + clear_bit(SESSION_WINDOW_CLOSED, &session->control_bits); + smp_mb(); + } + else { + DEBUG_FLOW("iSCSI: session %p - ExpCmdSN %u, MaxCmdSN %u at %lu\n", + session, session->ExpCmdSn, session->MaxCmdSn, jiffies); + } + } +} + +/* add a session to some iSCSI HBA's collection of sessions. 
*/ +static int add_session(iscsi_session_t *session) +{ + iscsi_session_t *prior, *next; + iscsi_hba_t *hba; + int hba_number; + int channel_number; + int ret = 0; + int p; + DECLARE_NOQUEUE_FLAGS; + + /* find the HBA that has the desired iSCSI bus */ + hba_number = session->iscsi_bus / ISCSI_MAX_CHANNELS_PER_HBA; + channel_number = session->iscsi_bus % ISCSI_MAX_CHANNELS_PER_HBA; + + spin_lock(&iscsi_hba_list_lock); + hba = iscsi_hba_list; + while (hba && (hba_number-- > 0)) { + hba = hba->next; + } + + if (!hba) { + printk("iSCSI: couldn't find HBA with iSCSI bus %d\n", session->iscsi_bus); + spin_unlock(&iscsi_hba_list_lock); + return 0; + } + if (!test_bit(ISCSI_HBA_ACTIVE, &hba->flags)) { + printk("iSCSI: HBA %p is not active, can't add session %p\n", hba, session); + spin_unlock(&iscsi_hba_list_lock); + return 0; + } + if (!hba->host) { + printk("iSCSI: HBA %p has no host, can't add session %p\n", hba, session); + spin_unlock(&iscsi_hba_list_lock); + return 0; + } + if (test_bit(ISCSI_HBA_RELEASING, &hba->flags)) { + printk("iSCSI: releasing HBA %p, can't add session %p\n", hba, session); + spin_unlock(&iscsi_hba_list_lock); + return 0; + } + if (test_bit(ISCSI_HBA_SHUTTING_DOWN, &hba->flags)) { + printk("iSCSI: HBA %p is shutting down, can't add session %p\n", hba, session); + spin_unlock(&iscsi_hba_list_lock); + return 0; + } + + SPIN_LOCK_NOQUEUE(&hba->session_lock); + + prior = NULL; + next = hba->session_list_head; + /* skip earlier channels */ + while (next && (next->channel < session->channel)) { + prior = next; + next = prior->next; + } + /* skip earlier targets on the same channel */ + while (next && (next->channel == session->channel) && (next->target_id < session->target_id)) { + prior = next; + next = prior->next; + } + + /* same Linux SCSI address? 
*/ + if (next && (next->channel == session->channel) && (next->target_id == session->target_id)) { + if (strcmp(next->TargetName, session->TargetName)) { + /* warn that some other target has it */ + printk("iSCSI: bus %d target %d is already claimed for %s, can't claim for %s\n", + session->iscsi_bus, next->target_id, session->TargetName, next->TargetName); + } + ret = 0; + } + else { + /* insert the session into the list */ + if ((session->next = next)) + next->prev = session; + else + hba->session_list_tail = session; + + if ((session->prev = prior)) + prior->next = session; + else + hba->session_list_head = session; + + session->hba = hba; + session->host_no = hba->host->host_no; + atomic_inc(&hba->num_sessions); + + /* log the session's bus, target id, TargetName, and all of + * the portals, so that the user has a record of what targets + * the kernel module was given. We do this with locks held so + * that no other session's info will get interleaved while + * we're printing this one's. + */ + printk("iSCSI: bus %d target %d = %s\n", session->iscsi_bus, session->target_id, session->TargetName); + for (p = 0; p < session->num_portals; p++) { + /* FIXME: IPv6 */ + printk("iSCSI: bus %d target %d portal %u = address %u.%u.%u.%u port %d group %d\n", + session->iscsi_bus, session->target_id, p, + session->portals[p].ip_address[0], session->portals[p].ip_address[1], + session->portals[p].ip_address[2], session->portals[p].ip_address[3], + session->portals[p].port, session->portals[p].tag); + } + + ret = 1; + } + + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + spin_unlock(&iscsi_hba_list_lock); + + return ret; +} + + +/* remove a session from an HBA's collection of sessions. + * caller must hold the HBA's session lock. 
+ */ +static int remove_session(iscsi_hba_t *hba, iscsi_session_t *session) +{ + if (session->hba && (hba != session->hba)) { + printk("iSCSI: tried to remove session %p from hba %p, but session->hba is %p\n", + session, hba, session->hba); + return 0; + } + + /* remove the session from the HBA */ + if (session == hba->session_list_head) { + if ((hba->session_list_head = session->next)) + hba->session_list_head->prev = NULL; + else + hba->session_list_tail = NULL; + } + else if (session == hba->session_list_tail) { + hba->session_list_tail = session->prev; + hba->session_list_tail->next = NULL; + } + else { + /* we should always be in the middle, + * but check pointers to make sure we don't crash the kernel + * if the function is called for a session not on the hba. + */ + if (session->next && session->prev) { + session->next->prev = session->prev; + session->prev->next = session->next; + } + else { + printk("iSCSI: failed to remove session %p from hba %p\n", + session, hba); + return 0; + } + } + session->prev = NULL; + session->next = NULL; + + return 1; +} + +static iscsi_session_t *find_session_for_cmnd(Scsi_Cmnd *sc) +{ + iscsi_session_t *session = NULL; + iscsi_hba_t *hba; + DECLARE_NOQUEUE_FLAGS; + + if (!sc->host) + return NULL; + + if (!sc->host->hostdata) + return NULL; + + hba = (iscsi_hba_t *)sc->host->hostdata; + + /* find the session for this command */ + /* FIXME: may want to cache the last session we looked for, since we'll often get + * burst of requests for the same session when multiple commands are queued. Would + * need to invalidate the cache when a session is removed from the HBA. 
+ */ + SPIN_LOCK_NOQUEUE(&hba->session_lock); + session = hba->session_list_head; + while (session && (session->channel != sc->channel || session->target_id != sc->target)) + session = session->next; + if (session) + atomic_inc(&session->refcount); /* caller must use drop_reference when it's done with the session */ + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + + return session; +} + +#if 0 +static iscsi_session_t *find_session_by_channel(unsigned int host_no, unsigned int channel, unsigned int target_id) +{ + iscsi_session_t *session = NULL; + iscsi_hba_t *hba; + DECLARE_NOQUEUE_FLAGS; + + spin_lock(&iscsi_hba_list_lock); + + hba = iscsi_hba_list; + while (hba && (hba->host_no != host_no)) { + hba = hba->next; + } + + /* find the session for this command */ + if (hba) { + SPIN_LOCK_NOQUEUE(&hba->session_lock); + session = hba->session_list_head; + while (session && (session->channel != channel || session->target_id != target_id)) + session = session->next; + if (session) + atomic_inc(&session->refcount); /* caller must use drop_reference when it's done with the session */ + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + } + + spin_unlock(&iscsi_hba_list_lock); + + return session; +} +#endif + +static iscsi_session_t *find_session_by_bus(int iscsi_bus, int target_id) +{ + iscsi_session_t *session = NULL; + iscsi_hba_t *hba; + unsigned int hba_index; + unsigned int channel; + DECLARE_NOQUEUE_FLAGS; + + /* compute the appropriate HBA and channel numbers */ + hba_index = iscsi_bus / ISCSI_MAX_CHANNELS_PER_HBA; + channel = iscsi_bus % ISCSI_MAX_CHANNELS_PER_HBA; + + spin_lock(&iscsi_hba_list_lock); + + hba = iscsi_hba_list; + while (hba && (hba_index-- > 0)) { + hba = hba->next; + } + + /* find the session for this command */ + if (hba) { + SPIN_LOCK_NOQUEUE(&hba->session_lock); + session = hba->session_list_head; + while (session && (session->channel != channel || session->target_id != target_id)) + session = session->next; + if (session) + atomic_inc(&session->refcount); 
/* caller must use drop_reference when it's done with the session */ + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + } + + spin_unlock(&iscsi_hba_list_lock); + + return session; +} + + +static void iscsi_task_ctor(void *obj, kmem_cache_t *cache, unsigned long flags) +{ + iscsi_task_t *task = (iscsi_task_t *)obj; + + memset(task, 0, sizeof(*task)); + task->flags = 0; + task->itt = RSVD_TASK_TAG; + task->ttt = RSVD_TASK_TAG; + task->mgmt_itt = RSVD_TASK_TAG; + task->next = task->prev = NULL; + task->order_next = task->order_prev = NULL; + init_timer(&task->timer); + atomic_set(&task->refcount, 0); +} + +static void delete_session(iscsi_session_t *session) +{ + unsigned int host, channel, target; + + host = session->host_no; + channel = session->channel; + target = session->target_id; + + if (session->preallocated_task) { + DEBUG_ALLOC("iSCSI: session %p for (%u %u %u *) freeing preallocated task %p to cache %p prior to deleting session\n", + session, host, channel, target, session->preallocated_task, session->hba->task_cache); + iscsi_task_ctor(session->preallocated_task, NULL, 0); + kmem_cache_free(session->hba->task_cache, session->preallocated_task); + session->preallocated_task = NULL; + } + + /* free the auth structures */ + if (session->auth_client_block) + kfree(session->auth_client_block); + if (session->auth_recv_string_block) + kfree(session->auth_recv_string_block); + if (session->auth_send_string_block) + kfree(session->auth_send_string_block); + if (session->auth_recv_binary_block) + kfree(session->auth_recv_binary_block); + if (session->auth_send_binary_block) + kfree(session->auth_send_binary_block); + + if (session->username) { + memset(session->username, 0, strlen(session->username)); + kfree(session->username); + session->username = NULL; + } + if (session->password) { + memset(session->password, 0, session->password_length); + kfree(session->password); + session->password = NULL; + } + if (session->username_in) { + memset(session->username_in, 0, 
strlen(session->username_in)); + kfree(session->username_in); + session->username_in = NULL; + } + if (session->password_in) { + memset(session->password_in, 0, session->password_length_in); + kfree(session->password_in); + session->password_in = NULL; + } + if (session->portals) { + kfree(session->portals); + session->portals = NULL; + } + if (session->InitiatorName) { + kfree(session->InitiatorName); + session->InitiatorName = NULL; + } + if (session->InitiatorAlias) { + kfree(session->InitiatorAlias); + session->InitiatorAlias = NULL; + } + + memset(session, 0, sizeof(*session)); + kfree(session); +} + +/* decrement the session refcount, and remove it and free it if the refcount hit zero */ +static void drop_reference(iscsi_session_t *session) +{ + iscsi_hba_t *hba; + DECLARE_NOQUEUE_FLAGS; + + if (!session) { + printk("iSCSI: bug - drop_reference(NULL)\n"); + return; + } + + if ((hba = session->hba)) { + /* may need to remove it from the HBA's session list */ + SPIN_LOCK_NOQUEUE(&hba->session_lock); + if (atomic_dec_and_test(&session->refcount)) { + if (remove_session(hba, session)) { + delete_session(session); + atomic_dec(&hba->num_sessions); + DEBUG_INIT("iSCSI: terminated and deleted session %p for (%u %u %u *)\n", + session, session->host_no, session->channel, session->target_id); + } + else { + printk("iSCSI: bug - failed to remove unreferenced session %p\n", session); + } + } + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + } + else { + /* session isn't in an HBA's list at the moment, so just check + * the refcount, and possibly free it. 
+ */ + if (atomic_dec_and_test(&session->refcount)) { + delete_session(session); + DEBUG_INIT("iSCSI: terminated and deleted session %p for (%u %u %u *)\n", + session, session->host_no, session->channel, session->target_id); + } + } +} + +/* must hold the task_lock to call this */ +static iscsi_task_t *find_session_task(iscsi_session_t *session, uint32_t itt) +{ + iscsi_task_t *task = session->arrival_order.head; + + while (task) { + if (task->itt == itt) { + DEBUG_FLOW("iSCSI: found itt %u, task %p, refcount %d\n", itt, task, atomic_read(&task->refcount)); + return task; + } + task = task->order_next; + } + + return NULL; +} + +/* must hold the task_lock to call this */ +static iscsi_task_t *find_session_mgmt_task(iscsi_session_t *session, uint32_t mgmt_itt) +{ + iscsi_task_t *task = session->arrival_order.head; + + while (task) { + if (task->mgmt_itt == mgmt_itt) { + DEBUG_FLOW("iSCSI: found mgmt_itt %u, task %p, refcount %d\n", + mgmt_itt, task, atomic_read(&task->refcount)); + return task; + } + task = task->order_next; + } + + return NULL; +} + + +#if 0 + +/* must hold the task_lock to call this */ +static iscsi_task_t *find_task(iscsi_task_collection_t *collection, uint32_t itt) +{ + iscsi_task_t *task = collection->head; + + while (task) { + if (task->itt == itt) { + DEBUG_FLOW("iSCSI: found itt %u, task %p, refcount %d\n", itt, task, atomic_read(&task->refcount)); + return task; + } + task = task->next; + } + + return NULL; +} + +/* don't actually use this at the moment */ +/* must hold the task_lock to call this */ +static iscsi_task_t *find_mgmt_task(iscsi_task_collection_t *collection, uint32_t mgmt_itt) +{ + iscsi_task_t *task = collection->head; + + while (task) { + if (task->mgmt_itt == mgmt_itt) { + DEBUG_FLOW("iSCSI: found mgmt_itt %u, task %p\n", mgmt_itt, task); + return task; + } + task = task->next; + } + + return NULL; +} + +#endif + +/* don't actually need this at the moment */ +/* must hold the task_lock to call this */ +static iscsi_task_t 
*find_task_for_cmnd(iscsi_session_t *session, Scsi_Cmnd *sc) +{ + iscsi_task_t *task = session->arrival_order.head; + + while (task) { + if (task->scsi_cmnd == sc) { + DEBUG_FLOW("iSCSI: found itt %u, task %p for cmnd %p\n", task->itt, task, sc); + return task; + } + task = task->order_next; + } + + return NULL; +} + +/* add a task to the collection. Must hold the task_lock to do this. */ +static void add_task(iscsi_task_collection_t *collection, iscsi_task_t *task) +{ + if (task->prev || task->next) + printk("iSCSI: bug - adding task %p, prev %p, next %p, to collection %p\n", + task, task->prev, task->next, collection); + + if (collection->head) { + task->next = NULL; + task->prev = collection->tail; + collection->tail->next = task; + collection->tail = task; + } + else { + task->prev = task->next = NULL; + collection->head = collection->tail = task; + } +} + +/* must hold the task_lock when calling this */ +static iscsi_task_t *pop_task(iscsi_task_collection_t *collection) +{ + iscsi_task_t *task = NULL; + + if ((task = collection->head)) { + /* pop the head */ + if ((collection->head = task->next)) + collection->head->prev = NULL; + else + collection->tail = NULL; + + /* and return it */ + task->prev = NULL; + task->next = NULL; + + return task; + } + + return NULL; +} + + +static void unlink_task(iscsi_task_collection_t *collection, iscsi_task_t *task) +{ + /* unlink the task from the collection */ + if (task == collection->head) { + if ((collection->head = task->next)) + collection->head->prev = NULL; + else + collection->tail = NULL; + } + else if (task == collection->tail) { + collection->tail = task->prev; + collection->tail->next = NULL; + } + else { + task->next->prev = task->prev; + task->prev->next = task->next; + } + task->next = NULL; + task->prev = NULL; +} + +/* if the task for the itt is found in the collection, remove it, and return it. + * otherwise, return NULL. Must hold the task_lock to call this. 
+ */ +static iscsi_task_t *remove_task(iscsi_task_collection_t *collection, uint32_t itt) +{ + iscsi_task_t *task = NULL; + iscsi_task_t *search = collection->head; + + while (search) { + if (search->itt == itt) { + task = search; + unlink_task(collection, task); + return task; + } + search = search->next; + } + + return NULL; +} + + +/* if the task for the itt is found in the collection, remove it, and return it. + * otherwise, return NULL. Must hold the task_lock to call this. + */ +static iscsi_task_t *remove_task_for_cmnd(iscsi_task_collection_t *collection, Scsi_Cmnd *sc) +{ + iscsi_task_t *task = NULL; + iscsi_task_t *search = collection->head; + + while (search) { + if (search->scsi_cmnd == sc) { + task = search; + unlink_task(collection, task); + return task; + } + search = search->next; + } + + return NULL; +} + + +/* caller must hold the session's scsi_cmnd_lock */ +static void print_session_cmnds(iscsi_session_t *session) +{ + Scsi_Cmnd *search = session->retry_cmnd_head; + printk("iSCSI: session %p retry cmnd queue: head %p, tail %p, num %u at %lu\n", + session, session->retry_cmnd_head, session->retry_cmnd_tail, + atomic_read(&session->num_retry_cmnds), jiffies); + while (search) { + printk("iSCSI: session %p retry cmnd %p: cdb 0x%x to (%u %u %u %u) flags 0x%01lx expires %lu\n", + session, search, search->cmnd[0], search->host->host_no, search->channel, search->target, search->lun, + (unsigned long)*command_flags(search), search->eh_timeout.expires); + search = (Scsi_Cmnd *)search->host_scribble; + } + + printk("iSCSI: session %p deferred cmnd queue: head %p, tail %p, num %u at %lu\n", + session, session->deferred_cmnd_head, session->deferred_cmnd_tail, + session->num_deferred_cmnds, jiffies); + search = session->deferred_cmnd_head; + while (search) { + printk("iSCSI: session %p deferred cmnd %p: cdb 0x%x to (%u %u %u %u) flags 0x%01lx expires %lu\n", + session, search, search->cmnd[0], search->host->host_no, search->channel, search->target, 
search->lun, + (unsigned long)*command_flags(search), search->eh_timeout.expires); + search = (Scsi_Cmnd *)search->host_scribble; + } + + printk("iSCSI: session %p normal cmnd queue: head %p, tail %p, num %u at %lu\n", + session, session->scsi_cmnd_head, session->scsi_cmnd_tail, + atomic_read(&session->num_cmnds), jiffies); + search = session->scsi_cmnd_head; + while (search) { + printk("iSCSI: session %p normal cmnd %p: cdb 0x%x to (%u %u %u %u) flags 0x%01lx expires %lu\n", + session, search, search->cmnd[0], search->host->host_no, search->channel, search->target, search->lun, + (unsigned long)*command_flags(search), search->eh_timeout.expires); + search = (Scsi_Cmnd *)search->host_scribble; + } +} + +/* caller must hold the session's task_lock */ +static void print_session_tasks(iscsi_session_t *session) +{ + iscsi_task_t *task = NULL; + Scsi_Cmnd *cmnd = NULL; + + printk("iSCSI: session %p task queue: head %p, tail %p, num %u at %lu\n", + session, session->arrival_order.head, session->arrival_order.tail, + atomic_read(&session->num_active_tasks), jiffies); + + task = session->arrival_order.head; + while (task) { + if ((cmnd = task->scsi_cmnd)) + printk("iSCSI: session %p task %p: itt %u flags 0x%04lx expires %lu %c, cmnd %p cdb 0x%x to (%u %u %u %u) flags 0x%01lx expires %lu\n", + session, task, task->itt, task->flags, task->timer.expires, test_bit(0, &task->timedout) ? 'T' : ' ', + cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, cmnd->lun, + (unsigned long)*command_flags(cmnd), cmnd->eh_timeout.expires); + else + printk("iSCSI: session %p task %p: itt %u flags 0x%04lx expires %lu timedout %u, cmnd NULL, LUN %u\n", + session, task, task->itt, task->flags, task->timer.expires, test_bit(0, &task->timedout) ? 
1 : 0, task->lun); + + task = task->order_next; + } +} + + + +/* caller must hold the session's task lock */ +static iscsi_task_t *alloc_task(iscsi_session_t *session) +{ + iscsi_task_t *task = NULL; + iscsi_hba_t *hba = session->hba; + + if (hba == NULL) { + printk("iSCSI: session %p alloc_task failed - NULL HBA\n", session); + return NULL; + } + else if (hba->task_cache == NULL) { + printk("iSCSI: session %p alloc_task failed - NULL HBA task cache\n", session); + return NULL; + } + + if ((task = kmem_cache_alloc(hba->task_cache, SLAB_ATOMIC))) { + session->tasks_allocated++; + task->session = session; + DEBUG_ALLOC("iSCSI: session %p allocated task %p from cache at %lu\n", session, task, jiffies); + } + else if (session->preallocated_task) { + /* if the task cache is empty, we fall back to the session's preallocated task, which + * guarantees us at least some forward progress on every session. + */ + task = session->preallocated_task; + session->preallocated_task = NULL; + task->session = session; + /* don't log by default. We're more concerned with when a + * task alloc fails than when we use the preallocated task. + */ + if (LOG_ENABLED(ISCSI_LOG_ALLOC)) + printk("iSCSI: session %p to (%u %u %u *) task cache empty, using preallocated task %p at %lu\n", + session, session->host_no, session->channel, session->target_id, task, jiffies); + } + else { + /* better luck later */ + task = NULL; + } + + return task; +} + +/* caller must hold the session's task lock */ +static void free_task(iscsi_session_t *session, iscsi_task_t *task) +{ + iscsi_hba_t *hba; + + if (! task) { + printk("iSCSI: free_task couldn't free NULL task\n"); + return; + } + if (! 
session) { + printk("iSCSI: free_task couldn't find session for task %p\n", task); + return; + } + hba = session->hba; + if (!hba) { + printk("iSCSI: free_task couldn't find HBA for task %p\n", task); + return; + } + + if (task->next || task->prev || task->order_next || task->order_prev) { + /* this is a memory leak, which is better than memory corruption */ + printk("iSCSI: bug - tried to free task %p with prev %p, next %p, order_prev %p, order_next %p\n", + task, task->prev, task->next, task->order_prev, task->order_next); + return; + } + + DEBUG_QUEUE("iSCSI: session %p free_task %p, itt %u\n", task->session, task, task->itt); + + if (test_bit(TASK_PREALLOCATED, &task->flags)) { + if (session->preallocated_task) { + printk("iSCSI: bug - session %p has preallocated task %p, really freeing %p itt %u flags 0x%0lx at %lu\n", + session, session->preallocated_task, task, task->itt, task->flags, jiffies); + + /* reinitialize the task for later use */ + iscsi_task_ctor(task, NULL, 0); + + kmem_cache_free(hba->task_cache, task); + } + else { + /* reinitialize the task for later use */ + iscsi_task_ctor(task, NULL, 0); + __set_bit(TASK_PREALLOCATED, &task->flags); + + /* save it for the next memory emergency */ + session->preallocated_task = task; + + /* wake up the tx thread, since it may have been forced to + * stop sending tasks once the preallocated task was in use. + * Now that the preallocated task is back, we can guarantee + * this session can allocate at least one more task. Too many + * wakeups is better than too few. 
+ */ + printk("iSCSI: session %p to (%u %u %u *) done using preallocated task %p at %lu\n", + session, session->host_no, session->channel, session->target_id, task, jiffies); + wake_tx_thread(TX_SCSI_COMMAND, session); + } + } + else { + session->tasks_freed++; + + /* reinitialize the task for later use */ + iscsi_task_ctor(task, NULL, 0); + + /* return it to the cache */ + kmem_cache_free(hba->task_cache, task); + } +} + + +/* As long as the tx thread is the only caller, no locking + * is required. If any other thread also needs to call this, + * then all callers must be changed to agree on some locking + * protocol. Currently, some but not all caller's are holding + * the session->task_lock. + */ +static inline uint32_t allocate_itt(iscsi_session_t *session) +{ + uint32_t itt = 0; + + if (session) { + itt = session->itt++; + /* iSCSI reserves 0xFFFFFFFF, this driver reserves 0 */ + if (session->itt == RSVD_TASK_TAG) + session->itt = 1; + } + return itt; +} + + +/* Caller must hold the session's task_lock. Associating a task with + * a session causes it to be completed on a session drop or target + * reset, along with all other session tasks, in the order they were + * added to the session. Preserving the ordering is required by the + * Linux SCSI architecture. Tasks that should not be completed to the + * Linux SCSI layer (because the eh_abort_handler has or will return + * SUCCESS for it) get removed from the session, though they may still + * be in various task collections so that PDUs relating to them can be + * sent or received. + */ +static void add_session_task(iscsi_session_t *session, iscsi_task_t *task) +{ + if (atomic_read(&session->num_active_tasks) == 0) { + /* session going from idle to active, pretend we just + * received something, so that the idle period before this doesn't + * cause an immediate timeout. 
+ */ + session->last_rx = jiffies; + smp_mb(); + } + atomic_inc(&session->num_active_tasks); + + /* set task info */ + task->session = session; + task->itt = allocate_itt(session); + + DEBUG_QUEUE("iSCSI: task %p allocated itt %u for command %p, session %p to %s\n", + task, task->itt, task->scsi_cmnd, session, session->log_name); + + /* add it to the session task ordering list */ + if (session->arrival_order.head) { + task->order_prev = session->arrival_order.tail; + task->order_next = NULL; + session->arrival_order.tail->order_next = task; + session->arrival_order.tail = task; + } + else { + task->order_prev = NULL; + task->order_next = NULL; + session->arrival_order.head = session->arrival_order.tail = task; + } + + DEBUG_FLOW("iSCSI: task %p, itt %u, added to session %p to %s\n", task, task->itt, session, session->log_name); +} + +static int remove_session_task(iscsi_session_t *session, iscsi_task_t *task) +{ + /* remove the task from the session's arrival_order collection */ + if (task == session->arrival_order.head) { + if ((session->arrival_order.head = task->order_next)) + session->arrival_order.head->order_prev = NULL; + else + session->arrival_order.tail = NULL; + } + else if (task == session->arrival_order.tail) { + session->arrival_order.tail = task->order_prev; + session->arrival_order.tail->order_next = NULL; + } + else { + /* we should always be in the middle, + * but check pointers to make sure we don't crash the kernel + * if the function is called for a task not in the session. 
+ */ + if (task->order_next && task->order_prev) { + task->order_next->order_prev = task->order_prev; + task->order_prev->order_next = task->order_next; + } + else { + printk("iSCSI: failed to remove itt %u, task %p from session %p to %s\n", + task->itt, task, session, session->log_name); + return 0; + } + } + task->order_prev = NULL; + task->order_next = NULL; + + if (atomic_dec_and_test(&session->num_active_tasks)) { + /* no active tasks, ready to logout */ + if (test_bit(SESSION_LOGOUT_REQUESTED, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p now has no active tasks, queueing logout at %lu\n", session, jiffies); + wake_tx_thread(TX_LOGOUT, session); + } + } + + return 1; +} + +static inline void set_not_ready(Scsi_Cmnd *sc) +{ + sc->sense_buffer[0] = 0x70; + sc->sense_buffer[2] = NOT_READY; + sc->sense_buffer[7] = 0x0; +} + +/* mark a Scsi_Cmnd as having a LUN communication failure */ +static inline void set_lun_comm_failure(Scsi_Cmnd *sc) +{ + sc->sense_buffer[0] = 0x70; + sc->sense_buffer[2] = NOT_READY; + sc->sense_buffer[7] = 0x6; + sc->sense_buffer[12] = 0x08; + sc->sense_buffer[13] = 0x00; +} + + +/* decode common network errno values into more useful strings. + * strerror would be nice right about now. 
+ */ +static char *iscsi_strerror(int errno) +{ + switch (errno) { + case EIO: + return "I/O error"; + case EINTR: + return "Interrupted system call"; + case ENXIO: + return "No such device or address"; + case EFAULT: + return "Bad address"; + case EBUSY: + return "Device or resource busy"; + case EINVAL: + return "Invalid argument"; + case EPIPE: + return "Broken pipe"; + case ENONET: + return "Machine is not on the network"; + case ECOMM: + return "Communication error on send"; + case EPROTO: + return "Protocol error"; + case ENOTUNIQ: + return "Name not unique on network"; + case ENOTSOCK: + return "Socket operation on non-socket"; + case ENETDOWN: + return "Network is down"; + case ENETUNREACH: + return "Network is unreachable"; + case ENETRESET: + return "Network dropped connection because of reset"; + case ECONNABORTED: + return "Software caused connection abort"; + case ECONNRESET: + return "Connection reset by peer"; + case ESHUTDOWN: + return "Cannot send after shutdown"; + case ETIMEDOUT: + return "Connection timed out"; + case ECONNREFUSED: + return "Connection refused"; + case EHOSTDOWN: + return "Host is down"; + case EHOSTUNREACH: + return "No route to host"; + default: + return ""; + } +} + + +static int iscsi_recvmsg( iscsi_session_t *session, struct msghdr *msg, int len ) +{ + int rc = 0; + mm_segment_t oldfs; + + if (session->socket) { + oldfs = get_fs(); + set_fs( get_ds() ); + + /* Try to avoid memory allocation deadlocks by using GFP_ATOMIC. */ + session->socket->sk->allocation = GFP_ATOMIC; + + rc = sock_recvmsg( session->socket, msg, len, MSG_WAITALL); + + set_fs( oldfs ); + } + + return rc; +} + +static int iscsi_sendmsg( iscsi_session_t *session, struct msghdr *msg, int len ) +{ + int rc = 0; + mm_segment_t oldfs; + + if (session->socket) { + oldfs = get_fs(); + set_fs( get_ds() ); + + /* Try to avoid resource acquisition deadlocks by using GFP_ATOMIC. 
*/ + session->socket->sk->allocation = GFP_ATOMIC; + + /* FIXME: ought to loop handling short writes, unless a signal occurs */ + rc = sock_sendmsg(session->socket, msg, len); + + set_fs( oldfs ); + } + + return rc; +} + + +/* create and connect a new socket for this session */ +int iscsi_connect(iscsi_session_t *session) +{ + mm_segment_t oldfs; + struct socket *socket = NULL; + struct sockaddr_in addr; + int window_size; + int arg = 1, arglen = 0; + int rc = 0, ret = 0; + + if (session->socket) { + printk("iSCSI: session %p already has socket %p\n", session, session->socket); + return 0; + } + + oldfs = get_fs(); + set_fs( get_ds() ); + + /* FIXME: sock_create may (indirectly) call the slab allocator with SLAB_KERNEL, + * which can fail if the cache needs to allocate another page. Should we preallocate + * a socket before starting the session, so that we have another to use if the + * first one drops? VM livelock can occur if the VM can't write to iSCSI disks when + * it needs to clean pages. To be useful it would have to work more than once, which + * means finding some way to safely allocate another socket for the next low-memory + * problem. A better solution would be to find a way to avoid freeing the current socket. + * If we abort the connection instead of close it, can we reuse the existing socket + * instead of allocating a new one? 
+ */ + rc = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &socket); + if (rc < 0) { + printk("iSCSI: session %p failed to create socket, rc %d\n", session, rc); + set_fs(oldfs); + return 0; + } + + /* no delay in sending */ + rc = socket->ops->setsockopt(socket, IPPROTO_TCP, TCP_NODELAY, (char *)&arg, sizeof(arg)); + if (rc < 0) { + printk("iSCSI: session %p failed to setsockopt TCP_NODELAY, rc %d\n", session, rc); + goto done; + } + + /* try to ensure a reasonably sized TCP window */ + arglen = sizeof(window_size); + if (sock_getsockopt(socket, SOL_SOCKET, SO_RCVBUF, (char *)&window_size, &arglen) >= 0) { + DEBUG_FLOW("iSCSI: session %p TCP recv window size %u\n", session, window_size); + + if (session->tcp_window_size && (window_size < session->tcp_window_size)) { + window_size = session->tcp_window_size; + rc = sock_setsockopt(socket, SOL_SOCKET, SO_RCVBUF, (char *)&window_size, sizeof(window_size)); + if (rc < 0) { + printk("iSCSI: session %p failed to set TCP recv window size to %u, rc %d\n", + session, window_size, rc); + } + else if (sock_getsockopt(socket, SOL_SOCKET, SO_RCVBUF, (char *)&window_size, &arglen) >= 0) { + DEBUG_INIT("iSCSI: session %p set TCP recv window size to %u, actually got %u\n", + session, session->tcp_window_size, window_size); + } + } + } + else { + printk("iSCSI: session %p getsockopt RCVBUF %p failed\n", session, socket); + } + + if (sock_getsockopt(socket, SOL_SOCKET, SO_SNDBUF, (char *)&window_size, &arglen) >= 0) { + DEBUG_FLOW("iSCSI: session %p TCP send window size %u\n", session, window_size); + + if (session->tcp_window_size && (window_size < session->tcp_window_size)) { + window_size = session->tcp_window_size; + rc = sock_setsockopt(socket, SOL_SOCKET, SO_SNDBUF, (char *)&window_size, sizeof(window_size)); + if (rc < 0) { + printk("iSCSI: session %p failed to set TCP send window size to %u, rc %d\n", + session, window_size, rc); + } + else if (sock_getsockopt(socket, SOL_SOCKET, SO_SNDBUF, (char *)&window_size, &arglen) >= 
0) { + DEBUG_INIT("iSCSI: session %p set TCP send window size to %u, actually got %u\n", + session, session->tcp_window_size, window_size); + } + } + } + else { + printk("iSCSI: session %p getsockopt SNDBUF %p failed\n", session, socket); + } + + /* connect to the target */ + addr.sin_family = AF_INET; + addr.sin_port = htons(session->port); + if (session->ip_length == 4) { + memcpy(&addr.sin_addr.s_addr, session->ip_address, MIN(sizeof(addr.sin_addr.s_addr), session->ip_length)); + } + else { + /* FIXME: IPv6 */ + printk("iSCSI: session %p unable to handle IPv6 address, length %u, addr %u.%u.%u.%u\n", session, + session->ip_length, session->ip_address[0], session->ip_address[1], session->ip_address[2], session->ip_address[3]); + goto done; + } + rc = socket->ops->connect(socket, (struct sockaddr *)&addr, sizeof(addr), 0); + + if (signal_pending(current)) + goto done; + + if (rc < 0) { + char *error = iscsi_strerror(-rc); + if (error && error[0] != '\0') { + printk("iSCSI: session %p to %s failed to connect, rc %d, %s\n", session, session->log_name, rc, error); + } + else { + printk("iSCSI: session %p to %s failed to connect, rc %d\n", session, session->log_name, rc); + } + if (this_is_iscsi_boot) { + while(!iscsi_set_if_addr()) { + printk("\niSCSI: iscsi_set_if_addr failed !!!\n"); + schedule_timeout(10 * HZ); + } + } + + } + else { + if (LOG_ENABLED(ISCSI_LOG_LOGIN)) + printk("iSCSI: session %p to %s connected at %lu\n", session, session->log_name, jiffies); + ret = 1; + } + + done: + if (ret) { + /* save the socket pointer for later */ + session->socket = socket; + } + else { + /* close the socket */ + sock_release(socket); + session->socket = NULL; + } + smp_mb(); + set_fs(oldfs); + return ret; +} + +void iscsi_disconnect(iscsi_session_t *session) +{ + if (session->socket) { +#if TCP_ABORT_ON_DROP + if (test_and_clear_bit(SESSION_DROPPED, &session->control_bits) && + !test_bit(SESSION_LOGGED_OUT, &session->control_bits)) + { + /* setting linger on and 
lingertime to 0 before closing + * the socket will trigger a TCP abort (abort all sends + * and receives, possibly send RST, connection to CLOSED), + * which is probably what we want if we're dropping and + * restarting a session. A TCP Abort will discard TCP + * data, which is probably a bunch of commands and data + * we'll resend on a new session anyway. This frees up + * skbuffs, and makes the VM livelock less likely. When + * we relogin again to the target with the same ISID, the + * target will kill off the old connections on its side, + * so the FIN handshake should be unnecessary, and there + * are cases where network failures may prevent the FIN + * handshake from completing, so the connection wouldn't + * get cleaned up unless the TCP stack has timeouts for + * some of the TCP states. + */ + struct linger ling; + mm_segment_t oldfs; + + memset(&ling, 0, sizeof(ling)); + ling.l_onoff = 1; + ling.l_linger = 0; + + /* we could adjust the socket linger values directly, but using the sockopt call + * is less likely to break if someone overhauls the socket structure. 
+ */ + oldfs = get_fs(); + set_fs(get_ds()); + + if (sock_setsockopt(session->socket, IPPROTO_TCP, SO_LINGER, (char *)&ling, sizeof(ling)) < 0) { + printk("iSCSI: session %p couldn't set lingertime to zero after session drop\n", session); + } + else { + DEBUG_INIT("iSCSI: session %p set lingertime to zero because of session drop\n", session); + } + + set_fs(oldfs); + } +#endif + + /* close the socket, triggering either a TCP close or a TCP abort */ + sock_release(session->socket); + + session->socket = NULL; + smp_mb(); + } +} + + +int iscsi_send_pdu(iscsi_session_t *session, struct IscsiHdr *pdu, char *data, int timeout) +{ + struct msghdr msg; + struct iovec iov[3]; + char padding[4]; + int pad = 0; + int rc; + int pdu_length = 0; + int data_length; + + if (pdu == NULL) { + printk("iSCSI: session %p, pdu NULL, can't send PDU header\n", session); + return 0; + } + + memset(iov, 0, sizeof(iov)); + memset(&msg, 0, sizeof(msg)); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + + /* pdu header */ + iov[0].iov_base = pdu; + iov[0].iov_len = sizeof(*pdu); + pdu_length = sizeof(*pdu); + + /* pdu data */ + data_length = ntoh24(pdu->dlength); + if (data) { + iov[msg.msg_iovlen].iov_base = data; + iov[msg.msg_iovlen].iov_len = data_length; + msg.msg_iovlen++; + pdu_length += ntoh24(pdu->dlength); + } + else if (data_length) { + printk("iSCSI: session %p pdu %p with dlength %d, but data NULL\n", + session, pdu, data_length); + return 0; + } + + /* add any padding needed */ + if (pdu_length % PAD_WORD_LEN) { + memset(padding, 0x0, sizeof(padding)); + pad = PAD_WORD_LEN - (pdu_length % PAD_WORD_LEN); + } + if (pad) { + iov[msg.msg_iovlen].iov_base = padding; + iov[msg.msg_iovlen].iov_len = pad; + msg.msg_iovlen++; + pdu_length += pad; + } + + /* set a timer, though we shouldn't really need one */ + if (timeout) { + session->login_phase_timer = jiffies + (timeout * HZ); + smp_mb(); + } + + if (LOG_ENABLED(ISCSI_LOG_LOGIN)) { + char *text = data; + char *end = text + 
ntoh24(pdu->dlength); + int show_text = 0; + + if ((pdu->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN_CMD) { + struct IscsiLoginHdr *login_pdu = (struct IscsiLoginHdr *)pdu; + /* show the login phases and tbit */ + printk("iSCSI: session %p sending login pdu with current phase %d, next %d, transit 0x%x, dlength %d at %lu, timeout at %lu (%d seconds)\n", + session, ISCSI_LOGIN_CURRENT_STAGE(login_pdu->flags), ISCSI_LOGIN_NEXT_STAGE(login_pdu->flags), + login_pdu->flags & ISCSI_FLAG_LOGIN_TRANSIT, ntoh24(pdu->dlength), + jiffies, session->login_phase_timer, session->login_timeout); + show_text = 1; + } + else if ((pdu->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_TEXT_CMD) { + printk("iSCSI: session %p sending text pdu, dlength %d at %lu, timeout at %lu (%d seconds)\n", + session, ntoh24(pdu->dlength), + jiffies, session->login_phase_timer, session->login_timeout); + show_text = 1; + } + else { + printk("iSCSI: session %p sending pdu with opcode 0x%x, dlength %d at %lu, timeout at %lu (%d seconds)\n", + session, pdu->opcode, ntoh24(pdu->dlength), jiffies, session->login_phase_timer, session->login_timeout); + } + + /* show all the text that we're sending */ + while (show_text && (text < end)) { + printk("iSCSI: session %p login text: %s\n", session, text); + text += strlen(text); + while ((text < end) && (*text == '\0')) + text++; + } + } + + rc = iscsi_sendmsg(session, &msg, pdu_length); + + /* clear the timer */ + session->login_phase_timer = 0; + smp_mb(); + + if (rc != pdu_length) { + char *error; + if ((rc < 0) && (error = iscsi_strerror(-rc)) && (error[0] != '\0')) + printk("iSCSI: session %p failed to send login PDU, rc %d, %s\n", session, rc, iscsi_strerror(-rc)); + else + printk("iSCSI: session %p failed to send login PDU, rc %d\n", session, rc); + + return 0; + } + + DEBUG_FLOW("iSCSI: session %p sent login pdu %p at %lu, length %d, dlength %d\n", + session, pdu, jiffies, pdu_length, ntoh24(pdu->dlength)); + + return 1; +} + +/* try to read an entire login PDU 
into the buffer, timing out after timeout seconds */ +int iscsi_recv_pdu(iscsi_session_t *session, struct IscsiHdr *header, int max_header_length, char *data, int max_data_length, int timeout) +{ + struct msghdr msg; + struct iovec iov[2]; + char padding[PAD_WORD_LEN]; + int rc = 0; + int data_length; + int ret = 0; + + if (header == NULL) { + printk("iSCSI: session %p, can't receive PDU header into NULL\n", session); + return 0; + } + + if (max_header_length < sizeof(*header)) { + printk("iSCSI: session %p, can't receive %Zu PDU header bytes into %d byte buffer\n", + session, sizeof(*header), max_header_length); + return 0; + } + + /* set the timer to implement the timeout requested */ + if (timeout) + session->login_phase_timer = jiffies + (timeout * HZ); + else + session->login_phase_timer = 0; + smp_mb(); + if (LOG_ENABLED(ISCSI_LOG_LOGIN)) { + printk("iSCSI: session %p trying to recv login pdu at %lu, timeout at %lu (%d seconds)\n", + session, jiffies, session->login_phase_timer, timeout); + } + + /* read the PDU header */ + memset(iov, 0, sizeof(iov)); + iov[0].iov_base = (void *)header; + iov[0].iov_len = sizeof(*header); + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + + rc = iscsi_recvmsg(session, &msg, sizeof(*header)); + + /* FIXME: check for additional header segments */ + + if (signal_pending(current)) { + printk("iSCSI: session %p recv_login_pdu timed out at %lu\n", session, jiffies); + goto done; + } + + if (rc != sizeof(*header)) { + if (rc < 0) { + char *error = iscsi_strerror(-rc); + if (error && error[0] != '\0') { + printk("iSCSI: session %p recv_login_pdu failed to recv %d login PDU bytes, rc %d, %s\n", + session, iov[0].iov_len, rc, iscsi_strerror(-rc)); + } + else { + printk("iSCSI: session %p recv_login_pdu failed to recv %d login PDU bytes, rc %d\n", + session, iov[0].iov_len, rc); + } + } + else if (rc == 0) { + printk("iSCSI: session %p recv_login_pdu: connection closed\n", session); + } + else { + /* 
short reads should be impossible unless a signal occured, + * which we already checked for. + */ + printk("iSCSI: bug - session %p recv_login_pdu, short read %d of %Zu\n", session, rc, sizeof(*header)); + } + goto done; + } + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + /* possibly read PDU data */ + data_length = ntoh24(header->dlength); + if (data_length) { + /* check for buffer overflow */ + if (data_length > max_data_length) { + printk("iSCSI: session %p recv_login_pdu can't read %d bytes of login PDU data, only %d bytes of buffer available\n", + session, data_length, max_data_length); + goto done; + } + + /* read the PDU's text data payload */ + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + + memset(iov, 0, sizeof(iov)); + iov[0].iov_base = data; + iov[0].iov_len = data_length; + + /* handle PDU padding */ + if (data_length % PAD_WORD_LEN) { + int pad = PAD_WORD_LEN - (data_length % PAD_WORD_LEN); + + iov[1].iov_base = padding; + iov[1].iov_len = pad; + msg.msg_iovlen = 2; + data_length += pad; + } + + rc = iscsi_recvmsg(session, &msg, data_length); + + if (signal_pending(current)) { + printk("iSCSI: session %p recv_login_pdu timed out at %lu\n", session, jiffies); + goto done; + } + + if (rc != data_length) { + if (rc < 0) { + char *error = iscsi_strerror(-rc); + if (error && error[0] != '\0') { + printk("iSCSI: session %p recv_login_pdu failed to recv %d login data PDU bytes, rc %d, %s\n", + session, data_length, rc, iscsi_strerror(-rc)); + } + else { + printk("iSCSI: session %p recv_login_pdu failed to recv %d login data PDU bytes, rc %d\n", + session, data_length, rc); + } + ret = rc; + } + else if (rc == 0) { + printk("iSCSI: session %p recv_login_pdu: connection closed\n", session); + } + else { + /* short reads should be impossible unless a signal occured, + * which we already checked for. 
+ */ + printk("iSCSI: bug - session %p recv_login_pdu, short read %d of %d\n", session, rc, data_length); + } + goto done; + } + + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + } + + if (LOG_ENABLED(ISCSI_LOG_LOGIN)) { + char *text = data; + char *end = text + ntoh24(header->dlength); + int show_text = 0; + + if (header->opcode == ISCSI_OP_LOGIN_RSP) { + struct IscsiLoginRspHdr *login_pdu = (struct IscsiLoginRspHdr *)header; + /* show the login phases and transit bit */ + printk("iSCSI: session %p received login pdu response at %lu with current stage %d, next %d, transit 0x%x, dlength %d\n", + session, jiffies, + ISCSI_LOGIN_CURRENT_STAGE(login_pdu->flags), ISCSI_LOGIN_NEXT_STAGE(login_pdu->flags), + login_pdu->flags & ISCSI_FLAG_LOGIN_TRANSIT, ntoh24(header->dlength)); + show_text = 1; + } + else if (header->opcode == ISCSI_OP_TEXT_RSP) { + printk("iSCSI: session %p received text pdu response with dlength %d at %lu\n", + session, ntoh24(header->dlength), jiffies); + show_text = 1; + } + else { + printk("iSCSI: session %p received pdu with opcode 0x%x, dlength %d at %lu\n", + session, header->opcode, ntoh24(header->dlength), jiffies); + } + + /* show all the text that we're sending */ + while (show_text && (text < end)) { + printk("iSCSI: session %p login resp text: %s\n", session, text); + text += strlen(text); + while ((text < end) && (*text == '\0')) + text++; + } + } + + ret = 1; + + done: + /* clear the timer */ + session->login_phase_timer = 0; + smp_mb(); + iscsi_handle_signals(session); + + return ret; +} + + +#if DEBUG_TRACE +static void +iscsi_fill_trace(unsigned char type, Scsi_Cmnd *sc, iscsi_task_t *task, unsigned long data1, unsigned long data2) +{ + iscsi_trace_entry_t *te; + cpu_flags_t flags; + + spin_lock_irqsave(&iscsi_trace_lock, flags); + + te = &trace_table[trace_index]; + trace_index++; + if ( trace_index >= ISCSI_TRACE_COUNT ) { + trace_index = 0; + } + memset(te, 0x0, sizeof(*te)); + + te->type 
= type; + if (task) { + iscsi_session_t *session = task->session; + + te->host = session->host_no; + te->channel = session->channel; + te->target = session->target_id; + te->lun = task->lun; + te->itt = task->itt; + } + if (sc) { + te->cmd = sc->cmnd[0]; + te->host = sc->host->host_no; + te->channel = sc->channel; + te->target = sc->target; + te->lun = sc->lun; + } + te->data1 = data1; + te->data2 = data2; + te->jiffies = jiffies; + + spin_unlock_irqrestore(&iscsi_trace_lock, flags); +} +#endif + +/* FIXME: update for 16 byte CDBs, such as: + lock unlock cache 16 + pre-fetch 16 + read 16 + rebuild 16 + regenerate 16 + synchronize cache 16 + verify 16 + write 16 + write and verify 16 + write same 16 + xdwrite extended 16 + + Then increase ISCSI_MAX_CMD_LEN to 16 in iscsi.h. +*/ +/* FIXME: for that matter, check the existing list for correctness */ +/* caller must either hold the task, or keep the task refcount non-zero while calling this */ +static int iscsi_set_direction(iscsi_task_t *task) +{ + if (task && task->scsi_cmnd) { + switch (task->scsi_cmnd->cmnd[0]) { + case TEST_UNIT_READY: + case START_STOP: + case REZERO_UNIT: + case WRITE_FILEMARKS: + case SPACE: + case ERASE: + case ALLOW_MEDIUM_REMOVAL: + /* just control commands */ + __set_bit(TASK_CONTROL, &task->flags); + return TASK_CONTROL; + case WRITE_6: case WRITE_10: case WRITE_12: + case 0x8a: /* WRITE_16 */ case 0x8e: /* write and verify 16 */ + case 0x93: /* write same 16 */ + case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER: + case WRITE_VERIFY: case WRITE_VERIFY_12: + case COMPARE: case COPY: case COPY_VERIFY: + case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: + case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12: + case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: + case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT: + case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK: + case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG: + case WRITE_LONG_2: + 
__set_bit(TASK_WRITE, &task->flags); + return TASK_WRITE; + default: + __set_bit(TASK_READ, &task->flags); + return TASK_READ; + } + } + + return -1; +} + +/* tagged queueing */ +static inline unsigned int iscsi_command_attr(Scsi_Cmnd *cmd) +{ + if (cmd->device && cmd->device->tagged_queue) { + switch (cmd->tag) { + case HEAD_OF_QUEUE_TAG: + return ISCSI_ATTR_HEAD_OF_QUEUE; + case ORDERED_QUEUE_TAG: + return ISCSI_ATTR_ORDERED; + default: + return ISCSI_ATTR_SIMPLE; + } + } + + return ISCSI_ATTR_UNTAGGED; +} + +static void print_cmnd(Scsi_Cmnd *sc) +{ +#ifdef HAS_CMND_REQUEST_STRUCT + struct request *req = &sc->request; + struct buffer_head *bh = NULL; +#endif + + printk("iSCSI: Scsi_Cmnd %p to (%u %u %u %u), Cmd 0x%x\n" + "iSCSI: done %p, scsi_done %p, host_scribble %p\n" + "iSCSI: reqbuf %p, req_len %u\n" + "iSCSI: buffer %p, bufflen %u\n" + "iSCSI: use_sg %u, old_use_sg %u, sglist_len %u\n" + "iSCSI: owner 0x%x, state 0x%x, eh_state 0x%x\n" + "iSCSI: cmd_len %u, old_cmd_len %u\n", + sc, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0], + sc->done, sc->scsi_done, sc->host_scribble, + sc->request_buffer, sc->request_bufflen, sc->buffer, sc->bufflen, + sc->use_sg, sc->old_use_sg, sc->sglist_len, + sc->owner, sc->state, sc->eh_state, + sc->cmd_len, sc->old_cmd_len); + + if (sc->cmd_len >= 12) + printk("iSCSI: cdb %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", + sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], + sc->cmnd[4], sc->cmnd[5], sc->cmnd[6], sc->cmnd[7], + sc->cmnd[8], sc->cmnd[9], sc->cmnd[10], sc->cmnd[11]); + else if (sc->cmd_len >= 10) + printk("iSCSI: cdb %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x\n", + sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], + sc->cmnd[4], sc->cmnd[5], sc->cmnd[6], sc->cmnd[7], + sc->cmnd[8], sc->cmnd[9]); + else if (sc->cmd_len >= 8) + printk("iSCSI: cdb %02x%02x%02x%02x %02x%02x%02x%02x\n", + sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], + sc->cmnd[4], sc->cmnd[5], sc->cmnd[6], 
sc->cmnd[7]); + else if (sc->cmd_len >= 6) + printk("iSCSI: cdb %02x%02x%02x%02x %02x%02x\n", + sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], + sc->cmnd[4], sc->cmnd[5]); + else if (sc->cmd_len >= 4) + printk("iSCSI: cdb %02x%02x%02x%02x\n", + sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3]); + else if (sc->cmd_len >= 2) + printk("iSCSI: cdb %02x%02x\n", sc->cmnd[0], sc->cmnd[1]); + + if (sc->use_sg && sc->request_buffer) { + struct scatterlist *sglist = (struct scatterlist *)sc->request_buffer; + int i; + + for (i = 0; i < sc->use_sg; i++) { +#if (HAS_SCATTERLIST_PAGE && HAS_SCATTERLIST_ADDRESS) + printk("iSCSI: sglist %p index %02d = addr %p, page %p, offset %u, len %u\n", + sglist, i, sglist->address, sglist->page, sglist->offset, sglist->length); +#elif HAS_SCATTERLIST_PAGE + printk("iSCSI: sglist %p index %02d = page %p, offset %u, len %u\n", + sglist, i, sglist->page, sglist->offset, sglist->length); +#else + printk("iSCSI: sglist %p index %02d = addr %p, len %u\n", + sglist, i, sglist->address, sglist->length); +#endif + sglist++; + } + } + +#ifdef HAS_CMND_REQUEST_STRUCT + /* and log the struct request so we can check consistency */ + printk("iSCSI: request status 0x%x, sector %lu, nr_sectors %lu, hard_sector %lu, hard_nr_sectors %lu\n" + "iSCSI: nr_segments %u, hard_nr_segments %u, current_nr_sectors %lu\n" + "iSCSI: special %p, buffer %p, bh %p, bhtail %p\n", + req->rq_status, req->sector, req->nr_sectors, req->hard_sector, req->hard_nr_sectors, + req->nr_segments, req->nr_hw_segments, req->current_nr_sectors, + req->special, req->buffer, req->bh, req->bhtail); + + for (bh = req->bh; bh; bh = bh->b_reqnext) { + printk("iSCSI: bh %p = rsector %lu, blocknr %lu, size %u, list %u, state 0x%lx, data %p, page %p\n", + bh, bh->b_rsector, bh->b_blocknr, bh->b_size, bh->b_list, bh->b_state, bh->b_data, bh->b_page); + } +#endif + + /* and log the scsi_request so we can check consistency */ + if (sc->sc_request) { + printk("iSCSI: Scsi_Request %p = sr_magic 
0x%x, sr_bufflen %u, sr_buffer %p, sr_allowed %u, sr_cmd_len %u\n" + "iSCSI: sr_use_sg %u, sr_sglist_len %u, sr_underflow %u\n", + sc->sc_request, + sc->sc_request->sr_magic, sc->sc_request->sr_bufflen, sc->sc_request->sr_buffer, sc->sc_request->sr_allowed, + sc->sc_request->sr_cmd_len, sc->sc_request->sr_use_sg, sc->sc_request->sr_sglist_len, sc->sc_request->sr_underflow); + + } +} + +static inline int add_cmnd(Scsi_Cmnd *sc, Scsi_Cmnd **head, Scsi_Cmnd **tail) +{ + sc->host_scribble = NULL; + + if (*head) { + (*tail)->host_scribble = (void *)sc; + *tail = sc; + } + else { + *tail = *head = sc; + } + + return 1; +} + + +static void request_command_retries(unsigned long arg) +{ + iscsi_session_t *session = (iscsi_session_t *)arg; + + DEBUG_RETRY("iSCSI: session %p retry timer expired at %lu\n", session, jiffies); + session->retry_timer.expires = 0; + smp_mb(); + wake_tx_thread(SESSION_RETRY_COMMANDS, session); +} + +/* try to queue one command retry for each LUN that needs one */ +static void iscsi_retry_commands(iscsi_session_t *session) +{ + Scsi_Cmnd *prior = NULL, *sc; + Scsi_Cmnd *retry_head = NULL, *retry_tail = NULL; + iscsi_task_t *task; + int num_retries = 0; + int l; + DECLARE_NOQUEUE_FLAGS; + + spin_lock(&session->task_lock); + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + + /* record which LUNs we're going to check for retries */ + memset(session->luns_checked, 0, sizeof(session->luns_checked)); + for (l = 0; l < ISCSI_MAX_LUN; l++) { + if (test_bit(l, session->luns_delaying_commands) && !test_bit(l, session->luns_timing_out)) + __set_bit(l, session->luns_checked); + } + + /* skip LUNs that already have outstanding tasks */ + for (task = session->arrival_order.head; task; task = task->next) { + __clear_bit(task->lun, session->luns_checked); + } + + /* skip LUNs that already have a retry queued */ + for (sc = session->retry_cmnd_head; sc; sc = (Scsi_Cmnd *)sc->host_scribble) { + __clear_bit(sc->lun, session->luns_checked); + } + + /* find the oldest 
deferred command to each of the LUNs we want to queue a retry to */ + while ((sc = session->deferred_cmnd_head)) { + if (test_bit(sc->lun, session->luns_checked)) { + /* pop this command off the head of the deferred queue */ + session->deferred_cmnd_head = (Scsi_Cmnd *)sc->host_scribble; + if (session->deferred_cmnd_head == NULL) + session->deferred_cmnd_tail = NULL; + session->num_deferred_cmnds--; + + /* queue it for retry */ + if (retry_head) { + retry_tail->next = sc; + retry_tail = sc; + } + else { + retry_head = retry_tail = sc; + } + sc->host_scribble = NULL; + num_retries++; + if (LOG_ENABLED(ISCSI_LOG_RETRY)) + printk("iSCSI: session %p queuing command %p cdb 0x%02x to (%u %u %u %u) for retry at %lu\n", + session, sc, sc->cmnd[0], session->host_no, session->channel, session->target_id, sc->lun, jiffies); + + /* and don't take any more commands for this LUN */ + __clear_bit(sc->lun, session->luns_checked); + } + else { + prior = sc; + break; + } + } + while (prior && (sc = (Scsi_Cmnd *)prior->host_scribble)) { + if (test_bit(sc->lun, session->luns_checked)) { + /* remove this command from the deferred queue */ + prior->host_scribble = sc->host_scribble; + if (session->deferred_cmnd_tail == sc) + session->deferred_cmnd_tail = prior; + session->num_deferred_cmnds--; + + /* queue it for retry */ + if (retry_head) { + retry_tail->next = sc; + retry_tail = sc; + } + else { + retry_head = retry_tail = sc; + } + sc->host_scribble = NULL; + num_retries++; + if (LOG_ENABLED(ISCSI_LOG_RETRY)) + printk("iSCSI: session %p queuing command %p cdb 0x%02x to (%u %u %u %u) for retry at %lu\n", + session, sc, sc->cmnd[0], session->host_no, session->channel, session->target_id, sc->lun, jiffies); + + /* and don't take any more commands for this LUN */ + __clear_bit(sc->lun, session->luns_checked); + } + else { + prior = sc; + } + } + + if (num_retries) { + /* append to the retry_cmnd queue */ + if (session->retry_cmnd_head) + session->retry_cmnd_tail->host_scribble = (void 
*)retry_head; + else + session->retry_cmnd_head = retry_head; + + session->retry_cmnd_tail = retry_tail; + atomic_add(num_retries, &session->num_retry_cmnds); + set_bit(TX_WAKE, &session->control_bits); + set_bit(TX_SCSI_COMMAND, &session->control_bits); + } + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + + if (session->num_luns_delaying_commands && (session->retry_timer.expires == 0)) { + /* as long as at least one LUN is delaying commands, we need to reset the timer */ + session->retry_timer.function = request_command_retries; + session->retry_timer.data = (unsigned long)session; + session->retry_timer.expires = jiffies + HZ; + add_timer(&session->retry_timer); + } + + spin_unlock(&session->task_lock); +} + +static void requeue_deferred_commands(iscsi_session_t *session, unsigned int lun) +{ + Scsi_Cmnd *cmnd, *prior, *requeue_head = NULL, *requeue_tail = NULL; + int num_requeued = 0; + + DEBUG_RETRY("iSCSI: session %p requeuing deferred commands for (%u %u %u %u) at %lu\n", + session, session->host_no, session->channel, session->target_id, lun, jiffies); + + prior = NULL; + while ((cmnd = session->deferred_cmnd_head)) { + if (cmnd->lun == lun) { + /* remove it from the deferred queue */ + session->deferred_cmnd_head = (Scsi_Cmnd *)cmnd->host_scribble; + if (session->deferred_cmnd_head == NULL) + session->deferred_cmnd_tail = NULL; + session->num_deferred_cmnds--; + cmnd->host_scribble = NULL; + + DEBUG_RETRY("iSCSI: session %p requeueing deferred command %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &requeue_head, &requeue_tail); + num_requeued++; + } + else { + prior = cmnd; + break; + } + } + while (prior && (cmnd = (Scsi_Cmnd *)prior->host_scribble)) { + if (cmnd->lun == lun) { + /* remove it from the deferred queue */ + prior->host_scribble = cmnd->host_scribble; + if (session->deferred_cmnd_tail == cmnd) + 
session->deferred_cmnd_tail = prior; + session->num_deferred_cmnds--; + cmnd->host_scribble = NULL; + + DEBUG_RETRY("iSCSI: session %p requeueing deferred command %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &requeue_head, &requeue_tail); + num_requeued++; + } + else { + prior = cmnd; + } + } + + if (requeue_head) { + requeue_tail->host_scribble = (void *)session->scsi_cmnd_head; + session->scsi_cmnd_head = requeue_head; + if (session->scsi_cmnd_tail == NULL) + session->scsi_cmnd_tail = requeue_tail; + atomic_add(num_requeued, &session->num_cmnds); + wake_tx_thread(TX_SCSI_COMMAND, session); + DEBUG_RETRY("iSCSI: session %p requeued %d deferred commands and woke tx thread at %lu\n", + session, num_requeued, jiffies); + } +} + +static int iscsi_disk_initialize(void *vtaskp); +static int iscsi_unit_ready(void *vtaskp); +/* caller must hold the task lock */ +static void process_task_response(iscsi_session_t *session, iscsi_task_t *task, + struct IscsiScsiRspHdr *stsrh, unsigned char *sense_data, int senselen) +{ + Scsi_Cmnd *sc = task->scsi_cmnd; + int needs_retry = 0; + int slow_retry = 0; + unsigned int expected = 0; + + DEBUG_FLOW("iSCSI: session %p recv_cmd - itt %u, task %p, cmnd %p, cdb 0x%x, cmd_len %d, rsp dlength %d, senselen %d\n", + session, task->itt, task, sc, sc->cmnd[0], sc->cmd_len, ntoh24(stsrh->dlength), senselen); + + /* default to just passing along the SCSI status. We may change this later */ + sc->result = HOST_BYTE(DID_OK) | STATUS_BYTE(stsrh->cmd_status); + + /* grab any sense data that came with the command. It could be + * argued that we should only do this if the SCSI status is check + * condition. It could also be argued that the target should only + * send sense if the SCSI status is check condition. 
If the + * target bothered to send sense, we pass it along, since it + * may indicate a problem, and it's safer to report a possible + * problem than it is to assume everything is fine. + */ + if (senselen) { + /* fill in the Scsi_Cmnd's sense data */ + memset(sc->sense_buffer, 0, sizeof(sc->sense_buffer)); + memcpy(sc->sense_buffer, sense_data, MIN(senselen, sizeof(sc->sense_buffer))); + + /* if sense data logging is enabled, or it's deferred + * sense that we're going to do something special with, + * or if it's an unexpected unit attention, which Linux doesn't + * handle well, log the sense data. + */ + if ((LOG_ENABLED(ISCSI_LOG_SENSE)) || + (((sense_data[0] == 0x71) || (sense_data[0] == 0xF1)) && translate_deferred_sense) || + ((SENSE_KEY(sense_data) == UNIT_ATTENTION) && (test_bit(SESSION_RESETTING, &session->control_bits) == 0))) + { + if (senselen >= 26) { + printk("iSCSI: session %p recv_cmd %p, cdb 0x%x, status 0x%x, response 0x%x, senselen %d, " + "key %02x, ASC/ASCQ %02X/%02X, itt %u task %p to (%u %u %u %u), %s\n" + "iSCSI: Sense %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x " + "%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x\n", + session, sc, sc->cmnd[0], stsrh->cmd_status, stsrh->response, senselen, + SENSE_KEY(sense_data), ASC(sense_data), ASCQ(sense_data), + task->itt, task, + sc->host->host_no, sc->channel, sc->target, sc->lun, session->log_name, + sense_data[0],sense_data[1],sense_data[2],sense_data[3], + sense_data[4],sense_data[5],sense_data[6],sense_data[7], + sense_data[8],sense_data[9],sense_data[10],sense_data[11], + sense_data[12],sense_data[13],sense_data[14],sense_data[15], + sense_data[16],sense_data[17],sense_data[18],sense_data[19], + sense_data[20],sense_data[21],sense_data[22],sense_data[23], + sense_data[24], sense_data[25]); + } + else if ( senselen >= 18) { + printk("iSCSI: session %p recv_cmd %p, cdb 0x%x, status 0x%x, response 0x%x, senselen %d, " + "key %02x, ASC/ASCQ %02X/%02X, itt %u task %p to (%u %u %u 
%u), %s\n" + "iSCSI: Sense %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x\n", + session, sc, sc->cmnd[0], stsrh->cmd_status, stsrh->response, senselen, + SENSE_KEY(sense_data), ASC(sense_data), ASCQ(sense_data), + task->itt, task, + sc->host->host_no, sc->channel, sc->target, sc->lun, session->log_name, + sense_data[0],sense_data[1],sense_data[2],sense_data[3], + sense_data[4],sense_data[5],sense_data[6],sense_data[7], + sense_data[8],sense_data[9],sense_data[10],sense_data[11], + sense_data[12],sense_data[13],sense_data[14],sense_data[15], + sense_data[16],sense_data[17]); + } + else { + printk("iSCSI: session %p recv_cmd %p, cdb 0x%x, status 0x%x, response 0x%x, senselen %d, key %02x, " + "itt %u task %p to (%u %u %u %u), %s\n" + "iSCSI: Sense %02x%02x%02x%02x %02x%02x%02x%02x\n", + session, sc, sc->cmnd[0], stsrh->cmd_status, stsrh->response, senselen, SENSE_KEY(sense_data), + task->itt, task, + sc->host->host_no, sc->channel, sc->target, sc->lun, session->log_name, + sense_data[0],sense_data[1],sense_data[2],sense_data[3], + sense_data[4],sense_data[5],sense_data[6],sense_data[7]); + } + } + } + else if ((stsrh->cmd_status == STATUS_CHECK_CONDITION) && (senselen == 0)) { + /* check condition with no sense. We need to avoid this, + * since the Linux SCSI code could put the command in SCSI_STATE_FAILED, + * which it's error recovery doesn't appear to handle correctly, and even + * if it does, we're trying to bypass all of the Linux error recovery code + * to avoid blocking all I/O to the HBA. Fake some sense data that gets + * a retry from Linux. 
+ */ + printk("iSCSI: session %p recv_cmd %p generating sense for itt %u, task %p, status 0x%x, response 0x%x, senselen %d, cdb 0x%x to (%u %u %u %u) at %lu\n", + session, sc, task->itt, task, stsrh->cmd_status, stsrh->response, senselen, + sc->cmnd[0], sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + + /* report a complete underflow */ + stsrh->residual_count = htonl(iscsi_expected_data_length(sc)); + stsrh->flags |= ISCSI_FLAG_CMD_UNDERFLOW; + + memset(sc->sense_buffer, 0, sizeof(sc->sense_buffer)); + sc->sense_buffer[0] = 0x70; + sc->sense_buffer[2] = ABORTED_COMMAND; /* so that scsi_check_sense always returns NEEDS_RETRY to scsi_decide_dispostion */ + sc->sense_buffer[7] = 0x6; + sc->sense_buffer[12] = 0x04; /* ASC/ASCQ 04/01 appears to always get a retry from scsi_io_completion, so we use that */ + sc->sense_buffer[13] = 0x01; + } +#if FAKE_NO_REPORT_LUNS + else if (test_bit(SESSION_ESTABLISHED, &session->control_bits) && sc && (senselen == 0) && + (sc->cmnd[0] == REPORT_LUNS) && + (stsrh->cmd_status == 0) && (stsrh->response == 0)) + { + printk("iSCSI: session %p faking failed REPORT_LUNS itt %u, CmdSN %u, task %p, sc %p, cdb 0x%x to (%u %u %u %u)\n", + session, itt, task->cmdsn, task, sc, sc->cmnd[0], + sc->host->host_no, sc->channel, sc->target, sc->lun); + + /* fake an illegal request check condition for this command. 
+ */ + sc->sense_buffer[0] = 0x70; + sc->sense_buffer[2] = ILLEGAL_REQUEST; + sc->sense_buffer[7] = 0x6; + sc->sense_buffer[12] = 0x20; /* INVALID COMMAND OPERATION CODE */ + sc->sense_buffer[13] = 0x00; + sc->result = HOST_BYTE(DID_OK) | STATUS_BYTE(0x02); + stsrh->cmd_status = 0x2; + stsrh->residual_count = htonl(iscsi_expected_data_length(sc)); + stsrh->flags |= ISCSI_FLAG_CMD_UNDERFLOW; + } +#endif +#if FAKE_PROBE_CHECK_CONDITIONS + else if (test_bit(SESSION_ESTABLISHED, &session->control_bits) && sc && (senselen == 0) && + (sc->scsi_done == iscsi_done) && (sc->retries <= 1) && + (stsrh->cmd_status == 0) && (stsrh->response == 0)) + { + printk("iSCSI: session %p faking failed probe itt %u, CmdSN %u, task %p, sc %p, cdb 0x%x to (%u %u %u %u)\n", + session, itt, task->cmdsn, task, sc, sc->cmnd[0], + sc->host->host_no, sc->channel, sc->target, sc->lun); + + /* fake an command aborted check condition to test the recovery of probe commands */ + sc->sense_buffer[0] = 0x70; + sc->sense_buffer[2] = NOT_READY; + sc->sense_buffer[7] = 0x6; + sc->sense_buffer[12] = 0x08; + sc->sense_buffer[13] = 0x00; + stsrh->cmd_status = 0x2; + stsrh->residual_count = htonl(iscsi_expected_data_length(sc)); + stsrh->flags |= ISCSI_FLAG_CMD_UNDERFLOW; + sc->result = HOST_BYTE(DID_OK) | STATUS_BYTE(0x02); + } +#endif +#if FAKE_PROBE_UNDERFLOW + else if (test_bit(SESSION_ESTABLISHED, &session->control_bits) && sc && (senselen == 0) && + (sc->scsi_done == iscsi_done) && (sc->retries <= 3) && + (stsrh->cmd_status == 0) && (stsrh->response == 0)) + { + printk("iSCSI: session %p faking probe underflow for itt %u, CmdSN %u, task %p, sc %p, cdb 0x%x to (%u %u %u %u)\n", + session, itt, task->cmdsn, task, sc, sc->cmnd[0], + sc->host->host_no, sc->channel, sc->target, sc->lun); + + stsrh->residual_count = htonl(iscsi_expected_data_length(sc)); + stsrh->flags |= ISCSI_FLAG_CMD_UNDERFLOW; + sc->resid = iscsi_expected_data_length(sc); + sc->result = HOST_BYTE(DID_OK) | STATUS_BYTE(0x0); + } +#endif + 
+ /* record the (possibly fake) status in the trace */ + ISCSI_TRACE( ISCSI_TRACE_RxCmdStatus, sc, task, stsrh->cmd_status, stsrh->response); + + if (senselen && ((sense_data[0] == 0x71) || (sense_data[0] == 0xF1)) && + sc->device && (sc->device->type == TYPE_DISK) && translate_deferred_sense) + { + printk("iSCSI: session %p recv_cmd %p translating deferred sense to current sense for itt %u\n", + session, sc, task->itt); + sc->sense_buffer[0] &= 0xFE; + } + + /* check for underflow and overflow */ + expected = iscsi_expected_data_length(sc); + if ((stsrh->flags & ISCSI_FLAG_CMD_OVERFLOW) || (stsrh->flags & ISCSI_FLAG_CMD_UNDERFLOW) || + ((test_bit(TASK_READ, &task->flags)) && (task->rxdata < expected))) + { + if (LOG_ENABLED(ISCSI_LOG_QUEUE) || LOG_ENABLED(ISCSI_LOG_FLOW) || + (senselen && (SENSE_KEY(sense_data) == UNIT_ATTENTION))) + { + /* for debugging, always log this for UNIT ATTENTION */ + /* FIXME: bidi flags as well someday */ + printk("iSCSI: session %p recv_cmd %p, itt %u, task %p to (%u %u %u %u), cdb 0x%x, %c%c %s, received %u, residual %u, expected %u\n", + session, sc, task->itt, task, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0], + (stsrh->flags & ISCSI_FLAG_CMD_OVERFLOW) ? 'O' : ' ', (stsrh->flags & ISCSI_FLAG_CMD_UNDERFLOW) ? 'U' : ' ', + (stsrh->flags & ISCSI_FLAG_CMD_OVERFLOW) ? "overflow" : "underflow", + task->rxdata, ntohl(stsrh->residual_count), expected); + } + +#ifdef DEBUG + /* FIXME: fake a bad driver or SCSI status if there is a + * residual for certain commands? The Linux high-level + * drivers appear to ignore the resid field. This may cause + * data corruption if a device returns a residual for a read + * or write command, but a good SCSI status and iSCSI + * response. The problem is that for some commands an + * underflow is normal, such as INQUIRY. We have to check the cdb + * to determine if an underflow should be translated to an error. 
+ * For now, just log about it, so we can see if the problem + * is ever occuring. + */ + switch (sc->cmnd[0]) { + case READ_6: + case READ_10: + case READ_12: + case WRITE_6: + case WRITE_10: + case WRITE_12: + if (sc->device && (sc->device->type == TYPE_DISK) && stsrh->residual_count && + (stsrh->response == 0) && (stsrh->cmd_status == 0)) + { + /* log if we get an underflow with a good status + * and response for data transfer commands, since + * Linux appears to ignore the residual field of + * Scsi_Cmnds, and only consider the data invalid + * if the driver of SCSI status was bad. + */ + printk("iSCSI: session %p task %p itt %u to (%u %u %u %u), cdb 0x%x, received %u, residual %u, expected %u, but normal status\n", + session, task, task->itt, sc->host->host_no, sc->channel, sc->target, sc->lun, + sc->cmnd[0], task->rxdata, ntohl(stsrh->residual_count), expected); + } + break; + } +#endif + + if (stsrh->flags & ISCSI_FLAG_CMD_UNDERFLOW) { + ISCSI_TRACE(ISCSI_TRACE_RxUnderflow, sc, task, ntohl(stsrh->residual_count), expected); + sc->resid = ntohl(stsrh->residual_count); + } + else if (stsrh->flags & ISCSI_FLAG_CMD_OVERFLOW) { + ISCSI_TRACE(ISCSI_TRACE_RxOverflow, sc, task, ntohl(stsrh->residual_count), expected); + sc->result = HOST_BYTE(DID_ERROR) | STATUS_BYTE(stsrh->cmd_status); + sc->resid = expected; + } + else if (task->rxdata < expected) { + /* All the read data did not arrive. This can happen without an underflow indication + * from the target if the data is discarded by the driver, due to failed sanity checks + * on the PDU or digest errors. 
+ */ + ISCSI_TRACE(ISCSI_TRACE_HostUnderflow, sc, task, task->rxdata, expected); + sc->resid = expected - task->rxdata; + } + } + + if (stsrh->response) { + needs_retry = 1; + slow_retry = 1; + + /* log when we transition from no transport errors to transport errors */ + if (__test_and_set_bit(sc->lun, session->luns_unreachable) == 0) { + printk("iSCSI: session %p recv_cmd %p, status 0x%x, iSCSI transport response 0x%x, itt %u, task %p to (%u %u %u %u) at %lu\n", + session, sc, stsrh->cmd_status, stsrh->response, task->itt, task, + sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + } + } + else { + /* log when we transition from transport errors to no transport errors */ + if (__test_and_clear_bit(sc->lun, session->luns_unreachable)) { + printk("iSCSI: session %p recv_cmd %p, status 0x%x, iSCSI transport response 0x%x, itt %u, task %p to (%u %u %u %u) at %lu\n", + session, sc, stsrh->cmd_status, stsrh->response, task->itt, task, + sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + } + + /* now we basically duplicate what scsi_decide_disposition and scsi_check_sense would have done + * if we completed the command, but we do it ourselves so that we can requeue internally. + */ + if ((stsrh->cmd_status == STATUS_BUSY) || (stsrh->cmd_status == STATUS_QUEUE_FULL)) { + /* slow retries, at least until a command completes */ + needs_retry = 1; + slow_retry = 1; + } + else if (stsrh->cmd_status == STATUS_CHECK_CONDITION) { + /* check conditions can only be retried if the command allows retries. + * Tapes for example, can't retry, since the tape head may have moved. + */ + /* FIXME: possible interactions with ACA. Do we need to complete the command + * back to the SCSI layer when ACA is enabled? 
+ */ + if (sc->allowed > 1) { + if (senselen == 0) { + /* for check conditions with no sense, fast retry when possible */ + needs_retry = 1; + } + else if ((sc->sense_buffer[0] & 0x70) == 0) { + /* check conditions with invalid sense */ + needs_retry = 1; + } + else if (sc->sense_buffer[2] & 0xe0) { + /* can't retry internally */ + /* FIXME: why not? what are these bits? */ + } + else if ((SENSE_KEY(sc->sense_buffer) == ABORTED_COMMAND)) { + needs_retry = 1; + } +#if PREVENT_HARDWARE_CRC_ERROR + else if ((SENSE_KEY(sc->sense_buffer) == HARDWARE_ERROR)) { + if (sc->SCp.sent_command < HARDWARE_CRC_ERROR_RETRIES) + needs_retry = 1; + } +#endif + else if ((SENSE_KEY(sc->sense_buffer) == MEDIUM_ERROR)) { + needs_retry = 1; + } + else if ((SENSE_KEY(sc->sense_buffer) == NOT_READY)) { + if ((ASC(sc->sense_buffer) == 0x04) && (ASCQ(sc->sense_buffer) == 0x01)) { + /* LUN in the process of becoming ready */ + needs_retry = 1; + slow_retry = 1; + } + if ((ASC(sc->sense_buffer) == 0x04) && (ASCQ(sc->sense_buffer) == 0x02)) { + /* LUN in the process of becoming ready */ + /* A DISK INITIALIZATION command needs to be transmitted */ + needs_retry = 1; + slow_retry = 1; + smp_mb(); + wake_tx_thread(DISK_INIT, session); + } + if ((sc->cmnd[0] == 0x1b) && (ASC(sc->sense_buffer) == 0x04) && (ASCQ(sc->sense_buffer) == 0x00)) { + /* LUN in the process of becoming ready */ + /* We have received a response for our DISK INITIALIZATION command and we are now sending TUR till the disk is READY + */ + needs_retry = 1; + slow_retry = 1; + smp_mb(); + wake_tx_thread(SEND_TUR, session); + } + } + + /* switch to slow retries if the fast retries don't seem to be working */ + if (needs_retry && (sc->SCp.sent_command > 10)) + slow_retry = 1; + } + } + } + + if (needs_retry && internally_retryable(sc)) { + /* need to requeue this command for a retry later. + * Philsophically we ought to complete the command and let the + * midlayer or high-level driver deal with retries. 
Since the + * way the midlayer does retries is undesirable, we instead + * keep the command in the driver, but requeue it for the same + * cases the midlayer checks for retries. This lets us ignore + * the command's retry count, and do retries until the command + * timer expires. + */ + sc->result = 0; + sc->resid = 0; + sc->SCp.sent_command++; /* count how many internal retries we've done */ + memset(sc->sense_buffer, 0, sizeof(sc->sense_buffer)); + __set_bit(TASK_NEEDS_RETRY, &task->flags); + + if (slow_retry) { + /* delay commands for slower retries */ + if (__test_and_set_bit(task->lun, session->luns_delaying_commands) == 0) { + /* FIXME: we don't want to log this if a QUEUE_FULL + * puts us in slow retries for a fraction of a second. + * Where can we record a per-Scsi_Device timestamp to + * use when deciding whether or not to log? In 2.5 + * we can put a pointer in Scsi_Device->hostdata, but + * 2.4 doesn't appear to give us any good hooks for + * deallocating that memory. There's no slave_destroy + * or slave_detach. + */ + DEBUG_RETRY("iSCSI: session %p starting to delay commands to (%u %u %u %u) at %lu\n", + session, session->host_no, session->channel, session->target_id, sc->lun, jiffies); + if (session->num_luns_delaying_commands == 0) { + session->retry_timer.data = (unsigned long)session; + session->retry_timer.expires = jiffies + HZ; + session->retry_timer.function = request_command_retries; + add_timer(&session->retry_timer); + DEBUG_RETRY("iSCSI: session %p starting retry timer at %lu\n", session, jiffies); + } + session->num_luns_delaying_commands++; + } + } + +#if RETRIES_BLOCK_DEVICES + /* try to stop the mid-layer from queueing any more commands to this LUN + * until a command completes, by setting sc->device->device_blocked. + */ + /* FIXME: locking? 
*/ + if (sc->device) { +# ifdef SCSI_DEFAULT_DEVICE_BLOCKED + sc->device->device_blocked = sc->device->max_device_blocked; +# else + sc->device->device_blocked = TRUE; +# endif + smp_mb(); + } +#endif + + /* FIXME: warn if the command's tag is ORDERED or HEAD_OF_QUEUE, since we're reordering + * commands by requeuing to the tail of the scsi_cmnd queue, rather than retrying + * this task and all younger tasks to this LUN. We emulate what the SCSI midlayer would + * do, even though what it does is probably broken if the command is ORDERED or HEAD_OF_QUEUE. + * We probably need something like ACA to make this work right, and it doesn't look + * like the midlayer uses ACA, but rather it just assumes everything is untagged or simple, + * so command reordering doesn't matter. If the midlayer ever changes, we'll need to make + * similar changes, or go back to actually completing the command back to the midlayer + * and letting it figure out how to retry. + */ + if (sc->tag == ORDERED_QUEUE_TAG) + printk("iSCSI: session %p retrying ORDERED command %p, possible reordering hazard at %lu\n", + session, sc, jiffies); + else if (sc->tag == HEAD_OF_QUEUE_TAG) + printk("iSCSI: session %p retrying HEAD_OF_QUEUE command %p, possible reordering hazard at %lu\n", + session, sc, jiffies); + + smp_mb(); + } + else { + /* if we're not retrying this command, we go back to full + * speed unless command timeouts have triggered or will + * trigger error recovery. 
+ */ + if (test_bit(task->lun, session->luns_delaying_commands)) { + __clear_bit(task->lun, session->luns_delaying_commands); + session->num_luns_delaying_commands--; + DEBUG_RETRY("iSCSI: session %p no longer delaying commands to (%u %u %u %u) at %lu\n", + session, session->host_no, session->channel, session->target_id, sc->lun, jiffies); + if (session->num_luns_delaying_commands == 0) { + del_timer_sync(&session->retry_timer); + clear_bit(SESSION_RETRY_COMMANDS, &session->control_bits); + DEBUG_RETRY("iSCSI: session %p stopping retry timer at %lu\n", session, jiffies); + } + if (!test_bit(task->lun, session->luns_timing_out)) { + DECLARE_NOQUEUE_FLAGS; + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + requeue_deferred_commands(session, task->lun); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + } + smp_mb(); + } + } + + ISCSI_TRACE( ISCSI_TRACE_RxCmd, sc, task, task->rxdata, expected); +} + +/* + * complete a task in the session's completing queue, and return a pointer to it, + * or NULL if the task could not be completed. Caller must hold the task_lock, + * but the lock is always released before returning. + */ +static void complete_task(iscsi_session_t *session, uint32_t itt) +{ + iscsi_task_t *task; + unsigned long last_log = 0; + int refcount; + DECLARE_MIDLAYER_FLAGS; + + while (!signal_pending(current)) { + DEBUG_QUEUE("iSCSI: session %p attempting to complete itt %u\n", session, itt); + + if ((task = find_session_task(session, itt))) { + Scsi_Cmnd *sc = task->scsi_cmnd; + + if (test_bit(SESSION_RESETTING, &session->control_bits)) { + /* we don't trust the target to give us correct responses once we've issued a reset. + * Ensure that none of the outstanding tasks complete. 
+ */ + spin_unlock(&session->task_lock); + DEBUG_EH("iSCSI: session %p can't complete itt %u, task %p, cmnd %p, reset in progress at %lu\n", + session, itt, task, sc, jiffies); + return; + } + else if (test_bit(task->lun, session->luns_doing_recovery)) { + /* don't complete any tasks once a LUN has started doing error recovery. + * Leave the recovery state as it is, since we may have an outstanding + * task mgmt PDU for this task. + */ + spin_unlock(&session->task_lock); + DEBUG_EH("iSCSI: session %p can't complete itt %u, task %p, cmnd %p, LUN %u doing error recovery at %lu\n", + session, itt, task, sc, task->lun, jiffies); + return; + } + + /* no need to do error recovery for this task */ + task->flags &= ~TASK_RECOVERY_MASK; + + /* it's possible the tx thread is using the task right now. + * the task's refcount can't increase while it's in the completing + * collection, so wait for the refcount to hit zero, or the task + * to leave the completing collection, whichever happens first. + */ + if ((refcount = atomic_read(&task->refcount)) == 0) { + /* this is the expected case */ +#if INCLUDE_DEBUG_EH + if (LOG_ENABLED(ISCSI_LOG_EH) && sc && (sc->cmnd[0] == TEST_UNIT_READY)) { + printk("iSCSI: completing TUR at %lu, itt %u, task %p, command %p, (%u %u %u %u), cdb 0x%x, result 0x%x\n", + jiffies, itt, task, sc, + sc->host->host_no, sc->channel, sc->target, sc->lun, + sc->cmnd[0], sc->result); + } + else +#endif + { +#if INCLUDE_DEBUG_QUEUE + if (LOG_ENABLED(ISCSI_LOG_QUEUE)) { + if (sc) + printk("iSCSI: completing itt %u, task %p, command %p, (%u %u %u %u), cdb 0x%x, done %p, result 0x%x\n", + itt, task, sc, + sc->host->host_no, sc->channel, sc->target, sc->lun, + sc->cmnd[0], sc->scsi_done, sc->result); + else + printk("iSCSI: completing itt %u, task %p, command NULL, (%u %u %u %u)\n", + itt, task, session->host_no, session->channel, session->target_id, task->lun); + } +#endif + } + + /* remove the task from the session, to ensure a + * session drop won't try to 
complete the task again. + */ + if (remove_session_task(session, task)) { + DEBUG_QUEUE("iSCSI: removed itt %u, task %p from session %p to %s\n", + task->itt, task, session, session->log_name); + } + + if (test_bit(task->lun, session->luns_timing_out)) { + /* this task may be the last thing delaying error recovery. + * make sure the tx thread scans tasks again. + */ + DEBUG_EH("iSCSI: session %p completing itt %u, task %p while LUN %u is timing out at %lu\n", + session, itt, task, task->lun, jiffies); + set_bit(SESSION_TASK_TIMEDOUT, &session->control_bits); + smp_mb(); + } + + /* this task no longer has a Scsi_Cmnd associated with it */ + task->scsi_cmnd = NULL; + if (sc) + sc->host_scribble = NULL; + + if (sc == NULL) { + /* already completed, nothing to do */ + printk("iSCSI: session %p already completed itt %u, task %p, (%u %u %u %u)\n", + session, itt, task, session->host_no, session->channel, session->target_id, task->lun); + + free_task(session, task); + } + else if (test_bit(TASK_NEEDS_RETRY, &task->flags)) { + DECLARE_NOQUEUE_FLAGS; + + /* done with this task */ + free_task(session, task); + + /* just requeue the task back to the scsi_cmnd queue so that it gets retried */ + DEBUG_RETRY("iSCSI: session %p requeueing itt %u task %p command %p cdb 0x%x for retry to (%u %u %u %u)\n", + session, itt, task, sc, sc->cmnd[0], sc->host->host_no, sc->channel, sc->target, sc->lun); + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + add_cmnd(sc, &session->scsi_cmnd_head, &session->scsi_cmnd_tail); + atomic_inc(&session->num_cmnds); + +#if RETRIES_BLOCK_DEVICES + /* try to prevent the midlayer from issuing more commands to this device + * until we complete a command for this device back to the midlayer. + * This hopefully keeps the midlayer queueing commands to other LUNs, + * rather than filling up the driver's limit of 64 with commands that + * we can't complete, which would effectively block other LUNs that + * are still working from getting any commands. 
+ */ + /* FIXME: locking? */ + if (sc->device) { +# ifdef SCSI_DEFAULT_DEVICE_BLOCKED + sc->device->device_blocked = sc->device->max_device_blocked; +# else + sc->device->device_blocked = TRUE; +# endif + smp_mb(); + } +#endif + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + wake_tx_thread(TX_SCSI_COMMAND, session); + } + else { + /* delete our command timer */ + del_command_timer(sc); + + /* we're completing it out of the driver */ + ISCSI_TRACE(ISCSI_TRACE_CmdDone, sc, task, sc->result, 0); + + /* done with this task */ + free_task(session, task); + + /* FIXME: if we want to get lots of retries for cases we don't retry internally, + * we'll need to conditionally alter sc->retries before completing the command. + */ + + if (sc->scsi_done == NULL) { + printk("iSCSI: no completion callback for command %p\n", sc); + } + else if (sc->scsi_done == iscsi_done) { + /* it came from iscsi-probe.c, and doesn't need a timer added or lock held */ + sc->scsi_done(sc); + } + else { + /* add a useless timer for the midlayer to delete */ + add_completion_timer(sc); + + /* tell the SCSI midlayer that the command is done */ + LOCK_MIDLAYER_LOCK(session->hba->host); + sc->scsi_done(sc); + UNLOCK_MIDLAYER_LOCK(session->hba->host); + } + + DEBUG_QUEUE("iSCSI: session %p completed itt %u, task %p, command %p, (%u %u %u %u), cdb 0x%x, result 0x%x\n", + session, itt, task, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0], sc->result); + } + + spin_unlock(&session->task_lock); + + return; + } + else { + /* task is still in use, can't complete it yet. Since + * this only happens when a command is aborted by the + * target unexpectedly, this error case can be slow. + * Just keep polling for the refcount to hit zero. If + * the tx thread is blocked while using a task, the + * timer thread will eventually send a signal to both + * the rx thread and tx thread, so this loop will + * terminate one way or another. 
+ */ + if ((last_log == 0) || time_before_eq(last_log + HZ, jiffies)) { + DEBUG_QUEUE("iSCSI: waiting to complete itt %u, task %p, cmnd %p, refcount %d\n", itt, task, sc, refcount); + } + + spin_unlock(&session->task_lock); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + + spin_lock(&session->task_lock); + } + } + else { + /* not a valid task */ + DEBUG_QUEUE("iSCSI: can't complete itt %u, task not found\n", itt); + spin_unlock(&session->task_lock); + return; + } + } + + printk("iSCSI: session %p complete_task %u failed at %lu\n", session, itt, jiffies); + spin_unlock(&session->task_lock); +} + + +static int iscsi_xmit_task_mgmt(iscsi_session_t *session, uint8_t func_type, iscsi_task_t *task, uint32_t mgmt_itt) +{ + struct msghdr msg; + struct iovec iov[2]; + int rc, wlen; + struct IscsiScsiTaskMgtHdr ststmh; + uint32_t crc32c; + + memset( &ststmh, 0, sizeof(ststmh) ); + ststmh.opcode = ISCSI_OP_SCSI_TASK_MGT_MSG | ISCSI_OP_IMMEDIATE; + ststmh.flags = ISCSI_FLAG_FINAL | (func_type & ISCSI_FLAG_TASK_MGMT_FUNCTION_MASK); + ststmh.rtt = RSVD_TASK_TAG; + ststmh.itt = htonl(mgmt_itt); + ststmh.cmdsn = htonl(session->CmdSn); /* CmdSN not incremented after imm cmd */ + ststmh.expstatsn = htonl(session->ExpStatSn); + + switch (func_type) { + case ISCSI_TM_FUNC_ABORT_TASK: + /* need a task for this */ + if (task) { + ststmh.refcmdsn = htonl(task->cmdsn); + ststmh.rtt = htonl(task->itt); + ststmh.lun[1] = task->lun; + ISCSI_TRACE(ISCSI_TRACE_TxAbort, task->scsi_cmnd, task, task->mgmt_itt, 0); + } + else { + printk("iSCSI: session %p failed to send abort, task unknown\n", session); + return 0; + } + break; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + /* need a LUN for this */ + if (task) { + ststmh.lun[1] = task->lun; + ISCSI_TRACE(ISCSI_TRACE_TxAbortTaskSet, task->scsi_cmnd, task, task->mgmt_itt, 0); + } + else { + printk("iSCSI: session %p failed to send abort task set, LUN unknown\n", session); + return 0; + } + break; + case 
ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + /* need a LUN for this */ + if (task) { + ststmh.lun[1] = task->lun; + ISCSI_TRACE(ISCSI_TRACE_TxLunReset, task->scsi_cmnd, task, task->mgmt_itt, 0); + } + else { + printk("iSCSI: session %p failed to send logical unit reset, no task\n", session); + return 0; + } + break; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + ISCSI_TRACE(ISCSI_TRACE_TxWarmReset, task ? task->scsi_cmnd : NULL, task, mgmt_itt, 0); + break; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + ISCSI_TRACE(ISCSI_TRACE_TxColdReset, task ? task->scsi_cmnd : NULL, task, mgmt_itt, 0); + break; + default: + printk("iSCSI: unknown task mgmt function type %u for session %p to %s\n", + func_type, session, session->log_name); + return 0; + break; + } + + iov[0].iov_base = &ststmh; + iov[0].iov_len = sizeof(ststmh); + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + wlen = sizeof(ststmh); + + /* HeaderDigests */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + crc32c = iscsi_crc32c(&ststmh, sizeof(ststmh)); + iov[msg.msg_iovlen].iov_base = &crc32c; + iov[msg.msg_iovlen].iov_len = sizeof(crc32c); + msg.msg_iovlen++; + wlen += sizeof(crc32c); + } + + rc = iscsi_sendmsg( session, &msg, wlen); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_task_mgmt failed, rc %d\n", session, rc); + iscsi_drop_session(session); + return 0; + } + + return 1; +} + +static void recheck_busy_commands(unsigned long arg) +{ + iscsi_session_t *session = (iscsi_session_t *)arg; + + session->busy_command_timer.expires = 0; + smp_mb(); + wake_tx_thread(SESSION_COMMAND_TIMEDOUT, session); +} + +static int process_timedout_commands(iscsi_session_t *session) +{ + iscsi_task_t *task, *next; + Scsi_Cmnd *fatal_head = NULL, *fatal_tail = NULL, *cmnd = NULL, *prior = NULL; + int busy = 0; + DECLARE_NOQUEUE_FLAGS; + + spin_lock(&session->task_lock); + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + + DEBUG_TIMEOUT("iSCSI: session %p processing timedout commands at %lu\n", session, 
jiffies); + clear_bit(SESSION_COMMAND_TIMEDOUT, &session->control_bits); + + /* by default, we can fail commands to any LUN */ + memset(session->luns_checked, 0xFF, sizeof(session->luns_checked)); + + DEBUG_TIMEOUT("iSCSI: session %p checking %d tasks for command timeouts at %lu\n", + session, atomic_read(&session->num_active_tasks), jiffies); + task = session->arrival_order.head; + while (task) { + next = task->order_next; + + if (task->scsi_cmnd && test_bit(COMMAND_TIMEDOUT, command_flags(task->scsi_cmnd))) { + if (atomic_read(&task->refcount) == 0) { + cmnd = task->scsi_cmnd; + task->scsi_cmnd = NULL; + + if (LOG_ENABLED(ISCSI_LOG_TIMEOUT)) + printk("iSCSI: session %p failing itt %u task %p cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu, retries %d, allowed %d\n", + session, task->itt, task, cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, cmnd->lun, + jiffies, cmnd->retries, cmnd->allowed); + add_cmnd(cmnd, &fatal_head, &fatal_tail); + } + else { + /* can't fail this command now, something may be using it's buffers. + * delay failing this command and all younger commands to this LUN. 
+ */ + DEBUG_TIMEOUT("iSCSI: session %p itt %u task %p cmnd %p is timedout but busy at %lu\n", + session, task->itt, task, task->scsi_cmnd, jiffies); + __clear_bit(task->lun, session->luns_checked); + busy = 1; + break; + } + } + + task = next; + } + + if (busy) { + /* schedule another scan in the near future */ + if ((session->busy_command_timer.expires == 0) && !test_bit(SESSION_TERMINATING, &session->control_bits)) { + session->busy_command_timer.expires = jiffies + MSECS_TO_JIFFIES(40); + session->busy_command_timer.data = (unsigned long)session; + session->busy_command_timer.function = recheck_busy_commands; + DEBUG_TIMEOUT("iSCSI: session %p scheduling busy command scan for %lu at %lu\n", + session, session->busy_command_timer.expires, jiffies); + del_timer_sync(&session->busy_command_timer); /* make sure it's not running now */ + add_timer(&session->busy_command_timer); + } + } + + + /* if any commands in the retry queue have TIMEDOUT, dequeue and fail them. */ + DEBUG_TIMEOUT("iSCSI: session %p checking %d retry commands for timeouts at %lu\n", + session, atomic_read(&session->num_retry_cmnds), jiffies); + prior = NULL; + while ((cmnd = session->retry_cmnd_head)) { + if (test_bit(COMMAND_TIMEDOUT, command_flags(cmnd))) { + /* remove it from the deferred queue */ + session->retry_cmnd_head = (Scsi_Cmnd *)cmnd->host_scribble; + if (session->retry_cmnd_head == NULL) + session->retry_cmnd_tail = NULL; + atomic_dec(&session->num_retry_cmnds); + cmnd->host_scribble = NULL; + + if (LOG_ENABLED(ISCSI_LOG_TIMEOUT)) + printk("iSCSI: session %p failing retryable command %p cdb 0x%02x to (%u %u %u %u) at %lu, retries %d, allowed %d\n", + session, cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, cmnd->lun, + jiffies, cmnd->retries, cmnd->allowed); + add_cmnd(cmnd, &fatal_head, &fatal_tail); + } + else { + prior = cmnd; + break; + } + } + while (prior && (cmnd = (Scsi_Cmnd *)prior->host_scribble)) { + if (test_bit(COMMAND_TIMEDOUT, 
command_flags(cmnd))) { + /* remove it from the deferred queue */ + prior->host_scribble = cmnd->host_scribble; + if (session->retry_cmnd_tail == cmnd) + session->retry_cmnd_tail = prior; + atomic_dec(&session->num_retry_cmnds); + cmnd->host_scribble = NULL; + + if (LOG_ENABLED(ISCSI_LOG_TIMEOUT)) + printk("iSCSI: session %p failing retryable command %p cdb 0x%02x to (%u %u %u %u) at %lu, retries %d, allowed %d\n", + session, cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, cmnd->lun, + jiffies, cmnd->retries, cmnd->allowed); + add_cmnd(cmnd, &fatal_head, &fatal_tail); + } + else { + prior = cmnd; + } + } + + + /* if any commands in the deferred queue have TIMEDOUT, dequeue and fail them */ + DEBUG_TIMEOUT("iSCSI: session %p checking %d deferred commands for timeouts at %lu\n", + session, session->num_deferred_cmnds, jiffies); + prior = NULL; + while ((cmnd = session->deferred_cmnd_head)) { + if (test_bit(COMMAND_TIMEDOUT, command_flags(cmnd))) { + /* remove it from the deferred queue */ + session->deferred_cmnd_head = (Scsi_Cmnd *)cmnd->host_scribble; + if (session->deferred_cmnd_head == NULL) + session->deferred_cmnd_tail = NULL; + session->num_deferred_cmnds--; + cmnd->host_scribble = NULL; + + if (LOG_ENABLED(ISCSI_LOG_TIMEOUT)) + printk("iSCSI: session %p failing deferred command %p cdb 0x%02x to (%u %u %u %u) at %lu, retries %d, allowed %d\n", + session, cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, cmnd->lun, + jiffies, cmnd->retries, cmnd->allowed); + add_cmnd(cmnd, &fatal_head, &fatal_tail); + } + else { + prior = cmnd; + break; + } + } + while (prior && (cmnd = (Scsi_Cmnd *)prior->host_scribble)) { + if (test_bit(COMMAND_TIMEDOUT, command_flags(cmnd))) { + /* remove it from the deferred queue */ + prior->host_scribble = cmnd->host_scribble; + if (session->deferred_cmnd_tail == cmnd) + session->deferred_cmnd_tail = prior; + session->num_deferred_cmnds--; + cmnd->host_scribble = NULL; + + if 
(LOG_ENABLED(ISCSI_LOG_TIMEOUT)) + printk("iSCSI: session %p failing deferred command %p cdb 0x%02x to (%u %u %u %u) at %lu, retries %d, allowed %d\n", + session, cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, cmnd->lun, + jiffies, cmnd->retries, cmnd->allowed); + add_cmnd(cmnd, &fatal_head, &fatal_tail); + } + else { + prior = cmnd; + } + } + + + /* if any commands in the normal queue have TIMEDOUT, dequeue and fail them */ + DEBUG_TIMEOUT("iSCSI: session %p checking %d normal commands for timeouts at %lu\n", + session, atomic_read(&session->num_cmnds), jiffies); + prior = NULL; + while ((cmnd = session->scsi_cmnd_head)) { + if (test_bit(COMMAND_TIMEDOUT, command_flags(cmnd))) { + /* remove it from the scsi_cmnd queue */ + session->scsi_cmnd_head = (Scsi_Cmnd *)cmnd->host_scribble; + if (session->scsi_cmnd_head == NULL) + session->scsi_cmnd_tail = NULL; + atomic_dec(&session->num_cmnds); + cmnd->host_scribble = NULL; + + if (LOG_ENABLED(ISCSI_LOG_TIMEOUT)) + printk("iSCSI: session %p failing normal command %p cdb 0x%02x to (%u %u %u %u) at %lu, retries %d, allowed %d\n", + session, cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, cmnd->lun, + jiffies, cmnd->retries, cmnd->allowed); + + /* and arrange for it to be completed with a fatal error */ + add_cmnd(cmnd, &fatal_head, &fatal_tail); + } + else { + prior = cmnd; + break; + } + } + while (prior && (cmnd = (Scsi_Cmnd *)prior->host_scribble)) { + if (test_bit(COMMAND_TIMEDOUT, command_flags(cmnd)) == 0) { + /* remove it from the scsi_cmnd queue. NOTE(review): the '== 0' above looks inverted -- the matching retry- and deferred-queue scans remove and fail commands only when COMMAND_TIMEDOUT IS set; as written this fails commands that have NOT timed out -- verify */ + prior->host_scribble = cmnd->host_scribble; + if (session->scsi_cmnd_tail == cmnd) + session->scsi_cmnd_tail = prior; + atomic_dec(&session->num_cmnds); + cmnd->host_scribble = NULL; + + if (LOG_ENABLED(ISCSI_LOG_TIMEOUT)) + printk("iSCSI: session %p failing normal command %p cdb 0x%02x to (%u %u %u %u) at %lu, retries %d, allowed %d\n", + session, cmnd, cmnd->cmnd[0], cmnd->host->host_no, cmnd->channel, cmnd->target, 
cmnd->lun, + jiffies, cmnd->retries, cmnd->allowed); + + /* and arrange for it to be completed with a fatal error */ + add_cmnd(cmnd, &fatal_head, &fatal_tail); + } + else { + prior = cmnd; + } + } + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + + /* if we have commands to fail back to the high-level driver with a fatal error, do so now */ + if (fatal_head) { + DECLARE_MIDLAYER_FLAGS; + + DEBUG_TIMEOUT("iSCSI: session %p completing timedout commands at %lu\n", session, jiffies); + + LOCK_MIDLAYER_LOCK(session->hba->host); + while ((cmnd = fatal_head)) { + fatal_head = (Scsi_Cmnd *)cmnd->host_scribble; + + cmnd->result = HOST_BYTE(DID_NO_CONNECT); + cmnd->resid = iscsi_expected_data_length(cmnd); + if (cmnd->allowed > 1) /* we've exhausted all retries */ + cmnd->retries = cmnd->allowed; + + set_not_ready(cmnd); /* fail the whole command now, rather than just 1 buffer head */ + + /* FIXME: if it's a disk write, take the device offline? + * We don't want the buffer cache data loss to occur silently, but + * offlining the device will break multipath drivers, and cause problems + * for future kernels that have the cache problem fixed. 
+ */ + if (cmnd->scsi_done) { + del_command_timer(cmnd); /* must have already started running, but may not have finished yet */ + add_completion_timer(cmnd); + cmnd->scsi_done(cmnd); + } + } + UNLOCK_MIDLAYER_LOCK(session->hba->host); + } + + spin_unlock(&session->task_lock); + + return 0; +} + +static void recheck_busy_tasks(unsigned long arg) +{ + iscsi_session_t *session = (iscsi_session_t *)arg; + + session->busy_task_timer.expires = 0; + smp_mb(); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); +} + + +static int process_timedout_tasks(iscsi_session_t *session) +{ + iscsi_task_t *task, *next, *t; + Scsi_Cmnd *requeue_head = NULL, *requeue_tail = NULL, *defer_head = NULL, *defer_tail = NULL; + Scsi_Cmnd *cmnd = NULL, *prior = NULL; + int luns_checked, luns_recovering, tasks_recovering = 0; + int num_requeue_cmnds = 0, num_deferred_cmnds = 0; + int l, busy = 0; + DECLARE_NOQUEUE_FLAGS; + + spin_lock(&session->task_lock); + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p processing timedout tasks at %lu\n", session, jiffies); + + do { + if (signal_pending(current)) + break; /* the session drop will take care of everything */ + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + break; /* the session termination will take care of everything */ + + /* calculate the state of each LUN based on the tasks, so that + * we know how to deal with the tasks and commands + * later on. 
+ */ + + clear_bit(SESSION_TASK_TIMEDOUT, &session->control_bits); + /* we could use per-LUN data structures instead of bitmaps for these */ + memset(session->luns_checked, 0, sizeof(session->luns_checked)); + memset(session->luns_needing_recovery, 0, sizeof(session->luns_needing_recovery)); + memset(session->luns_delaying_recovery, 0, sizeof(session->luns_delaying_recovery)); + luns_checked = 0; + luns_recovering = 0; + tasks_recovering = 0; + + if (test_bit(SESSION_RESETTING, &session->control_bits)) { + DEBUG_EH("iSCSI: session %p resetting, task timeout processing checking all LUNs\n", session); + + memset(session->luns_checked, 0xFF, sizeof(session->luns_checked)); + luns_checked += ISCSI_MAX_LUN; + } + else { + /* record which LUNS currently are timing out, + * so that we know which ones we've checked for recovery. + */ + for (l=0; l < ISCSI_MAX_LUN; l++) { + if (test_bit(l, session->luns_timing_out)) { + DEBUG_EH("iSCSI: session %p task timeout processing checking LUN %u\n", session, l); + __set_bit(l, session->luns_checked); + luns_checked++; + } + } + } + + /* scan all outstanding tasks to determine which LUNs need error recovery, + * and whether recovery must be delayed. + */ + for (task = session->arrival_order.head; task; task = task->order_next) { + if (test_bit(task->lun, session->luns_checked)) { + if (TASK_NEEDS_RECOVERY(task)) { + /* we must do error recovery for this LUN */ + tasks_recovering++; + if (__test_and_set_bit(task->lun, session->luns_needing_recovery)) + luns_recovering++; + + DEBUG_EH("iSCSI: session %p itt %u task %p sc %p LUN %u needs error recovery\n", + session, task->itt, task, task->scsi_cmnd, task->lun); + } + + if (!test_bit(0, &task->timedout)) { + /* don't do error recovery for this LUN while outstanding tasks + * have not yet completed or timed out. 
+ */ + __set_bit(task->lun, session->luns_delaying_recovery); + DEBUG_EH("iSCSI: session %p itt %u task %p sc %p has not timed out, delaying recovery for LUN %u\n", + session, task->itt, task, task->scsi_cmnd, task->lun); + } + else if (atomic_read(&task->refcount)) { + /* the task refcount may be non-zero if we're in + * the middle of sending or receiving data for + * this task. Make sure that we don't try to + * finish recovery and complete the task when it's + * in use. + */ + /* FIXME: we only want to delay finishing recovery for this LUN. + * we don't have to delay sending task mgmt PDUs for this task, + * though we currently do. + */ + __set_bit(task->lun, session->luns_needing_recovery); + __set_bit(task->lun, session->luns_delaying_recovery); + DEBUG_EH("iSCSI: session %p itt %u task %p sc %p has timed out but is busy, delaying recovery for LUN %u\n", + session, task->itt, task, task->scsi_cmnd, task->lun); + busy = 1; + } + else { + DEBUG_EH("iSCSI: session %p itt %u task %p sc %p has timed out\n", + session, task->itt, task, task->scsi_cmnd); + } + + /* Note: draft 16 - 9.5.1 says we MUST keep + * responding to valid target transfer tags, though we + * can terminate them early with the F-bit, and that + * the target must wait for all outstanding target + * transfer tags to complete before doing an abort + * task set. For simplicity's sake, we currently + * always continue responding to ttts, and send + * the actual data if we still have the command, + * or empty data PDUs if the command has already + * been completed out of the driver. + */ + } + } + + smp_mb(); + + } while (test_bit(SESSION_TASK_TIMEDOUT, &session->control_bits)); + + if (busy) { + /* either xmit_data invoked us with a task refcount held high, + * or the rx thread is in the middle of receiving data for + * a task. 
+ */ + if ((session->busy_task_timer.expires == 0) && !test_bit(SESSION_TERMINATING, &session->control_bits)) { + session->busy_task_timer.expires = jiffies + MSECS_TO_JIFFIES(40); + session->busy_task_timer.data = (unsigned long)session; + session->busy_task_timer.function = recheck_busy_tasks; + DEBUG_EH("iSCSI: session %p scheduling busy task scan for %lu at %lu\n", + session, session->busy_task_timer.expires, jiffies); + del_timer_sync(&session->busy_task_timer); /* make sure it's not running now */ + add_timer(&session->busy_task_timer); + } + } + + if (test_bit(SESSION_RESETTING, &session->control_bits)) { + if (!test_bit(SESSION_RESET, &session->control_bits)) { + /* don't complete anything if a reset is in progress but has not yet occurred */ + DEBUG_EH("iSCSI: session %p reset in progress at %lu, deferring recovery for all LUNs\n", session, jiffies); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + /* we may need to escalate a timedout reset though */ + goto error_recovery; + } + else if (busy) { + /* reset has finished, but a task is busy, complete everything later */ + DEBUG_EH("iSCSI: session %p reset complete but tasks busy at %lu, deferring recovery for all LUNs\n", session, jiffies); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + /* we may need to escalate a timedout reset though */ + goto error_recovery; + } + else { + /* go ahead and recover everything */ + DEBUG_EH("iSCSI: session %p reset complete at %lu, recovering tasks for all LUNs\n", session, jiffies); + } + } + + /* if we've cleared a LUN's problems, we need to requeue tasks and commands to that LUN */ + + /* process the tasks */ + DEBUG_EH("iSCSI: session %p checking %d tasks for recovery at %lu\n", + session, atomic_read(&session->num_active_tasks), jiffies); + task = session->arrival_order.head; + while (task) { + next = task->order_next; + + if (test_bit(task->lun, session->luns_checked) && !test_bit(task->lun, session->luns_needing_recovery)) { + /* we're done with this task */ + 
if (remove_task(&session->tx_tasks, task->itt)) { + DEBUG_EH("iSCSI: session %p task %p data transmit cancelled to LUN %u\n", + session, task, task->lun); + task->ttt = RSVD_TASK_TAG; + } + remove_session_task(session, task); + del_task_timer(task); + + /* and this task's command */ + if ((cmnd = task->scsi_cmnd)) { + /* clear any Scsi_Cmnd fields that may have been modified */ + memset(cmnd->sense_buffer, 0, sizeof(cmnd->sense_buffer)); + cmnd->result = 0; + cmnd->resid = 0; + cmnd->host_scribble = NULL; + /* prepare to requeue it someplace appropriate */ + if (test_bit(task->lun, session->luns_delaying_commands)) { + /* we need to defer this task's command and any commands + * for this LUN in the retry queue (since the tasks should + * be retried first). + */ + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p deferring itt %u task %p cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, task->itt, task, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &defer_head, &defer_tail); + num_deferred_cmnds++; + } + else { + /* requeue all tasks and retry commands back to the scsi_cmd queue. + * There may be a command retry queued even when the LUN isn't failing + * delivery, in cases where a command completion arrived during error + * recovery and cleared the failing_delivery bit. + */ + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p requeueing itt %u task %p cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, task->itt, task, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &requeue_head, &requeue_tail); + num_requeue_cmnds++; + } + } + + /* the task has a refcount of zero and has already been + * removed from the session, so we can safely free it + * now. 
+ */ + free_task(session, task); + } + + task = next; + } + + /* anything in the retry queue needs to get requeued along with the tasks, + * to avoid reordering commands. + */ + DEBUG_EH("iSCSI: session %p checking %d retry queue commands following task timeouts at %lu\n", + session, atomic_read(&session->num_retry_cmnds), jiffies); + prior = NULL; + while ((cmnd = session->retry_cmnd_head)) { + if (test_bit(cmnd->lun, session->luns_checked) && !test_bit(cmnd->lun, session->luns_needing_recovery)) { + /* remove it from the retry_cmnd queue */ + session->retry_cmnd_head = (Scsi_Cmnd *)cmnd->host_scribble; + if (session->retry_cmnd_head == NULL) + session->retry_cmnd_tail = NULL; + atomic_dec(&session->num_retry_cmnds); + cmnd->host_scribble = NULL; + + if (test_bit(task->lun, session->luns_delaying_commands)) { + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p deferring retryable cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &defer_head, &defer_tail); + num_deferred_cmnds++; + } + else { + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p requeueing retryable cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &requeue_head, &requeue_tail); + num_requeue_cmnds++; + } + } + else { + prior = cmnd; + break; + } + } + while (prior && (cmnd = (Scsi_Cmnd *)prior->host_scribble)) { + if (test_bit(cmnd->lun, session->luns_checked) && !test_bit(cmnd->lun, session->luns_needing_recovery)) { + /* remove it from the retry_cmnd queue */ + prior->host_scribble = cmnd->host_scribble; + if (session->retry_cmnd_tail == cmnd) + session->retry_cmnd_tail = prior; + atomic_dec(&session->num_retry_cmnds); + cmnd->host_scribble = NULL; + + if (test_bit(task->lun, session->luns_delaying_commands)) { + if 
(LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p deferring retryable cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &defer_head, &defer_tail); + num_deferred_cmnds++; + } + else { + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p requeueing retryable cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &requeue_head, &requeue_tail); + num_requeue_cmnds++; + } + } + else { + prior = cmnd; + } + } + + /* scan the deferred queue, moving commands to the requeue list unless + * the LUN is currently delaying commands. + */ + DEBUG_EH("iSCSI: session %p checking %d deferred queue commands following task timeouts at %lu\n", + session, session->num_deferred_cmnds, jiffies); + prior = NULL; + while ((cmnd = session->deferred_cmnd_head)) { + if (test_bit(cmnd->lun, session->luns_checked) && + !test_bit(cmnd->lun, session->luns_needing_recovery) && + !test_bit(cmnd->lun, session->luns_delaying_commands)) + { + /* remove it from the deferred_cmnd queue */ + session->deferred_cmnd_head = (Scsi_Cmnd *)cmnd->host_scribble; + if (session->deferred_cmnd_head == NULL) + session->deferred_cmnd_tail = NULL; + session->num_deferred_cmnds--; + cmnd->host_scribble = NULL; + + /* and requeue it to be sent */ + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p requeueing deferred cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &requeue_head, &requeue_tail); + num_requeue_cmnds++; + } + else { + prior = cmnd; + break; + } + } + while (prior && (cmnd = (Scsi_Cmnd *)prior->host_scribble)) { + if (test_bit(cmnd->lun, session->luns_checked) && + !test_bit(cmnd->lun, session->luns_needing_recovery) 
&& + !test_bit(cmnd->lun, session->luns_delaying_commands)) + { + /* remove it from the deferred_cmnd queue */ + prior->host_scribble = cmnd->host_scribble; + if (session->deferred_cmnd_tail == cmnd) + session->deferred_cmnd_tail = prior; + session->num_deferred_cmnds--; + cmnd->host_scribble = NULL; + + if (LOG_ENABLED(ISCSI_LOG_EH)) + printk("iSCSI: session %p requeueing deferred cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, cmnd, cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, cmnd->lun, jiffies); + add_cmnd(cmnd, &requeue_head, &requeue_tail); + num_requeue_cmnds++; + } + else { + prior = cmnd; + } + } + + if (requeue_head) { + /* requeue to the head of the scsi_cmnd queue */ + DEBUG_EH("iSCSI: session %p requeueing %d commands at %lu\n", session, num_requeue_cmnds, jiffies); + requeue_tail->host_scribble = (void *)session->scsi_cmnd_head; + session->scsi_cmnd_head = requeue_head; + if (session->scsi_cmnd_tail == NULL) + session->scsi_cmnd_tail = requeue_tail; + atomic_add(num_requeue_cmnds, &session->num_cmnds); + } + + if (defer_head) { + /* requeue to the head of the deferred_cmnd queue */ + DEBUG_EH("iSCSI: session %p deferring %d commands at %lu\n", session, num_deferred_cmnds, jiffies); + defer_tail->host_scribble = (void *)session->deferred_cmnd_head; + session->deferred_cmnd_head = defer_head; + if (session->deferred_cmnd_tail == NULL) + session->deferred_cmnd_tail = defer_tail; + session->num_deferred_cmnds += num_deferred_cmnds; + } + + /* we no longer need the scsi_cmnd lock */ + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + + /* clear bits and let I/O to these LUNs restart */ + if (test_and_clear_bit(SESSION_RESET, &session->control_bits)) { + printk("iSCSI: session %p (%u %u %u *) finished reset at %lu\n", + session, session->host_no, session->channel, session->target_id, jiffies); + for (l=0; l < ISCSI_MAX_LUN; l++) { + clear_bit(l, session->luns_doing_recovery); /* allow completion again */ + clear_bit(l, 
session->luns_timing_out); /* allow new tasks again */ + } + clear_bit(SESSION_RESETTING, &session->control_bits); + set_bit(TX_SCSI_COMMAND, &session->control_bits); + set_bit(TX_WAKE, &session->control_bits); + } + else { + for (l=0; l < ISCSI_MAX_LUN; l++) { + if (test_bit(l, session->luns_checked) && !test_bit(l, session->luns_needing_recovery)) { + printk("iSCSI: session %p (%u %u %u %u) finished error recovery at %lu\n", + session, session->host_no, session->channel, session->target_id, l, jiffies); + clear_bit(l, session->luns_doing_recovery); /* allow completion again */ + clear_bit(l, session->luns_timing_out); /* allow new tasks again */ + set_bit(TX_SCSI_COMMAND, &session->control_bits); + set_bit(TX_WAKE, &session->control_bits); + } + } + } + smp_mb(); + + error_recovery: + if (signal_pending(current)) { + DEBUG_EH("iSCSI: session %p signalled during timeout processing, skipping error recovery\n", session); + spin_unlock(&session->task_lock); + return 0; /* the session drop will take care of everything */ + } + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + DEBUG_EH("iSCSI: session %p terminating, skipping error recovery\n", session); + spin_unlock(&session->task_lock); + return 0; /* the session termination will take care of everything */ + } + + if (test_and_clear_bit(SESSION_TASK_MGMT_TIMEDOUT, &session->control_bits) && + ((task = find_session_mgmt_task(session, session->mgmt_itt)))) + { + /* a timeout has occured, escalate the task's recovery method, and quit waiting for a response */ + if (__test_and_clear_bit(TASK_TRY_ABORT, &task->flags)) + __set_bit(TASK_TRY_ABORT_TASK_SET, &task->flags); + else if (__test_and_clear_bit(TASK_TRY_ABORT_TASK_SET, &task->flags)) + __set_bit(TASK_TRY_LUN_RESET, &task->flags); + else if (__test_and_clear_bit(TASK_TRY_LUN_RESET, &task->flags)) + __set_bit(TASK_TRY_WARM_RESET, &task->flags); + else if (__test_and_clear_bit(TASK_TRY_WARM_RESET, &task->flags)) + __set_bit(TASK_TRY_COLD_RESET, 
&task->flags); + else { + printk("iSCSI: session %p cold reset timed out, dropping session at %lu\n", session, jiffies); + spin_unlock(&session->task_lock); + iscsi_drop_session(session); + return 0; + } + + session->mgmt_itt = task->mgmt_itt = RSVD_TASK_TAG; + } + + /* if tasks need recovery and we don't have an oustanding task mgmt PDU, send one */ + if (tasks_recovering && (session->mgmt_itt == RSVD_TASK_TAG)) { + + DEBUG_EH("iSCSI: session %p doing error recovery at %lu, %d tasks need recovery\n", + session, jiffies, tasks_recovering); + + /* send a PDU for the oldest TIMEDOUT task needing recovery + * for a LUN that needs error recovery and isn't delaying it. + */ + for (task = session->arrival_order.head; task; task = task->order_next) { + DEBUG_EH("iSCSI: session %p error recovery checking itt %u task %p LUN %u flags 0x%04lx\n", + session, task->itt, task, task->lun, task->flags); + + if (TASK_NEEDS_RECOVERY(task) && + test_bit(task->lun, session->luns_needing_recovery) && + !test_bit(task->lun, session->luns_delaying_recovery)) + { + break; + } + } + + if (task) { + /* prevent any command completions once we start error + * recovery for a LUN. We want to hang on to all of the + * tasks, so that we can complete them in order once error + * recovery finishes. 
+ */ + set_bit(task->lun, session->luns_doing_recovery); + + if (test_bit(TASK_TRY_ABORT, &task->flags)) { + session->mgmt_itt = task->mgmt_itt = allocate_itt(session); + if (session->abort_timeout) { + session->task_mgmt_response_deadline = jiffies + (session->abort_timeout * HZ); + if (session->task_mgmt_response_deadline == 0) + session->task_mgmt_response_deadline = 1; + } + atomic_inc(&task->refcount); + + if (task->scsi_cmnd) + printk("iSCSI: session %p sending mgmt %u abort for itt %u task %p cmnd %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, task->mgmt_itt, task->itt, task, task->scsi_cmnd, task->scsi_cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, task->lun, jiffies); + else + printk("iSCSI: session %p sending mgmt %u abort for itt %u task %p to (%u %u %u %u) at %lu\n", + session, task->mgmt_itt, task->itt, task, + session->host_no, session->channel, session->target_id, task->lun, jiffies); + + spin_unlock(&session->task_lock); + + iscsi_xmit_task_mgmt(session, ISCSI_TM_FUNC_ABORT_TASK, task, task->mgmt_itt); + atomic_dec(&task->refcount); + } + else if (test_bit(TASK_TRY_ABORT_TASK_SET, &task->flags)) { + session->mgmt_itt = task->mgmt_itt = allocate_itt(session); + if (session->abort_timeout) { + session->task_mgmt_response_deadline = jiffies + (session->abort_timeout * HZ); + if (session->task_mgmt_response_deadline == 0) + session->task_mgmt_response_deadline = 1; + } + atomic_inc(&task->refcount); + spin_unlock(&session->task_lock); + + printk("iSCSI: session %p sending mgmt %u abort task set to (%u %u %u %u) at %lu\n", + session, task->mgmt_itt, session->host_no, session->channel, session->target_id, task->lun, jiffies); + iscsi_xmit_task_mgmt(session, ISCSI_TM_FUNC_ABORT_TASK_SET, task, task->mgmt_itt); + atomic_dec(&task->refcount); + } + else if (test_bit(TASK_TRY_LUN_RESET, &task->flags)) { + session->mgmt_itt = task->mgmt_itt = allocate_itt(session); + if (session->reset_timeout) { + 
session->task_mgmt_response_deadline = jiffies + (session->reset_timeout * HZ); + if (session->task_mgmt_response_deadline == 0) + session->task_mgmt_response_deadline = 1; + } + atomic_inc(&task->refcount); + spin_unlock(&session->task_lock); + + printk("iSCSI: session %p sending mgmt %u LUN reset to (%u %u %u %u) at %lu\n", + session, task->mgmt_itt, session->host_no, session->channel, session->target_id, task->lun, jiffies); + iscsi_xmit_task_mgmt(session, ISCSI_TM_FUNC_LOGICAL_UNIT_RESET, task, task->mgmt_itt); + atomic_dec(&task->refcount); + } + else if (test_bit(TASK_TRY_WARM_RESET, &task->flags)) { + /* block any new tasks from starting and existing tasks from completing */ + set_bit(SESSION_RESETTING, &session->control_bits); + + for (t = session->arrival_order.head; t; t = t->order_next) { + DEBUG_EH("iSCSI: session %p warm target reset causing problems for LUN %u\n", session, task->lun); + set_bit(task->lun, session->luns_timing_out); + /* the task scans above assume that all tasks TIMEDOUT before error recovery + * could have killed the tasks. Make it look like all tasks have TIMEDOUT, + * so that the LUNs affected by the target reset can be recovered in the same + * way as usual. 
+ */ + del_task_timer(t); + set_bit(0, &task->timedout); + /* the task mgmt response will set SESSION_TASK_TIMEDOUT and ensure these get processed later */ + } + + session->mgmt_itt = task->mgmt_itt = allocate_itt(session); + if (session->reset_timeout) { + session->task_mgmt_response_deadline = jiffies + (session->reset_timeout * HZ); + if (session->task_mgmt_response_deadline == 0) + session->task_mgmt_response_deadline = 1; + } + atomic_inc(&task->refcount); + spin_unlock(&session->task_lock); + + printk("iSCSI: session %p sending mgmt %u warm target reset to (%u %u %u *) at %lu\n", + session, task->mgmt_itt, session->host_no, session->channel, session->target_id, jiffies); + iscsi_xmit_task_mgmt(session, ISCSI_TM_FUNC_TARGET_WARM_RESET, task, task->mgmt_itt); + atomic_dec(&task->refcount); + } + else if (test_bit(TASK_TRY_COLD_RESET, &task->flags)) { + + /* block any new tasks from starting and existing tasks from completing */ + set_bit(SESSION_RESETTING, &session->control_bits); + + for (t = session->arrival_order.head; t; t = t->order_next) { + DEBUG_EH("iSCSI: session %p cold target reset causing problems for LUN %u\n", session, task->lun); + set_bit(task->lun, session->luns_timing_out); + /* the task scans above assume that all tasks TIMEDOUT before error recovery + * could have killed the tasks. Make it look like all tasks have TIMEDOUT, + * so that the LUNs affected by the target reset can be recovered in the same + * way as usual. + */ + del_task_timer(t); + set_bit(0, &task->timedout); + /* the task mgmt response will set SESSION_TASK_TIMEDOUT and ensure these get processed later */ + } + + /* tell all devices attached to this target that a reset occured + * we do this now, since a cold reset should cause the target to drop + * the session, and we probably won't get a task mgmt response for + * a cold reset. + * FIXME: better to do this when the session actually drops? 
+ */ + target_reset_occured(session); + + /* this is our last resort, so force a 10 second deadline */ + session->task_mgmt_response_deadline = jiffies + (10 * HZ); + if (session->task_mgmt_response_deadline == 0) + session->task_mgmt_response_deadline = 1; + atomic_inc(&task->refcount); + spin_unlock(&session->task_lock); + + printk("iSCSI: session %p sending mgmt %u cold target reset to (%u %u %u *) at %lu\n", + session, task->mgmt_itt, session->host_no, session->channel, session->target_id, jiffies); + iscsi_xmit_task_mgmt(session, ISCSI_TM_FUNC_TARGET_COLD_RESET, task, task->mgmt_itt); + atomic_dec(&task->refcount); + } + } + else { + spin_unlock(&session->task_lock); + DEBUG_EH("iSCSI: session %p couldn't find a task ready for error recovery at %lu\n", session, jiffies); + } + } + else { + /* Either don't need or can't do any recovery right now. */ + spin_unlock(&session->task_lock); + } + + DEBUG_EH("iSCSI: session %p finished processing timedout commands at %lu\n", session, jiffies); + return 0; +} + + +static inline void *kmap_sg(struct scatterlist *sg); +static inline void *sg_virtual_address(struct scatterlist *sg); +static inline void kunmap_sg(struct scatterlist *sg); + +static int iscsi_xmit_task(iscsi_task_t *task) +{ + struct msghdr msg; + struct iovec iov[(ISCSI_MAX_SG+1+1)]; + struct IscsiScsiCmdHdr stsch; + int rc, wlen; + int remain; + iscsi_session_t *session = task->session; + Scsi_Cmnd *sc = task->scsi_cmnd; + unsigned int segment_offset = 0, index = 0; + uint32_t data_offset = 0; + int xfrlen = 0; + struct scatterlist *sglist = NULL, *sg, *first_sg = NULL, *last_sg = NULL; + int iovn = 0, first_data_iovn = 0; + int bytes_to_fill,bytes_from_segment; + int pad_bytes = 0; + char padding[4]; + uint32_t header_crc32c, data_crc32c; + +#if PREVENT_DATA_CORRUPTION + int last_data_iovn = 0; +#endif + + if (!task) { + printk("iSCSI: xmit_task NULL\n"); + return 0; + } + + if (!sc) { + printk("iSCSI: xmit_task %p, cmnd NULL\n", task); + return 0; + } + 
+ DEBUG_FLOW("iSCSI: xmit_task %p, itt %u to (%u %u %u %u), cdb 0x%x, cmd_len %u, bufflen %u\n", + task, task->itt, sc->host->host_no, sc->channel, sc->target,sc->lun, sc->cmnd[0], + sc->cmd_len, sc->request_bufflen); + + wlen = sizeof(stsch); + memset( &stsch, 0, sizeof(stsch) ); + + if (test_bit(TASK_READ, &task->flags)) { + /* read */ + stsch.flags |= ISCSI_FLAG_CMD_READ; + stsch.data_length = htonl(iscsi_expected_data_length(sc)); + } + if (test_bit(TASK_WRITE, &task->flags)) { + /* write */ + stsch.flags |= ISCSI_FLAG_CMD_WRITE; + stsch.data_length = htonl(iscsi_expected_data_length(sc)); + } + + /* tagged command queueing */ + stsch.flags |= (iscsi_command_attr(sc) & ISCSI_FLAG_CMD_ATTR_MASK); + + /* FIXME: if it's an untagged command, and we've already sent + * an untagged command to the LUN, don't send a 2nd untagged command. + * Leave it queued up and send it after the other command completes. + * We also don't want to block commands for other LUNs. Basically, + * we need a per-LUN command queue. For now, deal with it by + * setting the Scsi_Device queue_depth to 1 without TCQ. We can + * reduce latency by keeping multiple commands per LUN queued to + * the HBA, but only sending one. That takes a more code though. + */ + + stsch.opcode = ISCSI_OP_SCSI_CMD; + stsch.itt = htonl(task->itt); + task->cmdsn = session->CmdSn; + stsch.cmdsn = htonl(session->CmdSn); + stsch.expstatsn = htonl(session->ExpStatSn); + + /* set the final bit when there are no unsolicited Data-out PDUs following the command PDU */ + if (!test_bit(TASK_INITIAL_R2T, &task->flags)) + stsch.flags |= ISCSI_FLAG_FINAL; + /* FIXME: why does clearing the flags crash the kernel? 
*/ + + /* single level LUN format puts LUN in byte 1, 0 everywhere else */ + stsch.lun[1] = sc->lun; + + memcpy(stsch.scb, sc->cmnd, MIN(sizeof(stsch.scb), sc->cmd_len)); + + ISCSI_TRACE(ISCSI_TRACE_TxCmd, sc, task, session->CmdSn, ntohl(stsch.data_length)); + + /* FIXME: Sending ImmediateData along with the cmd PDU */ + + /* PDU header */ + iov[0].iov_base = &stsch; + iov[0].iov_len = sizeof(stsch); + iovn = 1; + wlen = sizeof(stsch); + + /* HeaderDigests */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + iov[1].iov_base = &header_crc32c; + iov[1].iov_len = sizeof(header_crc32c); + iovn = 2; + wlen += sizeof(header_crc32c); + } + + /* For ImmediateData, we need to compute the DataDigest also + */ + if (session->ImmediateData && (sc->sc_data_direction == SCSI_DATA_WRITE)) { + /* make sure we have data to send when we expect to */ + if (sc && (iscsi_expected_data_length(sc) == 0) && ((sc->request_bufflen == 0) || (sc->request_buffer == NULL))) { + printk("iSCSI: xmit_task for itt %u, task %p, sc %p, expected %u, no data in buffer\n" + " request_buffer %p len %u, buffer %p len %u\n", + task->itt, task, sc, iscsi_expected_data_length(sc), + sc->request_buffer, sc->request_bufflen, sc->buffer, sc->bufflen); + print_cmnd(sc); + return 0; + } + remain = 0; + /* Find the segment and offset within the segment to start writing from. 
*/ + if (sc && sc->use_sg) { + sg = sglist = (struct scatterlist *)sc->request_buffer; + segment_offset = data_offset; + for (index = 0; index < sc->use_sg; index++) { + if (segment_offset < sglist[index].length) + break; + else + segment_offset -= sglist[index].length; + } + if (index >= sc->use_sg) { + /* didn't find the offset, command will eventually timeout */ + printk("iSCSI: session %p xmit_data for itt %u couldn't find offset %u in sglist %p, sc %p, bufflen %u, use_sg %u\n", + session, task->itt, data_offset, sglist, sc, sc->request_bufflen, sc->use_sg); + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_OutOfData, sc, task, index, sc->use_sg); + return 0; + } + } + + first_data_iovn = iovn; + if (session->FirstBurstLength) + bytes_to_fill = MIN(session->FirstBurstLength, session->MaxXmitDataSegmentLength); + else + bytes_to_fill = session->MaxXmitDataSegmentLength; + bytes_to_fill = MIN(bytes_to_fill, sc->request_bufflen); + + /* check if we need to pad the PDU */ + + if (bytes_to_fill % PAD_WORD_LEN) { + pad_bytes = PAD_WORD_LEN - (bytes_to_fill % PAD_WORD_LEN); + memset(padding, 0x0, sizeof(padding)); + } else { + pad_bytes = 0; + } + + if (sc) { + /* find all the PDU data */ + if (sc->use_sg) { + /* while there is more data and we want to send more data */ + while (bytes_to_fill > 0) { + if (index >= sc->use_sg) { + printk("iSCSI: session %p xmit_data index %d exceeds sc->use_sg %d, bytes_to_fill %d, out of buffers\n", + session, index, sc->use_sg, bytes_to_fill); + /* the command will eventually timeout */ + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_OutOfData, sc, task, index, sc->use_sg); + goto done; + } + sg = &sglist[index]; + /* make sure the segment is mapped */ + if (!kmap_sg(sg)) { + printk("iSCSI: session %p xmit_data couldn't map segment %p\n", session, sg); + goto done; + } + else if (first_sg == NULL) { + first_sg = sg; + } + last_sg = sg; + /* sanity check the sglist segment length */ + if (sg->length <= segment_offset) { + /* the sglist is 
corrupt */ + printk("iSCSI: session %p xmit_data index %d, length %u too small for offset %u, bytes_to_fill %d, sglist has been corrupted\n", + session, index, sg->length, segment_offset, bytes_to_fill); + /* the command will eventually timeout */ + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_BadTxSeg, sc, task, sg->length, segment_offset); + goto done; + } + bytes_from_segment = sg->length - segment_offset; + if ( bytes_from_segment > bytes_to_fill ) { + /* only need part of this segment */ + iov[iovn].iov_base = sg->address + segment_offset; + iov[iovn].iov_len = bytes_to_fill; + xfrlen += bytes_to_fill; + DEBUG_FLOW("iSCSI: session %p xmit_data xfrlen %d, to_fill %d, from_segment %d, iov[%2d] = partial sg[%2d]\n", + session, xfrlen, bytes_to_fill, bytes_from_segment, iovn, index); + iovn++; + segment_offset += bytes_to_fill; + break; + } + else { + /* need all of this segment, and possibly more from the next */ + iov[iovn].iov_base = sg_virtual_address(sg) + segment_offset; + iov[iovn].iov_len = bytes_from_segment; + xfrlen += bytes_from_segment; + DEBUG_FLOW("iSCSI: session %p xmit_data xfrlen %d, to_fill %d, from_segment %d, iov[%2d] = sg[%2d]\n", + session, xfrlen, bytes_to_fill, bytes_from_s +egment, iovn, index); + bytes_to_fill -= bytes_from_segment; + iovn++; + /* any remaining data starts at offset 0 of the next segment */ + index++; + segment_offset = 0; + } + } + } + else { + /* no scatter-gather */ + if ((sc->request_buffer + data_offset + bytes_to_fill) <= (sc->request_buffer + sc->request_bufflen)) { + /* send all the data */ + iov[iovn].iov_base = sc->request_buffer + data_offset; + iov[iovn].iov_len = xfrlen = bytes_to_fill; + iovn++; + } + else if ((sc->request_buffer + data_offset) < (sc->request_buffer + sc->request_bufflen)) { + /* send some data, but can't send all requested */ + xfrlen = sc->request_bufflen - data_offset; + printk("iSCSI: xmit_data ran out of data, buffer %p len %u but offset %d length %d, sending final %d bytes\n", + 
sc->request_buffer, sc->request_bufflen, data_offset,bytes_to_fill, xfrlen); + iov[iovn].iov_base = sc->request_buffer + data_offset; + iov[iovn].iov_len = xfrlen; + iovn++; + stsch.flags = ISCSI_FLAG_FINAL; + remain = xfrlen; + } + else { + /* can't send any data */ + printk("iSCSI: xmit_data ran out of data, buffer %p len %u but offset %d length %d, sending no more data\n", + sc->request_buffer, sc->request_bufflen, data_offset,bytes_to_fill); + goto done; + } + } +#if PREVENT_DATA_CORRUPTION + last_data_iovn = iovn; +#endif + if (pad_bytes) { + iov[iovn].iov_base = padding; + iov[iovn].iov_len = pad_bytes; + iovn++; + wlen += pad_bytes; + } + } + + /* put the data length in the PDU header */ + hton24(stsch.dlength, xfrlen); + stsch.data_length = htonl(sc->request_bufflen); + wlen += xfrlen; + } + + /* header complete, we can finally calculate the HeaderDigest */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + header_crc32c = iscsi_crc32c(&stsch, sizeof(stsch)); + /* FIXME: this may not be SMP safe, but it's only for testing anyway, so it probably doesn't need to be */ + if (session->fake_write_header_mismatch > 0) { + session->fake_write_header_mismatch--; + smp_mb(); + printk("iSCSI: session %p faking HeaderDigest mismatch for itt %u, task %p\n", + session, task->itt, task); + header_crc32c = 0x01020304; + } + } + /* DataDigest */ + if (xfrlen && (session->DataDigest == ISCSI_DIGEST_CRC32C)) { + int i; + + data_crc32c = iscsi_crc32c(iov[first_data_iovn].iov_base, iov[first_data_iovn].iov_len); + for (i = first_data_iovn + 1; i < iovn; i++) { + data_crc32c = iscsi_crc32c_continued(iov[i].iov_base, iov[i].iov_len, data_crc32c); + } + + /* FIXME: this may not be SMP safe, but it's only for testing anyway, so it probably doesn't need to be */ + if (session->fake_write_data_mismatch > 0) { + session->fake_write_data_mismatch--; + smp_mb(); + printk("iSCSI: session %p faking DataDigest mismatch for itt %u, task %p\n", + session, task->itt, task); + 
data_crc32c = 0x01020304; + } + iov[iovn].iov_base = &data_crc32c; + iov[iovn].iov_len = sizeof(data_crc32c); + iovn++; + wlen += sizeof(data_crc32c); + } + + if (xfrlen && (session->DataDigest == ISCSI_DIGEST_CRC32C)) { +#if PREVENT_DATA_CORRUPTION + struct iovec data_iov; + int i; + + /* send header */ + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &iov[0]; + msg.msg_iovlen = 1; + wlen = iov[0].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + + /* send header digest */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &iov[1]; + msg.msg_iovlen = 1; + wlen = iov[1].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + + /* send data */ + for (i = first_data_iovn; i < last_data_iovn; i++) { + if (iov[i].iov_len > session->xmit_buffer_size) { + int j = 0; + + if (session->xmit_data_buffer) + kfree(session->xmit_data_buffer); + + session->xmit_buffer_size = iov[i].iov_len; + + do { + session->xmit_data_buffer = (unsigned char *) kmalloc(sizeof(char) * session->xmit_buffer_size, GFP_ATOMIC); + j++; + } while ((!session->xmit_data_buffer) && (j < 3)); + + if (!session->xmit_data_buffer) { + printk("iSCSI: session %p xmit_data failed, because of kmalloc failure\n", session); + session->xmit_buffer_size = 0; + iscsi_drop_session(session); + goto done; + } + } + + memset(session->xmit_data_buffer, 0, session->xmit_buffer_size); + memset( &msg, 0, sizeof(msg) ); + + memcpy(session->xmit_data_buffer, iov[i].iov_base, iov[i].iov_len); + data_iov.iov_base = session->xmit_data_buffer; + data_iov.iov_len = iov[i].iov_len; + msg.msg_iov = &data_iov; + msg.msg_iovlen = 1; + wlen = 
iov[i].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + + /* send pad bytes, if any */ + memset( &msg, 0, sizeof(msg) ); + if (pad_bytes) { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &iov[last_data_iovn]; + msg.msg_iovlen = 1; + wlen = pad_bytes; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes,rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + + /* send data digest */ + if (xfrlen && (session->DataDigest == ISCSI_DIGEST_CRC32C)) { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &iov[iovn - 1]; + msg.msg_iovlen = 1; + wlen = iov[iovn - 1].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } +#else + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &iov[0]; + msg.msg_iovlen = iovn; + + ISCSI_TRACE(ISCSI_TRACE_TxDataPDU, sc, task, data_offset, xfrlen); + + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } +#endif + } else { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &iov[0]; + msg.msg_iovlen = iovn; + + ISCSI_TRACE(ISCSI_TRACE_TxDataPDU, sc, task, data_offset, xfrlen); + + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + session->CmdSn++; + + return 1; + +done: + if (first_sg) { + /* undo any temporary mappings */ + for (sg = first_sg; sg <= last_sg; sg++) { + kunmap_sg(sg); + } + } + return 
0; +} + + +static int fake_task_completion(iscsi_session_t *session, iscsi_task_t *task) +{ + struct IscsiScsiRspHdr stsrh; + Scsi_Cmnd *sc = task->scsi_cmnd; + unsigned char sense_buffer[32]; + int senselen = 0; + uint32_t itt = task->itt; + + /* For testing, fake a completion with various status + * codes when requested, without ever sending the task or + * any data to the target, so that data corruption + * problems will occur if the retry isn't handled + * correctly. + */ + + memset(&stsrh, 0, sizeof(stsrh)); + stsrh.itt = htonl(itt); + + if (session->fake_status_unreachable) { + session->fake_status_unreachable--; + stsrh.response = 0x82; + printk("iSCSI: session %p faking iSCSI response 0x82 for itt %u task %p command %p to LUN %u at %lu\n", + session, task->itt, task, sc, task->lun, jiffies); + } + else if (session->fake_status_busy) { + session->fake_status_busy--; + stsrh.cmd_status = STATUS_BUSY; + printk("iSCSI: session %p faking SCSI status BUSY for itt %u task %p command %p to LUN %u at %lu\n", + session, task->itt, task, sc, task->lun, jiffies); + } + else if (session->fake_status_queue_full) { + session->fake_status_queue_full--; + stsrh.cmd_status = STATUS_QUEUE_FULL; + printk("iSCSI: session %p faking SCSI status QUEUE_FULL for itt %u task %p command %p to LUN %u at %lu\n", + session, task->itt, task, sc, task->lun, jiffies); + } + else if (session->fake_status_aborted) { + session->fake_status_aborted--; + stsrh.cmd_status = STATUS_CHECK_CONDITION; + stsrh.residual_count = htonl(iscsi_expected_data_length(sc)); + stsrh.flags |= ISCSI_FLAG_CMD_UNDERFLOW; + sense_buffer[0] = 0x70; + sense_buffer[2] = ABORTED_COMMAND; + senselen = 8; + printk("iSCSI: session %p faking SCSI status CHECK_CONDITION key ABORTED_COMMAND for itt %u task %p command %p to LUN %u at %lu\n", + session, task->itt, task, sc, task->lun, jiffies); + } + else { + /* nothing left to fake */ + session->fake_status_lun = -2; + return 0; + } + + /* determine command result based on the 
iSCSI response, status, and sense */ + process_task_response(session, task, &stsrh, sense_buffer, senselen); + + /* try to complete the command */ + complete_task(session, itt); + /* Note: we lose the task_lock by calling complete_task */ + + return 1; +} + + +static void iscsi_xmit_queued_cmnds(iscsi_session_t *session) +{ + Scsi_Cmnd *sc; + iscsi_task_t *task = NULL; + DECLARE_NOQUEUE_FLAGS; + uint32_t imm_data_length=0; + + if (!session) { + printk("iSCSI: can't xmit queued commands, no session\n"); + return; + } + + for (;;) { + + if (signal_pending(current)) { + DEBUG_QUEUE("iSCSI: session %p can't start tasks now, signal pending\n", session); + break; + } + + if ((atomic_read(&session->num_cmnds) == 0) && (atomic_read(&session->num_retry_cmnds) == 0)) { + DEBUG_QUEUE("iSCSI: no SCSI cmnds queued for session %p to %s\n", session, session->log_name); + break; + } + + if (!sna_lte(session->CmdSn, session->MaxCmdSn)) { + DEBUG_QUEUE("iSCSI: session %p can't start %u tasks now, ExpCmdSN %u, CmdSn %u, MaxCmdSN %u\n", + session, atomic_read(&session->num_cmnds), + session->ExpCmdSn, session->CmdSn, session->MaxCmdSn); + if (test_bit(SESSION_WINDOW_CLOSED, &session->control_bits) == 0) { + /* window is open, but not large enough for us to send everything we have queued. + * record how many times we hit this situation, to see how often we're getting throttled. 
+ */ + session->window_full++; + smp_mb(); + } + break; + } + + if (test_bit(SESSION_RESETTING, &session->control_bits)) { + DEBUG_EH("iSCSI: session %p resetting, can't start tasks at %lu\n", + session, jiffies); + break; + } + + DEBUG_QUEUE("iSCSI: session %p xmit_queued_cmnds, CmdSN %u, MaxCmdSN %u\n", + session, session->CmdSn, session->MaxCmdSn); + + spin_lock(&session->task_lock); + + if (task == NULL) { + /* allocate a task */ + task = alloc_task(session); + if (task == NULL) { + printk("iSCSI: session %p to (%u %u %u *) couldn't allocate task at %lu\n", + session, session->host_no, session->channel, session->target_id, jiffies); + spin_unlock(&session->task_lock); + /* to prevent a stall of the driver, free_task must wakeup + * the tx thread later. + */ + return; + } + } + + /* Don't start any new tasks if a Logout has been requested. */ + if (test_bit(SESSION_LOGOUT_REQUESTED, &session->control_bits)) { + spin_unlock(&session->task_lock); + DEBUG_QUEUE("iSCSI: session %p logout requested, can't start tasks now\n", session); + break; + } + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + + if ((sc = session->retry_cmnd_head)) { + /* remove the command from the retry_cmnd queue */ + session->retry_cmnd_head = (Scsi_Cmnd *)sc->host_scribble; + sc->host_scribble = NULL; + if (session->retry_cmnd_head == NULL) + session->retry_cmnd_tail = NULL; + + /* FIXME: we could stop using an atomic counter, if we're willing to + * acquire the session's scsi_cmnd_lock every time the TX_SCSI_COMMAND + * bit is set. For now, we use atomic counters so that we can skip + * the lock acquisition if there are no commands queued. + */ + atomic_dec(&session->num_retry_cmnds); + + /* commands in the retry queue are sent even when the LUN is delaying commands, + * since this is how we detect that they no longer need to be delayed. 
+ */ + + /* if error recovery has started or will start, don't start any new tasks */ + if (test_bit(sc->lun, session->luns_timing_out)) { + /* defer the command until later */ + DEBUG_EH("iSCSI: session %p deferring command %p retry to (%u %u %u %u) at %lu\n", + session, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + + /* these go back on the head of the deferred queue, not the tail, + * to preserve ordering of commands to each LUN. + */ + sc->host_scribble = (void *)session->deferred_cmnd_head; + if (session->deferred_cmnd_head == NULL) + session->deferred_cmnd_tail = sc; + session->deferred_cmnd_head = sc; + session->num_deferred_cmnds++; + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + spin_unlock(&session->task_lock); + + /* there may be commands for other LUNs that we can send */ + continue; + } + } + else if ((sc = session->scsi_cmnd_head)) { + /* remove the command from the scsi_cmnd queue */ + session->scsi_cmnd_head = (Scsi_Cmnd *)sc->host_scribble; + sc->host_scribble = NULL; + if (session->scsi_cmnd_head == NULL) + session->scsi_cmnd_tail = NULL; + + /* FIXME: we could stop using an atomic counter, if we're willing to + * acquire the session's scsi_cmnd_lock every time the TX_SCSI_COMMAND + * bit is set. For now, we use atomic counters so that we can skip + * the lock acquisition if there are no commands queued. + */ + atomic_dec(&session->num_cmnds); + + /* FIXME: should we check delaying_commands first, or timing_out first? Does it matter? 
*/ + if (test_bit(sc->lun, session->luns_delaying_commands)) { + /* defer the command until later */ + DEBUG_RETRY("iSCSI: session %p deferring command %p to (%u %u %u %u) at %lu\n", + session, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + + /* append it to the tail of the deferred queue */ + add_cmnd(sc, &session->deferred_cmnd_head, &session->deferred_cmnd_tail); + session->num_deferred_cmnds++; + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + spin_unlock(&session->task_lock); + + /* there may be commands for other LUNs that we can send */ + continue; + } + + if (test_bit(sc->lun, session->luns_timing_out)) { + /* defer the command until later */ + DEBUG_EH("iSCSI: session %p deferring command %p to (%u %u %u %u) at %lu\n", + session, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + + /* append it to the tail of the deferred queue */ + add_cmnd(sc, &session->deferred_cmnd_head, &session->deferred_cmnd_tail); + session->num_deferred_cmnds++; + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + spin_unlock(&session->task_lock); + + /* there may be commands for other LUNs that we can send */ + continue; + } + } + else { + /* this should never happen if the command counts are accurate */ + printk("iSCSI: bug - no SCSI cmnds queued at %lu for session %p, num_cmnds %u, head %p, tail %p, num_retry %u, head %p, tail %p\n", + jiffies, session, + atomic_read(&session->num_cmnds), session->scsi_cmnd_head, session->scsi_cmnd_tail, + atomic_read(&session->num_retry_cmnds), session->retry_cmnd_head, session->retry_cmnd_tail); + + atomic_set(&session->num_cmnds, 0); + atomic_set(&session->num_retry_cmnds, 0); + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + spin_unlock(&session->task_lock); + break; + } + + /* prepare to start a new task */ + __set_bit(TASK_TRY_ABORT, &task->flags); + task->lun = sc->lun; + task->scsi_cmnd = sc; + iscsi_set_direction(task); + add_session_task(session, task); + + DEBUG_QUEUE("iSCSI: cmnd %p 
became task %p itt %u at %lu for session %p, num_cmnds %u, head %p, tail %p\n", + sc, task, task->itt, jiffies, session, atomic_read(&session->num_cmnds), + session->scsi_cmnd_head, session->scsi_cmnd_tail); + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + + if ((session->fake_status_lun >= -1) && + ((session->fake_status_lun == -1) || (session->fake_status_lun == sc->lun))) + { + if (fake_task_completion(session, task)) { + /* the task has been completed, and we've lost the task_lock */ + task = NULL; + continue; + } + else { + /* nothing left to fake, still have the task_lock */ + session->fake_status_lun = -2; + } + } + + /* start a timer, queue up any unsolicited data, and send the task */ + add_task_timer(task, iscsi_task_times_out); + atomic_inc(&task->refcount); + smp_mb(); + + /* possibly queue up unsolicited data PDUs. + * With ImmediateData, we may or may not have to send + * additional Data PDUs, depending on the amount of data, and + * the Max PDU Length. For now, we never use immediate data. + */ + if (test_bit(TASK_WRITE, &task->flags) && sc->request_buffer && sc->request_bufflen && iscsi_expected_data_length(sc)) { + + /* queue up unsolicited data PDUs. the implied initial R2T doesn't count + * against the MaxOutstandingR2T, so we can't use the normal R2T fields of + * the task for the implied initial R2T. Use a special flag for the implied + * initial R2T, and let the rx thread update tasks in the tx_tasks collection + * if an R2T comes in before the implied initial R2T has been processed. 
+ */ + if (session->ImmediateData) { + imm_data_length = session->MaxXmitDataSegmentLength; + imm_data_length = MIN(imm_data_length, sc->request_bufflen); + if (sc->request_bufflen > imm_data_length) { + if (!session->InitialR2T) { + __set_bit(TASK_INITIAL_R2T, &task->flags); + /* queue up an implied R2T data transfer for later */ + add_task(&session->tx_tasks, task); + set_bit(TX_DATA, &session->control_bits); + set_bit(TX_WAKE, &session->control_bits); + } + } + } else { + if (!session->InitialR2T) { + __set_bit(TASK_INITIAL_R2T, &task->flags); + /* queue up an implied R2T data transfer for later */ + add_task(&session->tx_tasks, task); + set_bit(TX_DATA, &session->control_bits); + set_bit(TX_WAKE, &session->control_bits); + } + } + } + + + spin_unlock(&session->task_lock); + + DEBUG_FLOW("iSCSI: sending itt %u on session %p as CmdSN %u, MaxCmdSn %u\n", + task->itt, session, session->CmdSn, session->MaxCmdSn); + + /* we don't bother to check if the xmit works, since if it + * fails, the session will drop, and all tasks and cmnds + * will be completed by the drop. + */ + iscsi_xmit_task(task); + +#if INVALID_ORDERING_ASSUMPTIONS + /* some broken targets choke if a command PDU is followed by anything other than the data + * for that command, but still advertise a CmdSN window of more than 1 command. For such + * broken targets, we stop the loop after the first write, to try and give them the data + * they want. The target may still choke if it doesn't get us an R2T before we send the + * next command. Nothing we can do about that, other than log bugs against the broken + * targets. 
+ */ + if (test_bit(TASK_WRITE, &task->flags)) { + atomic_dec(&task->refcount); + set_bit(TX_SCSI_COMMAND, &session->control_bits); + set_bit(TX_WAKE, &session->control_bits); + return; + } +#endif + + atomic_dec(&task->refcount); + DEBUG_FLOW("iSCSI: after sending itt %u, task %p now has refcount %d\n", task->itt, task, atomic_read(&task->refcount)); + task = NULL; + } + + /* we still have a task we never used. free it before returning */ + if (task) { + spin_lock(&session->task_lock); + free_task(session, task); + spin_unlock(&session->task_lock); + } +} + + +static inline void *sg_virtual_address(struct scatterlist *sg) +{ +#if (HAS_SCATTERLIST_PAGE && HAS_SCATTERLIST_ADDRESS) + /* page may or may not be mapped */ + if (sg->address) { + return sg->address; + } + else if (sg->page) { + return page_address(sg->page) + sg->offset; + } + return NULL; + +#elif HAS_SCATTERLIST_PAGE + /* should have already mapped the page */ + if (sg->page) { + return page_address(sg->page) + sg->offset; + } + return NULL; +#else + return sg->address; +#endif + +} + +static inline void *kmap_sg(struct scatterlist *sg) +{ +#if (HAS_SCATTERLIST_PAGE && HAS_SCATTERLIST_ADDRESS) + /* page may or may not be mapped if HIGHMEM is in use */ + if (sg->address) { + DEBUG_FLOW("iSCSI: kmap sg %p to address %p\n", sg, sg->address); + return sg->address; + } + else if (sg->page) { + void *addr = kmap(sg->page); + DEBUG_FLOW("iSCSI: kmap sg %p page %p to addr %p\n", sg, sg->page, addr); + return addr; + } + return NULL; + +#elif HAS_SCATTERLIST_PAGE + /* there is no address, must kmap the page */ + if (sg->page) { + return kmap(sg->page); + } + return NULL; + +#else + /* just use the address */ + DEBUG_FLOW("iSCSI: kmap sg %p to address %p\n", sg, sg->address); + return sg->address; +#endif +} + +static inline void kunmap_sg(struct scatterlist *sg) +{ +#if (HAS_SCATTERLIST_PAGE && HAS_SCATTERLIST_ADDRESS) + if (!sg->address && sg->page) + kunmap(sg->page); +#elif HAS_SCATTERLIST_PAGE + if 
(sg->page) + kunmap(sg->page); +#endif + return; +} + + +static void iscsi_xmit_data(iscsi_task_t *task, uint32_t ttt, uint32_t data_offset, uint32_t data_length) +{ + struct msghdr msg; + struct IscsiDataHdr stdh; + Scsi_Cmnd *sc = NULL; + iscsi_session_t *session = task->session; + struct scatterlist *sglist = NULL, *sg, *first_sg = NULL, *last_sg = NULL; + int wlen, rc, iovn = 0, first_data_iovn = 0; + unsigned int segment_offset = 0, index = 0; + int remain, xfrlen; + uint32_t data_sn = 0; + int bytes_to_fill, bytes_from_segment; + char padding[4]; + int pad_bytes; + uint32_t header_crc32c; + uint32_t data_crc32c; +#if PREVENT_DATA_CORRUPTION + int last_data_iovn = 0; +#endif + + sc = task->scsi_cmnd; + /* make sure we have data to send when we expect to */ + if (sc && (iscsi_expected_data_length(sc) == 0) && ((sc->request_bufflen == 0) || (sc->request_buffer == NULL))) { + printk("iSCSI: xmit_data for itt %u, task %p, sc %p, dlength %u, expected %u, no data in buffer\n" + " request_buffer %p len %u, buffer %p len %u\n", + task->itt, task, sc, data_length, iscsi_expected_data_length(sc), + sc->request_buffer, sc->request_bufflen, sc->buffer, sc->bufflen); + print_cmnd(sc); + return; + } + + remain = data_length; + if (sc == NULL) + remain = 0; + + memset( &stdh, 0, sizeof(stdh) ); + stdh.opcode = ISCSI_OP_SCSI_DATA; + stdh.itt = htonl(task->itt); + stdh.ttt = ttt; + stdh.offset = htonl(data_offset); + + /* PDU header */ + session->tx_iov[0].iov_base = &stdh; + session->tx_iov[0].iov_len = sizeof(stdh); + + DEBUG_FLOW("iSCSI: xmit_data for itt %u, task %p, credit %d @ %u\n" + " request_buffer %p len %u, buffer %p len %u\n", + task->itt, task, remain, data_offset, + sc->request_buffer, sc->request_bufflen, sc->buffer, sc->bufflen); + + /* Find the segment and offset within the segment to start writing from. 
*/ + if (sc && sc->use_sg) { + sg = sglist = (struct scatterlist *)sc->request_buffer; + + segment_offset = data_offset; + + for (index = 0; index < sc->use_sg; index++) { + if (segment_offset < sglist[index].length) + break; + else + segment_offset -= sglist[index].length; + } + + if (index >= sc->use_sg) { + /* didn't find the offset, command will eventually timeout */ + printk("iSCSI: session %p xmit_data for itt %u couldn't find offset %u in sglist %p, sc %p, bufflen %u, use_sg %u\n", + session, task->itt, data_offset, sglist, sc, sc->request_bufflen, sc->use_sg); + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_OutOfData, sc, task, index, sc->use_sg); + return; + } + } + + ISCSI_TRACE(ISCSI_TRACE_TxData, sc, task, data_offset, data_length); + + do { + if (signal_pending(current)) + break; + +#if (INVALID_ORDERING_ASSUMPTIONS == 0) + /* since this loop may take a while, check for TIMEDOUT tasks and commands */ + /* Note: this means a task may have a non-zero refcount during timeout processing */ + if (test_bit(SESSION_TASK_TIMEDOUT, &session->control_bits)) { + process_timedout_tasks(session); + } + if (test_bit(SESSION_COMMAND_TIMEDOUT, &session->control_bits)) { + process_timedout_commands(session); + } + + /* also queue up command retries */ + if (test_and_clear_bit(SESSION_RETRY_COMMANDS, &session->control_bits)) { + /* try to queue up delayed commands for retries */ + iscsi_retry_commands(session); + } + + /* if command PDUs are small (no immediate data), + * start commands as soon as possible, so that we can + * overlap the R2T latency with the time it takes to + * send data for commands already issued. This increases + * throughput without significantly increasing the completion + * time of commands already issued. Some broken targets + * such as the one by Intel Labs will choke if they receive + * another command before they get all of the data for preceding + * commands, so this can be conditionally compiled out. 
+ */ + if (!session->ImmediateData) { + DEBUG_FLOW("iSCSI: checking for new commands before sending data to %s\n", + session->log_name); + iscsi_xmit_queued_cmnds(session); + } +#endif + + iovn = 1; + wlen = sizeof(stdh); + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + /* we'll need to send a digest, but can't compute it yet */ + session->tx_iov[1].iov_base = &header_crc32c; + session->tx_iov[1].iov_len = sizeof(header_crc32c); + iovn = 2; + wlen += sizeof(header_crc32c); + } + + first_data_iovn = iovn; + + stdh.datasn = htonl(data_sn++); + stdh.offset = htonl(data_offset); + stdh.expstatsn = htonl(session->ExpStatSn); + + if (session->MaxXmitDataSegmentLength && (remain > session->MaxXmitDataSegmentLength)) { + /* enforce the target's data segment limit */ + bytes_to_fill = session->MaxXmitDataSegmentLength; + } + else { + /* final PDU of a data burst */ + bytes_to_fill = remain; + stdh.flags = ISCSI_FLAG_FINAL; + } + + /* check if we need to pad the PDU */ + if (bytes_to_fill % PAD_WORD_LEN) { + pad_bytes = PAD_WORD_LEN - (bytes_to_fill % PAD_WORD_LEN); + memset(padding, 0x0, sizeof(padding)); + } + else { + pad_bytes = 0; + } + + DEBUG_FLOW("iSCSI: remain %d, bytes_to_fill %d, sc->use_sg %u, MaxRecvDataSegmentLength %d\n", + remain, bytes_to_fill, sc->use_sg, session->MaxRecvDataSegmentLength); + + xfrlen = 0; + + if (sc) { + /* find all the PDU data */ + if (sc->use_sg) { + /* while there is more data and we want to send more data */ + while (bytes_to_fill > 0) { + + if (index >= sc->use_sg) { + printk("iSCSI: session %p xmit_data index %d exceeds sc->use_sg %d, bytes_to_fill %d, out of buffers\n", + session, index, sc->use_sg, bytes_to_fill); + /* the command will eventually timeout */ + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_OutOfData, sc, task, index, sc->use_sg); + goto done; + } + if (signal_pending(current)) { + DEBUG_FLOW("iSCSI: session %p signal pending, returning from xmit_data\n", session); + goto done; + } + + sg = &sglist[index]; + + /* 
make sure the segment is mapped */ + if (!kmap_sg(sg)) { + printk("iSCSI: session %p xmit_data couldn't map segment %p\n", session, sg); + goto done; + } + else if (first_sg == NULL) { + first_sg = sg; + } + last_sg = sg; + + /* sanity check the sglist segment length */ + if (sg->length <= segment_offset) { + /* the sglist is corrupt */ + printk("iSCSI: session %p xmit_data index %d, length %u too small for offset %u, bytes_to_fill %d, sglist has been corrupted\n", + session, index, sg->length, segment_offset, bytes_to_fill); + /* the command will eventually timeout */ + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_BadTxSeg, sc, task, sg->length, segment_offset); + goto done; + } + + bytes_from_segment = sg->length - segment_offset; + if ( bytes_from_segment > bytes_to_fill ) { + /* only need part of this segment */ + session->tx_iov[iovn].iov_base = sg_virtual_address(sg) + segment_offset; + session->tx_iov[iovn].iov_len = bytes_to_fill; + xfrlen += bytes_to_fill; + DEBUG_FLOW("iSCSI: session %p xmit_data xfrlen %d, to_fill %d, from_segment %d, iov[%2d] = partial sg[%2d]\n", + session, xfrlen, bytes_to_fill, bytes_from_segment, iovn, index); + iovn++; + segment_offset += bytes_to_fill; + break; + } + else { + /* need all of this segment, and possibly more from the next */ + session->tx_iov[iovn].iov_base = sg_virtual_address(sg) + segment_offset; + session->tx_iov[iovn].iov_len = bytes_from_segment; + xfrlen += bytes_from_segment; + DEBUG_FLOW("iSCSI: session %p xmit_data xfrlen %d, to_fill %d, from_segment %d, iov[%2d] = sg[%2d]\n", + session, xfrlen, bytes_to_fill, bytes_from_segment, iovn, index); + bytes_to_fill -= bytes_from_segment; + iovn++; + /* any remaining data starts at offset 0 of the next segment */ + index++; + segment_offset = 0; + } + } + + if (xfrlen <= 0) { + printk("iSCSI: session %p xmit_data picked xfrlen of 0, sc->use_sg %d, bytes_to_fill %d\n", + session, sc->use_sg, bytes_to_fill); + iscsi_drop_session(session); + goto done; + } + } + else { 
+ /* no scatter-gather */ + if ((sc->request_buffer + data_offset + bytes_to_fill) <= (sc->request_buffer + sc->request_bufflen)) { + /* send all the data */ + session->tx_iov[iovn].iov_base = sc->request_buffer + data_offset; + session->tx_iov[iovn].iov_len = xfrlen = bytes_to_fill; + iovn++; + } + else if ((sc->request_buffer + data_offset) < (sc->request_buffer + sc->request_bufflen)) { + /* send some data, but can't send all requested */ + xfrlen = sc->request_bufflen - data_offset; + printk("iSCSI: xmit_data ran out of data, buffer %p len %u but offset %d length %d, sending final %d bytes\n", + sc->request_buffer, sc->request_bufflen, data_offset, bytes_to_fill, xfrlen); + session->tx_iov[iovn].iov_base = sc->request_buffer + data_offset; + session->tx_iov[iovn].iov_len = xfrlen; + iovn++; + stdh.flags = ISCSI_FLAG_FINAL; + remain = xfrlen; + } + else { + /* can't send any data */ + printk("iSCSI: xmit_data ran out of data, buffer %p len %u but offset %d length %d, sending no more data\n", + sc->request_buffer, sc->request_bufflen, data_offset, bytes_to_fill); + goto done; + } + } +#if PREVENT_DATA_CORRUPTION + last_data_iovn = iovn; +#endif + if (pad_bytes) { + session->tx_iov[iovn].iov_base = padding; + session->tx_iov[iovn].iov_len = pad_bytes; + iovn++; + wlen += pad_bytes; + } + } + + /* put the data length in the PDU header */ + hton24(stdh.dlength, xfrlen); + wlen += xfrlen; + + /* header complete, we can finally calculate the HeaderDigest */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) + header_crc32c = iscsi_crc32c(&stdh, sizeof(stdh)); + + /* DataDigest */ + if (xfrlen && (session->DataDigest == ISCSI_DIGEST_CRC32C)) { + int i; + + data_crc32c = iscsi_crc32c(session->tx_iov[first_data_iovn].iov_base, session->tx_iov[first_data_iovn].iov_len); + for (i = first_data_iovn + 1; i < iovn; i++) { + data_crc32c = iscsi_crc32c_continued(session->tx_iov[i].iov_base, session->tx_iov[i].iov_len, data_crc32c); + } + + /* FIXME: this may not be SMP safe, 
but it's only for testing anyway, so it probably doesn't need to be */ + if (session->fake_write_data_mismatch > 0) { + session->fake_write_data_mismatch--; + smp_mb(); + printk("iSCSI: session %p faking DataDigest mismatch for itt %u, task %p\n", + session, task->itt, task); + data_crc32c = 0x01020304; + } + + session->tx_iov[iovn].iov_base = &data_crc32c; + session->tx_iov[iovn].iov_len = sizeof(data_crc32c); + iovn++; + wlen += sizeof(data_crc32c); + } + + if (xfrlen && (session->DataDigest == ISCSI_DIGEST_CRC32C)) { +#if PREVENT_DATA_CORRUPTION + struct iovec data_iov; + int i; + + /* send header */ + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &session->tx_iov[0]; + msg.msg_iovlen = 1; + wlen = session->tx_iov[0].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + + /* send header digest */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &session->tx_iov[1]; + msg.msg_iovlen = 1; + wlen = session->tx_iov[1].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + + /* send data */ + for (i = first_data_iovn; i < last_data_iovn; i++) { + if (session->tx_iov[i].iov_len > session->xmit_buffer_size) { + int j = 0; + + if (session->xmit_data_buffer) + kfree(session->xmit_data_buffer); + + session->xmit_buffer_size = session->tx_iov[i].iov_len; + + do { + session->xmit_data_buffer = (unsigned char *) kmalloc(sizeof(char) * session->xmit_buffer_size, GFP_ATOMIC); + j++; + } while ((!session->xmit_data_buffer) && (j < 3)); + + if (!session->xmit_data_buffer) { + printk("iSCSI: session %p xmit_data failed, because of kmalloc failure\n", session); + session->xmit_buffer_size = 0; + 
iscsi_drop_session(session); + goto done; + } + } + + memset(session->xmit_data_buffer, 0, session->xmit_buffer_size); + memset( &msg, 0, sizeof(msg) ); + + memcpy(session->xmit_data_buffer, session->tx_iov[i].iov_base, session->tx_iov[i].iov_len); + data_iov.iov_base = session->xmit_data_buffer; + data_iov.iov_len = session->tx_iov[i].iov_len; + msg.msg_iov = &data_iov; + msg.msg_iovlen = 1; + wlen = session->tx_iov[i].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + + /* send pad bytes, if any */ + memset( &msg, 0, sizeof(msg) ); + if (pad_bytes) { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &session->tx_iov[last_data_iovn]; + msg.msg_iovlen = 1; + wlen = pad_bytes; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + + /* send data digest */ + if (xfrlen && (session->DataDigest == ISCSI_DIGEST_CRC32C)) { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &session->tx_iov[iovn - 1]; + msg.msg_iovlen = 1; + wlen = session->tx_iov[iovn - 1].iov_len; + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } +#else + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &session->tx_iov[0]; + msg.msg_iovlen = iovn; + + ISCSI_TRACE(ISCSI_TRACE_TxDataPDU, sc, task, data_offset, xfrlen); + + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } +#endif + } else { + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = &session->tx_iov[0]; + 
msg.msg_iovlen = iovn; + + ISCSI_TRACE(ISCSI_TRACE_TxDataPDU, sc, task, data_offset, xfrlen); + + rc = iscsi_sendmsg( session, &msg, wlen ); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_data failed to send %d bytes, rc %d\n", session, wlen, rc); + iscsi_drop_session(session); + goto done; + } + } + + remain -= xfrlen; + + DEBUG_FLOW("iSCSI: xmit_data sent %d @ %u for itt %u, remaining %d, final %d\n", + xfrlen, data_offset, task->itt, remain, stdh.flags & ISCSI_FLAG_FINAL); + + data_offset += xfrlen; + + if (first_sg) { + /* undo any temporary mappings */ + for (sg = first_sg; sg <= last_sg; sg++) { + kunmap_sg(sg); + } + first_sg = last_sg = NULL; + } + + } while (remain); + + done: + if (first_sg) { + /* undo any temporary mappings */ + for (sg = first_sg; sg <= last_sg; sg++) { + kunmap_sg(sg); + } + } +} + +static void iscsi_xmit_r2t_data(iscsi_session_t *session) +{ + iscsi_task_t *task; + uint32_t itt; + uint32_t ttt; + uint32_t offset; + uint32_t length; + int initial_r2t = 0; + uint32_t implied_length = 0; + uint32_t imm_data_length = 0; + + spin_lock(&session->task_lock); + while ((task = pop_task(&session->tx_tasks))) { + itt = task->itt; + + if ((initial_r2t = __test_and_clear_bit(TASK_INITIAL_R2T, &task->flags))) { + if (session->FirstBurstLength) + implied_length = MIN(session->FirstBurstLength, iscsi_expected_data_length(task->scsi_cmnd)); + else + implied_length = iscsi_expected_data_length(task->scsi_cmnd); /* FirstBurstLength 0 means no limit */ + + /* For ImmediateData, we'll have to subtract it off as well */ + if (session->ImmediateData) { + imm_data_length = session->MaxXmitDataSegmentLength; + imm_data_length = MIN(imm_data_length, session->FirstBurstLength); + imm_data_length = MIN(imm_data_length, iscsi_expected_data_length(task->scsi_cmnd)); + implied_length -= imm_data_length; + } + + if (implied_length == 0) + printk("iSCSI: session %p sending empty Data PDU for implied R2T of itt %u, task %p, cmnd NULL at %lu\n", + session, 
task->itt, task, jiffies); + } + + /* save the values that get set when we receive an R2T from the target, + * so that we can receive another one while we're sending data. + */ + ttt = task->ttt; + offset = task->data_offset; + length = task->data_length; + task->ttt = RSVD_TASK_TAG; + if (task->scsi_cmnd == NULL) { + printk("iSCSI: session %p sending empty Data PDU for R2T (%u @ %u), itt %u, ttt %u, task %p, cmnd NULL at %lu\n", + session, offset, length, task->itt, ntohl(ttt), task, jiffies); + length = 0; + } + + atomic_inc(&task->refcount); + spin_unlock(&session->task_lock); + + /* implied initial R2T */ + if (initial_r2t) { + DEBUG_FLOW("iSCSI: session %p sending implied initial R2T data (%u @ 0) for itt %u, task %p to %s\n", + session, implied_length, itt, task, session->log_name); + + /* we now send an empty PDU if the implied length is zero, + * to handle cases where a task's command is removed and + * completed while the task is still queued to have data + * sent. We could trigger error recovery at this point, + * or send an ABORT_TASK to try to quiet error message on + * the target about 0 length data PDUs. If we end up + * trying ABORT_TASK_SET, we're required to continue + * responding to all outstanding ttts, though we can send + * empty Data PDUs with the F-bit set (like we do here). 
+ */ + iscsi_xmit_data(task, RSVD_TASK_TAG, imm_data_length, implied_length); + } + + if (signal_pending(current)) { + atomic_dec(&task->refcount); + return; + } + + /* normal R2T from the target */ + if (ttt != RSVD_TASK_TAG) { + DEBUG_FLOW("iSCSI: session %p sending R2T data (%u @ %u) for itt %u, ttt %u, task %p to %s\n", + session, length, offset, itt, ntohl(ttt), task, session->log_name); + + iscsi_xmit_data(task, ttt, offset, length); + } + + atomic_dec(&task->refcount); + + if (signal_pending(current)) + return; + + /* relock before checking loop condition */ + spin_lock(&session->task_lock); + } + spin_unlock(&session->task_lock); +} + + +/* send a reply to a nop that requested one */ +static void iscsi_xmit_nop_reply(iscsi_session_t *session, iscsi_nop_info_t *nop_info) +{ + struct IscsiNopOutHdr stnoh; + struct msghdr msg; + struct iovec iov[5]; + int rc; + int pad[4]; + uint32_t header_crc32c, data_crc32c; + int length, iovn, first_data_iovn, i; + + memset( &stnoh, 0, sizeof(stnoh) ); + stnoh.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; + stnoh.itt = RSVD_TASK_TAG; + stnoh.ttt = nop_info->ttt; + stnoh.flags = ISCSI_FLAG_FINAL; + memcpy(stnoh.lun, nop_info->lun, sizeof(stnoh.lun)); + hton24(stnoh.dlength, nop_info->dlength); + stnoh.cmdsn = htonl(session->CmdSn); /* don't increment after immediate cmds */ + stnoh.expstatsn = htonl(session->ExpStatSn); + + /* PDU header */ + iov[0].iov_base = &stnoh; + iov[0].iov_len = sizeof(stnoh); + length = sizeof(stnoh); + iovn = 1; + + /* HeaderDigest */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + iov[iovn].iov_base = &header_crc32c; + iov[iovn].iov_len = sizeof(header_crc32c); + iovn++; + length += sizeof(header_crc32c); + } + + first_data_iovn = iovn; + + if (nop_info->dlength) { + /* data */ + iov[iovn].iov_base = nop_info->data; + iov[iovn].iov_len = nop_info->dlength; + length += nop_info->dlength; + iovn++; + + /* pad */ + if (nop_info->dlength % PAD_WORD_LEN) { + memset(pad, 0, sizeof(pad)); + 
iov[iovn].iov_base = pad; + iov[iovn].iov_len = PAD_WORD_LEN - (nop_info->dlength % PAD_WORD_LEN); + length += iov[iovn].iov_len; + iovn++; + } + + /* DataDigest */ + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + data_crc32c = iscsi_crc32c(iov[first_data_iovn].iov_base, iov[first_data_iovn].iov_len); + + for (i = first_data_iovn + 1; i < iovn; i++) { + data_crc32c = iscsi_crc32c_continued(iov[i].iov_base, iov[i].iov_len, data_crc32c); + } + + iov[iovn].iov_base = &data_crc32c; + iov[iovn].iov_len = sizeof(data_crc32c); + length += sizeof(data_crc32c); + iovn++; + } + } + + /* HeaderDigest */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) + header_crc32c = iscsi_crc32c(&stnoh, sizeof(stnoh)); + + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = iov; + msg.msg_iovlen = iovn; + + rc = iscsi_sendmsg( session, &msg, length); + if ( rc != length ) { + printk("iSCSI: xmit_nop %d failed, rc %d\n", length, rc); + iscsi_drop_session(session); + } + + ISCSI_TRACE( ISCSI_TRACE_TxNopReply, NULL, NULL, nop_info->ttt, nop_info->dlength); +} + +/* send replies for NopIns that requested them */ +static void iscsi_xmit_nop_replys(iscsi_session_t *session) +{ + iscsi_nop_info_t *nop_info; + + /* these aren't really tasks, but it's not worth having a separate lock for them */ + spin_lock(&session->task_lock); + + /* space for one data-less reply is preallocated in the session itself */ + if (session->nop_reply.ttt != RSVD_TASK_TAG) { + spin_unlock(&session->task_lock); + + iscsi_xmit_nop_reply(session, &session->nop_reply); + session->nop_reply.ttt = RSVD_TASK_TAG; + + spin_lock(&session->task_lock); + } + + /* if we get multiple reply requests, or they have data, they'll get queued up */ + while ((nop_info = session->nop_reply_head)) { + session->nop_reply_head = nop_info->next; + if (!session->nop_reply_head) + session->nop_reply_tail = NULL; + spin_unlock(&session->task_lock); + + iscsi_xmit_nop_reply(session, nop_info); + kfree(nop_info); + DEBUG_ALLOC("iSCSI: kfree 
nop_info %p after sending nop reply\n", nop_info); + + if (signal_pending(current)) + return; + + /* relock before checking loop condition */ + spin_lock(&session->task_lock); + } + spin_unlock(&session->task_lock); +} + + +static void iscsi_xmit_logout(iscsi_session_t *session, uint32_t itt, int reason) +{ + struct IscsiLogoutHdr stlh; + struct msghdr msg; + struct iovec iov[2]; + uint32_t crc32c; + int rc, wlen; + + memset(&stlh, 0, sizeof(stlh)); + stlh.opcode = ISCSI_OP_LOGOUT_CMD | ISCSI_OP_IMMEDIATE; + stlh.flags = ISCSI_FLAG_FINAL | (reason & ISCSI_FLAG_LOGOUT_REASON_MASK); + stlh.itt = htonl(itt); + stlh.cmdsn = htonl(session->CmdSn); + stlh.expstatsn = htonl(session->ExpStatSn); + + memset(iov, 0, sizeof(iov)); + iov[0].iov_base = &stlh; + iov[0].iov_len = sizeof(stlh); + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + wlen = sizeof(stlh); + + /* HeaderDigests */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + crc32c = iscsi_crc32c(&stlh, sizeof(stlh)); + iov[msg.msg_iovlen].iov_base = &crc32c; + iov[msg.msg_iovlen].iov_len = sizeof(crc32c); + msg.msg_iovlen++; + wlen += sizeof(crc32c); + } + + rc = iscsi_sendmsg(session, &msg, wlen); + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_logout error, rc %d, wlen %d\n", session, rc, wlen); + iscsi_drop_session(session); + } +} + +static void iscsi_xmit_ping(iscsi_session_t *session, uint32_t itt, unsigned char *data, int length) +{ + struct IscsiNopOutHdr stph; + struct msghdr msg; + struct iovec iov[5]; + unsigned char pad[4]; + uint32_t header_crc32c, data_crc32c; + int rc, wlen, iovn = 0, first_data_iovn, i; + + memset(&stph, 0, sizeof(stph)); + stph.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; + stph.flags = ISCSI_FLAG_FINAL; + stph.itt = htonl(itt); /* reply request */ + stph.ttt = RSVD_TASK_TAG; + stph.cmdsn = htonl(session->CmdSn); + stph.expstatsn = htonl(session->ExpStatSn); + + memset(iov, 0, sizeof(iov)); + iov[0].iov_base = &stph; + iov[0].iov_len = 
sizeof(stph); + iovn = 1; + wlen = sizeof(stph); + + /* HeaderDigests */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + iov[iovn].iov_base = &header_crc32c; + iov[iovn].iov_len = sizeof(header_crc32c); + iovn++; + wlen += sizeof(header_crc32c); + } + + first_data_iovn = iovn; + + if (data && length) { + hton24(stph.dlength, length); + + /* add the data */ + iov[iovn].iov_base = data; + iov[iovn].iov_len = length; + iovn++; + wlen += length; + + /* may need to pad as well */ + if (length % PAD_WORD_LEN) { + memset(pad, 0, sizeof(pad)); + iov[iovn].iov_base = pad; + iov[iovn].iov_len = PAD_WORD_LEN - (length % PAD_WORD_LEN); + wlen += iov[iovn].iov_len; + iovn++; + } + + /* DataDigest */ + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + data_crc32c = iscsi_crc32c(iov[first_data_iovn].iov_base, iov[first_data_iovn].iov_len); + + for (i = first_data_iovn + 1; i < iovn; i++) { + data_crc32c = iscsi_crc32c_continued(iov[i].iov_base, iov[i].iov_len, data_crc32c); + } + + iov[iovn].iov_base = &data_crc32c; + iov[iovn].iov_len = sizeof(data_crc32c); + wlen += sizeof(data_crc32c); + iovn++; + } + + DEBUG_FLOW("iSCSI: session %p tx Nop/data itt %u, lengths %d, %d, %d\n", + session, itt, iov[0].iov_len, iov[1].iov_len, iov[2].iov_len); + } + else { + DEBUG_FLOW("iSCSI: session %p tx Nop/data itt %u, lengths %d, %d, %d\n", + session, itt, iov[0].iov_len, iov[1].iov_len, iov[2].iov_len); + } + + /* can't calculate the HeaderDigest until after we've filled in the dlength */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) + header_crc32c = iscsi_crc32c(&stph, sizeof(stph)); + + ISCSI_TRACE(ISCSI_TRACE_TxPing, NULL, NULL, itt, length); + + memset( &msg, 0, sizeof(msg) ); + msg.msg_iov = iov; + msg.msg_iovlen = iovn; + + rc = iscsi_sendmsg( session, &msg, wlen); + + if ( rc != wlen ) { + printk("iSCSI: session %p xmit_ping error, rc %d, wlen %d\n", session, rc, wlen); + iscsi_drop_session(session); + } +} + +/* called by the /proc code, so we can block */ +static 
void iscsi_ping_test_session(iscsi_session_t *session, int total_data_length) +{ + unsigned char *rx_buffer; + unsigned char *tx_buffer; + + /* assume that we can run the test, and allocate the memory we'll need. + * draft 8 only allows 4K of ping data per Nop. + */ + rx_buffer = kmalloc(4096, GFP_ATOMIC); + tx_buffer = kmalloc(4096, GFP_ATOMIC); + + if (rx_buffer && tx_buffer) { + unsigned char *data; + unsigned int value = 0; + + /* put a simple pattern in the data */ + for (data = tx_buffer; data < tx_buffer + 4096; data++) { + *data = value & 0xFF; + value++; + } + + spin_lock(&session->task_lock); + if (session->ping_test_start == 0) { + /* start a ping test */ + session->ping_test_start = jiffies; + session->ping_test_data_length = total_data_length; + session->ping_test_rx_length = total_data_length; + session->ping_test_rx_start = 0; + session->ping_test_tx_buffer = tx_buffer; + + smp_mb(); + wake_tx_thread(TX_PING_DATA, session); + printk("iSCSI: session %p starting Nop data test with total length %u at %lu\n", session, total_data_length, jiffies); + } + else { + printk("iSCSI: session %p can't start Nop data test, test started at %lu still in progress at %lu\n", + session, session->ping_test_start, jiffies); + } + spin_unlock(&session->task_lock); + + /* the tx and rx thread will free the buffers when they're done with them, + * so that we can just return. + */ + } + else { + printk("iSCSI: session %p can't start Nop data test, couldn't allocate buffers at %lu\n", session, jiffies); + if (rx_buffer) + kfree(rx_buffer); + if (tx_buffer) + kfree(tx_buffer); + } +} + +/* the writer thread */ +static int iscsi_tx_thread( void *vtaskp ) +{ + iscsi_session_t *session; + + if ( ! 
vtaskp ) { + printk("iSCSI: tx thread task parameter NULL\n"); + return 0; + } + + session = (iscsi_session_t *)vtaskp; + /* whoever created the thread already incremented the session's refcount for us */ + + DEBUG_INIT("iSCSI: tx thread %d for session %p about to daemonize on cpu%d\n", + current->pid, session, smp_processor_id()); + + /* become a daemon kernel thread, and abandon any user space resources */ + sprintf(current->comm,"iscsi-tx"); + iscsi_daemonize(); + session->tx_pid = current->pid; + current->flags |= PF_MEMALLOC; + smp_mb(); + + /* check to see if iscsi_terminate_session was called before we + * started running, since we can't get a signal from it until + * until we set session->tx_pid. + */ + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto ThreadExit; + + /* Block all signals except SIGHUP and SIGKILL */ + LOCK_SIGNALS(); + siginitsetinv(¤t->blocked, sigmask(SIGKILL) | sigmask(SIGHUP)); + RECALC_PENDING_SIGNALS; + UNLOCK_SIGNALS(); + + DEBUG_INIT("iSCSI: tx thread %d for session %p starting on cpu%d\n", current->pid, session, smp_processor_id()); + + while (!test_bit(SESSION_TERMINATING, &session->control_bits)) { + wait_queue_t waitq; + int timedout = 0; + + DEBUG_INIT("iSCSI: tx thread %d for session %p waiting for new session to be established at %lu\n", + current->pid, session, jiffies); + + /* add ourselves to the login wait q, so that the rx thread can wake us up */ + init_waitqueue_entry(&waitq, current); + add_wait_queue(&session->login_wait_q, &waitq); + smp_mb(); + + for (;;) { + int replacement_timeout; + unsigned long now; + long sleep_jiffies = 0; + + /* tell the rx thread that we're blocked, and that it can + * safely call iscsi_sendmsg now as part of the Login + * phase, since we're guaranteed not to be doing any IO + * until the session is up. 
+ */ + set_current_state(TASK_INTERRUPTIBLE); + set_bit(TX_THREAD_BLOCKED, &session->control_bits); + smp_mb(); + wake_up(&session->tx_blocked_wait_q); + + /* if the session is up, our wait is over */ + if (test_bit(SESSION_ESTABLISHED, &session->control_bits)) + break; + + now = jiffies; + replacement_timeout = session->replacement_timeout; + + /* check for a session replacement timeout */ + if (!timedout && replacement_timeout && session->session_drop_time && + time_before_eq(session->session_drop_time + (replacement_timeout * HZ), now)) + { + Scsi_Cmnd *sc; + DECLARE_NOQUEUE_FLAGS; + DECLARE_MIDLAYER_FLAGS; + + printk("iSCSI: session %p replacement timed after %d seconds, drop %lu, now %lu, failing all commands\n", + session, replacement_timeout, session->session_drop_time, jiffies); + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + LOCK_MIDLAYER_LOCK(session->hba->host); + + /* make sure any future attempts to queue a command fail immediately */ + set_bit(SESSION_REPLACEMENT_TIMEDOUT, &session->control_bits); + + /* don't need to do this again, since we just put a barrier blocking any more commands from being queued */ + timedout = 1; + + /* we're failing all commands, so any outstanding command timeouts are also handled */ + clear_bit(SESSION_COMMAND_TIMEDOUT, &session->control_bits); + + /* complete all commands currently in the driver. + * Note: this assumes that the completion callback will not call iscsi_queuecommand, + * since we're holding the scsi_cmnd_lock, and would deadlock with ourselves + * if queuecommand was called. + */ + while ((sc = session->scsi_cmnd_head)) { + session->scsi_cmnd_head = (Scsi_Cmnd *)sc->host_scribble; + + atomic_dec(&session->num_cmnds); + sc->result = HOST_BYTE(DID_NO_CONNECT); + sc->resid = iscsi_expected_data_length(sc); + + set_lun_comm_failure(sc); + + /* FIXME: if this is the last retry of a disk + * write, log a warning about possible data loss + * from the buffer cache? 
+ */ + + if (sc->scsi_done) { + add_completion_timer(sc); + DEBUG_EH("iSCSI: session %p replacement timeout completing %p at %lu\n", session, sc, jiffies); + sc->scsi_done(sc); + } + } + + UNLOCK_MIDLAYER_LOCK(session->hba->host); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + } + + /* process any command timeouts */ + if (test_bit(SESSION_COMMAND_TIMEDOUT, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p processing command timeouts while not established at %lu\n", + session, jiffies); + process_timedout_commands(session); + } + + /* wait for either: + * the rx thread to tell us the session is up + * the session replacement timeout to expire + * a command timeout to expire for the last time + */ + if (!timedout && replacement_timeout && session->session_drop_time) { + unsigned long timeout = 0; + + /* calculate how long til the replacement timer expires */ + now = jiffies; + if (session->session_drop_time) + timeout = session->session_drop_time + (HZ * replacement_timeout); + else + timeout = now + (HZ * replacement_timeout); + + /* handle wrap-around */ + if (now <= timeout) + sleep_jiffies = timeout - now; + else + sleep_jiffies = ULONG_MAX - now + timeout; + + DEBUG_INIT("iSCSI: session %p tx thread %d blocking at %lu, timeout at %lu\n", + session, current->pid, jiffies, timeout); + schedule_timeout(sleep_jiffies); + } + else { + DEBUG_INIT("iSCSI: session %p tx thread %d blocking at %lu, timedout %d, replacement %d, drop time %lu\n", + session, current->pid, jiffies, timedout, replacement_timeout, session->session_drop_time); + schedule(); + } + + if (iscsi_handle_signals(session)) { + DEBUG_INIT("iSCSI: session %p tx thread %d signalled at %lu while waiting for session establishment\n", + session, current->pid, jiffies); + } + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + /* we're all done */ + set_current_state(TASK_RUNNING); + remove_wait_queue(&session->login_wait_q, &waitq); + goto ThreadExit; + } + } + + /* remove 
ourselves from the login wait q */ + set_current_state(TASK_RUNNING); + remove_wait_queue(&session->login_wait_q, &waitq); + + /* we're up and running with a new session */ + clear_bit(TX_THREAD_BLOCKED, &session->control_bits); + DEBUG_INIT("iSCSI: tx thread %d for session %p starting to process new session with socket %p at %lu\n", + current->pid, session, session->socket, jiffies); + + /* make sure we start sending commands again */ + set_bit(TX_PING, &session->control_bits); + set_bit(TX_SCSI_COMMAND, &session->control_bits); + set_bit(TX_WAKE, &session->control_bits); + + /* don't start any new commands if we're still trying to do a reset */ + if (test_bit(SESSION_RESET_REQUESTED, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p still has a warm reset requested at %lu\n", session, jiffies); + set_bit(SESSION_RESETTING, &session->control_bits); + } + + /* process tx requests for this session, until the session drops */ + while (!signal_pending(current)) { + + DEBUG_FLOW("iSCSI: tx thread %d for session %p waiting at %lu\n", session->tx_pid, session, jiffies); + wait_event_interruptible(session->tx_wait_q, test_and_clear_bit(TX_WAKE, &session->control_bits)); + + DEBUG_FLOW("iSCSI: tx thread %d for session %p is awake at %lu\n", session->tx_pid, session, jiffies); + + if (signal_pending(current)) break; + + if ((test_and_clear_bit(DISK_INIT, &session->control_bits)) && (!session->disk_init_pid)) { + if (kernel_thread(iscsi_disk_initialize, (void *)session, 0) < 0) { + printk("iSCSI: failed to start the disk init thread \n"); + } + } + + if ((test_and_clear_bit(SEND_TUR, &session->control_bits)) && (!session->send_tur_pid)) { + if (kernel_thread(iscsi_unit_ready, (void *)session, 0) < 0) { + printk("iSCSI: failed to start the test unit thread \n"); + } + } + + if (test_bit(SESSION_TASK_TIMEDOUT, &session->control_bits)) { + process_timedout_tasks(session); + } + + if (signal_pending(current)) break; + + if (test_bit(SESSION_COMMAND_TIMEDOUT, 
&session->control_bits)) { + process_timedout_commands(session); + } + + if (signal_pending(current)) break; + + /* See if we should send a ping (Nop with reply requested) */ + if (test_and_clear_bit(TX_PING, &session->control_bits)) { + uint32_t itt; + + DEBUG_FLOW("iSCSI: sending Nop/poll on session %p\n", session); + /* may need locking someday. see allocate_itt comment */ + itt = allocate_itt(session); + iscsi_xmit_ping(session, itt, NULL, 0); + } + + if (signal_pending(current)) break; + + /* See if we should send a ping (Nop with reply requested) containing test data*/ + if (test_and_clear_bit(TX_PING_DATA, &session->control_bits)) { + int data_length, length; + unsigned char *buffer; + unsigned long tx_start, tx_stop; + + /* grab the total data length and buffer to use */ + spin_lock(&session->task_lock); + length = data_length = session->ping_test_data_length; + buffer = session->ping_test_tx_buffer; + session->ping_test_tx_buffer = NULL; + spin_unlock(&session->task_lock); + + tx_start = jiffies; + while ((length > 0) && buffer) { + /* may need locking someday. 
see allocate_itt comment */ + uint32_t itt = allocate_itt(session); + + DEBUG_FLOW("iSCSI: sending Nop/poll with data on session %p\n", session); + iscsi_xmit_ping(session, itt, buffer, MIN(4096, length)); + + if (signal_pending(current)) { + printk("iSCSI: session %p Nop data tx failed at %lu\n", session, jiffies); + break; + } + + length -= MIN(4096, length); + } + tx_stop = jiffies; + + if (buffer) + kfree(buffer); + + printk("iSCSI: session %p tx Nop data test - tx %d of %d bytes, start %lu, stop %lu, jiffies %lu, HZ %u\n", + session, data_length - length, data_length, tx_start, tx_stop, tx_stop - tx_start, HZ); + } + + if (signal_pending(current)) break; + + /* See if we should send one or more Nops (replies requested by the target) */ + if (test_and_clear_bit(TX_NOP_REPLY, &session->control_bits)) { + DEBUG_FLOW("iSCSI: sending Nop replies on session %p\n", session); + iscsi_xmit_nop_replys(session); + } + + if (signal_pending(current)) break; + + /* See if we should warm reset the target */ + if (test_bit(SESSION_RESET_REQUESTED, &session->control_bits) && (session->warm_reset_itt == RSVD_TASK_TAG)) { + if (test_bit(SESSION_RESETTING, &session->control_bits)) { + /* error recovery is already doing a reset, so we don't need to */ + printk("iSCSI: session %p ignoring target reset request for (%u %u %u *), reset already in progress at %lu\n", + session, session->host_no, session->channel, session->target_id, jiffies); + clear_bit(SESSION_RESET_REQUESTED, &session->control_bits); + } + else { + uint32_t itt; + iscsi_task_t *task; + + spin_lock(&session->task_lock); + + session->warm_reset_itt = itt = allocate_itt(session); + session->reset_response_deadline = jiffies + (session->reset_timeout * HZ); + if (session->reset_response_deadline == 0) + session->reset_response_deadline = 1; + + printk("iSCSI: session %p requested target reset for (%u %u %u *), warm reset itt %u at %lu\n", + session, session->host_no, session->channel, session->target_id, itt, jiffies); 
+ /* prevent any new tasks from starting or existing tasks from completing */ + set_bit(SESSION_RESETTING, &session->control_bits); + for (task = session->arrival_order.head; task; task = task->order_next) { + DEBUG_EH("iSCSI: session %p warm target reset causing problems for LUN %u\n", session, task->lun); + set_bit(task->lun, session->luns_timing_out); + del_task_timer(task); + set_bit(0, &task->timedout); + /* the task mgmt response will set SESSION_TASK_TIMEDOUT and ensure these get processed later */ + } + spin_unlock(&session->task_lock); + + iscsi_xmit_task_mgmt(session, ISCSI_TM_FUNC_TARGET_WARM_RESET, NULL, itt); + } + } + + if (signal_pending(current)) break; + + if (test_and_clear_bit(SESSION_RETRY_COMMANDS, &session->control_bits)) { + /* try to queue up delayed commands for retries */ + iscsi_retry_commands(session); + } + + if (signal_pending(current)) break; + + /* New SCSI command received, or MaxCmdSN incremented, or task freed */ + if (test_and_clear_bit(TX_SCSI_COMMAND, &session->control_bits)) { + /* if possible, issue new commands */ + iscsi_xmit_queued_cmnds(session); + } + + if (signal_pending(current)) break; + + /* See if we need to send more data */ + if (test_and_clear_bit(TX_DATA, &session->control_bits)) { + /* NOTE: this may call iscsi_xmit_queued_cmnds under some conditions */ + iscsi_xmit_r2t_data(session); + } + + if (signal_pending(current)) break; + + if (test_and_clear_bit(TX_LOGOUT, &session->control_bits)) { + uint32_t itt; + + DEBUG_INIT("iSCSI: session %p sending Logout at %lu\n", session, jiffies); + /* may need locking someday. 
see allocate_itt comment */ + itt = allocate_itt(session); + session->logout_itt = itt; + smp_mb(); + iscsi_xmit_logout(session, itt, ISCSI_LOGOUT_REASON_CLOSE_SESSION); + } + } + + /* handle any signals that may have occured */ + iscsi_handle_signals(session); + } + + ThreadExit: + DEBUG_INIT("iSCSI: tx thread %d for session %p exiting\n", session->tx_pid, session); + + /* the rx thread may be waiting for the tx thread to block. make it happy */ + set_bit(TX_THREAD_BLOCKED, &session->control_bits); + wake_up(&session->tx_blocked_wait_q); + + /* we're done */ + set_current_state(TASK_RUNNING); + session->tx_pid = 0; + smp_mb(); + drop_reference(session); + + return 0; +} + +/* update LUN info for /proc/scsi/iscsi + * Watch the inquiry data rather than TEST_UNIT_READY response, + * since the TURs come from the target driver (sd, st, etc), and + * that may not happen until after a SCSI device node is opened + * and the target driver is loaded. We get INQUIRY commands when + * the HBA registers, and when our driver's LUN probing forces + * them. This is more reliable than waiting for TURs. 
+ */ +static void process_inquiry_data(iscsi_session_t *session, Scsi_Cmnd *sc, uint8_t *data) +{ + if (data && sc) { + /* look at the peripheral qualifier (bits 5,6, and 7 of the first byte), SPC-3 7.4.2 */ + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: session %p cmd %p INQUIRY peripheral 0x%02x from (%u %u %u %u), %s\n", + session, sc, *data, + sc->host->host_no, sc->channel, sc->target, sc->lun, session->log_name); + + switch ((*data & 0xE0) >> 5) { + case 0: /* possibly a LUN */ + if (sc->lun < ISCSI_MAX_LUN) { + /* detected a LUN */ + set_bit(sc->lun, session->luns_detected); + + /* unless it's one of our driver's LUN probes, the SCSI layer will activate this LUN */ + if ((sc->scsi_done != iscsi_done) && + !test_and_set_bit(sc->lun, session->luns_activated)) + { + /* assume we found a useable LUN */ + session->num_luns++; + smp_mb(); + } + } + break; + case 1: /* capable of supporting a physical device at this LUN, but not currently connected */ + /* as of 2002may09, the available Linux kernels treat qualifier 1 the same as qualifier 0, + * even though that's not really appropriate, and fills the log with a bunch of messages + * about unknown device types. Map it to qualifier 3, which gets silently ignored. 
+ */ + *data = 0x7F; /* Linux only ignores a 0x7F */ + /* fall-through */ + case 2: /* reserved */ + case 3: /* not capable of supporting a physical device at this LUN */ + default: + if (sc->lun < ISCSI_MAX_LUN) { + clear_bit(sc->lun, session->luns_detected); + + if ((sc->scsi_done != iscsi_done) && + test_and_clear_bit(sc->lun, session->luns_activated)) + { + /* there's not really a useable LUN */ + session->num_luns--; + smp_mb(); + } + } + break; + } + } + else { + printk("iSCSI: failed to process inquiry data for session %p, sc %p, data %p\n", + session, sc, data); + } +} + + +static void iscsi_recv_logout(iscsi_session_t *session, struct IscsiLogoutRspHdr *stlh) +{ + updateSN(session, ntohl(stlh->expcmdsn), ntohl(stlh->maxcmdsn)); + + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + session->logout_itt = RSVD_TASK_TAG; + session->logout_response_deadline = 0; + smp_mb(); + + if (test_bit(SESSION_LOGOUT_REQUESTED, &session->control_bits)) { + switch (stlh->response) { + case ISCSI_LOGOUT_SUCCESS: + /* set session's time2wait to zero? use DefaultTime2Wait? 
*/ + session->time2wait = 0; + printk("iSCSI: session %p to %s logged out at %lu\n", session, session->log_name, jiffies); + set_bit(SESSION_LOGGED_OUT, &session->control_bits); + smp_mb(); + iscsi_drop_session(session); + break; + case ISCSI_LOGOUT_CID_NOT_FOUND: + printk("iSCSI: session %p logout failed, cid not found\n", session); + iscsi_drop_session(session); + break; + case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED: + printk("iSCSI: session %p logout failed, connection recovery not supported\n", session); + iscsi_drop_session(session); + break; + case ISCSI_LOGOUT_CLEANUP_FAILED: + printk("iSCSI: session %p logout failed, cleanup failed\n", session); + iscsi_drop_session(session); + break; + default: + printk("iSCSI: session %p logout failed, response 0x%x\n", session, stlh->response); + iscsi_drop_session(session); + break; + } + } + else { + printk("iSCSI: session %p received logout response at %lu, but never sent a login request\n", session, jiffies); + iscsi_drop_session(session); + } +} + + +static int iscsi_recv_nop_data(iscsi_session_t *session, unsigned char *buffer, int data_length) +{ + /* read the nop data into the nop_info struct, and throw any pad bytes away */ + struct msghdr msg; + int bytes_read = 0, rc = 0; + int num_bytes = data_length; + int iovn = 1; + int pad = (data_length % PAD_WORD_LEN) ? 
(PAD_WORD_LEN - (data_length % PAD_WORD_LEN)) : 0; + uint32_t received_crc32c, calculated_crc32c; + + while (bytes_read < num_bytes) { + + /* data */ + session->rx_iov[0].iov_base = buffer + bytes_read; + session->rx_iov[0].iov_len = data_length - bytes_read; + num_bytes = data_length - bytes_read; + iovn = 1; + + if (pad) { + session->rx_iov[1].iov_base = session->rx_buffer; + session->rx_iov[1].iov_len = pad; + num_bytes += pad; + iovn++; + } + + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + session->rx_iov[1].iov_base = &received_crc32c; + session->rx_iov[1].iov_len = sizeof(received_crc32c); + num_bytes += sizeof(received_crc32c); + iovn++; + } + + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = session->rx_iov; + msg.msg_iovlen = iovn; + + rc = iscsi_recvmsg(session, &msg, num_bytes); + if ( rc <= 0) { + printk("iSCSI: session %p recv_nop_data failed to recv %d bytes, rc %d\n", session, num_bytes, rc); + iscsi_drop_session(session); + return bytes_read; + } + if (signal_pending(current)) { + return bytes_read; + } + + bytes_read += rc; + } + + DEBUG_FLOW("iSCSI: session %p recv_nop_data read %d bytes at %lu\n", session, num_bytes, jiffies); + + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + calculated_crc32c = iscsi_crc32c(buffer, data_length + pad); + if (calculated_crc32c != received_crc32c) { + printk("iSCSI: session %p recv_nop_data DataDigest mismatch, received 0x%08x, calculated 0x%08x\n", + session, received_crc32c, calculated_crc32c); + /* we're not required to do anything if Nop data has a digest error */ + } + } + + return data_length; +} + +static void iscsi_recv_nop(iscsi_session_t *session, struct IscsiNopInHdr *stnih) +{ + int dlength = ntoh24(stnih->dlength); + + DEBUG_FLOW("iSCSI: recv_nop for session %p from %s\n", session, session->log_name); + + if (stnih->itt != RSVD_TASK_TAG) { + /* FIXME: check StatSN */ + session->ExpStatSn = ntohl(stnih->statsn) + 1; + updateSN(session, ntohl(stnih->expcmdsn), ntohl(stnih->maxcmdsn)); 
+ /* it's a reply to one of our Nop-outs, so there was a PDU round-trip, and the connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + ISCSI_TRACE(ISCSI_TRACE_RxPingReply, NULL, NULL, ntohl(stnih->itt), dlength); + + /* if there is ping data in the reply, check to see if it matches what we expect */ + if (dlength) { + unsigned long rx_start, rx_stop; + + /* FIXME: make sure the dlength won't overflow the buffer */ + rx_start = jiffies; + if (iscsi_recv_nop_data(session, session->rx_buffer, dlength) != dlength) { + return; + } + rx_stop = jiffies; + + spin_lock(&session->task_lock); + if (session->ping_test_rx_start == 0) + session->ping_test_rx_start = rx_start; + + session->ping_test_rx_length -= dlength; + + if (session->ping_test_rx_length <= 0) { + /* all done */ + printk("iSCSI: session %p rx Nop data test - rx %d of %d bytes, start %lu, stop %lu, jiffies %lu, HZ %u\n", + session, session->ping_test_data_length - session->ping_test_rx_length, session->ping_test_data_length, + session->ping_test_rx_start, rx_stop, rx_stop - session->ping_test_rx_start, HZ); + printk("iSCSI: session %p Nop data test %d bytes, start %lu, stop %lu, jiffies %lu, HZ %u\n", + session, session->ping_test_data_length, + session->ping_test_start, rx_stop, rx_stop - session->ping_test_start, HZ); + session->ping_test_start = 0; + session->ping_test_rx_start = 0; + } + spin_unlock(&session->task_lock); + } + } + else { + /* FIXME: check StatSN, but don't advance it */ + updateSN(session, ntohl(stnih->expcmdsn), ntohl(stnih->maxcmdsn)); + } + + /* check the ttt to decide whether to reply with a Nop-out */ + if (stnih->ttt != RSVD_TASK_TAG) { + iscsi_nop_info_t *nop_info; + + ISCSI_TRACE( ISCSI_TRACE_RxNop, NULL, NULL, ntohl(stnih->itt), stnih->ttt); + + if (dlength == 0) { + /* we preallocate space for one data-less nop reply in the + * session structure, to avoid having to invoke the kernel + * memory allocator in the common case where the target + * has at most one 
outstanding data-less nop reply + * requested at any given time. + */ + spin_lock(&session->task_lock); + if ((session->nop_reply.ttt == RSVD_TASK_TAG) && (session->nop_reply_head == NULL)) { + session->nop_reply.ttt = stnih->ttt; + memcpy(session->nop_reply.lun, stnih->lun, sizeof(session->nop_reply.lun)); + spin_unlock(&session->task_lock); + DEBUG_FLOW("iSCSI: preallocated nop reply for ttt %u, dlength %d\n", ntohl(stnih->ttt), dlength); + wake_tx_thread(TX_NOP_REPLY, session); + return; + } + spin_unlock(&session->task_lock); + } + + /* otherwise, try to allocate a nop_info struct and queue it up */ + nop_info = kmalloc(sizeof(iscsi_nop_info_t) + dlength, GFP_ATOMIC); + if (nop_info) { + DEBUG_ALLOC("iSCSI: allocated nop_info %p, %u bytes\n", nop_info, sizeof(iscsi_nop_info_t) + dlength); + nop_info->next = NULL; + nop_info->ttt = stnih->ttt; + memcpy(nop_info->lun, stnih->lun, sizeof(nop_info->lun)); + nop_info->dlength = dlength; + + /* try to save any data from the nop for the reply */ + if (dlength) { + if (iscsi_recv_nop_data(session, nop_info->data, dlength) != dlength) { + kfree(nop_info); + return; + } + } + + /* queue it up */ + spin_lock(&session->task_lock); + if (session->nop_reply_head) { + session->nop_reply_tail->next = nop_info; + session->nop_reply_tail = nop_info; + } + else { + session->nop_reply_head = session->nop_reply_tail = nop_info; + } + spin_unlock(&session->task_lock); + + DEBUG_FLOW("iSCSI: queued nop reply for ttt %u, dlength %d\n", ntohl(stnih->ttt), dlength); + wake_tx_thread(TX_NOP_REPLY, session); + } + else { + printk("iSCSI: session %p couldn't queue nop reply for ttt %u\n", session, ntohl(stnih->ttt)); + } + } +} + + +static void iscsi_recv_cmd(iscsi_session_t *session, struct IscsiScsiRspHdr *stsrh, unsigned char *sense_data) +{ + iscsi_task_t *task; + Scsi_Cmnd *sc = NULL; + unsigned int senselen = 0; + uint32_t itt = ntohl(stsrh->itt); + + /* FIXME: check StatSN */ + session->ExpStatSn = ntohl(stsrh->statsn)+1; + 
updateSN(session, ntohl(stsrh->expcmdsn), ntohl(stsrh->maxcmdsn)); + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + /* find the task for the itt we received */ + spin_lock(&session->task_lock); + if ((task = find_session_task(session, itt))) { + /* task was waiting for this command response */ + __set_bit(TASK_COMPLETED, &task->flags); + sc = task->scsi_cmnd; + + /* for testing, we may want to ignore this command completion */ + if (session->ignore_completions && ((session->ignore_lun == -1) || (session->ignore_lun == task->lun))) { + /* for testing, the driver can be told to ignore command completion */ + printk("iSCSI: session %p recv_cmd ignoring completion of itt %u, task %p, LUN %u, sc %p, cdb 0x%x to (%u %u %u %u) at %lu\n", + session, itt, task, task->lun, sc, sc->cmnd[0], sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + session->ignore_completions--; + spin_unlock(&session->task_lock); + return; + } + + del_task_timer(task); + + if (sc == NULL) { + printk("iSCSI: session %p recv_cmd itt %u, task %p, refcount %d, no SCSI command at %lu\n", + session, itt, task, atomic_read(&task->refcount), jiffies); + /* this will just wait for the refcount to drop and then free the task */ + complete_task(session, itt); + return; + } + + DEBUG_QUEUE("iSCSI: session %p recv_cmd %p, itt %u, task %p, refcount %d\n", + session, sc, itt, task, atomic_read(&task->refcount)); + } + else { + DEBUG_INIT("iSCSI: session %p recv_cmd - response for itt %u, but no such task\n", session, itt); + spin_unlock(&session->task_lock); + return; + } + + /* check for sense data */ + if ((ntoh24(stsrh->dlength) > 1) && sense_data) { + /* Sense data format per draft-08, 3.4.6. 
2-byte sense length, then sense data, then iSCSI response data */ + senselen = (sense_data[0] << 8) | sense_data[1]; + if (senselen > (ntoh24(stsrh->dlength) - 2)) + senselen = (ntoh24(stsrh->dlength) - 2); + sense_data += 2; + } + + /* determine command result based on the iSCSI response, status, and sense */ + process_task_response(session, task, stsrh, sense_data, senselen); + +#if TEST_PROBE_RECOVERY + if (test_bit(SESSION_ESTABLISHED, &session->control_bits) && sc && + ((sc->cmnd[0] == REPORT_LUNS) || (sc->cmnd[0] == INQUIRY)) && + (stsrh->cmd_status == 0) && + (task->cmdsn >= ABORT_FREQUENCY) && + ((task->cmdsn % ABORT_FREQUENCY) >= 0) && ((task->cmdsn % ABORT_FREQUENCY) < ABORT_COUNT)) + { + /* don't complete this command, so that we can test the probe error handling code. */ + if ((task = remove_task(&session->completing_tasks, itt))) { + add_task(&session->rx_tasks, task); + printk("iSCSI: ignoring completion of itt %u, CmdSN %u, task %p, sc %p, cdb 0x%x to (%u %u %u %u)\n", + itt, task->cmdsn, task, sc, sc->cmnd[0], sc->host->host_no, sc->channel, sc->target, sc->lun); + } + atomic_dec(&task->refcount); + spin_unlock(&session->task_lock); + return; + } +#endif + + /* now that we're done with it, try to complete it. */ + DEBUG_FLOW("iSCSI: session %p recv_cmd attempting to complete itt %u\n", session, itt); + complete_task(session, itt); + /* Note: the task_lock will be unlocked by complete_task */ +} + +static void iscsi_recv_r2t(iscsi_session_t *session, struct IscsiRttHdr *strh ) +{ + iscsi_task_t *task = NULL; + uint32_t itt = ntohl(strh->itt); + + updateSN(session, ntohl(strh->expcmdsn), ntohl(strh->maxcmdsn)); + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + spin_lock(&session->task_lock); + if ((task = find_session_task(session, itt))) { + if (!test_bit(TASK_WRITE, &task->flags)) { + /* bug in the target. 
the command isn't a write, so we have no data to send */ + printk("iSCSI: session %p ignoring unexpected R2T for task %p, itt %u, %u bytes @ offset %u, ttt %u, not a write command\n", + session, task, ntohl(strh->itt), ntohl(strh->data_length), ntohl(strh->data_offset), ntohl(strh->ttt)); + iscsi_drop_session(session); + } + else if (task->scsi_cmnd == NULL) { + printk("iSCSI: session %p ignoring R2T for task %p, itt %u, %u bytes @ offset %u, ttt %u, no SCSI command\n", + session, task, ntohl(strh->itt), ntohl(strh->data_length), ntohl(strh->data_offset), ntohl(strh->ttt)); + } + else if (task->ttt != RSVD_TASK_TAG) { + /* bug in the target. MaxOutsandingR2T == 1 should have prevented this from occuring */ + printk("iSCSI: session %p ignoring R2T for task %p, itt %u, %u bytes @ offset %u, ttt %u, " + "already have R2T for %u @ %u, ttt %u\n", + session, task, ntohl(strh->itt), ntohl(strh->data_length), ntohl(strh->data_offset), ntohl(strh->ttt), + task->data_length, task->data_offset, ntohl(task->ttt)); + } + else { + /* record the R2T */ + task->ttt = strh->ttt; + task->data_length = ntohl(strh->data_length); + task->data_offset = ntohl(strh->data_offset); + ISCSI_TRACE(ISCSI_TRACE_R2T, task->scsi_cmnd, task, task->data_offset, task->data_length); + DEBUG_FLOW("iSCSI: session %p R2T for task %p itt %u, %u bytes @ offset %u\n", + session, task, ntohl(strh->itt), ntohl(strh->data_length), ntohl(strh->data_offset)); + + /* even if we've issued an abort task set, we're required + * to respond to R2Ts for this task, though we can + * apparently set the F-bit and terminate the data burst + * early. Rather than hope targets handle that correctly, + * we just send the data requested as usual. 
+ */ + add_task(&session->tx_tasks, task); + wake_tx_thread(TX_DATA, session); + } + } + else { + /* the task no longer exists */ + DEBUG_FLOW("iSCSI: session %p ignoring R2T for itt %u, %u bytes @ offset %u\n", + session, ntohl(strh->itt), ntohl(strh->data_length), ntohl(strh->data_offset)); + } + spin_unlock(&session->task_lock); +} + + +static void iscsi_recv_data(iscsi_session_t *session, struct IscsiDataRspHdr *stdrh) +{ + iscsi_task_t *task = NULL; + Scsi_Cmnd *sc = NULL; + struct scatterlist *sglist = NULL, *sg, *first_sg = NULL, *last_sg = NULL; + int length, dlength, remaining, rc, i; + int bytes_read = 0; + uint32_t offset, expected_offset = 0; + unsigned int iovn = 0, pad = 0; + unsigned int segment_offset = 0; + struct msghdr msg; + uint32_t itt = ntohl(stdrh->itt); + uint8_t *peripheral = NULL; + uint32_t received_crc32c; + int fake_data_mismatch = 0; + int ignore_completion = 0; + + if (stdrh->flags & ISCSI_FLAG_DATA_STATUS) { + /* FIXME: check StatSN */ + session->ExpStatSn = ntohl(stdrh->statsn)+1; + } + updateSN(session, ntohl(stdrh->expcmdsn), ntohl(stdrh->maxcmdsn)); + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + length = dlength = ntoh24( stdrh->dlength ); + offset = ntohl( stdrh->offset ); + + /* Compute padding bytes that follow the data */ + pad = dlength % PAD_WORD_LEN; + if (pad) { + pad = PAD_WORD_LEN - pad; + } + + spin_lock(&session->task_lock); + + task = find_session_task(session, itt); + if (task == NULL) { + printk("iSCSI: session %p recv_data, no task for itt %u (next itt %u), discarding received data, offset %u len %u\n", + session, ntohl(stdrh->itt), session->itt, offset, dlength); + } + else if (!test_bit(TASK_READ, &task->flags)) { + /* we shouldn't be getting Data-in unless it's a read */ + if (task->scsi_cmnd) + printk("iSCSI: session %p recv_data itt %u, task %p, command %p cdb 0x%02x, dropping session due to unexpected Data-in from (%u %u %u %u)\n", + session, itt, task, 
task->scsi_cmnd, task->scsi_cmnd->cmnd[0], + session->host_no, session->channel, session->target_id, task->lun); + else + printk("iSCSI: session %p recv_data itt %u, task %p, command NULL, dropping session due to unexpected Data-in from (%u %u %u %u)\n", + session, itt, task, + session->host_no, session->channel, session->target_id, task->lun); + + /* print the entire PDU header */ + printk("iSCSI: bogus Data-in PDU header: itt 0x%0x ttt 0x%0x, hlength %u dlength %u lun %u, " + "statsn 0x%0x expcmdsn 0x%0x maxcmdsn 0x%0x datasn 0x%0x, offset %u residual %u\n", + ntohl(stdrh->itt), ntohl(stdrh->ttt), stdrh->hlength, ntoh24(stdrh->dlength), stdrh->lun[1], + ntohl(stdrh->statsn), ntohl(stdrh->expcmdsn), ntohl(stdrh->maxcmdsn), ntohl(stdrh->datasn), + ntohl(stdrh->offset), ntohl(stdrh->residual_count)); + + iscsi_drop_session(session); + task = NULL; + spin_unlock(&session->task_lock); + return; + } + else { + /* accept all of the data for this task */ + sc = task->scsi_cmnd; + expected_offset = task->rxdata; + + if (sc) { + /* either we'll read it all, or we'll drop the session and requeue the command, + * so it's safe to increment the received data count before we actually read the data, + * while we still have the task_lock. 
+ */ + task->rxdata += dlength; + + /* ensure the task's command won't be completed while we're using it */ + atomic_inc(&task->refcount); + + DEBUG_FLOW("iSCSI: session %p recv_data itt %u, task %p, sc %p, datasn %u, offset %u, dlength %u\n", + session, itt, task, sc, ntohl(stdrh->datasn), offset, dlength); + } + else { + /* command has already been completed (by a timeout) */ + printk("iSCSI: session %p recv_data itt %u, task %p, no SCSI command at %lu\n", + session, itt, task, jiffies); + } + + /* if there is piggybacked status, ensure that we're not delaying commands to this LUN */ + if (stdrh->flags & ISCSI_FLAG_DATA_STATUS) { + /* mark the task completed */ + __set_bit(TASK_COMPLETED, &task->flags); + + if (sc && session->ignore_completions && ((session->ignore_lun < 0) || (session->ignore_lun == task->lun))) { + /* for testing, the driver can be told to ignore command completion */ + printk("iSCSI: session %p ignoring completion of itt %u, task %p, cmnd %p, cdb 0x%x to (%u %u %u %u) at %lu\n", + session, itt, task, sc, sc->cmnd[0], + session->host_no, session->channel, session->target_id, task->lun, jiffies); + + session->ignore_completions--; + ignore_completion = 1; + } + else { + del_task_timer(task); + + /* piggybacked status is always good */ + if (test_bit(task->lun, session->luns_delaying_commands)) { + __clear_bit(task->lun, session->luns_delaying_commands); + session->num_luns_delaying_commands--; + DEBUG_RETRY("iSCSI: session %p no longer delaying commands to (%u %u %u %u) at %lu\n", + session, session->host_no, session->channel, session->target_id, task->lun, jiffies); + if (session->num_luns_delaying_commands == 0) { + del_timer_sync(&session->retry_timer); + clear_bit(SESSION_RETRY_COMMANDS, &session->control_bits); + DEBUG_RETRY("iSCSI: session %p stopping retry timer at %lu\n", session, jiffies); + } + if (!test_bit(task->lun, session->luns_timing_out)) { + DECLARE_NOQUEUE_FLAGS; + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + 
requeue_deferred_commands(session, task->lun); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + } + smp_mb(); + } + } + } + + /* if there is no command, we don't increment the task refcount, so we can't keep using it */ + if (sc == NULL) + task = NULL; + + /* for testing, possibly fake a digest mismatch */ + if (session->fake_read_data_mismatch > 0) { + session->fake_read_data_mismatch--; + fake_data_mismatch = 1; + } + } +#if TEST_DELAYED_DATA + if (task && dlength && ((task->cmdsn % 500) == 0)) { + printk("iSCSI: testing delayed data for task %p, itt %u, cmdsn %u, dlength %u at %lu\n", + task, task->itt, task->cmdsn, dlength, jiffies); + atomic_dec(&task->refcount); + task = NULL; + sc = NULL; + spin_unlock(&session->task_lock); + session->last_rx = jiffies + (45 * HZ); + smp_mb(); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(37 * HZ); + spin_lock(&session->task_lock); + printk("iSCSI: test of delayed data continuing at %lu\n", jiffies); + } +#endif + spin_unlock(&session->task_lock); + + if (sc == NULL) + goto toss_data; + + /* sanity check the PDU against the command */ + if ((offset + dlength) > sc->request_bufflen) { + /* buffer overflow, often because of a corrupt PDU header */ + printk("iSCSI: session %p recv_data for itt %u, task %p, cmnd %p, bufflen %u, Data PDU with offset %u len %u overflows command buffer, dropping session\n", + session, itt, task, sc, sc->request_bufflen, offset, dlength); + + if (task) + atomic_dec(&task->refcount); + iscsi_drop_session(session); + return; + } + else if (expected_offset != offset) { + /* if the data arrives out-of-order, it becomes much harder + * for us to correctly calculate the residual if we don't get + * enough data and also don't get an underflow from the + * target. This can happen if we discard Data PDUs due to + * bogus offsets/lengths. Since we always negotiate for + * Data PDUs in-order, this should never happen, but check + * for it anyway. 
+ */ + /* buffer overflow, often because of a corrupt PDU header */ + printk("iSCSI: session %p recv_data for itt %u, task %p, cmnd %p, bufflen %u, offset %u does not match expected offset %u, dropping session\n", + session, itt, task, sc, sc->request_bufflen, offset, expected_offset); + + if (task) + atomic_dec(&task->refcount); + iscsi_drop_session(session); + return; + } + + + /* configure for receiving the data */ + if (sc->use_sg) { + int index; + + /* scatter-gather */ + sg = sglist = (struct scatterlist *)sc->request_buffer; + segment_offset = offset; + + for (index = 0; index < sc->use_sg; index++) { + if (segment_offset < sglist[index].length) + break; + else + segment_offset -= sglist[index].length; + } + + if (index >= sc->use_sg) { + /* didn't find the offset, toss the data and let the command underflow */ + printk("iSCSI: session %p recv_data for itt %u couldn't find offset %u in sglist %p, sc %p, bufflen %u, use_sg %u, dropping session\n", + session, task->itt, offset, sglist, sc, sc->request_bufflen, sc->use_sg); + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_BadOffset, sc, task, offset, sc->request_bufflen); + /* FIXME: discard the data, or drop the session? */ + atomic_dec(&task->refcount); + iscsi_drop_session(session); + return; + } + else { + remaining = dlength; + + /* setup all the data buffers */ + while (sc && (remaining > 0) && (index < sc->use_sg)) { + sg = &sglist[index]; + + if (!kmap_sg(sg)) { + printk("iSCSI: session %p recv_data itt %u task %p failed to map sg %p\n", + session, task->itt, task, sg); + print_cmnd(sc); + /* FIXME: discard the data, or drop the session? 
*/ + atomic_dec(&task->refcount); + iscsi_drop_session(session); + return; + } + else if (first_sg == NULL) { + first_sg = sg; + } + + last_sg = sg; + + /* sanity check the sglist segment length */ + if (sg->length <= segment_offset) { + /* the sglist is corrupt */ + printk("iSCSI: session %p recv_data index %d, length %u too small for offset %u, remaining %d, sglist has been corrupted\n", + session, index, sg->length, segment_offset, remaining); + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_BadRxSeg, sc, task, sg->length, segment_offset); + /* FIXME: discard the data, or drop the session? */ + atomic_dec(&task->refcount); + iscsi_drop_session(session); + return; + } + + session->rx_iov[iovn].iov_base = sg_virtual_address(sg) + segment_offset; + session->rx_iov[iovn].iov_len = MIN(remaining, sg->length - segment_offset); + remaining -= session->rx_iov[iovn].iov_len; + + DEBUG_FLOW("iSCSI: recv_data itt %u, iov[%2d] = sg[%2d] = %p, %u of %u bytes, remaining %u\n", + itt, iovn, sg - sglist, session->rx_iov[iovn].iov_base, session->rx_iov[iovn].iov_len, + sg->length, remaining); + index++; + iovn++; + segment_offset = 0; + } + + if (remaining != 0) { + /* we ran out of buffer space with more data remaining. + * this should never happen if the Scsi_Cmnd's bufflen + * matches the combined length of the sglist segments. + */ + printk("iSCSI: session %p recv_data for cmnd %p, bufflen %u, offset %u len %u, remaining data %u, dropping session\n", + session, sc, sc->request_bufflen, offset, dlength, remaining); + print_cmnd(sc); + /* FIXME: discard the data, or drop the session? 
*/ + atomic_dec(&task->refcount); + iscsi_drop_session(session); + return; + } + } + } + else { + /* no scatter-gather, just read it into the buffer */ + session->rx_iov[0].iov_base = sc->request_buffer + offset; + session->rx_iov[0].iov_len = dlength; + iovn = 1; + } + + if (pad) { + session->rx_iov[iovn].iov_base = session->rx_buffer; + session->rx_iov[iovn].iov_len = pad; + iovn++; + length += pad; + } + + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + /* If we're calculating a data digest, we need to save the pointer + * and length values in the iovecs before the recvmsg modifies + * them (or walk through the sglist again and recalculate + * them later, which seems inefficient). + */ + for (i = 0; i < iovn; i++) { + session->crc_rx_iov[i].iov_base = session->rx_iov[i].iov_base; + session->crc_rx_iov[i].iov_len = session->rx_iov[i].iov_len; + } + + /* and we need to receive the target's digest */ + session->rx_iov[iovn].iov_base = &received_crc32c; + session->rx_iov[iovn].iov_len = sizeof(received_crc32c); + iovn++; + length += sizeof(received_crc32c); + } + + /* save the address of the first byte of INQUIRY data */ + if ((sc->cmnd[0] == INQUIRY) && (offset == 0) && (dlength > 0)) + peripheral = session->rx_iov[0].iov_base; + + /* accept the data */ + memset( &msg, 0, sizeof(struct msghdr) ); + msg.msg_iov = session->rx_iov; + msg.msg_iovlen = iovn; + + DEBUG_FLOW("iSCSI: recv_data itt %u calling recvmsg %d bytes, iovn %u, rx_iov[0].base = %p\n", + itt, dlength + pad, iovn, session->rx_iov[0].iov_base); + rc = iscsi_recvmsg( session, &msg, length); + + if (rc == length) { + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + uint32_t calculated_crc32c = iscsi_crc32c(session->crc_rx_iov[0].iov_base, session->crc_rx_iov[0].iov_len); + + /* add in all other segments, except for the digest itself */ + for (i = 1; i < iovn - 1; i++) { + calculated_crc32c = 
iscsi_crc32c_continued(session->crc_rx_iov[i].iov_base, session->crc_rx_iov[i].iov_len, + calculated_crc32c); + } + + if (fake_data_mismatch) { + printk("iSCSI: session %p faking read DataDigest mismatch for itt %u, task %p\n", + session, task->itt, task); + calculated_crc32c = 0x01020304; + } + + if (calculated_crc32c != received_crc32c) { + unsigned int lun = task->lun; + printk("iSCSI: session %p recv_data for itt %u, task %p, cmnd %p DataDigest mismatch, received 0x%08x, calculated 0x%08x, triggering error recovery for LUN %u\n", + session, itt, task, sc, received_crc32c, calculated_crc32c, lun); + if (first_sg) { + /* undo any temporary mappings */ + for (sg = first_sg; sg <= last_sg; sg++) { + kunmap_sg(sg); + } + first_sg = NULL; + } + /* we MUST abort this task. To avoid reordering, we + * trigger recovery for all tasks to this LUN. + */ + spin_lock(&session->task_lock); + task->rxdata = 0; + atomic_dec(&task->refcount); + trigger_error_recovery(session, lun); + spin_unlock(&session->task_lock); + return; + } + } + } + else { + printk("iSCSI: session %p recv_data for itt %u, task %p, cmnd %p failed to recv %d data PDU bytes, rc %d\n", + session, task->itt, task, sc, length, rc); + atomic_dec(&task->refcount); + iscsi_drop_session(session); + return; + } + + /* update LUN info based on the INQUIRY data, since we've got it mapped now */ + if (peripheral) + process_inquiry_data(session, task->scsi_cmnd, peripheral); + + /* done with the data buffers */ + if (first_sg) { + /* undo any temporary mappings */ + for (sg = first_sg; sg <= last_sg; sg++) { + kunmap_sg(sg); + } + } + + ISCSI_TRACE(ISCSI_TRACE_RxData, sc, task, offset, dlength); + + if ((stdrh->flags & ISCSI_FLAG_DATA_STATUS) && !ignore_completion) { + unsigned int expected = iscsi_expected_data_length(sc); + + /* we got status, meaning the command completed in a way that + * doesn't give us any sense data, and the command must be + * completed now, since we won't get a command response PDU. 
+ */ + DEBUG_FLOW("iSCSI: Data-in with status 0x%x for itt %u, task %p, sc %p\n", + stdrh->cmd_status, ntohl(stdrh->itt), task, task->scsi_cmnd); + ISCSI_TRACE( ISCSI_TRACE_RxDataCmdStatus, sc, task, stdrh->cmd_status, 0); + sc->result = HOST_BYTE(DID_OK) | STATUS_BYTE(stdrh->cmd_status); + + spin_lock(&session->task_lock); + + if ((stdrh->flags & ISCSI_FLAG_DATA_OVERFLOW) || (stdrh->flags & ISCSI_FLAG_DATA_UNDERFLOW) || + ((test_bit(TASK_READ, &task->flags)) && (task->rxdata != expected))) + { + if (LOG_ENABLED(ISCSI_LOG_QUEUE) || LOG_ENABLED(ISCSI_LOG_FLOW)) { + printk("iSCSI: session %p task %p itt %u to (%u %u %u %u), cdb 0x%x, %c%c %s, received %u, residual %u, expected %u\n", + session, task, task->itt, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0], + (stdrh->flags & ISCSI_FLAG_DATA_OVERFLOW) ? 'O' : ' ', (stdrh->flags & ISCSI_FLAG_DATA_UNDERFLOW) ? 'U' : ' ', + (stdrh->flags & ISCSI_FLAG_DATA_OVERFLOW) ? "overflow" : "underflow", + task->rxdata, ntohl(stdrh->residual_count), expected); + } + + if (stdrh->flags & ISCSI_FLAG_DATA_UNDERFLOW) { + ISCSI_TRACE(ISCSI_TRACE_RxUnderflow, sc, task, ntohl(stdrh->residual_count), expected); + sc->resid = ntohl(stdrh->residual_count); + } + else if (stdrh->flags & ISCSI_FLAG_DATA_OVERFLOW) { + /* FIXME: not sure how to tell the SCSI layer of an overflow, so just give it an error */ + ISCSI_TRACE(ISCSI_TRACE_RxOverflow, sc, task, ntohl(stdrh->residual_count), expected); + sc->result = HOST_BYTE(DID_ERROR) | STATUS_BYTE(stdrh->cmd_status); + } + else { + /* All the read data did not arrive */ + ISCSI_TRACE(ISCSI_TRACE_HostUnderflow, sc, task, task->rxdata, expected); + /* we don't know which parts of the buffer didn't get data, so report the whole buffer missing */ + sc->resid = expected; + } + } + + /* done using the command's data buffers and structure fields */ + atomic_dec(&task->refcount); + + /* try to complete the task. 
complete_task expects the task_lock held, but returns with it unlocked */ + complete_task(session, itt); + } + else { + /* done modifying the command and task */ + atomic_dec(&task->refcount); + } + + return; + + toss_data: + /* just throw away the PDU */ + if (first_sg) { + /* undo any temporary mappings */ + for (sg = first_sg; sg <= last_sg; sg++) { + kunmap_sg(sg); + } + } + + bytes_read = 0; + length = dlength + pad; + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + printk("iSCSI: session %p recv_data discarding %d data PDU bytes, %d pad bytes, %Zu digest bytes\n", + session, dlength, pad, sizeof(received_crc32c)); + length += sizeof(received_crc32c); + } + else { + printk("iSCSI: session %p recv_data discarding %d data PDU bytes, %d pad bytes\n", + session, dlength, pad); + } + + while (!signal_pending(current) && (bytes_read < length)) { + int num_bytes = MIN(length - bytes_read, sizeof(session->rx_buffer)); + + /* FIXME: can we use the same rx_buffer in all the iovecs, since we're discarding the data anyway? + * That would reduce the number of recvmsg calls we have to make. 
+ */ + session->rx_iov[0].iov_base = session->rx_buffer; + session->rx_iov[0].iov_len = sizeof(session->rx_buffer); + memset( &msg, 0, sizeof(struct msghdr) ); + msg.msg_iov = &session->rx_iov[0]; + msg.msg_iovlen = 1; + rc = iscsi_recvmsg(session, &msg, num_bytes); + if ( rc <= 0) { + printk("iSCSI: session %p recv_data failed to recv and discard %d data PDU bytes, rc %d, bytes_read %d\n", + session, length, rc, bytes_read); + iscsi_drop_session(session); + } + else { + /* assume a PDU round-trip, connection is ok */ + bytes_read += rc; + DEBUG_FLOW("iSCSI: session %p recv_data discarded %d bytes, tossed %d of %d bytes at %lu\n", + session, rc, bytes_read, length, jiffies); + session->last_rx = jiffies; + smp_mb(); + } + } + + /* We don't bother checking the CRC, since we couldn't retry the command anyway */ + if (task) { + atomic_dec(&task->refcount); + task = NULL; + } + + if (stdrh->flags & ISCSI_FLAG_DATA_STATUS) { + spin_lock(&session->task_lock); + complete_task(session, itt); + /* complete_task will release the lock */ + } +} + +static void iscsi_recv_task_mgmt(iscsi_session_t *session, struct IscsiScsiTaskMgtRspHdr *ststmrh ) +{ + iscsi_task_t *task = NULL; + uint32_t mgmt_itt = ntohl(ststmrh->itt); + int ignored = 0; + + /* FIXME: check StatSN */ + session->ExpStatSn = ntohl(ststmrh->statsn)+1; + updateSN(session, ntohl(ststmrh->expcmdsn), ntohl(ststmrh->maxcmdsn)); + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + spin_lock(&session->task_lock); + + /* we should always find the task, since we don't allow them to leave + * the driver once we've started error recovery, and we shouldn't + * receive a task mgmt response until we've started error recovery. 
+ */ + if ((task = find_session_mgmt_task(session, mgmt_itt))) { + /* we save the recovery state in the session when we send task mgmt PDUs, + * since a command completion that arrives after we start recovery may + * change the task's state after we send the task mgmt PDU. We want + * to remember what we sent and act accordingly. + */ + if (test_bit(TASK_TRY_ABORT, &task->flags)) { + ISCSI_TRACE(ISCSI_TRACE_RxAbort, task->scsi_cmnd, task, mgmt_itt, ststmrh->response); + if (session->ignore_aborts && ((session->ignore_lun < 0) || (session->ignore_lun == task->lun))) { + session->ignore_aborts--; + ignored = 1; + if (task->scsi_cmnd) + printk("iSCSI: session %p ignoring abort response 0x%x for mgmt %u, itt %u, task %p, cmnd %p, cdb 0x%x at %lu\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, + task, task->scsi_cmnd, task->scsi_cmnd->cmnd[0], jiffies); + else + printk("iSCSI: session %p ignoring abort response 0x%x for mgmt %u, itt %u, task %p, cmnd NULL at %lu\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, jiffies); + } + else if (session->reject_aborts && ((session->reject_lun < 0) || (session->reject_lun == task->lun))) { + session->reject_aborts--; + if (task->scsi_cmnd) + printk("iSCSI: session %p treating abort response 0x%x as reject for mgmt %u, itt %u, task %p, cmnd %p, cdb 0x%x\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd, task->scsi_cmnd->cmnd[0]); + else + printk("iSCSI: session %p treating abort response 0x%x as reject for mgmt %u, itt %u, task %p, cmnd NULL\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task); + + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_ABORT_TASK_SET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else if (ststmrh->response == 0) { + if (task->scsi_cmnd) + printk("iSCSI: session %p abort success for mgmt %u, itt %u, task %p, cmnd %p, cdb 0x%x\n", + session, ntohl(ststmrh->itt), task->itt, task, 
task->scsi_cmnd, task->scsi_cmnd->cmnd[0]); + else + printk("iSCSI: session %p abort success for mgmt %u, itt %u, task %p, cmnd NULL\n", + session, ntohl(ststmrh->itt), task->itt, task); + task->flags &= ~TASK_RECOVERY_MASK; + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else if (test_bit(TASK_COMPLETED, &task->flags)) { + /* we received a command completion before the abort response, + * so the task mgmt abort doesn't need to succeed. + */ + if (task->scsi_cmnd) + printk("iSCSI: session %p abort success for mgmt %u due to completion of itt %u, task %p, cmnd %p, cdb 0x%x\n", + session, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd, task->scsi_cmnd->cmnd[0]); + else + printk("iSCSI: session %p abort success for mgmt %u due to completion of itt %u, task %p, cmnd NULL\n", + session, ntohl(ststmrh->itt), task->itt, task); + + task->flags &= ~TASK_RECOVERY_MASK; + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else { + if (task->scsi_cmnd) + printk("iSCSI: session %p abort rejected (0x%x) for mgmt %u, itt %u, task %p, cmnd %p, cdb 0x%x\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, + task, task->scsi_cmnd, task->scsi_cmnd->cmnd[0]); + else + printk("iSCSI: session %p abort rejected (0x%x) for mgmt %u, itt %u, task %p, cmnd NULL\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task); + + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_ABORT_TASK_SET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + } + else if (test_bit(TASK_TRY_ABORT_TASK_SET, &task->flags)) { + ISCSI_TRACE(ISCSI_TRACE_RxAbortTaskSet, task ? 
task->scsi_cmnd : NULL, task, mgmt_itt, ststmrh->response); + if (session->ignore_abort_task_sets && ((session->ignore_lun < 0) || (session->ignore_lun == task->lun))) { + session->ignore_abort_task_sets--; + printk("iSCSI: session %p ignoring abort task set response 0x%x for mgmt %u, itt %u, task %p, cmnd %p, at %lu\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, + task, task->scsi_cmnd, jiffies); + ignored = 1; + } + else if (session->reject_abort_task_sets && ((session->reject_lun < 0) || (session->reject_lun == task->lun))) { + session->reject_abort_task_sets--; + printk("iSCSI: session %p treating abort task set response 0x%x as reject for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_LUN_RESET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else if (ststmrh->response == 0) { + iscsi_task_t *t; + printk("iSCSI: session %p abort task set success for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + /* all tasks to this LUN have been recovered */ + for (t = session->arrival_order.head; t; t = t->order_next) { + if (task->lun == t->lun) + t->flags &= ~TASK_RECOVERY_MASK; + } + task->flags &= ~TASK_RECOVERY_MASK; + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else { + printk("iSCSI: session %p abort task set rejected (0x%x) for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_LUN_RESET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + } + else if (test_bit(TASK_TRY_LUN_RESET, &task->flags)) { + ISCSI_TRACE(ISCSI_TRACE_RxLunReset, task ? 
task->scsi_cmnd : NULL, task, mgmt_itt, ststmrh->response); + if (session->ignore_lun_resets && ((session->ignore_lun < 0) || (session->ignore_lun == task->lun))) { + session->ignore_lun_resets--; + ignored = 1; + printk("iSCSI: session %p ignoring LUN reset response 0x%x for mgmt %u, itt %u, task %p, cmnd %p at %lu\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, + task, task->scsi_cmnd, jiffies); + } + else if (session->reject_lun_resets && ((session->reject_lun < 0) || (session->reject_lun == task->lun))) { + session->reject_lun_resets--; + printk("iSCSI: session %p treating LUN reset response 0x%x as reject for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_WARM_RESET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else if (ststmrh->response == 0) { + iscsi_task_t *t; + printk("iSCSI: session %p LUN reset success for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + + /* tell all devices attached to this LUN that a reset occured */ + lun_reset_occured(session, task->lun); + + /* all tasks to this LUN have been recovered */ + for (t = session->arrival_order.head; t; t = t->order_next) { + if (task->lun == t->lun) { + printk("iSCSI: session %p LUN reset success recovering itt %u, task %p, cmnd %p\n", + session, t->itt, t, t->scsi_cmnd); + t->flags &= ~TASK_RECOVERY_MASK; + } + } + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else { + printk("iSCSI: session %p LUN reset rejected (0x%x) for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_WARM_RESET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + } + else if (test_bit(TASK_TRY_WARM_RESET, &task->flags)) { + 
ISCSI_TRACE(ISCSI_TRACE_RxWarmReset, task ? task->scsi_cmnd : NULL, task, mgmt_itt, ststmrh->response); + if (session->ignore_warm_resets && ((session->ignore_lun < 0) || (session->ignore_lun == task->lun))) { + session->ignore_warm_resets--; + printk("iSCSI: session %p ignoring warm reset response 0x%x for mgmt %u, itt %u, task %p, cmnd %p at %lu\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, + task, task->scsi_cmnd, jiffies); + ignored = 1; + } + else if (session->reject_warm_resets && ((session->reject_lun < 0) || (session->reject_lun == task->lun))) { + session->reject_warm_resets--; + printk("iSCSI: session %p treating warm reset response 0x%x as reject for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_COLD_RESET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else if (ststmrh->response == 0) { + iscsi_task_t *t; + printk("iSCSI: session %p warm target reset success for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + + /* tell all devices attached to this target that a reset occured */ + target_reset_occured(session); + + /* mark all tasks recovered */ + for (t = session->arrival_order.head; t; t = t->order_next) { + printk("iSCSI: session %p warm target reset success recovering itt %u, task %p, cmnd %p\n", + session, t->itt, t, t->scsi_cmnd); + t->flags &= ~TASK_RECOVERY_MASK; + } + + /* and recover them */ + set_bit(SESSION_RESET, &session->control_bits); + smp_mb(); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else { + printk("iSCSI: session %p warm target reset rejected (0x%x) for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_COLD_RESET, &task->flags); + 
wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + } + else if (test_bit(TASK_TRY_COLD_RESET, &task->flags)) { + /* we probably won't ever get a task mgmt response for a cold reset that works, + * since the target should drop the session as part of the reset. + */ + ISCSI_TRACE(ISCSI_TRACE_RxColdReset, task ? task->scsi_cmnd : NULL, task, mgmt_itt, ststmrh->response); + task->flags &= ~TASK_RECOVERY_MASK; + if (session->ignore_cold_resets && ((session->ignore_lun < 0) || (session->ignore_lun == task->lun))) { + session->ignore_cold_resets--; + printk("iSCSI: session %p ignoring cold reset response 0x%x for mgmt %u, itt %u, task %p, cmnd %p at %lu\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, + task, task->scsi_cmnd, jiffies); + ignored = 1; + } + else if (session->reject_cold_resets && ((session->reject_lun < 0) || (session->reject_lun == task->lun))) { + session->reject_cold_resets--; + printk("iSCSI: session %p treating cold reset response 0x%x as reject for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + task->flags &= ~TASK_RECOVERY_MASK; + __set_bit(TASK_TRY_COLD_RESET, &task->flags); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else if (ststmrh->response == 0) { + iscsi_task_t *t; + + printk("iSCSI: session %p cold target reset success for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ntohl(ststmrh->itt), task->itt, task, task->scsi_cmnd); + + /* mark all tasks recovered */ + for (t = session->arrival_order.head; t; t = t->order_next) { + printk("iSCSI: session %p cold target reset success recovering itt %u, task %p, cmnd %p\n", + session, t->itt, t, t->scsi_cmnd); + t->flags &= ~TASK_RECOVERY_MASK; + } + + /* clear any requested reset, since we just did one */ + session->warm_reset_itt = RSVD_TASK_TAG; + clear_bit(SESSION_RESET_REQUESTED, &session->control_bits); + /* and recover all the tasks */ + set_bit(SESSION_RESET, &session->control_bits); 
+ smp_mb(); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else { + printk("iSCSI: session %p cold target reset rejected (0x%x) for mgmt %u, itt %u, task %p, cmnd %p\n", + session, ststmrh->response, ntohl(ststmrh->itt), task->itt, + task, task->scsi_cmnd); + /* nothing left to try, just drop the session and hope the target clears the problem */ + iscsi_drop_session(session); + } + } + } + else if (mgmt_itt == session->warm_reset_itt) { + /* response to a requested reset */ + if (session->ignore_warm_resets && ((session->ignore_lun < 0) || (session->ignore_lun == task->lun))) { + session->ignore_warm_resets--; + printk("iSCSI: session %p ignoring warm reset response 0x%x for mgmt %u at %lu\n", + session, ststmrh->response, mgmt_itt, jiffies); + ignored = 1; + } + else if (session->reject_warm_resets && ((session->reject_lun < 0) || (session->reject_lun == task->lun))) { + session->reject_warm_resets--; + printk("iSCSI: session %p ignoring warm reset response 0x%x for mgmt %u at %lu\n", + session, ststmrh->response, mgmt_itt, jiffies); + + session->warm_reset_itt = RSVD_TASK_TAG; + clear_bit(SESSION_RESET_REQUESTED, &session->control_bits); + smp_mb(); + } + else if (ststmrh->response == 0) { + iscsi_task_t *t; + printk("iSCSI: session %p warm target reset success for mgmt %u at %lu\n", session, mgmt_itt, jiffies); + + session->warm_reset_itt = RSVD_TASK_TAG; + clear_bit(SESSION_RESET_REQUESTED, &session->control_bits); + smp_mb(); + + /* tell all devices attached to this target that a reset occured */ + target_reset_occured(session); + + /* mark all tasks recovered */ + for (t = session->arrival_order.head; t; t = t->order_next) { + printk("iSCSI: session %p warm target reset killed itt %u, task %p, cmnd %p\n", + session, t->itt, t, t->scsi_cmnd); + t->flags &= ~TASK_RECOVERY_MASK; + } + + /* and recovery them */ + set_bit(SESSION_RESET, &session->control_bits); + smp_mb(); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else { + /* didn't work. 
just give up */ + session->warm_reset_itt = RSVD_TASK_TAG; + clear_bit(SESSION_RESET_REQUESTED, &session->control_bits); + smp_mb(); + + printk("iSCSI: session %p warm target reset rejected (0x%x) for mgmt %u at %lu\n", + session, ststmrh->response, mgmt_itt, jiffies); + } + } + else { + printk("iSCSI: session %p mgmt response 0x%x for unknown itt %u, rtt %u\n", + session, ststmrh->response, ntohl(ststmrh->itt), ntohl(ststmrh->rtt)); + } + + if (!ignored && (session->mgmt_itt == mgmt_itt)) { + /* we got the expected response, allow the tx thread to send another task mgmt PDU whenever it wants to */ + session->mgmt_itt = RSVD_TASK_TAG; + session->task_mgmt_response_deadline = 0; + smp_mb(); + } + + spin_unlock(&session->task_lock); +} + + +void retry_immediate_mgmt_pdus(unsigned long arg) +{ + iscsi_session_t *session = (iscsi_session_t *)arg; + + session->immediate_reject_timer.expires = 0; + smp_mb(); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); +} + + +static void iscsi_recv_reject(iscsi_session_t *session, struct IscsiRejectRspHdr *reject, unsigned char *xbuf) +{ + int dlength = ntoh24(reject->dlength); + uint32_t itt = 0; + iscsi_task_t *task = NULL; + struct IscsiHdr pdu; + + /* FIXME: check StatSN */ + session->ExpStatSn = ntohl(reject->statsn)+1; + updateSN(session, ntohl(reject->expcmdsn), ntohl(reject->maxcmdsn)); + /* assume a PDU round-trip, connection is ok */ + session->last_rx = jiffies; + smp_mb(); + + if (reject->reason == DATA_DIGEST_ERROR) { + /* we don't need to do anything about these, timers or other PDUs will handle the problem */ + if (dlength >= sizeof(pdu)) { + memcpy(&pdu, xbuf, sizeof(pdu)); + itt = ntohl(pdu.itt); + printk("iSCSI: session %p itt %u (opcode 0x%x) rejected because of a DataDigest error at %lu\n", + session, itt, pdu.opcode, jiffies); + } + else { + printk("iSCSI: session %p target rejected a PDU because of a DataDigest error at %lu\n", session, jiffies); + } + } + else if (reject->reason == IMM_CMD_REJECT) { + if 
(dlength >= sizeof(pdu)) { + /* look at the rejected PDU */ + memcpy(&pdu, xbuf, sizeof(pdu)); + itt = ntohl(pdu.itt); + + /* try to find the task corresponding to this itt, and wake up any process waiting on it */ + spin_lock(&session->task_lock); + + if (session->mgmt_itt == itt) + session->mgmt_itt = RSVD_TASK_TAG; + + if ((task = find_session_mgmt_task(session, itt))) { + if (task->scsi_cmnd) + DEBUG_EH("iSCSI: session %p task mgmt PDU rejected, mgmt %u, task %p, itt %u, cmnd %p, cdb 0x%x\n", + session, itt, task, task->itt, task->scsi_cmnd, task->scsi_cmnd->cmnd[0]); + else + DEBUG_EH("iSCSI: session %p task mgmt PDU rejected, mgmt %u, task %p, itt %u, cmnd NULL\n", + session, itt, task, task->itt); + + if (session->immediate_reject_timer.expires == 0) { + session->immediate_reject_timer.expires = jiffies + MSECS_TO_JIFFIES(40); + session->immediate_reject_timer.data = (unsigned long)session; + session->immediate_reject_timer.function = retry_immediate_mgmt_pdus; + DEBUG_EH("iSCSI: session %p scheduling task mgmt %u retry for %lu at %lu\n", + session, itt, session->busy_task_timer.expires, jiffies); + del_timer_sync(&session->busy_task_timer); /* make sure it's not running now */ + add_timer(&session->immediate_reject_timer); + } + } + else if ((pdu.opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT_CMD) { + /* our Logout was rejected. 
just let the logout response timer drop the session */ + printk("iSCSI: session %p logout PDU rejected, itt %u\n", session, itt); + session->logout_itt = RSVD_TASK_TAG; + smp_mb(); + } + else { + printk("iSCSI: session %p, itt %u immediate command rejected at %lu\n", + session, itt, jiffies); + } + spin_unlock(&session->task_lock); + } + else { + printk("iSCSI: session %p, immediate command rejected at %lu, dlength %u\n", + session, jiffies, dlength); + } + } + else { + if (dlength >= sizeof(pdu)) { + /* look at the rejected PDU */ + memcpy(&pdu, xbuf, sizeof(pdu)); + itt = ntohl(pdu.itt); + printk("iSCSI: dropping session %p because target rejected a PDU, reason 0x%x, dlength %d, rejected itt %u, opcode 0x%x\n", + session, reject->reason, dlength, itt, pdu.opcode); + } + else { + printk("iSCSI: dropping session %p because target rejected a PDU, reason 0x%x, dlength %u\n", + session, reject->reason, dlength); + } + iscsi_drop_session(session); + } +} + +static int iscsi_disk_initialize(void *vtaskp) +{ + iscsi_session_t *session; + + session = (iscsi_session_t *)vtaskp; + printk("iSCSI: session %p disk init thread %d about to daemonize on cpu%d\n", session, current->pid, smp_processor_id()); + + /* become a daemon kernel thread */ + sprintf(current->comm,"iscsi-disk-init-thr"); + iscsi_daemonize(); + session->disk_init_pid = current->pid; + current->flags |= PF_MEMALLOC; + smp_mb(); + + /* Block all signals except SIGHUP and SIGKILL */ + LOCK_SIGNALS(); + siginitsetinv(¤t->blocked, sigmask(SIGKILL) | sigmask(SIGHUP)); + RECALC_PENDING_SIGNALS; + UNLOCK_SIGNALS(); + reinitialize_disk(session); + DEBUG_INIT("iSCSI: disk init thread leaving kernel at %lu\n", jiffies); + set_current_state(TASK_RUNNING); + session->disk_init_pid = 0; + smp_mb(); + return 1; +} + +static int iscsi_unit_ready(void *vtaskp) +{ + iscsi_session_t *session; + + session = (iscsi_session_t *)vtaskp; + printk("iSCSI: session %p send tur thread %d about to daemonize on cpu%d\n", + session, 
current->pid, smp_processor_id()); + + /* become a daemon kernel thread */ + sprintf(current->comm,"iscsi-send-tur-thr"); + iscsi_daemonize(); + session->send_tur_pid = current->pid; + current->flags |= PF_MEMALLOC; + smp_mb(); + + /* Block all signals except SIGHUP and SIGKILL */ + LOCK_SIGNALS(); + siginitsetinv(¤t->blocked, sigmask(SIGKILL) | sigmask(SIGHUP)); + RECALC_PENDING_SIGNALS; + UNLOCK_SIGNALS(); + send_tur(session); + DEBUG_INIT("iSCSI: send tur thread leaving kernel at %lu\n", jiffies); + set_current_state(TASK_RUNNING); + session->send_tur_pid = 0; + smp_mb(); + return 1; +} + +static int iscsi_lun_thread(void *vtaskp) +{ + iscsi_session_t *session; + scsi_device_info_t *device_info; + uint32_t lun_bitmap = 0xFF; + int rc = -1; + int lun = 0; + + session = (iscsi_session_t *)vtaskp; + + printk("iSCSI: session %p lun thread %d about to daemonize on cpu%d\n", + session, current->pid, smp_processor_id()); + + /* become a daemon kernel thread */ + sprintf(current->comm,"iscsi-lun-thr"); + iscsi_daemonize(); + iscsi_lun_pid = current->pid; + current->flags |= PF_MEMALLOC; + smp_mb(); + + /* Block all signals except SIGHUP and SIGKILL */ + LOCK_SIGNALS(); + siginitsetinv(¤t->blocked, sigmask(SIGKILL) | sigmask(SIGHUP)); + RECALC_PENDING_SIGNALS; + UNLOCK_SIGNALS(); + + printk("iSCSI: session %p lun thread %d starting on cpu%d\n", session, current->pid, smp_processor_id()); + + if (test_and_set_bit(SESSION_PROBING_LUNS, &session->control_bits)) { + printk("iSCSI: session %p already has a process probing or waiting to probe LUNs for bus %d, target %d\n", session, session->iscsi_bus, session->target_id); + rc = -EBUSY; + goto done; + } + iscsi_detect_luns(session); + for (lun = 0; lun < ISCSI_MAX_LUN; lun++) { + if (test_bit(lun, session->luns_detected)) { + /* These are the original luns present */ + if(!test_bit(lun, session->luns_found)) { + /* the lun seems to have changed */ + iscsi_remove_lun_complete(session, lun); + } + } + } + + if 
(test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p terminating, returning at %lu\n", session, jiffies); + clear_bit(SESSION_PROBING_LUNS, &session->control_bits); + smp_mb(); + goto done; + } else if (signal_pending(current)) { + iscsi_terminate_session(session); + printk("iSCSI: session %p ioctl terminated, returning at %lu\n", session, jiffies); + clear_bit(SESSION_PROBING_LUNS, &session->control_bits); + smp_mb(); + goto done; + } + + device_info = (scsi_device_info_t *)kmalloc(sizeof(scsi_device_info_t), GFP_KERNEL); + device_info->max_sd_devices = MAX_SCSI_DISKS; + device_info->max_sd_partitions = MAX_SCSI_DISK_PARTITIONS; + device_info->max_sg_devices = MAX_SCSI_GENERICS; + device_info->max_sr_devices = MAX_SCSI_CDROMS; + device_info->max_st_devices = MAX_SCSI_TAPES; + + iscsi_probe_luns(session, &lun_bitmap, device_info); + + /* and then we're done */ + clear_bit(SESSION_PROBING_LUNS, &session->control_bits); + smp_mb(); + rc = 0; + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p terminating, ioctl returning at %lu\n", session, jiffies); + goto done; + } else if (signal_pending(current)) { + iscsi_terminate_session(session); + printk("iSCSI: session %p ioctl terminated, returning at %lu\n", session, jiffies); + goto done; + } + +done: /* lun change event finished */ + DEBUG_INIT("iSCSI: lun thread leaving kernel at %lu\n", jiffies); + set_current_state(TASK_RUNNING); + iscsi_lun_pid = 0; + smp_mb(); + return rc; +} + + +static void iscsi_recv_async_event(iscsi_session_t *session, struct IscsiAsyncEvtHdr *staeh, unsigned char *xbuf) +{ + unsigned int senselen = ntoh24(staeh->dlength); + + /* FIXME: check StatSN */ + session->ExpStatSn = ntohl(staeh->statsn)+1; + updateSN(session, ntohl(staeh->expcmdsn), ntohl(staeh->maxcmdsn)); + + ISCSI_TRACE(ISCSI_TRACE_RxAsyncEvent, NULL, NULL, staeh->async_event, staeh->async_vcode); + + switch (staeh->async_event) { + case 
ASYNC_EVENT_SCSI_EVENT: + printk(" iSCSI: SCSI Async event ASC=%0x2x, ASCQ=%0x2x received on session %p for target %s\n", xbuf[14], xbuf[15], session, session->log_name); + + if(xbuf[14] == 0x3f && xbuf[15] == 0x0e) { + /* Lun change event has occured for a target */ + if (kernel_thread(iscsi_lun_thread, (void *)session, 0) < 0) { + printk("iSCSI: failed to start the thread \n"); + } + } + + /* no way to pass this up to the SCSI layer, since there is no command associated with it */ + if (LOG_ENABLED(ISCSI_LOG_SENSE)) { + if (senselen >= 26) { + printk("iSCSI: SCSI Async event, senselen %d, key %02x, ASC/ASCQ %02X/%02X, session %p to %s\n" + "iSCSI: Sense %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x " + "%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x\n", + senselen, SENSE_KEY(xbuf), ASC(xbuf), ASCQ(xbuf), session, session->log_name, + xbuf[0],xbuf[1],xbuf[2],xbuf[3], + xbuf[4],xbuf[5],xbuf[6],xbuf[7], + xbuf[8],xbuf[9],xbuf[10],xbuf[11], + xbuf[12],xbuf[13],xbuf[14],xbuf[15], + xbuf[16],xbuf[17],xbuf[18],xbuf[19], + xbuf[20],xbuf[21],xbuf[22],xbuf[23], + xbuf[24], xbuf[25]); + } + else if ( senselen >= 18) { + printk("iSCSI: SCSI Async event, senselen %d, key %02x, ASC/ASCQ %02X/%02X, session %p to %s\n" + "iSCSI: Sense %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x\n", + senselen, SENSE_KEY(xbuf), ASC(xbuf), ASCQ(xbuf), session, session->log_name, + xbuf[0],xbuf[1],xbuf[2],xbuf[3], + xbuf[4],xbuf[5],xbuf[6],xbuf[7], + xbuf[8],xbuf[9],xbuf[10],xbuf[11], + xbuf[12],xbuf[13],xbuf[14],xbuf[15], + xbuf[16],xbuf[17]); + } + else if ( senselen >= 14) { + printk("iSCSI: SCSI Async event, senselen %d, key %02x, ASC/ASCQ %02X/%02X, session %p to %s\n" + "iSCSI: Sense %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x\n", + senselen, SENSE_KEY(xbuf), ASC(xbuf), ASCQ(xbuf), session, session->log_name, + xbuf[0],xbuf[1],xbuf[2],xbuf[3], + xbuf[4],xbuf[5],xbuf[6],xbuf[7], + xbuf[8],xbuf[9],xbuf[10],xbuf[11], + 
xbuf[12],xbuf[13]); + } + else { + printk("iSCSI: SCSI Async event, senselen %d, key %02x, session %p to %s\n" + "iSCSI: Sense %02x%02x%02x%02x %02x%02x%02x%02x\n", + senselen, SENSE_KEY(xbuf), session, session->log_name, + xbuf[0],xbuf[1],xbuf[2],xbuf[3], + xbuf[4],xbuf[5],xbuf[6],xbuf[7]); + } + } + break; + case ASYNC_EVENT_REQUEST_LOGOUT: + printk("iSCSI: target requests logout within %u seconds for session to %s\n", + ntohs(staeh->param3), session->log_name); + /* FIXME: this is really a request to drop a connection, not the whole session, + * but we currently only have one connection per session, so there's no difference + * at the moment. + */ + + /* we need to get the task lock to make sure the TX thread isn't in the middle + * of adding another task to the session. + */ + spin_lock(&session->task_lock); + iscsi_request_logout(session, ntohs(staeh->param3) - (HZ / 10), session->active_timeout); + spin_unlock(&session->task_lock); + break; + case ASYNC_EVENT_DROPPING_CONNECTION: + printk("iSCSI: session %p target dropping connection %u, reconnect min %u max %u\n", + session, ntohs(staeh->param1), ntohs(staeh->param2), ntohs(staeh->param3)); + session->time2wait = (long)ntohs(staeh->param2) & 0x0000FFFFFL; + smp_mb(); + break; + case ASYNC_EVENT_DROPPING_ALL_CONNECTIONS: + printk("iSCSI: session %p target dropping all connections, reconnect min %u max %u\n", + session->log_name, ntohs(staeh->param2), ntohs(staeh->param3)); + session->time2wait = (long)ntohs(staeh->param2) & 0x0000FFFFFL; + smp_mb(); + break; + case ASYNC_EVENT_VENDOR_SPECIFIC: + printk("iSCSI: session %p ignoring vendor-specific async event, vcode 0x%x\n", + session, staeh->async_vcode); + break; + case ASYNC_EVENT_PARAM_NEGOTIATION: + printk("iSCSI: session %p received async event param negotiation, dropping session\n", session); + iscsi_drop_session(session); + break; + default: + printk("iSCSI: session %p received unknown async event 0x%x at %lu\n", + session, staeh->async_event, jiffies); 
+ break; + } +} + + +/* wait for the tx thread to block or exit, ignoring signals. + * the rx thread needs to know that the tx thread is not running before + * it can safely close the socket and start a new login phase on a new socket, + * Also, tasks still in use by the tx thread can't safely be completed on + * a session drop. + */ +static int wait_for_tx_blocked(iscsi_session_t *session) +{ + while (session->tx_pid) { + DEBUG_INIT("iSCSI: session %p thread %d waiting for tx thread %d to block\n", + session, current->pid, session->tx_pid); + + wait_event_interruptible(session->tx_blocked_wait_q, + test_bit(TX_THREAD_BLOCKED, &session->control_bits)); + + if (iscsi_handle_signals(session)) { + DEBUG_INIT("iSCSI: session %p wait_for_tx_blocked signalled at %lu while waiting for tx %d\n", + session, jiffies, session->tx_pid); + } + /* if the session is terminating, the tx thread will exit, waking us up in the process + * we don't want to return until the tx thread is blocked, since there's not much + * the rx thread can do until the tx thread is guaranteed not to be doing anything. + */ + if (test_bit(TX_THREAD_BLOCKED, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p rx thread %d found tx thread %d blocked\n", + session, current->pid, session->tx_pid); + return 1; + } + } + + /* dead and blocked are fairly similar, really */ + DEBUG_INIT("iSCSI: session %p rx thread %d found tx thread %d exited\n", + session, current->pid, session->tx_pid); + return 1; +} + + +/* Wait for a session to be established. + * Returns 1 if the session is established, zero if the timeout expires + * or the session is terminating/has already terminated. 
+ */ +static int wait_for_session(iscsi_session_t *session, int use_timeout) +{ + int ret = 0; + wait_queue_t waitq; + + if (test_bit(SESSION_ESTABLISHED, &session->control_bits)) + return 1; + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p terminating, wait_for_session failed\n", session); + return 0; + } + + init_waitqueue_entry(&waitq, current); + add_wait_queue(&session->login_wait_q, &waitq); + smp_mb(); + + DEBUG_INIT("iSCSI: pid %d waiting for session %p at %lu\n", current->pid, session, jiffies); + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (test_bit(SESSION_ESTABLISHED, &session->control_bits)) { + ret = 1; + goto done; + } + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + ret = 0; + goto done; + } + + if (signal_pending(current)) { + ret = 0; + goto done; + } + + if (use_timeout && session->replacement_timeout) { + unsigned long timeout, now; + long sleep_jiffies = 0; + + if (test_bit(SESSION_REPLACEMENT_TIMEDOUT, &session->control_bits)) { + ret = 0; + goto done; + } + + if (session->session_drop_time) + timeout = session->session_drop_time + (HZ * session->replacement_timeout); + else + timeout = jiffies + (HZ * session->replacement_timeout); + + if (time_before_eq(timeout, jiffies)) { + printk("iSCSI: pid %d timed out in wait_for_session %p\n", current->pid, session); + ret = 0; + goto done; + } + + /* handle wrap-around */ + now = jiffies; + if (now < timeout) + sleep_jiffies = timeout - now; + else + sleep_jiffies = ULONG_MAX - now + timeout; + + schedule_timeout(sleep_jiffies); + } + else + schedule(); + } + + done: + set_current_state(TASK_RUNNING); + remove_wait_queue(&session->login_wait_q, &waitq); + + if (ret == 0) + printk("iSCSI: wait_for_session %p failed\n", session); + + return ret; +} + +/* caller must hold the session's portal_lock */ +static unsigned int find_portal(iscsi_session_t *session, unsigned char *ip_address, int ip_length, int port) +{ + 
iscsi_portal_info_t *portals = session->portals; + unsigned int p; + + for (p = 0; p < session->num_portals; p++) { + if (portals[p].ip_length == 0) + continue; + + if (portals[p].ip_length != ip_length) + continue; + + if (portals[p].port != port) + continue; + + if (memcmp(portals[p].ip_address, ip_address, ip_length)) + continue; + + DEBUG_INIT("iSCSI: session %p found portal %u\n", session, p); + break; + } + + if (p < session->num_portals) + return p; + + return UINT_MAX; +} + +static void set_portal_config(iscsi_session_t *session, unsigned int p) +{ + /* Set the session timeouts and iSCSI op params based on the portal's settings. + * Don't change the address, since a temporary redirect may have already changed the address, + * and we want to use the redirected address rather than the portal's address. + */ + session->login_timeout = session->portals[p].login_timeout; + session->auth_timeout = session->portals[p].auth_timeout; + session->active_timeout = session->portals[p].active_timeout; + session->idle_timeout = session->portals[p].idle_timeout; + session->ping_timeout = session->portals[p].ping_timeout; + session->abort_timeout = session->portals[p].abort_timeout; + session->reset_timeout = session->portals[p].reset_timeout; + session->replacement_timeout = session->portals[p].replacement_timeout; + + /* FIXME: get the scsi_cmnd_lock when setting these? 
*/ + session->min_disk_command_timeout = session->portals[p].min_disk_command_timeout; + session->max_disk_command_timeout = session->portals[p].max_disk_command_timeout; + + session->InitialR2T = session->portals[p].InitialR2T; + session->ImmediateData = session->portals[p].ImmediateData; + session->MaxRecvDataSegmentLength = session->portals[p].MaxRecvDataSegmentLength; + session->FirstBurstLength = session->portals[p].FirstBurstLength; + session->MaxBurstLength = session->portals[p].MaxBurstLength; + session->DefaultTime2Wait = session->portals[p].DefaultTime2Wait; + session->DefaultTime2Retain = session->portals[p].DefaultTime2Retain; + + session->HeaderDigest = session->portals[p].HeaderDigest; + session->DataDigest = session->portals[p].DataDigest; + + session->portal_group_tag = session->portals[p].tag; + + /* TCP options */ + session->tcp_window_size = session->portals[p].tcp_window_size; + /* FIXME: type_of_service */ +} + +/* caller must hold the session's portal_lock */ +static int set_portal(iscsi_session_t *session, unsigned int p) +{ + iscsi_portal_info_t *portals = session->portals; + + if (portals == NULL) { + printk("iSCSI: session %p has no portal info, can't set portal %d\n", session, p); + return 0; + } + + if (p >= session->num_portals) { + printk("iSCSI: session %p has only %d portals, can't set portal %d\n", + session, session->num_portals, p); + return 0; + } + + session->current_portal = p; + + /* address */ + session->ip_length = portals[p].ip_length; + memcpy(session->ip_address, portals[p].ip_address, portals[p].ip_length); + session->port = portals[p].port; + + /* timeouts, operational params, other settings */ + set_portal_config(session, p); + + DEBUG_INIT("iSCSI: session %p set to portal %d, group %d\n", + session, session->current_portal, session->portal_group_tag); + + return 1; +} + +static void set_preferred_subnet_bitmap(iscsi_session_t *session) +{ + unsigned int bitmap = 0; + iscsi_portal_info_t *portals = session->portals; + 
unsigned char ip[16]; + int ip_length = 4; + unsigned int p; + uint32_t a1, a2; + + if (portals == NULL) { + printk("iSCSI: session %p has no portal info, therefore no preferred subnet bitmap\n", session); + return; + } + + iscsi_inet_aton(session->preferred_subnet, ip, &ip_length); + + a1 = ip[0] << 24; + a1 |= ip[1] << 16; + a1 |= ip[2] << 8; + a1 |= ip[3]; + a1 &= session->preferred_subnet_mask; + + for (p = 0; p < session->num_portals; p++) { + a2 = portals[p].ip_address[0] << 24; + a2 |= portals[p].ip_address[1] << 16; + a2 |= portals[p].ip_address[2] << 8; + a2 |= portals[p].ip_address[3]; + a2 &= session->preferred_subnet_mask; + + if (a1 == a2) + bitmap = bitmap | (1 << (p % MAX_PORTALS)); + } + session->preferred_subnet_bitmap = bitmap; +} + +static void set_preferred_portal_bitmap(iscsi_session_t *session) +{ + unsigned int bitmap = 0; + iscsi_portal_info_t *portals = session->portals; + unsigned char ip[16]; + int ip_length = 4; + unsigned int p; + + if (portals == NULL) { + printk("iSCSI: session %p has no portal info, therefore no preferred portal bitmap\n", session); + return; + } + + iscsi_inet_aton(session->preferred_portal, ip, &ip_length); + + for (p = 0; p < session->num_portals; p++) { + if (memcmp(ip, portals[p].ip_address, portals[p].ip_length) == 0) { + bitmap = bitmap | (1 << (p % MAX_PORTALS)); + break; + } + } + session->preferred_portal_bitmap = bitmap; +} + +static int get_appropriate_portal(iscsi_session_t *session) +{ + unsigned int p; + int pp = -1; + unsigned int portal_bitmap = session->preferred_portal_bitmap; + unsigned int subnet_bitmap = session->preferred_subnet_bitmap; + + if (!portal_bitmap && !subnet_bitmap) + return -1; + + for (p = 0; p < session->num_portals; p++) { + if (portal_bitmap & (1 << (p % MAX_PORTALS))) { + pp = p; + break; + } + } + + if (pp < 0) { + for (p = 0; p < session->num_portals; p++) { + if (subnet_bitmap & (1 << (p % MAX_PORTALS))) { + pp = p; + break; + } + } + } + return pp; +} + +/* caller must 
hold the session's portal_lock */ +static void next_portal(iscsi_session_t *session) +{ + unsigned int desired_portal = UINT_MAX; + int allow_any_tag = 1; + int current_tag = session->portal_group_tag; + + if (!allow_any_tag && (session->portal_group_tag < 0)) { + printk("iSCSI: session %p current portal %u group tag unknown, can't switch portals\n", session, session->current_portal); + set_portal(session, session->current_portal); + return; + } + + /* requested portals and fallbacks after requested portals are handled similarly */ + if (session->requested_portal != UINT_MAX) { + DEBUG_INIT("iSCSI: session %p requested to switch to portal %u\n", session, session->requested_portal); + desired_portal = session->requested_portal; + session->requested_portal = UINT_MAX; + } + else if (session->fallback_portal != UINT_MAX) { + DEBUG_INIT("iSCSI: session %p falling back to portal %u\n", session, session->fallback_portal); + desired_portal = session->fallback_portal; + session->fallback_portal = UINT_MAX; + } + + if (desired_portal != UINT_MAX) { + /* a particular portal has been requested */ + if (desired_portal >= session->num_portals) { + /* the portal doesn't exist */ + printk("iSCSI: session %p desired portal %u does not exist, staying with portal %u\n", + session, desired_portal, session->current_portal); + /* don't reset the address, so that we stay wherever we are if we can't switch portals */ + set_portal_config(session, session->current_portal); + } + else if (session->portals[desired_portal].ip_length == 0) { + /* the requested portal is dead (probably killed by a permanent redirect) */ + printk("iSCSI: session %p desireed portal %u is dead, staying with portal %u\n", + session, desired_portal, session->current_portal); + /* don't reset the address, so that we stay wherever we are if we can't switch portals */ + set_portal_config(session, session->current_portal); + } + else if (!allow_any_tag && (session->portals[desired_portal].tag != 
session->portal_group_tag)) { + /* the requested portal is in the wrong portal group */ + printk("iSCSI: session %p desired portal %u is in portal group %u, but portal group %u is required, staying with portal %u\n", + session, desired_portal, + session->portals[desired_portal].tag, session->portal_group_tag, session->current_portal); + /* don't reset the address, so that we stay wherever we are if we can't switch portals */ + set_portal_config(session, session->current_portal); + } + else { + /* try the requested portal */ + session->current_portal = desired_portal; + set_portal(session, session->current_portal); + } + } + else if (session->portal_failover) { + unsigned int p; + int failed = 1; + unsigned int bitmap = 0; + unsigned int num_portals = session->num_portals; + + /* Look for the preferred portal */ + bitmap = session->preferred_portal_bitmap; + if (bitmap) { + for (p = 0; p < num_portals; p++) { + if (bitmap & (1 << (p % MAX_PORTALS))) { + if (!(session->tried_portal_bitmap & (1 << (p % MAX_PORTALS)))) { + if (session->portals[p].ip_length == 0) { + /* this portal is dead (probably killed by a permanent redirect) */ + DEBUG_INIT("iSCSI: session %p skipping dead portal %u\n", + session, p); + } + else if (allow_any_tag) { + /* we can use any portal group, so a tag mismatch isn't a problem */ + session->current_portal = p; + session->tried_portal_bitmap |= (1 << (p % MAX_PORTALS)); + failed = 0; + break; + } + else if (session->portals[p].tag < 0) { + DEBUG_INIT("iSCSI: session %p skipping portal %u group unknown, must login to group %u\n", + session, p, current_tag); + } + else if (session->portals[p].tag == current_tag) { + /* tag allowed, go ahead and try it */ + session->current_portal = p; + session->tried_portal_bitmap |= (1 << (p % MAX_PORTALS)); + failed = 0; + break; + } + } + } + } + } + + if (failed) { + /* Look for the portal in the preferred subnet */ + bitmap = session->preferred_subnet_bitmap; + if (bitmap) { + for (p = 0; p < num_portals; 
p++) { + if (bitmap & (1 << (p % MAX_PORTALS))) { + if (!(session->tried_portal_bitmap & (1 << (p % MAX_PORTALS)))) { + if (session->portals[p].ip_length == 0) { + /* this portal is dead (probably killed by a permanent redirect) */ + DEBUG_INIT("iSCSI: session %p skipping dead portal %u\n", + session, p); + } + else if (allow_any_tag) { + /* we can use any portal group, so a tag mismatch isn't a problem */ + session->current_portal = p; + session->tried_portal_bitmap |= (1 << (p % MAX_PORTALS)); + failed = 0; + break; + } + else if (session->portals[p].tag < 0) { + DEBUG_INIT("iSCSI: session %p skipping portal %u group unknown, must login to group %u\n", + session, p, current_tag); + } + else if (session->portals[p].tag == current_tag) { + /* tag allowed, go ahead and try it */ + session->current_portal = p; + session->tried_portal_bitmap |= (1 << (p % MAX_PORTALS)); + failed = 0; + break; + } + } + } + } + } + } + + if (failed) { + /* Now, look for portal in the rest of the available portals */ + for (p = 0; p < num_portals; p++) { + if (!(session->tried_portal_bitmap & (1 << (p % MAX_PORTALS)))) { + if (session->portals[p].ip_length == 0) { + /* this portal is dead (probably killed by a permanent redirect) */ + DEBUG_INIT("iSCSI: session %p skipping dead portal %u\n", + session, p); + } + else if (allow_any_tag) { + /* we can use any portal group, so a tag mismatch isn't a problem */ + session->current_portal = p; + session->tried_portal_bitmap |= (1 << (p % MAX_PORTALS)); + failed = 0; + break; + } + else if (session->portals[p].tag < 0) { + DEBUG_INIT("iSCSI: session %p skipping portal %u group unknown, must login to group %u\n", + session, p, current_tag); + } + else if (session->portals[p].tag == current_tag) { + /* tag allowed, go ahead and try it */ + session->current_portal = p; + session->tried_portal_bitmap |= (1 << (p % MAX_PORTALS)); + failed = 0; + break; + } + } + } + } + + if (failed) { + /* no longer have a portal we can login to safely. 
+ * This ought to be impossible, though an insane target might + * get us into this state by changing the tags on the fly. + */ + logmsg(AS_ERROR, "iSCSI: DANGER - session %p can't find any portals in group %u, staying with portal %u", + session, current_tag, session->current_portal); + /* we still set the current portal so that a failed temporary redirect + * will revert to the original address. + */ + } + + /* set the portal, even if it hasn't changed, so that we + * replace the session's address and undo any temporary + * redirects. + */ + set_portal(session, session->current_portal); + } +} + +static int iscsi_establish_session(iscsi_session_t *session) +{ + int ret = -1; + uint8_t status_class; + uint8_t status_detail; + iscsi_login_status_t login_status = 0; + + spin_lock(&session->portal_lock); + if (session->requested_portal != UINT_MAX) { + /* request to change to a specific portal */ + next_portal(session); + } + else { + /* Set almost everything based on the portal's settings. + * Don't change the address, since a temporary redirect may have already changed the address, + * and we want to use the redirected address rather than the portal's address. 
+ */ + set_portal_config(session, session->current_portal); + } + spin_unlock(&session->portal_lock); + + if (LOG_ENABLED(ISCSI_LOG_LOGIN) || LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: bus %d target %d trying to establish session %p to portal %u, address %u.%u.%u.%u port %d group %d, rx %d, tx %d at %lu\n", + session->iscsi_bus, session->target_id, session, session->current_portal, + session->ip_address[0], session->ip_address[1], session->ip_address[2], session->ip_address[3], session->port, + session->portal_group_tag, session->rx_pid, session->tx_pid, jiffies); + else + printk("iSCSI: bus %d target %d trying to establish session %p to portal %u, address %u.%u.%u.%u port %d group %d\n", + session->iscsi_bus, session->target_id, session, session->current_portal, + session->ip_address[0], session->ip_address[1], session->ip_address[2], session->ip_address[3], session->port, + session->portal_group_tag); + + /* set a timer on the connect */ + if (session->login_timeout) { + session->login_phase_timer = jiffies + (session->login_timeout * HZ); + smp_mb(); + } + if (LOG_ENABLED(ISCSI_LOG_LOGIN)) + printk("iSCSI: session %p attempting to connect at %lu, timeout at %lu (%d seconds)\n", + session, jiffies, session->login_phase_timer, session->login_timeout); + + if (!iscsi_connect(session)) { + if (signal_pending(current)) + printk("iSCSI: session %p connect timed out at %lu\n", session, jiffies); + else + printk("iSCSI: session %p connect failed at %lu\n", session, jiffies); + /* switch to the next portal */ + spin_lock(&session->portal_lock); + next_portal(session); + spin_unlock(&session->portal_lock); + goto done; + } + + /* We need to grab the config_mutex before we start trying to + * login, to ensure update_session doesn't try to change the + * per-session settings while the login code is using them. Any + * config updates will be deferred until after the login + * completes. 
We grab the mutex now, so that the connect timeout + * will break us out if we can't get the mutex for some reason. + */ + if (down_interruptible(&session->config_mutex)) { + printk("iSCSI: session %p failed to acquire mutex before login at %lu\n", session, jiffies); + goto done; + } + + /* make sure we have auth buffers for the login library to use */ + if (session->bidirectional_auth || session->username || session->password) { + /* make sure we've allocated everything we need */ + if (session->auth_client_block == NULL) { + session->auth_client_block = kmalloc(sizeof(*session->auth_client_block), GFP_KERNEL); + if (session->auth_client_block) + DEBUG_INIT("iSCSI: session %p allocated auth_client_block %p (size %Zu) while establishing session\n", + session, session->auth_client_block, sizeof(*session->auth_client_block)); + } + if (session->auth_recv_string_block == NULL) { + session->auth_recv_string_block = kmalloc(sizeof(*session->auth_recv_string_block), GFP_KERNEL); + if (session->auth_recv_string_block) + DEBUG_INIT("iSCSI: session %p allocated auth_recv_string_block %p (size %Zu) while establishing session\n", + session, session->auth_recv_string_block, sizeof(*session->auth_recv_string_block)); + } + if (session->auth_send_string_block == NULL) { + session->auth_send_string_block = kmalloc(sizeof(*session->auth_send_string_block), GFP_KERNEL); + if (session->auth_send_string_block) + DEBUG_INIT("iSCSI: session %p allocated auth_send_string_block %p (size %Zu) while establishing session\n", + session, session->auth_send_string_block, sizeof(*session->auth_send_string_block)); + } + if (session->auth_recv_binary_block == NULL) { + session->auth_recv_binary_block = kmalloc(sizeof(*session->auth_recv_binary_block), GFP_KERNEL); + if (session->auth_recv_binary_block) + DEBUG_INIT("iSCSI: session %p allocated auth_recv_binary_block %p (size %Zu) while establishing session\n", + session, session->auth_recv_binary_block, sizeof(*session->auth_recv_binary_block)); 
+ } + if (session->auth_send_binary_block == NULL) { + session->auth_send_binary_block = kmalloc(sizeof(*session->auth_send_binary_block), GFP_KERNEL); + if (session->auth_send_binary_block) + DEBUG_INIT("iSCSI: session %p allocated auth_send_binary_block %p (size %Zu) while establishing session\n", + session, session->auth_send_binary_block, sizeof(*session->auth_send_binary_block)); + } + + /* if we have everything we need, setup the auth buffer descriptors for the login library */ + session->num_auth_buffers = 0; + memset(&session->auth_buffers, 0, sizeof(session->auth_buffers)); + if (session->auth_client_block && session->auth_recv_string_block && session->auth_send_string_block && + session->auth_recv_binary_block && session->auth_send_binary_block) + { + session->auth_buffers[0].address = session->auth_client_block; + session->auth_buffers[0].length = sizeof(*session->auth_client_block); + + session->auth_buffers[1].address = session->auth_recv_string_block; + session->auth_buffers[1].length = sizeof(*session->auth_recv_string_block); + + session->auth_buffers[2].address = session->auth_send_string_block; + session->auth_buffers[2].length = sizeof(*session->auth_send_string_block); + + session->auth_buffers[3].address = session->auth_recv_binary_block; + session->auth_buffers[3].length = sizeof(*session->auth_recv_binary_block); + + session->auth_buffers[4].address = session->auth_send_binary_block; + session->auth_buffers[4].length = sizeof(*session->auth_send_binary_block); + + session->num_auth_buffers = 5; + } + else if (session->bidirectional_auth) { + /* we must authenticate, but can't. error out */ + printk("iSCSI: session %p requires birectional authentication, but couldn't allocate authentication stuctures\n", + session); + ret = -1; /* retry */ + up(&session->config_mutex); + goto done; + } + else { + /* try to login without auth structures, and see if the target + * will let us in anyway. 
If we get rejected, retry, and hope + * we can allocate auth structures next time. + */ + DEBUG_INIT("iSCSI: session %p authentication configured, but couldn't allocate authentication structures\n", session); + } + } + + /* clear the connect timer */ + session->login_phase_timer = 0; + smp_mb(); + iscsi_handle_signals(session); + + /* try to make sure other timeouts don't go off as soon as the session is established */ + session->last_rx = jiffies; + session->last_ping = jiffies - 1; + + /* initialize session fields for the iscsi-login code */ + session->type = ISCSI_SESSION_TYPE_NORMAL; + /* iSCSI default, unless declared otherwise by the target during login */ + session->MaxXmitDataSegmentLength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; + session->vendor_specific_keys = 1; + smp_mb(); + + /* use the session's rx_buffer for a login PDU buffer, since it is + * currently unused. We can't afford to dynamically allocate + * memory right now, since it's possible we're reconnecting, and + * the VM system is already blocked trying to write dirty pages to + * the iSCSI device we're trying to reconnect. The session's + * rx_buffer was sized to have enough space for us to handle the login + * phase. + */ + login_status = iscsi_login(session, session->rx_buffer, sizeof(session->rx_buffer), &status_class, &status_detail); + + /* release the lock on the per-session settings used by the login code */ + up(&session->config_mutex); + + switch (login_status) { + case LOGIN_OK: + /* check the status class and detail */ + break; + + case LOGIN_IO_ERROR: + case LOGIN_WRONG_PORTAL_GROUP: + case LOGIN_REDIRECTION_FAILED: + /* these may indicate problems with just the current portal. 
Try a different one */ + iscsi_disconnect(session); + spin_lock(&session->portal_lock); + next_portal(session); + printk("iSCSI: session %p retrying login to portal %u at %lu\n", session, session->current_portal, jiffies); + spin_unlock(&session->portal_lock); + ret = -1; + goto done; + + default: + case LOGIN_FAILED: + case LOGIN_NEGOTIATION_FAILED: + case LOGIN_AUTHENTICATION_FAILED: + case LOGIN_VERSION_MISMATCH: + case LOGIN_INVALID_PDU: + /* these are problems that will probably occur with any portal of this target. */ + if (session->ever_established && session->num_luns && session->commands_queued) { + /* the session has found LUNs and been used before, so + * applications or the buffer cache may be expecting + * it to continue working. Keep trying to login even + * though clearing the error may require + * reconfiguration on the target. + */ + iscsi_disconnect(session); + spin_lock(&session->portal_lock); + next_portal(session); + printk("iSCSI: session %p may be in use, retrying login to portal %u at %lu\n", session, session->current_portal, jiffies); + spin_unlock(&session->portal_lock); + ret = -1; + } + else { + printk("iSCSI: session %p giving up on login attempts at %lu\n", session, jiffies); + iscsi_disconnect(session); + ret = 0; + } + goto done; + } + + /* check the login status */ + switch (status_class) { + case STATUS_CLASS_SUCCESS: + session->auth_failures = 0; + ret = 1; + break; + case STATUS_CLASS_REDIRECT: + switch (status_detail) { + case ISCSI_LOGIN_STATUS_TGT_MOVED_TEMP: { + unsigned int portal; + + /* the session IP address was changed by the login + * library, sp just try again with this portal + * config but the new address. 
+ */ + session->auth_failures = 0; + smp_mb(); + ret = 1; /* not really success, but we want to retry immediately, with no delay */ + + spin_lock(&session->portal_lock); + portal = find_portal(session, session->ip_address, session->ip_length, session->port); + if (portal != UINT_MAX) { + /* FIXME: IPv6 */ + printk("iSCSI: session %p login to portal %u temporarily redirected to portal %u = %u.%u.%u.%u port %d\n", + session, session->current_portal, portal, + session->ip_address[0], session->ip_address[1], + session->ip_address[2], session->ip_address[3], + session->port); + + /* try to switch to the portal we've been redirected to. + * if that fails, try to come back to the portal we were redirected away from. + * if that fails, try any other portals. + */ + session->requested_portal = portal; + session->fallback_portal = session->current_portal; + } + else { + /* FIXME: IPv6 */ + printk("iSCSI: session %p login to portal %u temporarily redirected to %u.%u.%u.%u port %d\n", + session, session->current_portal, + session->ip_address[0], session->ip_address[1], + session->ip_address[2], session->ip_address[3], + session->port); + + /* we'll connect to the session's address next time. If that fails, + * we'll fallback to the current portal automatically. + */ + } + spin_unlock(&session->portal_lock); + goto done; + } + case ISCSI_LOGIN_STATUS_TGT_MOVED_PERM: { + unsigned int portal; + + /* for a permanent redirect, we need to update the portal address, and then try again. 
*/ + session->auth_failures = 0; + smp_mb(); + ret = 1; /* not really success, but we want to retry immediately, with no delay */ + + spin_lock(&session->portal_lock); + portal = find_portal(session, session->ip_address, session->ip_length, session->port); + if (portal != UINT_MAX) { + /* FIXME: IPv6 */ + printk("iSCSI: session %p login to portal %u permanently redirected to portal %u = %u.%u.%u.%u port %d\n", + session, session->current_portal, portal, + session->ip_address[0], session->ip_address[1], + session->ip_address[2], session->ip_address[3], + session->port); + + /* We want to forget about the current portal. + * Mark this portal dead, and switch to the new portal. + */ + session->portals[session->current_portal].ip_length = 0; + + /* and switch to the other portal */ + set_portal(session, portal); + } + else { + printk("iSCSI: session %p login to portal %u permanently redirected to %u.%u.%u.%u port %d\n", + session, session->current_portal, + session->ip_address[0], session->ip_address[1], + session->ip_address[2], session->ip_address[3], + session->port); + + /* reset the address in the current portal info */ + session->portals[session->current_portal].ip_length = session->ip_length; + memcpy(session->portals[session->current_portal].ip_address, session->ip_address, session->ip_length); + session->portals[session->current_portal].port = session->port; + + /* and just try logging in again with the current portal's config. + * It'd be nice for Subnet entries in the iscsi.conf file to take effect, + * but arranging for that means exporting them all into the kernel module. 
+ */ + } + + spin_unlock(&session->portal_lock); + goto done; + } + default: + ret = -1; + session->auth_failures = 0; + smp_mb(); + printk("iSCSI: session %p login rejected: redirection type 0x%x not supported\n", session, status_detail); + break; + } + iscsi_disconnect(session); + goto done; + case STATUS_CLASS_INITIATOR_ERR: + switch (status_detail) { + case ISCSI_LOGIN_STATUS_AUTH_FAILED: + printk("iSCSI: session %p login rejected: initiator failed authentication with target %s\n", + session, session->TargetName); + iscsi_disconnect(session); + spin_lock(&session->portal_lock); + if ((session->num_auth_buffers < 5) && + (session->username || session->password_length || session->bidirectional_auth)) + { + /* retry, and hope we can allocate the auth structures next time */ + DEBUG_INIT("iSCSI: session %p retrying the same portal, no authentication structures allocated\n", session); + ret = -1; + } + else if ((!session->ever_established) && (session->auth_failures >= session->num_portals)) { + /* give up, since we've tried every portal, and have never established a session */ + printk("iSCSI: session %p terminating login attempts, %d of %d portals failed authentication or authorization\n", + session, session->auth_failures, session->num_portals); + ret = 0; + } + else if (session->portal_failover) { + /* try a different portal */ + session->auth_failures++; + next_portal(session); + ret = -1; + } + else { + session->auth_failures = 0; + ret = 0; + } + spin_unlock(&session->portal_lock); + goto done; + case ISCSI_LOGIN_STATUS_TGT_FORBIDDEN: + printk("iSCSI: session %p login rejected: initiator failed authorization with target %s\n", + session, session->TargetName); + iscsi_disconnect(session); + spin_lock(&session->portal_lock); + session->auth_failures++; + if ((!session->ever_established) && (session->auth_failures >= session->num_portals)) { + /* give up, since we've tried every portal, and have never established a session */ + printk("iSCSI: session %p 
terminating login attempts, %d of %d portals failed authentication or authorization\n", + session, session->auth_failures, session->num_portals); + ret = 0; + } + else if (session->portal_failover) { + /* try a different portal */ + next_portal(session); + ret = -1; + } + else { + session->auth_failures = 0; + ret = 0; + } + spin_unlock(&session->portal_lock); + goto done; + case ISCSI_LOGIN_STATUS_TGT_NOT_FOUND: + printk("iSCSI: session %p login rejected: initiator error - target not found (%02x/%02x)\n", + session, status_class, status_detail); + session->auth_failures = 0; + iscsi_disconnect(session); + ret = 0; + goto done; + case ISCSI_LOGIN_STATUS_NO_VERSION: + /* FIXME: if we handle multiple protocol versions, before we log an error, try the other supported versions. */ + printk("iSCSI: session %p login rejected: incompatible version (%02x/%02x), non-retryable, giving up\n", + session, status_class, status_detail); + session->auth_failures = 0; + iscsi_disconnect(session); + ret = 0; + goto done; + default: + printk("iSCSI: session %p login rejected: initiator error (%02x/%02x), non-retryable, giving up\n", + session, status_class, status_detail); + session->auth_failures = 0; + iscsi_disconnect(session); + ret = 0; + goto done; + } + case STATUS_CLASS_TARGET_ERR: + printk("iSCSI: session %p login rejected: target error (%02x/%02x)\n", + session, status_class, status_detail); + session->auth_failures = 0; + iscsi_disconnect(session); + /* Try a different portal for the retry. We have no idea + * what the problem is, but maybe a different portal will + * work better. 
+ */ + spin_lock(&session->portal_lock); + if (session->portal_failover) { + next_portal(session); + ret = -1; + } + else + ret = 0; + spin_unlock(&session->portal_lock); + goto done; + default: + printk("iSCSI: session %p login response with unknown status class 0x%x, detail 0x%x\n", + session, status_class, status_detail); + session->auth_failures = 0; + iscsi_disconnect(session); + ret = 0; + goto done; + } + + /* logged in, get the new session ready */ + clear_bit(SESSION_LOGGED_OUT, &session->control_bits); + session->fallback_portal = UINT_MAX; + session->tried_portal_bitmap = 0; + session->ever_established = 1; + session->generation++; + session->auth_failures = 0; + session->last_rx = jiffies; + session->last_ping = jiffies - 1; + session->last_window_check = jiffies; + session->last_peak_window_size = 0; + session->last_kill = 0; + session->window_closed = 0; + session->window_full = 0; + session->current_peak_window_size = max_tasks_for_session(session); + session->window_peak_check = jiffies; + session->warm_reset_itt = RSVD_TASK_TAG; + session->cold_reset_itt = RSVD_TASK_TAG; + session->nop_reply.ttt = RSVD_TASK_TAG; + session->nop_reply_head = session->nop_reply_tail = NULL; + session->session_established_time = jiffies; /* used to detect sessions that die as soon as we hit FFP */ + session->session_drop_time = 0; /* used to detect sessions that aren't coming back up */ + session->login_phase_timer = 0; + if (session->TargetAlias[0]) + session->log_name = session->TargetAlias; + smp_mb(); + + /* announce it */ + if (session->TargetAlias[0] != '\0') + printk("iSCSI: bus %d target %d established session %p #%lu to portal %u, address %u.%u.%u.%u port %d group %d, alias %s\n", + session->iscsi_bus, session->target_id, session, session->generation, session->current_portal, + session->ip_address[0], session->ip_address[1], session->ip_address[2], session->ip_address[3], session->port, + session->portal_group_tag, session->TargetAlias); + else + 
printk("iSCSI: bus %d target %d established session %p #%lu, portal %u, address %u.%u.%u.%u port %d group %d\n", + session->iscsi_bus, session->target_id, session, session->generation, session->current_portal, + session->ip_address[0], session->ip_address[1], session->ip_address[2], session->ip_address[3], session->port, + session->portal_group_tag); + + if (LOG_ENABLED(ISCSI_LOG_INIT) || LOG_ENABLED(ISCSI_LOG_EH)) { + printk("iSCSI: session %p #%lu established at %lu, isid 0x%02x%02x%02x%02x%02x%02x, tsih %u, %u normal cmnds, %u deferred cmnds, %u tasks, bits 0x%08lx\n", + session, session->generation, jiffies, + session->isid[0], session->isid[1], session->isid[2], session->isid[3], + session->isid[4], session->isid[5], session->tsid, + atomic_read(&session->num_cmnds), session->num_deferred_cmnds, + atomic_read(&session->num_active_tasks), session->control_bits); + } + + /* mark the session as up and accepting commands again */ + clear_bit(SESSION_REPLACEMENT_TIMEDOUT, &session->control_bits); + smp_wmb(); + set_bit(SESSION_ESTABLISHED, &session->control_bits); + smp_mb(); + + /* wake up everyone waiting for the session to be established */ + wake_up(&session->login_wait_q); + + /* make sure we start sending commands again */ + wake_tx_thread(TX_SCSI_COMMAND, session); + + done: + /* clear any timer that may have been left running */ + session->login_phase_timer = 0; + smp_mb(); + /* cleanup after a possible timeout expiration */ + if (iscsi_handle_signals(session)) { + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p terminating, giving up on login attempts\n", session); + return 0; + } + else { + DEBUG_INIT("iSCSI: session %p received signal during login, retrying\n", session); + return -1; + } + } + + return ret; +} + +static inline void append_queue(Scsi_Cmnd **to_head, Scsi_Cmnd **to_tail, Scsi_Cmnd **from_head, Scsi_Cmnd **from_tail) +{ + if (*to_head && *from_head) { + /* both non-empty, append 'from' to 'to' */ 
+ (*to_tail)->host_scribble = (void *)*from_head; + *to_tail = *from_tail; + *from_head = NULL; + *from_tail = NULL; + } + else if (*from_head) { + /* 'from' becomes 'to' */ + *to_head = *from_head; + *to_tail = *from_tail; + *from_head = NULL; + *from_tail = NULL; + } +} + +/* caller must hold the task_lock */ +static void requeue_or_fail_commands(iscsi_session_t *session) +{ + Scsi_Cmnd *fatal_head = NULL, *fatal_tail = NULL; + Scsi_Cmnd *requeue_head = NULL, *requeue_tail = NULL; + Scsi_Cmnd *sc = NULL; + iscsi_task_t *task = NULL; + int fail_all = 0; + int num_failed = 0; + int num_tasks = 0; + DECLARE_MIDLAYER_FLAGS; + DECLARE_NOQUEUE_FLAGS; + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + /* no point in retrying anything */ + if (test_bit(ISCSI_HBA_SHUTTING_DOWN, &session->hba->flags)) + DEBUG_INIT("iSCSI: session %p terminating, failing all SCSI commands\n", session); + else + printk("iSCSI: session %p terminating, failing all SCSI commands\n", session); + fail_all = 1; + } + else { + DEBUG_INIT("iSCSI: session %p requeue_or_fail_commands at %lu\n", session, jiffies); + } + + /* grab all the tasks for this connection */ + while ((task = session->arrival_order.head)) { + session->arrival_order.head = task->order_next; + + del_task_timer(task); + + if (atomic_read(&task->refcount) == 0) { + ISCSI_TRACE(ISCSI_TRACE_TaskAborted, sc, task, 0, 0); + + task->next = task->prev = task->order_next = task->order_prev = NULL; + sc = task->scsi_cmnd; + task->scsi_cmnd = NULL; + + if (sc) + add_cmnd(sc, &requeue_head, &requeue_tail); + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + DEBUG_ALLOC("iSCSI: session %p requeue_or_fail freeing task %p at %lu\n", + session, task, jiffies); + + num_tasks++; + free_task(session, task); + } + else { + /* This should never happen, which is good, since we don't really + * have any good options here. 
Leak the task memory, and fail to + * complete the cmnd, which may leave apps blocked forever in the kernel. + */ + printk("iSCSI: bug - session %p can't complete itt %u task %p, refcount %u, command %p, leaking task memory\n", + session, task->itt, task, atomic_read(&task->refcount), task->scsi_cmnd); + } + } + + if (test_bit(SESSION_TERMINATING, &session->control_bits) && LOG_ENABLED(ISCSI_LOG_ALLOC)) + printk("iSCSI: session %p for (%u %u %u *) requeue_or_fail freed %d tasks at %lu, alloc %u freed %u\n", + session, session->host_no, session->channel, session->target_id, num_tasks, jiffies, + session->tasks_allocated, session->tasks_freed); + + session->arrival_order.head = session->arrival_order.tail = NULL; + atomic_set(&session->num_active_tasks, 0); + /* clear out the task collections */ + session->tx_tasks.head = session->tx_tasks.tail = NULL; + session->warm_reset_itt = RSVD_TASK_TAG; + session->cold_reset_itt = RSVD_TASK_TAG; + + /* grab the retry, deferred, and normal queues in that order */ + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + append_queue(&requeue_head, &requeue_tail, &session->retry_cmnd_head, &session->retry_cmnd_tail); + atomic_set(&session->num_retry_cmnds, 0); + append_queue(&requeue_head, &requeue_tail, &session->deferred_cmnd_head, &session->deferred_cmnd_tail); + session->num_deferred_cmnds = 0; + append_queue(&requeue_head, &requeue_tail, &session->scsi_cmnd_head, &session->scsi_cmnd_tail); + atomic_set(&session->num_cmnds, 0); + + while ((sc = requeue_head)) { + requeue_head = (Scsi_Cmnd *)sc->host_scribble; + + if (fail_all || (sc->allowed <= 1)) { + /* fail it */ + add_cmnd(sc, &fatal_head, &fatal_tail); + num_failed++; + } + else { + /* requeue it */ + add_cmnd(sc, &session->scsi_cmnd_head, &session->scsi_cmnd_tail); + atomic_inc(&session->num_cmnds); + } + } + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + + /* fail any commands that can't be retried */ + LOCK_MIDLAYER_LOCK(session->hba->host); + while ((sc = fatal_head)) { 
+ fatal_head = (Scsi_Cmnd *)sc->host_scribble; + + del_command_timer(sc); + sc->host_scribble = NULL; + sc->resid = iscsi_expected_data_length(sc); + sc->result = HOST_BYTE(DID_NO_CONNECT); + if (sc->allowed > 1) + sc->retries = sc->allowed - 1; + + set_not_ready(sc); + + /* FIXME: always log these? sometimes log these? */ + printk("iSCSI: session %p failing command %p cdb 0x%02x to (%u %u %u %u) at %lu\n", + session, sc, sc->cmnd[0], sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + + if (sc->scsi_done) { + add_completion_timer(sc); + sc->scsi_done(sc); + } + } + + UNLOCK_MIDLAYER_LOCK(session->hba->host); +} + +static int iscsi_rx_thread(void *vtaskp) +{ + iscsi_session_t *session; + iscsi_hba_t *hba; + int rc = -EPIPE, length, xlen; + struct msghdr msg; + struct iovec iov[2]; + struct IscsiHdr sth; + uint32_t crc32c; + unsigned char *rxbuf; + long login_delay = 0; + int pad; + unsigned long session_failures = 0; + + + if (vtaskp == NULL) { + printk("iSCSI: rx thread task parameter NULL\n"); + return 0; + } + + session = (iscsi_session_t *)vtaskp; + /* whoever created the thread already incremented the session's refcount for us */ + + hba = session->hba; + + DEBUG_INIT("iSCSI: session %p rx thread %d about to daemonize on cpu%d\n", + session, current->pid, smp_processor_id()); + + /* become a daemon kernel thread, and abandon any user space resources */ + sprintf(current->comm,"iscsi-rx"); + iscsi_daemonize(); + session->rx_pid = current->pid; + current->flags |= PF_MEMALLOC; + smp_mb(); + + /* check to see if iscsi_terminate_session was called before we + * started running, since we can't get a signal from it until + * until we set session->rx_pid. 
+ */ + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto ThreadExit; + + /* Block all signals except SIGHUP and SIGKILL */ + LOCK_SIGNALS(); + siginitsetinv(¤t->blocked, sigmask(SIGKILL) | sigmask(SIGHUP)); + RECALC_PENDING_SIGNALS; + UNLOCK_SIGNALS(); + + DEBUG_INIT("iSCSI: session %p rx thread %d starting on cpu%d\n", session, current->pid, smp_processor_id()); + + while (!test_bit(SESSION_TERMINATING, &session->control_bits)) { + unsigned long login_failures = 0; + + /* we need a session for the rx and tx threads to use */ + while (!test_bit(SESSION_ESTABLISHED, &session->control_bits)) { + if (login_delay) { + printk("iSCSI: session %p to %s waiting %ld seconds before next login attempt\n", + session, session->log_name, login_delay); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(login_delay * HZ); + } + + /* ensure we can write to the socket without interference */ + DEBUG_INIT("iSCSI: session %p rx thread %d waiting for tx blocked for at %lu\n", + session, current->pid, jiffies); + wait_for_tx_blocked(session); + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto ThreadExit; + + /* now that the tx thread is idle, it's safe to clean up the old session, if there was one */ + iscsi_disconnect(session); + /* FIXME: should clearing these bits move to iscsi_establish_session? 
*/ + clear_bit(SESSION_DROPPED, &session->control_bits); + clear_bit(SESSION_TASK_ALLOC_FAILED, &session->control_bits); + clear_bit(SESSION_LOGOUT_REQUESTED, &session->control_bits); + clear_bit(SESSION_WINDOW_CLOSED, &session->control_bits); + clear_bit(SESSION_RESETTING, &session->control_bits); + clear_bit(SESSION_RESET, &session->control_bits); + clear_bit(SESSION_TASK_MGMT_TIMEDOUT, &session->control_bits); + smp_mb(); + + /* try to get a new session */ + rc = iscsi_establish_session(session); + if (rc > 0) { + /* established or redirected */ + login_failures = 0; + } + else if (rc < 0) { + /* failed, retry */ + login_failures++; + } + else { + /* failed, give up */ + printk("iSCSI: session %p giving up at %lu\n", session, jiffies); + iscsi_terminate_session(session); + goto ThreadExit; + } + + /* slowly back off the frequency of login attempts */ + if (login_failures == 0) + login_delay = 0; + else if (login_failures < 30) + login_delay = 1; /* 30 seconds at 1 sec each */ + else if (login_failures < 48) + login_delay = 5; /* another 90 seconds at 5 sec each */ + else if (session->replacement_timeout && + time_before_eq(session->session_drop_time + (HZ * session->replacement_timeout), jiffies)) + { + login_delay = 10; /* every 10 seconds */ + } + else { + /* we've already failed all commands out of the + * driver, but if we can bring the session back up, we + * can stop failing new commands in queuecommand. 
+ */ + login_delay = 60; + } + } + + DEBUG_INIT("iSCSI: session %p established by rx thread %d at %lu\n", session, current->pid, jiffies); + + /* handle rx for this session */ + while (!signal_pending(current)) { + /* check for anything to read on socket */ + iov[0].iov_base = &sth; + iov[0].iov_len = length = sizeof(sth); + memset(&msg, 0, sizeof(msg)); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + iov[1].iov_base = &crc32c; + iov[1].iov_len = sizeof(crc32c); + msg.msg_iovlen = 2; + length += sizeof(crc32c); + } + + DEBUG_FLOW("iSCSI: session %p rx thread %d waiting to receive %d header bytes\n", + session, session->rx_pid, length); + + rc = iscsi_recvmsg(session, &msg, length); + if (signal_pending(current)) { + DEBUG_FLOW("iSCSI: session %p rx thread %d received signal\n", session, session->rx_pid); + goto EndSession; + } + if (rc == length) { + DEBUG_FLOW("iSCSI: session %p rx thread %d received %d header bytes, opcode 0x%x\n", + session, session->rx_pid, length, sth.opcode); + /* HeaderDigests */ + if (session->HeaderDigest == ISCSI_DIGEST_CRC32C) { + uint32_t calculated_crc32c = iscsi_crc32c(&sth, sizeof(sth)); + + if (session->fake_read_header_mismatch > 0) { + session->fake_read_header_mismatch--; + smp_mb(); + printk("iSCSI: session %p faking HeaderDigest mismatch for itt %u\n", session, ntohl(sth.itt)); + calculated_crc32c = 0x01020304; + } + + if (calculated_crc32c != crc32c) { + printk("iSCSI: session %p HeaderDigest mismatch, received 0x%08x, calculated 0x%08x, dropping session at %lu\n", + session, crc32c, calculated_crc32c, jiffies); + iscsi_drop_session(session); + goto EndSession; + } + } + + /* received something */ + xlen = ntoh24(sth.dlength); + + if (sth.hlength) { + /* FIXME: read any additional header segments. + * For now, drop the session if one is received, since we can't handle them. 
+ */ + printk("iSCSI: session %p received opcode %x, ahs length %d, dlength %d, itt %u at %lu\n", + session, sth.opcode, sth.hlength, xlen, ntohl(sth.itt), jiffies); + printk("iSCSI: session %p dropping, additional header segments not supported by this driver version.\n", + session); + iscsi_drop_session(session); + goto EndSession; + } + + /* If there are padding bytes, read them as well */ + pad = xlen % PAD_WORD_LEN; + if (pad) { + pad = PAD_WORD_LEN - pad; + xlen += pad; + } + + DEBUG_FLOW("iSCSI: session %p rx PDU, opcode 0x%x, dlength %d at %lu\n", + session, sth.opcode, xlen, jiffies); + + if (xlen && (sth.opcode != ISCSI_OP_SCSI_DATA_RSP) && (sth.opcode != ISCSI_OP_NOOP_IN)) { + /* unless it's got a (possibly large) data payload, read the whole PDU into memory beforehand */ + if (xlen > ISCSI_RXCTRL_SIZE) { + printk("iSCSI: session %p PDU data length too large, opcode %x, dlen %d\n", session, sth.opcode, xlen); + iscsi_drop_session(session); + goto EndSession; + } + rxbuf = session->rx_buffer; + iov[0].iov_base = rxbuf; + iov[0].iov_len = xlen; + memset( &msg, 0, sizeof(struct msghdr) ); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + length = xlen; + + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + iov[1].iov_base = &crc32c; + iov[1].iov_len = sizeof(crc32c); + msg.msg_iovlen = 2; + length += sizeof(crc32c); + } + + rc = iscsi_recvmsg(session, &msg, length); + if (rc != length) { + printk("iSCSI: session %p PDU opcode 0x%x, recvmsg %d failed, rc %d\n", + session, sth.opcode, length, rc); + iscsi_drop_session(session); + goto EndSession; + } + + if (session->DataDigest == ISCSI_DIGEST_CRC32C) { + uint32_t calculated_crc32c = iscsi_crc32c(rxbuf, xlen); + + if (calculated_crc32c != crc32c) { + /* FIXME: if it's a command response, we MUST do a Logout and drop the session. + * it's not a command response or data, we're allowed to just ignore the PDU. 
+ * It must have been Async with sense, or a Reject, or Nop-in with data, and + * other timers should handle those. For now, ignore the spec, and just + * drop the session unconditionally. + */ + printk("iSCSI: session %p DataDigest mismatch, opcode 0x%x, received 0x%08x, calculated 0x%08x, dropping session at %lu\n", + session, sth.opcode, crc32c, calculated_crc32c, jiffies); + iscsi_drop_session(session); + goto EndSession; + } + } + } + else { + rxbuf = NULL; + } + + switch (sth.opcode) { + case ISCSI_OP_NOOP_IN|0xc0: /* work-around a bug in the Intel Nov05 target */ + case ISCSI_OP_NOOP_IN: + iscsi_recv_nop( session, (struct IscsiNopInHdr *)&sth); + break; + case ISCSI_OP_SCSI_RSP: + iscsi_recv_cmd(session, (struct IscsiScsiRspHdr *)&sth, rxbuf); + break; + case ISCSI_OP_SCSI_TASK_MGT_RSP: + iscsi_recv_task_mgmt(session, (struct IscsiScsiTaskMgtRspHdr *)&sth); + break; + case ISCSI_OP_RTT_RSP: + iscsi_recv_r2t(session, (struct IscsiRttHdr *)&sth); + break; + case ISCSI_OP_SCSI_DATA_RSP: + iscsi_recv_data( session, (struct IscsiDataRspHdr *)&sth); + break; + case ISCSI_OP_ASYNC_EVENT: + iscsi_recv_async_event(session, (struct IscsiAsyncEvtHdr *)&sth, rxbuf); + break; + case ISCSI_OP_REJECT_MSG: + iscsi_recv_reject(session, (struct IscsiRejectRspHdr *)&sth, rxbuf); + break; + case ISCSI_OP_LOGOUT_RSP: + iscsi_recv_logout(session, (struct IscsiLogoutRspHdr *)&sth); + break; + default: + printk("iSCSI: session %p dropping after receiving unexpected opcode 0x%x\n", session, sth.opcode); + session->time2wait = 2; /* don't spin if the target always sends illegal opcodes */ + iscsi_drop_session(session); + goto EndSession; + } + } + else { + if ( rc != -EAGAIN ) { + if (rc == 0) { + printk( "iSCSI: session %p closed by target %s at %lu\n", + session, session->log_name, jiffies); + } + else if (rc == -ECONNRESET) { + printk( "iSCSI: session %p to %s received connection reset at %lu\n", + session, session->log_name, jiffies); + } + else if ( rc == -ERESTARTSYS ) { + 
printk( "iSCSI: session %p to %s received signal at %lu\n", + session, session->log_name, jiffies); + } + else { + printk("iSCSI: session %p to %s short PDU header read, %d of %d at %lu\n", + session, session->log_name, rc, length, jiffies); + } + iscsi_drop_session(session); + goto EndSession; + } + } + } + + EndSession: + DEBUG_INIT("iSCSI: session %p going down at %lu\n", session, jiffies); + + /* calculate how long to wait before logging in again */ + if (session->time2wait >= 0) { + /* the target gave us a specific Time2Wait */ + login_delay = session->time2wait; + session->time2wait = -1; + DEBUG_INIT("iSCSI: session %p Time2Wait %ld\n", session, login_delay); + } + else { + /* use the default */ + login_delay = session->DefaultTime2Wait; + DEBUG_INIT("iSCSI: session %p DefaultTime2Wait %ld\n", session, login_delay); + } + + if (time_before_eq(session->session_drop_time, session->session_established_time + (2 * HZ))) { + /* if the session dies really quicky after we reach + * full-feature phase, we may not be interoperable due to + * bugs in the target (or this driver) that send illegal + * opcodes, or disagreements about how to do CRC + * calculations. To avoid spinning, we track sessions + * with really short lifetimes, and decrease the login + * frequency if we keep getting session failures, like we + * do for login failures. 
+ */ + session_failures++; + + if (session_failures < 30) + login_delay = MAX(login_delay, 1); /* 30 seconds at 1 sec each */ + else if (session_failures < 48) + login_delay = MAX(login_delay, 5); /* another 90 seconds at 5 sec each */ + else if (session->replacement_timeout && + time_before_eq(session->session_drop_time + (HZ * session->replacement_timeout), jiffies)) + { + login_delay = MAX(login_delay, 10); /* every 10 seconds */ + } + else { + /* after the replacement timeout has expired, the + * device will probably be offline, so we probably + * don't need a session anymore, but it's possible the + * device isn't offline yet because of all the + * hard-coded sleeps in the SCSI midlayer after resets + * occur, and in any case it might be useful to know + * if we ever get a session back for debugging + * purposes, so we'll keep trying occasionally. + */ + login_delay = MAX(login_delay, 60); + } + + printk("iSCSI: session %p has ended quickly %lu times, login delay %ld seconds\n", + session, session_failures, login_delay); + } + else { + /* session lived long enough that the target is probably ok */ + session_failures = 0; + } + + /* handle any signals that may have occured, which may kill the tx thread */ + iscsi_handle_signals(session); + + /* we need to wait for the tx thread to block before trying to complete commands, + * since it may be using a task at the moment, which means we can't complete it yet. + * even if the session is terminating, we must wait for the tx thread. + */ + wait_for_tx_blocked(session); + + spin_lock(&session->task_lock); + + if (session->warm_reset_itt != RSVD_TASK_TAG) { + printk("iSCSI: session %p dropped during warm target reset, assuming SCSI commands completed by reset\n", session); + session->warm_reset_itt = RSVD_TASK_TAG; + smp_mb(); + + /* FIXME: complete everything with DID_RESET? 
*/ + requeue_or_fail_commands(session); + } + else if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + requeue_or_fail_commands(session); + } + else if (test_bit(SESSION_LOGGED_OUT, &session->control_bits)) { + /* the session has logged out, so there shouldn't be any tasks, but check anyway */ + requeue_or_fail_commands(session); + } + else { + /* session dropped unexpectedly, often due to network problems */ + printk("iSCSI: session %p to %s dropped\n", session, session->log_name); + + /* fail all commands that don't allow retries, and requeue everything else */ + requeue_or_fail_commands(session); + } + + /* can't send any nop replies now */ + session->nop_reply.ttt = RSVD_TASK_TAG; + while (session->nop_reply_head) { + iscsi_nop_info_t *nop_info = session->nop_reply_head; + session->nop_reply_head = nop_info->next; + DEBUG_ALLOC("iSCSI: kfree nop_info %p\n", nop_info); + kfree(nop_info); + } + session->nop_reply_tail = NULL; + /* a ping test also fails on a connection drop */ + session->ping_test_start = 0; + session->ping_test_rx_length = 0; + session->ping_test_data_length = 0; + if (session->ping_test_tx_buffer) { + kfree(session->ping_test_tx_buffer); + session->ping_test_tx_buffer = NULL; + } + + /* no point trying to logout now */ + session->logout_deadline = 0; + session->logout_response_deadline = 0; + + /* terminate error recovery and command retries */ + session->mgmt_itt = RSVD_TASK_TAG; + session->task_mgmt_response_deadline = 0; + del_timer_sync(&session->busy_task_timer); + del_timer_sync(&session->busy_command_timer); + del_timer_sync(&session->immediate_reject_timer); + del_timer_sync(&session->retry_timer); + memset(session->luns_timing_out, 0, sizeof(session->luns_timing_out)); + memset(session->luns_doing_recovery, 0, sizeof(session->luns_doing_recovery)); + memset(session->luns_delaying_commands, 0, sizeof(session->luns_delaying_commands)); + session->num_luns_delaying_commands = 0; + + /* we'll never get a reset reply now */ + 
session->warm_reset_itt = RSVD_TASK_TAG; + session->reset_response_deadline = 0; + + /* cancel any testing */ + session->ignore_lun = -2; + session->ignore_completions = 0; + session->ignore_aborts = 0; + session->ignore_abort_task_sets = 0; + session->ignore_lun_resets = 0; + session->ignore_warm_resets = 0; + session->ignore_cold_resets = 0; + session->reject_lun = -2; + session->reject_aborts = 0; + session->reject_abort_task_sets = 0; + session->reject_lun_resets = 0; + session->reject_warm_resets = 0; + session->fake_status_lun = -2; + session->fake_status_unreachable = 0; + session->fake_status_busy = 0; + session->fake_status_queue_full = 0; + + spin_unlock(&session->task_lock); + } + + ThreadExit: + DEBUG_INIT("iSCSI: session %p for (%u %u %u *) rx thread %d exiting\n", + session, session->host_no, session->channel, session->target_id, session->rx_pid); + /* indicate that we're already going down, so that we don't get killed */ + session->rx_pid = 0; + smp_mb(); + + /* this will fail all commands, since the SESSION_TERMINATING bit is set */ + requeue_or_fail_commands(session); + + spin_lock(&session->task_lock); + /* no point trying to logout now */ + session->logout_deadline = 0; + session->logout_response_deadline = 0; + /* terminate error recovery */ + session->mgmt_itt = RSVD_TASK_TAG; + session->task_mgmt_response_deadline = 0; + session->reset_response_deadline = 0; + /* ensure the timers have been deleted before we free their memory */ + del_timer_sync(&session->busy_task_timer); + del_timer_sync(&session->busy_command_timer); + del_timer_sync(&session->immediate_reject_timer); + del_timer_sync(&session->retry_timer); + memset(session->luns_timing_out, 0, sizeof(session->luns_timing_out)); + memset(session->luns_doing_recovery, 0, sizeof(session->luns_doing_recovery)); + memset(session->luns_delaying_commands, 0, sizeof(session->luns_delaying_commands)); + if (session->preallocated_task) { + iscsi_task_ctor(session->preallocated_task, NULL, 0); + 
kmem_cache_free(session->hba->task_cache, session->preallocated_task); + session->preallocated_task = NULL; + } + else { + printk("iSCSI: session %p for (%u %u %u *) terminating, but has no preallocated task to free at %lu\n", + session, session->host_no, session->channel, session->target_id, jiffies); + } + spin_unlock(&session->task_lock); + + /* cleanup the socket */ + if (session->socket) { + /* wait for the tx thread to exit */ + while (session->tx_pid) { + DEBUG_INIT("iSCSI: session %p rx thread %d waiting for tx thread %d to exit\n", + session, current->pid, session->tx_pid); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + } + + /* drop the connection */ + iscsi_disconnect(session); + } + + set_bit(SESSION_TERMINATED, &session->control_bits); + +#if PREVENT_DATA_CORRUPTION + if (session->xmit_data_buffer) + kfree(session->xmit_data_buffer); +#endif + + /* wake up any ioctls sleeping on the session */ + wake_up(&session->login_wait_q); + up(&session->probe_sem); + + /* iscsi_remove_luns(session); */ + if (!session->this_is_root_disk) { + iscsi_remove_luns(session); + } + + drop_reference(session); + + return 0; +} + + +static int iscsi_inet_aton(char *asciiz, unsigned char *ip_address, int *ip_length) +{ + char *c = asciiz; + unsigned char *ip = ip_address; + uint32_t base = 10, value = 0; + int empty; + + if ((asciiz == NULL) || (*asciiz == '\0') || (ip_address == NULL) || (ip_length == NULL)) + return 0; + + /* FIXME: IPv6 */ + + /* look for an IPv4 dotted-decimal IP address */ + while (*c) { + value = 0; + base = 10; + empty = 1; + + /* each part must start with a digit */ + if (!is_digit(*c)) + return 0; + + /* figure out the base for this part */ + if (*c == '0') { + empty = 0; + base = 8; + c++; + if (*c == 'x') { + base = 16; + c++; + } + } + + /* get the value of this part */ + while (*c && (*c != '.')) { + if ((base == 16) && (is_hex_lower(*c))) { + value = (value * base) + (*c - 'a') + 10; + c++; + empty = 0; + } 
+ else if ((base == 16) && (is_hex_upper(*c))) { + value = (value * base) + (*c - 'A') + 10; + c++; + empty = 0; + } + else if (is_digit(*c)) { + value = (value * base) + (*c - '0'); + c++; + empty = 0; + } + else + return 0; + } + + /* reached the end of the part? */ + if (empty) { + return 0; + } + else if (*c == '.') { + c++; + if (value <= 0xFF) { + *ip++ = value & 0xFF; + value = 0; + } + else + return 0; + } + } + + if (*c == '\0') { + /* end of the ascii address */ + switch (ip - ip_address) { + default: + return 0; + case 0: + /* a = 32 */ + *ip++ = (value >> 24) & 0xFF; + *ip++ = (value >> 16) & 0xFF; + *ip++ = (value >> 8) & 0xFF; + *ip++ = (value >> 0) & 0xFF; + if (ip_length) + *ip_length = 4; + return 1; + case 1: + /* a.b = 8,24*/ + if (value <= 0x00FFFFFF) { + *ip++ = (value >> 16) & 0xFF; + *ip++ = (value >> 8) & 0xFF; + *ip++ = (value >> 0) & 0xFF; + if (ip_length) + *ip_length = 4; + return 1; + } + else + return 0; + case 2: + /* a.b.c = 8,8,16 */ + if (value <= 0x0000FFFF) { + *ip++ = (value >> 8) & 0xFF; + *ip++ = (value >> 0) & 0xFF; + if (ip_length) + *ip_length = 4; + return 1; + } + else + return 0; + case 3: + /* a.b.c.d = 8,8,8,8 */ + if (value <= 0x000000FF) { + *ip++ = (value >> 0) & 0xFF; + if (ip_length) + *ip_length = 4; + return 1; + } + else + return 0; + } + } + + return 0; +} + +static int update_address(iscsi_session_t *session, char *address) +{ + char *tag; + char *port; + int ret = 0; + unsigned char ip[16]; + int ip_length = 4; + + memset(ip, 0, sizeof(ip)); + + if ((tag = iscsi_strrchr(address, ','))) { + *tag = '\0'; + tag++; + } + if ((port = iscsi_strrchr(address, ':'))) { + *port = '\0'; + port++; + } + + /* update the session's IP address and port, based on the + * TargetAddress passed to us. For now, we can't resolve DNS + * names, since that would require us to pass the request up to a + * user-mode process, and use the NSS system. 
We have our own + * equivalent of inet_aton, and fail to change the address if we + * get a DNS name instead of an IP address. + */ + if (iscsi_inet_aton(address, ip, &ip_length)) { + memcpy(session->ip_address, ip, sizeof(session->ip_address)); + session->ip_length = ip_length; + if (port) + session->port = iscsi_strtoul(port, NULL, 0); + else + session->port = ISCSI_LISTEN_PORT; + + if (tag) + session->portal_group_tag = iscsi_strtoul(tag, NULL, 0); + + ret = 1; + } + + /* restore the original strings */ + if (tag) { + --tag; + *tag = ','; + } + if (port) { + --port; + *port = ':'; + } + + smp_mb(); + return ret; +} + +static int same_network_portal(iscsi_portal_info_t *p1, iscsi_portal_info_t *p2) +{ + if (p1->port != p2->port) + return 0; + + if (p1->tag != p2->tag) + return 0; + + if (p1->ip_length != p2->ip_length) + return 0; + + if (memcmp(p1->ip_address, p2->ip_address, p1->ip_length)) + return 0; + + return 1; +} + + +static int update_session(iscsi_session_t *session, iscsi_session_ioctl_t *ioctld, iscsi_portal_info_t *portals) +{ + iscsi_portal_info_t *old_portals; + iscsi_portal_info_t *old_portal; + iscsi_portal_info_t *new_portal = NULL; + iscsi_portal_info_t *q = NULL; + char *username = NULL; + unsigned char *password = NULL; + int password_length = 0; + char *username_in = NULL; + unsigned char *password_in = NULL; + int password_length_in = 0; + int bidirectional = 0; + int auth_update_failed = 0; + int p; + int relogin = 0; + size_t length; + char *str; + unsigned int requested_portal = UINT_MAX; + unsigned int portal = 0; + int ret = 0; + int found = 0; + + if (down_interruptible(&session->config_mutex)) { + /* signalled before we got the mutex */ + printk("iSCSI: session %p configuration update aborted by signal at %lu\n", + session, jiffies); + return 0; + } + + if (ioctld->update && (ioctld->config_number < session->config_number)) { + /* this update is obsolete, ignore it */ + DEBUG_INIT("iSCSI: session %p ignoring obsolete update #%u, 
currently on config #%u\n", + session, ioctld->config_number, session->config_number); + return 0; + } + + session->config_number = ioctld->config_number; + + printk("iSCSI: bus %d target %d updating configuration of session %p to %s\n", + ioctld->iscsi_bus, ioctld->target_id, session, session->log_name); + + /* once we have the mutex, we're guaranteed that the session is + * initialized and not logging in at the moment, so we can safely + * change the per-session settings stored in the session itself: + * isid, InitiatorName, InitiatorAlias, username, password + */ + + if (memcmp(session->isid, ioctld->isid, sizeof(session->isid))) { + /* FIXME: the explicit logout better work, since there won't be an implicit logout */ + memcpy(session->isid, ioctld->isid, sizeof(session->isid)); + relogin = 1; + } + + if ((session->InitiatorName == NULL) || strcmp(ioctld->InitiatorName, session->InitiatorName)) { + length = strlen(ioctld->InitiatorName); + if ((str = kmalloc(length + 1, GFP_ATOMIC))) { + if (session->InitiatorName) + kfree(session->InitiatorName); + session->InitiatorName = str; + strcpy(session->InitiatorName, ioctld->InitiatorName); + relogin = 1; + DEBUG_INIT("iSCSI: session %p updated InitiatorName at %lu\n", session, jiffies); + } + else { + printk("iSCSI: session %p failed to change InitiatorName from %s to %s\n", + session, session->InitiatorName, ioctld->InitiatorName); + up(&session->config_mutex); + return 0; + } + } + + if ((session->InitiatorAlias == NULL) || strcmp(ioctld->InitiatorAlias, session->InitiatorAlias)) { + length = strlen(ioctld->InitiatorAlias); + if ((str = kmalloc(length + 1, GFP_ATOMIC))) { + if (session->InitiatorAlias) + kfree(session->InitiatorAlias); + session->InitiatorAlias = str; + strcpy(session->InitiatorAlias, ioctld->InitiatorAlias); + relogin = 1; + DEBUG_INIT("iSCSI: session %p updated InitiatorAlias at %lu\n", session, jiffies); + } + else { + printk("iSCSI: session %p failed to change InitiatorAlias from %s to %s\n", + 
session, session->InitiatorAlias, ioctld->InitiatorAlias); + up(&session->config_mutex); + return 0; + } + } + + /* transactional (all-or-nothing) update of the auth config */ + if (ioctld->username_in[0] || ioctld->password_length_in) + bidirectional = 1; + + /* cases: + * 1) no new or current value (unchanged), NULL, NULL + * 2) new but no current value (transactional update) NULL, * + * 3) no new but current value (transactional delete) *, NULL + * 4) new and current value are different (transactional update) *,* + * 5) new and current value are the same (unchanged) * == * + */ + if (ioctld->username[0]) { + if ((session->username == NULL) || strcmp(ioctld->username, session->username)) { + /* update the username */ + length = strlen(ioctld->username); + if ((username = kmalloc(length + 1, GFP_ATOMIC))) { + strncpy(username, ioctld->username, length); + username[length] = '\0'; + } + else { + printk("iSCSI: session %p failed to change outgoing username\n", session); + ret = -ENOMEM; + auth_update_failed = 1; + } + } + else { + /* they're the same, just keep the current one */ + username = session->username; + } + } + + if (ioctld->password_length) { + if ((session->password == NULL) || (session->password_length != ioctld->password_length) || + memcmp(ioctld->password, session->password, session->password_length)) + { + /* update the existing password */ + if ((password = kmalloc(ioctld->password_length + 1, GFP_ATOMIC))) { + password_length = ioctld->password_length; + memcpy(password, ioctld->password, password_length); + password[password_length] = '\0'; + } + else { + printk("iSCSI: session %p failed to change outgoing password\n", session); + password_length = 0; + ret = -ENOMEM; + auth_update_failed = 1; + } + } + else { + /* they're the same, just keep the current one */ + password = session->password; + } + } + + if (ioctld->username_in[0]) { + if ((session->username_in == NULL) || strcmp(ioctld->username_in, session->username_in)) { + /* update the 
username */
+ length = strlen(ioctld->username_in);
+ if ((username_in = kmalloc(length + 1, GFP_ATOMIC))) {
+ strncpy(username_in, ioctld->username_in, length);
+ username_in[length] = '\0';
+ }
+ else {
+ printk("iSCSI: session %p failed to change incoming username\n", session);
+ ret = -ENOMEM;
+ auth_update_failed = 1;
+ }
+ }
+ else {
+ /* they're the same, just keep the current one */
+ username_in = session->username_in;
+ }
+ }
+
+ if (ioctld->password_length_in) {
+ if ((session->password_in == NULL) || (session->password_length_in != ioctld->password_length_in) ||
+ memcmp(ioctld->password_in, session->password_in, session->password_length_in))
+ {
+ /* update the existing password */
+ if ((password_in = kmalloc(ioctld->password_length_in + 1, GFP_ATOMIC))) {
+ password_length_in = ioctld->password_length_in;
+ memcpy(password_in, ioctld->password_in, password_length_in);
+ password_in[password_length_in] = '\0';
+ }
+ else {
+ printk("iSCSI: session %p failed to change incoming password\n", session);
+ password_length_in = 0;
+ ret = -ENOMEM;
+ auth_update_failed = 1;
+ }
+ }
+ else {
+ /* they're the same, just keep the current one */
+ password_in = session->password_in;
+ }
+ }
+
+
+ if (!auth_update_failed) {
+ /* update to the new auth config */
+ session->bidirectional_auth = bidirectional;
+
+ if (username != session->username) {
+ /* update current */
+ if (session->username) {
+ memset(session->username, 0, strlen(session->username));
+ kfree(session->username);
+ }
+ session->username = username;
+ if (username)
+ DEBUG_INIT("iSCSI: session %p updated outgoing username to %s at %lu\n", session, session->username, jiffies);
+ }
+
+ if (password != session->password) {
+ /* update current */
+ if (session->password) {
+ memset(session->password, 0, session->password_length);
+ kfree(session->password);
+ }
+ session->password = password;
+ session->password_length = password_length;
+ if (password)
+ DEBUG_INIT("iSCSI: session %p updated outgoing
password at %lu\n", session, jiffies);
+ }
+
+ if (username_in != session->username_in) {
+ /* update current */
+ if (session->username_in) {
+ memset(session->username_in, 0, strlen(session->username_in));
+ kfree(session->username_in);
+ }
+ session->username_in = username_in;
+ if (username_in)
+ DEBUG_INIT("iSCSI: session %p updated incoming username to %s at %lu\n", session, session->username_in, jiffies);
+ }
+
+ if (password_in != session->password_in) {
+ /* update current */
+ if (session->password_in) {
+ memset(session->password_in, 0, session->password_length_in);
+ kfree(session->password_in);
+ }
+ session->password_in = password_in;
+ session->password_length_in = password_length_in;
+ if (password_in)
+ DEBUG_INIT("iSCSI: session %p updated incoming password at %lu\n", session, jiffies);
+ }
+ }
+ else {
+ /* update failed, free anything we allocated */
+ if (username)
+ kfree(username);
+ if (password)
+ kfree(password);
+ if (username_in)
+ kfree(username_in);
+ if (password_in)
+ kfree(password_in);
+ }
+
+ /* iscsi_establish_session will ensure we have auth structures,
+ * or error out if bidi auth is required and we can't do authentication.
+ */
+
+ up(&session->config_mutex);
+
+ /* the portals are guarded by a spinlock instead of the config
+ * mutex, so that we can request portal changes while a login is
+ * occurring.
+ */ + spin_lock(&session->portal_lock); + + /* replace the portals */ + old_portals = session->portals; + session->portals = portals; + session->num_portals = ioctld->num_portals; + session->requested_portal = UINT_MAX; /* cancel any request, since the portals may have changed */ + session->fallback_portal = UINT_MAX; /* cancel any fallback, since the portals may have changed */ + session->portal_failover = ioctld->portal_failover; + memset(session->preferred_portal, 0, sizeof(session->preferred_portal) ); + memset(session->preferred_subnet, 0, sizeof(session->preferred_subnet) ); + session->preferred_portal_bitmap = 0; + session->preferred_subnet_bitmap = 0; + session->tried_portal_bitmap = 0; + + if (ioctld->preferred_portal && strlen(ioctld->preferred_portal)) { + memcpy(session->preferred_portal, ioctld->preferred_portal, strlen(ioctld->preferred_portal)); + set_preferred_portal_bitmap(session); + } + + if (ioctld->preferred_subnet && strlen(ioctld->preferred_subnet)) { + memcpy(session->preferred_subnet, ioctld->preferred_subnet, strlen(ioctld->preferred_subnet)); + session->preferred_subnet_mask = ioctld->preferred_subnet_mask; + set_preferred_subnet_bitmap(session); + } + + printk("iSCSI: bus %d target %d = %s\n", ioctld->iscsi_bus, ioctld->target_id, ioctld->TargetName); + for (p = 0; p < session->num_portals; p++) { + /* FIXME: IPv6 */ + printk("iSCSI: bus %d target %d portal %u = address %u.%u.%u.%u port %d group %d\n", + ioctld->iscsi_bus, ioctld->target_id, p, + portals[p].ip_address[0], portals[p].ip_address[1], + portals[p].ip_address[2], portals[p].ip_address[3], + portals[p].port, portals[p].tag); + } + + old_portal = &old_portals[session->current_portal]; + + /* figure out which new portal (if any) we're currently connected/connecting to */ + for (p = 0; p < ioctld->num_portals; p++) { + if (same_network_portal(&portals[p], old_portal)) { + new_portal = &portals[p]; + + if (session->current_portal == p) { + DEBUG_INIT("iSCSI: bus %d target %d 
staying with portal %u\n", session->iscsi_bus, session->target_id, p); + } + else if (session->portal_failover) { + printk("iSCSI: bus %d target %d portals have changed, old portal %u is new portal %u\n", + session->iscsi_bus, session->target_id, session->current_portal, p); + /* request the new portal if we decide we need to relogin */ + requested_portal = p; + /* but reset the current portal in case we don't need to relogin */ + session->current_portal = p; + } + } + } + + if ((new_portal == NULL) && session->portal_failover) { + /* no matching new portal. try to find a portal in the same portal group */ + for (p = 0; p < ioctld->num_portals; p++) { + if (portals[p].tag == old_portal->tag) { + printk("iSCSI: bus %d target %d portals have changed, session %p switching to portal %u in group %u\n", + session->iscsi_bus, session->target_id, session, p, portals[p].tag); + requested_portal = p; + new_portal = &portals[p]; + relogin = 1; + } + } + } + + if ((new_portal == NULL) && session->portal_failover) { + /* we couldn't find a portal in the same portal group. + * if we can do a clean logout, we can login to any portal. + * if we can't logout, we risk command reordering if we login to a different group. + */ + new_portal = &portals[0]; + requested_portal = 0; + printk("iSCSI: bus %d target %d portals have changed, failed to find a new portal in portal group %u, session %p trying portal 0 group %u\n", + session->iscsi_bus, session->target_id, old_portal->tag, session, new_portal->tag); + /* FIXME: if the logout fails, we'll need to error out somehow. 
*/ + relogin = 1; + } + + if (session->portal_failover) { + /* the driver timeouts can change on the fly, with no relogin */ + if (new_portal->login_timeout != old_portal->login_timeout) + session->login_timeout = new_portal->login_timeout; + + if (new_portal->auth_timeout != old_portal->auth_timeout) + session->auth_timeout = new_portal->auth_timeout; + + if (new_portal->active_timeout != old_portal->active_timeout) + session->active_timeout = new_portal->active_timeout; + + if (new_portal->idle_timeout != old_portal->idle_timeout) + session->idle_timeout = new_portal->idle_timeout; + + if (new_portal->ping_timeout != old_portal->ping_timeout) { + session->ping_timeout = new_portal->ping_timeout; + relogin = 1; /* because we ask the target to use it as well with com.cisco.PingTimeout */ + } + + if (new_portal->abort_timeout != old_portal->abort_timeout) + session->abort_timeout = new_portal->abort_timeout; + + if (new_portal->reset_timeout != old_portal->reset_timeout) + session->reset_timeout = new_portal->reset_timeout; + + /* FIXME: this should probably be per-session rather than per-portal */ + if (new_portal->replacement_timeout != old_portal->replacement_timeout) + session->replacement_timeout = new_portal->replacement_timeout; + + /* FIXME: get the scsi_cmnd_lock when setting these? 
*/ + if (new_portal->min_disk_command_timeout != old_portal->min_disk_command_timeout) + session->min_disk_command_timeout = new_portal->min_disk_command_timeout; + + if (new_portal->max_disk_command_timeout != old_portal->max_disk_command_timeout) + session->max_disk_command_timeout = new_portal->max_disk_command_timeout; + + /* the iSCSI op params need a relogin to change */ + if (new_portal->InitialR2T != old_portal->InitialR2T) + relogin = 1; + + if (new_portal->ImmediateData != old_portal->ImmediateData) + relogin = 1; + + if (new_portal->MaxRecvDataSegmentLength != old_portal->MaxRecvDataSegmentLength) + relogin = 1; + + if (new_portal->FirstBurstLength != old_portal->FirstBurstLength) + relogin = 1; + + if (new_portal->MaxBurstLength != old_portal->MaxBurstLength) + relogin = 1; + + if (new_portal->DefaultTime2Wait != old_portal->DefaultTime2Wait) + relogin = 1; + + if (new_portal->DefaultTime2Retain != old_portal->DefaultTime2Retain) + relogin = 1; + + if (new_portal->HeaderDigest != old_portal->HeaderDigest) + relogin = 1; + + if (new_portal->DataDigest != old_portal->DataDigest) + relogin = 1; + + /* the TCP connection settings need a relogin */ + if (new_portal->tcp_window_size != old_portal->tcp_window_size) + relogin = 1; + + /* FIXME: TCP type_of_service */ + + } else { + relogin = 0; + q = session->portals; + for (portal=0; portal < session->num_portals; portal++) { + if (memcmp( q[portal].ip_address, session->ip_address, session->ip_length) == 0) { + found = 1; + break; + } + } + if (!found) { + iscsi_terminate_session(session); + drop_reference(session); + return 1; + } + } + + if (relogin) { + /* if we have to relogin, place any portal request decided on earlier */ + session->requested_portal = requested_portal; + session->fallback_portal = UINT_MAX; + } + + spin_unlock(&session->portal_lock); + + kfree(old_portals); + + smp_mb(); + + if (relogin) { + if (test_bit(SESSION_ESTABLISHED, &session->control_bits)) { + spin_lock(&session->task_lock); + 
printk("iSCSI: bus %d target %d configuration updated at %lu, session %p to %s must logout\n", + session->iscsi_bus, session->target_id, jiffies, session, session->log_name); + iscsi_request_logout(session, 3, session->active_timeout); + spin_unlock(&session->task_lock); + } + else { + printk("iSCSI: bus %d target %d configuration updated at %lu while session %p to %s is not established\n", + session->iscsi_bus, session->target_id, jiffies, session, session->log_name); + } + } else { + printk("iSCSI: bus %d target %d configuration updated at %lu, session %p to %s does not need to logout\n", + session->iscsi_bus, session->target_id, jiffies, session, session->log_name); + } + + return 1; +} + +static iscsi_session_t *allocate_session(iscsi_session_ioctl_t *ioctld, iscsi_portal_info_t *portals) +{ + iscsi_session_t *session = (iscsi_session_t *)kmalloc(sizeof(*session), GFP_KERNEL); + size_t length; + int pp; + + if (session == NULL) { + printk("iSCSI: bus %d target %d cannot allocate new session (size %Zu) at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, sizeof(*session), jiffies); + return NULL; + } + + memset(session, 0, sizeof(*session)); + atomic_set(&session->refcount, 1); + DEBUG_INIT("iSCSI: bus %d target %d allocated session %p (size %Zu) at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, session, sizeof(*session), jiffies); + + /* an InitiatorName is required */ + length = strlen(ioctld->InitiatorName); + if (length) { + if ((session->InitiatorName = kmalloc(length + 1, GFP_KERNEL))) { + strncpy(session->InitiatorName, ioctld->InitiatorName, length); + session->InitiatorName[length] = '\0'; + } + else { + printk("iSCSI: bus %d target %d cannot allocate InitiatorName at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, jiffies); + delete_session(session); + return NULL; + } + } + else { + printk("iSCSI: bus %d target %d has no InitiatorName at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, jiffies); + delete_session(session); + return NULL; + } + + /* an 
InitiatorAlias is optional */ + length = strlen(ioctld->InitiatorAlias); + if (length && (session->InitiatorAlias = kmalloc(length + 1, GFP_KERNEL))) { + strncpy(session->InitiatorAlias, ioctld->InitiatorAlias, length); + session->InitiatorAlias[length] = '\0'; + } + + memcpy(session->isid, ioctld->isid, sizeof(session->isid)); + + if (this_is_iscsi_boot) { + if (strcmp(iscsi_inbp_info.targetstring, ioctld->TargetName)) { + session->this_is_root_disk = 0; + DEBUG_INIT("\nMaking session->this_is_root_disk = 0 for %s\n", ioctld->TargetName); + } else { + session->this_is_root_disk = 1; + DEBUG_INIT("\nMaking session->this_is_root_disk = 1 for %s\n", ioctld->TargetName); + } + } + + strncpy(session->TargetName, ioctld->TargetName, sizeof(session->TargetName)); + session->TargetName[sizeof(session->TargetName)-1] = '\0'; + session->log_name = session->TargetName; + session->TargetAlias[0] = '\0'; /* none unless declared by the target */ + + session->num_auth_buffers = 0; + session->auth_client_block = NULL; + session->auth_recv_string_block = NULL; + session->auth_send_string_block = NULL; + session->auth_recv_binary_block = NULL; + session->auth_send_binary_block = NULL; + + /* allocate authentication info */ + if (ioctld->username_in[0] || ioctld->password_length_in) { + /* we must authenticate the target or refuse to login */ + session->bidirectional_auth = 1; + } + else { + session->bidirectional_auth = 0; /* authentication is optional */ + } + + /* FIXME: should we fail the ioctl if the allocation fails? 
*/ + if ((length = strlen(ioctld->username))) { + if ((session->username = kmalloc(length + 1, GFP_KERNEL))) { + strncpy(session->username, ioctld->username, length); + session->username[length] = '\0'; + } + else { + printk("iSCSI: bus %d target %d failed to allocate outgoing username at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, jiffies); + delete_session(session); + return NULL; + } + } + + if (ioctld->password_length) { + if ((session->password = kmalloc(ioctld->password_length + 1, GFP_KERNEL))) { + memcpy(session->password, ioctld->password, ioctld->password_length); + session->password_length = ioctld->password_length; + session->password[session->password_length] = '\0'; + } + else { + printk("iSCSI: bus %d target %d failed to allocate outgoing password at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, jiffies); + delete_session(session); + return NULL; + } + } + + if ((length = strlen(ioctld->username_in))) { + if ((session->username_in = kmalloc(length + 1, GFP_KERNEL))) { + strncpy(session->username_in, ioctld->username_in, length); + session->username_in[length] = '\0'; + } + else { + printk("iSCSI: bus %d target %d failed to allocate incoming username at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, jiffies); + delete_session(session); + return NULL; + } + } + + if (ioctld->password_length_in) { + if ((session->password_in = kmalloc(ioctld->password_length_in + 1, GFP_KERNEL))) { + memcpy(session->password_in, ioctld->password_in, ioctld->password_length_in); + session->password_length_in = ioctld->password_length_in; + session->password_in[session->password_length_in] = '\0'; + } + else { + printk("iSCSI: bus %d target %d failed to allocate incoming password at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, jiffies); + delete_session(session); + return NULL; + } + } + + /* the auth structures are allocated in iscsi_establish_session, so that + * any allocation failures are retried automatically. 
+ */ + +#if 0 + if (session->username) + printk("iSCSI: session %p username %s\n", session, session->username); + + if (session->password) + printk("iSCSI: session %p password %s\n", session, session->password); + + if (session->username_in) + printk("iSCSI: session %p username_in %s\n", session, session->username_in); + + if (session->password_in) + printk("iSCSI: session %p password_in %s\n", session, session->password_in); +#endif + + /* initialize the session structure */ + session->socket = NULL; + + session->config_number = ioctld->config_number; + + spin_lock_init(&session->portal_lock); + session->num_portals = 0; + session->portals = NULL; + session->auth_failures = 0; + session->portal_failover = 1; + session->current_portal = 0; + session->requested_portal = UINT_MAX; + session->fallback_portal = UINT_MAX; + session->portal_group_tag = -1; + memset(session->preferred_portal, 0, sizeof(session->preferred_portal) ); + memset(session->preferred_subnet, 0, sizeof(session->preferred_subnet) ); + session->preferred_portal_bitmap = 0; + session->preferred_subnet_bitmap = 0; + session->tried_portal_bitmap = 0; + + spin_lock_init( &session->scsi_cmnd_lock); + session->retry_cmnd_head = session->retry_cmnd_tail = NULL; + atomic_set(&session->num_retry_cmnds, 0); + session->scsi_cmnd_head = session->scsi_cmnd_tail = NULL; + atomic_set(&session->num_cmnds, 0); + session->deferred_cmnd_head = session->deferred_cmnd_tail = NULL; + session->num_deferred_cmnds = 0; + + sema_init(&session->probe_sem, 0); /* the first down should block */ + session->probe_order = ioctld->probe_order; + + sema_init(&session->config_mutex, 0); /* the first down should block */ + + spin_lock_init( &session->task_lock); + session->arrival_order.head = session->arrival_order.tail = NULL; + session->tx_tasks.head = session->tx_tasks.tail = NULL; + atomic_set(&session->num_active_tasks, 0); + session->preallocated_task = NULL; + + init_waitqueue_head(&session->tx_wait_q); + 
init_waitqueue_head(&session->tx_blocked_wait_q); + init_waitqueue_head(&session->login_wait_q); + + init_timer(&session->busy_task_timer); + init_timer(&session->busy_command_timer); + init_timer(&session->immediate_reject_timer); + init_timer(&session->retry_timer); + + /* save the portal info, and pick which portal to start with */ + session->update_address = &update_address; + session->portals = portals; + session->num_portals = ioctld->num_portals; + session->portal_failover = ioctld->portal_failover; + + if (ioctld->preferred_portal && strlen(ioctld->preferred_portal)) { + memcpy(session->preferred_portal, ioctld->preferred_portal, strlen(ioctld->preferred_portal)); + set_preferred_portal_bitmap(session); + } + + if (ioctld->preferred_subnet && strlen(ioctld->preferred_subnet)) { + memcpy(session->preferred_subnet, ioctld->preferred_subnet, strlen(ioctld->preferred_subnet)); + session->preferred_subnet_mask = ioctld->preferred_subnet_mask; + set_preferred_subnet_bitmap(session); + } + + pp = get_appropriate_portal(session); + if (pp < 0) + set_portal(session, 0U); + else + set_portal(session, pp); + + session->channel = ioctld->iscsi_bus % ISCSI_MAX_CHANNELS_PER_HBA; + session->iscsi_bus = ioctld->iscsi_bus; + session->target_id = ioctld->target_id; + session->dir_mode = ioctld->link_dir_mode; + + session->itt = ioctld->target_id; /* to make reading login traces easier */ + session->generation = 0; + session->ever_established = 0; + session->time2wait = -1; + session->logout_itt = RSVD_TASK_TAG; + session->mgmt_itt = RSVD_TASK_TAG; + + session->ignore_lun = -2; + session->reject_lun = -2; + session->fake_status_lun = -2; + + /* in case the session never comes up */ + session->session_drop_time = jiffies; + +#if PREVENT_DATA_CORRUPTION + session->xmit_data_buffer = NULL; + session->xmit_buffer_size = 0; +#endif + + smp_mb(); + + return session; +} + +void clear_device_symlinks(iscsi_session_t *session, char *link_base_dir) +{ + char *check = link_base_dir + 
strlen(link_base_dir) - 1; + + /* remove any trailing slashes, since we add one below */ + while ((check > link_base_dir) && (*check == '/')) + *check-- = '\0'; + + /* don't let people use the root directory */ + if (check == link_base_dir) { + printk("iSCSI: cannot use root directory as link base dir\n"); + } + else { + sprintf(session->target_link_dir, "%s/bus%d/target%d/", + link_base_dir, session->iscsi_bus, session->target_id); + + /* this will remove any existing LUN dirs, but MUST be called while + * session->rx_buffer is unused, since it gets used as temp space to hold dirents. + */ + DEBUG_INIT("iSCSI: session %p clearing LUN dirs under %s\n", session, session->target_link_dir); + iscsi_remove_luns(session); + } +} + +int start_session_threads(iscsi_session_t *session) +{ + pid_t rx_pid, tx_pid; + + /* start a tx thread */ + DEBUG_INIT("iSCSI: session %p about to start tx and rx threads at %lu\n", + session, jiffies); + atomic_inc(&session->refcount); + tx_pid = kernel_thread(iscsi_tx_thread, (void *)session, CLONE_VM| CLONE_FS | CLONE_FILES | CLONE_SIGHAND); + if (tx_pid > 0) { + DEBUG_INIT("iSCSI: session %p started tx thread %u at %lu\n", session, tx_pid, jiffies); + } + else { + printk("iSCSI: session %p failed to start tx thread, terminating session\n", session); + atomic_dec(&session->refcount); /* the thread isn't actually using it */ + iscsi_terminate_session(session); + drop_reference(session); + return -EAGAIN; + } + + /* start an rx thread */ + atomic_inc(&session->refcount); + rx_pid = kernel_thread(iscsi_rx_thread, (void *)session, CLONE_VM| CLONE_FS | CLONE_FILES | CLONE_SIGHAND); + if (rx_pid > 0) { + DEBUG_INIT("iSCSI: session %p started rx thread %u at %lu\n", session, rx_pid, jiffies); + } + else { + printk("iSCSI: session %p failed to start rx thread, terminating session\n", session); + atomic_dec(&session->refcount); /* the thread isn't actually using it */ + iscsi_terminate_session(session); + drop_reference(session); + return -EAGAIN; 
+ } + + DEBUG_INIT("iSCSI: session %p waiting for rx %d and tx %d at %lu\n", session, rx_pid, tx_pid, jiffies); + + /* wait for the threads to start */ + while ((session->tx_pid == 0) || (session->rx_pid == 0)) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p terminating, failed to start threads at %lu\n", session, jiffies); + return -EAGAIN; + } + else if (signal_pending(current)) { + iscsi_terminate_session(session); + printk("iSCSI: session %p thread start terminated, returning at %lu\n", session, jiffies); + return -EAGAIN; + } + smp_mb(); + } + + DEBUG_INIT("iSCSI: session %p started tx_pid %d, rx_pid %d\n", session, session->tx_pid, session->rx_pid); + + if (signal_pending(current)) + return -EINTR; + else + return 0; +} + + +/* do timer processing for one session, and return the length of time + * (in jiffies) til this session needs to be checked again. + */ +static unsigned long check_session_timeouts(iscsi_session_t *session) +{ + unsigned long timeout; + unsigned long session_timeout = 0; + + if (!test_bit(SESSION_ESTABLISHED, &session->control_bits)) { + /* check login phase timeouts */ + if (session->login_phase_timer) { + session_timeout = session->login_phase_timer; + if (time_before_eq(session_timeout, jiffies)) { + printk("iSCSI: login phase for session %p (rx %d, tx %d) timed out at %lu, timeout was set for %lu\n", + session, session->rx_pid, session->tx_pid, jiffies, session_timeout); + session->login_phase_timer = 0; + smp_mb(); + session_timeout = 0; + iscsi_drop_session(session); + } + } + } + else { + /* check full-feature phase timeouts. 
*/ + if (atomic_read(&session->num_active_tasks)) + timeout = session->active_timeout; + else + timeout = session->idle_timeout; + + if (timeout) { + if (session->ping_timeout && + time_before_eq(session->last_rx + (timeout * HZ) + (session->ping_timeout * HZ), jiffies)) { + /* should have received something by now, kill the connection */ + if ((session->last_kill == 0) || time_before_eq(session->last_kill + HZ, jiffies)) { + + session->last_kill = jiffies; + + printk("iSCSI: %lu second timeout expired for session %p, rx %lu, ping %lu, now %lu\n", + timeout + session->ping_timeout, session, session->last_rx, session->last_ping, jiffies); + + iscsi_drop_session(session); + + session_timeout = jiffies + HZ; + } + else + session_timeout = 0; + } + else if (time_before_eq(session->last_rx + (timeout * HZ), jiffies)) { + + if (time_before_eq(session->last_ping, session->last_rx)) { + /* send a ping to try to provoke some traffic */ + DEBUG_FLOW("iSCSI: timer queuing ping for session %p, rx %lu, ping %lu, now %lu\n", + session, session->last_rx, session->last_ping, jiffies); + session->last_ping = jiffies - 1; + + wake_tx_thread(TX_PING, session); + } + session_timeout = session->last_rx + (timeout * HZ) + (session->ping_timeout * HZ); + } + else { + if (atomic_read(&session->num_active_tasks)) { + session_timeout = session->last_rx + (session->active_timeout * HZ); + } + else { + unsigned long active_timeout, idle_timeout; + + /* session is idle, but may become active without the timer being notified, + * so use smaller of (now + active_timeout, last_rx + idle_timeout) + */ + idle_timeout = session->last_rx + (session->idle_timeout * HZ); + active_timeout = jiffies + (session->active_timeout * HZ); + if (time_before_eq(idle_timeout, active_timeout)) { + session_timeout = idle_timeout; + } + else { + session_timeout = active_timeout; + } + } + } + } + + /* we limit how long we'll wait for a task mgmt response, to avoid blocking + * forever in error recovery. 
+ */ + if (session->task_mgmt_response_deadline && + time_before_eq(session->task_mgmt_response_deadline, jiffies)) + { + printk("iSCSI: session %p task mgmt %u response timeout at %lu\n", session, session->mgmt_itt, jiffies); + session->task_mgmt_response_deadline = 0; + smp_mb(); + + /* tell the tx thread that the task mgmt PDU timed out, and have it escalate error recovery */ + set_bit(SESSION_TASK_MGMT_TIMEDOUT, &session->control_bits); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + + if ((session_timeout == 0) || time_before(session->task_mgmt_response_deadline, session_timeout)) + session_timeout = session->task_mgmt_response_deadline; + } + + /* there's a separate deadline for responses to requested resets, so that error recovery + * and reset requests don't get in each other's way. + */ + if (session->reset_response_deadline && + time_before_eq(session->reset_response_deadline, jiffies)) + { + session->reset_response_deadline = 0; + smp_mb(); + + if (test_and_clear_bit(SESSION_RESET_REQUESTED, &session->control_bits)) { + /* FIXME: what should we do when a requested reset times out? + * for now, just give up on the reset and drop the session. The + * target should always reply, so we probably have a network + * problem. + */ + printk("iSCSI: session %p timed out waiting for mgmt %u reset response, dropping session at %lu\n", + session, session->warm_reset_itt, jiffies); + iscsi_drop_session(session); + return 0; + } + + if ((session_timeout == 0) || time_before(session->task_mgmt_response_deadline, session_timeout)) + session_timeout = session->task_mgmt_response_deadline; + } + + if (test_bit(SESSION_LOGOUT_REQUESTED, &session->control_bits)) { + /* we're waiting for tasks to complete before logging out. + * no need to check the CmdSN window, since we won't be starting any more tasks. 
+ */ + if (time_before_eq(session->logout_response_deadline, jiffies)) { + /* passed the deadline for a logout response, just drop the session */ + printk("iSCSI: session %p logout response timeout at %lu, dropping session\n", session, jiffies); + session->logout_response_deadline = 0; + session->logout_deadline = 0; + smp_mb(); + iscsi_drop_session(session); + } + else if (time_before_eq(session->logout_deadline, jiffies)) { + /* send a logout */ + DEBUG_INIT("iSCSI: session %p logout deadline reached at %lu\n", session, jiffies); + session->logout_deadline = 0; + smp_mb(); + wake_tx_thread(TX_LOGOUT, session); + if ((session_timeout == 0) || time_before(session->logout_response_deadline, session_timeout)) + session_timeout = session->logout_response_deadline; + } + else { + if ((session_timeout == 0) || time_before(session->logout_deadline, session_timeout)) + session_timeout = session->logout_deadline; + } + } + else if (test_bit(SESSION_WINDOW_CLOSED, &session->control_bits) && + time_before_eq(session->last_window_check + HZ, jiffies)) { + /* command window closed, ping once a second to ensure we find out + * when it re-opens. Target should send us an update when it does, + * but we're not very trusting of target correctness. + */ + session->last_window_check = jiffies; + printk("iSCSI: session %p command window closed, ExpCmdSN %u, MaxCmdSN %u, polling target at %lu\n", + session, session->ExpCmdSn, session->MaxCmdSn, jiffies); + + /* request a window update from the target with Nops */ + wake_tx_thread(TX_PING, session); + + if ((session_timeout == 0) || time_before(session->last_window_check + HZ, session_timeout)) + session_timeout = session->last_window_check + HZ; + } + } + + return session_timeout; +} +/* + * FIXME: it'd probably be cleaner to move the timeout logic to the rx thread. + * The only danger is if the rx thread somehow blocks indefinitely. 
+ * Doing timeouts here makes sure the timeouts get checked, at the + * cost of having this code constantly loop. + */ +static int iscsi_timer_thread(void *vtaskp) +{ + iscsi_session_t *session; + iscsi_hba_t *hba; + + /* become a child of init, and abandon any user space resources */ + sprintf(current->comm, "iscsi-timer"); + iscsi_daemonize(); + + iscsi_timer_pid = current->pid; + smp_mb(); + DEBUG_INIT("iSCSI: timer pid %d starting at %lu\n", iscsi_timer_pid, jiffies); + + LOCK_SIGNALS(); + /* Block all signals except SIGKILL */ + siginitsetinv(¤t->blocked, sigmask(SIGKILL)); + RECALC_PENDING_SIGNALS; + UNLOCK_SIGNALS(); + + /* wait for the module to initialize */ + while (test_bit(0, &init_module_complete) == 0) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + if (signal_pending(current)) { + iscsi_timer_running = 0; + smp_mb(); + return 0; + } + } + + DEBUG_INIT("iSCSI: timer waiting for HBA at %lu\n", jiffies); + while (!signal_pending(current)) { + spin_lock(&iscsi_hba_list_lock); + hba = iscsi_hba_list; + spin_unlock(&iscsi_hba_list_lock); + + if (hba) + break; + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + } + + DEBUG_INIT("iSCSI: timer looping over HBAs at %lu\n", jiffies); + +continue_timer_thread: + + while (!signal_pending(current)) { + unsigned long next_timeout = jiffies + (5 * HZ); +#if (ISCSI_MIN_CANQUEUE != ISCSI_MAX_CANQUEUE) + int can_queue = 0; +#endif + + spin_lock(&iscsi_hba_list_lock); + hba = iscsi_hba_list; + while (hba) { + DECLARE_NOQUEUE_FLAGS; + + SPIN_LOCK_NOQUEUE(&hba->session_lock); + session = hba->session_list_head; + while (session) { + unsigned long session_timeout = 0; + + if (LOG_ENABLED(ISCSI_LOG_ALLOC)) + printk("iSCSI: session %p, rx %5u, tx %5u, %u luns, %3u r, %3u d, %3u n, %3u t, bits 0x%08lx at %lu\n", + session, session->rx_pid, session->tx_pid, session->num_luns, + atomic_read(&session->num_retry_cmnds), session->num_deferred_cmnds, + 
atomic_read(&session->num_cmnds), atomic_read(&session->num_active_tasks), + session->control_bits, jiffies); + +#if (ISCSI_MIN_CANQUEUE != ISCSI_MAX_CANQUEUE) + if (!sna_lt(session->MaxCmdSn, session->CmdSn)) { + /* record how many more commands we can send on this session */ + can_queue += max_tasks_for_session(session); + } +#endif + session_timeout = check_session_timeouts(session); + + /* find the earliest timeout that might occur, so that we know how long to sleep */ + if (session_timeout && time_before_eq(session_timeout, jiffies)) + printk("iSCSI: ignoring session timeout %lu at %lu, last rx %lu, for session %p\n", + session_timeout, jiffies, session->last_rx, session); + else if (session_timeout && time_before(session_timeout, next_timeout)) + next_timeout = session_timeout; + + session = session->next; + } + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + +#if (ISCSI_MIN_CANQUEUE != ISCSI_MAX_CANQUEUE) + /* dynamically adjust the number of commands the HBA will accept, based + * on each session's CmdSN window. + */ + if (can_queue > ISCSI_MAX_CANQUEUE) { + /* to avoid exhausting system resources, clamp the maximum number of commands + * the driver will accept. This hopefully fixes the stalls seen when sessions drop + * and the daemon can't get a new session up because it's blocked on something. 
+ */ + hba->host->can_queue = ISCSI_MAX_CANQUEUE; + smp_mb(); + } + else if (can_queue > ISCSI_MIN_CANQUEUE) { + hba->host->can_queue = can_queue; + smp_mb(); + } + else { + hba->host->can_queue = ISCSI_MIN_CANQUEUE; + smp_mb(); + } +#endif + + if (LOG_ENABLED(ISCSI_LOG_ALLOC)) + printk("iSCSI: timer - host %d can_queue %d at %lu\n", + hba->host->host_no, hba->host->can_queue, jiffies); + + hba = hba->next; + } + spin_unlock(&iscsi_hba_list_lock); + + /* possibly start LUN probing */ + if (iscsi_lun_probe_start) { + if (time_before_eq(iscsi_lun_probe_start, jiffies)) { + iscsi_possibly_start_lun_probing(); + } + else if (time_before_eq(iscsi_lun_probe_start, next_timeout)) { + next_timeout = iscsi_lun_probe_start; + } + } + + /* sleep for a while */ + if (time_before(jiffies, next_timeout)) { + unsigned long sleep; + + /* sleep til the next time a timeout might occur, and handle jiffies wrapping */ + if (next_timeout < jiffies) + sleep = (ULONG_MAX - jiffies + next_timeout); + else + sleep = (next_timeout - jiffies); + DEBUG_FLOW("iSCSI: timer sleeping for %lu jiffies, now %lu, next %lu, HZ %u\n", + sleep, jiffies, next_timeout, HZ); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(sleep); + if (signal_pending(current)) + goto finished; + } + else { + /* this should never happen, but make sure we block for at least a little while + * if it does somehow, otherwise it'll lock up the machine and be impossible + * to debug what went wrong. 
+ */ + DEBUG_FLOW("iSCSI: timer forced to sleep, now %lu, next %lu, HZ %u\n", + jiffies, next_timeout, HZ); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + if (signal_pending(current)) + goto finished; + } + } + + finished: + /* timer finished */ + + if ((this_is_iscsi_boot) && (!iscsi_system_is_rebooting)) { + printk("\niSCSI: timer_thread got signalled\n"); + flush_signals(current); + goto continue_timer_thread; + } + + DEBUG_INIT("iSCSI: timer leaving kernel at %lu\n", jiffies); + + set_current_state(TASK_RUNNING); + + iscsi_timer_running = 0; + iscsi_timer_pid = 0; + smp_mb(); + + return 0; +} + +/* shutdown every session on the HBA */ +static int iscsi_shutdown_hba(iscsi_hba_t *hba) +{ + int num_sessions = 0; + iscsi_session_t *session; + DECLARE_NOQUEUE_FLAGS; + + /* FIXME: we lose info on LUNs probed when this happens. After + * this, the kernel module must be reloaded in order for another + * LUN probe to work correctly. Just restarting the daemon causes + * LUN probe attempts, but the kernel's scsi.c will detect that + * the device is already on the HBA's device list and error out + * the add-single-device. 
+ */ + + /* ensure no more sessions get added to the HBA while we're trying to shut it down */ + set_bit(ISCSI_HBA_SHUTTING_DOWN, &hba->flags); + + do { + num_sessions = 0; + + SPIN_LOCK_NOQUEUE(&hba->session_lock); + for (session = hba->session_list_head; session; session = session->next) { + num_sessions++; + set_bit(SESSION_TERMINATING, &session->control_bits); + if ((session->last_kill == 0) || time_before_eq(session->last_kill + (5 * HZ), jiffies)) { + session->last_kill = jiffies; + iscsi_drop_session(session); + } + } + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(20)); + + } while (num_sessions); + + /* let sessions get added again */ + clear_bit(ISCSI_HBA_SHUTTING_DOWN, &hba->flags); + smp_mb(); + + return 1; +} + +static int iscsi_shutdown(void) +{ + iscsi_hba_t *hba; + iscsi_session_t *session; + int num_sessions = 0; + pid_t pid; + DECLARE_NOQUEUE_FLAGS; + + /* terminate every session on every HBA */ + if (this_is_iscsi_boot && !iscsi_system_is_rebooting) + printk("iSCSI: driver shutdown killing all sessions, except session to ROOT disk\n"); + else + printk("iSCSI: driver shutdown killing all sessions\n"); + + do { + num_sessions = 0; + + spin_lock(&iscsi_hba_list_lock); + for (hba = iscsi_hba_list; hba; hba = hba->next) { + set_bit(ISCSI_HBA_SHUTTING_DOWN, &hba->flags); + SPIN_LOCK_NOQUEUE(&hba->session_lock); + for (session = hba->session_list_head; session; session = session->next) { + if (!session->this_is_root_disk || iscsi_system_is_rebooting) { + num_sessions++; + set_bit(SESSION_TERMINATING, &session->control_bits); + if (session->last_kill == 0) { + DEBUG_INIT("iSCSI: shutdown killing session %p with refcount %u\n", + session, atomic_read(&session->refcount)); + session->last_kill = jiffies; + /* FIXME: should we try to cleanly terminate the session the first time? 
May have locking issues with that */ + iscsi_drop_session(session); + } + else if (time_before_eq(session->last_kill + (5 * HZ), jiffies)) { + printk("iSCSI: shutdown killing session %p with refcount %u\n", + session, atomic_read(&session->refcount)); + session->last_kill = jiffies; + iscsi_drop_session(session); + } + } + } + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + } + spin_unlock(&iscsi_hba_list_lock); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(20)); + } while (num_sessions); + + /* kill the timer */ + if (!this_is_iscsi_boot || iscsi_system_is_rebooting) { + if ((pid = iscsi_timer_pid)) { + printk("iSCSI: driver shutdown killing timer %d\n", pid); + kill_proc(pid, SIGKILL, 1); + } + + printk("iSCSI: driver shutdown waiting for timer to terminate\n"); + while (test_bit(0, &iscsi_timer_running)) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + if (signal_pending(current)) + return 0; + } + } + + /* reset LUN probing */ + iscsi_reset_lun_probing(); + + /* let sessions get added again later */ + spin_lock(&iscsi_hba_list_lock); + for (hba = iscsi_hba_list; hba; hba = hba->next) { + clear_bit(ISCSI_HBA_SHUTTING_DOWN, &hba->flags); + } + spin_unlock(&iscsi_hba_list_lock); + + if (!this_is_iscsi_boot || iscsi_system_is_rebooting) + printk("iSCSI: driver shutdown complete at %lu\n", jiffies); + + return 1; +} + +static int iscsi_reboot_notifier_function(struct notifier_block *this, unsigned long code, void *unused) +{ + mm_segment_t oldfs; + + if (code == SYS_DOWN) { + DEBUG_INIT("\niSCSI: iscsi_reboot_notifier_function called with code SYS_DOWN = 0x%lu\n", code); + } else if (code == SYS_RESTART) { + DEBUG_INIT("\niSCSI: iscsi_reboot_notifier_function called with code SYS_RESTART = 0x%lu\n", code); + } else if (code == SYS_HALT) { + DEBUG_INIT("\niSCSI: iscsi_reboot_notifier_function called with code SYS_HALT = 0x%lu\n", code); + } else if (code == SYS_POWER_OFF) { + DEBUG_INIT("\niSCSI: 
iscsi_reboot_notifier_function called with code SYS_POWER_OFF = 0x%lu\n", code); + } else { + printk("\niSCSI: iscsi_reboot_notifier_function called with unknown code = 0x%lu !!!\n", code); + } + + oldfs = get_fs(); + set_fs( get_ds() ); + /* + * We don't do this in while loop as someone can do CTL-ALT-DEL after a + * 'halt' which will cause this to fail and retried again and again + */ + if(!iscsi_set_if_addr()) { + printk("\niSCSI: iscsi_set_if_addr failed !!!\n"); + schedule_timeout(10 * HZ); + } + set_fs( oldfs ); + + DEBUG_INIT("\niSCSI: Setting iscsi_system_is_rebooting, current->pid = %d this->next = 0x%p iscsi_reboot_notifier_function = 0x%p\n", current->pid, this->next, &iscsi_reboot_notifier_function); + + iscsi_system_is_rebooting = 1; + + while (!iscsi_shutdown()) { + printk("iSCSI: driver shutdown failed\n"); + } + printk("\niSCSI: iscsi_reboot_notifier_function: driver shutdown succeeded\n"); + + oldfs = get_fs(); + set_fs( get_ds() ); + /* + * We don't do this in while loop as someone can do CTL-ALT-DEL after a + * 'halt' which will cause this to fail and retried again and again + */ + if(!iscsi_ifdown()) { + printk("\niSCSI: iscsi_set_if_addr failed !!!\n"); + schedule_timeout(10 * HZ); + } + set_fs( oldfs ); + + return NOTIFY_DONE; +} + +static struct notifier_block iscsi_reboot_notifier = { + notifier_call: iscsi_reboot_notifier_function, + next: NULL, + priority: 255 /* priority, might need to have a relook at the value */ +}; + +/* + * This is called as part of ISCSI_SET_INBP_INFO ioctl which gets called + * only from iscsi-network-boot.c from initrd. iscsid will never call + * ISCSI_SET_INBP_INFO. 
+ */ +/* For now always returns 0 */ +static int set_inbp_info(void) +{ + int tmp_index; + int rv; + struct ifreq req; + int second_index; + mm_segment_t oldfs; + + this_is_iscsi_boot = 1; + printk("\nSetting this_is_iscsi_boot in the kernel\n"); + + printk("\n##############################################################################\n"); + printk("iscsi_inbp_info.myethaddr = %2x %2x %2x %2x %2x %2x\n", + iscsi_inbp_info.myethaddr[0], iscsi_inbp_info.myethaddr[1], + iscsi_inbp_info.myethaddr[2], iscsi_inbp_info.myethaddr[3], + iscsi_inbp_info.myethaddr[4], iscsi_inbp_info.myethaddr[5]); + + printk("iscsi_inbp_info.targetstring = %s\n", + iscsi_inbp_info.targetstring); + printk("iscsi_inbp_info.myipaddr = 0x%x\n", iscsi_inbp_info.myipaddr); + printk("##############################################################################\n"); + + oldfs = get_fs(); + set_fs( get_ds() ); + + for (tmp_index = 1; tmp_index < NETDEV_BOOT_SETUP_MAX; tmp_index++) { + memset(&req,0,sizeof(req)); + req.ifr_ifindex = tmp_index; + rv = dev_ioctl(SIOCGIFNAME ,&req); + DEBUG_INIT("\nifrn_name = %s : hw_addr \n", req.ifr_name); + req.ifr_ifindex = 0; + rv = dev_ioctl(SIOCGIFHWADDR ,&req); + for (second_index = 0; second_index < IFHWADDRLEN; second_index++) { + DEBUG_INIT("\nifr_hwaddr[%d] = 0x%2x ", second_index, + req.ifr_hwaddr.sa_data[second_index]); + } + + if(memcmp(iscsi_inbp_info.myethaddr, req.ifr_hwaddr.sa_data, IFHWADDRLEN)){ + DEBUG_INIT("\nInterface %s does not correspond to the mac address in inbp structure : %2x %2x %2x %2x %2x %2x\n", req.ifr_name, req.ifr_hwaddr.sa_data[0], req.ifr_hwaddr.sa_data[1], req.ifr_hwaddr.sa_data[2], req.ifr_hwaddr.sa_data[3], req.ifr_hwaddr.sa_data[4], req.ifr_hwaddr.sa_data[5]); + } else { + printk("\nInterface %s corresponds to mac address in inbp structure : %2x %2x %2x %2x %2x %2x\n", req.ifr_name, iscsi_inbp_info.myethaddr[0], iscsi_inbp_info.myethaddr[1], iscsi_inbp_info.myethaddr[2], iscsi_inbp_info.myethaddr[3], 
iscsi_inbp_info.myethaddr[4], iscsi_inbp_info.myethaddr[5]); + + strcpy(inbp_interface_name, req.ifr_name); + DEBUG_INIT("\ninbp_interface_name resolved as %s\n", inbp_interface_name); + } + } + + while(!iscsi_set_if_addr()) { + printk("\niSCSI: set_inbp_info: iscsi_set_if_addr failed\n"); + schedule_timeout(10 * HZ); + } + + set_fs( oldfs ); + /* + * We will register reboot notifier only in case of iSCSI boot (not under + * usual driver runs) so we should never need to unregister it. + */ + if ( register_reboot_notifier (&iscsi_reboot_notifier)) { + /* FIXME: return error */ + DEBUG_INIT("\niSCSI: register_reboot_notifier failed\n"); + } else { + DEBUG_INIT("\niSCSI: register_reboot_notifier succeeded\n"); + } + return 0; +} + + +#if defined(HAS_SLAVE_CONFIGURE) + +int iscsi_slave_alloc(Scsi_Device *dev) +{ + return 0; +} + +int iscsi_slave_configure(Scsi_Device *dev) +{ + unsigned char depth; + + /* select queue depth and tcq for this device */ + if (dev->tagged_supported) { + depth = ISCSI_CMDS_PER_LUN; + scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, depth); + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: enabled tagged command queueing for device %p (%u %u %u %u), type 0x%x, depth %u\n", + dev, dev->host->host_no, dev->channel, dev->id, dev->lun, dev->type, depth); + } + else if (force_tcq) { + depth = ISCSI_CMDS_PER_LUN; + scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, depth); + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: forced tagged command queueing for device %p (%u %u %u %u), type 0x%x, depth %u\n", + dev, dev->host->host_no, dev->channel, dev->id, dev->lun, dev->type, depth); + } + else { + depth = untagged_queue_depth; + scsi_adjust_queue_depth(dev, 0, depth); + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: tagged command queueing not supported for device %p (%u %u %u %u), type 0x%x, depth %u\n", + dev, dev->host->host_no, dev->channel, dev->id, dev->lun, dev->type, depth); + } + + return 0; +} + +void iscsi_slave_destroy(Scsi_Device *dev) +{ 
+} + +#elif defined(HAS_NEW_SLAVE_ATTACH) + +int iscsi_slave_attach(Scsi_Device *dev) +{ + unsigned char depth; + + /* select queue depth and tcq for this device */ + if (dev->tagged_supported) { + depth = ISCSI_CMDS_PER_LUN; + scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, depth); + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: enabled tagged command queueing for device %p (%u %u %u %u), type 0x%x, depth %u\n", + dev, dev->host->host_no, dev->channel, dev->id, dev->lun, dev->type, depth); + } + else if (force_tcq) { + depth = ISCSI_CMDS_PER_LUN; + scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, depth); + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: forced tagged command queueing for device %p (%u %u %u %u), type 0x%x, depth %u\n", + dev, dev->host->host_no, dev->channel, dev->id, dev->lun, dev->type, depth); + } + else { + depth = untagged_queue_depth; + scsi_adjust_queue_depth(dev, 0, depth); + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: tagged command queueing not supported for device %p (%u %u %u %u), type 0x%x, depth %u\n", + dev, dev->host->host_no, dev->channel, dev->id, dev->lun, dev->type, depth); + } + + return 0; +} + +void iscsi_slave_detach(Scsi_Device *dev) +{ +} + +#elif defined(HAS_SELECT_QUEUE_DEPTHS) + +static void iscsi_select_queue_depths(struct Scsi_Host *host, Scsi_Device *device_list) +{ + Scsi_Device *dev; + + DEBUG_INIT("iSCSI: selecting queue depths for host #%u\n", host->host_no); + + for (dev = device_list; dev; dev = dev->next) { + if (dev->host != host) continue; + + if (force_tcq) { + dev->queue_depth = ISCSI_CMDS_PER_LUN; + if (dev->tagged_supported) { + if (dev->tagged_queue == 0) + printk("iSCSI: enabled tagged command queueing for (%u %u %u %u), type 0x%x, depth %d\n", + host->host_no, dev->channel, dev->id, dev->lun, dev->type, dev->queue_depth); + else + DEBUG_INIT("iSCSI: enabled tagged command queueing for (%u %u %u %u), type 0x%x, depth %d\n", + host->host_no, dev->channel, dev->id, dev->lun, dev->type, 
dev->queue_depth); + } + else { + if (dev->tagged_queue == 0) + printk("iSCSI: forced tagged command queueing for (%u %u %u %u), type 0x%x, depth %d\n", + host->host_no, dev->channel, dev->id, dev->lun, dev->type, dev->queue_depth); + else + DEBUG_INIT("iSCSI: forced tagged command queueing for (%u %u %u %u), type 0x%x, depth %d\n", + host->host_no, dev->channel, dev->id, dev->lun, dev->type, dev->queue_depth); + } + dev->tagged_queue = 1; + } + else if (dev->tagged_supported) { + dev->tagged_queue = 1; + dev->queue_depth = ISCSI_CMDS_PER_LUN; + DEBUG_INIT("iSCSI: enabled tagged command queueing for (%u %u %u %u), type 0x%x, depth %d\n", + host->host_no, dev->channel, dev->id, dev->lun, dev->type, dev->queue_depth); + } + else { + dev->tagged_queue = 0; + dev->queue_depth = untagged_queue_depth; + if (LOG_ENABLED(ISCSI_LOG_INIT)) + printk("iSCSI: tagged command queueing not supported for (%u %u %u %u), type 0x%x, depth %d\n", + host->host_no, dev->channel, dev->id, dev->lun, dev->type, dev->queue_depth); + } + } +} + +#endif + + +int iscsi_detect( Scsi_Host_Template *sht ) +{ + struct Scsi_Host *sh; + iscsi_hba_t *hba; + unsigned char cache_name[20]; + + sht->proc_name = "iscsi"; + + sh = scsi_register( sht, sizeof(iscsi_hba_t) ); + if (!sh ) { + printk("iSCSI: Unable to register iSCSI HBA\n"); + return 0; + } + + /* zero these now to disable the scan done during scsi_register_host. + * iscsi_probe_luns will set them later. 
+ */ + sh->max_id = 0; + sh->max_lun = 0; + sh->max_channel = 0; + +#if defined(HAS_SELECT_QUEUE_DEPTHS) + sh->select_queue_depths = iscsi_select_queue_depths; +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,0) + /* indicate the maximum CDB length we can accept */ + sh->max_cmd_len = ISCSI_MAX_CMD_LEN; +#endif + + hba = (iscsi_hba_t *)sh->hostdata; + memset( hba, 0, sizeof(iscsi_hba_t) ); + + hba->next = NULL; + hba->host = sh; + + /* list of sessions on this HBA */ + spin_lock_init(&hba->session_lock); + hba->session_list_head = NULL; + hba->session_list_tail = NULL; + atomic_set(&hba->num_sessions, 0); + + /* pool of iscsi tasks */ + /* Note: we uniqify the cache name, since the kernel bugchecks if you try to create + * a name that already exists. Since kmem_cache_destroy may fail, a unique name + * keeps the kernel from panicing if the module is unloaded and reloaded. + */ + sprintf(cache_name, "iscsi_%.10u", (unsigned int)(jiffies & 0xFFFFFFFFU)); + if (iscsi_reap_tasks) { + printk("iSCSI: allocating task cache %s with reaping enabled\n", cache_name); + hba->task_cache = kmem_cache_create(cache_name, sizeof(iscsi_task_t), 0, 0, iscsi_task_ctor, NULL); + } + else { + printk("iSCSI: allocating task cache %s with reaping disabled\n", cache_name); + hba->task_cache = kmem_cache_create(cache_name, sizeof(iscsi_task_t), 0, SLAB_NO_REAP, iscsi_task_ctor, NULL); + } + if (hba->task_cache) { + iscsi_task_t *head = NULL, *task; + int n; + + /* try to provoke some slab allocation while we can safely block. + * this probably won't accomplish much without SLAB_NO_REAP, + * but it won't hurt in that case either, so we always do it. + */ + /* FIXME: is there some way to do this on all processors, so that we prime + * the CPU cache for each processor on SMP machines? smp_call_function() + * says the function shouldn't block, which means we couldn't use SLAB_KERNEL. 
+ */ + for (n = 0; n < ISCSI_PREALLOCATED_TASKS; n++) { + task = kmem_cache_alloc(hba->task_cache, SLAB_KERNEL); + if (task) { + task->next = head; + head = task; + } + } + while (head) { + task = head; + head = task->next; + task->next = NULL; + kmem_cache_free(hba->task_cache, task); + } + } + else { + /* FIXME: do we need to undo the scsi_register, or will iscsi_release get called? */ + printk("iSCSI: kmem_cache_create failed at %lu\n", jiffies); + return 0; + } + + set_bit(ISCSI_HBA_ACTIVE, &hba->flags); + clear_bit(ISCSI_HBA_SHUTTING_DOWN, &hba->flags); + + hba->host_no = sh->host_no; + + /* for now, there's just one iSCSI HBA */ + smp_mb(); + iscsi_hba_list = hba; + smp_mb(); + printk("iSCSI: detected HBA %p, host #%d\n", hba, sh->host_no); + return 1; +} + + +/* cleanup before unloading the module */ +int iscsi_release(struct Scsi_Host *sh) +{ + iscsi_hba_t *hba; + + hba = (iscsi_hba_t *)sh->hostdata; + if ( ! hba ) { + return FALSE; + } + + printk("iSCSI: releasing HBA %p, host #%d\n", hba, hba->host->host_no); + set_bit(ISCSI_HBA_RELEASING, &hba->flags); + smp_mb(); + + /* remove all sessions on this HBA, and prevent any from being added */ + if (!iscsi_shutdown_hba(hba)) { + printk("iSCSI: can't release HBA %p, host #%u failed to shutdown\n", hba, sh->host_no); + return FALSE; + } + + /* remove from the iSCSI HBA list */ + spin_lock(&iscsi_hba_list_lock); + if (hba == iscsi_hba_list) { + iscsi_hba_list = iscsi_hba_list->next; + } + else { + iscsi_hba_t *prior = iscsi_hba_list; + + while (prior && prior->next != hba) + prior = prior->next; + if (prior && prior->next == hba) + prior->next = hba->next; + } + spin_unlock(&iscsi_hba_list_lock); + + /* free this HBA's tasks */ + if (hba->task_cache) { + DEBUG_INIT("iSCSI: HBA %p destroying task cache %p at %lu\n", hba, hba->task_cache, jiffies); + if (kmem_cache_destroy(hba->task_cache)) { + printk("iSCSI: HBA %p failed to destroy task cache %p at %lu\n", hba, hba->task_cache, jiffies); + + 
set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + + printk("iSCSI: HBA %p destroying task cache %p again at %lu\n", hba, hba->task_cache, jiffies); + if (kmem_cache_destroy(hba->task_cache)) { + printk("iSCSI: HBA %p failed to destroy task cache %p again, giving up at %lu\n", + hba, hba->task_cache, jiffies); + } + } + + hba->task_cache = NULL; + } + + scsi_unregister( sh ); + + printk("iSCSI: released HBA %p\n", hba); + return TRUE; +} + +/* remove a Scsi_Cmnd from a singly linked list joined by the host_scribble pointers. */ +static int remove_cmnd(Scsi_Cmnd *sc, Scsi_Cmnd **head, Scsi_Cmnd **tail) +{ + if (!sc || !head || !tail) { + printk("iSCSI: bug - remove_cmnd %p, head %p, tail %p\n", sc, head, tail); + return 0; + } + + if (sc == *head) { + /* it's the head, remove it */ + *head = (Scsi_Cmnd *)sc->host_scribble; /* next */ + if (*head == NULL) + *tail = NULL; + sc->host_scribble = NULL; + return 1; + } + else if (*head) { + Scsi_Cmnd *prior, *next; + + /* try find the command prior to sc */ + prior = *head; + next = (Scsi_Cmnd *)prior->host_scribble; + while (next && (next != sc)) { + prior = next; + next = (Scsi_Cmnd *)prior->host_scribble; /* next command */ + } + if (prior && (next == sc)) { + /* remove the command */ + prior->host_scribble = sc->host_scribble; + if (*tail == sc) + *tail = prior; + sc->host_scribble = NULL; + return 1; + } + } + + return 0; +} + +/* unconditionally remove the cmnd from all driver data structures + * The probing code uses this when cmnds time out or the probe is killed. + * It aborts the command on our side, but doesn't inform the target. + * Since the cmnd is either INQUIRY or REPORT_LUNs, the target should + * always complete the command, and we just discard the response if + * it's already been removed from our data structures. 
+ */ +int iscsi_squash_cmnd(iscsi_session_t *session, Scsi_Cmnd *sc) +{ + iscsi_task_t *task; + int ret = 0; + DECLARE_NOQUEUE_FLAGS; + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + if (remove_cmnd(sc, &session->retry_cmnd_head, &session->retry_cmnd_tail)) { + del_command_timer(sc); + ret = 1; + } + else if (remove_cmnd(sc, &session->scsi_cmnd_head, &session->scsi_cmnd_tail)) { + del_command_timer(sc); + ret = 1; + } + else if (remove_cmnd(sc, &session->deferred_cmnd_head, &session->deferred_cmnd_tail)) { + del_command_timer(sc); + ret = 1; + } + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + + if (ret) + return ret; + + /* remove any task for this cmnd */ + spin_lock(&session->task_lock); + if ((task = remove_task_for_cmnd(&session->tx_tasks, sc))) { + /* it's received an R2T, and is queued to have data sent */ + atomic_inc(&task->refcount); + } + else if ((task = find_task_for_cmnd(session, sc))) { + atomic_inc(&task->refcount); + } + + if (task) { + DEBUG_EH("iSCSI: session %p squashing task %p, itt %u\n", session, task, task->itt); + remove_session_task(session, task); + + while (atomic_read(&task->refcount) > 1) { + /* wait for the driver to quit using the task */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + } + + /* delete the timers */ + del_task_timer(task); + del_command_timer(sc); + + /* free the task */ + free_task(session, task); + + ret = 1; + DEBUG_EH("iSCSI: session %p squashed task %p, itt %u\n", session, task, task->itt); + } + spin_unlock(&session->task_lock); + + if (ret == 0) { + printk("iSCSI: session %p couldn't squash cmnd %p\n", session, sc); + } + return ret; +} +/* + * All the docs say we're supposed to reset the device and complete + * all commands for it back to the SCSI layer. 
However, the SCSI
+ * layer doesn't actually count how many commands are completed back
+ * to it after a device reset, but rather just assumes only 1 command,
+ * with a comment saying it should be fixed to handle the case where
+ * there are multiple commands.
+ *
+ * If there are multiple commands, the SCSI layer will blindly
+ * continue on to the next stage of error recovery, even if we
+ * complete all the failed commands back to it after a device reset.
+ * Hopefully the Linux SCSI layer will be fixed to handle this
+ * correctly someday. In the meantime, we do the right thing here, and
+ * make sure the other reset handlers can deal with the case where
+ * they get called with a command that has already been completed back
+ * to the SCSI layer by a device reset.
+ *
+ */
+int
+iscsi_eh_device_reset( Scsi_Cmnd *sc )
+{
+    struct Scsi_Host *host = NULL;
+    iscsi_hba_t *hba = NULL;
+    iscsi_session_t *session = NULL;
+    int ret = FAILED;
+
+    if ( ! sc ) {
+        printk("iSCSI: device reset, no SCSI command\n");
+        return FAILED;
+    }
+    host = sc->host;
+    if (! 
host) { + printk("iSCSI: device reset, no host for SCSI command %p\n", sc); + return FAILED; + } + hba = (iscsi_hba_t *)host->hostdata; + if (!hba) { + printk("iSCSI: device reset, no iSCSI HBA associated with SCSI command %p\n", sc); + return FAILED; + } + + RELEASE_MIDLAYER_LOCK(host); + + /* find the appropriate session for the command */ + session = find_session_for_cmnd(sc); + if (session) { + set_bit(SESSION_RESET_REQUESTED, &session->control_bits); + printk("iSCSI: session %p eh_device_reset at %lu for command %p to (%u %u %u %u), cdb 0x%x\n", + session, jiffies, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + drop_reference(session); + ret = SUCCESS; + } + else { + printk("iSCSI: session %p eh_device_reset failed at %lu, no session for command %p to (%u %u %u %u), cdb 0x%x\n", + session, jiffies, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + ret = FAILED; + } + + REACQUIRE_MIDLAYER_LOCK(host); + return ret; +} + +/* NOTE: due to bugs in the linux SCSI layer (scsi_unjam_host), it's + * possible for this handler to be called even if the device_reset + * handler completed all the failed commands back to the SCSI layer + * with DID_RESET and returned SUCCESS. To compensate for this, we + * must ensure that this reset handler doesn't actually care whether + * the command is still in the driver. Just find the session + * associated with the command, and reset it. + */ +int iscsi_eh_bus_reset( Scsi_Cmnd *sc ) +{ + struct Scsi_Host *host = NULL; + iscsi_hba_t *hba = NULL; + iscsi_session_t *session; + DECLARE_NOQUEUE_FLAGS; + + if ( ! sc ) { + return FAILED; + } + host = sc->host; + if (! 
host) { + printk("iSCSI: bus reset, no host for SCSI command %p\n", sc); + return FAILED; + } + hba = (iscsi_hba_t *)host->hostdata; + if (!hba) { + printk("iSCSI: bus reset, no iSCSI HBA associated with SCSI command %p\n", sc); + return FAILED; + } + + RELEASE_MIDLAYER_LOCK(host); + + SPIN_LOCK_NOQUEUE(&hba->session_lock); + for (session = hba->session_list_head; session; session = session->next) { + if (session->channel == sc->channel) { + set_bit(SESSION_RESET_REQUESTED, &session->control_bits); + printk("iSCSI: session %p eh_bus_reset at %lu for command %p to (%u %u %u %u), cdb 0x%x\n", + session, jiffies, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + } + } + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + + REACQUIRE_MIDLAYER_LOCK(host); + return SUCCESS; +} + + + +int +iscsi_eh_host_reset( Scsi_Cmnd *sc ) +{ + struct Scsi_Host *host = NULL; + iscsi_hba_t *hba = NULL; + iscsi_session_t *session; + DECLARE_NOQUEUE_FLAGS; + + if ( ! sc ) { + return FAILED; + } + host = sc->host; + if (! 
host) {
+        printk("iSCSI: host reset, no host for SCSI command %p\n", sc);
+        return FAILED;
+    }
+    hba = (iscsi_hba_t *)host->hostdata;
+    if (!hba) {
+        printk("iSCSI: host reset, no iSCSI HBA associated with SCSI command %p\n", sc);
+        return FAILED;
+    }
+
+    RELEASE_MIDLAYER_LOCK(host);
+
+    SPIN_LOCK_NOQUEUE(&hba->session_lock);
+    for (session = hba->session_list_head; session; session = session->next) {
+        set_bit(SESSION_RESET_REQUESTED, &session->control_bits);
+        printk("iSCSI: session %p eh_host_reset at %lu for command %p to (%u %u %u %u), cdb 0x%x\n",
+               session, jiffies, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]);
+    }
+    SPIN_UNLOCK_NOQUEUE(&hba->session_lock);
+
+    REACQUIRE_MIDLAYER_LOCK(host);
+    return SUCCESS;
+}
+
+/* try to queue a command to the session, returning a boolean indicating success or failure */
+int iscsi_queue(iscsi_session_t *session, Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
+{
+    DECLARE_NOQUEUE_FLAGS;
+
+    if (session == NULL)
+        return 0;
+
+    /* make sure we can complete it properly later */
+    sc->scsi_done = done;
+    sc->result = 0;
+
+    SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock);
+
+    if (test_bit(SESSION_TERMINATING, &session->control_bits)) {
+        if ((sc->device == NULL) || LOG_ENABLED(ISCSI_LOG_QUEUE) ||
+            (test_bit(DEVICE_LOG_TERMINATING, device_flags(sc->device)) == 0))
+        {
+            /* by default, log this only once per Scsi_Device, to avoid flooding the log */
+            printk("iSCSI: session %p terminating, failing to queue %p cdb 0x%x and any following commands to (%u %u %u %u), %s\n",
+                   session, sc, sc->cmnd[0], session->host_no, sc->channel, sc->target, sc->lun, session->log_name);
+            if (sc->device)
+                set_bit(DEVICE_LOG_TERMINATING, device_flags(sc->device));
+        }
+        SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock);
+        return 0;
+    }
+
+    if (test_bit(SESSION_REPLACEMENT_TIMEDOUT, &session->control_bits)) {
+        if ((sc->device == NULL) || LOG_ENABLED(ISCSI_LOG_QUEUE) ||
+            (test_bit(DEVICE_LOG_REPLACEMENT_TIMEDOUT, 
device_flags(sc->device)) == 0)) + { + /* by default, log this only once per Scsi_Device, to avoid flooding the log */ + printk("iSCSI: session %p replacement timed out, failing to queue %p cdb 0x%x and any following commands to (%u %u %u %u), %s\n", + session, sc, sc->cmnd[0], session->host_no, sc->channel, sc->target, sc->lun, session->log_name); + if (sc->device) + set_bit(DEVICE_LOG_REPLACEMENT_TIMEDOUT, device_flags(sc->device)); + } + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + return 0; + } + +#ifdef DEBUG + /* make sure the command hasn't already been queued */ + { + Scsi_Cmnd *search = session->scsi_cmnd_head; + while (search) { + if (search == sc) { + printk("iSCSI: bug - cmnd %p, state %x, eh_state %x, scribble %p is already queued to session %p\n", + sc, sc->state, sc->eh_state, sc->host_scribble, session); + print_session_cmnds(session); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + return 0; + } + search = (Scsi_Cmnd *)search->host_scribble; + } + } +#endif + + /* initialize Scsi_Pointer fields that we might use later */ + memset(&sc->SCp, 0, sizeof(sc->SCp)); + sc->SCp.ptr = (char *)session; + sc->host_scribble = NULL; + + if (session->print_cmnds > 0) { + session->print_cmnds--; + printk("iSCSI: session %p iscsi_queue printing command at %lu\n", session, jiffies); + print_cmnd(sc); + } + + /* add a command timer that tells us to fail the command back to the OS */ + DEBUG_QUEUE("iSCSI: session %p adding timer to command %p at %lu\n", session, sc, jiffies); + add_command_timer(session, sc, iscsi_command_times_out); + + /* add it to the session's command queue so the tx thread will send it */ + if (session->scsi_cmnd_head) { + /* append at the tail */ + session->scsi_cmnd_tail->host_scribble = (unsigned char *)sc; + session->scsi_cmnd_tail = sc; + } + else { + /* make it the head */ + session->scsi_cmnd_head = session->scsi_cmnd_tail = sc; + } + 
atomic_inc(&session->num_cmnds); + + DEBUG_QUEUE("iSCSI: queued %p to session %p at %lu, %u cmnds, head %p, tail %p\n", + sc, session, jiffies, atomic_read(&session->num_cmnds), + session->scsi_cmnd_head, session->scsi_cmnd_tail); + + ISCSI_TRACE(ISCSI_TRACE_Qd, sc, NULL, sc->retries, sc->timeout_per_command); + wake_tx_thread(TX_SCSI_COMMAND, session); + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + + return 1; +} + +int iscsi_queuecommand(Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *)) +{ + iscsi_hba_t *hba; + iscsi_session_t *session = NULL; + struct Scsi_Host *host; + int queued = 0; + int fake_transport_error = 0; + + host = sc->host; + if (host == NULL) { + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + printk("iSCSI: queuecommand but no Scsi_Host\n"); + sc->result = HOST_BYTE(DID_NO_CONNECT); + set_lun_comm_failure(sc); + done(sc); + return 0; + } + + hba = (iscsi_hba_t *)sc->host->hostdata; + if ( (!hba) || (!test_bit(ISCSI_HBA_ACTIVE, &hba->flags))) { + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + printk("iSCSI: queuecommand but no HBA\n"); + sc->result = HOST_BYTE(DID_NO_CONNECT); + set_lun_comm_failure(sc); + done(sc); + return 0; + } + + if ( ! 
iscsi_timer_running ) { + /* iSCSI coming up or going down, fail the command */ + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + DEBUG_QUEUE("iSCSI: no timer, failing to queue %p to (%u %u %u %u), cdb 0x%x\n", + sc, hba->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + sc->result = HOST_BYTE(DID_NO_CONNECT); + done(sc); + return 0; + } + + if (sc->target >= ISCSI_MAX_TARGET_IDS_PER_BUS) { + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + printk("iSCSI: invalid target id %u, failing to queue %p to (%u %u %u %u), cdb 0x%x\n", + sc->target, sc, hba->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + sc->result = HOST_BYTE(DID_NO_CONNECT); + set_lun_comm_failure(sc); + done(sc); + return 0; + } + if (sc->lun >= ISCSI_MAX_LUNS_PER_TARGET) { + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + printk("iSCSI: invalid LUN %u, failing to queue %p to (%u %u %u %u), cdb 0x%x\n", + sc->lun, sc, hba->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + sc->result = HOST_BYTE(DID_NO_CONNECT); + set_lun_comm_failure(sc); + done(sc); + return 0; + } + /* CDBs larger than 16 bytes require additional header segments, not yet implemented */ + if (sc->cmd_len > ISCSI_MAX_CMD_LEN) { + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + printk("iSCSI: cmd_len %u too large, failing to queue %p to (%u %u %u %u), cdb 0x%x\n", + sc->cmd_len, sc, hba->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + sc->result = HOST_BYTE(DID_NO_CONNECT); + set_lun_comm_failure(sc); + done(sc); + return 0; + } + /* make sure our SG_TABLESIZE limit was respected */ + if (sc->use_sg > ISCSI_MAX_SG) { + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + printk("iSCSI: use_sg %u too large, failing to queue %p to (%u %u %u %u), cdb 0x%x\n", + sc->use_sg, sc, hba->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0]); + sc->result = 
HOST_BYTE(DID_NO_CONNECT); + set_lun_comm_failure(sc); + done(sc); + return 0; + } +#ifdef DEBUG + if (sc->use_sg) { + int index; + struct scatterlist *sglist = (struct scatterlist *)sc->request_buffer; + unsigned int length = 0; + int bogus = 0; + + /* sanity check the sglist, to make sure the segments have at least bufflen */ + for (index = 0; index < sc->use_sg; index++) { + length += sglist[index].length; + if (sglist[index].length == 0) + bogus = 1; + } + + if (bogus || (length < sc->request_bufflen)) { + printk("iSCSI: attempted to queue %p at %lu to (%u %u %u %u), cdb 0x%x, corrupt sglist, sg length %u, buflen %u\n", + sc, jiffies, hba->host->host_no, sc->channel, sc->target, sc->lun, sc->cmnd[0], + length, sc->request_bufflen); + print_cmnd(sc); + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->timeout_per_command); + sc->result = HOST_BYTE(DID_NO_CONNECT); + set_lun_comm_failure(sc); + done(sc); + return 0; + } + } +#endif + + RELEASE_MIDLAYER_LOCK(host); + + DEBUG_QUEUE("iSCSI: queueing %p to (%u %u %u %u) at %lu, cdb 0x%x, cpu%d\n", + sc, hba->host->host_no, sc->channel, sc->target, sc->lun, jiffies, sc->cmnd[0], smp_processor_id()); + + if (hba) { + session = find_session_for_cmnd(sc); + + if (session) { + DEBUG_QUEUE("iSCSI: session %p queuecommand %p at %lu, retries %d, allowed %d, timeout %u\n", + session, sc, jiffies, sc->retries, sc->allowed, sc->timeout_per_command); + + /* record whether I/O commands have been ever been sent on this session, + * to help us decide when we need the session and should retry logins regardless + * of the login status. Ignore all the commands sent by default as part of the + * LUN being scanned or a device being opened, so that sessions that have always + * been idle can be dropped. Of course, this is always true for disks, since + * Linux will do reads looking for a partition table. 
+ */ + switch (sc->cmnd[0]) { + case INQUIRY: + case REPORT_LUNS: + case TEST_UNIT_READY: + case READ_CAPACITY: + case START_STOP: + case MODE_SENSE: + break; + default: + session->commands_queued = 1; + smp_mb(); + break; + } + + /* For testing, possibly fake transport errors for some commands */ + if (session->fake_not_ready > 0) { + session->fake_not_ready--; /* not atomic to avoid overhead, and miscounts won't matter much */ + smp_mb(); + fake_transport_error = 1; + } + else { + /* delete the existing command timer before iscsi_queue adds ours */ + del_command_timer(sc); + + queued = iscsi_queue(session, sc, done); + + if (!queued) + add_completion_timer(sc); /* need a timer for the midlayer to delete */ + } + + drop_reference(session); + } + else { + /* couldn't find a session */ + if ((sc->device == NULL) || LOG_ENABLED(ISCSI_LOG_QUEUE) || + (test_bit(DEVICE_LOG_NO_SESSION, device_flags(sc->device)) == 0)) + { + printk("iSCSI: queuecommand %p failed to find a session for HBA %p, (%u %u %u %u)\n", + sc, hba, hba->host->host_no, sc->channel, sc->target, sc->lun); + if (sc->device) + set_bit(DEVICE_LOG_NO_SESSION, device_flags(sc->device)); + } + } + } + + REACQUIRE_MIDLAYER_LOCK(host); + + if (fake_transport_error) { + printk("iSCSI: session %p faking transport failure for command %p to (%u %u %u %u) at %lu\n", + session, sc, sc->host->host_no, sc->channel, sc->target, sc->lun, jiffies); + /* act as if recv_cmd() received a non-zero iSCSI response */ + memset(sc->sense_buffer, 0, sizeof(sc->sense_buffer)); + set_lun_comm_failure(sc); + sc->result = HOST_BYTE(DID_ERROR) | STATUS_BYTE(STATUS_CHECK_CONDITION); + sc->resid = iscsi_expected_data_length(sc); + done(sc); + } + else if (!queued) { + DEBUG_QUEUE("iSCSI: queuecommand completing %p with DID_NO_CONNECT at %lu\n", sc, jiffies); + ISCSI_TRACE(ISCSI_TRACE_QFailed, sc, NULL, sc->retries, sc->allowed); + sc->result = HOST_BYTE(DID_NO_CONNECT); + sc->resid = iscsi_expected_data_length(sc); + 
set_lun_comm_failure(sc); + done(sc); + /* "queued" successfully, and already completed (with a fatal error), so we still return 0 */ + } + + return 0; +} + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45)) +int iscsi_biosparam(struct scsi_device *sdev, struct block_device *n, sector_t capacity, int geom[]) +{ + /* FIXME: should we use 255h,63s if there are more than 1024 cylinders? */ + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ + geom[2] = (unsigned long)capacity / (64*32); /* cylinders */ + return 1; +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,44)) +# include "sd.h" +int iscsi_biosparam(Disk *disk, struct block_device *bdev, int geom[]) +{ + /* FIXME: should we use 255h,63s if there are more than 1024 cylinders? */ + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ + geom[2] = disk->capacity / (64*32); /* cylinders */ + return 1; +} +#else +# include "sd.h" +int iscsi_biosparam(Disk *disk, kdev_t dev, int geom[]) +{ + /* FIXME: should we use 255h,63s if there are more than 1024 cylinders? */ + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ + geom[2] = disk->capacity / (64*32); /* cylinders */ + DEBUG_INIT("iSCSI: biosparam = %d cylinders, %d heads, %d sectors\n", geom[2], geom[0], geom[1]); + + return 1; +} +#endif + +const char *iscsi_info( struct Scsi_Host *sh ) +{ + iscsi_hba_t *hba; + static char buffer[256]; + char *build_str = BUILD_STR; + + DEBUG_INIT("iSCSI: Info\n"); + hba = (iscsi_hba_t *)sh->hostdata; + if ( ! 
hba ) { + return NULL; + } + + memset(buffer, 0, sizeof(buffer) ); + + if (build_str) { + /* developer-built variant of a 4-digit internal release */ + sprintf(buffer, "iSCSI %d.%d.%d.%d%s variant (%s)", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, DRIVER_INTERNAL_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE); + } + else if (DRIVER_INTERNAL_VERSION > 0) { + /* 4-digit release */ + sprintf(buffer, "iSCSI %d.%d.%d.%d%s (%s)", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, DRIVER_INTERNAL_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE); + } + else { + /* 3-digit release */ + sprintf(buffer, "iSCSI %d.%d.%d%s (%s)", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE); + } + + return buffer; +} + + +static void right_justify(char *buffer, int fieldsize) +{ + char *src = buffer; + int length = strlen(buffer); + int shift; + char *dst; + + if (length > fieldsize) { + printk("iSCSI: can't right justify, length %d, fieldsize %d\n", length, fieldsize); + return; + } + + shift = (fieldsize - length); + + if ((length > 0) && (shift > 0)) { + /* memmove it to the right, assuming the buffer is at least one byte longer than fieldsize */ + dst = buffer + fieldsize; + src = buffer + length; + + /* have to copy 1 byte at a time from the end, to avoid clobbering the source */ + while (src >= buffer) { + *dst-- = *src--; + } + + /* fill spaces on the left */ + for (dst = buffer; shift; shift--) + *dst++ = ' '; + } +} + +/* if current_offset is in [start..finish), add str to the buffer */ +static int add_proc_buffer(char *str, off_t start, off_t finish, off_t *current_offset, char **buffer) +{ + int length = strlen(str); + int leading = 0; + + if (*current_offset + length <= start) { + /* don't need any of it */ + *current_offset += length; + return 1; + } + else if (*current_offset + length <= finish) { + /* need everything at or above start */ + if (*current_offset < start) + leading = start - 
*current_offset; + + strcpy(*buffer, str + leading); + *buffer += (length - leading); + *current_offset += length; + return 1; + } + else { + /* need everything at or above start, and before finish */ + if (*current_offset < start) { + leading = start - *current_offset; + strncpy(*buffer, str + leading, finish - start); + *buffer += (finish - start); + *current_offset = finish; + } + return 0; /* no more */ + } +} + + +/* Each LUN line has: + * 2 spaces + ((3-digit field + space) * 4) + 4 spaces + 15 char IP address + 2 spaces + 5-digit port + 2 spaces + * 2 + 16 + 4 + 15 + 2 + 5 + 2 = fixed length 46 + TargetName + newline + * the TargetName ends the line, and has a variable length. + */ +static int add_lun_line(iscsi_session_t *session, int lun, + off_t start, off_t finish, off_t *current_offset, char **buffer) +{ + char str[32]; + + DEBUG_FLOW("iSCSI: add_lun_line %p, %d, %lu, %lu, %lu, %p\n", + session, lun, start, finish, *current_offset, *buffer); + + /* 2 spaces + (4 * (3-digit field + space)) + 4 spaces = 22 chars */ + if (lun >= 0) + sprintf(str, " %3.1u %3.1u %3.1u ", + session->iscsi_bus, session->target_id, lun); + else + sprintf(str, " %3.1u %3.1u ? ", + session->iscsi_bus, session->target_id); + + if (!add_proc_buffer(str, start, finish, current_offset, buffer)) + return 0; + + if (test_bit(SESSION_ESTABLISHED, &session->control_bits)) { + /* up to 15 char IP address + 2 spaces = 17 chars */ + sprintf(str, "%u.%u.%u.%u ", + session->ip_address[0], session->ip_address[1], session->ip_address[2], session->ip_address[3]); + right_justify(str, 17); + + if (!add_proc_buffer(str, start, finish, current_offset, buffer)) + return 0; + + /* 5-digit port + 2 spaces = 7 chars */ + sprintf(str, "%d ", session->port); + right_justify(str, 7); + + if (!add_proc_buffer(str, start, finish, current_offset, buffer)) + return 0; + } + else { + /* fill in '?' for each empty field, so that /proc/scsi/iscsi can easily + * be processed by tools such as awk and perl. 
+ */
+        sprintf(str, " ? ? ");
+        if (!add_proc_buffer(str, start, finish, current_offset, buffer))
+            return 0;
+    }
+
+    if (!add_proc_buffer(session->TargetName, start, finish, current_offset, buffer))
+        return 0;
+
+    /* and a newline */
+    sprintf(str, "\n");
+    if (!add_proc_buffer(str, start, finish, current_offset, buffer))
+        return 0;
+
+    /* keep going */
+    return 1;
+}
+
+/* Show LUNs for every session on the HBA. The parameters tell us
+ * what part of the /proc "file" we're supposed to put in the buffer.
+ * This is somewhat broken, since the data may change in between the
+ * multiple calls to this function, since we can't keep holding the
+ * locks. Implemented by throwing away bytes that we would have
+ * written to the buffer until we reach <start>, and then putting
+ * everything before <finish> in the buffer. Return when we run
+ * out of data, or out of buffer. To avoid oddities in the output,
+ * we must ensure that the size of the output doesn't vary while /proc
+ * is being read. We can easily make each line a fixed size, but
+ * if the number of LUNs or sessions varies during a /proc read,
+ * the user loses.
+ */
+static int show_session_luns(iscsi_session_t *session,
+                             off_t start, off_t finish, off_t *current_offset, char **buffer)
+{
+    /* if we've already found LUNs, show them all */
+    int lfound = 0;
+    int l;
+
+    /* FIXME: IPv6 */
+    if (session->ip_length != 4)
+        return 1;
+
+    for (l=0; l<ISCSI_MAX_LUNS_PER_TARGET; l++) {
+        if (test_bit(l, session->luns_activated)) {
+            lfound += 1;
+
+            if (!add_lun_line(session, l, start, finish, current_offset, buffer)) {
+                DEBUG_FLOW("iSCSI: show session luns returning 0 with current offset %lu, buffer %p\n",
+                           *current_offset, buffer);
+                return 0;
+            }
+        }
+    }
+
+    /* if we haven't found any LUNs, use ? for a LUN number */
+    if ( ! 
lfound ) { + if (!add_lun_line(session, -1, start, finish, current_offset, buffer)) { + DEBUG_FLOW("iSCSI: show session luns returning 0 with current offset %lu, buffer %p\n", + *current_offset, buffer); + return 0; + } + } + else { + DEBUG_FLOW("iSCSI: show session luns returning 1 with current offset %lu, buffer %p\n", + *current_offset, buffer); + } + + return 1; +} + + + +/* returns number of bytes matched */ +static int find_keyword(char *start, char *end, char *key) +{ + char *ptr = start; + int key_len = strlen(key); + + /* skip leading whitespace */ + while ((ptr < end) && is_space(*ptr)) + ptr++; + + /* compare */ + if (((end - ptr) == key_len) && !memcmp(key, ptr, key_len)) { + return (ptr - start) + key_len; + } + else if (((end - ptr) > key_len) && !memcmp(key, ptr, key_len) && is_space(ptr[key_len])) { + return (ptr - start) + key_len; + } + else { + return 0; + } +} + +static int find_number(char *start, char *end, int *number) +{ + char *ptr = start; + int found = 0; + int acc = 0; + + /* skip leading whitespace */ + while ((ptr < end) && is_space(*ptr)) + ptr++; + + while (ptr < end) { + if (is_space(*ptr)) { + break; + } + else if (is_digit(*ptr)) { + found = 1; + acc = (acc * 10) + (*ptr - '0'); + } + else { + /* something bogus */ + return 0; + } + ptr++; + } + + if (found) { + if (number) + *number = acc; + return (ptr - start); + } + else + return 0; +} + +static int find_ip(char *start, char *end, char *addr) +{ + char *ptr = start; + char *ptr1; + int ip_length = 0; + + /* skip leading whitespace */ + while ((ptr < end) && is_space(*ptr)) + ptr++; + + ptr1 = ptr; + while ((ptr1 < end) && !is_space(*ptr1)) { + ptr1++; + ip_length++; + } + if (ip_length) { + memcpy(addr, ptr, ip_length); + return (ptr1 - start); + } + else + return 0; +} + +/* + * *buffer: I/O buffer + * **start: for user reads, driver can report where valid data starts in the buffer + * offset: current offset into a /proc/scsi/iscsi/[0-9]* file + * length: length of buffer + * 
hostno: Scsi_Host host_no + * write: TRUE - user is writing; FALSE - user is reading + * + * Return the number of bytes read from or written to a + * /proc/scsi/iscsi/[0-9]* file. + */ + +int iscsi_proc_info( char *buffer, + char **start, + off_t offset, + int length, + int hostno, + int write) +{ + char *bp = buffer; + iscsi_hba_t *hba; + iscsi_session_t *session; + unsigned int bus = 0, target = 0, lun = 0; + unsigned int completions = 0; + unsigned int aborts = 0; + unsigned int abort_task_sets = 0; + unsigned int lun_resets = 0; + unsigned int warm_resets = 0; + unsigned int cold_resets = 0; + DECLARE_NOQUEUE_FLAGS; + scsi_device_info_t *device_info; + uint32_t lun_bitmap = 0xFF; + + if (!buffer) + return -EINVAL; + + if (write) { + int cmd_len; + char *end = buffer + length; + DECLARE_NOQUEUE_FLAGS; + + if ((cmd_len = find_keyword(bp, end, "log"))) { + unsigned int log_setting = 0; + + bp += cmd_len; + + if ((cmd_len = find_keyword(bp, end, "all")) != 0) { + iscsi_log_settings = 0xFFFFFFFF; + printk("iSCSI: all logging enabled\n"); + } + else if ((cmd_len = find_keyword(bp, end, "none")) != 0) { + iscsi_log_settings = 0; + printk("iSCSI: all logging disabled\n"); + } + else if ((cmd_len = find_keyword(bp, end, "sense")) != 0) { + log_setting = ISCSI_LOG_SENSE; + bp += cmd_len; + + if ((cmd_len = find_keyword(bp, end, "always")) != 0) { + iscsi_log_settings |= LOG_SET(log_setting); + printk("iSCSI: log sense always\n"); + } + else if ((cmd_len = find_keyword(bp, end, "on")) != 0) { + iscsi_log_settings |= LOG_SET(log_setting); + printk("iSCSI: log sense yes\n"); + } + else if ((cmd_len = find_keyword(bp, end, "yes")) != 0) { + iscsi_log_settings |= LOG_SET(log_setting); + printk("iSCSI: log sense yes\n"); + } + else if ((cmd_len = find_keyword(bp, end, "1")) != 0) { + iscsi_log_settings |= LOG_SET(log_setting); + printk("iSCSI: log sense 1\n"); + } + else if ((cmd_len = find_keyword(bp, end, "minimal")) != 0) { + iscsi_log_settings &= ~LOG_SET(log_setting); + 
printk("iSCSI: log sense off\n"); + } + else if ((cmd_len = find_keyword(bp, end, "off")) != 0) { + iscsi_log_settings &= ~LOG_SET(log_setting); + printk("iSCSI: log sense off\n"); + } + else if ((cmd_len = find_keyword(bp, end, "no")) != 0) { + iscsi_log_settings &= ~LOG_SET(log_setting); + printk("iSCSI: log sense no\n"); + } + else if ((cmd_len = find_keyword(bp, end, "0")) != 0) { + iscsi_log_settings &= ~LOG_SET(log_setting); + printk("iSCSI: log sense 0\n"); + } + } + else { + if ((cmd_len = find_keyword(bp, end, "login")) != 0) { + log_setting = ISCSI_LOG_LOGIN; + bp += cmd_len; + } + else if ((cmd_len = find_keyword(bp, end, "init")) != 0) { + log_setting = ISCSI_LOG_INIT; + bp += cmd_len; + } + else if ((cmd_len = find_keyword(bp, end, "queue")) != 0) { + log_setting = ISCSI_LOG_QUEUE; + bp += cmd_len; + } + else if ((cmd_len = find_keyword(bp, end, "alloc")) != 0) { + log_setting = ISCSI_LOG_ALLOC; + bp += cmd_len; + } + else if ((cmd_len = find_keyword(bp, end, "flow")) != 0) { + log_setting = ISCSI_LOG_FLOW; + bp += cmd_len; + } + else if ((cmd_len = find_keyword(bp, end, "error")) != 0) { + log_setting = ISCSI_LOG_ERR; + bp += cmd_len; + } + else if ((cmd_len = find_keyword(bp, end, "eh")) != 0) { + log_setting = ISCSI_LOG_EH; + bp += cmd_len; + } + else if ((cmd_len = find_keyword(bp, end, "retry")) != 0) { + log_setting = ISCSI_LOG_RETRY; + bp += cmd_len; + } + + if (log_setting) { + if ((cmd_len = find_keyword(bp, end, "on")) != 0) { + iscsi_log_settings |= LOG_SET(log_setting); + } + else if ((cmd_len = find_keyword(bp, end, "yes")) != 0) { + iscsi_log_settings |= LOG_SET(log_setting); + } + else if ((cmd_len = find_keyword(bp, end, "1")) != 0) { + iscsi_log_settings |= LOG_SET(log_setting); + } + else if ((cmd_len = find_keyword(bp, end, "off")) != 0) { + iscsi_log_settings &= ~LOG_SET(log_setting); + } + else if ((cmd_len = find_keyword(bp, end, "no")) != 0) { + iscsi_log_settings &= ~LOG_SET(log_setting); + } + else if ((cmd_len = 
find_keyword(bp, end, "0")) != 0) { + iscsi_log_settings &= ~LOG_SET(log_setting); + } + } + } + + printk("iSCSI: log settings %8x\n", iscsi_log_settings); + smp_mb(); + } + else if ((cmd_len = find_keyword(bp, end, "shutdown"))) { + /* try to shutdown the driver */ + if (!iscsi_shutdown()) { + printk("iSCSI: driver shutdown failed\n"); + } + } + else if ((cmd_len = find_keyword(bp, end, "lun"))) { + bp += cmd_len; + + if ((cmd_len = find_number(bp, end, &bus)) == 0) { + printk("iSCSI: /proc/scsi/iscsi couldn't determine bus number of session\n"); + return length; + } + bp += cmd_len; + + if ((cmd_len = find_number(bp, end, &target)) == 0) { + printk("iSCSI: /proc/scsi/iscsi couldn't determine target id number of session\n"); + return length; + } + bp += cmd_len; + + if ((cmd_len = find_number(bp, end, &lun)) == 0) { + printk("iSCSI: /proc/scsi/iscsi couldn't determine logical unit number\n"); + return length; + } + bp += cmd_len; + + session = find_session_by_bus(bus, target); + if (session) { + + if ((cmd_len = find_keyword(bp, end, "ignore"))) { + bp += cmd_len; + cmd_len = find_number(bp, end, &completions); + bp += cmd_len; + cmd_len = find_number(bp, end, &aborts); + bp += cmd_len; + cmd_len = find_number(bp, end, &abort_task_sets); + bp += cmd_len; + cmd_len = find_number(bp, end, &lun_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &warm_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &cold_resets); + + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u LUN %u at %lu, ignore %u completions, %u aborts, %u abort task sets, %u LUN resets, %u warm target resets, %u cold target resets\n", + session, bus, target, lun, jiffies, + completions, aborts, abort_task_sets, lun_resets, warm_resets, cold_resets); + + spin_lock(&session->task_lock); + session->ignore_lun = lun; + session->ignore_completions = completions; + session->ignore_aborts = aborts; + session->ignore_abort_task_sets = abort_task_sets; + session->ignore_lun_resets = 
lun_resets; + session->ignore_warm_resets = warm_resets; + session->ignore_cold_resets = cold_resets; + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "reject"))) { + bp += cmd_len; + cmd_len = find_number(bp, end, &aborts); + bp += cmd_len; + cmd_len = find_number(bp, end, &abort_task_sets); + bp += cmd_len; + cmd_len = find_number(bp, end, &lun_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &warm_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &cold_resets); + + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u LUN %u at %lu, reject %u aborts, %u abort task sets, %u LUN resets, %u warm target resets, %u cold target resets\n", + session, bus, target, lun, jiffies, aborts, + abort_task_sets, lun_resets, warm_resets, cold_resets); + + spin_lock(&session->task_lock); + session->reject_lun = lun; + session->reject_aborts = aborts; + session->reject_abort_task_sets = abort_task_sets; + session->reject_lun_resets = lun_resets; + session->reject_warm_resets = warm_resets; + session->reject_cold_resets = cold_resets; + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "unreachable"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + spin_lock(&session->task_lock); + session->fake_status_lun = lun; + session->fake_status_unreachable = count; + printk("iSCSI: session %p will fake %u iSCSI transport errors from LUN %u at %lu\n", + session, count, lun, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "busy"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + spin_lock(&session->task_lock); + session->fake_status_lun = lun; + session->fake_status_busy = count; + printk("iSCSI: session %p will fake %u SCSI status BUSY responses from LUN %u at %lu\n", + session, count, lun, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = 
find_keyword(bp, end, "queuefull"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + spin_lock(&session->task_lock); + session->fake_status_lun = lun; + session->fake_status_queue_full = count; + printk("iSCSI: session %p will fake %u SCSI status QUEUE_FULL responses from LUN %u at %lu\n", + session, count, lun, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "aborted"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + spin_lock(&session->task_lock); + session->fake_status_lun = lun; + session->fake_status_aborted = count; + printk("iSCSI: session %p will fake %u target command aborts from LUN %u at %lu\n", + session, count, lun, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "tasktimeouts"))) { + iscsi_task_t *t; + unsigned int count = 0xFFFFFFFF; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + printk("iSCSI: session %p faking up to %u task timeouts for LUN %u at %lu\n", + session, count, lun, jiffies); + + spin_lock(&session->task_lock); + /* fake task timeouts, to try to test the error recovery code */ + for (t = session->arrival_order.head; t; t = t->order_next) { + if (count == 0) + break; + + if ((t->lun == lun) && !test_bit(0, &t->timedout)) { + printk("iSCSI: session %p faking task timeout of itt %u, task %p, LUN %u, sc %p at %lu\n", + session, t->itt, t, t->lun, t->scsi_cmnd, jiffies); + + /* make the task look like it timedout */ + del_task_timer(t); + set_bit(t->lun, session->luns_timing_out); + smp_wmb(); + set_bit(0, &t->timedout); + smp_mb(); + + count--; + } + } + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "commandtimeouts"))) { + iscsi_task_t *t; + Scsi_Cmnd *sc; + unsigned int count = 0xFFFFFFFF; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + 
printk("iSCSI: session %p faking up to %u command timeouts for LUN %u at %lu\n", + session, count, lun, jiffies); + + spin_lock(&session->task_lock); + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + /* fake command timeouts for all tasks and queued commands */ + for (t = session->arrival_order.head; t; t = t->order_next) { + if (count == 0) + goto finished_lun; + + if ((t->lun == lun) && t->scsi_cmnd && !test_bit(COMMAND_TIMEDOUT, command_flags(t->scsi_cmnd))) { + printk("iSCSI: session %p faking command timeout of itt %u, task %p, LUN %u, cmnd %p at %lu\n", + session, t->itt, t, t->lun, t->scsi_cmnd, jiffies); + + /* make the task look like it timedout */ + del_command_timer(t->scsi_cmnd); + set_bit(COMMAND_TIMEDOUT, command_flags(t->scsi_cmnd)); + count--; + } + } + for (sc = session->retry_cmnd_head; sc; sc = (Scsi_Cmnd *)sc->host_scribble) { + if (count == 0) + goto finished_lun; + + if (sc->lun == lun) { + printk("iSCSI: session %p faking command timeout of retry cmnd %p, LUN %u, at %lu\n", + session, sc, sc->lun, jiffies); + del_command_timer(sc); + set_bit(COMMAND_TIMEDOUT, command_flags(sc)); + count--; + } + } + for (sc = session->deferred_cmnd_head; sc; sc = (Scsi_Cmnd *)sc->host_scribble) { + if (count == 0) + goto finished_lun; + + if (sc->lun == lun) { + printk("iSCSI: session %p faking command timeout of deferred cmnd %p, LUN %u, at %lu\n", + session, sc, sc->lun, jiffies); + del_command_timer(sc); + set_bit(COMMAND_TIMEDOUT, command_flags(sc)); + count--; + } + } + for (sc = session->scsi_cmnd_head; sc; sc = (Scsi_Cmnd *)sc->host_scribble) { + if (count == 0) + goto finished_lun; + + if (sc->lun == lun) { + printk("iSCSI: session %p faking command timeout of normal cmnd %p, LUN %u, at %lu\n", + session, sc, sc->lun, jiffies); + del_command_timer(sc); + set_bit(COMMAND_TIMEDOUT, command_flags(sc)); + count--; + } + } + + finished_lun: + smp_wmb(); + set_bit(SESSION_COMMAND_TIMEDOUT, &session->control_bits); + smp_wmb(); + + /* wake up the tx thread to 
deal with the timeout */ + set_bit(TX_WAKE, &session->control_bits); + smp_mb(); + /* we can't know which wait_q the tx thread is in (if any), so wake them both */ + wake_up(&session->tx_wait_q); + wake_up(&session->login_wait_q); + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "status"))) { + spin_lock(&session->task_lock); + printk("iSCSI: session %p LUN %u detected=%s, activated=%s, timing out=%s, doing recovery=%s, delaying commands=%s, unreachable=%s\n", + session, lun, + test_bit(lun, session->luns_detected) ? "yes" : "no", + test_bit(lun, session->luns_activated) ? "yes" : "no", + test_bit(lun, session->luns_timing_out) ? "yes" : "no", + test_bit(lun, session->luns_doing_recovery) ? "yes" : "no", + test_bit(lun, session->luns_delaying_commands) ? "yes" : "no", + test_bit(lun, session->luns_unreachable) ? "yes" : "no"); + + spin_unlock(&session->task_lock); + } + + /* done with the session */ + drop_reference(session); + } + else { + printk("iSCSI: /proc/scsi/iscsi failed to find session bsu %u target %u LUN %u\n", bus, target, lun); + } + } + else if ((cmd_len = find_keyword(bp, end, "target"))) { + bp += cmd_len; + + if ((cmd_len = find_number(bp, end, &bus)) == 0) { + printk("iSCSI: /proc/scsi/iscsi couldn't determine host number of session\n"); + return length; + } + bp += cmd_len; + + if ((cmd_len = find_number(bp, end, &target)) == 0) { + printk("iSCSI: /proc/scsi/iscsi couldn't determine target id number of session\n"); + return length; + } + bp += cmd_len; + + session = find_session_by_bus(bus, target); + if (session) { + if ((cmd_len = find_keyword(bp, end, "nop"))) { + unsigned int data_length = 0; + + bp += cmd_len; + if ((cmd_len = find_number(bp, end, &data_length)) == 0) { + printk("iSCSI: session %p Nop test couldn't determine data length\n", session); + return length; + } + bp += cmd_len; + + if (data_length) { + printk("iSCSI: session %p for bus %u target 
%u, %d byte Nop data test requested at %lu\n", + session, bus, target, data_length, jiffies); + iscsi_ping_test_session(session, data_length); + } + else { + printk("iSCSI: session %p for bus %u target %u, 0 byte Nop data test ignored at %lu\n", + session, bus, target, jiffies); + } + } + else if ((cmd_len = find_keyword(bp, end, "portal"))) { + if (session->portal_failover) { + unsigned int portal = 0; + + bp += cmd_len; + if ((cmd_len = find_number(bp, end, &portal)) == 0) { + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u, no portal specified\n", + session, bus, target); + return length; + } + bp += cmd_len; + + spin_lock(&session->portal_lock); +#ifndef DEBUG + if (portal < session->num_portals) { +#endif + session->requested_portal = portal; + session->fallback_portal = session->current_portal; + + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u requesting switch to portal %u at %lu\n", + session, bus, target, portal, jiffies); + + /* request a logout for the current session */ + spin_lock(&session->task_lock); + iscsi_request_logout(session, session->active_timeout, session->active_timeout); + spin_unlock(&session->task_lock); +#ifndef DEBUG + } + else { + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u can't switch to portal %u, only %d portals\n", + session, bus, target, portal, session->num_portals); + } +#endif + spin_unlock(&session->portal_lock); + } + else + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u can't switch to requested portal, because portal failover is disabled.\n", session, bus, target); + } + else if ((cmd_len = find_keyword(bp, end, "probe"))) { + /* try to probe the driver and create symlinks */ + device_info = (scsi_device_info_t *)kmalloc(sizeof(scsi_device_info_t), GFP_KERNEL); + device_info->max_sd_devices = MAX_SCSI_DISKS; + device_info->max_sd_partitions = MAX_SCSI_DISK_PARTITIONS; + device_info->max_sg_devices = MAX_SCSI_GENERICS; + device_info->max_sr_devices = 
MAX_SCSI_CDROMS; + device_info->max_st_devices = MAX_SCSI_TAPES; + printk(" Calling kernel thread for creating symlinks\n"); + iscsi_probe_luns(session, &lun_bitmap, device_info); + } + else if((cmd_len = find_keyword(bp, end, "link"))) { + memset(session->target_link_dir, 0, sizeof(session->target_link_dir)); + sprintf(session->target_link_dir, "%s/bus%d/target%d/", LINK_DIR, bus, target); + } + else if ((cmd_len = find_keyword(bp, end, "address"))) { + if (session->portal_failover) { + char ip[16]; + char address[17]; + int ip_length = 4; + iscsi_portal_info_t *p = NULL; + unsigned int portal = 0; + + bp += cmd_len; + memset(address, 0, sizeof(address)); + if ((cmd_len = find_ip(bp, end, address)) == 0) { + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u, no ip address specified\n", session, bus, target); + } + else { + bp += cmd_len; + p = session->portals; + for (portal=0; portal<session->num_portals; portal++) { + iscsi_inet_aton(address, ip, &ip_length); + if(memcmp(p[portal].ip_address, ip, p[portal].ip_length) == 0) { + break; + } + } + + spin_lock(&session->portal_lock); + if (portal < session->num_portals) { + session->requested_portal = portal; + session->fallback_portal = session->current_portal; + + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u requesting switch to ip %s at %lu\n", session, bus, target, address, jiffies); + /* request a logout for the current session */ + + spin_lock(&session->task_lock); + iscsi_request_logout(session, session->active_timeout, session->active_timeout); + spin_unlock(&session->task_lock); + } + else { + address[16] = '\0'; + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u can't switch to ip %s\n", session, bus, target, address); + } + spin_unlock(&session->portal_lock); + } + } + else + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u can't switch to requested ip, because portal failover is disabled\n", session, bus, target); + } + else if ((cmd_len = 
find_keyword(bp, end, "logout"))) { + unsigned int deadline = 0; + unsigned int response_deadline = 0; + + bp += cmd_len; + if ((cmd_len = find_number(bp, end, &deadline)) == 0) { + deadline = session->active_timeout; + } + bp += cmd_len; + + if ((cmd_len = find_number(bp, end, &response_deadline)) == 0) { + response_deadline = session->active_timeout; + } + bp += cmd_len; + + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u requesting logout at %lu, " + "logout deadline %u seconds, response deadline %u seconds\n", + session, bus, target, jiffies, deadline, response_deadline); + + /* request a logout for the current session */ + spin_lock(&session->task_lock); + iscsi_request_logout(session, session->active_timeout, session->active_timeout); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "drop"))) { + unsigned int time2wait = 0; + + bp += cmd_len; + if ((cmd_len = find_number(bp, end, &time2wait))) { + session->time2wait = time2wait; + smp_mb(); + printk("iSCSI: /proc/scsi/iscsi dropping session %p for bus %u target %u at %lu, time2wait %u\n", + session, bus, target, jiffies, time2wait); + } + else { + printk("iSCSI: /proc/scsi/iscsi dropping session %p for bus %u target %u at %lu\n", + session, bus, target, jiffies); + } + bp += cmd_len; + + iscsi_drop_session(session); + } + else if ((cmd_len = find_keyword(bp, end, "terminate"))) { + printk("iSCSI: /proc/scsi/iscsi terminating session %p for bus %u target %u at %lu\n", + session, bus, target, jiffies); + iscsi_terminate_session(session); + } + else if ((cmd_len = find_keyword(bp, end, "queues"))) { + /* show all of the session's queues */ + spin_lock(&session->task_lock); + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + printk("iSCSI: session %p to %s, bits 0x%08lx, next itt %u at %lu\n", + session, session->log_name, session->control_bits, session->itt, jiffies); + printk("iSCSI: session %p ExpCmdSN %08u, next CmdSN %08u, MaxCmdSN %08u, window closed %lu 
times, full %lu times\n", + session, session->ExpCmdSn, session->CmdSn, session->MaxCmdSn, + session->window_closed, session->window_full); + print_session_tasks(session); + print_session_cmnds(session); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "notready"))) { + unsigned int notready = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &notready); + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + session->fake_not_ready = notready; + printk("iSCSI: session %p will fake %u NOT_READY errors at %lu\n", session, notready, jiffies); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + } + else if ((cmd_len = find_keyword(bp, end, "printcommand"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + session->print_cmnds = count; + printk("iSCSI: session %p will print %u commands at %lu\n", session, count, jiffies); + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + } + else if ((cmd_len = find_keyword(bp, end, "unreachable"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + spin_lock(&session->task_lock); + session->fake_status_lun = -1; + session->fake_status_unreachable = count; + printk("iSCSI: session %p will fake %u iSCSI transport errors at %lu\n", session, count, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "busy"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + spin_lock(&session->task_lock); + session->fake_status_lun = -1; + session->fake_status_busy = count; + printk("iSCSI: session %p will fake %u SCSI status BUSY responses at %lu\n", + session, count, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "queuefull"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, 
&count); + + spin_lock(&session->task_lock); + session->fake_status_lun = -1; + session->fake_status_queue_full = count; + printk("iSCSI: session %p will fake %u SCSI status QUEUE_FULL responses at %lu\n", + session, count, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "aborted"))) { + unsigned int count = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + + spin_lock(&session->task_lock); + session->fake_status_lun = -1; + session->fake_status_aborted = count; + printk("iSCSI: session %p will fake %u target command aborted responses at %lu\n", + session, count, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "tasktimeouts"))) { + iscsi_task_t *t; + unsigned int count = 0xFFFFFFFF; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + printk("iSCSI: session %p faking up to %u task timeouts at %lu\n", session, count, jiffies); + + spin_lock(&session->task_lock); + /* fake task timeouts, to try to test the error recovery code */ + for (t = session->arrival_order.head; t; t = t->order_next) { + if (count == 0) + break; + + if (!test_bit(0, &t->timedout)) { + printk("iSCSI: session %p faking task timeout of itt %u, task %p, LUN %u, sc %p at %lu\n", + session, t->itt, t, t->lun, t->scsi_cmnd, jiffies); + + /* make the task look like it timedout */ + del_task_timer(t); + set_bit(t->lun, session->luns_timing_out); + smp_wmb(); + set_bit(0, &t->timedout); + smp_mb(); + count--; + } + } + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "commandtimeouts"))) { + iscsi_task_t *t; + Scsi_Cmnd *sc; + unsigned int count = 0xFFFFFFFF; + + bp += cmd_len; + cmd_len = find_number(bp, end, &count); + printk("iSCSI: session %p faking up to %u command timeouts at %lu\n", session, count, jiffies); + + spin_lock(&session->task_lock); + SPIN_LOCK_NOQUEUE(&session->scsi_cmnd_lock); + /* fake 
command timeouts for all tasks and queued commands */ + for (t = session->arrival_order.head; t; t = t->order_next) { + if (count == 0) + goto finished; + + if (t->scsi_cmnd && !test_bit(COMMAND_TIMEDOUT, command_flags(t->scsi_cmnd))) { + printk("iSCSI: session %p faking command timeout of itt %u, task %p, LUN %u, cmnd %p at %lu\n", + session, t->itt, t, t->lun, t->scsi_cmnd, jiffies); + + /* make the task look like it timedout */ + del_command_timer(t->scsi_cmnd); + set_bit(COMMAND_TIMEDOUT, command_flags(t->scsi_cmnd)); + count--; + } + } + for (sc = session->retry_cmnd_head; sc; sc = (Scsi_Cmnd *)sc->host_scribble) { + if (count == 0) + goto finished; + + printk("iSCSI: session %p faking command timeout of retry cmnd %p, LUN %u, at %lu\n", + session, sc, sc->lun, jiffies); + del_command_timer(sc); + set_bit(COMMAND_TIMEDOUT, command_flags(sc)); + count--; + } + for (sc = session->deferred_cmnd_head; sc; sc = (Scsi_Cmnd *)sc->host_scribble) { + if (count == 0) + goto finished; + + printk("iSCSI: session %p faking command timeout of deferred cmnd %p, LUN %u, at %lu\n", + session, sc, sc->lun, jiffies); + del_command_timer(sc); + set_bit(COMMAND_TIMEDOUT, command_flags(sc)); + count--; + } + for (sc = session->scsi_cmnd_head; sc; sc = (Scsi_Cmnd *)sc->host_scribble) { + if (count == 0) + goto finished; + + printk("iSCSI: session %p faking command timeout of normal cmnd %p, LUN %u, at %lu\n", + session, sc, sc->lun, jiffies); + del_command_timer(sc); + set_bit(COMMAND_TIMEDOUT, command_flags(sc)); + count--; + } + + finished: + smp_wmb(); + set_bit(SESSION_COMMAND_TIMEDOUT, &session->control_bits); + smp_wmb(); + + /* wake up the tx thread to deal with the timeout */ + set_bit(TX_WAKE, &session->control_bits); + smp_mb(); + /* we can't know which wait_q the tx thread is in (if any), so wake them both */ + wake_up(&session->tx_wait_q); + wake_up(&session->login_wait_q); + + SPIN_UNLOCK_NOQUEUE(&session->scsi_cmnd_lock); + spin_unlock(&session->task_lock); + } + else 
if ((cmd_len = find_keyword(bp, end, "writeheadermismatch"))) { + unsigned int mismatch = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &mismatch); + + spin_lock(&session->task_lock); + session->fake_write_header_mismatch = mismatch; + printk("iSCSI: session %p will fake %u write HeaderDigest mismatches at %lu\n", session, mismatch, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "readdatamismatch"))) { + unsigned int mismatch = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &mismatch); + + spin_lock(&session->task_lock); + session->fake_read_data_mismatch = mismatch; + printk("iSCSI: session %p will fake %u read DataDigest mismatches at %lu\n", session, mismatch, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "writedatamismatch"))) { + unsigned int mismatch = 1; + + bp += cmd_len; + cmd_len = find_number(bp, end, &mismatch); + + spin_lock(&session->task_lock); + session->fake_write_data_mismatch = mismatch; + printk("iSCSI: session %p will fake %u write DataDigest mismatches at %lu\n", session, mismatch, jiffies); + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "ignore"))) { + + bp += cmd_len; + cmd_len = find_number(bp, end, &completions); + bp += cmd_len; + cmd_len = find_number(bp, end, &aborts); + bp += cmd_len; + cmd_len = find_number(bp, end, &abort_task_sets); + bp += cmd_len; + cmd_len = find_number(bp, end, &lun_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &warm_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &cold_resets); + + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u at %lu, ignore %u completions, %u aborts, %u abort task sets, %u LUN resets, %u warm target resets, %u cold target resets\n", + session, bus, target, jiffies, completions, aborts, abort_task_sets, lun_resets, warm_resets, cold_resets); + + spin_lock(&session->task_lock); + session->ignore_lun = -1; + 
session->ignore_completions = completions; + session->ignore_aborts = aborts; + session->ignore_abort_task_sets = abort_task_sets; + session->ignore_lun_resets = lun_resets; + session->ignore_warm_resets = warm_resets; + session->ignore_cold_resets = cold_resets; + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "reject"))) { + + bp += cmd_len; + cmd_len = find_number(bp, end, &aborts); + bp += cmd_len; + cmd_len = find_number(bp, end, &abort_task_sets); + bp += cmd_len; + cmd_len = find_number(bp, end, &lun_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &warm_resets); + bp += cmd_len; + cmd_len = find_number(bp, end, &cold_resets); + + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u at %lu, reject %u aborts, %u abort task sets, %u LUN resets, %u warm target resets, %u cold target resets\n", + session, bus, target, jiffies, aborts, abort_task_sets, lun_resets, warm_resets, cold_resets); + + spin_lock(&session->task_lock); + session->reject_lun = -1; + session->reject_aborts = aborts; + session->reject_abort_task_sets = abort_task_sets; + session->reject_lun_resets = lun_resets; + session->reject_warm_resets = warm_resets; + session->reject_cold_resets = cold_resets; + spin_unlock(&session->task_lock); + } + else if ((cmd_len = find_keyword(bp, end, "reset"))) { + printk("iSCSI: /proc/scsi/iscsi session %p warm target reset requested at %lu\n", session, jiffies); + wake_tx_thread(SESSION_RESET_REQUESTED, session); + } + else if ((cmd_len = find_keyword(bp, end, "commandtimedout"))) { + printk("iSCSI: /proc/scsi/iscsi session %p waking tx thread SESSION_COMMAND_TIMEDOUT at %lu\n", + session, jiffies); + wake_tx_thread(SESSION_COMMAND_TIMEDOUT, session); + } + else if ((cmd_len = find_keyword(bp, end, "tasktimedout"))) { + printk("iSCSI: /proc/scsi/iscsi session %p waking tx thread SESSION_TASK_TIMEDOUT at %lu\n", + session, jiffies); + wake_tx_thread(SESSION_TASK_TIMEDOUT, session); + } + else if ((cmd_len 
= find_keyword(bp, end, "retry"))) { + printk("iSCSI: /proc/scsi/iscsi session %p waking tx thread SESSION_RETRY_COMMANDS at %lu\n", + session, jiffies); + wake_tx_thread(SESSION_RETRY_COMMANDS, session); + } + else if ((cmd_len = find_keyword(bp, end, "txcommand"))) { + printk("iSCSI: /proc/scsi/iscsi session %p waking tx thread TX_SCSI_COMMAND at %lu\n", + session, jiffies); + wake_tx_thread(TX_SCSI_COMMAND, session); + } + else { + printk("iSCSI: /proc/scsi/iscsi session %p for bus %u target %u, unknown command\n", + session, bus, target); + } + + /* done with the session */ + drop_reference(session); + } + else { + printk("iSCSI: /proc/scsi/iscsi failed to find session for bus %u target %u\n", bus, target); + } + } + + /* FIXME: some SCSI read and write tests would be useful as + * well. Allow user to specify read6, write6, read10, + * write10, specify the command queue size (max outstanding), + * total number of commands, command buffer size, starting + * block offset, and block increment per command. This should + * let us do sequential or fixed offset I/O tests, and try to + * determine throughput without having do worry about what the + * SCSI layer or applications above us are capable of. Also, + * consider a flag that controls writing/validating a data + * pattern. We don't always want to do it, but it may be + * useful sometimes. 
+ */ + return length; + } + else { + /* it's a read */ + char *build_str = BUILD_STR; + off_t current_offset = 0; + off_t finish = offset + length; + + DEBUG_FLOW("iSCSI: /proc read, buffer %p, start %p, offset %lu, length %d, hostno %d\n", + buffer, start, offset, length, hostno); + + /* comment header with version number */ + /* FIXME: we assume the buffer length is always large enough for our header */ + if (build_str) { + /* developer-built variant of a 4-digit internal release */ + bp += sprintf(bp, "# iSCSI driver version: %d.%d.%d.%d%s variant (%s)\n#\n", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, DRIVER_INTERNAL_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE); + } + else if (DRIVER_INTERNAL_VERSION > 0) { + /* 4-digit release */ + bp += sprintf(bp, "# iSCSI driver version: %d.%d.%d.%d%s (%s)\n#\n", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, DRIVER_INTERNAL_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE); + } + else { + /* 3-digit release */ + bp += sprintf(bp, "# iSCSI driver version: %d.%d.%d%s (%s)\n#\n", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE); + } + bp += sprintf(bp,"# SCSI: iSCSI:\n"); + bp += sprintf(bp,"# Bus Tgt LUN IP address Port TargetName\n"); + + *start = buffer; + current_offset = bp - buffer; + + if (offset >= current_offset) { + bp = buffer; /* don't need any of that header, toss it all */ + DEBUG_FLOW("iSCSI: /proc skipping header, current offset %lu, buffer %p\n", current_offset, bp); + } + else if (offset != 0) { + /* need only some of the header */ + char *src = buffer + offset; + char *dst = buffer; + + /* memmove what we need to the beginning of the buffer, since we may have + * to return the whole buffer length to avoid prematurely indicating EOF. 
+ */ + while ((*dst++ = *src++)) + ; + + bp = dst; + + DEBUG_FLOW("iSCSI: /proc partial header, current offset %lu, buffer %p\n", current_offset, bp); + } + else { + DEBUG_FLOW("iSCSI: /proc full header, current offset %lu, buffer %p\n", current_offset, bp); + } + + /* find the HBA corresponding to hostno */ + spin_lock(&iscsi_hba_list_lock); + hba = iscsi_hba_list; + while (hba && hba->host->host_no != hostno) + hba = hba->next; + spin_unlock(&iscsi_hba_list_lock); + + if (hba) { + SPIN_LOCK_NOQUEUE(&hba->session_lock); + session = hba->session_list_head; + while (session) { + if (!show_session_luns(session, offset, finish, &current_offset, &bp)) + break; + session = session->next; + } + SPIN_UNLOCK_NOQUEUE(&hba->session_lock); + } + else { + printk("iSCSI: /proc read couldn't find HBA #%d\n", hostno); + } + + /* tell the caller about the output */ + if (current_offset <= offset) { + /* return no valid data if the desired offset is beyond the total "file" length */ + DEBUG_FLOW("iSCSI: /proc read returning 0 of %ld (EOF), start %p\n", finish - offset, *start); + return 0; + } + + /* return how much valid data is in the buffer */ + DEBUG_FLOW("iSCSI: /proc read returning %ld of %ld, start %p\n", current_offset - offset, finish - offset, *start); + return (current_offset - offset); + } +} + +/* + * We cannot include scsi_module.c because the daemon has not got a connection + * up yet. 
+ */ +static int +ctl_open( struct inode *inode, struct file *file ) +{ + MOD_INC_USE_COUNT; + return 0; +} + +static int +ctl_close( struct inode *inode, struct file *file ) +{ + MOD_DEC_USE_COUNT; + return 0; +} + +static int +ctl_ioctl( struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + + if (cmd == ISCSI_ESTABLISH_SESSION) { + iscsi_session_ioctl_t *ioctld = kmalloc(sizeof(*ioctld), GFP_KERNEL); + iscsi_session_t *session = NULL; + iscsi_portal_info_t *portals = NULL; + int probe_luns; + + if (!ioctld) { + printk("iSCSI: couldn't allocate space for session ioctl data\n"); + return -ENOMEM; + } + if (copy_from_user(ioctld, (void *)arg, sizeof(*ioctld))) { + printk("iSCSI: Cannot copy session ioctl data\n"); + kfree(ioctld); + return -EFAULT; + } + + DEBUG_INIT("iSCSI: ioctl establish session to %s at %lu\n", ioctld->TargetName, jiffies); + + if (ioctld->ioctl_size != sizeof(iscsi_session_ioctl_t)) { + printk("iSCSI: ioctl size %u incorrect, expecting %Zu\n", ioctld->ioctl_size, sizeof(*ioctld)); + kfree(ioctld); + return -EINVAL; + } + if (ioctld->ioctl_version != ISCSI_SESSION_IOCTL_VERSION) { + printk("iSCSI: ioctl version %u incorrect, expecting %u\n", ioctld->ioctl_version, ISCSI_SESSION_IOCTL_VERSION); + kfree(ioctld); + return -EINVAL; + } + if (ioctld->portal_info_size != sizeof(iscsi_portal_info_t)) { + printk("iSCSI: ioctl portal info size %u incorrect, expecting %Zu\n", ioctld->portal_info_size, sizeof(*portals)); + kfree(ioctld); + return -EINVAL; + } + if (ioctld->num_portals == 0) { + printk("iSCSI: ioctl has no portals\n"); + kfree(ioctld); + return -EINVAL; + } + + /* allocate the portals */ + if (ioctld->num_portals <= 0) { + printk("iSCSI: bus %d target %d has no portals in session ioctl\n", ioctld->iscsi_bus, ioctld->target_id); + kfree(ioctld); + return -EINVAL; + } + portals = (iscsi_portal_info_t *)kmalloc(ioctld->num_portals * sizeof(*portals), GFP_KERNEL); + if (portals == NULL) { + 
printk("iSCSI: bus %d target %d cannot allocate %d portals for session ioctl\n", + ioctld->iscsi_bus, ioctld->target_id, ioctld->num_portals); + kfree(ioctld); + return -ENOMEM; + } + DEBUG_INIT("iSCSI: bus %d target %d allocated portals %p (size %u) at %lu\n", + ioctld->iscsi_bus, ioctld->target_id, portals, ioctld->num_portals * sizeof(*portals), jiffies); + memset(portals, 0, ioctld->num_portals * sizeof(*portals)); + + /* copy the portal info from the user ioctl structure */ + if (copy_from_user(portals, (void *)arg + offsetof(struct iscsi_session_ioctl, portals), + ioctld->num_portals * sizeof(*portals))) + { + printk("iSCSI: bus %d target %d cannot copy portal info, ioctl %p, size %Zu, portals %p\n", + ioctld->iscsi_bus, ioctld->target_id, ioctld, sizeof(*portals), + (void *)arg + offsetof(struct iscsi_session_ioctl, portals)); + kfree(ioctld); + kfree(portals); + return -EFAULT; + } + else { + DEBUG_ALLOC("iSCSI: copied %u bytes of portal info from %p to %p\n", + ioctld->num_portals * sizeof(*portals), (void *)arg + offsetof(struct iscsi_session_ioctl, portals), portals); + } + + /* if this is the daemon's ioctl for a new session + * process, then we need to wait for the session to be + * established and probe LUNs, regardless of whether or not + * the session already exists, and regardless of the config + * number of any existing session. This is because the config + * is guaranteed to be the newest, regardless of the config + * number, and because the daemon needs to know if the session + * failed to start in order for the session process to exit + * with the appropriate exit value. + * + * if this is an update and there is no existing session, then + * we need to create a session, wait for it to be established, + * and probe LUNs for the new session. + * + * if this is an update and there is an existing session, then + * we need to update the config if the ioctl config number is + * greater than the existing session's config number. 
+ * Regardless of whether or not the config update was
+ * accepted, LUN probing must occur if requested. If LUN
+ * probing is already in progress, return -EBUSY so that the
+ * daemon tries again later.
+ */
+ probe_luns = ioctld->probe_luns;
+
+ /* create or update a session */
+ do {
+ if ((session = find_session_by_bus(ioctld->iscsi_bus, ioctld->target_id))) {
+ if (strcmp(ioctld->TargetName, session->TargetName) == 0) {
+ rc = update_session(session, ioctld, portals);
+ if (rc < 0) {
+ /* error out */
+ goto done;
+ }
+ }
+ else {
+ /* otherwise error out */
+ printk("iSCSI: bus %d target %d already bound to %s\n", ioctld->iscsi_bus, ioctld->target_id, session->TargetName);
+ drop_reference(session);
+ kfree(ioctld);
+ kfree(portals);
+ return 0;
+ }
+ }
+ else if ((session = allocate_session(ioctld, portals))) {
+ /* the config_mutex is initialized to locked, so that
+ * any calls to update_session block if they see the
+ * session we're about to add before we've had a chance
+ * to clear symlinks and start the session threads.
+ */
+ if (add_session(session)) {
+ /* preallocate a task for this session to use in
+ * case the HBA's task_cache ever becomes empty,
+ * since we can use SLAB_KERNEL now, but would
+ * have to use SLAB_ATOMIC later. We have to do
+ * this after adding the session to the HBA, so
+ * that a driver shutdown can see this session and
+ * wait for it to terminate. Otherwise we'd fail
+ * to destroy the task cache because this session
+ * still had a task allocated, but wasn't visible
+ * to the shutdown code. 
+ */ + if ((session->preallocated_task = kmem_cache_alloc(session->hba->task_cache, SLAB_KERNEL))) { + DEBUG_ALLOC("iSCSI: session %p preallocated task %p at %lu\n", session, session->preallocated_task, jiffies); + __set_bit(TASK_PREALLOCATED, &session->preallocated_task->flags); + } + else { + printk("iSCSI: session %p couldn't preallocate task at %lu\n", session, jiffies); + drop_reference(session); + kfree(ioctld); + return -ENOMEM; + } + + /* we now own the bus/target id. if we're building device symlinks, clear away any old ones */ + if (ioctld->link_base_dir[0] == '/') + clear_device_symlinks(session, ioctld->link_base_dir); + else if (ioctld->link_base_dir[0]) + printk("iSCSI: link base directory must be an absolute directory name, ignoring %s\n", ioctld->link_base_dir); + + /* give the caller the host and channel numbers we just claimed */ + ioctld->host_number = session->host_no; + ioctld->channel = session->channel; + + /* unless we already have one, start a timer thread */ + if (!test_and_set_bit(0, &iscsi_timer_running)) { + DEBUG_INIT("iSCSI: starting timer thread at %lu\n", jiffies); + if (kernel_thread(iscsi_timer_thread, NULL, 0) < 0) { + printk("iSCSI: failed to start timer thread at %lu\n", jiffies); + drop_reference(session); + kfree(ioctld); + up(&session->config_mutex); + return -ENOMEM; + } + } + + /* try to start the threads for this session */ + rc = start_session_threads(session); + + /* unlock the mutex so that any waiting or future update_session() calls can proceed */ + up(&session->config_mutex); + + if (rc < 0) + goto done; /* we failed to start the session threads */ + + /* always probe LUNs when we create a new session */ + probe_luns = 1; + } + else { + /* some session claimed this bus/target id while + * we were allocating a session. Loop, so that we + * either update the existing session or error + * out. + */ + drop_reference(session); + session = NULL; + } + } + else { + /* couldn't allocate a new session. 
error out and let the daemon retry the ioctl */ + kfree(ioctld); + kfree(portals); + return -ENOMEM; + } + } while (session == NULL); + + + if (probe_luns || !ioctld->update) { + /* wait for the session login to complete */ + DEBUG_INIT("iSCSI: ioctl waiting for session %p at %lu\n", session, jiffies); + wait_for_session(session, FALSE); + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p terminating, ioctl returning at %lu\n", session, jiffies); + goto done; + } + else if (signal_pending(current)) { + iscsi_terminate_session(session); + printk("iSCSI: session %p ioctl terminated, returning at %lu\n", session, jiffies); + goto done; + } + } + + if (probe_luns) { + /* if another ioctl is already trying to probe LUNs, must wait for it to finish */ + if (test_and_set_bit(SESSION_PROBING_LUNS, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p already has an ioctl probing or waiting to probe LUNs for bus %d, target %d\n", + session, ioctld->iscsi_bus, ioctld->target_id); + rc = -EBUSY; + goto done; + } + /* first figure out what LUNs actually exist */ + iscsi_detect_luns(session); + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p terminating, ioctl returning at %lu\n", session, jiffies); + clear_bit(SESSION_PROBING_LUNS, &session->control_bits); + smp_mb(); + goto done; + } + else if (signal_pending(current)) { + iscsi_terminate_session(session); + printk("iSCSI: session %p ioctl terminated, returning at %lu\n", session, jiffies); + clear_bit(SESSION_PROBING_LUNS, &session->control_bits); + smp_mb(); + goto done; + } + + /* and then try to probe the intersection of the allowed and detected LUNs */ + iscsi_probe_luns(session, ioctld->lun_bitmap, &ioctld->device_info); + + /* and then we're done */ + clear_bit(SESSION_PROBING_LUNS, &session->control_bits); + smp_mb(); + + if (test_bit(SESSION_TERMINATING, &session->control_bits)) { + printk("iSCSI: session %p terminating, ioctl 
returning at %lu\n", session, jiffies); + goto done; + } + else if (signal_pending(current)) { + iscsi_terminate_session(session); + printk("iSCSI: session %p ioctl terminated, returning at %lu\n", session, jiffies); + goto done; + } + } + + rc = 1; + + done: + /* pass back the TargetAlias to the caller */ + memcpy(ioctld->TargetAlias, session->TargetAlias, MIN(sizeof(ioctld->TargetAlias), sizeof(session->TargetAlias))); + ioctld->TargetAlias[sizeof(ioctld->TargetAlias) - 1] = '\0'; + if (copy_to_user((void *)arg, ioctld, sizeof(*ioctld)) ) { + printk("iSCSI: failed to copy ioctl data back to user mode for session %p\n", session); + } + kfree(ioctld); + + drop_reference(session); + + if (signal_pending(current)) + return -EINTR; + else + return rc; + } + else if (cmd == ISCSI_SET_INBP_INFO) { + if (copy_from_user(&iscsi_inbp_info, (void *)arg, + sizeof(iscsi_inbp_info))) { + printk("iSCSI: Cannot copy set_inbp_info ioctl data\n"); + return -EFAULT; + } + return (set_inbp_info()); + } + else if (cmd == ISCSI_CHECK_INBP_BOOT) { + if (copy_to_user((int *)arg, &this_is_iscsi_boot, + sizeof(this_is_iscsi_boot))) { + printk("iSCSI: Cannot copy out this_is_iscsi_boot variable\n"); + return -EFAULT; + } + return 0; + } + else if ( cmd == ISCSI_SHUTDOWN ) { + return iscsi_shutdown(); + } + else if ( cmd == ISCSI_RESET_PROBING ) { + return iscsi_reset_lun_probing(); + } + else if ( cmd == ISCSI_PROBE_LUNS ) { + iscsi_session_t *session = NULL; + iscsi_probe_luns_ioctl_t *ioctld = kmalloc(sizeof(*ioctld), GFP_KERNEL); + + if (!ioctld) { + printk("iSCSI: couldn't allocate space for probe ioctl data\n"); + return -ENOMEM; + } + if (copy_from_user(ioctld, (void *)arg, sizeof(*ioctld)) ) { + printk("iSCSI: Cannot copy session ioctl data\n"); + kfree(ioctld); + return -EFAULT; + } + if (ioctld->ioctl_size != sizeof(*ioctld)) { + printk("iSCSI: ioctl size %u incorrect, expecting %u\n", ioctld->ioctl_size, sizeof(*ioctld)); + kfree(ioctld); + return -EINVAL; + } + if 
(ioctld->ioctl_version != ISCSI_PROBE_LUNS_IOCTL_VERSION) { + printk("iSCSI: ioctl version %u incorrect, expecting %u\n", ioctld->ioctl_version, ISCSI_PROBE_LUNS_IOCTL_VERSION); + kfree(ioctld); + return -EINVAL; + } + + rc = 0; + + /* find the session */ + session = find_session_by_bus(ioctld->iscsi_bus, ioctld->target_id); + if (session == NULL) { + printk("iSCSI: ioctl probe LUNs (bus %d, target %d) failed, no session\n", + ioctld->iscsi_bus, ioctld->target_id); + goto done_probing; + } + + /* if another ioctl is already trying to probe LUNs, we don't need a 2nd */ + if (test_and_set_bit(SESSION_PROBING_LUNS, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p already has an ioctl probing or waiting to probe LUNs for bus %d, target %d\n", + session, ioctld->iscsi_bus, ioctld->target_id); + rc = 1; + goto done_probing; + } + + if (signal_pending(current)) + goto done_probing; + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto done_probing; + + /* if the session has been established before, but isn't established now, + * try to wait for it, at least until we get signalled or the session + * replacement timeout expires. 
+ */ + if (!test_bit(SESSION_ESTABLISHED, &session->control_bits)) { + DEBUG_INIT("iSCSI: session %p LUN probe ioctl for bus %d, target %d waiting for session to be established at %lu\n", + session, ioctld->iscsi_bus, ioctld->target_id, jiffies); + } + if (wait_for_session(session, TRUE)) { + DEBUG_INIT("iSCSI: session %p ioctl triggering LUN probe for bus %d, target %d at %lu\n", + session, ioctld->iscsi_bus, ioctld->target_id, jiffies); + + iscsi_detect_luns(session); + + if (signal_pending(current)) + goto done_probing; + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto done_probing; + + iscsi_probe_luns(session, ioctld->lun_bitmap, &ioctld->device_info); + if (signal_pending(current)) + goto done_probing; + if (test_bit(SESSION_TERMINATING, &session->control_bits)) + goto done_probing; + + rc = 1; + } + else { + /* we got signalled or the session replacement timer expired */ + DEBUG_INIT("iSCSI: session %p LUN probe ioctl for bus %d, target %d failed\n", + session, ioctld->iscsi_bus, ioctld->target_id); + + rc = 0; + } + + done_probing: + if (session) { + clear_bit(SESSION_PROBING_LUNS, &session->control_bits); + smp_mb(); + drop_reference(session); + } + kfree(ioctld); + return rc; + } + else if ( cmd == ISCSI_TERMINATE_SESSION ) { + iscsi_terminate_session_ioctl_t ioctld; + iscsi_session_t *session = NULL; + + if (copy_from_user(&ioctld, (void *)arg, sizeof(ioctld))) { + printk("iSCSI: Cannot copy session ioctl data\n"); + return -EFAULT; + } + if (ioctld.ioctl_size != sizeof(ioctld)) { + printk("iSCSI: terminate session ioctl size %u incorrect, expecting %Zu\n", + ioctld.ioctl_size, sizeof(ioctld)); + return -EINVAL; + } + if (ioctld.ioctl_version != ISCSI_TERMINATE_SESSION_IOCTL_VERSION) { + printk("iSCSI: terminate session ioctl version %u incorrect, expecting %u\n", + ioctld.ioctl_version, ISCSI_TERMINATE_SESSION_IOCTL_VERSION); + return -EINVAL; + } + + /* find the session */ + session = find_session_by_bus(ioctld.iscsi_bus, 
ioctld.target_id); + if (session) { + if (!session->this_is_root_disk) { + printk("iSCSI: bus %d target %d session %p terminated by ioctl\n", + ioctld.iscsi_bus, ioctld.target_id, session); + iscsi_terminate_session(session); + drop_reference(session); + return 1; + } + else { + printk("iSCSI: bus %d target %d session %p NOT terminated by ioctl because this session belongs to ROOT disk\n", + ioctld.iscsi_bus, ioctld.target_id, session); + return 1; + } + } + else { + printk("iSCSI: terminate session ioctl for bus %d target %d failed, no session\n", + ioctld.iscsi_bus, ioctld.target_id); + return 0; + } + } + else if ( cmd == ISCSI_GETTRACE ) { +#if DEBUG_TRACE + iscsi_trace_dump_t dump; + iscsi_trace_dump_t *user_dump; + int rc; + DECLARE_NOQUEUE_FLAGS; + + user_dump = (iscsi_trace_dump_t *)arg; + if (copy_from_user(&dump, user_dump, sizeof(dump))) { + printk("iSCSI: trace copy_from_user %p, %p, %Zu failed\n", + &dump, user_dump, sizeof(dump)); + return -EFAULT; + } + + if (dump.dump_ioctl_size != sizeof(iscsi_trace_dump_t)) { + printk("iSCSI: trace dump ioctl size is %Zu, but caller uses %u\n", + sizeof(iscsi_trace_dump_t), dump.dump_ioctl_size); + return -EINVAL; + } + + if (dump.dump_version != TRACE_DUMP_VERSION) { + printk("iSCSI: trace dump version is %u, but caller uses %u\n", + TRACE_DUMP_VERSION, dump.dump_version); + return -EINVAL; + } + + if (dump.trace_entry_size != sizeof(iscsi_trace_entry_t)) { + printk("iSCSI: trace dump ioctl size is %Zu, but caller uses %u\n", + sizeof(iscsi_trace_dump_t), dump.dump_ioctl_size); + return -EINVAL; + } + + if (dump.num_entries < ISCSI_TRACE_COUNT) { + /* tell the caller to use a bigger buffer */ + dump.num_entries = ISCSI_TRACE_COUNT; + if (copy_to_user(user_dump, &dump, sizeof(dump))) + return -EFAULT; + else + return -E2BIG; + } + + /* the caller is responsible for zeroing the buffer before the ioctl, so + * if the caller asks for too many entries, it should be able to tell which + * ones actually have data. 
+ */ + + /* only send what we've got */ + dump.num_entries = ISCSI_TRACE_COUNT; + if (copy_to_user(user_dump, &dump, sizeof(dump))) { + printk("iSCSI: trace copy_to_user %p, %p, %Zu failed\n", + user_dump, &dump, sizeof(dump)); + return -EFAULT; + } + + SPIN_LOCK_NOQUEUE(&iscsi_trace_lock); + /* FIXME: copy_to_user may sleep, but we're holding a spin_lock with interrupts off */ + if (copy_to_user(user_dump->trace, &trace_table[0], dump.num_entries * sizeof(iscsi_trace_entry_t))) { + printk("iSCSI: trace copy_to_user %p, %p, %u failed\n", + user_dump->trace, &trace_table[0], dump.num_entries); + SPIN_UNLOCK_NOQUEUE(&iscsi_trace_lock); + return -EFAULT; + } + rc = trace_index; + printk("iSCSI: copied %d trace entries to %p at %lu\n", dump.num_entries, user_dump->trace, jiffies); + SPIN_UNLOCK_NOQUEUE(&iscsi_trace_lock); + + return rc; +#else + printk("iSCSI: iSCSI kernel module does not implement tracing\n"); + return -ENXIO; +#endif + } + else if ( cmd == ISCSI_LS_TARGET_INFO ) { + int target_index,rc=0; + target_info_t tmp_buf; + iscsi_hba_t *hba; + iscsi_session_t *session; + + if (copy_from_user(&tmp_buf, (void *)arg, sizeof(target_info_t))) { + printk("iSCSI: Cannot copy user-level data\n"); + return -EFAULT; + } + target_index = tmp_buf.target_id; + spin_lock(&iscsi_hba_list_lock); + for (hba = iscsi_hba_list; hba; hba = hba->next) { + spin_lock(&hba->session_lock); + for (session = hba->session_list_head; session; session = session->next) { + if ( session->target_id == target_index ) { + tmp_buf.host_no = session->host_no; + tmp_buf.channel = session->channel; + strcpy(tmp_buf.target_name, session->TargetName); + strcpy(tmp_buf.target_alias, session->TargetAlias); + tmp_buf.num_portals = session->num_portals; + memcpy(tmp_buf.session_data.isid, session->isid, sizeof(session->isid)); + tmp_buf.session_data.tsid = session->tsid; + tmp_buf.session_data.addr[0] = session->ip_address[0]; + tmp_buf.session_data.addr[1] = session->ip_address[1]; + 
tmp_buf.session_data.addr[2] = session->ip_address[2]; + tmp_buf.session_data.addr[3] = session->ip_address[3]; + tmp_buf.session_data.port = session->port; + + tmp_buf.session_data.establishment_time = (jiffies - session->session_established_time)/HZ; + tmp_buf.session_data.InitialR2T = session->InitialR2T; + tmp_buf.session_data.ImmediateData = session->ImmediateData; + tmp_buf.session_data.HeaderDigest = session->HeaderDigest; + tmp_buf.session_data.DataDigest = session->DataDigest; + tmp_buf.session_data.FirstBurstLength = session->FirstBurstLength; + tmp_buf.session_data.MaxBurstLength = session->MaxBurstLength; + tmp_buf.session_data.MaxRecvDataSegmentLength = session->MaxRecvDataSegmentLength; + tmp_buf.session_data.MaxXmitDataSegmentLength = session->MaxXmitDataSegmentLength; + tmp_buf.session_data.login_timeout = session->login_timeout; + tmp_buf.session_data.auth_timeout = session->auth_timeout; + tmp_buf.session_data.active_timeout = session->active_timeout; + tmp_buf.session_data.idle_timeout = session->idle_timeout; + tmp_buf.session_data.ping_timeout = session->ping_timeout; + } + } + spin_unlock(&hba->session_lock); + } + spin_unlock(&iscsi_hba_list_lock); + if (copy_to_user((void *)arg, &tmp_buf, sizeof(target_info_t)) ) { + printk("iSCSI: failed to copy target-specific data back to user mode\n"); + return -EFAULT; + } + return rc; + } else if ( cmd == ISCSI_LS_PORTAL_INFO ) { + iscsi_hba_t *hba; + iscsi_session_t *session; + portal_list_t portalp; + int flag = 0; + + if (copy_from_user(&portalp, (void *)arg, sizeof(portalp))) { + printk("iSCSI: Cannot copy user-level data\n"); + return -EFAULT; + } + + spin_lock(&iscsi_hba_list_lock); + for (hba = iscsi_hba_list; hba; hba = hba->next) { + spin_lock(&hba->session_lock); + for (session = hba->session_list_head; session; session = session->next) { + if ( session->target_id == portalp.target_id ) { + if (copy_to_user(portalp.portals, session->portals, session->num_portals * 
sizeof(iscsi_portal_info_t))) { + printk("iSCSI: failed to copy target-specific data back to user mode\n"); + return -EFAULT; + } + flag = 1; + break; + } + } + spin_unlock(&hba->session_lock); + if ( flag == 1) + break; + } + spin_unlock(&iscsi_hba_list_lock); + return 0; + + } + + return -EINVAL; +} + + +Scsi_Host_Template iscsi_driver_template = { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44) ) + next : NULL, + module : NULL, + proc_dir : NULL, +#endif + proc_info : iscsi_proc_info, + name : NULL, + detect : iscsi_detect, + release : iscsi_release, + info : iscsi_info, + ioctl : NULL, + command : NULL, + queuecommand : iscsi_queuecommand, + eh_strategy_handler : NULL, + eh_abort_handler : NULL, + eh_device_reset_handler : iscsi_eh_device_reset, + eh_bus_reset_handler : iscsi_eh_bus_reset, + eh_host_reset_handler : iscsi_eh_host_reset, + abort : NULL, + reset : NULL, +#if defined(HAS_SLAVE_CONFIGURE) + slave_alloc : iscsi_slave_alloc, + slave_configure : iscsi_slave_configure, + slave_destroy : iscsi_slave_destroy, +#elif defined(HAS_NEW_SLAVE_ATTACH) + slave_attach : iscsi_slave_attach, + slave_detach : iscsi_slave_detach, +#else + slave_attach : NULL, +#endif + bios_param : iscsi_biosparam, + this_id: -1, + can_queue : ISCSI_MIN_CANQUEUE, + sg_tablesize : ISCSI_MAX_SG, + cmd_per_lun: ISCSI_CMDS_PER_LUN, + present : 0, + unchecked_isa_dma : 0, + use_clustering : ENABLE_CLUSTERING, +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0) ) + use_new_eh_code : 1, +#endif + emulated : 1 +}; + +#ifdef MODULE +EXPORT_NO_SYMBOLS; + +static int __init iscsi_init( void ) +{ + char *build_str = BUILD_STR; + int ret = -ENODEV; + + DEBUG_INIT("iSCSI: init module\n"); + + if (build_str) { + /* developer-built variant of a 4-digit internal release */ + printk("iSCSI: %d.%d.%d.%d%s variant (%s) built for Linux %s\niSCSI: %s\n", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, DRIVER_INTERNAL_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE, UTS_RELEASE, + 
build_str); + } + else if (DRIVER_INTERNAL_VERSION > 0) { + /* 4-digit release */ + printk("iSCSI: %d.%d.%d.%d%s (%s) built for Linux %s\n", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, DRIVER_INTERNAL_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE, UTS_RELEASE); + } + else { + /* 3-digit release */ + printk("iSCSI: %d.%d.%d%s (%s) built for Linux %s\n", + DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_PATCH_VERSION, + DRIVER_EXTRAVERSION, ISCSI_DATE, UTS_RELEASE); + } + + /* log any module param settings the user has changed */ + if (translate_deferred_sense) + printk("iSCSI: will translate deferred sense to current sense on disk command responses\n"); + + if (force_tcq) + printk("iSCSI: will force tagged command queueing for all devices\n"); + + if (untagged_queue_depth != 1) + printk("iSCSI: untagged queue depth %d\n", untagged_queue_depth); + + control_major = register_chrdev( 0, control_name, &control_fops ); + if ( control_major < 0 ) { + printk("iSCSI: failed to register the control device\n"); + return control_major; + } + printk("iSCSI: control device major number %d\n", control_major); + + iscsi_driver_template.module = THIS_MODULE; + + REGISTER_SCSI_HOST(&iscsi_driver_template); + if (iscsi_driver_template.present ) { + ret = 0; + } + else { + printk("iSCSI: failed to register SCSI HBA driver\n"); + UNREGISTER_SCSI_HOST(&iscsi_driver_template); + } + + set_bit(0, &init_module_complete); + return ret; +} + +static void __exit iscsi_cleanup( void ) +{ + pid_t pid = 0; + int rc; + + DEBUG_INIT("iSCSI: cleanup module\n"); + if ( control_major > 0 ) { + rc = unregister_chrdev( control_major, control_name ); + if ( rc ) { + printk("iSCSI: error trying to unregister control device\n"); + } + else { + control_major = 0; + } + } + + if ( iscsi_driver_template.present ) { + DEBUG_INIT("iSCSI: SCSI template present\n"); + /* this will cause the SCSI layer to call our iscsi_release function */ + UNREGISTER_SCSI_HOST(&iscsi_driver_template); + 
iscsi_driver_template.present = 0; + } + + /* kill the timer */ + if ((pid = iscsi_timer_pid)) + kill_proc(pid, SIGKILL, 1); + + /* wait for the timer to exit */ + while (test_bit(0, &iscsi_timer_running)) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(MSECS_TO_JIFFIES(10)); + } + + DEBUG_INIT("iSCSI: cleanup module complete\n"); + return; +} + +module_init(iscsi_init); +module_exit(iscsi_cleanup); + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsi.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsi.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,181 @@ +#ifndef ISCSI_H_ +#define ISCSI_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsi.h,v 1.40 2003/01/10 23:10:12 smferris Exp $ + * + * iscsi.h + * + * include for iSCSI kernel module + * + */ + +#include "iscsiAuthClient.h" +#include "iscsi-common.h" +#include "iscsi-limits.h" +#include "iscsi-task.h" +#include "iscsi-session.h" + + +#ifndef MIN +# define MIN(x, y) ((x) < (y)) ? (x) : (y) +#endif + +#ifndef MAX +# define MAX(x, y) ((x) > (y)) ? 
(x) : (y) +#endif + +typedef struct iscsi_hba { + struct iscsi_hba *next; + struct Scsi_Host *host; + unsigned int host_no; + unsigned long flags; + spinlock_t session_lock; + struct iscsi_session *session_list_head; + struct iscsi_session *session_list_tail; + atomic_t num_sessions; + kmem_cache_t *task_cache; +} iscsi_hba_t; + +/* HBA flags */ +#define ISCSI_HBA_ACTIVE 0 +#define ISCSI_HBA_SHUTTING_DOWN 1 +#define ISCSI_HBA_RELEASING 2 + + +/* driver entry points needed by the probing code */ +int iscsi_queue(iscsi_session_t *session, Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *)); +int iscsi_squash_cmnd(iscsi_session_t *session, Scsi_Cmnd *sc); + +/* flags we set on Scsi_Cmnds */ +#define COMMAND_TIMEDOUT 1 + +/* run-time controllable logging */ +#define ISCSI_LOG_ERR 1 +#define ISCSI_LOG_SENSE 2 +#define ISCSI_LOG_INIT 3 +#define ISCSI_LOG_QUEUE 4 +#define ISCSI_LOG_ALLOC 5 +#define ISCSI_LOG_EH 6 +#define ISCSI_LOG_FLOW 7 +#define ISCSI_LOG_RETRY 8 +#define ISCSI_LOG_LOGIN 9 +#define ISCSI_LOG_TIMEOUT 10 + +#define LOG_SET(flag) (1U << (flag)) +#define LOG_ENABLED(flag) (iscsi_log_settings & (1U << (flag))) + +extern volatile unsigned int iscsi_log_settings; + +#ifdef DEBUG +/* compile in all the debug messages and tracing */ +# define INCLUDE_DEBUG_ERROR 1 +# define INCLUDE_DEBUG_TRACE 1 +# define INCLUDE_DEBUG_INIT 1 +# define INCLUDE_DEBUG_QUEUE 1 +# define INCLUDE_DEBUG_FLOW 1 +# define INCLUDE_DEBUG_ALLOC 1 +# define INCLUDE_DEBUG_EH 1 +# define INCLUDE_DEBUG_RETRY 1 +# define INCLUDE_DEBUG_TIMEOUT 1 +#else +/* leave out the tracing and most of the debug messages */ +# define INCLUDE_DEBUG_ERROR 1 +# define INCLUDE_DEBUG_TRACE 0 +# define INCLUDE_DEBUG_INIT 0 +# define INCLUDE_DEBUG_QUEUE 0 +# define INCLUDE_DEBUG_FLOW 0 +# define INCLUDE_DEBUG_ALLOC 0 +# define INCLUDE_DEBUG_EH 0 +# define INCLUDE_DEBUG_RETRY 0 +# define INCLUDE_DEBUG_TIMEOUT 0 +#endif + +#if INCLUDE_DEBUG_INIT +# define DEBUG_INIT(fmt, args...) 
do { if (LOG_ENABLED(ISCSI_LOG_INIT)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_INIT(fmt, args...) do { } while (0) +#endif + +#if INCLUDE_DEBUG_QUEUE +# define DEBUG_QUEUE(fmt, args...) do { if (LOG_ENABLED(ISCSI_LOG_QUEUE)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_QUEUE(fmt, args...) do { } while (0) +#endif + +#if INCLUDE_DEBUG_FLOW +# define DEBUG_FLOW(fmt, args...) do { if (LOG_ENABLED(ISCSI_LOG_FLOW)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_FLOW(fmt, args...) do { } while (0) +#endif + +#if INCLUDE_DEBUG_ALLOC +# define DEBUG_ALLOC(fmt, args...) do { if (LOG_ENABLED(ISCSI_LOG_ALLOC)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_ALLOC(fmt, args...) do { } while (0) +#endif + +#if INCLUDE_DEBUG_RETRY +# define DEBUG_RETRY(fmt, args...) do { if (LOG_ENABLED(ISCSI_LOG_RETRY)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_RETRY(fmt, args...) do { } while (0) +#endif + +#if INCLUDE_DEBUG_EH +# define DEBUG_EH(fmt, args...) do { if (LOG_ENABLED(ISCSI_LOG_EH)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_EH(fmt, args...) do { } while (0) +#endif + +#if INCLUDE_DEBUG_ERROR +# define DEBUG_ERR(fmt, args...) do { if (LOG_ENABLED(ISCSI_LOG_ERR)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_ERR(fmt, args...) do { } while (0) +#endif + +#if INCLUDE_DEBUG_TIMEOUT +# define DEBUG_TIMEOUT(fmt, args...) do { if (LOG_ENABLED(ISCSI_LOG_TIMEOUT)) printk(fmt , ## args); } while (0) +#else +# define DEBUG_TIMEOUT(fmt, args...) do { } while (0) +#endif + + +/* the Scsi_Cmnd's request_bufflen doesn't always match the actual amount of data + * to be read or written. Try to compensate by decoding the cdb. 
+ */ +extern unsigned int iscsi_expected_data_length(Scsi_Cmnd *sc); + +/* Scsi_cmnd->result */ +#define DRIVER_BYTE(byte) ((byte) << 24) +#define HOST_BYTE(byte) ((byte) << 16) /* HBA codes */ +#define MSG_BYTE(byte) ((byte) << 8) +#define STATUS_BYTE(byte) ((byte)) /* SCSI status */ + +/* extract parts of the sense data from an (unsigned char *) to the beginning of sense data */ +#define SENSE_KEY(sensebuf) ((sensebuf)[2] & 0x0F) +#define ASC(sensebuf) ((sensebuf)[12]) +#define ASCQ(sensebuf) ((sensebuf)[13]) + +/* the Linux defines are bit shifted, so we define our own */ +#define STATUS_CHECK_CONDITION 0x02 +#define STATUS_BUSY 0x08 +#define STATUS_QUEUE_FULL 0x28 + +#endif + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.c 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,3076 @@ +/* + * iSCSI connection daemon + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsiAuthClient.c,v 1.13 2003/02/14 21:54:52 smferris Exp $ + */ + +/* + * This file implements the iSCSI CHAP authentication method based on + * draft-ietf-ips-iscsi-15.txt. 
The code in this file is meant + * to be platform independent, and makes use of only limited library + * functions, presently only string.h. Platform dependent routines are + * defined in iscsiAuthClient.h, but implemented in another file. + * + * This code in this file assumes a single thread of execution + * for each IscsiAuthClient structure, and does no locking. + */ + +#include "iscsiAuthClient.h" + + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + + +struct iscsiAuthKeyInfo_t { + const char *name; +}; +typedef struct iscsiAuthKeyInfo_t IscsiAuthKeyInfo; + + +IscsiAuthClientGlobalStats iscsiAuthClientGlobalStats; + +/* + * Note: The ordering of this table must match the order + * defined by IscsiAuthKeyType in iscsiAuthClient.h. + */ +static IscsiAuthKeyInfo + iscsiAuthClientKeyInfo[iscsiAuthKeyTypeMaxCount] = { + {"AuthMethod"}, + {"CHAP_A"}, + {"CHAP_N"}, + {"CHAP_R"}, + {"CHAP_I"}, + {"CHAP_C"} +}; + +static const char iscsiAuthClientHexString[] = "0123456789abcdefABCDEF"; +static const char iscsiAuthClientBase64String[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +static const char iscsiAuthClientAuthMethodChapOptionName[] = "CHAP"; + + +static int +iscsiAuthClientCheckString( + const char *s, unsigned int maxLength, unsigned int *pOutLength) + +{ + unsigned int length; + + if (!s) { + return TRUE; + } + + for (length = 0; length < maxLength; length++) { + if (*s++ == '\0') { + if (pOutLength) { + *pOutLength = length; + } + return FALSE; + } + } + + return TRUE; +} + + +static int +iscsiAuthClientStringCopy( + char *stringOut, const char *stringIn, unsigned int length) + +{ + if (!stringOut || !stringIn || length == 0) { + return TRUE; + } + + while ((*stringOut++ = *stringIn++) != '\0') { + if (--length == 0) { + stringOut--; + *stringOut = '\0'; + return TRUE; + } + } + + return FALSE; +} + + +static int +iscsiAuthClientStringAppend( + char *stringOut, const char *stringIn, unsigned int 
length) + +{ + if (!stringOut || !stringIn || length == 0) { + return TRUE; + } + + while (*stringOut++ != '\0') { + if (--length == 0) { + stringOut--; + *stringOut = '\0'; + return TRUE; + } + } + + stringOut--; + + while ((*stringOut++ = *stringIn++) != '\0') { + if (--length == 0) { + stringOut--; + *stringOut = '\0'; + return TRUE; + } + } + + return FALSE; +} + + +static int +iscsiAuthClientStringIndex(const char *s, int c) + +{ + int n = 0; + + while (*s != '\0') { + if (*s++ == c) { + return n; + } + n++; + } + + return -1; +} + + +static int +iscsiAuthClientCheckNodeType(int nodeType) + +{ + if (nodeType == iscsiAuthNodeTypeInitiator || + nodeType == iscsiAuthNodeTypeTarget) { + + return FALSE; + } + + return TRUE; +} + + +static int +iscsiAuthClientCheckVersion(int value) + +{ + if (value == iscsiAuthVersionDraft8 || + value == iscsiAuthVersionRfc) { + + return FALSE; + } + + return TRUE; +} + + +static int +iscsiAuthClientCheckNegRole(int value) + +{ + if (value == iscsiAuthNegRoleOriginator || + value == iscsiAuthNegRoleResponder) { + + return FALSE; + } + + return TRUE; +} + + +static int +iscsiAuthClientCheckAuthMethodOption(int value) + +{ + if (value == iscsiAuthOptionNone || + value == iscsiAuthMethodChap) { + + return FALSE; + } + + return TRUE; +} + + +static const char * +iscsiAuthClientAuthMethodOptionToText(IscsiAuthClient *client, int value) + +{ + const char *s; + + switch (value) { + + case iscsiAuthOptionReject: + s = client->rejectOptionName; + break; + + case iscsiAuthOptionNone: + s = client->noneOptionName; + break; + + case iscsiAuthMethodChap: + s = iscsiAuthClientAuthMethodChapOptionName; + break; + + default: + s = 0; + } + + return s; +} + + +static int +iscsiAuthClientCheckChapAlgorithmOption(int chapAlgorithm) + +{ + if (chapAlgorithm == iscsiAuthOptionNone || + chapAlgorithm == iscsiAuthChapAlgorithmMd5) { + return FALSE; + } + + return TRUE; +} + + +static int +iscsiAuthClientDataToHex( + unsigned char *data, unsigned int 
dataLength, + char *text, unsigned int textLength) + +{ + unsigned long n; + + if (!text || textLength == 0) { + return TRUE; + } + + if (!data || dataLength == 0) { + *text = '\0'; + return TRUE; + } + + if (textLength < 3) { + *text = '\0'; + return TRUE; + } + + *text++ = '0'; + *text++ = 'x'; + + textLength -= 2; + + while (dataLength > 0) { + + if (textLength < 3) { + *text = '\0'; + return TRUE; + } + + n = *data++; + dataLength--; + + *text++ = iscsiAuthClientHexString[(n >> 4) & 0xf]; + *text++ = iscsiAuthClientHexString[n & 0xf]; + + textLength -= 2; + } + + *text = '\0'; + + return FALSE; +} + + +static int +iscsiAuthClientDataToBase64( + unsigned char *data, unsigned int dataLength, + char *text, unsigned int textLength) + +{ + unsigned long n; + + if (!text || textLength == 0) { + return TRUE; + } + + if (!data || dataLength == 0) { + *text = '\0'; + return TRUE; + } + + if (textLength < 3) { + *text = '\0'; + return TRUE; + } + + *text++ = '0'; + *text++ = 'b'; + + textLength -= 2; + + while (dataLength >= 3) { + + if (textLength < 5) { + *text = '\0'; + return TRUE; + } + + n = *data++; + n = (n << 8) | *data++; + n = (n << 8) | *data++; + dataLength -= 3; + + *text++ = iscsiAuthClientBase64String[(n >> 18) & 0x3f]; + *text++ = iscsiAuthClientBase64String[(n >> 12) & 0x3f]; + *text++ = iscsiAuthClientBase64String[(n >> 6) & 0x3f]; + *text++ = iscsiAuthClientBase64String[n & 0x3f]; + + textLength -= 4; + } + + if (dataLength == 1) { + + if (textLength < 5) { + *text = '\0'; + return TRUE; + } + + n = *data++; + n = n << 4; + + *text++ = iscsiAuthClientBase64String[(n >> 6) & 0x3f]; + *text++ = iscsiAuthClientBase64String[n & 0x3f]; + *text++ = '='; + *text++ = '='; + + } else if (dataLength == 2) { + + if (textLength < 5) { + return TRUE; + } + + n = *data++; + n = (n << 8) | *data++; + n = n << 2; + + *text++ = iscsiAuthClientBase64String[(n >> 12) & 0x3f]; + *text++ = iscsiAuthClientBase64String[(n >> 6) & 0x3f]; + *text++ = 
iscsiAuthClientBase64String[n & 0x3f]; + *text++ = '='; + } + + *text = '\0'; + + return FALSE; +} + + +static int +iscsiAuthClientDataToText( + int base64, + unsigned char *data, unsigned int dataLength, + char *text, unsigned int textLength) + +{ + int status; + + if (base64) { + status = + iscsiAuthClientDataToBase64(data, dataLength, text, textLength); + } else { + status = + iscsiAuthClientDataToHex(data, dataLength, text, textLength); + } + + return status; +} + + +static int +iscsiAuthClientHexToData( + const char *text, unsigned int textLength, + unsigned char *data, unsigned int *pDataLength) + +{ + int i; + unsigned int n1; + unsigned int n2; + unsigned int dataLength = *pDataLength; + + if ((textLength % 2) == 1) { + + i = iscsiAuthClientStringIndex(iscsiAuthClientHexString, *text++); + if (i < 0) return TRUE; /* error, bad character */ + + if (i > 15) i -= 6; + n2 = i; + + if (dataLength < 1) { + return TRUE; /* error, too much data */ + } + + *data++ = n2; + dataLength--; + } + + while (*text != '\0') { + + i = iscsiAuthClientStringIndex(iscsiAuthClientHexString, *text++); + if (i < 0) return TRUE; /* error, bad character */ + + if (i > 15) i -= 6; + n1 = i; + + if (*text == '\0') return TRUE; /* error, odd string length */ + + i = iscsiAuthClientStringIndex(iscsiAuthClientHexString, *text++); + if (i < 0) return TRUE; /* error, bad character */ + + if (i > 15) i -= 6; + n2 = i; + + if (dataLength < 1) { + return TRUE; /* error, too much data */ + } + + *data++ = (n1 << 4) | n2; + dataLength--; + } + + if (dataLength >= *pDataLength) { + return TRUE; /* error, no data */ + } + + *pDataLength = *pDataLength - dataLength; + + return FALSE; /* no error */ +} + + +static int +iscsiAuthClientBase64ToData( + const char *text, unsigned int textLength, + unsigned char *data, unsigned int *pDataLength) + +{ + int i; + unsigned int n; + unsigned int count; + unsigned int dataLength = *pDataLength; + + textLength = textLength; /* not used */ + + n = 0; + count = 
0; + + while (*text != '\0' && *text != '=') { + + i = iscsiAuthClientStringIndex(iscsiAuthClientBase64String, *text++); + if (i < 0) return TRUE; /* error, bad character */ + + n = (n << 6 | (unsigned int)i); + count++; + + if (count >= 4) { + if (dataLength < 3) { + return TRUE; /* error, too much data */ + } + *data++ = n >> 16; + *data++ = n >> 8; + *data++ = n; + dataLength -= 3; + n = 0; + count = 0; + } + } + + while (*text != '\0') { + if (*text++ != '=') { + return TRUE; /* error, bad pad */ + } + } + + if (count == 0) { + /* do nothing */ + } else if (count == 2) { + if (dataLength < 1) { + return TRUE; /* error, too much data */ + } + n = n >> 4; + *data++ = n; + dataLength--; + } else if (count == 3) { + if (dataLength < 2) { + return TRUE; /* error, too much data */ + } + n = n >> 2; + *data++ = n >> 8; + *data++ = n; + dataLength -= 2; + } else { + return TRUE; /* bad encoding */ + } + + if (dataLength >= *pDataLength) { + return TRUE; /* error, no data */ + } + + *pDataLength = *pDataLength - dataLength; + + return FALSE; /* no error */ +} + + +static int +iscsiAuthClientTextToData( + const char *text, unsigned char *data, unsigned int *dataLength) + +{ + int status; + unsigned int textLength; + + status = iscsiAuthClientCheckString( + text, 2 + 2 * iscsiAuthLargeBinaryMaxLength + 1, &textLength); + + if (status) { + return status; + } + + if (text[0] == '0' && (text[1] == 'x' || text[1] == 'X')) { + /* skip prefix */ + text += 2; + textLength -= 2; + status = + iscsiAuthClientHexToData(text, textLength, data, dataLength); + } else if (text[0] == '0' && (text[1] == 'b' || text[1] == 'B')) { + /* skip prefix */ + text += 2; + textLength -= 2; + status = + iscsiAuthClientBase64ToData(text, textLength, data, dataLength); + } else { + status = TRUE; /* prefix not recognized. 
*/ + } + + return status; +} + + +static IscsiAuthDebugStatus +iscsiAuthClientChapComputeResponse( + IscsiAuthClient *client, + int remoteAuthentication, + unsigned int id, + unsigned char *challengeData, unsigned int challengeLength, + unsigned char *responseData) + +{ + unsigned char idData[1]; + IscsiAuthMd5Context context; + unsigned char outData[iscsiAuthStringMaxLength]; + unsigned int outLength = iscsiAuthStringMaxLength; + + if (!client->passwordPresent) { + return iscsiAuthDebugStatusLocalPasswordNotSet; + } + + iscsiAuthMd5Init(&context); + + /* id byte */ + idData[0] = id; + iscsiAuthMd5Update(&context, idData, 1); + + /* decrypt password */ + if (iscsiAuthClientData( + outData, &outLength, client->passwordData, client->passwordLength)) { + + return iscsiAuthDebugStatusPasswordDecryptFailed; + } + + if (!remoteAuthentication && !client->ipSec && outLength < 12) { + return iscsiAuthDebugStatusPasswordTooShortWithNoIpSec; + } + + /* shared secret */ + iscsiAuthMd5Update(&context, outData, outLength); + + /* clear decrypted password */ + memset(outData, 0, iscsiAuthStringMaxLength); + + /* challenge value */ + iscsiAuthMd5Update(&context, challengeData, challengeLength); + + iscsiAuthMd5Final(responseData, &context); + + return iscsiAuthDebugStatusNotSet; /* no error */ +} + + +static void +iscsiAuthClientInitKeyBlock(IscsiAuthKeyBlock *keyBlock) + +{ + char *stringBlock = keyBlock->stringBlock; + + memset(keyBlock, 0, sizeof(*keyBlock)); + keyBlock->stringBlock = stringBlock; +} + + +static void +iscsiAuthClientSetKeyValue( + IscsiAuthKeyBlock *keyBlock, int keyType, const char *keyValue) + +{ + unsigned int length; + char *string; + + if (keyBlock->key[keyType].valueSet) { + keyBlock->duplicateSet = TRUE; + return; + } + + keyBlock->key[keyType].valueSet = TRUE; + + if (!keyValue) { + return; + } + + if (iscsiAuthClientCheckString( + keyValue, iscsiAuthStringMaxLength, &length)) { + + keyBlock->stringTooLong = TRUE; + return; + } + + length += 1; + + if 
((keyBlock->blockLength + length) > iscsiAuthStringBlockMaxLength) { + keyBlock->tooMuchData = TRUE; + return; + } + + string = &keyBlock->stringBlock[keyBlock->blockLength]; + + if (iscsiAuthClientStringCopy(string, keyValue, length)) { + keyBlock->tooMuchData = TRUE; + return; + } + keyBlock->blockLength += length; + + keyBlock->key[keyType].string = string; + keyBlock->key[keyType].present = TRUE; +} + + +static const char * +iscsiAuthClientGetKeyValue(IscsiAuthKeyBlock *keyBlock, int keyType) + +{ + keyBlock->key[keyType].processed = TRUE; + + if (!keyBlock->key[keyType].present) { + return 0; + } + + return keyBlock->key[keyType].string; +} + + +static void +iscsiAuthClientCheckKey( + IscsiAuthClient *client, + int keyType, + int *negotiatedOption, + unsigned int optionCount, + int *optionList, + const char *(*valueToText)(IscsiAuthClient *, int)) + +{ + const char *keyValue; + int length; + unsigned int i; + + keyValue = iscsiAuthClientGetKeyValue(&client->recvKeyBlock, keyType); + if (!keyValue) { + *negotiatedOption = iscsiAuthOptionNotPresent; + return; + } + + while (*keyValue != '\0') { + + length = 0; + + while (*keyValue != '\0' && *keyValue != ',') { + client->scratchKeyValue[length++] = *keyValue++; + } + + if (*keyValue == ',') keyValue++; + client->scratchKeyValue[length++] = '\0'; + + for (i = 0; i < optionCount; i++) { + const char *s = (*valueToText)(client, optionList[i]); + + if (!s) continue; + + if (strcmp(client->scratchKeyValue, s) == 0) { + *negotiatedOption = optionList[i]; + return; + } + } + } + + *negotiatedOption = iscsiAuthOptionReject; +} + + +static void +iscsiAuthClientSetKey( + IscsiAuthClient *client, + int keyType, + unsigned int optionCount, + int *optionList, + const char *(*valueToText)(IscsiAuthClient *, int)) + +{ + unsigned int i; + + if (optionCount == 0) { + /* + * No valid options to send, but we always want to + * send something. 
+ */ + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, keyType, client->noneOptionName); + return; + } + + if (optionCount == 1 && optionList[0] == iscsiAuthOptionNotPresent) { + iscsiAuthClientSetKeyValue(&client->sendKeyBlock, keyType, 0); + return; + } + + for (i = 0; i < optionCount; i++) { + const char *s = (*valueToText)(client, optionList[i]); + + if (!s) continue; + + if (i == 0) { + iscsiAuthClientStringCopy( + client->scratchKeyValue, s, iscsiAuthStringMaxLength); + } else { + iscsiAuthClientStringAppend( + client->scratchKeyValue, ",", iscsiAuthStringMaxLength); + iscsiAuthClientStringAppend( + client->scratchKeyValue, s, iscsiAuthStringMaxLength); + } + } + + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, keyType, client->scratchKeyValue); +} + + +static void +iscsiAuthClientCheckAuthMethodKey(IscsiAuthClient *client) + +{ + iscsiAuthClientCheckKey( + client, + iscsiAuthKeyTypeAuthMethod, + &client->negotiatedAuthMethod, + client->authMethodValidCount, + client->authMethodValidList, + iscsiAuthClientAuthMethodOptionToText); +} + + +static void +iscsiAuthClientSetAuthMethodKey( + IscsiAuthClient *client, + unsigned int authMethodCount, + int *authMethodList) + +{ + iscsiAuthClientSetKey( + client, + iscsiAuthKeyTypeAuthMethod, + authMethodCount, + authMethodList, + iscsiAuthClientAuthMethodOptionToText); +} + + +static void +iscsiAuthClientCheckChapAlgorithmKey(IscsiAuthClient *client) + +{ + const char *keyValue; + int length; + unsigned long number; + unsigned int i; + + keyValue = iscsiAuthClientGetKeyValue( + &client->recvKeyBlock, iscsiAuthKeyTypeChapAlgorithm); + if (!keyValue) { + client->negotiatedChapAlgorithm = iscsiAuthOptionNotPresent; + return; + } + + while (*keyValue != '\0') { + + length = 0; + + while (*keyValue != '\0' && *keyValue != ',') { + client->scratchKeyValue[length++] = *keyValue++; + } + + if (*keyValue == ',') keyValue++; + client->scratchKeyValue[length++] = '\0'; + + if 
(iscsiAuthClientTextToNumber(client->scratchKeyValue, &number)) { + continue; + } + + for (i = 0; i < client->chapAlgorithmCount; i++) { + + if (number == (unsigned long)client->chapAlgorithmList[i]) { + client->negotiatedChapAlgorithm = number; + return; + } + } + } + + client->negotiatedChapAlgorithm = iscsiAuthOptionReject; +} + + +static void +iscsiAuthClientSetChapAlgorithmKey( + IscsiAuthClient *client, + unsigned int chapAlgorithmCount, + int *chapAlgorithmList) + +{ + unsigned int i; + + if (chapAlgorithmCount == 0) { + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, iscsiAuthKeyTypeChapAlgorithm, 0); + return; + } + + if (chapAlgorithmCount == 1 && + chapAlgorithmList[0] == iscsiAuthOptionNotPresent) { + + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, iscsiAuthKeyTypeChapAlgorithm, 0); + return; + } + + if (chapAlgorithmCount == 1 && + chapAlgorithmList[0] == iscsiAuthOptionReject) { + + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeChapAlgorithm, + client->rejectOptionName); + return; + } + + for (i = 0; i < chapAlgorithmCount; i++) { + char s[20]; + + iscsiAuthClientNumberToText(chapAlgorithmList[i], s, sizeof(s)); + + if (i == 0) { + iscsiAuthClientStringCopy( + client->scratchKeyValue, s, iscsiAuthStringMaxLength); + } else { + iscsiAuthClientStringAppend( + client->scratchKeyValue, ",", iscsiAuthStringMaxLength); + iscsiAuthClientStringAppend( + client->scratchKeyValue, s, iscsiAuthStringMaxLength); + } + } + + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeChapAlgorithm, + client->scratchKeyValue); +} + + +static void +iscsiAuthClientNextPhase(IscsiAuthClient *client) + +{ + switch (client->phase) { + + case iscsiAuthPhaseConfigure: + client->phase = iscsiAuthPhaseNegotiate; + break; + + case iscsiAuthPhaseNegotiate: + client->phase = iscsiAuthPhaseAuthenticate; + + if (client->negotiatedAuthMethod == iscsiAuthOptionReject || + client->negotiatedAuthMethod == iscsiAuthOptionNotPresent || + 
client->negotiatedAuthMethod == iscsiAuthOptionNone) { + + client->localState = iscsiAuthLocalStateDone; + client->remoteState = iscsiAuthRemoteStateDone; + + if (client->authRemote) { + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + } else { + client->remoteAuthStatus = iscsiAuthStatusPass; + } + + switch (client->negotiatedAuthMethod) { + + case iscsiAuthOptionReject: + client->debugStatus = + iscsiAuthDebugStatusAuthMethodReject; + break; + + case iscsiAuthOptionNotPresent: + client->debugStatus = + iscsiAuthDebugStatusAuthMethodNotPresent; + break; + + case iscsiAuthOptionNone: + client->debugStatus = + iscsiAuthDebugStatusAuthMethodNone; + } + + } else if (client->negotiatedAuthMethod == iscsiAuthMethodChap) { + + client->localState = iscsiAuthLocalStateSendAlgorithm; + client->remoteState = iscsiAuthRemoteStateSendAlgorithm; + + } else { + + client->localState = iscsiAuthLocalStateDone; + client->remoteState = iscsiAuthRemoteStateDone; + client->remoteAuthStatus = iscsiAuthStatusFail; + client->debugStatus = iscsiAuthDebugStatusAuthMethodBad; + } + + break; + + case iscsiAuthPhaseAuthenticate: + client->phase = iscsiAuthPhaseDone; + break; + + case iscsiAuthPhaseDone: + case iscsiAuthPhaseError: + default: + client->phase = iscsiAuthPhaseError; + } +} + + +static void +iscsiAuthClientLocalAuthentication(IscsiAuthClient *client) + +{ + unsigned int chapIdentifier; + unsigned char responseData[iscsiAuthChapResponseLength]; + unsigned long number; + int status; + IscsiAuthDebugStatus debugStatus; + const char *chapIdentifierKeyValue; + const char *chapChallengeKeyValue; + + switch (client->localState) { + + case iscsiAuthLocalStateSendAlgorithm: + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + + iscsiAuthClientSetChapAlgorithmKey( + client, client->chapAlgorithmCount, client->chapAlgorithmList); + + client->localState = iscsiAuthLocalStateRecvAlgorithm; + break; + } + + /* Fall through */ + + case 
iscsiAuthLocalStateRecvAlgorithm: + iscsiAuthClientCheckChapAlgorithmKey(client); + + if (client->nodeType == iscsiAuthNodeTypeTarget) { + + iscsiAuthClientSetChapAlgorithmKey( + client, 1, &client->negotiatedChapAlgorithm); + } + + /* Make sure only supported CHAP algorithm is used. */ + if (client->negotiatedChapAlgorithm == iscsiAuthOptionNotPresent) { + + client->localState = iscsiAuthLocalStateError; + client->debugStatus = iscsiAuthDebugStatusChapAlgorithmExpected; + break; + + } else if (client->negotiatedChapAlgorithm == iscsiAuthOptionReject) { + + client->localState = iscsiAuthLocalStateError; + client->debugStatus = iscsiAuthDebugStatusChapAlgorithmReject; + break; + + } else if (client->negotiatedChapAlgorithm != + iscsiAuthChapAlgorithmMd5) { + + client->localState = iscsiAuthLocalStateError; + client->debugStatus = iscsiAuthDebugStatusChapAlgorithmBad; + break; + } + + if (client->nodeType == iscsiAuthNodeTypeTarget) { + + client->localState = iscsiAuthLocalStateRecvChallenge; + break; + } + + /* Fall through */ + + case iscsiAuthLocalStateRecvChallenge: + chapIdentifierKeyValue = iscsiAuthClientGetKeyValue( + &client->recvKeyBlock, iscsiAuthKeyTypeChapIdentifier); + chapChallengeKeyValue = iscsiAuthClientGetKeyValue( + &client->recvKeyBlock, iscsiAuthKeyTypeChapChallenge); + + if (client->nodeType == iscsiAuthNodeTypeTarget) { + + if (!chapIdentifierKeyValue && !chapChallengeKeyValue) { + client->localState = iscsiAuthLocalStateDone; + break; + } + } + + if (!chapIdentifierKeyValue) { + client->localState = iscsiAuthLocalStateError; + client->debugStatus = iscsiAuthDebugStatusChapIdentifierExpected; + break; + } + + if (!chapChallengeKeyValue) { + client->localState = iscsiAuthLocalStateError; + client->debugStatus = iscsiAuthDebugStatusChapChallengeExpected; + break; + } + + status = iscsiAuthClientTextToNumber(chapIdentifierKeyValue, &number); + + if (status || (255 < number)) { + client->localState = iscsiAuthLocalStateError; + client->debugStatus 
= iscsiAuthDebugStatusChapIdentifierBad; + break; + } + chapIdentifier = number; + + if (client->recvChapChallengeStatus) { + client->localState = iscsiAuthLocalStateError; + client->debugStatus = iscsiAuthDebugStatusChapChallengeBad; + break; + } + + if (client->nodeType == iscsiAuthNodeTypeTarget && + client->recvChapChallenge.length == + client->sendChapChallenge.length && + memcmp( + client->recvChapChallenge.largeBinary, + client->sendChapChallenge.largeBinary, + client->sendChapChallenge.length) == 0) { + + client->localState = iscsiAuthLocalStateError; + client->debugStatus = iscsiAuthDebugStatusChapChallengeReflected; + break; + } + + debugStatus = iscsiAuthClientChapComputeResponse( + client, + FALSE, + chapIdentifier, + client->recvChapChallenge.largeBinary, + client->recvChapChallenge.length, + responseData); + + if (debugStatus != iscsiAuthDebugStatusNotSet) { + client->localState = iscsiAuthLocalStateError; + client->debugStatus = debugStatus; + break; + } + + iscsiAuthClientDataToText( + client->base64, + responseData, iscsiAuthChapResponseLength, + client->scratchKeyValue, + iscsiAuthStringMaxLength); + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeChapResponse, + client->scratchKeyValue); + + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeChapUsername, + client->username); + + client->localState = iscsiAuthLocalStateDone; + break; + + case iscsiAuthLocalStateDone: + break; + + case iscsiAuthLocalStateError: + default: + client->phase = iscsiAuthPhaseError; + } +} + + +static void +iscsiAuthClientRemoteAuthentication(IscsiAuthClient *client) + +{ + unsigned char idData[1]; + unsigned char responseData[iscsiAuthStringMaxLength]; + unsigned int responseLength = iscsiAuthStringMaxLength; + unsigned char myResponseData[iscsiAuthChapResponseLength]; + int status; + IscsiAuthDebugStatus debugStatus; + const char *chapResponseKeyValue; + const char *chapUsernameKeyValue; + + switch (client->remoteState) { + + 
case iscsiAuthRemoteStateSendAlgorithm: + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + client->remoteState = iscsiAuthRemoteStateSendChallenge; + break; + } + /* Fall through */ + + case iscsiAuthRemoteStateSendChallenge: + if (!client->authRemote) { + client->remoteAuthStatus = iscsiAuthStatusPass; + client->debugStatus = iscsiAuthDebugStatusAuthRemoteFalse; + client->remoteState = iscsiAuthRemoteStateDone; + break; + } + + iscsiAuthRandomSetData(idData, 1); + client->sendChapIdentifier = idData[0]; + iscsiAuthClientNumberToText( + client->sendChapIdentifier, + client->scratchKeyValue, + iscsiAuthStringMaxLength); + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeChapIdentifier, + client->scratchKeyValue); + + client->sendChapChallenge.length = client->chapChallengeLength; + iscsiAuthRandomSetData( + client->sendChapChallenge.largeBinary, + client->sendChapChallenge.length); + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, iscsiAuthKeyTypeChapChallenge, ""); + + client->remoteState = iscsiAuthRemoteStateRecvResponse; + break; + + case iscsiAuthRemoteStateRecvResponse: + chapResponseKeyValue = iscsiAuthClientGetKeyValue( + &client->recvKeyBlock, iscsiAuthKeyTypeChapResponse); + + chapUsernameKeyValue = iscsiAuthClientGetKeyValue( + &client->recvKeyBlock, iscsiAuthKeyTypeChapUsername); + + if (!chapResponseKeyValue) { + client->remoteState = iscsiAuthRemoteStateError; + client->debugStatus = iscsiAuthDebugStatusChapResponseExpected; + break; + } + + if (!chapUsernameKeyValue) { + client->remoteState = iscsiAuthRemoteStateError; + client->debugStatus = iscsiAuthDebugStatusChapUsernameExpected; + break; + } + + status = iscsiAuthClientTextToData( + chapResponseKeyValue, responseData, &responseLength); + + if (status) { + client->remoteState = iscsiAuthRemoteStateError; + client->debugStatus = iscsiAuthDebugStatusChapResponseBad; + break; + } + + if (responseLength == iscsiAuthChapResponseLength) { + + debugStatus = 
iscsiAuthClientChapComputeResponse( + client, + TRUE, + client->sendChapIdentifier, + client->sendChapChallenge.largeBinary, + client->sendChapChallenge.length, + myResponseData); + + if (debugStatus == iscsiAuthDebugStatusNotSet && + memcmp( + myResponseData, + responseData, + iscsiAuthChapResponseLength) == 0) { + + client->remoteState = iscsiAuthRemoteStateError; + client->debugStatus = iscsiAuthDebugStatusPasswordIdentical; + break; + } + } + + iscsiAuthClientStringCopy( + client->chapUsername, + chapUsernameKeyValue, + iscsiAuthStringMaxLength); + + status = iscsiAuthClientChapAuthRequest( + client, + client->chapUsername, + client->sendChapIdentifier, + client->sendChapChallenge.largeBinary, + client->sendChapChallenge.length, + responseData, responseLength); + + if (status == iscsiAuthStatusInProgress) { + iscsiAuthClientGlobalStats.requestSent++; + client->remoteState = iscsiAuthRemoteStateAuthRequest; + break; + } + + client->remoteAuthStatus = (IscsiAuthStatus)status; + client->authResponseFlag = TRUE; + + /* Fall through */ + + case iscsiAuthRemoteStateAuthRequest: + /* client->remoteAuthStatus already set */ + if (client->authServerErrorFlag) { + client->remoteAuthStatus = iscsiAuthStatusFail; + client->debugStatus = iscsiAuthDebugStatusAuthServerError; + } else if (client->remoteAuthStatus == iscsiAuthStatusPass) { + client->debugStatus = iscsiAuthDebugStatusAuthPass; + } else if (client->remoteAuthStatus == iscsiAuthStatusFail) { + client->debugStatus = iscsiAuthDebugStatusAuthFail; + } else { + client->remoteAuthStatus = iscsiAuthStatusFail; + client->debugStatus = iscsiAuthDebugStatusAuthStatusBad; + } + client->remoteState = iscsiAuthRemoteStateDone; + + /* Fall through */ + + case iscsiAuthRemoteStateDone: + break; + + case iscsiAuthRemoteStateError: + default: + client->phase = iscsiAuthPhaseError; + } +} + + +static void +iscsiAuthClientHandshake(IscsiAuthClient *client) + +{ + if (client->phase == iscsiAuthPhaseDone) { + + /* + * Should only 
happen if authentication + * protocol error occurred. + */ + return; + } + + if (client->remoteState == iscsiAuthRemoteStateAuthRequest) { + + /* + * Defer until authentication response received + * from internal authentication service. + */ + return; + } + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + + /* + * Target should only have set T bit on response if + * initiator set it on previous message. + */ + if (client->recvKeyBlock.transitBit && !client->transitBitSentFlag) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusTbitSetIllegal; + return; + } + } + + if (client->phase == iscsiAuthPhaseNegotiate) { + /* + * Should only happen if waiting for peer + * to send AuthMethod key or set Transit Bit. + */ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + client->sendKeyBlock.transitBit = TRUE; + } + return; + } + + if (client->remoteState == iscsiAuthRemoteStateRecvResponse || + client->remoteState == iscsiAuthRemoteStateDone) { + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + + if (client->recvKeyBlock.transitBit) { + if (client->remoteState != iscsiAuthRemoteStateDone) { + goto recvTransitBitError; + } + iscsiAuthClientNextPhase(client); + } else { + client->sendKeyBlock.transitBit = TRUE; + } + + } else { + + if (client->remoteState == iscsiAuthRemoteStateDone && + client->remoteAuthStatus != iscsiAuthStatusPass) { + + /* + * Authentication failed, don't do T bit handshake. + */ + iscsiAuthClientNextPhase(client); + + } else { + + /* + * Target can only set T bit on response if + * initiator set it on current message. 
+ */ + if (client->recvKeyBlock.transitBit) { + client->sendKeyBlock.transitBit = TRUE; + iscsiAuthClientNextPhase(client); + } + } + } + + } else { + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + + if (client->recvKeyBlock.transitBit) { + goto recvTransitBitError; + } + } + } + + return; + +recvTransitBitError: + /* + * Target set T bit on response but + * initiator was not done with authentication. + */ + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusTbitSetPremature; +} + + +static int +iscsiAuthClientRecvEndStatus(IscsiAuthClient *client) + +{ + int authStatus; + int keyType; + + if (client->phase == iscsiAuthPhaseError) { + return iscsiAuthStatusError; + } + + if (client->phase == iscsiAuthPhaseDone) { + + /* Perform sanity check against configured parameters. */ + + if (client->authRemote && !client->authResponseFlag && + client->remoteAuthStatus == iscsiAuthStatusPass) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->debugStatus = iscsiAuthDebugStatusAuthPassNotValid; + } + + authStatus = client->remoteAuthStatus; + + } else if (client->remoteState == iscsiAuthRemoteStateAuthRequest) { + + authStatus = iscsiAuthStatusInProgress; + + } else { + + authStatus = iscsiAuthStatusContinue; + } + + if (authStatus != iscsiAuthStatusInProgress) { + client->recvInProgressFlag = FALSE; + } + + if (authStatus == iscsiAuthStatusContinue || + authStatus == iscsiAuthStatusPass) { + + if (client->sendKeyBlock.duplicateSet) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusSendDuplicateSetKeyValue; + authStatus = iscsiAuthStatusFail; + + } else if (client->sendKeyBlock.stringTooLong) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusSendStringTooLong; + authStatus = iscsiAuthStatusFail; + + } else 
if (client->sendKeyBlock.tooMuchData) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusSendTooMuchData; + authStatus = iscsiAuthStatusFail; + + } else { + + /* Check that all incoming keys have been processed. */ + + for (keyType = iscsiAuthKeyTypeFirst; + keyType < iscsiAuthKeyTypeMaxCount; keyType++) { + + if (client->recvKeyBlock.key[keyType].present && + !client->recvKeyBlock.key[keyType].processed) { + + break; + } + } + + if (keyType < iscsiAuthKeyTypeMaxCount) { + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusUnexpectedKeyPresent; + authStatus = iscsiAuthStatusFail; + } + } + } + + if (authStatus != iscsiAuthStatusPass && + authStatus != iscsiAuthStatusContinue && + authStatus != iscsiAuthStatusInProgress) { + int authMethodKeyPresent = FALSE; + int chapAlgorithmKeyPresent = FALSE; + + /* Suppress send keys on error, except for AuthMethod and CHAP_A. 
*/ + + if (client->nodeType == iscsiAuthNodeTypeTarget) { + + if (iscsiAuthClientGetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeAuthMethod)) { + + authMethodKeyPresent = TRUE; + + } else if (iscsiAuthClientGetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeChapAlgorithm)) { + + chapAlgorithmKeyPresent = TRUE; + } + } + + iscsiAuthClientInitKeyBlock(&client->sendKeyBlock); + + if (client->nodeType == iscsiAuthNodeTypeTarget) { + + if (authMethodKeyPresent && + client->negotiatedAuthMethod == iscsiAuthOptionReject) { + + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeAuthMethod, + client->rejectOptionName); + + } else if (chapAlgorithmKeyPresent && + client->negotiatedChapAlgorithm == iscsiAuthOptionReject) { + + iscsiAuthClientSetKeyValue( + &client->sendKeyBlock, + iscsiAuthKeyTypeChapAlgorithm, + client->rejectOptionName); + } + } + } + + return authStatus; +} + + +int +iscsiAuthClientRecvBegin(IscsiAuthClient *client) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase == iscsiAuthPhaseError) { + return iscsiAuthStatusError; + } + + if (client->phase == iscsiAuthPhaseDone) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (client->recvInProgressFlag) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->recvInProgressFlag = TRUE; + + if (client->phase == iscsiAuthPhaseConfigure) { + iscsiAuthClientNextPhase(client); + } + + client->transitBitSentFlag = client->sendKeyBlock.transitBit; + + iscsiAuthClientInitKeyBlock(&client->recvKeyBlock); + iscsiAuthClientInitKeyBlock(&client->sendKeyBlock); + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientRecvEnd( + IscsiAuthClient *client, + IscsiAuthClientCallback *callback, + void *userHandle, + void *messageHandle) + +{ + int nextPhaseFlag = FALSE; + + if (!client || client->signature != iscsiAuthClientSignature) { + return 
iscsiAuthStatusError; + } + + if (client->phase == iscsiAuthPhaseError) { + return iscsiAuthStatusError; + } + + if (!callback || !client->recvInProgressFlag) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (client->recvEndCount > iscsiAuthRecvEndMaxCount) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusRecvMessageCountLimit; + + } else if (client->recvKeyBlock.duplicateSet) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusRecvDuplicateSetKeyValue; + + } else if (client->recvKeyBlock.stringTooLong) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusRecvStringTooLong; + + } else if (client->recvKeyBlock.tooMuchData) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusRecvTooMuchData; + } + + client->recvEndCount++; + + client->callback = callback; + client->userHandle = userHandle; + client->messageHandle = messageHandle; + + switch (client->phase) { + + case iscsiAuthPhaseNegotiate: + iscsiAuthClientCheckAuthMethodKey(client); + + if (client->authMethodValidNegRole == iscsiAuthNegRoleResponder) { + + if (client->negotiatedAuthMethod == iscsiAuthOptionNotPresent) { + + if (client->authRemote || !client->recvKeyBlock.transitBit) { + + /* + * No AuthMethod key from peer on first message, + * try moving the process along by sending the + * AuthMethod key. 
+ */ + + client->authMethodValidNegRole = + iscsiAuthNegRoleOriginator; + + iscsiAuthClientSetAuthMethodKey( + client, + client->authMethodValidCount, + client->authMethodValidList); + break; + } + + /* + * Special case if peer sent no AuthMethod key, + * but did set Transit Bit, allowing this side + * to do a null authentication, and complete + * the iSCSI security phase without either side + * sending the AuthMethod key. + */ + + } else { + + /* Send response to AuthMethod key. */ + + iscsiAuthClientSetAuthMethodKey( + client, 1, &client->negotiatedAuthMethod); + } + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + iscsiAuthClientNextPhase(client); + } else { + nextPhaseFlag = TRUE; + } + + } else { + + if (client->negotiatedAuthMethod == iscsiAuthOptionNotPresent) { + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + client->debugStatus = iscsiAuthDebugStatusAuthMethodExpected; + break; + } + + iscsiAuthClientNextPhase(client); + } + break; + + case iscsiAuthPhaseAuthenticate: + case iscsiAuthPhaseDone: + break; + + default: + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + switch (client->phase) { + + case iscsiAuthPhaseNegotiate: + if (nextPhaseFlag) { + iscsiAuthClientNextPhase(client); + } + break; + + case iscsiAuthPhaseAuthenticate: + /* + * Must call iscsiAuthClientLocalAuthentication() + * before iscsiAuthClientRemoteAuthentication() + * to ensure processing of the CHAP algorithm key, + * and to avoid leaving an in progress request to the + * authentication service. + */ + iscsiAuthClientLocalAuthentication(client); + + if (client->localState != iscsiAuthLocalStateError) { + iscsiAuthClientRemoteAuthentication(client); + } + + if (client->localState == iscsiAuthLocalStateError || + client->remoteState == iscsiAuthRemoteStateError) { + + client->remoteAuthStatus = iscsiAuthStatusFail; + client->phase = iscsiAuthPhaseDone; + /* client->debugStatus should already be set. 
*/ + } + break; + + case iscsiAuthPhaseDone: + break; + + default: + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + iscsiAuthClientHandshake(client); + + return iscsiAuthClientRecvEndStatus(client); +} + + +void +iscsiAuthClientAuthResponse(IscsiAuthClient *client, int authStatus) + +{ + iscsiAuthClientGlobalStats.responseReceived++; + + if (!client || client->signature != iscsiAuthClientSignature) { + return; + } + + if (!client->recvInProgressFlag || + client->phase != iscsiAuthPhaseAuthenticate || + client->remoteState != iscsiAuthRemoteStateAuthRequest) { + + client->phase = iscsiAuthPhaseError; + return; + } + + client->remoteAuthStatus = (IscsiAuthStatus)authStatus; + client->authResponseFlag = TRUE; + + iscsiAuthClientRemoteAuthentication(client); + + iscsiAuthClientHandshake(client); + + authStatus = iscsiAuthClientRecvEndStatus(client); + + client->callback( + client->userHandle, client->messageHandle, authStatus); +} + + +const char * +iscsiAuthClientGetKeyName(int keyType) + +{ + if (keyType < iscsiAuthKeyTypeFirst || + keyType > iscsiAuthKeyTypeLast) { + + return 0; + } + + return iscsiAuthClientKeyInfo[keyType].name; +} + + +int +iscsiAuthClientGetNextKeyType(int *pKeyType) + +{ + int keyType = *pKeyType; + + if (keyType >= iscsiAuthKeyTypeLast) { + return iscsiAuthStatusError; + } + + if (keyType < iscsiAuthKeyTypeFirst) { + keyType = iscsiAuthKeyTypeFirst; + } else { + keyType++; + } + + *pKeyType = keyType; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientKeyNameToKeyType(const char *keyName) + +{ + int keyType = iscsiAuthKeyTypeNone; + + while (iscsiAuthClientGetNextKeyType(&keyType) == iscsiAuthStatusNoError) { + const char *keyName2 = iscsiAuthClientGetKeyName(keyType); + + if (!keyName2) { + return iscsiAuthKeyTypeNone; + } + + if (strcmp(keyName, keyName2) == 0) { + return keyType; + } + } + + return iscsiAuthKeyTypeNone; +} + + +int +iscsiAuthClientRecvKeyValue( + IscsiAuthClient *client, int keyType, 
const char *userKeyValue) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseNegotiate && + client->phase != iscsiAuthPhaseAuthenticate) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (keyType < iscsiAuthKeyTypeFirst || + keyType > iscsiAuthKeyTypeLast) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (keyType == iscsiAuthKeyTypeChapChallenge) { + client->recvChapChallenge.length = + iscsiAuthLargeBinaryMaxLength; + client->recvChapChallengeStatus = + iscsiAuthClientTextToData( + userKeyValue, + client->recvChapChallenge.largeBinary, + &client->recvChapChallenge.length); + userKeyValue = ""; + } + + iscsiAuthClientSetKeyValue(&client->recvKeyBlock, keyType, userKeyValue); + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSendKeyValue( + IscsiAuthClient *client, int keyType, int *keyPresent, + char *userKeyValue, unsigned int maxLength) + +{ + const char *keyValue; + + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure && + client->phase != iscsiAuthPhaseNegotiate && + client->phase != iscsiAuthPhaseAuthenticate && + client->phase != iscsiAuthPhaseDone) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (keyType < iscsiAuthKeyTypeFirst || + keyType > iscsiAuthKeyTypeLast) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + keyValue = iscsiAuthClientGetKeyValue(&client->sendKeyBlock, keyType); + if (keyValue) { + if (keyType == iscsiAuthKeyTypeChapChallenge) { + if (iscsiAuthClientDataToText( + client->base64, + client->sendChapChallenge.largeBinary, + client->sendChapChallenge.length, + userKeyValue, maxLength)) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + } else { + if 
(iscsiAuthClientStringCopy(userKeyValue, keyValue, maxLength)) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + } + *keyPresent = TRUE; + } else { + *keyPresent = FALSE; + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientRecvTransitBit( + IscsiAuthClient *client, int value) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseNegotiate && + client->phase != iscsiAuthPhaseAuthenticate) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (value) { + client->recvKeyBlock.transitBit = TRUE; + } else { + client->recvKeyBlock.transitBit = FALSE; + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSendTransitBit( + IscsiAuthClient *client, int *value) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure && + client->phase != iscsiAuthPhaseNegotiate && + client->phase != iscsiAuthPhaseAuthenticate && + client->phase != iscsiAuthPhaseDone) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + *value = client->sendKeyBlock.transitBit; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientInit( + int nodeType, + int bufferDescCount, + IscsiAuthBufferDesc *bufferDesc) + +{ + IscsiAuthClient *client; + IscsiAuthStringBlock *recvStringBlock; + IscsiAuthStringBlock *sendStringBlock; + IscsiAuthLargeBinary *recvChapChallenge; + IscsiAuthLargeBinary *sendChapChallenge; + int valueList[2]; + + if (bufferDescCount != 5 || !bufferDesc) { + return iscsiAuthStatusError; + } + + if (!bufferDesc[0].address || + bufferDesc[0].length != sizeof(*client)) { + return iscsiAuthStatusError; + } + client = (IscsiAuthClient *)bufferDesc[0].address; + + if (!bufferDesc[1].address || + bufferDesc[1].length != sizeof(*recvStringBlock)) { + return iscsiAuthStatusError; + } + 
recvStringBlock = (IscsiAuthStringBlock *)bufferDesc[1].address; + + if (!bufferDesc[2].address || + bufferDesc[2].length != sizeof(*sendStringBlock)) { + return iscsiAuthStatusError; + } + sendStringBlock = (IscsiAuthStringBlock *)bufferDesc[2].address; + + if (!bufferDesc[3].address || + bufferDesc[3].length != sizeof(*recvChapChallenge)) { + return iscsiAuthStatusError; + } + recvChapChallenge = (IscsiAuthLargeBinary *)bufferDesc[3].address; + + if (!bufferDesc[4].address || + bufferDesc[4].length != sizeof(*sendChapChallenge)) { + return iscsiAuthStatusError; + } + sendChapChallenge = (IscsiAuthLargeBinary *)bufferDesc[4].address; + + memset(client, 0, sizeof(*client)); + memset(recvStringBlock, 0, sizeof(*recvStringBlock)); + memset(sendStringBlock, 0, sizeof(*sendStringBlock)); + memset(recvChapChallenge, 0, sizeof(*recvChapChallenge)); + memset(sendChapChallenge, 0, sizeof(*sendChapChallenge)); + + client->recvKeyBlock.stringBlock = recvStringBlock->stringBlock; + client->sendKeyBlock.stringBlock = sendStringBlock->stringBlock; + client->recvChapChallenge.largeBinary = recvChapChallenge->largeBinary; + client->sendChapChallenge.largeBinary = sendChapChallenge->largeBinary; + + if (iscsiAuthClientCheckNodeType(nodeType)) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->signature = iscsiAuthClientSignature; + client->nodeType = (IscsiAuthNodeType)nodeType; + client->authRemote = TRUE; + client->passwordPresent = FALSE; + client->version = iscsiAuthVersionRfc; + client->chapChallengeLength = iscsiAuthChapResponseLength; + client->ipSec = TRUE; + client->base64 = FALSE; + + client->phase = iscsiAuthPhaseConfigure; + client->negotiatedAuthMethod = iscsiAuthOptionNotPresent; + client->negotiatedChapAlgorithm = iscsiAuthOptionNotPresent; + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + client->authMethodNegRole = iscsiAuthNegRoleOriginator; + } else { + /* Initial value ignored for Target. 
*/ + client->authMethodNegRole = iscsiAuthNegRoleResponder; + } + + valueList[0] = iscsiAuthMethodChap; + valueList[1] = iscsiAuthOptionNone; + + /* + * Must call after setting authRemote, password, + * version and authMethodNegRole + */ + if (iscsiAuthClientSetAuthMethodList(client, 2, valueList) != + iscsiAuthStatusNoError) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + valueList[0] = iscsiAuthChapAlgorithmMd5; + + if (iscsiAuthClientSetChapAlgorithmList(client, 1, valueList) != + iscsiAuthStatusNoError) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientFinish(IscsiAuthClient *client) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + iscsiAuthClientChapAuthCancel(client); + + memset(client, 0, sizeof(*client)); + + return iscsiAuthStatusNoError; +} + + +static int +iscsiAuthClientSetOptionList( + IscsiAuthClient *client, + unsigned int optionCount, + const int *optionList, + unsigned int *clientOptionCount, + int *clientOptionList, + unsigned int optionMaxCount, + int (*checkOption)(int), + int (*checkList)(unsigned int optionCount, const int *optionList)) + +{ + unsigned int i; + unsigned int j; + + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure || + optionCount > optionMaxCount) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + for (i = 0; i < optionCount; i++) { + if ((*checkOption)(optionList[i])) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + } + + /* Check for duplicate entries. 
*/ + for (i = 0; i < optionCount; i++) { + for (j = 0; j < optionCount; j++) { + if (j == i) continue; + if (optionList[i] == optionList[j]) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + } + } + + /* Check for key specific constraints. */ + if (checkList) { + if ((*checkList)(optionCount, optionList)) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + } + + for (i = 0; i < optionCount; i++) { + clientOptionList[i] = optionList[i]; + } + + *clientOptionCount = optionCount; + + return iscsiAuthStatusNoError; +} + + +static void +iscsiAuthClientSetAuthMethodValid(IscsiAuthClient *client) + +{ + static const char rejectOptionNameDraft8[] = "reject"; + static const char rejectOptionNameRfc[] = "Reject"; + static const char noneOptionNameDraft8[] = "none"; + static const char noneOptionNameRfc[] = "None"; + unsigned int i; + unsigned int j = 0; + int option = 0; + + if (client->version == iscsiAuthVersionDraft8) { + client->rejectOptionName = rejectOptionNameDraft8; + client->noneOptionName = noneOptionNameDraft8; + } else { + client->rejectOptionName = rejectOptionNameRfc; + client->noneOptionName = noneOptionNameRfc; + } + + /* + * Following checks may need to be revised if + * authentication options other than CHAP and none + * are supported. + */ + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + + if (client->authRemote) { + /* + * If initiator doing authentication, + * don't offer authentication option none. + */ + option = 1; + } else if (!client->passwordPresent) { + /* + * If initiator password not set, + * only offer authentication option none. + */ + option = 2; + } + } + + if (client->nodeType == iscsiAuthNodeTypeTarget) { + + if (client->authRemote) { + /* + * If target doing authentication, + * don't accept authentication option none. + */ + option = 1; + } else { + /* + * If target not doing authentication, + * only accept authentication option none. 
+ */ + option = 2; + } + } + + for (i = 0; i < client->authMethodCount; i++) { + + if (option == 1) { + if (client->authMethodList[i] == iscsiAuthOptionNone) { + continue; + } + } else if (option == 2) { + if (client->authMethodList[i] != iscsiAuthOptionNone) { + continue; + } + } + + client->authMethodValidList[j++] = client->authMethodList[i]; + } + + client->authMethodValidCount = j; + + iscsiAuthClientInitKeyBlock(&client->sendKeyBlock); + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + if (client->authRemote) { + /* + * Initiator wants to authenticate target, + * always send AuthMethod key. + */ + client->sendKeyBlock.transitBit = FALSE; + client->authMethodValidNegRole = iscsiAuthNegRoleOriginator; + } else { + client->sendKeyBlock.transitBit = TRUE; + client->authMethodValidNegRole = client->authMethodNegRole; + } + } else { + client->sendKeyBlock.transitBit = FALSE; + client->authMethodValidNegRole = iscsiAuthNegRoleResponder; + } + + if (client->authMethodValidNegRole == iscsiAuthNegRoleOriginator) { + iscsiAuthClientSetAuthMethodKey( + client, client->authMethodValidCount, client->authMethodValidList); + } else { + int value = iscsiAuthOptionNotPresent; + iscsiAuthClientSetAuthMethodKey(client, 1, &value); + } +} + + +static int +iscsiAuthClientCheckAuthMethodList( + unsigned int optionCount, + const int *optionList) + +{ + unsigned int i; + + if (!optionList || optionCount < 2) { + return TRUE; + } + + if (optionList[optionCount - 1] != iscsiAuthOptionNone) { + return TRUE; + } + + for (i = 0; i < (optionCount - 1); i++) { + if (optionList[i] != iscsiAuthOptionNone) { + return FALSE; + } + } + + return FALSE; +} + + +int +iscsiAuthClientSetAuthMethodList( + IscsiAuthClient *client, + unsigned int optionCount, + const int *optionList) + +{ + int status; + + status = iscsiAuthClientSetOptionList( + client, + optionCount, + optionList, + &client->authMethodCount, + client->authMethodList, + iscsiAuthMethodMaxCount, + 
iscsiAuthClientCheckAuthMethodOption, + iscsiAuthClientCheckAuthMethodList); + + if (status != iscsiAuthStatusNoError) { + return status; + } + + /* Setting authMethod affects authMethodValid. */ + iscsiAuthClientSetAuthMethodValid(client); + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetAuthMethodNegRole(IscsiAuthClient *client, int negRole) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure || + iscsiAuthClientCheckNegRole(negRole) || + client->nodeType != iscsiAuthNodeTypeInitiator) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->authMethodNegRole = (IscsiAuthNegRole)negRole; + + /* Setting negRole affects authMethodValid. */ + iscsiAuthClientSetAuthMethodValid(client); + + return iscsiAuthStatusNoError; +} + + +static int +iscsiAuthClientCheckChapAlgorithmList( + unsigned int optionCount, + const int *optionList) + +{ + if (!optionList || optionCount < 1) { + return TRUE; + } + + return FALSE; +} + + +int +iscsiAuthClientSetChapAlgorithmList( + IscsiAuthClient *client, + unsigned int optionCount, + const int *optionList) + +{ + return iscsiAuthClientSetOptionList( + client, + optionCount, + optionList, + &client->chapAlgorithmCount, + client->chapAlgorithmList, + iscsiAuthChapAlgorithmMaxCount, + iscsiAuthClientCheckChapAlgorithmOption, + iscsiAuthClientCheckChapAlgorithmList); +} + + +int +iscsiAuthClientSetUsername(IscsiAuthClient *client, const char *username) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure || + iscsiAuthClientCheckString(username, iscsiAuthStringMaxLength, 0)) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (iscsiAuthClientStringCopy( + client->username, username, iscsiAuthStringMaxLength)) { + + client->phase = iscsiAuthPhaseError; + 
return iscsiAuthStatusError; + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetPassword( + IscsiAuthClient *client, + const unsigned char *passwordData, + unsigned int passwordLength) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure || + passwordLength > iscsiAuthStringMaxLength) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + memcpy(client->passwordData, passwordData, passwordLength); + client->passwordLength = passwordLength; + client->passwordPresent = TRUE; + + /* Setting password may affect authMethodValid. */ + iscsiAuthClientSetAuthMethodValid(client); + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetAuthRemote(IscsiAuthClient *client, int authRemote) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->authRemote = authRemote; + + /* Setting authRemote may affect authMethodValid. 
*/ + iscsiAuthClientSetAuthMethodValid(client); + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetGlueHandle(IscsiAuthClient *client, void *glueHandle) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure && + client->phase != iscsiAuthPhaseNegotiate && + client->phase != iscsiAuthPhaseAuthenticate) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->glueHandle = glueHandle; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetMethodListName( + IscsiAuthClient *client, const char *methodListName) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure || + iscsiAuthClientCheckString( + methodListName, iscsiAuthStringMaxLength, 0)) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (iscsiAuthClientStringCopy( + client->methodListName, methodListName, iscsiAuthStringMaxLength)) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetVersion(IscsiAuthClient *client, int version) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure || + iscsiAuthClientCheckVersion(version)) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->version = (IscsiAuthVersion)version; + + iscsiAuthClientSetAuthMethodValid(client); + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetIpSec(IscsiAuthClient *client, int ipSec) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + 
client->ipSec = ipSec; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetBase64(IscsiAuthClient *client, int base64) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->base64 = base64; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSetChapChallengeLength( + IscsiAuthClient *client, unsigned int chapChallengeLength) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure || + chapChallengeLength < iscsiAuthChapResponseLength || + chapChallengeLength > iscsiAuthLargeBinaryMaxLength) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + client->chapChallengeLength = chapChallengeLength; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientCheckPasswordNeeded( + IscsiAuthClient *client, int *passwordNeeded) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + if (client->authRemote && !client->passwordPresent) { + *passwordNeeded = TRUE; + } else { + *passwordNeeded = FALSE; + } + } else { + *passwordNeeded = FALSE; + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientGetAuthPhase(IscsiAuthClient *client, int *value) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + *value = client->phase; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientGetAuthStatus(IscsiAuthClient *client, int *value) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + 
+ if (client->phase != iscsiAuthPhaseDone) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + *value = client->remoteAuthStatus; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientAuthStatusPass(int authStatus) + +{ + if (authStatus == iscsiAuthStatusPass) { + return TRUE; + } + + return FALSE; +} + + +int +iscsiAuthClientGetAuthMethod(IscsiAuthClient *client, int *value) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseDone && + client->phase != iscsiAuthPhaseAuthenticate) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + *value = client->negotiatedAuthMethod; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientGetChapAlgorithm(IscsiAuthClient *client, int *value) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseDone) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + *value = client->negotiatedChapAlgorithm; + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientGetChapUsername( + IscsiAuthClient *client, char *value, unsigned int maxLength) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseDone) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (iscsiAuthClientStringCopy(value, client->chapUsername, maxLength)) { + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientSendStatusCode(IscsiAuthClient *client, int *statusCode) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseConfigure && + client->phase != iscsiAuthPhaseNegotiate && + client->phase != 
iscsiAuthPhaseAuthenticate && + client->phase != iscsiAuthPhaseDone) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseDone) { + *statusCode = 0x0000; + return iscsiAuthStatusNoError; + } + + switch (client->remoteAuthStatus) { + + case iscsiAuthStatusPass: + *statusCode = 0x0000; /* no error */ + break; + + case iscsiAuthStatusFail: + switch (client->debugStatus) { + + case iscsiAuthDebugStatusAuthFail: + /* Authentication error with peer. */ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + *statusCode = 0x0300; /* iSCSI Target error */ + } else { + *statusCode = 0x0201; /* iSCSI Initiator error */ + } + break; + + case iscsiAuthDebugStatusAuthMethodExpected: + case iscsiAuthDebugStatusChapAlgorithmExpected: + case iscsiAuthDebugStatusChapIdentifierExpected: + case iscsiAuthDebugStatusChapChallengeExpected: + case iscsiAuthDebugStatusChapResponseExpected: + case iscsiAuthDebugStatusChapUsernameExpected: + /* Missing parameter with peer. */ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + *statusCode = 0x0300; /* iSCSI Target error */ + } else { + *statusCode = 0x0207; /* iSCSI Initiator error */ + } + break; + + case iscsiAuthDebugStatusAuthMethodNotPresent: + case iscsiAuthDebugStatusAuthMethodReject: + case iscsiAuthDebugStatusAuthMethodNone: + case iscsiAuthDebugStatusChapAlgorithmReject: + case iscsiAuthDebugStatusChapChallengeReflected: + case iscsiAuthDebugStatusPasswordIdentical: + /* Could not authenticate with peer. */ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + *statusCode = 0x0300; /* iSCSI Target error */ + } else { + *statusCode = 0x0201; /* iSCSI Initiator error */ + } + break; + + case iscsiAuthDebugStatusLocalPasswordNotSet: + /* Local password not set. 
*/ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + *statusCode = 0x0200; /* iSCSI Initiator error */ + } else { + *statusCode = 0x0201; /* iSCSI Target error */ + } + break; + + case iscsiAuthDebugStatusChapIdentifierBad: + case iscsiAuthDebugStatusChapChallengeBad: + case iscsiAuthDebugStatusChapResponseBad: + case iscsiAuthDebugStatusUnexpectedKeyPresent: + case iscsiAuthDebugStatusTbitSetIllegal: + case iscsiAuthDebugStatusTbitSetPremature: + case iscsiAuthDebugStatusRecvMessageCountLimit: + case iscsiAuthDebugStatusRecvDuplicateSetKeyValue: + case iscsiAuthDebugStatusRecvStringTooLong: + case iscsiAuthDebugStatusRecvTooMuchData: + /* Other error with peer. */ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + *statusCode = 0x0300; /* iSCSI Target error */ + } else { + *statusCode = 0x0200; /* iSCSI Initiator error */ + } + break; + + case iscsiAuthDebugStatusNotSet: + case iscsiAuthDebugStatusAuthPass: + case iscsiAuthDebugStatusAuthRemoteFalse: + case iscsiAuthDebugStatusAuthMethodBad: + case iscsiAuthDebugStatusChapAlgorithmBad: + case iscsiAuthDebugStatusPasswordDecryptFailed: + case iscsiAuthDebugStatusPasswordTooShortWithNoIpSec: + case iscsiAuthDebugStatusAuthServerError: + case iscsiAuthDebugStatusAuthStatusBad: + case iscsiAuthDebugStatusAuthPassNotValid: + case iscsiAuthDebugStatusSendDuplicateSetKeyValue: + case iscsiAuthDebugStatusSendStringTooLong: + case iscsiAuthDebugStatusSendTooMuchData: + default: + /* Error on this side. 
*/ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + *statusCode = 0x0200; /* iSCSI Initiator error */ + } else { + *statusCode = 0x0300; /* iSCSI Target error */ + } + + } + break; + + case iscsiAuthStatusNoError: + case iscsiAuthStatusError: + case iscsiAuthStatusContinue: + case iscsiAuthStatusInProgress: + default: + /* Bad authStatus */ + if (client->nodeType == iscsiAuthNodeTypeInitiator) { + *statusCode = 0x0200; /* iSCSI Initiator error */ + } else { + *statusCode = 0x0300; /* iSCSI Target error */ + } + } + + return iscsiAuthStatusNoError; +} + + +int +iscsiAuthClientGetDebugStatus(IscsiAuthClient *client, int *value) + +{ + if (!client || client->signature != iscsiAuthClientSignature) { + return iscsiAuthStatusError; + } + + if (client->phase != iscsiAuthPhaseDone) { + + client->phase = iscsiAuthPhaseError; + return iscsiAuthStatusError; + } + + *value = client->debugStatus; + + return iscsiAuthStatusNoError; +} + + +const char * +iscsiAuthClientDebugStatusToText(int debugStatus) + +{ + const char *s; + + switch (debugStatus) { + + case iscsiAuthDebugStatusNotSet: + s = "Debug status not set"; + break; + + case iscsiAuthDebugStatusAuthPass: + s = "Authentication request passed"; + break; + + case iscsiAuthDebugStatusAuthRemoteFalse: + s = "Authentication not enabled"; + break; + + case iscsiAuthDebugStatusAuthFail: + s = "Authentication request failed"; + break; + + case iscsiAuthDebugStatusAuthMethodBad: + s = "AuthMethod bad"; + break; + + case iscsiAuthDebugStatusChapAlgorithmBad: + s = "CHAP algorithm bad"; + break; + + case iscsiAuthDebugStatusPasswordDecryptFailed: + s = "Decrypt password failed"; + break; + + case iscsiAuthDebugStatusPasswordTooShortWithNoIpSec: + s = "Local password too short with no IPSec"; + break; + + case iscsiAuthDebugStatusAuthServerError: + s = "Unexpected error from authentication server"; + break; + + case iscsiAuthDebugStatusAuthStatusBad: + s = "Authentication request status bad"; + break; + + case 
iscsiAuthDebugStatusAuthPassNotValid: + s = "Authentication pass status not valid"; + break; + + case iscsiAuthDebugStatusSendDuplicateSetKeyValue: + s = "Same key set more than once on send"; + break; + + case iscsiAuthDebugStatusSendStringTooLong: + s = "Key value too long on send"; + break; + + case iscsiAuthDebugStatusSendTooMuchData: + s = "Too much data on send"; + break; + + case iscsiAuthDebugStatusAuthMethodExpected: + s = "AuthMethod key expected"; + break; + + case iscsiAuthDebugStatusChapAlgorithmExpected: + s = "CHAP algorithm key expected"; + break; + + case iscsiAuthDebugStatusChapIdentifierExpected: + s = "CHAP identifier expected"; + break; + + case iscsiAuthDebugStatusChapChallengeExpected: + s = "CHAP challenge expected"; + break; + + case iscsiAuthDebugStatusChapResponseExpected: + s = "CHAP response expected"; + break; + + case iscsiAuthDebugStatusChapUsernameExpected: + s = "CHAP username expected"; + break; + + case iscsiAuthDebugStatusAuthMethodNotPresent: + s = "AuthMethod key not present"; + break; + + case iscsiAuthDebugStatusAuthMethodReject: + s = "AuthMethod negotiation failed"; + break; + + case iscsiAuthDebugStatusAuthMethodNone: + s = "AuthMethod negotiated to none"; + break; + + case iscsiAuthDebugStatusChapAlgorithmReject: + s = "CHAP algorithm negotiation failed"; + break; + + case iscsiAuthDebugStatusChapChallengeReflected: + s = "CHAP challange reflected"; + break; + + case iscsiAuthDebugStatusPasswordIdentical: + s = "Local password same as remote"; + break; + + case iscsiAuthDebugStatusLocalPasswordNotSet: + s = "Local password not set"; + break; + + case iscsiAuthDebugStatusChapIdentifierBad: + s = "CHAP identifier bad"; + break; + + case iscsiAuthDebugStatusChapChallengeBad: + s = "CHAP challenge bad"; + break; + + case iscsiAuthDebugStatusChapResponseBad: + s = "CHAP response bad"; + break; + + case iscsiAuthDebugStatusUnexpectedKeyPresent: + s = "Unexpected key present"; + break; + + case 
iscsiAuthDebugStatusTbitSetIllegal: + s = "T bit set on response, but not on previous message"; + break; + + case iscsiAuthDebugStatusTbitSetPremature: + s = "T bit set on response, but authenticaton not complete"; + break; + + case iscsiAuthDebugStatusRecvMessageCountLimit: + s = "Message count limit reached on receive"; + break; + + case iscsiAuthDebugStatusRecvDuplicateSetKeyValue: + s = "Same key set more than once on receive"; + break; + + case iscsiAuthDebugStatusRecvStringTooLong: + s = "Key value too long on receive"; + break; + + case iscsiAuthDebugStatusRecvTooMuchData: + s = "Too much data on receive"; + break; + + default: + s = "Unknown error"; + } + + return s; +} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClient.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,394 @@ +#ifndef ISCSIAUTHCLIENT_H +#define ISCSIAUTHCLIENT_H + +/* + * iSCSI connection daemon + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsiAuthClient.h,v 1.13 2003/02/14 21:54:53 smferris Exp $ + */ + +/* + * This file is the include file for for iscsiAuthClient.c + */ + + +#ifdef __cplusplus +extern "C" { +#endif + + +enum {iscsiAuthStringMaxLength = 256}; +enum {iscsiAuthStringBlockMaxLength = 1024}; +enum {iscsiAuthLargeBinaryMaxLength = 1024}; + +enum {iscsiAuthRecvEndMaxCount = 10}; + +enum {iscsiAuthClientSignature = 0x5984B2E3}; + +enum {iscsiAuthChapResponseLength = 16}; + +/* + * Note: The ordering of these values are chosen to match + * the ordering of the keys as shown in the iSCSI spec. + * The table IscsiAuthClientKeyInfo in iscsiAuthClient.c + * must also match this order. + */ +enum iscsiAuthKeyType_t { + iscsiAuthKeyTypeNone = -1, + iscsiAuthKeyTypeFirst = 0, + iscsiAuthKeyTypeAuthMethod = iscsiAuthKeyTypeFirst, + iscsiAuthKeyTypeChapAlgorithm, + iscsiAuthKeyTypeChapUsername, + iscsiAuthKeyTypeChapResponse, + iscsiAuthKeyTypeChapIdentifier, + iscsiAuthKeyTypeChapChallenge, + iscsiAuthKeyTypeMaxCount, + iscsiAuthKeyTypeLast = iscsiAuthKeyTypeMaxCount - 1 +}; +typedef enum iscsiAuthKeyType_t IscsiAuthKeyType; + +enum { + /* Common options for all keys. */ + iscsiAuthOptionReject = -2, + iscsiAuthOptionNotPresent = -1, + iscsiAuthOptionNone = 1, + + iscsiAuthMethodChap = 2, + iscsiAuthMethodMaxCount = 2, + + iscsiAuthChapAlgorithmMd5 = 5, + iscsiAuthChapAlgorithmMaxCount = 2 +}; + +enum iscsiAuthNegRole_t { + iscsiAuthNegRoleOriginator = 1, + iscsiAuthNegRoleResponder = 2 +}; +typedef enum iscsiAuthNegRole_t IscsiAuthNegRole; + +/* + * Note: These values are chosen to map to the values sent + * in the iSCSI header. 
+ */ +enum iscsiAuthVersion_t { + iscsiAuthVersionDraft8 = 2, + iscsiAuthVersionRfc = 0 +}; +typedef enum iscsiAuthVersion_t IscsiAuthVersion; + +enum iscsiAuthStatus_t { + iscsiAuthStatusNoError = 0, + iscsiAuthStatusError, + iscsiAuthStatusPass, + iscsiAuthStatusFail, + iscsiAuthStatusContinue, + iscsiAuthStatusInProgress +}; +typedef enum iscsiAuthStatus_t IscsiAuthStatus; + +enum iscsiAuthDebugStatus_t { + iscsiAuthDebugStatusNotSet = 0, + + iscsiAuthDebugStatusAuthPass, + iscsiAuthDebugStatusAuthRemoteFalse, + + iscsiAuthDebugStatusAuthFail, + + iscsiAuthDebugStatusAuthMethodBad, + iscsiAuthDebugStatusChapAlgorithmBad, + iscsiAuthDebugStatusPasswordDecryptFailed, + iscsiAuthDebugStatusPasswordTooShortWithNoIpSec, + iscsiAuthDebugStatusAuthServerError, + iscsiAuthDebugStatusAuthStatusBad, + iscsiAuthDebugStatusAuthPassNotValid, + iscsiAuthDebugStatusSendDuplicateSetKeyValue, + iscsiAuthDebugStatusSendStringTooLong, + iscsiAuthDebugStatusSendTooMuchData, + + iscsiAuthDebugStatusAuthMethodExpected, + iscsiAuthDebugStatusChapAlgorithmExpected, + iscsiAuthDebugStatusChapIdentifierExpected, + iscsiAuthDebugStatusChapChallengeExpected, + iscsiAuthDebugStatusChapResponseExpected, + iscsiAuthDebugStatusChapUsernameExpected, + + iscsiAuthDebugStatusAuthMethodNotPresent, + iscsiAuthDebugStatusAuthMethodReject, + iscsiAuthDebugStatusAuthMethodNone, + iscsiAuthDebugStatusChapAlgorithmReject, + iscsiAuthDebugStatusChapChallengeReflected, + iscsiAuthDebugStatusPasswordIdentical, + + iscsiAuthDebugStatusLocalPasswordNotSet, + + iscsiAuthDebugStatusChapIdentifierBad, + iscsiAuthDebugStatusChapChallengeBad, + iscsiAuthDebugStatusChapResponseBad, + iscsiAuthDebugStatusUnexpectedKeyPresent, + iscsiAuthDebugStatusTbitSetIllegal, + iscsiAuthDebugStatusTbitSetPremature, + + iscsiAuthDebugStatusRecvMessageCountLimit, + iscsiAuthDebugStatusRecvDuplicateSetKeyValue, + iscsiAuthDebugStatusRecvStringTooLong, + iscsiAuthDebugStatusRecvTooMuchData +}; +typedef enum iscsiAuthDebugStatus_t 
IscsiAuthDebugStatus; + +enum iscsiAuthNodeType_t { + iscsiAuthNodeTypeInitiator = 1, + iscsiAuthNodeTypeTarget = 2 +}; +typedef enum iscsiAuthNodeType_t IscsiAuthNodeType; + +enum iscsiAuthPhase_t { + iscsiAuthPhaseConfigure = 1, + iscsiAuthPhaseNegotiate, + iscsiAuthPhaseAuthenticate, + iscsiAuthPhaseDone, + iscsiAuthPhaseError +}; +typedef enum iscsiAuthPhase_t IscsiAuthPhase; + +enum iscsiAuthLocalState_t { + iscsiAuthLocalStateSendAlgorithm = 1, + iscsiAuthLocalStateRecvAlgorithm, + iscsiAuthLocalStateRecvChallenge, + iscsiAuthLocalStateDone, + iscsiAuthLocalStateError +}; +typedef enum iscsiAuthLocalState_t IscsiAuthLocalState; + +enum iscsiAuthRemoteState_t { + iscsiAuthRemoteStateSendAlgorithm = 1, + iscsiAuthRemoteStateSendChallenge, + iscsiAuthRemoteStateRecvResponse, + iscsiAuthRemoteStateAuthRequest, + iscsiAuthRemoteStateDone, + iscsiAuthRemoteStateError +}; +typedef enum iscsiAuthRemoteState_t IscsiAuthRemoteState; + + +typedef void IscsiAuthClientCallback(void *, void *, int); + + +struct iscsiAuthClientGlobalStats_t { + unsigned long requestSent; + unsigned long responseReceived; +}; +typedef struct iscsiAuthClientGlobalStats_t IscsiAuthClientGlobalStats; + +struct iscsiAuthBufferDesc_t { + unsigned int length; + void *address; +}; +typedef struct iscsiAuthBufferDesc_t IscsiAuthBufferDesc; + +struct iscsiAuthKey_t { + unsigned int present: 1; + unsigned int processed: 1; + unsigned int valueSet: 1; + char *string; +}; +typedef struct iscsiAuthKey_t IscsiAuthKey; + +struct iscsiAuthLargeBinaryKey_t { + unsigned int length; + unsigned char *largeBinary; +}; +typedef struct iscsiAuthLargeBinaryKey_t IscsiAuthLargeBinaryKey; + +struct iscsiAuthKeyBlock_t { + unsigned int transitBit: 1; + unsigned int duplicateSet: 1; + unsigned int stringTooLong: 1; + unsigned int tooMuchData: 1; + unsigned int blockLength: 16; + char *stringBlock; + IscsiAuthKey key[iscsiAuthKeyTypeMaxCount]; +}; +typedef struct iscsiAuthKeyBlock_t IscsiAuthKeyBlock; + +struct 
iscsiAuthStringBlock_t { + char stringBlock[iscsiAuthStringBlockMaxLength]; +}; +typedef struct iscsiAuthStringBlock_t IscsiAuthStringBlock; + +struct iscsiAuthLargeBinary_t { + unsigned char largeBinary[iscsiAuthLargeBinaryMaxLength]; +}; +typedef struct iscsiAuthLargeBinary_t IscsiAuthLargeBinary; + +struct iscsiAuthClient_t { + unsigned long signature; + + void *glueHandle; + struct iscsiAuthClient_t *next; + unsigned int authRequestId; + + IscsiAuthNodeType nodeType; + unsigned int authMethodCount; + int authMethodList[iscsiAuthMethodMaxCount]; + IscsiAuthNegRole authMethodNegRole; + unsigned int chapAlgorithmCount; + int chapAlgorithmList[iscsiAuthChapAlgorithmMaxCount]; + int authRemote; + char username[iscsiAuthStringMaxLength]; + int passwordPresent; + unsigned int passwordLength; + unsigned char passwordData[iscsiAuthStringMaxLength]; + char methodListName[iscsiAuthStringMaxLength]; + IscsiAuthVersion version; + unsigned int chapChallengeLength; + int ipSec; + int base64; + + unsigned int authMethodValidCount; + int authMethodValidList[iscsiAuthMethodMaxCount]; + int authMethodValidNegRole; + const char *rejectOptionName; + const char *noneOptionName; + + int recvInProgressFlag; + int recvEndCount; + IscsiAuthClientCallback *callback; + void *userHandle; + void *messageHandle; + + IscsiAuthPhase phase; + IscsiAuthLocalState localState; + IscsiAuthRemoteState remoteState; + IscsiAuthStatus remoteAuthStatus; + IscsiAuthDebugStatus debugStatus; + int negotiatedAuthMethod; + int negotiatedChapAlgorithm; + int authResponseFlag; + int authServerErrorFlag; + int transitBitSentFlag; + + unsigned int sendChapIdentifier; + IscsiAuthLargeBinaryKey sendChapChallenge; + char chapUsername[iscsiAuthStringMaxLength]; + + int recvChapChallengeStatus; + IscsiAuthLargeBinaryKey recvChapChallenge; + + char scratchKeyValue[iscsiAuthStringMaxLength]; + + IscsiAuthKeyBlock recvKeyBlock; + IscsiAuthKeyBlock sendKeyBlock; +}; +typedef struct iscsiAuthClient_t IscsiAuthClient; + + 
+#ifdef __cplusplus +} +#endif + + +#include "iscsiAuthClientGlue.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +extern IscsiAuthClientGlobalStats iscsiAuthClientGlobalStats; + + +extern int iscsiAuthClientInit(int, int, IscsiAuthBufferDesc *); +extern int iscsiAuthClientFinish(IscsiAuthClient *); + +extern int iscsiAuthClientRecvBegin(IscsiAuthClient *); +extern int iscsiAuthClientRecvEnd( + IscsiAuthClient *, IscsiAuthClientCallback *, void *, void *); + +extern const char *iscsiAuthClientGetKeyName(int); +extern int iscsiAuthClientGetNextKeyType(int *); +extern int iscsiAuthClientKeyNameToKeyType(const char *); +extern int iscsiAuthClientRecvKeyValue(IscsiAuthClient *, int, const char *); +extern int iscsiAuthClientSendKeyValue( + IscsiAuthClient *, int, int *, char *, unsigned int); +extern int iscsiAuthClientRecvTransitBit(IscsiAuthClient *, int); +extern int iscsiAuthClientSendTransitBit(IscsiAuthClient *, int *); + +extern int iscsiAuthClientSetAuthMethodList( + IscsiAuthClient *, unsigned int, const int *); +extern int iscsiAuthClientSetAuthMethodNegRole( + IscsiAuthClient *, int); +extern int iscsiAuthClientSetChapAlgorithmList( + IscsiAuthClient *, unsigned int, const int *); +extern int iscsiAuthClientSetUsername(IscsiAuthClient *, const char *); +extern int iscsiAuthClientSetPassword( + IscsiAuthClient *, const unsigned char *, unsigned int); +extern int iscsiAuthClientSetAuthRemote(IscsiAuthClient *, int); +extern int iscsiAuthClientSetGlueHandle(IscsiAuthClient *, void *); +extern int iscsiAuthClientSetMethodListName(IscsiAuthClient *, const char *); +extern int iscsiAuthClientSetIpSec(IscsiAuthClient *, int); +extern int iscsiAuthClientSetBase64(IscsiAuthClient *, int); +extern int iscsiAuthClientSetChapChallengeLength( + IscsiAuthClient *, unsigned int); +extern int iscsiAuthClientSetVersion(IscsiAuthClient *, int); +extern int iscsiAuthClientCheckPasswordNeeded(IscsiAuthClient *, int *); + +extern int 
iscsiAuthClientGetAuthPhase(IscsiAuthClient *, int *); +extern int iscsiAuthClientGetAuthStatus(IscsiAuthClient *, int *); +extern int iscsiAuthClientAuthStatusPass(int); +extern int iscsiAuthClientGetAuthMethod(IscsiAuthClient *, int *); +extern int iscsiAuthClientGetChapAlgorithm(IscsiAuthClient *, int *); +extern int iscsiAuthClientGetChapUsername( + IscsiAuthClient *, char *, unsigned int); + +extern int iscsiAuthClientSendStatusCode(IscsiAuthClient *, int *); +extern int iscsiAuthClientGetDebugStatus(IscsiAuthClient *, int *); +extern const char *iscsiAuthClientDebugStatusToText(int); + +/* + * The following is called by platform dependent code. + */ +extern void iscsiAuthClientAuthResponse(IscsiAuthClient *, int); + +/* + * The following routines are considered platform dependent, + * and need to be implemented for use by iscsiAuthClient.c. + */ + +extern int iscsiAuthClientChapAuthRequest( + IscsiAuthClient *, char *, unsigned int, + unsigned char *, unsigned int, unsigned char *, unsigned int); +extern void iscsiAuthClientChapAuthCancel(IscsiAuthClient *); + +extern int iscsiAuthClientTextToNumber(const char *, unsigned long *); +extern void iscsiAuthClientNumberToText(unsigned long, char *, unsigned int); + +extern void iscsiAuthRandomSetData(unsigned char *, unsigned int); +extern void iscsiAuthMd5Init(IscsiAuthMd5Context *); +extern void iscsiAuthMd5Update( + IscsiAuthMd5Context *, unsigned char *, unsigned int); +extern void iscsiAuthMd5Final(unsigned char *, IscsiAuthMd5Context *); + +extern int iscsiAuthClientData( + unsigned char *, unsigned int *, unsigned char *, unsigned int); + + +#ifdef __cplusplus +} +#endif + +#endif /* #ifndef ISCSIAUTHCLIENT_H */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.c 1970-01-01 01:00:00.000000000 +0100 +++ 
linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.c 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,193 @@ +/* + * iSCSI connection daemon + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsiAuthClientGlue.c,v 1.12 2003/01/21 15:34:30 smferris Exp $ + * + */ + + +#include "iscsiAuthClient.h" +#include "iscsi-platform.h" +#include "iscsi-protocol.h" +#include "iscsi-session.h" + +/* + * Authenticate a target's CHAP response. 
+ */ +int +iscsiAuthClientChapAuthRequest( + IscsiAuthClient *client, + char *username, unsigned int id, + unsigned char *challengeData, unsigned int challengeLength, + unsigned char *responseData, unsigned int responseLength) +{ + iscsi_session_t *session = (iscsi_session_t *)client->userHandle; + IscsiAuthMd5Context context; + unsigned char verifyData[16]; + + if (session == NULL) { + return iscsiAuthStatusFail; + } + + /* the expected credentials are in the session */ + if (session->username_in == NULL) { + logmsg(AS_ERROR, "failing authentication, no incoming username configured to authenticate target %s\n", session->TargetName); + return iscsiAuthStatusFail; + } + if (iscsi_strcmp(username, session->username_in) != 0) { + logmsg(AS_ERROR, "failing authentication, received incorrect username from target %s\n", session->TargetName); + return iscsiAuthStatusFail; + } + + if ((session->password_length_in < 1) || (session->password_in == NULL) || (session->password_in[0] == '\0')) { + logmsg(AS_ERROR, "failing authentication, no incoming password configured to authenticate target %s\n", session->TargetName); + return iscsiAuthStatusFail; + } + + /* challenge length is I->T, and shouldn't need to be checked */ + + if (responseLength != sizeof(verifyData)) { + logmsg(AS_ERROR, "failing authentication, received incorrect CHAP response length %u from target %s\n", + responseLength, session->TargetName); + return iscsiAuthStatusFail; + } + + iscsiAuthMd5Init(&context); + + /* id byte */ + verifyData[0] = id; + iscsiAuthMd5Update(&context, verifyData, 1); + + /* shared secret */ + iscsiAuthMd5Update(&context, (unsigned char *)session->password_in, session->password_length_in); + + /* challenge value */ + iscsiAuthMd5Update(&context, (unsigned char *)challengeData, challengeLength); + + iscsiAuthMd5Final(verifyData, &context); + + if (iscsi_memcmp(responseData, verifyData, sizeof(verifyData)) == 0) { + debugmsg(1, "initiator authenticated target %s\n", 
session->TargetName); + return iscsiAuthStatusPass; + } + + logmsg(AS_ERROR, "failing authentication, received incorrect CHAP response from target %s\n", session->TargetName); + return iscsiAuthStatusFail; +} + + +void +iscsiAuthClientChapAuthCancel(IscsiAuthClient *client) +{ +} + + +int +iscsiAuthClientTextToNumber(const char *text, unsigned long *pNumber) +{ + char *pEnd; + unsigned long number; + + if (text[0] == '0' && (text[1] == 'x' || text[1] == 'X')) { + number = iscsi_strtoul(text + 2, &pEnd, 16); + } else { + number = iscsi_strtoul(text, &pEnd, 10); + } + + if (*text != '\0' && *pEnd == '\0') { + *pNumber = number; + return 0; /* No error */ + } else { + return 1; /* Error */ + } +} + +void +iscsiAuthClientNumberToText(unsigned long number, char *text, unsigned int length) +{ + iscsi_sprintf(text, "%lu", number); +} + + +void +iscsiAuthRandomSetData(unsigned char *data, unsigned int length) + +{ +#if defined(LINUX) && defined(__KERNEL__) + get_random_bytes(data, length); +#else + long r; + unsigned n; + + while (length > 0) { + + r = rand(); + r = r ^ (r >> 8); + r = r ^ (r >> 4); + n = r & 0x7; + + r = rand(); + r = r ^ (r >> 8); + r = r ^ (r >> 5); + n = (n << 3) | (r & 0x7); + + r = rand(); + r = r ^ (r >> 8); + r = r ^ (r >> 5); + n = (n << 2) | (r & 0x3); + + *data++ = n; + length--; + } +#endif +} + + +void +iscsiAuthMd5Init(IscsiAuthMd5Context *context) +{ + MD5Init(context); +} + + +void +iscsiAuthMd5Update( + IscsiAuthMd5Context *context, unsigned char *data, unsigned int length) +{ + MD5Update(context, data, length); +} + + +void +iscsiAuthMd5Final(unsigned char *hash, IscsiAuthMd5Context *context) +{ + MD5Final(hash, context); +} + + +int +iscsiAuthClientData( + unsigned char *outData, unsigned int *outLength, + unsigned char *inData, unsigned int inLength) +{ + if (*outLength < inLength) return 1; /* error */ + + memcpy(outData, inData, inLength); + *outLength = inLength; + + return 0; /* no error */ +} diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsiAuthClientGlue.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,44 @@ +/* + * iSCSI connection daemon + * Copyright (C) 2001 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * $Id: iscsiAuthClientGlue.h,v 1.6 2003/01/21 15:31:51 smferris Exp $ + * + */ + +#ifndef ISCSIAUTHCLIENTGLUE_H +#define ISCSIAUTHCLIENTGLUE_H + +#include "iscsi-platform.h" +#include "md5.h" + +typedef struct MD5Context IscsiAuthMd5Context; + + +#ifdef __cplusplus +extern "C" { +#endif + + +extern int iscsiAuthIscsiServerHandle; +extern int iscsiAuthIscsiClientHandle; + + +#ifdef __cplusplus +} +#endif + +#endif /* #ifndef ISCSIAUTHCLIENTGLUE_H */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsid.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsid.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/iscsid.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/iscsid.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,141 @@ +#ifndef ISCSID_H_ +#define ISCSID_H_ + +/* + * iSCSI driver for Linux + * Copyright (C) 2002 Cisco Systems, Inc. + * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * $Id: iscsid.h,v 1.18 2003/01/20 23:23:33 smferris Exp $ + * + * iscsid.h + * + * Main include for iSCSI daemon + * + */ + +#ifndef MIN +# define MIN(x,y) ((x) < (y) ? (x) : (y)) +#endif + +#ifndef MAX +# define MAX(x,y) ((x) >= (y) ? 
(x) : (y)) +#endif + +/* header plus alignment plus max login pdu size + pad */ +#define ISCSI_LOGIN_BUFFER_SIZE ((2 * sizeof(struct IscsiHdr)) + 4096 + 4) + +#include "iscsi-config.h" + +/* structures representing processes that the main daemon has started */ +typedef struct iscsi_session_process { + struct iscsi_session_process *volatile next; + struct iscsi_session_process *volatile prev; + struct iscsi_session_config *volatile config; + unsigned long config_number; + pid_t pid; + volatile int remove; /* kill and remove this from the list at the next opportunity */ + volatile int restart; /* restart if the pid is 0 */ + volatile int failed; /* process failed, don't restart unless the user reloads the config */ + volatile int lun_inventory_changed; /* the session's LUNs may have changed */ +} iscsi_session_process_t; + +typedef struct iscsi_discovery_process { + struct iscsi_discovery_process *volatile prev; + struct iscsi_discovery_process *volatile next; + iscsi_config_entry_t *entry; + pid_t pid; + int order; + int pipe_fd; + int in_progress; + volatile int remove; /* kill and remove this from the list at the next opportunity */ + volatile int restart; /* restart if the pid is 0 */ +} iscsi_discovery_process_t; + +typedef struct iscsi_discovery_process_list { + iscsi_discovery_process_t *volatile head; + iscsi_discovery_process_t *volatile tail; + volatile int changed; + int count; +} iscsi_discovery_process_list_t; + +typedef struct iscsi_session_process_list { + iscsi_session_process_t *volatile head; + iscsi_session_process_t *volatile tail; + volatile int changed; + volatile int lun_inventory_changed; + int count; +} iscsi_session_process_list_t; + +struct iscsi_target_config; + +typedef struct iscsi_target { + struct iscsi_target *next; + + char *TargetName; + + struct iscsi_target_config *new_config; + struct iscsi_target_config *current_config; + + iscsi_portal_descriptor_t *new_portals; /* one or more portals referenced by the new_config */ + 
iscsi_portal_descriptor_t *current_portals; /* one or more portals referenced by the current_config */ + + /* used for tracking which process gets to propagate auth credentials to this target, to avoid flapping */ + iscsi_discovery_process_t *discovery; + int discovered; + +} iscsi_target_t; + +typedef struct iscsi_target_list { + iscsi_target_t *volatile head; + iscsi_target_t *volatile tail; + volatile int lun_inventory_changed; + volatile int check_configs; +} iscsi_target_list_t; + +/* daemon config */ +typedef struct iscsi_daemon_config { + char *config_file; + char *pid_file; + char *bindings_file; + char *initiator_name_file; + char *slp_program; + + char *initiator_name; + char *initiator_alias; + + int debug_level; /* for use by debugmsg */ + int foreground; /* if non-zero, the main process will remain in the foreground, + * instead of daemonizing itself. + */ +} iscsi_daemon_config_t; + +/* let everything access the daemon config */ +extern iscsi_daemon_config_t daemon_config; + +/* handling session processes */ +extern void add_session_process(iscsi_session_process_list_t *list, iscsi_session_process_t *process); +extern void remove_session_process(iscsi_session_process_list_t *list, iscsi_session_process_t *process); +extern void free_session_process(iscsi_session_process_t *process); + + +/* exit code for session processes that failed, but shouldn't be retried */ +#define ISCSI_SESSION_FAILED_NO_RETRY 100 + +#endif + + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/md5.c linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/md5.c --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/md5.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/md5.c 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,236 @@ +/* + * This code implements the MD5 message-digest algorithm. + * The algorithm is due to Ron Rivest. This code was + * written by Colin Plumb in 1993, no copyright is claimed. 
+ * This code is in the public domain; do with it what you wish. + * + * Equivalent code is available from RSA Data Security, Inc. + * This code has been tested against that, and is equivalent, + * except that you don't need to include two pages of legalese + * with every copy. + * + * To compute the message digest of a chunk of bytes, declare an + * MD5Context structure, pass it to MD5Init, call MD5Update as + * needed on buffers full of bytes, and then call MD5Final, which + * will fill a supplied 16-byte array with the digest. + * + * Changed so as no longer to depend on Colin Plumb's `usual.h' header + * definitions; now uses stuff from dpkg's config.h. + * - Ian Jackson . + * Still in the public domain. + */ + +#include "md5.h" + +#ifdef WORDS_BIGENDIAN +void +byteSwap(UWORD32 *buf, unsigned words) +{ + md5byte *p = (md5byte *)buf; + + do { + *buf++ = (UWORD32)((unsigned)p[3] << 8 | p[2]) << 16 | + ((unsigned)p[1] << 8 | p[0]); + p += 4; + } while (--words); +} +#else +#define byteSwap(buf,words) +#endif + +/* + * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious + * initialization constants. + */ +void +MD5Init(struct MD5Context *ctx) +{ + ctx->buf[0] = 0x67452301; + ctx->buf[1] = 0xefcdab89; + ctx->buf[2] = 0x98badcfe; + ctx->buf[3] = 0x10325476; + + ctx->bytes[0] = 0; + ctx->bytes[1] = 0; +} + +/* + * Update context to reflect the concatenation of another buffer full + * of bytes. 
+ */ +void +MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) +{ + UWORD32 t; + + /* Update byte count */ + + t = ctx->bytes[0]; + if ((ctx->bytes[0] = t + len) < t) + ctx->bytes[1]++; /* Carry from low to high */ + + t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */ + if (t > len) { + memcpy((md5byte *)ctx->in + 64 - t, buf, len); + return; + } + /* First chunk is an odd size */ + memcpy((md5byte *)ctx->in + 64 - t, buf, t); + byteSwap(ctx->in, 16); + MD5Transform(ctx->buf, ctx->in); + buf += t; + len -= t; + + /* Process data in 64-byte chunks */ + while (len >= 64) { + memcpy(ctx->in, buf, 64); + byteSwap(ctx->in, 16); + MD5Transform(ctx->buf, ctx->in); + buf += 64; + len -= 64; + } + + /* Handle any remaining bytes of data. */ + memcpy(ctx->in, buf, len); +} + +/* + * Final wrapup - pad to 64-byte boundary with the bit pattern + * 1 0* (64-bit count of bits processed, MSB-first) + */ +void +MD5Final(md5byte digest[16], struct MD5Context *ctx) +{ + int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */ + md5byte *p = (md5byte *)ctx->in + count; + + /* Set the first char of padding to 0x80. There is always room. 
*/ + *p++ = 0x80; + + /* Bytes of padding needed to make 56 bytes (-8..55) */ + count = 56 - 1 - count; + + if (count < 0) { /* Padding forces an extra block */ + memset(p, 0, count + 8); + byteSwap(ctx->in, 16); + MD5Transform(ctx->buf, ctx->in); + p = (md5byte *)ctx->in; + count = 56; + } + memset(p, 0, count); + byteSwap(ctx->in, 14); + + /* Append length in bits and transform */ + ctx->in[14] = ctx->bytes[0] << 3; + ctx->in[15] = ctx->bytes[1] << 3 | ctx->bytes[0] >> 29; + MD5Transform(ctx->buf, ctx->in); + + byteSwap(ctx->buf, 4); + memcpy(digest, ctx->buf, 16); + memset(ctx, 0, sizeof(ctx)); /* In case it's sensitive */ +} + +#ifndef ASM_MD5 + +/* The four core functions - F1 is optimized somewhat */ + +/* #define F1(x, y, z) (x & y | ~x & z) */ +#define F1(x, y, z) (z ^ (x & (y ^ z))) +#define F2(x, y, z) F1(z, x, y) +#define F3(x, y, z) (x ^ y ^ z) +#define F4(x, y, z) (y ^ (x | ~z)) + +/* This is the central step in the MD5 algorithm. */ +#define MD5STEP(f,w,x,y,z,in,s) \ + (w += f(x,y,z) + in, w = (w<<s | w>>(32-s)) + x) + +/* + * The core of the MD5 algorithm, this alters an existing MD5 hash to + * reflect the addition of 16 longwords of new data. MD5Update blocks + * the data and converts bytes into longwords for this routine. 
+ */ +void +MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) +{ + register UWORD32 a, b, c, d; + + a = buf[0]; + b = buf[1]; + c = buf[2]; + d = buf[3]; + + MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); + MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); + MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); + MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); + MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); + MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); + MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); + MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); + MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); + MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); + MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); + MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); + MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); + MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); + MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); + MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); + + MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); + MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); + MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); + MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); + MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); + MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); + MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); + MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); + MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); + MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); + MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); + MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); + MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); + MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); + MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); + MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); + + MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); + MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); + MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); + MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); + 
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); + MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); + MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); + MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); + MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); + MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); + MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); + MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); + MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); + MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); + MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); + MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); + + MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); + MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); + MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); + MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); + MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); + MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); + MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); + MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); + MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); + MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); + MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); + MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); + MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); + MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); + MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); + MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); + + buf[0] += a; + buf[1] += b; + buf[2] += c; + buf[3] += d; +} + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/md5.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/md5.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/md5.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/md5.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,55 @@ +/* + * This is the header file for the MD5 message-digest algorithm. + * The algorithm is due to Ron Rivest. 
This code was + * written by Colin Plumb in 1993, no copyright is claimed. + * This code is in the public domain; do with it what you wish. + * + * Equivalent code is available from RSA Data Security, Inc. + * This code has been tested against that, and is equivalent, + * except that you don't need to include two pages of legalese + * with every copy. + * + * To compute the message digest of a chunk of bytes, declare an + * MD5Context structure, pass it to MD5Init, call MD5Update as + * needed on buffers full of bytes, and then call MD5Final, which + * will fill a supplied 16-byte array with the digest. + * + * Changed so as no longer to depend on Colin Plumb's `usual.h' + * header definitions; now uses stuff from dpkg's config.h + * - Ian Jackson . + * Still in the public domain. + */ + +#ifndef MD5_H +#define MD5_H + +/* for uint32_t, WORDS_BIGENDIAN */ +#include "iscsi-platform.h" + + +typedef uint32_t UWORD32; + +#ifdef __cplusplus +extern "C" { +#endif + + +#define md5byte unsigned char + +struct MD5Context { + UWORD32 buf[4]; + UWORD32 bytes[2]; + UWORD32 in[16]; +}; + +void MD5Init(struct MD5Context *context); +void MD5Update(struct MD5Context *context, md5byte const *buf, unsigned len); +void MD5Final(unsigned char digest[16], struct MD5Context *context); +void MD5Transform(UWORD32 buf[4], UWORD32 const in[16]); + + +#ifdef __cplusplus +} +#endif + +#endif /* !MD5_H */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/version.h linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/version.h --- linux-2.4.20-wolk4.8-fullkernel/drivers/scsi/iscsi-new/version.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/drivers/scsi/iscsi-new/version.h 2003-08-25 20:35:59.000000000 +0200 @@ -0,0 +1,51 @@ +/* + * iSCSI driver for Linux + * Copyright (C) 2001 Cisco Systems, Inc. 
+ * maintained by linux-iscsi@cisco.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + * + * + * $Id: iscsi-version.h,v 1.26.6.3 2003/08/22 10:38:53 naveenb Exp $ + * + * controls the version number printed by the iSCSI driver + * + */ + +#define DRIVER_MAJOR_VERSION 3 +#define DRIVER_MINOR_VERSION 4 +#define DRIVER_PATCH_VERSION 0 +#define DRIVER_INTERNAL_VERSION 3 + +/* DRIVER_EXTRAVERSION is intended to be customized by Linux + * distributors, similar to the kernel Makefile's EXTRAVERSION. This + * string will be appended to all version numbers displayed by the + * driver. RPMs that patch the driver are encouraged to also patch + * this string to indicate to users that the driver has been patched, + * and may behave differently than a driver tarball from SourceForge. + */ + +#define DRIVER_EXTRAVERSION "" + +#define ISCSI_DATE "22-Aug-2003" + +/* Distributors may also set BUILD_STR to a string, which will be + * logged by the kernel module after it loads and displays the version + * number. It is currently used as part of the driver development + * process, to mark tarballs built by developers containing code + * not yet checked into CVS. Publically available tarballs on + * SourceForge should always have BUILD_STR set to NULL, since + * all code should be checked in prior to making a public release. 
+ */ + +#define BUILD_STR NULL diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/Config.in linux-2.4.20-wolk4.9-fullkernel/fs/Config.in --- linux-2.4.20-wolk4.8-fullkernel/fs/Config.in 2003-08-25 18:27:04.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/Config.in 2003-08-26 17:59:08.000000000 +0200 @@ -157,6 +157,8 @@ if [ "$CONFIG_NET" = "y" ]; then define_bool CONFIG_NCPFS_NLS n fi + dep_tristate 'DAV file system support (Web-based Distributed Authoring and Versioning)' CONFIG_DAV_FS $CONFIG_INET + dep_tristate 'Oracle Cluster Filesystem (OCFS)' CONFIG_OCFS_FS $CONFIG_INET dep_tristate 'AFS distributed file system support' CONFIG_AFS_FS $CONFIG_EXPERIMENTAL @@ -166,7 +168,7 @@ if [ "$CONFIG_NET" = "y" ]; then dep_tristate 'Secure SHell Filesystem support (shfs/sshfs)' CONFIG_SH_FS $CONFIG_EXPERIMENTAL $CONFIG_INET if [ "$CONFIG_SH_FS" != "n" ]; then choice ' Debug Level' \ - "DISABLED CONFIG_SH_FS_DEBUG0 + "DISABLED CONFIG_SH_FS_DEBUG0 \ VERBOSE CONFIG_SH_FS_DEBUG1 \ ALLOC_DEBUG CONFIG_SH_FS_DEBUG2 \ DEBUG CONFIG_SH_FS_DEBUG3" DISABLED diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/Makefile linux-2.4.20-wolk4.9-fullkernel/fs/Makefile --- linux-2.4.20-wolk4.8-fullkernel/fs/Makefile 2003-08-25 18:27:04.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/Makefile 2003-08-25 20:35:51.000000000 +0200 @@ -49,8 +49,8 @@ subdir-$(CONFIG_ISO9660_FS) += isofs subdir-$(CONFIG_CD_FS) += cdfs subdir-$(CONFIG_CIFS) += cifs subdir-$(CONFIG_DEVFS_FS) += devfs +subdir-$(CONFIG_HFSPLUS_FS) += hfsplus # Before hfs to find wrapped HFS+ subdir-$(CONFIG_HFS_FS) += hfs -subdir-$(CONFIG_HFSPLUS_FS) += hfsplus subdir-$(CONFIG_VXFS_FS) += freevxfs subdir-$(CONFIG_NFS_FS) += nfs subdir-$(CONFIG_NFSD) += nfsd @@ -59,6 +59,7 @@ subdir-$(CONFIG_NLS) += nls subdir-$(CONFIG_SYSV_FS) += sysv subdir-$(CONFIG_SMB_FS) += smbfs subdir-$(CONFIG_NCP_FS) += ncpfs +subdir-$(CONFIG_DAV_FS) += davfs subdir-$(CONFIG_HPFS_FS) += hpfs subdir-$(CONFIG_NTFS_FS) += ntfs subdir-$(CONFIG_NWFS_FS) += nwfs 
diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_aout.c linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_aout.c --- linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_aout.c 2003-08-25 18:24:58.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_aout.c 2003-08-25 20:35:58.000000000 +0200 @@ -37,7 +37,7 @@ static int aout_core_dump(long signr, st extern void dump_thread(struct pt_regs *, struct user *); static struct linux_binfmt aout_format = { - NULL, THIS_MODULE, load_aout_binary, load_aout_library, aout_core_dump, PAGE_SIZE + NULL, THIS_MODULE, load_aout_binary, load_aout_library, aout_core_dump, PAGE_SIZE, "a.out" }; static void set_brk(unsigned long start, unsigned long end) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_elf.c linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_elf.c --- linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_elf.c 2003-08-25 18:27:04.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_elf.c 2003-08-25 20:35:58.000000000 +0200 @@ -77,8 +77,12 @@ static int elf_core_dump(long signr, str #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1)) #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) +#ifndef ELF_NAME +#define ELF_NAME "elf" +#endif + static struct linux_binfmt elf_format = { - NULL, THIS_MODULE, load_elf_binary, load_elf_library, elf_core_dump, ELF_EXEC_PAGESIZE + NULL, THIS_MODULE, load_elf_binary, load_elf_library, elf_core_dump, ELF_EXEC_PAGESIZE, ELF_NAME }; #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE) @@ -382,7 +386,6 @@ static unsigned long load_aout_interp(st unsigned long text_data, elf_entry = ~0UL; char * addr; loff_t offset; - int retval; current->mm->end_code = interp_ex->a_text; text_data = interp_ex->a_text + interp_ex->a_data; @@ -404,11 +407,9 @@ static unsigned long load_aout_interp(st } do_brk(0, text_data); - retval = -ENOEXEC; if (!interpreter->f_op || !interpreter->f_op->read) goto out; - retval = interpreter->f_op->read(interpreter, addr, text_data, &offset); - if (retval 
< 0) + if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0) goto out; flush_icache_range((unsigned long)addr, (unsigned long)addr + text_data); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_em86.c linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_em86.c --- linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_em86.c 2002-08-03 02:39:45.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_em86.c 2003-08-25 20:35:58.000000000 +0200 @@ -95,7 +95,7 @@ static int load_em86(struct linux_binprm } struct linux_binfmt em86_format = { - NULL, THIS_MODULE, load_em86, NULL, NULL, 0 + NULL, THIS_MODULE, load_em86, NULL, NULL, 0, "em86" }; static int __init init_em86_binfmt(void) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_misc.c linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_misc.c --- linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_misc.c 2003-08-25 18:24:58.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_misc.c 2003-08-25 20:35:58.000000000 +0200 @@ -685,7 +685,7 @@ out1: } static struct linux_binfmt misc_format = { - NULL, THIS_MODULE, load_misc_binary, NULL, NULL, 0 + NULL, THIS_MODULE, load_misc_binary, NULL, NULL, 0, "misc" }; static DECLARE_FSTYPE(bm_fs_type, "binfmt_misc", bm_read_super, FS_SINGLE|FS_LITTER); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_script.c linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_script.c --- linux-2.4.20-wolk4.8-fullkernel/fs/binfmt_script.c 2002-08-03 02:39:45.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/binfmt_script.c 2003-08-25 20:35:58.000000000 +0200 @@ -93,7 +93,7 @@ static int load_script(struct linux_binp } struct linux_binfmt script_format = { - NULL, THIS_MODULE, load_script, NULL, NULL, 0 + NULL, THIS_MODULE, load_script, NULL, NULL, 0, "script" }; static int __init init_script_binfmt(void) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/Makefile linux-2.4.20-wolk4.9-fullkernel/fs/davfs/Makefile --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/Makefile 1970-01-01 01:00:00.000000000 
+0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/Makefile 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,38 @@ +# +# Makefile for the linux smb-filesystem routines. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definitions are now in the main makefile... +# +# Modified by Sung Hun Kim for DAVfs +# + + +O_TARGET := davfs.o + +obj-y := proc.o dir.o cache.o sock.o inode.o file.o ioctl.o +obj-m := $(O_TARGET) + +# If you want debugging output, you may add these flags to the EXTRA_CFLAGS +# SMBFS_PARANOIA should normally be enabled. + +EXTRA_CFLAGS += -DSMBFS_PARANOIA +#EXTRA_CFLAGS += -DSMBFS_DEBUG +#EXTRA_CFLAGS += -DSMBFS_DEBUG_VERBOSE +#EXTRA_CFLAGS += -DDEBUG_SMB_MALLOC +#EXTRA_CFLAGS += -DDEBUG_SMB_TIMESTAMP +#EXTRA_CFLAGS += -Werror + +include $(TOPDIR)/Rules.make + + + + + + + + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/cache.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/cache.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/cache.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/cache.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,235 @@ +/* + * cache.c + * + * Copyright (C) 1997 by Bill Hawes + * + * Routines to support directory cacheing using the page cache. + * This cache code is almost directly taken from ncpfs. + * + * Please add a note about your changes to davfs in the ChangeLog file. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "davfs.h" +#include "dav_debug.h" + +/* + * Force the next attempt to use the cache to be a timeout. + * If we can't find the page that's fine, it will cause a refresh. 
+ */ +void +dav_invalid_dir_cache(struct inode * dir) +{ + struct dav_sb_info *server = dav_server_from_inode(dir); + union dav_dir_cache *cache = NULL; + struct page *page = NULL; + + TRACE(); + + page = grab_cache_page(&dir->i_data, 0); + if (!page) + goto out; + + if (!Page_Uptodate(page)) + goto out_unlock; + + cache = kmap(page); + cache->head.time = jiffies - DAV_MAX_AGE(server); + + kunmap(page); + SetPageUptodate(page); +out_unlock: + UnlockPage(page); + page_cache_release(page); +out: + return; +} + +/* + * Mark all dentries for 'parent' as invalid, forcing them to be re-read + */ +void +dav_invalidate_dircache_entries(struct dentry *parent) +{ + struct dav_sb_info *server = dav_server_from_dentry(parent); + struct list_head *next; + struct dentry *dentry; + + TRACE(); + + spin_lock(&dcache_lock); + next = parent->d_subdirs.next; + while (next != &parent->d_subdirs) { + dentry = list_entry(next, struct dentry, d_child); + dentry->d_fsdata = NULL; + dav_age_dentry(server, dentry); + next = next->next; + } + spin_unlock(&dcache_lock); +} + +/* + * dget, but require that fpos and parent matches what the dentry contains. + * dentry is not known to be a valid pointer at entry. + */ +struct dentry * +dav_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) +{ + struct dentry *dent = dentry; + struct list_head *next; + + TRACE(); + + /* + ** kernel 2.4.2 or too few arg error + ** use this function + */ + /* + if (d_validate(dent, dent->d_parent, dent->d_name.hash, dent->d_name.len)) { + */ + + if (d_validate(dent, parent)) { + if (dent->d_name.len <= DAV_MAXPATHLEN && + (unsigned long)dent->d_fsdata == fpos) { + if (!dent->d_inode) { + dput(dent); + dent = NULL; + } + return dent; + } + dput(dent); + } + + /* If a pointer is invalid, we search the dentry. 
*/ + spin_lock(&dcache_lock); + next = parent->d_subdirs.next; + while (next != &parent->d_subdirs) { + dent = list_entry(next, struct dentry, d_child); + if ((unsigned long)dent->d_fsdata == fpos) { + if (dent->d_inode) + dget_locked(dent); + else + dent = NULL; + goto out_unlock; + } + next = next->next; + } + dent = NULL; +out_unlock: + spin_unlock(&dcache_lock); + return dent; +} + + +/* + * Create dentry/inode for this file and add it to the dircache. + */ +int +dav_fill_cache(struct file *filp, void *dirent, filldir_t filldir, + struct dav_cache_control *ctrl, struct qstr *qname, + struct dav_fattr *entry) +{ + struct dentry *newdent, *dentry = filp->f_dentry; + struct inode *newino, *inode = dentry->d_inode; + struct dav_cache_control ctl = *ctrl; + int valid = 0; + int hashed = 0; + ino_t ino = 0; + + TRACE(); + + qname->hash = full_name_hash(qname->name, qname->len); + + if (dentry->d_op && dentry->d_op->d_hash) + if (dentry->d_op->d_hash(dentry, qname) != 0) + goto end_advance; + + newdent = d_lookup(dentry, qname); + + if (!newdent) { + newdent = d_alloc(dentry, qname); + if (!newdent) + goto end_advance; + } else { + hashed = 1; + memcpy((char *) newdent->d_name.name, qname->name, + newdent->d_name.len); + } + + if (!newdent->d_inode) { + dav_renew_times(newdent); + entry->f_ino = iunique(inode->i_sb, 2); + newino = dav_iget(inode->i_sb, entry); + if (newino) { + dav_new_dentry(newdent); + d_instantiate(newdent, newino); + if (!hashed) + d_rehash(newdent); + } + } else + dav_set_inode_attr(newdent->d_inode, entry); + + if (newdent->d_inode) { + ino = newdent->d_inode->i_ino; + newdent->d_fsdata = (void *) ctl.fpos; + dav_new_dentry(newdent); + } + + if (ctl.idx >= DAV_DIRCACHE_SIZE) { + if (ctl.page) { + kunmap(ctl.page); + SetPageUptodate(ctl.page); + UnlockPage(ctl.page); + page_cache_release(ctl.page); + } + ctl.cache = NULL; + ctl.idx -= DAV_DIRCACHE_SIZE; + ctl.ofs += 1; + ctl.page = grab_cache_page(&inode->i_data, ctl.ofs); + if (ctl.page) + 
ctl.cache = kmap(ctl.page); + } + if (ctl.cache) { + ctl.cache->dentry[ctl.idx] = newdent; + valid = 1; + } + dput(newdent); + +end_advance: + if (!valid) + ctl.valid = 0; + if (!ctl.filled && (ctl.fpos == filp->f_pos)) { + if (!ino) + ino = find_inode_number(dentry, qname); + if (!ino) + ino = iunique(inode->i_sb, 2); + ctl.filled = filldir(dirent, qname->name, qname->len, + filp->f_pos, ino, DT_UNKNOWN); + if (!ctl.filled) + filp->f_pos += 1; + } + ctl.fpos += 1; + ctl.idx += 1; + *ctrl = ctl; + return (ctl.valid || !ctl.filled); +} + + + + + + + + + + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/config.h linux-2.4.20-wolk4.9-fullkernel/fs/davfs/config.h --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/config.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/config.h 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,134 @@ +/* config.h. Generated automatically by configure. */ +/* config.h.in. Generated automatically from configure.in by autoheader. */ +#ifndef __PVFS_AUTOCONFIG_H +#define __PVFS_AUTOCONFIG_H +/* from config.h.top */ + +/* Define to empty if the keyword does not work. */ +/* #undef const */ + +/* Define as __inline if that's what the C compiler calls it. */ +/* #undef inline */ + +/* Define as the return type of signal handlers (int or void). */ +#define RETSIGTYPE void + +/* Define to `unsigned' if doesn't define. */ +/* #undef size_t */ + +/* Define if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if you can safely include both and . */ +#define TIME_WITH_SYS_TIME 1 + +/* Define if you have the select function. */ +#define HAVE_SELECT 1 + +/* Define if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_DEVFS_FS_KERNEL_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_HIGHMEM_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_IOBUF_H 1 + +/* Define if you have the header file. 
*/ +#define HAVE_LINUX_LOCKS_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_PAGEMAP_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_POSIX_TYPES_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_SCHED_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_SLAB_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_SMP_LOCK_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_TQUEUE_H 1 + +/* Define if you have the header file. */ +#define HAVE_LINUX_VMALLOC_H 1 + +/* Define if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define if you have the minipvfs library (-lminipvfs). */ +#define HAVE_LIBMINIPVFS 1 + +/* Define if kiovec symbols are present */ +#define HAVE_KIOVEC_SYMS 1 + +/* Define if kiovec locking functions are present */ +#define HAVE_KIOVEC_LOCK 1 + +/* Define if PVFS kernel patch is applied */ +/* #undef HAVE_PVFS_KERNEL_PATCH */ + +/* Define if devfs is enabled */ +/* #undef HAVE_DEVFS_SYMS */ + +/* Define if linux struct task_struct has a pending member */ +/* #undef HAVE_LINUX_STRUCT_TASK_STRUCT_PENDING */ + +/* Define if pagelist member of kiobuf structure exists */ +/* #undef HAVE_PAGELIST */ + +/* Define if linux struct file_operations has an owner member */ +#define HAVE_LINUX_STRUCT_FILE_OPERATIONS_OWNER 1 + +/* Define if struct inode has an i_fop member */ +#define HAVE_LINUX_STRUCT_INODE_I_FOP 1 + +/* Define if struct address_space_operations is defined */ +#define HAVE_LINUX_STRUCT_ADDRESS_SPACE_OPERATIONS 1 + +/* Define if filldir function should take six parameters */ +#define HAVE_LINUX_6_PARAM_FILLDIR 1 + +/* Define if statfs member function of struct super_operations takes 3 params */ +/* #undef 
HAVE_LINUX_3_PARAM_SUPER_STATFS */ + +/* Define if fsync member function of struct file_operations takes 3 params */ +#define HAVE_LINUX_3_PARAM_FILE_FSYNC 1 + +/* Define if linux struct page has an offset member */ +/* #undef HAVE_LINUX_STRUCT_PAGE_OFFSET */ + +/* Define if DECLARE_WAIT_QUEUE_HEAD macro is available */ +#define HAVE_DECLARE_WAIT_QUEUE_HEAD 1 + +/* Define if DECLARE_WAITQUEUE macro is available */ +#define HAVE_DECLARE_WAITQUEUE 1 + +/* Define if DECLARE_MUTEX macro is available */ +#define HAVE_DECLARE_MUTEX 1 + +/* Define if kmap function or macro exists */ +#define HAVE_KMAP 1 + +/* Define if UnlockPage macro is available */ +#define HAVE_UNLOCKPAGE 1 + +/* from config.h.bot */ +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/dav_debug.h linux-2.4.20-wolk4.9-fullkernel/fs/davfs/dav_debug.h --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/dav_debug.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/dav_debug.h 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,51 @@ +/* + * Defines some debug macros for smbfs. + */ + +/* This makes a dentry parent/child name pair. Useful for debugging printk's */ +#define DENTRY_PATH(dentry) \ + (dentry)->d_parent->d_name.name,(dentry)->d_name.name + +/* + * safety checks that should never happen ??? + * these are normally enabled. + */ +#ifdef SMBFS_PARANOIA +#define PARANOIA(fmt,args...) printk(KERN_NOTICE "%s: " fmt, __FUNCTION__, args) +#else +#define PARANOIA(x...) do { ; } while(0) +#endif + +/* lots of debug messages */ +#ifdef SMBFS_DEBUG_VERBOSE +#define VERBOSE(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, args) +#else +#define VERBOSE(x...) do { ; } while(0) +#endif + +/* + * "normal" debug messages, but not with a normal DEBUG define ... way + * too common name. + */ +#ifdef SMBFS_TRACE +#define TRACE() printk(KERN_DEBUG "%s--trace--\n", __FUNCTION__) +#else +#define TRACE() do { ; } while(0) +#endif + +#ifdef SMBFS_DEBUG +#define DEBUG1(fmt, args...) 
printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, args) +#else +#define DEBUG1(x...) do { ; } while(0) +#endif + +#ifdef SMBFS_DEBUG2 +#define DEBUG2(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, args) +#else +#define DEBUG2(x...) do { ; } while(0) +#endif + +#define PRINT_INODE(inode) printk("imode_%d uid_%d gid_%d\n",\ + inode->i_mode, inode->i_uid, inode->i_gid) + +#define PRINT_DENTRY(x) do { ; } while(0) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/davfs.h linux-2.4.20-wolk4.9-fullkernel/fs/davfs/davfs.h --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/davfs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/davfs.h 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,380 @@ +/* + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +enum { + PVFSHOSTLEN = 64, + PVFSDIRLEN = 1023 +}; + +enum { + DAV_SUPER_MAGIC = 0x7890abcd +}; + + +enum { + DAV_HOSTLEN = 1024, + DAV_PATHLEN = 1024, + DAV_IDLEN = 80, + DAV_PASSLEN = 80, +}; + +/* + * Data from mount.davfs command + * + * You should keep same copy of mount module + */ +struct dav_mount_data { + unsigned info_magic; + int flags; + int port; + char host[DAV_HOSTLEN]; + char path[DAV_PATHLEN]; + char id [DAV_IDLEN]; + char pass[DAV_PASSLEN]; + char mpoint[DAV_PATHLEN]; + /* 'r' or 'rw' */ + char amode[3]; + /* socket */ + unsigned int dav_fd; + + /* mount demonized pid */ + long pid; + + /* user id ang group id */ + umode_t mode; + uid_t uid; + gid_t gid; + + /* SSL mode */ + int ssl_mode; + + /* for Proxy */ + char proxy[DAV_HOSTLEN]; +}; + +struct dav_sb_info { + int flags; + + /* Initial path */ + char *path; + + /* server */ + int state; + struct file * sock_file; + struct socket *sock; + pid_t conn_pid; + struct smb_conn_opt opt; + + /* general buff */ + char *req; + char *buf; + + char *connect; + int ret_code; + int length; + + int ttl; + + struct 
semaphore sem; + wait_queue_head_t wait; + + struct super_block *sb; + int rcls, err; + + uid_t uid; + gid_t gid; + + umode_t file_mode; + umode_t dir_mode; + +}; + +struct dav_i_info { + /* + * file handles are local to a connection. A file is open if + * (open == generation). + */ + unsigned int open; /* open generation */ + unsigned long fileid; /* What id to handle a file with? */ + __u16 attr; /* Attribute fields, DOS value */ + + __u16 access; /* Access mode */ + __u16 flags; /* status flags */ + unsigned long oldmtime; /* last time refreshed */ + unsigned long closed; /* timestamp when closed */ + unsigned openers; /* number of fileid users */ +}; + +/* metadata associated defines */ +typedef uint64_t pvfs_handle_t; +typedef uint64_t pvfs_off_t; +typedef uint64_t pvfs_size_t; +typedef time_t pvfs_time_t; +typedef uint32_t pvfs_uid_t; +typedef uint32_t pvfs_gid_t; +typedef uint32_t pvfs_mode_t; +typedef uint32_t bitfield_t; + + +#define DAV_MAXNAMELEN 255 +#define DAV_MAXPATHLEN 1024 +#define DAV_HEAD_SIZE 20 + +#define DAV_SERVER(inode) ((inode->i_sb->u.generic_sbp)) +#define DAV_INOP(inode) ((inode->u.generic_ip)) + + +/* + * Contains all relevant data on a DAV networked file. 
+ */ +struct dav_fattr { + __u16 attr; + + char f_name[DAV_MAXNAMELEN]; + int f_name_len; + unsigned long f_ino; + umode_t f_mode; + nlink_t f_nlink; + uid_t f_uid; + gid_t f_gid; + kdev_t f_rdev; + off_t f_size; + time_t f_atime; + time_t f_mtime; + time_t f_ctime; + unsigned long f_blksize; + unsigned long f_blocks; +}; + +/* +** data struct to communicate to davfsd +** it should keep same size with davfsd +*/ +struct dav_finfo { + char f_name[DAV_MAXNAMELEN]; + int f_name_len; + int f_isdir; + off_t f_size; + time_t f_atime; + time_t f_mtime; + time_t f_ctime; +}; + +/* +** data struct to communicate to davfsd for quota +** it should keep same size with davfsd +*/ +struct dav_statfs { + long f_blocks; + long f_bfree; +}; + + +/* + * This is the time we allow an inode, dentry or dir cache to live. It is bad + * for performance to have shorter ttl on an inode than on the cache. It can + * cause refresh on each inode for a dir listing ... one-by-one + */ +#define DAV_MAX_AGE(server) (((server)->ttl * HZ) / 1000) + +struct dav_cache_head { + time_t mtime; /* unused */ + unsigned long time; /* cache age */ + unsigned long end; /* last valid fpos in cache */ + int eof; +}; + +#define DAV_DIRCACHE_SIZE ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *))) + +#define DAV_FIRSTCACHE_SIZE ((int)((DAV_DIRCACHE_SIZE * \ + sizeof(struct dentry *) - sizeof(struct dav_cache_head)) / \ + sizeof(struct dentry *))) + +#define DAV_DIRCACHE_START (DAV_DIRCACHE_SIZE - DAV_FIRSTCACHE_SIZE) +union dav_dir_cache { + struct dav_cache_head head; + struct dentry *dentry[DAV_DIRCACHE_SIZE]; +}; + + +struct dav_cache_control { + struct dav_cache_head head; + struct page *page; + union dav_dir_cache *cache; + unsigned long fpos, ofs; + int filled, valid, idx; +}; + +/* + * Flags for the in-memory inode + */ +#define DAV_F_LOCALWRITE 0x02 /* file modified locally */ + + +/* From apache source code */ +/* ----------------------- HTTP Status Codes ------------------------- */ + +/* The size of the 
static array in http_protocol.c for storing + * all of the potential response status-lines (a sparse table). + * A future version should dynamically generate the apr_table_t at startup. + */ + +#define RESPONSE_CODES 55 + +#define HTTP_CONTINUE 100 +#define HTTP_SWITCHING_PROTOCOLS 101 +#define HTTP_PROCESSING 102 +#define HTTP_OK 200 +#define HTTP_CREATED 201 +#define HTTP_ACCEPTED 202 +#define HTTP_NON_AUTHORITATIVE 203 +#define HTTP_NO_CONTENT 204 +#define HTTP_RESET_CONTENT 205 +#define HTTP_PARTIAL_CONTENT 206 +#define HTTP_MULTI_STATUS 207 +#define HTTP_MULTIPLE_CHOICES 300 +#define HTTP_MOVED_PERMANENTLY 301 +#define HTTP_MOVED_TEMPORARILY 302 +#define HTTP_SEE_OTHER 303 +#define HTTP_NOT_MODIFIED 304 +#define HTTP_USE_PROXY 305 +#define HTTP_TEMPORARY_REDIRECT 307 +#define HTTP_BAD_REQUEST 400 +#define HTTP_UNAUTHORIZED 401 +#define HTTP_PAYMENT_REQUIRED 402 +#define HTTP_FORBIDDEN 403 +#define HTTP_NOT_FOUND 404 +#define HTTP_METHOD_NOT_ALLOWED 405 +#define HTTP_NOT_ACCEPTABLE 406 +#define HTTP_PROXY_AUTHENTICATION_REQUIRED 407 +#define HTTP_REQUEST_TIME_OUT 408 +#define HTTP_CONFLICT 409 +#define HTTP_GONE 410 +#define HTTP_LENGTH_REQUIRED 411 +#define HTTP_PRECONDITION_FAILED 412 +#define HTTP_REQUEST_ENTITY_TOO_LARGE 413 +#define HTTP_REQUEST_URI_TOO_LARGE 414 +#define HTTP_UNSUPPORTED_MEDIA_TYPE 415 +#define HTTP_RANGE_NOT_SATISFIABLE 416 +#define HTTP_EXPECTATION_FAILED 417 +#define HTTP_UNPROCESSABLE_ENTITY 422 +#define HTTP_LOCKED 423 +#define HTTP_FAILED_DEPENDENCY 424 +#define HTTP_INTERNAL_SERVER_ERROR 500 +#define HTTP_NOT_IMPLEMENTED 501 +#define HTTP_BAD_GATEWAY 502 +#define HTTP_SERVICE_UNAVAILABLE 503 +#define HTTP_GATEWAY_TIME_OUT 504 +#define HTTP_VERSION_NOT_SUPPORTED 505 +#define HTTP_VARIANT_ALSO_VARIES 506 +#define HTTP_INSUFFICIENT_STORAGE 507 +#define HTTP_NOT_EXTENDED 510 + +/* + * Access modes when opening a file + */ +#define DAV_ACCMASK 0x0003 +#define DAV_O_RDONLY 0x0000 +#define DAV_O_WRONLY 0x0001 +#define DAV_O_RDWR 0x0002 + 
+/* structure access macros */ +#define dav_server_from_inode(inode) ((inode)->i_sb->u.generic_sbp) +#define dav_server_from_dentry(dentry) ((dentry)->d_sb->u.generic_sbp) +#define dav_SB_of(server) (server->sb) + +/* linux/fs/smbfs/file.c */ +extern struct inode_operations dav_file_inode_operations; +extern struct file_operations dav_file_operations; +extern struct address_space_operations dav_file_aops; + +/* dir.c */ +extern struct inode_operations dav_dir_inode_operations; +extern struct file_operations dav_dir_operations; +void dav_new_dentry(struct dentry *dentry); +void dav_renew_times(struct dentry *); + +/* linux/fs/smbfs/ioctl.c */ +int dav_ioctl (struct inode *, struct file *, unsigned int, unsigned long); + +/* linux/fs/smbfs/inode.c */ +struct super_block *dav_read_super(struct super_block *, void *, int); +void dav_get_inode_attr(struct inode *, struct dav_fattr *); +void dav_set_inode_attr(struct inode *, struct dav_fattr *); +void dav_invalidate_inodes(struct dav_sb_info *); +int dav_revalidate_inode(struct dentry *); +int dav_notify_change(struct dentry *, struct iattr *); +struct inode *dav_iget(struct super_block *, struct dav_fattr *); + + +/* cache.c */ +void dav_invalid_dir_cache(struct inode * dir); +void dav_invalidate_dircache_entries(struct dentry *parent); +struct dentry * dav_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos); +int dav_fill_cache(struct file *filp, void *dirent, filldir_t filldir, + struct dav_cache_control *ctrl, struct qstr *qname, + struct dav_fattr *entry); + +/* proc */ +int dav_get_rsize(struct dav_sb_info *server); +int dav_get_wsize(struct dav_sb_info *server); +int dav_proc_readdir(struct file *filp, void *dirent, filldir_t filldir, + struct dav_cache_control *ctl); + +int dav_proc_getattr(struct dentry *dir, struct dav_fattr *fattr); +int dav_proc_mv(struct dentry *old_dentry, struct dentry *new_dentry); +int dav_proc_mkdir(struct dentry *dentry); +int dav_proc_rmdir(struct dentry 
*dentry); +int dav_proc_unlink(struct dentry *dentry); +int dav_proc_trunc(struct dav_sb_info *server, long fid, __u32 length); +int dav_proc_setattr(struct dentry *dir, struct dav_fattr *fattr); +int dav_proc_settime(struct dentry *dentry, struct dav_fattr *fattr); +int dav_proc_open(struct dav_sb_info *server, struct dentry *dentry, int wish); +int dav_open(struct dentry *dentry, int wish); +int dav_close(struct inode *ino); +int dav_close_fileid(struct dentry *dentry, long fileid); +int dav_proc_create(struct dentry *dentry, __u16 attr, time_t ctime, long *fileid); +int dav_proc_read(struct inode *inode, off_t offset, int count, char *data); +int dav_proc_write(struct inode *inode, off_t offset, int count, const char *data); +void dav_init_root_dirent(struct dav_sb_info *server, struct dav_fattr *fattr); +int dav_proc_disconnect(struct dav_sb_info *server); +int dav_proc_statfs(struct dentry *dentry, struct statfs *buf); +static inline void +dav_age_dentry(struct dav_sb_info *server, struct dentry *dentry) +{ + dentry->d_time = jiffies - DAV_MAX_AGE(server); +} + + +/* socket.c */ +int _recvfrom(struct socket *socket, unsigned char *ubuf, int size, unsigned flags); +int dav_send_raw(struct socket *socket, unsigned char *source, int length); +int dav_receive_raw(struct socket *socket, unsigned char *target, int length); +int do_tcp_rcv(struct dav_sb_info *server, void *buffer, size_t len); +int dav_sendline_raw(struct socket *socket, unsigned char *target); +int dav_readline_raw(struct dav_sb_info *server, unsigned char *target, int length); +struct socket *dav_get_sock(struct dav_sb_info *server, unsigned int fd); + + + + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/dir.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/dir.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/dir.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/dir.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,626 @@ +/* + * dir.c + * + * Copyright (C) 1995, 1996 
by Paal-Kr. Engstad and Volker Lendecke + * Copyright (C) 1997 by Volker Lendecke + * + * Please add a note about your changes to davfs in the ChangeLog file. + */ + +#include +#include +#include +#include +#include + +#include "davfs.h" +#include "dav_debug.h" + +static int dav_readdir(struct file *, void *, filldir_t); +static int dav_dir_open(struct inode *, struct file *); + +static struct dentry *dav_lookup(struct inode *, struct dentry *); +static int dav_create(struct inode *, struct dentry *, int); +static int dav_mkdir(struct inode *, struct dentry *, int); +static int dav_rmdir(struct inode *, struct dentry *); +static int dav_unlink(struct inode *, struct dentry *); +static int dav_rename(struct inode *, struct dentry *, + struct inode *, struct dentry *); + +struct file_operations dav_dir_operations = +{ + read: generic_read_dir, + readdir: dav_readdir, + ioctl: dav_ioctl, + open: dav_dir_open, +}; + +struct inode_operations dav_dir_inode_operations = +{ + create: dav_create, + lookup: dav_lookup, + unlink: dav_unlink, + mkdir: dav_mkdir, + rmdir: dav_rmdir, + rename: dav_rename, + revalidate: dav_revalidate_inode, + setattr: dav_notify_change, +}; + +/* + * Read a directory, using filldir to fill the dirent memory. + * dav_proc_readdir does the actual reading from the dav server. 
+ * + * The cache code is almost directly taken from ncpfs + */ +static int +dav_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *dir = dentry->d_inode; + struct dav_sb_info *server = dav_server_from_dentry(dentry); + union dav_dir_cache *cache = NULL; + struct dav_cache_control ctl; + struct page *page = NULL; + int result; + + ctl.page = NULL; + ctl.cache = NULL; + + TRACE(); + + VERBOSE("reading %s/%s, f_pos=%d\n", + DENTRY_PATH(dentry), (int) filp->f_pos); + + result = 0; + switch ((unsigned int) filp->f_pos) { + case 0: + if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0) + goto out; + filp->f_pos = 1; + /* fallthrough */ + case 1: + if (filldir(dirent, "..", 2, 1, + dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) + goto out; + filp->f_pos = 2; + } + + /* + * Make sure our inode is up-to-date. + */ + result = dav_revalidate_inode(dentry); + if (result) + goto out; + + + page = grab_cache_page(&dir->i_data, 0); + if (!page) + goto read_really; + + ctl.cache = cache = kmap(page); + ctl.head = cache->head; + + if (!Page_Uptodate(page) || !ctl.head.eof) { + VERBOSE("%s/%s, page uptodate=%d, eof=%d\n", + DENTRY_PATH(dentry), Page_Uptodate(page),ctl.head.eof); + goto init_cache; + } + + if (filp->f_pos == 2) { + if (jiffies - ctl.head.time >= DAV_MAX_AGE(server)) + goto init_cache; + + /* + * N.B. ncpfs checks mtime of dentry too here, we don't. + * 1. common dav servers do not update mtime on dir changes + * 2. it requires an extra dav request + * (revalidate has the same timeout as ctl.head.time) + * + * Instead davfs invalidates its own cache on local changes + * and remote changes are not seen until timeout. 
+ */ + } + + if (filp->f_pos > ctl.head.end) + goto finished; + + ctl.fpos = filp->f_pos + (DAV_DIRCACHE_START - 2); + ctl.ofs = ctl.fpos / DAV_DIRCACHE_SIZE; + ctl.idx = ctl.fpos % DAV_DIRCACHE_SIZE; + + for (;;) { + if (ctl.ofs != 0) { + ctl.page = find_lock_page(&dir->i_data, ctl.ofs); + if (!ctl.page) + goto invalid_cache; + ctl.cache = kmap(ctl.page); + if (!Page_Uptodate(ctl.page)) + goto invalid_cache; + } + while (ctl.idx < DAV_DIRCACHE_SIZE) { + struct dentry *dent; + int res; + + dent = dav_dget_fpos(ctl.cache->dentry[ctl.idx], dentry, filp->f_pos); + if (!dent) + goto invalid_cache; + + res = filldir(dirent, dent->d_name.name, + dent->d_name.len, filp->f_pos, + dent->d_inode->i_ino, DT_UNKNOWN); + dput(dent); + if (res) + goto finished; + filp->f_pos += 1; + ctl.idx += 1; + if (filp->f_pos > ctl.head.end) + goto finished; + } + if (ctl.page) { + kunmap(ctl.page); + SetPageUptodate(ctl.page); + UnlockPage(ctl.page); + page_cache_release(ctl.page); + ctl.page = NULL; + } + ctl.idx = 0; + ctl.ofs += 1; + } +invalid_cache: + if (ctl.page) { + kunmap(ctl.page); + UnlockPage(ctl.page); + page_cache_release(ctl.page); + ctl.page = NULL; + } + ctl.cache = cache; +init_cache: + dav_invalidate_dircache_entries(dentry); + ctl.head.time = jiffies; + ctl.head.eof = 0; + ctl.fpos = 2; + ctl.ofs = 0; + ctl.idx = DAV_DIRCACHE_START; + ctl.filled = 0; + ctl.valid = 1; +read_really: + result = dav_proc_readdir(filp, dirent, filldir, &ctl); + if (ctl.idx == -1) + goto invalid_cache; /* retry */ + ctl.head.end = ctl.fpos - 1; + ctl.head.eof = ctl.valid; +finished: + if (page) { + cache->head = ctl.head; + kunmap(page); + SetPageUptodate(page); + UnlockPage(page); + page_cache_release(page); + } + if (ctl.page) { + kunmap(ctl.page); + SetPageUptodate(ctl.page); + UnlockPage(ctl.page); + page_cache_release(ctl.page); + } +out: + return result; +} + +/* + * Note: in order to allow the davmount process to open the + * mount point, we don't revalidate if conn_pid is NULL. 
+ * + * return 0 for next + */ +static int +dav_dir_open(struct inode *dir, struct file *file) +{ + struct dentry *dentry = file->f_dentry; + struct dav_sb_info *server; + int error = 0; + + TRACE(); + + VERBOSE("(%s/%s)\n", dentry->d_parent->d_name.name, + file->f_dentry->d_name.name); + + /* + * Directory timestamps in the core protocol aren't updated + * when a file is added, so we give them a very short TTL. + */ + lock_kernel(); + server = dav_server_from_dentry(dentry); + + /* + if (server->conn_pid) + error = dav_revalidate_inode(dentry); + */ + DEBUG1("conn_pid : %d\n", server->conn_pid); + unlock_kernel(); + return error; +} + +/* + * Dentry operations routines + */ +static int dav_lookup_validate(struct dentry *, int); +static int dav_hash_dentry(struct dentry *, struct qstr *); +static int dav_compare_dentry(struct dentry *, struct qstr *, struct qstr *); +static int dav_delete_dentry(struct dentry *); + +static struct dentry_operations davfs_dentry_operations = +{ + d_revalidate: dav_lookup_validate, + d_hash: dav_hash_dentry, + d_compare: dav_compare_dentry, + d_delete: dav_delete_dentry, +}; + +/* +static struct dentry_operations davfs_dentry_operations_case = +{ + d_revalidate: dav_lookup_validate, + d_delete: dav_delete_dentry, +}; +*/ + +/* + * This is the callback when the dcache has a lookup hit. + */ +static int +dav_lookup_validate(struct dentry * dentry, int flags) +{ + struct dav_sb_info *server = dav_server_from_dentry(dentry); + struct inode * inode = dentry->d_inode; + unsigned long age = jiffies - dentry->d_time; + int valid = 1; + + TRACE(); + + /* + * The default validation is based on dentry age: + * we believe in dentries for a few seconds. (But each + * successful server lookup renews the timestamp.) 
+ */ + valid = (age <= DAV_MAX_AGE(server)); +#ifdef davFS_DEBUG_VERBOSE + if (!valid) + VERBOSE("%s/%s not valid, age=%lu\n", + DENTRY_PATH(dentry), age); +#endif + + if (inode) { + lock_kernel(); + if (is_bad_inode(inode)) { + PARANOIA("%s/%s has dud inode\n", DENTRY_PATH(dentry)); + valid = 0; + } else if (!valid) + valid = (dav_revalidate_inode(dentry) == 0); + unlock_kernel(); + } else { + /* + * What should we do for negative dentries? + */ + } + return valid; +} + +static int +dav_hash_dentry(struct dentry *dir, struct qstr *this) +{ + unsigned long hash; + int i; + + TRACE(); + + hash = init_name_hash(); + for (i=0; i < this->len ; i++) + hash = partial_name_hash(tolower(this->name[i]), hash); + this->hash = end_name_hash(hash); + + return 0; +} + +static int +dav_compare_dentry(struct dentry *dir, struct qstr *a, struct qstr *b) +{ + int i, result = 1; + + TRACE(); + + if (a->len != b->len) + goto out; + for (i=0; i < a->len; i++) { + if (tolower(a->name[i]) != tolower(b->name[i])) + goto out; + } + result = 0; +out: + return result; +} + +/* + * This is the callback from dput() when d_count is going to 0. + * We use this to unhash dentries with bad inodes. + */ +static int +dav_delete_dentry(struct dentry * dentry) +{ + + TRACE(); + if (dentry->d_inode) { + if (is_bad_inode(dentry->d_inode)) { + PARANOIA("bad inode, unhashing %s/%s\n", + DENTRY_PATH(dentry)); + return 1; + } + } else { + /* N.B. Unhash negative dentries? */ + } + return 0; +} + +/* + * Initialize a new dentry + */ +void +dav_new_dentry(struct dentry *dentry) +{ + // struct dav_sb_info *server = dav_server_from_dentry(dentry); + + TRACE(); + + dentry->d_op = &davfs_dentry_operations; + + dentry->d_time = jiffies; +} + + +/* + * Whenever a lookup succeeds, we know the parent directories + * are all valid, so we want to update the dentry timestamps. + * N.B. Move this to dcache? 
+ */ +void +dav_renew_times(struct dentry * dentry) +{ + TRACE(); + + for (;;) { + dentry->d_time = jiffies; + if (IS_ROOT(dentry)) + break; + dentry = dentry->d_parent; + } +} + +static struct dentry * +dav_lookup(struct inode *dir, struct dentry *dentry) +{ + struct dav_fattr finfo; + struct inode *inode; + int error; + struct dav_sb_info *server; + + TRACE(); + + error = -ENAMETOOLONG; + if (dentry->d_name.len > DAV_MAXNAMELEN) + goto out; + + error = dav_proc_getattr(dentry, &finfo); +#ifdef davFS_PARANOIA + if (error && error != -ENOENT) + PARANOIA("find %s/%s failed, error=%d\n", + DENTRY_PATH(dentry), error); +#endif + DEBUG1("=== getattr result : %d\n", error); + + inode = NULL; + if (error == -ENOENT) + goto add_entry; + if (!error) { + error = -EACCES; + finfo.f_ino = iunique(dentry->d_sb, 2); + inode = dav_iget(dir->i_sb, &finfo); + if (inode) { + add_entry: + server = dav_server_from_dentry(dentry); + dentry->d_op = &davfs_dentry_operations; + + d_add(dentry, inode); + dav_renew_times(dentry); + error = 0; + } + } +out: + return ERR_PTR(error); +} + +/* + * This code is common to all routines creating a new inode. 
+ */ +static int +dav_instantiate(struct dentry *dentry, long fileid, int have_id) +{ + // struct dav_sb_info *server = dav_server_from_dentry(dentry); + struct inode *inode; + int error; + struct dav_fattr fattr; + + TRACE(); + + VERBOSE("file %s/%s, fileid=%ld\n", DENTRY_PATH(dentry), fileid); + + + error = dav_proc_getattr(dentry, &fattr); + if (error) + goto out_close; + + dav_renew_times(dentry); + fattr.f_ino = iunique(dentry->d_sb, 2); + inode = dav_iget(dentry->d_sb, &fattr); + if (!inode) + goto out_no_inode; + + if (have_id) + { + /* we don't have inode before here */ + /* bug fix 05/09/01 hunkim */ + struct dav_i_info *dii = DAV_INOP(inode); + dii->fileid = fileid; + DEBUG2("FILEID = %ld\n", dii->fileid); + dii->access = DAV_O_RDWR; + dii->open++; + } + + d_instantiate(dentry, inode); + out: + return error; + + out_no_inode: + error = -EACCES; + out_close: + if (have_id) + { + PARANOIA("%s/%s failed, error=%d, closing %ld\n", + DENTRY_PATH(dentry), error, fileid); + dav_close_fileid(dentry, fileid); + } + goto out; +} + +/* N.B. How should the mode argument be used? */ +static int +dav_create(struct inode *dir, struct dentry *dentry, int mode) +{ + /* fake file id */ + long fileid=0; + int error; + + TRACE(); + + VERBOSE("creating %s/%s, mode=%d\n", DENTRY_PATH(dentry), mode); + + dav_invalid_dir_cache(dir); + error = dav_proc_create(dentry, 0, CURRENT_TIME, &fileid); + if (!error) { + error = dav_instantiate(dentry, fileid, 1); + } else { + PARANOIA("%s/%s failed, error=%d\n", + DENTRY_PATH(dentry), error); + } + + DEBUG2("FILEID = %ld\n", fileid); + return error; +} + +/* N.B. How should the mode argument be used? 
*/ +static int +dav_mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + int error; + + TRACE(); + + dav_invalid_dir_cache(dir); + error = dav_proc_mkdir(dentry); + if (!error) { + error = dav_instantiate(dentry, 0, 0); + } + return error; +} + +static int +dav_rmdir(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + int error; + + TRACE(); + + /* + * Close the directory if it's open. + */ + dav_close(inode); + + /* + * Check that nobody else is using the directory.. + */ + error = -EBUSY; + if (!d_unhashed(dentry)) + goto out; + + dav_invalid_dir_cache(dir); + error = dav_proc_rmdir(dentry); + +out: + return error; +} + +static int +dav_unlink(struct inode *dir, struct dentry *dentry) +{ + int error; + + TRACE(); + + /* + * Close the file if it's open. + */ + dav_close(dentry->d_inode); + + dav_invalid_dir_cache(dir); + error = dav_proc_unlink(dentry); + if (!error) + dav_renew_times(dentry); + return error; +} + +static int +dav_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + int error; + + TRACE(); + + /* + * Close any open files, and check whether to delete the + * target before attempting the rename. 
+ */ + if (old_dentry->d_inode) + dav_close(old_dentry->d_inode); + if (new_dentry->d_inode) + { + dav_close(new_dentry->d_inode); + error = dav_proc_unlink(new_dentry); + if (error) + { + VERBOSE("unlink %s/%s, error=%d\n", + DENTRY_PATH(new_dentry), error); + goto out; + } + /* FIXME */ + d_delete(new_dentry); + } + + dav_invalid_dir_cache(old_dir); + dav_invalid_dir_cache(new_dir); + error = dav_proc_mv(old_dentry, new_dentry); + if (!error) + { + dav_renew_times(old_dentry); + dav_renew_times(new_dentry); + } +out: + return error; +} + + + + + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/file.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/file.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/file.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/file.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,397 @@ +/* + * file.c + * + * Copyright (C) 1995, 1996, 1997 by Paal-Kr. Engstad and Volker Lendecke + * Copyright (C) 1997 by Volker Lendecke + * + * Please add a note about your changes to davfs in the ChangeLog file. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "dav_debug.h" +#include "davfs.h" + +static int +dav_fsync(struct file *file, struct dentry * dentry, int datasync) +{ + VERBOSE("sync file %s/%s\n", DENTRY_PATH(dentry)); + return 0; +} + +/* + * Read a page synchronously. 
+ */ +static int +dav_readpage_sync(struct dentry *dentry, struct page *page) +{ + char *buffer = kmap(page); + unsigned long offset = page->index << PAGE_CACHE_SHIFT; + int rsize = dav_get_rsize(dav_server_from_dentry(dentry)); + int count = PAGE_SIZE; + int result; + + VERBOSE("file %s/%s, count=%d@%ld, rsize=%d\n", + DENTRY_PATH(dentry), count, offset, rsize); + + result = dav_open(dentry, DAV_O_RDONLY); + if (result < 0) { + PARANOIA("%s/%s open failed, error=%d\n", + DENTRY_PATH(dentry), result); + goto io_error; + } + + do { + if (count < rsize) + rsize = count; + + result = dav_proc_read(dentry->d_inode, offset, rsize, buffer); + if (result < 0) + goto io_error; + + count -= result; + offset += result; + buffer += result; + dentry->d_inode->i_atime = CURRENT_TIME; + if (result < rsize) + break; + } while (count); + + memset(buffer, 0, count); + flush_dcache_page(page); + SetPageUptodate(page); + result = 0; + +io_error: + kunmap(page); + UnlockPage(page); + return result; +} + +/* + * We are called with the page locked and we unlock it when done. + */ +static int +dav_readpage(struct file *file, struct page *page) +{ + int error; + struct dentry *dentry = file->f_dentry; + + TRACE(); + + get_page(page); + error = dav_readpage_sync(dentry, page); + put_page(page); + return error; +} + +/* + * Write a page synchronously. + * Offset is the data offset within the page. 
+ */ +static int +dav_writepage_sync(struct inode *inode, struct page *page, + unsigned long offset, unsigned int count) +{ + char *buffer = kmap(page) + offset; + struct dav_i_info *dii = inode->u.generic_ip; + + int wsize = dav_get_wsize(dav_server_from_inode(inode)); + int result, written = 0; + + TRACE(); + + offset += page->index << PAGE_CACHE_SHIFT; + VERBOSE("file ino=%ld, count=%d@%ld, wsize=%d\n", + inode->i_ino, count, offset, wsize); + + do { + if (count < wsize) + wsize = count; + + result = dav_proc_write(inode, offset, wsize, buffer); + if (result < 0) { + PARANOIA("failed write, wsize=%d, result=%d\n", + wsize, result); + break; + } + /* N.B. what if result < wsize?? */ +#ifdef davFS_PARANOIA + if (result < wsize) + PARANOIA("short write, wsize=%d, result=%d\n", + wsize, result); +#endif + buffer += wsize; + offset += wsize; + written += wsize; + count -= wsize; + /* + * Update the inode now rather than waiting for a refresh. + */ + inode->i_mtime = inode->i_atime = CURRENT_TIME; + if (offset > inode->i_size) + inode->i_size = offset; + dii->flags |= DAV_F_LOCALWRITE; + } while (count); + + kunmap(page); + return written ? written : result; +} + +/* + * Write a page to the server. This will be used for NFS swapping only + * (for now), and we currently do this synchronously only. + * + * We are called with the page locked and we unlock it when done. + */ +static int +dav_writepage(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct inode *inode; + unsigned long end_index; + unsigned offset = PAGE_CACHE_SIZE; + int err=0; + + TRACE(); + + if (!mapping) + BUG(); + inode = mapping->host; + if (!inode) + BUG(); + + end_index = inode->i_size >> PAGE_CACHE_SHIFT; + + /* easy case */ + if (page->index < end_index) + goto do_it; + /* things got complicated... */ + offset = inode->i_size & (PAGE_CACHE_SIZE-1); + /* OK, are we completely out? 
*/ + if (page->index >= end_index+1 || !offset) + return -EIO; +do_it: + get_page(page); + err = dav_writepage_sync(inode, page, 0, offset); + SetPageUptodate(page); + UnlockPage(page); + put_page(page); + return err; +} + +static int +dav_updatepage(struct file *file, struct page *page, unsigned long offset, + unsigned int count) +{ + struct dentry *dentry = file->f_dentry; + + DEBUG1("(%s/%s %d@%ld)\n", DENTRY_PATH(dentry), + count, (page->index << PAGE_CACHE_SHIFT)+offset); + + return dav_writepage_sync(dentry->d_inode, page, offset, count); +} + +static ssize_t +dav_file_read(struct file * file, char * buf, size_t count, loff_t *ppos) +{ + struct dentry * dentry = file->f_dentry; + ssize_t status; + + VERBOSE("file %s/%s, count=%lu@%lu\n", DENTRY_PATH(dentry), + (unsigned long) count, (unsigned long) *ppos); + + status = dav_revalidate_inode(dentry); + if (status) + { + PARANOIA("%s/%s validation failed, error=%Zd\n", + DENTRY_PATH(dentry), status); + goto out; + } + + VERBOSE("before read, size=%ld, flags=%x, atime=%ld\n", + (long)dentry->d_inode->i_size, + dentry->d_inode->i_flags, dentry->d_inode->i_atime); + + status = generic_file_read(file, buf, count, ppos); +out: + return status; +} + +static int +dav_file_mmap(struct file * file, struct vm_area_struct * vma) +{ + struct dentry * dentry = file->f_dentry; + int status; + + VERBOSE("file %s/%s, address %lu - %lu\n", + DENTRY_PATH(dentry), vma->vm_start, vma->vm_end); + + status = dav_revalidate_inode(dentry); + if (status) + { + PARANOIA("%s/%s validation failed, error=%d\n", + DENTRY_PATH(dentry), status); + goto out; + } + status = generic_file_mmap(file, vma); +out: + return status; +} + +/* + * This does the "real" work of the write. The generic routine has + * allocated the page, locked it, done all the page alignment stuff + * calculations etc. Now we should just copy the data from user + * space and write it back to the real medium.. 
+ * + * If the writer ends up delaying the write, the writer needs to + * increment the page use counts until he is done with the page. + */ +static int dav_prepare_write(struct file *file, struct page *page, + unsigned offset, unsigned to) +{ + TRACE(); + kmap(page); + return 0; +} + +static int dav_commit_write(struct file *file, struct page *page, + unsigned offset, unsigned to) +{ + int status; + + TRACE(); + + status = -EFAULT; + lock_kernel(); + status = dav_updatepage(file, page, offset, to-offset); + unlock_kernel(); + kunmap(page); + return status; +} + +struct address_space_operations dav_file_aops = { + readpage: dav_readpage, + writepage: dav_writepage, + prepare_write: dav_prepare_write, + commit_write: dav_commit_write +}; + +/* + * Write to a file (through the page cache). + */ +static ssize_t +dav_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos) +{ + struct dentry * dentry = file->f_dentry; + ssize_t result; + + VERBOSE("file %s/%s, count=%lu@%lu\n", + DENTRY_PATH(dentry), + (unsigned long) count, (unsigned long) *ppos); + + result = dav_revalidate_inode(dentry); + if (result) + { + PARANOIA("%s/%s validation failed, error=%Zd\n", + DENTRY_PATH(dentry), result); + goto out; + } + + result = dav_open(dentry, DAV_O_WRONLY); + if (result) + goto out; + + if (count > 0) + { + result = generic_file_write(file, buf, count, ppos); + VERBOSE("pos=%ld, size=%ld, mtime=%ld, atime=%ld\n", + (long) file->f_pos, (long) dentry->d_inode->i_size, + dentry->d_inode->i_mtime, dentry->d_inode->i_atime); + } +out: + return result; +} + +static int +dav_file_open(struct inode *inode, struct file * file) +{ + int result=0; + struct dentry *dentry = file->f_dentry; + struct dav_i_info *dii = inode->u.generic_ip; + int dav_mode = (file->f_mode & O_ACCMODE) - 1; + + TRACE(); + + lock_kernel(); + result = dav_open(dentry, dav_mode); + if (result) + goto out; + dii->openers++; +out: + unlock_kernel(); + return result; +} + +static int 
+dav_file_release(struct inode *inode, struct file * file) +{ + struct dav_i_info *dii = inode->u.generic_ip; + + TRACE(); + lock_kernel(); + if (!--dii->openers) + dav_close(inode); + unlock_kernel(); + return 0; +} + +/* + * Check whether the required access is compatible with + * an inode's permission. dav doesn't recognize superuser + * privileges, so we need our own check for this. + */ +static int +dav_file_permission(struct inode *inode, int mask) +{ + int mode = inode->i_mode; + int error = 0; + + VERBOSE("mode=%x, mask=%x\n", mode, mask); + + /* Look at user permissions */ + mode >>= 6; + if ((mode & 7 & mask) != mask) + error = -EACCES; + return error; +} + +struct file_operations dav_file_operations = +{ + read: dav_file_read, + write: dav_file_write, + ioctl: dav_ioctl, + mmap: dav_file_mmap, + open: dav_file_open, + release: dav_file_release, + fsync: dav_fsync, +}; + +struct inode_operations dav_file_inode_operations = +{ + permission: dav_file_permission, + revalidate: dav_revalidate_inode, + setattr: dav_notify_change, +}; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/getopt.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/getopt.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/getopt.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/getopt.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,7 @@ +/* + * getopt.c + */ + +#include +#include + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/inode.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/inode.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/inode.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/inode.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,625 @@ +/* + * inode.c + * + * Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke + * Copyright (C) 1997 by Volker Lendecke + * Copyright (C) 2001 by SungHun Kim, edit and change for dav support + * + * Please add a note about your changes to davfs in the ChangeLog file. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +/* +#include +#include +#include + +*/ +#include +#include + +#include "davfs.h" +#include "dav_debug.h" + +static void dav_delete_inode(struct inode *); +static void dav_put_super(struct super_block *); +static int dav_statfs(struct super_block *, struct statfs *); + +static struct super_operations dav_sops = +{ + put_inode: force_delete, + delete_inode: dav_delete_inode, + put_super: dav_put_super, + statfs: dav_statfs, +}; + + +/* We are always generating a new inode here */ +struct inode * +dav_iget(struct super_block *sb, struct dav_fattr *fattr) +{ + struct inode *result; + struct dav_i_info *dav_i_info; + + TRACE(); + + result = new_inode(sb); + if (!result) + return result; + + result->i_ino = fattr->f_ino; + + /* malloc for dav_i_info */ + dav_i_info = (struct dav_i_info *) + kmalloc(sizeof(struct dav_i_info), GFP_KERNEL); + + if(!dav_i_info) + return NULL; + + memset(dav_i_info, 0x00, sizeof(struct dav_i_info)); + + /* we need to use generic pointer */ + result->u.generic_ip = dav_i_info; + + dav_set_inode_attr(result, fattr); + + if (S_ISREG(result->i_mode)) { + result->i_op = &dav_file_inode_operations; + result->i_fop = &dav_file_operations; + result->i_data.a_ops = &dav_file_aops; + } else if (S_ISDIR(result->i_mode)) { + result->i_op = &dav_dir_inode_operations; + result->i_fop = &dav_dir_operations; + } + insert_inode_hash(result); + + DEBUG1("dav_iget: %p\n", fattr); + + return result; +} + +/* + * Copy the inode data to a dav_fattr structure. 
+ */ +void +dav_get_inode_attr(struct inode *inode, struct dav_fattr *fattr) +{ + struct dav_i_info *dii = inode->u.generic_ip; + + DEBUG1("here\n"); + + memset(fattr, 0, sizeof(struct dav_fattr)); + fattr->f_mode = inode->i_mode; + fattr->f_nlink = inode->i_nlink; + fattr->f_ino = inode->i_ino; + fattr->f_uid = inode->i_uid; + fattr->f_gid = inode->i_gid; + fattr->f_rdev = inode->i_rdev; + fattr->f_size = inode->i_size; + fattr->f_mtime = inode->i_mtime; + fattr->f_ctime = inode->i_ctime; + fattr->f_atime = inode->i_atime; + fattr->f_blksize= inode->i_blksize; + fattr->f_blocks = inode->i_blocks; + + fattr->attr = dii->attr; + /* + * Keep the attributes in sync with the inode permissions. + */ + if (fattr->f_mode & S_IWUSR) + fattr->attr &= ~aRONLY; + else + fattr->attr |= aRONLY; +} + +void +dav_set_inode_attr(struct inode *inode, struct dav_fattr *fattr) +{ + struct dav_i_info *dii = inode->u.generic_ip; + + TRACE(); + + inode->i_mode = fattr->f_mode; + inode->i_nlink = fattr->f_nlink; + inode->i_uid = fattr->f_uid; + inode->i_gid = fattr->f_gid; + inode->i_rdev = fattr->f_rdev; + inode->i_ctime = fattr->f_ctime; + inode->i_blksize = fattr->f_blksize; + inode->i_blocks = fattr->f_blocks; + /* + * Don't change the size and mtime/atime fields + * if we're writing to the file. + */ + /* + if (!(dii->flags & DAV_F_LOCALWRITE)) + */ + { + inode->i_size = fattr->f_size; + inode->i_mtime = fattr->f_mtime; + inode->i_atime = fattr->f_atime; + } + + if(dii) { + + dii->attr = fattr->attr; + /* + * Update the "last time refreshed" field for revalidation. + */ + dii->oldmtime = jiffies; + } +} + +/* + * This is called if the connection has gone bad ... + * try to kill off all the current inodes. + */ +void +dav_invalidate_inodes(struct dav_sb_info *server) +{ + VERBOSE("\n"); + shrink_dcache_sb(dav_SB_of(server)); + invalidate_inodes(dav_SB_of(server)); +} + +/* + * This is called to update the inode attributes after + * we've made changes to a file or directory. 
+ */ +static int +dav_refresh_inode(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + int error; + struct dav_fattr fattr; + + DEBUG1("here\n"); + error = dav_proc_getattr(dentry, &fattr); + + if (!error) { + dav_renew_times(dentry); + /* + * Check whether the type part of the mode changed, + * and don't update the attributes if it did. + */ + if ((inode->i_mode & S_IFMT) == (fattr.f_mode & S_IFMT)) { + dav_set_inode_attr(inode, &fattr); + } else { + /* + * Big trouble! The inode has become a new object, + * so any operations attempted on it are invalid. + * + * To limit damage, mark the inode as bad so that + * subsequent lookup validations will fail. + */ + PARANOIA("%s/%s changed mode, %07o to %07o\n", + DENTRY_PATH(dentry), + inode->i_mode, fattr.f_mode); + + fattr.f_mode = inode->i_mode; /* save mode */ + make_bad_inode(inode); + inode->i_mode = fattr.f_mode; /* restore mode */ + /* + * No need to worry about unhashing the dentry: the + * lookup validation will see that the inode is bad. + * But we do want to invalidate the caches ... + */ + if (!S_ISDIR(inode->i_mode)) + invalidate_inode_pages(inode); + else + dav_invalid_dir_cache(inode); + error = -EIO; + } + } + return error; +} + +/* + * This is called when we want to check whether the inode + * has changed on the server. If it has changed, we must + * invalidate our local caches. + */ +int +dav_revalidate_inode(struct dentry *dentry) +{ + struct dav_sb_info *s = dav_server_from_dentry(dentry); + struct inode *inode = dentry->d_inode; + struct dav_i_info *dii = inode->u.generic_ip; + time_t last_time; + loff_t last_sz; + int error = 0; + + TRACE(); + + VERBOSE(" for dentry %s/%s\n", + DENTRY_PATH(dentry)); + + lock_kernel(); + + /* + * Check whether we've recently refreshed the inode. 
+ */ + if (time_before(jiffies, dii->oldmtime + DAV_MAX_AGE(s))) { + VERBOSE("up-to-date, ino=%ld, jiffies=%lu, oldtime=%lu\n", + inode->i_ino, jiffies, dii->oldmtime); + goto out; + } + + /* + * Save the last modified time, then refresh the inode. + * (Note: a size change should have a different mtime, + * or same mtime but different size.) + */ + last_time = inode->i_mtime; + last_sz = inode->i_size; + error = dav_refresh_inode(dentry); + + if (error || inode->i_mtime != last_time || inode->i_size != last_sz) { + VERBOSE("%s/%s changed, old=%ld, new=%ld\n", + DENTRY_PATH(dentry), + (long) last_time, (long) inode->i_mtime); + + if (!S_ISDIR(inode->i_mode)) + invalidate_inode_pages(inode); + } +out: + unlock_kernel(); + return error; +} + +/* + * This routine is called when i_nlink == 0 and i_count goes to 0. + * All blocking cleanup operations need to go here to avoid races. + */ +static void +dav_delete_inode(struct inode *ino) +{ + int ret; + + DEBUG1("ino=%ld\n", ino->i_ino); + lock_kernel(); + + + if(ino->u.generic_ip) + kfree(ino->u.generic_ip); + + if ((ret=dav_close(ino))) + PARANOIA("could not close inode %ld(%d)\n", ino->i_ino, ret); + + unlock_kernel(); + + clear_inode(ino); +} + + +static void +dav_put_super(struct super_block *sb) +{ + struct dav_sb_info *server = sb->u.generic_sbp; + + TRACE(); + + /* disconnect with davfsd */ + dav_proc_disconnect(server); + + /* free general buf */ + kfree(server->buf); + kfree(server->req); + + /* + if (server->sock_file) { + dav_proc_disconnect(server); + dav_dont_catch_keepalive(server); + fput(server->sock_file); + } + + if (server->conn_pid) + kill_proc(server->conn_pid, SIGTERM, 1); + */ + + /* + kfree(sb->u.davfs_sb.temp_buf); + + if (server->packet) + dav_vfree(server->packet); + */ +} + +char * dav_strdup(char *str) { + char *ret = kmalloc(strlen(str)+1 ,GFP_KERNEL); + if(!ret) + return ret; + + memset(ret, 0, sizeof(strlen(str)+1)); + strcpy(ret, str); + + return ret; +} + +struct super_block * 
+dav_read_super(struct super_block *sb, void *raw_data, int silent) +{ + struct dav_mount_data *davmnt; + struct inode *root_inode; + struct dav_fattr root; + struct dav_sb_info *server; + + TRACE(); + + if (!raw_data) + goto out_no_data; + + davmnt = (struct dav_mount_data *) raw_data; + + /* malloc for sb */ + server = (struct dav_sb_info *) + kmalloc(sizeof(struct dav_sb_info) ,GFP_KERNEL); + if(!server) + goto out_no_mem; + + memset(server, 0, sizeof(*server)); + + /* copy data from mount information to server data */ + server->flags = davmnt->flags; + server->ttl = 1000; + server->uid = davmnt->uid; + server->gid = davmnt->gid; + + server->dir_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR; + server->file_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFREG; + server->sb = sb; + + server->path = dav_strdup(davmnt->path); + if(!server->path) + goto out_no_mem; + + /* socket handler to communicate davfsd */ + server->sock = dav_get_sock(server, davmnt->dav_fd); + if(!server->sock) + goto out_bad_sock; + + /* init_ MUTEX and waitques */ + init_MUTEX(&(server->sem)); + init_waitqueue_head(&server->wait); + + /* memory alloc for general buf */ + server->buf = kmalloc(DAV_MAXPATHLEN ,GFP_KERNEL); + server->req = kmalloc(DAV_MAXPATHLEN ,GFP_KERNEL); + if(!server->buf || !server->req) + goto out_no_mem; + + +#ifdef SERVER_TEST + /* test server */ + dav_server_test(server); +#endif + + /* assign generic sbp */ + sb->u.generic_sbp = server; + + sb->s_blocksize = 1024; /* Eh... Is this correct? 
*/ + sb->s_blocksize_bits = 12; + sb->s_magic = DAV_SUPER_MAGIC; + sb->s_flags = 0; + sb->s_op = &dav_sops; + sb->s_maxbytes = MAX_NON_LFS; /* client support missing */ + + dav_init_root_dirent(server, &root); + + DEBUG1("root_mode : %d\n", root.f_mode); + root_inode = dav_iget(sb, &root); + if (!root_inode) + goto out_no_root; + + sb->s_root = d_alloc_root(root_inode); + if (!sb->s_root) + goto out_no_root; + dav_new_dentry(sb->s_root); + + PRINT_INODE(root_inode); + PRINT_DENTRY(sb->s_root); + + DEBUG1("org_mode %d\n", server->dir_mode); + + return sb; + +out_no_root: + iput(root_inode); +out_bad_sock: + printk(KERN_ERR "dav_get_socket: no socket\n"); +out_no_mem: + /* + if (!sb->u.davfs_sb.mnt) + printk(KERN_ERR "dav_read_super: allocation failure\n"); + */ + goto out_fail; +out_no_data: + printk(KERN_ERR "dav_read_super: missing data argument\n"); +out_fail: + return NULL; +} + +static int +dav_statfs(struct super_block *sb, struct statfs *buf) +{ + /*NetWare Server, because free space is distributed over + volumes, and the current user might have disk quotas. So + free space is not that simple to determine. Our decision + here is to err conservatively. 
*/ + + DEBUG1("here\n"); + + /* get statfs information from davfsd */ + dav_proc_statfs(sb->s_root, buf); + + return 0; +} + +int +dav_notify_change(struct dentry *dentry, struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + struct dav_sb_info *server = dav_server_from_dentry(dentry); + struct dav_i_info *dii = DAV_INOP(inode); + unsigned int mask = (S_IFREG | S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO); + int error, changed, refresh = 0; + struct dav_fattr fattr; + + TRACE(); + + error = dav_revalidate_inode(dentry); + if (error) + goto out; + + if ((error = inode_change_ok(inode, attr)) < 0) + goto out; + + error = -EPERM; + if ((attr->ia_valid & ATTR_UID) && (attr->ia_uid != server->uid)) + goto out; + + if ((attr->ia_valid & ATTR_GID) && (attr->ia_uid != server->gid)) + goto out; + + if ((attr->ia_valid & ATTR_MODE) && (attr->ia_mode & ~mask)) + goto out; + + if ((attr->ia_valid & ATTR_SIZE) != 0) + { + VERBOSE("changing %s/%s, old size=%ld, new size=%ld\n", + DENTRY_PATH(dentry), + (long) inode->i_size, (long) attr->ia_size); + error = dav_open(dentry, O_WRONLY); + if (error) + goto out; + error = dav_proc_trunc(server, dii->fileid, + attr->ia_size); + if (error) + goto out; + vmtruncate(inode, attr->ia_size); + refresh = 1; + } + + /* + * Initialize the fattr and check for changed fields. + * Note: CTIME under DAV is creation time rather than + * change time, so we don't attempt to change it. + */ + dav_get_inode_attr(inode, &fattr); + + changed = 0; + if ((attr->ia_valid & ATTR_MTIME) != 0) + { + fattr.f_mtime = attr->ia_mtime; + changed = 1; + } + if ((attr->ia_valid & ATTR_ATIME) != 0) + { + fattr.f_atime = attr->ia_atime; + + } + if (changed) + { + error = dav_proc_setattr(dentry, &fattr); + if (error) + goto out; + refresh = 1; + } + + /* + * Check for mode changes ... we're extremely limited in + * what can be set for DAV servers: just the read-only bit. 
+ */ + if ((attr->ia_valid & ATTR_MODE) != 0) + { + VERBOSE("%s/%s mode change, old=%x, new=%x\n", + DENTRY_PATH(dentry), fattr.f_mode, attr->ia_mode); + changed = 0; + if (attr->ia_mode & S_IWUSR) + { + if (fattr.attr & aRONLY) + { + fattr.attr &= ~aRONLY; + changed = 1; + } + } else { + if (!(fattr.attr & aRONLY)) + { + fattr.attr |= aRONLY; + changed = 1; + } + } + if (changed) + { + error = dav_proc_setattr(dentry, &fattr); + if (error) + goto out; + refresh = 1; + } + } + error = 0; + +out: + if (refresh) + dav_refresh_inode(dentry); + return error; +} + +#ifdef DEBUG_DAV_MALLOC +int dav_malloced; +int dav_current_kmalloced; +int dav_current_vmalloced; +#endif + +static DECLARE_FSTYPE( dav_fs_type, "davfs", dav_read_super, 0); + +static int __init init_dav_fs(void) +{ + DEBUG1("registering ...\n"); + +#ifdef DEBUG_DAV_MALLOC + dav_malloced = 0; + dav_current_kmalloced = 0; + dav_current_vmalloced = 0; +#endif + + return register_filesystem(&dav_fs_type); +} + +static void __exit exit_dav_fs(void) +{ + DEBUG1("unregistering ...\n"); + unregister_filesystem(&dav_fs_type); +#ifdef DEBUG_DAV_MALLOC + printk(KERN_DEBUG "dav_malloced: %d\n", dav_malloced); + printk(KERN_DEBUG "dav_current_kmalloced: %d\n",dav_current_kmalloced); + printk(KERN_DEBUG "dav_current_vmalloced: %d\n",dav_current_vmalloced); +#endif +} + +EXPORT_NO_SYMBOLS; + +/* +** Added by Philipp Hahn (pmhahn) +*/ +MODULE_AUTHOR("Sung Kim "); +MODULE_DESCRIPTION("Web Distributed Authoring and Versioning Filesystem"); +MODULE_LICENSE("GPL"); + +module_init(init_dav_fs) +module_exit(exit_dav_fs) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/ioctl.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/ioctl.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/ioctl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/ioctl.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,66 @@ +/* + * ioctl.c + * + * Copyright (C) 1995, 1996 by Volker Lendecke + * Copyright (C) 1997 by Volker Lendecke + 
* + * Please add a note about your changes to davfs in the ChangeLog file. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "davfs.h" + +int +dav_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + // struct dav_sb_info *server = DAV_SERVER(inode); + // struct dav_conn_opt opt; + int result = -EINVAL; + /* + switch (cmd) { + case DAV_IOC_GETMOUNTUID: + result = put_user(NEW_TO_OLD_UID(server->mnt->mounted_uid), + (uid16_t *) arg); + break; + case DAV_IOC_GETMOUNTUID32: + result = put_user(server->mnt->mounted_uid, (uid_t *) arg); + break; + + case DAV_IOC_NEWCONN: + + if (!arg) + break; + + result = -EFAULT; + if (!copy_from_user(&opt, (void *)arg, sizeof(opt))) + result = dav_newconn(server, &opt); + break; + default: + } + */ + return result; +} + + + + + + + + + + + + + + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/proc.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/proc.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/proc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/proc.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,980 @@ + /* + * proc.c + * + * Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke + * Copyright (C) 1997 by Volker Lendecke + * + * Please add a note about your changes to davfs in the ChangeLog file. + */ + + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + + #include + + #include "davfs.h" + #include "dav_debug.h" + + /* Features. Undefine if they cause problems, this should perhaps be a + config option. */ + #define davFS_POSIX_UNLINK 1 + + /* Allow dav_retry to be interrupted. Not sure of the benefit ... 
*/ + /* #define dav_RETRY_INTR */ + + #define dav_DIRINFO_SIZE 43 + #define dav_STATUS_SIZE 21 + + + static inline void + dav_lock_server(struct dav_sb_info *server) + { + down(&(server->sem)); + } + + static inline void + dav_unlock_server(struct dav_sb_info *server) + { + up(&(server->sem)); + } + + + /* + * Returns the maximum read or write size for the "payload". Making all of the + * packet fit within the negotiated max_xmit size. + * + * N.B. Since this value is usually computed before locking the server, + * the server's packet size must never be decreased! + */ + static inline int + dav_get_xmitsize(struct dav_sb_info *server, int overhead) + { + TRACE(); + return 4096; + } + + /* + * Calculate the maximum read size + */ + int + dav_get_rsize(struct dav_sb_info *server) + { + int overhead = 0; + int size = dav_get_xmitsize(server, overhead); + + TRACE(); + + return size; + } + +/* + * Calculate the maximum write size + */ +int +dav_get_wsize(struct dav_sb_info *server) +{ + int overhead = 0; + int size = dav_get_xmitsize(server, overhead); + + TRACE(); + + return size; +} + +static int +dav_errno(int ret_code) +{ + int result; + + //TRACE(); + + switch(ret_code) { + case HTTP_OK: + case HTTP_CREATED: + case HTTP_ACCEPTED: + case HTTP_NO_CONTENT: + case HTTP_MULTI_STATUS: + result = 0; + break; + + case HTTP_BAD_REQUEST: + result = -EREMOTEIO; + break; + + case HTTP_NON_AUTHORITATIVE: + case HTTP_UNAUTHORIZED: + case HTTP_FORBIDDEN: + case HTTP_METHOD_NOT_ALLOWED: + case HTTP_NOT_ACCEPTABLE: + result = -EACCES; + break; + + case HTTP_NOT_FOUND: + result = -ENOENT; + break; + + case HTTP_INTERNAL_SERVER_ERROR: + result = -EFAULT; + break; + + default : + result = -EIO; + } + + DEBUG1("errno : %d\n", result); + return result; + } + + +/* reverse a string inline. 
This is used by the dircache walking routines */ +static int reverse_string(char *buf, int len) +{ + char c; + char *end = buf+len-1; + + while(buf < end) { + c = *buf; + *(buf++) = *end; + *(end--) = c; + } + return len; +} + + +/* +** atoi +*/ +int dav_atoi(char *digit) { + int ret=0; + int i = (strlen(digit)-1); + double weight=1; + + for(;i;i--) + weight*=10; + + for(;*digit;digit++, weight/=10) { + if(!weight) + weight = 1; + + if(*digit-'0'>=0 && *digit-'0' <=9) + ret+=weight*(*digit-'0'); + } + + return ret; +} + + +/* +** clear buff +*/ +void +dav_clear_sbuf(struct dav_sb_info *server) { + int ret; + TRACE(); + + while((ret= _recvfrom(server->sock, + server->buf, DAV_MAXPATHLEN, MSG_DONTWAIT))>0) + ; +} + + +static inline int +dav_is_open(struct inode *i) +{ + struct dav_i_info *dii = DAV_INOP(i); + TRACE(); + DEBUG1("IS OPEN : %d\n", dii->open); + + return dii->open; +} + +/* + * general request + * server->req : request body, should fill out + * server->length : need to read or something + * return error number, 0 on sucess + */ +int dav_request(struct dav_sb_info *server, int count, const char *data) { + int result = -EIO; + char *ret_code, *ret_length; + + /* clear socket like flush */ + dav_clear_sbuf(server); + + DEBUG1("req: %s\n", server->req); + + /* + ** send request + ** request should be filled before calling this function + */ + if(dav_sendline_raw(server->sock, server->req)<0) + goto out; + + /* send data if have*/ + if(count) + result=dav_send_raw(server->sock, (unsigned char *)data, count); + + /* read 20bytes head from socket */ + if(dav_readline_raw(server, server->buf, DAV_HEAD_SIZE)<0) + goto out; + + /* parse head data */ + ret_code = strtok(server->buf, " \t\n\r"); + ret_length = strtok(NULL, " \t\n\r"); + + if(!ret_code || !ret_length) + goto out; + + server->length = dav_atoi(ret_length); + result = dav_errno(dav_atoi(ret_code)); + + out: + return result; +} + +/* + * smb_build_path: build the path to entry and name storing it in 
buf. + * The path returned will have the trailing '\0'. + */ +static int dav_build_path(struct dav_sb_info *server, char * buf, + struct dentry * entry, char *base_path) +{ + char *path = buf; + int ret = strlen(base_path); + + TRACE(); + + /* memset */ + memset(buf, 0x00, DAV_MAXPATHLEN); + + if (!entry) + return 0; + + /* + * Build the path string walking the tree backward from end to ROOT + * and store it in reversed order [see reverse_string()] + */ +#define ALPHA_COM_LEN 50 + for (;!IS_ROOT(entry);entry = entry->d_parent) { + if (strlen(path) + + entry->d_name.len + ret + ALPHA_COM_LEN > DAV_MAXPATHLEN) + return -ENAMETOOLONG; + + reverse_string((char*)entry->d_name.name, entry->d_name.len); + strncpy(path, entry->d_name.name, entry->d_name.len); + reverse_string((char*)entry->d_name.name, entry->d_name.len); + + path+=entry->d_name.len; + *path++='/'; + } + + reverse_string(base_path, ret); + strncpy(path, base_path, ret); + reverse_string(base_path, ret); + path+=ret; + + reverse_string(buf, path-buf); + + return path-buf; +} + + /* + * We're called with the server locked, and we leave it that way. + * + * get whole file from http and save it. + * return file id. 
+ */ +int +dav_proc_open(struct dav_sb_info *server, struct dentry *dentry, int wish) +{ + struct inode *ino = dentry->d_inode; + struct dav_i_info *dii = DAV_INOP(ino); + int result=-EIO; + mm_segment_t fs; + + TRACE(); + + fs = get_fs(); + set_fs(get_ds()); + + /* make path */ + dav_build_path(server, server->buf, dentry, server->path); + + /* make open request */ + sprintf(server->req, "open\t%s\t%d", server->buf, wish); + + result = dav_request(server, 0, NULL); + if(result<0) + goto out; + + dii->fileid = server->length; + dii->open++; + DEBUG2("PROC_FILEID = %ld\n", dii->fileid); + + out: + set_fs(fs); + + return result; +} + + /* + * + */ +int +dav_proc_read(struct inode *inode, off_t offset, int count, char *data) +{ + int result = -EIO; + struct dav_sb_info *server = dav_server_from_inode(inode); + struct dav_i_info *dii = DAV_INOP(inode); + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + sprintf(server->req, "read\t%ld\t%ld:%d", + dii->fileid, offset, count); + + result = dav_request(server, 0, NULL); + if(result<0) + goto out; + + if(server->length) + result=dav_receive_raw(server->sock, data, server->length); + out: + set_fs(fs); + dav_unlock_server(server); + return result; +} + +int +dav_proc_write(struct inode *inode, off_t offset, int count, const char *data) +{ + int result = -EIO; + struct dav_i_info *dii = DAV_INOP(inode); + struct dav_sb_info *server = dav_server_from_inode(inode); + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* make write command */ + sprintf(server->req, "write\t%ld\t%ld:%d", + dii->fileid, offset, count); + + /* do request */ + result = dav_request(server, count, data); + + set_fs(fs); + dav_unlock_server(server); + return result; +} + +/* +** create file +** we should not use dav_i_info->fileid, here. 
+** just pass to *fileid +** +** dav_inisiate will crate dav_i_info with fileid +** +*/ +int +dav_proc_create(struct dentry *dentry, __u16 attr, time_t ctime, long *fileid) +{ + struct dav_sb_info *server = dav_server_from_dentry(dentry); + int result=-EIO; + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* make path */ + dav_build_path(server, server->buf, dentry, server->path); + + sprintf(server->req, "create\t%s", server->buf); + + result = dav_request(server, 0, NULL); + if(result<0) + goto out; + + *fileid = server->length; + DEBUG2("PROC_FILEID = %ld\n", *fileid); + + out: + set_fs(fs); + dav_unlock_server(server); + + return result; +} + +int +dav_proc_mv(struct dentry *old_dentry, struct dentry *new_dentry) +{ + struct dav_sb_info *server = dav_server_from_dentry(old_dentry); + int result=-EIO; + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* make command with old path */ + dav_build_path(server, server->buf, old_dentry, server->path); + + sprintf(server->req, "mv\t%s\t", server->buf); + + /* meke destination path */ + dav_build_path(server, server->buf, new_dentry, server->path); + + strcat(server->req, server->buf); + + result = dav_request(server, 0, NULL); + + set_fs(fs); + dav_unlock_server(server); + return result; +} + +int +dav_proc_mkdir(struct dentry *dentry) +{ + struct dav_sb_info *server = dav_server_from_dentry(dentry); + int result=-EIO; + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* make path */ + result = dav_build_path(server, server->buf, dentry, server->path); + + result = -EIO; + + sprintf(server->req, "mkdir\t%s", server->buf); + + result = dav_request(server, 0, NULL); + + set_fs(fs); + dav_unlock_server(server); + return result; + +} + +int +dav_proc_rmdir(struct dentry *dentry) +{ + struct dav_sb_info *server = dav_server_from_dentry(dentry); + int result=-EIO; 
+ mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + dav_build_path(server, server->buf, dentry, server->path); + + sprintf(server->req, "rmdir\t%s", server->buf); + + result = dav_request(server, 0, NULL); + + set_fs(fs); + dav_unlock_server(server); + return result; +} + + +int +dav_proc_unlink(struct dentry *dentry) +{ + struct dav_sb_info *server = dav_server_from_dentry(dentry); + int result=-EIO; + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* make path */ + dav_build_path(server, server->buf, dentry, server->path); + sprintf(server->req, "unlink\t%s", server->buf); + + result = dav_request(server, 0, NULL); + + set_fs(fs); + dav_unlock_server(server); + return result; +} + + +int +dav_proc_trunc(struct dav_sb_info *server, long fid, __u32 length) +{ + int result=0; + mm_segment_t fs; + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + sprintf(server->req, "trunc\t%ld\t%d", fid, length); + result = dav_request(server, 0, NULL); + + DEBUG1("TRUNC FID : %ld LENGHT: %d\n", fid, length); + + dav_unlock_server(server); + return result; +} + +static void +dav_init_dirent(struct dav_sb_info *server, struct dav_fattr *fattr) +{ + memset(fattr, 0, sizeof(struct dav_fattr)); + TRACE(); + + fattr->f_nlink = 1; + fattr->f_uid = server->uid; + fattr->f_gid = server->gid; + fattr->f_blksize = 4096; + + DEBUG1("UID : %d(%d)\n", server->uid, server->gid); +} + +static void +dav_finish_dirent(struct dav_sb_info *server, struct dav_fattr *fattr) +{ + TRACE(); + fattr->f_mode = server->file_mode; + + if (fattr->attr & aDIR) + { + fattr->f_mode = server->dir_mode; + fattr->f_size = 512; + } else { + /* normal file mode */ + fattr->f_mode &= ~(S_IXGRP | S_IXOTH); + } + + /* no write permission for user */ + if(fattr->f_gid) { + fattr->f_mode &= ~S_IWOTH | S_IXGRP; + } else { + fattr->f_mode &= ~(S_IWGRP | S_IWOTH); + } + + /* Check the 
read-only flag */ + if (fattr->attr & aRONLY) + fattr->f_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); + + fattr->f_blocks = 0; + if ((fattr->f_blksize != 0) && (fattr->f_size != 0)) { + fattr->f_blocks = + (fattr->f_size - 1) / fattr->f_blksize + 1; + } + return; +} + +void +dav_init_root_dirent(struct dav_sb_info *server, struct dav_fattr *fattr) +{ + TRACE(); + dav_init_dirent(server, fattr); + fattr->attr = aDIR; + fattr->f_ino = 2; /* traditional root inode number */ + fattr->f_mtime = CURRENT_TIME; + dav_finish_dirent(server, fattr); +} + + +int +dav_proc_fill_fattr(struct dav_finfo *finfo, struct dav_fattr *fattr) { + + fattr->f_size = finfo->f_size; + fattr->f_atime = finfo->f_atime; + fattr->f_mtime = finfo->f_mtime; + fattr->f_ctime = finfo->f_ctime; + + if(finfo->f_isdir) + fattr->attr = aDIR; + return 0; +} + + +int +dav_proc_fill_finfo( struct dav_fattr *fattr, struct dav_finfo *finfo) { + + finfo->f_size = fattr->f_size; + finfo->f_atime = fattr->f_atime; + finfo->f_mtime = fattr->f_mtime; + finfo->f_ctime = fattr->f_ctime; + + if(fattr->attr == aDIR) + finfo->f_isdir = 1; + return 0; +} + + +/* findfirst/findnext flags */ +#define dav_CLOSE_AFTER_FIRST (1<<0) +#define dav_CLOSE_IF_END (1<<1) +#define dav_REQUIRE_RESUME_KEY (1<<2) +#define dav_CONTINUE_BIT (1<<3) + +int +dav_proc_readdir(struct file *filp, void *dirent, filldir_t filldir, + struct dav_cache_control *ctl) +{ + struct dentry *dir = filp->f_dentry; + struct dav_sb_info *server = dav_server_from_dentry(dir); + struct dav_fattr fattr; + struct dav_finfo finfo; + int ff_searchcount = 0; + int i, result=-EIO; + mm_segment_t fs; + static struct qstr star = { "*", 1, 0 }; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* make ls command */ + dav_build_path(server, server->buf, dir, server->path); + sprintf(server->req, "ls\t%s", server->buf); + result = dav_request(server, 0, NULL); + if(result<0) + goto out; + + ff_searchcount = server->length/sizeof(finfo); + 
+ /* no result , go home */ + if(!ff_searchcount) + goto out; + + for(i=0, result=0;isock, (unsigned char*)&finfo, sizeof(finfo))<0) + goto out; + + if(!finfo.f_name_len) { + continue; + } + + star.name = finfo.f_name; + star.len = finfo.f_name_len; + + dav_init_dirent(server, &fattr); + + /* fill fattr using finfo */ + dav_proc_fill_fattr(&finfo, &fattr); + + dav_finish_dirent(server, &fattr); + + if (!dav_fill_cache(filp, dirent, filldir, ctl, + &star, &fattr)) + result++; + } + out: + dav_unlock_server(server); + set_fs(fs); + return result; +} + +int +dav_proc_getattr(struct dentry *dir, struct dav_fattr *fattr) +{ + struct dav_sb_info *server = dav_server_from_dentry(dir); + struct dav_finfo finfo; + mm_segment_t fs; + int result = -EIO; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + dav_build_path(server, server->buf, dir, server->path); + sprintf(server->req, "attr\t%s", server->buf); + + result = dav_request(server, 0, NULL); + if(result<0) + goto out; + + /* read finfo */ + if(dav_receive_raw(server->sock, (unsigned char*)&finfo, server->length)<0) + goto out; + + /* init */ + dav_init_dirent(server, fattr); + + /* fill fattr using finfo */ + dav_proc_fill_fattr(&finfo, fattr); + + /* finish dirent */ + dav_finish_dirent(server, fattr); + + out: + dav_unlock_server(server); + set_fs(fs); + return result; +} + +/* +** + */ +int +dav_proc_setattr(struct dentry *dir, struct dav_fattr *fattr) +{ + struct dav_sb_info *server = dav_server_from_dentry(dir); + struct dav_finfo finfo; + mm_segment_t fs; + int result = -EIO; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* fill finfo using fattr */ + dav_proc_fill_finfo(fattr, &finfo); + + dav_build_path(server, server->buf, dir, server->path); + sprintf(server->req, "setattr\t%s\t%d", server->buf, sizeof(finfo)); + + result = dav_request(server, sizeof(finfo), (char*)&finfo); + + set_fs(fs); + dav_unlock_server(server); + return result; +} 
+ +int +dav_proc_disconnect(struct dav_sb_info *server) +{ + mm_segment_t fs; + int result = -EIO; + + TRACE(); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + sprintf(server->req, "quit"); + result = dav_request(server, 0, NULL); + + set_fs(fs); + dav_unlock_server(server); + + return result; +} + + +int +dav_close(struct inode *ino) +{ + struct dav_sb_info *server = dav_server_from_inode(ino); + struct dav_i_info *dii = DAV_INOP(ino); + int result=0; + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + /* not open */ + if (!dav_is_open(ino) || !dii->fileid) + goto ret; + + result=-EIO; + + fs = get_fs(); + set_fs(get_ds()); + + sprintf(server->req, "close\t%ld", dii->fileid); + result = dav_request(server, 0, NULL); + + set_fs(fs); + + /* + dii->fileid = 0; + */ + dii->open = 0; + +ret: + dav_unlock_server(server); + + return result; +} + +/* + * This is used to close a file following a failed instantiate. + * Since we don't have an inode, we can't use any of the above. 
+ */ +int +dav_close_fileid(struct dentry *dentry, long fileid) +{ + int result= 0; + struct dav_i_info *dii = DAV_INOP(dentry->d_inode); + struct dav_sb_info *server = dav_server_from_dentry(dentry); + mm_segment_t fs; + + TRACE(); + + dav_lock_server(server); + + /* not open */ + if (!dii->open || !fileid) + goto ret; + + result=-EIO; + + fs = get_fs(); + set_fs(get_ds()); + + sprintf(server->req, "close\t%ld", fileid); + result = dav_request(server, 0, NULL); + + set_fs(fs); + + dii->open = 0; + +ret: + dav_unlock_server(server); + return result; +} + + +int +dav_open(struct dentry *dentry, int wish) +{ + struct inode *inode = dentry->d_inode; + int result; + TRACE(); + + result = -ENOENT; + if (!inode) { + printk(KERN_ERR "dav_open: no inode for dentry %s/%s\n", + DENTRY_PATH(dentry)); + goto out; + } + + if (!dav_is_open(inode)) { + struct dav_sb_info *server = DAV_SERVER(inode); + dav_lock_server(server); + result = 0; + if (!dav_is_open(inode)) + result = dav_proc_open(server, dentry, wish); + dav_unlock_server(server); + if (result) { + PARANOIA("%s/%s open failed, result=%d\n", + DENTRY_PATH(dentry), result); + goto out; + } + /* + * A successful open means the path is still valid ... + */ + dav_renew_times(dentry); + } + + /* + * Check whether the access is compatible with the desired mode. 
+ */ + result = 0; + + out: + return result; +} + + +int +dav_proc_statfs(struct dentry *dentry, struct statfs *buf) +{ + struct dav_sb_info *server = dav_server_from_dentry(dentry); + struct dav_statfs ds; + int result=-EIO; + mm_segment_t fs; + + TRACE(); + + /* reset a ds struct */ + memset(&ds, 0x00, sizeof(ds)); + + dav_lock_server(server); + + fs = get_fs(); + set_fs(get_ds()); + + /* make command with old path */ + dav_build_path(server, server->buf, dentry, server->path); + + sprintf(server->req, "statfs\t%s\t", server->buf); + result = dav_request(server, 0, NULL); + + + if(result) { + /* On error , set default value */ + + buf->f_blocks = 0; + buf->f_bfree = 0; + buf->f_bavail = 0; + } else { + if(server->length) + result=dav_receive_raw(server->sock, (void*)&ds, server->length); + buf->f_blocks = ds.f_blocks; + buf->f_bavail = buf->f_bfree = ds.f_bfree; + } + + buf->f_bsize = 4098; + buf->f_namelen = 256; + set_fs(fs); + dav_unlock_server(server); + + /* Always return OK */ + return 0; +} + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/davfs/sock.c linux-2.4.20-wolk4.9-fullkernel/fs/davfs/sock.c --- linux-2.4.20-wolk4.8-fullkernel/fs/davfs/sock.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/davfs/sock.c 2003-08-25 20:31:33.000000000 +0200 @@ -0,0 +1,302 @@ +/* + * sock.c + * + * Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke + * Copyright (C) 1997 by Volker Lendecke + * + * Please add a note about your changes to davfs in the ChangeLog file. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "davfs.h" +#include "dav_debug.h" + + +int +_recvfrom(struct socket *socket, unsigned char *ubuf, int size, + unsigned flags) +{ + struct iovec iov; + struct msghdr msg; + struct scm_cookie scm; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + iov.iov_base = ubuf; + iov.iov_len = size; + + memset(&scm, 0,sizeof(scm)); + size=socket->ops->recvmsg(socket, &msg, size, flags, &scm); + if(size>=0) + scm_recv(socket,&msg,&scm,flags); + return size; +} + +static int +_send(struct socket *socket, const void *buff, int len) +{ + struct iovec iov; + struct msghdr msg; + struct scm_cookie scm; + int err; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + + iov.iov_base = (void *)buff; + iov.iov_len = len; + + msg.msg_flags = 0; + + err = scm_send(socket, &msg, &scm); + if (err >= 0) + { + err = socket->ops->sendmsg(socket, &msg, len, &scm); + scm_destroy(&scm); + } + return err; +} + +struct data_callback { + struct tq_struct cb; + struct sock *sk; +}; + + +/* + * get socket struct + * from mount's fd + * + * return NULL, if fail + */ +struct socket * +dav_get_sock(struct dav_sb_info *server, unsigned int fd) +{ + struct inode *sock_inode; + + server->sock_file = fget(fd); + if(!server->sock_file) + return NULL; + + sock_inode = server->sock_file->f_dentry->d_inode; + if(!S_ISSOCK(sock_inode->i_mode)) { + fput(server->sock_file); + return NULL; + } + + server->sock = &sock_inode->u.socket_i; + if(!server->sock) + fput(server->sock_file); + + return server->sock; +} + + +/* + * Called with the server locked. 
+ */ +void +dav_close_socket(struct dav_sb_info *server) +{ + struct file * file = server->sock_file; + + if (file) { + server->sock_file = NULL; + fput(file); + } +} + +int +dav_send_raw(struct socket *socket, unsigned char *source, int length) +{ + int result; + int already_sent = 0; + + while (already_sent < length) + { + result = _send(socket, + (void *) (source + already_sent), + length - already_sent); + + if (result == 0) + { + return -EIO; + } + if (result < 0) + { + DEBUG1("dav_send_raw: sendto error = %d\n", -result); + return result; + } + already_sent += result; + } + return already_sent; +} + +int +dav_receive_raw(struct socket *socket, unsigned char *target, int length) +{ + int result; + int already_read = 0; + + while (already_read < length) + { + result = _recvfrom(socket, + (void *) (target + already_read), + length - already_read, 0); + if (result == 0) + { + return -EIO; + } + if (result < 0) + { + DEBUG1("recvfrom error = %d\n", -result); + return result; + } + already_read += result; + } + return already_read; +} + + +int do_tcp_rcv(struct dav_sb_info *server, void *buffer, size_t len) { + struct poll_wqueues wait_table; + struct socket *sock = server->sock; + int init_timeout=10; + size_t dataread; + int result = 0; + + dataread = 0; + /* + init_timeout = server->m.time_out * 20; + */ + /* hard-mounted volumes have no timeout, except connection close... */ + /* + if (!(server->m.flags & NCP_MOUNT_SOFT)) + init_timeout = 0x7FFF0000; + */ + while (len) { + poll_initwait(&wait_table); + /* mb() is not necessary because ->poll() will serialize + instructions adding the wait_table waitqueues in the + waitqueue-head before going to calculate the mask-retval. 
*/ + __set_current_state(TASK_INTERRUPTIBLE); + if (!(sock->ops->poll(server->sock_file, sock, &wait_table.pt) & POLLIN)) { + init_timeout = schedule_timeout(init_timeout); + poll_freewait(&wait_table); + current->state = TASK_RUNNING; + if (signal_pending(current)) { + return -ERESTARTSYS; + } + if (!init_timeout) { + return -EIO; + } + if(wait_table.error) { + return wait_table.error; + } + } else { + poll_freewait(&wait_table); + } + current->state = TASK_RUNNING; + + result = _recvfrom(sock, buffer, len, MSG_DONTWAIT); + if (result < 0) { + if (result == -EAGAIN) { + DEBUG1("ncpfs: tcp: bad select ready\n"); + continue; + } + return result; + } + if (result == 0) { + printk(KERN_ERR "ncpfs: tcp: EOF on socket\n"); + return -EIO; + } + if (result > len) { + printk(KERN_ERR "ncpfs: tcp: bug in recvmsg\n"); + return -EIO; + } + dataread += result; + buffer += result; + len -= result; + } + return 0; +} + +int +dav_sendline_raw(struct socket *socket, unsigned char *target) { + int ret = 0; + ret+=dav_send_raw(socket, target, strlen(target)); + ret+=dav_send_raw(socket, "\r\n", 2); + + return ret; +} + +int +dav_readline_tcp(struct dav_sb_info *server, unsigned char *target, int length) { + int ret = 0; + int ch; + + for(ret=0;ret<length && do_tcp_rcv(server, &ch, 1)>0; ret++) { + if(ch=='\r') { + /* read '\n' */ + do_tcp_rcv(server, &ch, 1); + break; + } + *target++=ch; + } + + *target = 0; + + return ret; +} + +int +dav_readline_raw(struct dav_sb_info *server, unsigned char *target, int length) { + int ret = 0; + unsigned char ch; + + for(ret=0;ret<length && dav_receive_raw(server->sock, &ch, 1)>0; ret++) { + if(ch=='\r' && ch=='\n') { + /* read '\n' */ + //dav_receive_raw(server->sock, &ch, 1); + break; + } + *target++=ch; + } + + *target = 0; + + return ret; +} + + + + + + + + + + + + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/dcache.c linux-2.4.20-wolk4.9-fullkernel/fs/dcache.c --- linux-2.4.20-wolk4.8-fullkernel/fs/dcache.c 2003-08-25 18:26:39.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/dcache.c 2003-08-28
11:50:23.000000000 +0200 @@ -67,7 +67,6 @@ static inline void d_free(struct dentry dentry->d_extra_attributes = NULL; } kmem_cache_free(dentry_cache, dentry); - dentry_stat.nr_dentry--; } /* @@ -153,6 +152,7 @@ unhash_it: kill_it: { struct dentry *parent; list_del(&dentry->d_child); + dentry_stat.nr_dentry--; /* drops the lock, at that point nobody can reach this dentry */ dentry_iput(dentry); parent = dentry->d_parent; @@ -300,6 +300,7 @@ static inline void prune_one_dentry(stru list_del_init(&dentry->d_hash); list_del(&dentry->d_child); + dentry_stat.nr_dentry--; dentry_iput(dentry); parent = dentry->d_parent; d_free(dentry); @@ -631,11 +632,15 @@ struct dentry * d_alloc(struct dentry * dentry->d_sb = parent->d_sb; spin_lock(&dcache_lock); list_add(&dentry->d_child, &parent->d_subdirs); + dentry_stat.nr_dentry++; spin_unlock(&dcache_lock); - } else + } else { INIT_LIST_HEAD(&dentry->d_child); + spin_lock(&dcache_lock); + dentry_stat.nr_dentry++; + spin_unlock(&dcache_lock); + } - dentry_stat.nr_dentry++; return dentry; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/ext3/namei.c linux-2.4.20-wolk4.9-fullkernel/fs/ext3/namei.c --- linux-2.4.20-wolk4.8-fullkernel/fs/ext3/namei.c 2003-08-25 18:27:13.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/ext3/namei.c 2003-08-25 20:35:39.000000000 +0200 @@ -1254,6 +1254,7 @@ static int make_indexed_dir(handle_t *ha unsigned blocksize; struct dx_hash_info hinfo; u32 block; + struct fake_dirent *fde; blocksize = dir->i_sb->s_blocksize; dxtrace(printk("Creating index\n")); @@ -1274,8 +1275,8 @@ static int make_indexed_dir(handle_t *ha data1 = bh2->b_data; /* The 0th block becomes the root, move the dirents out */ - de = &root->dotdot; - de = (struct ext3_dir_entry_2 *) ((char *)de + de->rec_len); + fde = &root->dotdot; + de = (struct ext3_dir_entry_2 *)((char *)fde + fde->rec_len); len = ((char *) root) + blocksize - (char *) de; memcpy (data1, de, len); de = (struct ext3_dir_entry_2 *) data1; diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/fs/hfs/mdb.c linux-2.4.20-wolk4.9-fullkernel/fs/hfs/mdb.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfs/mdb.c 2001-02-13 23:13:45.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfs/mdb.c 2003-08-25 20:35:28.000000000 +0200 @@ -197,7 +197,7 @@ struct hfs_mdb *hfs_mdb_get(hfs_sysmdb s if (!(mdb->attrib & htons(HFS_SB_ATTRIB_CLEAN))) { hfs_warn("hfs_fs: WARNING: mounting unclean filesystem.\n"); - } else if (!readonly) { + } else if (!readonly && !(mdb->attrib & (HFS_SB_ATTRIB_HLOCK | HFS_SB_ATTRIB_SLOCK))) { /* Mark the volume uncleanly unmounted in case we crash */ hfs_put_ns(mdb->attrib & htons(~HFS_SB_ATTRIB_CLEAN), raw->drAtrb); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfs/super.c linux-2.4.20-wolk4.9-fullkernel/fs/hfs/super.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfs/super.c 2002-08-03 02:39:45.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfs/super.c 2003-08-25 20:35:28.000000000 +0200 @@ -49,6 +49,7 @@ static struct super_operations hfs_super put_super: hfs_put_super, write_super: hfs_write_super, statfs: hfs_statfs, + remount_fs: hfs_remount, }; /*================ File-local variables ================*/ @@ -162,23 +163,24 @@ static int parse_options(char *options, char *this_char, *value; char names, fork; - /* initialize the sb with defaults */ - memset(hsb, 0, sizeof(*hsb)); - hsb->magic = HFS_SB_MAGIC; - hsb->s_uid = current->uid; - hsb->s_gid = current->gid; - hsb->s_umask = current->fs->umask; - hsb->s_type = 0x3f3f3f3f; /* == '????' */ - hsb->s_creator = 0x3f3f3f3f; /* == '????' */ - hsb->s_lowercase = 0; - hsb->s_quiet = 0; - hsb->s_afpd = 0; - /* default version. 
0 just selects the defaults */ - hsb->s_version = 0; - hsb->s_conv = 'b'; - names = '?'; - fork = '?'; - *part = 0; + if (hsb->magic != HFS_SB_MAGIC) { + /* initialize the sb with defaults */ + hsb->magic = HFS_SB_MAGIC; + hsb->s_uid = current->uid; + hsb->s_gid = current->gid; + hsb->s_umask = current->fs->umask; + hsb->s_type = 0x3f3f3f3f; /* == '????' */ + hsb->s_creator = 0x3f3f3f3f; /* == '????' */ + hsb->s_lowercase = 0; + hsb->s_quiet = 0; + hsb->s_afpd = 0; + /* default version. 0 just selects the defaults */ + hsb->s_version = 0; + hsb->s_conv = 'b'; + names = '?'; + fork = '?'; + *part = 0; + } if (!options) { goto done; @@ -397,6 +399,7 @@ struct super_block *hfs_read_super(struc struct inode *root_inode; int part; + memset(HFS_SB(s), 0, sizeof(*(HFS_SB(s)))); if (!parse_options((char *)data, HFS_SB(s), &part)) { hfs_warn("hfs_fs: unable to parse mount options.\n"); goto bail3; @@ -434,6 +437,12 @@ struct super_block *hfs_read_super(struc goto bail2; } + if (mdb->attrib & (HFS_SB_ATTRIB_HLOCK | HFS_SB_ATTRIB_SLOCK)) { + if (!silent) + hfs_warn("hfs_fs: Filesystem is marked locked, mounting read-only.\n"); + s->s_flags |= MS_RDONLY; + } + HFS_SB(s)->s_mdb = mdb; if (HFS_ITYPE(mdb->next_id) != 0) { hfs_warn("hfs_fs: too many files.\n"); @@ -474,6 +483,27 @@ bail3: return NULL; } +int hfs_remount(struct super_block *s, int *flags, char *data) +{ + int part; /* ignored */ + + if (!parse_options(data, HFS_SB(s), &part)) { + hfs_warn("hfs_fs: unable to parse mount options.\n"); + return -EINVAL; + } + + if ((*flags & MS_RDONLY) == (s->s_flags & MS_RDONLY)) + return 0; + if (!(*flags & MS_RDONLY)) { + if (HFS_SB(s)->s_mdb->attrib & (HFS_SB_ATTRIB_HLOCK | HFS_SB_ATTRIB_SLOCK)) { + hfs_warn("hfs_fs: Filesystem is marked locked, leaving it read-only.\n"); + s->s_flags |= MS_RDONLY; + *flags |= MS_RDONLY; + } + } + return 0; +} + static int __init init_hfs_fs(void) { hfs_cat_init(); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/Makefile 
linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/Makefile --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/Makefile 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/Makefile 2003-08-25 20:35:28.000000000 +0200 @@ -1,37 +1,17 @@ -#KERNELSRC= +# +# Makefile for the linux hfsplus filesystem routines. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definitions are now in the main makefile... -hfsplus-objs := super.o options.o inode.o extents.o catalog.o dir.o btree.o \ - bnode.o brec.o bfind.o btiter.o tables.o unicode.o wrapper.o +O_TARGET := hfsplus.o -EXTRA_CFLAGS = -fno-inline +obj-y := super.o options.o inode.o extents.o catalog.o dir.o btree.o \ + bnode.o brec.o bfind.o tables.o unicode.o wrapper.o -ifeq ($(PATCHLEVEL),4) -O_TARGET := hfsplus.o -obj-y := $(hfsplus-objs) -endif +obj-m := $(O_TARGET) -obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o - -ifeq ("$(TOPDIR)","") -ifeq ("$(KERNELSRC)","") -all install: - @echo 'use "make KERNELSRC=..."' -else -all: - make modules -C $(KERNELSRC) SUBDIRS=$$PWD CONFIG_HFSPLUS_FS=m - -install: - @eval `sed -n '1,5s/^\([A-Z]*\) *= *\(.*\)$$/\1=\2/p' $(KERNELSRC)/Makefile`; \ - KERNELRELEASE=$$VERSION.$$PATCHLEVEL.$$SUBLEVEL$$EXTRAVERSION; \ - mkdir -pv $(INSTALL_MOD_PATH)/lib/modules/$$KERNELRELEASE/kernel/fs/hfsplus; \ - mod=hfsplus.o; test -f hfsplus.ko && mod=hfsplus.ko; \ - cp -v $$mod $(INSTALL_MOD_PATH)/lib/modules/$$KERNELRELEASE/kernel/fs/hfsplus - - -endif -endif - --include $(TOPDIR)/Rules.make - -clean: - rm -f hfsplus.o hfsplus.ko $(hfsplus-objs) .*.flags .*.cmd *.mod.? 
+include $(TOPDIR)/Rules.make diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/README linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/README --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/README 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/README 2003-08-25 20:35:28.000000000 +0200 @@ -0,0 +1,33 @@ + Copyright (C) 2001 Brad Boyer + Copyright (C) 2003 Ardis Technologies bv + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +HFS+ is the standard volume format for Mac OS X. + +A basic overview and also a detailed description of the HFS+ volume format +is provided by Apple's technical note 1150. + +The new driver provides full access to HFS+ volumes under Linux. It +is based on the driver by Brad Boyer at +http://sourceforge.net/projects/linux-hfsplus. + +This driver now supports full read and write access and has better +performance. It also supports hard links, and the resource fork is accessible +via /rsrc. + +This is a beta release; it was intensively tested, but use at your own risk.
+ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/bfind.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/bfind.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/bfind.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/bfind.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Search routines for btrees */ @@ -122,6 +123,68 @@ int hfsplus_btree_find_entry(struct hfsp return 0; } +int hfsplus_btree_move(struct hfsplus_find_data *fd, int cnt) +{ + struct hfsplus_btree *tree; + hfsplus_bnode *bnode; + int idx, res = 0; + u16 off, len, keylen; + + bnode = fd->bnode; + tree = bnode->tree; + + if (cnt < -0xFFFF || cnt > 0xFFFF) + return -EINVAL; + + if (cnt < 0) { + cnt = -cnt; + while (cnt > fd->record) { + cnt -= fd->record + 1; + fd->record = bnode->num_recs - 1; + idx = bnode->prev; + if (!idx) { + res = -ENOENT; + goto out; + } + hfsplus_put_bnode(bnode); + bnode = hfsplus_find_bnode(tree, idx); + if (!bnode) { + res = -EIO; + goto out; + } + } + fd->record -= cnt; + } else { + while (cnt >= bnode->num_recs - fd->record) { + cnt -= bnode->num_recs - fd->record; + fd->record = 0; + idx = bnode->next; + if (!idx) { + res = -ENOENT; + goto out; + } + hfsplus_put_bnode(bnode); + bnode = hfsplus_find_bnode(tree, idx); + if (!bnode) { + res = -EIO; + goto out; + } + } + fd->record += cnt; + } + + len = hfsplus_brec_lenoff(bnode, fd->record, &off); + keylen = hfsplus_brec_keylen(bnode, fd->record); + fd->keyoffset = off; + fd->keylength = keylen; + fd->entryoffset = off + keylen; + fd->entrylength = len - keylen; + hfsplus_bnode_readbytes(bnode, fd->key, off, keylen); +out: + fd->bnode = bnode; + return res; +} + int hfsplus_find_init(hfsplus_btree *tree, struct hfsplus_find_data *fd) { fd->tree = tree; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/bnode.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/bnode.c --- 
linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/bnode.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/bnode.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handle basic btree node operations */ @@ -756,7 +757,7 @@ hfsplus_bnode *__hfsplus_find_bnode(hfsp hfsplus_bnode *node; if (cnid >= tree->node_count) { - printk("HFS+-fs: request for non-existent node in B*Tree\n"); + printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid); return NULL; } @@ -779,7 +780,7 @@ hfsplus_bnode *__hfsplus_create_bnode(hf loff_t off; if (cnid >= tree->node_count) { - printk("HFS+-fs: request for non-existent node in B*Tree\n"); + printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid); return NULL; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/brec.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/brec.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/brec.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/brec.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handle individual btree records */ @@ -10,26 +11,6 @@ #include "hfsplus_fs.h" #include "hfsplus_raw.h" -/* Get the offset of the given record in the given node */ -u16 hfsplus_brec_off(hfsplus_bnode *node, u16 rec) -{ - u16 dataoff; - - dataoff = node->tree->node_size - (rec + 1) * 2; - return hfsplus_bnode_read_u16(node, dataoff); -} - -/* Get the length of the given record in the given node */ -u16 hfsplus_brec_len(hfsplus_bnode *node, u16 rec) -{ - u16 retval[2]; - u16 dataoff; - - dataoff = node->tree->node_size - (rec + 2) * 2; - hfsplus_bnode_readbytes(node, retval, dataoff, 4); - return be16_to_cpu(retval[0]) - be16_to_cpu(retval[1]); -} - /* Get the length and offset of the given record in the given node */ u16 
hfsplus_brec_lenoff(hfsplus_bnode *node, u16 rec, u16 *off) { @@ -42,20 +23,6 @@ u16 hfsplus_brec_lenoff(hfsplus_bnode *n return be16_to_cpu(retval[0]) - *off; } -/* Copy a record from a node into a buffer, return the actual length */ -u16 hfsplus_brec_data(hfsplus_bnode *node, u16 rec, char *buf, - u16 len) -{ - u16 recoff, reclen, cplen; - - reclen = hfsplus_brec_lenoff(node, rec, &recoff); - if (!reclen) - return 0; - cplen = (reclen>len) ? len : reclen; - hfsplus_bnode_readbytes(node, buf, recoff, cplen); - return reclen; -} - /* Get the length of the key from a keyed record */ u16 hfsplus_brec_keylen(hfsplus_bnode *node, u16 rec) { @@ -70,7 +37,7 @@ u16 hfsplus_brec_keylen(hfsplus_bnode *n !(node->tree->attributes & HFSPLUS_TREE_VAR_NDXKEY_SIZE)) { retval = node->tree->max_key_len; } else { - recoff = hfsplus_brec_off(node, rec); + recoff = hfsplus_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); if (!recoff) return 0; hfsplus_bnode_readbytes(node, buf, recoff, klsz); @@ -81,22 +48,3 @@ u16 hfsplus_brec_keylen(hfsplus_bnode *n } return (retval + klsz + 1) & 0xFFFE; } - -/* Get a copy of the key of the given record, returns real key length */ -u16 hfsplus_brec_key(hfsplus_bnode *node, u16 rec, void *buf, - u16 len) -{ - u16 recoff, reclen, keylen, tocopy; - - reclen = hfsplus_brec_lenoff(node, rec, &recoff); - keylen = hfsplus_brec_keylen(node, rec); - if (!reclen || !keylen) - return 0; - if (keylen > reclen) { - printk("HFS+-fs: corrupt key length in B*Tree (%d,%d,%d,%d,%d)\n", node->this, rec, reclen, keylen, recoff); - return 0; - } - tocopy = (len > keylen) ? 
keylen : len; - hfsplus_bnode_readbytes(node, buf, recoff, tocopy); - return keylen; -} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/btiter.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/btiter.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/btiter.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/btiter.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -/* - * linux/fs/hfsplus/btiter.c - * - * Copyright (C) 2001 - * Brad Boyer (flar@allandria.com) - * - * Iterators for btrees - */ - -#include "hfsplus_fs.h" - -int hfsplus_btiter_move(struct hfsplus_find_data *fd, int cnt) -{ - struct hfsplus_btree *tree; - hfsplus_bnode *bnode; - int idx, res = 0; - u16 off, len, keylen; - - bnode = fd->bnode; - tree = bnode->tree; - - if (cnt < -0xFFFF || cnt > 0xFFFF) - return -EINVAL; - - if (cnt < 0) { - cnt = -cnt; - while (cnt > fd->record) { - cnt -= fd->record + 1; - fd->record = bnode->num_recs - 1; - idx = bnode->prev; - if (!idx) { - res = -ENOENT; - goto out; - } - hfsplus_put_bnode(bnode); - bnode = hfsplus_find_bnode(tree, idx); - if (!bnode) { - res = -EIO; - goto out; - } - } - fd->record -= cnt; - } else { - while (cnt >= bnode->num_recs - fd->record) { - cnt -= bnode->num_recs - fd->record; - fd->record = 0; - idx = bnode->next; - if (!idx) { - res = -ENOENT; - goto out; - } - hfsplus_put_bnode(bnode); - bnode = hfsplus_find_bnode(tree, idx); - if (!bnode) { - res = -EIO; - goto out; - } - } - fd->record += cnt; - } - - len = hfsplus_brec_lenoff(bnode, fd->record, &off); - keylen = hfsplus_brec_keylen(bnode, fd->record); - fd->keyoffset = off; - fd->keylength = keylen; - fd->entryoffset = off + keylen; - fd->entrylength = len - keylen; - hfsplus_bnode_readbytes(bnode, fd->key, off, keylen); -out: - fd->bnode = bnode; - return res; -} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/btree.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/btree.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/btree.c 2003-08-25 
18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/btree.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handle opening/closing btree */ @@ -164,10 +165,13 @@ hfsplus_bnode *hfsplus_btree_alloc_node( u8 *data, byte, m; int i; - if (!tree->free_nodes) { + while (!tree->free_nodes) { loff_t size; + int res; - hfsplus_extend_file(tree->inode); + res = hfsplus_extend_file(tree->inode); + if (res) + return ERR_PTR(res); HFSPLUS_I(tree->inode).total_blocks = HFSPLUS_I(tree->inode).alloc_blocks; size = HFSPLUS_I(tree->inode).total_blocks; size <<= tree->sb->s_blocksize_bits; @@ -204,7 +208,7 @@ hfsplus_bnode *hfsplus_btree_alloc_node( } } } - if (++off >= PAGE_CACHE_MASK) { + if (++off >= PAGE_CACHE_SIZE) { hfsplus_kunmap(*pagep++); data = hfsplus_kmap(*pagep); off = 0; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/catalog.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/catalog.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/catalog.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/catalog.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handling of catalog records */ @@ -71,6 +72,9 @@ static int hfsplus_fill_cat_entry(hfsplu folder->attribute_mod_date = folder->access_date = hfsp_now2mt(); hfsplus_set_perms(inode, &folder->permissions); + if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir) + /* invisible and namelocked */ + folder->user_info.frFlags = cpu_to_be16(0x5000); return sizeof(*folder); } else { hfsplus_cat_file *file; @@ -82,9 +86,11 @@ static int hfsplus_fill_cat_entry(hfsplu file->create_date = file->content_mod_date = file->attribute_mod_date = file->access_date = hfsp_now2mt(); - if (cnid == inode->i_ino) + if (cnid == inode->i_ino) { hfsplus_set_perms(inode, &file->permissions); - else { + 
file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type); + file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator); + } else { file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE); file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR); file->user_info.fdFlags = cpu_to_be16(0x100); @@ -181,6 +187,7 @@ int hfsplus_delete_cat(u32 cnid, struct hfsplus_fork_raw fork; struct list_head *pos; int err, off; + u16 type; dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); sb = dir->i_sb; @@ -207,15 +214,18 @@ int hfsplus_delete_cat(u32 cnid, struct if (err) goto out; + type = hfsplus_bnode_read_u16(fd.bnode, fd.entryoffset); + if (type == HFSPLUS_FILE) { #if 0 - off = fd.entryoffset + offsetof(hfsplus_cat_file, data_fork); - hfsplus_bnode_readbytes(fd.bnode, &fork, off, sizeof(fork)); - hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_DATA); + off = fd.entryoffset + offsetof(hfsplus_cat_file, data_fork); + hfsplus_bnode_readbytes(fd.bnode, &fork, off, sizeof(fork)); + hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_DATA); #endif - off = fd.entryoffset + offsetof(hfsplus_cat_file, rsrc_fork); - hfsplus_bnode_readbytes(fd.bnode, &fork, off, sizeof(fork)); - hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC); + off = fd.entryoffset + offsetof(hfsplus_cat_file, rsrc_fork); + hfsplus_bnode_readbytes(fd.bnode, &fork, off, sizeof(fork)); + hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC); + } list_for_each(pos, &HFSPLUS_I(dir).open_dir_list) { struct hfsplus_readdir_data *rd = diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/dir.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/dir.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/dir.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/dir.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handling of directories */ @@ -143,7 +144,7 @@ 
static int hfsplus_readdir(struct file * default: if (filp->f_pos >= inode->i_size) goto out; - err = hfsplus_btiter_move(&fd, filp->f_pos - 1); + err = hfsplus_btree_move(&fd, filp->f_pos - 1); if (err) goto out; } @@ -190,7 +191,7 @@ static int hfsplus_readdir(struct file * filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; - err = hfsplus_btiter_move(&fd, 1); + err = hfsplus_btree_move(&fd, 1); if (err) goto out; } @@ -211,6 +212,7 @@ out: return err; } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) static loff_t hfsplus_seek_dir(struct file *file, loff_t offset, int origin) { loff_t res; @@ -221,6 +223,7 @@ static loff_t hfsplus_seek_dir(struct fi return res; } +#endif static int hfsplus_dir_release(struct inode *inode, struct file *file) { @@ -234,11 +237,10 @@ static int hfsplus_dir_release(struct in int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode) { - struct super_block *sb = dir->i_sb; struct inode *inode; int res; - inode = hfsplus_new_inode(sb, mode); + inode = hfsplus_new_inode(dir->i_sb, mode); if (!inode) return -ENOSPC; @@ -353,13 +355,10 @@ int hfsplus_unlink(struct inode *dir, st int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode) { - struct super_block *sb; struct inode *inode; int res; - sb = dir->i_sb; - inode = dentry->d_inode; - inode = hfsplus_new_inode(sb, S_IFDIR | mode); + inode = hfsplus_new_inode(dir->i_sb, S_IFDIR | mode); if (!inode) return -ENOSPC; @@ -461,9 +460,11 @@ int hfsplus_rename(struct inode *old_dir return res; } - res = hfsplus_rename_cat(old_dentry->d_inode->i_ino, + res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name); + if (!res) + new_dentry->d_fsdata = old_dentry->d_fsdata; return res; } @@ -482,6 +483,10 @@ struct inode_operations hfsplus_dir_inod struct file_operations hfsplus_dir_operations = { .read = generic_read_dir, .readdir = hfsplus_readdir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) 
.llseek = hfsplus_seek_dir, +#else + .llseek = generic_file_llseek, +#endif .release = hfsplus_dir_release, }; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/extents.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/extents.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/extents.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/extents.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handling of Extents both in catalog and extents overflow trees */ @@ -49,7 +50,7 @@ void hfsplus_fill_ext_key(hfsplus_btree_ key->ext.pad = 0; } -static int hfsplus_find_extent(hfsplus_extent *extent, u32 off) +static u32 hfsplus_find_extent(hfsplus_extent *extent, u32 off) { int i; u32 count; @@ -85,30 +86,26 @@ static int hfsplus_find_extentry(struct int hfsplus_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - struct super_block *s; + struct super_block *sb; hfsplus_extent_rec ext_entry; struct hfsplus_find_data fd; - unsigned long ino; int err = -EIO; u32 ablock, dblock = 0; - ino = inode->i_ino; - s = inode->i_sb; + sb = inode->i_sb; /* Convert inode block to disk allocation block */ ablock = iblock; if (ablock >= HFSPLUS_I(inode).total_blocks) { - if (ablock > HFSPLUS_I(inode).total_blocks || !create) { - BUG(); + if (ablock > HFSPLUS_I(inode).total_blocks || !create) return -EIO; - } if (ablock >= HFSPLUS_I(inode).alloc_blocks) { err = hfsplus_extend_file(inode); if (err) return err; } - HFSPLUS_I(inode).mmu_private += s->s_blocksize; + HFSPLUS_I(inode).mmu_private += sb->s_blocksize; HFSPLUS_I(inode).total_blocks++; mark_inode_dirty(inode); } else @@ -117,27 +114,23 @@ int hfsplus_get_block(struct inode *inod if (ablock < HFSPLUS_I(inode).extent_blocks) { dblock = hfsplus_find_extent(HFSPLUS_I(inode).extents, ablock); } else { - hfsplus_find_init(HFSPLUS_SB(s).ext_tree, &fd); - 
hfsplus_fill_ext_key(fd.search_key, ino, ablock, HFSPLUS_IS_RSRC(inode) ? + hfsplus_find_init(HFSPLUS_SB(sb).ext_tree, &fd); + hfsplus_fill_ext_key(fd.search_key, inode->i_ino, ablock, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); err = hfsplus_find_extentry(&fd, ext_entry); if (!err) dblock = hfsplus_find_extent(ext_entry, ablock - be32_to_cpu(fd.key->ext.start_block)); hfsplus_find_exit(&fd); - if (err) { - BUG(); + if (err) return err; - } } - if (!dblock) { - BUG(); + if (!dblock) return -EIO; - } dprint(DBG_EXTENT, "get_block(%lu): %lu - %u\n", inode->i_ino, iblock, dblock); - map_bh(bh_result, s, dblock + HFSPLUS_SB(s).blockoffset); + map_bh(bh_result, sb, dblock + HFSPLUS_SB(sb).blockoffset); if (create) set_buffer_new(bh_result); return 0; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_fs.h linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_fs.h --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_fs.h 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_fs.h 2003-08-25 21:57:00.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 1999 * Brad Boyer (flar@pants.nu) + * (C) 2003 Ardis Technologies * */ @@ -12,8 +13,6 @@ #include #include #include "hfsplus_raw.h" -#include "hfsplus_fs_sb.h" -#include "hfsplus_fs_i.h" #define DBG_BNODE_REFS 0x00000001 #define DBG_BNODE_MOD 0x00000002 @@ -111,6 +110,88 @@ typedef struct hfsplus_bnode { #define HFSPLUS_BNODE_DIRTY 3 #define HFSPLUS_BNODE_DELETED 4 +/* + * HFS+ superblock info (built from Volume Header on disk) + */ + +struct hfsplus_vh; +struct hfsplus_btree; + +struct hfsplus_sb_info { + struct buffer_head *s_vhbh; + struct hfsplus_vh *s_vhdr; + struct hfsplus_btree *ext_tree; + struct hfsplus_btree *cat_tree; + struct hfsplus_btree *attr_tree; + struct inode *alloc_file; + struct inode *hidden_dir; + + /* Runtime variables */ + u32 blockoffset; + u32 sect_count; + //int a2b_shift; + + /* Stuff in host order from Vol Header */ + u32 
total_blocks; + u32 free_blocks; + u32 next_alloc; + u32 next_cnid; + u32 file_count; + u32 folder_count; + + /* Config options */ + u32 creator; + u32 type; + + int charcase; + int fork; + int namemap; + + umode_t umask; + uid_t uid; + gid_t gid; + + unsigned long flags; + + atomic_t inode_cnt; + u32 last_inode_cnt; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) + struct list_head rsrc_inodes; +#else + struct hlist_head rsrc_inodes; +#endif +}; + +#define HFSPLUS_SB_WRITEBACKUP 0x0001 + + +struct hfsplus_inode_info { + /* Device number in hfsplus_permissions in catalog */ + u32 dev; + /* Allocation extents from catlog record or volume header */ + hfsplus_extent_rec extents; + u32 total_blocks, extent_blocks, alloc_blocks; + atomic_t opencnt; + + struct inode *rsrc_inode; + unsigned long flags; + + struct list_head open_dir_list; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) + unsigned long mmu_private; +#else + loff_t mmu_private; + struct inode vfs_inode; +#endif +}; + +#define HFSPLUS_FLG_RSRC 0x0001 +#define HFSPLUS_FLG_DIRTYMODE 0x0002 + +#define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC)) +#define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC) + struct hfsplus_find_data { /* filled by caller */ hfsplus_btree_key *search_key; @@ -143,6 +224,7 @@ void hfsplus_find_rec(hfsplus_bnode *, s int hfsplus_btree_find(struct hfsplus_find_data *); int hfsplus_btree_find_entry(struct hfsplus_find_data *, void *, int); +int hfsplus_btree_move(struct hfsplus_find_data *, int); int hfsplus_find_init(hfsplus_btree *, struct hfsplus_find_data *); void hfsplus_find_exit(struct hfsplus_find_data *); @@ -166,15 +248,8 @@ int hfsplus_bnode_insert_rec(struct hfsp int hfsplus_bnode_remove_rec(struct hfsplus_find_data *); /* brec.c */ -u16 hfsplus_brec_off(hfsplus_bnode *, u16); -u16 hfsplus_brec_len(hfsplus_bnode *, u16); u16 hfsplus_brec_lenoff(hfsplus_bnode *, u16, u16 *); -u16 hfsplus_brec_data(hfsplus_bnode *, u16, char *, u16); 
u16 hfsplus_brec_keylen(hfsplus_bnode *, u16); -u16 hfsplus_brec_key(hfsplus_bnode *, u16, void *, u16); - -/* btiter.c */ -int hfsplus_btiter_move(struct hfsplus_find_data *, int); /* btree.c */ hfsplus_btree *hfsplus_open_btree(struct super_block *, u32); @@ -246,9 +321,9 @@ static inline struct hfsplus_inode_info #define HFSPLUS_I(inode) (*list_entry(inode, struct hfsplus_inode_info, vfs_inode)) #endif -#if 0 +#if 1 #define hfsplus_kmap(p) ({ struct page *__p = (p); kmap(__p); }) -#define hfsplus_kunmap(p) ({ struct page *__p = (p); kunmap(__p); }) +#define hfsplus_kunmap(p) ({ struct page *__p = (p); kunmap(__p); __p; }) #else #define hfsplus_kmap(p) kmap(p) #define hfsplus_kunmap(p) kunmap(p) @@ -260,17 +335,27 @@ static inline struct hfsplus_inode_info /* compatibility */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) -#define unlock_page(page) UnlockPage(page) #define PageUptodate(page) Page_Uptodate(page) #define wait_on_page_locked(page) wait_on_page(page) #define get_seconds() CURRENT_TIME #define page_symlink(i,n,l) block_symlink(i,n,l) -#define map_bh(bh, sb, bl) ({ \ +#define map_bh(bh, sb, block) ({ \ bh->b_dev = kdev_t_to_nr(sb->s_dev); \ - bh->b_blocknr = dblock + HFSPLUS_SB(sb).blockoffset; \ + bh->b_blocknr = block; \ bh->b_state |= (1UL << BH_Mapped); \ }) #define set_buffer_new(bh) (bh->b_state |= (1UL << BH_New)) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) +#define new_inode(sb) ({ \ + struct inode *inode = get_empty_inode(); \ + if (inode) { \ + inode->i_sb = sb; \ + inode->i_dev = sb->s_dev; \ + inode->i_blkbits = sb->s_blocksize_bits; \ + } \ + inode; \ +}) +#endif #define hfsp_mt2ut(t) __hfsp_mt2ut(t) #define hfsp_ut2mt(t) __hfsp_ut2mt(t) #define hfsp_now2mt() __hfsp_ut2mt(CURRENT_TIME) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_fs_i.h linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_fs_i.h --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_fs_i.h 2003-08-25 18:24:59.000000000 +0200 +++ 
linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_fs_i.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -/* - * linux/include/linux/hfsplus_fs_i.h - * - * Copyright (C) 1999 - * Brad Boyer (flar@pants.nu) - * - */ - -#ifndef _LINUX_HFSPLUS_FS_I_H -#define _LINUX_HFSPLUS_FS_I_H - -#include -#include -#include "hfsplus_raw.h" - -struct hfsplus_inode_info { - /* Device number in hfsplus_permissions in catalog */ - u32 dev; - /* Allocation extents from catlog record or volume header */ - hfsplus_extent_rec extents; - u32 total_blocks, extent_blocks, alloc_blocks; - atomic_t opencnt; - - struct inode *rsrc_inode; - unsigned long flags; - - struct list_head open_dir_list; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) - unsigned long mmu_private; -#else - loff_t mmu_private; - struct inode vfs_inode; -#endif -}; - -#define HFSPLUS_FLG_RSRC 0x0001 - -#define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC)) -#define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC) - -#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_fs_sb.h linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_fs_sb.h --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_fs_sb.h 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_fs_sb.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,69 +0,0 @@ -/* - * linux/include/linux/hfsplus_fs_sb.h - * - * Copyright (C) 1999 - * Brad Boyer (flar@pants.nu) - * - */ - -#ifndef _LINUX_HFSPLUS_FS_SB_H -#define _LINUX_HFSPLUS_FS_SB_H - -#include - -/* - * HFS+ superblock info (built from Volume Header on disk) - */ - -struct hfsplus_vh; -struct hfsplus_btree; - -struct hfsplus_sb_info { - struct buffer_head *s_vhbh; - struct hfsplus_vh *s_vhdr; - struct hfsplus_btree *ext_tree; - struct hfsplus_btree *cat_tree; - struct hfsplus_btree *attr_tree; - struct inode *alloc_file; - struct inode *hidden_dir; - - /* Runtime variables */ - u32 blockoffset; - u32 sect_count; - //int 
a2b_shift; - - /* Stuff in host order from Vol Header */ - u32 total_blocks; - u32 free_blocks; - u32 next_alloc; - u32 next_cnid; - u32 file_count; - u32 folder_count; - - /* Config options */ - u32 creator; - u32 type; - - int charcase; - int fork; - int namemap; - - umode_t umask; - uid_t uid; - gid_t gid; - - unsigned long flags; - - atomic_t inode_cnt; - u32 last_inode_cnt; - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) - struct list_head rsrc_inodes; -#else - struct hlist_head rsrc_inodes; -#endif -}; - -#define HFSPLUS_SB_WRITEBACKUP 0x0001 - -#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_raw.h linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_raw.h --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/hfsplus_raw.h 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/hfsplus_raw.h 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 1999 * Brad Boyer (flar@pants.nu) + * (C) 2003 Ardis Technologies * * Format of structures on disk * Information taken from Apple Technote #1150 (HFS Plus Volume Format) @@ -36,8 +37,10 @@ #define HFSP_HIDDENDIR_NAME "\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data" -#define HFSP_HARDLINK_TYPE 0x686c6e6b -#define HFSP_HFSPLUS_CREATOR 0x6866732b +#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */ +#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */ + +#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */ /* Structures used on disk */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/inode.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/inode.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/inode.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/inode.c 2003-08-25 21:57:42.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Inode handling routines */ @@ -184,21 +185,29 @@ out: return NULL; } -static void hfsplus_get_perms(struct inode 
*inode, hfsplus_perm *perms) +static void hfsplus_get_perms(struct inode *inode, hfsplus_perm *perms, int dir) { - struct super_block *s = inode->i_sb; + struct super_block *sb = inode->i_sb; + int mode; - inode->i_mode = be32_to_cpu(perms->mode); - if (!inode->i_mode) - inode->i_mode = S_IFREG | (S_IRWXUGO & ~(HFSPLUS_SB(s).umask)); + mode = be32_to_cpu(perms->mode) & 0xffff; inode->i_uid = be32_to_cpu(perms->owner); - if (!inode->i_uid) - inode->i_uid = HFSPLUS_SB(s).uid; + if (!inode->i_uid && !mode) + inode->i_uid = HFSPLUS_SB(sb).uid; inode->i_gid = be32_to_cpu(perms->group); - if (!inode->i_gid) - inode->i_gid = HFSPLUS_SB(s).gid; + if (!inode->i_gid && !mode) + inode->i_gid = HFSPLUS_SB(sb).gid; + + if (dir) { + mode = mode ? (mode & S_IALLUGO) : + (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask)); + mode |= S_IFDIR; + } else if (!mode) + mode = S_IFREG | ((S_IRUGO|S_IWUGO) & + ~(HFSPLUS_SB(sb).umask)); + inode->i_mode = mode; } static void hfsplus_set_perms(struct inode *inode, hfsplus_perm *perms) @@ -257,35 +266,6 @@ struct file_operations hfsplus_file_oper .release = hfsplus_file_release, }; -#if 0 -static nlink_t hfsplus_count_subdirs(struct inode *inode) -{ - struct hfsplus_find_data fd; - hfsplus_cat_entry entry; - nlink_t res = 0; - u32 cnid; - - hfsplus_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd); - hfsplus_fill_cat_key(fd.search_key, inode->i_ino, NULL); - res = hfsplus_btree_find(&fd); - if (res) - goto out; - cnid = cpu_to_be32(inode->i_ino); - for (;;) { - if (fd.key->cat.parent != cnid) - break; - if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) - res++; - - if (hfsplus_btiter_move(&fd, 1)) - break; - } -out: - hfsplus_find_exit(&fd); - return res; -} -#endif - struct inode *hfsplus_new_inode(struct super_block *sb, int mode) { struct inode *inode = new_inode(sb); @@ -392,9 +372,7 @@ int hfsplus_cat_read_inode(struct inode sizeof(hfsplus_cat_folder)); memset(&HFSPLUS_I(inode).extents, 0, sizeof(hfsplus_extent_rec)); - hfsplus_get_perms(inode, 
&(folder->permissions)); - inode->i_mode = S_IFDIR | (inode->i_mode & ~S_IFMT); - //inode->i_nlink = 2 + hfsplus_count_subdirs(inode); + hfsplus_get_perms(inode, &folder->permissions, 1); inode->i_nlink = 1; inode->i_size = 2 + be32_to_cpu(folder->valence); inode->i_atime = hfsp_mt2ut(folder->access_date); @@ -414,12 +392,11 @@ int hfsplus_cat_read_inode(struct inode hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ? &file->data_fork : &file->rsrc_fork); - hfsplus_get_perms(inode, &file->permissions); + hfsplus_get_perms(inode, &file->permissions, 0); inode->i_nlink = 1; if (S_ISREG(inode->i_mode)) { if (file->permissions.dev) inode->i_nlink = be32_to_cpu(file->permissions.dev); - inode->i_mode = S_IFREG | (inode->i_mode & ~S_IFMT); inode->i_op = &hfsplus_file_inode_operations; inode->i_fop = &hfsplus_file_operations; inode->i_mapping->a_ops = &hfsplus_aops; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/options.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/options.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/options.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/options.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Option parsing */ @@ -100,12 +101,12 @@ int parse_options(char *input, struct hf if (!strcmp(curropt, "creator")) { if (!fill_fourchar(&(results->creator), value)) { - printk("HFS+-fs: creator requires a value\n"); + printk("HFS+-fs: creator requires a 4 character value\n"); return 0; } } else if (!strcmp(curropt, "type")) { if (!fill_fourchar(&(results->type), value)) { - printk("HFS+-fs: type requires a value\n"); + printk("HFS+-fs: type requires a 4 character value\n"); return 0; } } else if (!strcmp(curropt, "case")) { diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/super.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/super.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/super.c 2003-08-25 
18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/super.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * */ @@ -232,6 +233,28 @@ static int hfsplus_statfs(struct super_b return 0; } +int hfsplus_remount(struct super_block *sb, int *flags, char *data) +{ + if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) + return 0; + if (!(*flags & MS_RDONLY)) { + struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr; + + if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_INCNSTNT)) || + !(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { + printk("HFS+-fs warning: Filesystem was not cleanly unmounted, " + "running fsck.hfsplus is recommended. leaving read-only.\n"); + sb->s_flags |= MS_RDONLY; + *flags |= MS_RDONLY; + } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { + printk("HFS+-fs: Filesystem is marked locked, leaving read-only.\n"); + sb->s_flags |= MS_RDONLY; + *flags |= MS_RDONLY; + } + } + return 0; +} + static struct super_operations hfsplus_sops = { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) .alloc_inode = hfsplus_alloc_inode, @@ -243,6 +266,7 @@ static struct super_operations hfsplus_s .put_super = hfsplus_put_super, .write_super = hfsplus_write_super, .statfs = hfsplus_statfs, + .remount_fs = hfsplus_remount, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) @@ -266,6 +290,16 @@ static int hfsplus_fill_super(struct sup } memset(sbi, 0, sizeof(HFSPLUS_SB(sb))); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) + if (sizeof(struct inode) - offsetof(struct inode, u) < sizeof(struct hfsplus_inode_info)) { + extern void hfsplus_inode_info_exceeded_space_in_inode_error(void); + hfsplus_inode_info_exceeded_space_in_inode_error(); + } + + if (sizeof(struct super_block) - offsetof(struct super_block, u) < sizeof(struct hfsplus_sb_info)) { + extern void hfsplus_sb_info_exceeded_space_in_super_block_error(void); + 
hfsplus_sb_info_exceeded_space_in_super_block_error(); + } + INIT_LIST_HEAD(&HFSPLUS_SB(sb).rsrc_inodes); #else sb->s_fs_info = sbi; @@ -309,6 +343,18 @@ static int hfsplus_fill_super(struct sup /* Set up operations so we can load metadata */ sb->s_op = &hfsplus_sops; + if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_INCNSTNT)) || + !(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { + if (!silent) + printk("HFS+-fs warning: Filesystem was not cleanly unmounted, " + "running fsck.hfsplus is recommended. mounting read-only.\n"); + sb->s_flags |= MS_RDONLY; + } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { + if (!silent) + printk("HFS+-fs: Filesystem is marked locked, mounting read-only.\n"); + sb->s_flags |= MS_RDONLY; + } + /* Load metadata objects (B*Trees) */ HFSPLUS_SB(sb).ext_tree = hfsplus_open_btree(sb, HFSPLUS_EXT_CNID); if (!HFSPLUS_SB(sb).ext_tree) { @@ -355,7 +401,10 @@ static int hfsplus_fill_super(struct sup if (sb->s_flags & MS_RDONLY) goto out; - //vhdr->last_mount_vers = cpu_to_be32('LNX0'); + /* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused + * all three are registered with Apple for our use + */ + vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION); vhdr->modify_date = hfsp_now2mt(); vhdr->write_count = cpu_to_be32(be32_to_cpu(vhdr->write_count) + 1); vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/unicode.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/unicode.c --- linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/unicode.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/unicode.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handler routines for unicode strings */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/wrapper.c linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/wrapper.c --- 
linux-2.4.20-wolk4.8-fullkernel/fs/hfsplus/wrapper.c 2003-08-25 18:24:59.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/hfsplus/wrapper.c 2003-08-25 20:35:28.000000000 +0200 @@ -3,6 +3,7 @@ * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) + * (C) 2003 Ardis Technologies * * Handling of HFS wrappers around HFS+ volumes */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/proc/base.c linux-2.4.20-wolk4.9-fullkernel/fs/proc/base.c --- linux-2.4.20-wolk4.8-fullkernel/fs/proc/base.c 2003-08-25 18:26:48.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/proc/base.c 2003-08-25 20:36:21.000000000 +0200 @@ -26,6 +26,7 @@ #include #include #include +#include /* RSBAC */ #ifdef CONFIG_RSBAC @@ -375,6 +376,17 @@ static int proc_pid_cmdline(struct task_ return res; } +static int proc_pid_binfmt(struct task_struct *task, char *buffer) +{ + char *s; + int ret = -EIO; + if (task->binfmt && task->binfmt->name) + s = task->binfmt->name; + else + s = "unknown"; + return sprintf(buffer,"%s\n",s); +} + /************************************************************************/ /* Here the fs part begins */ /************************************************************************/ @@ -902,6 +914,7 @@ enum pid_directory_inos { PROC_PID_MAPS, PROC_PID_CPU, PROC_PID_MOUNTS, + PROC_PID_BINFMT, #ifndef CONFIG_GRKERNSEC PROC_PID_MAPBASE, #endif @@ -927,6 +940,7 @@ static struct pid_entry base_stuff[] = { E(PROC_PID_ROOT, "root", S_IFLNK|S_IRWXUGO), E(PROC_PID_EXE, "exe", S_IFLNK|S_IRWXUGO), E(PROC_PID_MOUNTS, "mounts", S_IFREG|S_IRUGO), + E(PROC_PID_BINFMT, "binfmt", S_IFREG|S_IRUGO), #if defined (__HAS_ARCH_PROC_MAPPED_BASE) && !defined (CONFIG_GRKERNSEC) E(PROC_PID_MAPBASE, "mapped_base", S_IFREG|S_IRUSR|S_IWUSR), #endif @@ -1319,6 +1333,10 @@ static struct dentry *proc_base_lookup(s case PROC_PID_MOUNTS: inode->i_fop = &proc_mounts_operations; break; + case PROC_PID_BINFMT: + inode->i_fop = &proc_info_file_operations; + inode->u.proc_i.op.proc_read = proc_pid_binfmt; + 
break; default: printk("procfs: impossible type (%d)",p->type); iput(inode); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/proc/proc_misc.c linux-2.4.20-wolk4.9-fullkernel/fs/proc/proc_misc.c --- linux-2.4.20-wolk4.8-fullkernel/fs/proc/proc_misc.c 2003-08-25 18:26:48.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/proc/proc_misc.c 2003-08-25 23:41:21.000000000 +0200 @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -677,6 +678,28 @@ static struct file_operations proc_profi write: write_profile, }; +#ifdef CONFIG_MAGIC_SYSRQ +/* + * writing 'C' to /proc/sysrq-trigger is like sysrq-C + */ +static ssize_t write_sysrq_trigger(struct file *file, const char *buf, + size_t count, loff_t *ppos) +{ + if (count) { + char c; + + if (get_user(c, buf)) + return -EFAULT; + handle_sysrq(c, NULL, NULL, NULL); + } + return count; +} + +static struct file_operations proc_sysrq_trigger_operations = { + .write = write_sysrq_trigger, +}; +#endif + struct proc_dir_entry *proc_root_kcore; static void create_seq_entry(char *name, mode_t mode, struct file_operations *f) @@ -789,6 +812,11 @@ void __init proc_misc_init(void) entry->size = (1+prof_len) * sizeof(unsigned int); } } +#ifdef CONFIG_MAGIC_SYSRQ + entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL); + if (entry) + entry->proc_fops = &proc_sysrq_trigger_operations; +#endif #ifdef CONFIG_PPC32 { extern struct file_operations ppc_htab_operations; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/seq_file.c linux-2.4.20-wolk4.9-fullkernel/fs/seq_file.c --- linux-2.4.20-wolk4.8-fullkernel/fs/seq_file.c 2003-08-25 18:25:03.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/seq_file.c 2003-08-25 20:35:13.000000000 +0200 @@ -297,6 +297,37 @@ int seq_printf(struct seq_file *m, const return -1; } +int seq_path(struct seq_file *m, + struct vfsmount *mnt, struct dentry *dentry, + char *esc) +{ + if (m->count < m->size) { + char *s = m->buf + m->count; + char *p = d_path(dentry, mnt, s, m->size - 
m->count); + if (!IS_ERR(p)) { + while (s <= p) { + char c = *p++; + if (!c) { + p = m->buf + m->count; + m->count = s - m->buf; + return s - p; + } else if (!strchr(esc, c)) { + *s++ = c; + } else if (s + 4 > p) { + break; + } else { + *s++ = '\\'; + *s++ = '0' + ((c & 0300) >> 6); + *s++ = '0' + ((c & 070) >> 3); + *s++ = '0' + (c & 07); + } + } + } + } + m->count = m->size; + return -1; +} + static void *single_start(struct seq_file *p, loff_t *pos) { return NULL + (*pos == 0); @@ -338,3 +369,13 @@ int single_release(struct inode *inode, kfree(op); return res; } + +int seq_release_private(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + + kfree(seq->private); + seq->private = NULL; + return seq_release(inode, file); +} + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_event.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_event.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_event.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_event.c 2003-08-25 20:25:58.000000000 +0200 @@ -221,9 +221,9 @@ dm_vfs_data( } tdp->td_vcount = 0; - memcpy(&tdp->td_handle.ha_fsid, vfsp->vfs_altfsid, sizeof(fsid_t)); - memset((char *)&tdp->td_handle.ha_fsid + sizeof(fsid_t), 0, - sizeof(tdp->td_handle) - sizeof(fsid_t)); + memcpy(&tdp->td_handle.ha_fsid, vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); + memset((char *)&tdp->td_handle.ha_fsid + sizeof(xfs_fsid_t), 0, + sizeof(tdp->td_handle) - sizeof(xfs_fsid_t)); return(tdp); } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_private.h linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_private.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_private.h 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_private.h 2003-08-25 20:25:58.000000000 +0200 @@ -183,7 +183,7 @@ typedef struct dm_fsreg { struct dm_fsreg *fr_next; vfs_t *fr_vfsp; /* filesystem pointer */ dm_tokevent_t 
*fr_tevp; - fsid_t fr_fsid; /* filesystem ID */ + xfs_fsid_t fr_fsid; /* filesystem ID */ void *fr_msg; /* dm_mount_event_t for filesystem */ int fr_msgsize; /* size of dm_mount_event_t */ dm_fsstate_t fr_state; @@ -471,7 +471,7 @@ int dm_check_dmapi_vp( vnode_t *vp); dm_tokevent_t * dm_find_mount_tevp_and_lock( - fsid_t *fsidp, + xfs_fsid_t *fsidp, unsigned long *lcp); int dm_path_to_hdl( diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_register.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_register.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_register.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_register.c 2003-08-25 20:25:58.000000000 +0200 @@ -133,7 +133,7 @@ fsreg_read_pfs(char *buffer, char **star static dm_fsreg_t * dm_find_fsreg( - fsid_t *fsidp) + xfs_fsid_t *fsidp) { dm_fsreg_t *fsrp; @@ -153,7 +153,7 @@ dm_find_fsreg( static dm_fsreg_t * dm_find_fsreg_and_lock( - fsid_t *fsidp, + xfs_fsid_t *fsidp, unsigned long *lcp) /* address of returned lock cookie */ { dm_fsreg_t *fsrp; @@ -477,7 +477,7 @@ dm_handle_to_vp( int error; fid_t *fidp; - if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handlep->ha_fsid, &lc)) == NULL) + if ((fsrp = dm_find_fsreg_and_lock(&handlep->ha_fsid, &lc)) == NULL) return(NULL); fidp = (fid_t*)&handlep->ha_fid; @@ -565,7 +565,7 @@ dm_vp_to_handle( if (error) return(error); - memcpy(&handlep->ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(fsid_t)); + memcpy(&handlep->ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); memcpy(&handlep->ha_fid, &fid, fid.fid_len + sizeof fid.fid_len); hsize = XFS_HSIZE(*handlep); memset((char *)handlep + hsize, 0, sizeof(*handlep) - hsize); @@ -591,7 +591,7 @@ dm_check_dmapi_vp( if ((error = dm_vp_to_handle(vp, &handle)) != 0) return(error); - if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL) + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) return(EBADF); 
mutex_spinunlock(&fsrp->fr_lock, lc); return(0); @@ -606,7 +606,7 @@ dm_check_dmapi_vp( dm_tokevent_t * dm_find_mount_tevp_and_lock( - fsid_t *fsidp, + xfs_fsid_t *fsidp, unsigned long *lcp) /* address of returned lock cookie */ { dm_fsreg_t *fsrp; @@ -854,7 +854,7 @@ dm_path_to_hdl( if (error) return(error); - if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL) + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) return(EBADF); mutex_spinunlock(&fsrp->fr_lock, lc); @@ -925,7 +925,7 @@ dm_path_to_fshdl( if (error) return(error); - if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL) + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) return(EBADF); mutex_spinunlock(&fsrp->fr_lock, lc); @@ -955,7 +955,7 @@ dm_fd_to_hdl( if ((error = dm_vp_to_handle(LINVFS_GET_VP(filep->f_dentry->d_inode), &handle)) != 0) return(error); - if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL) + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) return(EBADF); mutex_spinunlock(&fsrp->fr_lock, lc); @@ -1134,7 +1134,7 @@ dm_set_disp( if (error != 0) return(error); - fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc1); + fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1); if (fsrp == NULL) { dm_app_put_tdp(tdp); return(EINVAL); @@ -1221,7 +1221,7 @@ dm_set_return_on_destroy( if (error != 0) return(error); - fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc1); + fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1); if (fsrp == NULL) { dm_app_put_tdp(tdp); return(EINVAL); @@ -1298,7 +1298,7 @@ dm_get_mountinfo( spinlock while doing copyout calls. 
*/ - fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc); + fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc); if (fsrp == NULL) { dm_app_put_tdp(tdp); return(EINVAL); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_right.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_right.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/dmapi/dmapi_right.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/dmapi/dmapi_right.c 2003-08-25 20:25:58.000000000 +0200 @@ -569,7 +569,7 @@ dm_get_config_tdp( see if this is one of the handles in the DM_EVENT_MOUNT tevp. */ - if ((tevp = dm_find_mount_tevp_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL) + if ((tevp = dm_find_mount_tevp_and_lock(&handle.ha_fsid, &lc)) == NULL) return(EBADF); return(dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY, diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_aops.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_aops.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_aops.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_aops.c 2003-08-25 20:26:00.000000000 +0200 @@ -79,7 +79,7 @@ linvfs_unwritten_done( * to written extents. 
*/ STATIC void -linvfs_unwritten_conv( +linvfs_unwritten_convert( xfs_buf_t *bp) { vnode_t *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *); @@ -201,7 +201,8 @@ probe_unwritten_page( page_buf_bmap_t *mp, page_buf_t *pb, unsigned long max_offset, - unsigned long *fsbs) + unsigned long *fsbs, + unsigned int bbits) { struct page *page; @@ -222,6 +223,7 @@ probe_unwritten_page( break; if (p_offset >= max_offset) break; + map_buffer_at_offset(page, bh, p_offset, bbits, mp); set_buffer_unwritten_io(bh); bh->b_private = pb; p_offset += bh->b_size; @@ -280,7 +282,7 @@ probe_unmapped_cluster( struct buffer_head *bh, struct buffer_head *head) { - unsigned long tindex, tlast; + unsigned long tindex, tlast, tloff; unsigned int len, total = 0; struct address_space *mapping = inode->i_mapping; @@ -297,18 +299,17 @@ probe_unmapped_cluster( if (bh == head) { tlast = inode->i_size >> PAGE_CACHE_SHIFT; /* Prune this back to avoid pathological behavior */ - tlast = min(tlast, startpage->index + 64); - for (tindex = startpage->index + 1; tindex < tlast; tindex++) { - len = probe_unmapped_page(mapping, tindex, - PAGE_CACHE_SIZE); + tloff = min(tlast, startpage->index + 64); + for (tindex = startpage->index + 1; tindex < tloff; tindex++) { + len = probe_unmapped_page(mapping, + tindex, PAGE_CACHE_SIZE); if (!len) break; total += len; } - if ((tindex == tlast) && (inode->i_size & ~PAGE_CACHE_MASK)) { - len = probe_unmapped_page(mapping, tindex, - inode->i_size & ~PAGE_CACHE_MASK); - total += len; + if (tindex == tlast && + (tloff = inode->i_size & (PAGE_CACHE_SIZE - 1))) { + total += probe_unmapped_page(mapping, tindex, tloff); } } return total; @@ -422,14 +423,15 @@ map_unwritten( */ if (bh == head) { struct address_space *mapping = inode->i_mapping; - unsigned long tindex, tlast, bs; + unsigned long tindex, tloff, tlast, bs; + unsigned int bbits = inode->i_blkbits; struct page *page; tlast = inode->i_size >> PAGE_CACHE_SHIFT; - tlast = min(tlast, start_page->index + pb->pb_page_count - 1); - for 
(tindex = start_page->index + 1; tindex < tlast; tindex++) { + tloff = min(tlast, start_page->index + pb->pb_page_count - 1); + for (tindex = start_page->index + 1; tindex < tloff; tindex++) { page = probe_unwritten_page(mapping, tindex, mp, pb, - PAGE_CACHE_SIZE, &bs); + PAGE_CACHE_SIZE, &bs, bbits); if (!page) break; nblocks += bs; @@ -437,9 +439,10 @@ map_unwritten( convert_page(inode, page, mp, pb, 1, all_bh); } - if ((tindex == tlast) && (inode->i_size & ~PAGE_CACHE_MASK)) { + if (tindex == tlast && + (tloff = inode->i_size & (PAGE_CACHE_SIZE - 1))) { page = probe_unwritten_page(mapping, tindex, mp, pb, - inode->i_size & ~PAGE_CACHE_MASK, &bs); + tloff, &bs, bbits); if (page) { nblocks += bs; atomic_add(bs, &pb->pb_io_remaining); @@ -456,7 +459,7 @@ map_unwritten( XFS_BUF_SET_SIZE(pb, size); XFS_BUF_SET_OFFSET(pb, offset); XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode)); - XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_conv); + XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert); if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { pagebuf_iodone(pb, 1, 1); @@ -522,7 +525,8 @@ convert_page( offset = i << bbits; if (!(Page_Uptodate(page) || buffer_uptodate(bh))) continue; - if (buffer_mapped(bh) && !buffer_delay(bh) && all_bh) { + if (buffer_mapped(bh) && all_bh && + !buffer_unwritten(bh) && !buffer_delay(bh)) { if (startio && (offset < end)) { lock_buffer(bh); bh_arr[index++] = bh; @@ -543,7 +547,7 @@ convert_page( ASSERT(tmp->pbm_flags & PBMF_UNWRITTEN); map_unwritten(inode, page, head, bh, offset, bbits, tmp, all_bh); - } else { + } else if (! 
(buffer_unwritten(bh) && buffer_locked(bh))) { map_buffer_at_offset(page, bh, offset, bbits, tmp); if (buffer_unwritten(bh)) { set_buffer_unwritten_io(bh); @@ -1191,7 +1195,8 @@ linvfs_direct_IO( if ((igrab(inode)) != inode) BUG(); XFS_BUF_SET_FSPRIVATE(pb, vp); - XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_conv); + XFS_BUF_SET_IODONE_FUNC(pb, + linvfs_unwritten_convert); } error = pagebuf_iostart(pb, pb_flags); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_iops.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_iops.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_iops.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_iops.c 2003-08-25 20:26:00.000000000 +0200 @@ -109,12 +109,18 @@ linvfs_mknod( struct inode *ip; vattr_t va; vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir); + xfs_acl_t *default_acl = NULL; xattr_exists_t test_default_acl = _ACL_DEFAULT_EXISTS; - int have_default_acl = 0; - int error = EINVAL; + int error; - if (test_default_acl) - have_default_acl = test_default_acl(dvp); + if (test_default_acl && test_default_acl(dvp)) { + if (!_ACL_ALLOC(default_acl)) + return -ENOMEM; + if (!_ACL_GET_DEFAULT(dvp, default_acl)) { + _ACL_FREE(default_acl); + default_acl = NULL; + } + } #ifdef CONFIG_XFS_POSIX_ACL /* @@ -122,7 +128,7 @@ linvfs_mknod( * split out into separate patches - remove this once MS_POSIXACL is * accepted, or some other way to implement this exists. */ - if (IS_POSIXACL(dir) && !have_default_acl && has_fs_struct(current)) + if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current)) mode &= ~current->fs->umask; #endif @@ -147,13 +153,36 @@ linvfs_mknod( break; } + if (default_acl) { + if (!error) { + error = _ACL_INHERIT(vp, &va, default_acl); + if (!error) { + VMODIFY(vp); + } else { + struct dentry teardown = {}; + int err2; + + /* Oh, the horror. + * If we can't add the ACL we must back out. + * ENOSPC can hit here, among other things. 
+ */ + teardown.d_inode = ip = LINVFS_GET_IP(vp); + teardown.d_name = dentry->d_name; + remove_inode_hash(ip); + make_bad_inode(ip); + if (S_ISDIR(mode)) + VOP_RMDIR(dvp, &teardown, NULL, err2); + else + VOP_REMOVE(dvp, &teardown, NULL, err2); + VN_RELE(vp); + } + } + _ACL_FREE(default_acl); + } + if (!error) { ASSERT(vp); ip = LINVFS_GET_IP(vp); - if (!ip) { - VN_RELE(vp); - return -ENOMEM; - } if (S_ISCHR(mode) || S_ISBLK(mode)) ip->i_rdev = to_kdev_t(rdev); @@ -162,19 +191,6 @@ linvfs_mknod( d_instantiate(dentry, ip); validate_fields(dir); } - - if (!error && have_default_acl) { - _ACL_DECL (pdacl); - - if (!_ACL_ALLOC(pdacl)) { - error = -ENOMEM; - } else { - if (_ACL_GET_DEFAULT(dvp, pdacl)) - error = _ACL_INHERIT(vp, &va, pdacl); - VMODIFY(vp); - _ACL_FREE(pdacl); - } - } return -error; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_super.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_super.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_super.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_super.c 2003-08-25 20:25:58.000000000 +0200 @@ -542,7 +542,7 @@ linvfs_write_super( sb->s_dirt = 0; } -STATIC void +STATIC int linvfs_sync_super( struct super_block *sb) { @@ -550,6 +550,7 @@ linvfs_sync_super( int error; VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_WAIT, NULL, error); + return -error; } STATIC int diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_version.h linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_version.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_version.h 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_version.h 2003-08-25 20:26:02.000000000 +0200 @@ -39,6 +39,6 @@ #ifndef __XFS_VERSION_H__ #define __XFS_VERSION_H__ -#define XFS_VERSION_STRING "SGI XFS 1.3.0pre5 - Tuned for WOLK," +#define XFS_VERSION_STRING "SGI XFS 1.3.0 - Tuned for WOLK," #endif /* __XFS_VERSION_H__ */ diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_vfs.h linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_vfs.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/linux/xfs_vfs.h 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/linux/xfs_vfs.h 2003-08-25 20:25:58.000000000 +0200 @@ -33,6 +33,7 @@ #define __XFS_VFS_H__ #include +#include "xfs_fs.h" struct fid; struct cred; @@ -44,8 +45,8 @@ struct xfs_mount_args; typedef struct vfs { u_int vfs_flag; /* flags */ - fsid_t vfs_fsid; /* file system ID */ - fsid_t *vfs_altfsid; /* An ID fixed for life of FS */ + xfs_fsid_t vfs_fsid; /* file system ID */ + xfs_fsid_t *vfs_altfsid; /* An ID fixed for life of FS */ bhv_head_t vfs_bh; /* head of vfs behavior chain */ struct super_block *vfs_super; /* Linux superblock structure */ struct task_struct *vfs_sync_task; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf.c 2003-08-25 20:26:00.000000000 +0200 @@ -653,7 +653,7 @@ _pagebuf_get_prealloc_bh(void) do { set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(&pb_resv_bh_lock, flags); - pagebuf_run_queues(NULL); + blk_run_queues(); schedule(); spin_lock_irqsave(&pb_resv_bh_lock, flags); } while (pb_resv_bh_cnt < 1); @@ -1237,10 +1237,10 @@ _pagebuf_wait_unpin( add_wait_queue(&pb->pb_waiters, &wait); for (;;) { current->state = TASK_UNINTERRUPTIBLE; - if (atomic_read(&pb->pb_pin_count) == 0) { + if (atomic_read(&pb->pb_pin_count) == 0) break; - } - pagebuf_run_queues(pb); + if (atomic_read(&pb->pb_io_remaining)) + blk_run_queues(); schedule(); } remove_wait_queue(&pb->pb_waiters, &wait); @@ -1355,26 +1355,27 @@ pagebuf_iostart( /* start I/O on a buf return status; } - pb->pb_flags &= - ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI|PBF_READ_AHEAD); - pb->pb_flags |= 
flags & - (PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_READ_AHEAD); + pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | \ + PBF_DELWRI | PBF_READ_AHEAD | PBF_RUN_QUEUES); + pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \ + PBF_SYNC | PBF_READ_AHEAD | PBF_RUN_QUEUES); BUG_ON(pb->pb_bn == PAGE_BUF_DADDR_NULL); - /* For writes call internal function which checks for - * filesystem specific callout function and execute it. + /* For writes allow an alternate strategy routine to precede + * the actual I/O request (which may not be issued at all in + * a shutdown situation, for example). */ - if (flags & PBF_WRITE) { - status = __pagebuf_iorequest(pb); - } else { - status = pagebuf_iorequest(pb); - } + status = (flags & PBF_WRITE) ? + pagebuf_iostrategy(pb) : pagebuf_iorequest(pb); - /* Wait for I/O if we are not an async request */ - if ((status == 0) && (flags & PBF_ASYNC) == 0) { + /* Wait for I/O if we are not an async request. + * Note: async I/O request completion will release the buffer, + * and that can already be done by this point. So using the + * buffer pointer from here on, after async I/O, is invalid. + */ + if (!status && !(flags & PBF_ASYNC)) status = pagebuf_iowait(pb); - } return status; } @@ -1715,6 +1716,8 @@ pagebuf_iorequest( /* start real I/O _pagebuf_wait_unpin(pb); } + pagebuf_hold(pb); + /* Set the count to 1 initially, this will stop an I/O * completion callout which happens before we have started * all the I/O from calling pagebuf_iodone too early. 
@@ -1722,6 +1725,8 @@ pagebuf_iorequest( /* start real I/O atomic_set(&pb->pb_io_remaining, 1); _pagebuf_ioapply(pb); _pagebuf_iodone(pb, 0, 0); + + pagebuf_rele(pb); return 0; } @@ -1737,7 +1742,8 @@ pagebuf_iowait( page_buf_t *pb) { PB_TRACE(pb, PB_TRACE_REC(iowait), 0); - pagebuf_run_queues(pb); + if (atomic_read(&pb->pb_io_remaining)) + blk_run_queues(); down(&pb->pb_iodonesema); PB_TRACE(pb, PB_TRACE_REC(iowaited), (int)pb->pb_error); return pb->pb_error; @@ -1831,8 +1837,6 @@ _pagebuf_ioapply( /* apply function to size_t page_offset, len; size_t cur_offset, cur_len; - pagebuf_hold(pb); - cur_offset = pb->pb_offset; cur_len = buffer_len; @@ -1859,7 +1863,11 @@ _pagebuf_ioapply( /* apply function to buffer_len -= len; } - pagebuf_rele(pb); + if (pb->pb_flags & PBF_RUN_QUEUES) { + pb->pb_flags &= ~PBF_RUN_QUEUES; + if (atomic_read(&pb->pb_io_remaining) > 1) + blk_run_queues(); + } } @@ -2082,13 +2090,13 @@ pagebuf_daemon( pb->pb_flags &= ~PBF_DELWRI; pb->pb_flags |= PBF_WRITE; - __pagebuf_iorequest(pb); + pagebuf_iostrategy(pb); } if (as_list_len > 0) purge_addresses(); if (count) - pagebuf_run_queues(NULL); + blk_run_queues(); force_flush = 0; } while (pbd_active == 1); @@ -2140,49 +2148,53 @@ pagebuf_delwri_flush( } } - list_del_init(&pb->pb_list); - if (flags & PBDF_WAIT) { - list_add(&pb->pb_list, &tmp); - pb->pb_flags &= ~PBF_ASYNC; - } + list_move(&pb->pb_list, &tmp); - spin_unlock(&pbd_delwrite_lock); + } + + /* ok found all the items that can be worked on + * drop the lock and process the private list */ + spin_unlock(&pbd_delwrite_lock); + + list_for_each(curr,&tmp) { + pb = list_entry(curr, page_buf_t, pb_list); + + if (flags & PBDF_WAIT) + pb->pb_flags &= ~PBF_ASYNC; - if ((flags & PBDF_TRYLOCK) == 0) { + if ((flags & PBDF_TRYLOCK) == 0) pagebuf_lock(pb); - } - + pb->pb_flags &= ~PBF_DELWRI; pb->pb_flags |= PBF_WRITE; + pagebuf_iostrategy(pb); - __pagebuf_iorequest(pb); if (++flush_cnt > 32) { - pagebuf_run_queues(NULL); + blk_run_queues(); flush_cnt 
= 0; } - - spin_lock(&pbd_delwrite_lock); } - spin_unlock(&pbd_delwrite_lock); - - pagebuf_run_queues(NULL); - - if (pinptr) - *pinptr = pincount; - - if ((flags & PBDF_WAIT) == 0) - return; + blk_run_queues(); + /* must run list the second time even if PBDF_WAIT isn't + * to reset all the pb_list pointers + */ while (!list_empty(&tmp)) { pb = list_entry(tmp.next, page_buf_t, pb_list); list_del_init(&pb->pb_list); - pagebuf_iowait(pb); - if (!pb->pb_relse) - pagebuf_unlock(pb); - pagebuf_rele(pb); + + if (flags & PBDF_WAIT) { + pagebuf_iowait(pb); + if (!pb->pb_relse) + pagebuf_unlock(pb); + pagebuf_rele(pb); + } } + + if (pinptr) + *pinptr = pincount; } STATIC int diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf.h linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf.h 2003-08-25 18:25:05.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf.h 2003-08-25 20:25:58.000000000 +0200 @@ -127,6 +127,7 @@ typedef enum page_buf_flags_e { /* pb_f PBF_FORCEIO = (1 << 21), PBF_FLUSH = (1 << 22), /* flush disk write cache */ PBF_READ_AHEAD = (1 << 23), + PBF_RUN_QUEUES = (1 << 24), /* run block device task queue */ } page_buf_flags_t; @@ -240,10 +241,6 @@ typedef struct page_buf_s { } page_buf_t; -/* - * page_buf module entry points - */ - /* Finding and Reading Buffers */ extern page_buf_t *pagebuf_find( /* find buffer for block if */ @@ -276,12 +273,11 @@ extern page_buf_t *pagebuf_get_no_daddr( size_t len, struct pb_target *); /* mount point "fake" inode */ -extern int pagebuf_associate_memory( +extern int pagebuf_associate_memory( page_buf_t *, void *, size_t); - extern void pagebuf_hold( /* increment reference count */ page_buf_t *); /* buffer to hold */ @@ -291,7 +287,7 @@ extern void pagebuf_readahead( /* read size_t, /* length of range */ page_buf_flags_t); /* additional read flags */ -/* Writing and Releasing Buffers */ +/* Releasing Buffers */ extern void 
pagebuf_free( /* deallocate a buffer */ page_buf_t *); /* buffer to deallocate */ @@ -314,11 +310,7 @@ extern int pagebuf_lock( /* lock buffer extern void pagebuf_unlock( /* unlock buffer */ page_buf_t *); /* buffer to unlock */ -/* Buffer Utility Routines */ -static inline int pagebuf_geterror(page_buf_t *pb) -{ - return (pb ? pb->pb_error : ENOMEM); -} +/* Buffer Read and Write Routines */ extern void pagebuf_iodone( /* mark buffer I/O complete */ page_buf_t *, /* buffer to mark */ @@ -339,21 +331,9 @@ extern int pagebuf_iostart( /* start I/ extern int pagebuf_iorequest( /* start real I/O */ page_buf_t *); /* buffer to convey to device */ - /* - * pagebuf_iorequest is the core I/O request routine. - * It assumes that the buffer is well-formed and - * mapped and ready for physical I/O, unlike - * pagebuf_iostart() and pagebuf_iophysio(). Those - * routines call the inode pagebuf_ioinitiate routine to start I/O, - * if it is present, or else call pagebuf_iorequest() - * directly if the inode pagebuf_ioinitiate routine is not present. - */ - extern int pagebuf_iowait( /* wait for buffer I/O done */ page_buf_t *); /* buffer to wait on */ -extern caddr_t pagebuf_offset(page_buf_t *, size_t); - extern void pagebuf_iomove( /* move data in/out of pagebuf */ page_buf_t *, /* buffer to manipulate */ size_t, /* starting buffer offset */ @@ -361,6 +341,22 @@ extern void pagebuf_iomove( /* move dat caddr_t, /* data pointer */ page_buf_rw_t); /* direction */ +static inline int pagebuf_iostrategy(page_buf_t *pb) +{ + return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb); +} + +static inline int pagebuf_geterror(page_buf_t *pb) +{ + return pb ? 
pb->pb_error : ENOMEM; +} + +/* Buffer Utility Routines */ + +extern caddr_t pagebuf_offset( /* pointer at offset in buffer */ + page_buf_t *, /* buffer to offset into */ + size_t); /* offset */ + /* Pinning Buffer Storage in Memory */ extern void pagebuf_pin( /* pin buffer in memory */ @@ -369,33 +365,24 @@ extern void pagebuf_pin( /* pin buffer extern void pagebuf_unpin( /* unpin buffered data */ page_buf_t *); /* buffer to unpin */ -extern int pagebuf_ispin( page_buf_t *); /* check if pagebuf is pinned */ - -/* Reading and writing pages */ +extern int pagebuf_ispin( /* check if buffer is pinned */ + page_buf_t *); /* buffer to check */ -extern void pagebuf_delwri_dequeue(page_buf_t *); +/* Delayed Write Buffer Routines */ #define PBDF_WAIT 0x01 #define PBDF_TRYLOCK 0x02 extern void pagebuf_delwri_flush( - struct pb_target *, + pb_target_t *, unsigned long, int *); -extern int pagebuf_init(void); -extern void pagebuf_terminate(void); +extern void pagebuf_delwri_dequeue( + page_buf_t *); -static __inline__ int __pagebuf_iorequest(page_buf_t *pb) -{ - if (pb->pb_strat) - return pb->pb_strat(pb); - return pagebuf_iorequest(pb); -} +/* Buffer Daemon Setup Routines */ -static __inline__ void pagebuf_run_queues(page_buf_t *pb) -{ - if (!pb || atomic_read(&pb->pb_io_remaining)) - run_task_queue(&tq_disk); -} +extern int pagebuf_init(void); +extern void pagebuf_terminate(void); #endif /* __PAGE_BUF_H__ */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf_internal.h linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf_internal.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf_internal.h 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf_internal.h 2003-08-25 20:25:58.000000000 +0200 @@ -47,6 +47,9 @@ #define page_buffers(page) ((page)->buffers) #define page_has_buffers(page) ((page)->buffers) #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,67) +#define blk_run_queues() 
run_task_queue(&tq_disk) +#endif #ifdef PAGEBUF_LOCK_TRACKING #define PB_SET_OWNER(pb) (pb->pb_last_holder = current->pid) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf_locking.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf_locking.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/pagebuf/page_buf_locking.c 2003-08-25 18:25:05.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/pagebuf/page_buf_locking.c 2003-08-25 20:25:58.000000000 +0200 @@ -113,7 +113,8 @@ pagebuf_lock( ASSERT(pb->pb_flags & _PBF_LOCKABLE); PB_TRACE(pb, PB_TRACE_REC(lock), 0); - pagebuf_run_queues(pb); + if (atomic_read(&pb->pb_io_remaining)) + blk_run_queues(); down(&pb->pb_sema); PB_SET_OWNER(pb); PB_TRACE(pb, PB_TRACE_REC(locked), 0); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_attr.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_attr.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_attr.c 2003-08-25 18:27:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_attr.c 2003-08-25 20:25:58.000000000 +0200 @@ -1718,6 +1718,7 @@ xfs_attr_node_get(xfs_da_args_t *args) int i; state = xfs_da_state_alloc(); + state->holeok = 1; state->args = args; state->mp = args->dp->i_mount; state->blocksize = state->mp->m_sb.sb_blocksize; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_bmap.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_bmap.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_bmap.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_bmap.c 2003-08-25 20:25:58.000000000 +0200 @@ -2170,7 +2170,7 @@ xfs_bmap_alloc( xfs_extlen_t ralen=0; /* realtime allocation length */ #endif -#define ISLEGAL(x,y) \ +#define ISVALID(x,y) \ (rt ? \ (x) < mp->m_sb.sb_rblocks : \ XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ @@ -2249,7 +2249,7 @@ xfs_bmap_alloc( /* * If we're now overlapping the next or previous extent that * means we can't fit an extsz piece in this hole. 
Just move - * the start forward to the first legal spot and set + * the start forward to the first valid spot and set * the length so we hit the end. */ if ((ap->off != orig_off && ap->off < prevo) || @@ -2310,7 +2310,7 @@ xfs_bmap_alloc( ralen = ap->alen / mp->m_sb.sb_rextsize; /* * If the old value was close enough to MAXEXTLEN that - * we rounded up to it, cut it back so it's legal again. + * we rounded up to it, cut it back so it's valid again. * Note that if it's a really large request (bigger than * MAXEXTLEN), we don't hear about that number, and can't * adjust the starting point to match it. @@ -2343,7 +2343,7 @@ xfs_bmap_alloc( */ if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF && !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && - ISLEGAL(ap->prevp->br_startblock + ap->prevp->br_blockcount, + ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount, ap->prevp->br_startblock)) { ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount; /* @@ -2352,7 +2352,7 @@ xfs_bmap_alloc( adjust = ap->off - (ap->prevp->br_startoff + ap->prevp->br_blockcount); if (adjust && - ISLEGAL(ap->rval + adjust, ap->prevp->br_startblock)) + ISVALID(ap->rval + adjust, ap->prevp->br_startblock)) ap->rval += adjust; } /* @@ -2374,7 +2374,7 @@ xfs_bmap_alloc( !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && (prevbno = ap->prevp->br_startblock + ap->prevp->br_blockcount) && - ISLEGAL(prevbno, ap->prevp->br_startblock)) { + ISVALID(prevbno, ap->prevp->br_startblock)) { /* * Calculate gap to end of previous block. */ @@ -2386,11 +2386,11 @@ xfs_bmap_alloc( * end and the gap size. * Heuristic! * If the gap is large relative to the piece we're - * allocating, or using it gives us an illegal block + * allocating, or using it gives us an invalid block * number, then just use the end of the previous block. 
*/ if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen && - ISLEGAL(prevbno + prevdiff, + ISVALID(prevbno + prevdiff, ap->prevp->br_startblock)) prevbno += adjust; else @@ -2425,14 +2425,14 @@ xfs_bmap_alloc( /* * Heuristic! * If the gap is large relative to the piece we're - * allocating, or using it gives us an illegal block + * allocating, or using it gives us an invalid block * number, then just use the start of the next block * offset by our length. */ if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen && - ISLEGAL(gotbno - gotdiff, gotbno)) + ISVALID(gotbno - gotdiff, gotbno)) gotbno -= adjust; - else if (ISLEGAL(gotbno - ap->alen, gotbno)) { + else if (ISVALID(gotbno - ap->alen, gotbno)) { gotbno -= ap->alen; gotdiff += adjust - ap->alen; } else @@ -2734,7 +2734,7 @@ xfs_bmap_alloc( } } return 0; -#undef ISLEGAL +#undef ISVALID } /* @@ -3353,7 +3353,7 @@ xfs_bmap_local_to_extents( /* * We don't want to deal with the case of keeping inode data inline yet. - * So sending the data fork of a regular inode is illegal. + * So sending the data fork of a regular inode is invalid. 
*/ ASSERT(!((ip->i_d.di_mode & IFMT) == IFREG && whichfork == XFS_DATA_FORK)); @@ -3810,7 +3810,7 @@ xfs_bmap_add_attrfork( xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); switch (ip->i_d.di_format) { case XFS_DINODE_FMT_DEV: - ip->i_d.di_forkoff = roundup(sizeof(dev_t), 8) >> 3; + ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; break; case XFS_DINODE_FMT_UUID: ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_buf.h linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_buf.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_buf.h 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_buf.h 2003-08-25 20:25:58.000000000 +0200 @@ -215,21 +215,16 @@ extern inline xfs_caddr_t xfs_buf_offset static inline int xfs_bawrite(void *mp, page_buf_t *bp) { - int ret; - bp->pb_fspriv3 = mp; bp->pb_strat = xfs_bdstrat_cb; xfs_buf_undelay(bp); - if ((ret = pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC)) == 0) - pagebuf_run_queues(bp); - return ret; + return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | PBF_RUN_QUEUES); } static inline void xfs_buf_relse(page_buf_t *bp) { if ((bp->pb_flags & _PBF_LOCKABLE) && !bp->pb_relse) pagebuf_unlock(bp); - pagebuf_rele(bp); } @@ -263,23 +258,19 @@ static inline void xfs_buf_relse(page_bu static inline int XFS_bwrite(page_buf_t *pb) { - int sync = (pb->pb_flags & PBF_ASYNC) == 0; - int error; + int iowait = (pb->pb_flags & PBF_ASYNC) == 0; + int error = 0; pb->pb_flags |= PBF_SYNC; + if (!iowait) + pb->pb_flags |= PBF_RUN_QUEUES; xfs_buf_undelay(pb); - - __pagebuf_iorequest(pb); - - if (sync) { + pagebuf_iostrategy(pb); + if (iowait) { error = pagebuf_iowait(pb); xfs_buf_relse(pb); - } else { - pagebuf_run_queues(pb); - error = 0; } - return error; } @@ -323,4 +314,4 @@ static inline int xfs_bdwrite(void *mp, #define XFS_freerbuf(bp) pagebuf_free(bp) #define XFS_nfreerbuf(bp) pagebuf_free(bp) -#endif +#endif /* __XFS_BUF_H__ */ diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_buf_item.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_buf_item.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_buf_item.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_buf_item.c 2003-08-25 20:25:58.000000000 +0200 @@ -1007,7 +1007,7 @@ xfs_buf_iodone_callbacks( { xfs_log_item_t *lip; static ulong lasttime; - static dev_t lastdev; + static xfs_dev_t lastdev; xfs_mount_t *mp; ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_da_btree.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_da_btree.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_da_btree.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_da_btree.c 2003-08-25 20:25:58.000000000 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -1141,10 +1141,13 @@ xfs_da_node_lookup_int(xfs_da_state_t *s xfs_da_node_entry_t *btree; xfs_dablk_t blkno; int probe, span, max, error, retval; + xfs_daddr_t mappedbno; xfs_dahash_t hashval; xfs_da_args_t *args; args = state->args; + mappedbno = state->holeok ? -2 : -1; + /* * Descend thru the B-tree searching each level for the right * node to use, until the right hashval is found. @@ -1160,15 +1163,15 @@ xfs_da_node_lookup_int(xfs_da_state_t *s * Read the next node down in the tree. 
*/ blk->blkno = blkno; - error = xfs_da_read_buf(state->args->trans, state->args->dp, - blkno, -1, &blk->bp, - state->args->whichfork); + error = xfs_da_read_buf(args->trans, args->dp, blkno, + mappedbno, &blk->bp, args->whichfork); + if (!error && unlikely(state->holeok && !blk->bp)) + error = XFS_ERROR(ENOATTR); /* always attr here */ if (error) { blk->blkno = 0; state->path.active--; return(error); } - ASSERT(blk->bp != NULL); curr = blk->bp->data; ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || @@ -1187,7 +1190,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *s */ max = INT_GET(node->hdr.count, ARCH_CONVERT); probe = span = max / 2; - hashval = state->args->hashval; + hashval = args->hashval; for (btree = &node->btree[probe]; span > 4; btree = &node->btree[probe]) { span /= 2; @@ -1250,22 +1253,22 @@ xfs_da_node_lookup_int(xfs_da_state_t *s for (;;) { if (blk->magic == XFS_DIR_LEAF_MAGIC) { ASSERT(XFS_DIR_IS_V1(state->mp)); - retval = xfs_dir_leaf_lookup_int(blk->bp, state->args, + retval = xfs_dir_leaf_lookup_int(blk->bp, args, &blk->index); } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { ASSERT(XFS_DIR_IS_V2(state->mp)); - retval = xfs_dir2_leafn_lookup_int(blk->bp, state->args, + retval = xfs_dir2_leafn_lookup_int(blk->bp, args, &blk->index, state); } #ifdef __KERNEL__ else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { - retval = xfs_attr_leaf_lookup_int(blk->bp, state->args); - blk->index = state->args->index; - state->args->blkno = blk->blkno; + retval = xfs_attr_leaf_lookup_int(blk->bp, args); + blk->index = args->index; + args->blkno = blk->blkno; } #endif if (((retval == ENOENT) || (retval == ENOATTR)) && - (blk->hashval == state->args->hashval)) { + (blk->hashval == args->hashval)) { error = xfs_da_path_shift(state, &state->path, 1, 1, &retval); if (error) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_da_btree.h 
linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_da_btree.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_da_btree.h 2003-08-25 18:25:06.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_da_btree.h 2003-08-25 20:25:58.000000000 +0200 @@ -185,14 +185,14 @@ typedef struct xfs_da_args { int index; /* index of attr of interest in blk */ xfs_dablk_t rmtblkno; /* remote attr value starting blkno */ int rmtblkcnt; /* remote attr value block count */ - int rename; /* T/F: this is an atomic rename op */ xfs_dablk_t blkno2; /* blkno of 2nd attr leaf of interest */ int index2; /* index of 2nd attr in blk */ xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */ int rmtblkcnt2; /* remote attr value block count */ - int justcheck; /* check for ok with no space */ - int addname; /* T/F: this is an add operation */ - int oknoent; /* T/F: ok to return ENOENT, else die */ + int justcheck : 1; /* T/F: check for ok with no space */ + int rename : 1; /* T/F: this is an atomic rename op */ + int addname : 1; /* T/F: this is an add operation */ + int oknoent : 1; /* T/F: ok to return ENOENT, else die */ } xfs_da_args_t; /* @@ -253,6 +253,7 @@ typedef struct xfs_da_state { xfs_da_state_path_t path; /* search/split paths */ xfs_da_state_path_t altpath; /* alternate path for join */ unsigned int inleaf : 1; /* insert into 1->lf, 0->splf */ + unsigned int holeok : 1; /* T/F: can deal with a hole */ unsigned int extravalid : 1; /* T/F: extrablk is in use */ unsigned int extraafter : 1; /* T/F: extrablk is after new */ xfs_da_state_blk_t extrablk; /* for double-splits on leafs */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_error.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_error.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_error.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_error.c 2003-08-25 20:25:58.000000000 +0200 @@ -102,7 +102,7 @@ xfs_error_test(int error_tag, int *fsidp if (random() % randfactor) return 0; - 
memcpy(&fsid, fsidp, sizeof(fsid_t)); + memcpy(&fsid, fsidp, sizeof(xfs_fsid_t)); for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) { @@ -123,7 +123,7 @@ xfs_errortag_add(int error_tag, xfs_moun int len; int64_t fsid; - memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t)); for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { @@ -156,7 +156,7 @@ xfs_errortag_clear(int error_tag, xfs_mo int i; int64_t fsid; - memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t)); for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { @@ -209,7 +209,7 @@ xfs_errortag_clearall(xfs_mount_t *mp) { int64_t fsid; - memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t)); return xfs_errortag_clearall_umount(fsid, mp->m_fsname, 1); } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_fsops.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_fsops.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_fsops.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_fsops.c 2003-08-25 20:25:58.000000000 +0200 @@ -172,7 +172,7 @@ xfs_growfs_data_private( if (nb < mp->m_sb.sb_dblocks) return XFS_ERROR(EINVAL); } - new = in->newblocks - mp->m_sb.sb_dblocks; + new = nb - mp->m_sb.sb_dblocks; oagcount = mp->m_sb.sb_agcount; if (nagcount > oagcount) { down_write(&mp->m_peraglock); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_inode.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_inode.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_inode.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_inode.c 2003-08-25 20:25:58.000000000 +0200 @@ -1043,7 +1043,7 @@ xfs_iread_extents( size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t); ifp 
= XFS_IFORK_PTR(ip, whichfork); /* - * We know that the size is legal (it's checked in iformat_btree) + * We know that the size is valid (it's checked in iformat_btree) */ ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); ASSERT(ifp->if_u1.if_extents != NULL); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_log.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_log.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_log.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_log.c 2003-08-25 20:25:58.000000000 +0200 @@ -65,7 +65,7 @@ STATIC int xlog_bdstrat_cb(struct xfs_b STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket, xlog_in_core_t **, xfs_lsn_t *); STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, - dev_t log_dev, + xfs_dev_t log_dev, xfs_daddr_t blk_offset, int num_bblks); STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes); @@ -155,7 +155,7 @@ int xlog_error_mod = 33; */ #if defined(XLOG_NOLOG) || defined(DEBUG) int xlog_debug = 1; -dev_t xlog_devt = 0; +xfs_dev_t xlog_devt = 0; #endif #if defined(XFS_LOG_TRACE) @@ -777,7 +777,7 @@ xfs_log_move_tail(xfs_mount_t *mp, s = GRANT_LOCK(log); - /* Also an illegal lsn. 1 implies that we aren't passing in a legal + /* Also an invalid lsn. 1 implies that we aren't passing in a valid * tail_lsn. 
*/ if (tail_lsn != 1) @@ -1160,7 +1160,7 @@ xlog_get_iclog_buffer_size(xfs_mount_t * log->l_iclog_bufs = 8; break; default: - xlog_panic("XFS: Illegal blocksize"); + xlog_panic("XFS: Invalid blocksize"); break; } } @@ -1175,7 +1175,7 @@ xlog_get_iclog_buffer_size(xfs_mount_t * */ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, - dev_t log_dev, + xfs_dev_t log_dev, xfs_daddr_t blk_offset, int num_bblks) { @@ -3400,7 +3400,7 @@ xlog_verify_iclog(xlog_t *log, icptr = log->l_iclog; for (i=0; i < log->l_iclog_bufs; i++) { if (icptr == 0) - xlog_panic("xlog_verify_iclog: illegal ptr"); + xlog_panic("xlog_verify_iclog: invalid ptr"); icptr = icptr->ic_next; } if (icptr != log->l_iclog) @@ -3410,7 +3410,7 @@ xlog_verify_iclog(xlog_t *log, /* check log magic numbers */ ptr = (xfs_caddr_t) &(iclog->ic_header); if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) - xlog_panic("xlog_verify_iclog: illegal magic num"); + xlog_panic("xlog_verify_iclog: invalid magic num"); for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count; ptr += BBSIZE) { @@ -3443,7 +3443,7 @@ xlog_verify_iclog(xlog_t *log, } } if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) - cmn_err(CE_WARN, "xlog_verify_iclog: illegal clientid %d op 0x%p offset 0x%x", clientid, ophead, field_offset); + cmn_err(CE_WARN, "xlog_verify_iclog: invalid clientid %d op 0x%p offset 0x%x", clientid, ophead, field_offset); /* check length */ field_offset = (__psint_t) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_log_priv.h linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_log_priv.h --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_log_priv.h 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_log_priv.h 2003-08-25 20:25:58.000000000 +0200 @@ -54,7 +54,7 @@ struct xfs_mount; #define XLOG_MED_ICLOGS 4 #define XLOG_MAX_ICLOGS 8 #define XLOG_CALLBACK_SIZE 10 -#define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe /* Illegal cycle number */ +#define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe /* 
Invalid cycle number */ #define XLOG_VERSION_1 1 #define XLOG_VERSION_2 2 /* Large IClogs, Log sunit */ #define XLOG_VERSION_OKBITS (XLOG_VERSION_1 | XLOG_VERSION_2) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_log_recover.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_log_recover.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_log_recover.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_log_recover.c 2003-08-25 20:25:58.000000000 +0200 @@ -449,7 +449,7 @@ xlog_find_verify_log_record( for (i = (*last_blk) - 1; i >= 0; i--) { if (i < start_blk) { - /* legal log record not found */ + /* valid log record not found */ xlog_warn( "XFS: Log inconsistent (didn't find previous header)"); ASSERT(0); @@ -582,7 +582,7 @@ xlog_find_head( * then the entire log is stamped with the same cycle number. In this * case, head_blk can't be set to zero (which makes sense). The below * math doesn't work out properly with head_blk equal to zero. Instead, - * we set it to log_bbnum which is an illegal block number, but this + * we set it to log_bbnum which is an invalid block number, but this * value makes the math correct. If head_blk doesn't changed through * all the tests below, *head_blk is set to zero at the very end rather * than log_bbnum. In a sense, log_bbnum and zero are the same block @@ -2482,7 +2482,7 @@ xlog_recover_do_inode_trans( break; default: - xlog_warn("XFS: xlog_recover_do_inode_trans: Illegal flag"); + xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag"); ASSERT(0); xfs_buf_relse(bp); return XFS_ERROR(EIO); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_mount.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_mount.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_mount.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_mount.c 2003-08-25 20:25:58.000000000 +0200 @@ -892,7 +892,7 @@ xfs_mountfs( * File systems that don't support user level file handles (i.e. 
* all of them except for XFS) will leave vfs_altfsid as NULL. */ - vfsp->vfs_altfsid = (fsid_t *)mp->m_fixedfsid; + vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid; mp->m_dmevmask = 0; /* not persistent; set after each mount */ /* diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_trans.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_trans.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfs_trans.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfs_trans.c 2003-08-25 20:25:58.000000000 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -672,7 +672,7 @@ xfs_trans_unreserve_and_mod_sb( * be inconsistent. In such cases, this returns an error, and the * caller may assume that all locked objects joined to the transaction * have already been unlocked as if the commit had succeeded. - * It's illegal to reference the transaction structure after this call. + * Do not reference the transaction structure after this call. */ /*ARGSUSED*/ int diff -Naurp linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfsidbg.c linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfsidbg.c --- linux-2.4.20-wolk4.8-fullkernel/fs/xfs/xfsidbg.c 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/fs/xfs/xfsidbg.c 2003-08-25 20:25:58.000000000 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -4167,7 +4167,7 @@ xfsidbg_xiclog(xlog_in_core_t *iclog) if (iclog->ic_state & XLOG_STATE_ALL) printflags(iclog->ic_state, ic_flags, "state:"); else - kdb_printf("state: ILLEGAL 0x%x", iclog->ic_state); + kdb_printf("state: INVALID 0x%x", iclog->ic_state); kdb_printf("\n"); } /* xfsidbg_xiclog */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/asm-i386/hardirq.h linux-2.4.20-wolk4.9-fullkernel/include/asm-i386/hardirq.h --- linux-2.4.20-wolk4.8-fullkernel/include/asm-i386/hardirq.h 2003-08-25 18:25:09.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/asm-i386/hardirq.h 2003-08-25 20:35:57.000000000 +0200 @@ -64,10 +64,15 @@ static inline int irqs_running (void) return 0; } +extern spinlock_t global_bh_lock; /* copied from linux/interrupt.h to break + include loop :-( */ + static inline void release_irqlock(int cpu) { /* if we didn't own the irq lock, just ignore.. */ if (global_irq_holder == (unsigned char) cpu) { + if (!local_bh_count(cpu)) + spin_unlock(&global_bh_lock); global_irq_holder = NO_PROC_ID; clear_bit(0,&global_irq_lock); } @@ -77,6 +82,8 @@ static inline void irq_enter(int cpu, in { ++local_irq_count(cpu); + smp_mb(); /* sync with wait_on_irq() and synchronize_irq() */ + while (test_bit(0,&global_irq_lock)) { cpu_relax(); } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/asm-i386/page.h linux-2.4.20-wolk4.9-fullkernel/include/asm-i386/page.h --- linux-2.4.20-wolk4.8-fullkernel/include/asm-i386/page.h 2003-08-25 18:26:33.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/asm-i386/page.h 2003-08-27 16:40:42.000000000 +0200 @@ -84,7 +84,11 @@ typedef struct { unsigned long pgprot; } * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. 
*/ -#define __VMALLOC_RESERVE (192 << 20) +#define __VMALLOC_RESERVE_MIN (32 << 20) +#define __VMALLOC_RESERVE_DEFAULT (128 << 20) +#define __VMALLOC_RESERVE_MAX (480 << 20) +#define __RESERVED_AREA (10 << 20) + #ifndef __ASSEMBLY__ @@ -133,9 +137,11 @@ static __inline__ int get_order(unsigned #endif /* __ASSEMBLY__ */ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) -#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) -#define __MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) -#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE)) +#define KERNEL_MEMORY ((unsigned long)(FIXADDR_START - __PAGE_OFFSET)) +#define RESERVED_AREA ((unsigned long)__RESERVED_AREA) +#define KERNEL_MAXMEM ((unsigned long)(KERNEL_MEMORY - RESERVED_AREA)) +#define __MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE_MAX) +#define MAXMEM ((unsigned long)(-PAGE_OFFSET-vmalloc_reserve)) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/binfmts.h linux-2.4.20-wolk4.9-fullkernel/include/linux/binfmts.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/binfmts.h 2003-08-25 18:26:33.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/binfmts.h 2003-08-25 20:35:58.000000000 +0200 @@ -43,6 +43,7 @@ struct linux_binfmt { int (*load_shlib)(struct file *); int (*core_dump)(long signr, struct pt_regs * regs, struct file * file); unsigned long min_coredump; /* minimal dump size */ + char *name; }; extern int register_binfmt(struct linux_binfmt *); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/dazuko.h linux-2.4.20-wolk4.9-fullkernel/include/linux/dazuko.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/dazuko.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/dazuko.h 2003-08-25 20:35:57.000000000 +0200 @@ -0,0 +1,29 @@ +/* Dazuko. 
Allow file access control for 3rd-party applications. + Copyright (C) 2002,2003 H+BEDV Datentechnik GmbH + Written by Martin Ritter + John Ogness + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +*/ + +#ifndef DAZUKO_H +#define DAZUKO_H + +#include + +#define VERSION "1.2.1" +#define DEVICE_NAME "dazuko" + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/dazukoio.h linux-2.4.20-wolk4.9-fullkernel/include/linux/dazukoio.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/dazukoio.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/dazukoio.h 2003-08-25 20:35:57.000000000 +0200 @@ -0,0 +1,85 @@ +/* Dazuko Interface. Interace with Dazuko for file access control. + Written by John Ogness + +Copyright (C) 2002-2003, H+BEDV Datentechnik GmbH +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. 
Neither the name of Dazuko nor the names of its contributors may be used +to endorse or promote products derived from this software without specific +prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef DAZUKOIO_H +#define DAZUKOIO_H + +#define DAZUKO_FILENAME_MAX_LENGTH 4095 + +/* ioctl values */ +#define IOCTL_SET_OPTION 0 +#define IOCTL_GET_AN_ACCESS 1 +#define IOCTL_RETURN_ACCESS 2 + +/* event types */ +#define ON_OPEN 1 +#define ON_CLOSE 2 +#define ON_EXEC 4 +#define ON_CLOSE_MODIFIED 8 + +/* various set option commands */ +#define SET_ACCESS_MASK 0 +#define ADD_INCLUDE_PATH 1 +#define ADD_EXCLUDE_PATH 2 +#define REGISTER 3 +#define REMOVE_ALL_PATHS 4 + +struct access_t +{ + int deny; /* set to deny file access */ + int event; /* ON_OPEN, etc */ + int o_flags; /* access flags */ + int o_mode; /* access mode */ + int uid; /* user id */ + int pid; /* user process id */ + char filename[DAZUKO_FILENAME_MAX_LENGTH + 1]; /* accessed file */ +}; + +struct option_t +{ + int command; + int buffer_length; + char buffer[DAZUKO_FILENAME_MAX_LENGTH + 1]; +}; + +int dazukoRegister(const char *groupName); +int dazukoSetAccessMask(unsigned long accessMask); +int dazukoAddIncludePath(const char *path); +int dazukoAddExcludePath(const 
char *path); +int dazukoRemoveAllPaths(void); +int dazukoGetAccess(struct access_t *acc); +int dazukoReturnAccess(struct access_t *acc); +int dazukoUnregister(void); + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/fs.h linux-2.4.20-wolk4.9-fullkernel/include/linux/fs.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/fs.h 2003-08-25 18:27:07.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/fs.h 2003-08-25 20:36:20.000000000 +0200 @@ -1294,6 +1294,7 @@ extern void end_buffer_io_async(struct b /* reiserfs_writepage needs this */ extern void set_buffer_async_io(struct buffer_head *bh) ; +extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate); #define BUF_CLEAN 0 #define BUF_LOCKED 1 /* Buffers scheduled for write */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/hfs_fs.h linux-2.4.20-wolk4.9-fullkernel/include/linux/hfs_fs.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/hfs_fs.h 2001-02-13 23:13:44.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/hfs_fs.h 2003-08-25 20:35:28.000000000 +0200 @@ -302,6 +302,7 @@ extern void hfs_sngl_ifill(struct inode /* super.c */ extern struct super_block *hfs_read_super(struct super_block *,void *,int); +extern int hfs_remount(struct super_block *, int *, char *); /* trans.c */ extern void hfs_colon2mac(struct hfs_name *, const char *, int); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/interrupt.h linux-2.4.20-wolk4.9-fullkernel/include/linux/interrupt.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/interrupt.h 2001-11-22 20:46:19.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/interrupt.h 2003-08-26 17:59:09.000000000 +0200 @@ -11,6 +11,12 @@ #include #include +/* For 2.6.x compatibility */ +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) + struct irqaction { void (*handler)(int, void *, struct pt_regs *); unsigned long flags; diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/include/linux/mm.h linux-2.4.20-wolk4.9-fullkernel/include/linux/mm.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/mm.h 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/mm.h 2003-08-25 20:36:21.000000000 +0200 @@ -371,23 +371,26 @@ typedef struct page { /* note: don't make page flags of values 24 or higher! */ /* Make it prettier to test the above... */ -#define UnlockPage(page) unlock_page(page) -#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags) -#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) -#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags) -#define PageDirty(page) test_bit(PG_dirty, &(page)->flags) -#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags) -#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags) -#define PageLocked(page) test_bit(PG_locked, &(page)->flags) -#define LockPage(page) set_bit(PG_locked, &(page)->flags) -#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags) -#define PageChecked(page) test_bit(PG_checked, &(page)->flags) -#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags) -#define BigPage(page) test_bit(PG_bigpage, &(page)->flags) - -#define PageLaunder(page) test_bit(PG_launder, &(page)->flags) -#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags) -#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags) +#define UnlockPage(page) unlock_page(page) +#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags) +#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) +#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags) +#define PageDirty(page) test_bit(PG_dirty, &(page)->flags) +#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags) +#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags) +#define PageLocked(page) test_bit(PG_locked, &(page)->flags) +#define LockPage(page) set_bit(PG_locked, 
&(page)->flags) +#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags) +#define PageChecked(page) test_bit(PG_checked, &(page)->flags) +#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags) +#define BigPage(page) test_bit(PG_bigpage, &(page)->flags) +#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags) +#define PageLaunder(page) test_bit(PG_launder, &(page)->flags) +#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags) +#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags) +#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags) +#define ClearPageError(page) clear_bit(PG_error, &(page)->flags) +#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags) /* * inlines for acquisition and release of PG_chainlock diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/netfilter_ipv4/ip_conntrack.h linux-2.4.20-wolk4.9-fullkernel/include/linux/netfilter_ipv4/ip_conntrack.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/netfilter_ipv4/ip_conntrack.h 2003-08-25 18:26:53.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/netfilter_ipv4/ip_conntrack.h 2003-08-25 23:52:45.000000000 +0200 @@ -190,6 +190,10 @@ struct ip_conntrack packet has to the conntrack */ struct nf_ct_info infos[IP_CT_NUMBER]; +#if defined(CONFIG_NET_CLS_LAYER7) || defined(CONFIG_NET_CLS_LAYER7_MODULE) + struct timespec timestamp; +#endif + /* Storage reserved for other modules: */ union ip_conntrack_proto proto; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/pci_ids.h linux-2.4.20-wolk4.9-fullkernel/include/linux/pci_ids.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/pci_ids.h 2003-08-25 18:26:48.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/pci_ids.h 2003-08-26 17:59:09.000000000 +0200 @@ -1629,6 +1629,9 @@ #define PCI_DEVICE_ID_TIGON3_5703 0x1647 #define PCI_DEVICE_ID_TIGON3_5704 0x1648 #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d +#define 
PCI_DEVICE_ID_TIGON3_5705 0x1653 +#define PCI_DEVICE_ID_TIGON3_5705M 0x165d +#define PCI_DEVICE_ID_TIGON3_5782 0x1696 #define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 #define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/pkt_cls.h linux-2.4.20-wolk4.9-fullkernel/include/linux/pkt_cls.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/pkt_cls.h 2000-01-08 01:57:13.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/pkt_cls.h 2003-08-25 23:52:45.000000000 +0200 @@ -158,4 +158,20 @@ enum #define TCA_TCINDEX_MAX TCA_TCINDEX_POLICE +/* Added by Justin */ +/* Layer 7 filter */ +enum +{ + TCA_LAYER7_UNSPEC, + TCA_LAYER7_HASH, + TCA_LAYER7_MASK, + TCA_LAYER7_SHIFT, + TCA_LAYER7_PROTOCOL, + TCA_LAYER7_CLASSID, + TCA_LAYER7_POLICE, +}; + +#define TCA_LAYER7_MAX TCA_LAYER7_POLICE + #endif + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/sched.h linux-2.4.20-wolk4.9-fullkernel/include/linux/sched.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/sched.h 2003-08-25 18:26:33.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/sched.h 2003-08-25 20:36:21.000000000 +0200 @@ -149,6 +149,7 @@ extern void update_one_process(task_t *p extern void scheduler_tick(int user_tick, int system); extern void migration_init(void); extern unsigned long cache_decay_ticks; +extern int set_user(uid_t new_ruid, int dumpclear); #define MAX_SCHEDULE_TIMEOUT LONG_MAX extern signed long FASTCALL(schedule_timeout(signed long timeout)); @@ -946,6 +947,7 @@ extern void mm_release(void); extern struct file ** alloc_fd_array(int); extern int expand_fd_array(struct files_struct *, int nr); extern void free_fd_array(struct file **, int); +extern int unshare_files(void); extern fd_set *alloc_fdset(int); extern int expand_fdset(struct files_struct *, int nr); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/seq_file.h linux-2.4.20-wolk4.9-fullkernel/include/linux/seq_file.h --- 
linux-2.4.20-wolk4.8-fullkernel/include/linux/seq_file.h 2003-08-25 18:25:19.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/seq_file.h 2003-08-25 20:35:13.000000000 +0200 @@ -8,6 +8,8 @@ struct seq_operations; struct file; +struct vfsmount; +struct dentry; struct inode; struct seq_file { @@ -58,7 +60,10 @@ static inline int seq_puts(struct seq_fi int seq_printf(struct seq_file *, const char *, ...) __attribute__ ((format (printf,2,3))); +int seq_path(struct seq_file *, struct vfsmount *, struct dentry *, char *); + int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); +int seq_release_private(struct inode *, struct file *); #endif #endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/linux/sysctl.h linux-2.4.20-wolk4.9-fullkernel/include/linux/sysctl.h --- linux-2.4.20-wolk4.8-fullkernel/include/linux/sysctl.h 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/linux/sysctl.h 2003-08-25 23:42:07.000000000 +0200 @@ -132,8 +132,7 @@ enum KERN_CORE_NAME_FORMAT=58, /* string: core file name format string */ KERN_PREEMPTWARN=59, /* int: enable warnings about preempted processes */ KERN_ALLOW_SETID_CORE=60, /* int: dump core from setid processes */ - KERN_MAGICKEY=61, /* int: key code to send to magic sysrq handler */ - KERN_RENICEGID=62, /* int: GID of renicing own processes */ + KERN_RENICEGID=61, /* int: GID of renicing own processes */ KERN_GRSECURITY=68, /* grsecurity */ }; @@ -333,9 +332,11 @@ enum NET_TCP_STACK_ACK=97, NET_TCP_FRTO=98, NET_TCP_LOW_LATENCY=99, - NET_IP_PORT_ACL_GID=100, - NET_IP_SOCK_RAW_GID=101, - NET_IP_SOCK_PACKET_GID=102 + NET_IPV4_IPFRAG_SECRET_INTERVAL=100, + NET_IPV4_SECURE_PMTU=101, + NET_IP_PORT_ACL_GID=102, + NET_IP_SOCK_RAW_GID=103, + NET_IP_SOCK_PACKET_GID=104 }; enum { @@ -797,8 +798,6 @@ extern int proc_doulongvec_minmax(ctl_ta void *, size_t *); extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, 
int, struct file *, void *, size_t *); -extern int proc_do_magic_sysrq(ctl_table *, int, struct file *, - void *, size_t *); extern int do_sysctl (int *name, int nlen, void *oldval, size_t *oldlenp, diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/net/pkt_sched.h linux-2.4.20-wolk4.9-fullkernel/include/net/pkt_sched.h --- linux-2.4.20-wolk4.8-fullkernel/include/net/pkt_sched.h 2003-08-25 18:26:57.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/net/pkt_sched.h 2003-08-25 20:35:48.000000000 +0200 @@ -5,7 +5,7 @@ #define PSCHED_JIFFIES 2 #define PSCHED_CPU 3 -#if defined (CONFIG_M386) || defined (CONFIG_M486) || defined (CONFIG_PPC32) +#if defined (CONFIG_M386) || defined (CONFIG_M486) || defined (CONFIG_M586) || defined (CONFIG_PPC32) #define PSCHED_CLOCK_SOURCE PSCHED_JIFFIES #else #define PSCHED_CLOCK_SOURCE PSCHED_CPU diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/net/snmp.h linux-2.4.20-wolk4.9-fullkernel/include/net/snmp.h --- linux-2.4.20-wolk4.8-fullkernel/include/net/snmp.h 2001-11-22 20:47:11.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/net/snmp.h 2003-08-25 20:35:30.000000000 +0200 @@ -256,6 +256,7 @@ struct linux_mib unsigned long TCPAbortOnLinger; unsigned long TCPAbortFailed; unsigned long TCPMemoryPressures; + unsigned long TCPShortIcmp; unsigned long __pad[0]; } ____cacheline_aligned; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/net/transp_v6.h linux-2.4.20-wolk4.9-fullkernel/include/net/transp_v6.h --- linux-2.4.20-wolk4.8-fullkernel/include/net/transp_v6.h 2001-11-22 20:47:58.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/net/transp_v6.h 2003-08-25 20:35:30.000000000 +0200 @@ -15,6 +15,8 @@ extern struct proto tcpv6_prot; struct flowi; +extern void ipv6_frag_init(void); + extern void rawv6_init(void); extern void udpv6_init(void); extern void tcpv6_init(void); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/aci.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/aci.h --- 
linux-2.4.20-wolk4.8-fullkernel/include/rsbac/aci.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/aci.h 2003-08-25 20:33:03.000000000 +0200 @@ -26,7 +26,11 @@ /* disk. After this call, all ACI is kept in memory for performance reasons,*/ /* but user and file/dir object ACI are written to disk on every change. */ +#ifdef CONFIG_RSBAC_INIT_DELAY +extern int rsbac_init(kdev_t root_dev); +#else extern int rsbac_init(kdev_t root_dev) __init; +#endif /* To turn RSBAC off on umount of root device */ extern void rsbac_off(void); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/aci_data_structures.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/aci_data_structures.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/aci_data_structures.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/aci_data_structures.h 2003-08-25 20:33:03.000000000 +0200 @@ -1,8 +1,8 @@ /**************************************/ /* Rule Set Based Access Control */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* Data structures */ -/* Last modified: 18/Jun/2002 */ +/* Last modified: 06/Jun/2003 */ /**************************************/ #ifndef __RSBAC_DATA_STRUC_H @@ -54,6 +54,7 @@ #define RSBAC_RC_FD_NAME "fd_rc." #define RSBAC_AUTH_FD_NAME "fd_auth." #define RSBAC_CAP_FD_NAME "fd_cap." +#define RSBAC_RES_FD_NAME "fd_res." #define RSBAC_ACI_USER_NAME "useraci" /* dir creation mode for discretionary access control: no rights*/ @@ -83,6 +84,7 @@ #define RSBAC_RC_NR_FD_LISTS 16 #define RSBAC_AUTH_NR_FD_LISTS 2 #define RSBAC_CAP_NR_FD_LISTS 4 +#define RSBAC_RES_NR_FD_LISTS 4 #ifdef CONFIG_RSBAC_INIT_THREAD /* Check and set init timeout */ @@ -111,7 +113,7 @@ /* Caution: whenever ACI changes, version and old_version should be increased! 
*/ -#define RSBAC_GEN_FD_ACI_VERSION 3 +#define RSBAC_GEN_FD_ACI_VERSION 4 #define RSBAC_GEN_FD_ACI_KEY 1001 struct rsbac_gen_fd_aci_t { @@ -119,6 +121,7 @@ struct rsbac_gen_fd_aci_t rsbac_log_array_t log_array_high; /* high and low bits */ rsbac_request_vector_t log_program_based; /* Program based logging */ rsbac_enum_t symlink_add_uid; + rsbac_enum_t symlink_add_mac_level; rsbac_enum_t symlink_add_rc_role; rsbac_enum_t linux_dac_disable; }; @@ -128,6 +131,7 @@ struct rsbac_gen_fd_aci_t -1, /* log_array_high (log request based) */ \ 0, /* log_program_based (don't log specially) */ \ FALSE, /* symlink_add_uid (don't add uid) */ \ + FALSE, /* symlink_add_mac_level (don't add MAC security level) */ \ FALSE, /* symlink_add_rc_role (don't add RC role) */ \ LDD_inherit /* linux_dac_disable (inherit from parent) */ \ } @@ -138,36 +142,79 @@ struct rsbac_gen_fd_aci_t -1, /* log_array_high (log request based) */ \ 0, /* log_program_based (don't log specially) */ \ FALSE, /* symlink_add_uid (don't add uid) */ \ + FALSE, /* symlink_add_mac_level (don't add MAC security level) */ \ FALSE, /* symlink_add_rc_role (don't add RC role) */ \ LDD_false /* linux_dac_disable (do not disable) */ \ } -#define RSBAC_GEN_FD_OLD_ACI_VERSION 2 +#define RSBAC_GEN_FD_OLD_ACI_VERSION 3 struct rsbac_gen_fd_old_aci_t { rsbac_log_array_t log_array_low; /* file/dir based logging, */ rsbac_log_array_t log_array_high; /* high and low bits */ rsbac_request_vector_t log_program_based; /* Program based logging */ rsbac_enum_t symlink_add_uid; + rsbac_enum_t symlink_add_rc_role; + rsbac_enum_t linux_dac_disable; }; -#define RSBAC_GEN_FD_OLD_OLD_ACI_VERSION 1 +#define RSBAC_GEN_FD_OLD_OLD_ACI_VERSION 2 struct rsbac_gen_fd_old_old_aci_t { rsbac_log_array_t log_array_low; /* file/dir based logging, */ rsbac_log_array_t log_array_high; /* high and low bits */ rsbac_request_vector_t log_program_based; /* Program based logging */ + rsbac_enum_t symlink_add_uid; + }; + +#define 
RSBAC_GEN_FD_OLD_OLD_OLD_ACI_VERSION 1 +struct rsbac_gen_fd_old_old_old_aci_t + { + rsbac_log_array_t log_array_low; /* file/dir based logging, */ + rsbac_log_array_t log_array_high; /* high and low bits */ + rsbac_request_vector_t log_program_based; /* Program based logging */ }; #if defined(CONFIG_RSBAC_MAC) -#define RSBAC_MAC_FD_ACI_VERSION 1 +#define RSBAC_MAC_FD_ACI_VERSION 4 #define RSBAC_MAC_FD_ACI_KEY 1001 struct rsbac_mac_fd_aci_t { rsbac_security_level_t sec_level; /* MAC */ rsbac_uid_t mac_trusted_for_user; /* MAC (for FILE only) */ rsbac_mac_category_vector_t mac_categories; /* MAC category set */ + rsbac_mac_auto_int_t mac_auto; /* auto-adjust current level */ + rsbac_boolean_int_t mac_prop_trusted; /* Keep trusted flag when executing this file */ + rsbac_mac_file_flags_t mac_file_flags; /* allow write_up, read_up etc. to it */ + }; + +#define RSBAC_MAC_FD_OLD_ACI_VERSION 3 +struct rsbac_mac_fd_old_aci_t + { + rsbac_security_level_t sec_level; /* MAC */ + rsbac_uid_t mac_trusted_for_user; /* MAC (for FILE only) */ + rsbac_mac_category_vector_t mac_categories; /* MAC category set */ + rsbac_mac_auto_int_t mac_auto; /* auto-adjust current level */ + rsbac_boolean_int_t mac_prop_trusted; /* Keep trusted flag when executing this file */ + rsbac_boolean_int_t mac_shared; /* Shared dir, i.e., allow write_up to it */ + }; + +#define RSBAC_MAC_FD_OLD_OLD_ACI_VERSION 2 +struct rsbac_mac_fd_old_old_aci_t + { + rsbac_security_level_t sec_level; /* MAC */ + rsbac_uid_t mac_trusted_for_user; /* MAC (for FILE only) */ + rsbac_mac_category_vector_t mac_categories; /* MAC category set */ + rsbac_mac_auto_int_t mac_auto; /* auto-adjust current level */ + }; + +#define RSBAC_MAC_FD_OLD_OLD_OLD_ACI_VERSION 1 +struct rsbac_mac_fd_old_old_old_aci_t + { + rsbac_security_level_t sec_level; /* MAC */ + rsbac_uid_t mac_trusted_for_user; /* MAC (for FILE only) */ + rsbac_mac_category_vector_t mac_categories; /* MAC category set */ }; #define DEFAULT_MAC_FD_ACI_INH \ @@ -175,12 
+222,18 @@ struct rsbac_mac_fd_aci_t SL_inherit, /* security_level (MAC) */ \ RSBAC_NO_USER, /* mac_trusted_for_user (MAC) */ \ RSBAC_MAC_INHERIT_CAT_VECTOR, /* mac_categories (MAC) */ \ + MA_inherit, /* mac_auto (MAC) */ \ + FALSE, /* prop_trusted */ \ + FALSE /* shared */ \ } #define DEFAULT_MAC_FD_ACI_NO_INH \ { \ - SL_unclassified, /* security_level (MAC) */ \ - RSBAC_NO_USER, /* mac_trusted_for_user (MAC) */ \ + SL_unclassified, /* security_level (MAC) */ \ + RSBAC_NO_USER, /* mac_trusted_for_user (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* mac_categories (MAC) */ \ + MA_yes, /* mac_auto (MAC) */ \ + FALSE, /* prop_trusted */ \ + FALSE /* shared */ \ } #ifdef CONFIG_RSBAC_MAC_DEF_INHERIT @@ -194,6 +247,9 @@ struct rsbac_mac_fd_aci_t SL_unclassified, /* security_level (MAC) */ \ RSBAC_NO_USER, /* mac_trusted_for_user (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* mac_categories (MAC) */ \ + MA_yes, /* mac_auto (MAC) */ \ + FALSE, /* prop_trusted */ \ + FALSE /* shared */ \ } #endif @@ -226,7 +282,8 @@ struct rsbac_pm_fd_aci_t #endif #if defined(CONFIG_RSBAC_MS) -#define RSBAC_MS_FD_ACI_VERSION 2 +#define RSBAC_MS_FD_ACI_VERSION 3 +#define RSBAC_MS_FD_OLD_ACI_VERSION 2 #define RSBAC_MS_FD_ACI_KEY 1001 #define RSBAC_MS_SCANNED_FD_ACI_VERSION 2 struct rsbac_ms_fd_aci_t @@ -234,6 +291,14 @@ struct rsbac_ms_fd_aci_t __u8 ms_trusted; /* MS (for FILE only) (boolean) */ rsbac_ms_sock_trusted_int_t ms_sock_trusted_tcp; /* MS (for FILE only) (enum rsbac_ms_sock_trusted_t) */ rsbac_ms_sock_trusted_int_t ms_sock_trusted_udp; /* MS (for FILE only) (enum rsbac_ms_sock_trusted_t) */ + rsbac_ms_need_scan_t ms_need_scan; /* MS */ + }; + +struct rsbac_ms_fd_old_aci_t + { + __u8 ms_trusted; /* MS (for FILE only) (boolean) */ + rsbac_ms_sock_trusted_int_t ms_sock_trusted_tcp; /* MS (for FILE only) (enum rsbac_ms_sock_trusted_t) */ + rsbac_ms_sock_trusted_int_t ms_sock_trusted_udp; /* MS (for FILE only) (enum rsbac_ms_sock_trusted_t) */ }; #define DEFAULT_MS_FD_ACI \ @@ -241,6 +306,15 @@ 
struct rsbac_ms_fd_aci_t FALSE, /* ms_trusted (MS) */ \ MS_not_trusted, /* ms_sock_trusted_tcp (MS) */ \ MS_not_trusted, /* ms_sock_trusted_udp (MS) */ \ + DEFAULT_MS_FD_NEED_SCAN /* ms_need_scan (MS) */ \ + } + +#define DEFAULT_MS_ROOT_DIR_ACI \ + { \ + FALSE, /* ms_trusted (MS) */ \ + MS_not_trusted, /* ms_sock_trusted_tcp (MS) */ \ + MS_not_trusted, /* ms_sock_trusted_udp (MS) */ \ + DEFAULT_MS_ROOT_DIR_NEED_SCAN /* ms_need_scan (MS) */ \ } #endif @@ -305,11 +379,53 @@ struct rsbac_cap_fd_aci_t } #endif -#define RSBAC_FD_NR_ATTRIBUTES 26 +#if defined(CONFIG_RSBAC_RES) +#define RSBAC_RES_FD_ACI_VERSION 1 +#define RSBAC_RES_FD_ACI_KEY 1002 +struct rsbac_res_fd_aci_t + { + rsbac_res_array_t res_min; + rsbac_res_array_t res_max; + }; +#define DEFAULT_RES_FD_ACI \ + { \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + }, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + } \ + } +#endif + +#define RSBAC_FD_NR_ATTRIBUTES 33 #define RSBAC_FD_ATTR_LIST { \ A_security_level, \ A_mac_trusted_for_user, \ 
A_mac_categories, \ + A_mac_auto, \ + A_mac_prop_trusted, \ + A_mac_file_flags, \ A_object_category, \ A_data_type, \ A_pm_object_class, \ @@ -319,6 +435,7 @@ struct rsbac_cap_fd_aci_t A_ms_trusted, \ A_ms_sock_trusted_tcp, \ A_ms_sock_trusted_udp, \ + A_ms_need_scan, \ A_ff_flags, \ A_rc_type_fd, \ A_rc_force_role, \ @@ -329,10 +446,13 @@ struct rsbac_cap_fd_aci_t A_log_array_high, \ A_log_program_based, \ A_symlink_add_uid, \ + A_symlink_add_mac_level, \ A_symlink_add_rc_role, \ A_linux_dac_disable, \ A_min_caps, \ - A_max_caps \ + A_max_caps, \ + A_res_min, \ + A_res_max \ } #ifdef __KERNEL__ @@ -367,6 +487,9 @@ struct rsbac_fd_list_handles_t #if defined(CONFIG_RSBAC_CAP) rsbac_list_handle_t cap[RSBAC_CAP_NR_FD_LISTS]; #endif +#if defined(CONFIG_RSBAC_RES) + rsbac_list_handle_t res[RSBAC_RES_NR_FD_LISTS]; +#endif }; /* The list of devices is also a double linked list, so we define list */ @@ -631,6 +754,7 @@ struct rsbac_ipc_handles_t #define RSBAC_AUTH_ACI_USER_NAME "u_auth" #define RSBAC_CAP_ACI_USER_NAME "u_cap" #define RSBAC_JAIL_ACI_USER_NAME "u_jail" +#define RSBAC_RES_ACI_USER_NAME "u_res" #define RSBAC_GEN_USER_ACI_VERSION 1 #define RSBAC_GEN_USER_ACI_KEY 1001 @@ -646,10 +770,41 @@ struct rsbac_gen_user_aci_t } #if defined(CONFIG_RSBAC_MAC) -#define RSBAC_MAC_USER_ACI_VERSION 1 +#define RSBAC_MAC_USER_ACI_VERSION 4 +#define RSBAC_MAC_USER_OLD_ACI_VERSION 3 +#define RSBAC_MAC_USER_OLD_OLD_ACI_VERSION 2 +#define RSBAC_MAC_USER_OLD_OLD_OLD_ACI_VERSION 1 #define RSBAC_MAC_USER_ACI_KEY 1001 struct rsbac_mac_user_aci_t { + rsbac_security_level_t security_level; /* maximum level */ + rsbac_security_level_t initial_security_level; /* maximum level */ + rsbac_security_level_t min_security_level; /* minimum level / __u8 */ + rsbac_mac_category_vector_t mac_categories; /* MAC max category set */ + rsbac_mac_category_vector_t mac_initial_categories; /* MAC max category set */ + rsbac_mac_category_vector_t mac_min_categories; /* MAC min category set */ + 
rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */ + rsbac_mac_user_flags_t mac_user_flags; /* flags (override, trusted, allow_auto etc.) */ + }; +struct rsbac_mac_user_old_aci_t + { + rsbac_security_level_t access_appr; /* maximum level */ + rsbac_security_level_t min_access_appr; /* minimum level / __u8 */ + rsbac_mac_category_vector_t mac_categories; /* MAC max category set */ + rsbac_mac_category_vector_t mac_min_categories; /* MAC min category set */ + rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */ + rsbac_boolean_int_t mac_allow_auto; /* allow to auto-adjust current level */ + }; +struct rsbac_mac_user_old_old_aci_t + { + rsbac_security_level_t access_appr; /* maximum level */ + rsbac_security_level_t min_access_appr; /* minimum level / __u8 */ + rsbac_mac_category_vector_t mac_categories; /* MAC max category set */ + rsbac_mac_category_vector_t mac_min_categories; /* MAC min category set */ + rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */ + }; +struct rsbac_mac_user_old_old_old_aci_t + { rsbac_security_level_t access_appr; /* enum old_rsbac_security_level_t / __u8 */ rsbac_mac_category_vector_t mac_categories; /* MAC category set */ rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */ @@ -657,20 +812,46 @@ struct rsbac_mac_user_aci_t #define DEFAULT_MAC_U_ACI \ { \ SL_unclassified, /* security_level (MAC) */ \ + SL_unclassified, /* initial_security_level (MAC) */ \ + SL_unclassified, /* min_security_level (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* mac_categories (MAC) */ \ - SR_user /* system_role (MAC) */ \ + RSBAC_MAC_DEF_CAT_VECTOR, /* mac_initial_categories (MAC) */ \ + RSBAC_MAC_MIN_CAT_VECTOR, /* mac_min_categories (MAC) */ \ + SR_user, /* system_role (MAC) */ \ + RSBAC_MAC_DEF_U_FLAGS /* mac_user_flags */ \ } #define DEFAULT_MAC_U_SYSADM_ACI \ { \ SL_unclassified, /* security_level (MAC) */ \ + SL_unclassified, /* initial_security_level (MAC) */ \ + SL_unclassified, /* 
min_security_level (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* mac_categories (MAC) */ \ - SR_administrator /* system_role (MAC) */ \ + RSBAC_MAC_DEF_CAT_VECTOR, /* mac_initial_categories (MAC) */ \ + RSBAC_MAC_MIN_CAT_VECTOR, /* mac_min_categories (MAC) */ \ + SR_administrator, /* system_role (MAC) */ \ + RSBAC_MAC_DEF_SYSADM_U_FLAGS /* mac_user_flags */ \ } #define DEFAULT_MAC_U_SECOFF_ACI \ { \ SL_unclassified, /* security_level (MAC) */ \ + SL_unclassified, /* initial_security_level (MAC) */ \ + SL_unclassified, /* min_security_level (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* mac_categories (MAC) */ \ - SR_security_officer /* system_role (MAC) */ \ + RSBAC_MAC_DEF_CAT_VECTOR, /* mac_initial_categories (MAC) */ \ + RSBAC_MAC_MIN_CAT_VECTOR, /* mac_min_categories (MAC) */ \ + SR_security_officer, /* system_role (MAC) */ \ + RSBAC_MAC_DEF_SECOFF_U_FLAGS /* mac_user_flags */ \ + } +#define DEFAULT_MAC_U_AUDITOR_ACI \ + { \ + SL_unclassified, /* security_level (MAC) */ \ + SL_unclassified, /* initial_security_level (MAC) */ \ + SL_unclassified, /* min_security_level (MAC) */ \ + RSBAC_MAC_DEF_CAT_VECTOR, /* mac_categories (MAC) */ \ + RSBAC_MAC_DEF_CAT_VECTOR, /* mac_initial_categories (MAC) */ \ + RSBAC_MAC_MIN_CAT_VECTOR, /* mac_min_categories (MAC) */ \ + SR_auditor, /* system_role (MAC) */ \ + RSBAC_MAC_DEF_U_FLAGS /* mac_user_flags */ \ } #endif @@ -766,6 +947,12 @@ struct rsbac_cap_user_aci_t 0, /* min_caps (none) */ \ -1 /* max_caps (all) */ \ } +#define DEFAULT_CAP_U_AUDITOR_ACI \ + { \ + SR_auditor, \ + 0, /* min_caps (none) */ \ + -1 /* max_caps (all) */ \ + } #endif #if defined(CONFIG_RSBAC_JAIL) @@ -773,14 +960,150 @@ struct rsbac_cap_user_aci_t #define RSBAC_JAIL_USER_ACI_KEY 1001 #endif +#if defined(CONFIG_RSBAC_RES) +#define RSBAC_RES_USER_ACI_VERSION 1 +#define RSBAC_RES_USER_ACI_KEY 1002 +struct rsbac_res_user_aci_t + { + rsbac_system_role_int_t res_role; /* System role for RES administration */ + rsbac_res_array_t res_min; + rsbac_res_array_t res_max; + 
}; +#define DEFAULT_RES_U_ACI \ + { \ + SR_user, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + }, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + } \ + } +#define DEFAULT_RES_U_SYSADM_ACI \ + { \ + SR_administrator, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + }, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set 
size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + } \ + } +#define DEFAULT_RES_U_SECOFF_ACI \ + { \ + SR_security_officer, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + }, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + } \ + } +#define DEFAULT_RES_U_AUDITOR_ACI \ + { \ + SR_auditor, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + 
RSBAC_RES_UNSET /* maximum file locks */ \ + }, \ + { \ + RSBAC_RES_UNSET, /* cpu time */ \ + RSBAC_RES_UNSET, /* file size */ \ + RSBAC_RES_UNSET, /* process data segment size */ \ + RSBAC_RES_UNSET, /* stack size */ \ + RSBAC_RES_UNSET, /* core dump size */ \ + RSBAC_RES_UNSET, /* resident memory set size */ \ + RSBAC_RES_UNSET, /* number of processes for this user */ \ + RSBAC_RES_UNSET, /* number of files */ \ + RSBAC_RES_UNSET, /* locked-in-memory address space */ \ + RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \ + RSBAC_RES_UNSET /* maximum file locks */ \ + } \ + } +#endif + -#define RSBAC_USER_NR_ATTRIBUTES 16 +#define RSBAC_USER_NR_ATTRIBUTES 25 #define RSBAC_USER_ATTR_LIST { \ A_pseudo, \ A_log_user_based, \ A_security_level, \ + A_initial_security_level, \ + A_min_security_level, \ A_mac_categories, \ + A_mac_initial_categories, \ + A_mac_min_categories, \ A_mac_role, \ + A_mac_user_flags, \ A_fc_role, \ A_sim_role, \ A_ms_role, \ @@ -791,7 +1114,11 @@ struct rsbac_cap_user_aci_t A_rc_def_role, \ A_min_caps, \ A_max_caps, \ - A_jail_role \ + A_cap_role, \ + A_jail_role, \ + A_res_role, \ + A_res_min, \ + A_res_max \ } #ifdef __KERNEL__ @@ -828,6 +1155,9 @@ struct rsbac_user_handles_t #if defined(CONFIG_RSBAC_JAIL) rsbac_list_handle_t jail; #endif +#if defined(CONFIG_RSBAC_RES) + rsbac_list_handle_t res; +#endif }; #endif @@ -835,15 +1165,24 @@ struct rsbac_user_handles_t /* Process ACI. */ #define RSBAC_GEN_ACI_PROCESS_NAME "process_gen" -#define RSBAC_MAC_ACI_PROCESS_NAME "process_mac" +#define RSBAC_MAC_ACI_PROCESS_NAME "process_mac." #define RSBAC_PM_ACI_PROCESS_NAME "process_pm" #define RSBAC_MS_ACI_PROCESS_NAME "process_ms" -#define RSBAC_RC_ACI_PROCESS_NAME "process_rc" +#define RSBAC_RC_ACI_PROCESS_NAME "process_rc." 
#define RSBAC_AUTH_ACI_PROCESS_NAME "process_auth" +#define RSBAC_CAP_ACI_PROCESS_NAME "process_cap" #define RSBAC_JAIL_ACI_PROCESS_NAME "process_jail" #define RSBAC_GEN_PROCESS_ACI_VERSION 1 #define RSBAC_GEN_PROCESS_ACI_KEY 1001 +struct rsbac_gen_process_aci_t + { + rsbac_request_vector_t log_program_based; + }; +#define DEFAULT_GEN_P_ACI \ + { \ + 0 /* log_program_based */ \ + } #if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT) #define RSBAC_MAC_PROCESS_ACI_VERSION 1 @@ -851,41 +1190,53 @@ struct rsbac_user_handles_t struct rsbac_mac_process_aci_t { rsbac_security_level_t owner_sec_level; /* enum old_rsbac_security_level_t */ + rsbac_security_level_t owner_initial_sec_level; /* enum old_rsbac_security_level_t */ + rsbac_security_level_t owner_min_sec_level; /* enum old_rsbac_security_level_t */ rsbac_mac_category_vector_t mac_owner_categories; /* MAC category set */ + rsbac_mac_category_vector_t mac_owner_initial_categories; /* MAC category set */ + rsbac_mac_category_vector_t mac_owner_min_categories; /* MAC category set */ rsbac_security_level_t current_sec_level; /* enum rsbac_security_level_t */ rsbac_mac_category_vector_t mac_curr_categories; /* MAC current category set */ rsbac_security_level_t min_write_open; /* for *-property, enum rsbac_security_level_t */ rsbac_mac_category_vector_t min_write_categories; /* MAC, for *-property */ rsbac_security_level_t max_read_open; /* for *-property, enum rsbac_security_level_t */ rsbac_mac_category_vector_t max_read_categories; /* MAC, for *-property */ - rsbac_boolean_int_t mac_auto; /* auto-curr-sec-level?, boolean */ - rsbac_boolean_int_t mac_trusted; /* trusted process?, boolean */ + rsbac_mac_process_flags_t mac_process_flags; /* flags (override, trusted, auto etc.) 
*/ + rsbac_uid_t mac_trusted_for_user; /* copied from program file */ }; #define DEFAULT_MAC_P_ACI \ { \ - SL_unclassified, /* owner-sec-level (MAC) */ \ + SL_unclassified, /* owner-sec-level (MAC) */ \ + SL_unclassified, /* owner-initial_sec-level (MAC) */ \ + SL_unclassified, /* owner-min-sec-level (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* owner_categories (MAC) */ \ - SL_unclassified, /* current-sec-level (MAC) */ \ + RSBAC_MAC_DEF_CAT_VECTOR, /* owner_initial_categories (MAC) */ \ + RSBAC_MAC_MIN_CAT_VECTOR, /* owner_min_categories (MAC) */ \ + SL_unclassified, /* current-sec-level (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* mac_curr_categories (MAC) */ \ - SL_max, /* min-write-open (MAC) */ \ + SL_max, /* min-write-open (MAC) */ \ RSBAC_MAC_MAX_CAT_VECTOR, /* min_write_categories (MAC) */ \ - SL_unclassified, /* max-read-open (MAC) */ \ + SL_unclassified, /* max-read-open (MAC) */ \ RSBAC_MAC_MIN_CAT_VECTOR, /* max_read_categories (MAC) */ \ - TRUE, /* auto-mac? (MAC) */ \ - FALSE /* trusted? 
(MAC) */ \ + RSBAC_MAC_DEF_P_FLAGS, /* mac_process_flags */ \ + RSBAC_NO_USER /* mac_trusted_for_user */ \ } #define DEFAULT_MAC_P_INIT_ACI \ { \ - SL_unclassified, /* owner-sec-level (MAC) */ \ + SL_unclassified, /* owner-sec-level (MAC) */ \ + SL_unclassified, /* owner-initial_sec-level (MAC) */ \ + SL_unclassified, /* owner-min-sec-level (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* owner_categories (MAC) */ \ - SL_unclassified, /* current-sec-level (MAC) */ \ + RSBAC_MAC_DEF_CAT_VECTOR, /* owner_initial_categories (MAC) */ \ + RSBAC_MAC_MIN_CAT_VECTOR, /* owner_min_categories (MAC) */ \ + SL_unclassified, /* current-sec-level (MAC) */ \ RSBAC_MAC_DEF_CAT_VECTOR, /* mac_curr_categories (MAC) */ \ - SL_max, /* min-write-open (MAC) */ \ + SL_max, /* min-write-open (MAC) */ \ RSBAC_MAC_MAX_CAT_VECTOR, /* min_write_categories (MAC) */ \ - SL_unclassified, /* max-read-open (MAC) */ \ + SL_unclassified, /* max-read-open (MAC) */ \ RSBAC_MAC_MIN_CAT_VECTOR, /* max_read_categories (MAC) */ \ - FALSE, /* auto-mac? (MAC) */ \ - TRUE /* trusted? 
(MAC) */ \ + RSBAC_MAC_DEF_INIT_P_FLAGS, /* mac_process_flags */ \ + RSBAC_NO_USER /* mac_trusted_for_user */ \ } #endif @@ -962,6 +1313,19 @@ struct rsbac_auth_process_aci_t } #endif +#if defined(CONFIG_RSBAC_CAP) +#define RSBAC_CAP_PROCESS_ACI_VERSION 1 +#define RSBAC_CAP_PROCESS_ACI_KEY 1001 +struct rsbac_cap_process_aci_t + { + rsbac_cap_process_hiding_int_t cap_process_hiding; + }; +#define DEFAULT_CAP_P_ACI \ + { \ + PH_off /* cap_process_hiding */ \ + } +#endif + #if defined(CONFIG_RSBAC_JAIL) #define RSBAC_JAIL_PROCESS_ACI_VERSION 1 #define RSBAC_JAIL_PROCESS_ACI_KEY 1001 @@ -979,18 +1343,20 @@ struct rsbac_jail_process_aci_t } #endif -#define RSBAC_PROCESS_NR_ATTRIBUTES 25 +#define RSBAC_PROCESS_NR_ATTRIBUTES 28 #define RSBAC_PROCESS_ATTR_LIST { \ A_security_level, \ + A_min_security_level, \ A_mac_categories, \ + A_mac_min_categories, \ A_current_sec_level, \ A_mac_curr_categories, \ A_min_write_open, \ A_min_write_categories, \ A_max_read_open, \ A_max_read_categories, \ - A_mac_auto, \ - A_mac_trusted, \ + A_mac_process_flags, \ + A_mac_trusted_for_user, \ A_pm_tp, \ A_pm_current_task, \ A_pm_process_type, \ @@ -1002,6 +1368,7 @@ struct rsbac_jail_process_aci_t A_rc_force_role, \ A_auth_may_setuid, \ A_auth_may_set_cap, \ + A_cap_process_hiding, \ A_jail_id, \ A_jail_ip, \ A_jail_flags, \ @@ -1013,7 +1380,7 @@ struct rsbac_process_handles_t { rsbac_list_handle_t gen; #if defined(CONFIG_RSBAC_MAC) - rsbac_list_handle_t mac; + rsbac_list_handle_t mac[CONFIG_RSBAC_MAC_NR_P_LISTS]; #endif #if defined(CONFIG_RSBAC_PM) rsbac_list_handle_t pm; @@ -1022,11 +1389,14 @@ struct rsbac_process_handles_t rsbac_list_handle_t ms; #endif #if defined(CONFIG_RSBAC_RC) - rsbac_list_handle_t rc; + rsbac_list_handle_t rc[CONFIG_RSBAC_RC_NR_P_LISTS]; #endif #if defined(CONFIG_RSBAC_AUTH) rsbac_list_handle_t auth; #endif +#if defined(CONFIG_RSBAC_CAP) + rsbac_list_handle_t cap; +#endif #if defined(CONFIG_RSBAC_JAIL) rsbac_list_handle_t jail; #endif diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/include/rsbac/acl.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/acl.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/acl.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/acl.h 2003-08-25 20:33:03.000000000 +0200 @@ -25,7 +25,11 @@ /* disk. After this call, all ACI is kept in memory for performance reasons,*/ /* but user and file/dir object ACI are written to disk on every change. */ +#ifdef CONFIG_RSBAC_INIT_DELAY +extern int rsbac_init_acl(void); +#else extern int rsbac_init_acl(void) __init; +#endif /* mounting and umounting */ int rsbac_mount_acl(kdev_t kdev); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/acl_data_structures.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/acl_data_structures.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/acl_data_structures.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/acl_data_structures.h 2003-08-25 20:33:03.000000000 +0200 @@ -19,20 +19,20 @@ #define RSBAC_ACL_GENERAL_FD_ENTRY \ { ACLS_GROUP, \ RSBAC_ACL_GROUP_EVERYONE, \ - ( RSBAC_FD_REQUEST_VECTOR & RSBAC_READ_WRITE_REQUEST_VECTOR ) | RSBAC_ACL_GEN_RIGHTS_VECTOR } + ( RSBAC_FD_REQUEST_VECTOR & RSBAC_READ_WRITE_REQUEST_VECTOR ) | RSBAC_EXECUTE_REQUEST_VECTOR | RSBAC_ACL_GEN_RIGHTS_VECTOR } #define RSBAC_ACL_ACMAN_FD_ENTRY \ { ACLS_USER, \ RSBAC_SECOFF_UID, \ ( RSBAC_FD_REQUEST_VECTOR & \ - ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \ + ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_EXECUTE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR } #define RSBAC_ACL_SYSADM_FD_ENTRY \ { ACLS_USER, \ RSBAC_SYSADM_UID, \ ( RSBAC_FD_REQUEST_VECTOR & \ - ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \ + ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_EXECUTE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR } #define RSBAC_ACL_GENERAL_DEV_ENTRY \ @@ 
-156,6 +156,17 @@ | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) \ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR } +#define RSBAC_ACL_AUDITOR_SCD_RSBACLOG_ENTRY \ + { ACLS_USER, \ + RSBAC_AUDITOR_UID, \ + ( RSBAC_SCD_REQUEST_VECTOR & \ + ( \ + ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \ + | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) \ + ) \ + ) \ + } + #ifdef CONFIG_RSBAC_USER_MOD_IOPERM #define RSBAC_ACL_SYSADM_SCD_KMEM_ENTRY \ { ACLS_USER, \ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/acl_types.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/acl_types.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/acl_types.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/acl_types.h 2003-08-25 20:33:03.000000000 +0200 @@ -14,6 +14,8 @@ #define RSBAC_ACL_TTL_KEEP RSBAC_LIST_TTL_KEEP; +#define RSBAC_ACL_MAX_MAXNUM 1000000 + enum rsbac_acl_subject_type_t {ACLS_USER, ACLS_ROLE, ACLS_GROUP, ACLS_NONE}; typedef __u8 rsbac_acl_int_subject_type_t; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/adf.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/adf.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/adf.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/adf.h 2003-08-25 20:33:03.000000000 +0200 @@ -20,7 +20,11 @@ /***************************************************/ /* Init function */ +#ifdef CONFIG_RSBAC_INIT_DELAY +extern void rsbac_init_adf(void); +#else extern void rsbac_init_adf(void) __init; +#endif /* This function is the internal decision function, called from the next. */ /* It allows to ignore a certain module (last parameter), e.g. 
for asking */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/adf_main.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/adf_main.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/adf_main.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/adf_main.h 2003-08-25 20:33:03.000000000 +0200 @@ -4,7 +4,7 @@ /* Amon Ott */ /* Data Structs etc. for Access */ /* Control Decision Facility */ -/* Last modified: 06/Sep/2001 */ +/* Last modified: 21/Nov/2002 */ /************************************ */ #ifndef __RSBAC_ADF_MAIN_H @@ -445,6 +445,40 @@ extern inline boolean rsbac_need_overwri #endif #endif /* JAIL */ +/****** RES *******/ + +#ifdef CONFIG_RSBAC_RES +#ifdef CONFIG_RSBAC_SWITCH +extern boolean rsbac_switch_res; +#endif + +extern enum rsbac_adf_req_ret_t rsbac_adf_request_res( + enum rsbac_adf_request_t, + rsbac_pid_t, + enum rsbac_target_t, + union rsbac_target_id_t, + enum rsbac_attribute_t, + union rsbac_attribute_value_t, + rsbac_uid_t); /* process owner */ + +extern int rsbac_adf_set_attr_res (enum rsbac_adf_request_t, + rsbac_pid_t, + enum rsbac_target_t, + union rsbac_target_id_t, + enum rsbac_target_t, + union rsbac_target_id_t, + enum rsbac_attribute_t, + union rsbac_attribute_value_t, + rsbac_uid_t); /* process owner */ + +#ifdef CONFIG_RSBAC_SECDEL +extern inline boolean rsbac_need_overwrite_res(struct dentry * dentry_p) + { + return FALSE; + } +#endif +#endif /* RES */ + /****** REG *******/ #if defined(CONFIG_RSBAC_REG) @@ -483,7 +517,11 @@ extern inline boolean rsbac_need_overwri #if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) /* Init */ +#ifdef CONFIG_RSBAC_INIT_DELAY +void rsbac_reg_init(void); +#else void rsbac_reg_init(void) __init; +#endif /* mounting and umounting */ extern int rsbac_mount_reg(kdev_t kdev); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/adf_syshelpers.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/adf_syshelpers.h --- 
linux-2.4.20-wolk4.8-fullkernel/include/rsbac/adf_syshelpers.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/adf_syshelpers.h 2003-08-25 20:33:03.000000000 +0200 @@ -29,21 +29,17 @@ /******* MAC ********/ #if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT) -extern int rsbac_mac_set_curr_seclevel(rsbac_security_level_t); +int rsbac_mac_set_curr_level(rsbac_security_level_t level, + rsbac_mac_category_vector_t categories); -extern rsbac_security_level_t rsbac_mac_get_curr_seclevel(void); +int rsbac_mac_get_curr_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p); -extern int rsbac_mac_set_max_seclevel(rsbac_security_level_t); +int rsbac_mac_get_max_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p); -extern rsbac_security_level_t rsbac_mac_get_max_seclevel(void); - -extern int rsbac_mac_set_curr_categories(rsbac_mac_category_vector_t categories); - -extern int rsbac_mac_get_curr_categories(rsbac_mac_category_vector_t * categories_p); - -extern int rsbac_mac_set_max_categories(rsbac_mac_category_vector_t categories); - -extern int rsbac_mac_get_max_categories(rsbac_mac_category_vector_t * categories_p); +int rsbac_mac_get_min_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p); #endif /* MAC */ @@ -115,23 +111,25 @@ int rsbac_rc_sys_get_eff_rights (enum r #if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT) /* This function is called via sys_rsbac_auth_add_p_cap() system call */ int rsbac_auth_add_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl); /* This function is called via sys_rsbac_auth_remove_p_cap() system call */ int rsbac_auth_remove_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct 
rsbac_auth_cap_range_t cap_range); /* This function is called via sys_rsbac_auth_add_f_cap() system call */ int rsbac_auth_add_f_cap(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl); /* This function is called via sys_rsbac_auth_remove_f_cap() system call */ int rsbac_auth_remove_f_cap(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range); #endif /* AUTH || AUTH_MAINT */ /****** REG *******/ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/auth.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/auth.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/auth.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/auth.h 2003-08-25 20:33:03.000000000 +0200 @@ -1,10 +1,11 @@ /************************************ */ /* Rule Set Based Access Control */ -/* Author and (c) 1999,2000: Amon Ott */ +/* Author and (c) 1999-2003: */ +/* Amon Ott */ /* API: Data structures */ /* and functions for Access */ /* Control Information / AUTH */ -/* Last modified: 19/Feb/2000 */ +/* Last modified: 16/Jan/2003 */ /************************************ */ #ifndef __RSBAC_AUTH_H @@ -25,7 +26,11 @@ /* disk. After this call, all ACI is kept in memory for performance reasons,*/ /* but user and file/dir object ACI are written to disk on every change. */ +#ifdef CONFIG_RSBAC_INIT_DELAY +extern int rsbac_init_auth(void); +#else extern int rsbac_init_auth(void) __init; +#endif /* mounting and umounting */ int rsbac_mount_auth(kdev_t kdev); @@ -56,39 +61,43 @@ extern int rsbac_write_auth(boolean); /* Add a set member to a set sublist. Set behaviour: also returns success, */ /* if member was already in set! 
*/ -int rsbac_auth_add_to_p_capset(rsbac_auth_p_cap_set_id_t id, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); +int rsbac_auth_add_to_p_capset(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl); int rsbac_auth_add_to_f_capset(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl); /* rsbac_auth_remove_from_p_capset */ /* Remove a set member from a sublist. Set behaviour: Returns no error, if */ /* member is not in list. */ -int rsbac_auth_remove_from_p_capset(rsbac_auth_p_cap_set_id_t id, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); +int rsbac_auth_remove_from_p_capset(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range); int rsbac_auth_remove_from_f_capset(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range); /* rsbac_auth_clear_p_capset */ /* Remove all set members from a sublist. Set behaviour: Returns no error, */ /* if list is empty. */ -int rsbac_auth_clear_p_capset(rsbac_auth_p_cap_set_id_t id); +int rsbac_auth_clear_p_capset(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type); + +int rsbac_auth_clear_f_capset(rsbac_auth_file_t file, + enum rsbac_auth_cap_type_t cap_type); /* rsbac_auth_p_capset_member */ /* Return truth value, whether member is in set */ -boolean rsbac_auth_p_capset_member(rsbac_auth_p_cap_set_id_t id, - rsbac_uid_t member); - -boolean rsbac_auth_f_capset_member(rsbac_auth_file_t file, +boolean rsbac_auth_p_capset_member(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type, rsbac_uid_t member); /* rsbac_auth_remove_p_capset */ @@ -96,25 +105,26 @@ boolean rsbac_auth_f_capset_member(rsba /* creating a new set, anything else returns an error. 
*/ /* To empty an existing set use rsbac_auth_clear_p_capset. */ -int rsbac_auth_remove_p_capset(rsbac_auth_p_cap_set_id_t id); +int rsbac_auth_remove_p_capsets(rsbac_pid_t pid); -int rsbac_auth_remove_f_capset(rsbac_auth_file_t id); +int rsbac_auth_remove_f_capsets(rsbac_auth_file_t file); /* rsbac_auth_copy_fp_capset */ /* copy a file capset to a process capset */ int rsbac_auth_copy_fp_capset(rsbac_auth_file_t file, - rsbac_auth_p_cap_set_id_t p_cap_set_id); + rsbac_pid_t p_cap_set_id); /* rsbac_auth_copy_pp_capset */ /* copy a process capset to another process capset */ -int rsbac_auth_copy_pp_capset(rsbac_auth_p_cap_set_id_t old_p_set_id, - rsbac_auth_p_cap_set_id_t new_p_set_id); +int rsbac_auth_copy_pp_capset(rsbac_pid_t old_p_set_id, + rsbac_pid_t new_p_set_id); /* rsbac_auth_get_f_caplist */ /* copy a file capset to an array of length 2 * maxnum (first+last), */ /* returns number of caps copied */ int rsbac_auth_get_f_caplist(rsbac_auth_file_t file, - rsbac_uid_t caplist[], - int maxnum); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t **caplist_p, + rsbac_time_t **ttllist_p); #endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/auth_data_structures.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/auth_data_structures.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/auth_data_structures.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/auth_data_structures.h 2003-08-25 20:33:03.000000000 +0200 @@ -1,8 +1,9 @@ /**************************************/ /* Rule Set Based Access Control */ -/* Author and (c) 1999,2000: Amon Ott */ +/* Author and (c) 1999-2003: */ +/* Amon Ott */ /* Data structures / AUTH */ -/* Last modified: 19/Feb/2000 */ +/* Last modified: 06/Jan/2003 */ /**************************************/ #ifndef __RSBAC_AUTH_DATA_STRUC_H @@ -16,65 +17,22 @@ /* Capability lists */ /**********************************************/ -#define RSBAC_AUTH_F_CAP_SET_VERSION 2 -#define 
RSBAC_AUTH_OLD_F_CAP_SET_VERSION 1 -#define RSBAC_AUTH_F_CAP_FILENAME "auth_fc" +#define RSBAC_AUTH_LIST_KEY 626281 -/* This list represents sets of capabilities, using a set-id and a sublist each */ - -struct rsbac_auth_cap_set_sublist_item_t - { - __u32 first_id; - __u32 last_id; - struct rsbac_auth_cap_set_sublist_item_t * prev; - struct rsbac_auth_cap_set_sublist_item_t * next; - }; - -struct rsbac_auth_old_cap_set_sublist_item_t - { - __u16 id; - struct rsbac_auth_cap_set_sublist_item_t * prev; - struct rsbac_auth_cap_set_sublist_item_t * next; - }; - -struct rsbac_auth_p_cap_set_list_item_t - { - rsbac_auth_p_cap_set_id_t id; - u_int sublist_length; - struct rsbac_auth_cap_set_sublist_item_t * sublist_head; - struct rsbac_auth_cap_set_sublist_item_t * sublist_tail; - struct rsbac_auth_p_cap_set_list_item_t * prev; - struct rsbac_auth_p_cap_set_list_item_t * next; - }; - -struct rsbac_auth_p_cap_set_list_head_t - { - struct rsbac_auth_p_cap_set_list_item_t * head; - struct rsbac_auth_p_cap_set_list_item_t * tail; - struct rsbac_auth_p_cap_set_list_item_t * curr; - rwlock_t lock; - u_int count; - }; - -struct rsbac_auth_f_cap_set_list_item_t - { - rsbac_auth_f_cap_set_id_t id; - u_int sublist_length; - struct rsbac_auth_cap_set_sublist_item_t * sublist_head; - struct rsbac_auth_cap_set_sublist_item_t * sublist_tail; - struct rsbac_auth_f_cap_set_list_item_t * prev; - struct rsbac_auth_f_cap_set_list_item_t * next; - }; - -struct rsbac_auth_f_cap_set_list_head_t - { - struct rsbac_auth_f_cap_set_list_item_t * head; - struct rsbac_auth_f_cap_set_list_item_t * tail; - struct rsbac_auth_f_cap_set_list_item_t * curr; - rwlock_t lock; - u_int count; - boolean dirty; - }; +#define RSBAC_AUTH_P_LIST_VERSION 1 +#define RSBAC_AUTH_P_LIST_NAME "authproc" +#define RSBAC_AUTH_P_EFF_LIST_NAME "authproceff" +#define RSBAC_AUTH_P_FS_LIST_NAME "authprocfs" + +#define RSBAC_AUTH_FD_FILENAME "authfd." +#define RSBAC_AUTH_FD_EFF_FILENAME "authfde." 
+#define RSBAC_AUTH_FD_FS_FILENAME "authfdf." +#define RSBAC_AUTH_NR_CAP_FD_LISTS 4 +#define RSBAC_AUTH_NR_CAP_EFF_FD_LISTS 2 +#define RSBAC_AUTH_NR_CAP_FS_FD_LISTS 2 +#define RSBAC_AUTH_FD_LIST_VERSION 1 +#define RSBAC_AUTH_FD_EFF_LIST_VERSION 1 +#define RSBAC_AUTH_FD_FS_LIST_VERSION 1 /* The list of devices is also a double linked list, so we define list */ /* items and a list head. */ @@ -85,7 +43,11 @@ struct rsbac_auth_device_list_item_t #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) u_int mount_count; #endif - struct rsbac_auth_f_cap_set_list_head_t list_head; + rsbac_list_handle_t handles[RSBAC_AUTH_NR_CAP_FD_LISTS]; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + rsbac_list_handle_t eff_handles[RSBAC_AUTH_NR_CAP_EFF_FD_LISTS]; + rsbac_list_handle_t fs_handles[RSBAC_AUTH_NR_CAP_FS_FD_LISTS]; +#endif struct rsbac_auth_device_list_item_t * prev; struct rsbac_auth_device_list_item_t * next; boolean no_write; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/cap_getname.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/cap_getname.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/cap_getname.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/cap_getname.h 2003-08-25 20:33:03.000000000 +0200 @@ -13,7 +13,10 @@ #include #ifndef __KERNEL__ -enum rsbac_rc_special_rights_t get_rc_special_right_nr(const char * name); +char * get_cap_name(char * name, + u_int value); + +int get_cap_nr(const char * name); #endif #endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/debug.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/debug.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/debug.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/debug.h 2003-08-25 20:33:03.000000000 +0200 @@ -31,7 +31,12 @@ extern rsbac_log_entry_t rsbac_log_leve extern int rsbac_no_defaults; +#ifdef CONFIG_RSBAC_INIT_DELAY +extern void rsbac_init_debug(void); +#else extern void rsbac_init_debug(void) __init; +#endif + 
extern boolean rsbac_parse_koptions(char *); #define RSBAC_WAKEUP_KEY 'w' @@ -47,6 +52,10 @@ extern int rsbac_ind_softmode[SW_NONE]; #endif #endif +#if defined(CONFIG_RSBAC_CAP_PROC_HIDE) +extern int rsbac_cap_process_hiding; +#endif + #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL extern int rsbac_dac_disable; extern int rsbac_dac_is_disabled(void); @@ -56,6 +65,11 @@ extern int rsbac_dac_is_disabled(void); extern int rsbac_nosyslog; #endif +#ifdef CONFIG_RSBAC_INIT_DELAY +extern int rsbac_delay_init; +extern kdev_t rsbac_delayed_root; +#endif + #if defined(CONFIG_RSBAC_RMSG) #define RSBAC_LOG rsbac_printk #define RSBAC_DEF_MESS_LOGLEVEL 4 @@ -65,6 +79,10 @@ extern int rsbac_log(int, char *, int); #define RSBAC_LOG printk #endif +#if defined(CONFIG_RSBAC_LOG_REMOTE) +extern rsbac_pid_t rsbaclogd_pid; +#endif + #ifdef CONFIG_RSBAC_NET extern int rsbac_debug_ds_net; extern int rsbac_debug_aef_net; @@ -87,6 +105,12 @@ int rsbac_get_adf_log(rsbac_adf_request_ extern int rsbac_debug_auto; #endif /* CONFIG_RSBAC_AUTO_WRITE > 0 */ +#if defined(CONFIG_RSBAC_MAC) +extern int rsbac_debug_ds_mac; +extern int rsbac_debug_aef_mac; +extern int rsbac_debug_adf_mac; +#endif + #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT) extern int rsbac_debug_ds_pm; extern int rsbac_debug_aef_pm; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/gen_lists.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/gen_lists.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/gen_lists.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/gen_lists.h 2003-08-25 20:33:03.000000000 +0200 @@ -25,7 +25,11 @@ /* Prototypes */ /* Init */ +#ifdef CONFIG_RSBAC_INIT_DELAY +void rsbac_list_init(void); +#else void __init rsbac_list_init(void); +#endif /* mount / umount */ int rsbac_list_mount(kdev_t kdev); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/helpers.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/helpers.h --- 
linux-2.4.20-wolk4.8-fullkernel/include/rsbac/helpers.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/helpers.h 2003-08-25 20:33:03.000000000 +0200 @@ -15,6 +15,9 @@ char * inttostr(char[], int); char * ulongtostr(char[], u_long); +/* convert u_long_long to binary string representation for MAC module */ +char * u64tostrmac(char[], __u64); + #ifndef __KERNEL__ rsbac_uid_t get_user(char * name); @@ -25,8 +28,7 @@ char * u64tostrlog(char[], __u64); /* and back */ __u64 strtou64log(char[], __u64 *); -/* covert u_long_long to binary string representation for MAC module */ -char * u64tostrmac(char[], __u64); +/* convert u_long_long to binary string representation for MAC module */ /* and back */ __u64 strtou64mac(char[], __u64 *); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/lists.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/lists.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/lists.h 2003-08-25 18:25:20.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/lists.h 2003-08-25 20:33:03.000000000 +0200 @@ -428,6 +428,23 @@ int rsbac_list_lol_exist( int rsbac_list_lol_exist_u32(rsbac_list_handle_t handle, __u32 desc); +/* + * Note: The subdesc/data value given here is always used as second parameter to the + * given subdesc compare function, so you can use different types for storage and + * lookup. If compare is NULL, call is forwarded to rsbac_list_lol_subexist. + * Warning: This function does not use the list optimization when searching the sublist! 
+ */ +int rsbac_list_lol_subexist_compare( + rsbac_list_handle_t handle, + void * desc, + void * subdesc, + rsbac_list_compare_function_t compare); + +int rsbac_list_lol_subexist_compare_u32(rsbac_list_handle_t handle, + __u32 desc, + __u32 subdesc, + rsbac_list_compare_function_t compare); + /* count number of elements */ /* returns number of elements or negative error code */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/pm.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/pm.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/pm.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/pm.h 2003-08-25 20:33:03.000000000 +0200 @@ -25,7 +25,11 @@ /* disk. After this call, all ACI is kept in memory for performance reasons,*/ /* but user and file/dir object ACI are written to disk on every change. */ +#ifdef CONFIG_RSBAC_INIT_DELAY +extern int rsbac_init_pm(void); +#else extern int rsbac_init_pm(void) __init; +#endif /* Some information about the current status is also available */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rc.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rc.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rc.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rc.h 2003-08-25 20:33:03.000000000 +0200 @@ -25,7 +25,11 @@ /* Initialization, including ACI restoration for all mounted devices from */ /* disk. 
After this call, all ACI is kept in memory for performance reasons.*/ +#ifdef CONFIG_RSBAC_INIT_DELAY +extern int rsbac_init_rc(void); +#else extern int rsbac_init_rc(void) __init; +#endif /* Some information about the current status is also available */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rc_data_structures.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rc_data_structures.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rc_data_structures.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rc_data_structures.h 2003-08-25 20:33:03.000000000 +0200 @@ -201,6 +201,17 @@ struct rsbac_rc_type_fd_entry_t RSBAC_RC_GENERAL_TYPE /* def_ipc_create_type */ \ } +#define RSBAC_RC_AUDITOR_ROLE_ENTRY \ + { \ + RC_no_admin, /* admin_type */ \ + "Auditor", /* name */ \ + RC_type_inherit_parent, /* def_fd_create_type */ \ + RC_type_inherit_parent, /* def_process_create_type */ \ + RC_type_use_new_role_def_create, /* def_process_chown_type */ \ + RC_type_inherit_process, /* def_process_execute_type */ \ + RSBAC_RC_GENERAL_TYPE /* def_ipc_create_type */ \ + } + /**********************************************/ /* Declarations */ /**********************************************/ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rc_types.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rc_types.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rc_types.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rc_types.h 2003-08-25 20:33:03.000000000 +0200 @@ -1,9 +1,9 @@ /************************************ */ /* Rule Set Based Access Control */ -/* Author and (c) 1999,2000: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* API: Data types for */ /* Role Compatibility Module */ -/* Last modified: 21/Nov/2001 */ +/* Last modified: 02/Jan/2002 */ /************************************ */ #ifndef __RSBAC_RC_TYPES_H @@ -16,6 +16,7 @@ #define RSBAC_RC_GENERAL_ROLE 0 #define 
RSBAC_RC_ROLE_ADMIN_ROLE 1 #define RSBAC_RC_SYSTEM_ADMIN_ROLE 2 +#define RSBAC_RC_AUDITOR_ROLE 3 #define RSBAC_RC_GENERAL_TYPE 0 #define RSBAC_RC_SEC_TYPE 1 #define RSBAC_RC_SYS_TYPE 2 @@ -30,6 +31,7 @@ enum rsbac_rc_special_rights_t RCR_ASSIGN, RCR_ACCESS_CONTROL, RCR_SUPERVISOR, + RCR_MODIFY_AUTH, RCR_NONE}; typedef __u64 rsbac_rc_rights_vector_t; @@ -45,7 +47,8 @@ typedef __u64 rsbac_rc_role_vector_t; RSBAC_RC_RIGHTS_VECTOR(RCR_ADMIN) | \ RSBAC_RC_RIGHTS_VECTOR(RCR_ASSIGN) | \ RSBAC_RC_RIGHTS_VECTOR(RCR_ACCESS_CONTROL) | \ - RSBAC_RC_RIGHTS_VECTOR(RCR_SUPERVISOR) \ + RSBAC_RC_RIGHTS_VECTOR(RCR_SUPERVISOR) | \ + RSBAC_RC_RIGHTS_VECTOR(RCR_MODIFY_AUTH) \ ) #define RSBAC_RC_SUPERVISOR_RIGHT_VECTOR (\ @@ -198,6 +201,46 @@ enum rsbac_rc_scd_type_t {RST_auth_admin /* 32 = RST_auth_admin */ 0, \ /* 33 = RST_none */ 0 \ } +#ifdef CONFIG_RSBAC_USER_MOD_IOPERM +#define RSBAC_RC_AUDITOR_COMP_SCD { \ + 0, \ + 0, \ + 0, \ + 0, \ + /* ST_ioports */ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA), \ + /* ST_rlimit */ -1, \ + /* ST_swap */ 0, \ + /* ST_syslog */ 0, \ + /* ST_rsbac */ 0, \ + /* ST_rsbac_log */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA), \ + /* ST_other */ ( \ + ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \ + ), \ + /* ST_kmem */ 0, \ + /* ST_network */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA), \ + /* 13 = ST_none */ 0 \ + } +#else +#define RSBAC_RC_AUDITOR_COMP_SCD { \ + 0, \ + 0, \ + 0, \ + 0, \ + 0, \ + /* ST_rlimit */ -1, \ + /* ST_swap */ 0, \ + /* ST_syslog */ 0, \ + /* ST_rsbac */ 0, \ + /* ST_rsbac_log */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA), \ + /* ST_other */ ( \ + ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \ + ), \ + /* ST_kmem */ 0, \ + /* ST_network */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA), \ + /* 13 = ST_none */ 0 \ + } +#endif + #define RC_type_inherit_process ((rsbac_rc_type_id_t) -1) 
#define RC_type_inherit_parent ((rsbac_rc_type_id_t) -2) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/request_groups.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/request_groups.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/request_groups.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/request_groups.h 2003-08-25 20:33:03.000000000 +0200 @@ -12,14 +12,12 @@ #define RSBAC_READ_REQUEST_VECTOR (\ ((rsbac_request_vector_t) 1 << R_CHDIR) | \ ((rsbac_request_vector_t) 1 << R_CLOSE) | \ - ((rsbac_request_vector_t) 1 << R_EXECUTE) | \ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \ ((rsbac_request_vector_t) 1 << R_READ) | \ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \ ((rsbac_request_vector_t) 1 << R_SEARCH) | \ - ((rsbac_request_vector_t) 1 << R_TERMINATE) | \ - ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \ + ((rsbac_request_vector_t) 1 << R_TERMINATE) \ ) #define RSBAC_WRITE_REQUEST_VECTOR (\ @@ -27,6 +25,8 @@ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \ ((rsbac_request_vector_t) 1 << R_CLONE) | \ ((rsbac_request_vector_t) 1 << R_CREATE) | \ ((rsbac_request_vector_t) 1 << R_DELETE) | \ @@ -47,6 +47,8 @@ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \ ((rsbac_request_vector_t) 1 << R_CLONE) | \ ((rsbac_request_vector_t) 1 << R_CREATE) | \ ((rsbac_request_vector_t) 1 << R_DELETE) | \ @@ -73,6 +75,12 @@ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) \ ) +#define RSBAC_EXECUTE_REQUEST_VECTOR (\ + 
((rsbac_request_vector_t) 1 << R_EXECUTE) | \ + ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \ + ) + + #define RSBAC_SYSTEM_REQUEST_VECTOR (\ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \ @@ -170,6 +178,8 @@ #define RSBAC_PROCESS_REQUEST_VECTOR (\ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \ ((rsbac_request_vector_t) 1 << R_CLONE) | \ ((rsbac_request_vector_t) 1 << R_CREATE) | \ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \ @@ -231,6 +241,8 @@ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \ + ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \ ((rsbac_request_vector_t) 1 << R_CHDIR) | \ ((rsbac_request_vector_t) 1 << R_CLONE) | \ ((rsbac_request_vector_t) 1 << R_CLOSE) | \ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/res_getname.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/res_getname.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/res_getname.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/res_getname.h 2003-08-25 20:33:03.000000000 +0200 @@ -0,0 +1,20 @@ +/********************************** */ +/* Rule Set Based Access Control */ +/* Author and (c) 2002: */ +/* Amon Ott */ +/* Getname functions for RES module */ +/* Last modified: 22/Nov/2002 */ +/********************************** */ + +#ifndef __RSBAC_RES_GETNAME_H +#define __RSBAC_RES_GETNAME_H + +#include + +#ifndef __KERNEL__ +char * get_res_name(char * name, + u_int value); +int get_res_nr(const char * name); +#endif + +#endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rkmem.h 
linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rkmem.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/rkmem.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/rkmem.h 2003-08-25 20:33:03.000000000 +0200 @@ -13,8 +13,12 @@ #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) +#ifdef CONFIG_RSBAC_INIT_DELAY +void rsbac_kmem_cache_sizes_init(void); +#else void __init rsbac_kmem_cache_sizes_init(void); #endif +#endif /* if you might need more than this, use rsbac_vkmalloc! */ #define RSBAC_MAX_KMALLOC 8192 diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/syscall_rsbac.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/syscall_rsbac.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/syscall_rsbac.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/syscall_rsbac.h 2003-08-25 20:33:03.000000000 +0200 @@ -6,7 +6,7 @@ /* */ /* System Calls */ /* */ -/* Last modified: 20/Mar/2002 */ +/* Last modified: 21/Nov/2002 */ /************************************ */ #ifndef __RSBAC_SYSCALL_RSBAC_H @@ -21,7 +21,8 @@ /* General */ -inline _syscall2(int, rsbac, +inline _syscall3(int, rsbac, + rsbac_version_t, version, enum rsbac_syscall_t, call, union rsbac_syscall_arg_t *, arg_p); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/syscalls.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/syscalls.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/syscalls.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/syscalls.h 2003-08-25 20:33:03.000000000 +0200 @@ -1,10 +1,10 @@ /************************************* */ /* Rule Set Based Access Control */ -/* Author and (c) 1999-2002: */ +/* Author and (c) 1999-2003: */ /* Amon Ott */ /* Syscall wrapper functions for all */ /* parts */ -/* Last modified: 05/Jul/2002 */ +/* Last modified: 21/Jul/2003 */ /************************************* */ #ifndef __RSBAC_SYSCALLS_H @@ -15,6 +15,9 @@ #include #include #include 
+#ifndef __KERNEL__ +#include +#endif enum rsbac_syscall_t { @@ -34,14 +37,10 @@ enum rsbac_syscall_t RSYS_get_adf_log, RSYS_write, RSYS_log, - RSYS_mac_set_curr_seclevel, - RSYS_mac_get_curr_seclevel, - RSYS_mac_set_max_seclevel, - RSYS_mac_get_max_seclevel, - RSYS_mac_set_curr_categories, - RSYS_mac_get_curr_categories, - RSYS_mac_set_max_categories, - RSYS_mac_get_max_categories, + RSYS_mac_set_curr_level, + RSYS_mac_get_curr_level, + RSYS_mac_get_max_level, + RSYS_mac_get_min_level, RSYS_stats_pm, RSYS_pm, RSYS_pm_change_current_task, @@ -69,6 +68,7 @@ enum rsbac_syscall_t RSYS_acl_group, RSYS_reg, RSYS_jail, + RSYS_init, RSYS_none }; @@ -175,33 +175,27 @@ struct rsys_log_t int len; }; -struct rsys_mac_set_curr_seclevel_t +struct rsys_mac_set_curr_level_t { rsbac_security_level_t level; - }; - -struct rsys_mac_set_max_seclevel_t - { - rsbac_security_level_t level; - }; - -struct rsys_mac_set_curr_categories_t - { rsbac_mac_category_vector_t * categories_p; }; -struct rsys_mac_get_curr_categories_t +struct rsys_mac_get_curr_level_t { + rsbac_security_level_t * level_p; rsbac_mac_category_vector_t * categories_p; }; -struct rsys_mac_set_max_categories_t +struct rsys_mac_get_max_level_t { + rsbac_security_level_t * level_p; rsbac_mac_category_vector_t * categories_p; }; -struct rsys_mac_get_max_categories_t +struct rsys_mac_get_min_level_t { + rsbac_security_level_t * level_p; rsbac_mac_category_vector_t * categories_p; }; @@ -281,36 +275,40 @@ struct rsys_rc_get_eff_rights_n_t struct rsys_auth_add_p_cap_t { rsbac_pid_t pid; - rsbac_uid_t first_uid; - rsbac_uid_t last_uid; + enum rsbac_auth_cap_type_t cap_type; + struct rsbac_auth_cap_range_t cap_range; + rsbac_time_t ttl; }; struct rsys_auth_remove_p_cap_t { rsbac_pid_t pid; - rsbac_uid_t first_uid; - rsbac_uid_t last_uid; + enum rsbac_auth_cap_type_t cap_type; + struct rsbac_auth_cap_range_t cap_range; }; struct rsys_auth_add_f_cap_t { char * filename; - rsbac_uid_t first_uid; - rsbac_uid_t last_uid; + enum 
rsbac_auth_cap_type_t cap_type; + struct rsbac_auth_cap_range_t cap_range; + rsbac_time_t ttl; }; struct rsys_auth_remove_f_cap_t { char * filename; - rsbac_uid_t first_uid; - rsbac_uid_t last_uid; + enum rsbac_auth_cap_type_t cap_type; + struct rsbac_auth_cap_range_t cap_range; }; struct rsys_auth_get_f_caplist_t { char * filename; - rsbac_uid_t * caplist; - int maxnum; + enum rsbac_auth_cap_type_t cap_type; + struct rsbac_auth_cap_range_t * caplist; + rsbac_time_t * ttllist; + u_int maxnum; }; struct rsys_acl_t @@ -391,6 +389,11 @@ struct rsys_jail_t rsbac_jail_flags_t flags; }; +struct rsys_init_t + { + char * root_dev; + }; + union rsbac_syscall_arg_t { struct rsys_check_t check; @@ -407,12 +410,10 @@ union rsbac_syscall_arg_t struct rsys_adf_log_switch_t adf_log_switch; struct rsys_get_adf_log_t get_adf_log; struct rsys_log_t log; - struct rsys_mac_set_curr_seclevel_t mac_set_curr_seclevel; - struct rsys_mac_set_max_seclevel_t mac_set_max_seclevel; - struct rsys_mac_set_curr_categories_t mac_set_curr_categories; - struct rsys_mac_get_curr_categories_t mac_get_curr_categories; - struct rsys_mac_set_max_categories_t mac_set_max_categories; - struct rsys_mac_get_max_categories_t mac_get_max_categories; + struct rsys_mac_set_curr_level_t mac_set_curr_level; + struct rsys_mac_get_curr_level_t mac_get_curr_level; + struct rsys_mac_get_max_level_t mac_get_max_level; + struct rsys_mac_get_min_level_t mac_get_min_level; struct rsys_pm_t pm; struct rsys_pm_change_current_task_t pm_change_current_task; struct rsys_pm_create_file_t pm_create_file; @@ -439,6 +440,7 @@ union rsbac_syscall_arg_t struct rsys_acl_group_t acl_group; struct rsys_reg_t reg; struct rsys_jail_t jail; + struct rsys_init_t init; int dummy; }; @@ -500,21 +502,17 @@ int rsbac_switch(enum rsbac_switch_targe /************** MAC ***************/ -int rsbac_mac_set_curr_seclevel(rsbac_security_level_t level); - -rsbac_security_level_t rsbac_mac_get_curr_seclevel(void); +int 
rsbac_mac_set_curr_level(rsbac_security_level_t level, + rsbac_mac_category_vector_t * categories_p); -int rsbac_mac_set_max_seclevel(rsbac_security_level_t level); +int rsbac_mac_get_curr_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p); -rsbac_security_level_t rsbac_mac_get_max_seclevel(void); +int rsbac_mac_get_max_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p); -int rsbac_mac_set_curr_categories(rsbac_mac_category_vector_t * categories_p); - -int rsbac_mac_get_curr_categories(rsbac_mac_category_vector_t * categories_p); - -int rsbac_mac_set_max_categories(rsbac_mac_category_vector_t * categories_p); - -int rsbac_mac_get_max_categories(rsbac_mac_category_vector_t * categories_p); +int rsbac_mac_get_min_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p); /************** PM ***************/ @@ -572,25 +570,29 @@ int rsbac_rc_get_eff_rights_n(enum rsba /* Provide means for adding and removing of capabilities */ int rsbac_auth_add_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl); int rsbac_auth_remove_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range); int rsbac_auth_add_f_cap(char * filename, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl); int rsbac_auth_remove_f_cap(char * filename, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range); -/* caplist must have space for 2 * maxnum uid entries - first and last each! */ +/* caplist must have space for maxnum cap_range entries - first and last each! 
*/ int rsbac_auth_get_f_caplist(char * filename, - rsbac_uid_t caplist[], - int maxnum); + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t caplist[], + rsbac_time_t ttllist[], + u_int maxnum); /**********************************/ /************** REG ***************/ @@ -677,6 +679,8 @@ int rsbac_log(int type, char * buf, int len); +int rsbac_init(char * root_dev); + #endif /* ifndef __KERNEL__ */ #endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/include/rsbac/types.h linux-2.4.20-wolk4.9-fullkernel/include/rsbac/types.h --- linux-2.4.20-wolk4.8-fullkernel/include/rsbac/types.h 2003-08-25 18:25:21.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/include/rsbac/types.h 2003-08-25 20:33:03.000000000 +0200 @@ -1,10 +1,10 @@ /*********************************** */ /* Rule Set Based Access Control */ -/* Author and (c)1999-2002: */ +/* Author and (c)1999-2003: */ /* Amon Ott */ /* API: Data types for attributes */ /* and standard module calls */ -/* Last modified: 23/Sep/2002 */ +/* Last modified: 25/Jul/2003 */ /*********************************** */ #ifndef __RSBAC_TYPES_H @@ -14,10 +14,10 @@ #ifdef CONFIG_MODULES #endif -#define RSBAC_VERSION "v1.2.1" +#define RSBAC_VERSION "v1.2.2" #define RSBAC_VERSION_MAJOR 1 #define RSBAC_VERSION_MID 2 -#define RSBAC_VERSION_MINOR 1 +#define RSBAC_VERSION_MINOR 2 #define RSBAC_VERSION_NR \ ((RSBAC_VERSION_MAJOR << 16) | (RSBAC_VERSION_MID << 8) | RSBAC_VERSION_MINOR) #define RSBAC_VERSION_MAKE_NR(x,y,z) \ @@ -78,7 +78,7 @@ typedef __u8 rsbac_boolean_int_t; #define RSBAC_IFNAMSIZ 16 typedef u_char rsbac_netdev_id_t[RSBAC_IFNAMSIZ + 1]; -#define RSBAC_SEC_DEL_CHUNK_SIZE 4096 +#define RSBAC_SEC_DEL_CHUNK_SIZE 65536 /* Adjust these, if you have to, but if you do, adjust them all! */ /* Note: no / allowed, file must be exactly in second level! */ @@ -91,8 +91,6 @@ typedef u_char rsbac_netdev_id_t[RSBAC_I /* Any change to RSBAC data will NOT modify any other linux data, */ /* e.g. 
userlists, process lists or inodes. */ -/* The first enum member is treated as default value. */ - typedef __u32 rsbac_version_t; typedef __u32 rsbac_uid_t; /* Same as user in Linux kernel */ typedef __u32 rsbac_gid_t; /* Same as group in Linux kernel */ @@ -115,12 +113,20 @@ typedef __u8 rsbac_enum_t; /* internally #endif #define RSBAC_DATAPROT_UID (RSBAC_SECOFF_UID+1) #define RSBAC_TPMAN_UID (RSBAC_SECOFF_UID+2) +#define RSBAC_AUDITOR_UID (RSBAC_SECOFF_UID+4) typedef __u32 rsbac_pseudo_t; /* For Pseudonymic Logging */ typedef __u32 rsbac_pid_t; /* Same as pid in Linux */ + +enum rsbac_cap_process_hiding_t {PH_off, PH_from_other_users, PH_full, + PH_none}; +typedef rsbac_enum_t rsbac_cap_process_hiding_int_t; + + typedef __u8 rsbac_security_level_t; #define SL_max 252 -#define SL_rsbac_internal 253 +#define SL_min 0 +// #define SL_rsbac_internal 253 #define SL_inherit 254 #define SL_none 255 enum rsbac_old_security_level_t {SL_unclassified, SL_confidential, SL_secret, @@ -146,7 +152,7 @@ typedef u_int rsbac_cwi_relation_id_t; /* For MAC, FC, SIM, FF, AUTH */ enum rsbac_system_role_t {SR_user, SR_security_officer, SR_administrator, - SR_inherit, SR_none}; + SR_auditor, SR_none}; typedef rsbac_enum_t rsbac_system_role_int_t; /* For FC */ @@ -221,6 +227,36 @@ typedef __u64 rsbac_request_vector_t; /* The max length of each filename is kept in a macro */ #define RSBAC_MAXNAMELEN 256 +/* MAC */ + +typedef __u8 rsbac_mac_user_flags_t; +typedef __u16 rsbac_mac_process_flags_t; +typedef __u8 rsbac_mac_file_flags_t; + +#define MAC_override 1 +#define MAC_auto 2 +#define MAC_trusted 4 +#define MAC_write_up 8 +#define MAC_read_up 16 +#define MAC_write_down 32 +#define MAC_allow_auto 64 +#define MAC_prop_trusted 128 +#define MAC_program_auto 256 + +#define RSBAC_MAC_U_FLAGS (MAC_override | MAC_trusted | MAC_write_up | MAC_read_up | MAC_write_down | MAC_allow_auto) +#define RSBAC_MAC_P_FLAGS (MAC_override | MAC_auto | MAC_trusted | MAC_write_up | MAC_read_up | MAC_write_down | 
MAC_prop_trusted | MAC_program_auto) +#define RSBAC_MAC_F_FLAGS (MAC_auto | MAC_trusted | MAC_write_up | MAC_read_up | MAC_write_down) + +#define RSBAC_MAC_DEF_U_FLAGS 0 +#define RSBAC_MAC_DEF_SYSADM_U_FLAGS MAC_allow_auto +#define RSBAC_MAC_DEF_SECOFF_U_FLAGS MAC_override + +#define RSBAC_MAC_DEF_P_FLAGS 0 +#define RSBAC_MAC_DEF_INIT_P_FLAGS MAC_auto + +typedef rsbac_enum_t rsbac_mac_auto_int_t; +enum rsbac_mac_auto_t {MA_no, MA_yes, MA_inherit}; + /* PM */ #include @@ -234,6 +270,14 @@ typedef __u32 rsbac_ms_scanned_t; #define MS_scanning 4 #define DEFAULT_MS_FD_SCANNED MS_unscanned +#define MS_need_scan_no 0 +#define MS_need_scan_exec 1 +#define MS_need_scan_full 2 +#define MS_need_scan_inherit 3 +#define DEFAULT_MS_FD_NEED_SCAN MS_need_scan_inherit +#define DEFAULT_MS_ROOT_DIR_NEED_SCAN MS_need_scan_no +typedef rsbac_enum_t rsbac_ms_need_scan_t; + enum rsbac_ms_trusted_t {MT_not_trusted, MT_read, MT_full, MT_none}; typedef rsbac_enum_t rsbac_ms_trusted_int_t; @@ -264,6 +308,7 @@ typedef __u16 rsbac_ff_flags_t; #define FF_no_execute 32 #define FF_no_delete_or_rename 64 #define FF_append_only 256 +#define FF_no_mount 512 #define FF_add_inherited 128 @@ -276,11 +321,18 @@ typedef __u16 rsbac_ff_flags_t; /**** AUTH ****/ /* special cap value, replaced by process owner at execute time */ +#define RSBAC_AUTH_MAX_MAXNUM 1000000 #define RSBAC_AUTH_OLD_OWNER_F_CAP (rsbac_old_uid_t) -3 #define RSBAC_AUTH_OWNER_F_CAP (rsbac_uid_t) -3 -typedef rsbac_pid_t rsbac_auth_p_cap_set_id_t; +#define RSBAC_AUTH_MAX_RANGE_UID (rsbac_uid_t) -10 typedef struct rsbac_fs_file_t rsbac_auth_file_t; -typedef __u32 rsbac_auth_f_cap_set_id_t; +struct rsbac_auth_cap_range_t + { + rsbac_uid_t first; + rsbac_uid_t last; + }; +enum rsbac_auth_cap_type_t {ACT_real, ACT_eff, ACT_fs, ACT_none}; +typedef rsbac_enum_t rsbac_auth_cap_type_int_t; /**** ACL ****/ /* include at end of types.h */ @@ -304,6 +356,19 @@ typedef __u32 rsbac_jail_flags_t; #define JAIL_allow_rlimit 4 #define JAIL_allow_inet_raw 
8 #define JAIL_auto_adjust_inet_any 16 +#define JAIL_allow_inet_localhost 32 + +#define RSBAC_JAIL_LOCALHOST ((1 << 24) | 127) + +/**** RES ****/ + +typedef __u32 rsbac_res_limit_t; +#define RSBAC_RES_UNSET 0 + +#define RSBAC_RES_MAX 10 /* RLIMIT_LOCKS in 2.4.x kernels */ +#define RSBAC_RES_NONE 11 + +typedef rsbac_res_limit_t rsbac_res_array_t[RSBAC_RES_MAX + 1]; /**** REG ****/ typedef __s32 rsbac_reg_handle_t; @@ -377,6 +442,8 @@ enum rsbac_adf_request_t { R_SEND, R_RECEIVE, R_NET_SHUTDOWN, + R_CHANGE_DAC_EFF_OWNER, + R_CHANGE_DAC_FS_OWNER, R_NONE }; @@ -394,8 +461,8 @@ enum rsbac_adf_req_ret_t {NOT_GRANTED,G /****************************************************************************/ /* For switching adf-modules */ -enum rsbac_switch_target_t {GEN,MAC,FC,SIM,PM,MS,FF,RC,AUTH,REG,ACL,CAP,JAIL,SOFTMODE,DAC_DISABLE,SW_NONE}; -#define RSBAC_MAX_MOD CAP +enum rsbac_switch_target_t {GEN,MAC,FC,SIM,PM,MS,FF,RC,AUTH,REG,ACL,CAP,JAIL,RES,SOFTMODE,DAC_DISABLE,SW_NONE}; +#define RSBAC_MAX_MOD (SOFTMODE - 1) typedef rsbac_enum_t rsbac_switch_target_int_t; /****************************************************************************/ @@ -445,11 +512,18 @@ enum rsbac_attribute_t { A_pseudo, A_security_level, + A_initial_security_level, A_local_sec_level, A_remote_sec_level, + A_min_security_level, A_mac_categories, + A_mac_initial_categories, A_local_mac_categories, A_remote_mac_categories, + A_mac_min_categories, + A_mac_user_flags, + A_mac_process_flags, + A_mac_file_flags, A_object_category, A_local_object_category, A_remote_object_category, @@ -472,9 +546,9 @@ enum rsbac_attribute_t A_max_read_open, A_max_read_categories, A_mac_auto, - A_mac_trusted, A_mac_trusted_for_user, A_mac_check, + A_mac_prop_trusted, A_pm_role, A_pm_process_type, A_pm_current_task, @@ -498,6 +572,7 @@ enum rsbac_attribute_t A_ms_str_offset, A_ms_sock_trusted_tcp, A_ms_sock_trusted_udp, + A_ms_need_scan, A_ff_flags, A_rc_type, A_local_rc_type, @@ -515,6 +590,9 @@ enum rsbac_attribute_t 
A_jail_id, A_jail_ip, A_jail_flags, + A_res_role, + A_res_min, + A_res_max, A_log_array_low, A_local_log_array_low, A_remote_log_array_low, @@ -524,8 +602,10 @@ enum rsbac_attribute_t A_log_program_based, A_log_user_based, A_symlink_add_uid, + A_symlink_add_mac_level, A_symlink_add_rc_role, A_linux_dac_disable, + A_cap_process_hiding, #ifdef __KERNEL__ /* adf-request helpers */ A_owner, @@ -545,6 +625,9 @@ enum rsbac_attribute_t A_internal, /* used with CREATE on DIR */ A_create_data, + A_new_object, + A_rlimit, + A_new_dir_dentry_p, #endif A_none}; @@ -562,10 +645,13 @@ union rsbac_attribute_value_t rsbac_security_level_t current_sec_level; rsbac_security_level_t min_write_open; rsbac_security_level_t max_read_open; - boolean mac_auto; - boolean mac_trusted; + rsbac_mac_user_flags_t mac_user_flags; + rsbac_mac_process_flags_t mac_process_flags; + rsbac_mac_file_flags_t mac_file_flags; rsbac_uid_t mac_trusted_for_user; + rsbac_mac_auto_int_t mac_auto; boolean mac_check; + boolean mac_prop_trusted; rsbac_pm_role_int_t pm_role; rsbac_pm_process_type_int_t pm_process_type; rsbac_pm_task_id_t pm_current_task; @@ -583,6 +669,7 @@ union rsbac_attribute_value_t int ms_str_offset[RSBAC_MS_NR_MALWARE]; rsbac_ms_sock_trusted_int_t ms_sock_trusted_tcp; rsbac_ms_sock_trusted_int_t ms_sock_trusted_udp; + rsbac_ms_need_scan_t ms_need_scan; rsbac_ff_flags_t ff_flags; rsbac_rc_type_id_t rc_type; rsbac_rc_type_id_t rc_type_fd; @@ -592,21 +679,24 @@ union rsbac_attribute_value_t rsbac_rc_role_id_t rc_def_role; boolean auth_may_setuid; boolean auth_may_set_cap; - rsbac_auth_p_cap_set_id_t auth_p_capset; - rsbac_auth_f_cap_set_id_t auth_f_capset; + rsbac_pid_t auth_p_capset; + rsbac_inode_nr_t auth_f_capset; rsbac_cap_vector_t min_caps; rsbac_cap_vector_t max_caps; rsbac_jail_id_t jail_id; rsbac_jail_ip_t jail_ip; rsbac_jail_flags_t jail_flags; + rsbac_res_array_t res_array; rsbac_log_array_t log_array_low; rsbac_log_array_t log_array_high; rsbac_request_vector_t log_program_based; 
rsbac_request_vector_t log_user_based; boolean symlink_add_uid; + boolean symlink_add_mac_level; boolean symlink_add_rc_role; rsbac_linux_dac_disable_int_t linux_dac_disable; - rsbac_net_temp_id_t net_temp; +// rsbac_net_temp_id_t net_temp; + rsbac_cap_process_hiding_int_t cap_process_hiding; #ifdef __KERNEL__ rsbac_gid_t group; /* process/fd group */ struct sockaddr * sockaddr_p; /* socket-ipc address */ @@ -618,11 +708,15 @@ union rsbac_attribute_value_t enum rsbac_adf_request_t request; /* for SWITCH_LOG */ struct ms_segment_t ms_segment; long trace_request; /* request for sys_trace */ - rsbac_uid_t auth_cap; + struct rsbac_auth_cap_range_t auth_cap_range; int prot_bits;/* prot bits for mmap()/mprotect() */ boolean internal; /* used with CREATE on DIR */ struct rsbac_create_data_t create_data; + /* newly created object in OPEN requests? */ + boolean new_object; + u_int rlimit; + struct dentry * new_dir_dentry_p; #endif u_char u_char_dummy; u_short u_short_dummy; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/init/main.c linux-2.4.20-wolk4.9-fullkernel/init/main.c --- linux-2.4.20-wolk4.8-fullkernel/init/main.c 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/init/main.c 2003-08-29 12:07:38.000000000 +0200 @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -553,9 +554,6 @@ asmlinkage void __init start_kernel(void #ifdef CONFIG_PROC_FS proc_root_init(); #endif -#if defined(CONFIG_SYSVIPC) - ipc_init(); -#endif check_bugs(); printk("POSIX conformance testing by UNIFIX\n"); @@ -627,6 +625,10 @@ asmlinkage void __init start_kernel(void */ smp_init(); +#if defined(CONFIG_SYSVIPC) + ipc_init(); +#endif + #ifdef CONFIG_X86_REMOTE_DEBUG if (gdb_enter) gdb_hook(); /* right at boot time */ @@ -752,6 +754,7 @@ extern void prepare_namespace(void); static int init(void * unused) { + struct files_struct *files; lock_kernel(); do_basic_setup(); @@ -787,7 +790,17 @@ static int init(void * unused) */ free_initmem(); 
unlock_kernel(); - + + /* + * Right now we are a thread sharing with a ton of kernel + * stuff. We don't want to end up in user space in that state + */ + + files = current->files; + if(unshare_files()) + panic("unshare"); + put_files_struct(files); + if (open("/dev/console", O_RDWR, 0) < 0) printk("Warning: unable to open an initial console.\n"); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/init/version.c linux-2.4.20-wolk4.9-fullkernel/init/version.c --- linux-2.4.20-wolk4.8-fullkernel/init/version.c 2003-08-25 18:27:13.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/init/version.c 2003-08-25 20:36:17.000000000 +0200 @@ -26,7 +26,7 @@ struct new_utsname system_utsname = { }; const char *wolk_banner = - "WOLK - Working Overloaded Linux Kernel - v4.8 - Server Edition""\n\n"; + "WOLK - Working Overloaded Linux Kernel - v4.9 - Server Edition""\n\n"; const char *linux_banner = "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" diff -Naurp linux-2.4.20-wolk4.8-fullkernel/kernel/kmod.c linux-2.4.20-wolk4.9-fullkernel/kernel/kmod.c --- linux-2.4.20-wolk4.8-fullkernel/kernel/kmod.c 2002-08-03 02:39:46.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/kernel/kmod.c 2003-08-25 20:35:56.000000000 +0200 @@ -119,15 +119,8 @@ int exec_usermodehelper(char *program_pa if (curtask->files->fd[i]) close(i); } - /* Drop the "current user" thing */ - { - struct user_struct *user = curtask->user; - curtask->user = INIT_USER; - atomic_inc(&INIT_USER->__count); - atomic_inc(&INIT_USER->processes); - atomic_dec(&user->processes); - free_uid(user); - } + /* Become root */ + set_user(0, 0); /* Give kmod all effective privileges.. 
*/ curtask->euid = curtask->fsuid = 0; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/kernel/ksyms.c linux-2.4.20-wolk4.9-fullkernel/kernel/ksyms.c --- linux-2.4.20-wolk4.8-fullkernel/kernel/ksyms.c 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/kernel/ksyms.c 2003-08-25 20:35:56.000000000 +0200 @@ -111,6 +111,7 @@ EXPORT_SYMBOL(exit_mm); EXPORT_SYMBOL(exit_files); EXPORT_SYMBOL(exit_fs); EXPORT_SYMBOL(exit_sighand); +EXPORT_SYMBOL(unshare_files); /* internal kernel memory management */ EXPORT_SYMBOL(__alloc_pages); @@ -598,8 +599,10 @@ EXPORT_SYMBOL(seq_open); EXPORT_SYMBOL(seq_release); EXPORT_SYMBOL(seq_read); EXPORT_SYMBOL(seq_lseek); +EXPORT_SYMBOL(seq_path); EXPORT_SYMBOL(single_open); EXPORT_SYMBOL(single_release); +EXPORT_SYMBOL(seq_release_private); extern int disable_all_usb; EXPORT_SYMBOL(disable_all_usb); EXPORT_SYMBOL_NOVERS(__guard); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/kernel/panic.c linux-2.4.20-wolk4.9-fullkernel/kernel/panic.c --- linux-2.4.20-wolk4.8-fullkernel/kernel/panic.c 2003-08-25 18:26:57.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/kernel/panic.c 2003-08-27 22:47:31.000000000 +0200 @@ -205,6 +205,7 @@ NORET_TYPE void panic(const char * fmt, #endif #ifdef CONFIG_VT + extern void disable_console_blank(void); disable_console_blank(); #endif machine_paniced = 1; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/kernel/sys.c linux-2.4.20-wolk4.9-fullkernel/kernel/sys.c --- linux-2.4.20-wolk4.8-fullkernel/kernel/sys.c 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/kernel/sys.c 2003-08-25 20:35:56.000000000 +0200 @@ -732,9 +732,10 @@ static inline void cap_emulate_setxuid(i } } -static int set_user(uid_t new_ruid, int dumpclear) +int set_user(uid_t new_ruid, int dumpclear) { struct user_struct *new_user, *old_user; + struct task_struct *this_task = current; /* What if a process setreuid()'s and this brings the * new uid over his NPROC rlimit? 
We can check this now @@ -748,17 +749,16 @@ static int set_user(uid_t new_ruid, int #endif /* CONFIG_SCONTEXTS */ if (!new_user) return -EAGAIN; - old_user = current->user; - atomic_dec(&old_user->processes); + old_user = this_task->user; atomic_inc(&new_user->processes); + atomic_dec(&old_user->processes); - if(dumpclear) - { - current->mm->dumpable = 0; + if (dumpclear && this_task->mm) { + this_task->mm->dumpable = 0; wmb(); } - current->uid = new_ruid; - current->user = new_user; + this_task->uid = new_ruid; + this_task->user = new_user; free_uid(old_user); return 0; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/kernel/sysctl.c linux-2.4.20-wolk4.9-fullkernel/kernel/sysctl.c --- linux-2.4.20-wolk4.8-fullkernel/kernel/sysctl.c 2003-08-25 18:26:48.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/kernel/sysctl.c 2003-08-25 23:40:47.000000000 +0200 @@ -32,7 +32,6 @@ #include #include #include -#include #ifdef CONFIG_KDB #include @@ -141,11 +140,6 @@ int proc_dol2crvec(ctl_table *table, int extern int acct_parm[]; #endif -#ifdef CONFIG_MAGIC_SYSRQ -int magickey; /* key value for magic sysrq handler */ -extern int fg_console; -#endif - extern int pgt_cache_water[]; static int parse_table(int *, int, void *, size_t *, void *, size_t, @@ -298,8 +292,6 @@ static ctl_table kern_table[] = { #ifdef CONFIG_MAGIC_SYSRQ {KERN_SYSRQ, "sysrq", &sysrq_enabled, sizeof (int), 0644, NULL, &proc_dointvec}, - {KERN_MAGICKEY, "magickey", &magickey, sizeof (int), - 0600, NULL, &proc_do_magic_sysrq}, #endif #if defined(__i386__) && defined(CONFIG_KMSGDUMP) {KERN_KMSGDUMP, "kmsgdump", &kmsgdump_flags, 16, @@ -1349,61 +1341,6 @@ int proc_dointvec(ctl_table *table, int return do_proc_dointvec(table,write,filp,buffer,lenp,1,OP_SET); } -#ifdef CONFIG_MAGIC_SYSRQ -/** - * proc_do_magic_sysrq - handle a magic sysrq key - * @table: the sysctl table - * @write: %TRUE if this is a write to the sysctl file - * @filp: the file structure - * @buffer: the user buffer - * @lenp: the size of the 
user buffer - * - * Reads/writes up to table->maxlen/sizeof(unsigned int) integer - * values from/to the user buffer, treated as an ASCII string. - * - * Returns 0 on success. - */ -extern struct pt_regs *kbd_pt_regs; -extern struct kbd_struct kbd_table[MAX_NR_CONSOLES]; -extern struct tty_struct **ttytab; - -int proc_do_magic_sysrq(ctl_table *table, int write, struct file *filp, - void *buffer, size_t *lenp) -{ - int ret; - char ch; - struct tty_struct *tty; - - ret = do_proc_dointvec(table, write, filp, buffer, lenp, 1, OP_SET); - if ((ret && ret != -EINVAL) || !write) { - return ret; - } - - /* get the key value: first char in buffer */ - if (write && lenp) { - if (get_user(ch, (char *) buffer)) { - return -EFAULT; - } - } - - tty = ttytab ? ttytab[fg_console] : NULL; - if (tty && (!tty->driver_data)) { - /* - * We touch the tty structure via the ttytab array - * without knowing whether or not tty is open, which - * is inherently dangerous. We currently rely on that - * fact that console_open sets tty->driver_data when - * it opens it, and clears it when it closes it. - */ - tty = NULL; - } - - handle_sysrq((int)ch, kbd_pt_regs, kbd_table, tty); - return 0; -} -#endif - - /* * init may raise the set. 
*/ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/mm/filemap.c linux-2.4.20-wolk4.9-fullkernel/mm/filemap.c --- linux-2.4.20-wolk4.8-fullkernel/mm/filemap.c 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/mm/filemap.c 2003-08-25 20:35:56.000000000 +0200 @@ -658,10 +658,13 @@ static inline void __add_to_page_cache(s struct address_space *mapping, unsigned long offset, struct page **hash) { - unsigned long flags; - - flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked); - page->flags = flags | (1 << PG_locked); + ClearPageUptodate(page); + ClearPageError(page); + ClearPageDirty(page); + ClearPageReferenced(page); + ClearPageArch1(page); + ClearPageChecked(page); + LockPage(page); page_cache_get(page); page->index = offset; add_page_to_inode_queue(mapping, page); @@ -3176,7 +3179,7 @@ static int precheck_file_write(struct fi } /* FIXME: this is for backwards compatibility with 2.4 */ - if (!S_ISBLK(inode->i_mode) && file->f_flags & O_APPEND) + if (!S_ISBLK(inode->i_mode) && (file->f_flags & O_APPEND)) *ppos = pos = inode->i_size; /* @@ -3419,7 +3422,7 @@ generic_direct_write(struct file *file,c if (err != 0 || count == 0) goto out; - if (!file->f_flags & O_DIRECT) + if (!(file->f_flags & O_DIRECT)) BUG(); written = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos); @@ -3436,7 +3439,7 @@ generic_direct_write(struct file *file,c * Sync the fs metadata but not the minor inode changes and * of course not the data as we did direct DMA for the IO. */ - if (written >= 0 && file->f_flags & O_SYNC) + if (written >= 0 && (file->f_flags & O_SYNC)) status = generic_osync_inode(inode, OSYNC_METADATA); err = written ? 
written : status; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/mm/highmem.c linux-2.4.20-wolk4.9-fullkernel/mm/highmem.c --- linux-2.4.20-wolk4.8-fullkernel/mm/highmem.c 2003-08-25 18:26:34.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/mm/highmem.c 2003-08-25 20:35:56.000000000 +0200 @@ -451,6 +451,7 @@ repeat_alloc: /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); iteration++; goto repeat_alloc; @@ -516,6 +517,7 @@ repeat_alloc: /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); iteration++; goto repeat_alloc; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/mm/memory.c linux-2.4.20-wolk4.9-fullkernel/mm/memory.c --- linux-2.4.20-wolk4.8-fullkernel/mm/memory.c 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/mm/memory.c 2003-08-25 20:36:21.000000000 +0200 @@ -967,17 +967,18 @@ static inline void zeromap_pte_range(pte static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size, pgprot_t prot) { - unsigned long end; + unsigned long base, end; + base = address & PGDIR_MASK; address &= ~PGDIR_MASK; end = address + size; if (end > PGDIR_SIZE) end = PGDIR_SIZE; do { - pte_t * pte = pte_alloc_map(mm, pmd, address); + pte_t * pte = pte_alloc_map(mm, pmd, base + address); if (!pte) return -ENOMEM; - zeromap_pte_range(pte, address, end - address, prot); + zeromap_pte_range(pte, base + address, end - address, prot); pte_unmap(pte); address = (address + PMD_SIZE) & PMD_MASK; pmd++; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/mm/page_alloc.c linux-2.4.20-wolk4.9-fullkernel/mm/page_alloc.c --- linux-2.4.20-wolk4.8-fullkernel/mm/page_alloc.c 2003-08-25 18:26:57.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/mm/page_alloc.c 2003-08-25 20:36:21.000000000 +0200 @@ -184,7 +184,9 @@ static void __free_pages_ok (struct page remove_hidden_pages(page, order); #endif - page->flags &= 
~((1<val) { - unsigned long flags; - /* Look it up and read it in.. */ page = lookup_swap_cache(*entry); if (!page) { @@ -729,7 +727,13 @@ repeat: *entry = (swp_entry_t) {0}; delete_from_swap_cache(page); /* delete_from_swap_cache marks the page dirty */ - page->flags &= ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced) | (1 << PG_arch_1)); + + ClearPageUptodate(page); + ClearPageError(page); + ClearPageReferenced(page); + ClearPageArch1(page); + SetPageDirty(page); + add_to_page_cache_locked(page, mapping, idx); info->swapped--; spin_unlock (&info->lock); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/ipv4/af_inet.c linux-2.4.20-wolk4.9-fullkernel/net/ipv4/af_inet.c --- linux-2.4.20-wolk4.8-fullkernel/net/ipv4/af_inet.c 2003-08-25 18:25:26.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/net/ipv4/af_inet.c 2003-08-25 20:35:30.000000000 +0200 @@ -1199,6 +1199,7 @@ inet_unregister_protosw(struct inet_prot } } +extern void ipfrag_init(void); /* * Called by socket.c on kernel startup. @@ -1306,6 +1307,9 @@ static int __init inet_init(void) proc_net_create ("tcp", 0, tcp_get_info); proc_net_create ("udp", 0, udp_get_info); #endif /* CONFIG_PROC_FS */ + + ipfrag_init(); + return 0; } module_init(inet_init); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/ipv4/ip_fragment.c linux-2.4.20-wolk4.9-fullkernel/net/ipv4/ip_fragment.c --- linux-2.4.20-wolk4.8-fullkernel/net/ipv4/ip_fragment.c 2002-02-25 20:38:14.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/ipv4/ip_fragment.c 2003-08-25 20:35:30.000000000 +0200 @@ -19,6 +19,7 @@ * Bill Hawes : Frag accounting and evictor fixes. * John McDonald : 0 length frag bug. * Alexey Kuznetsov: SMP races, threading, cleanup. + * Patrick McHardy : LRU queue of frag heads for evictor. 
*/ #include @@ -26,9 +27,12 @@ #include #include #include +#include #include #include #include +#include +#include #include #include #include @@ -67,6 +71,7 @@ struct ipfrag_skb_cb /* Describe an entry in the "incomplete datagrams" queue. */ struct ipq { struct ipq *next; /* linked list pointers */ + struct list_head lru_list; /* lru list member */ u32 saddr; u32 daddr; u16 id; @@ -94,6 +99,8 @@ struct ipq { /* Per-bucket lock is easy to add now. */ static struct ipq *ipq_hash[IPQ_HASHSZ]; static rwlock_t ipfrag_lock = RW_LOCK_UNLOCKED; +static u32 ipfrag_hash_rnd; +static LIST_HEAD(ipq_lru_list); int ip_frag_nqueues = 0; static __inline__ void __ipq_unlink(struct ipq *qp) @@ -101,6 +108,7 @@ static __inline__ void __ipq_unlink(stru if(qp->next) qp->next->pprev = qp->pprev; *qp->pprev = qp->next; + list_del(&qp->lru_list); ip_frag_nqueues--; } @@ -111,21 +119,51 @@ static __inline__ void ipq_unlink(struct write_unlock(&ipfrag_lock); } -/* - * Was: ((((id) >> 1) ^ (saddr) ^ (daddr) ^ (prot)) & (IPQ_HASHSZ - 1)) - * - * I see, I see evil hand of bigendian mafia. On Intel all the packets hit - * one hash bucket with this hash function. 
8) - */ -static __inline__ unsigned int ipqhashfn(u16 id, u32 saddr, u32 daddr, u8 prot) +static unsigned int ipqhashfn(u16 id, u32 saddr, u32 daddr, u8 prot) { - unsigned int h = saddr ^ daddr; - - h ^= (h>>16)^id; - h ^= (h>>8)^prot; - return h & (IPQ_HASHSZ - 1); + return jhash_3words((u32)id << 16 | prot, saddr, daddr, + ipfrag_hash_rnd) & (IPQ_HASHSZ - 1); } +static struct timer_list ipfrag_secret_timer; +int sysctl_ipfrag_secret_interval = 10 * 60 * HZ; + +static void ipfrag_secret_rebuild(unsigned long dummy) +{ + unsigned long now = jiffies; + int i; + + write_lock(&ipfrag_lock); + get_random_bytes(&ipfrag_hash_rnd, sizeof(u32)); + for (i = 0; i < IPQ_HASHSZ; i++) { + struct ipq *q; + + q = ipq_hash[i]; + while (q) { + struct ipq *next = q->next; + unsigned int hval = ipqhashfn(q->id, q->saddr, + q->daddr, q->protocol); + + if (hval != i) { + /* Unlink. */ + if (q->next) + q->next->pprev = q->pprev; + *q->pprev = q->next; + + /* Relink to new hash chain. */ + if ((q->next = ipq_hash[hval]) != NULL) + q->next->pprev = &q->next; + ipq_hash[hval] = q; + q->pprev = &ipq_hash[hval]; + } + + q = next; + } + } + write_unlock(&ipfrag_lock); + + mod_timer(&ipfrag_secret_timer, now + sysctl_ipfrag_secret_interval); +} atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */ @@ -202,39 +240,30 @@ static __inline__ void ipq_kill(struct i */ static void ip_evictor(void) { - int i, progress; + struct ipq *qp; + struct list_head *tmp; - do { + for(;;) { if (atomic_read(&ip_frag_mem) <= sysctl_ipfrag_low_thresh) return; - progress = 0; - /* FIXME: Make LRU queue of frag heads. 
-DaveM */ - for (i = 0; i < IPQ_HASHSZ; i++) { - struct ipq *qp; - if (ipq_hash[i] == NULL) - continue; - - read_lock(&ipfrag_lock); - if ((qp = ipq_hash[i]) != NULL) { - /* find the oldest queue for this hash bucket */ - while (qp->next) - qp = qp->next; - atomic_inc(&qp->refcnt); - read_unlock(&ipfrag_lock); - - spin_lock(&qp->lock); - if (!(qp->last_in&COMPLETE)) - ipq_kill(qp); - spin_unlock(&qp->lock); - - ipq_put(qp); - IP_INC_STATS_BH(IpReasmFails); - progress = 1; - continue; - } + read_lock(&ipfrag_lock); + if (list_empty(&ipq_lru_list)) { read_unlock(&ipfrag_lock); + return; } - } while (progress); + tmp = ipq_lru_list.next; + qp = list_entry(tmp, struct ipq, lru_list); + atomic_inc(&qp->refcnt); + read_unlock(&ipfrag_lock); + + spin_lock(&qp->lock); + if (!(qp->last_in&COMPLETE)) + ipq_kill(qp); + spin_unlock(&qp->lock); + + ipq_put(qp); + IP_INC_STATS_BH(IpReasmFails); + } } /* @@ -302,6 +331,8 @@ static struct ipq *ip_frag_intern(unsign qp->next->pprev = &qp->next; ipq_hash[hash] = qp; qp->pprev = &ipq_hash[hash]; + INIT_LIST_HEAD(&qp->lru_list); + list_add_tail(&qp->lru_list, &ipq_lru_list); ip_frag_nqueues++; write_unlock(&ipfrag_lock); return qp; @@ -496,6 +527,10 @@ static void ip_frag_queue(struct ipq *qp if (offset == 0) qp->last_in |= FIRST_IN; + write_lock(&ipfrag_lock); + list_move_tail(&qp->lru_list, &ipq_lru_list); + write_unlock(&ipfrag_lock); + return; err: @@ -629,3 +664,14 @@ struct sk_buff *ip_defrag(struct sk_buff kfree_skb(skb); return NULL; } + +void ipfrag_init(void) +{ + ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ + (jiffies ^ (jiffies >> 6))); + + init_timer(&ipfrag_secret_timer); + ipfrag_secret_timer.function = ipfrag_secret_rebuild; + ipfrag_secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval; + add_timer(&ipfrag_secret_timer); +} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/ipv4/netfilter/Config.in linux-2.4.20-wolk4.9-fullkernel/net/ipv4/netfilter/Config.in --- 
linux-2.4.20-wolk4.8-fullkernel/net/ipv4/netfilter/Config.in 2003-08-25 18:26:49.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/net/ipv4/netfilter/Config.in 2003-08-25 23:52:45.000000000 +0200 @@ -9,7 +9,7 @@ comment ' to use all below successfully comment ' Vanilla Netfilter stuff does _not_ need this package!' comment ' See http://wolk.sf.net/iptables for the packages.' -tristate 'Connection tracking (required for masq/NAT)' CONFIG_IP_NF_CONNTRACK +tristate 'Connection tracking (required for masq/NAT + layer7)' CONFIG_IP_NF_CONNTRACK if [ "$CONFIG_IP_NF_CONNTRACK" != "n" ]; then dep_tristate ' FTP protocol support' CONFIG_IP_NF_FTP $CONFIG_IP_NF_CONNTRACK dep_tristate ' H.323 (netmeeting) support' CONFIG_IP_NF_H323 $CONFIG_IP_NF_CONNTRACK diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/ipv4/proc.c linux-2.4.20-wolk4.9-fullkernel/net/ipv4/proc.c --- linux-2.4.20-wolk4.8-fullkernel/net/ipv4/proc.c 2002-11-29 00:53:15.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/ipv4/proc.c 2003-08-25 20:35:30.000000000 +0200 @@ -192,7 +192,7 @@ int netstat_get_info(char *buffer, char " TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv" " TCPAbortOnSyn TCPAbortOnData TCPAbortOnClose" " TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger" - " TCPAbortFailed TCPMemoryPressures\n" + " TCPAbortFailed TCPMemoryPressures TCPShortIcmp\n" "TcpExt:"); for (i=0; ilen < (iph->ihl << 2) + 8) { - ICMP_INC_STATS_BH(IcmpInErrors); - return; + NET_INC_STATS_BH(TCPShortIcmp); + if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED || + sysctl_tcp_secure_pmtu || skb->len < (iph->ihl<<2) + 4) { + ICMP_INC_STATS_BH(IcmpInErrors); + return; + } + too_short = 1; } sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr, th->source, tcp_v4_iif(skb)); @@ -1154,7 +1162,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 tp = &sk->tp_pinfo.af_tcp; seq = ntohl(th->seq); - if (sk->state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) { + if (sk->state != TCP_LISTEN && !too_short 
&& !between(seq, tp->snd_una, tp->snd_nxt)) { NET_INC_STATS(OutOfWindowIcmps); goto out; } @@ -2050,7 +2058,7 @@ process: bh_lock_sock(sk); ret = 0; if (!sk->lock.users) { - if (!tcp_prequeue(sk, skb)) + if (skb->ip_summed != CHECKSUM_NONE || !tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } else sk_add_backlog(sk, skb); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/ipv4/tcp_output.c linux-2.4.20-wolk4.9-fullkernel/net/ipv4/tcp_output.c --- linux-2.4.20-wolk4.8-fullkernel/net/ipv4/tcp_output.c 2003-08-25 18:25:27.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/net/ipv4/tcp_output.c 2003-08-25 20:35:30.000000000 +0200 @@ -87,12 +87,10 @@ static __u16 tcp_advertise_mss(struct so { struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp); struct dst_entry *dst = __sk_dst_get(sk); - int mss = tp->advmss; + unsigned mss = tp->advmss; - if (dst && dst->advmss < mss) { - mss = dst->advmss; - tp->advmss = mss; - } + if (dst && dst->advmss < tp->advmss) + mss = tp->advmss = dst->advmss; return (__u16)mss; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/ipv6/af_inet6.c linux-2.4.20-wolk4.9-fullkernel/net/ipv6/af_inet6.c --- linux-2.4.20-wolk4.8-fullkernel/net/ipv6/af_inet6.c 2003-08-25 18:25:27.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/net/ipv6/af_inet6.c 2003-08-25 20:35:30.000000000 +0200 @@ -673,6 +673,7 @@ static int __init inet6_init(void) ip6_flowlabel_init(); addrconf_init(); sit_init(); + ipv6_frag_init(); /* Init v6 transport protocols. */ udpv6_init(); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/ipv6/reassembly.c linux-2.4.20-wolk4.9-fullkernel/net/ipv6/reassembly.c --- linux-2.4.20-wolk4.8-fullkernel/net/ipv6/reassembly.c 2002-11-29 00:53:15.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/ipv6/reassembly.c 2003-08-25 20:35:30.000000000 +0200 @@ -22,6 +22,7 @@ * * Horst von Brand Add missing #include * Alexey Kuznetsov SMP races, threading, cleanup. + * Patrick McHardy LRU queue of frag heads for evictor. 
*/ #include #include @@ -30,11 +31,14 @@ #include #include #include +#include #include #include #include #include #include +#include +#include #include #include @@ -67,6 +71,7 @@ struct ip6frag_skb_cb struct frag_queue { struct frag_queue *next; + struct list_head lru_list; /* lru list member */ __u32 id; /* fragment id */ struct in6_addr saddr; @@ -95,6 +100,8 @@ struct frag_queue static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ]; static rwlock_t ip6_frag_lock = RW_LOCK_UNLOCKED; +static u32 ip6_frag_hash_rnd; +static LIST_HEAD(ip6_frag_lru_list); int ip6_frag_nqueues = 0; static __inline__ void __fq_unlink(struct frag_queue *fq) @@ -102,6 +109,7 @@ static __inline__ void __fq_unlink(struc if(fq->next) fq->next->pprev = fq->pprev; *fq->pprev = fq->next; + list_del(&fq->lru_list); ip6_frag_nqueues--; } @@ -112,16 +120,73 @@ static __inline__ void fq_unlink(struct write_unlock(&ip6_frag_lock); } -static __inline__ unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr, - struct in6_addr *daddr) +static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr, + struct in6_addr *daddr) { - unsigned int h = saddr->s6_addr32[3] ^ daddr->s6_addr32[3] ^ id; + u32 a, b, c; - h ^= (h>>16); - h ^= (h>>8); - return h & (IP6Q_HASHSZ - 1); + a = saddr->s6_addr32[0]; + b = saddr->s6_addr32[1]; + c = saddr->s6_addr32[2]; + + a += JHASH_GOLDEN_RATIO; + b += JHASH_GOLDEN_RATIO; + c += ip6_frag_hash_rnd; + __jhash_mix(a, b, c); + + a += saddr->s6_addr32[3]; + b += daddr->s6_addr32[0]; + c += daddr->s6_addr32[1]; + __jhash_mix(a, b, c); + + a += daddr->s6_addr32[2]; + b += daddr->s6_addr32[3]; + c += id; + __jhash_mix(a, b, c); + + return c & (IP6Q_HASHSZ - 1); } +static struct timer_list ip6_frag_secret_timer; +static int ip6_frag_secret_interval = 10 * 60 * HZ; + +static void ip6_frag_secret_rebuild(unsigned long dummy) +{ + unsigned long now = jiffies; + int i; + + write_lock(&ip6_frag_lock); + get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32)); + for (i = 0; i < IP6Q_HASHSZ; 
i++) { + struct frag_queue *q; + + q = ip6_frag_hash[i]; + while (q) { + struct frag_queue *next = q->next; + unsigned int hval = ip6qhashfn(q->id, + &q->saddr, + &q->daddr); + + if (hval != i) { + /* Unlink. */ + if (q->next) + q->next->pprev = q->pprev; + *q->pprev = q->next; + + /* Relink to new hash chain. */ + if ((q->next = ip6_frag_hash[hval]) != NULL) + q->next->pprev = &q->next; + ip6_frag_hash[hval] = q; + q->pprev = &ip6_frag_hash[hval]; + } + + q = next; + } + } + write_unlock(&ip6_frag_lock); + + mod_timer(&ip6_frag_secret_timer, now + ip6_frag_secret_interval); +} atomic_t ip6_frag_mem = ATOMIC_INIT(0); @@ -193,38 +258,30 @@ static __inline__ void fq_kill(struct fr static void ip6_evictor(void) { - int i, progress; + struct frag_queue *fq; + struct list_head *tmp; - do { + for(;;) { if (atomic_read(&ip6_frag_mem) <= sysctl_ip6frag_low_thresh) return; - progress = 0; - for (i = 0; i < IP6Q_HASHSZ; i++) { - struct frag_queue *fq; - if (ip6_frag_hash[i] == NULL) - continue; - - read_lock(&ip6_frag_lock); - if ((fq = ip6_frag_hash[i]) != NULL) { - /* find the oldest queue for this hash bucket */ - while (fq->next) - fq = fq->next; - atomic_inc(&fq->refcnt); - read_unlock(&ip6_frag_lock); - - spin_lock(&fq->lock); - if (!(fq->last_in&COMPLETE)) - fq_kill(fq); - spin_unlock(&fq->lock); - - fq_put(fq); - IP6_INC_STATS_BH(Ip6ReasmFails); - progress = 1; - continue; - } + read_lock(&ip6_frag_lock); + if (list_empty(&ip6_frag_lru_list)) { read_unlock(&ip6_frag_lock); + return; } - } while (progress); + tmp = ip6_frag_lru_list.next; + fq = list_entry(tmp, struct frag_queue, lru_list); + atomic_inc(&fq->refcnt); + read_unlock(&ip6_frag_lock); + + spin_lock(&fq->lock); + if (!(fq->last_in&COMPLETE)) + fq_kill(fq); + spin_unlock(&fq->lock); + + fq_put(fq); + IP6_INC_STATS_BH(Ip6ReasmFails); + } } static void ip6_frag_expire(unsigned long data) @@ -294,6 +351,8 @@ static struct frag_queue *ip6_frag_inter fq->next->pprev = &fq->next; ip6_frag_hash[hash] = fq; 
fq->pprev = &ip6_frag_hash[hash]; + INIT_LIST_HEAD(&fq->lru_list); + list_add_tail(&fq->lru_list, &ip6_frag_lru_list); ip6_frag_nqueues++; write_unlock(&ip6_frag_lock); return fq; @@ -501,6 +560,9 @@ static void ip6_frag_queue(struct frag_q fq->nhoffset = nhoff; fq->last_in |= FIRST_IN; } + write_lock(&ip6_frag_lock); + list_move_tail(&fq->lru_list, &ip6_frag_lru_list); + write_unlock(&ip6_frag_lock); return; err: @@ -679,3 +741,14 @@ int ipv6_reassembly(struct sk_buff **skb kfree_skb(skb); return -1; } + +void __init ipv6_frag_init(void) +{ + ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ + (jiffies ^ (jiffies >> 6))); + + init_timer(&ip6_frag_secret_timer); + ip6_frag_secret_timer.function = ip6_frag_secret_rebuild; + ip6_frag_secret_timer.expires = jiffies + ip6_frag_secret_interval; + add_timer(&ip6_frag_secret_timer); +} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/Config.in linux-2.4.20-wolk4.9-fullkernel/net/sched/Config.in --- linux-2.4.20-wolk4.8-fullkernel/net/sched/Config.in 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/Config.in 2003-08-25 23:52:45.000000000 +0200 @@ -40,6 +40,7 @@ if [ "$CONFIG_NET_CLS" = "y" ]; then fi tristate ' Firewall based classifier' CONFIG_NET_CLS_FW tristate ' U32 classifier' CONFIG_NET_CLS_U32 + dep_tristate ' Layer7 classifier' CONFIG_NET_CLS_LAYER7 $CONFIG_IP_NF_CONNTRACK $CONFIG_NETFILTER $CONFIG_EXPERIMENTAL if [ "$CONFIG_NET_QOS" = "y" ]; then tristate ' Special RSVP classifier' CONFIG_NET_CLS_RSVP tristate ' Special RSVP classifier for IPv6' CONFIG_NET_CLS_RSVP6 diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/Makefile linux-2.4.20-wolk4.9-fullkernel/net/sched/Makefile --- linux-2.4.20-wolk4.8-fullkernel/net/sched/Makefile 2003-08-25 18:27:08.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/Makefile 2003-08-25 23:59:29.000000000 +0200 @@ -32,5 +32,6 @@ obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o 
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o obj-$(CONFIG_NET_CLS_FW) += cls_fw.o +obj-$(CONFIG_NET_CLS_LAYER7) += cls_layer7.o regexp/regexp.o regexp/regsub.o include $(TOPDIR)/Rules.make diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/cls_api.c linux-2.4.20-wolk4.9-fullkernel/net/sched/cls_api.c --- linux-2.4.20-wolk4.8-fullkernel/net/sched/cls_api.c 2001-12-21 18:42:06.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/cls_api.c 2003-08-25 23:52:45.000000000 +0200 @@ -466,5 +466,9 @@ int __init tc_filter_init(void) #ifdef CONFIG_NET_CLS_RSVP6 INIT_TC_FILTER(rsvp6); #endif +#ifdef CONFIG_NET_CLS_LAYER7 + INIT_TC_FILTER(layer7); + layer7_init_proc(); +#endif return 0; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/cls_layer7.c linux-2.4.20-wolk4.9-fullkernel/net/sched/cls_layer7.c --- linux-2.4.20-wolk4.8-fullkernel/net/sched/cls_layer7.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/cls_layer7.c 2003-08-25 23:52:45.000000000 +0200 @@ -0,0 +1,950 @@ +/* + net/sched/cls_layer7.c + + Layer 7 (application layer) packet classifier. + + Written by Matthew Strait, Ethan Sommer and Justin Levandoski, 2003. + + Modeled after: + cls_tcindex.c: Written 1998,1999 by Werner Almesberger, EPFL ICA + + TODO: + -Support IPv6 + -Do matching across multiple packets + -Get a better regexp implementation, preferably one with documentation + about what it can do! + -Improve module unloading support, if possible (this may be tc's fault...) + -Better support for connections with children (FTP, etc): ability to + classify children seperately from their parents. + -Finish writing the functions below that currently say "not implemented" +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "regexp/regexp.h" +#include + +/* this needs to be last (or at least after some of the other stuff, or + * else isprint() doesn't work! 
*/ +#include + +/* uncomment the next line to to get debugging information printed, including +dumps of the first few packets of each connection, classifications and warnings +about incomplete functions. */ +#define LAYER7_DEBUG + +/* uncomment for more debugging info */ +//#define LAYER7_DEBUG_MORE + +#ifdef LAYER7_DEBUG + #define DPRINTK(format,args...) printk(format,##args) +#else + #define DPRINTK(format,args...) +#endif + +#ifdef LAYER7_DEBUG_MORE + #define DPRINTK2(format,args...) printk(format,##args) +#else + #define DPRINTK2(format,args...) +#endif + +#define LAYER7_MAX_PATTERN_DEF_SIZE 8192 +#define LAYER7_MAX_PROTOCOL_NAME_SIZE 256 + +#define PRIV(tp) ((struct layer7_data *) (tp)->root) + +struct layer7_filter_result { + struct tcf_police *police; + struct tcf_result res; +}; + +struct layer7_filter { + __u16 key; + struct layer7_filter_result result; + struct layer7_filter *next; +}; + +struct layer7_data { + struct layer7_filter_result * perfect; /* perfect hash; NULL if none */ + struct layer7_filter ** h; /* imperfect hash; only used if !perfect; + NULL if unused */ + __u16 mask; /* AND key with mask */ + int shift; /* shift ANDed key to the right */ + int hash; /* hash table size; 0 if undefined */ + int alloc_hash; /* allocated size */ + int fall_through; /* 0: only classify if explicit match */ +}; + +/* one element in the classification hash table, in theory each "connection" + * (aka socket) should be remembered here so that it doesn't need to be + * reclassified for each packet once the "connection" has been identified */ +struct ct_hashElem { + u32 classid; + u32 hash; + int num_pkts_so_far; + int classified; +}; + +/* hash table that matches connections to the connection's state */ +struct ct_hashElem currentSockets[32768]; + +/* a pattern defined by writing to /proc/net/layer7_protocols */ +struct layer7_pattern { + char * name; + regexp * pattern; + int patternsize; + char * uncomppattern; +}; + +/* pattern classification pair (aka filter 
rule) */ +struct layer7_patclas_pair { + regexp *pattern; + u32 classification; + u32 handle; + void *parent; +}; + + +/* all the rules we are currently attempting to match on */ +struct layer7_patclas_pair *layer7_patclas_pairs = NULL; + +/* how many pairs we have so far */ +int layer7_num_patclas_pairs = 0; + +/* array of all the patterns which have been defined * + * and an int to keep track of how many we have */ +struct layer7_pattern * layer7_patterns = NULL; +int layer7_num_patterns = 0; + +/* the char* which holds the pattern definitions given to us + * through the /proc filesystem */ +char * layer7_unparsed_patterns = NULL; + +/* clear out all of the patterns, so that new ones can be defined + * this is called each time someone writes to /proc/net/layer7_protocols + * before the new protocols are read in */ +void clear_layer7_patterns( void ) +{ + int x; + for (x = 0; x < layer7_num_patterns; x++) { + kfree(layer7_patterns[x].name); + kfree(layer7_patterns[x].pattern); + kfree(layer7_patterns[x].uncomppattern); + } + kfree(layer7_patterns); + layer7_num_patterns = 0; +} + + +/* + * Define a new pattern (which consists of a name (eg "http") and a regular + * expression (eg "http.*get")) + * + * this is made to be memory efficent at the cost of time + * (it reallocates the memory each time so it only uses exactly + * the ammount necesary) because it will only be called one time per + * pattern definition */ +void add_layer7_pattern(const char *name, char *pattern) +{ + struct layer7_pattern *newpatterns=NULL; + int x; + /* first see if we already have a pattern by that name */ + for (x = 0; x < layer7_num_patterns; x++){ + if (!strcmp(name, layer7_patterns[x].name)) { + /* keep a copy of the old regexp in case the new comp fails */ + regexp * oldpattern = kmalloc(layer7_patterns[x].patternsize, GFP_KERNEL); + memcpy(oldpattern, layer7_patterns[x].pattern, layer7_patterns[x].patternsize); + + /* just recompile the regexp and return */ + /* compile the pattern 
(we only want to do this once */ + if (!(layer7_patterns[x].pattern = + regcomp(pattern, &layer7_patterns[x].patternsize))) /* if regcomp fails */ + { + printk("<3>layer7: ERROR COMPILING REGEX \"%s\"\nold regex will be kept instead\n", pattern); + /* go back to the old regex */ + layer7_patterns[x].pattern = oldpattern; + } + else + { + kfree(layer7_patterns[x].uncomppattern); + layer7_patterns[x].uncomppattern = + kmalloc(strlen(pattern)+1, GFP_KERNEL); + strcpy(layer7_patterns[x].uncomppattern, pattern); + kfree(oldpattern); + } + + return; + } + } + + /* if we have not found a pattern by that name add a new one*/ + + /* allocate the memory for the new array */ + newpatterns = kmalloc( sizeof(struct layer7_pattern) * + (layer7_num_patterns + 1), GFP_KERNEL); + + if (layer7_num_patterns > 0) + { + /* copy any previously declared patterns in */ + memcpy(newpatterns, layer7_patterns, + sizeof(struct layer7_pattern) * (layer7_num_patterns + 1)); + /* free the memory the old patterns were using */ + kfree(layer7_patterns); + } + layer7_num_patterns++; + + /* set the newpatterns to be the authoritative patterns */ + layer7_patterns = newpatterns; + + /* copy the name */ + layer7_patterns[layer7_num_patterns-1].name = + kmalloc(strlen(name)+1, GFP_KERNEL); + + strcpy(layer7_patterns[layer7_num_patterns-1].name, name); + /* copy the uncomp pattern */ + layer7_patterns[layer7_num_patterns-1].uncomppattern = + kmalloc(strlen(pattern)+1, GFP_KERNEL); + + strcpy(layer7_patterns[layer7_num_patterns-1].uncomppattern, pattern); + + /* compile the pattern (we only want to do this once) */ + if (!(layer7_patterns[layer7_num_patterns-1].pattern = + regcomp(pattern, &layer7_patterns[layer7_num_patterns-1].patternsize))) /* if regcomp fails */ + { + printk("<3>layer7: ERROR COMPILING REGEX \"%s\"\n", pattern); + /* make sure we don't use this regexp, + if more are added they will just overwrite the bad regexp */ + layer7_num_patterns--; + } +} + +/* add_layer7_filter_rule() + * 
defines a new filtering rule, for example "any packet which matches + * the pattern called http should be classified as 0x10001" + * + * this is made to be memory efficent at the cost of time (it reallocates the + * memory each time so it only uses exactly the ammount necesary) because + * it will only be called one time per pattern we are matching on per boot + */ +void add_layer7_filter_rule(const char *name, const u32 classification, const u32 handle, void * parent) +{ + int x; + /* loop through all the patterns */ + for (x = 0; x < layer7_num_patterns; x++) + { + if (!strcmp(name, layer7_patterns[x].name)) { + + /* allocate the memory for the new array */ + struct layer7_patclas_pair * newpairs = + kmalloc( sizeof(struct layer7_patclas_pair) * + (layer7_num_patclas_pairs + 1), GFP_KERNEL); + + /* don't copy or free things if they don't exist yet*/ + if (layer7_num_patclas_pairs > 0) { + /* copy any previously declared patterns in */ + memcpy(newpairs, layer7_patclas_pairs, + sizeof(struct layer7_patclas_pair) * + (layer7_num_patclas_pairs+1)); + + /* free the memory the old patterns were using */ + kfree(layer7_patclas_pairs); + } + layer7_num_patclas_pairs++; + + /* set the newpatterns to be the authoritative patterns */ + layer7_patclas_pairs = newpairs; + + /* copy in the pattern so that if it is freed we don't crash */ + + layer7_patclas_pairs[layer7_num_patclas_pairs - 1].pattern = + kmalloc(layer7_patterns[x].patternsize, GFP_KERNEL); + memcpy(layer7_patclas_pairs[layer7_num_patclas_pairs-1].pattern, + layer7_patterns[x].pattern, layer7_patterns[x].patternsize); + + layer7_patclas_pairs[layer7_num_patclas_pairs-1].classification = + classification; + layer7_patclas_pairs[layer7_num_patclas_pairs-1].handle = + handle; + layer7_patclas_pairs[layer7_num_patclas_pairs-1].parent = + parent; + return; + } + } + printk("<3>layer-7: There is no rule for \"%s\"\n", name); +} + +/* this is a hash function which acts on the timespec to get a relatively + * good 
hash. It uses 15 bit chunks and XORs them. + * TODO: make the chunk size user defined so that the hash table + * can be bigger/smaller? */ +static int layer7_hash(struct timespec ts) +{ + int hash = (ts.tv_nsec&32767) ^ + ((ts.tv_nsec>>15)&32767) ^ + ((ts.tv_nsec>>30)&32767) ^ + (ts.tv_sec&32767) ^ + ((ts.tv_sec>>15)&32767) ^ + ((ts.tv_sec>>30)&32767); + return hash; +} + +/* for debugging only -- enable and call this if you want lots of debug info */ +#if 0 +static void print_pkt_stuff(struct sk_buff *skb) +{ + int x; + + /* set this pointer to the beginning of the TCP data, + which is what we care about. */ + unsigned char * tcpdata = skb->data + app_data_offset(skb); + + printk("Here's the IP header in hex:\n"); + for(x = 0; x < sizeof(struct iphdr); x++) + printk(" %.2x", ((unsigned char *)skb->nh.iph)[x]); + printk("\n"); + + /* when routing, this will just print the IP header again */ + printk("Here's the TCP header in hex:\n"); + for(x = 0; x < sizeof(struct tcphdr); x++) + printk(" %.2x", ((unsigned char *)skb->h.th)[x]); + printk("\n"); + + printk("TCP header detail: This packet is coming from %d.%d.%d.%d:%d\n", + (skb->nh.iph->saddr&0x000000FF), (skb->nh.iph->saddr&0x0000FF00) >> 8, + (skb->nh.iph->saddr&0x00FF0000) >> 16, (skb->nh.iph->saddr&0xFF000000) >> 24, skb->h.th->source); + printk("and going to %d.%d.%d.%d:%d\n", + (skb->nh.iph->daddr&0x000000FF), (skb->nh.iph->daddr&0x0000FF00) >> 8, + (skb->nh.iph->daddr&0x00FF0000) >> 16, (skb->nh.iph->daddr&0xFF000000) >> 24, skb->h.th->dest); + + printk("syn=%d ack=%d rst=%d fin=%d psh=%d urg=%d.\n", + skb->h.th->syn, skb->h.th->ack, skb->h.th->rst, + skb->h.th->fin, skb->h.th->psh, skb->h.th->urg); + + printk("TCP data ('X' == non-printable) is:\n"); + for(x = 0; x < ( (int)skb->tail - (int)tcpdata ); x++) + { + if (isprint(tcpdata[x])) printk("%c", tcpdata[x]); + else printk("X"); + } + printk("\nAnd here's the same thing in hex:\n"); + for(x = 0; x < ( (int)skb->tail - (int)tcpdata ); x++) + { + printk(" 
%.2x", tcpdata[x]); + } + printk("\n\n"); + + printk("data ('X' == non-printable) is:\n"); + for(x = 0; x < ( (int)skb->tail - (int)skb->data ); x++) + { + if (isprint(skb->data[x])) printk("%c", skb->data[x]); + else printk("X"); + } + printk("\nAnd here's the same thing in hex:\n"); + for(x = 0; x < ( (int)skb->tail - (int)skb->data ); x++) + { + printk(" %.2x", skb->data[x]); + } + printk("\n\n"); +} +#endif + + +/* These functions test what kind of packet we're dealing with. +include/linux/if_ether.h suggests that all packets are treated as +Ethernet, but I'm not absolutely sure, and the presence of *raw in +skb->mac troubles me. I depend on the IP header always starting at the +same offset, so if this is wrong, there's trouble. -MLS */ + +static int is_ipv4(struct sk_buff * skb) +{ + /* I'm also not convinced that this code ever gets run if + it isn't IP, since running dhclient (which should send ARPs or + RARPs) doesn't cause this to print numbers other than 800. + I'm not sure what other testing I can do. */ + + /* the htons is important. It fixes the endianness */ + if(htons(skb->protocol) != ETH_P_IP){ + return 0; + } + + return 1; +} + +/* I'd rather just call this "is_tcp", except it depends on it being IPv4 and +TCP could be used on top of other protocols */ +static inline int is_tcp_over_ipv4(struct sk_buff * skb) +{ + /* I don't want to depend on skb->nh.iph->protocol being set, because + I bet it isn't when we are acting as a switch, just like skb->h.th isn't + when acting as a router. */ + #define IP_PROTO_OFFSET 9 + + /* If it's not TCP */ + if(skb->data[ETH_HLEN + IP_PROTO_OFFSET] != IPPROTO_TCP){ + return 0; + } + + return 1; +} + +/* Again, I'd rather just call this "is_udp"... 
*/ +static inline int is_udp_over_ipv4(struct sk_buff * skb) +{ + #define IP_PROTO_OFFSET 9 + + if(skb->data[ETH_HLEN + IP_PROTO_OFFSET] != IPPROTO_UDP){ + return 0; + } + + return 1; +} + +/* Returns the number of bytes into the skb->data that the application +data starts. This is a kludge because we don't know how to do it right, +or even if there really is a right way of doing it. This fact is why +we are not currently attempting to classify anything except TCP and UDP over +IPv4. */ +/* HLEN == hl == header length. 4 == bytes/word */ +static int app_data_offset(struct sk_buff *skb) +{ + /* 12 == offset into TCP header for the header length field. We can't get this + with skb->h.th->doff because the tcphdr struct doesn't get set when routing */ + #define TCP_DOFF_OFF 12 + + /* ip_hl = 4*skb->nh.iph->ihl would usually work, but I bet the + iph struct isn't set when acting as a switch! */ + int ip_hl = 4*(skb->data[ETH_HLEN] & 0x0f); + + if( is_udp_over_ipv4(skb) ){ + return ETH_HLEN + ip_hl + 8; /* UDP header is always 8 bytes */ + } + else{ /* is_tcp_over_ipv4 */ + int tcp_hl = 4*(skb->data[ETH_HLEN + ip_hl + TCP_DOFF_OFF] >> 4); + return ETH_HLEN + ip_hl + tcp_hl; + } +} + +/* This function is only called until the connection is classified or for the + * first few packets (whichever limit comes first.) The classification happens + * here. After a connection has been identified it continues to be of that + * type. 
*/ +static int layer7_really_classify(struct sk_buff *skb, struct tcf_result *res, int hash, void* parent) +{ + int x, y = 0; + int match = 0; + + /* the application layer data */ + unsigned char * app_data = skb->data + app_data_offset(skb); + + /* get the data segment of the packet and, removing any nulls in it, + put it into buf, then we null terminate buf so it's a happy string */ + char * buf = (char *)kmalloc((int)skb->tail - (int)app_data + 1, GFP_KERNEL); + if(!buf) + { + printk("<3>layer7: kmalloc failed to allocate %d bytes for buf!\n", + (int)skb->tail - (int)app_data + 1); + return TC_POLICE_UNSPEC; /* give up */ + } + + /* this looks slow, but changing it to a memcpy (which loses the ability to + strip out nulls and do tolower) does not make a noticable difference in speed, + so we suspect that this is not a bottleneck. Even if we avoided copying altogether + it doesn't seem like it would get much faster. */ + y = 0; + for(x = 0; x < ( (int)skb->tail - (int)app_data ); x++) + { + if (app_data[x] != 0) buf[y++] = tolower(app_data[x]); + } + buf[y] = '\0'; // make it into a null-terminated string + +#ifdef LAYER7_DEBUG + if (strlen(buf)!=0) { + printk("buf: (non-printable chars are printed as '.'\n"); + for (x=0;xclassid = layer7_patclas_pairs[x].classification; + + /* we are a "generic filter", so class is always set to 0. 
+ See "Linux Network Traffic Control -- Implementation Overview, + 4 Feb 2001, section 5.3 */ + res->class = 0; + + /* record how we classified it */ + currentSockets[hash].classid = layer7_patclas_pairs[x].classification; + currentSockets[hash].hash = hash; + currentSockets[hash].classified = 1; + return TC_POLICE_OK; + } + else{ + res->class = 0; + + /* remember to use the default in the futrure */ + currentSockets[hash].classid=res->classid; + currentSockets[hash].hash = hash; + + /* this is the "unclassified" case, so leave + currentSockets[hash].classified alone */ + return TC_POLICE_UNSPEC; + } +} + + +static int layer7_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) +{ + enum ip_conntrack_info ctinfo; + struct ip_conntrack *conntrack; + int hash; + + /* check if we can deal with the protocol */ + if( is_ipv4(skb) ){ + DPRINTK2("layer7: Is IPv4, going on.\n"); + + if ( is_udp_over_ipv4(skb)){ + DPRINTK2(" layer7: Is UDP/IPv4, going on.\n"); } + else if( is_tcp_over_ipv4(skb)){ + DPRINTK2(" layer7: Is TCP/IPv4, going on.\n"); } + else{ + DPRINTK2(" layer7: Not UDP or TCP, leaving.\n"); + return TC_POLICE_UNSPEC; + } + } + else{ + DPRINTK2("layer7: Not IPv4, leaving.\n"); + return TC_POLICE_UNSPEC; + } + + /* get a ip_conntrack */ + if(!(conntrack = ip_conntrack_get(skb, &ctinfo))) + { + printk("<3>layer7_classify: error getting conntrack, dropping to default.\n"); + return TC_POLICE_UNSPEC; + } + + /* see if we can get a master conntrack (and its master etc) + (for ftp etc) */ + while (master_ct(conntrack) != NULL) { + conntrack=master_ct(conntrack); + } + + /* the conntrack got bzeroed somewhere, so that should be 0 + the first time around... 
*/ + if (conntrack->timestamp.tv_sec == 0){ + jiffies_to_timespec(jiffies,&conntrack->timestamp); /* 2.4/2.6 difference */ + hash = layer7_hash(conntrack->timestamp); + memset(¤tSockets[hash], 0, sizeof(struct ct_hashElem)); + } + + /* we hash on the timestamp we added to the conntrack */ + hash = layer7_hash(conntrack->timestamp); + + /* If we already know about this connection, this increments the + packet count. If not, this doesn't hurt anything. */ + currentSockets[hash].num_pkts_so_far++; + + /* If we've seen this connection before and we're not trying to + classify it anymore, either because we've given up (we've arbitrarily + chosen to test the first 8 packets) or because we've found a match */ + if ( currentSockets[hash].hash == hash && + (currentSockets[hash].num_pkts_so_far > 8 || + currentSockets[hash].classified) ) + { + if(currentSockets[hash].classified){ + /* classify it as what we classified it as before */ + res->classid = currentSockets[hash].classid; + res->class = 0; + return TC_POLICE_OK; + } + else{ + return TC_POLICE_UNSPEC; + } + } + /* if we've seen it before, but we still need to classify it */ + else if(currentSockets[hash].hash == hash){ + int retval = layer7_really_classify(skb, res, hash,tp->root); + if(retval == TC_POLICE_UNSPEC) + DPRINTK("layer7: seen before, still unmatched. Classified as %x for now\n", currentSockets[hash].classid); + else + DPRINTK("layer7: found match. Classified as %x\n", currentSockets[hash].classid); + return retval; + } + /* otherwise this is the first packet of a new connection */ + else{ + int retval; + + currentSockets[hash].num_pkts_so_far = 1; + currentSockets[hash].classified = 0; + + retval = layer7_really_classify(skb, res, hash,tp->root); + if(retval == TC_POLICE_UNSPEC) + DPRINTK("layer7: first packet, still unmatched. Classified as %x for now\n", currentSockets[hash].classid); + else + DPRINTK("layer7: first packet. 
Classified as %x\n", currentSockets[hash].classid); + return retval; + } + + return TC_POLICE_OK; /* == 0 */ +} + +/* returns the "internal id" (the index into the patclas array) of the + rule corresponding to handle. Untested. */ +static unsigned long layer7_get(struct tcf_proto *tp, u32 handle) +{ + int x; + /* loop through to find the corresponding rule */ + for (x = 0; x < layer7_num_patclas_pairs; x++) { + if (layer7_patclas_pairs[x].handle == handle) + return x; + } + /* otherwise return layer7_num_patclas_pairs */ + return layer7_num_patclas_pairs; +} + + +/* This doesn't do anything in other filters either... +(but this is one of the required functions) */ +static void layer7_put(struct tcf_proto *tp, unsigned long f) +{ + DPRINTK("layer7_put called. Not implemented.\n"); +} + +/* This actually does something, but we're not sure what. +Or rather, we know that it sets tp and that it makes tc crash if tp isn't +set, but we don't know why. */ +static int layer7_init(struct tcf_proto *tp) +{ + struct layer7_data *p; + + DPRINTK("layer7_init called: (tp %p). Might not be doing the right thing.\n", tp); + + MOD_INC_USE_COUNT; + p = kmalloc(sizeof(struct layer7_data), GFP_KERNEL); + if (!p) { + MOD_DEC_USE_COUNT; + return -ENOMEM; + } + tp->root = p; + p->perfect = NULL; + p->h = NULL; + p->hash = 0; + p->mask = 0xffff; + p->shift = 0; + p->fall_through = 1; + + return 0; +} + +/* XXX More info needed here. +We're not sure exactly what this is supposed to do. We're copying what +cls_tcindex.c does and nothing appears to be broken because of this approach. 
*/ +static int layer7_delete(struct tcf_proto *tp, unsigned long arg) +{ + struct layer7_filter_result *r = (struct layer7_filter_result *) arg; + unsigned long cl; + + DPRINTK("layer7_delete called: might not be doing the right thing.\n"); + + cl = __cls_set_class(&r->res.class,0); + if (cl) + tp->q->ops->cl_ops->unbind_tcf(tp->q,cl); + +#ifdef CONFIG_NET_CLS_POLICE + tcf_police_release(r->police); +#endif + return 0; +} + + +/* There are no parameters for layer7_init, so we overload layer7_change */ +static int layer7_change(struct tcf_proto * tp, unsigned long base, u32 handle, + struct rtattr ** tca, unsigned long * arg) +{ + struct layer7_filter_result new_filter_result = { + NULL, /* no policing */ + { 0,0 }, /* no classification */ + }; + struct rtattr * opt = tca[TCA_OPTIONS-1]; + struct rtattr * tb[TCA_LAYER7_MAX]; + struct layer7_filter_result * r = (struct layer7_filter_result *) * arg; + char* protocol = NULL; + u32 classid = 0; + + + //if (arg) + // DPRINTK("layer7_change: *arg = 0x%lx\n",*arg); + if (!opt) + return 0; + if(rtattr_parse(tb, TCA_LAYER7_MAX,RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0) + return -EINVAL; + + /* Get protocol here */ + if (tb[TCA_LAYER7_PROTOCOL - 1]) { + if (RTA_PAYLOAD(tb[TCA_LAYER7_PROTOCOL - 1]) < sizeof(int)) + return -EINVAL; + + protocol = (char *)RTA_DATA(tb[TCA_LAYER7_PROTOCOL - 1]); + } + + /* vestigal comment from tcindex.c + * + * Note: this could be as restrictive as + * if (handle & ~(mask >> shift)) + * but then, we'd fail handles that may become valid after some + * future mask change. While this is extremely unlikely to ever + * matter, the check below is safer (and also more + * backwards-compatible). 
+ */ + + r = &new_filter_result; + + if (tb[TCA_LAYER7_CLASSID-1]) { + classid = *(__u32 *) RTA_DATA(tb[TCA_LAYER7_CLASSID - 1]); + } + + DPRINTK2("add_layer7_filter_rule, protocol: %s, with classid: %x, handle %u\n", protocol, classid, handle); + add_layer7_filter_rule(protocol, classid, handle,tp->root); + +#ifdef CONFIG_NET_CLS_POLICE + { + struct tcf_police *police; + + police = tb[TCA_LAYER7_POLICE - 1] ? + tcf_police_locate(tb[TCA_LAYER7_POLICE - 1], NULL) : NULL; + tcf_tree_lock(tp); + police = xchg(&r->police, police); + tcf_tree_unlock(tp); + tcf_police_release(police); + } +#endif + return 0; +} + +/* XXX More information needed here. +Can't find any documentation on what this function is supposed to do. +The cls_tcindex.c version of this doesn't appear to do anything that applies +to us and so far doing nothing appears to work fine. */ +static void layer7_walk(struct tcf_proto * tp, struct tcf_walker * walker) +{ + DPRINTK("layer7_walk called. Not implemented.\n"); +} + +/* delete all the rules in the filter */ +static void layer7_destroy(struct tcf_proto *tp) +{ + int x; + + /* clear the filter rules */ + if (layer7_patclas_pairs != NULL) { + for (x=0;xlen; /* cls_tcindex.c does this, don't know why... */ +} + +struct tcf_proto_ops cls_layer7_ops = { + NULL, + "layer7", + layer7_classify, + layer7_init, + layer7_destroy, + + layer7_get, + layer7_put, + layer7_change, + layer7_delete, + layer7_walk, + layer7_dump +}; + + +/* write out the patterns to userland. (yes, write reads and read writes.) 
*/ +int layer7_read_proc(char* page, char ** start, off_t off, int count, + int* eof, void * data) +{ + if (layer7_patterns == NULL){ + /* there are no patterns yet */ + *eof=1; + page='\0'; + return 0; + } + else{ + int x; + /* there are patterns */ + page[0]='\0'; + for (x=0;xread_proc = layer7_read_proc; + entry->write_proc = layer7_write_proc; +} + +#ifdef MODULE +int init_module(void) +{ + DPRINTK2("layer7: init_module called\n"); + layer7_init_proc(); + return register_tcf_proto_ops(&cls_layer7_ops); +} + +void cleanup_module(void) +{ + DPRINTK2("layer7: cleanup_module called\n"); + unregister_tcf_proto_ops(&cls_layer7_ops); +} +MODULE_LICENSE("GPL"); +#endif + diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regexp.c linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regexp.c --- linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regexp.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regexp.c 2003-08-25 23:52:45.000000000 +0200 @@ -0,0 +1,1236 @@ +/* + * regcomp and regexec -- regsub and regerror are elsewhere + * @(#)regexp.c 1.3 of 18 April 87 + * + * Copyright (c) 1986 by University of Toronto. + * Written by Henry Spencer. Not derived from licensed software. + * + * Permission is granted to anyone to use this software for any + * purpose on any computer system, and to redistribute it freely, + * subject to the following restrictions: + * + * 1. The author is not responsible for the consequences of use of + * this software, no matter how awful, even if they arise + * from defects in it. + * + * 2. The origin of this software must not be misrepresented, either + * by explicit claim or by omission. + * + * 3. Altered versions must be plainly marked as such, and must not + * be misrepresented as being the original software. + * + * Beware that some of this code is subtly aware of the way operator + * precedence is structured in regular expressions. 
Serious changes in + * regular-expression syntax might require a total rethink. + * + * This code was modified by Ethan Sommer to work within the kernel + * (it now uses kmalloc etc..) + * + * Modified slightly by Matthew Strait to use more modern C. + */ +#include "regexp.h" +#include "regmagic.h" +#include +#include + +/* added by ethan */ +#define malloc(foo) kmalloc(foo,GFP_KERNEL) + +/* added by MLS */ +#ifdef MODULE +int init_module(void) +{ return 0; } +int cleanup_module(void) +{ return 0; } +#include +#include +MODULE_LICENSE("GPL"); /* I think this is ok based on the above text... */ +#endif + +void regerror(char * s) +{ + printk("regexp(3): %s\n", s); + /* NOTREACHED */ +} + +/* + * The "internal use only" fields in regexp.h are present to pass info from + * compile to execute that permits the execute phase to run lots faster on + * simple cases. They are: + * + * regstart char that must begin a match; '\0' if none obvious + * reganch is the match anchored (at beginning-of-line only)? + * regmust string (pointer into program) that match must include, or NULL + * regmlen length of regmust string + * + * Regstart and reganch permit very fast decisions on suitable starting points + * for a match, cutting down the work a lot. Regmust permits fast rejection + * of lines that cannot possibly match. The regmust tests are costly enough + * that regcomp() supplies a regmust only if the r.e. contains something + * potentially expensive (at present, the only such thing detected is * or + + * at the start of the r.e., which can involve a lot of backup). Regmlen is + * supplied because the test in regexec() needs it and regcomp() is computing + * it anyway. + */ + +/* + * Structure for regexp "program". This is essentially a linear encoding + * of a nondeterministic finite-state machine (aka syntax charts or + * "railroad normal form" in parsing technology). Each node is an opcode + * plus a "next" pointer, possibly plus an operand. 
"Next" pointers of + * all nodes except BRANCH implement concatenation; a "next" pointer with + * a BRANCH on both ends of it is connecting two alternatives. (Here we + * have one of the subtle syntax dependencies: an individual BRANCH (as + * opposed to a collection of them) is never concatenated with anything + * because of operator precedence.) The operand of some types of node is + * a literal string; for others, it is a node leading into a sub-FSM. In + * particular, the operand of a BRANCH node is the first node of the branch. + * (NB this is *not* a tree structure: the tail of the branch connects + * to the thing following the set of BRANCHes.) The opcodes are: + */ + +/* definition number opnd? meaning */ +#define END 0 /* no End of program. */ +#define BOL 1 /* no Match "" at beginning of line. */ +#define EOL 2 /* no Match "" at end of line. */ +#define ANY 3 /* no Match any one character. */ +#define ANYOF 4 /* str Match any character in this string. */ +#define ANYBUT 5 /* str Match any character not in this string. */ +#define BRANCH 6 /* node Match this alternative, or the next... */ +#define BACK 7 /* no Match "", "next" ptr points backward. */ +#define EXACTLY 8 /* str Match this string. */ +#define NOTHING 9 /* no Match empty string. */ +#define STAR 10 /* node Match this (simple) thing 0 or more times. */ +#define PLUS 11 /* node Match this (simple) thing 1 or more times. */ +#define OPEN 20 /* no Mark this point in input as start of #n. */ + /* OPEN+1 is number 1, etc. */ +#define CLOSE 30 /* no Analogous to OPEN. */ + +/* + * Opcode notes: + * + * BRANCH The set of branches constituting a single choice are hooked + * together with their "next" pointers, since precedence prevents + * anything being concatenated to any individual branch. The + * "next" pointer of the last BRANCH in a choice points to the + * thing following the whole choice. 
This is also where the + * final "next" pointer of each individual branch points; each + * branch starts with the operand node of a BRANCH node. + * + * BACK Normal "next" pointers all implicitly point forward; BACK + * exists to make loop structures possible. + * + * STAR,PLUS '?', and complex '*' and '+', are implemented as circular + * BRANCH structures using BACK. Simple cases (one character + * per match) are implemented with STAR and PLUS for speed + * and to minimize recursive plunges. + * + * OPEN,CLOSE ...are numbered at compile time. + */ + +/* + * A node is one char of opcode followed by two chars of "next" pointer. + * "Next" pointers are stored as two 8-bit pieces, high order first. The + * value is a positive offset from the opcode of the node containing it. + * An operand, if any, simply follows the node. (Note that much of the + * code generation knows about this implicit relationship.) + * + * Using two bytes for the "next" pointer is vast overkill for most things, + * but allows patterns to get big without disasters. + */ +#define OP(p) (*(p)) +#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377)) +#define OPERAND(p) ((p) + 3) + +/* + * See regmagic.h for one further detail of program structure. + */ + + +/* + * Utility definitions. + */ +#ifndef CHARBITS +#define UCHARAT(p) ((int)*(unsigned char *)(p)) +#else +#define UCHARAT(p) ((int)*(p)&CHARBITS) +#endif + +#define FAIL(m) { regerror(m); return(NULL); } +#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?') +#define META "^$.[()|?+*\\" + +/* + * Flags to be passed up and down. + */ +#define HASWIDTH 01 /* Known never to match null string. */ +#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */ +#define SPSTART 04 /* Starts with * or +. */ +#define WORST 0 /* Worst case. */ + +/* + * Global work variables for regcomp(). + */ +static char *regparse; /* Input-scan pointer. */ +static int regnpar; /* () count. 
*/ +static char regdummy; +static char *regcode; /* Code-emit pointer; ®dummy = don't. */ +static long regsize; /* Code size. */ + +/* + * Forward declarations for regcomp()'s friends. + */ +#ifndef STATIC +#define STATIC static +#endif +STATIC char *reg(int paren,int *flagp); +STATIC char *regbranch(int *flagp); +STATIC char *regpiece(int *flagp); +STATIC char *regatom(int *flagp); +STATIC char *regnode(char op); +STATIC char *regnext(char *p); +STATIC void regc(char b); +STATIC void reginsert(char op, char *opnd); +STATIC void regtail(char *p, char *val); +STATIC void regoptail(char *p, char *val); +STATIC int strcspn(char *s1, char *s2); + +/* + - regcomp - compile a regular expression into internal code + * + * We can't allocate space until we know how big the compiled form will be, + * but we can't compile it (and thus know how big it is) until we've got a + * place to put the code. So we cheat: we compile it twice, once with code + * generation turned off and size counting turned on, and once "for real". + * This also means that we don't allocate space until we are sure that the + * thing really will compile successfully, and we never have to move the + * code and thus invalidate pointers into it. (Note that it has to be in + * one piece because free() must be able to free it all.) + * + * Beware that the optimization-preparation code in here knows about some + * of the structure of the compiled regexp. + */ +regexp * +regcomp(char *exp,int *patternsize) +{ + register regexp *r; + register char *scan; + register char *longest; + register int len; + int flags; + /* commented out by ethan + extern char *malloc(); + */ + + if (exp == NULL) + FAIL("NULL argument"); + + /* First pass: determine size, legality. */ + regparse = exp; + regnpar = 1; + regsize = 0L; + regcode = ®dummy; + regc(MAGIC); + if (reg(0, &flags) == NULL) + return(NULL); + + /* Small enough for pointer-storage convention? */ + if (regsize >= 32767L) /* Probably could be 65535L. 
*/ + FAIL("regexp too big"); + + /* Allocate space. */ + *patternsize=sizeof(regexp) + (unsigned)regsize; + r = (regexp *)malloc(sizeof(regexp) + (unsigned)regsize); + if (r == NULL) + FAIL("out of space"); + + /* Second pass: emit code. */ + regparse = exp; + regnpar = 1; + regcode = r->program; + regc(MAGIC); + if (reg(0, &flags) == NULL) + return(NULL); + + /* Dig out information for optimizations. */ + r->regstart = '\0'; /* Worst-case defaults. */ + r->reganch = 0; + r->regmust = NULL; + r->regmlen = 0; + scan = r->program+1; /* First BRANCH. */ + if (OP(regnext(scan)) == END) { /* Only one top-level choice. */ + scan = OPERAND(scan); + + /* Starting-point info. */ + if (OP(scan) == EXACTLY) + r->regstart = *OPERAND(scan); + else if (OP(scan) == BOL) + r->reganch++; + + /* + * If there's something expensive in the r.e., find the + * longest literal string that must appear and make it the + * regmust. Resolve ties in favor of later strings, since + * the regstart check works with the beginning of the r.e. + * and avoiding duplication strengthens checking. Not a + * strong reason, but sufficient in the absence of others. + */ + if (flags&SPSTART) { + longest = NULL; + len = 0; + for (; scan != NULL; scan = regnext(scan)) + if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) { + longest = OPERAND(scan); + len = strlen(OPERAND(scan)); + } + r->regmust = longest; + r->regmlen = len; + } + } + + return(r); +} + +/* + - reg - regular expression, i.e. main body or parenthesized thing + * + * Caller must absorb opening parenthesis. + * + * Combining parenthesis handling with the base level of regular expression + * is a trifle forced, but the need to tie the tails of the branches to what + * follows makes it hard to avoid. + */ +static char * +reg(paren, flagp) +int paren; /* Parenthesized? */ +int *flagp; +{ + register char *ret; + register char *br; + register char *ender; + register int parno; + int flags; + + *flagp = HASWIDTH; /* Tentatively. 
*/ + + /* Make an OPEN node, if parenthesized. */ + if (paren) { + if (regnpar >= NSUBEXP) + FAIL("too many ()"); + parno = regnpar; + regnpar++; + ret = regnode(OPEN+parno); + } else + ret = NULL; + + /* Pick up the branches, linking them together. */ + br = regbranch(&flags); + if (br == NULL) + return(NULL); + if (ret != NULL) + regtail(ret, br); /* OPEN -> first. */ + else + ret = br; + if (!(flags&HASWIDTH)) + *flagp &= ~HASWIDTH; + *flagp |= flags&SPSTART; + while (*regparse == '|') { + regparse++; + br = regbranch(&flags); + if (br == NULL) + return(NULL); + regtail(ret, br); /* BRANCH -> BRANCH. */ + if (!(flags&HASWIDTH)) + *flagp &= ~HASWIDTH; + *flagp |= flags&SPSTART; + } + + /* Make a closing node, and hook it on the end. */ + ender = regnode((paren) ? CLOSE+parno : END); + regtail(ret, ender); + + /* Hook the tails of the branches to the closing node. */ + for (br = ret; br != NULL; br = regnext(br)) + regoptail(br, ender); + + /* Check for proper termination. */ + if (paren && *regparse++ != ')') { + FAIL("unmatched ()"); + } else if (!paren && *regparse != '\0') { + if (*regparse == ')') { + FAIL("unmatched ()"); + } else + FAIL("junk on end"); /* "Can't happen". */ + /* NOTREACHED */ + } + + return(ret); +} + +/* + - regbranch - one alternative of an | operator + * + * Implements the concatenation operator. + */ +static char * +regbranch(flagp) +int *flagp; +{ + register char *ret; + register char *chain; + register char *latest; + int flags; + + *flagp = WORST; /* Tentatively. */ + + ret = regnode(BRANCH); + chain = NULL; + while (*regparse != '\0' && *regparse != '|' && *regparse != ')') { + latest = regpiece(&flags); + if (latest == NULL) + return(NULL); + *flagp |= flags&HASWIDTH; + if (chain == NULL) /* First piece. */ + *flagp |= flags&SPSTART; + else + regtail(chain, latest); + chain = latest; + } + if (chain == NULL) /* Loop ran zero times. 
*/ + (void) regnode(NOTHING); + + return(ret); +} + +/* + - regpiece - something followed by possible [*+?] + * + * Note that the branching code sequences used for ? and the general cases + * of * and + are somewhat optimized: they use the same NOTHING node as + * both the endmarker for their branch list and the body of the last branch. + * It might seem that this node could be dispensed with entirely, but the + * endmarker role is not redundant. + */ +static char * +regpiece(flagp) +int *flagp; +{ + register char *ret; + register char op; + register char *next; + int flags; + + ret = regatom(&flags); + if (ret == NULL) + return(NULL); + + op = *regparse; + if (!ISMULT(op)) { + *flagp = flags; + return(ret); + } + + if (!(flags&HASWIDTH) && op != '?') + FAIL("*+ operand could be empty"); + *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH); + + if (op == '*' && (flags&SIMPLE)) + reginsert(STAR, ret); + else if (op == '*') { + /* Emit x* as (x&|), where & means "self". */ + reginsert(BRANCH, ret); /* Either x */ + regoptail(ret, regnode(BACK)); /* and loop */ + regoptail(ret, ret); /* back */ + regtail(ret, regnode(BRANCH)); /* or */ + regtail(ret, regnode(NOTHING)); /* null. */ + } else if (op == '+' && (flags&SIMPLE)) + reginsert(PLUS, ret); + else if (op == '+') { + /* Emit x+ as x(&|), where & means "self". */ + next = regnode(BRANCH); /* Either */ + regtail(ret, next); + regtail(regnode(BACK), ret); /* loop back */ + regtail(next, regnode(BRANCH)); /* or */ + regtail(ret, regnode(NOTHING)); /* null. */ + } else if (op == '?') { + /* Emit x? as (x|) */ + reginsert(BRANCH, ret); /* Either x */ + regtail(ret, regnode(BRANCH)); /* or */ + next = regnode(NOTHING); /* null. 
*/ + regtail(ret, next); + regoptail(ret, next); + } + regparse++; + if (ISMULT(*regparse)) + FAIL("nested *?+"); + + return(ret); +} + +/* + - regatom - the lowest level + * + * Optimization: gobbles an entire sequence of ordinary characters so that + * it can turn them into a single node, which is smaller to store and + * faster to run. Backslashed characters are exceptions, each becoming a + * separate node; the code is simpler that way and it's not worth fixing. + */ +static char * +regatom(flagp) +int *flagp; +{ + register char *ret; + int flags; + + *flagp = WORST; /* Tentatively. */ + + switch (*regparse++) { + case '^': + ret = regnode(BOL); + break; + case '$': + ret = regnode(EOL); + break; + case '.': + ret = regnode(ANY); + *flagp |= HASWIDTH|SIMPLE; + break; + case '[': { + register int class; + register int classend; + + if (*regparse == '^') { /* Complement of range. */ + ret = regnode(ANYBUT); + regparse++; + } else + ret = regnode(ANYOF); + if (*regparse == ']' || *regparse == '-') + regc(*regparse++); + while (*regparse != '\0' && *regparse != ']') { + if (*regparse == '-') { + regparse++; + if (*regparse == ']' || *regparse == '\0') + regc('-'); + else { + class = UCHARAT(regparse-2)+1; + classend = UCHARAT(regparse); + if (class > classend+1) + FAIL("invalid [] range"); + for (; class <= classend; class++) + regc(class); + regparse++; + } + } else + regc(*regparse++); + } + regc('\0'); + if (*regparse != ']') + FAIL("unmatched []"); + regparse++; + *flagp |= HASWIDTH|SIMPLE; + } + break; + case '(': + ret = reg(1, &flags); + if (ret == NULL) + return(NULL); + *flagp |= flags&(HASWIDTH|SPSTART); + break; + case '\0': + case '|': + case ')': + FAIL("internal urp"); /* Supposed to be caught earlier. 
*/ + break; + case '?': + case '+': + case '*': + FAIL("?+* follows nothing"); + break; + case '\\': + if (*regparse == '\0') + FAIL("trailing \\"); + ret = regnode(EXACTLY); + regc(*regparse++); + regc('\0'); + *flagp |= HASWIDTH|SIMPLE; + break; + default: { + register int len; + register char ender; + + regparse--; + len = strcspn(regparse, META); + if (len <= 0) + FAIL("internal disaster"); + ender = *(regparse+len); + if (len > 1 && ISMULT(ender)) + len--; /* Back off clear of ?+* operand. */ + *flagp |= HASWIDTH; + if (len == 1) + *flagp |= SIMPLE; + ret = regnode(EXACTLY); + while (len > 0) { + regc(*regparse++); + len--; + } + regc('\0'); + } + break; + } + + return(ret); +} + +/* + - regnode - emit a node + */ +static char * /* Location. */ +regnode(op) +char op; +{ + register char *ret; + register char *ptr; + + ret = regcode; + if (ret == ®dummy) { + regsize += 3; + return(ret); + } + + ptr = ret; + *ptr++ = op; + *ptr++ = '\0'; /* Null "next" pointer. */ + *ptr++ = '\0'; + regcode = ptr; + + return(ret); +} + +/* + - regc - emit (if appropriate) a byte of code + */ +static void +regc(b) +char b; +{ + if (regcode != ®dummy) + *regcode++ = b; + else + regsize++; +} + +/* + - reginsert - insert an operator in front of already-emitted operand + * + * Means relocating the operand. + */ +static void +reginsert(op, opnd) +char op; +char *opnd; +{ + register char *src; + register char *dst; + register char *place; + + if (regcode == ®dummy) { + regsize += 3; + return; + } + + src = regcode; + regcode += 3; + dst = regcode; + while (src > opnd) + *--dst = *--src; + + place = opnd; /* Op node, where operand used to be. */ + *place++ = op; + *place++ = '\0'; + *place++ = '\0'; +} + +/* + - regtail - set the next-pointer at the end of a node chain + */ +static void +regtail(p, val) +char *p; +char *val; +{ + register char *scan; + register char *temp; + register int offset; + + if (p == ®dummy) + return; + + /* Find last node. 
*/ + scan = p; + for (;;) { + temp = regnext(scan); + if (temp == NULL) + break; + scan = temp; + } + + if (OP(scan) == BACK) + offset = scan - val; + else + offset = val - scan; + *(scan+1) = (offset>>8)&0377; + *(scan+2) = offset&0377; +} + +/* + - regoptail - regtail on operand of first argument; nop if operandless + */ +static void +regoptail(p, val) +char *p; +char *val; +{ + /* "Operandless" and "op != BRANCH" are synonymous in practice. */ + if (p == NULL || p == ®dummy || OP(p) != BRANCH) + return; + regtail(OPERAND(p), val); +} + +/* + * regexec and friends + */ + +/* + * Global work variables for regexec(). + */ +static char *reginput; /* String-input pointer. */ +static char *regbol; /* Beginning of input, for ^ check. */ +static char **regstartp; /* Pointer to startp array. */ +static char **regendp; /* Ditto for endp. */ + +/* + * Forwards. + */ +STATIC int regtry(regexp *prog, char *string); +STATIC int regmatch(char *prog); +STATIC int regrepeat(char *p); + +#ifdef DEBUG +int regnarrate = 0; +void regdump(); +STATIC char *regprop(char *op); +#endif + +/* + - regexec - match a regexp against a string + */ +int +regexec(prog, string) +register regexp *prog; +register char *string; +{ + register char *s; + + /* Be paranoid... */ + if (prog == NULL || string == NULL) { + regerror("NULL parameter"); + return(0); + } + + /* Check validity of program. */ + if (UCHARAT(prog->program) != MAGIC) { + regerror("corrupted program"); + return(0); + } + + /* If there is a "must appear" string, look for it. */ + if (prog->regmust != NULL) { + s = string; + while ((s = strchr(s, prog->regmust[0])) != NULL) { + if (strncmp(s, prog->regmust, prog->regmlen) == 0) + break; /* Found it. */ + s++; + } + if (s == NULL) /* Not present. */ + return(0); + } + + /* Mark beginning of line for ^ . */ + regbol = string; + + /* Simplest case: anchored match need be tried only once. */ + if (prog->reganch) + return(regtry(prog, string)); + + /* Messy cases: unanchored match. 
*/ + s = string; + if (prog->regstart != '\0') + /* We know what char it must start with. */ + while ((s = strchr(s, prog->regstart)) != NULL) { + if (regtry(prog, s)) + return(1); + s++; + } + else + /* We don't -- general case. */ + do { + if (regtry(prog, s)) + return(1); + } while (*s++ != '\0'); + + /* Failure. */ + return(0); +} + +/* + - regtry - try match at specific point + */ +static int /* 0 failure, 1 success */ +regtry(prog, string) +regexp *prog; +char *string; +{ + register int i; + register char **sp; + register char **ep; + + reginput = string; + regstartp = prog->startp; + regendp = prog->endp; + + sp = prog->startp; + ep = prog->endp; + for (i = NSUBEXP; i > 0; i--) { + *sp++ = NULL; + *ep++ = NULL; + } + if (regmatch(prog->program + 1)) { + prog->startp[0] = string; + prog->endp[0] = reginput; + return(1); + } else + return(0); +} + +/* + - regmatch - main matching routine + * + * Conceptually the strategy is simple: check to see whether the current + * node matches, call self recursively to see whether the rest matches, + * and then act accordingly. In practice we make some effort to avoid + * recursion, in particular by going through "ordinary" nodes (that don't + * need to know whether the rest of the match failed) by a loop instead of + * by recursion. + */ +static int /* 0 failure, 1 success */ +regmatch(prog) +char *prog; +{ + register char *scan; /* Current node. */ + char *next; /* Next node. 
*/ + + scan = prog; +#ifdef DEBUG + if (scan != NULL && regnarrate) + fprintf(stderr, "%s(\n", regprop(scan)); +#endif + while (scan != NULL) { +#ifdef DEBUG + if (regnarrate) + fprintf(stderr, "%s...\n", regprop(scan)); +#endif + next = regnext(scan); + + switch (OP(scan)) { + case BOL: + if (reginput != regbol) + return(0); + break; + case EOL: + if (*reginput != '\0') + return(0); + break; + case ANY: + if (*reginput == '\0') + return(0); + reginput++; + break; + case EXACTLY: { + register int len; + register char *opnd; + + opnd = OPERAND(scan); + /* Inline the first character, for speed. */ + if (*opnd != *reginput) + return(0); + len = strlen(opnd); + if (len > 1 && strncmp(opnd, reginput, len) != 0) + return(0); + reginput += len; + } + break; + case ANYOF: + if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) == NULL) + return(0); + reginput++; + break; + case ANYBUT: + if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) != NULL) + return(0); + reginput++; + break; + case NOTHING: + break; + case BACK: + break; + case OPEN+1: + case OPEN+2: + case OPEN+3: + case OPEN+4: + case OPEN+5: + case OPEN+6: + case OPEN+7: + case OPEN+8: + case OPEN+9: { + register int no; + register char *save; + + no = OP(scan) - OPEN; + save = reginput; + + if (regmatch(next)) { + /* + * Don't set startp if some later + * invocation of the same parentheses + * already has. + */ + if (regstartp[no] == NULL) + regstartp[no] = save; + return(1); + } else + return(0); + } + break; + case CLOSE+1: + case CLOSE+2: + case CLOSE+3: + case CLOSE+4: + case CLOSE+5: + case CLOSE+6: + case CLOSE+7: + case CLOSE+8: + case CLOSE+9: { + register int no; + register char *save; + + no = OP(scan) - CLOSE; + save = reginput; + + if (regmatch(next)) { + /* + * Don't set endp if some later + * invocation of the same parentheses + * already has. 
+ */ + if (regendp[no] == NULL) + regendp[no] = save; + return(1); + } else + return(0); + } + break; + case BRANCH: { + register char *save; + + if (OP(next) != BRANCH) /* No choice. */ + next = OPERAND(scan); /* Avoid recursion. */ + else { + do { + save = reginput; + if (regmatch(OPERAND(scan))) + return(1); + reginput = save; + scan = regnext(scan); + } while (scan != NULL && OP(scan) == BRANCH); + return(0); + /* NOTREACHED */ + } + } + break; + case STAR: + case PLUS: { + register char nextch; + register int no; + register char *save; + register int min; + + /* + * Lookahead to avoid useless match attempts + * when we know what character comes next. + */ + nextch = '\0'; + if (OP(next) == EXACTLY) + nextch = *OPERAND(next); + min = (OP(scan) == STAR) ? 0 : 1; + save = reginput; + no = regrepeat(OPERAND(scan)); + while (no >= min) { + /* If it could work, try it. */ + if (nextch == '\0' || *reginput == nextch) + if (regmatch(next)) + return(1); + /* Couldn't or didn't -- back up. */ + no--; + reginput = save + no; + } + return(0); + } + break; + case END: + return(1); /* Success! */ + break; + default: + regerror("memory corruption"); + return(0); + break; + } + + scan = next; + } + + /* + * We get here only if there's trouble -- normally "case END" is + * the terminating point. + */ + regerror("corrupted pointers"); + return(0); +} + +/* + - regrepeat - repeatedly match something simple, report how many + */ +static int +regrepeat(p) +char *p; +{ + register int count = 0; + register char *scan; + register char *opnd; + + scan = reginput; + opnd = OPERAND(p); + switch (OP(p)) { + case ANY: + count = strlen(scan); + scan += count; + break; + case EXACTLY: + while (*opnd == *scan) { + count++; + scan++; + } + break; + case ANYOF: + while (*scan != '\0' && strchr(opnd, *scan) != NULL) { + count++; + scan++; + } + break; + case ANYBUT: + while (*scan != '\0' && strchr(opnd, *scan) == NULL) { + count++; + scan++; + } + break; + default: /* Oh dear. 
Called inappropriately. */ + regerror("internal foulup"); + count = 0; /* Best compromise. */ + break; + } + reginput = scan; + + return(count); +} + +/* + - regnext - dig the "next" pointer out of a node + */ +static char * +regnext(p) +register char *p; +{ + register int offset; + + if (p == ®dummy) + return(NULL); + + offset = NEXT(p); + if (offset == 0) + return(NULL); + + if (OP(p) == BACK) + return(p-offset); + else + return(p+offset); +} + +#ifdef DEBUG + +STATIC char *regprop(); + +/* + - regdump - dump a regexp onto stdout in vaguely comprehensible form + */ +void +regdump(r) +regexp *r; +{ + register char *s; + register char op = EXACTLY; /* Arbitrary non-END op. */ + register char *next; + extern char *strchr(); + + + s = r->program + 1; + while (op != END) { /* While that wasn't END last time... */ + op = OP(s); + printf("%2d%s", s-r->program, regprop(s)); /* Where, what. */ + next = regnext(s); + if (next == NULL) /* Next ptr. */ + printf("(0)"); + else + printf("(%d)", (s-r->program)+(next-s)); + s += 3; + if (op == ANYOF || op == ANYBUT || op == EXACTLY) { + /* Literal string, where present. */ + while (*s != '\0') { + putchar(*s); + s++; + } + s++; + } + putchar('\n'); + } + + /* Header fields of interest. 
*/ + if (r->regstart != '\0') + printf("start `%c' ", r->regstart); + if (r->reganch) + printf("anchored "); + if (r->regmust != NULL) + printf("must have \"%s\"", r->regmust); + printf("\n"); +} + +/* + - regprop - printable representation of opcode + */ +static char * +regprop(op) +char *op; +{ + register char *p; + static char buf[50]; + + (void) strcpy(buf, ":"); + + switch (OP(op)) { + case BOL: + p = "BOL"; + break; + case EOL: + p = "EOL"; + break; + case ANY: + p = "ANY"; + break; + case ANYOF: + p = "ANYOF"; + break; + case ANYBUT: + p = "ANYBUT"; + break; + case BRANCH: + p = "BRANCH"; + break; + case EXACTLY: + p = "EXACTLY"; + break; + case NOTHING: + p = "NOTHING"; + break; + case BACK: + p = "BACK"; + break; + case END: + p = "END"; + break; + case OPEN+1: + case OPEN+2: + case OPEN+3: + case OPEN+4: + case OPEN+5: + case OPEN+6: + case OPEN+7: + case OPEN+8: + case OPEN+9: + sprintf(buf+strlen(buf), "OPEN%d", OP(op)-OPEN); + p = NULL; + break; + case CLOSE+1: + case CLOSE+2: + case CLOSE+3: + case CLOSE+4: + case CLOSE+5: + case CLOSE+6: + case CLOSE+7: + case CLOSE+8: + case CLOSE+9: + sprintf(buf+strlen(buf), "CLOSE%d", OP(op)-CLOSE); + p = NULL; + break; + case STAR: + p = "STAR"; + break; + case PLUS: + p = "PLUS"; + break; + default: + regerror("corrupted opcode"); + break; + } + if (p != NULL) + (void) strcat(buf, p); + return(buf); +} +#endif + +/* + * The following is provided for those people who do not have strcspn() in + * their C libraries. They should get off their butts and do something + * about it; at least one public-domain implementation of those (highly + * useful) string routines has been published on Usenet. 
+ */ +/* + * strcspn - find length of initial segment of s1 consisting entirely + * of characters not from s2 + */ + +static int +strcspn(s1, s2) +char *s1; +char *s2; +{ + register char *scan1; + register char *scan2; + register int count; + + count = 0; + for (scan1 = s1; *scan1 != '\0'; scan1++) { + for (scan2 = s2; *scan2 != '\0';) /* ++ moved down. */ + if (*scan1 == *scan2++) + return(count); + count++; + } + return(count); +} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regexp.h linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regexp.h --- linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regexp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regexp.h 2003-08-25 23:52:45.000000000 +0200 @@ -0,0 +1,20 @@ +/* + * Definitions etc. for regexp(3) routines. + * + * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof], + * not the System V one. + */ +#define NSUBEXP 10 +typedef struct regexp { + char *startp[NSUBEXP]; + char *endp[NSUBEXP]; + char regstart; /* Internal use only. */ + char reganch; /* Internal use only. */ + char *regmust; /* Internal use only. */ + int regmlen; /* Internal use only. */ + char program[1]; /* Unwarranted chumminess with compiler. */ +} regexp; + +extern regexp *regcomp(char *exp, int *patternlength); +int regexec(regexp *prog, char *string); +void regsub(regexp *prog, char *source, char *dest); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regmagic.h linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regmagic.h --- linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regmagic.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regmagic.h 2003-08-25 23:52:45.000000000 +0200 @@ -0,0 +1,5 @@ +/* + * The first byte of the regexp internal "program" is actually this magic + * number; the start node begins in the second byte. 
+ */ +#define MAGIC 0234 diff -Naurp linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regsub.c linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regsub.c --- linux-2.4.20-wolk4.8-fullkernel/net/sched/regexp/regsub.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/net/sched/regexp/regsub.c 2003-08-25 23:52:45.000000000 +0200 @@ -0,0 +1,88 @@ +/* + * regsub + * @(#)regsub.c 1.3 of 2 April 86 + * + * Copyright (c) 1986 by University of Toronto. + * Written by Henry Spencer. Not derived from licensed software. + * + * Permission is granted to anyone to use this software for any + * purpose on any computer system, and to redistribute it freely, + * subject to the following restrictions: + * + * 1. The author is not responsible for the consequences of use of + * this software, no matter how awful, even if they arise + * from defects in it. + * + * 2. The origin of this software must not be misrepresented, either + * by explicit claim or by omission. + * + * 3. Altered versions must be plainly marked as such, and must not + * be misrepresented as being the original software. + * + * + * This code was modified by Ethan Sommer to work within the kernel + * (it now uses kmalloc etc..) 
+ * + */ +#include "regexp.h" +#include "regmagic.h" +#include + +#ifndef CHARBITS +#define UCHARAT(p) ((int)*(unsigned char *)(p)) +#else +#define UCHARAT(p) ((int)*(p)&CHARBITS) +#endif + +extern void regerror(char * s); + +/* + - regsub - perform substitutions after a regexp match + */ +void +regsub(regexp * prog, char * source, char * dest) +{ + register char *src; + register char *dst; + register char c; + register int no; + register int len; + + /* Not necessary and gcc doesn't like it -MLS */ + /*extern char *strncpy();*/ + + if (prog == NULL || source == NULL || dest == NULL) { + regerror("NULL parm to regsub"); + return; + } + if (UCHARAT(prog->program) != MAGIC) { + regerror("damaged regexp fed to regsub"); + return; + } + + src = source; + dst = dest; + while ((c = *src++) != '\0') { + if (c == '&') + no = 0; + else if (c == '\\' && '0' <= *src && *src <= '9') + no = *src++ - '0'; + else + no = -1; + + if (no < 0) { /* Ordinary character. */ + if (c == '\\' && (*src == '\\' || *src == '&')) + c = *src++; + *dst++ = c; + } else if (prog->startp[no] != NULL && prog->endp[no] != NULL) { + len = prog->endp[no] - prog->startp[no]; + (void) strncpy(dst, prog->startp[no], len); + dst += len; + if (len != 0 && *(dst-1) == '\0') { /* strncpy hit NUL. 
*/ + regerror("damaged match string"); + return; + } + } + } + *dst++ = '\0'; +} diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/Config.in linux-2.4.20-wolk4.9-fullkernel/rsbac/Config.in --- linux-2.4.20-wolk4.8-fullkernel/rsbac/Config.in 2003-08-25 18:25:28.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/Config.in 2003-08-25 20:33:02.000000000 +0200 @@ -23,6 +23,7 @@ fi bool 'RSBAC debugging support' CONFIG_RSBAC_DEBUG bool 'Provide DEV and USER backup files' CONFIG_RSBAC_DEV_USER_BACKUP int 'RSBAC default security officer user ID' CONFIG_RSBAC_SECOFF_UID 400 +bool 'Delayed init for initial ramdisk' CONFIG_RSBAC_INIT_DELAY endmenu bool 'RSBAC Maintenance Kernel (Use with care!)' CONFIG_RSBAC_MAINT @@ -62,6 +63,9 @@ if [ "$CONFIG_RSBAC_MAC" = "y" ]; then bool ' MAC protection for AUTH module' CONFIG_RSBAC_MAC_AUTH_PROT bool ' MAC protection for GENeral attributes' CONFIG_RSBAC_MAC_GEN_PROT bool ' Light MAC edition' CONFIG_RSBAC_MAC_LIGHT + bool ' Give trusted processes full read access' CONFIG_RSBAC_MAC_TRUSTED_READ + bool ' Reset current level on each execute' CONFIG_RSBAC_MAC_RESET_CURR + bool ' Log all automatic changes to current level' CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE if [ "$CONFIG_RSBAC_NET" = "y" ]; then if [ "$CONFIG_RSBAC_NET_DEV" = "y" ]; then bool ' MAC network device protection' CONFIG_RSBAC_MAC_NET_DEV_PROT @@ -70,6 +74,7 @@ if [ "$CONFIG_RSBAC_MAC" = "y" ]; then bool ' MAC network object protection' CONFIG_RSBAC_MAC_NET_OBJ_PROT fi fi + int ' MAC number of process lists' CONFIG_RSBAC_MAC_NR_P_LISTS 4 fi bool 'RSBAC support for FC policy' CONFIG_RSBAC_FC if [ "$CONFIG_RSBAC_FC" = "y" ]; then @@ -113,12 +118,21 @@ fi bool 'RSBAC support for MS policy' CONFIG_RSBAC_MS if [ "$CONFIG_RSBAC_MS" = "y" ]; then int ' Scanning result lifetime in seconds' CONFIG_RSBAC_MS_TTL 604800 - bool ' Keep scanning results over reboot' CONFIG_RSBAC_MS_PERSIST - bool ' Export do_scan for external scanner modules' CONFIG_RSBAC_MS_EXTERNAL - if [ 
"$CONFIG_RSBAC_MS_EXTERNAL" = "y" ]; then - bool ' Disable internal do_scan' CONFIG_RSBAC_MS_NO_INTERNAL + bool ' Keep scanning results over reboot' CONFIG_RSBAC_MS_PERSIST + bool ' Propagate ms-trusted' CONFIG_RSBAC_MS_PROP_TRUSTED + bool ' External scanner module' CONFIG_RSBAC_MS_EXT + if [ "$CONFIG_RSBAC_MS_EXT" = "y" ]; then + bool ' Support for F-Protd' CONFIG_RSBAC_MS_EXT_FPROTD + if [ "$CONFIG_RSBAC_MS_EXT_FPROTD" = "y" ]; then + string ' F-Protd command line switches' CONFIG_RSBAC_MS_EXT_FPROTD_SW "-ai%20-old" + fi + bool ' Support for clamd' CONFIG_RSBAC_MS_EXT_CLAMD + if [ "$CONFIG_RSBAC_MS_EXT_CLAMD" = "y" ]; then + int ' Clamd port' CONFIG_RSBAC_MS_EXT_CLAMD_PORT 3310 + string ' Clamd scan action' CONFIG_RSBAC_MS_EXT_CLAMD_ACTION "SCAN" fi - bool ' Also check read-open and read-write-open' CONFIG_RSBAC_MS_READ + fi +# bool ' Also check read-open and read-write-open' CONFIG_RSBAC_MS_READ # bool ' Also do socket scan' CONFIG_RSBAC_MS_SOCK if [ "$CONFIG_RSBAC_ADVANCED" = "y" ]; then bool ' MS protection for AUTH module' CONFIG_RSBAC_MS_AUTH_PROT @@ -144,10 +158,12 @@ if [ "$CONFIG_RSBAC_RC" = "y" ]; then bool ' RC network object protection' CONFIG_RSBAC_RC_NET_OBJ_PROT fi fi + int ' RC number of process lists' CONFIG_RSBAC_RC_NR_P_LISTS 4 fi bool 'RSBAC support for AUTH policy' CONFIG_RSBAC_AUTH if [ "$CONFIG_RSBAC_AUTH" = "y" ]; then bool ' AUTH module and attribute protection' CONFIG_RSBAC_AUTH_AUTH_PROT + bool ' AUTH support for effective and fs owner control' CONFIG_RSBAC_AUTH_DAC_OWNER fi bool 'RSBAC support for ACL policy' CONFIG_RSBAC_ACL if [ "$CONFIG_RSBAC_ACL" = "y" ]; then @@ -168,6 +184,7 @@ if [ "$CONFIG_RSBAC_ACL" = "y" ]; then fi bool 'RSBAC support for Linux Caps (CAP) policy' CONFIG_RSBAC_CAP if [ "$CONFIG_RSBAC_CAP" = "y" ]; then + bool ' Support CAP process hiding' CONFIG_RSBAC_CAP_PROC_HIDE bool ' CAP protection for AUTH module' CONFIG_RSBAC_CAP_AUTH_PROT fi bool 'RSBAC support for JAIL policy' CONFIG_RSBAC_JAIL @@ -181,6 +198,10 @@ if 
[ "$CONFIG_RSBAC_JAIL" = "y" ]; then fi fi fi +bool 'RSBAC support for System Resources (RES) policy' CONFIG_RSBAC_RES +if [ "$CONFIG_RSBAC_RES" = "y" ]; then + bool ' RES protection for AUTH module' CONFIG_RSBAC_RES_AUTH_PROT +fi endmenu if [ "$CONFIG_RSBAC_MAINT" = "n" ]; then @@ -188,9 +209,9 @@ if [ "$CONFIG_RSBAC_MAINT" = "n" ]; then mainmenu_option next_comment comment 'Softmode and switching' bool 'RSBAC policies switchable' CONFIG_RSBAC_SWITCH - bool 'RSBAC softmode' CONFIG_RSBAC_SOFTMODE + bool 'RSBAC soft mode' CONFIG_RSBAC_SOFTMODE if [ "$CONFIG_RSBAC_SOFTMODE" = "y" ]; then - bool ' Toggle softmode with SysRq-X' CONFIG_RSBAC_SOFTMODE_SYSRQ + bool ' Toggle soft mode with SysRq-X' CONFIG_RSBAC_SOFTMODE_SYSRQ bool ' Individual module softmode support' CONFIG_RSBAC_SOFTMODE_IND fi endmenu @@ -202,7 +223,7 @@ if [ "$CONFIG_RSBAC_MAINT" = "n" ]; then bool 'Individual program logging' CONFIG_RSBAC_IND_PROG_LOG bool 'Log full path' CONFIG_RSBAC_LOG_FULL_PATH if [ "$CONFIG_RSBAC_LOG_FULL_PATH" = "y" ]; then - int ' Maximum path length (256 - ?)' CONFIG_RSBAC_MAX_PATH_LEN 1024 + int ' Maximum path length (256 - 4000)' CONFIG_RSBAC_MAX_PATH_LEN 512 fi bool 'RSBAC own logging facility' CONFIG_RSBAC_RMSG if [ "$CONFIG_RSBAC_RMSG" = "y" ]; then @@ -210,6 +231,18 @@ if [ "$CONFIG_RSBAC_MAINT" = "n" ]; then if [ "$CONFIG_RSBAC_RMSG_EXCL" = "n" ]; then bool ' Allow to disable logging to syslog' CONFIG_RSBAC_RMSG_NOSYSLOG fi + comment ' ' + bool ' Log to remote UDP network socket' CONFIG_RSBAC_LOG_REMOTE + if [ "$CONFIG_RSBAC_LOG_REMOTE" = "y" ]; then + bool ' Immediate remote logging' CONFIG_RSBAC_LOG_REMOTE_SYNC + if [ "$CONFIG_RSBAC_LOG_REMOTE_SYNC" = "n" ]; then + int ' Logging interval in timer ticks' CONFIG_RSBAC_LOG_INTERVAL 100 + fi + string ' Local UDP address' CONFIG_RSBAC_LOG_LOCAL_ADDR "0.0.0.0" + int ' Local UDP port' CONFIG_RSBAC_LOG_LOCAL_PORT 0 + string ' Remote UDP address' CONFIG_RSBAC_LOG_REMOTE_ADDR "127.0.0.1" + int ' Remote UDP port' 
CONFIG_RSBAC_LOG_REMOTE_PORT 514 + fi fi endmenu @@ -218,6 +251,12 @@ if [ "$CONFIG_RSBAC_MAINT" = "n" ]; then bool 'RSBAC symlink redirection' CONFIG_RSBAC_SYM_REDIR if [ "$CONFIG_RSBAC_SYM_REDIR" = "y" ]; then bool ' Add user ID number' CONFIG_RSBAC_SYM_REDIR_UID + if [ "$CONFIG_RSBAC_MAC" = "y" ]; then + bool ' Add MAC current security level' CONFIG_RSBAC_SYM_REDIR_MAC + if [ "$CONFIG_RSBAC_MAC" = "y" ]; then + bool ' Also add MAC current category vector' CONFIG_RSBAC_SYM_REDIR_MAC_CAT + fi + fi if [ "$CONFIG_RSBAC_RC" = "y" ]; then bool ' Add RC role number' CONFIG_RSBAC_SYM_REDIR_RC fi @@ -238,8 +277,10 @@ if [ "$CONFIG_RSBAC_MAINT" = "n" ]; then bool 'Support secure_delete' CONFIG_RSBAC_SECDEL bool 'Intercept sys_read and sys_write' CONFIG_RSBAC_RW bool 'Intercept Semaphore IPC operations' CONFIG_RSBAC_IPC_SEM + bool 'Control DAC process owner (seteuid, setfsuid)' CONFIG_RSBAC_DAC_OWNER bool 'RSBAC check sys_syslog' CONFIG_RSBAC_SYSLOG - bool 'No decision on net mounts' CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT + bool 'Make RSBAC data files visible' CONFIG_RSBAC_DAT_VISIBLE + bool 'No decision on net mounts' CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT bool 'X support (normal user MODIFY_PERM access to ST_ioports)' CONFIG_RSBAC_USER_MOD_IOPERM if [ "$CONFIG_RSBAC_PROC" = "y" ]; then bool 'RSBAC extra statistics' CONFIG_RSBAC_XSTATS diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/Makefile linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/Makefile --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/Makefile 2003-08-25 18:25:28.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/Makefile 2003-08-25 20:33:02.000000000 +0200 @@ -9,7 +9,7 @@ #TOPDIR := ../.. 
O_TARGET := adf.o -ALL_SUB_DIRS := mac fc sim pm ms ff rc auth reg acl jail +ALL_SUB_DIRS := mac fc sim pm ms ff rc auth reg acl cap jail ifeq ($(PATCHLEVEL),2) @@ -95,6 +95,13 @@ SUB_DIRS += jail O_OBJS += jail/jail.o endif +ifeq ($(CONFIG_RSBAC_RES),y) +ifneq ($(CONFIG_RSBAC_MAINT),y) +SUB_DIRS += res +O_OBJS += res/res.o +endif +endif + else obj-y := adf_main.o @@ -148,6 +155,11 @@ endif subdir-$(CONFIG_RSBAC_JAIL) += jail obj-$(CONFIG_RSBAC_JAIL) += jail/jail.o +ifneq ($(CONFIG_RSBAC_MAINT),y) +subdir-$(CONFIG_RSBAC_RES) += res +obj-$(CONFIG_RSBAC_RES) += res/res.o +endif + subdir-$(CONFIG_RSBAC_REG) += reg obj-$(CONFIG_RSBAC_REG) += reg/reg.o subdir-m += reg diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/acl/acl_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/acl/acl_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/acl/acl_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/acl/acl_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Access Control Lists (ACL) */ /* File: rsbac/adf/acl/acl_main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 11/Mar/2002 */ +/* Last modified: 22/Jan/2003 */ /**************************************************** */ #include diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/acl/acl_syscalls.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/acl/acl_syscalls.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/acl/acl_syscalls.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/acl/acl_syscalls.c 2003-08-25 20:33:02.000000000 +0200 @@ -6,7 +6,7 @@ /* */ /* Author and (c) 1999-2002: Amon Ott */ /* */ -/* Last modified: 11/Mar/2002 */ +/* Last modified: 08/Oct/2002 */ /*************************************************** */ #include @@ -1754,6 +1754,8 @@ int rsbac_acl_sys_group(enum rsbac_acl_ err = -RSBAC_EINVALIDVALUE; break; } + if(arg.get_group_members.maxnum > 
RSBAC_ACL_MAX_MAXNUM) + arg.get_group_members.maxnum = RSBAC_ACL_MAX_MAXNUM; if(!arg.get_group_members.user_array) { err = -RSBAC_EINVALIDPOINTER; @@ -1824,6 +1826,7 @@ int rsbac_acl_sys_group(enum rsbac_acl_ break; default: + break; } #ifdef CONFIG_RSBAC_SOFTMODE if( ( rsbac_softmode diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/adf_check.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/adf_check.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/adf_check.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/adf_check.c 2003-08-25 20:33:02.000000000 +0200 @@ -99,6 +99,22 @@ enum rsbac_adf_req_ret_t return(UNDEFINED); } +#ifdef CONFIG_RSBAC_DAC_OWNER + case R_CHANGE_DAC_EFF_OWNER: + case R_CHANGE_DAC_FS_OWNER: + switch(target) + { + case T_PROCESS: + /* there must be a new owner specified */ + if(attr == A_owner) + return(DO_NOT_CARE); + /* fall through */ + /* all other cases are undefined */ + default: + return(UNDEFINED); + } +#endif + case R_CHDIR: switch(target) { @@ -560,6 +576,24 @@ int rsbac_adf_set_attr_check( return(-RSBAC_EINVALIDTARGET); } +#ifdef CONFIG_RSBAC_DAC_OWNER + case R_CHANGE_DAC_EFF_OWNER: + case R_CHANGE_DAC_FS_OWNER: + switch(target) + { + /* Changing process owner affects access decisions, */ + /* so attributes have to be adjusted. 
*/ + case T_PROCESS: + /* there must be a new owner specified */ + if(attr != A_owner) + return(-RSBAC_EINVALIDATTR); + return(0); + /* all other cases are undefined */ + default: + return(-RSBAC_EINVALIDTARGET); + } +#endif + case R_CHDIR: switch(target) { diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/adf_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/adf_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/adf_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/adf_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -3,9 +3,9 @@ /* Implementation of the Access Control Decision */ /* Facility (ADF) - Main file main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 29/Aug/2002 */ +/* Last modified: 16/Jul/2003 */ /*************************************************** */ #include @@ -96,13 +96,18 @@ boolean rsbac_switch_acl = TRUE; /****** CAP *******/ #ifdef CONFIG_RSBAC_CAP boolean rsbac_switch_cap = TRUE; -#endif /* ACL */ +#endif /* CAP */ /****** JAIL *******/ #ifdef CONFIG_RSBAC_JAIL boolean rsbac_switch_jail = TRUE; #endif /* JAIL */ +/****** RES *******/ +#ifdef CONFIG_RSBAC_RES +boolean rsbac_switch_res = TRUE; +#endif /* RES */ + #endif /* SWITCH */ /************************************************* */ @@ -115,7 +120,11 @@ boolean rsbac_switch_jail = TRUE; /* Init function, calls inits for all sub-modules */ +#ifdef CONFIG_RSBAC_INIT_DELAY +void rsbac_init_adf(void) +#else void __init rsbac_init_adf(void) +#endif { #if defined(CONFIG_RSBAC_REG) rsbac_reg_init(); @@ -164,7 +173,14 @@ enum rsbac_adf_req_ret_t enum rsbac_adf_req_ret_t ret_result = DO_NOT_CARE; #endif #ifndef CONFIG_RSBAC_MAINT - enum rsbac_adf_req_ret_t mod_result[SW_NONE] = { + enum rsbac_adf_req_ret_t mod_result[SW_NONE + 1] = { + DO_NOT_CARE, + DO_NOT_CARE, + DO_NOT_CARE, + DO_NOT_CARE, + DO_NOT_CARE, + DO_NOT_CARE, + DO_NOT_CARE, DO_NOT_CARE, DO_NOT_CARE, DO_NOT_CARE, @@ -184,7 
+200,7 @@ enum rsbac_adf_req_ret_t union rsbac_attribute_value_t i_attr_val2; enum rsbac_log_level_t log_level; #endif -#ifdef CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,0) || defined(CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT) struct super_block * sb_p; #endif #ifdef CONFIG_RSBAC_SOFTMODE @@ -195,8 +211,12 @@ enum rsbac_adf_req_ret_t if (!rsbac_is_initialized()) return(DO_NOT_CARE); -/* Always granted for kernel (pid 0) */ - if (!caller_pid) +/* Always granted for kernel (pid 0) and logging daemon */ + if ( !caller_pid + #if defined(CONFIG_RSBAC_LOG_REMOTE) + || (caller_pid == rsbaclogd_pid) + #endif + ) return(GRANTED); /* Checking base values */ @@ -262,44 +282,70 @@ enum rsbac_adf_req_ret_t result = DO_NOT_CARE; goto log; } - if ((tmperr = rsbac_get_attr(GEN, - target, - tid, - A_internal, - &i_attr_val, - TRUE) )) - { - if(tmperr == -RSBAC_EINVALIDDEV) - { - printk(KERN_WARNING - "rsbac_adf_request(): rsbac_get_attr() for internal returned EINVALIDDEV!\n"); - return(DO_NOT_CARE); /* last calls on shutdown */ - } - else - { - printk(KERN_WARNING - "rsbac_adf_request(): rsbac_get_attr() for internal returned error %i!\n", - tmperr); - return(NOT_GRANTED); /* something weird happened */ - } - } - /* no access to rsbac_internal objects is granted in any case */ - if (i_attr_val.internal) + switch(request) { - if(request != R_GET_STATUS_DATA) - { - printk(KERN_WARNING - "rsbac_adf_request(): trial to access object declared RSBAC-internal!\n"); - result = NOT_GRANTED; - #ifdef CONFIG_RSBAC_SOFTMODE_IND - ret_result = NOT_GRANTED; - #endif - } - #ifdef CONFIG_RSBAC_SOFTMODE - rsbac_internal = TRUE; + case R_GET_STATUS_DATA: + #if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,0) + if( (target == T_FIFO) + && ((sb_p = rsbac_get_super_block(tid.file.device))) + && (sb_p->s_magic == PIPEFS_MAGIC) + ) + return DO_NOT_CARE; + break; #endif + case R_READ_ATTRIBUTE: +#ifdef CONFIG_RSBAC_DAT_VISIBLE + case R_SEARCH: + case R_READ: + case R_CLOSE: + 
case R_CHDIR: + case R_GET_PERMISSIONS_DATA: +#endif + break; + + default: + if ((tmperr = rsbac_get_attr(GEN, + target, + tid, + A_internal, + &i_attr_val, + TRUE) )) + { + if(tmperr == -RSBAC_EINVALIDDEV) + { + printk(KERN_WARNING + "rsbac_adf_request(): rsbac_get_attr() for internal returned EINVALIDDEV!\n"); + return(DO_NOT_CARE); /* last calls on shutdown */ + } + else + { + printk(KERN_WARNING + "rsbac_adf_request(): rsbac_get_attr() for internal returned error %i!\n", + tmperr); + return(NOT_GRANTED); /* something weird happened */ + } + } + /* no access to rsbac_internal objects is granted in any case */ + if (i_attr_val.internal) + { + printk(KERN_WARNING + "rsbac_adf_request(): trial to access object declared RSBAC-internal!\n"); + result = NOT_GRANTED; + #ifndef CONFIG_RSBAC_MAINT + mod_result[SW_NONE] = NOT_GRANTED; + #endif + #ifdef CONFIG_RSBAC_SOFTMODE + #ifdef CONFIG_RSBAC_SOFTMODE_IND + ret_result = NOT_GRANTED; + #endif + rsbac_internal = TRUE; + #endif + } } + break; + default: + break; } /**********************************************************/ @@ -574,6 +620,29 @@ if (rsbac_switch_jail) } #endif /* JAIL */ +/****** RES *******/ +#if defined(CONFIG_RSBAC_RES) +#ifdef CONFIG_RSBAC_SWITCH +if (rsbac_switch_res) +#endif + /* no need to call module, if to be ignored */ + if(ignore_module != RES) + { + mod_result[RES] = rsbac_adf_request_res(request, + caller_pid, + target, + tid, + attr, + attr_val, + owner); + result = adf_and_plus(result, mod_result[RES]); +#ifdef CONFIG_RSBAC_SOFTMODE_IND + if(!rsbac_ind_softmode[RES]) + ret_result = adf_and_plus(ret_result, mod_result[RES]); +#endif + } +#endif /* RES */ + /****** REG *******/ #if defined(CONFIG_RSBAC_REG) if(ignore_module != REG) @@ -587,8 +656,8 @@ if(ignore_module != REG) owner); result = adf_and_plus(result, mod_result[REG]); #ifdef CONFIG_RSBAC_SOFTMODE_IND - if(!rsbac_ind_softmode[CAP]) - ret_result = adf_and_plus(ret_result, mod_result[CAP]); + if(!rsbac_ind_softmode[REG]) + ret_result 
= adf_and_plus(ret_result, mod_result[REG]); #endif } #endif /* REG */ @@ -660,6 +729,7 @@ if(ignore_module != REG) rsbac_remove_target(T_PROCESS,tid); break; default: + break; } /* logging request on info level, if requested by file/dir/dev attributes */ @@ -908,51 +978,156 @@ log: command[0] = (char) 0; get_request_name(request_name, request); #if !defined(CONFIG_RSBAC_MAINT) + if(result == mod_result[SW_NONE]) + { + strcat(res_mods, " GEN"); + } #if defined(CONFIG_RSBAC_MAC) if(result == mod_result[MAC]) - strcat(res_mods, " MAC"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[MAC]) + strcat(res_mods, " MAC(Softmode)"); + else + #endif + strcat(res_mods, " MAC"); + } #endif #if defined(CONFIG_RSBAC_FC) if(result == mod_result[FC]) - strcat(res_mods, " FC"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[FC]) + strcat(res_mods, " FC(Softmode)"); + else + #endif + strcat(res_mods, " FC"); + } #endif #if defined(CONFIG_RSBAC_SIM) if(result == mod_result[SIM]) - strcat(res_mods, " SIM"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[SIM]) + strcat(res_mods, " SIM(Softmode)"); + else + #endif + strcat(res_mods, " SIM"); + } #endif #if defined(CONFIG_RSBAC_PM) if(result == mod_result[PM]) - strcat(res_mods, " PM"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[PM]) + strcat(res_mods, " PM(Softmode)"); + else + #endif + strcat(res_mods, " PM"); + } #endif #if defined(CONFIG_RSBAC_MS) if(result == mod_result[MS]) - strcat(res_mods, " MS"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[MS]) + strcat(res_mods, " MS(Softmode)"); + else + #endif + strcat(res_mods, " MS"); + } #endif #ifdef CONFIG_RSBAC_FF if(result == mod_result[FF]) - strcat(res_mods, " FF"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[FF]) + strcat(res_mods, " FF(Softmode)"); + else + #endif + strcat(res_mods, " FF"); + } #endif #ifdef CONFIG_RSBAC_RC if(result == mod_result[RC]) - strcat(res_mods, " 
RC"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[RC]) + strcat(res_mods, " RC(Softmode)"); + else + #endif + strcat(res_mods, " RC"); + } #endif #ifdef CONFIG_RSBAC_AUTH if(result == mod_result[AUTH]) - strcat(res_mods, " AUTH"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[AUTH]) + strcat(res_mods, " AUTH(Softmode)"); + else + #endif + strcat(res_mods, " AUTH"); + } #endif #ifdef CONFIG_RSBAC_ACL if(result == mod_result[ACL]) - strcat(res_mods, " ACL"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[ACL]) + strcat(res_mods, " ACL(Softmode)"); + else + #endif + strcat(res_mods, " ACL"); + } + #endif + #ifdef CONFIG_RSBAC_CAP + if(result == mod_result[CAP]) + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[CAP]) + strcat(res_mods, " CAP(Softmode)"); + else + #endif + strcat(res_mods, " CAP"); + } #endif #ifdef CONFIG_RSBAC_JAIL if(result == mod_result[JAIL]) - strcat(res_mods, " JAIL"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[JAIL]) + strcat(res_mods, " JAIL(Softmode)"); + else + #endif + strcat(res_mods, " JAIL"); + } + #endif + #ifdef CONFIG_RSBAC_RES + if(result == mod_result[RES]) + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[RES]) + strcat(res_mods, " RES(Softmode)"); + else + #endif + strcat(res_mods, " RES"); + } #endif #ifdef CONFIG_RSBAC_REG if(result == mod_result[REG]) - strcat(res_mods, " REG"); + { + #ifdef CONFIG_RSBAC_SOFTMODE_IND + if(rsbac_ind_softmode[REG]) + strcat(res_mods, " REG(Softmode)"); + else + #endif + strcat(res_mods, " REG"); + } #endif #endif /* !MAINT */ + if(!res_mods[0]) + strcat(res_mods, " ADF"); get_target_name(target_type_name, target, target_id_name, tid); get_attribute_name(attr_name, attr); get_result_name(res_name, result); @@ -965,11 +1140,27 @@ log: #ifdef CONFIG_RSBAC_RMSG /* if pseudo is set, its value is != 0, else -> use id */ if (pseudo) - rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, 
prog_name %s, pseudo %u, target_type %s, tid %s, attr %s, value %u, result %s by%s\n", - request_name, caller_pid, parent_pid, command, i_attr_val.pseudo, target_type_name, target_id_name, attr_name, attr_val.dummy, res_name, res_mods); + { + #ifdef CONFIG_RSBAC_SOFTMODE + if(rsbac_softmode) + rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s, pseudo %u, target_type %s, tid %s, attr %s, value %u, result %s (Softmode) by%s\n", + request_name, caller_pid, parent_pid, command, i_attr_val.pseudo, target_type_name, target_id_name, attr_name, attr_val.dummy, res_name, res_mods); + else + #endif + rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s, pseudo %u, target_type %s, tid %s, attr %s, value %u, result %s by%s\n", + request_name, caller_pid, parent_pid, command, i_attr_val.pseudo, target_type_name, target_id_name, attr_name, attr_val.dummy, res_name, res_mods); + } else - rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s, uid %u, target_type %s, tid %s, attr %s, value %u, result %s by%s\n", - request_name, caller_pid, parent_pid, command, owner, target_type_name, target_id_name, attr_name, attr_val.dummy, res_name, res_mods); + { + #ifdef CONFIG_RSBAC_SOFTMODE + if(rsbac_softmode) + rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s, uid %u, target_type %s, tid %s, attr %s, value %u, result %s (Softmode) by%s\n", + request_name, caller_pid, parent_pid, command, owner, target_type_name, target_id_name, attr_name, attr_val.dummy, res_name, res_mods); + else + #endif + rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s, uid %u, target_type %s, tid %s, attr %s, value %u, result %s by%s\n", + request_name, caller_pid, parent_pid, command, owner, target_type_name, target_id_name, attr_name, attr_val.dummy, res_name, res_mods); + } #endif #ifndef CONFIG_RSBAC_RMSG_EXCL /* only log to standard 
syslog, if not disabled by kernel boot parameter */ @@ -1084,7 +1275,11 @@ int rsbac_adf_set_attr( return(0); /* kernel (pid 0) is ignored */ - if (!caller_pid) + if ( !caller_pid + #if defined(CONFIG_RSBAC_LOG_REMOTE) + || (caller_pid == rsbaclogd_pid) + #endif + ) return(0); /* Checking base values */ @@ -1238,6 +1433,7 @@ if (rsbac_switch_ms) #endif /* MS */ /******* FF ********/ +#if 0 /* Nothing to do in there */ #ifdef CONFIG_RSBAC_FF #ifdef CONFIG_RSBAC_SWITCH if (rsbac_switch_ff) @@ -1252,6 +1448,7 @@ if (rsbac_switch_ff) attr_val, owner); #endif /* FF */ +#endif /* 0 */ /******* RC ********/ #ifdef CONFIG_RSBAC_RC @@ -1333,6 +1530,22 @@ if (rsbac_switch_jail) owner); #endif /* JAIL */ +/****** RES *******/ +#ifdef CONFIG_RSBAC_RES +#ifdef CONFIG_RSBAC_SWITCH +if (rsbac_switch_res) +#endif + error |= rsbac_adf_set_attr_res (request, + caller_pid, + target, + tid, + new_target, + new_tid, + attr, + attr_val, + owner); +#endif /* RES */ + /****** REG *******/ #ifdef CONFIG_RSBAC_REG error |= rsbac_adf_set_attr_reg (request, @@ -1379,7 +1592,7 @@ general_work: } break; -#ifdef CONFIG_RSBAC_IND_PROG_LOG +#if defined(CONFIG_RSBAC_IND_PROG_LOG) case R_CLONE: switch (target) { @@ -1470,6 +1683,7 @@ general_work: #endif /* CONFIG_RSBAC_IND_PROG_LOG */ default: + break; } #if defined(CONFIG_RSBAC_DEBUG) && defined(CONFIG_RSBAC_NET) @@ -1714,6 +1928,7 @@ static int open_by_dentry(struct dentry return(-RSBAC_EWRITEFAILED); } /* trying to get write access */ +/* if (get_write_access(file_dentry_p->d_inode)) { printk(KERN_WARNING @@ -1722,6 +1937,7 @@ static int open_by_dentry(struct dentry file_p->f_op->release(file_dentry_p->d_inode,file_p); return(-RSBAC_EWRITEFAILED); } +*/ return 0; } @@ -1739,10 +1955,6 @@ int rsbac_sec_trunc(struct dentry * dent #else int err = 0; boolean need_overwrite = FALSE; - struct file file; - int tmperr = 0; - int len; - mm_segment_t oldfs; /* security checks */ if( !dentry_p @@ -1864,6 +2076,16 @@ int rsbac_sec_trunc(struct dentry * dent 
need_overwrite = rsbac_need_overwrite_jail(dentry_p); #endif /* JAIL */ + /****** RES *******/ + #ifdef CONFIG_RSBAC_RES + #ifdef CONFIG_RSBAC_SWITCH + if (rsbac_switch_res) + #endif + /* no need to call module, if already need_overwrite */ + if(!need_overwrite) + need_overwrite = rsbac_need_overwrite_res(dentry_p); + #endif /* RES */ + /****** REG *******/ #ifdef CONFIG_RSBAC_REG if(!need_overwrite) @@ -1872,9 +2094,14 @@ int rsbac_sec_trunc(struct dentry * dent if(need_overwrite) { - char * buffer; + char * buffer; + boolean vmalloc_used; + struct file file; + int tmperr = 0; + int len; + mm_segment_t oldfs; - buffer = rsbac_kmalloc(RSBAC_SEC_DEL_CHUNK_SIZE); + buffer = rsbac_vkmalloc(RSBAC_SEC_DEL_CHUNK_SIZE, &vmalloc_used); if(!buffer) return -RSBAC_ENOMEM; @@ -1892,10 +2119,19 @@ int rsbac_sec_trunc(struct dentry * dent err = open_by_dentry(dentry_p, &file); if(err) { - rsbac_kfree(buffer); + rsbac_vkfree(buffer, vmalloc_used); return(err); } +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_write) + printk(KERN_DEBUG + "rsbac_sec_trunc(): file %lu on device %02u:%02u is open, starting to write!\n", + dentry_p->d_inode->i_ino, + MAJOR(dentry_p->d_inode->i_dev), + MINOR(dentry_p->d_inode->i_dev)); +#endif + /* OK, now we can start writing */ /* Set current user space to kernel space, because write() reads @@ -1926,19 +2162,13 @@ int rsbac_sec_trunc(struct dentry * dent len, &file.f_pos); /* if none written, end of file is reached -> complain and return */ - if (tmperr < 0) + if (tmperr <= 0) { printk(KERN_WARNING "rsbac_sec_trunc(): write error on file!\n"); err = -RSBAC_EWRITEFAILED; goto out; } - if(tmperr < len) - { - printk(KERN_WARNING - "rsbac_sec_trunc(): unexpectedly reached end of file!\n"); - goto out; - } new_len += tmperr; } @@ -1947,34 +2177,40 @@ int rsbac_sec_trunc(struct dentry * dent /* to user space */ set_fs(oldfs); +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_write) + printk(KERN_DEBUG + "rsbac_sec_trunc(): syncing file %lu on device %02u:%02u!\n", 
+ dentry_p->d_inode->i_ino, + MAJOR(dentry_p->d_inode->i_dev), + MINOR(dentry_p->d_inode->i_dev)); +#endif + #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) if (file.f_op->fsync) err = file.f_op->fsync(&file,dentry_p); #else -#if 0 - if ( file.f_op->fsync - && (file.f_op != &ext3_file_operations) - ) - err = file.f_op->fsync(&file,dentry_p,1); -#endif +// err = fsync_inode_buffers(dentry_p->d_inode); err = fsync_inode_data_buffers(dentry_p->d_inode); #endif +/* if (file.f_op->flush) file.f_op->flush(&file); if (file.f_op->release) file.f_op->release(dentry_p->d_inode,&file); +*/ /* End of write access */ - put_write_access(dentry_p->d_inode); +// put_write_access(dentry_p->d_inode); - rsbac_kfree(buffer); + rsbac_vkfree(buffer, vmalloc_used); } /* Ready. */ return(err); #endif /* else of MAINT */ - }; + } EXPORT_SYMBOL(rsbac_sec_del); int rsbac_sec_del(struct dentry * dentry_p) @@ -1982,7 +2218,7 @@ int rsbac_sec_del(struct dentry * dentry return(rsbac_sec_trunc(dentry_p, 0, dentry_p->d_inode->i_size)); - }; + } #else /* no SECDEL */ EXPORT_SYMBOL(rsbac_sec_trunc); @@ -2003,10 +2239,17 @@ void rsbac_symlink_redirect(struct dentr { int err; union rsbac_target_id_t i_tid; + union rsbac_target_id_t i_tid2; union rsbac_attribute_value_t i_attr_val; if(!name || !dentry_p || !dentry_p->d_inode) return; + if (!rsbac_is_initialized()) + return; + + i_tid.symlink.device = dentry_p->d_inode->i_dev; + i_tid.symlink.inode = dentry_p->d_inode->i_ino; + i_tid.symlink.dentry_p = dentry_p; if(!S_ISLNK(dentry_p->d_inode->i_mode)) { printk(KERN_DEBUG @@ -2015,12 +2258,6 @@ void rsbac_symlink_redirect(struct dentr MAJOR(i_tid.symlink.device), MINOR(i_tid.symlink.device) ); return; } - if (!rsbac_is_initialized()) - return; - - i_tid.symlink.device = dentry_p->d_inode->i_dev; - i_tid.symlink.inode = dentry_p->d_inode->i_ino; - i_tid.symlink.dentry_p = dentry_p; #ifdef CONFIG_RSBAC_DEBUG if (rsbac_debug_aef) @@ -2061,6 +2298,79 @@ void rsbac_symlink_redirect(struct dentr } #endif 
+#ifdef CONFIG_RSBAC_SYM_REDIR_MAC + if ((err = rsbac_get_attr(GEN, + T_SYMLINK, + i_tid, + A_symlink_add_mac_level, + &i_attr_val, + FALSE) )) + { + printk(KERN_WARNING + "rsbac_symlink_redirect(): rsbac_get_attr() for symlink_add_mac_level returned error %i!\n", + err); + return; /* something weird happened */ + } + if(i_attr_val.symlink_add_mac_level) + { + u_int len; + + len = strlen(name); + while( len + && ( ( (name[len-1] >= '0') + && (name[len-1] <= '9') + ) +#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT + || (name[len-1] == ':') +#endif + ) + ) + len--; +#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT + if(len > (PAGE_SIZE - 85)) +#else + if(len > (PAGE_SIZE - 20)) +#endif + return; + + i_tid2.process = current->pid; + if ((err = rsbac_get_attr(MAC, + T_PROCESS, + i_tid2, + A_current_sec_level, + &i_attr_val, + FALSE) )) + { + printk(KERN_WARNING + "rsbac_symlink_redirect(): rsbac_get_attr() for current_sec_level returned error %i!\n", + err); + return; /* something weird happened */ + } + +#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT + len+=sprintf(name+len, "%u:", i_attr_val.current_sec_level); +#else + len+=sprintf(name+len, "%u", i_attr_val.current_sec_level); +#endif + +#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT + if ((err = rsbac_get_attr(MAC, + T_PROCESS, + i_tid2, + A_mac_curr_categories, + &i_attr_val, + FALSE) )) + { + printk(KERN_WARNING + "rsbac_symlink_redirect(): rsbac_get_attr() for mac_curr_categories returned error %i!\n", + err); + return; /* something weird happened */ + } + u64tostrmac(name+len, i_attr_val.mac_categories); +#endif + } +#endif + #ifdef CONFIG_RSBAC_SYM_REDIR_RC if ((err = rsbac_get_attr(GEN, T_SYMLINK, @@ -2087,10 +2397,10 @@ void rsbac_symlink_redirect(struct dentr if(len > (PAGE_SIZE - 20)) return; - i_tid.process = current->pid; + i_tid2.process = current->pid; if ((err = rsbac_get_attr(RC, T_PROCESS, - i_tid, + i_tid2, A_rc_role, &i_attr_val, FALSE) )) diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/auth/auth_main.c 
linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/auth/auth_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/auth/auth_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/auth/auth_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Authorization module */ /* File: rsbac/adf/auth/main.c */ /* */ -/* Author and (c) 1999-2001: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 27/Sep/2001 */ +/* Last modified: 16/Jan/2003 */ /**************************************************** */ #include @@ -133,12 +133,79 @@ enum rsbac_adf_req_ret_t case T_PROCESS: if(attr != A_owner) return(UNDEFINED); - /* if nothing changes - who cares? */ - if(owner == attr_val.owner) + /* check auth_may_setuid of process */ + if (rsbac_get_attr(AUTH, + T_PROCESS, + tid, + A_auth_may_setuid, + &i_attr_val1, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_request_auth(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* if auth_may_setuid is set, then grant */ + if (i_attr_val1.auth_may_setuid) + return(GRANTED); + + /* check, if the target uid is in capset, grant, if yes, deny, if not. 
*/ + if(rsbac_auth_p_capset_member(caller_pid, ACT_real, attr_val.owner)) + return(GRANTED); + else + return(NOT_GRANTED); + + /* all other cases are not checked */ + default: + return(DO_NOT_CARE); + } + +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case R_CHANGE_DAC_EFF_OWNER: + switch(target) + { + case T_PROCESS: + if(attr != A_owner) + return(UNDEFINED); + if(i_attr_val1.owner == owner) + return DO_NOT_CARE; + /* check auth_may_setuid of process */ + if (rsbac_get_attr(AUTH, + T_PROCESS, + tid, + A_auth_may_setuid, + &i_attr_val1, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_request_auth(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* if auth_may_setuid is set, then grant */ + if (i_attr_val1.auth_may_setuid) + return(GRANTED); + + /* check, if the target uid is in capset, grant, if yes, deny, if not. */ + if(rsbac_auth_p_capset_member(caller_pid, ACT_eff, attr_val.owner)) return(GRANTED); + else + return(NOT_GRANTED); + + /* all other cases are not checked */ + default: + return(DO_NOT_CARE); + } + case R_CHANGE_DAC_FS_OWNER: + switch(target) + { + case T_PROCESS: + if(attr != A_owner) + return(UNDEFINED); + if(i_attr_val1.owner == owner) + return DO_NOT_CARE; /* check auth_may_setuid of process */ if (rsbac_get_attr(AUTH, - T_PROCESS, + T_PROCESS, tid, A_auth_may_setuid, &i_attr_val1, @@ -153,7 +220,7 @@ enum rsbac_adf_req_ret_t return(GRANTED); /* check, if the target uid is in capset, grant, if yes, deny, if not. */ - if(rsbac_auth_p_capset_member(caller_pid, attr_val.owner)) + if(rsbac_auth_p_capset_member(caller_pid, ACT_fs, attr_val.owner)) return(GRANTED); else return(NOT_GRANTED); @@ -162,6 +229,7 @@ enum rsbac_adf_req_ret_t default: return(DO_NOT_CARE); } +#endif case R_MODIFY_ATTRIBUTE: switch(attr) @@ -227,7 +295,7 @@ enum rsbac_adf_req_ret_t /* target rsbaclog? only for secoff */ if (tid.scd != ST_rsbaclog) return(GRANTED); - /* Secoff? */ + /* Secoff or Auditor? 
*/ i_tid.user = owner; if ((err=rsbac_get_attr(AUTH, T_USER, @@ -241,7 +309,9 @@ enum rsbac_adf_req_ret_t return(NOT_GRANTED); } /* grant only for secoff */ - if (i_attr_val1.system_role == SR_security_officer) + if ( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_auditor) + ) return(GRANTED); else return(NOT_GRANTED); @@ -331,8 +401,10 @@ enum rsbac_adf_req_ret_t "rsbac_adf_request_auth(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* grant only for secoff */ - if (i_attr_val1.system_role == SR_security_officer) + /* grant only for secoff and auditor */ + if ( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_auditor) + ) return(GRANTED); else return(NOT_GRANTED); @@ -555,16 +627,22 @@ int rsbac_adf_set_attr_auth( return(-RSBAC_EWRITEFAILED); } /* replace RSBAC_AUTH_OWNER_F_CAP by current owner */ - if(rsbac_auth_p_capset_member(caller_pid, RSBAC_AUTH_OWNER_F_CAP)) + if(rsbac_auth_p_capset_member(caller_pid, ACT_real, RSBAC_AUTH_OWNER_F_CAP)) { + struct rsbac_auth_cap_range_t cap_range; + /* remove it and set cap for owner */ - if (rsbac_auth_add_to_p_capset(caller_pid, owner, owner)) + cap_range.first = owner; + cap_range.last = owner; + if (rsbac_auth_add_to_p_capset(caller_pid, ACT_real, cap_range, 0)) { printk(KERN_WARNING "rsbac_adf_set_attr_auth(): rsbac_auth_add_to_p_capset() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - if (rsbac_auth_remove_from_p_capset(caller_pid, RSBAC_AUTH_OWNER_F_CAP, RSBAC_AUTH_OWNER_F_CAP)) + cap_range.first = RSBAC_AUTH_OWNER_F_CAP; + cap_range.last = RSBAC_AUTH_OWNER_F_CAP; + if (rsbac_auth_remove_from_p_capset(caller_pid, ACT_real, cap_range)) { printk(KERN_WARNING "rsbac_adf_set_attr_auth(): rsbac_auth_remove_from_p_capset() returned error!\n"); @@ -594,10 +672,10 @@ int rsbac_adf_set_attr_auth( { case T_FILE: /* remove cap set */ - if(rsbac_auth_remove_f_capset(tid.file)) + if(rsbac_auth_remove_f_capsets(tid.file)) { 
printk(KERN_WARNING - "rsbac_adf_set_attr_auth(): rsbac_auth_remove_f_capset() returned error!\n"); + "rsbac_adf_set_attr_auth(): rsbac_auth_remove_f_capsets() returned error!\n"); return(-RSBAC_EWRITEFAILED); } return(0); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/auth/auth_syscalls.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/auth/auth_syscalls.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/auth/auth_syscalls.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/auth/auth_syscalls.c 2003-08-25 20:33:02.000000000 +0200 @@ -32,8 +32,9 @@ /************************************************* */ int rsbac_auth_add_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl) { /* check only in non-maint mode */ #if !defined(CONFIG_RSBAC_MAINT) @@ -63,16 +64,16 @@ int rsbac_auth_add_p_cap(rsbac_pid_t pid #ifdef CONFIG_RSBAC_RMSG rsbac_printk(KERN_INFO "rsbac_auth_add_p_cap(): adding AUTH cap %u:%u to process %u denied for process %u!\n", - first_uid, - last_uid, + cap_range.first, + cap_range.last, pid, current->pid); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL printk(KERN_INFO "rsbac_auth_add_p_cap(): adding AUTH cap %u:%u to process %u denied for process %u!\n", - first_uid, - last_uid, + cap_range.first, + cap_range.last, pid, current->pid); #endif @@ -89,7 +90,7 @@ int rsbac_auth_add_p_cap(rsbac_pid_t pid #endif /* OK, check passed. Add the capability. 
*/ - if(rsbac_auth_add_to_p_capset(pid, first_uid, last_uid)) + if(rsbac_auth_add_to_p_capset(pid, cap_type, cap_range, ttl)) { printk(KERN_WARNING "rsbac_auth_add_p_cap(): rsbac_auth_add_to_p_capset() returned error!\n"); @@ -99,8 +100,8 @@ int rsbac_auth_add_p_cap(rsbac_pid_t pid } int rsbac_auth_remove_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range) { /* check only in non-maint mode */ #if !defined(CONFIG_RSBAC_MAINT) @@ -130,16 +131,16 @@ int rsbac_auth_remove_p_cap(rsbac_pid_t #ifdef CONFIG_RSBAC_RMSG rsbac_printk(KERN_INFO "rsbac_auth_remove_p_cap(): removing AUTH cap %u:%u from process %u denied for process %u!\n", - first_uid, - last_uid, + cap_range.first, + cap_range.last, pid, current->pid); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL printk(KERN_INFO "rsbac_auth_remove_p_cap(): removing AUTH cap %u:%u from process %u denied for process %u!\n", - first_uid, - last_uid, + cap_range.first, + cap_range.last, pid, current->pid); #endif @@ -156,15 +157,16 @@ int rsbac_auth_remove_p_cap(rsbac_pid_t #endif /* OK, check passed. Try to remove the capability. 
*/ - return(rsbac_auth_remove_from_p_capset(pid, first_uid, last_uid)); + return(rsbac_auth_remove_from_p_capset(pid, cap_type, cap_range)); } int rsbac_auth_add_f_cap(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl) { /* check has been done in help/syscalls.c: sys_rsbac_auth_add_f_cap */ - if(rsbac_auth_add_to_f_capset(file, first_uid, last_uid)) + if(rsbac_auth_add_to_f_capset(file, cap_type, cap_range, ttl)) { printk(KERN_WARNING "rsbac_auth_add_f_cap(): rsbac_auth_add_to_f_capset() returned error!\n"); @@ -174,11 +176,11 @@ int rsbac_auth_add_f_cap(rsbac_auth_file } int rsbac_auth_remove_f_cap(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range) { /* check has been done in help/syscalls.c: sys_rsbac_auth_remove_f_cap */ - return(rsbac_auth_remove_from_f_capset(file, first_uid, last_uid)); + return(rsbac_auth_remove_from_f_capset(file, cap_type, cap_range)); } /* end of rsbac/adf/auth/syscalls.c */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/cap/cap_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/cap/cap_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/cap/cap_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/cap/cap_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Linux Capabilities (CAP) */ /* File: rsbac/adf/cap/main.c */ /* */ -/* Author and (c) 1999-2001: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 03/Sep/2001 */ +/* Last modified: 24/Jan/2003 */ /**************************************************** */ #include @@ -51,6 +51,7 @@ enum rsbac_adf_req_ret_t case A_cap_role: case A_min_caps: case A_max_caps: + case A_cap_process_hiding: #ifdef CONFIG_RSBAC_CAP_AUTH_PROT case A_auth_may_setuid: case A_auth_may_set_cap: @@ 
-89,6 +90,7 @@ enum rsbac_adf_req_ret_t case A_cap_role: case A_min_caps: case A_max_caps: + case A_cap_process_hiding: /* All attributes (remove target!) */ case A_none: /* Security Officer or Admin? */ @@ -181,6 +183,91 @@ enum rsbac_adf_req_ret_t default: return(DO_NOT_CARE); } +#ifdef CONFIG_RSBAC_CAP_PROC_HIDE + case R_GET_STATUS_DATA: + switch(target) + { + case T_PROCESS: + if (rsbac_get_attr(CAP, + target, + tid, + A_cap_process_hiding, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_request_cap(): rsbac_get_attr() for cap_process_hiding returned error!\n"); + return(NOT_GRANTED); /* something weird happened */ + } + switch(i_attr_val1.cap_process_hiding) + { + case PH_full: + if(current->pid == tid.process) + return GRANTED; + else + /* Security Officer or Admin? */ + i_tid.user = owner; + if (rsbac_get_attr(CAP, + T_USER, + i_tid, + A_cap_role, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_request_cap(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* if sec_officer, then grant */ + if(i_attr_val1.system_role == SR_security_officer) + return(GRANTED); + else + return(NOT_GRANTED); + case PH_from_other_users: + { + struct task_struct * task_p; + enum rsbac_adf_req_ret_t result; + + read_lock(&tasklist_lock); + task_p = find_task_by_pid(tid.process); + if( task_p + && (task_p->uid != owner) + ) + result = NOT_GRANTED; + else + result = GRANTED; + read_unlock(&tasklist_lock); + if(result == GRANTED) + return GRANTED; + /* Security Officer or Admin? 
*/ + i_tid.user = owner; + if (rsbac_get_attr(CAP, + T_USER, + i_tid, + A_cap_role, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_request_cap(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* if sec_officer or admin, then grant */ + if( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_administrator) + ) + return(GRANTED); + else + return(NOT_GRANTED); + } + default: + return DO_NOT_CARE; + } + + default: + return DO_NOT_CARE; + } +#endif /*********************/ default: return DO_NOT_CARE; @@ -279,6 +366,46 @@ int rsbac_adf_set_attr_cap( } break; +#if defined (CONFIG_RSBAC_CAP_PROC_HIDE) + case R_CLONE: + switch(target) + { + case T_PROCESS: + /* get process hiding from old process */ + if (rsbac_get_attr(CAP, + target, + tid, + A_cap_process_hiding, + &i_attr_val1, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_request_cap(): rsbac_get_attr() for cap_process_hiding returned error!\n"); + } + else + { /* only set, of not default value 0 */ + if(i_attr_val1.cap_process_hiding) + { + /* set program based log for new process */ + if (rsbac_set_attr(CAP, + new_target, + new_tid, + A_cap_process_hiding, + i_attr_val1)) + { + printk(KERN_WARNING + "rsbac_adf_request_cap(): rsbac_set_attr() for cap_process_hiding returned error!\n"); + } + } + } + return 0; + + /* all other cases are unknown */ + default: + return(0); + } +#endif /* PROC_HIDE */ + case R_EXECUTE: switch(target) { diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/fc/fc_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/fc/fc_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/fc/fc_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/fc/fc_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Functional Control */ /* File: rsbac/adf/fc/main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 
08/08/2002 */ +/* Last modified: 06/Jun/2003 */ /*************************************************** */ #include @@ -384,8 +384,11 @@ enum rsbac_adf_req_ret_t /* target rsbaclog? only for secoff */ if (tid.scd != ST_rsbaclog) return(GRANTED); - /* Secoff? */ - return fc_check_sysrole(owner, SR_security_officer); + /* Secoff or auditor? */ + if(fc_check_sysrole(owner, SR_security_officer) == NOT_GRANTED) + return fc_check_sysrole(owner, SR_auditor); + else + return GRANTED; default: return(DO_NOT_CARE); }; @@ -520,7 +523,9 @@ enum rsbac_adf_req_ret_t /* if rsbaclog: grant only for secoff */ if(tid.scd == ST_rsbaclog) { - if (i_attr_val1.system_role == SR_security_officer) + if ( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_auditor) + ) return(GRANTED); else return(NOT_GRANTED); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/ff/ff_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/ff/ff_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/ff/ff_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/ff/ff_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - File Flags */ /* File: rsbac/adf/ff/main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 08/Aug/2002 */ +/* Last modified: 06/Jun/2003 */ /*************************************************** */ #include @@ -70,9 +70,6 @@ enum rsbac_adf_req_ret_t enum rsbac_adf_req_ret_t result = DO_NOT_CARE; union rsbac_target_id_t i_tid; union rsbac_attribute_value_t i_attr_val1; -#ifdef CONFIG_RSBAC_FF_ROLE_PROT - union rsbac_attribute_value_t i_attr_val2; -#endif int err=0; switch (request) @@ -119,50 +116,6 @@ enum rsbac_adf_req_ret_t case T_DIR: return(check_flags_ff(target,tid, FF_read_only | FF_search_only)); - case T_PROCESS: - #ifdef CONFIG_RSBAC_FF_ROLE_PROT - if(attr != A_owner) - return(UNDEFINED); - /* Administrator or secoff? 
*/ - i_tid.user = owner; - if (rsbac_get_attr(FF, T_USER, - i_tid, - A_ff_role, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_ff(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* if general user or secoff, then grant */ - if ( (i_attr_val1.system_role == SR_user) - || (i_attr_val1.system_role == SR_security_officer) - ) - return(GRANTED); - /* old owner is sys-admin */ - /* get target user's role */ - i_tid.user = attr_val.owner; - if (rsbac_get_attr(FF, T_USER, - i_tid, - A_ff_role, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_ff(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* if target is security officer -> deny */ - if(i_attr_val2.system_role == SR_security_officer) - return(NOT_GRANTED); - else - return(GRANTED); - #endif /* ROLE_PROT */ - - /* fall through */ - case T_NONE: - return(DO_NOT_CARE); /* all other cases are undefined */ default: return(DO_NOT_CARE); @@ -240,7 +193,9 @@ enum rsbac_adf_req_ret_t return(NOT_GRANTED); } /* grant only for secoff */ - if (i_attr_val1.system_role == SR_security_officer) + if ( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_auditor) + ) return(GRANTED); else return(NOT_GRANTED); @@ -327,7 +282,9 @@ enum rsbac_adf_req_ret_t return(NOT_GRANTED); } /* grant only for secoff */ - if (i_attr_val1.system_role == SR_security_officer) + if ( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_auditor) + ) return(GRANTED); else return(NOT_GRANTED); @@ -343,10 +300,10 @@ enum rsbac_adf_req_ret_t case T_FILE: return(check_flags_ff(target,tid, FF_read_only | FF_execute_only - | FF_write_only | FF_append_only)); + | FF_write_only | FF_append_only | FF_no_mount)); case T_DIR: return(check_flags_ff(target,tid, - FF_read_only | FF_search_only)); + FF_read_only | FF_search_only | FF_no_mount)); /* all other cases are undefined */ default: return(DO_NOT_CARE); diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/jail/jail_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/jail/jail_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/jail/jail_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/jail/jail_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -6,7 +6,7 @@ /* */ /* Author and (c) 1999-2002: Amon Ott */ /* */ -/* Last modified: 14/Aug/2002 */ +/* Last modified: 21/Nov/2002 */ /**************************************************** */ #include @@ -146,6 +146,7 @@ enum rsbac_adf_req_ret_t jail_check_ip(rsbac_pid_t pid, union rsbac_target_id_t tid) { rsbac_jail_ip_t jail_ip; + rsbac_jail_flags_t jail_flags; if(!tid.netobj.sock_p) { @@ -167,13 +168,17 @@ enum rsbac_adf_req_ret_t jail_ip = jail_get_ip_process(pid); if(jail_ip == INADDR_ANY) return GRANTED; + jail_flags = jail_get_flags_process(pid); if(tid.netobj.local_addr) { struct sockaddr_in * addr = tid.netobj.local_addr; if( (jail_ip == addr->sin_addr.s_addr) + || ( (jail_flags & JAIL_allow_inet_localhost) + && (addr->sin_addr.s_addr == RSBAC_JAIL_LOCALHOST) + ) #if defined(CONFIG_RSBAC_JAIL_NET_ADJUST) - || ( (jail_get_flags_process(pid) & JAIL_auto_adjust_inet_any) + || ( (jail_flags & JAIL_auto_adjust_inet_any) && (addr->sin_addr.s_addr == INADDR_ANY) ) #endif @@ -192,12 +197,24 @@ enum rsbac_adf_req_ret_t } } else + if( (tid.netobj.remote_addr) + && (jail_flags & JAIL_allow_inet_localhost) + && ( ((struct sockaddr_in *) tid.netobj.remote_addr)->sin_addr.s_addr + == RSBAC_JAIL_LOCALHOST) + ) + return GRANTED; + else { if( ( (jail_ip == tid.netobj.sock_p->sk->rcv_saddr) && (jail_ip == tid.netobj.sock_p->sk->saddr) ) + || ( (jail_flags & JAIL_allow_inet_localhost) + && ( (tid.netobj.sock_p->sk->saddr == RSBAC_JAIL_LOCALHOST) + || (tid.netobj.sock_p->sk->daddr == RSBAC_JAIL_LOCALHOST) + ) + ) #if defined(CONFIG_RSBAC_JAIL_NET_ADJUST) - || ( (jail_get_flags_process(pid) & JAIL_auto_adjust_inet_any) + || ( (jail_flags & 
JAIL_auto_adjust_inet_any) && (tid.netobj.sock_p->sk->rcv_saddr == INADDR_ANY) && (tid.netobj.sock_p->sk->saddr == INADDR_ANY) ) @@ -402,6 +419,7 @@ enum rsbac_adf_req_ret_t jail_id = jail_get_id_process(caller_pid); if( !jail_id || (jail_id == jail_get_id(target,tid)) + || (jail_get_flags_process(caller_pid) & JAIL_allow_external_ipc) ) return GRANTED; else @@ -493,6 +511,7 @@ enum rsbac_adf_req_ret_t jail_id = jail_get_id_process(caller_pid); if( !jail_id || (jail_id == jail_get_id(target,tid)) + || (jail_get_flags_process(caller_pid) & JAIL_allow_external_ipc) ) return GRANTED; else @@ -528,6 +547,7 @@ enum rsbac_adf_req_ret_t jail_id = jail_get_id_process(caller_pid); if( !jail_id || (jail_id == jail_get_id(target,tid)) + || (jail_get_flags_process(caller_pid) & JAIL_allow_external_ipc) ) return GRANTED; else @@ -586,6 +606,7 @@ enum rsbac_adf_req_ret_t jail_id = jail_get_id_process(caller_pid); if( !jail_id || (jail_id == jail_get_id(target,tid)) + || (jail_get_flags_process(caller_pid) & JAIL_allow_external_ipc) ) return GRANTED; else @@ -653,6 +674,7 @@ enum rsbac_adf_req_ret_t jail_id = jail_get_id_process(caller_pid); if( !jail_id || (jail_id == jail_get_id(target,tid)) + || (jail_get_flags_process(caller_pid) & JAIL_allow_external_ipc) ) return GRANTED; else @@ -724,6 +746,7 @@ enum rsbac_adf_req_ret_t jail_id = jail_get_id_process(caller_pid); if( !jail_id || (jail_id == jail_get_id(target,tid)) + || (jail_get_flags_process(caller_pid) & JAIL_allow_external_ipc) ) return GRANTED; else @@ -915,6 +938,7 @@ int rsbac_adf_set_attr_jail( return 0; default: + break; } #endif return 0; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/mac/mac_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/mac/mac_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/mac/mac_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/mac/mac_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,11 +4,11 @@ /* Facility (ADF) - Mandatory Access Control 
*/ /* File: rsbac/adf/mac/main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* MAC_LIGHT Modifications (c) 2000 Stanislav Ievlev */ /* and (c) 2001 Amon Ott */ /* */ -/* Last modified: 08/Aug/2002 */ +/* Last modified: 15/Jul/2003 */ /*************************************************** */ #include @@ -17,6 +17,8 @@ #include #include #include +#include +#include /************************************************* */ /* Global Variables */ @@ -45,10 +47,21 @@ static enum rsbac_adf_req_ret_t return(NOT_GRANTED); } /* if correct role, then grant */ - if (i_attr_val1.system_role == role) + if (i_attr_val1.system_role & role) return(GRANTED); else - return(NOT_GRANTED); + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_check_role(): pid %u/%.15s: wrong mac_role %u -> NOT_GRANTED!\n", + current->pid, current->comm, + i_attr_val1.system_role); + } +#endif + return(NOT_GRANTED); + } } /* auto_write() */ @@ -67,717 +80,2658 @@ static enum rsbac_adf_req_ret_t /* access. */ static enum rsbac_adf_req_ret_t - auto_write( rsbac_pid_t pid, - rsbac_security_level_t target_sec_level, - rsbac_mac_category_vector_t target_categories, + auto_write_attr( rsbac_pid_t pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + enum rsbac_attribute_t t_level_attr, + enum rsbac_attribute_t t_cat_attr, boolean set_level) { + rsbac_security_level_t curr_level; + rsbac_mac_category_vector_t curr_categories; + rsbac_security_level_t target_sec_level; + rsbac_mac_category_vector_t target_categories; union rsbac_target_id_t i_tid; - union rsbac_attribute_value_t attr_val_1; - union rsbac_attribute_value_t attr_val_2; - - /* Access to security_level SL_rsbac_internal is never granted. 
*/ - if (target_sec_level == SL_rsbac_internal) - return(NOT_GRANTED); + union rsbac_attribute_value_t attr_val1; + union rsbac_attribute_value_t attr_val2; + rsbac_mac_process_flags_t flags; + boolean mac_auto_used_level = FALSE; + boolean mac_auto_used_cat = FALSE; + boolean raise_object_level = FALSE; + boolean raise_object_cat = FALSE; - /* trusted process? */ + /* first check for mac_override, which allows everything */ i_tid.process = pid; if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_mac_trusted, - &attr_val_2, + A_mac_process_flags, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - if (attr_val_2.mac_trusted) - return(GRANTED); + flags = attr_val1.mac_process_flags; + if(flags & MAC_override) + return GRANTED; /* Get current security level */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, A_current_sec_level, - &attr_val_1, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* write is only allowed on same level -> try to auto-adjust, if levels differ */ - if (attr_val_1.current_sec_level != target_sec_level) - { - /* change current_sec_level automatically? */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_mac_auto, - &attr_val_1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* Do not change -> do not grant */ - if (!attr_val_1.mac_auto) - return(NOT_GRANTED); - - /* Get max. security_level of earlier reads */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_max_read_open, - &attr_val_2, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* if our target is below that, *-property is violated -> do not grant */ - if (target_sec_level < attr_val_2.max_read_open) - return(NOT_GRANTED); - - /* adjust current_sec_level, if set_level is true */ - attr_val_1.current_sec_level = target_sec_level; - if (set_level && (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_current_sec_level, - attr_val_1)) ) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); - } - } /* end of sec_level auto-section */ - + curr_level = attr_val1.security_level; /* Get current categories */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, A_mac_curr_categories, - &attr_val_1, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* write is only allowed with same category set, otherwise auto-adjust */ - if (attr_val_1.mac_categories != target_categories) - { - /* change mac_curr_categories automatically? */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_mac_auto, - &attr_val_2, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* Do not change -> do not grant */ - if (!attr_val_2.mac_auto) - return(NOT_GRANTED); - - /* Get max. read_categories of earlier reads */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_max_read_categories, - &attr_val_2, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* if our target is below that, *-property is violated -> do not grant */ - if (!( (attr_val_2.mac_categories & target_categories) - == attr_val_2.mac_categories)) - return(NOT_GRANTED); - - /* adjust mac_curr_categories, if set_level is true */ - attr_val_1.mac_categories = target_categories; - if (set_level && (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_mac_curr_categories, - attr_val_1)) ) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); - } - } /* end of category auto-section */ - - /* Get minimum sec_level of earlier writes */ + curr_categories = attr_val1.mac_categories; + /* Get target security level */ if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_open, - &attr_val_2, - FALSE)) + target, + tid, + t_level_attr, + &attr_val1, + TRUE)) { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* and adjust it, if necessary and set_level is TRUE */ - if (set_level && (attr_val_2.min_write_open > target_sec_level)) - { - attr_val_2.min_write_open = target_sec_level; - if (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_open, - attr_val_2)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); - } - } - /* Get minimum categories of earlier writes */ + target_sec_level = attr_val1.security_level; + /* Get target categories */ if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_categories, - &attr_val_2, - FALSE)) + target, + tid, + t_cat_attr, + &attr_val1, + TRUE)) { /* failed! 
*/ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* and adjust it, if necessary and set_level is TRUE */ - if ( set_level - && !((attr_val_2.mac_categories & target_categories) - == attr_val_2.mac_categories) - ) - { - attr_val_2.mac_categories &= target_categories; - if (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_categories, - attr_val_2)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); - } - } - /* Everything done, so return */ - return(GRANTED); - }; + target_categories = attr_val1.mac_categories; - -/* auto_read() */ -/* This function works similar to auto_write() */ - -static enum rsbac_adf_req_ret_t - auto_read ( rsbac_pid_t pid, - rsbac_security_level_t target_sec_level, - rsbac_mac_category_vector_t target_categories, - boolean set_level) - { - union rsbac_target_id_t i_tid; - union rsbac_attribute_value_t attr_val_1; - union rsbac_attribute_value_t attr_val_2; - - /* Access to security_level SL_rsbac_internal is never granted. */ - if (target_sec_level == SL_rsbac_internal) - return(NOT_GRANTED); - - /* Get process's current_sec_level */ - i_tid.process = pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_current_sec_level, - &attr_val_1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* read-up is no problem, but otherwise we can try auto-adjust */ - if (attr_val_1.current_sec_level < target_sec_level) + if(target_sec_level > curr_level) { - /* no read-up: Can we adjust current_sec_level? */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_mac_auto, - &attr_val_1, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* no -> not granted */ - if (!attr_val_1.mac_auto) - return(NOT_GRANTED); - - /* how about our upper boundary by writes / *-property? */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_min_write_open, - &attr_val_2, + A_security_level, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* our target is too high up -> not granted */ - if (target_sec_level > attr_val_2.min_write_open) - return(NOT_GRANTED); - - /* at last adjust current_sec_level, if set_level is on */ - attr_val_1.current_sec_level = target_sec_level; - if (set_level && (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_current_sec_level, - attr_val_1)) ) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); + if(attr_val1.security_level < target_sec_level) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: security_level %u under target_sec_level %u, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif return(NOT_GRANTED); } - } /* end of auto-section */ + /* curr_level < target_level <= max_level -> need mac_auto, write_up, trusted (at process) + or shared (at object) */ + if(flags & MAC_auto) + mac_auto_used_level = TRUE; + else + { + if( !(flags & MAC_write_up) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_up) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + break; + } + /* fall through */ - /* Get process's mac_curr_categories */ - i_tid.process = pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_mac_curr_categories, - &attr_val_1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: current security_level %u under target_sec_level %u, no auto, write_up, trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + curr_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } } - /* read-up is no problem, but otherwise we can try auto-adjust */ - if (!( (attr_val_1.mac_categories & target_categories) - == target_categories)) + else + if(target_sec_level < curr_level) { - /* no read-up: Can we adjust mac_curr_categories? */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_mac_auto, - &attr_val_2, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* no -> not granted */ - if (!attr_val_2.mac_auto) - return(NOT_GRANTED); - - /* how about our upper boundary by writes / *-property? */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_min_write_categories, - &attr_val_2, + A_min_security_level, + &attr_val1, FALSE)) { /* failed! 
*/ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* our target is too high up -> not granted */ - if (!( (attr_val_2.mac_categories & target_categories) - == target_categories)) - return(NOT_GRANTED); - - /* at last adjust mac_curr_categories, if set_level is on */ - attr_val_1.mac_categories |= target_categories; - if (set_level && (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_mac_curr_categories, - attr_val_1)) ) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - } /* end of auto-section */ - - /* now we are in grant area -> adjust max_read_open, if set_level is on... */ - if (set_level) - { - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_max_read_open, - &attr_val_2, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); + if(attr_val1.security_level > target_sec_level) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: min_security_level %u over target_sec_level %u, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif return(NOT_GRANTED); } - /* ...and it is necessary */ - if (attr_val_2.max_read_open < target_sec_level) + /* min_level <= target_level < curr_level -> need mac_auto, write_down or trusted */ + if(flags & MAC_auto) { - attr_val_2.max_read_open = target_sec_level; - if (rsbac_set_attr(MAC, + /* check max_read boundary */ + if (rsbac_get_attr(MAC, T_PROCESS, i_tid, A_max_read_open, - attr_val_2)) + &attr_val1, + FALSE)) { /* failed! 
*/ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } + if(attr_val1.security_level > target_sec_level) + { + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_level = TRUE; + } + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: max_read_open %u over target_sec_level %u, no write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } + else + mac_auto_used_level = TRUE; } - /* also adjust max_read_categories */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_max_read_categories, - &attr_val_2, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* ...and it is necessary */ - if (!( (attr_val_2.mac_categories & target_categories) - == target_categories)) + else { - attr_val_2.mac_categories |= target_categories; - if (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_max_read_categories, - attr_val_2)) - { /* failed! 
*/ - printk(KERN_WARNING "auto_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_level = TRUE; + } + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: current security_level %u over target_sec_level %u, no auto, write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + curr_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } } } } - /* ready and go */ - return(GRANTED); - }; - - -/* auto-read-write() */ -/* combines auto-read and auto-write */ - -static enum rsbac_adf_req_ret_t - auto_read_write ( rsbac_pid_t pid, - rsbac_security_level_t target_sec_level, - rsbac_mac_category_vector_t target_categories, - boolean set_level) - { - union rsbac_target_id_t i_tid; - union rsbac_attribute_value_t attr_val_1; - union rsbac_attribute_value_t attr_val_2; - union rsbac_attribute_value_t attr_val_3; - union rsbac_attribute_value_t attr_val_4; - union rsbac_attribute_value_t attr_val_5; - - /* Access to security_level SL_rsbac_internal is never granted. */ - if (target_sec_level == SL_rsbac_internal) - return(NOT_GRANTED); - - /* is our process trusted? If yes -> no write check -> perform auto-read! */ - i_tid.process = pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_mac_trusted, - &attr_val_2, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* if trusted -> no write-check -> auto-read */ - if (attr_val_2.mac_trusted) - return(auto_read(pid, target_sec_level, target_categories, set_level)); - /* get current_sec_level */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_current_sec_level, - &attr_val_1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* if it equals our target_level, prepare adjusting of boundaries */ - if (attr_val_1.current_sec_level == target_sec_level) + if((target_categories & curr_categories) != target_categories) { - /* Get max. security_level of earlier reads */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_max_read_open, - &attr_val_1, + A_mac_categories, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* Get min. security_level of earlier writes */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_open, - &attr_val_2, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + if((target_categories & attr_val1.mac_categories) != target_categories) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: max_categories %s under target categories %s, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif return(NOT_GRANTED); } + /* curr_categories < target_categories <= max_categories -> need mac_auto, write_up or trusted */ + if(flags & MAC_auto) + mac_auto_used_cat = TRUE; + else + { + if( !(flags & MAC_write_up) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_up) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + break; + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: curr_categories %s under target categories %s, no auto, write_up or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, curr_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } } else + if((target_categories & curr_categories) != curr_categories) { - /* if not, try to adjust current_sec_level */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_mac_auto, - &attr_val_1, + A_mac_min_categories, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* not allowed -> do not grant */ - if (!attr_val_1.mac_auto) - return(NOT_GRANTED); - - /* Get max. security_level of earlier reads */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_max_read_open, - &attr_val_1, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + if((target_categories & attr_val1.mac_categories) != attr_val1.mac_categories) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: min_categories %s over target categories %s, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif return(NOT_GRANTED); } - /* if our target is below that, *-property is violated -> do not grant */ - if (target_sec_level < attr_val_1.max_read_open) - return(NOT_GRANTED); - - /* Get min. security_level of earlier writes */ - if (rsbac_get_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_open, - &attr_val_2, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); + /* min_level <= target_level < curr_level -> need mac_auto, write_down or trusted */ + if(flags & MAC_auto) + { + /* check max_read boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_categories, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((target_categories & attr_val1.mac_categories) != attr_val1.mac_categories) + { + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_cat = TRUE; + } + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: max_read_categories %s over target categories %s, no write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } + else + mac_auto_used_cat = TRUE; } - /* if our target is above that, *-property is violated -> do not grant */ - if (target_sec_level > attr_val_2.min_write_open) - return(NOT_GRANTED); - - /* at last, adjust current_sec_level, if set_level is TRUE */ - attr_val_3.current_sec_level = target_sec_level; - if (set_level && (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_current_sec_level, - attr_val_3)) ) - { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); + else + { + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_cat = TRUE; + } + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: curr_categories %s over target categories %s, no auto, write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, curr_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } } - } /* end of sec_level auto-section */ + } - /* get mac_curr_categories */ + /* grant area */ + + /* adjust current_sec_level and min_write_level, */ + /* if set_level is true and mac_auto has been used*/ + if( set_level + && ( mac_auto_used_level + || raise_object_level + ) + ) + { +#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE + { + char * target_type_name; + char * target_id_name; + + target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(target_type_name) + { + #ifdef CONFIG_RSBAC_LOG_FULL_PATH + target_id_name + = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN); + /* max. path name len + some extra */ + #else + target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN); + /* max. 
file name len + some extra */ + #endif + if(target_id_name) + { + get_target_name(target_type_name, target, target_id_name, tid); + + if(mac_auto_used_level) + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_write(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + curr_level, + target_sec_level, + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_write(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + curr_level, + target_sec_level, + target_type_name, + target_id_name); + #endif + } + else + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_write(): Process %u (%.15s, owner %u): Raising object level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + target_sec_level, + curr_level, + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_write(): Process %u (%.15s, owner %u): Raising object level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + target_sec_level, + curr_level, + target_type_name, + target_id_name); + #endif + } + rsbac_kfree(target_id_name); + } + rsbac_kfree(target_type_name); + } + } +#endif + if(mac_auto_used_level) + { + i_tid.process = pid; + attr_val1.current_sec_level = target_sec_level; + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_open, + &attr_val2, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.min_write_open < attr_val2.min_write_open) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_open, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_current_sec_level, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + else + { + attr_val1.security_level = curr_level; + if (rsbac_set_attr(MAC, + target, + tid, + A_security_level, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + } + /* adjust current_categories and min_write_categories, */ + /* if set_level is true and mac_auto has been used*/ + if( set_level + && ( mac_auto_used_cat + || raise_object_cat + ) + ) + { +#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE + { + char * target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(target_type_name) + { + char * target_id_name; + + #ifdef CONFIG_RSBAC_LOG_FULL_PATH + target_id_name + = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN); + /* max. path name len + some extra */ + #else + target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN); + /* max. 
file name len + some extra */ + #endif + if(target_id_name) + { + char * tmp1 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp1) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp2) + { + get_target_name(target_type_name, target, target_id_name, tid); + + if(mac_auto_used_cat) + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_write(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp1, curr_categories), + u64tostrmac(tmp2, target_categories), + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_write(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp1, curr_categories), + u64tostrmac(tmp2, target_categories), + target_type_name, + target_id_name); + #endif + } + else + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_write(): Process %u (%.15s, owner %u): raising current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp2, target_categories), + u64tostrmac(tmp1, curr_categories), + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_write(): Process %u (%.15s, owner %u): raising current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp2, target_categories), + u64tostrmac(tmp1, curr_categories), + target_type_name, + target_id_name); + #endif + } + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp1); + } + rsbac_kfree(target_id_name); + } + 
rsbac_kfree(target_type_name); + } + } +#endif + if(mac_auto_used_cat) + { + i_tid.process = pid; + attr_val1.mac_categories = target_categories; + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_categories, + &attr_val2, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((attr_val1.mac_categories & attr_val2.mac_categories) + != attr_val2.mac_categories + ) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_categories, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_mac_curr_categories, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + else + { + attr_val1.mac_categories = curr_categories; + if (rsbac_set_attr(MAC, + target, + tid, + A_mac_categories, + attr_val1)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + } + + /* Everything done, so return */ + return(GRANTED); + } + +static enum rsbac_adf_req_ret_t + auto_write( rsbac_pid_t pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + boolean set_level) + { + return auto_write_attr(pid, + target, + tid, + A_security_level, + A_mac_categories, + set_level); + } + +/* auto_read() */ +/* This function works similar to auto_write() */ + +static enum rsbac_adf_req_ret_t + auto_read_attr ( rsbac_pid_t pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + enum rsbac_attribute_t t_level_attr, + enum rsbac_attribute_t t_cat_attr, + boolean set_level) + { + rsbac_security_level_t curr_level; + rsbac_mac_category_vector_t curr_categories; + rsbac_security_level_t target_sec_level; + rsbac_mac_category_vector_t target_categories; + union rsbac_target_id_t i_tid; + union rsbac_attribute_value_t attr_val1; + union rsbac_attribute_value_t attr_val2; + rsbac_mac_process_flags_t flags; + boolean mac_auto_used_level = FALSE; + boolean mac_auto_used_cat = FALSE; + boolean set_level_level = FALSE; + boolean set_level_cat = FALSE; + + /* first check for mac_override, which allows everything */ + i_tid.process = pid; + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_mac_process_flags, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + flags = attr_val1.mac_process_flags; + if(flags & MAC_override) + return GRANTED; + + /* Get current security level */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_current_sec_level, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + curr_level = attr_val1.security_level; + /* Get current categories */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, A_mac_curr_categories, - &attr_val_3, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + curr_categories = attr_val1.mac_categories; + /* Get target security level */ + if (rsbac_get_attr(MAC, + target, + tid, + t_level_attr, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* if it equals our target_categories, prepare adjusting of boundaries */ - if (attr_val_1.mac_categories == target_categories) + target_sec_level = attr_val1.security_level; + /* Get target categories */ + if (rsbac_get_attr(MAC, + target, + tid, + t_cat_attr, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + target_categories = attr_val1.mac_categories; + + if(target_sec_level > curr_level) { - /* Get max_read_categories of earlier reads */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_max_read_categories, - &attr_val_3, + A_security_level, + &attr_val1, FALSE)) { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* Get min_write_categories of earlier writes */ + if(attr_val1.security_level < target_sec_level) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read(): pid %u/%.15s: security_level %u under target_sec_level %u, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + /* curr_level < target_level <= max_level -> need mac_auto, read_up or trusted (with read option) */ + if(flags & MAC_auto) + { + /* check min_write boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_open, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.security_level < target_sec_level) + { + if( !(flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (attr_val1.mac_file_flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read(): pid %u/%.15s: min_write_open %u under target_sec_level %u, no read_up or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } + else + { + mac_auto_used_level = TRUE; + set_level_level = TRUE; + } + } + else + { + if( !(flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (attr_val1.mac_file_flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read(): pid %u/%.15s: current level %u under target_sec_level %u, no auto, read_up or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + curr_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } + } + else + if(target_sec_level < curr_level) + { + if(flags & MAC_auto) + { + mac_auto_used_level = TRUE; + } + } + if((target_categories & curr_categories) != target_categories) + { if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_min_write_categories, - &attr_val_4, + A_mac_categories, + &attr_val1, FALSE)) { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((target_categories & attr_val1.mac_categories) != target_categories) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: max_categories %s under target categories %s, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif return(NOT_GRANTED); } + /* curr_categories < target_categories <= max_categories -> need mac_auto, read_up or trusted */ + if(flags & MAC_auto) + { + /* check min_write boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_categories, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((target_categories & attr_val1.mac_categories) != target_categories) + { + if( !(flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (attr_val1.mac_file_flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: min_write_categories %s under target categories %s, no read_up or trusted with read option -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } + else + { + mac_auto_used_cat = TRUE; + set_level_cat = TRUE; + } + } + else + { + if( !(flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_read_up) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (attr_val1.mac_file_flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_write(): pid %u/%.15s: curr_categories %s under target categories %s, no auto, read_up or trusted with read option -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, curr_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } } else + if((target_categories & curr_categories) != curr_categories) + { + if(flags & MAC_auto) + { + mac_auto_used_level = TRUE; + } + } + + /* grant area */ + + /* adjust current_sec_level and max_read_level, */ + /* if set_level is true and mac_auto has been used*/ + if(set_level && mac_auto_used_level) { - /* if not, try to adjust current_sec_level */ + i_tid.process = pid; + attr_val1.current_sec_level = target_sec_level; + if(set_level_level) + { +#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE + char * target_type_name; + char * target_id_name; + + target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(target_type_name) + { + #ifdef CONFIG_RSBAC_LOG_FULL_PATH + target_id_name + = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN); + /* max. path name len + some extra */ + #else + target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN); + /* max. 
file name len + some extra */ + #endif + if(target_id_name) + { + get_target_name(target_type_name, target, target_id_name, tid); + + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_read(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + curr_level, + target_sec_level, + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_read(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + curr_level, + target_sec_level, + target_type_name, + target_id_name); + #endif + rsbac_kfree(target_id_name); + } + rsbac_kfree(target_type_name); + } +#endif + if(rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_current_sec_level, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_mac_auto, - &attr_val_3, - FALSE)) + A_max_read_open, + &attr_val2, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.max_read_open > attr_val2.max_read_open) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_open, + attr_val1)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + } + /* adjust current_categories and max_read_categories, */ + /* if set_level is true and mac_auto has been used*/ + if(set_level && mac_auto_used_cat) + { + i_tid.process = pid; + attr_val1.mac_categories = target_categories; + if(set_level_cat) + { +#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE + char * target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(target_type_name) + { + char * target_id_name; + + #ifdef CONFIG_RSBAC_LOG_FULL_PATH + target_id_name + = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN); + /* max. path name len + some extra */ + #else + target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN); + /* max. file name len + some extra */ + #endif + if(target_id_name) + { + char * tmp1 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp1) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp2) + { + get_target_name(target_type_name, target, target_id_name, tid); + + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_read(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp1, curr_categories), + u64tostrmac(tmp2, target_categories), + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_read(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp1, curr_categories), + u64tostrmac(tmp2, target_categories), + target_type_name, + target_id_name); + #endif + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp1); + } + rsbac_kfree(target_id_name); + } + rsbac_kfree(target_type_name); + } +#endif + if(rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + 
A_mac_curr_categories, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_categories, + &attr_val2, + TRUE)) { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* not allowed -> do not grant */ - if (!attr_val_3.mac_auto) - return(NOT_GRANTED); - - /* Get mac_categories for adjusting */ + if((attr_val1.mac_categories & attr_val2.mac_categories) + != attr_val1.mac_categories + ) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_categories, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + } + + /* Everything done, so return */ + return(GRANTED); + } + +static enum rsbac_adf_req_ret_t + auto_read( rsbac_pid_t pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + boolean set_level) + { + return auto_read_attr(pid, + target, + tid, + A_security_level, + A_mac_categories, + set_level); + } + + +/* auto-read-write() */ +/* combines auto-read and auto-write */ + +static enum rsbac_adf_req_ret_t + auto_read_write_attr( rsbac_pid_t pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + enum rsbac_attribute_t t_level_attr, + enum rsbac_attribute_t t_cat_attr, + boolean set_level) + { + rsbac_security_level_t curr_level; + rsbac_mac_category_vector_t curr_categories; + rsbac_security_level_t target_sec_level; + rsbac_mac_category_vector_t target_categories; + union rsbac_target_id_t i_tid; + union rsbac_attribute_value_t attr_val1; + union rsbac_attribute_value_t attr_val2; + rsbac_mac_process_flags_t flags; + boolean mac_auto_used_level = FALSE; + boolean mac_auto_used_cat = FALSE; + boolean raise_object_level = FALSE; + boolean raise_object_cat = 
FALSE; + + /* first check for mac_override, which allows everything */ + i_tid.process = pid; + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_mac_process_flags, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + flags = attr_val1.mac_process_flags; + if(flags & MAC_override) + return GRANTED; + + /* Get current security level */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_current_sec_level, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + curr_level = attr_val1.security_level; + /* Get current categories */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_mac_curr_categories, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + curr_categories = attr_val1.mac_categories; + /* Get target security level */ + if (rsbac_get_attr(MAC, + target, + tid, + t_level_attr, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + target_sec_level = attr_val1.security_level; + /* Get target categories */ + if (rsbac_get_attr(MAC, + target, + tid, + t_cat_attr, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + target_categories = attr_val1.mac_categories; + + if(target_sec_level > curr_level) + { if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_mac_categories, - &attr_val_4, + A_security_level, + &attr_val1, FALSE)) { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.security_level < target_sec_level) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: security_level %u under target_sec_level %u, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif return(NOT_GRANTED); } - /* if our target is too high up -> not granted */ - if (!( (attr_val_4.mac_categories & target_categories) - == target_categories)) - return(NOT_GRANTED); + /* curr_level < target_level <= max_level */ + /* -> need mac_auto, (write_up && read_up) or trusted (with read option) */ + if(flags & MAC_auto) + { + /* check min_write boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_open, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.security_level < target_sec_level) + { + if( !((flags & MAC_write_up) && (flags & MAC_read_up)) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( ( (attr_val1.mac_file_flags & MAC_write_up) + && (attr_val1.mac_file_flags & MAC_read_up) + ) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: min_write_open %u under target_sec_level %u, no read_up or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } + else + mac_auto_used_level = TRUE; + } + else + { + if( !((flags & MAC_write_up) && (flags & MAC_read_up)) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( ( (attr_val1.mac_file_flags & MAC_write_up) + && (attr_val1.mac_file_flags & MAC_read_up) + ) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ - /* Get max. 
categories of earlier reads */ + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: current level %u under target_sec_level %u, no auto, (write_up && read_up) or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + curr_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } + } + else + if(target_sec_level < curr_level) + { if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_max_read_categories, - &attr_val_3, + A_min_security_level, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.security_level > target_sec_level) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: min_security_level %u over target_sec_level %u, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif return(NOT_GRANTED); } - /* if our target is below that, *-property is violated -> do not grant */ - if (!( (attr_val_3.mac_categories & target_categories) - == attr_val_3.mac_categories)) - return(NOT_GRANTED); + /* min_level <= target_level < curr_level -> need mac_auto, write_down or trusted */ + if(flags & MAC_auto) + { + /* check max_read boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_open, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.security_level > target_sec_level) + { + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_level = TRUE; + } + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: max_read_open %u over target_sec_level %u, no write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + attr_val1.security_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } + else + mac_auto_used_level = TRUE; + } + else + { + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_level = TRUE; + } + break; + } + /* fall through */ - /* Get min. 
write categories of earlier writes */ + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: current security_level %u over target_sec_level %u, no auto, write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + curr_level, target_sec_level); + } +#endif + return(NOT_GRANTED); + } + } + } + } + if((target_categories & curr_categories) != target_categories) + { if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_min_write_categories, - &attr_val_4, + A_mac_categories, + &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* if our target is above that, *-property is violated -> do not grant */ - if (!( (attr_val_4.mac_categories & target_categories) - == target_categories)) - return(NOT_GRANTED); - - /* at last, adjust current_sec_level, if set_level is TRUE */ - attr_val_5.mac_categories = target_categories; - if (set_level && (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_mac_curr_categories, - attr_val_5)) ) - { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_set_attr() returned error!\n"); + if((target_categories & attr_val1.mac_categories) != target_categories) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: max_categories %s under target categories %s, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif return(NOT_GRANTED); } - } /* end of categories auto-section */ + /* curr_categories < target_categories <= max_categories */ + /* -> need mac_auto, (read_up && write_up) or trusted (with read option) */ + if(flags & MAC_auto) + { + /* check min_write boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_categories, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((target_categories & attr_val1.mac_categories) != target_categories) + { + if( !((flags & MAC_write_up) && (flags & MAC_read_up)) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( ( (attr_val1.mac_file_flags & MAC_write_up) + && (attr_val1.mac_file_flags & MAC_read_up) + ) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: min_write_categories %s under target categories %s, no (read_up and write_up) or trusted with read option -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } + else + mac_auto_used_cat = TRUE; + } + else + { + if( !((flags & MAC_write_up) && (flags & MAC_read_up)) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + && !(flags & MAC_trusted) +#endif + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( ( (attr_val1.mac_file_flags & MAC_write_up) + && (attr_val1.mac_file_flags & MAC_read_up) + ) +#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ + || (flags & MAC_trusted) +#endif + ) + { + break; + } + /* fall through */ + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); - /* We now enter grant area - only boundaries are still to be adjusted */ - /* adjust max_read_open, if necessary and set_level is TRUE */ - if (set_level && (attr_val_1.max_read_open < target_sec_level) ) + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: curr_categories %s under target categories %s, no auto, (read_up and write_up) or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, curr_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } + } + else + if((target_categories & curr_categories) != curr_categories) { - attr_val_1.max_read_open = target_sec_level; - if (rsbac_set_attr(MAC, + if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_max_read_open, - attr_val_1)) + A_mac_min_categories, + &attr_val1, + FALSE)) { /* failed! 
*/ - printk(KERN_WARNING "auto_read_write(): rsbac_set_attr() returned error!\n"); + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } + if((target_categories & attr_val1.mac_categories) != attr_val1.mac_categories) + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: min_categories %s over target categories %s, no override -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + /* min_level <= target_level < curr_level -> need mac_auto, write_down or trusted */ + if(flags & MAC_auto) + { + /* check max_read boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_categories, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((target_categories & attr_val1.mac_categories) != attr_val1.mac_categories) + { + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_cat = TRUE; + } + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: max_read_categories %s over target categories %s, no write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, attr_val1.mac_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } + else + mac_auto_used_cat = TRUE; + } + else + { + if( !(flags & MAC_write_down) + && !(flags & MAC_trusted) + ) + { + /* Try mac_file_flags on the target, if FD object */ + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_file_flags, + &attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if( (attr_val1.mac_file_flags & MAC_write_down) + || (attr_val1.mac_file_flags & MAC_trusted) + ) + { + if(attr_val1.mac_file_flags & MAC_auto) + { + raise_object_cat = TRUE; + } + break; + } + /* fall through */ + + default: +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { + printk(KERN_DEBUG + "mac_auto_read_write(): pid %u/%.15s: curr_categories %s over target categories %s, no auto, write_down or trusted -> NOT_GRANTED!\n", + current->pid, current->comm, + u64tostrmac(tmp, curr_categories), + u64tostrmac(tmp2, target_categories)); + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + } +#endif + return(NOT_GRANTED); + } + } + } } - /* adjust min_write_open, if necessary and set_level is TRUE */ - if (set_level && (attr_val_2.min_write_open > target_sec_level)) + + /* grant area */ + + /* adjust current_sec_level and min_write_level, */ + /* if set_level is true and mac_auto has been used*/ + if( set_level + && ( mac_auto_used_level + || raise_object_level + ) + ) { - attr_val_2.min_write_open = target_sec_level; - if (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_open, - attr_val_2)) - { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); +#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE + { + char * target_type_name; + char * target_id_name; + + target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(target_type_name) + { + #ifdef CONFIG_RSBAC_LOG_FULL_PATH + target_id_name + = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN); + /* max. path name len + some extra */ + #else + target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN); + /* max. 
file name len + some extra */ + #endif + if(target_id_name) + { + get_target_name(target_type_name, target, target_id_name, tid); + + if(mac_auto_used_level) + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_read_write(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + curr_level, + target_sec_level, + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_read_write(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + curr_level, + target_sec_level, + target_type_name, + target_id_name); + #endif + } + else + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_read_write(): Process %u (%.15s, owner %u): Raising object level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + target_sec_level, + curr_level, + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_read_write(): Process %u (%.15s, owner %u): Raising object level from %u to %u for %s %s\n", + pid, + current->comm, + current->uid, + target_sec_level, + curr_level, + target_type_name, + target_id_name); + #endif + } + rsbac_kfree(target_id_name); + } + rsbac_kfree(target_type_name); + } + } +#endif + if(mac_auto_used_level) + { + i_tid.process = pid; + attr_val1.current_sec_level = target_sec_level; + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_open, + &attr_val2, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.min_write_open < attr_val2.min_write_open) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_open, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_open, + &attr_val2, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(attr_val1.max_read_open > attr_val2.max_read_open) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_open, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_current_sec_level, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } } - } - /* adjust max_read_categories, if necessary and set_level is TRUE */ - if ( set_level - && !( (attr_val_3.mac_categories & target_categories) - == target_categories) - ) - { - attr_val_3.mac_categories |= target_categories; - if (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_max_read_categories, - attr_val_3)) - { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); + else + { + attr_val1.security_level = curr_level; + if (rsbac_set_attr(MAC, + target, + tid, + A_security_level, + attr_val1)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } } } - /* adjust min_write_categories, if necessary and set_level is TRUE */ - if ( set_level - && !( (attr_val_4.mac_categories & target_categories) - == attr_val_4.mac_categories) - ) + /* adjust current_categories and min_write_categories, */ + /* if set_level is true and mac_auto has been used*/ + if( set_level + && ( mac_auto_used_cat + || raise_object_cat + ) + ) { - attr_val_4.mac_categories &= target_categories; - if (rsbac_set_attr(MAC, - T_PROCESS, - i_tid, - A_min_write_categories, - attr_val_4)) - { /* failed! */ - printk(KERN_WARNING "auto_read_write(): rsbac_set_attr() returned error!\n"); - return(NOT_GRANTED); +#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE + { + char * target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(target_type_name) + { + char * target_id_name; + + #ifdef CONFIG_RSBAC_LOG_FULL_PATH + target_id_name + = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN); + /* max. path name len + some extra */ + #else + target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN); + /* max. 
file name len + some extra */ + #endif + if(target_id_name) + { + char * tmp1 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp1) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp2) + { + get_target_name(target_type_name, target, target_id_name, tid); + + if(mac_auto_used_cat) + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_read_write(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp1, curr_categories), + u64tostrmac(tmp2, target_categories), + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_read_write(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp1, curr_categories), + u64tostrmac(tmp2, target_categories), + target_type_name, + target_id_name); + #endif + } + else + { + #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "mac_auto_read_write(): Process %u (%.15s, owner %u): raising current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp2, target_categories), + u64tostrmac(tmp1, curr_categories), + target_type_name, + target_id_name); + #endif + #ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "mac_auto_read_write(): Process %u (%.15s, owner %u): raising current categories from %s to %s for %s %s\n", + pid, + current->comm, + current->uid, + u64tostrmac(tmp2, target_categories), + u64tostrmac(tmp1, curr_categories), + target_type_name, + target_id_name); + #endif + } + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp1); + } + rsbac_kfree(target_id_name); + } 
+ rsbac_kfree(target_type_name); + } + } +#endif + if(mac_auto_used_cat) + { + i_tid.process = pid; + attr_val1.mac_categories = target_categories; + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_categories, + &attr_val2, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((attr_val1.mac_categories & attr_val2.mac_categories) + != attr_val2.mac_categories + ) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_min_write_categories, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_categories, + &attr_val2, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if((attr_val1.mac_categories & attr_val2.mac_categories) + != attr_val1.mac_categories + ) + { + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_max_read_categories, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_mac_curr_categories, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + } + else + { + attr_val1.mac_categories = curr_categories; + if (rsbac_set_attr(MAC, + target, + tid, + A_mac_categories, + attr_val1)) + { /* failed! 
*/ + printk(KERN_WARNING "mac_auto_read_write(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } } } - - /* ready and go */ + + /* Everything done, so return */ return(GRANTED); - }; + } + +static enum rsbac_adf_req_ret_t + auto_read_write( rsbac_pid_t pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + boolean set_level) + { + return auto_read_write_attr(pid, + target, + tid, + A_security_level, + A_mac_categories, + set_level); + } /************************************************* */ /* Externally visible functions */ @@ -793,14 +2747,11 @@ enum rsbac_adf_req_ret_t rsbac_uid_t owner) { enum rsbac_adf_req_ret_t result = DO_NOT_CARE; -/* enum rsbac_target_t i_target; -*/ union rsbac_target_id_t i_tid; -/* enum rsbac_attribute_t i_attr; -*/ union rsbac_attribute_value_t i_attr_val1; +#if defined(CONFIG_RSBAC_MAC_NET_OBJ_PROT) union rsbac_attribute_value_t i_attr_val2; - boolean inherit; +#endif switch (request) { @@ -819,34 +2770,11 @@ enum rsbac_adf_req_ret_t /* only for IPC */ if (target == T_IPC) { - /* test write access to ipc: get its sec_level */ - if (rsbac_get_attr(MAC, - T_IPC, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - T_IPC, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); + /* and perform auto-write without setting attributes */ + return(auto_write(caller_pid, + target, + tid, + FALSE)); } else /* all other targets are unknown */ @@ -858,63 +2786,17 @@ enum rsbac_adf_req_ret_t { case T_FILE: case T_FIFO: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - 
target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; case T_IPC: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; case T_DEV: @@ -926,107 +2808,38 @@ enum rsbac_adf_req_ret_t &i_attr_val1, FALSE)) { - printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if(!i_attr_val1.mac_check) - return(DO_NOT_CARE); - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* and perform auto-write without setting 
attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); - break; - /* all other cases are unknown */ - default: return(DO_NOT_CARE); - } - - case R_CHANGE_GROUP: - switch(target) - { - case T_FILE: - case T_DIR: - case T_FIFO: - case T_SYMLINK: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); - case T_IPC: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } + if(!i_attr_val1.mac_check) + return(DO_NOT_CARE); + /* and perform auto-write without setting attributes */ + return(auto_write(caller_pid, + target, + tid, + FALSE)); + break; + /* all other cases are unknown */ + default: return(DO_NOT_CARE); + } + + case R_CHANGE_GROUP: + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_SYMLINK: + /* and perform auto-write without setting attributes */ + return(auto_write(caller_pid, + target, + tid, + FALSE)); + case T_IPC: /* 
and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); /* We do not care about */ @@ -1041,88 +2854,39 @@ enum rsbac_adf_req_ret_t case T_DIR: case T_FIFO: case T_SYMLINK: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); - break; + case T_IPC: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); - break; - /* processes may only be given to dominated user, */ - /* if Role Protection is on, not between administrator */ - /* and security officer. 
*/ +#if 0 /* Disabled, not correct */ + /* processes may only be given to dominated user */ case T_PROCESS: /* For target process there MUST be a new owner specified */ if (attr != A_owner) return(UNDEFINED); - /* Allow change owner for trusted processes*/ + /* Allow change owner for trusted and override processes*/ /* trusted process? */ i_tid.process = caller_pid; if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_mac_trusted, + A_mac_process_flags, &i_attr_val1, FALSE)) { /* failed! */ printk(KERN_WARNING "adf_request_mac(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - if (i_attr_val1.mac_trusted) - return(GRANTED); + if (i_attr_val1.mac_process_flags & (MAC_trusted | MAC_override)) + return(GRANTED); /* Get security level for old and new owner */ i_tid.user = owner; @@ -1151,51 +2915,21 @@ enum rsbac_adf_req_ret_t } /* if old user does not dominate new user -> do not grant */ if (i_attr_val1.security_level < i_attr_val2.security_level) - return(NOT_GRANTED); - /* else: go on (test roles only, if role protection) */ - - #ifdef CONFIG_RSBAC_MAC_ROLE_PROT - /* Administrator or secoff? 
*/ - i_tid.user = owner; - if (rsbac_get_attr(MAC, - T_USER, - i_tid, - A_mac_role, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* if general user, then grant */ - if (i_attr_val1.system_role == SR_user) - return(GRANTED); - /* get target user's role */ - i_tid.user = attr_val.owner; - if (rsbac_get_attr(MAC, - T_USER, - i_tid, - A_mac_role, - &i_attr_val2, - TRUE)) { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_mac) + { + printk(KERN_DEBUG + "rsbac_adf_request_mac(): pid %u/%.15s: old owner's sec_level %u does not dominate new owner's sec_level %u -> NOT_GRANTED!\n", + current->pid, current->comm, + i_attr_val1.security_level, i_attr_val2.security_level); + } +#endif return(NOT_GRANTED); } - /* target is no general user and has different role -> deny */ - if ( (i_attr_val2.system_role != SR_user) - && (i_attr_val1.system_role != i_attr_val2.system_role) - ) - return(NOT_GRANTED); - #endif - return(GRANTED); +#endif /* 0 */ - /* Change-user without target is only for DAC -> who cares? 
*/ - case T_NONE: - return(DO_NOT_CARE); /* all other cases are unknown */ default: return(DO_NOT_CARE); @@ -1205,33 +2939,10 @@ enum rsbac_adf_req_ret_t switch(target) { case T_DIR: - /* test read access to dir: get its sec_level */ - if (rsbac_get_attr(MAC, - T_DIR, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - T_DIR, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read without setting attributes */ return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; /* all other cases are unknown */ @@ -1253,33 +2964,10 @@ enum rsbac_adf_req_ret_t return GRANTED; #else /* Mode of created item is ignored! */ - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - T_DIR, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - T_DIR, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); #endif break; @@ -1289,34 +2977,13 @@ enum rsbac_adf_req_ret_t return mac_check_role(owner, SR_security_officer); case T_NETOBJ: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_local_sec_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - 
if (rsbac_get_attr(MAC, - target, - tid, - A_local_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); + return(auto_write_attr(caller_pid, + target, + tid, + A_local_sec_level, + A_local_mac_categories, + FALSE)); #endif /* all other cases are unknown */ @@ -1330,106 +2997,37 @@ enum rsbac_adf_req_ret_t case T_DIR: case T_FIFO: case T_SYMLINK: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; case T_IPC: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; #ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT case T_NETTEMP: return 
mac_check_role(owner, SR_security_officer); -#endif - - /* all other cases are unknown */ - default: return(DO_NOT_CARE); - } - - case R_EXECUTE: - switch(target) - { - case T_FILE: - /* test read access to file: get its sec_level */ - if (rsbac_get_attr(MAC, - T_FILE, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - T_FILE, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } +#endif + + /* all other cases are unknown */ + default: return(DO_NOT_CARE); + } + + case R_EXECUTE: + switch(target) + { + case T_FILE: /* and perform auto-read without setting attributes */ return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); /* all other cases are unknown */ @@ -1452,7 +3050,19 @@ enum rsbac_adf_req_ret_t if (tid.scd != ST_rsbaclog) return(GRANTED); /* Secoff? 
*/ - return mac_check_role(owner, SR_security_officer); + if(mac_check_role(owner, SR_security_officer) == NOT_GRANTED) + return mac_check_role(owner, SR_auditor); + else + return GRANTED; + + case T_PROCESS: + /* perform auto-read without setting attributes */ + return(auto_read_attr(caller_pid, + target, + tid, + A_current_sec_level, + A_mac_curr_categories, + FALSE)); default: return(DO_NOT_CARE); @@ -1464,33 +3074,10 @@ enum rsbac_adf_req_ret_t case T_FILE: case T_FIFO: case T_SYMLINK: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; /* all other cases are unknown */ @@ -1498,40 +3085,16 @@ enum rsbac_adf_req_ret_t } case R_MODIFY_ACCESS_DATA: - case R_RENAME: switch(target) { case T_FILE: case T_DIR: case T_FIFO: case T_SYMLINK: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; /* all 
other cases are unknown */ @@ -1544,9 +3107,11 @@ enum rsbac_adf_req_ret_t case A_security_level: case A_local_sec_level: case A_remote_sec_level: + case A_min_security_level: case A_mac_categories: case A_local_mac_categories: case A_remote_mac_categories: + case A_mac_min_categories: case A_pseudo: case A_system_role: case A_mac_role: @@ -1556,10 +3121,11 @@ enum rsbac_adf_req_ret_t case A_max_read_open: case A_min_write_categories: case A_max_read_categories: - case A_mac_auto: - case A_mac_trusted: + case A_mac_user_flags: + case A_mac_process_flags: case A_mac_trusted_for_user: case A_mac_check: + case A_mac_auto: #ifdef CONFIG_RSBAC_MAC_GEN_PROT case A_log_array_low: case A_log_array_high: @@ -1591,63 +3157,11 @@ enum rsbac_adf_req_ret_t case T_DIR: case T_FIFO: case T_SYMLINK: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); - break; case T_IPC: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ 
return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; @@ -1726,15 +3240,17 @@ enum rsbac_adf_req_ret_t "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } - /* if rsbaclog: grant only for secoff */ + /* if rsbaclog: grant only for secoff and auditor */ if(tid.scd == ST_rsbaclog) { - if (i_attr_val1.system_role == SR_security_officer) + if ( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_auditor) + ) return(GRANTED); else return(NOT_GRANTED); } - /* if administrator or (rsbaclog and secoff), then grant */ + /* if administrator, then grant */ if (i_attr_val1.system_role == SR_administrator) return(GRANTED); else @@ -1761,41 +3277,18 @@ enum rsbac_adf_req_ret_t return(NOT_GRANTED); #endif /* test read-write access to mount dir / dev: */ - /* get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read(-write) without setting of attributes */ if( (target == T_DEV) && (attr == A_mode) && (attr_val.mode & MS_RDONLY)) return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); else return(auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); /* all other cases are unknown */ @@ -1811,33 +3304,10 @@ enum rsbac_adf_req_ret_t case T_FILE: case T_FIFO: #endif - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - 
"rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read without setting attributes */ return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; @@ -1856,33 +3326,10 @@ enum rsbac_adf_req_ret_t } if(!i_attr_val1.mac_check) return(DO_NOT_CARE); - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read without setting attributes */ return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; #endif @@ -1894,34 +3341,13 @@ enum rsbac_adf_req_ret_t return mac_check_role(owner, SR_administrator); case T_NETOBJ: - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_sec_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read without setting attributes */ - return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); + 
return(auto_read_attr(caller_pid, + target, + tid, + A_remote_sec_level, + A_remote_mac_categories, + FALSE)); #endif /* all other cases are unknown */ @@ -1936,19 +3362,22 @@ enum rsbac_adf_req_ret_t case A_security_level: case A_local_sec_level: case A_remote_sec_level: + case A_min_security_level: case A_mac_categories: case A_local_mac_categories: case A_remote_mac_categories: + case A_mac_min_categories: case A_pseudo: case A_system_role: case A_mac_role: case A_current_sec_level: case A_min_write_open: case A_max_read_open: - case A_mac_auto: - case A_mac_trusted: + case A_mac_user_flags: + case A_mac_process_flags: case A_mac_trusted_for_user: case A_mac_check: + case A_mac_auto: #ifdef CONFIG_RSBAC_MAC_GEN_PROT case A_log_array_low: case A_log_array_high: @@ -1961,7 +3390,7 @@ enum rsbac_adf_req_ret_t case A_auth_may_set_cap: #endif /* Security Officer? */ - return mac_check_role(owner, SR_security_officer); + return mac_check_role(owner, SR_security_officer | SR_administrator); default: return(DO_NOT_CARE); @@ -1974,33 +3403,10 @@ enum rsbac_adf_req_ret_t case T_DIR: case T_FIFO: case T_IPC: - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read without setting attributes */ return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; case T_DEV: @@ -2016,34 +3422,11 @@ enum rsbac_adf_req_ret_t return(NOT_GRANTED); } if(!i_attr_val1.mac_check) - return(DO_NOT_CARE); - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - 
A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } + return(DO_NOT_CARE); /* and perform auto-read without setting attributes */ return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; /* all other cases are unknown */ @@ -2056,39 +3439,10 @@ enum rsbac_adf_req_ret_t case T_FILE: case T_FIFO: case T_IPC: - /* test read-write access to target: get its sec_level */ - if( (target == T_FILE) - || (target == T_FIFO) - ) - inherit = TRUE; - else - inherit = FALSE; - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read-write without setting attributes */ return(auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); case T_DEV: @@ -2105,33 +3459,10 @@ enum rsbac_adf_req_ret_t } if(!i_attr_val1.mac_check) return(DO_NOT_CARE); - /* test read-write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned 
error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read-write without setting attributes */ return(auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); /* all other cases are unknown */ @@ -2150,40 +3481,83 @@ enum rsbac_adf_req_ret_t default: return(DO_NOT_CARE); } -/* case R_RENAME: see R_MODIFY_ACCESS_DATA */ - - case R_SEARCH: + case R_RENAME: switch(target) { + case T_FILE: case T_DIR: + case T_FIFO: case T_SYMLINK: - /* test read access to dir/symlink: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) + /* and perform auto-write without setting attributes */ + result = auto_write(caller_pid, + target, + tid, + FALSE); + /* if parent dir might change, convert inherit to explicit level/cat: + get and set effective value */ + if( ( (result == GRANTED) + || (result == DO_NOT_CARE) + ) + && ( (attr != A_new_dir_dentry_p) + || (attr_val.new_dir_dentry_p != tid.file.dentry_p->d_parent) + ) + ) { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); + if (rsbac_get_attr(MAC, + target, + tid, + A_security_level, + &i_attr_val1, + TRUE)) + { /* failed! */ + printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(rsbac_set_attr(MAC, + target, + tid, + A_security_level, + i_attr_val1)) + { /* failed! */ + printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_categories, + &i_attr_val1, + TRUE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + if(rsbac_set_attr(MAC, + target, + tid, + A_mac_categories, + i_attr_val1)) + { /* failed! */ + printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_set_attr() returned error!\n"); + return(NOT_GRANTED); + } } + return result; + break; + /* all other cases are unknown */ + default: return(DO_NOT_CARE); + } + + + case R_SEARCH: + switch(target) + { + case T_DIR: + case T_SYMLINK: /* and perform auto-read without setting attributes */ return(auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; /* all other cases are unknown */ @@ -2194,34 +3568,13 @@ enum rsbac_adf_req_ret_t switch(target) { case T_PROCESS: - /* test write access to process: get its sec_level */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_current_sec_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_mac_curr_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.current_sec_level, - i_attr_val2.mac_categories, - FALSE)); + return(auto_write_attr(caller_pid, + target, + tid, + A_current_sec_level, + A_mac_curr_categories, + FALSE)); /* all other cases are unknown */ default: @@ -2276,34 +3629,13 @@ enum rsbac_adf_req_ret_t switch(target) { case T_PROCESS: - /* test read-write access to process: get its sec_level */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_current_sec_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, 
- A_mac_curr_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read-write without setting attributes */ - return(auto_read_write(caller_pid, - i_attr_val1.current_sec_level, - i_attr_val2.mac_categories, - FALSE)); + return(auto_read_write_attr(caller_pid, + target, + tid, + A_current_sec_level, + A_mac_curr_categories, + FALSE)); /* all other cases are unknown */ default: @@ -2314,33 +3646,10 @@ enum rsbac_adf_req_ret_t switch(target) { case T_FILE: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; /* all other cases are unknown */ @@ -2369,112 +3678,34 @@ enum rsbac_adf_req_ret_t #ifdef CONFIG_RSBAC_RW case T_FILE: case T_FIFO: -#ifdef CONFIG_RSBAC_RW_SOCK - case T_IPC: -#endif #endif /* Mode of created item is ignored! 
*/ - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); - - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); - break; - -#ifdef CONFIG_RSBAC_RW - case T_DEV: - /* Only check for devices with mac_check set */ - if (rsbac_get_attr(MAC, - T_DEV, - tid, - A_mac_check, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if(!i_attr_val1.mac_check) - return(DO_NOT_CARE); - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } + return(auto_write(caller_pid, + target, + tid, + FALSE)); + + +#ifdef CONFIG_RSBAC_RW + case T_DEV: + /* Only check for 
devices with mac_check set */ if (rsbac_get_attr(MAC, - target, + T_DEV, tid, - A_mac_categories, - &i_attr_val2, + A_mac_check, + &i_attr_val1, FALSE)) { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } + if(!i_attr_val1.mac_check) + return(DO_NOT_CARE); /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; #endif @@ -2508,10 +3739,12 @@ enum rsbac_adf_req_ret_t return(NOT_GRANTED); } /* and perform auto-write without setting attributes */ - return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); + return(auto_write_attr(caller_pid, + target, + tid, + A_remote_sec_level, + A_remote_mac_categories, + FALSE)); #endif /* all other cases are unknown */ @@ -2524,33 +3757,10 @@ enum rsbac_adf_req_ret_t case T_FILE: case T_FIFO: case T_IPC: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; case T_DEV: @@ -2567,33 +3777,10 @@ enum rsbac_adf_req_ret_t } if(!i_attr_val1.mac_check) return(DO_NOT_CARE); - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - 
"rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-write without setting attributes */ return(auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE)); break; /* all other cases are unknown */ @@ -2606,34 +3793,13 @@ enum rsbac_adf_req_ret_t switch(target) { case T_NETOBJ: - /* test read-write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_local_sec_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_local_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and perform auto-read-write without setting attributes */ - return(auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); + return(auto_read_write_attr(caller_pid, + target, + tid, + A_local_sec_level, + A_local_mac_categories, + FALSE)); /* all other cases are unknown */ default: return(DO_NOT_CARE); @@ -2646,34 +3812,13 @@ enum rsbac_adf_req_ret_t switch(target) { case T_NETOBJ: - /* test read-write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_sec_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_request_mac(): rsbac_get_attr() returned error!\n"); - return(NOT_GRANTED); - } /* and 
perform auto-read-write without setting attributes */ - return(auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE)); + return(auto_read_write_attr(caller_pid, + target, + tid, + A_remote_sec_level, + A_remote_mac_categories, + FALSE)); /* all other cases are unknown */ default: return(DO_NOT_CARE); @@ -2686,7 +3831,7 @@ enum rsbac_adf_req_ret_t } return(result); - }; /* end of rsbac_adf_request_mac() */ + } /* end of rsbac_adf_request_mac() */ /*****************************************************************************/ @@ -2720,6 +3865,8 @@ int rsbac_adf_set_attr_mac( union rsbac_attribute_value_t i_attr_val5; union rsbac_attribute_value_t i_attr_val6; union rsbac_attribute_value_t i_attr_val7; + union rsbac_attribute_value_t i_attr_val8; + union rsbac_attribute_value_t i_attr_val9; boolean inherit; switch (request) @@ -2761,9 +3908,9 @@ int rsbac_adf_set_attr_mac( } /* and perform auto-write with setting attributes */ result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - TRUE); + target, + tid, + TRUE); if ((result == GRANTED) || (result == DO_NOT_CARE)) return(0); else @@ -2783,60 +3930,187 @@ int rsbac_adf_set_attr_mac( } if(!i_attr_val1.mac_check) return(0); - /* test write access to target: get its sec_level */ + /* and perform auto-write with setting attributes */ + result = auto_write(caller_pid, + target, + tid, + TRUE); + if ((result == GRANTED) || (result == DO_NOT_CARE)) + return(0); + else + return(-RSBAC_EDECISIONMISMATCH); + break; + /* all other cases are unknown */ + default: return(0); + } + + case R_CHANGE_OWNER: + switch(target) + { + /* Changing process owner affects access decisions, */ + /* so attributes have to be adjusted. 
*/ + case T_PROCESS: + /* For target process there MUST be a new owner specified */ + if (attr != A_owner) + return(-RSBAC_EINVALIDATTR); + + /* Get owner-sec-level and mac_categories for new owner */ + i_tid.user = attr_val.owner; if (rsbac_get_attr(MAC, - target, - tid, + T_USER, + i_tid, A_security_level, - &i_attr_val1, - FALSE)) + &i_attr_val2, + TRUE)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } if (rsbac_get_attr(MAC, - target, + T_USER, + i_tid, + A_mac_categories, + &i_attr_val3, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* set owner-sec-level and mac_categories for process to new values */ + if (rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_security_level, + i_attr_val2)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + if (rsbac_set_attr(MAC, + T_PROCESS, tid, A_mac_categories, - &i_attr_val2, - FALSE)) + i_attr_val3)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + /* Get min_write_open and min_write_categories of process */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_min_write_open, + &i_attr_val4, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_min_write_categories, + &i_attr_val5, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* adjust min_write_open and min_write_categories, if too high */ + if(i_attr_val2.security_level < i_attr_val4.min_write_open) + { + i_attr_val4.min_write_open = i_attr_val2.security_level; + if(rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_min_write_open, + i_attr_val4)) + { + 
printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + } + /* does process have categories in min_write that the new owner has not? */ + /* If yes, throw them out. */ + if ((i_attr_val3.mac_categories & i_attr_val5.mac_categories) + != i_attr_val5.mac_categories) + { + i_attr_val5.mac_categories &= i_attr_val3.mac_categories; + if(rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_min_write_categories, + i_attr_val5)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + } + /* Get owner-initial-sec-level and mac_initial_categories for new owner */ + /* These values will be adjusted by max_read / min_write and then used as */ + /* new current level/categories. */ + i_tid.user = attr_val.owner; + if (rsbac_get_attr(MAC, + T_USER, + i_tid, + A_initial_security_level, + &i_attr_val6, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_initial_security_level, + i_attr_val6)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } +#if 0 + /* restrict current_level to be a maximum of min_write */ + if(i_attr_val6.security_level > i_attr_val4.min_write_open) + i_attr_val6.security_level = i_attr_val4.min_write_open; +#endif + if (rsbac_get_attr(MAC, + T_USER, + i_tid, + A_mac_initial_categories, + &i_attr_val7, + TRUE)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } - /* and perform auto-write with setting attributes */ - result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - TRUE); - if ((result == GRANTED) || (result == DO_NOT_CARE)) - return(0); - else - return(-RSBAC_EDECISIONMISMATCH); - break; - /* all other cases are unknown 
*/ - default: return(0); - } - - case R_CHANGE_OWNER: - switch(target) - { - /* Changing process owner affects access decisions, */ - /* so attributes have to be adjusted. */ - case T_PROCESS: - /* For target process there MUST be a new owner specified */ - if (attr != A_owner) - return(-RSBAC_EINVALIDATTR); - - /* Get owner-sec-level and mac_categories for new owner */ + if (rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_mac_initial_categories, + i_attr_val7)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } +#if 0 + /* restrict current_categories to be a maximum of min_write */ + if((i_attr_val7.mac_categories & i_attr_val5.mac_categories) != i_attr_val7.mac_categories) + i_attr_val7.mac_categories &= i_attr_val5.mac_categories; +#endif + /* Get owner-min-sec-level and mac_min_categories for new owner */ i_tid.user = attr_val.owner; if (rsbac_get_attr(MAC, T_USER, i_tid, - A_security_level, - &i_attr_val2, + A_min_security_level, + &i_attr_val8, TRUE)) { printk(KERN_WARNING @@ -2846,8 +4120,8 @@ int rsbac_adf_set_attr_mac( if (rsbac_get_attr(MAC, T_USER, i_tid, - A_mac_categories, - &i_attr_val3, + A_mac_min_categories, + &i_attr_val9, TRUE)) { printk(KERN_WARNING @@ -2859,8 +4133,8 @@ int rsbac_adf_set_attr_mac( if (rsbac_set_attr(MAC, T_PROCESS, tid, - A_security_level, - i_attr_val2)) + A_min_security_level, + i_attr_val8)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); @@ -2868,17 +4142,17 @@ int rsbac_adf_set_attr_mac( if (rsbac_set_attr(MAC, T_PROCESS, tid, - A_mac_categories, - i_attr_val3)) + A_mac_min_categories, + i_attr_val9)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - /* Get min_write_open and min_write_categories of process */ + /* Get max_read_open and max_read_categories of process */ if (rsbac_get_attr(MAC, T_PROCESS, tid, - 
A_min_write_open, + A_max_read_open, &i_attr_val4, TRUE)) { @@ -2889,7 +4163,7 @@ int rsbac_adf_set_attr_mac( if (rsbac_get_attr(MAC, T_PROCESS, tid, - A_min_write_categories, + A_max_read_categories, &i_attr_val5, TRUE)) { @@ -2897,34 +4171,140 @@ int rsbac_adf_set_attr_mac( "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } - /* adjust min_write_open and min_write_categories, if too high */ - if ( (i_attr_val2.security_level < i_attr_val4.min_write_open) - && rsbac_set_attr(MAC, - T_PROCESS, + /* adjust max_read_open and max_read_categories, if too low */ + if (i_attr_val8.security_level > i_attr_val4.max_read_open) + { + i_attr_val4.max_read_open = i_attr_val8.security_level; + if(rsbac_set_attr(MAC, + T_PROCESS, tid, - A_min_write_open, - i_attr_val2) - ) + A_max_read_open, + i_attr_val4)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + } +#if 0 + /* adjust current sec level to a minimum of max_read */ + if(i_attr_val6.security_level < i_attr_val4.max_read_open) + i_attr_val6.security_level = i_attr_val4.max_read_open; +#endif + /* but never set it over new max_level or under new min_level */ + if(i_attr_val6.security_level > i_attr_val2.security_level) + i_attr_val6.security_level = i_attr_val2.security_level; + else + if(i_attr_val6.security_level < i_attr_val8.security_level) + i_attr_val6.security_level = i_attr_val8.security_level; + if(rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_current_sec_level, + i_attr_val6)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - /* does process have categories in min_write that the new owner has not? */ - /* If yes, throw them out. */ - if ((i_attr_val3.mac_categories & i_attr_val5.mac_categories) - != i_attr_val5.mac_categories) + + /* does new owner have categories in min_categories that the process max_read + has not? 
*/ + /* If yes, add them. */ + if ((i_attr_val9.mac_categories & i_attr_val5.mac_categories) + != i_attr_val9.mac_categories) { - i_attr_val5.mac_categories &= i_attr_val3.mac_categories; + i_attr_val5.mac_categories |= i_attr_val9.mac_categories; if(rsbac_set_attr(MAC, - T_PROCESS, + T_PROCESS, tid, - A_min_write_categories, + A_max_read_categories, i_attr_val5)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } } +#if 0 + /* adjust current categories to include all from max_read (from initial) */ + if((i_attr_val7.mac_categories & i_attr_val5.mac_categories) != i_attr_val5.mac_categories) + i_attr_val7.mac_categories |= i_attr_val5.mac_categories; +#endif + /* but never set it over new max_cats or under new min_cats */ + if((i_attr_val7.mac_categories & i_attr_val3.mac_categories) != i_attr_val7.mac_categories) + i_attr_val7.mac_categories &= i_attr_val3.mac_categories; + else + if((i_attr_val7.mac_categories & i_attr_val9.mac_categories) != i_attr_val9.mac_categories) + i_attr_val7.mac_categories |= i_attr_val9.mac_categories; + if(rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_mac_curr_categories, + i_attr_val7)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + + /* Get mac_user_flags from user */ + i_tid.user = attr_val.owner; + if (rsbac_get_attr(MAC, + T_USER, + i_tid, + A_mac_user_flags, + &i_attr_val3, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + i_attr_val1.mac_process_flags = i_attr_val3.mac_user_flags; + /* adjust flags - first get old process flags*/ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_process_flags, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if( (i_attr_val2.mac_process_flags & 
MAC_program_auto) + && (i_attr_val3.mac_user_flags & MAC_allow_auto) + ) + i_attr_val1.mac_process_flags |= MAC_auto; + + i_attr_val1.mac_process_flags &= RSBAC_MAC_P_FLAGS; + + if(!(i_attr_val1.mac_process_flags & MAC_trusted)) + { + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_trusted_for_user, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if( (i_attr_val2.mac_trusted_for_user == attr_val.owner) + || (i_attr_val2.mac_trusted_for_user == RSBAC_ALL_USERS) + ) + i_attr_val1.mac_process_flags |= MAC_trusted; + } + /* Set mac_process_flags on process */ + if (rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_mac_process_flags, + i_attr_val1)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } /* OK, we are ready */ return(0); @@ -2986,11 +4366,11 @@ int rsbac_adf_set_attr_mac( "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } - /* Get mac_auto from first process */ + /* Get mac_trusted_for_user from first process */ if (rsbac_get_attr(MAC, T_PROCESS, tid, - A_mac_auto, + A_mac_trusted_for_user, &i_attr_val6, FALSE)) { @@ -2998,11 +4378,11 @@ int rsbac_adf_set_attr_mac( "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } - /* Get mac_trusted from first process */ + /* Get mac_process_flags from first process */ if (rsbac_get_attr(MAC, T_PROCESS, tid, - A_mac_trusted, + A_mac_process_flags, &i_attr_val7, FALSE)) { @@ -3063,21 +4443,21 @@ int rsbac_adf_set_attr_mac( printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - /* Set mac_auto for new process */ + /* Set mac_trusted_for_user (from program file) for new process */ if (rsbac_set_attr(MAC, T_PROCESS, new_tid, - A_mac_auto, + A_mac_trusted_for_user, i_attr_val6)) { printk(KERN_WARNING 
"rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - /* Set mac_trusted for new process */ + /* Set mac_process_flags for new process */ if (rsbac_set_attr(MAC, T_PROCESS, new_tid, - A_mac_trusted, + A_mac_process_flags, i_attr_val7)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); @@ -3132,6 +4512,30 @@ int rsbac_adf_set_attr_mac( "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } + /* Get initial_sec_level from first process */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_initial_security_level, + &i_attr_val6, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* Get initial_categories from first process */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_initial_categories, + &i_attr_val7, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } /* Set mac_categories for new process */ if (rsbac_set_attr(MAC, T_PROCESS, @@ -3172,6 +4576,69 @@ int rsbac_adf_set_attr_mac( printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } + /* Set initial_security_level for new process */ + if (rsbac_set_attr(MAC, + T_PROCESS, + new_tid, + A_initial_security_level, + i_attr_val6)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + /* Set initial_categories for new process */ + if (rsbac_set_attr(MAC, + T_PROCESS, + new_tid, + A_mac_initial_categories, + i_attr_val7)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + /* Get owner-min_sec-level/cat from first process */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_min_security_level, + &i_attr_val2, + FALSE)) + 
{ + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_min_categories, + &i_attr_val3, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* Set min_security_level for new process */ + if (rsbac_set_attr(MAC, + T_PROCESS, + new_tid, + A_min_security_level, + i_attr_val2)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + /* Set min_categories for new process */ + if (rsbac_set_attr(MAC, + T_PROCESS, + new_tid, + A_mac_min_categories, + i_attr_val3)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } return(0); } else @@ -3183,6 +4650,17 @@ int rsbac_adf_set_attr_mac( /* Creating dir or (pseudo) file IN target dir! */ case T_DIR: /* Mode of created item is ignored! */ + /* and perform auto-write without(!) setting of attributes - no need */ + /* -> decision consistency check only */ + /* only check, if not MAC_LIGHT */ +#ifndef CONFIG_RSBAC_MAC_LIGHT + result = auto_write(caller_pid, + target, + tid, + FALSE); + if ((result != GRANTED) && (result != DO_NOT_CARE)) + return(-RSBAC_EDECISIONMISMATCH); +#endif /* test write access to target: get its sec_level */ if (rsbac_get_attr(MAC, T_DIR, @@ -3206,23 +4684,12 @@ int rsbac_adf_set_attr_mac( "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } - /* and perform auto-write without(!) 
setting of attributes - no need */ - /* -> decision consistency check only */ - /* only check, if not MAC_LIGHT */ -#ifndef CONFIG_RSBAC_MAC_LIGHT - result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - FALSE); - if ((result != GRANTED) && (result != DO_NOT_CARE)) - return(-RSBAC_EDECISIONMISMATCH); -#endif - /* Get min_write_open from process (initialized to owner_sec_level)... */ + /* Get current_sec_level from process (initialized to owner_sec_level)... */ i_tid.process = caller_pid; if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_min_write_open, + A_current_sec_level, &i_attr_val3, FALSE)) { @@ -3236,7 +4703,7 @@ int rsbac_adf_set_attr_mac( #endif /* Set security-level for new item */ if (rsbac_set_attr(MAC, - new_target, + new_target, new_tid, A_security_level, i_attr_val3)) @@ -3244,11 +4711,11 @@ int rsbac_adf_set_attr_mac( printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - /* Get min_write_categories from process (initialized to owner_categories)... */ + /* Get current_categories from process (initialized to owner_categories)... 
*/ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_min_write_categories, + A_mac_curr_categories, &i_attr_val3, FALSE)) { @@ -3262,7 +4729,7 @@ int rsbac_adf_set_attr_mac( #endif /* Set mac_categories for new item */ if (rsbac_set_attr(MAC, - new_target, + new_target, new_tid, A_mac_categories, i_attr_val3)) @@ -3387,39 +4854,12 @@ int rsbac_adf_set_attr_mac( case T_FIFO: case T_SYMLINK: case T_IPC: - /* test write access to target: get its sec_level */ - if(target != T_IPC) - inherit = TRUE; - else - inherit = FALSE; - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-write without(!) setting of attributes */ /* - no information flow apart from missing file */ /* -> decision consistency check only */ result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, FALSE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3427,53 +4867,31 @@ int rsbac_adf_set_attr_mac( return(0); /* all other cases are unknown */ default: - return(0); - } - - case R_EXECUTE: - switch(target) - { - case T_FILE: - /* test read access to file: get its sec_level */ - if (rsbac_get_attr(MAC, - T_FILE, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - T_FILE, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } + return(0); + } + + 
case R_EXECUTE: + switch(target) + { + case T_FILE: /* and perform auto-read with setting of attributes */ result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); /* reset current_sec_level, mac_auto, min_write_open */ /* and max_read_open for process */ - - /* First, set current_sec_level and min_write_open to process owner's seclevel */ i_tid.process = caller_pid; + +#ifdef CONFIG_RSBAC_MAC_RESET_CURR + /* First, set current_sec_level and min_write_open to process owner's initial and seclevel */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_security_level, + A_initial_security_level, &i_attr_val1, TRUE)) { @@ -3481,6 +4899,17 @@ int rsbac_adf_set_attr_mac( "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_mac_initial_categories, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } if (rsbac_set_attr(MAC, T_PROCESS, i_tid, @@ -3493,66 +4922,99 @@ int rsbac_adf_set_attr_mac( if (rsbac_set_attr(MAC, T_PROCESS, i_tid, - A_min_write_open, - i_attr_val1)) + A_mac_curr_categories, + i_attr_val2)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - /* Next, set mac_curr_categories and min_write_categories to process owner's mac_categories */ +#endif +#if 0 + /* Now, set min_write_open to process owner's seclevel */ if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_mac_categories, - &i_attr_val2, + A_security_level, + &i_attr_val1, TRUE)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } +#endif + i_attr_val1.min_write_open = SL_max; if (rsbac_set_attr(MAC, T_PROCESS, i_tid, - A_mac_curr_categories, - 
i_attr_val2)) + A_min_write_open, + i_attr_val1)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } - if (rsbac_set_attr(MAC, +#if 0 + /* Next, set min_write_categories to process owner's mac_categories */ + if (rsbac_get_attr(MAC, T_PROCESS, i_tid, - A_min_write_categories, - i_attr_val2)) + A_mac_categories, + &i_attr_val2, + TRUE)) { - printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); } - /* set mac_auto */ - i_attr_val2.mac_auto = TRUE; +#endif + i_attr_val2.mac_categories = RSBAC_MAC_MAX_CAT_VECTOR; if (rsbac_set_attr(MAC, T_PROCESS, i_tid, - A_mac_auto, + A_min_write_categories, i_attr_val2)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } /* reset max_read boundary */ - i_attr_val2.max_read_open = SL_unclassified; +#if 0 + /* Get owner-min-sec-level and mac_min_categories for owner */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_min_security_level, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_mac_min_categories, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } +#endif + i_attr_val1.max_read_open = SL_min; + i_attr_val2.mac_categories = RSBAC_MAC_MIN_CAT_VECTOR; if (rsbac_set_attr(MAC, T_PROCESS, i_tid, A_max_read_open, - i_attr_val2)) + i_attr_val1)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } /* reset category max_read boundary */ - i_attr_val2.mac_categories = RSBAC_MAC_MIN_CAT_VECTOR; if 
(rsbac_set_attr(MAC, T_PROCESS, i_tid, @@ -3562,28 +5024,120 @@ int rsbac_adf_set_attr_mac( printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); return(-RSBAC_EWRITEFAILED); } + /* set flags */ + if (rsbac_get_attr(MAC, + T_PROCESS, + i_tid, + A_mac_process_flags, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (rsbac_get_attr(MAC, + target, + tid, + A_mac_auto, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if(i_attr_val2.mac_auto) + { + i_attr_val1.mac_process_flags |= MAC_program_auto; + i_tid.user = owner; + if (rsbac_get_attr(MAC, + T_USER, + i_tid, + A_mac_user_flags, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if(i_attr_val2.mac_user_flags & MAC_allow_auto) + i_attr_val1.mac_process_flags |= MAC_auto; + else + i_attr_val1.mac_process_flags &= ~MAC_auto; + i_tid.process = caller_pid; + } + else + { + i_attr_val1.mac_process_flags &= ~MAC_program_auto; + i_attr_val1.mac_process_flags &= ~MAC_auto; + } /* set mac_trusted: get mac_trusted_for_user from FILE aci */ if (rsbac_get_attr(MAC, T_FILE, tid, A_mac_trusted_for_user, - &i_attr_val1, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (rsbac_set_attr(MAC, + T_PROCESS, + i_tid, + A_mac_trusted_for_user, + i_attr_val2)) + { + printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + if (rsbac_get_attr(MAC, + T_FILE, + tid, + A_mac_prop_trusted, + &i_attr_val3, TRUE)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } - if( 
(i_attr_val1.mac_trusted_for_user == RSBAC_ALL_USERS) - || (i_attr_val1.mac_trusted_for_user == owner) + if( !(i_attr_val3.mac_prop_trusted) + || !(i_attr_val1.mac_process_flags & MAC_trusted) ) - i_attr_val1.mac_trusted = TRUE; - else - i_attr_val1.mac_trusted = FALSE; + { + if( (i_attr_val2.mac_trusted_for_user == RSBAC_ALL_USERS) + || (i_attr_val2.mac_trusted_for_user == owner) + ) + i_attr_val1.mac_process_flags |= MAC_trusted; + else + { + i_tid.user = owner; + if (rsbac_get_attr(MAC, + T_USER, + i_tid, + A_mac_user_flags, + &i_attr_val2, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if(i_attr_val2.mac_user_flags & MAC_trusted) + i_attr_val1.mac_process_flags |= MAC_trusted; + else + i_attr_val1.mac_process_flags &= ~MAC_trusted; + i_tid.process = caller_pid; + } + } if (rsbac_set_attr(MAC, T_PROCESS, i_tid, - A_mac_trusted, + A_mac_process_flags, i_attr_val1)) { printk(KERN_WARNING "rsbac_adf_set_attr_mac(): rsbac_set_attr() returned error!\n"); @@ -3601,41 +5155,18 @@ int rsbac_adf_set_attr_mac( { case T_DIR: case T_DEV: - /* test read-write access to mount dir/dev: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read(-write) with setting of attributes */ if( (target == T_DEV) && (attr == A_mode) && (attr_val.mode & MS_RDONLY)) result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); else result = auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + 
tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3656,33 +5187,10 @@ int rsbac_adf_set_attr_mac( case T_FIFO: case T_IPC: #endif - /* test read access to dir: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read with setting of attributes */ result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3703,33 +5211,10 @@ int rsbac_adf_set_attr_mac( } if(!i_attr_val1.mac_check) return(0); - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read with setting of attributes */ result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3738,34 +5223,13 @@ int rsbac_adf_set_attr_mac( #ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT case T_NETOBJ: - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_sec_level, - &i_attr_val1, - 
FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read with setting of attributes */ - result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - TRUE); + result = auto_read_attr(caller_pid, + target, + tid, + A_remote_sec_level, + A_remote_mac_categories, + TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); return(0); @@ -3775,45 +5239,18 @@ int rsbac_adf_set_attr_mac( default: return(0); } - - case R_READ_OPEN: - switch(target) - { - case T_FILE: - case T_DIR: - case T_FIFO: - case T_IPC: - /* test read access to target: get its sec_level */ - if(target != T_IPC) - inherit = TRUE; - else - inherit = FALSE; - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } + + case R_READ_OPEN: + switch(target) + { + case T_FILE: + case T_DIR: + case T_FIFO: + case T_IPC: /* and perform auto-read with setting attributes */ result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3833,33 +5270,10 @@ int rsbac_adf_set_attr_mac( } if(!i_attr_val1.mac_check) return(0); - /* test read access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, 
- A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read with setting attributes */ result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3876,37 +5290,10 @@ int rsbac_adf_set_attr_mac( case T_FILE: case T_FIFO: case T_IPC: - /* test read-write access to target: get its sec_level */ - if(target != T_IPC) - inherit = TRUE; - else - inherit = FALSE; - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read-write without setting attributes */ result = auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3926,33 +5313,10 @@ int rsbac_adf_set_attr_mac( } if(!i_attr_val1.mac_check) return(0); - /* test read-write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - 
&i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read-write with setting of attributes */ result = auto_read_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -3967,33 +5331,10 @@ int rsbac_adf_set_attr_mac( { case T_DIR: case T_SYMLINK: - /* test read access to dir/symlink: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - TRUE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read with setting of attributes */ result = auto_read(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -4007,34 +5348,13 @@ int rsbac_adf_set_attr_mac( switch(target) { case T_PROCESS: - /* test read-write access to process: get its sec_level */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_current_sec_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_mac_curr_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-read-write with setting attributes */ - result = auto_read_write(caller_pid, - i_attr_val1.current_sec_level, - i_attr_val2.mac_categories, - 
TRUE); + result = auto_read_write_attr(caller_pid, + target, + tid, + A_current_sec_level, + A_mac_curr_categories, + TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); return(0); @@ -4051,38 +5371,10 @@ int rsbac_adf_set_attr_mac( case T_FILE: case T_FIFO: case T_IPC: - /* test read-write access to target: get its sec_level */ - if(target != T_IPC) - inherit = TRUE; - else - inherit = FALSE; - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - inherit)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-write with setting attributes */ result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); @@ -4102,33 +5394,10 @@ int rsbac_adf_set_attr_mac( } if(!i_attr_val1.mac_check) return(0); - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_security_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-write with setting attributes */ result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, + target, + tid, TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) 
return(-RSBAC_EDECISIONMISMATCH); @@ -4136,34 +5405,13 @@ int rsbac_adf_set_attr_mac( #ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT case T_NETOBJ: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_sec_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-write with setting attributes */ - result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - TRUE); + result = auto_write_attr(caller_pid, + target, + tid, + A_remote_sec_level, + A_remote_mac_categories, + TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); return(0); @@ -4180,34 +5428,13 @@ int rsbac_adf_set_attr_mac( switch(target) { case T_NETOBJ: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_local_sec_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_local_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-write with setting attributes */ - result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - TRUE); + result = auto_write_attr(caller_pid, + target, + tid, + A_local_sec_level, + A_local_mac_categories, + TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); return(0); @@ -4224,34 +5451,13 @@ int rsbac_adf_set_attr_mac( 
switch(target) { case T_NETOBJ: - /* test write access to target: get its sec_level */ - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_sec_level, - &i_attr_val1, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (rsbac_get_attr(MAC, - target, - tid, - A_remote_mac_categories, - &i_attr_val2, - FALSE)) - { - printk(KERN_WARNING - "rsbac_adf_set_attr_mac(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } /* and perform auto-write with setting attributes */ - result = auto_write(caller_pid, - i_attr_val1.security_level, - i_attr_val2.mac_categories, - TRUE); + result = auto_write_attr(caller_pid, + target, + tid, + A_remote_sec_level, + A_remote_mac_categories, + TRUE); if ((result != GRANTED) && (result != DO_NOT_CARE)) return(-RSBAC_EDECISIONMISMATCH); return(0); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/mac/mac_syscalls.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/mac/mac_syscalls.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/mac/mac_syscalls.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/mac/mac_syscalls.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Mandatory Access Control */ /* File: rsbac/adf/mac/syscalls.c */ /* */ -/* Author and (c) 1999-2001: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 24/Aug/2001 */ +/* Last modified: 07/Jun/2003 */ /*************************************************** */ #include @@ -16,6 +16,8 @@ #include #include #include +#include +#include /************************************************* */ /* Global Variables */ @@ -36,392 +38,607 @@ /* must not be greater than owner_sec_level. Setting current_sec_level by */ /* this function also turns off auto-levelling via mac_auto. 
*/ -int rsbac_mac_set_curr_seclevel(rsbac_security_level_t level) +int rsbac_mac_set_curr_level(rsbac_security_level_t level, + rsbac_mac_category_vector_t categories) { union rsbac_target_id_t tid; union rsbac_attribute_value_t attr_val1; + rsbac_mac_process_flags_t flags; - /* get maximum security level */ + if( (level > SL_max) + && (level != SL_none) + ) + return -RSBAC_EINVALIDVALUE; + + /* check flags */ tid.process = current->pid; if (rsbac_get_attr(MAC, T_PROCESS, tid, - A_security_level, + A_mac_process_flags, &attr_val1, FALSE)) { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_seclevel(): rsbac_get_attr() returned error!\n"); + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_get_attr() returned error!\n"); return(-RSBAC_EREADFAILED); } - /* if level is too high -> error */ - if (level > attr_val1.security_level) - return(-EPERM); - - /* check against upper/write boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_min_write_open, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_seclevel(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (level > attr_val1.min_write_open) - return(-EPERM); - - /* check against lower/read boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_max_read_open, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_seclevel(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (level < attr_val1.max_read_open) - return(-EPERM); - - /* OK, checks passed: set mac_auto to OFF */ - attr_val1.mac_auto = FALSE; - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_mac_auto, - attr_val1)) - { /* failed! 
*/ - printk(KERN_WARNING "rsbac_mac_set_curr_seclevel(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); - } - - /* ...and set current_sec_level to level */ - attr_val1.current_sec_level = level; - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_current_sec_level, - attr_val1)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_seclevel(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); + flags = attr_val1.mac_process_flags; + if( !(flags & MAC_auto) + && !(flags & MAC_trusted) + && !(flags & MAC_override) + ) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: no auto, trusted or override -> not granted \n", + current->uid, + current->pid, + current->comm); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: no auto, trusted or override -> not granted \n", + current->uid, + current->pid, + current->comm); +#endif + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + + /* override allows full range */ + if(!(flags & MAC_override)) + { + if(level != SL_none) + { + /* get maximum security level */ + tid.process = current->pid; + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_security_level, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* if level is too high -> error */ + if (level > attr_val1.security_level) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u over max level %u, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + level, + attr_val1.security_level); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u over max level %u, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + level, + attr_val1.security_level); +#endif + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + /* get minimum security level */ + tid.process = current->pid; + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_min_security_level, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* if level is too low -> error */ + if (level < attr_val1.security_level) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u under min level %u, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + level, + attr_val1.security_level); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u under min level %u, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + level, + attr_val1.security_level); +#endif + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + + /* auto needed? -> stay inside boundaries */ + if(!flags & MAC_trusted) + { + /* check against upper/write boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_min_write_open, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_seclevel(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (level > attr_val1.min_write_open) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u over min_write_open %u, no override or trusted -> not granted \n", + current->uid, + current->pid, + current->comm, + level, + attr_val1.min_write_open); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u over min_write_open %u, no override or trusted -> not granted \n", + current->uid, + current->pid, + current->comm, + level, + attr_val1.min_write_open); +#endif + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + + /* check against lower/read boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_max_read_open, + &attr_val1, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "rsbac_mac_set_curr_seclevel(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if (level < attr_val1.max_read_open) + return(-EPERM); + } + } + if(categories != RSBAC_MAC_MIN_CAT_VECTOR) + { + /* get maximum categories */ + tid.process = current->pid; + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_categories, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* if categories are no subset -> error */ + if ((categories & attr_val1.mac_categories) != categories) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s over max categories %s, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s over max categories %s, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + /* get minimum categories */ + tid.process = current->pid; + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_min_categories, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + /* if level is too low -> error */ + if ((categories & attr_val1.mac_categories) != attr_val1.mac_categories) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s under min categories %s, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s under min categories %s, no override -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + + /* auto needed? -> stay inside boundaries */ + if(!flags & MAC_trusted) + { + /* check against upper/write boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_min_write_categories, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if ((categories & attr_val1.mac_categories) != categories) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s over min_write categories %s, no override or trusted -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s over min_write categories %s, no override or trusted -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + /* check against lower/read boundary */ + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_max_read_categories, + &attr_val1, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + if ((categories & attr_val1.mac_categories) != attr_val1.mac_categories) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp2) + { +#ifdef CONFIG_RSBAC_RMSG + rsbac_printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s under max_read categories %s, no override or trusted -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif +#ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif + printk(KERN_INFO + "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s under max_read categories %s, no override or trusted -> not granted \n", + current->uid, + current->pid, + current->comm, + u64tostrmac(tmp, categories), + u64tostrmac(tmp2, attr_val1.mac_categories)); +#endif + rsbac_kfree(tmp2); + } + rsbac_kfree(tmp); + } + #ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode + #ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MAC] + #endif + ) + #endif + return -EPERM; + } + } + } + } + + /* OK, checks passed: set values */ + if(level != SL_none) + { + attr_val1.current_sec_level = level; + if (rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_current_sec_level, + attr_val1)) + { /* failed! */ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } + } + if(categories != RSBAC_MAC_MIN_CAT_VECTOR) + { + attr_val1.mac_categories = categories; + if (rsbac_set_attr(MAC, + T_PROCESS, + tid, + A_mac_curr_categories, + attr_val1)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_set_curr_level(): rsbac_set_attr() returned error!\n"); + return(-RSBAC_EWRITEFAILED); + } } return(0); - }; - -/* To make programmers happy: getting current level as well */ - -rsbac_security_level_t rsbac_mac_get_curr_seclevel(void) - { - union rsbac_target_id_t tid; - union rsbac_attribute_value_t attr_val; - - tid.process = current->pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_current_sec_level, - &attr_val, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_get_curr_seclevel(): rsbac_get_attr() returned error!\n"); - return(SL_none); - } - return(attr_val.current_sec_level); - }; - -/*****************************************************************************/ -/* These functions allow processes to set their own maximum security level */ -/* (= owner_sec_level) via sys_rsbac_mac_set_max_seclevel() system call. */ -/* The level must keep within the min_write_open/max_read_open-boundary and */ -/* must not be greater than the process owner's security_level. */ -/* Remember: owner_sec_level is inherited on fork! */ - -int rsbac_mac_set_max_seclevel(rsbac_security_level_t level) - { - union rsbac_target_id_t tid; - union rsbac_attribute_value_t attr_val1; - - /* get old maximum security level */ - tid.process = current->pid; - - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_security_level, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_seclevel(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - /* if level is too high -> error */ - if (level > attr_val1.security_level) - return(-EPERM); - - /* check against upper/write boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_min_write_open, - &attr_val1, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "rsbac_mac_set_max_seclevel(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (level > attr_val1.min_write_open) - return(-EPERM); - - /* check against lower/read boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_max_read_open, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_seclevel(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (level < attr_val1.max_read_open) - return(-EPERM); - - /* set new max security_level to level */ - attr_val1.security_level = level; - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_security_level, - attr_val1)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_seclevel(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); - } - - /* ...and set current_sec_level to level */ - attr_val1.current_sec_level = level; - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_current_sec_level, - attr_val1)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_seclevel(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); - } - - return(0); - }; - -/* To make programmers happy: getting max security level as well */ + } -rsbac_security_level_t rsbac_mac_get_max_seclevel(void) +/* getting own levels as well - no restrictions */ +int rsbac_mac_get_curr_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p) { union rsbac_target_id_t tid; union rsbac_attribute_value_t attr_val; tid.process = current->pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_security_level, - &attr_val, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_get_max_seclevel(): rsbac_get_attr() returned error!\n"); - return(SL_none); + if(level_p) + { + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_current_sec_level, + &attr_val, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_get_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + *level_p = attr_val.current_sec_level; + } + if(categories_p) + { + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_curr_categories, + &attr_val, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "rsbac_mac_get_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + *categories_p = attr_val.mac_categories; } - return(attr_val.security_level); - }; - -int rsbac_mac_set_curr_categories(rsbac_mac_category_vector_t categories) - { - union rsbac_target_id_t tid; - union rsbac_attribute_value_t attr_val1; - - /* get maximum categories */ - tid.process = current->pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_mac_categories, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - /* if new set is no subset -> error */ - if (!( (attr_val1.mac_categories & categories) - == categories)) - return(-EPERM); - - /* check against upper/write boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_min_write_categories, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (!( (attr_val1.mac_categories & categories) - == categories)) - return(-EPERM); - - /* check against lower/read boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_max_read_categories, - &attr_val1, - FALSE)) - { /* failed! 
*/ - printk(KERN_WARNING "rsbac_mac_set_curr_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (!( (attr_val1.mac_categories & categories) - == attr_val1.mac_categories)) - return(-EPERM); - - /* OK, checks passed: set mac_auto to OFF */ - attr_val1.mac_auto = FALSE; - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_mac_auto, - attr_val1)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_categories(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); - } - - /* ...and set mac_curr_categories to level */ - attr_val1.mac_categories = categories; - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_mac_curr_categories, - attr_val1)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_curr_categories(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); - } - return(0); - }; + return 0; + } -int rsbac_mac_get_curr_categories(rsbac_mac_category_vector_t * categories_p) +int rsbac_mac_get_max_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p) { union rsbac_target_id_t tid; union rsbac_attribute_value_t attr_val; tid.process = current->pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_mac_curr_categories, - &attr_val, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_get_curr_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); + if(level_p) + { + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_security_level, + &attr_val, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "rsbac_mac_get_max_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + *level_p = attr_val.security_level; + } + if(categories_p) + { + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_categories, + &attr_val, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_get_curr_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + *categories_p = attr_val.mac_categories; } - *categories_p = attr_val.mac_categories; - return(0); + return 0; } -int rsbac_mac_set_max_categories(rsbac_mac_category_vector_t categories) - { - union rsbac_target_id_t tid; - union rsbac_attribute_value_t attr_val1; - /* get old maximum categories */ - tid.process = current->pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_mac_categories, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - /* if new set is no subset -> error */ - if (!( (attr_val1.mac_categories & categories) - == categories)) - return(-EPERM); - - /* check against upper/write boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_min_write_categories, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (!( (attr_val1.mac_categories & categories) - == categories)) - return(-EPERM); - - /* check against lower/read boundary */ - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_max_read_categories, - &attr_val1, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); - } - if (!( (attr_val1.mac_categories & categories) - == attr_val1.mac_categories)) - return(-EPERM); - - /* OK, checks passed: set mac_categories and */ - /* mac_curr_categories to categories */ - attr_val1.mac_categories = categories; - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_mac_curr_categories, - attr_val1)) - { /* failed! 
*/ - printk(KERN_WARNING "rsbac_mac_set_max_categories(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); - } - if (rsbac_set_attr(MAC, T_PROCESS, - tid, - A_mac_categories, - attr_val1)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_set_max_categories(): rsbac_set_attr() returned error!\n"); - return(-RSBAC_EWRITEFAILED); - } - return(0); - }; - -int rsbac_mac_get_max_categories(rsbac_mac_category_vector_t * categories_p) +int rsbac_mac_get_min_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p) { union rsbac_target_id_t tid; union rsbac_attribute_value_t attr_val; tid.process = current->pid; - if (rsbac_get_attr(MAC, - T_PROCESS, - tid, - A_mac_categories, - &attr_val, - FALSE)) - { /* failed! */ - printk(KERN_WARNING "rsbac_mac_get_max_categories(): rsbac_get_attr() returned error!\n"); - return(-RSBAC_EREADFAILED); + if(level_p) + { + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_min_security_level, + &attr_val, + FALSE)) + { /* failed! */ + printk(KERN_WARNING "rsbac_mac_get_min_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + *level_p = attr_val.security_level; + } + if(categories_p) + { + if (rsbac_get_attr(MAC, + T_PROCESS, + tid, + A_mac_min_categories, + &attr_val, + FALSE)) + { /* failed! 
*/ + printk(KERN_WARNING "rsbac_mac_get_min_level(): rsbac_get_attr() returned error!\n"); + return(-RSBAC_EREADFAILED); + } + *categories_p = attr_val.mac_categories; } - *categories_p = attr_val.mac_categories; - return(0); + return 0; } - /* end of rsbac/adf/mac/syscalls.c */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/ms/ms_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/ms/ms_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/ms/ms_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/ms/ms_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,24 +4,28 @@ /* Facility (ADF) - Malware Scan */ /* File: rsbac/adf/ms/main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 22/Feb/2002 */ +/* Last modified: 11/Mar/2003 */ /*************************************************** */ #include #include +#include #include #include #include +#include #include #include #include #include #include #include +#include #include -#include +#include +#include /************************************************* */ /* Global Variables */ @@ -29,18 +33,49 @@ static rsbac_ms_all_malware_t all_malware = RSBAC_MS_ALL_STRINGS; -#ifdef CONFIG_RSBAC_MS_EXTERNAL +#ifdef CONFIG_RSBAC_MS_EXT EXPORT_SYMBOL(rsbac_ms_do_scan); EXPORT_SYMBOL(rsbac_ms_scan_level); #endif rsbac_ms_do_scan_t * rsbac_ms_do_scan = NULL; +#if defined(CONFIG_RSBAC_MS_EXT_FPROTD) || defined(CONFIG_RSBAC_MS_EXT_CLAMD) +int rsbac_ms_scan_level = 0; +#else int rsbac_ms_scan_level = RSBAC_MS_LEVEL; +#endif + +#if defined(CONFIG_RSBAC_MS_EXT_FPROTD) || defined(CONFIG_RSBAC_MS_EXT_CLAMD) +#ifdef CONFIG_RSBAC_MS_EXT_FPROTD +/*** Network settings ***/ +#define FPROTD_LOCAL_PORT 0 +#define FPROTD_LOCAL_ADDR "127.0.0.1" +#define FPROT_REMOTE_PORT_MIN 10200 +#define FPROT_REMOTE_PORT_MAX 10204 +#define FPROT_REMOTE_ADDR "127.0.0.1" +rsbac_ms_do_scan_t * rsbac_ms_do_scan_fprotd = NULL; +#endif +#ifdef CONFIG_RSBAC_MS_EXT_CLAMD 
+#define CLAMD_LOCAL_PORT 0 +#define CLAMD_LOCAL_ADDR "127.0.0.1" +#define CLAM_REMOTE_PORT_MIN CONFIG_RSBAC_MS_EXT_CLAMD_PORT +#define CLAM_REMOTE_PORT_MAX CONFIG_RSBAC_MS_EXT_CLAMD_PORT +#define CLAM_REMOTE_ADDR "127.0.0.1" +rsbac_ms_do_scan_t * rsbac_ms_do_scan_clamd = NULL; +#endif + +static u_long ms_nr_calls = 0; + +#if defined(CONFIG_RSBAC_PROC) +#define MS_LEVEL_PROC_NAME "ms_level" +static struct proc_dir_entry * ms_proc_info_p; +#endif +#endif /************************************************* */ /* Internal Help functions */ /************************************************* */ -#ifndef CONFIG_RSBAC_MS_NO_INTERNAL +#ifndef CONFIG_RSBAC_MS_EXT /* open_by_dentry */ /* This is done by hand (copy from rsbac_read_open), because system calls */ /* are currently blocked by rsbac semaphores */ @@ -82,7 +117,7 @@ static int open_by_dentry(struct dentry } /* do_scan() */ -/* This function scans the given file for malware, returning a boolean value */ +/* This function scans the given file for malware, returning a result value */ /* whether the file is accepted or not. 
*/ static int do_scan(struct dentry * dentry_p) @@ -290,6 +325,663 @@ out: return(retval); } #endif + +#if defined(CONFIG_RSBAC_MS_EXT_FPROTD) || defined(CONFIG_RSBAC_MS_EXT_CLAMD) +/* declare net functions */ +long sys_socket(int family, int type, int protocol); +long sys_bind(int fd, struct sockaddr *umyaddr, int addrlen); +long sys_connect(int fd, struct sockaddr *uservaddr, int addrlen); +//ssize_t sys_read(unsigned int fd, char * buf, size_t count); +//ssize_t sys_write(unsigned int fd, const char * buf, size_t count); +long sys_send(int fd, void * buff, size_t len, unsigned flags); +long sys_recv(int fd, void * ubuf, size_t size, unsigned flags); +long sys_shutdown(int fd, int how); + +#ifdef CONFIG_RSBAC_MS_EXT_FPROTD +/**** Scanning Function ****/ + +static int ms_fprotd_do_scan(struct dentry * dentry_p) + { + u_int i; + mm_segment_t oldfs; + int sock_fd; + struct sockaddr_in addr; + int err; + u_int nr_restart = 0; + + /* create a socket */ + sock_fd = sys_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); + if(sock_fd < 0) + { + printk(KERN_WARNING + "ms_fprotd_do_scan(): creating local log socket failed with error %u, exiting!\n", + sock_fd); + return -RSBAC_EWRITEFAILED; + } + /* bind local address */ + addr.sin_family = PF_INET; + addr.sin_port = htons(FPROTD_LOCAL_PORT); + err = rsbac_net_str_to_inet(FPROTD_LOCAL_ADDR, + &addr.sin_addr.s_addr); + if(err < 0) + { + printk(KERN_WARNING + "ms_fprotd_do_scan(): converting local socket address %s failed with error %u, exiting!\n", + FPROTD_LOCAL_ADDR, + err); + sys_close(sock_fd); + return -RSBAC_EINVALIDVALUE; + } + /* change data segment - sys_bind reads address from user space */ + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_bind(sock_fd, (struct sockaddr *)&addr, sizeof(addr)); + set_fs(oldfs); + if(err < 0) + { + printk(KERN_WARNING + "ms_fprotd_do_scan(): binding local socket address %u.%u.%u.%u:%u failed with error %u, exiting!\n", + NIPQUAD(addr.sin_addr.s_addr), + FPROTD_LOCAL_PORT, + err); + 
sys_close(sock_fd); + return -RSBAC_EWRITEFAILED; + } + + /* convert remote address */ + addr.sin_family = PF_INET; + err = rsbac_net_str_to_inet(FPROT_REMOTE_ADDR, + &addr.sin_addr.s_addr); + if(err < 0) + { + printk(KERN_WARNING + "ms_fprotd_do_scan(): converting remote socket address %s failed with error %u, exiting!\n", + FPROT_REMOTE_ADDR, + err); + sys_close(sock_fd); + return -RSBAC_EINVALIDVALUE; + } + +restart: + + for(i = FPROT_REMOTE_PORT_MIN; i <= FPROT_REMOTE_PORT_MAX ; i++) + { + addr.sin_port = htons(i); + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_connect(sock_fd, + (struct sockaddr *)&addr, + sizeof(addr)); + set_fs(oldfs); + if(err >=0) + { + char * sendbuf; + char * filename; + int size; + int len; + +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_fprotd_do_scan(): successfully connected to scanner port %u.\n", + i); +#endif + sendbuf = rsbac_kmalloc(PAGE_SIZE + 1024); + if(!sendbuf) + { + sys_close(sock_fd); + return -ENOMEM; + } + filename = rsbac_kmalloc(PAGE_SIZE); + if(!filename) + { + rsbac_kfree(sendbuf); + sys_close(sock_fd); + return -ENOMEM; + } + err = rsbac_get_full_path(dentry_p, filename, PAGE_SIZE-1); + if(err < 0) + { + rsbac_kfree(sendbuf); + rsbac_kfree(filename); + sys_close(sock_fd); + return -RSBAC_EREADFAILED; + } + size = sprintf(sendbuf, "GET %s?%s HTTP/1.0\r\n\r\n", + filename, + CONFIG_RSBAC_MS_EXT_FPROTD_SW); +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_fprotd_do_scan(): sending request %s", + sendbuf); +#endif + len = 0; + err = 1; + while((err > 0) && (len < size)) + { + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_send(sock_fd, sendbuf + len, size - len, MSG_DONTWAIT); + set_fs(oldfs); + if(err > 0) + len += err; + } + if(err < 0) + { + rsbac_kfree(sendbuf); + rsbac_kfree(filename); + sys_close(sock_fd); + return -RSBAC_EWRITEFAILED; + } +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_fprotd_do_scan(): sent %u 
bytes\n", + len); +#endif + sys_shutdown(sock_fd, 1); + len = 0; + err = 1; + memset(sendbuf, 0, PAGE_SIZE); + while(err > 0) + { + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_recv(sock_fd, sendbuf + len, PAGE_SIZE - len, 0); + set_fs(oldfs); + if(err > 0) + { + len += err; + if(len > (PAGE_SIZE - 512)) + { + memcpy(sendbuf, sendbuf + PAGE_SIZE - 1024, len - (PAGE_SIZE - 1024)); + len -= (PAGE_SIZE - 1024); + } + } + } + if(err >= 0) + { + sendbuf[len] = 0; +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_fprotd_do_scan(): got reply %s\n", + sendbuf); +#endif + if(strstr(sendbuf, "infected")) + err = 0; + else + if( strstr(sendbuf, "clean") +// || strstr(sendbuf, "was not scanned") + || strstr(sendbuf, "unknown") + ) + err = rsbac_ms_scan_level; + else + err = -RSBAC_EREADFAILED; + } + else + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_fprotd_do_scan(): got reply error %i\n", + err); +#endif + } + rsbac_kfree(sendbuf); + rsbac_kfree(filename); + sys_close(sock_fd); + if( (err == -ERESTARTSYS) + && (nr_restart < 10) + ) + { + nr_restart++; + printk(KERN_DEBUG + "ms_fprotd_do_scan(): scanner connection on port %u failed with ERESTARTSYS, starting reconnection cycle %u.\n", + i, + nr_restart); + goto restart; + } + return err; + } +#ifdef CONFIG_RSBAC_DEBUG + else + { + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_fprotd_do_scan(): connection to scanner port %u failed with error %i.\n", + i, + err); + } +#endif + } + + printk(KERN_WARNING + "ms_fprotd_do_scan(): connecting to scanner failed!\n"); + sys_close(sock_fd); + return -RSBAC_EWRITEFAILED; + } +#endif + +#ifdef CONFIG_RSBAC_MS_EXT_CLAMD +/**** Scanning Function ****/ + +static int ms_clamd_do_scan(struct dentry * dentry_p) + { + u_int i; + mm_segment_t oldfs; + int sock_fd; + struct sockaddr_in addr; + int err; + u_int nr_restart = 0; + + /* create a socket */ + sock_fd = sys_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); + if(sock_fd < 0) 
+ { + printk(KERN_WARNING + "ms_clamd_do_scan(): creating local socket failed with error %u, exiting!\n", + sock_fd); + return -RSBAC_EWRITEFAILED; + } + /* bind local address */ + addr.sin_family = PF_INET; + addr.sin_port = htons(CLAMD_LOCAL_PORT); + err = rsbac_net_str_to_inet(CLAMD_LOCAL_ADDR, + &addr.sin_addr.s_addr); + if(err < 0) + { + printk(KERN_WARNING + "ms_clamd_do_scan(): converting local socket address %s failed with error %u, exiting!\n", + CLAMD_LOCAL_ADDR, + err); + sys_close(sock_fd); + return -RSBAC_EINVALIDVALUE; + } + /* change data segment - sys_bind reads address from user space */ + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_bind(sock_fd, (struct sockaddr *)&addr, sizeof(addr)); + set_fs(oldfs); + if(err < 0) + { + printk(KERN_WARNING + "ms_clamd_do_scan(): binding local socket address %u.%u.%u.%u:%u failed with error %u, exiting!\n", + NIPQUAD(addr.sin_addr.s_addr), + CLAMD_LOCAL_PORT, + err); + sys_close(sock_fd); + return -RSBAC_EWRITEFAILED; + } + + /* convert remote address */ + addr.sin_family = PF_INET; + err = rsbac_net_str_to_inet(CLAM_REMOTE_ADDR, + &addr.sin_addr.s_addr); + if(err < 0) + { + printk(KERN_WARNING + "ms_clamd_do_scan(): converting remote socket address %s failed with error %u, exiting!\n", + CLAM_REMOTE_ADDR, + err); + sys_close(sock_fd); + return -RSBAC_EINVALIDVALUE; + } + +restart: + + for(i = CLAM_REMOTE_PORT_MIN; i <= CLAM_REMOTE_PORT_MAX ; i++) + { + addr.sin_port = htons(i); + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_connect(sock_fd, + (struct sockaddr *)&addr, + sizeof(addr)); + set_fs(oldfs); + if(err >=0) + { + char * sendbuf; + char * filename; + int size; + int len; + +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_clamd_do_scan(): successfully connected to scanner port %u.\n", + i); +#endif + sendbuf = rsbac_kmalloc(PAGE_SIZE + 1024); + if(!sendbuf) + { + sys_close(sock_fd); + return -ENOMEM; + } + filename = rsbac_kmalloc(PAGE_SIZE); + if(!filename) + { + 
rsbac_kfree(sendbuf); + sys_close(sock_fd); + return -ENOMEM; + } + err = rsbac_get_full_path(dentry_p, filename, PAGE_SIZE-1); + if(err < 0) + { + rsbac_kfree(sendbuf); + rsbac_kfree(filename); + sys_close(sock_fd); + return -RSBAC_EREADFAILED; + } + size = sprintf(sendbuf, "%s %s\n", + CONFIG_RSBAC_MS_EXT_CLAMD_ACTION, + filename); +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_clamd_do_scan(): sending request %s", + sendbuf); +#endif + len = 0; + err = 1; + while((err > 0) && (len < size)) + { + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_send(sock_fd, sendbuf + len, size - len, MSG_DONTWAIT); + set_fs(oldfs); + if(err > 0) + len += err; + } + if(err < 0) + { + rsbac_kfree(sendbuf); + rsbac_kfree(filename); + sys_close(sock_fd); + return -RSBAC_EWRITEFAILED; + } +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_clamd_do_scan(): sent %u bytes\n", + len); +#endif + sys_shutdown(sock_fd, 1); + len = 0; + err = 1; + memset(sendbuf, 0, PAGE_SIZE); + while(err > 0) + { + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_recv(sock_fd, sendbuf + len, PAGE_SIZE - len, 0); + set_fs(oldfs); + if(err > 0) + { + len += err; + if(len > (PAGE_SIZE - 512)) + { + memcpy(sendbuf, sendbuf + PAGE_SIZE - 1024, len - (PAGE_SIZE - 1024)); + len -= (PAGE_SIZE - 1024); + } + } + } + if(err >= 0) + { + sendbuf[len] = 0; +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_clamd_do_scan(): got reply %s\n", + sendbuf); +#endif + if(strstr(sendbuf, "FOUND\n")) + err = 0; + else + if( strstr(sendbuf, "OK\n") +// || strstr(sendbuf, "was not scanned") +// || strstr(sendbuf, "unknown") + ) + err = rsbac_ms_scan_level; + else + err = -RSBAC_EREADFAILED; + } + else + { +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_clamd_do_scan(): got reply error %i\n", + err); +#endif + } + rsbac_kfree(sendbuf); + rsbac_kfree(filename); + sys_close(sock_fd); + if( (err == -ERESTARTSYS) + && 
(nr_restart < 10) + ) + { + nr_restart++; + printk(KERN_DEBUG + "ms_clamd_do_scan(): scanner connection on port %u failed with ERESTARTSYS, starting reconnection cycle %u.\n", + i, + nr_restart); + goto restart; + } + return err; + } +#ifdef CONFIG_RSBAC_DEBUG + else + { + if(rsbac_debug_adf_ms) + printk(KERN_DEBUG + "ms_clamd_do_scan(): connection to scanner port %u failed with error %i.\n", + i, + err); + } +#endif + } + + printk(KERN_WARNING + "ms_clamd_do_scan(): connecting to scanner failed!\n"); + sys_close(sock_fd); + return -RSBAC_EWRITEFAILED; + } +#endif + +/* PROC interface */ +#ifdef CONFIG_RSBAC_PROC +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) +static int +ms_proc_info(char *buffer, char **start, off_t offset, int length, int dummy) +#else +static int +ms_proc_info(char *buffer, char **start, off_t offset, int length) +#endif +{ + int len = 0; + off_t pos = 0; + off_t begin = 0; + + union rsbac_target_id_t rsbac_target_id; + union rsbac_attribute_value_t rsbac_attribute_value; + + if (!rsbac_is_initialized()) + return (-ENOSYS); + + rsbac_target_id.scd = ST_rsbac; + rsbac_attribute_value.dummy = 0; + if (!rsbac_adf_request(R_GET_STATUS_DATA, + current->pid, + T_SCD, + rsbac_target_id, + A_none, + rsbac_attribute_value)) + { + return -EPERM; + } + len += sprintf(buffer, "MS scanning levels\n------------------\n"); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + +#ifdef CONFIG_RSBAC_MS_EXT_FPROTD + len += sprintf(buffer + len, "F-Protd support included.\n"); +#endif +#ifdef CONFIG_RSBAC_MS_EXT_CLAMD + len += sprintf(buffer + len, ">Clamd support included.\n"); +#endif + len += sprintf(buffer + len, "%lu calls to do_scan function,\n", + ms_nr_calls); + len += sprintf(buffer + len, "scan level is %u.\n", + rsbac_ms_scan_level); + if(!rsbac_ms_scan_level) + len += sprintf(buffer + len, "(scanning is off)\n"); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + 
if (pos > offset+length) + goto out; + +out: + *start = buffer + (offset - begin); + len -= (offset - begin); + + if (len > length) + len = length; + return len; +} + +static ssize_t ms_proc_write(struct file * file, const char * buf, + u_long count, void *ppos) +{ + ssize_t err = -EINVAL; + char * k_buf; + char * p; + unsigned int new_level; + + union rsbac_attribute_value_t i_attr_val1; + union rsbac_target_id_t i_tid; + + if(count > PROC_BLOCK_SIZE) + return(-EOVERFLOW); + + + k_buf = (char *) __get_free_page(GFP_KERNEL); + if(!k_buf) + return(-ENOMEM); + copy_from_user(k_buf, buf, count); + + if(count < 8 || strncmp("level", k_buf, 5)) + { + goto out; + } + if (!rsbac_is_initialized()) + { + err=-ENOSYS; + goto out; + } + + /* + * Usage: echo "level #N" > /proc/rsbac_info/ms_fprotd + * to set scanning level to given level. Use level 0 to switch scanning off (default). + */ + p = k_buf + 6; + + if( *p == '\0' ) + goto out; + + if(!strncmp(p, "inc", 3)) + { + if(!rsbac_ms_scan_level) + new_level = 20; + else + new_level = rsbac_ms_scan_level + 1; + } + else + { + new_level = simple_strtoul(p, NULL, 0); + } + +#ifdef CONFIG_RSBAC_SOFTMODE + if( !rsbac_softmode +#ifdef CONFIG_RSBAC_SOFTMODE_IND + && !rsbac_ind_softmode[MS] +#endif + ) +#endif + { + /* Administrator or secoff? 
*/ + i_tid.user = current->uid; + if (rsbac_get_attr(MS, + T_USER, + i_tid, + A_ms_role, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "ms_proc_write(): rsbac_get_attr() returned error!\n"); + return(-EPERM); + } + /* only allow, if secoff or (admin && level raised) */ + if ( (i_attr_val1.system_role != SR_security_officer) + && ( (i_attr_val1.system_role != SR_administrator) + || (new_level < rsbac_ms_scan_level) + ) + ) + return -EPERM; + } + + if(new_level) + { + if(!rsbac_ms_scan_level) + { + #ifdef CONFIG_RSBAC_MS_EXT_FPROTD + rsbac_ms_do_scan_fprotd = ms_fprotd_do_scan; + #endif + #ifdef CONFIG_RSBAC_MS_EXT_CLAMD + rsbac_ms_do_scan_clamd = ms_clamd_do_scan; + #endif + printk(KERN_INFO + "ms_proc_write(): activated scanner on scan level %u\n", + new_level); + } + rsbac_ms_scan_level = new_level; + } + else + { + if(rsbac_ms_scan_level) + { + #ifdef CONFIG_RSBAC_MS_EXT_FPROTD + rsbac_ms_do_scan_fprotd = NULL; + #endif + #ifdef CONFIG_RSBAC_MS_EXT_CLAMD + rsbac_ms_do_scan_clamd = NULL; + #endif + printk(KERN_INFO + "ms_proc_write(): deactivated scanner\n"); + } + rsbac_ms_scan_level = new_level; + } + err = count; + +out: + free_page((ulong) k_buf); + return(err); +} +#endif /* CONFIG_RSBAC_PROC */ +#endif /* scan() */ /* This function checks the scanned status for given file, calls do_scan, if */ @@ -303,7 +995,7 @@ static boolean scan(struct rsbac_fs_file union rsbac_attribute_value_t i_attr_val1; union rsbac_target_id_t i_tid; struct dentry * dentry_p = file.dentry_p; - int err; + int err = 1; /* just in case... 
*/ if(!dentry_p || !dentry_p->d_inode) @@ -353,7 +1045,24 @@ static boolean scan(struct rsbac_fs_file ) return(TRUE); + /* only scan, if requested for this target and access type */ i_tid.file = file; + if (rsbac_get_attr(MS, + T_FILE, + i_tid, + A_ms_need_scan, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "scan(): rsbac_get_attr() returned error!\n"); + return(FALSE); + } + if( (!execute && (i_attr_val1.ms_need_scan != MS_need_scan_full)) + || (execute && (i_attr_val1.ms_need_scan == MS_need_scan_no)) + ) + return TRUE; + /* get scanned status for file */ if (rsbac_get_attr(MS, T_FILE, @@ -374,10 +1083,21 @@ static boolean scan(struct rsbac_fs_file return(FALSE); /* Always allow, if no scanning engine */ - if(!rsbac_ms_do_scan) - return(TRUE); - /* default: scan and set to level or MS_rejected*/ - err = rsbac_ms_do_scan(dentry_p); + if(rsbac_ms_do_scan) + err = rsbac_ms_do_scan(dentry_p); + #ifdef CONFIG_RSBAC_MS_EXT_FPROTD + if( (err > 0) + && rsbac_ms_do_scan_fprotd + ) + err = rsbac_ms_do_scan_fprotd(dentry_p); + #endif + #ifdef CONFIG_RSBAC_MS_EXT_CLAMD + if( (err > 0) + && rsbac_ms_do_scan_clamd + ) + err = rsbac_ms_do_scan_clamd(dentry_p); + #endif + ms_nr_calls++; if(err > 0) { i_attr_val1.ms_scanned = rsbac_ms_scan_level; @@ -411,7 +1131,7 @@ static boolean scan(struct rsbac_fs_file else { printk(KERN_WARNING - "scan(): rsbac_ms_do_scan returned error %i!\n", + "scan(): rsbac_ms_do_scan function returned error %i!\n", err); return(FALSE); } @@ -844,7 +1564,11 @@ static enum rsbac_adf_req_ret_t /* Externally visible functions */ /************************************************* */ +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_init_ms(void) +#else int __init rsbac_init_ms(void) +#endif { int i; @@ -859,9 +1583,26 @@ int __init rsbac_init_ms(void) for(i=0; iget_info = ms_proc_info; + ms_proc_info_p->write_proc = ms_proc_write; + } + #endif +#endif return 0; } @@ -876,7 +1617,6 @@ enum rsbac_adf_req_ret_t union rsbac_attribute_value_t attr_val, 
rsbac_uid_t owner) { - enum rsbac_adf_req_ret_t result = DO_NOT_CARE; /* enum rsbac_target_t i_target; */ union rsbac_target_id_t i_tid; @@ -970,6 +1710,7 @@ enum rsbac_adf_req_ret_t case A_ms_trusted: case A_system_role: case A_ms_role: + case A_ms_need_scan: #ifdef CONFIG_RSBAC_MS_AUTH_PROT case A_auth_may_setuid: case A_auth_may_set_cap: @@ -1057,28 +1798,15 @@ enum rsbac_adf_req_ret_t } #endif -#ifdef CONFIG_RSBAC_MS_READ case R_READ_OPEN: - switch(target) - { - case T_FILE: - /* scan file as non-execute */ - if (scan(tid.file, caller_pid, FALSE)) - { /* passed scanner: grant */ - return(GRANTED); - } - else - return(NOT_GRANTED); - - /* all other cases */ - default: - return(DO_NOT_CARE); - } - case R_READ_WRITE_OPEN: switch(target) { case T_FILE: + if( (attr == A_new_object) + && (attr_val.new_object) + ) + return DO_NOT_CARE; /* scan file as non-execute */ if (scan(tid.file, caller_pid, FALSE)) { /* passed scanner: grant */ @@ -1091,7 +1819,6 @@ enum rsbac_adf_req_ret_t default: return(DO_NOT_CARE); } -#endif case R_SWITCH_MODULE: switch(target) @@ -1133,7 +1860,7 @@ enum rsbac_adf_req_ret_t default: return DO_NOT_CARE; } - return(result); + return(DO_NOT_CARE); }; /* end of rsbac_adf_request_ms() */ @@ -1285,7 +2012,11 @@ int rsbac_adf_set_attr_ms( return(-RSBAC_EREADFAILED); } /* and set for process, if different */ + #ifdef CONFIG_RSBAC_MS_PROP_TRUSTED + if(i_attr_val1.ms_trusted > i_attr_val2.ms_trusted) + #else if(i_attr_val1.ms_trusted != i_attr_val2.ms_trusted) + #endif if (rsbac_set_attr(MS, T_PROCESS, i_tid, diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/pm/pm_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/pm/pm_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/pm/pm_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/pm/pm_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Privacy Model */ /* File: rsbac/adf/pm/main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* 
Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 08/Aug/2002 */ +/* Last modified: 22/Jan/2003 */ /*************************************************** */ #include diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/rc/rc_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/rc/rc_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/rc/rc_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/rc/rc_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Role Compatibility */ /* File: rsbac/adf/rc/main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 08/Aug/2002 */ +/* Last modified: 22/Jan/2003 */ /*************************************************** */ #include @@ -128,16 +128,19 @@ static enum rsbac_adf_req_ret_t { #ifdef CONFIG_RSBAC_DEBUG if(rsbac_debug_adf_rc) - { - char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); +#ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) +#endif + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); - if(tmp) - { - printk(KERN_DEBUG "check_comp_rc(): rc_role is %i, rc_type is %i, request is %s -> NOT_GRANTED!\n", - i_attr_val1.rc_role, i_attr_val2.rc_type, get_request_name(tmp, request)); - rsbac_kfree(tmp); - } - } + if(tmp) + { + printk(KERN_DEBUG "check_comp_rc(): rc_role is %i, rc_type is %i, request is %s -> NOT_GRANTED!\n", + i_attr_val1.rc_role, i_attr_val2.rc_type, get_rc_special_right_name(tmp, request)); + rsbac_kfree(tmp); + } + } #endif return(NOT_GRANTED); } @@ -172,7 +175,9 @@ static enum rsbac_adf_req_ret_t i_rc_subtid, RI_type_comp_scd, request)) - return(GRANTED); + { + return(GRANTED); + } else { #ifdef CONFIG_RSBAC_DEBUG @@ -182,8 +187,8 @@ static enum rsbac_adf_req_ret_t if(tmp) { - printk(KERN_DEBUG "check_comp_rc_scd(): rc_role is %i, scd_type is %i, request is %s -> NOT_GRANTED!\n", - i_attr_val1.rc_role, scd_type, get_request_name(tmp,request)); + printk(KERN_DEBUG 
"check_comp_rc_scd(): pid is %u (%u), owner is %u, rc_role is %i, scd_type is %i, request is %s -> NOT_GRANTED!\n", + caller_pid, current->pid, current->uid, i_attr_val1.rc_role, scd_type, get_request_name(tmp,request)); rsbac_kfree(tmp); } } @@ -1053,8 +1058,24 @@ enum rsbac_adf_req_ret_t case A_auth_may_set_cap: case A_auth_add_f_cap: case A_auth_remove_f_cap: - /* may manipulate auth capabilities, if compatible */ - return(check_comp_rc_scd(RST_auth_administration, request, caller_pid)); + /* may manipulate auth capabilities, if allowed in general... */ + result = check_comp_rc_scd(RST_auth_administration, request, caller_pid); + if( (result == GRANTED) + || (result == DO_NOT_CARE) + ) + { + /* ...and for this target */ + result = check_comp_rc(target, tid, RCR_MODIFY_AUTH, caller_pid); + if( (result == GRANTED) + || (result == DO_NOT_CARE) + ) + return result; + } + /* Last chance: classical admin_type check */ + if ((err=rsbac_rc_test_role_admin(TRUE))) + return(NOT_GRANTED); + else + return(GRANTED); #endif default: diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/rc/rc_syscalls.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/rc/rc_syscalls.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/rc/rc_syscalls.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/rc/rc_syscalls.c 2003-08-25 20:33:02.000000000 +0200 @@ -81,6 +81,10 @@ int rsbac_rc_sys_copy_role (rsbac_rc_rol user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_copy_role(): reading role %u denied for pid %u, user %u - not in admin_roles!\n", from_role, @@ -119,6 +123,10 @@ int rsbac_rc_sys_copy_role (rsbac_rc_rol user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif 
printk(KERN_INFO "rsbac_rc_sys_copy_role(): changing role %u denied for pid %u, user %u - not in admin_roles!\n", to_role, @@ -195,6 +203,10 @@ int rsbac_rc_sys_get_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_get_item(): reading fd_need_secdel of type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -238,6 +250,10 @@ int rsbac_rc_sys_get_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_get_item(): getting item of role %u denied for pid %u, user %u - not in admin_roles!\n", tid.role, @@ -308,6 +324,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing %s of FD type %u denied for pid %u, user %u - no ADMIN right!\n", get_rc_item_name(tmp, item), @@ -351,6 +371,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or removing of DEV type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -393,6 +417,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or 
removing of IPC type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -435,6 +463,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or removing of process type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -476,6 +508,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or removing of SCD type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -518,6 +554,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or removing of NETDEV type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -560,6 +600,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or removing of NETTEMP type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -602,6 +646,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or removing of 
NETOBJ type %u denied for pid %u, user %u - no ADMIN right!\n", tid.type, @@ -651,6 +699,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing role_comp of role %u denied for pid %u, user %u - not in admin_roles!\n", tid.role, @@ -700,6 +752,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r i_attr_val1.rc_role); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing role_comp for role %u denied for user %u, role %u - not in assign_roles!\n", tid.role, @@ -741,6 +797,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u - no Role Admin!\n", get_rc_item_name(tmp, item), @@ -784,6 +844,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing name or removing of role %u denied for pid %u, user %u - not in admin_roles!\n", tid.role, @@ -826,6 +890,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing def_fd_create_type of role %u denied 
for pid %u, user %u - not in admin_roles!\n", tid.role, @@ -850,8 +918,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r enum rsbac_adf_req_ret_t result; result = rsbac_rc_check_type_comp(T_FILE, value.type_id, RCR_ASSIGN, 0); - if( (result == NOT_GRANTED) - || (result == UNDEFINED) + if( ( (result == NOT_GRANTED) + || (result == UNDEFINED) + ) + && (err=rsbac_rc_test_role_admin(TRUE)) ) { rsbac_uid_t user; @@ -866,6 +936,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing def_fd_create_type for role %u to %u denied for user %u - no ASSIGN right for type!\n", tid.role, @@ -909,6 +983,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u - not in admin_roles!\n", get_rc_item_name(tmp, item), @@ -934,8 +1012,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r enum rsbac_adf_req_ret_t result; result = rsbac_rc_check_type_comp(T_PROCESS, value.type_id, RCR_ASSIGN, 0); - if( (result == NOT_GRANTED) - || (result == UNDEFINED) + if( ( (result == NOT_GRANTED) + || (result == UNDEFINED) + ) + && (err=rsbac_rc_test_role_admin(TRUE)) ) { rsbac_uid_t user; @@ -950,6 +1030,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing def_process_*_type for role %u to %u denied for user %u - no ASSIGN right for type!\n", tid.role, @@ -989,6 +1073,10 @@ int 
rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing def_ipc_create_type of role %u denied for pid %u, user %u - not in admin_roles!\n", tid.role, @@ -1013,8 +1101,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r enum rsbac_adf_req_ret_t result; result = rsbac_rc_check_type_comp(T_IPC, value.type_id, RCR_ASSIGN, 0); - if( (result == NOT_GRANTED) - || (result == UNDEFINED) + if( ( (result == NOT_GRANTED) + || (result == UNDEFINED) + ) + && (err=rsbac_rc_test_role_admin(TRUE)) ) { rsbac_uid_t user; @@ -1029,6 +1119,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing def_ipc_create_type for role %u to %u denied for user %u - no ASSIGN right for type!\n", tid.role, @@ -1085,6 +1179,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u - not in admin_roles!\n", get_rc_item_name(tmp, item), @@ -1173,6 +1271,10 @@ int rsbac_rc_sys_set_item (enum rsbac_r user); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u - insufficent rights!\n", get_rc_item_name(tmp, item), @@ -1255,6 +1357,10 @@ int rsbac_rc_sys_change_role (rsbac_rc_r 
i_attr_val1.rc_role); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL + /* only log to standard syslog, if not disabled by kernel boot parameter */ + #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG + if (!rsbac_nosyslog) + #endif printk(KERN_INFO "rsbac_rc_sys_change role(): changing from role %u to %u denied for pid %u, user %u, role %u - roles not compatible!\n", i_attr_val1.rc_role, diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/reg/reg_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/reg/reg_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/reg/reg_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/reg/reg_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -402,7 +402,11 @@ out: /* Externally visible functions */ /************************************************* */ +#ifdef CONFIG_RSBAC_INIT_DELAY +void rsbac_reg_init(void) +#else void __init rsbac_reg_init(void) +#endif { if (rsbac_is_initialized()) { diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/res/Makefile linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/res/Makefile --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/res/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/res/Makefile 2003-08-25 20:33:02.000000000 +0200 @@ -0,0 +1,18 @@ +# +# File: rsbac/adf/RES/Makefile +# +# Makefile for the Linux rsbac RES decision module. 
+# +# Author and (c) 1999-2002 Amon Ott +# + +O_TARGET := res.o +M_OBJS := + +ifeq ($(PATCHLEVEL),2) +O_OBJS := res_main.o +else +obj-y := res_main.o +endif + +include $(TOPDIR)/Rules.make diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/res/res_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/res/res_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/res/res_main.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/res/res_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -0,0 +1,356 @@ +/**************************************************** */ +/* Rule Set Based Access Control */ +/* Implementation of the Access Control Decision */ +/* Facility (ADF) - System Resources (RES) */ +/* File: rsbac/adf/res/main.c */ +/* */ +/* Author and (c) 2002: Amon Ott */ +/* */ +/* Last modified: 22/Nov/2002 */ +/**************************************************** */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/************************************************* */ +/* Global Variables */ +/************************************************* */ + +/************************************************* */ +/* Internal Help functions */ +/************************************************* */ + +/************************************************* */ +/* Externally visible functions */ +/************************************************* */ + +enum rsbac_adf_req_ret_t + rsbac_adf_request_res (enum rsbac_adf_request_t request, + rsbac_pid_t caller_pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + enum rsbac_attribute_t attr, + union rsbac_attribute_value_t attr_val, + rsbac_uid_t owner) + { + union rsbac_target_id_t i_tid; + union rsbac_attribute_value_t i_attr_val1; + + switch (request) + { + case R_MODIFY_ATTRIBUTE: + switch(attr) + { + case A_system_role: + case A_auth_role: + case A_res_min: + case A_res_max: + #ifdef CONFIG_RSBAC_RES_AUTH_PROT + case A_auth_may_setuid: + case A_auth_may_set_cap: + 
case A_auth_add_f_cap: + case A_auth_remove_f_cap: + #endif + /* All attributes (remove target!) */ + case A_none: + /* Security Officer? */ + i_tid.user = owner; + if (rsbac_get_attr(RES, + T_USER, + i_tid, + A_res_role, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_request_res(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* if sec_officer, then grant */ + if (i_attr_val1.system_role == SR_security_officer) + return(GRANTED); + else + return(NOT_GRANTED); + + default: + return(DO_NOT_CARE); + } + + case R_READ_ATTRIBUTE: + switch(attr) + { + case A_system_role: + case A_auth_role: + case A_res_min: + case A_res_max: + /* All attributes (remove target!) */ + case A_none: + /* Security Officer or Admin? */ + i_tid.user = owner; + if (rsbac_get_attr(RES, + T_USER, + i_tid, + A_res_role, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING + "rsbac_adf_request_res(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* if sec_officer, then grant */ + if( (i_attr_val1.system_role == SR_security_officer) + || (i_attr_val1.system_role == SR_administrator) + ) + return(GRANTED); + else + return(NOT_GRANTED); + + default: + return(DO_NOT_CARE); + } + + case R_SWITCH_LOG: + switch(target) + { + case T_NONE: + /* test owner's res_role */ + i_tid.user = owner; + if (rsbac_get_attr(RES, + T_USER, + i_tid, + A_res_role, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING "rsbac_adf_request_res(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* security officer? 
-> grant */ + if (i_attr_val1.system_role == SR_security_officer) + return(GRANTED); + else + return(NOT_GRANTED); + + /* all other cases are unknown */ + default: return(DO_NOT_CARE); + } + + case R_SWITCH_MODULE: + switch(target) + { + case T_NONE: + /* we need the switch_target */ + if(attr != A_switch_target) + return(UNDEFINED); + /* do not care for other modules */ + if( (attr_val.switch_target != RES) + #ifdef CONFIG_RSBAC_RES_AUTH_PROT + && (attr_val.switch_target != AUTH) + #endif + #ifdef CONFIG_RSBAC_SOFTMODE + && (attr_val.switch_target != SOFTMODE) + #endif + ) + return(DO_NOT_CARE); + /* test owner's res_role */ + i_tid.user = owner; + if (rsbac_get_attr(RES, + T_USER, + i_tid, + A_res_role, + &i_attr_val1, + TRUE)) + { + printk(KERN_WARNING "rsbac_adf_request_res(): rsbac_get_attr() returned error!\n"); + return(NOT_GRANTED); + } + /* security officer? -> grant */ + if (i_attr_val1.system_role == SR_security_officer) + return(GRANTED); + else + return(NOT_GRANTED); + + /* all other cases are unknown */ + default: return(DO_NOT_CARE); + } + + +/*********************/ + default: return DO_NOT_CARE; + } + + return(DO_NOT_CARE); + }; /* end of rsbac_adf_request_res() */ + + +/*****************************************************************************/ +/* If the request returned granted and the operation is performed, */ +/* the following function can be called by the AEF to get all aci set */ +/* correctly. For write accesses that are performed fully within the kernel, */ +/* this is usually not done to prevent extra calls, including R_CLOSE for */ +/* cleaning up. */ +/* The second instance of target specification is the new target, if one has */ +/* been created, otherwise its values are ignored. */ +/* On success, 0 is returned, and an error from rsbac/error.h otherwise. 
*/ + +int rsbac_adf_set_attr_res( + enum rsbac_adf_request_t request, + rsbac_pid_t caller_pid, + enum rsbac_target_t target, + union rsbac_target_id_t tid, + enum rsbac_target_t new_target, + union rsbac_target_id_t new_tid, + enum rsbac_attribute_t attr, + union rsbac_attribute_value_t attr_val, + rsbac_uid_t owner) + { + union rsbac_target_id_t i_tid; + union rsbac_attribute_value_t i_attr_val1; + + switch (request) + { + case R_CHANGE_OWNER: + switch(target) + { + case T_PROCESS: + if(attr != A_owner) + return(-RSBAC_EINVALIDATTR); + /* Adjust Linux resources */ + i_tid.user = attr_val.owner; + #ifdef CONFIG_RSBAC_SOFTMODE + if(!rsbac_softmode) + #endif + { + int maxval = rsbac_min(RLIM_NLIMITS - 1, RSBAC_RES_MAX); + int i; + + if (rsbac_get_attr(RES, + T_USER, + i_tid, + A_res_max, + &i_attr_val1, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_res(): rsbac_get_attr() for res_user_full returned error!\n"); + return -RSBAC_EREADFAILED; + } + for(i = 0; i <= maxval ; i++) + { + if(i_attr_val1.res_array[i]) + { + if(current->rlim[i].rlim_max > i_attr_val1.res_array[i]) + current->rlim[i].rlim_max = i_attr_val1.res_array[i]; + if(current->rlim[i].rlim_cur > i_attr_val1.res_array[i]) + current->rlim[i].rlim_cur = i_attr_val1.res_array[i]; + } + } + if (rsbac_get_attr(RES, + T_USER, + i_tid, + A_res_min, + &i_attr_val1, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_res(): rsbac_get_attr() for res_user_full returned error!\n"); + return -RSBAC_EREADFAILED; + } + if(i_attr_val1.res_array[RLIMIT_NOFILE] > NR_OPEN) + i_attr_val1.res_array[RLIMIT_NOFILE] = NR_OPEN; + for(i = 0; i <= maxval ; i++) + { + if(i_attr_val1.res_array[i]) + { + if(current->rlim[i].rlim_max < i_attr_val1.res_array[i]) + current->rlim[i].rlim_max = i_attr_val1.res_array[i]; + if(current->rlim[i].rlim_cur < i_attr_val1.res_array[i]) + current->rlim[i].rlim_cur = i_attr_val1.res_array[i]; + } + } + } + return 0; + + /* all other cases are unknown */ + default: + return(0); + } 
+ break; + + case R_EXECUTE: + switch(target) + { + case T_FILE: + #ifdef CONFIG_RSBAC_SOFTMODE + if(!rsbac_softmode) + #endif + { + int maxval = rsbac_min(RLIM_NLIMITS - 1, RSBAC_RES_MAX); + int i; + + if (rsbac_get_attr(RES, + target, + tid, + A_res_max, + &i_attr_val1, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_res(): rsbac_get_attr() for res_user_full returned error!\n"); + return -RSBAC_EREADFAILED; + } + for(i = 0; i <= maxval ; i++) + { + if(i_attr_val1.res_array[i]) + { + if(current->rlim[i].rlim_max > i_attr_val1.res_array[i]) + current->rlim[i].rlim_max = i_attr_val1.res_array[i]; + if(current->rlim[i].rlim_cur > i_attr_val1.res_array[i]) + current->rlim[i].rlim_cur = i_attr_val1.res_array[i]; + } + } + if (rsbac_get_attr(RES, + target, + tid, + A_res_min, + &i_attr_val1, + FALSE)) + { + printk(KERN_WARNING + "rsbac_adf_set_attr_res(): rsbac_get_attr() for res_user_full returned error!\n"); + return -RSBAC_EREADFAILED; + } + for(i = 0; i <= maxval ; i++) + { + if(i_attr_val1.res_array[i]) + { + if(current->rlim[i].rlim_max < i_attr_val1.res_array[i]) + current->rlim[i].rlim_max = i_attr_val1.res_array[i]; + if(current->rlim[i].rlim_cur < i_attr_val1.res_array[i]) + current->rlim[i].rlim_cur = i_attr_val1.res_array[i]; + } + } + } + return 0; + + /* all other cases are unknown */ + default: + return(0); + } + break; + +/*********************/ + default: return(0); + } + + return(0); + }; /* end of rsbac_adf_set_attr_res() */ + +/* end of rsbac/adf/res/main.c */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/sim/sim_main.c linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/sim/sim_main.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/adf/sim/sim_main.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/adf/sim/sim_main.c 2003-08-25 20:33:02.000000000 +0200 @@ -4,9 +4,9 @@ /* Facility (ADF) - Security Information Modification */ /* File: rsbac/adf/sim/main.c */ /* */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and 
(c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 08/Aug/2002 */ +/* Last modified: 06/Jun/2003 */ /**************************************************** */ #include @@ -137,25 +137,25 @@ static enum rsbac_adf_req_ret_t #endif static enum rsbac_adf_req_ret_t - sim_check_secoff_role(rsbac_uid_t owner) + sim_check_sysrole(rsbac_uid_t owner, enum rsbac_system_role_t role) { union rsbac_target_id_t i_tid; union rsbac_attribute_value_t i_attr_val1; i_tid.user = owner; - if (rsbac_get_attr(FC, + if (rsbac_get_attr(SIM, T_USER, i_tid, - A_fc_role, + A_sim_role, &i_attr_val1, TRUE)) { printk(KERN_WARNING - "sim_check_secoff_role(): rsbac_get_attr() returned error!\n"); + "sim_check_sysrole(): rsbac_get_attr() returned error!\n"); return(NOT_GRANTED); } /* if correct role, then grant */ - if (i_attr_val1.system_role == SR_security_officer) + if (i_attr_val1.system_role == role) return(GRANTED); else return(NOT_GRANTED); @@ -307,7 +307,7 @@ enum rsbac_adf_req_ret_t #ifdef CONFIG_RSBAC_SIM_NET_OBJ_PROT case T_NETTEMP: - return sim_check_secoff_role(owner); + return sim_check_sysrole(owner, SR_security_officer); case T_NETOBJ: return(check_role_sim_netobj(request,tid,owner)); @@ -329,7 +329,7 @@ enum rsbac_adf_req_ret_t #ifdef CONFIG_RSBAC_SIM_NET_OBJ_PROT case T_NETTEMP: - return sim_check_secoff_role(owner); + return sim_check_sysrole(owner, SR_security_officer); #endif /* all other cases */ @@ -352,7 +352,10 @@ enum rsbac_adf_req_ret_t if (tid.scd != ST_rsbaclog) return(GRANTED); /* Secoff? */ - return sim_check_secoff_role(owner); + if(sim_check_sysrole(owner, SR_security_officer) == NOT_GRANTED) + return sim_check_sysrole(owner, SR_auditor); + else + return GRANTED; default: return(DO_NOT_CARE); @@ -410,7 +413,7 @@ enum rsbac_adf_req_ret_t /* All attributes (remove target!) */ case A_none: /* Security Officer? 
*/ - return sim_check_secoff_role(owner); + return sim_check_sysrole(owner, SR_security_officer); default: return(DO_NOT_CARE); @@ -458,7 +461,7 @@ enum rsbac_adf_req_ret_t /* switching Linux DAC */ case T_NONE: /* Security Officer? */ - return sim_check_secoff_role(owner); + return sim_check_sysrole(owner, SR_security_officer); #endif /* all other cases are unknown */ @@ -472,7 +475,10 @@ enum rsbac_adf_req_ret_t /* target not rsbaclog? no problem -> grant */ if (tid.scd != ST_rsbaclog) return(GRANTED); - return sim_check_secoff_role(owner); + if(sim_check_sysrole(owner, SR_security_officer) == NOT_GRANTED) + return sim_check_sysrole(owner, SR_auditor); + else + return GRANTED; /* all other cases are unknown */ default: return(DO_NOT_CARE); @@ -496,7 +502,7 @@ enum rsbac_adf_req_ret_t { case T_NONE: /* test owner's sim_role */ - return sim_check_secoff_role(owner); + return sim_check_sysrole(owner, SR_security_officer); /* all other cases are unknown */ default: return(DO_NOT_CARE); @@ -520,7 +526,7 @@ enum rsbac_adf_req_ret_t ) return(DO_NOT_CARE); /* test owner's sim_role */ - return sim_check_secoff_role(owner); + return sim_check_sysrole(owner, SR_security_officer); /* all other cases are unknown */ default: return(DO_NOT_CARE); @@ -550,7 +556,7 @@ enum rsbac_adf_req_ret_t #ifdef CONFIG_RSBAC_SIM_NET_OBJ_PROT case T_NETTEMP: - return sim_check_secoff_role(owner); + return sim_check_sysrole(owner, SR_security_officer); case T_NETOBJ: return(check_role_sim_netobj(request,tid,owner)); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/aci_data_structures.c linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/aci_data_structures.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/aci_data_structures.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/aci_data_structures.c 2003-08-25 20:33:02.000000000 +0200 @@ -1,11 +1,11 @@ /*************************************************** */ /* Rule Set Based 
Access Control */ /* Implementation of ACI data structures */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* (some smaller parts copied from fs/namei.c */ /* and others) */ /* */ -/* Last modified: 19/Sep/2002 */ +/* Last modified: 01/Jul/2003 */ /*************************************************** */ #include @@ -25,6 +25,7 @@ #include #endif #include +#include #include #include #include @@ -133,6 +134,9 @@ static struct rsbac_gen_fd_aci_t def_g #if defined(CONFIG_RSBAC_MAC) static struct rsbac_mac_fd_aci_t def_mac_root_dir_aci = DEFAULT_MAC_ROOT_DIR_ACI; #endif +#if defined(CONFIG_RSBAC_MS) +static struct rsbac_ms_fd_aci_t def_ms_root_dir_aci = DEFAULT_MS_ROOT_DIR_ACI; +#endif #if defined(CONFIG_RSBAC_RC) static struct rsbac_rc_fd_aci_t def_rc_root_dir_aci = DEFAULT_RC_ROOT_DIR_ACI; #endif @@ -158,11 +162,6 @@ struct proc_dir_entry * proc_rsbac_backu #endif /* PROC */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,11) -static struct super_block * sockfs_sb_p = NULL; -static struct super_block * pipefs_sb_p = NULL; -#endif - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) #ifdef CONFIG_DEVFS_MOUNT #include @@ -195,6 +194,10 @@ static inline int mac_fd_hash(u_long ino { return(inode % RSBAC_MAC_NR_FD_LISTS); } +static inline int mac_p_hash(rsbac_pid_t pid) + { + return(pid % CONFIG_RSBAC_MAC_NR_P_LISTS); + } #endif #if defined(CONFIG_RSBAC_FC) static inline int fc_fd_hash(u_long inode) @@ -235,6 +238,10 @@ static inline int rc_fd_hash(u_long inod { return(inode % RSBAC_RC_NR_FD_LISTS); } +static inline int rc_p_hash(rsbac_pid_t pid) + { + return(pid % CONFIG_RSBAC_RC_NR_P_LISTS); + } #endif #if defined(CONFIG_RSBAC_AUTH) static inline int auth_fd_hash(u_long inode) @@ -248,6 +255,12 @@ static inline int cap_fd_hash(u_long ino return(inode % RSBAC_CAP_NR_FD_LISTS); } #endif +#if defined(CONFIG_RSBAC_RES) +static inline int res_fd_hash(u_long inode) + { + return(inode % RSBAC_RES_NR_FD_LISTS); + } +#endif /* These help functions 
do NOT handle data consistency protection by */ /* rw-spinlocks! This is done exclusively by non-internal functions! */ @@ -600,8 +613,9 @@ static int gen_fd_conv( new_aci->log_array_high = old_aci->log_array_high; new_aci->log_program_based = old_aci->log_program_based; new_aci->symlink_add_uid = old_aci->symlink_add_uid; - new_aci->symlink_add_rc_role = FALSE; - new_aci->linux_dac_disable = LDD_false; + new_aci->symlink_add_mac_level = FALSE; + new_aci->symlink_add_rc_role = old_aci->symlink_add_rc_role; + new_aci->linux_dac_disable = old_aci->linux_dac_disable; return 0; } @@ -618,7 +632,28 @@ static int gen_fd_old_conv( new_aci->log_array_low = old_aci->log_array_low; new_aci->log_array_high = old_aci->log_array_high; new_aci->log_program_based = old_aci->log_program_based; + new_aci->symlink_add_uid = old_aci->symlink_add_uid; + new_aci->symlink_add_mac_level = FALSE; + new_aci->symlink_add_rc_role = FALSE; + new_aci->linux_dac_disable = LDD_false; + return 0; + } + +static int gen_fd_old_old_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct rsbac_gen_fd_aci_t * new_aci = new_data; + struct rsbac_gen_fd_old_old_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t)); + new_aci->log_array_low = old_aci->log_array_low; + new_aci->log_array_high = old_aci->log_array_high; + new_aci->log_program_based = old_aci->log_program_based; new_aci->symlink_add_uid = FALSE; + new_aci->symlink_add_mac_level = FALSE; new_aci->symlink_add_rc_role = FALSE; new_aci->linux_dac_disable = LDD_false; return 0; @@ -632,11 +667,201 @@ rsbac_list_conv_function_t * gen_fd_get_ return gen_fd_conv; case RSBAC_GEN_FD_OLD_OLD_ACI_VERSION: return gen_fd_old_conv; + case RSBAC_GEN_FD_OLD_OLD_OLD_ACI_VERSION: + return gen_fd_old_old_conv; + default: + return NULL; + } + } + +#ifdef CONFIG_RSBAC_MAC +static int mac_old_fd_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct 
rsbac_mac_fd_aci_t * new_aci = new_data; + struct rsbac_mac_fd_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t)); + new_aci->sec_level = old_aci->sec_level; + new_aci->mac_trusted_for_user = old_aci->mac_trusted_for_user; + new_aci->mac_categories = old_aci->mac_categories; + new_aci->mac_auto = old_aci->mac_auto; + new_aci->mac_prop_trusted = FALSE; + if(old_aci->mac_shared) + new_aci->mac_file_flags = MAC_write_up; + else + new_aci->mac_file_flags = 0; + return 0; + } + +static int mac_old_old_fd_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct rsbac_mac_fd_aci_t * new_aci = new_data; + struct rsbac_mac_fd_old_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t)); + new_aci->sec_level = old_aci->sec_level; + new_aci->mac_trusted_for_user = old_aci->mac_trusted_for_user; + new_aci->mac_categories = old_aci->mac_categories; + new_aci->mac_auto = old_aci->mac_auto; + new_aci->mac_prop_trusted = FALSE; + new_aci->mac_file_flags = 0; + return 0; + } + +static int mac_old_old_old_fd_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct rsbac_mac_fd_aci_t * new_aci = new_data; + struct rsbac_mac_fd_old_old_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t)); + new_aci->sec_level = old_aci->sec_level; + new_aci->mac_trusted_for_user = old_aci->mac_trusted_for_user; + new_aci->mac_categories = old_aci->mac_categories; + new_aci->mac_auto = TRUE; + new_aci->mac_prop_trusted = FALSE; + new_aci->mac_file_flags = 0; + return 0; + } + +rsbac_list_conv_function_t * mac_fd_get_conv(rsbac_version_t old_version) + { + switch(old_version) + { + case RSBAC_MAC_FD_OLD_ACI_VERSION: + return mac_old_fd_conv; + case RSBAC_MAC_FD_OLD_OLD_ACI_VERSION: + return mac_old_old_fd_conv; + case RSBAC_MAC_FD_OLD_OLD_OLD_ACI_VERSION: + return mac_old_old_old_fd_conv; + default: + return NULL; + } + } + 
+static int mac_old_user_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct rsbac_mac_user_aci_t * new_aci = new_data; + struct rsbac_mac_user_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_uid_t)); + new_aci->security_level = old_aci->access_appr; + new_aci->initial_security_level = old_aci->access_appr; + new_aci->min_security_level = old_aci->min_access_appr; + new_aci->mac_categories = old_aci->mac_categories; + new_aci->mac_initial_categories = old_aci->mac_categories; + new_aci->mac_min_categories = old_aci->mac_min_categories; + new_aci->system_role = old_aci->system_role; + new_aci->mac_user_flags = RSBAC_MAC_DEF_U_FLAGS; + if(old_aci->mac_allow_auto) + new_aci->mac_user_flags |= MAC_allow_auto; + return 0; + } + +static int mac_old_old_user_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct rsbac_mac_user_aci_t * new_aci = new_data; + struct rsbac_mac_user_old_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_uid_t)); + new_aci->security_level = old_aci->access_appr; + new_aci->initial_security_level = old_aci->access_appr; + new_aci->min_security_level = old_aci->min_access_appr; + new_aci->mac_categories = old_aci->mac_categories; + new_aci->mac_initial_categories = old_aci->mac_categories; + new_aci->mac_min_categories = old_aci->mac_min_categories; + new_aci->system_role = old_aci->system_role; + new_aci->mac_user_flags = RSBAC_MAC_DEF_U_FLAGS; + return 0; + } + +static int mac_old_old_old_user_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct rsbac_mac_user_aci_t * new_aci = new_data; + struct rsbac_mac_user_old_old_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_uid_t)); + new_aci->security_level = old_aci->access_appr; + new_aci->initial_security_level = old_aci->access_appr; + new_aci->min_security_level = SL_unclassified; + 
new_aci->mac_categories = old_aci->mac_categories; + new_aci->mac_initial_categories = old_aci->mac_categories; + new_aci->mac_min_categories = RSBAC_MAC_MIN_CAT_VECTOR; + new_aci->system_role = old_aci->system_role; + new_aci->mac_user_flags = RSBAC_MAC_DEF_U_FLAGS; + return 0; + } + +rsbac_list_conv_function_t * mac_user_get_conv(rsbac_version_t old_version) + { + switch(old_version) + { + case RSBAC_MAC_USER_OLD_ACI_VERSION: + return mac_old_user_conv; + case RSBAC_MAC_USER_OLD_OLD_ACI_VERSION: + return mac_old_old_user_conv; + case RSBAC_MAC_USER_OLD_OLD_OLD_ACI_VERSION: + return mac_old_old_old_user_conv; + default: + return NULL; + } + } +#endif + +#ifdef CONFIG_RSBAC_MS +static int ms_old_fd_conv( + void * old_desc, + void * old_data, + void * new_desc, + void * new_data) + { + struct rsbac_ms_fd_aci_t * new_aci = new_data; + struct rsbac_ms_fd_old_aci_t * old_aci = old_data; + + memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t)); + new_aci->ms_trusted = old_aci->ms_trusted; + new_aci->ms_sock_trusted_tcp = old_aci->ms_sock_trusted_tcp; + new_aci->ms_sock_trusted_udp = old_aci->ms_sock_trusted_udp; + new_aci->ms_need_scan = DEFAULT_MS_FD_NEED_SCAN; + return 0; + } + +rsbac_list_conv_function_t * ms_fd_get_conv(rsbac_version_t old_version) + { + switch(old_version) + { + case RSBAC_MS_FD_OLD_ACI_VERSION: + return ms_old_fd_conv; default: return NULL; } } +#endif + /************************************************************************** */ /* The add_item() functions add an item to the list, set head.curr to it, */ /* and return a pointer to the item. 
*/ @@ -686,6 +911,9 @@ static int register_fd_lists(struct rsba #if defined(CONFIG_RSBAC_CAP) struct rsbac_cap_fd_aci_t def_cap_fd_aci = DEFAULT_CAP_FD_ACI; #endif +#if defined(CONFIG_RSBAC_RES) + struct rsbac_res_fd_aci_t def_res_fd_aci = DEFAULT_RES_FD_ACI; +#endif if(!device_p) return(-RSBAC_EINVALIDPOINTER); name = rsbac_kmalloc(RSBAC_MAXNAMELEN); @@ -750,7 +978,7 @@ static int register_fd_lists(struct rsba info, RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA, rsbac_list_compare_u32, - NULL, + mac_fd_get_conv, &def_mac_fd_aci, name, kdev); @@ -919,7 +1147,7 @@ static int register_fd_lists(struct rsba RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA, rsbac_list_compare_u32, - NULL, + ms_fd_get_conv, &def_ms_fd_aci, name, kdev); @@ -1153,6 +1381,48 @@ static int register_fd_lists(struct rsba } #endif +#if defined(CONFIG_RSBAC_RES) + /* register RES lists */ + for (file_no = 0; file_no < RSBAC_RES_NR_FD_LISTS; file_no++) + { + /* construct name from base name + number */ + strcpy(name, RSBAC_RES_FD_NAME); + strcat(name, inttostr(number,file_no) ); + + info.version = RSBAC_RES_FD_ACI_VERSION; + info.key = RSBAC_RES_FD_ACI_KEY; + info.desc_size = sizeof(rsbac_inode_nr_t); + info.data_size = sizeof(struct rsbac_res_fd_aci_t); + info.max_age = 0; + tmperr = rsbac_list_register(RSBAC_LIST_VERSION, + &(device_p->handles.res[file_no]), + info, + RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA, + rsbac_list_compare_u32, + NULL, + &def_res_fd_aci, + name, + kdev); + if(tmperr) + { + char * tmp; + + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) + { + printk(KERN_WARNING + "register_fd_lists(): registering RES list %u for device %02u:%02u failed with error %s!\n", + file_no, + MAJOR(kdev), + MINOR(kdev), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); + } + err = tmperr; + } + } +#endif + return err; } @@ -1449,6 +1719,32 @@ static int aci_detach_fd_lists(struct rs } #endif +#if defined(CONFIG_RSBAC_RES) + /* detach all RES lists */ + for (file_no = 0; file_no < RSBAC_RES_NR_FD_LISTS; 
file_no++) + { + tmperr = rsbac_list_detach(&device_p->handles.res[file_no], + RSBAC_RES_FD_ACI_KEY); + if(tmperr) + { + char * tmp; + + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) + { + printk(KERN_WARNING + "detach_fd_lists(): detaching from RES list %u for device %02u:%02u failed with error %s!\n", + file_no, + MAJOR(device_p->id), + MINOR(device_p->id), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); + } + err = tmperr; + } + } +#endif + return err; } @@ -1616,11 +1912,35 @@ struct super_block * rsbac_get_super_blo device_p = lookup_device(kdev); if (!device_p) { - printk(KERN_WARNING - "rsbac_get_super_block(): unknown device %02u:%02u\n", - MAJOR(kdev), MINOR(kdev)); rsbac_read_unlock(&device_list_head.lock, &dflags); - return NULL; + sb_p = get_super(kdev); + if(sb_p) + { + printk(KERN_INFO + "rsbac_get_super_block(): auto-mounting device %02u:%02u\n", + MAJOR(kdev), MINOR(kdev)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + rsbac_mount(sb_p); +#else + rsbac_mount(sb_p, NULL); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,8) + /* free super_block pointer */ + drop_super(sb_p); +#endif + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(kdev); + if (!device_p) + { + printk(KERN_WARNING + "rsbac_get_super_block(): unknown device %02u:%02u\n", + MAJOR(kdev), MINOR(kdev)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return NULL; + } + } + else + return NULL; } sb_p = device_p->sb_p; rsbac_read_unlock(&device_list_head.lock, &dflags); @@ -2465,6 +2785,10 @@ stats_proc_info(char *buffer, char **sta if(rsbac_switch_jail) strcat(active_modules, " JAIL"); #endif + #ifdef CONFIG_RSBAC_RES + if(rsbac_switch_res) + strcat(active_modules, " RES"); + #endif len += sprintf(buffer+len, "Active Modules: %s\n", active_modules); rsbac_kfree(active_modules); @@ -2699,6 +3023,19 @@ stats_proc_info(char *buffer, char **sta fd_dev_count += fd_count; #endif +#if defined(CONFIG_RSBAC_RES) + fd_count = 0; + for (i=0; i < 
RSBAC_RES_NR_FD_LISTS; i++) + { + tmp_count = rsbac_list_count(device_p->handles.res[i]); + if(tmp_count > 0) + fd_count+=tmp_count; + }; + len += sprintf(buffer + len, ", %lu RES", + fd_count); + fd_dev_count += fd_count; +#endif + len += sprintf(buffer + len, ", %lu total\n", fd_dev_count); pos = begin + len; @@ -2895,6 +3232,12 @@ stats_proc_info(char *buffer, char **sta tmp_count); sum += tmp_count; #endif +#if defined(CONFIG_RSBAC_RES) + tmp_count = rsbac_list_count(user_handles.res); + len += sprintf(buffer + len, ", %lu RES", + tmp_count); + sum += tmp_count; +#endif len += sprintf(buffer + len, ", %lu total\n", sum); total_sum += sum; @@ -2914,7 +3257,9 @@ stats_proc_info(char *buffer, char **sta tmp_count); sum += tmp_count; #if defined(CONFIG_RSBAC_MAC) - tmp_count = rsbac_list_count(process_handles.mac); + tmp_count = 0; + for(i=0; i offset+length) goto out; #endif +#if defined(CONFIG_RSBAC_RES) + len += sprintf(buffer + len, "RES aci version is %i, aci entry size is %i, %i lists per device\n", + RSBAC_RES_FD_ACI_VERSION, sizeof(struct rsbac_res_fd_aci_t), RSBAC_RES_NR_FD_LISTS); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; +#endif len += sprintf(buffer + len, "\nDEV lists:\nGEN aci version is %i, aci entry size is %i\n", @@ -4106,6 +4471,18 @@ versions_proc_info(char *buffer, char ** if (pos > offset+length) goto out; #endif +#if defined(CONFIG_RSBAC_RES) + len += sprintf(buffer + len, "RES aci version is %i, aci entry size is %i\n", + RSBAC_RES_USER_ACI_VERSION, sizeof(struct rsbac_res_user_aci_t)); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; +#endif len += sprintf(buffer + len, "\nPROCESS lists:\nGEN aci version is %i, aci entry size is %i\n", @@ -4119,8 +4496,10 @@ versions_proc_info(char *buffer, char ** if (pos > offset+length) goto out; #if defined(CONFIG_RSBAC_MAC) - len += sprintf(buffer + len, "MAC aci 
version is %i, aci entry size is %i\n", - RSBAC_MAC_PROCESS_ACI_VERSION, sizeof(struct rsbac_mac_process_aci_t)); + len += sprintf(buffer + len, "MAC aci version is %u, aci entry size is %u, number of lists is %u\n", + RSBAC_MAC_PROCESS_ACI_VERSION, + sizeof(struct rsbac_mac_process_aci_t), + CONFIG_RSBAC_MAC_NR_P_LISTS); pos = begin + len; if (pos < offset) { @@ -4155,8 +4534,10 @@ versions_proc_info(char *buffer, char ** goto out; #endif #if defined(CONFIG_RSBAC_RC) - len += sprintf(buffer + len, "RC aci version is %i, aci entry size is %i\n", - RSBAC_RC_PROCESS_ACI_VERSION, sizeof(struct rsbac_rc_process_aci_t)); + len += sprintf(buffer + len, "RC aci version is %u, aci entry size is %u, number of lists is %u\n", + RSBAC_RC_PROCESS_ACI_VERSION, + sizeof(struct rsbac_rc_process_aci_t), + CONFIG_RSBAC_RC_NR_P_LISTS); pos = begin + len; if (pos < offset) { @@ -4178,6 +4559,18 @@ versions_proc_info(char *buffer, char ** if (pos > offset+length) goto out; #endif +#if defined(CONFIG_RSBAC_CAP) + len += sprintf(buffer + len, "CAP aci version is %i, aci entry size is %i\n", + RSBAC_CAP_PROCESS_ACI_VERSION, sizeof(struct rsbac_cap_process_aci_t)); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; +#endif #if defined(CONFIG_RSBAC_JAIL) len += sprintf(buffer + len, "JAIL aci version is %i, aci entry size is %i\n", RSBAC_JAIL_PROCESS_ACI_VERSION, sizeof(struct rsbac_jail_process_aci_t)); @@ -4468,7 +4861,7 @@ jail_proc_info(char *buffer, char **star return -EPERM; } - len = sprintf(buffer, "JAILed Processes\n----------------\nPID Jail-ID IP Flags\n"); + len = sprintf(buffer, "JAILed Processes\n----------------\nPID Jail-ID Flags IP\n"); pos = begin + len; if (pos < offset) { @@ -4488,11 +4881,11 @@ jail_proc_info(char *buffer, char **star { if(!rsbac_list_get_data(process_handles.jail, &pid_array[i], &data)) { - len += sprintf(buffer + len, "%-5u %-10u %u.%u.%u.%u %u\n", + len += sprintf(buffer + len, "%-5u 
%-10u %-7u %u.%u.%u.%u\n", pid_array[i], data.id, - NIPQUAD(data.ip), - data.flags); + data.flags, + NIPQUAD(data.ip)); pos = begin + len; if (pos < offset) { @@ -4685,10 +5078,18 @@ static int unregister_all_rsbac_proc(voi /* MS init prototype */ #if defined(CONFIG_RSBAC_MS) && !defined(CONFIG_RSBAC_MAINT) +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_init_ms(void); +#else int __init rsbac_init_ms(void); #endif +#endif +#ifdef CONFIG_RSBAC_INIT_DELAY +static void registration_error(int err, char * listname) +#else static void __init registration_error(int err, char * listname) +#endif { if(err < 0) { @@ -4705,7 +5106,11 @@ static void __init registration_error(in } } +#ifdef CONFIG_RSBAC_INIT_DELAY +static int rsbac_do_init(void) +#else static int __init rsbac_do_init(void) +#endif { int err = 0; struct rsbac_device_list_item_t * device_p; @@ -4766,6 +5171,9 @@ static int __init rsbac_do_init(void) #ifdef CONFIG_RSBAC_JAIL strcat(compiled_modules, " JAIL"); #endif + #ifdef CONFIG_RSBAC_RES + strcat(compiled_modules, " RES"); + #endif #ifdef CONFIG_RSBAC_MAINT printk(KERN_INFO "rsbac_do_init(): Initializing RSBAC %s (Maintenance Mode)\n", @@ -5288,7 +5696,7 @@ static int __init rsbac_do_init(void) #endif RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA, rsbac_list_compare_u32, - NULL, + mac_user_get_conv, &def_aci, RSBAC_MAC_ACI_USER_NAME, 0); @@ -5301,6 +5709,7 @@ static int __init rsbac_do_init(void) { struct rsbac_mac_user_aci_t sysadm_aci = DEFAULT_MAC_U_SYSADM_ACI; struct rsbac_mac_user_aci_t secoff_aci = DEFAULT_MAC_U_SECOFF_ACI; + struct rsbac_mac_user_aci_t auditor_aci = DEFAULT_MAC_U_AUDITOR_ACI; rsbac_uid_t user; printk(KERN_WARNING @@ -5313,6 +5722,10 @@ static int __init rsbac_do_init(void) if(rsbac_list_add(user_handles.mac, &user, &secoff_aci)) printk(KERN_WARNING "rsbac_do_init(): SECOFF USER MAC entry could not be added!\n"); + user = RSBAC_AUDITOR_UID; + if(rsbac_list_add(user_handles.mac, &user, &auditor_aci)) + printk(KERN_WARNING + "rsbac_do_init(): 
AUDITOR USER MAC entry could not be added!\n"); } } #endif @@ -5364,6 +5777,11 @@ static int __init rsbac_do_init(void) if(rsbac_list_add(user_handles.fc, &user, &role)) printk(KERN_WARNING "rsbac_do_init(): SECOFF USER FC entry could not be added!\n"); + user = RSBAC_AUDITOR_UID; + role = SR_auditor; + if(rsbac_list_add(user_handles.fc, &user, &role)) + printk(KERN_WARNING + "rsbac_do_init(): AUDITOR USER FC entry could not be added!\n"); } } #endif @@ -5410,6 +5828,11 @@ static int __init rsbac_do_init(void) if(rsbac_list_add(user_handles.sim, &user, &role)) printk(KERN_WARNING "rsbac_do_init(): SECOFF USER SIM entry could not be added!\n"); + user = RSBAC_AUDITOR_UID; + role = SR_auditor; + if(rsbac_list_add(user_handles.sim, &user, &role)) + printk(KERN_WARNING + "rsbac_do_init(): AUDITOR USER SIM entry could not be added!\n"); } } #endif @@ -5557,6 +5980,11 @@ static int __init rsbac_do_init(void) if(rsbac_list_add(user_handles.ff, &user, &role)) printk(KERN_WARNING "rsbac_do_init(): SECOFF USER FF entry could not be added!\n"); + user = RSBAC_AUDITOR_UID; + role = SR_auditor; + if(rsbac_list_add(user_handles.ff, &user, &role)) + printk(KERN_WARNING + "rsbac_do_init(): AUDITOR USER FF entry could not be added!\n"); } } #endif @@ -5603,6 +6031,11 @@ static int __init rsbac_do_init(void) if(rsbac_list_add(user_handles.rc, &user, &role)) printk(KERN_WARNING "rsbac_do_init(): SECOFF USER RC entry could not be added!\n"); + user = RSBAC_AUDITOR_UID; + role = RSBAC_RC_AUDITOR_ROLE; + if(rsbac_list_add(user_handles.rc, &user, &role)) + printk(KERN_WARNING + "rsbac_do_init(): AUDITOR USER RC entry could not be added!\n"); } } #endif @@ -5649,6 +6082,11 @@ static int __init rsbac_do_init(void) if(rsbac_list_add(user_handles.auth, &user, &role)) printk(KERN_WARNING "rsbac_do_init(): SECOFF USER AUTH entry could not be added!\n"); + user = RSBAC_AUDITOR_UID; + role = SR_auditor; + if(rsbac_list_add(user_handles.auth, &user, &role)) + printk(KERN_WARNING + 
"rsbac_do_init(): AUDITOR USER AUTH entry could not be added!\n"); } } #endif @@ -5682,6 +6120,7 @@ static int __init rsbac_do_init(void) { struct rsbac_cap_user_aci_t sysadm_aci = DEFAULT_CAP_U_SYSADM_ACI; struct rsbac_cap_user_aci_t secoff_aci = DEFAULT_CAP_U_SECOFF_ACI; + struct rsbac_cap_user_aci_t auditor_aci = DEFAULT_CAP_U_AUDITOR_ACI; rsbac_uid_t user; printk(KERN_WARNING @@ -5694,6 +6133,10 @@ static int __init rsbac_do_init(void) if(rsbac_list_add(user_handles.cap, &user, &secoff_aci)) printk(KERN_WARNING "rsbac_do_init(): SECOFF USER CAP entry could not be added!\n"); + user = RSBAC_AUDITOR_UID; + if(rsbac_list_add(user_handles.cap, &user, &auditor_aci)) + printk(KERN_WARNING + "rsbac_do_init(): AUDITOR USER CAP entry could not be added!\n"); } } #endif @@ -5743,6 +6186,49 @@ static int __init rsbac_do_init(void) } } #endif +#if defined(CONFIG_RSBAC_RES) + { + list_info.version = RSBAC_RES_USER_ACI_VERSION; + list_info.key = RSBAC_RES_USER_ACI_KEY; + list_info.desc_size = sizeof(rsbac_uid_t); + list_info.data_size = sizeof(struct rsbac_res_user_aci_t); + list_info.max_age = 0; + err = rsbac_list_register(RSBAC_LIST_VERSION, + &user_handles.res, + list_info, + #ifdef CONFIG_RSBAC_DEV_USER_BACKUP + RSBAC_LIST_BACKUP | + #endif + RSBAC_LIST_PERSIST, + rsbac_list_compare_u32, + NULL, + NULL, + RSBAC_RES_ACI_USER_NAME, + 0); + if(err) + { + registration_error(err, "USER RES"); + } + else + if(!rsbac_no_defaults && !rsbac_list_count(user_handles.res)) + { + struct rsbac_res_user_aci_t sysadm_aci = DEFAULT_RES_U_SYSADM_ACI; + struct rsbac_res_user_aci_t secoff_aci = DEFAULT_RES_U_SECOFF_ACI; + rsbac_uid_t user; + + printk(KERN_WARNING + "rsbac_do_init(): USER RES ACI could not be read - generating standard entries!\n"); + user = RSBAC_SYSADM_UID; + if(rsbac_list_add(user_handles.res, &user, &sysadm_aci)) + printk(KERN_WARNING + "rsbac_do_init(): SYSADM USER RES entry could not be added!\n"); + user = RSBAC_SECOFF_UID; + if(rsbac_list_add(user_handles.res, 
&user, &secoff_aci)) + printk(KERN_WARNING + "rsbac_do_init(): SECOFF USER RES entry could not be added!\n"); + } + } +#endif #ifdef CONFIG_RSBAC_DEBUG if(rsbac_debug_stack) @@ -5761,12 +6247,12 @@ static int __init rsbac_do_init(void) printk(KERN_DEBUG "rsbac_do_init(): registering PROCESS lists\n"); #endif { - rsbac_request_vector_t def_aci = 0; + struct rsbac_gen_process_aci_t def_aci = DEFAULT_GEN_P_ACI; list_info.version = RSBAC_GEN_PROCESS_ACI_VERSION; list_info.key = RSBAC_GEN_PROCESS_ACI_KEY; list_info.desc_size = sizeof(rsbac_pid_t); - list_info.data_size = sizeof(rsbac_request_vector_t); /* ind. process log */ + list_info.data_size = sizeof(struct rsbac_gen_process_aci_t); list_info.max_age = 0; err = rsbac_list_register(RSBAC_LIST_VERSION, &process_handles.gen, @@ -5785,24 +6271,31 @@ static int __init rsbac_do_init(void) #if defined(CONFIG_RSBAC_MAC) { struct rsbac_mac_process_aci_t def_aci = DEFAULT_MAC_P_ACI; + char name[RSBAC_MAXNAMELEN]; + int i; list_info.version = RSBAC_MAC_PROCESS_ACI_VERSION; list_info.key = RSBAC_MAC_PROCESS_ACI_KEY; list_info.desc_size = sizeof(rsbac_pid_t); list_info.data_size = sizeof(struct rsbac_mac_process_aci_t); list_info.max_age = 0; - err = rsbac_list_register(RSBAC_LIST_VERSION, - &process_handles.mac, - list_info, - RSBAC_LIST_DEF_DATA, - rsbac_list_compare_u32, - NULL, - &def_aci, - RSBAC_MAC_ACI_PROCESS_NAME, - 0); - if(err) + for(i=0; ipid || (p->pid == 1)) continue; + pid = p->pid; #ifdef CONFIG_RSBAC_DEBUG if (rsbac_debug_ds) printk(KERN_DEBUG "rsbac_do_init(): setting aci for process %u\n", - p->pid); + pid); #endif #ifdef CONFIG_RSBAC_MAC - if(rsbac_list_add(process_handles.mac, &p->pid, &mac_p_aci)) + if(rsbac_list_add(process_handles.mac[mac_p_hash(pid)], &pid, &mac_p_aci)) printk(KERN_WARNING "rsbac_do_init(): MAC ACI for Init process %u could not be added!", - p->pid); + pid); #endif #ifdef CONFIG_RSBAC_RC - if(rsbac_list_add(process_handles.rc, &p->pid, &rc_p_aci)) + 
if(rsbac_list_add(process_handles.rc[rc_p_hash(pid)], &pid, &rc_p_aci)) printk(KERN_WARNING "rsbac_do_init(): RC ACI for Init process %u could not be added!", - p->pid); + pid); #endif } read_unlock(&tasklist_lock); @@ -6712,6 +7249,7 @@ static int __init rsbac_do_init(void) dput(t_dentry); #endif auth_out: + { } } #endif @@ -6763,6 +7301,37 @@ static int __init rsbac_do_init(void) /* Tell that rsbac is initialized */ rsbac_initialized = TRUE; +/* Add initrd mount */ +#ifdef CONFIG_BLK_DEV_INITRD + if(initrd_start) + { + sb_p = get_super(MKDEV(RAMDISK_MAJOR,0)); + if(sb_p) + { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + rsbac_mount(sb_p); +#else + rsbac_mount(sb_p, NULL); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,8) + drop_super(sb_p); +#endif + } + sb_p = get_super(MKDEV(RAMDISK_MAJOR,INITRD_MINOR)); + if(sb_p) + { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + rsbac_mount(sb_p); +#else + rsbac_mount(sb_p, NULL); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,8) + drop_super(sb_p); +#endif + } + } +#endif + /* Add devfs mount */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) #ifdef CONFIG_DEVFS_MOUNT @@ -6777,21 +7346,6 @@ static int __init rsbac_do_init(void) #endif #endif - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,11) - if(sockfs_sb_p) - { - printk(KERN_WARNING "rsbac_do_init(): automounting sockfs device %02u:%02u\n", - MAJOR(sockfs_sb_p->s_dev), MINOR(sockfs_sb_p->s_dev)); - rsbac_mount(sockfs_sb_p, NULL); - } - if(pipefs_sb_p) - { - printk(KERN_WARNING "rsbac_do_init(): automounting pipefs device %02u:%02u\n", - MAJOR(pipefs_sb_p->s_dev), MINOR(pipefs_sb_p->s_dev)); - rsbac_mount(pipefs_sb_p, NULL); - } - #endif - /* Force a check, if configured */ #ifdef CONFIG_RSBAC_INIT_CHECK #ifdef CONFIG_RSBAC_DEBUG @@ -6990,7 +7544,11 @@ int sys_wait4(pid_t pid,unsigned int * s #endif +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_init(kdev_t root_dev) +#else int __init rsbac_init(kdev_t root_dev) +#endif { int err = 0; #if 
(defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)) \ @@ -7052,10 +7610,15 @@ int __init rsbac_init(kdev_t root_dev) #if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0) if(rsbac_initialized) { - /* Start rsbac thread for auto write */ + /* Start rsbacd thread for auto write */ rsbacd_pid = kernel_thread(rsbacd, NULL, 0); - printk(KERN_INFO "rsbac_init(): Started rsbacd thread with pid %u\n", - rsbacd_pid); + if(rsbacd_pid < 0) + printk(KERN_ERR + "rsbac_init(): *** Starting rsbacd thread failed with error %i! ***\n", + rsbacd_pid); + else + printk(KERN_INFO "rsbac_init(): Started rsbacd thread with pid %u\n", + rsbacd_pid); } #endif @@ -7099,23 +7662,42 @@ int rsbac_mount(struct super_block * sb_ } if (!rsbac_initialized) { - printk(KERN_WARNING - "rsbac_mount(): RSBAC not initialized while mounting DEV %02u:%02u, delaying\n", - MAJOR(sb_p->s_dev), MINOR(sb_p->s_dev)); - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,11) - if(sb_p->s_magic == SOCKFS_MAGIC) - { - printk(KERN_WARNING - "rsbac_mount(): sockfs mount detected, keeping values for later use\n"); - sockfs_sb_p = sb_p; - } - if(sb_p->s_magic == PIPEFS_MAGIC) + #ifdef CONFIG_RSBAC_INIT_DELAY + if( rsbac_delay_init + && ( ( !MAJOR(rsbac_delayed_root) + && !MINOR(rsbac_delayed_root) + && (MAJOR(sb_p->s_dev) > 1) + ) + || ( ( MAJOR(rsbac_delayed_root) + || MINOR(rsbac_delayed_root) + ) + && ( (MAJOR(sb_p->s_dev) == MAJOR(rsbac_delayed_root)) + && ( !MINOR(rsbac_delayed_root) + || (MINOR(sb_p->s_dev) == MINOR(rsbac_delayed_root)) + ) + ) + ) + ) + ) { - printk(KERN_WARNING - "rsbac_mount(): pipefs mount detected, keeping values for later use\n"); - pipefs_sb_p = sb_p; + if(MAJOR(rsbac_delayed_root) || MINOR(rsbac_delayed_root)) + printk(KERN_INFO + "rsbac_mount(): forcing delayed RSBAC init on DEV %02u:%02u, matching %02u:%02u!\n", + MAJOR(sb_p->s_dev), MINOR(sb_p->s_dev), + MAJOR(rsbac_delayed_root), MINOR(rsbac_delayed_root)); + else + printk(KERN_INFO + "rsbac_mount(): forcing 
delayed RSBAC init on DEV %02u:%02u!\n", + MAJOR(sb_p->s_dev), MINOR(sb_p->s_dev)); + rsbac_init(sb_p->s_dev); + return 0; } #endif + + printk(KERN_WARNING + "rsbac_mount(): RSBAC not initialized while mounting DEV %02u:%02u, delaying\n", + MAJOR(sb_p->s_dev), MINOR(sb_p->s_dev)); + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) #ifdef CONFIG_DEVFS_MOUNT if(sb_p->s_magic == DEVFS_SUPER_MAGIC) @@ -7127,7 +7709,6 @@ int rsbac_mount(struct super_block * sb_ } #endif #endif - return(-RSBAC_ENOTINITIALIZED); } #ifdef CONFIG_RSBAC_DEBUG @@ -7375,14 +7956,14 @@ int rsbac_umount(struct super_block * sb device_p->mount_count--; if(device_p->d_covers == d_covers) { - printk(KERN_WARNING "rsbac_mount: removed primary mount for device %02u:%02u, inheritance broken!\n", + printk(KERN_WARNING "rsbac_umount: removed primary mount for device %02u:%02u, inheritance broken!\n", MAJOR(sb_p->s_dev), MINOR(sb_p->s_dev)); device_p->d_covers = NULL; } } else { - printk(KERN_WARNING "rsbac_mount: device %02u:%02u has mount_count < 1!\n", + printk(KERN_WARNING "rsbac_umount: device %02u:%02u has mount_count < 1!\n", MAJOR(sb_p->s_dev), MINOR(sb_p->s_dev)); } } @@ -7565,6 +8146,18 @@ int rsbac_stats(void) fd_count); fd_sum += fd_count; #endif +#if defined(CONFIG_RSBAC_RES) + fd_count = 0; + for (i=0; i < RSBAC_RES_NR_FD_LISTS; i++) + { + tmp_count = rsbac_list_count(device_p->handles.res[i]); + if(tmp_count > 0) + fd_count+=tmp_count; + }; + printk(", %lu RES", + fd_count); + fd_sum += fd_count; +#endif printk("\n"); device_p = device_p->next; @@ -7717,6 +8310,12 @@ int rsbac_stats(void) tmp_count); user_sum += tmp_count; #endif +#if defined(CONFIG_RSBAC_RES) + tmp_count = rsbac_list_count(user_handles.res); + printk(", %lu RES", + tmp_count); + user_sum += tmp_count; +#endif printk("\n"); printk(KERN_INFO "Sum of %lu USER items\n", user_sum); @@ -7728,7 +8327,9 @@ int rsbac_stats(void) tmp_count); process_sum += tmp_count; #if defined(CONFIG_RSBAC_MAC) - tmp_count = 
rsbac_list_count(process_handles.mac); + tmp_count = 0; + for(i=0; ihandles.res[list_no], (void **) &fd_desc_p); + if(desc_count > 0) + { + for(i=0; isb_p) + { + if(rsbac_check_inode(device_p->sb_p, fd_desc_p[i])) + { /* inode is bad -> remove */ + b_count++; + if(correct) + { + printk(KERN_INFO + "rsbac_check(): fd_item for bad inode %u on device %02u:%02u, list %u, removing!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); + rsbac_list_remove(device_p->handles.res[list_no], &fd_desc_p[i]); + continue; + } + else + { + printk(KERN_INFO + "rsbac_check(): fd_item for bad inode %u on device %02u:%02u, list %u!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); + } + } /* end of bad_inode */ + } + } + tmp_count++; + vfree(fd_desc_p); + fd_count += desc_count; + } + } /* end of for-fd-list-array */ + #endif /* RES */ switch(correct) { @@ -8560,23 +9210,6 @@ int rsbac_write(boolean need_lock) err); } -#if defined(CONFIG_RSBAC_AUTH) - subcount = rsbac_write_auth(need_lock); - if(subcount > 0) - { - count += subcount; - } - else - if(subcount < 0) - { - err = subcount; - if(err != -RSBAC_ENOTWRITABLE) - printk(KERN_WARNING - "rsbac_write(): rsbac_write_auth() returned error %i\n", - err); - } -#endif - /****** REG *******/ #if defined(CONFIG_RSBAC_REG) subcount = rsbac_write_reg(need_lock); @@ -8792,12 +9425,37 @@ int rsbac_get_attr(enum rsbac_switch_tar device_p = lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_get_attr(): Could not lookup device %02u:%02u!\n", - MAJOR(tid.file.device), MINOR(tid.file.device)); - /* free read lock */ + struct super_block * sb_p; + rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + sb_p = get_super(tid.file.device); + if(sb_p) + { + printk(KERN_INFO + "rsbac_get_attr(): auto-mounting device %02u:%02u\n", + MAJOR(tid.file.device), MINOR(tid.file.device)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + rsbac_mount(sb_p); +#else + 
rsbac_mount(sb_p, NULL); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,8) + /* free super_block pointer */ + drop_super(sb_p); +#endif + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(tid.file.device); + if (!device_p) + { + printk(KERN_WARNING + "rsbac_get_attr(): unknown device %02u:%02u\n", + MAJOR(tid.file.device), MINOR(tid.file.device)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return -RSBAC_EINVALIDDEV; + } + } + else + return -RSBAC_EINVALIDDEV; } switch(module) { @@ -8807,31 +9465,34 @@ int rsbac_get_attr(enum rsbac_switch_tar if(attr == A_internal) { - if(device_p->rsbac_dir_inode == tid.file.inode) - value->internal = TRUE; + if(!device_p->rsbac_dir_inode || !tid.file.inode) + value->internal = FALSE; else - if(inherit) - { - enum rsbac_target_t parent_target; - union rsbac_target_id_t parent_tid; + if(device_p->rsbac_dir_inode == tid.file.inode) + value->internal = TRUE; + else + if(inherit) + { + enum rsbac_target_t parent_target; + union rsbac_target_id_t parent_tid; - /* inheritance possible? */ - if(!rsbac_get_parent(target, tid, &parent_target, &parent_tid)) - { /* yes: inherit this single level */ - if(device_p->rsbac_dir_inode == parent_tid.file.inode) - value->internal = TRUE; - else + /* inheritance possible? 
*/ + if(!rsbac_get_parent(target, tid, &parent_target, &parent_tid)) + { /* yes: inherit this single level */ + if(device_p->rsbac_dir_inode == parent_tid.file.inode) + value->internal = TRUE; + else + value->internal = FALSE; + } + else + { value->internal = FALSE; - } - else - { - value->internal = FALSE; - } - } - else - { - value->internal = FALSE; - } + } + } + else + { + value->internal = FALSE; + } /* free access to device_list_head */ rsbac_read_unlock(&device_list_head.lock, &dflags); @@ -8854,6 +9515,9 @@ int rsbac_get_attr(enum rsbac_switch_tar case A_symlink_add_uid: value->symlink_add_uid = aci.symlink_add_uid; break; + case A_symlink_add_mac_level: + value->symlink_add_mac_level = aci.symlink_add_mac_level; + break; case A_symlink_add_rc_role: value->symlink_add_rc_role = aci.symlink_add_rc_role; break; @@ -8953,6 +9617,39 @@ int rsbac_get_attr(enum rsbac_switch_tar case A_mac_trusted_for_user: value->mac_trusted_for_user = aci.mac_trusted_for_user; break; + case A_mac_auto: + value->mac_auto = aci.mac_auto; + if( (value->mac_auto == MA_inherit) + && inherit + ) + { + enum rsbac_target_t parent_target; + union rsbac_target_id_t parent_tid; + + /* free access to device_list_head - see above */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + + /* inheritance possible? 
*/ + if(!rsbac_get_parent(target, tid, &parent_target, &parent_tid)) + { + target = parent_target; + tid = parent_tid; + continue; + } + else + { + value->mac_auto + = def_mac_root_dir_aci.mac_auto; + return 0; + } + } + break; + case A_mac_prop_trusted: + value->mac_prop_trusted = aci.mac_prop_trusted; + break; + case A_mac_file_flags: + value->mac_file_flags = aci.mac_file_flags; + break; default: err = -RSBAC_EINVALIDATTR; @@ -9096,6 +9793,31 @@ int rsbac_get_attr(enum rsbac_switch_tar case A_ms_sock_trusted_udp: value->ms_sock_trusted_udp = aci.ms_sock_trusted_udp; break; + case A_ms_need_scan: + value->ms_need_scan = aci.ms_need_scan; + if(value->ms_need_scan == MS_need_scan_inherit && inherit) + { + enum rsbac_target_t parent_target; + union rsbac_target_id_t parent_tid; + + /* free access to device_list_head - see above */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + + /* inheritance possible? */ + if(!rsbac_get_parent(target, tid, &parent_target, &parent_tid)) + { + target = parent_target; + tid = parent_tid; + continue; + } + else + { + value->ms_need_scan + = def_ms_root_dir_aci.ms_need_scan; + return 0; + } + } + break; default: err = -RSBAC_EINVALIDATTR; } @@ -9118,19 +9840,14 @@ int rsbac_get_attr(enum rsbac_switch_tar value->ff_flags = ff_flags; if((ff_tmp_flags & FF_add_inherited) && inherit) { - enum rsbac_target_t parent_target; - union rsbac_target_id_t parent_tid; - /* inheritance possible? 
*/ - if(!rsbac_get_parent(target, tid, &parent_target, &parent_tid)) + if(!rsbac_get_parent(target, tid, &target, &tid)) { /* free access to device_list_head - see above */ rsbac_read_unlock(&device_list_head.lock, &dflags); ff_mask &= ~(FF_no_delete_or_rename | FF_add_inherited); ff_flags &= ~(FF_add_inherited); - target = parent_target; - tid = parent_tid; continue; } else @@ -9284,6 +10001,29 @@ int rsbac_get_attr(enum rsbac_switch_tar break; #endif /* CAP */ +#if defined(CONFIG_RSBAC_RES) + case RES: + { + struct rsbac_res_fd_aci_t aci = DEFAULT_RES_FD_ACI; + + rsbac_list_get_data(device_p->handles.res[res_fd_hash(tid.file.inode)], + &tid.file.inode, + &aci); + switch (attr) + { + case A_res_min: + memcpy(&value->res_array, &aci.res_min, sizeof(aci.res_min)); + break; + case A_res_max: + memcpy(&value->res_array, &aci.res_max, sizeof(aci.res_max)); + break; + default: + err = -RSBAC_EINVALIDATTR; + } + } + break; +#endif /* RES */ + default: err = -RSBAC_EINVALIDMODULE; } @@ -9634,15 +10374,30 @@ int rsbac_get_attr(enum rsbac_switch_tar switch (attr) { case A_security_level: - value->security_level = aci.access_appr; + value->security_level = aci.security_level; + break; + case A_initial_security_level: + value->security_level = aci.initial_security_level; + break; + case A_min_security_level: + value->security_level = aci.min_security_level; break; case A_mac_categories: value->mac_categories = aci.mac_categories; break; + case A_mac_initial_categories: + value->mac_categories = aci.mac_initial_categories; + break; + case A_mac_min_categories: + value->mac_categories = aci.mac_min_categories; + break; case A_system_role: case A_mac_role: value->system_role = aci.system_role; break; + case A_mac_user_flags: + value->mac_user_flags = aci.mac_user_flags; + break; default: err = -RSBAC_EINVALIDATTR; @@ -9854,6 +10609,41 @@ int rsbac_get_attr(enum rsbac_switch_tar break; #endif /* JAIL */ +#if defined(CONFIG_RSBAC_RES) + case RES: + { + struct rsbac_res_user_aci_t 
aci = DEFAULT_RES_U_ACI; + + if( rsbac_list_get_data(user_handles.res, + &tid.user, + &aci) + && (tid.user != RSBAC_ALL_USERS) + ) + { + tid.user = RSBAC_ALL_USERS; + rsbac_list_get_data(user_handles.res, + &tid.user, + &aci); + } + switch (attr) + { + case A_system_role: + case A_res_role: + value->system_role = aci.res_role; + break; + case A_res_min: + memcpy(&value->res_array, &aci.res_min, sizeof(aci.res_min)); + break; + case A_res_max: + memcpy(&value->res_array, &aci.res_max, sizeof(aci.res_max)); + break; + default: + err = -RSBAC_EINVALIDATTR; + } + } + break; +#endif /* RES */ + default: err = -RSBAC_EINVALIDMODULE; } @@ -9871,15 +10661,15 @@ int rsbac_get_attr(enum rsbac_switch_tar { case GEN: { - rsbac_request_vector_t rv = 0; + struct rsbac_gen_process_aci_t aci = DEFAULT_GEN_P_ACI; rsbac_list_get_data(process_handles.gen, &tid.process, - &rv); + &aci); switch (attr) { case A_log_program_based: - value->log_program_based = rv; + value->log_program_based = aci.log_program_based; break; default: err = -RSBAC_EINVALIDATTR; @@ -9892,7 +10682,7 @@ int rsbac_get_attr(enum rsbac_switch_tar { struct rsbac_mac_process_aci_t aci = DEFAULT_MAC_P_ACI; - rsbac_list_get_data(process_handles.mac, + rsbac_list_get_data(process_handles.mac[mac_p_hash(tid.process)], &tid.process, &aci); switch (attr) @@ -9900,9 +10690,21 @@ int rsbac_get_attr(enum rsbac_switch_tar case A_security_level: value->security_level = aci.owner_sec_level; break; + case A_initial_security_level: + value->security_level = aci.owner_initial_sec_level; + break; + case A_min_security_level: + value->security_level = aci.owner_min_sec_level; + break; case A_mac_categories: value->mac_categories = aci.mac_owner_categories; break; + case A_mac_initial_categories: + value->mac_categories = aci.mac_owner_initial_categories; + break; + case A_mac_min_categories: + value->mac_categories = aci.mac_owner_min_categories; + break; case A_current_sec_level: value->current_sec_level = aci.current_sec_level; 
break; @@ -9921,11 +10723,17 @@ int rsbac_get_attr(enum rsbac_switch_tar case A_max_read_categories: value->mac_categories = aci.max_read_categories; break; + case A_mac_process_flags: + value->mac_process_flags = aci.mac_process_flags; + break; case A_mac_auto: - value->mac_auto = aci.mac_auto; + if(aci.mac_process_flags & MAC_auto) + value->mac_auto = TRUE; + else + value->mac_auto = FALSE; break; - case A_mac_trusted: - value->mac_trusted = aci.mac_trusted; + case A_mac_trusted_for_user: + value->mac_trusted_for_user = aci.mac_trusted_for_user; break; default: @@ -9992,7 +10800,7 @@ int rsbac_get_attr(enum rsbac_switch_tar { struct rsbac_rc_process_aci_t aci = DEFAULT_RC_P_ACI; - rsbac_list_get_data(process_handles.rc, + rsbac_list_get_data(process_handles.rc[rc_p_hash(tid.process)], &tid.process, &aci); switch (attr) @@ -10036,6 +10844,26 @@ int rsbac_get_attr(enum rsbac_switch_tar break; #endif /* AUTH */ +#if defined(CONFIG_RSBAC_CAP) + case CAP: + { + struct rsbac_cap_process_aci_t aci = DEFAULT_CAP_P_ACI; + + rsbac_list_get_data(process_handles.cap, + &tid.process, + &aci); + switch (attr) + { + case A_cap_process_hiding: + value->cap_process_hiding = aci.cap_process_hiding; + break; + default: + err = -RSBAC_EINVALIDATTR; + } + } + break; +#endif /* CAP */ + #if defined(CONFIG_RSBAC_JAIL) case JAIL: { @@ -10744,11 +11572,37 @@ int rsbac_set_attr(enum rsbac_switch_tar device_p = lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_set_attr(): Could not lookup device!\n"); - /* free read lock */ + struct super_block * sb_p; + rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + sb_p = get_super(tid.file.device); + if(sb_p) + { + printk(KERN_INFO + "rsbac_set_attr(): auto-mounting device %02u:%02u\n", + MAJOR(tid.file.device), MINOR(tid.file.device)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + rsbac_mount(sb_p); +#else + rsbac_mount(sb_p, NULL); +#endif +#if LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,4,8) + /* free super_block pointer */ + drop_super(sb_p); +#endif + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(tid.file.device); + if (!device_p) + { + printk(KERN_WARNING + "rsbac_set_attr(): unknown device %02u:%02u\n", + MAJOR(tid.file.device), MINOR(tid.file.device)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return -RSBAC_EINVALIDDEV; + } + } + else + return -RSBAC_EINVALIDDEV; } switch(module) { @@ -10773,6 +11627,9 @@ int rsbac_set_attr(enum rsbac_switch_tar case A_symlink_add_uid: aci.symlink_add_uid = value.symlink_add_uid; break; + case A_symlink_add_mac_level: + aci.symlink_add_mac_level = value.symlink_add_mac_level; + break; case A_symlink_add_rc_role: aci.symlink_add_rc_role = value.symlink_add_rc_role; break; @@ -10810,6 +11667,15 @@ int rsbac_set_attr(enum rsbac_switch_tar case A_mac_trusted_for_user: aci.mac_trusted_for_user = value.mac_trusted_for_user; break; + case A_mac_auto: + aci.mac_auto = value.mac_auto; + break; + case A_mac_prop_trusted: + aci.mac_prop_trusted = value.mac_prop_trusted; + break; + case A_mac_file_flags: + aci.mac_file_flags = value.mac_file_flags & RSBAC_MAC_F_FLAGS; + break; default: err = -RSBAC_EINVALIDATTR; @@ -10920,6 +11786,9 @@ int rsbac_set_attr(enum rsbac_switch_tar case A_ms_sock_trusted_udp: aci.ms_sock_trusted_udp = value.ms_sock_trusted_udp; break; + case A_ms_need_scan: + aci.ms_need_scan = value.ms_need_scan; + break; default: err = -RSBAC_EINVALIDATTR; } @@ -11043,6 +11912,38 @@ int rsbac_set_attr(enum rsbac_switch_tar break; #endif +#if defined(CONFIG_RSBAC_RES) + case RES: + { + struct rsbac_res_fd_aci_t aci = DEFAULT_RES_FD_ACI; + + rsbac_list_get_data(device_p->handles.res[res_fd_hash(tid.file.inode)], + &tid.file.inode, + &aci); + switch (attr) + { + case A_res_min: + memcpy(&aci.res_min, &value.res_array, sizeof(aci.res_min)); + break; + case A_res_max: + memcpy(&aci.res_max, &value.res_array, sizeof(aci.res_max)); + break; + default: + 
err = -RSBAC_EINVALIDATTR; + } + if(!err) + { + struct rsbac_res_fd_aci_t def_aci = DEFAULT_RES_FD_ACI; + + if(memcmp(&aci, &def_aci, sizeof(aci))) + err = rsbac_list_add(device_p->handles.res[res_fd_hash(tid.file.inode)], + &tid.file.inode, + &aci); + } + } + break; +#endif + default: err = -RSBAC_EINVALIDMODULE; } @@ -11419,15 +12320,52 @@ int rsbac_set_attr(enum rsbac_switch_tar switch (attr) { case A_security_level: - aci.access_appr = value.security_level; + if(value.security_level < aci.min_security_level) + err = -RSBAC_EINVALIDVALUE; + else + aci.security_level = value.security_level; + break; + case A_initial_security_level: + if( (value.security_level < aci.min_security_level) + || (value.security_level > aci.security_level) + ) + err = -RSBAC_EINVALIDVALUE; + else + aci.initial_security_level = value.security_level; + break; + case A_min_security_level: + if(value.security_level > aci.security_level) + err = -RSBAC_EINVALIDVALUE; + else + aci.min_security_level = value.security_level; break; case A_mac_categories: - aci.mac_categories = value.mac_categories; + if((value.mac_categories & aci.mac_min_categories) != aci.mac_min_categories) + err = -RSBAC_EINVALIDVALUE; + else + aci.mac_categories = value.mac_categories; + break; + case A_mac_initial_categories: + if( ((value.mac_categories & aci.mac_min_categories) != aci.mac_min_categories) + || ((value.mac_categories & aci.mac_categories) != value.mac_categories) + ) + err = -RSBAC_EINVALIDVALUE; + else + aci.mac_initial_categories = value.mac_categories; + break; + case A_mac_min_categories: + if((value.mac_categories & aci.mac_categories) != value.mac_categories) + err = -RSBAC_EINVALIDVALUE; + else + aci.mac_min_categories = value.mac_categories; break; case A_system_role: case A_mac_role: aci.system_role = value.system_role; break; + case A_mac_user_flags: + aci.mac_user_flags = value.mac_user_flags & RSBAC_MAC_U_FLAGS; + break; default: err = -RSBAC_EINVALIDATTR; } @@ -11600,7 +12538,7 @@ int 
rsbac_set_attr(enum rsbac_switch_tar switch (attr) { case A_system_role: - case A_auth_role: + case A_cap_role: aci.cap_role = value.system_role; break; case A_min_caps: @@ -11642,6 +12580,53 @@ int rsbac_set_attr(enum rsbac_switch_tar break; #endif +#if defined(CONFIG_RSBAC_RES) + case RES: + { + struct rsbac_res_user_aci_t aci = DEFAULT_RES_U_ACI; + + rsbac_list_get_data(user_handles.res, + &tid.user, + &aci); + switch (attr) + { + case A_system_role: + case A_res_role: + aci.res_role = value.system_role; + break; + case A_res_min: + memcpy(&aci.res_min, &value.res_array, sizeof(aci.res_min)); + break; + case A_res_max: + memcpy(&aci.res_max, &value.res_array, sizeof(aci.res_max)); + break; + default: + err = -RSBAC_EINVALIDATTR; + } + if(!err) + { + struct rsbac_res_user_aci_t def_aci = DEFAULT_RES_U_ACI; + + if(tid.user != RSBAC_ALL_USERS) + { + rsbac_uid_t all_users = RSBAC_ALL_USERS; + + rsbac_list_get_data(user_handles.res, + &all_users, + &def_aci); + } + if(memcmp(&aci, &def_aci, sizeof(aci))) + err = rsbac_list_add(user_handles.res, + &tid.user, + &aci); + else + err = rsbac_list_remove(user_handles.res, + &tid.user); + } + } + break; +#endif + default: err = -RSBAC_EINVALIDMODULE; } @@ -11666,18 +12651,25 @@ int rsbac_set_attr(enum rsbac_switch_tar { case GEN: { - rsbac_request_vector_t rv = value.log_program_based; + struct rsbac_gen_process_aci_t aci = DEFAULT_GEN_P_ACI; + rsbac_list_get_data(process_handles.gen, + &tid.process, + &aci); switch (attr) { case A_log_program_based: - err = rsbac_list_add(process_handles.gen, - &tid.process, - &rv); + aci.log_program_based = value.log_program_based; break; default: err = -RSBAC_EINVALIDATTR; } + if(!err) + { + err = rsbac_list_add(process_handles.gen, + &tid.process, + &aci); + } } break; @@ -11686,7 +12678,7 @@ int rsbac_set_attr(enum rsbac_switch_tar { struct rsbac_mac_process_aci_t aci = DEFAULT_MAC_P_ACI; - rsbac_list_get_data(process_handles.mac, + 
rsbac_list_get_data(process_handles.mac[mac_p_hash(tid.process)], &tid.process, &aci); switch (attr) @@ -11694,9 +12686,21 @@ int rsbac_set_attr(enum rsbac_switch_tar case A_security_level: aci.owner_sec_level = value.security_level; break; + case A_initial_security_level: + aci.owner_initial_sec_level = value.security_level; + break; + case A_min_security_level: + aci.owner_min_sec_level = value.security_level; + break; case A_mac_categories: aci.mac_owner_categories = value.mac_categories; break; + case A_mac_initial_categories: + aci.mac_owner_initial_categories = value.mac_categories; + break; + case A_mac_min_categories: + aci.mac_owner_min_categories = value.mac_categories; + break; case A_current_sec_level: aci.current_sec_level = value.current_sec_level; break; @@ -11715,18 +12719,24 @@ int rsbac_set_attr(enum rsbac_switch_tar case A_max_read_categories: aci.max_read_categories = value.mac_categories; break; + case A_mac_process_flags: + aci.mac_process_flags = value.mac_process_flags & RSBAC_MAC_P_FLAGS; + break; case A_mac_auto: - aci.mac_auto = value.mac_auto; + if(value.mac_auto) + aci.mac_process_flags |= MAC_auto; + else + aci.mac_process_flags &= ~MAC_auto; break; - case A_mac_trusted: - aci.mac_trusted = value.mac_trusted; + case A_mac_trusted_for_user: + aci.mac_trusted_for_user = value.mac_trusted_for_user; break; default: err = -RSBAC_EINVALIDATTR; } if(!err) { - err = rsbac_list_add(process_handles.mac, + err = rsbac_list_add(process_handles.mac[mac_p_hash(tid.process)], &tid.process, &aci); } @@ -11803,7 +12813,7 @@ int rsbac_set_attr(enum rsbac_switch_tar { struct rsbac_rc_process_aci_t aci = DEFAULT_RC_P_ACI; - rsbac_list_get_data(process_handles.rc, + rsbac_list_get_data(process_handles.rc[rc_p_hash(tid.process)], &tid.process, &aci); switch (attr) @@ -11822,7 +12832,7 @@ int rsbac_set_attr(enum rsbac_switch_tar } if(!err) { - err = rsbac_list_add(process_handles.rc, + err = rsbac_list_add(process_handles.rc[rc_p_hash(tid.process)], 
&tid.process, &aci); } @@ -11859,6 +12869,32 @@ int rsbac_set_attr(enum rsbac_switch_tar break; #endif +#if defined(CONFIG_RSBAC_CAP) + case CAP: + { + struct rsbac_cap_process_aci_t aci = DEFAULT_CAP_P_ACI; + + rsbac_list_get_data(process_handles.cap, + &tid.process, + &aci); + switch (attr) + { + case A_cap_process_hiding: + aci.cap_process_hiding = value.cap_process_hiding; + break; + default: + err = -RSBAC_EINVALIDATTR; + } + if(!err) + { + err = rsbac_list_add(process_handles.cap, + &tid.process, + &aci); + } + } + break; +#endif + #if defined(CONFIG_RSBAC_JAIL) case JAIL: { @@ -12484,9 +13520,9 @@ int rsbac_remove_target(enum rsbac_targe "rsbac_remove_target(): Removing file/dir/fifo/symlink ACI"); #endif #if defined(CONFIG_RSBAC_AUTH) - /* file items can also have an auth_f_capset -> remove first */ + /* file items can also have auth_f_capsets -> remove first */ if(target == T_FILE) - error = rsbac_auth_remove_f_capset(tid.file); + error = rsbac_auth_remove_f_capsets(tid.file); #endif #if defined(CONFIG_RSBAC_ACL) /* items can also have an acl_fd_item -> remove first */ @@ -12498,8 +13534,40 @@ int rsbac_remove_target(enum rsbac_targe /* lookup device */ device_p = lookup_device(tid.file.device); - if (device_p) + if (!device_p) { + struct super_block * sb_p; + + rsbac_read_unlock(&device_list_head.lock, &dflags); + sb_p = get_super(tid.file.device); + if(sb_p) + { + printk(KERN_INFO + "rsbac_remove_target(): auto-mounting device %02u:%02u\n", + MAJOR(tid.file.device), MINOR(tid.file.device)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + rsbac_mount(sb_p); +#else + rsbac_mount(sb_p, NULL); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,8) + /* free super_block pointer */ + drop_super(sb_p); +#endif + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(tid.file.device); + if (!device_p) + { + printk(KERN_WARNING + "rsbac_remove_target(): unknown device %02u:%02u\n", + MAJOR(tid.file.device), MINOR(tid.file.device)); + 
rsbac_read_unlock(&device_list_head.lock, &dflags); + return -RSBAC_EINVALIDDEV; + } + } + else + return -RSBAC_EINVALIDDEV; + } rsbac_list_remove(device_p->handles.gen[gen_fd_hash(tid.file.inode)], &tid.file.inode); #if defined(CONFIG_RSBAC_MAC) @@ -12540,9 +13608,10 @@ int rsbac_remove_target(enum rsbac_targe rsbac_list_remove(device_p->handles.cap[cap_fd_hash(tid.file.inode)], &tid.file.inode); #endif - } - else - error = -RSBAC_EINVALIDDEV; +#if defined(CONFIG_RSBAC_RES) + rsbac_list_remove(device_p->handles.res[res_fd_hash(tid.file.inode)], + &tid.file.inode); +#endif /* free access to device_list_head */ rsbac_read_unlock(&device_list_head.lock, &dflags); @@ -12656,6 +13725,10 @@ int rsbac_remove_target(enum rsbac_targe rsbac_list_remove(user_handles.jail, &tid.user); #endif +#if defined(CONFIG_RSBAC_RES) + rsbac_list_remove(user_handles.res, + &tid.user); +#endif break; case T_PROCESS: @@ -12673,7 +13746,7 @@ int rsbac_remove_target(enum rsbac_targe rsbac_list_remove(process_handles.gen, &tid.process); #if defined(CONFIG_RSBAC_MAC) - rsbac_list_remove(process_handles.mac, + rsbac_list_remove(process_handles.mac[mac_p_hash(tid.process)], &tid.process); #endif #if defined(CONFIG_RSBAC_PM) @@ -12685,15 +13758,19 @@ int rsbac_remove_target(enum rsbac_targe &tid.process); #endif #if defined(CONFIG_RSBAC_RC) - rsbac_list_remove(process_handles.rc, + rsbac_list_remove(process_handles.rc[rc_p_hash(tid.process)], &tid.process); #endif #if defined(CONFIG_RSBAC_AUTH) - /* process items can also have an auth_p_capset -> remove first */ - error = rsbac_auth_remove_p_capset(tid.process); + /* process items can also have auth_p_capsets -> remove first */ + error = rsbac_auth_remove_p_capsets(tid.process); rsbac_list_remove(process_handles.auth, &tid.process); #endif +#if defined(CONFIG_RSBAC_CAP) + rsbac_list_remove(process_handles.cap, + &tid.process); +#endif #if defined(CONFIG_RSBAC_JAIL) rsbac_list_remove(process_handles.jail, &tid.process); diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/acl_data_structures.c linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/acl_data_structures.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/acl_data_structures.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/acl_data_structures.c 2003-08-25 20:33:02.000000000 +0200 @@ -1,9 +1,9 @@ /*************************************************** */ /* Rule Set Based Access Control */ /* Implementation of ACL data structures */ -/* Author and (c) 1999-2002: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 02/Sep/2002 */ +/* Last modified: 14/Jan/2003 */ /*************************************************** */ #include @@ -1783,7 +1783,11 @@ out: /* data is kept in memory for performance reasons, but is written to disk */ /* on every change. */ +#ifdef CONFIG_RSBAC_INIT_DELAY +static void registration_error(int err, char * listname) +#else static void __init registration_error(int err, char * listname) +#endif { if(err) { @@ -1803,7 +1807,11 @@ static void __init registration_error(in /* Because there can be no access to aci data structures before init, */ /* rsbac_init_acl() will initialize all rw-spinlocks to unlocked. 
*/ +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_init_acl(void) +#else int __init rsbac_init_acl(void) +#endif { int err = 0; struct rsbac_acl_device_list_item_t * device_p = NULL; @@ -2052,6 +2060,7 @@ int __init rsbac_init_acl(void) struct rsbac_acl_entry_t sysadm_kmem_entry = RSBAC_ACL_SYSADM_SCD_KMEM_ENTRY; #endif struct rsbac_acl_entry_t acman_other_entry = RSBAC_ACL_ACMAN_SCD_OTHER_ENTRY; + struct rsbac_acl_entry_t auditor_rsbaclog_entry = RSBAC_ACL_AUDITOR_SCD_RSBACLOG_ENTRY; enum rsbac_acl_scd_type_t scd; printk(KERN_WARNING @@ -2073,6 +2082,13 @@ int __init rsbac_init_acl(void) rsbac_list_lol_subadd(scd_handle, &scd, &desc, &sysadm_entry.rights); } } + scd = ST_rsbaclog; + if(!rsbac_list_lol_add(scd_handle, &scd, &mask)) + { + desc.subj_type = auditor_rsbaclog_entry.subj_type; + desc.subj_id = auditor_rsbaclog_entry.subj_id; + rsbac_list_lol_subadd(scd_handle, &scd, &desc, &auditor_rsbaclog_entry.rights); + } scd=ST_network; if(!rsbac_list_lol_add(scd_handle, &scd, &mask)) { @@ -3126,7 +3142,7 @@ int rsbac_check_acl(int correct, int che f_sum += f_count; /* go on */ device_p = device_p->next; - }; + } printk(KERN_INFO "rsbac_check_acl(): Sum of %u Devices with %lu file/dir ACLs\n", device_list_head.count, f_sum); /* free access to device_list_head */ @@ -3684,11 +3700,20 @@ int rsbac_acl_set_acl_entry (enum device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_set_acl_entry(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_set_acl_entry(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } list_no = 
fd_hash(tid.file.inode); if(!rsbac_list_lol_exist(device_p->handles[list_no], &tid.file.inode)) @@ -3949,11 +3974,20 @@ int rsbac_acl_remove_acl_entry (enum device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_remove_acl_entry(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_remove_acl_entry(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } list_no = fd_hash(tid.file.inode); err = rsbac_list_lol_subremove(device_p->handles[list_no], &tid.file.inode, &desc); @@ -4215,17 +4249,23 @@ int rsbac_acl_remove_acl (enum rsb device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_remove_acl(): Could not lookup device!\n"); - err = -RSBAC_EINVALIDDEV; - } - else - { - list_no = fd_hash(tid.file.inode); - err = rsbac_list_lol_remove(device_p->handles[list_no], &tid.file.inode); + /* trigger rsbac_mount() */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_remove_acl(): Could not lookup device!\n"); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return -RSBAC_EINVALIDDEV; + } } + list_no = fd_hash(tid.file.inode); + err = rsbac_list_lol_remove(device_p->handles[list_no], &tid.file.inode); rsbac_read_unlock(&device_list_head.lock, &dflags); - /* ready. 
*/ return err; case T_DEV: @@ -4374,11 +4414,20 @@ int rsbac_acl_add_to_acl_entry (enum device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_set_acl_entry(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_set_acl_entry(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } /* protect this list */ list_no = fd_hash(tid.file.inode); @@ -4748,11 +4797,20 @@ int rsbac_acl_remove_from_acl_entry(enum device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_set_acl_entry(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_remove_from_acl_entry(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } list_no = fd_hash(tid.file.inode); if(!rsbac_list_lol_get_subdata(device_p->handles[list_no], @@ -5082,11 +5140,20 @@ int rsbac_acl_set_mask (enum rs device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_set_mask(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* 
retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_set_mask(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } list_no = fd_hash(tid.file.inode); err = rsbac_list_lol_add(device_p->handles[list_no], &tid.file.inode, &mask); @@ -5242,11 +5309,20 @@ int rsbac_acl_get_mask (enum rs device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_get_mask(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_get_mask(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } list_no = fd_hash(tid.file.inode); err = rsbac_list_lol_get_data(device_p->handles[list_no], &tid.file.inode, mask_p); @@ -5478,12 +5554,21 @@ int rsbac_acl_get_rights (enum r device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_get_rights(): Could not lookup device %02u:%02u!\n", - MAJOR(tid.file.device),MINOR(tid.file.device)); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_get_rights(): Could not lookup device %02u:%02u!\n", + MAJOR(tid.file.device),MINOR(tid.file.device)); + /* free read lock */ + 
rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } list_no = fd_hash(tid.file.inode); if(!rsbac_list_lol_get_subdata(device_p->handles[list_no], @@ -6028,11 +6113,20 @@ int rsbac_acl_get_single_right (enum r device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_get_single_right(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_get_single_right(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } list_no = fd_hash(tid.file.inode); if( !rsbac_list_lol_get_subdata(device_p->handles[list_no], @@ -6725,11 +6819,20 @@ int rsbac_acl_get_tlist (enum rs device_p = acl_lookup_device(tid.file.device); if (!device_p) { - printk(KERN_WARNING - "rsbac_acl_get_tlist(): Could not lookup device!\n"); - /* free read lock */ + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(tid.file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = acl_lookup_device(tid.file.device); + if(!device_p) + { + printk(KERN_WARNING + "rsbac_acl_get_tlist(): Could not lookup device!\n"); + /* free read lock */ + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } /* protect this list */ list_no = fd_hash(tid.file.inode); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/auth_data_structures.c linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/auth_data_structures.c --- 
linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/auth_data_structures.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/auth_data_structures.c 2003-08-25 20:33:02.000000000 +0200 @@ -1,9 +1,9 @@ /*************************************************** */ /* Rule Set Based Access Control */ /* Implementation of AUTH data structures */ -/* Author and (c) 1999-2001: Amon Ott */ +/* Author and (c) 1999-2003: Amon Ott */ /* */ -/* Last modified: 31/Jul/2001 */ +/* Last modified: 21/Jan/2003 */ /*************************************************** */ #include @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -33,7 +34,12 @@ /* The following global variables are needed for access to PM data. */ static struct rsbac_auth_device_list_head_t device_list_head; -static struct rsbac_auth_p_cap_set_list_head_t p_cap_set_list_head; + +static rsbac_list_handle_t process_handle = NULL; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER +static rsbac_list_handle_t process_eff_handle = NULL; +static rsbac_list_handle_t process_fs_handle = NULL; +#endif /**************************************************/ /* Declarations of external functions */ @@ -45,353 +51,314 @@ boolean writable(struct super_block * sb /* Declarations of internal functions */ /**************************************************/ -static struct rsbac_auth_cap_set_sublist_item_t * - add_f_cap_set_subitem(struct rsbac_auth_f_cap_set_list_item_t * set, - rsbac_uid_t first_id, - rsbac_uid_t last_id); - -static struct rsbac_auth_f_cap_set_list_item_t * - lookup_f_cap_set(struct rsbac_auth_device_list_item_t * device_p, - rsbac_auth_f_cap_set_id_t cap_set); - -static struct rsbac_auth_f_cap_set_list_item_t* - add_f_cap_set_item(struct rsbac_auth_device_list_item_t * device_p, - rsbac_auth_f_cap_set_id_t id); - /************************************************* */ /* Internal Help functions */ /************************************************* */ -/* These 
help functions do NOT handle data consistency protection by */ -/* rw-spinlocks! This is done exclusively by non-internal functions! */ +static inline int fd_hash(rsbac_inode_nr_t inode) + { + return(inode % RSBAC_AUTH_NR_CAP_FD_LISTS); + } +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER +static inline int eff_fd_hash(rsbac_inode_nr_t inode) + { + return(inode % RSBAC_AUTH_NR_CAP_EFF_FD_LISTS); + } +static inline int fs_fd_hash(rsbac_inode_nr_t inode) + { + return(inode % RSBAC_AUTH_NR_CAP_FS_FD_LISTS); + } +#endif -/************************************************************************** */ -/* Read/Write functions */ -/* The read function is only called from rsbac_init_auth(), the write */ -/* function is called on every change from all functions that modify data. */ +static int cap_compare(void * desc1, void * desc2) + { + struct rsbac_auth_cap_range_t * range1 = desc1; + struct rsbac_auth_cap_range_t * range2 = desc2; -/* read_f_cap_set_list() */ -/* reading a device AUTH list from filesystem */ + if(!desc1 || !desc2) + return 0; + if(range1->first < range2->first) + return -1; + if(range1->first > range2->first) + return 1; + if(range1->last < range2->last) + return -1; + if(range1->last > range2->last) + return 1; + return 0; + }; -static int read_f_cap_set_list(struct rsbac_auth_device_list_item_t * device_p, - kdev_t kdev) +static int single_cap_compare(void * desc1, void * desc2) { - struct file file; + struct rsbac_auth_cap_range_t * range = desc1; + rsbac_uid_t * uid = desc2; + + if(!desc1 || !desc2) + return 0; + if( (*uid < range->first) + || (*uid > range->last) + ) + return 1; + else + return 0; + }; + +/* auth_register_fd_lists() */ +/* register fd ACL lists for device */ + +static int auth_register_fd_lists(struct rsbac_auth_device_list_item_t * device_p, + kdev_t kdev) + { + char * name; int err = 0; - u_int i; int tmperr; - u_long read_count = 0; - rsbac_auth_f_cap_set_id_t id; - rsbac_uid_t sub_first; - rsbac_uid_t sub_last; - rsbac_old_uid_t old_sub_id; 
- u_int len; - rsbac_version_t aci_version; - mm_segment_t oldfs; + char number[10]; + u_int file_no; + struct rsbac_list_lol_info_t lol_info; if(!device_p) return(-RSBAC_EINVALIDPOINTER); + name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(!name) + return -RSBAC_ENOMEM; + + /* register all the AUTH lists of lists */ + for (file_no = 0; file_no < RSBAC_AUTH_NR_CAP_FD_LISTS; file_no++) + { + /* construct name from base name + number */ + strcpy(name, RSBAC_AUTH_FD_FILENAME); + strcat(name, inttostr(number,file_no) ); + + lol_info.version = RSBAC_AUTH_FD_LIST_VERSION; + lol_info.key = RSBAC_AUTH_LIST_KEY; + lol_info.desc_size = sizeof(rsbac_inode_nr_t); + lol_info.data_size = 0; + lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t); + lol_info.subdata_size = 0; /* rights */ + lol_info.max_age = 0; + tmperr = rsbac_list_lol_register(RSBAC_LIST_VERSION, + &(device_p->handles[file_no]), + lol_info, + RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA, + rsbac_list_compare_u32, + cap_compare, + NULL, + NULL, + NULL, + NULL, + name, + kdev); + if(tmperr) + { + char * tmp; - /* open file */ - if ((err = rsbac_read_open (RSBAC_AUTH_F_CAP_FILENAME, - &file, - kdev) )) - return(err); - - /* OK, now we can start reading */ - /* There is a read function for this file, so read as many items */ - /* as possible. A positive return value means a read success, */ - /* 0 end of file and a negative value an error. */ - - /* Set current user space to kernel space, because read() writes */ - /* to user space */ - oldfs = get_fs(); - set_fs(KERNEL_DS); - tmperr = file.f_op->read(&file, - (char *) &aci_version, - sizeof(aci_version), - &file.f_pos); - /* error? 
*/ - if (tmperr < sizeof(aci_version)) - { - printk(KERN_WARNING - "read_f_cap_set_list(): read error from file!\n"); - err = -RSBAC_EREADFAILED; - goto end_read; + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) + { + printk(KERN_WARNING + "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n", + name, + MAJOR(kdev), + MINOR(kdev), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); + } + err = tmperr; + } } - /* if wrong aci version, set no_write and skip */ - switch(aci_version) - { - case RSBAC_AUTH_F_CAP_SET_VERSION: - break; - case RSBAC_AUTH_OLD_F_CAP_SET_VERSION: - printk(KERN_WARNING - "read_f_cap_set_list(): old version %u on device %02u:%02u, new is %u - upconverting!\n", - aci_version, - MAJOR(kdev), - MINOR(kdev), - RSBAC_AUTH_F_CAP_SET_VERSION); - break; - default: - printk(KERN_WARNING - "read_f_cap_set_list(): wrong version %u on device %02u:%02u, expected %u - skipping file and setting no_write for DEV list!\n", - aci_version, - MAJOR(kdev), - MINOR(kdev), - RSBAC_AUTH_F_CAP_SET_VERSION); - device_p->no_write = TRUE; - err = -RSBAC_EREADFAILED; - goto end_read; - } - do - { - tmperr = file.f_op->read(&file, - (char *) &id, - sizeof(id), - &file.f_pos); - /* if successful, add item without writing to disk */ - if (tmperr > 0) +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + /* register all the AUTH DAC lists of lists */ + for (file_no = 0; file_no < RSBAC_AUTH_NR_CAP_EFF_FD_LISTS; file_no++) + { + /* construct name from base name + number */ + strcpy(name, RSBAC_AUTH_FD_EFF_FILENAME); + strcat(name, inttostr(number,file_no) ); + + lol_info.version = RSBAC_AUTH_FD_EFF_LIST_VERSION; + lol_info.key = RSBAC_AUTH_LIST_KEY; + lol_info.desc_size = sizeof(rsbac_inode_nr_t); + lol_info.data_size = 0; + lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t); + lol_info.subdata_size = 0; /* rights */ + lol_info.max_age = 0; + tmperr = rsbac_list_lol_register(RSBAC_LIST_VERSION, + &(device_p->eff_handles[file_no]), + lol_info, 
+ RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA, + rsbac_list_compare_u32, + cap_compare, + NULL, + NULL, + NULL, + NULL, + name, + kdev); + if(tmperr) { - if(!add_f_cap_set_item(device_p,id)) + char * tmp; + + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) { printk(KERN_WARNING - "read_f_cap_set_list(): could not add item %u!\n", - id); - tmperr = 0; - goto end_read; - } - read_count++; - tmperr = file.f_op->read(&file, - (char *) &len, - sizeof(len), - &file.f_pos); - /* if successful, read and add len subitems */ - if (tmperr > 0) - { - for(i=0;iread(&file, - (char *) &sub_first, - sizeof(sub_first), - &file.f_pos); - if(tmperr>0) - tmperr = file.f_op->read(&file, - (char *) &sub_last, - sizeof(sub_last), - &file.f_pos); - break; - case RSBAC_AUTH_OLD_F_CAP_SET_VERSION: - old_sub_id = 0; - tmperr = file.f_op->read(&file, - (char *) &old_sub_id, - sizeof(old_sub_id), - &file.f_pos); - switch(old_sub_id) - { - case RSBAC_AUTH_OLD_OWNER_F_CAP: - sub_first = RSBAC_AUTH_OWNER_F_CAP; - sub_last = RSBAC_AUTH_OWNER_F_CAP; - break; - default: - sub_first = old_sub_id; - sub_last = old_sub_id; - } - break; - } - if(tmperr > 0) - { - if (!add_f_cap_set_subitem(lookup_f_cap_set(device_p, id), - sub_first, - sub_last)) - { - printk(KERN_WARNING - "read_f_cap_set_list(): could not add sub-item %u:%u for id %u on device %02u:%02u!\n", - sub_first, - sub_last, - id, - MAJOR(kdev), - MINOR(kdev)); - i = len; - tmperr = -1; - } - } - else - { - i = len; - tmperr = -1; - } - } + "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n", + name, + MAJOR(kdev), + MINOR(kdev), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); } + err = tmperr; } } - while (tmperr > 0); /* end of do */ - - if (tmperr < 0) + for (file_no = 0; file_no < RSBAC_AUTH_NR_CAP_FS_FD_LISTS; file_no++) { - printk(KERN_WARNING "read_f_cap_set_list(): read error from file, item %lu!\n", - read_count); - err = -RSBAC_EREADFAILED; - } - -end_read: - /* Set current user space back 
to user space, because read() writes */ - /* to user space */ - set_fs(oldfs); + /* construct name from base name + number */ + strcpy(name, RSBAC_AUTH_FD_FS_FILENAME); + strcat(name, inttostr(number,file_no) ); + + lol_info.version = RSBAC_AUTH_FD_FS_LIST_VERSION; + lol_info.key = RSBAC_AUTH_LIST_KEY; + lol_info.desc_size = sizeof(rsbac_inode_nr_t); + lol_info.data_size = 0; + lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t); + lol_info.subdata_size = 0; /* rights */ + lol_info.max_age = 0; + tmperr = rsbac_list_lol_register(RSBAC_LIST_VERSION, + &(device_p->fs_handles[file_no]), + lol_info, + RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA, + rsbac_list_compare_u32, + cap_compare, + NULL, + NULL, + NULL, + NULL, + name, + kdev); + if(tmperr) + { + char * tmp; -#ifdef CONFIG_RSBAC_DEBUG - if (rsbac_debug_ds_auth) - printk(KERN_DEBUG "read_f_cap_set_list(): %lu entries read.\n", - read_count); + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) + { + printk(KERN_WARNING + "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n", + name, + MAJOR(kdev), + MINOR(kdev), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); + } + err = tmperr; + } + } #endif - /* We do not need this file any more */ - rsbac_read_close(&file); - return(err); - }; /* end of read_f_cap_set_list() */ + return err; + } -/********************/ +/* auth_detach_fd_lists() */ +/* detach from fd AUTH lists for device */ -#ifndef CONFIG_RSBAC_NO_WRITE -static int write_f_cap_set_list(struct rsbac_auth_device_list_item_t * device_p) +static int auth_detach_fd_lists(struct rsbac_auth_device_list_item_t * device_p) { - struct file file; + char * name; int err = 0; - struct rsbac_auth_f_cap_set_list_item_t * current_p = NULL; - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - mm_segment_t oldfs; - u_long write_count = 0, written = 0, bytes, buflen = 0; - int tmperr = 0; - u_long flags; - rsbac_version_t aci_version = RSBAC_AUTH_F_CAP_SET_VERSION; - char * 
buffer = NULL; - boolean vmalloc_used = FALSE; + int tmperr; + char number[10]; + u_int file_no; if(!device_p) return(-RSBAC_EINVALIDPOINTER); - /* test no_write */ - if(device_p->no_write) - return(-RSBAC_ENOTWRITABLE); - /* protect this list */ - rsbac_read_lock(&device_p->list_head.lock, &flags); - /* calculate buflen */ - buflen = device_p->list_head.count * - (sizeof(current_p->id) /* one set id each */ - + sizeof(int) ) /* sublist len */ - + sizeof(aci_version); - current_p = device_p->list_head.head; - while (current_p) - { - buflen += current_p->sublist_length - * (sizeof(sub_item_p->first_id) + sizeof(sub_item_p->last_id)); - current_p = current_p->next; - } - /* try to rsbac_vkmalloc */ - buffer = rsbac_vkmalloc(buflen, &vmalloc_used); - if(!buffer) - { - /* unprotect this list */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); - return(-RSBAC_ENOMEM); - } - current_p = device_p->list_head.head; - /* copy version */ - memcpy(buffer, - (char *) &aci_version, - sizeof(aci_version)); - write_count = sizeof(aci_version); - /* copy list */ - while (current_p) - { - memcpy(buffer+write_count, - (char *) &(current_p->id), - sizeof(current_p->id)); - write_count += sizeof(current_p->id); - memcpy(buffer+write_count, - (char *) &(current_p->sublist_length), - sizeof(current_p->sublist_length)); - write_count += sizeof(current_p->sublist_length); - sub_item_p = current_p->sublist_head; - while (sub_item_p) - { - memcpy(buffer+write_count, - (char *) &(sub_item_p->first_id), - sizeof(sub_item_p->first_id)); - write_count += sizeof(sub_item_p->first_id); - memcpy(buffer+write_count, - (char *) &(sub_item_p->last_id), - sizeof(sub_item_p->last_id)); - write_count += sizeof(sub_item_p->last_id); - sub_item_p = sub_item_p->next; - } - current_p = current_p->next; - } - /* unprotect this list */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); + name = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(!name) + return -RSBAC_ENOMEM; - /* get rsbac write-to-disk 
semaphore */ - down(&rsbac_write_sem); - - /* open file */ - if ((err = rsbac_write_open(RSBAC_AUTH_F_CAP_FILENAME, - &file, - device_p->id) )) + /* detach all the AUTH lists of lists */ + for (file_no = 0; file_no < RSBAC_AUTH_NR_CAP_FD_LISTS; file_no++) { - up(&rsbac_write_sem); - /* free buffer */ - rsbac_vkfree(buffer, vmalloc_used); - return(err); - } + /* construct name from base name + number */ + strcpy(name, RSBAC_AUTH_FD_FILENAME); + strcat(name, inttostr(number,file_no) ); - /* OK, now we can start writing the buffer. */ - /* Set current user space to kernel space, because write() reads */ - /* from user space */ - oldfs = get_fs(); - set_fs(KERNEL_DS); + tmperr = rsbac_list_lol_detach(&device_p->handles[file_no], + RSBAC_AUTH_LIST_KEY); + if(tmperr) + { + char * tmp; - while ((written < write_count) && (tmperr >= 0)) + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) + { + printk(KERN_WARNING + "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n", + name, + MAJOR(device_p->id), + MINOR(device_p->id), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); + } + err = tmperr; + } + } +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + for (file_no = 0; file_no < RSBAC_AUTH_NR_CAP_EFF_FD_LISTS; file_no++) { - bytes = rsbac_min(write_count - written, RSBAC_MAX_WRITE_CHUNK); - tmperr = file.f_op->write(&file, - buffer+written, - bytes, - &file.f_pos); - if(tmperr > 0) + /* construct name from base name + number */ + strcpy(name, RSBAC_AUTH_FD_EFF_FILENAME); + strcat(name, inttostr(number,file_no) ); + + tmperr = rsbac_list_lol_detach(&device_p->eff_handles[file_no], + RSBAC_AUTH_LIST_KEY); + if(tmperr) { - written += tmperr; + char * tmp; + + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) + { + printk(KERN_WARNING + "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n", + name, + MAJOR(device_p->id), + MINOR(device_p->id), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); + } + err = tmperr; 
} } - if (tmperr < 0) + for (file_no = 0; file_no < RSBAC_AUTH_NR_CAP_FS_FD_LISTS; file_no++) { - printk(KERN_WARNING - "write_f_cap_set_list(): write error %i on file %s on device %02u:%02u!\n", - tmperr, - RSBAC_AUTH_F_CAP_FILENAME, - MAJOR(device_p->id), - MINOR(device_p->id)); - err |= -RSBAC_EWRITEFAILED; - } - /* Set current user space back to user space, because write() reads */ - /* from user space */ - set_fs(oldfs); + /* construct name from base name + number */ + strcpy(name, RSBAC_AUTH_FD_FS_FILENAME); + strcat(name, inttostr(number,file_no) ); -#ifdef CONFIG_RSBAC_DEBUG - if (rsbac_debug_write) - printk(KERN_DEBUG "write_f_cap_set_list(): %u main lists written to device %02u:%02u.\n", - device_p->list_head.count, - MAJOR(device_p->id), - MINOR(device_p->id)); -#endif - /* End of write access */ - rsbac_write_close(&file); - /* free overall sem */ - up(&rsbac_write_sem); - /* free buffer */ - rsbac_vkfree(buffer, vmalloc_used); - return(err); - }; /* end of write_f_cap_set_list() */ -#endif /* ifndef CONFIG_RSBAC_NO_WRITE */ + tmperr = rsbac_list_lol_detach(&device_p->fs_handles[file_no], + RSBAC_AUTH_LIST_KEY); + if(tmperr) + { + char * tmp; + + tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + if(tmp) + { + printk(KERN_WARNING + "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n", + name, + MAJOR(device_p->id), + MINOR(device_p->id), + get_error_name(tmp, tmperr)); + rsbac_kfree(tmp); + } + err = tmperr; + } + } +#endif + + return err; + } /************************************************************************** */ /* The lookup functions return NULL, if the item is not found, and a */ @@ -417,93 +384,6 @@ static struct rsbac_auth_device_list_ite return (curr); }; -/* lookup_p_cap_set_subitem() */ -/* lookup process cap set sub item, return pointer */ -static struct rsbac_auth_cap_set_sublist_item_t * - lookup_p_cap_set_subitem(struct rsbac_auth_p_cap_set_list_item_t * set, - rsbac_uid_t first_id, - rsbac_uid_t 
last_id) - { - struct rsbac_auth_cap_set_sublist_item_t * curr; - - if(!set) - return(NULL); - curr = set->sublist_head; - while ( curr - && ( (first_id != curr->first_id) - || (last_id != curr->last_id) - ) - ) - curr = curr->next; - return (curr); - }; - -/* lookup_f_cap_set_subitem() */ -/* lookup file cap set sub item, return pointer */ -static struct rsbac_auth_cap_set_sublist_item_t * - lookup_f_cap_set_subitem(struct rsbac_auth_f_cap_set_list_item_t * set, - rsbac_uid_t first_id, - rsbac_uid_t last_id) - { - struct rsbac_auth_cap_set_sublist_item_t * curr; - - if(!set) - return(NULL); - curr = set->sublist_head; - while ( curr - && ( (first_id != curr->first_id) - || (last_id != curr->last_id) - ) - ) - curr = curr->next; - return (curr); - }; - -/* lookup_p_cap_set() */ -/* lookup process cap set item */ -static struct rsbac_auth_p_cap_set_list_item_t * - lookup_p_cap_set(rsbac_auth_p_cap_set_id_t cap_set) - { - struct rsbac_auth_p_cap_set_list_item_t * curr = p_cap_set_list_head.curr; - - /* is the current item the one we look for? yes -> return, else search */ - if (curr && (curr->id == cap_set) ) - return (curr); - else - curr = p_cap_set_list_head.head; - while (curr && (curr->id != cap_set) ) - curr = curr->next; - - if (curr) - p_cap_set_list_head.curr=curr; - return (curr); - }; - -/* lookup_f_cap_set() */ -/* lookup file cap set item */ -static struct rsbac_auth_f_cap_set_list_item_t * - lookup_f_cap_set(struct rsbac_auth_device_list_item_t * device_p, - rsbac_auth_f_cap_set_id_t cap_set) - { - struct rsbac_auth_f_cap_set_list_item_t * curr; - - if(!device_p) - return NULL; - curr = device_p->list_head.curr; - /* is the current item the one we look for? 
yes -> return, else search */ - if (curr && (curr->id == cap_set) ) - return (curr); - else - curr = device_p->list_head.head; - while (curr && (curr->id != cap_set) ) - curr = curr->next; - - if (curr) - device_p->list_head.curr=curr; - return (curr); - }; - - /************************************************************************** */ /* The add_item() functions add an item to the list, set head.curr to it, */ /* and return a pointer to the item. */ @@ -517,6 +397,7 @@ static struct rsbac_auth_device_list_ite * create_device_item(kdev_t kdev) { struct rsbac_auth_device_list_item_t * new_item_p; + int i; /* allocate memory for new device, return NULL, if failed */ if ( !(new_item_p = (struct rsbac_auth_device_list_item_t *) @@ -530,13 +411,14 @@ static struct rsbac_auth_device_list_ite new_item_p->no_write = FALSE; /* init file/dir sublists */ - new_item_p->list_head.lock = RW_LOCK_UNLOCKED; - new_item_p->list_head.head = NULL; - new_item_p->list_head.tail = NULL; - new_item_p->list_head.curr = NULL; - new_item_p->list_head.dirty = FALSE; - new_item_p->list_head.count = 0; - + for(i=0 ; i < RSBAC_AUTH_NR_CAP_FD_LISTS ; i++) + new_item_p->handles[i] = NULL; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + for(i=0 ; i < RSBAC_AUTH_NR_CAP_EFF_FD_LISTS ; i++) + new_item_p->eff_handles[i] = NULL; + for(i=0 ; i < RSBAC_AUTH_NR_CAP_FS_FD_LISTS ; i++) + new_item_p->fs_handles[i] = NULL; +#endif return(new_item_p); }; @@ -569,166 +451,6 @@ static struct rsbac_auth_device_list_ite return(device_p); }; -static struct rsbac_auth_cap_set_sublist_item_t * - add_p_cap_set_subitem(struct rsbac_auth_p_cap_set_list_item_t * set, - rsbac_uid_t first_id, - rsbac_uid_t last_id) - { - struct rsbac_auth_cap_set_sublist_item_t * new_item_p = NULL; - - if(!set) - return(NULL); - if ( !(new_item_p = (struct rsbac_auth_cap_set_sublist_item_t *) - rsbac_kmalloc(sizeof(*new_item_p))) ) - return(NULL); - new_item_p->first_id = first_id; - new_item_p->last_id = last_id; - - if (!set->sublist_head) - { - 
set->sublist_head=new_item_p; - set->sublist_tail=new_item_p; - set->sublist_length=1; - new_item_p->prev=NULL; - new_item_p->next=NULL; - } - else - { - new_item_p->prev=set->sublist_tail; - new_item_p->next=NULL; - set->sublist_tail->next=new_item_p; - set->sublist_tail=new_item_p; - set->sublist_length++; - }; - return(new_item_p); - }; - -static struct rsbac_auth_cap_set_sublist_item_t * - add_f_cap_set_subitem(struct rsbac_auth_f_cap_set_list_item_t * set, - rsbac_uid_t first_id, - rsbac_uid_t last_id) - { - struct rsbac_auth_cap_set_sublist_item_t * new_item_p = NULL; - struct rsbac_auth_cap_set_sublist_item_t * tmp_p = NULL; - - if(!set) - return(NULL); - if ( !(new_item_p = (struct rsbac_auth_cap_set_sublist_item_t *) - rsbac_kmalloc(sizeof(*new_item_p))) ) - return(NULL); - new_item_p->first_id = first_id; - new_item_p->last_id = last_id; - - if (!set->sublist_head) - { - set->sublist_head=new_item_p; - set->sublist_tail=new_item_p; - set->sublist_length=1; - new_item_p->prev=NULL; - new_item_p->next=NULL; - } - else - { - tmp_p = set->sublist_head; - if(tmp_p->first_id > first_id) - { /* new item becomes first */ - new_item_p->prev=NULL; - new_item_p->next=tmp_p; - tmp_p->prev = new_item_p; - set->sublist_head=new_item_p; - } - else - { /* search for place */ - while( tmp_p->next - && (tmp_p->next->first_id < first_id) - ) - tmp_p = tmp_p->next; - /* insert behind */ - new_item_p->prev=tmp_p; - new_item_p->next=tmp_p->next; - tmp_p->next = new_item_p; - if(!new_item_p->next) - set->sublist_tail=new_item_p; - else - new_item_p->next->prev = new_item_p; - } - set->sublist_length++; - }; - return(new_item_p); - }; - - -static struct rsbac_auth_p_cap_set_list_item_t* - add_p_cap_set_item(rsbac_auth_p_cap_set_id_t id) - { - struct rsbac_auth_p_cap_set_list_item_t * new_item_p = NULL; - - if ( !(new_item_p = (struct rsbac_auth_p_cap_set_list_item_t *) - rsbac_kmalloc(sizeof(*new_item_p))) ) - return(NULL); - new_item_p->id = id; - new_item_p->sublist_length = 
0; - new_item_p->sublist_head = NULL; - new_item_p->sublist_tail = NULL; - - if (!p_cap_set_list_head.head) - { - p_cap_set_list_head.head=new_item_p; - p_cap_set_list_head.tail=new_item_p; - p_cap_set_list_head.curr=new_item_p; - p_cap_set_list_head.count = 1; - new_item_p->prev=NULL; - new_item_p->next=NULL; - } - else - { - new_item_p->prev=p_cap_set_list_head.tail; - new_item_p->next=NULL; - p_cap_set_list_head.tail->next=new_item_p; - p_cap_set_list_head.tail=new_item_p; - p_cap_set_list_head.curr=new_item_p; - p_cap_set_list_head.count++; - }; - return(new_item_p); - }; - -static struct rsbac_auth_f_cap_set_list_item_t* - add_f_cap_set_item(struct rsbac_auth_device_list_item_t * device_p, - rsbac_auth_f_cap_set_id_t id) - { - struct rsbac_auth_f_cap_set_list_item_t * new_item_p = NULL; - - if(!device_p) - return NULL; - if ( !(new_item_p = (struct rsbac_auth_f_cap_set_list_item_t *) - rsbac_kmalloc(sizeof(*new_item_p))) ) - return(NULL); - new_item_p->id = id; - new_item_p->sublist_length = 0; - new_item_p->sublist_head = NULL; - new_item_p->sublist_tail = NULL; - - if (!device_p->list_head.head) - { - device_p->list_head.head=new_item_p; - device_p->list_head.tail=new_item_p; - device_p->list_head.curr=new_item_p; - device_p->list_head.count = 1; - new_item_p->prev=NULL; - new_item_p->next=NULL; - } - else - { - new_item_p->prev=device_p->list_head.tail; - new_item_p->next=NULL; - device_p->list_head.tail->next=new_item_p; - device_p->list_head.tail=new_item_p; - device_p->list_head.curr=new_item_p; - device_p->list_head.count++; - }; - return(new_item_p); - }; - /************************************************************************** */ /* The remove_item() functions remove an item from the list. If this item */ /* is head, tail or curr, these pointers are set accordingly. */ @@ -736,274 +458,13 @@ static struct rsbac_auth_f_cap_set_list_ /* item, if possible. */ /* If the item is not found, nothing is done. 
*/ -static void remove_p_cap_set_subitem(struct rsbac_auth_p_cap_set_list_item_t * set, - rsbac_uid_t first_id, - rsbac_uid_t last_id) - { - struct rsbac_auth_cap_set_sublist_item_t * item_p; - - /* first we must locate the item. */ - if (set && (item_p = lookup_p_cap_set_subitem(set, first_id,last_id)) ) - { /* ok, item was found */ - if ( (set->sublist_head == item_p) ) - { /* item is head */ - if ( (set->sublist_tail == item_p) ) - { /* item is head and tail = only item -> list will be empty*/ - set->sublist_head = NULL; - set->sublist_tail = NULL; - set->sublist_length = 0; - } - else - { /* item is head, but not tail -> next item becomes head */ - item_p->next->prev = NULL; - set->sublist_head = item_p->next; - set->sublist_length--; - }; - } - else - { /* item is not head */ - if ( (set->sublist_tail == item_p) ) - { /*item is not head, but tail -> previous item becomes tail*/ - item_p->prev->next = NULL; - set->sublist_tail = item_p->prev; - set->sublist_length--; - } - else - { /* item is neither head nor tail -> item is cut out */ - item_p->prev->next = item_p->next; - item_p->next->prev = item_p->prev; - set->sublist_length--; - }; - }; - - /* now we can remove the item from memory */ - rsbac_kfree(item_p); - }; /* end of if: item was found */ - - }; /* end of remove_p_cap_set_subitem() */ - -static void remove_f_cap_set_subitem(struct rsbac_auth_f_cap_set_list_item_t * set, - rsbac_uid_t first_id, - rsbac_uid_t last_id) - { - struct rsbac_auth_cap_set_sublist_item_t * item_p; - - /* first we must locate the item. 
*/ - if (set && (item_p = lookup_f_cap_set_subitem(set, first_id, last_id)) ) - { /* ok, item was found */ - if ( (set->sublist_head == item_p) ) - { /* item is head */ - if ( (set->sublist_tail == item_p) ) - { /* item is head and tail = only item -> list will be empty*/ - set->sublist_head = NULL; - set->sublist_tail = NULL; - set->sublist_length = 0; - } - else - { /* item is head, but not tail -> next item becomes head */ - item_p->next->prev = NULL; - set->sublist_head = item_p->next; - set->sublist_length--; - }; - } - else - { /* item is not head */ - if ( (set->sublist_tail == item_p) ) - { /*item is not head, but tail -> previous item becomes tail*/ - item_p->prev->next = NULL; - set->sublist_tail = item_p->prev; - set->sublist_length--; - } - else - { /* item is neither head nor tail -> item is cut out */ - item_p->prev->next = item_p->next; - item_p->next->prev = item_p->prev; - set->sublist_length--; - }; - }; - - /* now we can remove the item from memory */ - rsbac_kfree(item_p); - }; /* end of if: item was found */ - - }; /* end of remove_f_cap_set_subitem() */ - - -static void - remove_all_p_cap_set_subitems(struct rsbac_auth_p_cap_set_list_item_t * set_p) - { - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - struct rsbac_auth_cap_set_sublist_item_t * next_sub_item_p; - - /* first we must locate the item. 
*/ - if (set_p) - { /* ok, valid set pointer */ - /* remove all subitems from memory */ - sub_item_p = set_p->sublist_head; - while(sub_item_p) - { - next_sub_item_p = sub_item_p->next; - rsbac_kfree(sub_item_p); - sub_item_p = next_sub_item_p; - } - /* adjust start pointers */ - set_p->sublist_head = NULL; - set_p->sublist_tail = NULL; - set_p->sublist_length = 0; - }; /* end of if: item was found */ - - }; /* end of remove_all_p_cap_set_subitems() */ - -static void - remove_all_f_cap_set_subitems(struct rsbac_auth_f_cap_set_list_item_t * set_p) - { - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - struct rsbac_auth_cap_set_sublist_item_t * next_sub_item_p; - - /* first we must locate the item. */ - if (set_p) - { /* ok, valid set pointer */ - /* remove all subitems from memory */ - sub_item_p = set_p->sublist_head; - while(sub_item_p) - { - next_sub_item_p = sub_item_p->next; - rsbac_kfree(sub_item_p); - sub_item_p = next_sub_item_p; - } - /* adjust start pointers */ - set_p->sublist_head = NULL; - set_p->sublist_tail = NULL; - set_p->sublist_length = 0; - }; /* end of if: item was found */ - - }; /* end of remove_all_f_cap_set_subitems() */ - - -static void remove_p_cap_set_item(rsbac_auth_p_cap_set_id_t id) - { - struct rsbac_auth_p_cap_set_list_item_t * item_p; - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - struct rsbac_auth_cap_set_sublist_item_t * next_sub_item_p; - - /* first we must locate the item. 
*/ - if ( (item_p = lookup_p_cap_set(id)) ) - { /* ok, item was found */ - if ( (p_cap_set_list_head.head == item_p) ) - { /* item is head */ - if ( (p_cap_set_list_head.tail == item_p) ) - { /* item is head and tail = only item -> list will be empty*/ - p_cap_set_list_head.head = NULL; - p_cap_set_list_head.tail = NULL; - } - else - { /* item is head, but not tail -> next item becomes head */ - item_p->next->prev = NULL; - p_cap_set_list_head.head = item_p->next; - }; - } - else - { /* item is not head */ - if ( (p_cap_set_list_head.tail == item_p) ) - { /*item is not head, but tail -> previous item becomes tail*/ - item_p->prev->next = NULL; - p_cap_set_list_head.tail = item_p->prev; - } - else - { /* item is neither head nor tail -> item is cut out */ - item_p->prev->next = item_p->next; - item_p->next->prev = item_p->prev; - }; - }; - - /* curr is no longer valid -> reset */ - p_cap_set_list_head.curr=NULL; - /* adjust count */ - p_cap_set_list_head.count--; - /* now we can remove the item from memory, but first the sub-items */ - sub_item_p = item_p->sublist_head; - while(sub_item_p) - { - next_sub_item_p = sub_item_p->next; - rsbac_kfree(sub_item_p); - sub_item_p = next_sub_item_p; - } - rsbac_kfree(item_p); - }; /* end of if: item was found */ - - }; /* end of remove_p_cap_set_item() */ - -static void remove_f_cap_set_item(struct rsbac_auth_device_list_item_t * device_p, - rsbac_auth_f_cap_set_id_t id) - { - struct rsbac_auth_f_cap_set_list_item_t * item_p; - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - struct rsbac_auth_cap_set_sublist_item_t * next_sub_item_p; - - /* first we must locate the item. 
*/ - if ( (item_p = lookup_f_cap_set(device_p,id)) ) - { /* ok, item was found */ - if ( (device_p->list_head.head == item_p) ) - { /* item is head */ - if ( (device_p->list_head.tail == item_p) ) - { /* item is head and tail = only item -> list will be empty*/ - device_p->list_head.head = NULL; - device_p->list_head.tail = NULL; - } - else - { /* item is head, but not tail -> next item becomes head */ - item_p->next->prev = NULL; - device_p->list_head.head = item_p->next; - }; - } - else - { /* item is not head */ - if ( (device_p->list_head.tail == item_p) ) - { /*item is not head, but tail -> previous item becomes tail*/ - item_p->prev->next = NULL; - device_p->list_head.tail = item_p->prev; - } - else - { /* item is neither head nor tail -> item is cut out */ - item_p->prev->next = item_p->next; - item_p->next->prev = item_p->prev; - }; - }; - - /* curr is no longer valid -> reset */ - device_p->list_head.curr=NULL; - /* adjust count */ - device_p->list_head.count--; - /* now we can remove the item from memory, but first the sub-items */ - sub_item_p = item_p->sublist_head; - while(sub_item_p) - { - next_sub_item_p = sub_item_p->next; - rsbac_kfree(sub_item_p); - sub_item_p = next_sub_item_p; - } - rsbac_kfree(item_p); - }; /* end of if: item was found */ - - }; /* end of remove_f_cap_set_item() */ - static void clear_device_item(struct rsbac_auth_device_list_item_t * item_p) { - struct rsbac_auth_f_cap_set_list_item_t * cap_item_p; - struct rsbac_auth_f_cap_set_list_item_t * cap_item_p2; - if(!item_p) return; - /* First clean up sublists... */ - cap_item_p = item_p->list_head.head; - while (cap_item_p) - { - cap_item_p2 = cap_item_p->next; - item_p->list_head.curr = cap_item_p; - remove_f_cap_set_item(item_p, cap_item_p->id); - cap_item_p = cap_item_p2; - } + /* First deregister lists... 
*/ + auth_detach_fd_lists(item_p); /* OK, lets remove the device item itself */ rsbac_kfree(item_p); }; /* end of clear_device_item() */ @@ -1059,108 +520,151 @@ static void remove_device_item(kdev_t kd /* cap set */ static int copy_fp_cap_set_item(struct rsbac_auth_device_list_item_t * device_p, - rsbac_auth_f_cap_set_id_t fid, - rsbac_auth_p_cap_set_id_t pid) + rsbac_inode_nr_t fid, + rsbac_pid_t pid) { - struct rsbac_auth_p_cap_set_list_item_t * p_item_p; - struct rsbac_auth_f_cap_set_list_item_t * f_item_p; - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - - /* locate the file item. */ - if (!(f_item_p = lookup_f_cap_set(device_p,fid)) ) - { /* f_item was not found -> remove process set */ - remove_p_cap_set_item(pid); - return(0); - } - /* locate the process item. */ - if (!(p_item_p = lookup_p_cap_set(pid)) ) - { /* p_item cannot be found -> add */ - if (!(p_item_p = add_p_cap_set_item(pid)) ) - return(-RSBAC_ECOULDNOTADDITEM); - } - else + struct rsbac_auth_cap_range_t * cap_item_p; + rsbac_time_t * ttl_p; + int i; + long count; + + rsbac_list_lol_remove(process_handle, &pid); + count = rsbac_list_lol_get_all_subdesc_ttl(device_p->handles[fd_hash(fid)], + &fid, + (void **) &cap_item_p, + &ttl_p); + if(count > 0) { - /* clear old p_set */ - remove_all_p_cap_set_subitems(p_item_p); + for(i=0; i < count ; i++) + { + rsbac_list_lol_subadd_ttl(process_handle, + ttl_p[i], + &pid, + &cap_item_p[i], + NULL); + } + vfree(cap_item_p); + vfree(ttl_p); } - /* copy */ - sub_item_p = f_item_p->sublist_head; - while(sub_item_p) + else { - add_p_cap_set_subitem(p_item_p, - sub_item_p->first_id, - sub_item_p->last_id); - sub_item_p = sub_item_p->next; + if( (count < 0) + && (count != -RSBAC_ENOTFOUND) + ) + return count; } - return(0); - }; /* end of copy_fp_cap_set_item() */ - -/************************************************************************** */ -/* The copy_pp_cap_set_item() function copies a process cap set to another */ -static int 
copy_pp_cap_set_item(rsbac_auth_p_cap_set_id_t old_pid, - rsbac_auth_p_cap_set_id_t new_pid) - { - struct rsbac_auth_p_cap_set_list_item_t * old_p_item_p; - struct rsbac_auth_p_cap_set_list_item_t * new_p_item_p; - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - - /* locate the old process item. */ - if (!(old_p_item_p = lookup_p_cap_set(old_pid)) ) - { /* old_p_item was not found -> remove new_p_item too and return */ - remove_p_cap_set_item(new_pid); - return(0); - } - /* There is an old_p_item -> locate the new process item. */ - if ((new_p_item_p = lookup_p_cap_set(new_pid)) ) - { /* new_p_item was found -> clear */ - remove_all_p_cap_set_subitems(new_p_item_p); +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + rsbac_list_lol_remove(process_eff_handle, &pid); + count = rsbac_list_lol_get_all_subdesc_ttl(device_p->eff_handles[eff_fd_hash(fid)], + &fid, + (void **) &cap_item_p, + &ttl_p); + if(count > 0) + { + for(i=0; i < count ; i++) + { + rsbac_list_lol_subadd_ttl(process_eff_handle, + ttl_p[i], + &pid, + &cap_item_p[i], + NULL); + } + vfree(cap_item_p); + vfree(ttl_p); } else { - new_p_item_p = add_p_cap_set_item(new_pid); - if(!new_p_item_p) - return(-RSBAC_ECOULDNOTADDITEM); + if( (count < 0) + && (count != -RSBAC_ENOTFOUND) + ) + return count; } - /* copy */ - sub_item_p = old_p_item_p->sublist_head; - while(sub_item_p) + rsbac_list_lol_remove(process_fs_handle, &pid); + count = rsbac_list_lol_get_all_subdesc_ttl(device_p->fs_handles[fs_fd_hash(fid)], + &fid, + (void **) &cap_item_p, + &ttl_p); + if(count > 0) { - add_p_cap_set_subitem(new_p_item_p, - sub_item_p->first_id, - sub_item_p->last_id); - sub_item_p = sub_item_p->next; + for(i=0; i < count ; i++) + { + rsbac_list_lol_subadd_ttl(process_fs_handle, + ttl_p[i], + &pid, + &cap_item_p[i], + NULL); + } + vfree(cap_item_p); + vfree(ttl_p); } - return(0); - }; /* end of copy_pp_cap_set_item() */ + else + { + if( (count < 0) + && (count != -RSBAC_ENOTFOUND) + ) + return count; + } +#endif 
-/************************************************************ */ -/* The get_f_caplist() function copies a cap set to an array */ + return 0; + }; /* end of copy_fp_cap_set_item() */ -static int - get_f_caplist(struct rsbac_auth_f_cap_set_list_item_t * set_p, - rsbac_uid_t caplist[], - int maxnum) - { - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - int count=0; - - /* first we must locate the item. */ - if (set_p && (maxnum > 0)) - { /* ok, valid set pointer */ - /* copy all subitems */ - sub_item_p = set_p->sublist_head; - while(sub_item_p && (count < maxnum)) +/************************************************************************** */ +/* The copy_pp_cap_set_item() function copies a process cap set to another */ + +static int copy_pp_cap_set_item_handle(rsbac_list_handle_t handle, + rsbac_pid_t old_pid, + rsbac_pid_t new_pid) + { + struct rsbac_auth_cap_range_t * cap_item_p; + rsbac_time_t * ttl_p; + int i; + long count; + + rsbac_list_lol_remove(handle, &new_pid); + count = rsbac_list_lol_get_all_subdesc_ttl(handle, + &old_pid, + (void **) &cap_item_p, + &ttl_p); + if(count > 0) + { + for(i=0; i < count ; i++) { - caplist[2*count] = sub_item_p->first_id; - caplist[2*count+1] = sub_item_p->last_id; - count++; - sub_item_p = sub_item_p->next; + rsbac_list_lol_subadd_ttl(handle, + ttl_p[i], + &new_pid, + &cap_item_p[i], + NULL); } - }; /* end of if: item was found */ - return(count); - }; /* end of get_f_caplist() */ + vfree(cap_item_p); + vfree(ttl_p); + } + else + { + if(count < 0) + return count; + } + return 0; + } + +static int copy_pp_cap_set_item(rsbac_pid_t old_pid, + rsbac_pid_t new_pid) + { + int res; + res = copy_pp_cap_set_item_handle(process_handle, old_pid, new_pid); + +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + if(res) + return res; + res = copy_pp_cap_set_item_handle(process_eff_handle, old_pid, new_pid); + if(res) + return res; + res = copy_pp_cap_set_item_handle(process_fs_handle, old_pid, new_pid); +#endif + return(res); + }; /* end of 
copy_pp_cap_set_item() */ /************************************************* */ /* proc functions */ @@ -1305,11 +809,20 @@ static ssize_t auth_devices_proc_write(s device_p = lookup_device(MKDEV(major, minor)); if(!device_p) { - printk(KERN_WARNING - "auth_devices_proc_write(): invalid device %02u:%02u!\n", - major,minor); - rsbac_read_unlock(&device_list_head.lock,&flags); - goto out; + /* trigger rsbac_mount() */ + rsbac_read_unlock(&device_list_head.lock, &flags); + rsbac_get_super_block(MKDEV(major, minor)); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &flags); + device_p = lookup_device(MKDEV(major, minor)); + if(!device_p) + { + printk(KERN_WARNING + "auth_devices_proc_write(): invalid device %02u:%02u!\n", + major,minor); + rsbac_read_unlock(&device_list_head.lock,&flags); + goto out; + } } printk(KERN_INFO "auth_devices_proc_write(): setting no_write for device %02u:%02u to %u\n", @@ -1342,12 +855,11 @@ stats_auth_proc_info(char *buffer, char off_t pos = 0; off_t begin = 0; - struct rsbac_auth_p_cap_set_list_item_t * p_cap_set_item_p; - struct rsbac_auth_f_cap_set_list_item_t * f_cap_set_item_p; u_int cap_set_count = 0; u_int member_count = 0; - u_long flags,dflags; + u_long dflags; struct rsbac_auth_device_list_item_t * device_p; + int i; union rsbac_target_id_t rsbac_target_id; union rsbac_attribute_value_t rsbac_attribute_value; @@ -1375,19 +887,33 @@ stats_auth_proc_info(char *buffer, char len += sprintf(buffer, "AUTH Status\n-----------\n"); - /* protect process cap set list */ - rsbac_read_lock(&p_cap_set_list_head.lock, &flags); - p_cap_set_item_p = p_cap_set_list_head.head; - while (p_cap_set_item_p) - { - cap_set_count++; - member_count += p_cap_set_item_p->sublist_length; - p_cap_set_item_p = p_cap_set_item_p->next; - }; - /* unprotect the list */ - rsbac_read_unlock(&p_cap_set_list_head.lock, &flags); - len += sprintf(buffer + len, "%u process cap set items, sum of %u members\n", - cap_set_count,member_count); + len += sprintf(buffer 
+ len, "%lu process cap set items, sum of %lu members\n", + rsbac_list_lol_count(process_handle), + rsbac_list_lol_all_subcount(process_handle)); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + len += sprintf(buffer + len, "%lu process eff cap set items, sum of %lu members\n", + rsbac_list_lol_count(process_eff_handle), + rsbac_list_lol_all_subcount(process_eff_handle)); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + len += sprintf(buffer + len, "%lu process fs cap set items, sum of %lu members\n", + rsbac_list_lol_count(process_fs_handle), + rsbac_list_lol_all_subcount(process_fs_handle)); pos = begin + len; if (pos < offset) { @@ -1396,6 +922,7 @@ stats_auth_proc_info(char *buffer, char } if (pos > offset+length) goto out; +#endif /* protect device list */ rsbac_read_lock(&device_list_head.lock, &dflags); @@ -1405,31 +932,55 @@ stats_auth_proc_info(char *buffer, char /* reset counters */ cap_set_count = 0; member_count = 0; - /* protect file cap set list */ - rsbac_read_lock(&device_p->list_head.lock, &flags); - f_cap_set_item_p = device_p->list_head.head; - while (f_cap_set_item_p) - { - cap_set_count++; - member_count += f_cap_set_item_p->sublist_length; - f_cap_set_item_p = f_cap_set_item_p->next; - }; - /* unprotect the list */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); - if(device_p->list_head.dirty) - { - len += sprintf(buffer + len, "device %02u:%02u has %u file cap set items, sum of %u members, list is dirty\n", - MAJOR(device_p->id), - MINOR(device_p->id), - cap_set_count,member_count); + for(i=0 ; i < RSBAC_AUTH_NR_CAP_FD_LISTS; i++) + { + cap_set_count += rsbac_list_lol_count(device_p->handles[i]); + member_count += rsbac_list_lol_all_subcount(device_p->handles[i]); } - else + len += sprintf(buffer + len, "device %02u:%02u has %u file cap set items, sum of %u 
members\n", + MAJOR(device_p->id), + MINOR(device_p->id), + cap_set_count,member_count); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out_unlock; + +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + cap_set_count = 0; + member_count = 0; + for(i=0 ; i < RSBAC_AUTH_NR_CAP_EFF_FD_LISTS; i++) + { + cap_set_count += rsbac_list_lol_count(device_p->eff_handles[i]); + member_count += rsbac_list_lol_all_subcount(device_p->eff_handles[i]); + } + len += sprintf(buffer + len, "device %02u:%02u has %u file eff cap set items, sum of %u members\n", + MAJOR(device_p->id), + MINOR(device_p->id), + cap_set_count,member_count); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out_unlock; + cap_set_count = 0; + member_count = 0; + for(i=0 ; i < RSBAC_AUTH_NR_CAP_FS_FD_LISTS; i++) { - len += sprintf(buffer + len, "device %02u:%02u has %u file cap set items, sum of %u members, list is clean\n", - MAJOR(device_p->id), - MINOR(device_p->id), - cap_set_count,member_count); + cap_set_count += rsbac_list_lol_count(device_p->fs_handles[i]); + member_count += rsbac_list_lol_all_subcount(device_p->fs_handles[i]); } + len += sprintf(buffer + len, "device %02u:%02u has %u file fs cap set items, sum of %u members\n", + MAJOR(device_p->id), + MINOR(device_p->id), + cap_set_count,member_count); pos = begin + len; if (pos < offset) { @@ -1438,6 +989,8 @@ stats_auth_proc_info(char *buffer, char } if (pos > offset+length) goto out_unlock; +#endif + device_p = device_p->next; } out_unlock: @@ -1465,13 +1018,15 @@ auth_caplist_proc_info(char *buffer, cha off_t pos = 0; off_t begin = 0; - struct rsbac_auth_p_cap_set_list_item_t * p_cap_set_item_p; - struct rsbac_auth_f_cap_set_list_item_t * f_cap_set_item_p; - struct rsbac_auth_cap_set_sublist_item_t * cap_set_sub_item_p; u_int count = 0; u_int member_count = 0; - u_long flags,dflags; + u_long all_member_count; + u_long dflags; + int i,j,list; 
struct rsbac_auth_device_list_item_t * device_p; + rsbac_pid_t * p_list; + rsbac_inode_nr_t * f_list; + struct rsbac_auth_cap_range_t * cap_list; union rsbac_target_id_t rsbac_target_id; union rsbac_attribute_value_t rsbac_attribute_value; @@ -1500,9 +1055,6 @@ auth_caplist_proc_info(char *buffer, cha len += sprintf(buffer, "AUTH Cap Lists\n--------------\n"); /* protect process cap set list */ - rsbac_read_lock(&p_cap_set_list_head.lock, &flags); - p_cap_set_item_p = p_cap_set_list_head.head; - member_count = 0; len += sprintf(buffer + len, "Process capabilities:\nset-id count cap-members"); pos = begin + len; if (pos < offset) @@ -1513,31 +1065,196 @@ auth_caplist_proc_info(char *buffer, cha if (pos > offset+length) goto out; - while (p_cap_set_item_p) + all_member_count = 0; + count = rsbac_list_lol_get_all_desc(process_handle, + (void **) &p_list); + if(count > 0) + { + for(i=0; i 0) + { + for(j=0; j offset+length) + { + vfree(cap_list); + vfree(p_list); + goto out; + } + } + vfree(cap_list); + all_member_count += member_count; + } + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + { + vfree(p_list); + goto out; + } + } + vfree(p_list); + } + len += sprintf(buffer + len, "\n%u process cap set items, sum of %lu members\n", + count,all_member_count); + pos = begin + len; + if (pos < offset) { - count++; - len += sprintf(buffer + len, "\n %u\t%u\t", - p_cap_set_item_p->id, - p_cap_set_item_p->sublist_length); - pos = begin + len; - if (pos < offset) - { - len = 0; - begin = pos; + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + len += sprintf(buffer + len, "\nProcess eff capabilities:\nset-id count cap-members"); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + + all_member_count = 0; + count = rsbac_list_lol_get_all_desc(process_eff_handle, + (void **) &p_list); + if(count > 0) + { + 
for(i=0; i 0) + { + for(j=0; j offset+length) + { + vfree(cap_list); + vfree(p_list); + goto out; + } + } + vfree(cap_list); + all_member_count += member_count; + } + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + { + vfree(p_list); + goto out; + } } - if (pos > offset+length) - goto out; - cap_set_sub_item_p = p_cap_set_item_p->sublist_head; - while (cap_set_sub_item_p) - { - member_count++; - if(cap_set_sub_item_p->first_id != cap_set_sub_item_p->last_id) - len += sprintf(buffer + len, "%u:%u ", - cap_set_sub_item_p->first_id, - cap_set_sub_item_p->last_id); - else - len += sprintf(buffer + len, "%u ", - cap_set_sub_item_p->first_id); + vfree(p_list); + } + len += sprintf(buffer + len, "\n%u process eff cap set items, sum of %lu members\n", + count,all_member_count); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + len += sprintf(buffer + len, "\nProcess fs capabilities:\nset-id count cap-members"); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + + all_member_count = 0; + count = rsbac_list_lol_get_all_desc(process_fs_handle, + (void **) &p_list); + if(count > 0) + { + for(i=0; i 0) + { + for(j=0; j offset+length) + { + vfree(cap_list); + vfree(p_list); + goto out; + } + } + vfree(cap_list); + all_member_count += member_count; + } pos = begin + len; if (pos < offset) { @@ -1545,15 +1262,15 @@ auth_caplist_proc_info(char *buffer, cha begin = pos; } if (pos > offset+length) - goto out; - cap_set_sub_item_p = cap_set_sub_item_p->next; - }; - p_cap_set_item_p = p_cap_set_item_p->next; - }; - /* unprotect the list */ - rsbac_read_unlock(&p_cap_set_list_head.lock, &flags); - len += sprintf(buffer + len, "\n\n%u process cap set items, sum of %u members\n", - count,member_count); + { + vfree(p_list); + goto out; + } + } + vfree(p_list); + } + len += sprintf(buffer + len, "\n\n%u 
process fs cap set items, sum of %lu members\n", + count,all_member_count); pos = begin + len; if (pos < offset) { @@ -1562,6 +1279,7 @@ auth_caplist_proc_info(char *buffer, cha } if (pos > offset+length) goto out; +#endif len += sprintf(buffer + len, "\nFile capabilities:\nset-id count cap-members"); pos = begin + len; @@ -1579,64 +1297,207 @@ auth_caplist_proc_info(char *buffer, cha while(device_p) { /* reset counters */ - count = 0; - member_count = 0; - /* protect file cap set list */ - rsbac_read_lock(&device_p->list_head.lock, &flags); - f_cap_set_item_p = device_p->list_head.head; - while (f_cap_set_item_p) - { - count++; - len += sprintf(buffer + len, "\n%u\t%u\t", - f_cap_set_item_p->id, - f_cap_set_item_p->sublist_length); - pos = begin + len; - if (pos < offset) + all_member_count = 0; + for(list=0 ; list < RSBAC_AUTH_NR_CAP_FD_LISTS; list++) + { + count = rsbac_list_lol_get_all_desc(device_p->handles[list], + (void **) &f_list); + if(count > 0) { - len = 0; - begin = pos; + for(i=0; ihandles[list], + &f_list[i], + (void **) &cap_list); + len += sprintf(buffer + len, "\n %u\t%u\t", + f_list[i], + member_count); + if(member_count > 0) + { + for(j=0; j offset+length) + { + vfree(cap_list); + vfree(f_list); + goto out; + } + } + vfree(cap_list); + all_member_count += member_count; + } + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + { + vfree(f_list); + goto out; + } + } + vfree(f_list); } - if (pos > offset+length) - goto out_funlock; - cap_set_sub_item_p = f_cap_set_item_p->sublist_head; - while (cap_set_sub_item_p) - { - member_count++; - if(cap_set_sub_item_p->first_id != cap_set_sub_item_p->last_id) - len += sprintf(buffer + len, "%u:%u ", - cap_set_sub_item_p->first_id, - cap_set_sub_item_p->last_id); - else - len += sprintf(buffer + len, "%u ", - cap_set_sub_item_p->first_id); - pos = begin + len; - if (pos < offset) + } + len += sprintf(buffer + len, "\ndevice %02u:%02u has %u file cap set 
items, sum of %lu members, list is clean\n", + MAJOR(device_p->id), + MINOR(device_p->id), + count, all_member_count); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out_unlock; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + all_member_count = 0; + for(list=0 ; list < RSBAC_AUTH_NR_CAP_EFF_FD_LISTS; list++) + { + count = rsbac_list_lol_get_all_desc(device_p->eff_handles[list], + (void **) &f_list); + if(count > 0) + { + for(i=0; ieff_handles[list], + &f_list[i], + (void **) &cap_list); + len += sprintf(buffer + len, "\n %u\t%u\t", + f_list[i], + member_count); + if(member_count > 0) + { + for(j=0; j offset+length) + { + vfree(cap_list); + vfree(f_list); + goto out; + } + } + vfree(cap_list); + all_member_count += member_count; + } + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + { + vfree(f_list); + goto out; + } } - if (pos > offset+length) - goto out_funlock; - cap_set_sub_item_p = cap_set_sub_item_p->next; - }; - f_cap_set_item_p = f_cap_set_item_p->next; - }; - /* unprotect the list */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); - if(device_p->list_head.dirty) - { - len += sprintf(buffer + len, "\ndevice %02u:%02u has %u file cap set items, sum of %u members, list is dirty\n", - MAJOR(device_p->id), - MINOR(device_p->id), - count,member_count); + vfree(f_list); + } } - else + len += sprintf(buffer + len, "\ndevice %02u:%02u has %u file eff cap set items, sum of %lu members, list is clean\n", + MAJOR(device_p->id), + MINOR(device_p->id), + count, all_member_count); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out_unlock; + + all_member_count = 0; + for(list=0 ; list < RSBAC_AUTH_NR_CAP_FS_FD_LISTS; list++) { - len += sprintf(buffer + len, "\ndevice %02u:%02u has %u file cap set items, sum of %u members, list is clean\n", - MAJOR(device_p->id), - MINOR(device_p->id), - 
count,member_count); + count = rsbac_list_lol_get_all_desc(device_p->fs_handles[list], + (void **) &f_list); + if(count > 0) + { + for(i=0; ifs_handles[list], + &f_list[i], + (void **) &cap_list); + len += sprintf(buffer + len, "\n %u\t%u\t", + f_list[i], + member_count); + if(member_count > 0) + { + for(j=0; j offset+length) + { + vfree(cap_list); + vfree(f_list); + goto out; + } + } + vfree(cap_list); + all_member_count += member_count; + } + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + { + vfree(f_list); + goto out; + } + } + vfree(f_list); + } } + len += sprintf(buffer + len, "\ndevice %02u:%02u has %u file fs cap set items, sum of %lu members, list is clean\n", + MAJOR(device_p->id), + MINOR(device_p->id), + count, all_member_count); pos = begin + len; if (pos < offset) { @@ -1645,6 +1506,7 @@ auth_caplist_proc_info(char *buffer, cha } if (pos > offset+length) goto out_unlock; +#endif device_p = device_p->next; } out_unlock: @@ -1658,11 +1520,6 @@ out: if (len > length) len = length; return len; - -out_funlock: - /* unprotect the list */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); - goto out_unlock; } #endif /* CONFIG_PROC_FS && CONFIG_RSBAC_PROC */ @@ -1681,12 +1538,17 @@ out_funlock: /* Because there can be no access to aci data structures before init, */ /* rsbac_init_auth() will initialize all rw-spinlocks to unlocked. 
*/ +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_init_auth(void) +#else int __init rsbac_init_auth(void) +#endif { int err = 0; struct rsbac_auth_device_list_item_t * device_p = NULL; u_long dflags; struct proc_dir_entry * tmp_entry_p; + struct rsbac_list_lol_info_t lol_info; if (rsbac_is_initialized()) { @@ -1694,15 +1556,106 @@ int __init rsbac_init_auth(void) return(-RSBAC_EREINIT); } - /* set rw-spinlocks to unlocked status and init data structures */ - printk(KERN_INFO "rsbac_init_auth(): Initializing RSBAC: AUTH subsystem\n"); - - /* Init lists */ - p_cap_set_list_head.lock = RW_LOCK_UNLOCKED; - p_cap_set_list_head.head = NULL; - p_cap_set_list_head.tail = NULL; - p_cap_set_list_head.curr = NULL; - p_cap_set_list_head.count = 0; + /* set rw-spinlocks to unlocked status and init data structures */ + printk(KERN_INFO "rsbac_init_auth(): Initializing RSBAC: AUTH subsystem\n"); + + lol_info.version = RSBAC_AUTH_P_LIST_VERSION; + lol_info.key = RSBAC_AUTH_LIST_KEY; + lol_info.desc_size = sizeof(rsbac_pid_t); + lol_info.data_size = 0; + lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t); + lol_info.subdata_size = 0; + lol_info.max_age = 0; + err = rsbac_list_lol_register(RSBAC_LIST_VERSION, + &process_handle, + lol_info, + RSBAC_LIST_DEF_DATA, + NULL, + cap_compare, + NULL, + NULL, + NULL, + NULL, + RSBAC_AUTH_P_LIST_NAME, + 0); + if(err) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + printk(KERN_WARNING + "rsbac_init_auth(): Registering AUTH process cap list failed with error %s\n", + get_error_name(tmp, err)); + rsbac_kfree(tmp); + } + } +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + lol_info.version = RSBAC_AUTH_P_LIST_VERSION; + lol_info.key = RSBAC_AUTH_LIST_KEY; + lol_info.desc_size = sizeof(rsbac_pid_t); + lol_info.data_size = 0; + lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t); + lol_info.subdata_size = 0; + lol_info.max_age = 0; + err = rsbac_list_lol_register(RSBAC_LIST_VERSION, + &process_eff_handle, + lol_info, + 
RSBAC_LIST_DEF_DATA, + NULL, + cap_compare, + NULL, + NULL, + NULL, + NULL, + RSBAC_AUTH_P_EFF_LIST_NAME, + 0); + if(err) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + printk(KERN_WARNING + "rsbac_init_auth(): Registering AUTH process eff cap list failed with error %s\n", + get_error_name(tmp, err)); + rsbac_kfree(tmp); + } + } + lol_info.version = RSBAC_AUTH_P_LIST_VERSION; + lol_info.key = RSBAC_AUTH_LIST_KEY; + lol_info.desc_size = sizeof(rsbac_pid_t); + lol_info.data_size = 0; + lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t); + lol_info.subdata_size = 0; + lol_info.max_age = 0; + err = rsbac_list_lol_register(RSBAC_LIST_VERSION, + &process_fs_handle, + lol_info, + RSBAC_LIST_DEF_DATA, + NULL, + cap_compare, + NULL, + NULL, + NULL, + NULL, + RSBAC_AUTH_P_FS_LIST_NAME, + 0); + if(err) + { + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + + if(tmp) + { + printk(KERN_WARNING + "rsbac_init_auth(): Registering AUTH process fs cap list failed with error %s\n", + get_error_name(tmp, err)); + rsbac_kfree(tmp); + } + } +#endif + + /* Init FD lists */ device_list_head.lock = RW_LOCK_UNLOCKED; device_list_head.head = NULL; device_list_head.tail = NULL; @@ -1710,35 +1663,23 @@ int __init rsbac_init_auth(void) device_list_head.count = 0; /* read all data */ -#ifdef CONFIG_RSBAC_DEBUG - if (rsbac_debug_ds_auth) - printk(KERN_DEBUG "rsbac_init_auth(): Reading data\n"); -#endif +//#ifdef CONFIG_RSBAC_DEBUG +// if (rsbac_debug_ds_auth) + printk(KERN_INFO "rsbac_init_auth(): Registering FD lists\n"); +//#endif device_p = create_device_item(rsbac_root_dev); if (!device_p) { printk(KERN_CRIT "rsbac_init_auth(): Could not add device!\n"); return(-RSBAC_ECOULDNOTADDDEVICE); } - if((err = read_f_cap_set_list(device_p,rsbac_root_dev))) + if((err = auth_register_fd_lists(device_p,rsbac_root_dev))) { - if(err == -RSBAC_ENOTFOUND) - printk(KERN_WARNING - "rsbac_init_auth(): No file capabilities on dev %02u:%02u.\n", - MAJOR(rsbac_root_dev), 
MINOR(rsbac_root_dev)); - else - { - char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + char tmp[RSBAC_MAXNAMELEN]; - if(tmp) - { - printk(KERN_WARNING - "rsbac_init_auth(): File capabilities not fully read from dev %02u:%02u, error %s!\n", - MAJOR(rsbac_root_dev), MINOR(rsbac_root_dev), - get_error_name(tmp, err)); - rsbac_kfree(tmp); - } - } + printk(KERN_WARNING + "rsbac_init_auth(): File/Dir ACL registration failed for dev %02u:%02u, err %s!\n", + MAJOR(rsbac_root_dev), MINOR(rsbac_root_dev), get_error_name(tmp,err)); } /* wait for write access to device_list_head */ rsbac_write_lock_irq(&device_list_head.lock, &dflags); @@ -1823,37 +1764,14 @@ int rsbac_mount_auth(kdev_t kdev) if(!new_device_p) return -RSBAC_ECOULDNOTADDDEVICE; - /* There can be no /rsbac dir on non-device mounts (MAJOR 0), so do not even try */ - if(MAJOR(kdev)) + /* register lists */ + if((err = auth_register_fd_lists(new_device_p, kdev))) { - /* block writing */ - down(&rsbac_write_sem); - /* read attributes */ - err = read_f_cap_set_list(new_device_p, kdev); - #ifdef CONFIG_RSBAC_DEBUG - if(err && rsbac_debug_ds_auth) - { - if(err == -RSBAC_ENOTFOUND) - printk(KERN_WARNING - "rsbac_init_auth(): No file capabilities on dev %02u:%02u.\n", - MAJOR(kdev), MINOR(kdev)); - else - { - char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + char tmp[RSBAC_MAXNAMELEN]; - if(tmp) - { - printk(KERN_WARNING - "rsbac_init_auth(): File capabilities not fully read from dev %02u:%02u, error %s!\n", - MAJOR(kdev), MINOR(kdev), - get_error_name(tmp, err)); - rsbac_kfree(tmp); - } - } - } - #endif - /* allow writing */ - up(&rsbac_write_sem); + printk(KERN_WARNING + "rsbac_mount_auth(): File/Dir ACL registration failed for dev %02u:%02u, err %s!\n", + MAJOR(kdev), MINOR(kdev), get_error_name(tmp,err)); } /* wait for read access to device_list_head */ @@ -1943,12 +1861,11 @@ int rsbac_umount_auth(kdev_t kdev) int rsbac_stats_auth(void) { - struct rsbac_auth_p_cap_set_list_item_t * p_cap_set_item_p; - struct 
rsbac_auth_f_cap_set_list_item_t * f_cap_set_item_p; u_int cap_set_count = 0; u_int member_count = 0; - u_long flags,dflags; - struct rsbac_auth_device_list_item_t * device_p; + u_long dflags; + struct rsbac_auth_device_list_item_t * device_p; + int i; union rsbac_target_id_t rsbac_target_id; union rsbac_attribute_value_t rsbac_attribute_value; @@ -1976,20 +1893,9 @@ int rsbac_stats_auth(void) printk(KERN_INFO "AUTH Status\n-----------\n"); - /* protect process cap set list */ - rsbac_read_lock(&p_cap_set_list_head.lock, &flags); - p_cap_set_item_p = p_cap_set_list_head.head; - member_count = 0; - while (p_cap_set_item_p) - { - cap_set_count++; - member_count += p_cap_set_item_p->sublist_length; - p_cap_set_item_p = p_cap_set_item_p->next; - }; - /* unprotect the list */ - rsbac_read_unlock(&p_cap_set_list_head.lock, &flags); - printk(KERN_INFO "%u process cap set items, sum of %u members\n", - cap_set_count,member_count); + printk(KERN_INFO "%lu process cap set items, sum of %lu members\n", + rsbac_list_lol_count(process_handle), + rsbac_list_lol_all_subcount(process_handle)); /* protect device list */ rsbac_read_lock(&device_list_head.lock, &dflags); @@ -1999,17 +1905,11 @@ int rsbac_stats_auth(void) /* reset counters */ cap_set_count = 0; member_count = 0; - /* protect file cap set list */ - rsbac_read_lock(&device_p->list_head.lock, &flags); - f_cap_set_item_p = device_p->list_head.head; - while (f_cap_set_item_p) - { - cap_set_count++; - member_count += f_cap_set_item_p->sublist_length; - f_cap_set_item_p = f_cap_set_item_p->next; - }; - /* unprotect the list */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); + for(i=0 ; i < RSBAC_AUTH_NR_CAP_FD_LISTS; i++) + { + cap_set_count += rsbac_list_lol_count(device_p->handles[i]); + member_count += rsbac_list_lol_all_subcount(device_p->handles[i]); + } printk(KERN_INFO "device %02u:%02u has %u file cap set items, sum of %u members\n", MAJOR(device_p->id), MINOR(device_p->id), @@ -2027,17 +1927,14 @@ int 
rsbac_stats_auth(void) int rsbac_check_auth(int correct, int check_inode) { struct rsbac_auth_device_list_item_t * device_p; - boolean no_write; u_long f_count = 0, f_sum = 0, tmp_count, r_count, u_count, b_count, no_member_count; - u_int f_dirty = 0, dirty = 0; - u_long flags, dflags; + long desc_count; + u_int i,list_no; + u_long dflags; struct super_block * sb_p; struct inode * inode_p; - struct rsbac_auth_f_cap_set_list_item_t * f_item_p, * f_item_tmp_p; - struct rsbac_auth_p_cap_set_list_item_t * p_item_p, * p_item_tmp_p; - struct task_struct * task_p; - struct rsbac_auth_f_cap_set_list_head_t * head_p; + rsbac_inode_nr_t * fd_desc_p; if (!rsbac_is_initialized()) { @@ -2045,51 +1942,6 @@ int rsbac_check_auth(int correct, int ch return(-RSBAC_ENOTINITIALIZED); } - /* process list */ - rsbac_write_lock(&p_cap_set_list_head.lock, &flags); - tmp_count = 0; - p_item_p = p_cap_set_list_head.head; - while(p_item_p) - { - read_lock(&tasklist_lock); - task_p = find_task_by_pid(p_item_p->id); - read_unlock(&tasklist_lock); - if(!task_p) - { - p_item_tmp_p = p_item_p; - p_item_p = p_item_p->next; - if(correct) - { - printk(KERN_WARNING - "rsbac_check_auth(): process item has invalid id %u, removed!\n", - p_item_tmp_p->id); - remove_p_cap_set_item(p_item_tmp_p->id); - continue; - } - else - { - printk(KERN_WARNING - "rsbac_check_auth(): process item has invalid id %u!\n", - p_item_tmp_p->id); - } - } - else - { - p_item_p = p_item_p->next; - } - tmp_count++; - } - if(tmp_count != p_cap_set_list_head.count) - { - printk(KERN_WARNING - "rsbac_check_auth(): correcting count mismatch for process list - was %u, counted %lu!\n", - p_cap_set_list_head.count, tmp_count); - p_cap_set_list_head.count = tmp_count; - } - rsbac_write_unlock(&p_cap_set_list_head.lock, &flags); - printk(KERN_INFO "rsbac_check_auth(): %u process-items\n", - p_cap_set_list_head.count); - /* wait for read access to device_list_head */ rsbac_read_lock(&device_list_head.lock, &dflags); /* OK, go on */ @@ 
-2103,10 +1955,6 @@ int rsbac_check_auth(int correct, int ch u_count = 0; b_count = 0; no_member_count = 0; - f_dirty = 0; - /* let no new write start on device */ - no_write = device_p->no_write; - device_p->no_write = TRUE; if(check_inode) { sb_p = rsbac_get_super_block(device_p->id); @@ -2131,240 +1979,129 @@ int rsbac_check_auth(int correct, int ch sb_p = NULL; /* OK, go ahead */ - tmp_count = 0; - head_p = &device_p->list_head; - rsbac_write_lock(&head_p->lock, &flags); - f_item_p = head_p->head; - while(f_item_p) - { - /* check for inode on disk (but not for reiserfs, because of 64bit inode numbers) */ - #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) - if(sb_p) - #else - if(sb_p && !sb_p->s_op->read_inode2) - #endif - { - inode_p = iget(sb_p, f_item_p->id); - if(is_bad_inode(inode_p)) - { /* inode is bad -> remove */ - b_count++; - if(correct) - { - printk(KERN_INFO - "rsbac_check_auth(): f_item for bad inode %u on device %02u:%02u, removing!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); - f_item_tmp_p = f_item_p; - f_item_p = f_item_p->next; - head_p->curr = f_item_tmp_p; - remove_f_cap_set_item(device_p, f_item_tmp_p->id); - head_p->dirty = TRUE; - continue; - } - else - { - printk(KERN_INFO - "rsbac_check_auth(): f_item for bad inode %u on device %02u:%02u!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); - } - } /* end of bad_inode */ - else - { /* good inode */ - /* currently only deletion checking of ext2 inodes is possible */ - if(sb_p->s_magic == EXT2_SUPER_MAGIC) + for(list_no = 0; list_no < RSBAC_AUTH_NR_CAP_FD_LISTS; list_no++) + { +/* printk(KERN_INFO "rsbac_check_auth(): list %u\n", + list_no); */ + tmp_count = 0; + desc_count = rsbac_list_lol_get_all_desc(device_p->handles[list_no], (void **) &fd_desc_p); + if(desc_count > 0) + { + for(i=0; is_op->read_inode2) + #endif { - if(inode_p->u.ext2_i.i_dtime) - { /* inode has been deleted -> remove */ - r_count++; + inode_p = iget(sb_p, fd_desc_p[i]); + 
if(is_bad_inode(inode_p)) + { /* inode is bad -> remove */ + b_count++; if(correct) { printk(KERN_INFO - "rsbac_check_auth(): f_item for deleted inode %u on device %02u:%02u, removing!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); - f_item_tmp_p = f_item_p; - f_item_p = f_item_p->next; - head_p->curr = f_item_tmp_p; - remove_f_cap_set_item(device_p, f_item_tmp_p->id); - head_p->dirty = TRUE; + "rsbac_check_auth(): fd_item for bad inode %u on device %02u:%02u, list %u, removing!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); + rsbac_list_lol_remove(device_p->handles[list_no], &fd_desc_p[i]); continue; } else { printk(KERN_INFO - "rsbac_check_auth(): f_item for deleted inode %u on device %02u:%02u!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); + "rsbac_check_auth(): fd_item for bad inode %u on device %02u:%02u, list %u!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); } - } + } /* end of bad_inode */ else - { - if(inode_p->i_nlink <= 0) - { /* inode has been unlinked, but no dtime is set -> warn */ - u_count++; - if(correct >= 2) - { - printk(KERN_INFO - "rsbac_check_auth(): f_item for inode %u with nlink <= 0 on device %02u:%02u, removing!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); - f_item_tmp_p = f_item_p; - f_item_p = f_item_p->next; - head_p->curr = f_item_tmp_p; - remove_f_cap_set_item(device_p, f_item_tmp_p->id); - head_p->dirty = TRUE; - continue; + { /* good inode */ + /* currently only deletion checking of ext2 inodes is possible */ + if(sb_p->s_magic == EXT2_SUPER_MAGIC) + { + if(inode_p->u.ext2_i.i_dtime) + { /* inode has been deleted -> remove */ + r_count++; + if(correct) + { + printk(KERN_INFO + "rsbac_check_auth(): fd_item for deleted inode %u on device %02u:%02u, list %u, removing!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); + rsbac_list_lol_remove(device_p->handles[list_no], &fd_desc_p[i]); + continue; + } + else + { + 
printk(KERN_INFO + "rsbac_check_auth(): fd_item for deleted inode %u on device %02u:%02u, list %u!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); + } } else { - printk(KERN_INFO - "rsbac_check_auth(): deleted inode %u on device %02u:%02u, has no dtime!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); + if(inode_p->i_nlink <= 0) + { /* inode has been unlinked, but no dtime is set -> warn */ + u_count++; + if(correct >= 2) + { + printk(KERN_INFO + "rsbac_check_auth(): fd_item for inode %u with nlink <= 0 on device %02u:%02u, list %u, removing!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); + rsbac_list_lol_remove(device_p->handles[list_no], &fd_desc_p[i]); + continue; + } + else + { + printk(KERN_INFO + "rsbac_check_auth(): deleted inode %u on device %02u:%02u, list %u, has no dtime!\n", + fd_desc_p[i], MAJOR(device_p->id), MINOR(device_p->id), list_no); + } + } } } - } - } - } /* end of is_good_inode */ - iput(inode_p); - } /* end of sb_p */ - - /* if all values for this item (apart from id) are default values, - remove it */ - if(!f_item_p->sublist_length) - { - no_member_count++; - if(correct) - { -#ifdef CONFIG_RSBAC_DEBUG - if(rsbac_debug_ds) - printk(KERN_DEBUG - "rsbac_check_auth(): f_item for inode %u on device %02u:%02u, has no set members, removing!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); -#endif - f_item_tmp_p = f_item_p; - f_item_p = f_item_p->next; - remove_f_cap_set_item(device_p, f_item_tmp_p->id); - head_p->dirty = TRUE; - continue; + } /* end of is_good_inode */ + iput(inode_p); + } /* end of sb_p */ } -#ifdef CONFIG_RSBAC_DEBUG - else - { - if(rsbac_debug_ds) - printk(KERN_DEBUG - "rsbac_check_auth(): f_item for inode %u on device %02u:%02u, has no set members!\n", - f_item_p->id, MAJOR(device_p->id), MINOR(device_p->id)); - } -#endif + tmp_count++; + vfree(fd_desc_p); + f_count += desc_count; } - tmp_count++; - f_item_p = f_item_p->next; - } - if(tmp_count != 
head_p->count) - { - printk(KERN_WARNING - "rsbac_check_auth(): correcting count mismatch for device %02u:%02u - was %u, counted %lu!\n", - MAJOR(device_p->id), MINOR(device_p->id), head_p->count, tmp_count); - head_p->count = tmp_count; - } - if(head_p->dirty) - f_dirty++; - rsbac_write_unlock(&head_p->lock, &flags); - f_count += head_p->count; + } /* end of for-fd-list-array */ + switch(correct) { case 2: printk(KERN_INFO - "rsbac_check_auth(): Device %02u:%02u has %lu fd-items (%lu removed (%lu bad inodes, %lu dtimed inodes, %lu unlinked inodes, %lu had no members)) and %u dirty lists\n", + "rsbac_check_auth(): Device %02u:%02u has %lu file/dir AUTHs (%lu removed (%lu bad inodes, %lu dtimed inodes, %lu unlinked inodes, %lu had no members and default mask))\n", MAJOR(device_p->id), MINOR(device_p->id), f_count, b_count + r_count + u_count + no_member_count, - b_count, r_count, u_count, no_member_count, f_dirty); + b_count, r_count, u_count, no_member_count); break; case 1: printk(KERN_INFO - "rsbac_check_auth(): Device %02u:%02u has %lu fd-items (%lu removed (%lu bad inodes, %lu dtimed inodes, %lu had no members), %lu unlinked inodes) and %u dirty lists\n", + "rsbac_check_auth(): Device %02u:%02u has %lu file/dir AUTHs (%lu removed (%lu bad inodes, %lu dtimed inodes, %lu had no members and default mask), %lu unlinked inodes)\n", MAJOR(device_p->id), MINOR(device_p->id), f_count, b_count + r_count + no_member_count, - b_count, r_count, no_member_count, u_count, f_dirty); + b_count, r_count, no_member_count, u_count); break; default: printk(KERN_INFO - "rsbac_check_auth(): Device %02u:%02u has %lu fd-items (%lu with bad inodes, %lu with dtimed inodes, %lu unlinked inodes, %lu without members) and %u dirty lists\n", + "rsbac_check_auth(): Device %02u:%02u has %lu file/dir AUTHs (%lu with bad inodes, %lu with dtimed inodes, %lu unlinked inodes, %lu without members and with default mask)\n", MAJOR(device_p->id), MINOR(device_p->id), f_count, - b_count, r_count, 
u_count, no_member_count, f_dirty); + b_count, r_count, u_count, no_member_count); } f_sum += f_count; - dirty += f_dirty; - /* set no_write back to old state */ - device_p->no_write = no_write; /* go on */ device_p = device_p->next; - }; - printk(KERN_INFO "rsbac_check_auth(): Sum of %u Devices with %lu fd-items\n", + } + printk(KERN_INFO "rsbac_check_auth(): Sum of %u Devices with %lu file/dir AUTHs\n", device_list_head.count, f_sum); /* free access to device_list_head */ rsbac_read_unlock(&device_list_head.lock, &dflags); printk(KERN_INFO - "rsbac_check_auth(): Total of %lu registered auth items, %u lists dirty\n", - f_sum + p_cap_set_list_head.count, - dirty); + "rsbac_check_auth(): Total of %lu registered auth items\n", + f_sum); return(0); }; -/***************************************************/ -/* rsbac_write_auth() to write all dirty lists to */ -/* disk, returns no. of lists written */ - -#if defined(CONFIG_RSBAC_AUTO_WRITE) -int rsbac_write_auth(boolean need_lock) - { - struct rsbac_auth_device_list_item_t * device_p; - int err=0; - u_int count = 0; - u_long dflags; - - if (!rsbac_is_initialized()) - { - printk(KERN_WARNING "rsbac_write_auth(): RSBAC not initialized\n"); - return(-RSBAC_ENOTINITIALIZED); - } - /* wait for read access to device_list_head */ - rsbac_read_lock(&device_list_head.lock, &dflags); - /* OK, go on */ -/* printk(KERN_INFO "rsbac_write_auth(): currently %u processes working on file/dir aci\n", - device_list_head.lock.lock); */ - device_p = device_list_head.head; - while (device_p) - { - if(device_p->list_head.dirty) - { - if(need_lock) - lock_kernel(); - device_p->list_head.dirty = FALSE; - err=write_f_cap_set_list(device_p); - if(need_lock) - unlock_kernel(); - if(!err) - { - count++; - } - else - { - device_p->list_head.dirty = TRUE; - if(err != -RSBAC_ENOTWRITABLE) - printk(KERN_WARNING - "rsbac_write_auth(): write_f_cap_set_list() returned error %i for device %02u:%02u\n", - err, - MAJOR(device_p->id), - MINOR(device_p->id)); 
- } - } - device_p = device_p->next; - }; - /* free access to device_list_head */ - rsbac_read_unlock(&device_list_head.lock, &dflags); - -#ifdef CONFIG_RSBAC_DEBUG - if(rsbac_debug_write) - printk(KERN_DEBUG "rsbac_write_auth(): %u file cap lists written\n", - count); -#endif - return(count); - }; -#endif - /************************************************* */ /* Access functions */ /************************************************* */ @@ -2377,14 +2114,11 @@ int rsbac_write_auth(boolean need_lock) /* Add a set member to a set sublist. Set behaviour: also returns success, */ /* if member was already in set! */ -int rsbac_auth_add_to_p_capset(rsbac_auth_p_cap_set_id_t id, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) +int rsbac_auth_add_to_p_capset(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl) { - int err=0; - struct rsbac_auth_p_cap_set_list_item_t * cap_set_p; - u_long flags; - if (!rsbac_is_initialized()) { printk(KERN_WARNING "rsbac_auth_add_to_p_capset(): RSBAC not initialized\n"); @@ -2394,38 +2128,30 @@ int rsbac_auth_add_to_p_capset(rsbac_aut { printk(KERN_WARNING "rsbac_auth_add_to_p_capset(): called from interrupt!\n"); } - if(first_uid > last_uid) + if(cap_range.first > cap_range.last) return(-RSBAC_EINVALIDVALUE); - - /* protect the list */ - rsbac_write_lock(&p_cap_set_list_head.lock, &flags); - /* check, whether set-id exists */ - cap_set_p = lookup_p_cap_set(id); - if ( !cap_set_p - && !((cap_set_p = add_p_cap_set_item(id))) - ) - { - err = -RSBAC_ECOULDNOTADDITEM; - goto out; - } - /* check for subitem, try to add subitem, if it does not exist, */ - /* and return error, if adding failed */ - if ( (!lookup_p_cap_set_subitem(cap_set_p,first_uid,last_uid)) - && (!add_p_cap_set_subitem(cap_set_p,first_uid,last_uid))) - err = -RSBAC_ECOULDNOTADDITEM; -out: - /* unprotect the list */ - rsbac_write_unlock(&p_cap_set_list_head.lock, &flags); - return(err); + switch(cap_type) + { + 
case ACT_real: + return rsbac_list_lol_subadd_ttl(process_handle, ttl, &pid, &cap_range, NULL); +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + return rsbac_list_lol_subadd_ttl(process_eff_handle, ttl, &pid, &cap_range, NULL); + case ACT_fs: + return rsbac_list_lol_subadd_ttl(process_fs_handle, ttl, &pid, &cap_range, NULL); +#endif + default: + return -RSBAC_EINVALIDTARGET; + } } int rsbac_auth_add_to_f_capset(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl) { int err=0; - struct rsbac_auth_f_cap_set_list_item_t * cap_set_p; - u_long flags,dflags; + u_long dflags; struct rsbac_auth_device_list_item_t * device_p; if (!rsbac_is_initialized()) @@ -2437,7 +2163,7 @@ int rsbac_auth_add_to_f_capset(rsbac_aut { printk(KERN_WARNING "rsbac_auth_add_to_f_capset(): called from interrupt!\n"); } - if(first_uid > last_uid) + if(cap_range.first > cap_range.last) return(-RSBAC_EINVALIDVALUE); /* protect device list */ @@ -2445,34 +2171,40 @@ int rsbac_auth_add_to_f_capset(rsbac_aut device_p = lookup_device(file.device); if(!device_p) { - printk(KERN_WARNING "rsbac_auth_add_to_f_capset(): invalid device %02u:%02u!\n", - MAJOR(file.device),MINOR(file.device)); + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(file.device); + if(!device_p) + { + printk(KERN_WARNING "rsbac_auth_add_to_f_capset(): invalid device %02u:%02u!\n", + MAJOR(file.device),MINOR(file.device)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } - /* protect the list */ - rsbac_write_lock(&device_p->list_head.lock, &flags); - /* check, whether set-id exists, add, if not */ - cap_set_p = lookup_f_cap_set(device_p,file.inode); - if ( !cap_set_p - && 
!((cap_set_p = add_f_cap_set_item(device_p, file.inode))) - ) - { - err = -RSBAC_ECOULDNOTADDITEM; - goto out; - } - /* check for subitem, try to add subitem, if it does not exist, */ - /* and return error, if adding failed */ - if ( (!lookup_f_cap_set_subitem(cap_set_p, first_uid, last_uid)) - && (!add_f_cap_set_subitem(cap_set_p, first_uid, last_uid))) - err = -RSBAC_ECOULDNOTADDITEM; - else - { /* successful -> write to disk or mark dirty */ - device_p->list_head.dirty = TRUE; + + switch(cap_type) + { + case ACT_real: + err = rsbac_list_lol_subadd_ttl(device_p->handles[fd_hash(file.inode)], + ttl, &file.inode, &cap_range, NULL); + break; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + err = rsbac_list_lol_subadd_ttl(device_p->eff_handles[eff_fd_hash(file.inode)], + ttl, &file.inode, &cap_range, NULL); + break; + case ACT_fs: + err = rsbac_list_lol_subadd_ttl(device_p->fs_handles[fs_fd_hash(file.inode)], + ttl, &file.inode, &cap_range, NULL); + break; +#endif + default: + err = -RSBAC_EINVALIDTARGET; } -out: - /* unprotect the lists */ - rsbac_write_unlock(&device_p->list_head.lock, &flags); rsbac_read_unlock(&device_list_head.lock, &dflags); return(err); } @@ -2481,13 +2213,10 @@ out: /* Remove a set member from a sublist. Set behaviour: Returns no error, if */ /* member is not in list. 
*/ -int rsbac_auth_remove_from_p_capset(rsbac_auth_p_cap_set_id_t id, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) +int rsbac_auth_remove_from_p_capset(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range) { - struct rsbac_auth_p_cap_set_list_item_t * cap_set_p; - u_long flags; - if (!rsbac_is_initialized()) { printk(KERN_WARNING "rsbac_auth_remove_from_p_capset(): RSBAC not initialized\n"); @@ -2497,32 +2226,29 @@ int rsbac_auth_remove_from_p_capset(rsba { printk(KERN_WARNING "rsbac_auth_remove_from_p_capset(): called from interrupt!\n"); } - if(first_uid > last_uid) + if(cap_range.first > cap_range.last) return(-RSBAC_EINVALIDVALUE); - - /* protect the list */ - rsbac_write_lock(&p_cap_set_list_head.lock, &flags); - /* check, whether set-id exists */ - cap_set_p = lookup_p_cap_set(id); - if (cap_set_p) - { - remove_p_cap_set_subitem(cap_set_p, first_uid, last_uid); - /* remove set, if empty */ - if(!cap_set_p->sublist_head) - remove_p_cap_set_item(id); + switch(cap_type) + { + case ACT_real: + return rsbac_list_lol_subremove(process_handle, &pid, &cap_range); +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + return rsbac_list_lol_subremove(process_handle, &pid, &cap_range); + case ACT_fs: + return rsbac_list_lol_subremove(process_handle, &pid, &cap_range); +#endif + default: + return -RSBAC_EINVALIDTARGET; } - /* unprotect the lists */ - rsbac_write_unlock(&p_cap_set_list_head.lock, &flags); - return(0); } int rsbac_auth_remove_from_f_capset(rsbac_auth_file_t file, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range) { int err=0; - struct rsbac_auth_f_cap_set_list_item_t * cap_set_p; - u_long flags,dflags; + u_long dflags; struct rsbac_auth_device_list_item_t * device_p; if (!rsbac_is_initialized()) @@ -2534,7 +2260,7 @@ int rsbac_auth_remove_from_f_capset(rsba { printk(KERN_WARNING "rsbac_auth_remove_from_f_capset(): called from 
interrupt!\n"); } - if(first_uid > last_uid) + if(cap_range.first > cap_range.last) return(-RSBAC_EINVALIDVALUE); /* protect device list */ @@ -2542,26 +2268,36 @@ int rsbac_auth_remove_from_f_capset(rsba device_p = lookup_device(file.device); if(!device_p) { - printk(KERN_WARNING "rsbac_auth_remove_from_f_capset(): invalid device %02u:%02u!\n", - MAJOR(file.device),MINOR(file.device)); + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(file.device); + if(!device_p) + { + printk(KERN_WARNING "rsbac_auth_remove_from_f_capset(): invalid device %02u:%02u!\n", + MAJOR(file.device),MINOR(file.device)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } - /* protect the list */ - rsbac_write_lock(&device_p->list_head.lock, &flags); - /* check, whether set-id exists */ - cap_set_p = lookup_f_cap_set(device_p,file.inode); - if (cap_set_p) - { /* remove */ - remove_f_cap_set_subitem(cap_set_p, first_uid, last_uid); - /* remove set, if empty */ - if(!cap_set_p->sublist_head) - remove_f_cap_set_item(device_p, file.inode); - /* successful -> write to disk or mark dirty */ - device_p->list_head.dirty = TRUE; + switch(cap_type) + { + case ACT_real: + err = rsbac_list_lol_subremove(device_p->handles[fd_hash(file.inode)], &file.inode, &cap_range); + break; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + err = rsbac_list_lol_subremove(device_p->eff_handles[eff_fd_hash(file.inode)], &file.inode, &cap_range); + break; + case ACT_fs: + err = rsbac_list_lol_subremove(device_p->fs_handles[fs_fd_hash(file.inode)], &file.inode, &cap_range); + break; +#endif + default: + err = -RSBAC_EINVALIDTARGET; } - /* unprotect the lists */ - rsbac_write_unlock(&device_p->list_head.lock, &flags); rsbac_read_unlock(&device_list_head.lock, &dflags); return(err); } @@ 
-2570,12 +2306,9 @@ int rsbac_auth_remove_from_f_capset(rsba /* Remove all set members from a sublist. Set behaviour: Returns no error, */ /* if list is empty. */ -int rsbac_auth_clear_p_capset(rsbac_auth_p_cap_set_id_t id) +int rsbac_auth_clear_p_capset(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type) { - int err=0; - struct rsbac_auth_p_cap_set_list_item_t * cap_set_p; - u_long flags; - if (!rsbac_is_initialized()) { printk(KERN_WARNING "rsbac_auth_clear_p_capset(): RSBAC not initialized\n"); @@ -2585,22 +2318,26 @@ int rsbac_auth_clear_p_capset(rsbac_auth { printk(KERN_WARNING "rsbac_auth_clear_p_capset(): called from interrupt!\n"); } - /* protect the list */ - rsbac_write_lock(&p_cap_set_list_head.lock, &flags); - /* check, whether set-id exists */ - cap_set_p = lookup_p_cap_set(id); - if (cap_set_p) - remove_all_p_cap_set_subitems(cap_set_p); - /* unprotect the list */ - rsbac_write_unlock(&p_cap_set_list_head.lock, &flags); - return(err); + switch(cap_type) + { + case ACT_real: + return rsbac_list_lol_remove(process_handle, &pid); +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + return rsbac_list_lol_remove(process_handle, &pid); + case ACT_fs: + return rsbac_list_lol_remove(process_handle, &pid); +#endif + default: + return -RSBAC_EINVALIDTARGET; + } } -int rsbac_auth_clear_f_capset(rsbac_auth_file_t file) +int rsbac_auth_clear_f_capset(rsbac_auth_file_t file, + enum rsbac_auth_cap_type_t cap_type) { int err=0; - struct rsbac_auth_f_cap_set_list_item_t * cap_set_p; - u_long flags,dflags; + u_long dflags; struct rsbac_auth_device_list_item_t * device_p; if (!rsbac_is_initialized()) @@ -2617,23 +2354,36 @@ int rsbac_auth_clear_f_capset(rsbac_auth device_p = lookup_device(file.device); if(!device_p) { - printk(KERN_WARNING "rsbac_auth_clear_f_capset(): invalid device %02u:%02u!\n", - MAJOR(file.device),MINOR(file.device)); + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + 
rsbac_get_super_block(file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(file.device); + if(!device_p) + { + printk(KERN_WARNING "rsbac_auth_clear_f_capset(): invalid device %02u:%02u!\n", + MAJOR(file.device),MINOR(file.device)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } - /* protect the list */ - rsbac_write_lock(&device_p->list_head.lock, &flags); - /* check, whether set-id exists */ - cap_set_p = lookup_f_cap_set(device_p,file.inode); - if (cap_set_p) - { /* remove */ - remove_all_f_cap_set_subitems(cap_set_p); - /* successful -> write to disk or mark dirty */ - device_p->list_head.dirty = TRUE; + switch(cap_type) + { + case ACT_real: + err = rsbac_list_lol_remove(device_p->handles[fd_hash(file.inode)], &file.inode); + break; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + err = rsbac_list_lol_remove(device_p->eff_handles[eff_fd_hash(file.inode)], &file.inode); + break; + case ACT_fs: + err = rsbac_list_lol_remove(device_p->fs_handles[fs_fd_hash(file.inode)], &file.inode); + break; +#endif + default: + err = -RSBAC_EINVALIDTARGET; } - /* unprotect the lists */ - rsbac_write_unlock(&device_p->list_head.lock, &flags); rsbac_read_unlock(&device_list_head.lock, &dflags); return(err); } @@ -2641,179 +2391,70 @@ int rsbac_auth_clear_f_capset(rsbac_auth /* rsbac_auth_capset_member */ /* Return truth value, whether member is in set */ -boolean rsbac_auth_p_capset_member(rsbac_auth_p_cap_set_id_t id, +boolean rsbac_auth_p_capset_member(rsbac_pid_t pid, + enum rsbac_auth_cap_type_t cap_type, rsbac_uid_t member) { - boolean res=FALSE; - struct rsbac_auth_p_cap_set_list_item_t * cap_set_p; - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - u_long flags; - if (!rsbac_is_initialized()) { printk(KERN_WARNING "rsbac_auth_p_capset_member(): RSBAC not initialized\n"); - return(-RSBAC_ENOTINITIALIZED); + return FALSE; } if (in_interrupt()) { printk(KERN_WARNING 
"rsbac_auth_p_capset_member(): called from interrupt!\n"); } - /* protect the list */ - rsbac_read_lock(&p_cap_set_list_head.lock, &flags); - /* check, whether set-id exists */ - cap_set_p = lookup_p_cap_set(id); - if (cap_set_p) - { - sub_item_p = cap_set_p->sublist_head; - while(sub_item_p) - { - if ( (member >= sub_item_p->first_id) - && (member <= sub_item_p->last_id) - ) - { - res = TRUE; - break; - } - sub_item_p = sub_item_p->next; - } - } - /* unprotect the list */ - rsbac_read_unlock(&p_cap_set_list_head.lock, &flags); - return(res); - } - -boolean rsbac_auth_f_capset_member(rsbac_auth_file_t file, - rsbac_uid_t member) - { - boolean res = FALSE; - struct rsbac_auth_f_cap_set_list_item_t * cap_set_p; - struct rsbac_auth_cap_set_sublist_item_t * sub_item_p; - u_long flags,dflags; - struct rsbac_auth_device_list_item_t * device_p; - - if (!rsbac_is_initialized()) - { - printk(KERN_WARNING "rsbac_auth_f_capset_member(): RSBAC not initialized\n"); - return(-RSBAC_ENOTINITIALIZED); - } - if (in_interrupt()) - { - printk(KERN_WARNING "rsbac_auth_f_capset_member(): called from interrupt!\n"); - } - /* protect device list */ - rsbac_read_lock(&device_list_head.lock, &dflags); - device_p = lookup_device(file.device); - if(!device_p) - { - printk(KERN_WARNING "rsbac_auth_f_capset_member(): invalid device %02u:%02u!\n", - MAJOR(file.device),MINOR(file.device)); - rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); - } - /* protect the list */ - rsbac_read_lock(&device_p->list_head.lock, &flags); - /* check, whether set-id exists */ - cap_set_p = lookup_f_cap_set(device_p,file.inode); - if (cap_set_p) + switch(cap_type) { - sub_item_p = cap_set_p->sublist_head; - while(sub_item_p) - { - if ( (member >= sub_item_p->first_id) - && (member <= sub_item_p->last_id) - ) - { - res = TRUE; - break; - } - sub_item_p = sub_item_p->next; - } + case ACT_real: + return rsbac_list_lol_subexist_compare(process_handle, &pid, &member, single_cap_compare); 
+#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + return rsbac_list_lol_subexist_compare(process_eff_handle, &pid, &member, single_cap_compare); + case ACT_fs: + return rsbac_list_lol_subexist_compare(process_fs_handle, &pid, &member, single_cap_compare); +#endif + default: + return FALSE; } - /* unprotect the lists */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); - rsbac_read_unlock(&device_list_head.lock, &dflags); - return(res); } /* rsbac_auth_remove_capset */ /* Remove a full set. For cleanup, if object is deleted. */ /* To empty an existing set use rsbac_auth_clear_capset. */ -int rsbac_auth_remove_p_capset(rsbac_auth_p_cap_set_id_t id) +int rsbac_auth_remove_p_capsets(rsbac_pid_t pid) { - u_long flags; + int err; - if (!rsbac_is_initialized) - { - printk(KERN_WARNING "rsbac_auth_remove_p_capset(): RSBAC not initialized\n"); - return(-RSBAC_ENOTINITIALIZED); - } - if (in_interrupt()) - { - printk(KERN_WARNING "rsbac_auth_remove_p_capset(): called from interrupt!\n"); - } -#ifdef CONFIG_RSBAC_DEBUG - if (rsbac_debug_ds_auth) - printk(KERN_DEBUG - "rsbac_auth_remove_capset(): Removing process cap set data\n"); + err = rsbac_auth_clear_p_capset(pid, ACT_real); +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + if(!err) + err = rsbac_auth_clear_p_capset(pid, ACT_eff); + if(!err) + err = rsbac_auth_clear_p_capset(pid, ACT_fs); #endif - /* protect the list */ - rsbac_write_lock(&p_cap_set_list_head.lock, &flags); - /* call the remove function */ - remove_p_cap_set_item(id); - /* we are ready, so unprotect the list */ - rsbac_write_unlock(&p_cap_set_list_head.lock, &flags); - return(0); + return err; } -int rsbac_auth_remove_f_capset(rsbac_auth_file_t file) +int rsbac_auth_remove_f_capsets(rsbac_auth_file_t file) { - u_long flags,dflags; - struct rsbac_auth_device_list_item_t * device_p; + int err; - if (!rsbac_is_initialized) - { - printk(KERN_WARNING "rsbac_auth_remove_f_capset(): RSBAC not initialized\n"); - return(-RSBAC_ENOTINITIALIZED); - } - if 
(in_interrupt()) - { - printk(KERN_WARNING "rsbac_auth_remove_f_capset(): called from interrupt!\n"); - } -#ifdef CONFIG_RSBAC_DEBUG - if (rsbac_debug_ds_auth) - printk(KERN_DEBUG - "rsbac_auth_remove_capset(): Removing file cap set data\n"); + err = rsbac_auth_clear_f_capset(file, ACT_real); +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + if(!err) + err = rsbac_auth_clear_f_capset(file, ACT_eff); + if(!err) + err = rsbac_auth_clear_f_capset(file, ACT_fs); #endif - /* protect device list */ - rsbac_read_lock(&device_list_head.lock, &dflags); - device_p = lookup_device(file.device); - if(!device_p) - { - printk(KERN_WARNING "rsbac_auth_remove_f_capset(): invalid device %02u:%02u!\n", - MAJOR(file.device),MINOR(file.device)); - rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); - } - /* protect the list */ - rsbac_write_lock(&device_p->list_head.lock, &flags); - /* call the remove function */ - if(lookup_f_cap_set(device_p,file.inode)) - { - remove_f_cap_set_item(device_p,file.inode); - /* successful -> write to disk or only mark dirty */ - device_p->list_head.dirty = TRUE; - } - /* unprotect the lists */ - rsbac_write_unlock(&device_p->list_head.lock, &flags); - rsbac_read_unlock(&device_list_head.lock, &dflags); - return(0); + return err; } int rsbac_auth_copy_fp_capset(rsbac_auth_file_t file, - rsbac_auth_p_cap_set_id_t p_cap_set_id) + rsbac_pid_t p_cap_set_id) { - u_long flags,dflags,pflags; + u_long dflags; struct rsbac_auth_device_list_item_t * device_p; int err=0; @@ -2836,29 +2477,29 @@ int rsbac_auth_copy_fp_capset(rsbac_auth device_p = lookup_device(file.device); if(!device_p) { - printk(KERN_WARNING "rsbac_auth_copy_fp_capset(): invalid device %02u:%02u!\n", - MAJOR(file.device),MINOR(file.device)); + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); + rsbac_get_super_block(file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = 
lookup_device(file.device); + if(!device_p) + { + printk(KERN_WARNING "rsbac_auth_copy_fp_capset(): invalid device %02u:%02u!\n", + MAJOR(file.device),MINOR(file.device)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } - /* protect the set lists */ - rsbac_read_lock(&device_p->list_head.lock, &flags); - rsbac_write_lock(&p_cap_set_list_head.lock, &pflags); /* call the copy function */ err = copy_fp_cap_set_item(device_p,file.inode,p_cap_set_id); - /* unprotect the lists */ - rsbac_write_unlock(&p_cap_set_list_head.lock, &pflags); - rsbac_read_unlock(&device_p->list_head.lock, &flags); rsbac_read_unlock(&device_list_head.lock, &dflags); return(err); } -int rsbac_auth_copy_pp_capset(rsbac_auth_p_cap_set_id_t old_p_set_id, - rsbac_auth_p_cap_set_id_t new_p_set_id) +int rsbac_auth_copy_pp_capset(rsbac_pid_t old_p_set_id, + rsbac_pid_t new_p_set_id) { - u_long pflags; - int err=0; - if (!rsbac_is_initialized) { printk(KERN_WARNING "rsbac_auth_copy_pp_capset(): RSBAC not initialized\n"); @@ -2873,22 +2514,18 @@ int rsbac_auth_copy_pp_capset(rsbac_auth printk(KERN_DEBUG "rsbac_auth_copy_pp_capset(): Copying process cap set data to process cap set\n"); #endif - /* protect the set list */ - rsbac_write_lock(&p_cap_set_list_head.lock, &pflags); /* call the copy function */ - err = copy_pp_cap_set_item(old_p_set_id,new_p_set_id); - /* unprotect the lists */ - rsbac_write_unlock(&p_cap_set_list_head.lock, &pflags); - return(err); + return copy_pp_cap_set_item(old_p_set_id,new_p_set_id); } int rsbac_auth_get_f_caplist(rsbac_auth_file_t file, - rsbac_uid_t caplist[], - int maxnum) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t **caplist_p, + rsbac_time_t **ttllist_p) { - u_long flags,dflags; + u_long dflags; struct rsbac_auth_device_list_item_t * device_p; - struct rsbac_auth_f_cap_set_list_item_t * cap_set_p; + long count; if (!rsbac_is_initialized) { @@ -2909,26 +2546,47 @@ int 
rsbac_auth_get_f_caplist(rsbac_auth_ device_p = lookup_device(file.device); if(!device_p) { - printk(KERN_WARNING "rsbac_auth_get_f_caplist(): invalid device %02u:%02u!\n", - MAJOR(file.device),MINOR(file.device)); + /* trigger rsbac_mount() */ rsbac_read_unlock(&device_list_head.lock, &dflags); - return(-RSBAC_EINVALIDDEV); - } - /* protect the list */ - rsbac_read_lock(&device_p->list_head.lock, &flags); - /* lookup set */ - if((cap_set_p = lookup_f_cap_set(device_p,file.inode))) - { - maxnum = get_f_caplist(cap_set_p,caplist,maxnum); + rsbac_get_super_block(file.device); + /* retry */ + rsbac_read_lock(&device_list_head.lock, &dflags); + device_p = lookup_device(file.device); + if(!device_p) + { + printk(KERN_WARNING "rsbac_auth_get_f_caplist(): invalid device %02u:%02u!\n", + MAJOR(file.device),MINOR(file.device)); + rsbac_read_unlock(&device_list_head.lock, &dflags); + return(-RSBAC_EINVALIDDEV); + } } - else - { - maxnum = 0; + switch(cap_type) + { + case ACT_real: + count = rsbac_list_lol_get_all_subdesc_ttl(device_p->handles[fd_hash(file.inode)], + &file.inode, + (void **) caplist_p, + ttllist_p); + break; +#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER + case ACT_eff: + count = rsbac_list_lol_get_all_subdesc_ttl(device_p->eff_handles[eff_fd_hash(file.inode)], + &file.inode, + (void **) caplist_p, + ttllist_p); + break; + case ACT_fs: + count = rsbac_list_lol_get_all_subdesc_ttl(device_p->fs_handles[fs_fd_hash(file.inode)], + &file.inode, + (void **) caplist_p, + ttllist_p); + break; +#endif + default: + count = -RSBAC_EINVALIDTARGET; } - /* unprotect the lists */ - rsbac_read_unlock(&device_p->list_head.lock, &flags); rsbac_read_unlock(&device_list_head.lock, &dflags); - return(maxnum); + return(count); } /* end of auth_data_structures.c */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/gen_lists.c linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/gen_lists.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/gen_lists.c 2003-08-25 
18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/gen_lists.c 2003-08-25 20:33:02.000000000 +0200 @@ -1,9 +1,9 @@ /************************************* */ /* Rule Set Based Access Control */ -/* Author and (c) 1999-2002: */ +/* Author and (c) 1999-2003: */ /* Amon Ott */ /* Generic lists for all parts */ -/* Last modified: 27/Feb/2002 */ +/* Last modified: 16/Jan/2003 */ /************************************* */ #include @@ -242,11 +242,12 @@ static struct rsbac_list_item_t * lookup static struct rsbac_list_item_t * lookup_lol_subitem_compare( struct rsbac_list_lol_reg_item_t * list, struct rsbac_list_lol_item_t * sublist, - void * subdesc) + void * subdesc, + rsbac_list_compare_function_t compare) { struct rsbac_list_item_t * curr; - if(!list || !sublist || !subdesc || !list->subcompare) + if(!list || !sublist || !subdesc || !compare) return NULL; curr = sublist->curr; @@ -258,13 +259,13 @@ static struct rsbac_list_item_t * lookup } /* if current item is not the right one, search... 
*/ /* note: item desc is behind official struct */ - if(list->subcompare(subdesc, &curr[1])) + if(compare(&curr[1],subdesc)) { - if((list->subcompare(subdesc, &curr[1]) > 0)) + if((compare(&curr[1], subdesc) < 0)) { curr = curr->next; while ( curr - && (list->subcompare(subdesc, &curr[1]) > 0) + && (compare(&curr[1], subdesc) < 0) ) { curr = curr->next; @@ -274,7 +275,7 @@ static struct rsbac_list_item_t * lookup { curr = curr->prev; while ( curr - && (list->subcompare(subdesc, &curr[1]) < 0) + && (compare(&curr[1], subdesc) > 0) ) { curr = curr->prev; @@ -284,7 +285,7 @@ static struct rsbac_list_item_t * lookup { /* keep for speedup */ sublist->curr = curr; - if(!list->subcompare(subdesc, &curr[1])) + if(!compare(&curr[1], subdesc)) { /* found */ return curr; @@ -374,11 +375,33 @@ static struct rsbac_list_item_t * lookup return NULL; if(list->subcompare) - return lookup_lol_subitem_compare(list, sublist, subdesc); + return lookup_lol_subitem_compare(list, sublist, subdesc, list->subcompare); else return lookup_lol_subitem_memcmp(list, sublist, subdesc); } +static struct rsbac_list_item_t * lookup_lol_subitem_user_compare( + struct rsbac_list_lol_reg_item_t * list, + struct rsbac_list_lol_item_t * sublist, + void * subdesc, + rsbac_list_compare_function_t compare) + { + struct rsbac_list_item_t * curr; + + if(!list || !sublist || !subdesc || !compare) + return NULL; + + curr = sublist->head; + /* note: item desc is behind official struct */ + while(curr) + { + if(!compare(&curr[1],subdesc)) + return curr; + curr = curr->next; + } + return (curr); + }; + /* list of lists - items */ static struct rsbac_list_lol_item_t * lookup_lol_item_compare( @@ -2884,7 +2907,7 @@ static int write_lol_list(struct rsbac_l } /* Ready. 
*/ return(err); - }; /* end of write_list() */ + }; /* end of write_lol_list() */ #endif /* ifndef CONFIG_RSBAC_NO_WRITE */ @@ -3327,7 +3350,11 @@ static int genlist_compare(void * desc1, return strncmp(i_desc1->filename, i_desc2->filename, RSBAC_LIST_MAX_FILENAME); } +#ifdef CONFIG_RSBAC_INIT_DELAY +void rsbac_list_init(void) +#else void __init rsbac_list_init(void) +#endif { /* removed devicelist registration - it is not used anyway */ struct rsbac_list_info_t fileinfo; @@ -3562,7 +3589,7 @@ int rsbac_write_lists(boolean need_lock) if(subcount != -RSBAC_ENOTWRITABLE) printk(KERN_WARNING "rsbac_write_lists(): write_list() for list of lists %s returned error %i\n", - item_p->name, subcount); + lol_item_p->name, subcount); } else count++; @@ -5805,6 +5832,79 @@ int rsbac_list_lol_subexist_u32(rsbac_li } #if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) +EXPORT_SYMBOL(rsbac_list_lol_subexist_compare); +#endif +int rsbac_list_lol_subexist_compare( + rsbac_list_handle_t handle, + void * desc, + void * subdesc, + rsbac_list_compare_function_t compare) + { + struct rsbac_list_lol_reg_item_t * list; + struct rsbac_list_lol_item_t * sublist; + u_long lock_flags, rlock_flags; + struct rsbac_list_item_t * item_p; + int result; + + if(!handle || !desc || !subdesc) + return FALSE; + if(!list_initialized) + return FALSE; + /* Use standard function, if compare is not provided. 
*/ + if(!compare) + return rsbac_list_lol_subexist(handle, desc, subdesc); + + list = (struct rsbac_list_lol_reg_item_t *) handle; + if(list->self != list) + return -RSBAC_EINVALIDVALUE; + + rsbac_read_lock(&lol_reg_head.lock, &rlock_flags); +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_lists) + printk(KERN_DEBUG "rsbac_list_lol_subexist_compare: testing on list %s.\n", + list->name); +#endif + rsbac_read_lock(&list->lock, &lock_flags); + + sublist = lookup_lol_item(list, desc); + if(sublist) + { /* exists -> lookup subitem */ + item_p = lookup_lol_subitem_user_compare(list, sublist, subdesc, compare); + if( item_p + && ( !item_p->max_age + || (item_p->max_age > CURRENT_TIME) + ) + ) + { /* exists -> TRUE */ + result = TRUE; + } + else + { + result = FALSE; + } + } + else + { + result = FALSE; + } + rsbac_read_unlock(&list->lock, &lock_flags); + rsbac_read_unlock(&lol_reg_head.lock, &rlock_flags); + return result; + } + +/* simple wrapper for 32Bit desc to allow using const values */ +#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) +EXPORT_SYMBOL(rsbac_list_lol_subexist_compare_u32); +#endif +int rsbac_list_lol_subexist_compare_u32(rsbac_list_handle_t handle, + __u32 desc, + __u32 subdesc, + rsbac_list_compare_function_t compare) + { + return rsbac_list_lol_subexist(handle, &desc, &subdesc); + } + +#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) EXPORT_SYMBOL(rsbac_list_lol_exist); #endif int rsbac_list_lol_exist( @@ -6222,7 +6322,9 @@ long rsbac_list_lol_get_all_desc(rsbac_l /* If return value > 0, *array_p contains a pointer to a vmalloc'd array of datas, otherwise *array_p is set to NULL. If *array_p has been set, caller must call vfree(*array_p) after use! 
*/ - +#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) +EXPORT_SYMBOL(rsbac_list_get_all_data); +#endif long rsbac_list_get_all_data(rsbac_list_handle_t handle, void ** array_p) { struct rsbac_list_reg_item_t * list; @@ -6293,6 +6395,9 @@ long rsbac_list_get_all_data(rsbac_list_ return result; } +#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) +EXPORT_SYMBOL(rsbac_list_lol_get_all_subdata); +#endif long rsbac_list_lol_get_all_subdata(rsbac_list_handle_t handle, void * desc, void ** array_p) { struct rsbac_list_lol_reg_item_t * list; @@ -6365,6 +6470,9 @@ long rsbac_list_lol_get_all_subdata(rsba return result; } +#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) +EXPORT_SYMBOL(rsbac_list_lol_get_all_data); +#endif long rsbac_list_lol_get_all_data(rsbac_list_handle_t handle, void ** array_p) { struct rsbac_list_lol_reg_item_t * list; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/pm_data_structures.c linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/pm_data_structures.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/pm_data_structures.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/pm_data_structures.c 2003-08-26 10:15:11.000000000 +0200 @@ -54,7 +54,7 @@ static rsbac_list_handle_t tkt_handle = /* Declarations of external functions */ /**************************************************/ -/*int sys_write(u_int,cont char *, size_t);*/ +/* int sys_write(u_int, char *, u_int); */ /**************************************************/ /* Declarations of internal functions */ @@ -1105,7 +1105,11 @@ static int pm_list_proc_read(char *buffe /* Init functions */ /************************************************* */ +#ifdef CONFIG_RSBAC_INIT_DELAY +static void registration_error(int err, char * listname) +#else static void __init registration_error(int err, char * listname) +#endif { if(err) { @@ -1130,7 +1134,11 @@ static void __init 
registration_error(in /* is kept in memory for performance reasons, but is written to disk on */ /* every change. */ +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_init_pm(void) +#else int __init rsbac_init_pm(void) +#endif { int err = 0; struct proc_dir_entry * tmp_entry_p; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/rc_data_structures.c linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/rc_data_structures.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/data_structures/rc_data_structures.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/data_structures/rc_data_structures.c 2003-08-25 20:33:02.000000000 +0200 @@ -175,7 +175,11 @@ out: /* There can be no access to aci data structures before init. */ +#ifdef CONFIG_RSBAC_INIT_DELAY +static void registration_error(int err, char * listname) +#else static void __init registration_error(int err, char * listname) +#endif { if(err) { @@ -192,7 +196,11 @@ static void __init registration_error(in } } +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_init_rc(void) +#else int __init rsbac_init_rc(void) +#endif { int err = 0; struct proc_dir_entry * tmp_entry_p; @@ -546,6 +554,7 @@ int __init rsbac_init_rc(void) struct rsbac_rc_role_entry_t gen_entry = RSBAC_RC_GENERAL_ROLE_ENTRY; struct rsbac_rc_role_entry_t ra_entry = RSBAC_RC_ROLE_ADMIN_ROLE_ENTRY; struct rsbac_rc_role_entry_t sa_entry = RSBAC_RC_SYSTEM_ADMIN_ROLE_ENTRY; + struct rsbac_rc_role_entry_t au_entry = RSBAC_RC_AUDITOR_ROLE_ENTRY; char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); if(tmp) @@ -561,7 +570,8 @@ int __init rsbac_init_rc(void) if(!rsbac_list_lol_add(role_tcfd_handle, &role, NULL)) { type = RSBAC_RC_GENERAL_TYPE; - rights = RSBAC_READ_WRITE_REQUEST_VECTOR & RSBAC_FD_REQUEST_VECTOR; + rights = (RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_EXECUTE_REQUEST_VECTOR) + & RSBAC_FD_REQUEST_VECTOR; rsbac_list_lol_subadd(role_tcfd_handle, &role, &type, &rights); } if(!rsbac_list_lol_add(role_tcdv_handle, &role, NULL)) @@ -618,10 
+628,12 @@ int __init rsbac_init_rc(void) if(!rsbac_list_lol_add(role_tcfd_handle, &role, NULL)) { type = RSBAC_RC_GENERAL_TYPE; - rights = (RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR) & RSBAC_FD_REQUEST_VECTOR; + rights = (RSBAC_READ_WRITE_REQUEST_VECTOR + | RSBAC_EXECUTE_REQUEST_VECTOR + | RSBAC_SECURITY_REQUEST_VECTOR) + & RSBAC_FD_REQUEST_VECTOR; rsbac_list_lol_subadd(role_tcfd_handle, &role, &type, &rights); type = RSBAC_RC_SEC_TYPE; - rights = (RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR) & RSBAC_FD_REQUEST_VECTOR; rsbac_list_lol_subadd(role_tcfd_handle, &role, &type, &rights); type = RSBAC_RC_SYS_TYPE; rights = RSBAC_READ_REQUEST_VECTOR & RSBAC_FD_REQUEST_VECTOR; @@ -719,10 +731,12 @@ int __init rsbac_init_rc(void) if(!rsbac_list_lol_add(role_tcfd_handle, &role, NULL)) { type = RSBAC_RC_GENERAL_TYPE; - rights = (RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR) & RSBAC_FD_REQUEST_VECTOR; + rights = (RSBAC_READ_WRITE_REQUEST_VECTOR + | RSBAC_EXECUTE_REQUEST_VECTOR + | RSBAC_SYSTEM_REQUEST_VECTOR) + & RSBAC_FD_REQUEST_VECTOR; rsbac_list_lol_subadd(role_tcfd_handle, &role, &type, &rights); type = RSBAC_RC_SYS_TYPE; - rights = (RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR) & RSBAC_FD_REQUEST_VECTOR; rsbac_list_lol_subadd(role_tcfd_handle, &role, &type, &rights); } if(!rsbac_list_lol_add(role_tcdv_handle, &role, NULL)) @@ -792,6 +806,68 @@ int __init rsbac_init_rc(void) rsbac_list_lol_subadd(role_tcsc_handle, &role, &type, &rights); } } + role = RSBAC_RC_AUDITOR_ROLE; + if(!rsbac_list_add(role_handle, &role, &au_entry)) + { + if(!rsbac_list_lol_add(role_tcfd_handle, &role, NULL)) + { + type = RSBAC_RC_GENERAL_TYPE; + rights = (RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_EXECUTE_REQUEST_VECTOR) + & RSBAC_FD_REQUEST_VECTOR; + rsbac_list_lol_subadd(role_tcfd_handle, &role, &type, &rights); + } + if(!rsbac_list_lol_add(role_tcdv_handle, &role, NULL)) + { + type = RSBAC_RC_GENERAL_TYPE; + rights = 
RSBAC_READ_WRITE_REQUEST_VECTOR & RSBAC_DEV_REQUEST_VECTOR; + rsbac_list_lol_subadd(role_tcdv_handle, &role, &type, &rights); + } + if(!rsbac_list_lol_add(role_tcpr_handle, &role, NULL)) + { + type = RSBAC_RC_GENERAL_TYPE; + rights = RSBAC_READ_WRITE_REQUEST_VECTOR & RSBAC_PROCESS_REQUEST_VECTOR; + rsbac_list_lol_subadd(role_tcpr_handle, &role, &type, &rights); + } + if(!rsbac_list_lol_add(role_tcip_handle, &role, NULL)) + { + type = RSBAC_RC_GENERAL_TYPE; + rights = RSBAC_READ_WRITE_REQUEST_VECTOR & RSBAC_IPC_REQUEST_VECTOR; + rsbac_list_lol_subadd(role_tcip_handle, &role, &type, &rights); + } + if(!rsbac_list_lol_add(role_tcnd_handle, &role, NULL)) + { + type = RSBAC_RC_GENERAL_TYPE; + rights = RSBAC_READ_WRITE_REQUEST_VECTOR & RSBAC_NETDEV_REQUEST_VECTOR; + rsbac_list_lol_subadd(role_tcnd_handle, &role, &type, &rights); + } + if(!rsbac_list_lol_add(role_tcno_handle, &role, NULL)) + { + type = RSBAC_RC_GENERAL_TYPE; + rights = RSBAC_READ_WRITE_REQUEST_VECTOR & RSBAC_NETOBJ_REQUEST_VECTOR; + rsbac_list_lol_subadd(role_tcno_handle, &role, &type, &rights); + } + if(!rsbac_list_lol_add(role_tcsc_handle, &role, NULL)) + { + #ifdef CONFIG_RSBAC_USER_MOD_IOPERM + type = ST_ioports; + rights = RSBAC_RC_RIGHTS_VECTOR(R_MODIFY_PERMISSIONS_DATA); + rsbac_list_lol_subadd(role_tcsc_handle, &role, &type, &rights); + #endif + type = ST_rlimit; + rights = -1; + rsbac_list_lol_subadd(role_tcsc_handle, &role, &type, &rights); + type = ST_rsbaclog; + rights = RSBAC_RC_RIGHTS_VECTOR(R_GET_STATUS_DATA) + | RSBAC_RC_RIGHTS_VECTOR(R_MODIFY_SYSTEM_DATA); + rsbac_list_lol_subadd(role_tcsc_handle, &role, &type, &rights); + type = ST_other; + rights = RSBAC_RC_RIGHTS_VECTOR(R_MAP_EXEC); + rsbac_list_lol_subadd(role_tcsc_handle, &role, &type, &rights); + type = ST_network; + rights = RSBAC_RC_RIGHTS_VECTOR(R_GET_STATUS_DATA); + rsbac_list_lol_subadd(role_tcsc_handle, &role, &type, &rights); + } + } } list_info.version = RSBAC_RC_TYPE_FD_LIST_VERSION; diff -Naurp 
linux-2.4.20-wolk4.8-fullkernel/rsbac/help/debug.c linux-2.4.20-wolk4.9-fullkernel/rsbac/help/debug.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/help/debug.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/help/debug.c 2003-08-25 20:33:02.000000000 +0200 @@ -1,12 +1,12 @@ /************************************ */ /* Rule Set Based Access Control */ /* */ -/* Author and (c) 1999-2002: */ +/* Author and (c) 1999-2003: */ /* Amon Ott */ /* */ -/* Debug functions for all parts */ +/* Debug and logging functions for all parts */ /* */ -/* Last modified: 08/Aug/2002 */ +/* Last modified: 01/Jul/2003 */ /************************************ */ #include @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,7 @@ #include #include #include +#include #ifdef CONFIG_RSBAC_DEBUG /* Boolean debug switch for data structures */ @@ -51,6 +53,15 @@ int rsbac_debug_adf_net = 0; int rsbac_debug_aef_net = 0; #endif +#if defined(CONFIG_RSBAC_MAC) +/* Boolean debug switch for MAC data structures */ +int rsbac_debug_ds_mac = 0; +/* Boolean debug switch for MAC syscalls / AEF */ +int rsbac_debug_aef_mac = 0; +/* Boolean debug switch for MAC decisions / ADF */ +int rsbac_debug_adf_mac = 0; +#endif + #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT) /* Boolean debug switch for PM data structures */ int rsbac_debug_ds_pm = 0; @@ -136,6 +147,10 @@ int rsbac_in_softmode(void) } #endif +#if defined(CONFIG_RSBAC_CAP_PROC_HIDE) +int rsbac_cap_process_hiding = 0; +#endif + #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL /* Boolean switch for disabling Linux DAC */ int rsbac_dac_disable = 0; @@ -154,6 +169,12 @@ int rsbac_dac_is_disabled(void) int rsbac_nosyslog = 0; #endif +/* Boolean switch for delayed init option*/ +#ifdef CONFIG_RSBAC_INIT_DELAY +int rsbac_delay_init = 1; +kdev_t rsbac_delayed_root = MKDEV(0,0); +#endif + /* Array of Boolean debug switches for ADF */ int rsbac_debug_adf_default = 1; rsbac_log_entry_t 
rsbac_log_levels[R_NONE+1]; @@ -164,18 +185,41 @@ boolean rsbac_debug_adf_dirty = FALSE; #if defined(CONFIG_RSBAC_RMSG) #include #include -#define RLOG_BUF_LEN (16384) +#define RLOG_BUF_LEN (1 << 14) #define RLOG_BUF_MASK (RLOG_BUF_LEN-1) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) static struct wait_queue * rlog_wait = NULL; +static u_long rlog_size = 0; #else DECLARE_WAIT_QUEUE_HEAD(rlog_wait); +static unsigned long log_end = 0; /* Index into log_buf: most-recently-written-char + 1 */ #endif -static char buf[1024]; -static u_long rlog_size = 0; static char rlog_buf[RLOG_BUF_LEN]; +#define RLOG_BUF(idx) (rlog_buf[(idx) & RLOG_BUF_MASK]) static unsigned long log_start = 0; static unsigned long logged_chars = 0; + +#if defined(CONFIG_RSBAC_LOG_REMOTE) +#define REMOTE_RLOG_BUF_LEN (16384) +#define REMOTE_RLOG_BUF_MASK (REMOTE_RLOG_BUF_LEN-1) +static DECLARE_WAIT_QUEUE_HEAD(rsbaclogd_wait); +#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC +static struct timer_list rsbac_log_remote_timer; +u_int rsbac_log_remote_interval = CONFIG_RSBAC_LOG_INTERVAL; +#endif +rsbac_pid_t rsbaclogd_pid=0; +#define REMOTE_SEND_BUF_LEN 1024 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) +static u_long remote_rlog_size = 0; +#else +static unsigned long remote_log_end; +#endif +static char remote_rlog_buf[REMOTE_RLOG_BUF_LEN]; +static unsigned long remote_log_start = 0; +static unsigned long remote_logged_chars = 0; +#define REMOTE_RLOG_BUF(idx) (remote_rlog_buf[(idx) & REMOTE_RLOG_BUF_MASK]) +#endif + #endif void rsbac_adf_log_switch(rsbac_adf_request_int_t request, @@ -288,8 +332,26 @@ inline boolean rsbac_parse_koptions(char rsbac_ind_softmode[CAP] = 1; return(TRUE); } + if (!strcmp(line,"rsbac_softmode_jail")) + { + rsbac_ind_softmode[JAIL] = 1; + return(TRUE); + } + if (!strcmp(line,"rsbac_softmode_res")) + { + rsbac_ind_softmode[RES] = 1; + return(TRUE); + } #endif #endif + #if defined(CONFIG_RSBAC_CAP_PROC_HIDE) + /* RSBAC: hide processes? 
*/ + if (!strcmp(line,"rsbac_cap_process_hiding")) + { + rsbac_cap_process_hiding = 1; + return(TRUE); + } + #endif #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL /* RSBAC: disable Linux DAC? */ if (!strcmp(line,"rsbac_dac_disable")) @@ -307,6 +369,42 @@ inline boolean rsbac_parse_koptions(char return(TRUE); } #endif + #ifdef CONFIG_RSBAC_INIT_DELAY + if ( !strcmp(line,"rsbac_delay_init") + ) + { + rsbac_delay_init = 1; + return(TRUE); + } + if ( !strcmp(line,"rsbac_no_delay_init") + || !strcmp(line,"rsbac_no_init_delay") + ) + { + rsbac_delay_init = 0; + return(TRUE); + } + if ( !strncmp(line,"rsbac_delayed_root=",19) + ) + { + char * p = line+19; + u_int major = 0; + u_int minor = 0; + + if(*p) + { + major = simple_strtoul(p, NULL, 0); + while((*p != ':') && (*p != '\0')) + p++; + if(*p) + { + p++; + minor = simple_strtoul(p, NULL, 0); + } + rsbac_delayed_root = MKDEV(major,minor); + } + return(TRUE); + } + #endif #ifdef CONFIG_RSBAC_DEBUG #ifdef CONFIG_RSBAC_NET @@ -338,6 +436,34 @@ inline boolean rsbac_parse_koptions(char } #endif + #if defined(CONFIG_RSBAC_MAC) + /* RSBAC: debug for all of mac? */ + if (!strcmp(line,"rsbac_debug_mac")) + { + rsbac_debug_ds_mac = 1; + rsbac_debug_aef_mac = 1; + rsbac_debug_adf_mac = 1; + return(TRUE); + } + /* RSBAC: debug for mac data structures? */ + if (!strcmp(line,"rsbac_debug_ds_mac")) + { + rsbac_debug_ds_mac = 1; + return(TRUE); + } + /* RSBAC: debug for MAC-syscalls/AEF? */ + if (!strcmp(line,"rsbac_debug_aef_mac")) + { + rsbac_debug_aef_mac = 1; + return(TRUE); + } + /* RSBAC: debug for MAC-decisions/ADF? */ + if (!strcmp(line,"rsbac_debug_adf_mac")) + { + rsbac_debug_adf_mac = 1; + return(TRUE); + } + #endif #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT) /* RSBAC: debug for all of pm? 
*/ if (!strcmp(line,"rsbac_debug_pm")) @@ -504,6 +630,11 @@ inline boolean rsbac_parse_koptions(char rsbac_debug_write = 1; rsbac_debug_aef = 1; rsbac_debug_adf_default = 2; + #if defined(CONFIG_RSBAC_MAC) + rsbac_debug_ds_mac = 1; + rsbac_debug_aef_mac = 1; + rsbac_debug_adf_mac = 1; + #endif #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT) rsbac_debug_ds_pm = 1; rsbac_debug_aef_pm = 1; @@ -599,8 +730,13 @@ inline boolean rsbac_parse_koptions(char #if defined(CONFIG_RSBAC_RMSG) -static spinlock_t rsbac_log_lock; +static spinlock_t rsbac_log_lock = SPIN_LOCK_UNLOCKED; + +#if defined(CONFIG_RSBAC_LOG_REMOTE) +static spinlock_t rsbac_log_remote_lock = SPIN_LOCK_UNLOCKED; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) /* * Commands to rsbac_log: * @@ -613,32 +749,28 @@ static spinlock_t rsbac_log_lock; */ int rsbac_log(int type, char * buf, int len) { -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) - unsigned long i, j, count, flags; -#else - unsigned long i, j, limit, count; -#endif int do_clear = 0; char c; int error = -EPERM; + int i,j; + u_long flags; + u_long count; union rsbac_target_id_t rsbac_target_id; union rsbac_attribute_value_t rsbac_attribute_value; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) lock_kernel(); -#endif /* RSBAC */ -#ifdef CONFIG_RSBAC_DEBUG - if (rsbac_debug_aef) - printk(KERN_DEBUG "rsbac_log(): calling ADF\n"); -#endif rsbac_target_id.scd = ST_rsbaclog; rsbac_attribute_value.dummy = 0; - if ((type == 2) || (type == 3)) + if ((type == 4) || (type == 5)) { - if (!rsbac_adf_request(R_GET_STATUS_DATA, +#ifdef CONFIG_RSBAC_DEBUG + if (rsbac_debug_aef) + printk(KERN_DEBUG "rsbac_log(): function %u, calling ADF for MODIFY_SYSTEM_DATA\n", type); +#endif + if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA, current->pid, T_SCD, rsbac_target_id, @@ -650,8 +782,13 @@ int rsbac_log(int type, char * buf, int } } else + if(type >= 1) { - if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA, +#ifdef CONFIG_RSBAC_DEBUG + if (rsbac_debug_aef) + 
printk(KERN_DEBUG "rsbac_log(): function %u, calling ADF for GET_STATUS_DATA\n", type); +#endif + if (!rsbac_adf_request(R_GET_STATUS_DATA, current->pid, T_SCD, rsbac_target_id, @@ -679,7 +816,6 @@ int rsbac_log(int type, char * buf, int error = verify_area(VERIFY_WRITE,buf,len); if (error) goto out; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) cli(); error = -ERESTARTSYS; while (!rlog_size) { @@ -702,24 +838,6 @@ int rsbac_log(int type, char * buf, int cli(); } sti(); -#else - error = wait_event_interruptible(rlog_wait, rlog_size); - if(error) - goto out; - i = 0; - spin_lock_irq(&rsbac_log_lock); - while (rlog_size && i < len) { - c = rlog_buf[log_start & RLOG_BUF_MASK]; - log_start++; - rlog_size--; - spin_unlock_irq(&rsbac_log_lock); - __put_user(c,buf); - buf++; - i++; - spin_lock_irq(&rsbac_log_lock); - } - spin_unlock_irq(&rsbac_log_lock); -#endif error = i; break; case 4: /* Read/clear last kernel messages */ @@ -735,7 +853,6 @@ int rsbac_log(int type, char * buf, int error = verify_area(VERIFY_WRITE,buf,len); if (error) goto out; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) /* * The logged_chars, log_start, and rlog_size values may * change from an interrupt, so we disable interrupts. @@ -756,15 +873,135 @@ int rsbac_log(int type, char * buf, int if (do_clear) logged_chars = 0; error = i; -#else + break; + case 5: /* Clear ring buffer */ + logged_chars = 0; + break; + default: + error = -EINVAL; + break; + } +out: + unlock_kernel(); + return error; +} + +#else /* Kernel Version >= 2.3.0 */ + +/* + * Commands to do_syslog: + * + * 0 -- Close the log. Currently a NOP. + * 1 -- Open the log. Currently a NOP. + * 2 -- Read from the log. + * 3 -- Read all messages remaining in the ring buffer. + * 4 -- Read and clear all messages remaining in the ring buffer + * 5 -- Clear ring buffer. 
+ * 9 -- Return number of unread characters in the log buffer + */ +int rsbac_log(int type, char * buf, int len) +{ + unsigned long i, j, limit, count; + int do_clear = 0; + char c; + int error = 0; + + union rsbac_target_id_t rsbac_target_id; + union rsbac_attribute_value_t rsbac_attribute_value; + + /* RSBAC */ + rsbac_target_id.scd = ST_rsbaclog; + rsbac_attribute_value.dummy = 0; + if ((type == 4) || (type == 5)) + { +#ifdef CONFIG_RSBAC_DEBUG + if (rsbac_debug_aef) + printk(KERN_DEBUG "rsbac_log(): function %u, calling ADF for MODIFY_SYSTEM_DATA\n", type); +#endif + if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA, + current->pid, + T_SCD, + rsbac_target_id, + A_none, + rsbac_attribute_value)) + { + error = -EPERM; + goto out; + } + } + else + if(type >= 1) + { +#ifdef CONFIG_RSBAC_DEBUG + if (rsbac_debug_aef) + printk(KERN_DEBUG "rsbac_log(): function %u, calling ADF for GET_STATUS_DATA\n", type); +#endif + if (!rsbac_adf_request(R_GET_STATUS_DATA, + current->pid, + T_SCD, + rsbac_target_id, + A_none, + rsbac_attribute_value)) + { + error = -EPERM; + goto out; + } + } + + switch (type) { + case 0: /* Close log */ + break; + case 1: /* Open log */ + break; + case 2: /* Read from log */ + error = -EINVAL; + if (!buf || len < 0) + goto out; + error = 0; + if (!len) + goto out; + error = verify_area(VERIFY_WRITE,buf,len); + if (error) + goto out; + error = wait_event_interruptible(rlog_wait, (log_start - log_end)); + if (error) + goto out; + i = 0; + spin_lock_irq(&rsbac_log_lock); + while ((log_start != log_end) && i < len) { + c = RLOG_BUF(log_start); + log_start++; + spin_unlock_irq(&rsbac_log_lock); + __put_user(c,buf); + buf++; + i++; + spin_lock_irq(&rsbac_log_lock); + } + spin_unlock_irq(&rsbac_log_lock); + error = i; + break; + case 4: /* Read/clear last kernel messages */ + do_clear = 1; + /* FALL THRU */ + case 3: /* Read last kernel messages */ + error = -EINVAL; + if (!buf || len < 0) + goto out; + error = 0; + if (!len) + goto out; + error = 
verify_area(VERIFY_WRITE,buf,len); + if (error) + goto out; count = len; if (count > RLOG_BUF_LEN) count = RLOG_BUF_LEN; + spin_lock_irq(&rsbac_log_lock); if (count > logged_chars) count = logged_chars; if (do_clear) logged_chars = 0; - limit = log_start + rlog_size; + limit = log_end; /* * __put_user() could sleep, and while we sleep * printk() could overwrite the messages @@ -773,11 +1010,14 @@ int rsbac_log(int type, char * buf, int */ for(i=0;i < count;i++) { j = limit-1-i; - if (j+RLOG_BUF_LEN < log_start+rlog_size) + if (j+RLOG_BUF_LEN < log_end) break; - c = rlog_buf[ j & RLOG_BUF_MASK ]; + c = RLOG_BUF(j); + spin_unlock_irq(&rsbac_log_lock); __put_user(c,&buf[count-1-i]); + spin_lock_irq(&rsbac_log_lock); } + spin_unlock_irq(&rsbac_log_lock); error = i; if(i != count) { int offset = count-error; @@ -787,41 +1027,54 @@ int rsbac_log(int type, char * buf, int __put_user(c,&buf[i]); } } -#endif + break; case 5: /* Clear ring buffer */ + spin_lock_irq(&rsbac_log_lock); logged_chars = 0; + spin_unlock_irq(&rsbac_log_lock); + break; + case 9: /* Number of chars in the log buffer */ + spin_lock_irq(&rsbac_log_lock); + error = log_end - log_start; + spin_unlock_irq(&rsbac_log_lock); break; default: error = -EINVAL; break; } out: -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) - unlock_kernel(); -#endif return error; } +#endif /* Kernel Version */ #if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) EXPORT_SYMBOL(rsbac_printk); #endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) int rsbac_printk(const char *fmt, ...) 
{ va_list args; int i; - char *msg, *p, *buf_end; + static u_int log_seq = 0; +#if defined(CONFIG_RSBAC_LOG_REMOTE) + int j; + static char remote_buf[1024]; + static u_int remote_log_seq = 0; +#endif + char *p, *buf_end; + static char buf[1024]; int line_feed; static signed char msg_level = -1; long flags; spin_lock_irqsave(&rsbac_log_lock, flags); va_start(args, fmt); - i = vsprintf(buf + 3, fmt, args); /* hopefully i < sizeof(buf)-4 */ - buf_end = buf + 3 + i; + i = vsprintf(buf + 14, fmt, args); /* hopefully i < sizeof(buf)-15 */ + buf_end = buf + i + 14; va_end(args); - for (p = buf + 3; p < buf_end; p++) { - msg = p; + for (p = buf + 14; p < buf_end; p++) { if (msg_level < 0) { if ( p[0] != '<' || @@ -829,12 +1082,20 @@ int rsbac_printk(const char *fmt, ...) p[1] > '7' || p[2] != '>' ) { - p -= 3; + p -= 14; p[0] = '<'; p[1] = RSBAC_DEF_MESS_LOGLEVEL + '0'; p[2] = '>'; - } else - msg += 3; + sprintf(p + 3, "%010u", log_seq++); + p[13] = '|'; + } else { + p -= 11; + p[0] = '<'; + p[1] = p[12]; + p[2] = '>'; + sprintf(p + 3, "%010u", log_seq++); + p[13] = '|'; + } msg_level = p[1] - '0'; } line_feed = 0; @@ -857,9 +1118,196 @@ int rsbac_printk(const char *fmt, ...) 
} spin_unlock_irqrestore(&rsbac_log_lock, flags); wake_up_interruptible(&rlog_wait); + +#if defined(CONFIG_RSBAC_LOG_REMOTE) + spin_lock_irqsave(&rsbac_log_remote_lock, flags); + va_start(args, fmt); + j = vsprintf(remote_buf + 14, fmt, args); /* hopefully j < sizeof(buf)-15 */ + buf_end = remote_buf + j + 14; + va_end(args); + for (p = remote_buf + 14; p < buf_end; p++) { + if (msg_level < 0) { + if ( + p[0] != '<' || + p[1] < '0' || + p[1] > '7' || + p[2] != '>' + ) { + p -= 14; + p[0] = '<'; + p[1] = RSBAC_DEF_MESS_LOGLEVEL + '0'; + p[2] = '>'; + sprintf(p + 3, "%010u", remote_log_seq++); + p[13] = '|'; + } else { + p -= 11; + p[0] = '<'; + p[1] = p[12]; + p[2] = '>'; + sprintf(p + 3, "%010u", remote_log_seq++); + p[13] = '|'; + } + msg_level = p[1] - '0'; + } + line_feed = 0; + for (; p < buf_end; p++) { + remote_rlog_buf[(remote_log_start+remote_rlog_size) & (REMOTE_RLOG_BUF_LEN-1)] = *p; + if (remote_rlog_size < REMOTE_RLOG_BUF_LEN) + remote_rlog_size++; + else { + remote_log_start++; + remote_log_start &= REMOTE_RLOG_BUF_LEN-1; + } + remote_logged_chars++; + if (*p == '\n') { + line_feed = 1; + break; + } + } + if (line_feed) + msg_level = -1; + } + spin_unlock_irqrestore(&rsbac_log_remote_lock, flags); +#ifdef CONFIG_RSBAC_LOG_REMOTE_SYNC + wake_up_interruptible(&rsbaclogd_wait); +#endif +#endif + return i; } +#else /* Kernel Version >= 2.3.0 */ + +static void emit_log_char(char c) +{ + RLOG_BUF(log_end) = c; + log_end++; + if (log_end - log_start > RLOG_BUF_LEN) + log_start = log_end - RLOG_BUF_LEN; + if (logged_chars < RLOG_BUF_LEN) + logged_chars++; +} + +#if defined(CONFIG_RSBAC_LOG_REMOTE) +static void emit_remote_log_char(char c) +{ + REMOTE_RLOG_BUF(remote_log_end) = c; + remote_log_end++; + if (remote_log_end - remote_log_start > REMOTE_RLOG_BUF_LEN) + remote_log_start = remote_log_end - REMOTE_RLOG_BUF_LEN; + if (remote_logged_chars < REMOTE_RLOG_BUF_LEN) + remote_logged_chars++; +} +#endif + +int rsbac_printk(const char *fmt, ...) 
+{ + va_list args; + unsigned long flags; + int printed_len; + char *p; + static char buf[2048]; + static int log_level_unknown = 1; + static u_int log_seq = 0; +#if defined(CONFIG_RSBAC_LOG_REMOTE) + static u_int remote_log_seq = 0; + static int remote_log_level_unknown = 1; +#endif + + if (oops_in_progress) { + /* If a crash is occurring, make sure we can't deadlock */ + spin_lock_init(&rsbac_log_lock); + } + + /* This stops the holder of console_sem just where we want him */ + spin_lock_irqsave(&rsbac_log_lock, flags); + + /* Emit the output into the temporary buffer */ + va_start(args, fmt); + printed_len = vsnprintf(buf + 14, sizeof(buf) - 14, fmt, args); + va_end(args); + + /* + * Copy the output into log_buf. If the caller didn't provide + * appropriate log level tags, we insert them here + */ + for (p = buf + 14; *p; p++) { + if (log_level_unknown) { + if (p[0] != '<' || p[1] < '0' || p[1] > '7' || p[2] != '>') { + p -= 14; + p[0] = '<'; + p[1] = RSBAC_DEF_MESS_LOGLEVEL + '0'; + p[2] = '>'; + sprintf(p + 3, "%010u", log_seq++); + p[13] = '|'; + } else { + p -= 11; + p[0] = '<'; + p[1] = p[12]; + p[2] = '>'; + sprintf(p + 3, "%010u", log_seq++); + p[13] = '|'; + } + log_level_unknown = 0; + } + emit_log_char(*p); + if (*p == '\n') + log_level_unknown = 1; + } + spin_unlock_irqrestore(&rsbac_log_lock, flags); + wake_up_interruptible(&rlog_wait); + +#if defined(CONFIG_RSBAC_LOG_REMOTE) + if (oops_in_progress) { + /* If a crash is occurring, make sure we can't deadlock */ + spin_lock_init(&rsbac_log_remote_lock); + } + + /* This stops the holder of console_sem just where we want him */ + spin_lock_irqsave(&rsbac_log_remote_lock, flags); + + /* Emit the output into the temporary buffer */ + va_start(args, fmt); + printed_len = vsnprintf(buf + 14, sizeof(buf) - 14, fmt, args); + va_end(args); + + /* + * Copy the output into log_buf. 
If the caller didn't provide + * appropriate log level tags, we insert them here + */ + for (p = buf + 14; *p; p++) { + if (remote_log_level_unknown) { + if (p[0] != '<' || p[1] < '0' || p[1] > '7' || p[2] != '>') { + p -= 14; + p[0] = '<'; + p[1] = RSBAC_DEF_MESS_LOGLEVEL + '0'; + p[2] = '>'; + sprintf(p + 3, "%010u", remote_log_seq++); + p[13] = '|'; + } else { + p -= 11; + p[0] = '<'; + p[1] = p[12]; + p[2] = '>'; + sprintf(p + 3, "%010u", remote_log_seq++); + p[13] = '|'; + } + remote_log_level_unknown = 0; + } + emit_remote_log_char(*p); + if (*p == '\n') + remote_log_level_unknown = 1; + } + spin_unlock_irqrestore(&rsbac_log_remote_lock, flags); +#ifdef CONFIG_RSBAC_LOG_REMOTE_SYNC + wake_up_interruptible(&rsbaclogd_wait); +#endif +#endif + + return printed_len; +} +#endif /* Kernel Version */ + #if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS) static int rmsg_open(struct inode * inode, struct file * file) { @@ -881,8 +1329,13 @@ static ssize_t rmsg_read(struct file * f static unsigned int rmsg_poll(struct file *file, poll_table * wait) { poll_wait(file, &rlog_wait, wait); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) if (rlog_size) return POLLIN | POLLRDNORM; +#else + if (rsbac_log(9, 0, 0)) + return POLLIN | POLLRDNORM; +#endif return 0; } @@ -1197,6 +1650,14 @@ debug_proc_info(char *buffer, char **sta len += sprintf(buffer + len, "rsbac_ind_softmode[CAP] is %i\n", rsbac_ind_softmode[CAP]); #endif +#ifdef CONFIG_RSBAC_JAIL + len += sprintf(buffer + len, "rsbac_ind_softmode[JAIL] is %i\n", + rsbac_ind_softmode[JAIL]); +#endif +#ifdef CONFIG_RSBAC_RES + len += sprintf(buffer + len, "rsbac_ind_softmode[RES] is %i\n", + rsbac_ind_softmode[RES]); +#endif pos = begin + len; if (pos < offset) { @@ -1208,6 +1669,18 @@ debug_proc_info(char *buffer, char **sta #endif #endif +#ifdef CONFIG_RSBAC_CAP_PROC_HIDE + len += sprintf(buffer + len, "rsbac_cap_process_hiding is %i\n", + rsbac_cap_process_hiding); + pos = begin + len; + if (pos < offset) + { + len = 0; 
+ begin = pos; + } + if (pos > offset+length) + goto out; +#endif #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL len += sprintf(buffer + len, "rsbac_dac_disable is %i\n", @@ -1235,6 +1708,29 @@ debug_proc_info(char *buffer, char **sta goto out; #endif +#ifdef CONFIG_RSBAC_INIT_DELAY + len += sprintf(buffer + len, "rsbac_delay_init is %i\n", + rsbac_delay_init); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + len += sprintf(buffer + len, "rsbac_delayed_root is %02u:%02u\n", + MAJOR(rsbac_delayed_root), MINOR(rsbac_delayed_root)); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; +#endif + #if defined(CONFIG_RSBAC_AUTH) len += sprintf(buffer + len, "rsbac_auth_enable_login is %i\n", rsbac_auth_enable_login); @@ -1378,6 +1874,44 @@ debug_proc_info(char *buffer, char **sta goto out; #endif +#if defined(CONFIG_RSBAC_MAC) +/* Boolean debug switch for MAC data structures */ + len += sprintf(buffer + len, "rsbac_debug_ds_mac is %i\n", + rsbac_debug_ds_mac); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + +/* Boolean debug switch for MAC syscalls / AEF */ + len += sprintf(buffer + len, "rsbac_debug_aef_mac is %i\n", + rsbac_debug_aef_mac); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; + +/* Boolean debug switch for MAC decisions / ADF */ + len += sprintf(buffer + len, "rsbac_debug_adf_mac is %i\n", + rsbac_debug_adf_mac); + pos = begin + len; + if (pos < offset) + { + len = 0; + begin = pos; + } + if (pos > offset+length) + goto out; +#endif + #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT) /* Boolean debug switch for PM data structures */ len += sprintf(buffer + len, "rsbac_debug_ds_pm is %i\n", @@ -1643,7 +2177,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug ind_softmode modname 
#N" > /proc/rsbac_info/debug * to set rsbac_ind_softmode[module] to given value */ - if(!strncmp("ind_softmode", p, 12)) + if(!strncmp("ind_softmode", k_buf + 6, 12)) { char tmp[20]; @@ -1701,7 +2235,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug softmode #N" > /proc/rsbac_info/debug * to set rsbac_softmode to given value */ - if(!strncmp("softmode", p, 8)) + if(!strncmp("softmode", k_buf + 6, 8)) { p += 9; @@ -1750,7 +2284,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug dac_disable #N" > /proc/rsbac_info/debug * to set rsbac_softmode to given value */ - if(!strncmp("dac_disable", p, 11)) + if(!strncmp("dac_disable", k_buf + 6, 11)) { p += 12; @@ -1799,7 +2333,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug nosyslog #N" > /proc/rsbac_info/debug * to set rsbac_nosyslog to given value */ - if(!strncmp("nosyslog", p, 8)) + if(!strncmp("nosyslog", k_buf + 6, 8)) { p += 9; @@ -1863,7 +2397,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug ds_net #N" > /proc/rsbac_info/debug * to set rsbac_debug_ds_net to given value */ - if(!strncmp("ds_net", p, 6)) + if(!strncmp("ds_net", k_buf + 6, 6)) { p += 7; @@ -1893,7 +2427,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug aef_net #N" > /proc/rsbac_info/debug * to set rsbac_debug_aef_net to given value */ - if(!strncmp("aef_net", p, 7)) + if(!strncmp("aef_net", k_buf + 6, 7)) { p += 8; @@ -1924,7 +2458,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug adf_net #N" > /proc/rsbac_info/debug * to set rsbac_debug_adf_net to given value */ - if(!strncmp("adf_net", p, 7)) + if(!strncmp("adf_net", k_buf + 6, 7)) { p += 8; @@ -1951,13 +2485,107 @@ static ssize_t debug_proc_write(struct f } #endif +#if defined(CONFIG_RSBAC_MAC) +/* Boolean debug switch for MAC data structures */ + /* + * Usage: echo "debug ds_mac #N" > /proc/rsbac_info/debug + * to set rsbac_debug_ds_mac to given value + */ + if(!strncmp("ds_mac", k_buf + 6, 6)) + { 
+ p += 7; + + if( *p == '\0' ) + goto out; + + debug_level = simple_strtoul(p, NULL, 0); + /* only accept 0 or 1 */ + if(!debug_level || (debug_level == 1)) + { + printk(KERN_INFO + "debug_proc_write(): setting rsbac_debug_ds_mac to %u\n", + debug_level); + rsbac_debug_ds_mac = debug_level; + err = count; + goto out; + } + else + { + printk(KERN_INFO + "debug_proc_write(): rejecting invalid debug level (should be 0 or 1)\n"); + goto out; + } + } +/* Boolean debug switch for MAC syscalls / AEF */ + /* + * Usage: echo "debug aef_mac #N" > /proc/rsbac_info/debug + * to set rsbac_debug_aef_mac to given value + */ + if(!strncmp("aef_mac", k_buf + 6, 7)) + { + p += 8; + + if( *p == '\0' ) + goto out; + + debug_level = simple_strtoul(p, NULL, 0); + /* only accept 0 or 1 */ + if(!debug_level || (debug_level == 1)) + { + printk(KERN_INFO + "debug_proc_write(): setting rsbac_debug_aef_mac to %u\n", + debug_level); + rsbac_debug_aef_mac = debug_level; + err = count; + goto out; + } + else + { + printk(KERN_INFO + "debug_proc_write(): rejecting invalid debug level (should be 0 or 1)\n"); + goto out; + } + } + +/* Boolean debug switch for MAC decisions / ADF */ + /* + * Usage: echo "debug adf_mac #N" > /proc/rsbac_info/debug + * to set rsbac_debug_adf_mac to given value + */ + if(!strncmp("adf_mac", k_buf + 6, 7)) + { + p += 8; + + if( *p == '\0' ) + goto out; + + debug_level = simple_strtoul(p, NULL, 0); + /* only accept 0 or 1 */ + if(!debug_level || (debug_level == 1)) + { + printk(KERN_INFO + "debug_proc_write(): setting rsbac_debug_adf_mac to %u\n", + debug_level); + rsbac_debug_adf_mac = debug_level; + err = count; + goto out; + } + else + { + printk(KERN_INFO + "debug_proc_write(): rejecting invalid debug level (should be 0 or 1)\n"); + goto out; + } + } +#endif + #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT) /* Boolean debug switch for PM data structures */ /* * Usage: echo "debug ds_pm #N" > /proc/rsbac_info/debug * to set rsbac_debug_ds_pm to given 
value */ - if(!strncmp("ds_pm", p, 5)) + if(!strncmp("ds_pm", k_buf + 6, 5)) { p += 6; @@ -1987,7 +2615,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug aef_pm #N" > /proc/rsbac_info/debug * to set rsbac_debug_aef_pm to given value */ - if(!strncmp("aef_pm", p, 6)) + if(!strncmp("aef_pm", k_buf + 6, 6)) { p += 7; @@ -2018,7 +2646,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug adf_pm #N" > /proc/rsbac_info/debug * to set rsbac_debug_adf_pm to given value */ - if(!strncmp("adf_pm", p, 6)) + if(!strncmp("adf_pm", k_buf + 6, 6)) { p += 7; @@ -2051,7 +2679,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug adf_ms #N" > /proc/rsbac_info/debug * to set rsbac_debug_adf_ms to given value */ - if(!strncmp("adf_ms", p, 6)) + if(!strncmp("adf_ms", k_buf + 6, 6)) { p += 7; @@ -2084,7 +2712,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug ds_rc #N" > /proc/rsbac_info/debug * to set rsbac_debug_ds_rc to given value */ - if(!strncmp("ds_rc", p, 5)) + if(!strncmp("ds_rc", k_buf + 6, 5)) { p += 6; @@ -2114,7 +2742,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug aef_rc #N" > /proc/rsbac_info/debug * to set rsbac_debug_aef_rc to given value */ - if(!strncmp("aef_rc", p, 6)) + if(!strncmp("aef_rc", k_buf + 6, 6)) { p += 7; @@ -2145,7 +2773,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug adf_rc #N" > /proc/rsbac_info/debug * to set rsbac_debug_adf_rc to given value */ - if(!strncmp("adf_rc", p, 6)) + if(!strncmp("adf_rc", k_buf + 6, 6)) { p += 7; @@ -2178,7 +2806,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug ds_auth #N" > /proc/rsbac_info/debug * to set rsbac_debug_ds_auth to given value */ - if(!strncmp("ds_auth", p, 7)) + if(!strncmp("ds_auth", k_buf + 6, 7)) { p += 8; @@ -2208,7 +2836,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug aef_auth #N" > /proc/rsbac_info/debug * to set rsbac_debug_aef_auth to given value */ - 
if(!strncmp("aef_auth", p, 8)) + if(!strncmp("aef_auth", k_buf + 6, 8)) { p += 9; @@ -2239,7 +2867,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug adf_auth #N" > /proc/rsbac_info/debug * to set rsbac_debug_adf_auth to given value */ - if(!strncmp("adf_auth", p, 8)) + if(!strncmp("adf_auth", k_buf + 6, 8)) { p += 9; @@ -2272,7 +2900,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug reg #N" > /proc/rsbac_info/debug * to set rsbac_debug_reg to given value */ - if(!strncmp("reg", p, 3)) + if(!strncmp("reg", k_buf + 6, 3)) { p += 3; @@ -2305,7 +2933,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug ds_acl #N" > /proc/rsbac_info/debug * to set rsbac_debug_ds_acl to given value */ - if(!strncmp("ds_acl", p, 6)) + if(!strncmp("ds_acl", k_buf + 6, 6)) { p += 7; @@ -2335,7 +2963,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug aef_acl #N" > /proc/rsbac_info/debug * to set rsbac_debug_aef_acl to given value */ - if(!strncmp("aef_acl", p, 7)) + if(!strncmp("aef_acl", k_buf + 6, 7)) { p += 8; @@ -2366,7 +2994,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug adf_acl #N" > /proc/rsbac_info/debug * to set rsbac_debug_adf_acl to given value */ - if(!strncmp("adf_acl", p, 7)) + if(!strncmp("adf_acl", k_buf + 6, 7)) { p += 8; @@ -2399,7 +3027,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug aef_jail #N" > /proc/rsbac_info/debug * to set rsbac_debug_aef_jail to given value */ - if(!strncmp("aef_jail", p, 8)) + if(!strncmp("aef_jail", k_buf + 6, 8)) { p += 9; @@ -2430,7 +3058,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug adf_jail #N" > /proc/rsbac_info/debug * to set rsbac_debug_adf_jail to given value */ - if(!strncmp("adf_jail", p, 8)) + if(!strncmp("adf_jail", k_buf + 6, 8)) { p += 9; @@ -2461,7 +3089,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug ds #N" > /proc/rsbac_info/debug * to set rsbac_debug_ds to given value */ - 
if(!strncmp("ds", p, 2)) + if(!strncmp("ds", k_buf + 6, 2)) { p += 3; @@ -2491,7 +3119,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug write #N" > /proc/rsbac_info/debug * to set rsbac_debug_write to given value */ - if(!strncmp("write", p, 5)) + if(!strncmp("write", k_buf + 6, 5)) { p += 6; @@ -2521,7 +3149,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug stack #N" > /proc/rsbac_info/debug * to set rsbac_debug_stack to given value */ - if(!strncmp("stack", p, 5)) + if(!strncmp("stack", k_buf + 6, 5)) { p += 6; @@ -2551,7 +3179,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug lists #N" > /proc/rsbac_info/debug * to set rsbac_debug_lists to given value */ - if(!strncmp("lists", p, 5)) + if(!strncmp("lists", k_buf + 6, 5)) { p += 6; @@ -2582,7 +3210,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug aef #N" > /proc/rsbac_info/debug * to set rsbac_debug_aef to given value */ - if(!strncmp("aef", p, 3)) + if(!strncmp("aef", k_buf + 6, 3)) { p += 4; @@ -2613,7 +3241,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug no_write #N" > /proc/rsbac_info/debug * to set rsbac_debug_no_write to given value */ - if(!strncmp("no_write", p, 8)) + if(!strncmp("no_write", k_buf + 6, 8)) { p += 9; @@ -2644,7 +3272,7 @@ static ssize_t debug_proc_write(struct f * Usage: echo "debug auto #N" > /proc/rsbac_info/debug * to set rsbac_debug_auto to given value */ - if(!strncmp("auto", p, 4)) + if(!strncmp("auto", k_buf + 6, 4)) { p += 5; @@ -2678,11 +3306,219 @@ out: } #endif /* defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS) */ +#if defined(CONFIG_RSBAC_LOG_REMOTE) +/* declare net functions */ +long sys_socket(int family, int type, int protocol); +long sys_bind(int fd, struct sockaddr *umyaddr, int addrlen); +long sys_sendto(int fd, void * buff, size_t len, unsigned flags, + struct sockaddr *addr, int addr_len); + +#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC +/* rsbac kernel timer for auto-write */ +static 
void wakeup_rsbaclogd(u_long dummy) + { + wake_up(&rsbaclogd_wait); + } +#endif + +/* rsbac kernel daemon for remote logging */ +static int rsbaclogd(void * dummy) + { + struct task_struct *tsk = current; + int err; + int i; + char c; + int sock_fd; + struct sockaddr_in addr; + char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN); + static char remote_send_buf[REMOTE_SEND_BUF_LEN]; + mm_segment_t oldfs; + + printk(KERN_INFO "rsbaclogd(): Initializing.\n"); + tsk->session = 1; + tsk->pgrp = 1; + strcpy(tsk->comm, "rsbaclogd"); +/* lock_kernel(); */ + +#ifdef CONFIG_RSBAC_DEBUG + printk(KERN_DEBUG "rsbaclogd(): Setting auto timer.\n"); +#endif +#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC + init_timer(&rsbac_log_remote_timer); + rsbac_log_remote_timer.function = wakeup_rsbaclogd; + rsbac_log_remote_timer.data = 0; + rsbac_log_remote_timer.expires = jiffies + rsbac_log_remote_interval; + add_timer(&rsbac_log_remote_timer); +#endif + interruptible_sleep_on(&rsbaclogd_wait); + + /* create a socket */ + sock_fd = sys_socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP); + if(sock_fd < 0) + { + printk(KERN_WARNING + "rsbaclogd(): creating local log socket failed with error %s, exiting!\n", + get_error_name(tmp, sock_fd)); + rsbaclogd_pid = 0; + return -RSBAC_EWRITEFAILED; + } + /* bind local address */ + addr.sin_family = PF_INET; + addr.sin_port = htons(CONFIG_RSBAC_LOG_LOCAL_PORT); + err = rsbac_net_str_to_inet(CONFIG_RSBAC_LOG_LOCAL_ADDR, + &addr.sin_addr.s_addr); + if(err < 0) + { + printk(KERN_WARNING + "rsbaclogd(): converting local socket address %s failed with error %s, exiting!\n", + CONFIG_RSBAC_LOG_LOCAL_ADDR, + get_error_name(tmp, err)); + sys_close(sock_fd); + rsbaclogd_pid = 0; + return -RSBAC_EINVALIDVALUE; + } + /* change data segment - sys_bind reads address from user space */ + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_bind(sock_fd, (struct sockaddr *)&addr, sizeof(addr)); + set_fs(oldfs); + if(err < 0) + { + printk(KERN_WARNING + "rsbaclogd(): binding local socket address 
%u.%u.%u.%u:%u failed with error %s, exiting!\n", + NIPQUAD(addr.sin_addr.s_addr), + CONFIG_RSBAC_LOG_LOCAL_PORT, + get_error_name(tmp, err)); + sys_close(sock_fd); + rsbaclogd_pid = 0; + return -RSBAC_EWRITEFAILED; + } + + /* convert remote address */ + addr.sin_family = PF_INET; + addr.sin_port = htons(CONFIG_RSBAC_LOG_REMOTE_PORT); + err = rsbac_net_str_to_inet(CONFIG_RSBAC_LOG_REMOTE_ADDR, + &addr.sin_addr.s_addr); + if(err < 0) + { + printk(KERN_WARNING + "rsbaclogd(): converting remote socket address %s failed with error %s, exiting!\n", + CONFIG_RSBAC_LOG_REMOTE_ADDR, + get_error_name(tmp, err)); + sys_close(sock_fd); + rsbaclogd_pid = 0; + return -RSBAC_EINVALIDVALUE; + } + +#ifdef CONFIG_RSBAC_DEBUG + if(rsbac_debug_stack) + { + unsigned long * n = (unsigned long *) (current+1); + + while (!*n) + n++; + printk(KERN_DEBUG "rsbaclogd: free stack: %lu\n", + (unsigned long) n - (unsigned long)(current+1)); + } +#endif + for(;;) + { + /* wait */ + spin_lock_irq(&tsk->sigmask_lock); + flush_signals(tsk); + sigfillset(&tsk->blocked); + recalc_sigpending(tsk); + spin_unlock_irq(&tsk->sigmask_lock); +/* +#ifdef CONFIG_RSBAC_DEBUG + if (rsbac_debug_auto) + printk(KERN_DEBUG + "rsbacd(): calling rsbac_write()\n"); +#endif +*/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) + while(remote_rlog_size) + { + cli(); + i = 0; + c = 0; + while (remote_rlog_size && i < REMOTE_SEND_BUF_LEN && c != '\n') + { + c = *((char *) remote_rlog_buf+remote_log_start); + remote_log_start++; + remote_rlog_size--; + remote_log_start &= REMOTE_RLOG_BUF_MASK; + sti(); + remote_send_buf[i] = c; + i++; + cli(); + } + sti(); +#else + while(remote_log_start != remote_log_end) + { + i = 0; + c = 0; + spin_lock_irq(&rsbac_log_remote_lock); + while ( (remote_log_start != remote_log_end) + && (i < sizeof(remote_send_buf)) + && (c != '\n') + ) + { + c = REMOTE_RLOG_BUF(remote_log_start); + remote_log_start++; + remote_logged_chars--; + remote_send_buf[i] = c; +// 
spin_unlock_irq(&rsbac_log_remote_lock); + i++; +// spin_lock_irq(&rsbac_log_remote_lock); + } + spin_unlock_irq(&rsbac_log_remote_lock); +#endif + + /* change data segment - sys_sendto reads data and address from user space */ + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_sendto(sock_fd, + remote_send_buf, + i, + MSG_DONTWAIT, + (struct sockaddr *)&addr, + sizeof(addr)); + set_fs(oldfs); + if( (err < 0) +// && (err != -EPERM) + ) + { + printk(KERN_WARNING + "rsbaclogd(): sending to remote socket address %u.%u.%u.%u:%u failed with error %i!\n", + NIPQUAD(addr.sin_addr.s_addr), + CONFIG_RSBAC_LOG_REMOTE_PORT, + err); + continue; + } + } +#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC + /* set new timer (only, if not woken up by rsbac_printk()) */ + mod_timer(&rsbac_log_remote_timer, jiffies + rsbac_log_remote_interval); +#endif + interruptible_sleep_on(&rsbaclogd_wait); + /* sleep */ + } + } +#endif + + /********************************/ /* Init */ /********************************/ +#ifdef CONFIG_RSBAC_INIT_DELAY +inline void rsbac_init_debug(void) +#else inline void __init rsbac_init_debug(void) +#endif { int i; #if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS) @@ -2695,8 +3531,8 @@ inline void __init rsbac_init_debug(void int tmperr; rsbac_log_entry_t def_data; -#if defined(CONFIG_RSBAC_RMSG) - /* init rsbac_console lock */ +#if 0 && defined(CONFIG_RSBAC_RMSG) + /* init rsbac_log_lock */ spin_lock_init(&rsbac_log_lock); #endif /* register log_levels list */ @@ -2771,11 +3607,27 @@ inline void __init rsbac_init_debug(void } #endif #endif + + #if defined(CONFIG_RSBAC_LOG_REMOTE) + /* init rsbac_console lock */ +// spin_lock_init(&rsbac_log_remote_lock); + /* init the rsbaclogd wait queue head */ + init_waitqueue_head(&rsbaclogd_wait); + /* Start rsbac logging thread for auto write */ + rsbaclogd_pid = kernel_thread(rsbaclogd, NULL, 0); + printk(KERN_INFO "rsbac_init_debug(): Started rsbaclogd thread with pid %u\n", + rsbaclogd_pid); + #endif } + #ifdef 
CONFIG_RSBAC_SOFTMODE if(rsbac_softmode) printk(KERN_DEBUG "rsbac_softmode is set\n"); #endif + #ifdef CONFIG_RSBAC_CAP_PROC_HIDE + if(rsbac_cap_process_hiding) + printk(KERN_DEBUG "rsbac_cap_process_hiding is set\n"); + #endif #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL if(rsbac_dac_disable) printk(KERN_DEBUG "rsbac_dac_disable is set\n"); @@ -2784,6 +3636,13 @@ inline void __init rsbac_init_debug(void if(rsbac_nosyslog) printk(KERN_DEBUG "rsbac_nosyslog is set\n"); #endif + #ifdef CONFIG_RSBAC_INIT_DELAY + if(rsbac_delay_init) + printk(KERN_DEBUG "rsbac_delay_init is set\n"); + if(MAJOR(rsbac_delayed_root) || MINOR(rsbac_delayed_root)) + printk(KERN_DEBUG "rsbac_delayed_root is %02u:%02u\n", + MAJOR(rsbac_delayed_root), MINOR(rsbac_delayed_root)); + #endif if(rsbac_no_defaults) printk(KERN_DEBUG "rsbac_no_defaults is set\n"); @@ -2818,6 +3677,15 @@ inline void __init rsbac_init_debug(void printk(KERN_DEBUG "rsbac_debug_adf_net is set\n"); #endif + #if defined(CONFIG_RSBAC_MAC) + if(rsbac_debug_ds_mac) + printk(KERN_DEBUG "rsbac_debug_ds_mac is set\n"); + if(rsbac_debug_aef_mac) + printk(KERN_DEBUG "rsbac_debug_aef_mac is set\n"); + if(rsbac_debug_adf_mac) + printk(KERN_DEBUG "rsbac_debug_adf_mac is set\n"); + #endif + #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT) if(rsbac_debug_ds_pm) printk(KERN_DEBUG "rsbac_debug_ds_pm is set\n"); diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/help/getname.c linux-2.4.20-wolk4.9-fullkernel/rsbac/help/getname.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/help/getname.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/help/getname.c 2003-08-25 20:33:02.000000000 +0200 @@ -3,7 +3,7 @@ /* Author and (c) 1999-2002: */ /* Amon Ott */ /* Helper functions for all parts */ -/* Last modified: 24/Jun/2002 */ +/* Last modified: 16/Oct/2002 */ /************************************* */ #include @@ -71,6 +71,8 @@ static char request_list[R_NONE+1][24] "SEND", "RECEIVE", "NET_SHUTDOWN", + 
"CHANGE_DAC_EFF_OWNER", + "CHANGE_DAC_FS_OWNER", "NONE" }; static char result_list[UNDEFINED+1][12] = { @@ -82,11 +84,18 @@ static char result_list[UNDEFINED+1][12 static rsbac_switch_target_int_t attr_mod_list[A_none+1] = { GEN, /* pseudo */ MAC, /* security_level */ + MAC, /* initial_security_level */ MAC, /* local_sec_level */ MAC, /* remote_sec_level */ + MAC, /* min_security_level */ MAC, /* mac_categories */ + MAC, /* mac_initial_categories */ MAC, /* local_mac_categories */ MAC, /* remote_mac_categories */ + MAC, /* mac_min_categories */ + MAC, /* mac_user_flags */ + MAC, /* mac_process_flags */ + MAC, /* mac_file_flags */ FC, /* object_category */ FC, /* local_object_category */ FC, /* remote_object_category */ @@ -109,9 +118,9 @@ static rsbac_switch_target_int_t attr_m MAC, /* max_read_open */ MAC, /* max_read_categories */ MAC, /* mac_auto */ - MAC, /* mac_trusted */ MAC, /* mac_trusted_for_user */ MAC, /* mac_check */ + MAC, /* mac_prop_trusted */ PM, /* pm_role */ PM, /* pm_process_type */ PM, /* pm_current_task */ @@ -135,6 +144,7 @@ static rsbac_switch_target_int_t attr_m MS, /* ms_str_offset */ MS, /* ms_sock_trusted_tcp */ MS, /* ms_sock_trusted_udp */ + MS, /* ms_need_scan */ FF, /* ff_flags */ RC, /* rc_type */ RC, /* local_rc_type */ @@ -152,6 +162,9 @@ static rsbac_switch_target_int_t attr_m JAIL, /* jail_id */ JAIL, /* jail_ip */ JAIL, /* jail_flags */ + RES, /* res_role */ + RES, /* res_min */ + RES, /* res_max */ GEN, /* log_array_low */ GEN, /* local_log_array_low */ GEN, /* remote_log_array_low */ @@ -161,8 +174,10 @@ static rsbac_switch_target_int_t attr_m GEN, /* log_program_based */ GEN, /* log_user_based */ GEN, /* symlink_add_uid */ + GEN, /* symlink_add_mac_level */ GEN, /* symlink_add_rc_role */ GEN, /* linux_dac_disable */ + CAP, /* cap_process_hiding */ #ifdef __KERNEL__ /* adf-request helpers */ SW_NONE, /* group */ @@ -179,20 +194,27 @@ static rsbac_switch_target_int_t attr_m SW_NONE, /* auth_get_caplist */ SW_NONE, /* prot_bits 
*/ SW_NONE, /* internal */ - #if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) SW_NONE, /* create_data */ - #endif + SW_NONE, /* new_object */ + SW_NONE, /* rlimit */ #endif SW_NONE /* none */ }; static char attribute_list[A_none+1][23] = { "pseudo", "security_level", + "initial_security_level", "local_sec_level", "remote_sec_level", + "min_security_level", "mac_categories", + "mac_initial_categories", "local_mac_categories", "remote_mac_categories", + "mac_min_categories", + "mac_user_flags", + "mac_process_flags", + "mac_file_flags", "object_category", "local_object_category", "remote_object_category", @@ -215,9 +237,9 @@ static char attribute_list[A_none+1][23 "max_read_open", "max_read_categories", "mac_auto", - "mac_trusted", "mac_trusted_for_user", "mac_check", + "mac_prop_trusted", "pm_role", "pm_process_type", "pm_current_task", @@ -241,6 +263,7 @@ static char attribute_list[A_none+1][23 "ms_str_offset", "ms_sock_trusted_tcp", "ms_sock_trusted_udp", + "ms_need_scan", "ff_flags", "rc_type", "local_rc_type", @@ -258,6 +281,9 @@ static char attribute_list[A_none+1][23 "jail_id", "jail_ip", "jail_flags", + "res_role", + "res_min", + "res_max", "log_array_low", "local_log_array_low", "remote_log_array_low", @@ -267,8 +293,10 @@ static char attribute_list[A_none+1][23 "log_program_based", "log_user_based", "symlink_add_uid", + "symlink_add_mac_level", "symlink_add_rc_role", "linux_dac_disable", + "cap_process_hiding", #ifdef __KERNEL__ /* adf-request helpers */ "owner", @@ -286,9 +314,9 @@ static char attribute_list[A_none+1][23 "auth_get_caplist", "prot_bits", "internal", - #if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT) "create_data", - #endif + "new_object", + "rlimit", #endif "none" }; @@ -331,6 +359,7 @@ static char switch_target_list[SW_NONE+ "ACL", "CAP", "JAIL", + "RES", "SOFTMODE", "DAC_DISABLE", "NONE" }; @@ -385,39 +414,46 @@ static char scd_type_list[ST_none+1][12] /* Attribute types */ #ifndef __KERNEL__ -static char 
attribute_param_list[A_none+1][181] = { +static char attribute_param_list[A_none+1][193] = { "user-pseudo (positive long integer)", "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", + "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", + "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", + "Bit Set String of length 64 for all categories", "Bit Set String of length 64 for all categories", "Bit Set String of length 64 for all categories", "Bit Set String of length 64 for all categories", + "Bit Set String of length 64 for all categories", + "1 = override, 4 = trusted, 8 = write_up, 16 = read_up,\n\t32 = write_down, 64 = allow_mac_auto", + "1 = override, 2 = auto, 4 = trusted, 8 = write_up,\n\t16 = read_up, 32 = write_down, 128 = prop_trusted", + "2 = auto, 4 = trusted, 8 = write_up, 16 = read_up,\n\t32 = write_down", "0 = general, 1 = security, 2 = system, 3 = inherit", "0 = general, 1 = security, 2 = system, 3 = inherit", "0 = general, 1 = security, 2 = system, 3 = inherit", "0 = none, 1 = SI, 2 = inherit", "0 = none, 1 = SI, 2 = inherit", "0 = none, 1 = SI, 2 = inherit", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", - "0 = user, 1 = security officer, 2 = administrator", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = 
auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", + "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, max. level 252", "Bit Set String of length 64 for all categories", "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, max. level 252", "Bit Set String of length 64 for all categories", "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, max. level 252", "Bit Set String of length 64 for all categories", - "0 = false, 1 = true", - "0 = false, 1 = true", + "0 = no, 1 = yes, 2 = inherit (default value)", "-3 = no user, -4 = all users, user-ID = for this user", "0 = false, 1 = true", + "0 = false, 1 = true", "0 = user, 1 = security officer, 2 = data protection officer,\n\t3 = TP-manager, 4 = system-admin", "0 = none, 1 = TP", "Task-ID (positive integer)", @@ -441,7 +477,8 @@ static char attribute_param_list[A_none "(internal only, do not set)", "0 = not_trusted, 1 = active, 2 = full", "0 = not_trusted, 1 = active, 2 = full", - "1 = read_only, 2 = execute_only, 4 = search_only, 8 = write_only,\n\t16 = secure_delete, 32 = no_execute, 64 = no_delete_or_rename,\n\t128 = add_inherited (or'd), 256 = append_only", + "Bit-String for all Requests or list, I for inherit", + "1 = read_only, 2 = execute_only, 4 = search_only, 8 = write_only,\n\t16 = secure_delete, 32 = no_execute, 64 = no_delete_or_rename,\n\t128 = add_inherited (or'd), 256 = append_only, 512 = no_mount", "RC-type-id", "RC-type-id", "RC-type-id", @@ 
-458,17 +495,22 @@ static char attribute_param_list[A_none "JAIL ID (0 = off)", "JAIL IP address a.b.c.d", "JAIL flags (or'd, 1 = allow external IPC, 2 = allow all net families,\n\t4 = allow_rlimit, 8 = allow raw IP, 16 = auto adjust IP)", + "0 = user, 1 = security officer, 2 = administrator", /* res_role */ + "array of non-negative integer values, all 0 for unset", /* res_min */ + "array of non-negative integer values, all 0 for unset", /* res_max */ "Bit-String for all Requests, low bit", "Bit-String for all Requests, low bit", "Bit-String for all Requests, low bit", - "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0, h=1 = full, l=1,h=1 = request based)", - "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0, h=1 = full, l=1,h=1 = request based)", - "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0, h=1 = full, l=1,h=1 = request based)", + "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0,h=1 = full, l=1,h=1 = request based)", + "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0,h=1 = full, l=1,h=1 = request based)", + "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0,h=1 = full, l=1,h=1 = request based)", "Bit-String for all Requests", "Bit-String for all Requests", "0 = false, 1 = true", "0 = false, 1 = true", + "0 = false, 1 = true", "0 = false, 1 = true, 2 = inherit (default)", + "0 = off (default), 1 = from other users, 2 = full", "INVALID!" 
}; #endif diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/help/helpers.c linux-2.4.20-wolk4.9-fullkernel/rsbac/help/helpers.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/help/helpers.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/help/helpers.c 2003-08-25 20:33:02.000000000 +0200 @@ -135,6 +135,28 @@ char * longtostr(char * str, long i) return (str); }; +char * u64tostrmac(char * str, __u64 i) + { + int j = 0; + __u64 k; + + if(!str) + return(NULL); + + k = 1; + for(j = RSBAC_MAC_MAX_CAT;j >= 0;j--) + { + if (i & k) + str[j] = '1'; + else + str[j] = '0'; + k<<=1; + }; + + str[RSBAC_MAC_NR_CATS] = 0; + return (str); + }; + #ifndef __KERNEL__ void error_exit(int error) @@ -331,28 +353,6 @@ __u64 strtou64rcr(char * str, __u64 * i_ return(res); }; -char * u64tostrmac(char * str, __u64 i) - { - int j = 0; - __u64 k; - - if(!str) - return(NULL); - - k = 1; - for(j = RSBAC_MAC_MAX_CAT;j >= 0;j--) - { - if (i & k) - str[j] = '1'; - else - str[j] = '0'; - k<<=1; - }; - - str[RSBAC_MAC_NR_CATS] = 0; - return (str); - }; - __u64 strtou64mac(char * str, __u64 * i_p) { int j; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/help/rc_getname.c linux-2.4.20-wolk4.9-fullkernel/rsbac/help/rc_getname.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/help/rc_getname.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/help/rc_getname.c 2003-08-25 20:33:02.000000000 +0200 @@ -31,7 +31,7 @@ static char rc_admin_list[RC_none+1][13 "system_admin", "none" }; -static char rc_scd_type_list[RST_none-32+1][20] = { +static char rc_scd_type_list[RST_none - RST_min + 1][20] = { "auth_administration", "none" }; @@ -107,11 +107,12 @@ static char rc_item_param_list[RI_none+ "\t\t(none)" }; #endif -static char rc_special_right_list[RCR_NONE-32+1][20] = { +static char rc_special_right_list[RCR_NONE - RSBAC_RC_SPECIAL_RIGHT_BASE + 1][20] = { "ADMIN", "ASSIGN", "ACCESS_CONTROL", "SUPERVISOR", + "MODIFY_AUTH", "NONE" }; 
/*****************************************/ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/help/res_getname.c linux-2.4.20-wolk4.9-fullkernel/rsbac/help/res_getname.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/help/res_getname.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/help/res_getname.c 2003-08-25 20:33:02.000000000 +0200 @@ -0,0 +1,62 @@ +/********************************** */ +/* Rule Set Based Access Control */ +/* Author and (c) 2002: */ +/* Amon Ott */ +/* Getname functions for RES module */ +/* Last modified: 22/Nov/2002 */ +/********************************** */ + +#ifndef __KERNEL__ + +#include +#include +#include +#include + +#include + +static char res_list[RSBAC_RES_MAX+2][8] = { + "cpu", + "fsize", + "data", + "stack", + "core", + "rss", + "nproc", + "nofile", + "memlock", + "as", + "locks", + "NONE" }; + +/*****************************************/ + +char * get_res_name(char * name, + u_int value) + { + if(!name) + return(NULL); + if(value > RSBAC_RES_MAX) + strcpy(name, "ERROR!"); + else + strcpy(name, res_list[value]); + return(name); + }; + +int get_res_nr(const char * name) + { + int i; + + if(!name) + return(RSBAC_RES_NONE); + for (i = 0; i <= RSBAC_RES_MAX; i++) + { + if (!strcmp(name, res_list[i])) + { + return(i); + } + } + return(RSBAC_RES_NONE); + }; + +#endif /* !__KERNEL__ */ diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/help/rkmem.c linux-2.4.20-wolk4.9-fullkernel/rsbac/help/rkmem.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/help/rkmem.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/help/rkmem.c 2003-08-25 20:33:02.000000000 +0200 @@ -48,7 +48,11 @@ static rsbac_cache_sizes_t rsbac_cache_s /* Initialisation - setup caches. 
*/ +#ifdef CONFIG_RSBAC_INIT_DELAY +void rsbac_kmem_cache_sizes_init(void) +#else void __init rsbac_kmem_cache_sizes_init(void) +#endif { rsbac_cache_sizes_t *sizes = rsbac_cache_sizes; char name[21]; diff -Naurp linux-2.4.20-wolk4.8-fullkernel/rsbac/help/syscalls.c linux-2.4.20-wolk4.9-fullkernel/rsbac/help/syscalls.c --- linux-2.4.20-wolk4.8-fullkernel/rsbac/help/syscalls.c 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/rsbac/help/syscalls.c 2003-08-25 20:33:02.000000000 +0200 @@ -1,10 +1,10 @@ -/************************************************* */ -/* Rule Set Based Access Control */ -/* Implementation of RSBAC general system calls */ -/* Author and (C) 1999-2002: Amon Ott */ -/* */ -/* Last modified: 09/Aug/2002 */ -/************************************************* */ +/*************************************************** */ +/* Rule Set Based Access Control */ +/* Implementation of RSBAC general system calls */ +/* Author and (C) 1999-2003: Amon Ott */ +/* */ +/* Last modified: 04/Jul/2003 */ +/*************************************************** */ #include #include @@ -1270,8 +1270,8 @@ int sys_rsbac_net_template(enum rsbac_ne case NTS_get_name: err = rsbac_put_user((u_char *) &k_data, (u_char *) data_p, sizeof(k_data) ); break; - default: + break; } } return err; @@ -1340,7 +1340,13 @@ int sys_rsbac_switch(enum rsbac_switch_t /* call ADF */ if(target >= SW_NONE) return(-RSBAC_EINVALIDTARGET); - if ((value != 0) && (value != 1)) + if ( (value < 0) +#ifdef CONFIG_RSBAC_SOFTMODE_IND + || (value > 3) +#else + || (value > 1) +#endif + ) return (-RSBAC_EINVALIDVALUE); #ifdef CONFIG_RSBAC_DEBUG if (rsbac_debug_aef) @@ -1350,6 +1356,10 @@ int sys_rsbac_switch(enum rsbac_switch_t #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE if(target == DAC_DISABLE) { +#ifdef CONFIG_RSBAC_SOFTMODE_IND + if(value > 1) + return -RSBAC_EINVALIDVALUE; +#endif rsbac_attribute_value.dummy = 0; if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA, current->pid, @@ -1378,190 
+1388,210 @@ int sys_rsbac_switch(enum rsbac_switch_t switch_name = rsbac_kmalloc(RSBAC_MAXNAMELEN); if(switch_name) { + int show_value = value; + get_switch_target_name(switch_name,target); +#ifdef CONFIG_RSBAC_SOFTMODE_IND + switch(value) + { + case 2: + case 3: + strcat(switch_name, " softmode"); + show_value -= 2; + break; + default: + break; + } +#endif #ifdef CONFIG_RSBAC_RMSG rsbac_printk(KERN_WARNING "sys_rsbac_switch(): switching RSBAC module %s to %i!\n", - switch_name, value); + switch_name, show_value); #endif #ifndef CONFIG_RSBAC_RMSG_EXCL printk(KERN_WARNING "sys_rsbac_switch(): switching RSBAC module %s to %i!\n", - switch_name, value); + switch_name, show_value); #endif rsbac_kfree(switch_name); } - switch (target) + switch(value) { +#ifdef CONFIG_RSBAC_SOFTMODE_IND + case 2: + case 3: + rsbac_ind_softmode[target] = value - 2; + break; +#endif + + default: + switch (target) + { #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL - case DAC_DISABLE: rsbac_dac_disable = value; - break; + case DAC_DISABLE: rsbac_dac_disable = value; + break; #endif #ifdef CONFIG_RSBAC_SOFTMODE - case SOFTMODE: rsbac_softmode = value; + case SOFTMODE: rsbac_softmode = value; break; #endif #ifdef CONFIG_RSBAC_MAC - case MAC: rsbac_switch_mac = value; + case MAC: rsbac_switch_mac = value; break; #endif #ifdef CONFIG_RSBAC_FC - case FC: rsbac_switch_fc = value; + case FC: rsbac_switch_fc = value; break; #endif #ifdef CONFIG_RSBAC_SIM - case SIM: rsbac_switch_sim = value; + case SIM: rsbac_switch_sim = value; break; #endif #ifdef CONFIG_RSBAC_PM - case PM: rsbac_switch_pm = value; + case PM: rsbac_switch_pm = value; break; #endif #ifdef CONFIG_RSBAC_MS - case MS: rsbac_switch_ms = value; + case MS: rsbac_switch_ms = value; break; #endif #ifdef CONFIG_RSBAC_FF - case FF: rsbac_switch_ff = value; + case FF: rsbac_switch_ff = value; break; #endif #if defined(CONFIG_RSBAC_RC) - case RC: rsbac_switch_rc = value; + case RC: rsbac_switch_rc = value; break; #endif #if 
defined(CONFIG_RSBAC_AUTH) - case AUTH: rsbac_switch_auth = value; + case AUTH: rsbac_switch_auth = value; break; #endif #if defined(CONFIG_RSBAC_ACL) - case ACL: rsbac_switch_acl = value; + case ACL: rsbac_switch_acl = value; break; #endif #if defined(CONFIG_RSBAC_CAP) - case CAP: rsbac_switch_cap = value; + case CAP: rsbac_switch_cap = value; break; #endif - default: - return (-RSBAC_EINVALIDMODULE); +#if defined(CONFIG_RSBAC_JAIL) + case JAIL: rsbac_switch_jail = value; + break; +#endif +#if defined(CONFIG_RSBAC_RES) + case RES: rsbac_switch_res = value; + break; +#endif + default: + return (-RSBAC_EINVALIDMODULE); + } } #endif /* SWITCH */ return(0); - }; + } /************** MAC ***************/ -int sys_rsbac_mac_set_curr_seclevel(rsbac_security_level_t level) - { -#ifdef CONFIG_RSBAC_MAC - if(level > SL_max) - return(-RSBAC_EINVALIDVALUE); - return (rsbac_mac_set_curr_seclevel(level)); -#else - return (-RSBAC_EINVALIDMODULE); -#endif - }; - -rsbac_security_level_t sys_rsbac_mac_get_curr_seclevel(void) - { -#ifdef CONFIG_RSBAC_MAC - return (rsbac_mac_get_curr_seclevel()); -#else - return(SL_unclassified); -#endif - }; - -int sys_rsbac_mac_set_max_seclevel(rsbac_security_level_t level) - { -#ifdef CONFIG_RSBAC_MAC - if(level > SL_max) - return(-RSBAC_EINVALIDVALUE); - return (rsbac_mac_set_max_seclevel(level)); -#else - return (-RSBAC_EINVALIDMODULE); -#endif - }; - -rsbac_security_level_t sys_rsbac_mac_get_max_seclevel(void) - { -#ifdef CONFIG_RSBAC_MAC - return (rsbac_mac_get_max_seclevel()); -#else - return(SL_unclassified); -#endif - }; - -int sys_rsbac_mac_set_curr_categories(rsbac_mac_category_vector_t * categories_p) +int sys_rsbac_mac_set_curr_level(rsbac_security_level_t level, + rsbac_mac_category_vector_t * categories_p) { #ifdef CONFIG_RSBAC_MAC rsbac_mac_category_vector_t k_categories; int err; - err = rsbac_get_user((u_char *) &k_categories, (u_char *) categories_p, sizeof(k_categories) ); + if(!categories_p) + return -RSBAC_EINVALIDPOINTER; + err 
= rsbac_get_user((char *) &k_categories, (char *) categories_p, sizeof(k_categories)); if(err) return err; - else - return (rsbac_mac_set_curr_categories(k_categories)); + return (rsbac_mac_set_curr_level(level, k_categories)); #else return (-RSBAC_EINVALIDMODULE); #endif - }; + } -int sys_rsbac_mac_get_curr_categories(rsbac_mac_category_vector_t * categories_p) +int sys_rsbac_mac_get_curr_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p) { #ifdef CONFIG_RSBAC_MAC - int err; + int err = 0; + rsbac_security_level_t k_level; rsbac_mac_category_vector_t k_categories; - err = rsbac_mac_get_curr_categories(&k_categories); - /* put result value to user space */ - if(!err) + err = rsbac_mac_get_curr_level(&k_level, &k_categories); + if(err) + return err; + if(level_p) { - err = rsbac_put_user((u_char *) &k_categories, - (u_char *) categories_p, - sizeof(k_categories) ); + err = rsbac_put_user((u_char *) &k_level, (u_char *) level_p, sizeof(k_level)); + if(err) + return err; } - return (err); + if(categories_p) + { + err = rsbac_put_user((u_char *) &k_categories, (u_char *) categories_p, sizeof(k_categories)); + } + return err; #else return (-RSBAC_EINVALIDMODULE); #endif - }; + } -int sys_rsbac_mac_set_max_categories(rsbac_mac_category_vector_t * categories_p) +int sys_rsbac_mac_get_max_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p) { #ifdef CONFIG_RSBAC_MAC + int err = 0; + rsbac_security_level_t k_level; rsbac_mac_category_vector_t k_categories; - int err; - err = rsbac_get_user((u_char *) &k_categories, (u_char *) categories_p, sizeof(k_categories) ); + err = rsbac_mac_get_max_level(&k_level, &k_categories); if(err) return err; - else - return (rsbac_mac_set_max_categories(k_categories)); + if(level_p) + { + err = rsbac_put_user((u_char *) &k_level, (u_char *) level_p, sizeof(k_level)); + if(err) + return err; + } + if(categories_p) + { + err = rsbac_put_user((u_char *) &k_categories, (u_char *) 
categories_p, sizeof(k_categories)); + } + return err; #else return (-RSBAC_EINVALIDMODULE); #endif - }; + } -int sys_rsbac_mac_get_max_categories(rsbac_mac_category_vector_t * categories_p) +int sys_rsbac_mac_get_min_level(rsbac_security_level_t * level_p, + rsbac_mac_category_vector_t * categories_p) { #ifdef CONFIG_RSBAC_MAC - int err; + int err = 0; + rsbac_security_level_t k_level; rsbac_mac_category_vector_t k_categories; - err = rsbac_mac_get_max_categories(&k_categories); - /* put result value to user space */ - if(!err) + err = rsbac_mac_get_min_level(&k_level, &k_categories); + if(err) + return err; + if(level_p) + { + err = rsbac_put_user((u_char *) &k_level, (u_char *) level_p, sizeof(k_level)); + if(err) + return err; + } + if(categories_p) { - err = rsbac_put_user((u_char *) &k_categories, - (u_char *) categories_p, - sizeof(k_categories) ); + err = rsbac_put_user((u_char *) &k_categories, (u_char *) categories_p, sizeof(k_categories)); } - return (err); + return err; #else return (-RSBAC_EINVALIDMODULE); #endif - }; + } /************** PM ***************/ @@ -2069,38 +2099,42 @@ int sys_rsbac_rc_get_eff_rights_n(enum /* Provide means for adding and removing of capabilities */ int sys_rsbac_auth_add_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl) { #if defined(CONFIG_RSBAC_AUTH) struct task_struct * task_p; + if(cap_type >= ACT_none) + return(-RSBAC_EINVALIDTARGET); + if(cap_range.first > cap_range.last) + return(-RSBAC_EINVALIDVALUE); read_lock(&tasklist_lock); task_p = find_task_by_pid(pid); read_unlock(&tasklist_lock); if(!task_p) return(-RSBAC_EINVALIDTARGET); - if(first_uid > last_uid) - return(-RSBAC_EINVALIDVALUE); /* call auth function and return its result */ /* permission checking is done there */ - return(rsbac_auth_add_p_cap(pid, first_uid, last_uid)); + return(rsbac_auth_add_p_cap(pid, cap_type, cap_range, ttl)); 
#else return (-RSBAC_EINVALIDMODULE); #endif }; int sys_rsbac_auth_remove_p_cap(rsbac_pid_t pid, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range) { #if defined(CONFIG_RSBAC_AUTH) struct task_struct * task_p; - if(first_uid > last_uid) + if(cap_type >= ACT_none) + return(-RSBAC_EINVALIDTARGET); + if(cap_range.first > cap_range.last) return(-RSBAC_EINVALIDVALUE); - read_lock(&tasklist_lock); task_p = find_task_by_pid(pid); read_unlock(&tasklist_lock); @@ -2108,15 +2142,16 @@ int sys_rsbac_auth_remove_p_cap(rsbac_pi return(-RSBAC_EINVALIDTARGET); /* call auth function and return its result */ /* permission checking is done there */ - return(rsbac_auth_remove_p_cap(pid, first_uid, last_uid)); + return(rsbac_auth_remove_p_cap(pid, cap_type, cap_range)); #else return (-RSBAC_EINVALIDMODULE); #endif }; int sys_rsbac_auth_add_f_cap(char * filename, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range, + rsbac_time_t ttl) { #if defined(CONFIG_RSBAC_AUTH) struct dentry * t_dentry; @@ -2134,7 +2169,9 @@ int sys_rsbac_auth_add_f_cap(char * file if(!filename) return(-RSBAC_EINVALIDTARGET); - if(first_uid > last_uid) + if(cap_type >= ACT_none) + return(-RSBAC_EINVALIDTARGET); + if(cap_range.first > cap_range.last) return(-RSBAC_EINVALIDVALUE); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) @@ -2173,9 +2210,10 @@ int sys_rsbac_auth_add_f_cap(char * file #if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT) /* call ADF */ #ifdef CONFIG_RSBAC_DEBUG - if (rsbac_debug_aef) printk(KERN_DEBUG "sys_rsbac_auth_add_f_cap(): calling ADF\n"); + if (rsbac_debug_aef) + printk(KERN_DEBUG "sys_rsbac_auth_add_f_cap(): calling ADF\n"); #endif - rsbac_attribute_value.auth_cap = first_uid; + rsbac_attribute_value.auth_cap_range = cap_range; if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE, current->pid, T_FILE, @@ -2187,7 +2225,7 @@ int 
sys_rsbac_auth_add_f_cap(char * file } else #endif - err = rsbac_auth_add_f_cap(tid.file, first_uid, last_uid); + err = rsbac_auth_add_f_cap(tid.file, cap_type, cap_range, ttl); out_dput: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) @@ -2207,8 +2245,8 @@ out: }; int sys_rsbac_auth_remove_f_cap(char * filename, - rsbac_uid_t first_uid, - rsbac_uid_t last_uid) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t cap_range) { #if defined(CONFIG_RSBAC_AUTH) struct dentry * t_dentry; @@ -2226,7 +2264,9 @@ int sys_rsbac_auth_remove_f_cap(char * f if(!filename) return(-RSBAC_EINVALIDTARGET); - if(first_uid > last_uid) + if(cap_type >= ACT_none) + return(-RSBAC_EINVALIDTARGET); + if(cap_range.first > cap_range.last) return(-RSBAC_EINVALIDVALUE); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) @@ -2267,7 +2307,7 @@ int sys_rsbac_auth_remove_f_cap(char * f #ifdef CONFIG_RSBAC_DEBUG if (rsbac_debug_aef) printk(KERN_DEBUG "sys_rsbac_auth_add_f_cap(): calling ADF\n"); #endif - rsbac_attribute_value.auth_cap = first_uid; + rsbac_attribute_value.auth_cap_range = cap_range; if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE, current->pid, T_FILE, @@ -2279,7 +2319,7 @@ int sys_rsbac_auth_remove_f_cap(char * f } else #endif - err = rsbac_auth_remove_f_cap(tid.file, first_uid, last_uid); + err = rsbac_auth_remove_f_cap(tid.file, cap_type, cap_range); out_dput: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) @@ -2298,17 +2338,19 @@ out: #endif }; -/* caplist must have space for 2 * maxnum uid entries - first and last each! */ +/* caplist must have space for maxnum auth_cap_range entries - first and last each! 
*/ int sys_rsbac_auth_get_f_caplist(char * filename, - rsbac_uid_t caplist[], - int maxnum) + enum rsbac_auth_cap_type_t cap_type, + struct rsbac_auth_cap_range_t caplist[], + rsbac_time_t ttllist[], + u_int maxnum) { #if defined(CONFIG_RSBAC_AUTH) struct dentry * t_dentry; int err = 0, tmperr = 0; union rsbac_target_id_t tid; - rsbac_uid_t * k_caplist; - boolean vmalloc_used; + struct rsbac_auth_cap_range_t * k_caplist; + rsbac_time_t * k_ttllist; /* for adf_request */ #if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT) @@ -2321,10 +2363,14 @@ int sys_rsbac_auth_get_f_caplist(char * if(!filename) return(-RSBAC_EINVALIDTARGET); + if(cap_type >= ACT_none) + return(-RSBAC_EINVALIDTARGET); if(!caplist) return(-RSBAC_EINVALIDPOINTER); if(maxnum <= 0) return(-RSBAC_EINVALIDVALUE); + if(maxnum > RSBAC_AUTH_MAX_MAXNUM) + maxnum = RSBAC_AUTH_MAX_MAXNUM; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) lock_kernel(); @@ -2376,18 +2422,28 @@ int sys_rsbac_auth_get_f_caplist(char * goto out_dput; } #endif - k_caplist = rsbac_vkmalloc(2 * maxnum * sizeof(rsbac_uid_t), &vmalloc_used); - if(!k_caplist) - return -RSBAC_ENOMEM; - err = rsbac_auth_get_f_caplist(tid.file, k_caplist, maxnum); + err = rsbac_auth_get_f_caplist(tid.file, cap_type, &k_caplist, &k_ttllist); if(err>0) { + if(err > maxnum) + err = maxnum; tmperr = rsbac_put_user((u_char *) k_caplist, (u_char *) caplist, - sizeof(rsbac_uid_t) * 2 * err); + sizeof(struct rsbac_auth_cap_range_t) * err); if(tmperr < 0) err = tmperr; + else + { + if(ttllist) + { + tmperr = rsbac_put_user((u_char *) k_ttllist, (u_char *) ttllist, + sizeof(rsbac_time_t) * err); + if(tmperr < 0) + err = tmperr; + } + } + vfree(k_caplist); + vfree(k_ttllist); } - rsbac_vkfree(k_caplist, vmalloc_used); out_dput: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) @@ -2816,11 +2872,14 @@ int sys_rsbac_acl_n(enum rsbac_acl_sys } out_dput: - #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) - dput(t_dentry); - #else - path_release(&nd); - #endif + 
if(k_arg.name) + { + #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + dput(t_dentry); + #else + path_release(&nd); + #endif + } out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) @@ -3179,9 +3238,13 @@ int sys_rsbac_acl_get_tlist (enum rsba return(-RSBAC_EINVALIDPOINTER); if(!maxnum) return(-RSBAC_EINVALIDVALUE); - + if(maxnum > RSBAC_ACL_MAX_MAXNUM) + maxnum = RSBAC_ACL_MAX_MAXNUM; + /* get values from user space */ - rsbac_get_user((u_char *) &k_tid, (u_char *) tid, sizeof(k_tid) ); + err = rsbac_get_user((u_char *) &k_tid, (u_char *) tid, sizeof(k_tid) ); + if(err) + return err; /* call acl function */ err = rsbac_acl_sys_get_tlist(target, k_tid, &k_entry_p, &k_ttl_p); @@ -3491,7 +3554,7 @@ int sys_rsbac_acl_get_mask_n(enum rsbac_ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) struct nameidata nd; #endif - + if(target >= T_NONE) return(-RSBAC_EINVALIDTARGET); if(!mask_p) @@ -3792,7 +3855,7 @@ int sys_rsbac_adf_log_switch(enum rsbac_ #endif rsbac_adf_log_switch(request,target,value); return(0); - }; + } int sys_rsbac_get_adf_log(enum rsbac_adf_request_t request, enum rsbac_target_t target, @@ -3848,7 +3911,7 @@ int sys_rsbac_get_adf_log(enum rsbac_adf sizeof(k_value) ); } return(err); - }; + } /* * Commands to sys_rsbac_log: @@ -3870,16 +3933,78 @@ int sys_rsbac_log(int type, #else return(0); #endif /* RMSG */ - }; + } + +#if defined(CONFIG_RSBAC_INIT_DELAY) +int sys_rsbac_init(char * path) + { + struct dentry * t_dentry = NULL; + boolean need_put = FALSE; + int err = 0; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + struct nameidata nd; + #endif + if(!path) + return rsbac_init(ROOT_DEV); + + #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + lock_kernel(); + t_dentry = lookup_dentry(path,NULL,0); + if (IS_ERR(t_dentry)) + { + err= -RSBAC_EINVALIDTARGET; + unlock_kernel(); + goto out; + } + #else + if ((err = user_path_walk_link(path, &nd))) + { + goto out; + } + t_dentry = nd.dentry; + #endif + need_put = TRUE; + if (!t_dentry->d_inode) + { + err = 
-RSBAC_EINVALIDTARGET; + goto out_dput; + } + /* is inode of type file, symlink or block/char device? */ + if(!S_ISBLK(t_dentry->d_inode->i_mode)) + { /* This is no file or device */ + err = -RSBAC_EINVALIDTARGET; + goto out_dput; + } + err = rsbac_init(t_dentry->d_inode->i_rdev); + +out_dput: + if(need_put) + #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) + { + dput(t_dentry); + unlock_kernel(); + } + #else + path_release(&nd); + #endif +out: + return(err); + } +#endif /* Big dispatcher for all syscalls */ -asmlinkage int sys_rsbac(enum rsbac_syscall_t call, +asmlinkage int sys_rsbac(rsbac_version_t version, + enum rsbac_syscall_t call, union rsbac_syscall_arg_t * arg_p) { union rsbac_syscall_arg_t k_arg; int err; + if(version != RSBAC_VERSION_NR) + return -RSBAC_EINVALIDVERSION; + if(call >= RSYS_none) return -RSBAC_EINVALIDREQUEST; @@ -3960,22 +4085,18 @@ asmlinkage int sys_rsbac(enum rsbac_sys return sys_rsbac_log(k_arg.log.type, k_arg.log.buf, k_arg.log.len); - case RSYS_mac_set_curr_seclevel: - return sys_rsbac_mac_set_curr_seclevel(k_arg.mac_set_curr_seclevel.level); - case RSYS_mac_get_curr_seclevel: - return sys_rsbac_mac_get_curr_seclevel(); - case RSYS_mac_set_max_seclevel: - return sys_rsbac_mac_set_max_seclevel(k_arg.mac_set_max_seclevel.level); - case RSYS_mac_get_max_seclevel: - return sys_rsbac_mac_get_max_seclevel(); - case RSYS_mac_set_curr_categories: - return sys_rsbac_mac_set_curr_categories(k_arg.mac_set_curr_categories.categories_p); - case RSYS_mac_get_curr_categories: - return sys_rsbac_mac_get_curr_categories(k_arg.mac_get_curr_categories.categories_p); - case RSYS_mac_set_max_categories: - return sys_rsbac_mac_set_max_categories(k_arg.mac_set_max_categories.categories_p); - case RSYS_mac_get_max_categories: - return sys_rsbac_mac_get_max_categories(k_arg.mac_get_max_categories.categories_p); + case RSYS_mac_set_curr_level: + return sys_rsbac_mac_set_curr_level(k_arg.mac_set_curr_level.level, + k_arg.mac_set_curr_level.categories_p); + 
case RSYS_mac_get_curr_level: + return sys_rsbac_mac_get_curr_level(k_arg.mac_get_curr_level.level_p, + k_arg.mac_get_curr_level.categories_p); + case RSYS_mac_get_max_level: + return sys_rsbac_mac_get_max_level(k_arg.mac_get_max_level.level_p, + k_arg.mac_get_max_level.categories_p); + case RSYS_mac_get_min_level: + return sys_rsbac_mac_get_min_level(k_arg.mac_get_min_level.level_p, + k_arg.mac_get_min_level.categories_p); case RSYS_stats_pm: return sys_rsbac_stats_pm(); case RSYS_pm: @@ -4023,23 +4144,27 @@ asmlinkage int sys_rsbac(enum rsbac_sys k_arg.rc_get_list.ttl_array_p); case RSYS_auth_add_p_cap: return sys_rsbac_auth_add_p_cap(k_arg.auth_add_p_cap.pid, - k_arg.auth_add_p_cap.first_uid, - k_arg.auth_add_p_cap.last_uid); + k_arg.auth_add_p_cap.cap_type, + k_arg.auth_add_p_cap.cap_range, + k_arg.auth_add_p_cap.ttl); case RSYS_auth_remove_p_cap: return sys_rsbac_auth_remove_p_cap(k_arg.auth_remove_p_cap.pid, - k_arg.auth_remove_p_cap.first_uid, - k_arg.auth_remove_p_cap.last_uid); + k_arg.auth_remove_p_cap.cap_type, + k_arg.auth_remove_p_cap.cap_range); case RSYS_auth_add_f_cap: return sys_rsbac_auth_add_f_cap(k_arg.auth_add_f_cap.filename, - k_arg.auth_add_f_cap.first_uid, - k_arg.auth_add_f_cap.last_uid); + k_arg.auth_add_f_cap.cap_type, + k_arg.auth_add_f_cap.cap_range, + k_arg.auth_add_f_cap.ttl); case RSYS_auth_remove_f_cap: return sys_rsbac_auth_remove_f_cap(k_arg.auth_remove_f_cap.filename, - k_arg.auth_remove_f_cap.first_uid, - k_arg.auth_remove_f_cap.last_uid); + k_arg.auth_remove_f_cap.cap_type, + k_arg.auth_remove_f_cap.cap_range); case RSYS_auth_get_f_caplist: return sys_rsbac_auth_get_f_caplist(k_arg.auth_get_f_caplist.filename, + k_arg.auth_get_f_caplist.cap_type, k_arg.auth_get_f_caplist.caplist, + k_arg.auth_get_f_caplist.ttllist, k_arg.auth_get_f_caplist.maxnum); case RSYS_acl: return sys_rsbac_acl(k_arg.acl.call, @@ -4091,6 +4216,11 @@ asmlinkage int sys_rsbac(enum rsbac_sys return -RSBAC_EINVALIDMODULE; #endif +#if 
defined(CONFIG_RSBAC_INIT_DELAY) + case RSYS_init: + return sys_rsbac_init(k_arg.init.root_dev); +#endif + default: return -RSBAC_EINVALIDREQUEST; } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/scripts/Configure linux-2.4.20-wolk4.9-fullkernel/scripts/Configure --- linux-2.4.20-wolk4.8-fullkernel/scripts/Configure 2003-08-25 18:25:29.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/scripts/Configure 2003-08-25 20:36:17.000000000 +0200 @@ -356,7 +356,7 @@ function int () { def=${old:-$3} while :; do readln "$1 ($2) [$def] " "$def" "$old" - if expr "$ans" : '[0-9]*$' > /dev/null; then + if expr "$ans" : '[+-]\?[0-9]*$' > /dev/null; then define_int "$2" "$ans" break else diff -Naurp linux-2.4.20-wolk4.8-fullkernel/scripts/include_deps linux-2.4.20-wolk4.9-fullkernel/scripts/include_deps --- linux-2.4.20-wolk4.8-fullkernel/scripts/include_deps 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.4.20-wolk4.9-fullkernel/scripts/include_deps 2003-08-25 20:35:56.000000000 +0200 @@ -0,0 +1,15 @@ +# Read the .depend files, extract the dependencies for .h targets, convert +# relative names to absolute and write the result to stdout. It is part of +# building the global .h dependency graph for kbuild 2.4. 
KAO + +/^[^ ]/ { copy = 0; fn = "/error/"; } +/^[^ ][^ ]*\.h:/ { copy = 1; fn = FILENAME; sub(/\.depend/, "", fn); } +!copy { next; } + { + indent = $0; sub(/[^ ].*/, "", indent); + if ($1 != "" && $1 !~ /^[@$\/\\]/) { $1 = fn $1 }; + if ($2 != "" && $2 !~ /^[@$\/\\]/) { $2 = fn $2 }; + $1 = $1; # ensure $0 is rebuilt + $0 = indent $0; + print; + } diff -Naurp linux-2.4.20-wolk4.8-fullkernel/scripts/mkdep.c linux-2.4.20-wolk4.9-fullkernel/scripts/mkdep.c --- linux-2.4.20-wolk4.8-fullkernel/scripts/mkdep.c 2003-08-25 18:26:34.000000000 +0200 +++ linux-2.4.20-wolk4.9-fullkernel/scripts/mkdep.c 2003-08-25 20:35:56.000000000 +0200 @@ -45,8 +45,11 @@ -char depname[512]; +char __depname[512] = "\n\t@touch "; +#define depname (__depname+9) int hasdep; +char cwd[PATH_MAX]; +int lcwd; struct path_struct { int len; @@ -74,14 +77,9 @@ do_depname(void) { if (!hasdep) { hasdep = 1; - if (g_filename) { - /* Source file (*.[cS]) */ - printf("%s:", depname); + printf("%s:", depname); + if (g_filename) printf(" %s", g_filename); - } else { - /* header file (*.h) */ - printf("dep_%s +=", depname); - } } } @@ -206,9 +204,28 @@ void handle_include(int start, const cha memcpy(path->buffer+path->len, name, len); path->buffer[path->len+len] = '\0'; if (access(path->buffer, F_OK) == 0) { + int l = lcwd + strlen(path->buffer); + int need_wildcard = 0; + char name2[l+2], *p; + if (path->buffer[0] == '/') { + memcpy(name2, path->buffer, l+1); + } + else { + need_wildcard = 1; + memcpy(name2, cwd, lcwd); + name2[lcwd] = '/'; + memcpy(name2+lcwd+1, path->buffer, path->len+len+1); + } + while ((p = strstr(name2, "/../"))) { + *p = '\0'; + strcpy(strrchr(name2, '/'), p+3); + } do_depname(); - printf(" \\\n %s $(dep_%s)", path->buffer, - path->buffer); + if (need_wildcard) { + printf(" \\\n $(wildcard %s)", name2); + } else { + printf(" \\\n %s", name2); + } return; } } @@ -525,7 +542,7 @@ cee_CONFIG_word: /* * Generate dependencies for one file. 
*/ -void do_depend(const char * filename) +void do_depend(const char * filename, const char * command) { int mapsize; int pagesizem1 = getpagesize()-1; @@ -564,7 +581,9 @@ void do_depend(const char * filename) clear_config(); state_machine(map, map+st.st_size); if (hasdep) { - puts(""); + puts(command); + if (*command) + define_precious(filename); } munmap(map, mapsize); @@ -588,6 +607,12 @@ int main(int argc, char **argv) return 1; } + if (!getcwd(cwd, sizeof(cwd))) { + fprintf(stderr, "mkdep: getcwd() failed %m\n"); + return 1; + } + lcwd = strlen(cwd); + add_path("."); /* for #include "..." */ while (++argv, --argc > 0) { @@ -610,6 +635,7 @@ int main(int argc, char **argv) while (--argc > 0) { const char * filename = *++argv; + const char * command = __depname; g_filename = 0; len = strlen(filename); memcpy(depname, filename, len+1); @@ -617,9 +643,10 @@ int main(int argc, char **argv) if (filename[len-1] == 'c' || filename[len-1] == 'S') { depname[len-1] = 'o'; g_filename = filename; + command = ""; } } - do_depend(filename); + do_depend(filename, command); } if (len_precious) { *(str_precious+len_precious) = '\0';