diff -Nru a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl --- a/Documentation/DocBook/kernel-hacking.tmpl Tue Jun 18 19:12:02 2002 +++ b/Documentation/DocBook/kernel-hacking.tmpl Tue Jun 18 19:12:02 2002 @@ -702,19 +702,14 @@ - <function>smp_processor_id</function>()/<function>cpu_[number/logical]_map()</function> + <title><function>smp_processor_id</function>() <filename class=headerfile>include/asm/smp.h</filename> smp_processor_id() returns the current processor number, between 0 and NR_CPUS (the maximum number of CPUs supported by Linux, currently 32). These - values are not necessarily continuous: to get a number between 0 - and smp_num_cpus() (the number of actual - processors in this machine), the - cpu_number_map() function is used to map the - processor id to a logical number. - cpu_logical_map() does the reverse. + values are not necessarily continuous. diff -Nru a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking --- a/Documentation/filesystems/Locking Tue Jun 18 19:12:02 2002 +++ b/Documentation/filesystems/Locking Tue Jun 18 19:12:02 2002 @@ -50,27 +50,27 @@ int (*removexattr) (struct dentry *, const char *); locking rules: - all may block - BKL i_sem(inode) -lookup: no yes -create: no yes -link: no yes (both) -mknod: no yes -symlink: no yes -mkdir: no yes -unlink: no yes (both) -rmdir: no yes (both) (see below) -rename: no yes (all) (see below) -readlink: no no -follow_link: no no -truncate: no yes (see below) -setattr: no yes -permission: yes no -getattr: no no -setxattr: no yes -getxattr: no yes -listxattr: no yes -removexattr: no yes + all may block, none have BKL + i_sem(inode) +lookup: yes +create: yes +link: yes (both) +mknod: yes +symlink: yes +mkdir: yes +unlink: yes (both) +rmdir: yes (both) (see below) +rename: yes (all) (see below) +readlink: no +follow_link: no +truncate: yes (see below) +setattr: yes +permission: no +getattr: no +setxattr: yes +getxattr: yes +listxattr: yes +removexattr: yes Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_sem on victim. cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem. diff -Nru a/Documentation/filesystems/porting b/Documentation/filesystems/porting --- a/Documentation/filesystems/porting Tue Jun 18 19:12:02 2002 +++ b/Documentation/filesystems/porting Tue Jun 18 19:12:02 2002 @@ -81,9 +81,9 @@ [mandatory] ->lookup(), ->truncate(), ->create(), ->unlink(), ->mknod(), ->mkdir(), -->rmdir(), ->link(), ->lseek(), ->symlink(), ->rename() and ->readdir() -are called without BKL now. Grab it on the entry, drop upon return - that -will guarantee the same locking you used to have. If your method or its +->rmdir(), ->link(), ->lseek(), ->symlink(), ->rename(), ->permission() +and ->readdir() are called without BKL now. Grab it on entry, drop upon return +- that will guarantee the same locking you used to have. If your method or its parts do not need BKL - better yet, now you can shift lock_kernel() and unlock_kernel() so that they would protect exactly what needs to be protected. diff -Nru a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt --- a/Documentation/filesystems/proc.txt Tue Jun 18 19:12:02 2002 +++ b/Documentation/filesystems/proc.txt Tue Jun 18 19:12:02 2002 @@ -948,120 +948,43 @@ ----------------------------------------------- The files in this directory can be used to tune the operation of the virtual -memory (VM) subsystem of the Linux kernel. 
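
The ->permission() addition to the porting note above is mechanical to act on. A minimal sketch of the conservative conversion the note prescribes; the method names are made up, only the lock_kernel()/unlock_kernel() bracketing is the point:

#include <linux/fs.h>
#include <linux/smp_lock.h>

static int my_fs_do_permission(struct inode *inode, int mask);

/* The VFS no longer takes the BKL around ->permission(), so a
 * filesystem that still relies on it brackets the old body itself. */
static int my_fs_permission(struct inode *inode, int mask)
{
        int err;

        lock_kernel();          /* grab on entry... */
        err = my_fs_do_permission(inode, mask); /* unchanged old body */
        unlock_kernel();        /* ...drop upon return */
        return err;
}

Once that is in place, the lock can be pushed down until it protects only what actually needs it, as the note suggests.
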
In addition, one of the files -(bdflush) has some influence on disk usage. +memory (VM) subsystem of the Linux kernel. -bdflush -------- +dirty_background_ratio +---------------------- -This file controls the operation of the bdflush kernel daemon. It currently -contains nine integer values, six of which are actually used by the kernel. -They are listed in table 2-2. - - -Table 2-2: Parameters in /proc/sys/vm/bdflush -.............................................................................. - Value Meaning - nfract Percentage of buffer cache dirty to activate bdflush - ndirty Maximum number of dirty blocks to write out per wake-cycle - nrefill Number of clean buffers to try to obtain each time we call refill - nref_dirt buffer threshold for activating bdflush when trying to refill - buffers. - dummy Unused - age_buffer Time for normal buffer to age before we flush it - age_super Time for superblock to age before we flush it - dummy Unused - dummy Unused -.............................................................................. +Contains, as a percentage of total system memory, the number of pages at which +the pdflush background writeback daemon will start writing out dirty data. -nfract ------- - -This parameter governs the maximum number of dirty buffers in the buffer -cache. Dirty means that the contents of the buffer still have to be written to -disk (as opposed to a clean buffer, which can just be forgotten about). -Setting this to a higher value means that Linux can delay disk writes for a -long time, but it also means that it will have to do a lot of I/O at once when -memory becomes short. A lower value will spread out disk I/O more evenly. - -ndirty ------- - -Ndirty gives the maximum number of dirty buffers that bdflush can write to the -disk at one time. A high value will mean delayed, bursty I/O, while a small -value can lead to memory shortage when bdflush isn't woken up often enough. - -nrefill -------- - -This is the number of buffers that bdflush will add to the list of free -buffers when refill_freelist() is called. It is necessary to allocate free -buffers beforehand, since the buffers are often different sizes than the -memory pages and some bookkeeping needs to be done beforehand. The higher the -number, the more memory will be wasted and the less often refill_freelist() -will need to run. - -nref_dirt ---------- - -When refill_freelist() comes across more than nref_dirt dirty buffers, it will -wake up bdflush. - -age_buffer and age_super ------------------------- - -Finally, the age_buffer and age_super parameters govern the maximum time Linux -waits before writing out a dirty buffer to disk. The value is expressed in -jiffies (clockticks), the number of jiffies per second is 100. Age_buffer is -the maximum age for data blocks, while age_super is for filesystems meta data. - -buffermem ---------- - -The three values in this file control how much memory should be used for -buffer memory. The percentage is calculated as a percentage of total system -memory. - -The values are: - -min_percent ------------ - -This is the minimum percentage of memory that should be spent on buffer -memory. - -borrow_percent --------------- - -When Linux is short on memory, and the buffer cache uses more than it has been -allotted, the memory management (MM) subsystem will prune the buffer cache -more heavily than other memory to compensate. - -max_percent ------------ - -This is the maximum amount of memory that can be used for buffer memory. 
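
The new dirty_* files described above are plain ASCII integers, so they can be inspected and tuned from userspace without any library support. A sketch; the value 10 is an arbitrary example, not a recommendation:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/vm/dirty_background_ratio", "r");
        int ratio;

        if (!f || fscanf(f, "%d", &ratio) != 1)
                return 1;
        fclose(f);
        printf("background writeback starts at %d%% dirty\n", ratio);

        f = fopen("/proc/sys/vm/dirty_background_ratio", "w");
        if (!f)
                return 1;
        fprintf(f, "10\n");     /* start background writeback earlier */
        fclose(f);
        return 0;
}

dirty_async_ratio, dirty_sync_ratio and the two centisecs files work the same way.
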
- -freepages ---------- +dirty_async_ratio +----------------- -This file contains three values: min, low and high: +Contains, as a percentage of total system memory, the number of pages at which +a process which is generating disk writes will itself start writing out dirty +data. + +dirty_sync_ratio +---------------- + +Contains, as a percentage of total system memory, the number of pages at which +a process which is generating disk writes will itself start writing out dirty +data and waiting upon completion of that writeout. + +dirty_writeback_centisecs +------------------------- + +The pdflush writeback daemons will periodically wake up and write `old' data +out to disk. This tunable expresses the interval between those wakeups, in +100'ths of a second. + +dirty_expire_centisecs +---------------------- + +This tunable is used to define when dirty data is old enough to be eligible +for writeout by the pdflush daemons. It is expressed in 100'ths of a second. +Data which has been dirty in-memory for longer than this interval will be +written out next time a pdflush daemon wakes up. -min ---- -When the number of free pages in the system reaches this number, only the -kernel can allocate more memory. - -low ---- -If the number of free pages falls below this point, the kernel starts swapping -aggressively. - -high ----- -The kernel tries to keep up to this amount of memory free; if memory falls -below this point, the kernel starts gently swapping in the hopes that it never -has to do really aggressive swapping. kswapd ------ @@ -1112,79 +1035,6 @@ On the other hand, enabling this feature can cause you to run out of memory and thrash the system to death, so large and/or important servers will want to set this value to 0. - -pagecache ---------- - -This file does exactly the same job as buffermem, only this file controls the -amount of memory allowed for memory mapping and generic caching of files. - -You don't want the minimum level to be too low, otherwise your system might -thrash when memory is tight or fragmentation is high. - -pagetable_cache ---------------- - -The kernel keeps a number of page tables in a per-processor cache (this helps -a lot on SMP systems). The cache size for each processor will be between the -low and the high value. - -On a low-memory, single CPU system, you can safely set these values to 0 so -you don't waste memory. It is used on SMP systems so that the system can -perform fast pagetable allocations without having to acquire the kernel memory -lock. - -For large systems, the settings are probably fine. For normal systems they -won't hurt a bit. For small systems ( less than 16MB ram) it might be -advantageous to set both values to 0. - -swapctl -------- - -This file contains no less than 8 variables. All of these values are used by -kswapd. - -The first four variables -* sc_max_page_age, -* sc_page_advance, -* sc_page_decline and -* sc_page_initial_age -are used to keep track of Linux's page aging. Page aging is a bookkeeping -method to track which pages of memory are often used, and which pages can be -swapped out without consequences. - -When a page is swapped in, it starts at sc_page_initial_age (default 3) and -when the page is scanned by kswapd, its age is adjusted according to the -following scheme: - -* If the page was used since the last time we scanned, its age is increased - by sc_page_advance (default 3). Where the maximum value is given by - sc_max_page_age (default 20). 
-* Otherwise (meaning it wasn't used) its age is decreased by sc_page_decline - (default 1). - -When a page reaches age 0, it's ready to be swapped out. - -The variables sc_age_cluster_fract, sc_age_cluster_min, sc_pageout_weight and -sc_bufferout_weight, can be used to control kswapd's aggressiveness in -swapping out pages. - -Sc_age_cluster_fract is used to calculate how many pages from a process are to -be scanned by kswapd. The formula used is - -(sc_age_cluster_fract divided by 1024) times resident set size - -So if you want kswapd to scan the whole process, sc_age_cluster_fract needs to -have a value of 1024. The minimum number of pages kswapd will scan is -represented by sc_age_cluster_min, which is done so that kswapd will also scan -small processes. - -The values of sc_pageout_weight and sc_bufferout_weight are used to control -how many tries kswapd will make in order to swap out one page/buffer. These -values can be used to fine-tune the ratio between user pages and buffer/cache -memory. When you find that your Linux system is swapping out too many process -pages in order to satisfy buffer memory demands, you may want to either -increase sc_bufferout_weight, or decrease the value of sc_pageout_weight. 2.5 /proc/sys/dev - Device specific parameters ---------------------------------------------- diff -Nru a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt --- a/Documentation/networking/ip-sysctl.txt Tue Jun 18 19:12:02 2002 +++ b/Documentation/networking/ip-sysctl.txt Tue Jun 18 19:12:02 2002 @@ -43,23 +43,23 @@ Minimum time-to-live of entries. Should be enough to cover fragment time-to-live on the reassembling side. This minimum time-to-live is guaranteed if the pool size is less than inet_peer_threshold. - Measured in jiffies. + Measured in jiffies(1). inet_peer_maxttl - INTEGER Maximum time-to-live of entries. Unused entries will expire after this period of time if there is no memory pressure on the pool (i.e. when the number of entries in the pool is very small). - Measured in jiffies. + Measured in jiffies(1). inet_peer_gc_mintime - INTEGER Minimum interval between garbage collection passes. This interval is in effect under high memory pressure on the pool. - Measured in jiffies. + Measured in jiffies(1). inet_peer_gc_maxtime - INTEGER Minimum interval between garbage collection passes. This interval is in effect under low (or absent) memory pressure on the pool. - Measured in jiffies. + Measured in jiffies(1). TCP variables: @@ -81,7 +81,7 @@ How many keepalive probes TCP sends out, until it decides that the connection is broken. Default value: 9. -tcp_keepalive_interval - INTEGER +tcp_keepalive_intvl - INTEGER How frequently the probes are send out. Multiplied by tcp_keepalive_probes it is time to kill not responding connection, after probes started. Default value: 75sec i.e. connection @@ -316,28 +316,37 @@ Limit the maximal rates for sending ICMP packets whose type matches icmp_ratemask (see below) to specific targets. 0 to disable any limiting, otherwise the maximal rate in jiffies(1) - Default: 1 + Default: 100 icmp_ratemask - INTEGER Mask made of ICMP types for which rates are being limited. 
- Default: 6168 - Note: 6168 = 0x1818 = 1</{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \ + echo ""; \ + echo "#endif" ) +endef + # RPM target # --------------------------------------------------------------------------- @@ -461,6 +479,8 @@ else # ifeq ($(filter $(noconfig_targets),$(MAKECMDGOALS)),) +ifeq ($(filter-out $(noconfig_targets),$(MAKECMDGOALS)),) + # Targets which don't need .config # =========================================================================== # @@ -519,23 +539,6 @@ defconfig: yes '' | $(CONFIG_SHELL) scripts/Configure -d arch/$(ARCH)/config.in -# How we generate .config depends on which *config the -# user chose when calling make - -.config: $(filter oldconfig xconfig menuconfig config,$(MAKECMDGOALS)) ; - -# If the user gave commands from both the need / need not -# .config sections, we need to call make again after -# .config is generated, now to take care of the remaining -# targets we know nothing about in this section - -remaining_targets := $(filter-out $(noconfig_targets),$(MAKECMDGOALS)) - -$(remaining_targets) : make_with_config - -make_with_config: .config - @$(MAKE) $(remaining_targets) - # Cleaning up # --------------------------------------------------------------------------- @@ -604,7 +607,8 @@ mrproper: clean archmrproper @echo 'Making mrproper' - @find . \( -size 0 -o -name .depend \) -type f -print | xargs rm -f + @find . \( -size 0 -o -name .depend -o -name .\*.cmd \) \ + -type f -print | xargs rm -f @rm -f $(MRPROPER_FILES) @rm -rf $(MRPROPER_DIRS) @$(MAKE) -C Documentation/DocBook mrproper @@ -639,8 +643,6 @@ @$(MAKE) -C Documentation/DocBook $@ -endif # ifeq ($(filter $(noconfig_targets),$(MAKECMDGOALS)),) - # Scripts to check various things for consistency # --------------------------------------------------------------------------- @@ -652,6 +654,18 @@ checkincludes: find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkincludes.pl + +else # ifneq ($(filter-out $(noconfig_targets),$(MAKECMDGOALS)),) + +# We're called with both targets which do and do not need +# .config included. Handle them one after the other. +# =========================================================================== + +%:: FORCE + $(MAKE) $@ + +endif # ifeq ($(filter-out $(noconfig_targets),$(MAKECMDGOALS)),) +endif # ifeq ($(filter $(noconfig_targets),$(MAKECMDGOALS)),) # FIXME Should go into a make.lib or something # =========================================================================== diff -Nru a/Rules.make b/Rules.make --- a/Rules.make Tue Jun 18 19:12:02 2002 +++ b/Rules.make Tue Jun 18 19:12:02 2002 @@ -131,9 +131,13 @@ genksyms_smp_prefix := endif -$(MODVERDIR)/$(real-objs-y:.o=.ver): modkern_cflags := $(CFLAGS_KERNEL) -$(MODVERDIR)/$(real-objs-m:.o=.ver): modkern_cflags := $(CFLAGS_MODULE) -$(MODVERDIR)/$(export-objs:.o=.ver): export_flags := -D__GENKSYMS__ +# Don't include modversions.h, we're just about to generate it here. 
+ +CFLAGS_MODULE := $(filter-out -include $(HPATH)/linux/modversions.h,$(CFLAGS_MODULE)) + +$(addprefix $(MODVERDIR)/,$(real-objs-y:.o=.ver)): modkern_cflags := $(CFLAGS_KERNEL) +$(addprefix $(MODVERDIR)/,$(real-objs-m:.o=.ver)): modkern_cflags := $(CFLAGS_MODULE) +$(addprefix $(MODVERDIR)/,$(export-objs:.o=.ver)): export_flags := -D__GENKSYMS__ c_flags = -Wp,-MD,$(depfile) $(CFLAGS) $(NOSTDINC_FLAGS) \ $(modkern_cflags) $(EXTRA_CFLAGS) $(CFLAGS_$(*F).o) \ @@ -145,18 +149,34 @@ # files changes quiet_cmd_cc_ver_c = MKVER include/linux/modules/$(RELDIR)/$*.ver -define cmd_cc_ver_c - mkdir -p $(dir $@); \ - $(CPP) $(c_flags) $< | $(GENKSYMS) $(genksyms_smp_prefix) \ - -k $(VERSION).$(PATCHLEVEL).$(SUBLEVEL) > $@.tmp; \ +cmd_cc_ver_c = $(CPP) $(c_flags) $< | $(GENKSYMS) $(genksyms_smp_prefix) \ + -k $(VERSION).$(PATCHLEVEL).$(SUBLEVEL) > $@.tmp + +# Okay, let's explain what's happening in rule_make_cc_ver_c: +# o echo the command +# o execute the command +# o If the $(CPP) fails, we won't notice because it's output is piped +# to $(GENKSYMS) which does not fail. We recognize this case by +# looking if the generated $(depfile) exists, though. +# o If the .ver file changed, touch modversions.h, which is our maker +# of any changed .ver files. +# o Move command line and deps into their normal .*.cmd place. + +define rule_cc_ver_c + $(if $($(quiet)cmd_cc_ver_c),echo ' $($(quiet)cmd_cc_ver_c)';) \ + $(cmd_cc_ver_c); \ + if [ ! -r $(depfile) ]; then exit 1; fi; \ + $(TOPDIR)/scripts/fixdep $(depfile) $@ $(TOPDIR) '$(cmd_cc_ver_c)' > $(@D)/.$(@F).tmp; \ + rm -f $(depfile); \ if [ ! -r $@ ] || cmp -s $@ $@.tmp; then \ touch $(TOPDIR)/include/linux/modversions.h; \ fi; \ mv -f $@.tmp $@ + mv -f $(@D)/.$(@F).tmp $(@D)/.$(@F).cmd endef $(MODVERDIR)/%.ver: %.c FORCE - @$(call if_changed_dep,cc_ver_c) + @$(call if_changed_rule,cc_ver_c) targets := $(addprefix $(MODVERDIR)/,$(export-objs:.o=.ver)) @@ -446,7 +466,7 @@ # execute the command and also postprocess generated .d dependencies # file -if_changed_dep = $(if $(strip $? \ +if_changed_dep = $(if $(strip $? $(filter-out FORCE $(wildcard $^),$^)\ $(filter-out $(cmd_$(1)),$(cmd_$@))\ $(filter-out $(cmd_$@),$(cmd_$(1)))),\ @set -e; \ @@ -455,6 +475,17 @@ $(TOPDIR)/scripts/fixdep $(depfile) $@ $(TOPDIR) '$(cmd_$(1))' > $(@D)/.$(@F).tmp; \ rm -f $(depfile); \ mv -f $(@D)/.$(@F).tmp $(@D)/.$(@F).cmd) + +# Usage: $(call if_changed_rule,foo) +# will check if $(cmd_foo) changed, or any of the prequisites changed, +# and if so will execute $(rule_foo) + +if_changed_rule = $(if $(strip $? 
\ + $(filter-out $(cmd_$(1)),$(cmd_$@))\ + $(filter-out $(cmd_$@),$(cmd_$(1)))),\ + @set -e; \ + mkdir -p $(dir $@); \ + $(rule_$(1))) # If quiet is set, only print short version of command diff -Nru a/arch/alpha/Makefile b/arch/alpha/Makefile --- a/arch/alpha/Makefile Tue Jun 18 19:12:01 2002 +++ b/arch/alpha/Makefile Tue Jun 18 19:12:01 2002 @@ -127,10 +127,6 @@ rm -f arch/alpha/vmlinux.lds rm -f include/asm-alpha/asm_offsets.h -archdep: - $(MAKE) -C arch/alpha/kernel asm_offsets - @$(MAKEBOOT) dep - vmlinux: arch/alpha/vmlinux.lds arch/alpha/vmlinux.lds: arch/alpha/vmlinux.lds.in @@ -138,3 +134,19 @@ bootpfile: @$(MAKEBOOT) bootpfile + + +prepare: include/asm-$(ARCH)/asm_offsets.h + +arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \ + include/config/MARKER + +include/asm-$(ARCH)/asm_offsets.h.tmp: arch/$(ARCH)/kernel/asm-offsets.s + @$(generate-asm-offsets.h) < $< > $@ + +include/asm-$(ARCH)/asm_offsets.h: include/asm-$(ARCH)/asm_offsets.h.tmp + @echo -n ' Generating $@' + @$(update-if-changed) + +CLEAN_FILES += include/asm-$(ARCH)/offset.h.tmp \ + include/asm-$(ARCH)/offset.h diff -Nru a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile --- a/arch/alpha/boot/Makefile Tue Jun 18 19:12:01 2002 +++ b/arch/alpha/boot/Makefile Tue Jun 18 19:12:01 2002 @@ -96,6 +96,4 @@ rm -f tools/mkbb tools/bootlx tools/lxboot tools/bootph rm -f vmlinux.nh ksize.h -dep: - FORCE: diff -Nru a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile --- a/arch/alpha/kernel/Makefile Tue Jun 18 19:12:02 2002 +++ b/arch/alpha/kernel/Makefile Tue Jun 18 19:12:02 2002 @@ -94,17 +94,3 @@ endif # GENERIC include $(TOPDIR)/Rules.make - -ASM_OFFSETS_H = $(TOPDIR)/include/asm-alpha/asm_offsets.h -asm_offsets: - $(CC) $(CFLAGS) -S -o - check_asm.c | \ - sed -e '/xyzzy/ { s/xyzzy //; p; }; d;' > asm_offsets.tmp - @if cmp -s asm_offsets.tmp $(ASM_OFFSETS_H); then \ - set -x; rm asm_offsets.tmp; \ - else \ - set -x; mv asm_offsets.tmp $(ASM_OFFSETS_H); \ - fi - -clean:: - rm -f check_asm - diff -Nru a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/arch/alpha/kernel/asm-offsets.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,29 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. 
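
The header comment above describes the whole trick; here is a standalone version (struct and member invented) that can be fed to "gcc -S" outside the kernel tree to watch it work:

#include <stddef.h>

struct demo {
        int a;
        int b;
};

/* Same DEFINE as in the new asm-offsets.c files below */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
        DEFINE(DEMO_B, offsetof(struct demo, b));
        return 0;
}

The generated assembly then contains a line of the form "->DEMO_B $4 offsetof(struct demo, b)"; the sed command in the Makefile's generate-asm-offsets.h (visible in part in the Makefile hunk earlier) strips the "->" marker and the immediate prefix ($ or #, depending on the assembler) and emits "#define DEMO_B 4", keeping the original expression as a trailing comment. Assembly sources can then include the generated header instead of duplicating structure layouts by hand.
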
+ */ + +#include +#include +#include +#include + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +void foo(void) +{ + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + BLANK(); + DEFINE(PT_PTRACED, PT_PTRACED); + DEFINE(CLONE_VM, CLONE_VM); + DEFINE(SIGCHLD, SIGCHLD); + BLANK(); + DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache)); + DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register)); +} diff -Nru a/arch/alpha/kernel/check_asm.c b/arch/alpha/kernel/check_asm.c --- a/arch/alpha/kernel/check_asm.c Tue Jun 18 19:12:03 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,30 +0,0 @@ -#include -#include -#include -#include - -#define OUT(x) \ - asm ("\nxyzzy " x) -#define DEF(name, val) \ - asm volatile ("\nxyzzy #define " name " %0" : : "i"(val)) - -void foo(void) -{ - OUT("#ifndef __ASM_OFFSETS_H__"); - OUT("#define __ASM_OFFSETS_H__"); - OUT(""); - - DEF("TI_TASK", offsetof(struct thread_info, task)); - DEF("TI_FLAGS", offsetof(struct thread_info, flags)); - DEF("TI_CPU", offsetof(struct thread_info, cpu)); - - DEF("PT_PTRACED", PT_PTRACED); - DEF("CLONE_VM", CLONE_VM); - DEF("SIGCHLD", SIGCHLD); - - DEF("HAE_CACHE", offsetof(struct alpha_machine_vector, hae_cache)); - DEF("HAE_REG", offsetof(struct alpha_machine_vector, hae_register)); - - OUT(""); - OUT("#endif /* __ASM_OFFSETS_H__ */"); -} diff -Nru a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c --- a/arch/alpha/kernel/time.c Tue Jun 18 19:12:02 2002 +++ b/arch/alpha/kernel/time.c Tue Jun 18 19:12:02 2002 @@ -48,6 +48,8 @@ #include "proto.h" #include "irq_impl.h" +u64 jiffies_64; + extern rwlock_t xtime_lock; extern unsigned long wall_jiffies; /* kernel/timer.c */ diff -Nru a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile --- a/arch/alpha/lib/Makefile Tue Jun 18 19:12:03 2002 +++ b/arch/alpha/lib/Makefile Tue Jun 18 19:12:03 2002 @@ -65,6 +65,4 @@ __remlu.o: $(ev6)divide.S $(CC) $(AFLAGS) -DREM -DINTSIZE -c -o __remlu.o $(ev6)divide.S -dep: - include $(TOPDIR)/Rules.make diff -Nru a/arch/arm/Makefile b/arch/arm/Makefile --- a/arch/arm/Makefile Tue Jun 18 19:12:02 2002 +++ b/arch/arm/Makefile Tue Jun 18 19:12:02 2002 @@ -195,29 +195,25 @@ MAKEBOOT =$(MAKE) -C arch/$(ARCH)/boot MAKETOOLS =$(MAKE) -C arch/$(ARCH)/tools -# The following is a hack to get 'constants.h' up -# to date before starting compilation - -$(patsubst %,_dir_%, $(SUBDIRS)): maketools -$(patsubst %,_modsubdir_%,$(MOD_DIRS)): maketools - # Update machine arch and proc symlinks if something which affects # them changed. We use .arch and .proc to indicate when they were # updated last, otherwise make uses the target directory mtime. 
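
Each arch's time.c in this patch gains a bare "u64 jiffies_64;" definition. A hedged sketch of what a tear-free reader has to look like on a 32-bit machine, assuming jiffies_64 is updated under xtime_lock as the neighbouring externs suggest; the helper name is invented (the kernel later grew get_jiffies_64() for this job):

#include <linux/types.h>
#include <linux/spinlock.h>

extern u64 jiffies_64;
extern rwlock_t xtime_lock;

static u64 demo_get_jiffies_64(void)
{
        unsigned long flags;
        u64 ret;

        /* A 64-bit load is two instructions on a 32-bit CPU and can
         * tear against the timer interrupt's increment. */
        read_lock_irqsave(&xtime_lock, flags);
        ret = jiffies_64;
        read_unlock_irqrestore(&xtime_lock, flags);
        return ret;
}
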
include/asm-arm/.arch: $(wildcard include/config/arch/*.h) - @echo 'Making asm-arm/arch -> asm-arm/arch-$(INCDIR) symlink' + @echo ' Making asm-arm/arch -> asm-arm/arch-$(INCDIR) symlink' @rm -f include/asm-arm/arch @ln -sf arch-$(INCDIR) include/asm-arm/arch @touch $@ include/asm-arm/.proc: $(wildcard include/config/cpu/32.h) $(wildcard include/config/cpu/26.h) - @echo 'Making asm-arm/proc -> asm-arm/proc-$(PROCESSOR) symlink' + @echo ' Making asm-arm/proc -> asm-arm/proc-$(PROCESSOR) symlink' @rm -f include/asm-arm/proc @ln -sf proc-$(PROCESSOR) include/asm-arm/proc @touch $@ -.hdepend: include/asm-arm/.arch include/asm-arm/.proc +prepare: include/asm-arm/.arch include/asm-arm/.proc \ + include/asm-arm/constants.h + @$(MAKETOOLS) vmlinux: arch/arm/vmlinux.lds @@ -234,7 +230,6 @@ arch/arm/vmlinux.lds MRPROPER_FILES += \ - arch/arm/tools/constants.h* \ include/asm-arm/arch include/asm-arm/.arch \ include/asm-arm/proc include/asm-arm/.proc \ include/asm-arm/constants.h* \ @@ -247,13 +242,9 @@ archclean: FORCE @$(MAKEBOOT) clean -archdep: FORCE - @$(MAKETOOLS) dep - @$(MAKEBOOT) dep - # we need version.h maketools: include/linux/version.h FORCE - @$(MAKETOOLS) all + @$(MAKETOOLS) # My testing targets (that short circuit a few dependencies) zImg:; @$(MAKEBOOT) zImage @@ -277,3 +268,13 @@ echo "$$CFG does not exist"; \ fi; \ ) + +arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \ + include/config/MARKER + +include/asm-$(ARCH)/constants.h.tmp: arch/$(ARCH)/kernel/asm-offsets.s + @$(generate-asm-offsets.h) < $< > $@ + +include/asm-$(ARCH)/constants.h: include/asm-$(ARCH)/constants.h.tmp + @echo -n ' Generating $@' + @$(update-if-changed) diff -Nru a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile --- a/arch/arm/boot/Makefile Tue Jun 18 19:12:01 2002 +++ b/arch/arm/boot/Makefile Tue Jun 18 19:12:01 2002 @@ -125,7 +125,7 @@ bootpImage: bootp/bootp $(OBJCOPY) -O binary -R .note -R .comment -S bootp/bootp $@ -compressed/vmlinux: $(TOPDIR)/vmlinux dep +compressed/vmlinux: $(TOPDIR)/vmlinux @$(MAKE) -C compressed vmlinux bootp/bootp: zImage initrd @@ -145,5 +145,3 @@ $(RM) Image zImage bootpImage @$(MAKE) -C compressed clean @$(MAKE) -C bootp clean - -dep: diff -Nru a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/arch/arm/kernel/asm-offsets.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,82 @@ +/* + * Copyright (C) 1995-2001 Russell King + * 2001-2002 Keith Owens + * + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include + +/* + * Make sure that the compiler and target are compatible. + */ +#if defined(__APCS_32__) && defined(CONFIG_CPU_26) +#error Sorry, your compiler targets APCS-32 but this kernel requires APCS-26 +#endif +#if defined(__APCS_26__) && defined(CONFIG_CPU_32) +#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 +#endif +#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 95) +#error Sorry, your compiler is known to miscompile kernels. Only use gcc 2.95.3 and later. +#endif +#if __GNUC__ == 2 && __GNUC_MINOR__ == 95 +/* shame we can't detect the .1 or .2 releases */ +#warning GCC 2.95.2 and earlier miscompiles kernels. 
+#endif + +/* Use marker if you need to separate the values later */ + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +int main(void) +{ + DEFINE(TSK_USED_MATH, offsetof(struct task_struct, used_math)); + DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); + DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); + BLANK(); + DEFINE(VM_EXEC, VM_EXEC); + BLANK(); +#ifdef CONFIG_CPU_32 + DEFINE(HPTE_TYPE_SMALL, PTE_TYPE_SMALL); + DEFINE(HPTE_AP_READ, PTE_AP_READ); + DEFINE(HPTE_AP_WRITE, PTE_AP_WRITE); + BLANK(); + DEFINE(LPTE_PRESENT, L_PTE_PRESENT); + DEFINE(LPTE_YOUNG, L_PTE_YOUNG); + DEFINE(LPTE_BUFFERABLE, L_PTE_BUFFERABLE); + DEFINE(LPTE_CACHEABLE, L_PTE_CACHEABLE); + DEFINE(LPTE_USER, L_PTE_USER); + DEFINE(LPTE_WRITE, L_PTE_WRITE); + DEFINE(LPTE_EXEC, L_PTE_EXEC); + DEFINE(LPTE_DIRTY, L_PTE_DIRTY); +#endif + BLANK(); +#ifdef CONFIG_CPU_26 + DEFINE(PAGE_PRESENT, _PAGE_PRESENT); + DEFINE(PAGE_READONLY, _PAGE_READONLY); + DEFINE(PAGE_NOT_USER, _PAGE_NOT_USER); + DEFINE(PAGE_OLD, _PAGE_OLD); + DEFINE(PAGE_CLEAN, _PAGE_CLEAN); +#endif + BLANK(); + DEFINE(PAGE_SZ, PAGE_SIZE); + BLANK(); + DEFINE(SYS_ERROR0, 0x9f0000); + return 0; +} diff -Nru a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c --- a/arch/arm/kernel/time.c Tue Jun 18 19:12:02 2002 +++ b/arch/arm/kernel/time.c Tue Jun 18 19:12:02 2002 @@ -32,6 +32,8 @@ #include #include +u64 jiffies_64; + extern rwlock_t xtime_lock; extern unsigned long wall_jiffies; diff -Nru a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile --- a/arch/arm/tools/Makefile Tue Jun 18 19:12:01 2002 +++ b/arch/arm/tools/Makefile Tue Jun 18 19:12:01 2002 @@ -4,40 +4,9 @@ # Copyright (C) 2001 Russell King # -all: $(TOPDIR)/include/asm-arm/mach-types.h \ - $(TOPDIR)/include/asm-arm/constants.h +all: $(TOPDIR)/include/asm-arm/mach-types.h $(TOPDIR)/include/asm-arm/mach-types.h: mach-types gen-mach-types awk -f gen-mach-types mach-types > $@ -# Generate the constants.h header file using the compiler. We get -# the compiler to spit out assembly code, and then mundge it into -# what we want. We do this in several stages so make picks up on -# any errors that occur along the way. - -constants.h: constants-hdr getconstants.c - $(CC) $(CFLAGS) -S -o $@.tmp.1 getconstants.c - sed 's/^\(#define .* \)[#$$]\(.*\)/\1\2/;/^#define/!d' $@.tmp.1 > $@.tmp.2 - cat constants-hdr $@.tmp.2 > $@ - $(RM) $@.tmp* - -# Only update include/asm-arm/constants.h when it has actually changed. - -$(TOPDIR)/include/asm-arm/constants.h: constants.h - cmp constants.h $@ >/dev/null 2>&1 || cp -p constants.h $@ - -# Build our dependencies, and then generate the constants and -# mach-types header files. If we do it now, mkdep will pick -# the dependencies up later on when it runs through the other -# directories - -dep: - $(TOPDIR)/scripts/mkdep $(CFLAGS) $(EXTRA_CFLAGS) -- getconstants.c |\ - sed s,getconstants.o,constants.h, > .depend - $(MAKE) all - -.PHONY: all dep - -ifneq ($(wildcard .depend),) -include .depend -endif +.PHONY: all diff -Nru a/arch/arm/tools/constants-hdr b/arch/arm/tools/constants-hdr --- a/arch/arm/tools/constants-hdr Tue Jun 18 19:12:01 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,5 +0,0 @@ -/* - * This file is automatically generated from arch/arm/tools/getconstants.c. - * Do not edit! Only include this file in assembly (.S) files! 
- */ - diff -Nru a/arch/arm/tools/getconstants.c b/arch/arm/tools/getconstants.c --- a/arch/arm/tools/getconstants.c Tue Jun 18 19:12:02 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,75 +0,0 @@ -/* - * linux/arch/arm/tools/getconsdata.c - * - * Copyright (C) 1995-2001 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include -#include -#include - -#include -#include - -/* - * Make sure that the compiler and target are compatible. - */ -#if defined(__APCS_32__) && defined(CONFIG_CPU_26) -#error Sorry, your compiler targets APCS-32 but this kernel requires APCS-26 -#endif -#if defined(__APCS_26__) && defined(CONFIG_CPU_32) -#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 -#endif -#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 95) -#error Sorry, your compiler is known to miscompile kernels. Only use gcc 2.95.3 and later. -#endif -#if __GNUC__ == 2 && __GNUC_MINOR__ == 95 -/* shame we can't detect the .1 or .2 releases */ -#warning GCC 2.95.2 and earlier miscompiles kernels. -#endif - -#define OFF_TSK(n) (unsigned long)&(((struct task_struct *)0)->n) -#define OFF_VMA(n) (unsigned long)&(((struct vm_area_struct *)0)->n) - -#define DEFN(name,off) asm("\n#define "name" %0" :: "I" (off)) - -void func(void) -{ -DEFN("TSK_USED_MATH", OFF_TSK(used_math)); -DEFN("TSK_ACTIVE_MM", OFF_TSK(active_mm)); - -DEFN("VMA_VM_MM", OFF_VMA(vm_mm)); -DEFN("VMA_VM_FLAGS", OFF_VMA(vm_flags)); - -DEFN("VM_EXEC", VM_EXEC); - -#ifdef CONFIG_CPU_32 -DEFN("HPTE_TYPE_SMALL", PTE_TYPE_SMALL); -DEFN("HPTE_AP_READ", PTE_AP_READ); -DEFN("HPTE_AP_WRITE", PTE_AP_WRITE); - -DEFN("LPTE_PRESENT", L_PTE_PRESENT); -DEFN("LPTE_YOUNG", L_PTE_YOUNG); -DEFN("LPTE_BUFFERABLE", L_PTE_BUFFERABLE); -DEFN("LPTE_CACHEABLE", L_PTE_CACHEABLE); -DEFN("LPTE_USER", L_PTE_USER); -DEFN("LPTE_WRITE", L_PTE_WRITE); -DEFN("LPTE_EXEC", L_PTE_EXEC); -DEFN("LPTE_DIRTY", L_PTE_DIRTY); -#endif - -#ifdef CONFIG_CPU_26 -DEFN("PAGE_PRESENT", _PAGE_PRESENT); -DEFN("PAGE_READONLY", _PAGE_READONLY); -DEFN("PAGE_NOT_USER", _PAGE_NOT_USER); -DEFN("PAGE_OLD", _PAGE_OLD); -DEFN("PAGE_CLEAN", _PAGE_CLEAN); -#endif - -DEFN("PAGE_SZ", PAGE_SIZE); - -DEFN("SYS_ERROR0", 0x9f0000); -} diff -Nru a/arch/cris/Makefile b/arch/cris/Makefile --- a/arch/cris/Makefile Tue Jun 18 19:12:02 2002 +++ b/arch/cris/Makefile Tue Jun 18 19:12:02 2002 @@ -100,6 +100,3 @@ rm -rf $(LD_SCRIPT).tmp archmrproper: - -archdep: - @$(MAKEBOOT) dep diff -Nru a/arch/cris/boot/Makefile b/arch/cris/boot/Makefile --- a/arch/cris/boot/Makefile Tue Jun 18 19:12:02 2002 +++ b/arch/cris/boot/Makefile Tue Jun 18 19:12:02 2002 @@ -7,8 +7,6 @@ compressed/vmlinuz: $(TOPDIR)/vmlinux @$(MAKE) -C compressed vmlinuz -dep: - clean: rm -f zImage tools/build compressed/vmlinux.out @$(MAKE) -C compressed clean diff -Nru a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c --- a/arch/cris/kernel/time.c Tue Jun 18 19:12:02 2002 +++ b/arch/cris/kernel/time.c Tue Jun 18 19:12:02 2002 @@ -44,6 +44,8 @@ #include +u64 jiffies_64; + static int have_rtc; /* used to remember if we have an RTC or not */ /* define this if you need to use print_timestamp */ diff -Nru a/arch/i386/Makefile b/arch/i386/Makefile --- a/arch/i386/Makefile Tue Jun 18 19:12:02 2002 +++ b/arch/i386/Makefile Tue Jun 18 19:12:02 2002 @@ -108,7 +108,7 @@ vmlinux: arch/i386/vmlinux.lds .PHONY: zImage bzImage compressed zlilo bzlilo zdisk bzdisk install \ - clean 
archclean archmrproper archdep + clean archclean archmrproper zImage: vmlinux @$(MAKEBOOT) zImage @@ -140,6 +140,3 @@ @$(MAKEBOOT) clean archmrproper: - -archdep: - @$(MAKEBOOT) dep diff -Nru a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile --- a/arch/i386/boot/Makefile Tue Jun 18 19:12:02 2002 +++ b/arch/i386/boot/Makefile Tue Jun 18 19:12:02 2002 @@ -23,7 +23,7 @@ # If you want the RAM disk device, define this to be the size in blocks. -RAMDISK := -DRAMDISK=512 +#RAMDISK := -DRAMDISK=512 # --------------------------------------------------------------------------- @@ -96,8 +96,6 @@ bsetup.s: setup.S video.S Makefile $(BOOT_INCL) $(TOPDIR)/include/linux/version.h $(TOPDIR)/include/linux/compile.h $(CPP) $(CPPFLAGS) -D__BIG_KERNEL__ -D__ASSEMBLY__ -traditional $(SVGA_MODE) $(RAMDISK) $< -o $@ - -dep: clean: @echo 'Cleaning up (boot)' diff -Nru a/arch/i386/config.in b/arch/i386/config.in --- a/arch/i386/config.in Tue Jun 18 19:12:02 2002 +++ b/arch/i386/config.in Tue Jun 18 19:12:02 2002 @@ -153,9 +153,24 @@ define_bool CONFIG_X86_OOSTORE y fi +bool 'Symmetric multi-processing support' CONFIG_SMP +bool 'Preemptible Kernel' CONFIG_PREEMPT +if [ "$CONFIG_SMP" != "y" ]; then + bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC + dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC + if [ "$CONFIG_X86_UP_APIC" = "y" ]; then + define_bool CONFIG_X86_LOCAL_APIC y + fi + if [ "$CONFIG_X86_UP_IOAPIC" = "y" ]; then + define_bool CONFIG_X86_IO_APIC y + fi +else + bool 'Multiquad NUMA system' CONFIG_MULTIQUAD +fi + bool 'Machine Check Exception' CONFIG_X86_MCE dep_bool 'Check for non-fatal errors on Athlon/Duron' CONFIG_X86_MCE_NONFATAL $CONFIG_X86_MCE -dep_bool 'check for P4 thermal throttling interrupt.' CONFIG_X86_MCE_P4THERMAL $CONFIG_X86_MCE $CONFIG_X86_LOCAL_APIC +dep_bool 'check for P4 thermal throttling interrupt.' CONFIG_X86_MCE_P4THERMAL $CONFIG_X86_MCE $CONFIG_X86_UP_APIC tristate 'Toshiba Laptop support' CONFIG_TOSHIBA @@ -185,20 +200,6 @@ bool 'Math emulation' CONFIG_MATH_EMULATION bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR -bool 'Symmetric multi-processing support' CONFIG_SMP -bool 'Preemptible Kernel' CONFIG_PREEMPT -if [ "$CONFIG_SMP" != "y" ]; then - bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC - dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC - if [ "$CONFIG_X86_UP_APIC" = "y" ]; then - define_bool CONFIG_X86_LOCAL_APIC y - fi - if [ "$CONFIG_X86_UP_IOAPIC" = "y" ]; then - define_bool CONFIG_X86_IO_APIC y - fi -else - bool 'Multiquad NUMA system' CONFIG_MULTIQUAD -fi if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then diff -Nru a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile --- a/arch/i386/kernel/Makefile Tue Jun 18 19:12:01 2002 +++ b/arch/i386/kernel/Makefile Tue Jun 18 19:12:01 2002 @@ -15,7 +15,6 @@ obj-y += cpu/ obj-$(CONFIG_MCA) += mca.o -obj-$(CONFIG_EISA) += eisa.o obj-$(CONFIG_MTRR) += mtrr.o obj-$(CONFIG_X86_MSR) += msr.o obj-$(CONFIG_X86_CPUID) += cpuid.o diff -Nru a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c --- a/arch/i386/kernel/apic.c Tue Jun 18 19:12:02 2002 +++ b/arch/i386/kernel/apic.c Tue Jun 18 19:12:02 2002 @@ -813,10 +813,10 @@ * IRQ APIC event being in synchron with the APIC clock we * introduce an interrupt skew to spread out timer events. 
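
From this point on, the i386 and ia64 hunks retire smp_num_cpus: with CPU numbering no longer guaranteed dense (see the kernel-hacking.tmpl change at the top), loops walk the full 0..NR_CPUS-1 range and skip offline slots. The recurring shape, isolated; the counter array and function are made up:

#include <linux/threads.h>
#include <linux/smp.h>

static unsigned int per_cpu_count[NR_CPUS];

static unsigned int demo_sum_counts(void)
{
        unsigned int sum = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!cpu_online(cpu))   /* holes are legal now */
                        continue;
                sum += per_cpu_count[cpu];
        }
        return sum;
}

Where only the number of processors matters, num_online_cpus() replaces smp_num_cpus directly.
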
* - * The number of slices within a 'big' timeslice is smp_num_cpus+1 + * The number of slices within a 'big' timeslice is NR_CPUS+1 */ - slice = clocks / (smp_num_cpus+1); + slice = clocks / (NR_CPUS+1); printk("cpu: %d, clocks: %d, slice: %d\n", smp_processor_id(), clocks, slice); /* diff -Nru a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c --- a/arch/i386/kernel/apm.c Tue Jun 18 19:12:01 2002 +++ b/arch/i386/kernel/apm.c Tue Jun 18 19:12:01 2002 @@ -899,7 +899,7 @@ */ #ifdef CONFIG_SMP /* Some bioses don't like being called from CPU != 0 */ - while (cpu_number_map(smp_processor_id()) != 0) { + while (smp_processor_id() != 0) { kernel_thread(apm_magic, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD); schedule(); @@ -1586,7 +1586,7 @@ p = buf; - if ((smp_num_cpus == 1) && + if ((num_online_cpus() == 1) && !(error = apm_get_power_status(&bx, &cx, &dx))) { ac_line_status = (bx >> 8) & 0xff; battery_status = bx & 0xff; @@ -1717,7 +1717,7 @@ } } - if (debug && (smp_num_cpus == 1)) { + if (debug && (num_online_cpus() == 1)) { error = apm_get_power_status(&bx, &cx, &dx); if (error) printk(KERN_INFO "apm: power status not available\n"); @@ -1761,7 +1761,7 @@ pm_power_off = apm_power_off; register_sysrq_key('o', &sysrq_poweroff_op); - if (smp_num_cpus == 1) { + if (num_online_cpus() == 1) { #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) console_blank_hook = apm_console_blank; #endif @@ -1904,7 +1904,9 @@ printk(KERN_NOTICE "apm: disabled on user request.\n"); return -ENODEV; } - if ((smp_num_cpus > 1) && !power_off) { + /* FIXME: When boot code changes, this will need to be + deactivated when/if a CPU comes up --RR */ + if ((num_online_cpus() > 1) && !power_off) { printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n"); return -ENODEV; } @@ -1958,7 +1960,9 @@ kernel_thread(apm, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD); - if (smp_num_cpus > 1) { + /* FIXME: When boot code changes, this will need to be + deactivated when/if a CPU comes up --RR */ + if (num_online_cpus() > 1) { printk(KERN_NOTICE "apm: disabled - APM is not SMP safe (power off active).\n"); return 0; diff -Nru a/arch/i386/kernel/bluesmoke.c b/arch/i386/kernel/bluesmoke.c --- a/arch/i386/kernel/bluesmoke.c Tue Jun 18 19:12:02 2002 +++ b/arch/i386/kernel/bluesmoke.c Tue Jun 18 19:12:02 2002 @@ -298,7 +298,9 @@ { unsigned int i; - for (i=0; i= smp_num_cpus) + if (cpu >= NR_CPUS) cpu = 0; } else { cpu--; if (cpu == -1) - cpu = smp_num_cpus-1; + cpu = NR_CPUS-1; } - } while (!IRQ_ALLOWED(cpu,allowed_mask) || + } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) || (search_idle && !IDLE_ENOUGH(cpu,now))); return cpu; diff -Nru a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c --- a/arch/i386/kernel/irq.c Tue Jun 18 19:12:01 2002 +++ b/arch/i386/kernel/irq.c Tue Jun 18 19:12:01 2002 @@ -138,8 +138,9 @@ struct irqaction * action; seq_printf(p, " "); - for (j=0; jtypename); seq_printf(p, " %s", action->name); @@ -162,13 +164,15 @@ seq_putc(p, '\n'); } seq_printf(p, "NMI: "); - for (j = 0; j < smp_num_cpus; j++) - seq_printf(p, "%10u ", nmi_count(cpu_logical_map(j))); + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + p += seq_printf(p, "%10u ", nmi_count(j)); seq_putc(p, '\n'); #if CONFIG_X86_LOCAL_APIC seq_printf(p, "LOC: "); - for (j = 0; j < smp_num_cpus; j++) - seq_printf(p, "%10u ", apic_timer_irqs[cpu_logical_map(j)]); + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + p += seq_printf(p, "%10u ", apic_timer_irqs[j]); seq_putc(p, '\n'); #endif seq_printf(p, "ERR: %10u\n", 
atomic_read(&irq_err_count)); @@ -198,14 +202,14 @@ printk("\n%s, CPU %d:\n", str, cpu); printk("irq: %d [",irqs_running()); - for(i=0;i < smp_num_cpus;i++) + for(i=0;i < NR_CPUS;i++) printk(" %d",local_irq_count(i)); printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0); - for(i=0;i < smp_num_cpus;i++) + for(i=0;i < NR_CPUS;i++) printk(" %d",local_bh_count(i)); printk(" ]\nStack dumps:"); - for(i = 0; i < smp_num_cpus; i++) { + for(i = 0; i < NR_CPUS; i++) { unsigned long esp; if (i == cpu) continue; @@ -356,8 +360,9 @@ __save_flags(flags); if (flags & (1 << EFLAGS_IF_SHIFT)) { - int cpu = smp_processor_id(); + int cpu; __cli(); + cpu = smp_processor_id(); if (!local_irq_count(cpu)) get_irqlock(cpu); } @@ -365,11 +370,12 @@ void __global_sti(void) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); if (!local_irq_count(cpu)) release_irqlock(cpu); __sti(); + put_cpu(); } /* diff -Nru a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c --- a/arch/i386/kernel/microcode.c Tue Jun 18 19:12:02 2002 +++ b/arch/i386/kernel/microcode.c Tue Jun 18 19:12:02 2002 @@ -188,7 +188,7 @@ } do_update_one(NULL); - for (i=0; i 0) { rep_nop(); barrier(); } /* Set up for completion wait and then release other CPUs to change MTRRs*/ - atomic_set (&undone_count, smp_num_cpus - 1); + atomic_set (&undone_count, num_online_cpus() - 1); wait_barrier_cache_disable = FALSE; set_mtrr_cache_disable (&ctxt); /* Wait for all other CPUs to flush and disable their caches */ while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); } /* Set up for completion wait and then release other CPUs to change MTRRs*/ - atomic_set (&undone_count, smp_num_cpus - 1); + atomic_set (&undone_count, num_online_cpus() - 1); wait_barrier_execute = FALSE; (*set_mtrr_up) (reg, base, size, type, FALSE); /* Now wait for other CPUs to complete the function */ diff -Nru a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c --- a/arch/i386/kernel/nmi.c Tue Jun 18 19:12:02 2002 +++ b/arch/i386/kernel/nmi.c Tue Jun 18 19:12:02 2002 @@ -73,7 +73,7 @@ int __init check_nmi_watchdog (void) { irq_cpustat_t tmp[NR_CPUS]; - int j, cpu; + int cpu; printk(KERN_INFO "testing NMI watchdog ... "); @@ -81,8 +81,9 @@ sti(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - for (j = 0; j < smp_num_cpus; j++) { - cpu = cpu_logical_map(j); + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu)) + continue; if (nmi_count(cpu) - tmp[cpu].__nmi_count <= 5) { printk("CPU#%d: NMI appears to be stuck!\n", cpu); return -1; @@ -330,7 +331,7 @@ * Just reset the alert counters, (other CPUs might be * spinning on locks we hold): */ - for (i = 0; i < smp_num_cpus; i++) + for (i = 0; i < NR_CPUS; i++) alert_counter[i] = 0; } diff -Nru a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c --- a/arch/i386/kernel/smp.c Tue Jun 18 19:12:01 2002 +++ b/arch/i386/kernel/smp.c Tue Jun 18 19:12:01 2002 @@ -247,18 +247,16 @@ * we get an APIC send error if we try to broadcast. * thus we have to avoid sending IPIs in this case. */ - if (!(smp_num_cpus > 1)) + if (!(num_online_cpus() > 1)) return; if (clustered_apic_mode) { // Pointless. Use send_IPI_mask to do this instead int cpu; - if (smp_num_cpus > 1) { - for (cpu = 0; cpu < smp_num_cpus; ++cpu) { - if (cpu != smp_processor_id()) - send_IPI_mask(1 << cpu, vector); - } + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + if (cpu_online(cpu) && cpu != smp_processor_id()) + send_IPI_mask(1 << cpu, vector); } } else { __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); @@ -272,7 +270,9 @@ // Pointless. 
Use send_IPI_mask to do this instead int cpu; - for (cpu = 0; cpu < smp_num_cpus; ++cpu) { + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + if (!cpu_online(cpu)) + continue; send_IPI_mask(1 << cpu, vector); } } else { @@ -567,7 +567,7 @@ */ { struct call_data_struct data; - int cpus = smp_num_cpus-1; + int cpus = num_online_cpus()-1; if (!cpus) return 0; @@ -617,7 +617,6 @@ void smp_send_stop(void) { smp_call_function(stop_this_cpu, NULL, 1, 0); - smp_num_cpus = 1; __cli(); disable_local_APIC(); diff -Nru a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c --- a/arch/i386/kernel/smpboot.c Tue Jun 18 19:12:02 2002 +++ b/arch/i386/kernel/smpboot.c Tue Jun 18 19:12:02 2002 @@ -56,9 +56,6 @@ /* Setup configured maximum number of CPUs to activate */ static int max_cpus = -1; -/* Total count of live CPUs */ -int smp_num_cpus = 1; - /* Number of siblings per CPU package */ int smp_num_siblings = 1; int __initdata phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ @@ -292,7 +289,8 @@ /* * all APs synchronize but they loop on '== num_cpus' */ - while (atomic_read(&tsc_count_start) != smp_num_cpus-1) mb(); + while (atomic_read(&tsc_count_start) != num_online_cpus()-1) + mb(); atomic_set(&tsc_count_stop, 0); wmb(); /* @@ -310,21 +308,26 @@ /* * Wait for all APs to leave the synchronization point: */ - while (atomic_read(&tsc_count_stop) != smp_num_cpus-1) mb(); + while (atomic_read(&tsc_count_stop) != num_online_cpus()-1) + mb(); atomic_set(&tsc_count_start, 0); wmb(); atomic_inc(&tsc_count_stop); } sum = 0; - for (i = 0; i < smp_num_cpus; i++) { - t0 = tsc_values[i]; - sum += t0; + for (i = 0; i < NR_CPUS; i++) { + if (cpu_online(i)) { + t0 = tsc_values[i]; + sum += t0; + } } - avg = div64(sum, smp_num_cpus); + avg = div64(sum, num_online_cpus()); sum = 0; - for (i = 0; i < smp_num_cpus; i++) { + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; delta = tsc_values[i] - avg; if (delta < 0) delta = -delta; @@ -356,7 +359,7 @@ int i; /* - * smp_num_cpus is not necessarily known at the time + * num_online_cpus is not necessarily known at the time * this gets called, so we first wait for the BP to * finish SMP initialization: */ @@ -364,14 +367,15 @@ for (i = 0; i < NR_LOOPS; i++) { atomic_inc(&tsc_count_start); - while (atomic_read(&tsc_count_start) != smp_num_cpus) mb(); + while (atomic_read(&tsc_count_start) != num_online_cpus()) + mb(); rdtscll(tsc_values[smp_processor_id()]); if (i == NR_LOOPS-1) write_tsc(0, 0); atomic_inc(&tsc_count_stop); - while (atomic_read(&tsc_count_stop) != smp_num_cpus) mb(); + while (atomic_read(&tsc_count_stop) != num_online_cpus()) mb(); } } #undef NR_LOOPS @@ -1070,7 +1074,6 @@ io_apic_irqs = 0; #endif cpu_online_map = phys_cpu_present_map = 1; - smp_num_cpus = 1; if (APIC_init_uniprocessor()) printk(KERN_NOTICE "Local APIC not detected." 
" Using dummy APIC emulation.\n"); @@ -1100,7 +1103,6 @@ io_apic_irqs = 0; #endif cpu_online_map = phys_cpu_present_map = 1; - smp_num_cpus = 1; goto smp_done; } @@ -1116,7 +1118,6 @@ io_apic_irqs = 0; #endif cpu_online_map = phys_cpu_present_map = 1; - smp_num_cpus = 1; goto smp_done; } @@ -1197,7 +1198,6 @@ (bogosum/(5000/HZ))%100); Dprintk("Before bogocount - setting activated=1.\n"); } - smp_num_cpus = cpucount + 1; if (smp_b_stepping) printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n"); @@ -1211,11 +1211,12 @@ for (cpu = 0; cpu < NR_CPUS; cpu++) cpu_sibling_map[cpu] = NO_PROC_ID; - for (cpu = 0; cpu < smp_num_cpus; cpu++) { + for (cpu = 0; cpu < NR_CPUS; cpu++) { int i; - - for (i = 0; i < smp_num_cpus; i++) { - if (i == cpu) + if (!cpu_online(cpu)) continue; + + for (i = 0; i < NR_CPUS; i++) { + if (i == cpu || !cpu_online(i)) continue; if (phys_proc_id[cpu] == phys_proc_id[i]) { cpu_sibling_map[cpu] = i; diff -Nru a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c --- a/arch/i386/kernel/time.c Tue Jun 18 19:12:02 2002 +++ b/arch/i386/kernel/time.c Tue Jun 18 19:12:02 2002 @@ -65,6 +65,7 @@ */ #include +u64 jiffies_64; unsigned long cpu_khz; /* Detected as we calibrate the TSC */ diff -Nru a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile --- a/arch/i386/mm/Makefile Tue Jun 18 19:12:02 2002 +++ b/arch/i386/mm/Makefile Tue Jun 18 19:12:02 2002 @@ -9,6 +9,7 @@ O_TARGET := mm.o -obj-y := init.o fault.o ioremap.o extable.o +obj-y := init.o fault.o ioremap.o extable.o pageattr.o +export-objs := pageattr.o include $(TOPDIR)/Rules.make diff -Nru a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c --- a/arch/i386/mm/ioremap.c Tue Jun 18 19:12:02 2002 +++ b/arch/i386/mm/ioremap.c Tue Jun 18 19:12:02 2002 @@ -10,12 +10,13 @@ #include #include +#include #include #include #include #include #include - +#include static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, unsigned long phys_addr, unsigned long flags) @@ -155,6 +156,7 @@ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; + area->phys_addr = phys_addr; addr = area->addr; if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) { vfree(addr); @@ -163,10 +165,69 @@ return (void *) (offset + (char *)addr); } + +/** + * ioremap_nocache - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap_nocache performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * This version of ioremap ensures that the memory is marked uncachable + * on the CPU as well as honouring existing caching rules from things like + * the PCI bus. Note that there are other caches and buffers on many + * busses. In particular driver authors should read up on PCI writes + * + * It's useful if some control registers are in such an area and + * write combining or read caching is not desirable: + * + * Must be freed with iounmap. 
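
A usage sketch for the interface documented above; the bus address, size and register offset belong to an imaginary device:

#include <linux/errno.h>
#include <asm/io.h>

#define DEMO_MMIO_BASE  0xfebff000UL    /* invented BAR address */
#define DEMO_MMIO_SIZE  4096UL
#define DEMO_CTRL_REG   0x10

static int demo_setup(void)
{
        void *regs = ioremap_nocache(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);

        if (!regs)
                return -ENOMEM;
        writel(1, regs + DEMO_CTRL_REG);  /* reaches the device uncached */
        iounmap(regs);                    /* must be freed with iounmap */
        return 0;
}
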
+ */ + +void *ioremap_nocache (unsigned long phys_addr, unsigned long size) +{ + void *p = __ioremap(phys_addr, size, _PAGE_PCD); + if (!p) + return p; + + if (phys_addr + size < virt_to_phys(high_memory)) { + struct page *ppage = virt_to_page(__va(phys_addr)); + unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + + BUG_ON(phys_addr+size > (unsigned long)high_memory); + BUG_ON(phys_addr + size < phys_addr); + + if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { + iounmap(p); + p = NULL; + } + } + + return p; +} + void iounmap(void *addr) { - if (addr > high_memory) - return vfree((void *) (PAGE_MASK & (unsigned long) addr)); + struct vm_struct *p; + if (addr < high_memory) + return; + p = remove_kernel_area(addr); + if (!p) { + printk("__iounmap: bad address %p\n", addr); + return; + } + + vmfree_area_pages(VMALLOC_VMADDR(p->addr), p->size); + if (p->flags && p->phys_addr < virt_to_phys(high_memory)) { + change_page_attr(virt_to_page(__va(p->phys_addr)), + p->size >> PAGE_SHIFT, + PAGE_KERNEL); + } + kfree(p); } void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) diff -Nru a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/arch/i386/mm/pageattr.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,198 @@ +/* + * Copyright 2002 Andi Kleen, SuSE Labs. + * Thanks to Ben LaHaise for precious feedback. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline pte_t *lookup_address(unsigned long address) +{ + pgd_t *pgd = pgd_offset_k(address); + pmd_t *pmd = pmd_offset(pgd, address); + if (pmd_large(*pmd)) + return (pte_t *)pmd; + return pte_offset_kernel(pmd, address); +} + +static struct page *split_large_page(unsigned long address, pgprot_t prot) +{ + int i; + unsigned long addr; + struct page *base = alloc_pages(GFP_KERNEL, 0); + pte_t *pbase; + if (!base) + return NULL; + address = __pa(address); + addr = address & LARGE_PAGE_MASK; + pbase = (pte_t *)page_address(base); + for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { + pbase[i] = pfn_pte(addr >> PAGE_SHIFT, + addr == address ? prot : PAGE_KERNEL); + } + return base; +} + +static void flush_kernel_map(void *dummy) +{ + /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */ + if (boot_cpu_data.x86_model >= 4) + asm volatile("wbinvd":::"memory"); + /* Flush all to work around Errata in early athlons regarding + * large page flushing. + */ + __flush_tlb_all(); +} + +static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) +{ + set_pte_atomic(kpte, pte); /* change init_mm */ +#ifndef CONFIG_X86_PAE + { + struct list_head *l; + spin_lock(&mmlist_lock); + list_for_each(l, &init_mm.mmlist) { + struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist); + pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address); + set_pte_atomic((pte_t *)pmd, pte); + } + spin_unlock(&mmlist_lock); + } +#endif +} + +/* + * No more special protections in this 2/4MB area - revert to a + * large page again. 
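
The geometry behind split_large_page() and revert_page(), reduced to userspace arithmetic with i386 non-PAE numbers assumed (4MB large pages, 1024 PTEs of 4096 bytes each):

#include <stdio.h>

#define DEMO_PAGE_SIZE          4096UL
#define DEMO_PTRS_PER_PTE       1024UL
#define DEMO_LARGE_SIZE         (DEMO_PAGE_SIZE * DEMO_PTRS_PER_PTE)
#define DEMO_LARGE_MASK         (~(DEMO_LARGE_SIZE - 1))

int main(void)
{
        unsigned long address = 0x00c12000UL;   /* arbitrary address */
        unsigned long base = address & DEMO_LARGE_MASK;
        unsigned long idx = (address - base) / DEMO_PAGE_SIZE;

        /* split_large_page() fills all 1024 slots covering 'base';
           only slot 'idx' receives the non-default protection. */
        printf("large page base %#lx, pte index %lu of %lu\n",
               base, idx, DEMO_PTRS_PER_PTE);
        return 0;
}

Once every slot is back to PAGE_KERNEL, the pte page's count (which __change_page_attr tracks) drops to one and revert_page() reinstates the single large mapping.
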
+ */ +static inline void revert_page(struct page *kpte_page, unsigned long address) +{ + pte_t *linear = (pte_t *) + pmd_offset(pgd_offset(&init_mm, address), address); + set_pmd_pte(linear, address, + pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT, + PAGE_KERNEL_LARGE)); +} + +static int +__change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage) +{ + pte_t *kpte; + unsigned long address; + struct page *kpte_page; + +#ifdef CONFIG_HIGHMEM + if (page >= highmem_start_page) + BUG(); +#endif + address = (unsigned long)page_address(page); + + kpte = lookup_address(address); + kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK); + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { + if ((pte_val(*kpte) & _PAGE_PSE) == 0) { + pte_t old = *kpte; + pte_t standard = mk_pte(page, PAGE_KERNEL); + + set_pte_atomic(kpte, mk_pte(page, prot)); + if (pte_same(old,standard)) + atomic_inc(&kpte_page->count); + } else { + struct page *split = split_large_page(address, prot); + if (!split) + return -ENOMEM; + set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL)); + } + } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { + set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL)); + atomic_dec(&kpte_page->count); + } + + if (cpu_has_pse && (atomic_read(&kpte_page->count) == 1)) { + *oldpage = kpte_page; + revert_page(kpte_page, address); + } + return 0; +} + +static inline void flush_map(void) +{ +#ifdef CONFIG_SMP + smp_call_function(flush_kernel_map, NULL, 1, 1); +#endif + flush_kernel_map(NULL); +} + +struct deferred_page { + struct deferred_page *next; + struct page *fpage; +}; +static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */ + +/* + * Change the page attributes of an page in the linear mapping. + * + * This should be used when a page is mapped with a different caching policy + * than write-back somewhere - some CPUs do not like it when mappings with + * different caching policies exist. This changes the page attributes of the + * in kernel linear mapping too. + * + * The caller needs to ensure that there are no conflicting mappings elsewhere. + * This function only deals with the kernel linear map. + * + * Caller must call global_flush_tlb() after this. 
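
A usage sketch for the new interface; everything here is schematic, and PAGE_KERNEL_NOCACHE is the same protection the reworked ioremap_nocache() above passes in:

#include <linux/mm.h>
#include <asm/pgtable.h>

static void *demo_alloc_uncached(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);

        if (!page)
                return NULL;
        if (change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) < 0) {
                __free_page(page);
                return NULL;
        }
        global_flush_tlb();     /* mandatory after change_page_attr() */
        return page_address(page);
}

static void demo_free_uncached(void *addr)
{
        struct page *page = virt_to_page(addr);

        change_page_attr(page, 1, PAGE_KERNEL); /* back to write-back */
        global_flush_tlb();
        __free_page(page);
}
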
+ */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+	int err = 0;
+	struct page *fpage;
+	int i;
+
+	down_write(&init_mm.mmap_sem);
+	for (i = 0; i < numpages; i++, page++) {
+		fpage = NULL;
+		err = __change_page_attr(page, prot, &fpage);
+		if (err)
+			break;
+		if (fpage) {
+			struct deferred_page *df;
+			df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
+			if (!df) {
+				flush_map();
+				__free_page(fpage);
+			} else {
+				df->next = df_list;
+				df->fpage = fpage;
+				df_list = df;
+			}
+		}
+	}
+	up_write(&init_mm.mmap_sem);
+	return err;
+}
+
+void global_flush_tlb(void)
+{
+	struct deferred_page *df, *next_df;
+
+	down_read(&init_mm.mmap_sem);
+	df = xchg(&df_list, NULL);
+	up_read(&init_mm.mmap_sem);
+	flush_map();
+	for (; df; df = next_df) {
+		next_df = df->next;
+		if (df->fpage)
+			__free_page(df->fpage);
+		kfree(df);
+	}
+}
+
+EXPORT_SYMBOL(change_page_attr);
+EXPORT_SYMBOL(global_flush_tlb);
diff -Nru a/arch/ia64/Makefile b/arch/ia64/Makefile
--- a/arch/ia64/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/ia64/Makefile	Tue Jun 18 19:12:01 2002
@@ -127,8 +127,11 @@
 	rm -f arch/$(ARCH)/vmlinux.lds
 	@$(MAKE) -C arch/$(ARCH)/tools mrproper
-archdep:
-	@$(MAKEBOOT) dep
-
 bootpfile:
 	@$(MAKEBOOT) bootpfile
+
+prepare: $(TOPDIR)/include/asm-ia64/offsets.h
+
+$(TOPDIR)/include/asm-ia64/offsets.h: include/asm include/linux/version.h \
+	include/config/MARKER
+	@$(MAKE) -C arch/$(ARCH)/tools $@
\ No newline at end of file
diff -Nru a/arch/ia64/boot/Makefile b/arch/ia64/boot/Makefile
--- a/arch/ia64/boot/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/boot/Makefile	Tue Jun 18 19:12:02 2002
@@ -23,5 +23,3 @@
 clean:
 	rm -f $(TARGETS)
-
-dep:
diff -Nru a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
--- a/arch/ia64/ia32/sys_ia32.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/ia32/sys_ia32.c	Tue Jun 18 19:12:02 2002
@@ -3629,47 +3629,6 @@
 	return ret;
 }
-/* In order to reduce some races, while at the same time doing additional
- * checking and hopefully speeding things up, we copy filenames to the
- * kernel data space before using them..
- *
- * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
- */
-static inline int
-do_getname32 (const char *filename, char *page)
-{
-	int retval;
-
-	/* 32bit pointer will be always far below TASK_SIZE :)) */
-	retval = strncpy_from_user((char *)page, (char *)filename, PAGE_SIZE);
-	if (retval > 0) {
-		if (retval < PAGE_SIZE)
-			return 0;
-		return -ENAMETOOLONG;
-	} else if (!retval)
-		retval = -ENOENT;
-	return retval;
-}
-
-static char *
-getname32 (const char *filename)
-{
-	char *tmp, *result;
-
-	result = ERR_PTR(-ENOMEM);
-	tmp = (char *)__get_free_page(GFP_KERNEL);
-	if (tmp) {
-		int retval = do_getname32(filename, tmp);
-
-		result = tmp;
-		if (retval < 0) {
-			putname(tmp);
-			result = ERR_PTR(retval);
-		}
-	}
-	return result;
-}
-
 asmlinkage long
 sys32_sched_rr_get_interval (pid_t pid, struct timespec32 *interval)
 {
diff -Nru a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
--- a/arch/ia64/kernel/ia64_ksyms.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/kernel/ia64_ksyms.c	Tue Jun 18 19:12:02 2002
@@ -85,9 +85,6 @@
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(ia64_cpu_to_sapicid);
-#include
-EXPORT_SYMBOL(smp_num_cpus);
-
 #include
 EXPORT_SYMBOL(kernel_flag);
diff -Nru a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
--- a/arch/ia64/kernel/iosapic.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/kernel/iosapic.c	Tue Jun 18 19:12:02 2002
@@ -256,7 +256,7 @@
 	char *addr;
 	int redir = (irq & (1<<31)) ? 1 : 0;
-	mask &= (1UL << smp_num_cpus) - 1;
+	mask &= cpu_online_map;
 	if (!mask || irq >= IA64_NUM_VECTORS)
 		return;
@@ -759,9 +759,8 @@
 		set_rte(vector, cpu_physical_id(cpu_index) & 0xffff);
-		cpu_index++;
-		if (cpu_index >= smp_num_cpus)
-			cpu_index = 0;
+		for (cpu_index++; !cpu_online(cpu_index % NR_CPUS); cpu_index++);
+		cpu_index %= NR_CPUS;
 	} else {
 		/*
 		 * Direct the interrupt vector to the current cpu,
diff -Nru a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
--- a/arch/ia64/kernel/irq.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/kernel/irq.c	Tue Jun 18 19:12:02 2002
@@ -156,8 +157,9 @@
 	irq_desc_t *idesc;
 	seq_puts(p, " ");
-	for (j=0; jhandler->typename);
 	seq_printf(p, " %s", action->name);
@@ -181,15 +182,15 @@
 		seq_putc(p, '\n');
 	}
 	seq_puts(p, "NMI: ");
-	for (j = 0; j < smp_num_cpus; j++)
-		seq_printf(p, "%10u ",
-			   nmi_count(cpu_logical_map(j)));
+	for (j = 0; j < NR_CPUS; j++)
+		if (cpu_online(j))
+			seq_printf(p, "%10u ", nmi_count(j));
 	seq_putc(p, '\n');
 #if defined(CONFIG_SMP) && defined(CONFIG_X86)
 	seq_puts(p, "LOC: ");
-	for (j = 0; j < smp_num_cpus; j++)
-		seq_printf(p, "%10u ",
-			   apic_timer_irqs[cpu_logical_map(j)]);
+	for (j = 0; j < NR_CPUS; j++)
+		if (cpu_online(j))
+			seq_printf(p, "%10u ", apic_timer_irqs[j]);
 	seq_putc(p, '\n');
 #endif
 	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -218,10 +219,10 @@
 	printk("\n%s, CPU %d:\n", str, cpu);
 	printk("irq: %d [",irqs_running());
-	for(i=0;i < smp_num_cpus;i++)
+	for(i=0;i < NR_CPUS;i++)
 		printk(" %d",irq_count(i));
 	printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
-	for(i=0;i < smp_num_cpus;i++)
+	for(i=0;i < NR_CPUS;i++)
 		printk(" %d",bh_count(i));
 	printk(" ]\nStack dumps:");
@@ -233,7 +234,7 @@
 	 * idea.
 	 */
 #elif defined(CONFIG_X86)
-	for(i=0;i< smp_num_cpus;i++) {
+	for(i=0;i< NR_CPUS;i++) {
 		unsigned long esp;
 		if(i==cpu)
 			continue;
diff -Nru a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
--- a/arch/ia64/kernel/mca.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/kernel/mca.c	Tue Jun 18 19:12:02 2002
@@ -604,9 +604,12 @@
 	int cpu;
 	/* Clear the Rendez checkin flag for all cpus */
-	for(cpu = 0; cpu < smp_num_cpus; cpu++)
+	for(cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (!cpu_online(cpu))
+			continue;
 		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
 			ia64_mca_wakeup(cpu);
+	}
 }
diff -Nru a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
--- a/arch/ia64/kernel/perfmon.c	Tue Jun 18 19:12:03 2002
+++ b/arch/ia64/kernel/perfmon.c	Tue Jun 18 19:12:03 2002
@@ -903,7 +903,7 @@
 	 * and it must be a valid CPU
 	 */
 	cpu = ffs(pfx->ctx_cpu_mask);
-	if (cpu > smp_num_cpus) {
+	if (!cpu_online(cpu)) {
 		DBprintk(("CPU%d is not online\n", cpu));
 		return -EINVAL;
 	}
diff -Nru a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
--- a/arch/ia64/kernel/smp.c	Tue Jun 18 19:12:03 2002
+++ b/arch/ia64/kernel/smp.c	Tue Jun 18 19:12:03 2002
@@ -168,8 +168,8 @@
 {
 	int i;
-	for (i = 0; i < smp_num_cpus; i++) {
-		if (i != smp_processor_id())
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_online(i) && i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -179,8 +179,9 @@
 {
 	int i;
-	for (i = 0; i < smp_num_cpus; i++)
-		send_IPI_single(i, op);
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i))
+			send_IPI_single(i, op);
 }
 
 static inline void
@@ -205,8 +206,8 @@
 {
 	int i;
-	for (i = 0; i < smp_num_cpus; i++)
-		if (i != smp_processor_id())
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i) && i != smp_processor_id())
 			smp_send_reschedule(i);
 }
@@ -290,7 +291,7 @@
 smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 {
 	struct call_data_struct data;
-	int cpus = smp_num_cpus-1;
+	int cpus = num_online_cpus()-1;
 	if (!cpus)
 		return 0;
@@ -339,7 +340,6 @@
 smp_send_stop (void)
 {
 	send_IPI_allbutself(IPI_CPU_STOP);
-	smp_num_cpus = 1;
 }
 
 int __init
diff -Nru a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
--- a/arch/ia64/kernel/smpboot.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/kernel/smpboot.c	Tue Jun 18 19:12:02 2002
@@ -76,9 +76,6 @@
 /* Setup configured maximum number of CPUs to activate */
 static int max_cpus = -1;
-/* Total count of live CPUs */
-int smp_num_cpus = 1;
-
 /* Bitmask of currently online CPUs */
 volatile unsigned long cpu_online_map;
@@ -505,7 +502,6 @@
 	if (!max_cpus || (max_cpus < -1)) {
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		cpu_online_map = 1;
-		smp_num_cpus = 1;
 		goto smp_done;
 	}
 	if (max_cpus != -1)
@@ -535,8 +531,6 @@
 			printk("phys CPU#%d not responding - cannot use it.\n", cpu);
 	}
-	smp_num_cpus = cpucount + 1;
-
 	/*
 	 * Allow the user to impress friends.
 	 */
@@ -581,6 +575,5 @@
 		printk("SMP: Can't set SAL AP Boot Rendezvous: %s\n Forcing UP mode\n", ia64_sal_strerror(sal_ret));
 		max_cpus = 0;
-		smp_num_cpus = 1;
 	}
 }
diff -Nru a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
--- a/arch/ia64/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ia64/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -27,6 +27,8 @@
 extern unsigned long wall_jiffies;
 extern unsigned long last_time_offset;
+u64 jiffies_64;
+
 #ifdef CONFIG_IA64_DEBUG_IRQ
 unsigned long last_cli_ip;
diff -Nru a/arch/ia64/sn/io/sgi_io_init.c b/arch/ia64/sn/io/sgi_io_init.c
--- a/arch/ia64/sn/io/sgi_io_init.c	Tue Jun 18 19:12:01 2002
+++ b/arch/ia64/sn/io/sgi_io_init.c	Tue Jun 18 19:12:01 2002
@@ -255,7 +255,7 @@
 	cnodeid_t cnode;
 	cpuid_t cpu;
-	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		/* Skip holes in CPU space */
 		if (cpu_enabled(cpu)) {
 			init_platform_pda(cpu);
diff -Nru a/arch/ia64/sn/io/sn1/ml_SN_intr.c b/arch/ia64/sn/io/sn1/ml_SN_intr.c
--- a/arch/ia64/sn/io/sn1/ml_SN_intr.c	Tue Jun 18 19:12:01 2002
+++ b/arch/ia64/sn/io/sn1/ml_SN_intr.c	Tue Jun 18 19:12:01 2002
@@ -987,12 +987,13 @@
 	char subnode_done[NUM_SUBNODES];
 	// cpu = cnodetocpu(cnode);
-	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (!cpu_online(cpu)) continue;
 		if (cpuid_to_cnodeid(cpu) == cnode) {
 			break;
 		}
 	}
-	if (cpu == smp_num_cpus) cpu = CPU_NONE;
+	if (cpu == NR_CPUS) cpu = CPU_NONE;
 	if (cpu == CPU_NONE) {
 		printk("Node %d has no CPUs", cnode);
 		return;
@@ -1001,7 +1002,7 @@
 	for (i=0; iend = end;
 	params->nbits = nbits;
 	params->rid = (unsigned int) ia64_get_rr(start);
-	atomic_set(&params->unfinished_count, smp_num_cpus);
+	atomic_set(&params->unfinished_count, num_online_cpus());
 	/* The atomic_set above can hit memory *after* the update
 	 * to ptcParamsEmpty below, which opens a timing window
@@ -425,7 +425,7 @@
 {
 	if (!ia64_ptc_domain_info) {
 		printk("SMP: Can't find PTC domain info. Forcing UP mode\n");
-		smp_num_cpus = 1;
+		cpu_online_map = 1;
 		return;
 	}
diff -Nru a/arch/ia64/tools/Makefile b/arch/ia64/tools/Makefile
--- a/arch/ia64/tools/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/ia64/tools/Makefile	Tue Jun 18 19:12:01 2002
@@ -9,7 +9,7 @@
 clean:
 	rm -f print_offsets.s print_offsets offsets.h
-fastdep: offsets.h
+$(TARGET): offsets.h
 	@if ! cmp -s offsets.h ${TARGET}; then \
 		echo -e "*** Updating ${TARGET}..."; \
 		cp offsets.h ${TARGET}; \
diff -Nru a/arch/m68k/Makefile b/arch/m68k/Makefile
--- a/arch/m68k/Makefile	Tue Jun 18 19:12:03 2002
+++ b/arch/m68k/Makefile	Tue Jun 18 19:12:03 2002
@@ -172,5 +172,3 @@
 	rm -f arch/m68k/kernel/m68k_defs.h arch/m68k/kernel/m68k_defs.d
 archmrproper:
-
-archdep:
diff -Nru a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
--- a/arch/m68k/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/m68k/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -24,6 +24,7 @@
 #include
+u64 jiffies_64;
 static inline int set_rtc_mmss(unsigned long nowtime)
 {
diff -Nru a/arch/mips/Makefile b/arch/mips/Makefile
--- a/arch/mips/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/mips/Makefile	Tue Jun 18 19:12:01 2002
@@ -308,9 +308,3 @@
 archmrproper:
 	@$(MAKEBOOT) mrproper
 	$(MAKE) -C arch/$(ARCH)/tools mrproper
-
-archdep:
-	if [ ! -f $(TOPDIR)/include/asm-$(ARCH)/offset.h ]; then \
-		touch $(TOPDIR)/include/asm-$(ARCH)/offset.h; \
-	fi;
-	@$(MAKEBOOT) dep
diff -Nru a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
--- a/arch/mips/boot/Makefile	Tue Jun 18 19:12:03 2002
+++ b/arch/mips/boot/Makefile	Tue Jun 18 19:12:03 2002
@@ -35,9 +35,6 @@
 addinitrd: addinitrd.c
 	$(HOSTCC) -o $@ $^
-# Don't build dependencies, this may die if $(CC) isn't gcc
-dep:
-
 clean:
 	rm -f vmlinux.ecoff
 	rm -f zImage zImage.tmp
diff -Nru a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
--- a/arch/mips/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/mips/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -32,6 +32,8 @@
 #define USECS_PER_JIFFY (1000000/HZ)
 #define USECS_PER_JIFFY_FRAC ((1000000ULL << 32) / HZ & 0xffffffff)
+u64 jiffies_64;
+
 /*
  * forward reference
  */
diff -Nru a/arch/mips64/Makefile b/arch/mips64/Makefile
--- a/arch/mips64/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/mips64/Makefile	Tue Jun 18 19:12:02 2002
@@ -176,9 +176,3 @@
 archmrproper:
 	@$(MAKEBOOT) mrproper
 	$(MAKE) -C arch/$(ARCH)/tools mrproper
-
-archdep:
-	if [ ! -f $(TOPDIR)/include/asm-$(ARCH)/offset.h ]; then \
-		touch $(TOPDIR)/include/asm-$(ARCH)/offset.h; \
-	fi;
-	@$(MAKEBOOT) dep
diff -Nru a/arch/mips64/boot/Makefile b/arch/mips64/boot/Makefile
--- a/arch/mips64/boot/Makefile	Tue Jun 18 19:12:03 2002
+++ b/arch/mips64/boot/Makefile	Tue Jun 18 19:12:03 2002
@@ -26,9 +26,6 @@
 addinitrd: addinitrd.c
 	$(HOSTCC) -o $@ $^
-# Don't build dependencies, this may die if $(CC) isn't gcc
-dep:
-
 clean:
 	rm -f vmlinux.ecoff
diff -Nru a/arch/mips64/kernel/syscall.c b/arch/mips64/kernel/syscall.c
--- a/arch/mips64/kernel/syscall.c	Tue Jun 18 19:12:02 2002
+++ b/arch/mips64/kernel/syscall.c	Tue Jun 18 19:12:02 2002
@@ -32,6 +32,8 @@
 #include
 #include
+u64 jiffies_64;
+
 extern asmlinkage void syscall_trace(void);
 asmlinkage int sys_pipe(abi64_no_regargs, struct pt_regs regs)
diff -Nru a/arch/parisc/Makefile b/arch/parisc/Makefile
--- a/arch/parisc/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/parisc/Makefile	Tue Jun 18 19:12:02 2002
@@ -78,5 +78,3 @@
 archclean:
 archmrproper:
-
-archdep:
diff -Nru a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
--- a/arch/parisc/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/parisc/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -30,6 +30,8 @@
 #include
+u64 jiffies_64;
+
 extern rwlock_t xtime_lock;
 static int timer_value;
diff -Nru a/arch/ppc/Makefile b/arch/ppc/Makefile
--- a/arch/ppc/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/ppc/Makefile	Tue Jun 18 19:12:01 2002
@@ -116,6 +116,3 @@
 	@$(MAKEBOOT) clean
 archmrproper:
-
-archdep: scripts/mkdep
-	$(MAKEBOOT) fastdep
diff -Nru a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
--- a/arch/ppc/kernel/irq.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ppc/kernel/irq.c	Tue Jun 18 19:12:02 2002
@@ -370,8 +370,9 @@
 	struct irqaction * action;
 	seq_puts(p, " ");
-	for (j=0; j>= 1)
-		mask |= (cpumask & 1) << smp_hw_index[i];
+	for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
+		if (cpu_online(i))
+			mask |= (cpumask & 1) << smp_hw_index[i];
 	return mask;
 }
 #else
diff -Nru a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
--- a/arch/ppc/kernel/ppc_ksyms.c	Tue Jun 18 19:12:03 2002
+++ b/arch/ppc/kernel/ppc_ksyms.c	Tue Jun 18 19:12:03 2002
@@ -228,7 +228,6 @@
 #endif
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_hw_index);
-EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(synchronize_irq);
 #endif
diff -Nru a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
--- a/arch/ppc/kernel/setup.c	Tue Jun 18 19:12:03 2002
+++ b/arch/ppc/kernel/setup.c	Tue Jun 18 19:12:03 2002
@@ -147,8 +147,8 @@
 		/* Show summary information */
 #ifdef CONFIG_SMP
 		unsigned long bogosum = 0;
-		for (i = 0; i < smp_num_cpus; ++i)
-			if (cpu_online_map & (1 << i))
+		for (i = 0; i < NR_CPUS; ++i)
+			if (cpu_online(i))
 				bogosum += cpu_data[i].loops_per_jiffy;
 		seq_printf(m, "total bogomips\t: %lu.%02lu\n", bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
diff -Nru a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
--- a/arch/ppc/kernel/smp.c	Tue Jun 18 19:12:03 2002
+++ b/arch/ppc/kernel/smp.c	Tue Jun 18 19:12:03 2002
@@ -42,7 +42,6 @@
 int smp_threads_ready;
 volatile int smp_commenced;
-int smp_num_cpus = 1;
 int smp_tb_synchronized;
 struct cpuinfo_PPC cpu_data[NR_CPUS];
 struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
@@ -68,7 +67,6 @@
 int start_secondary(void *);
 extern int cpu_idle(void *unused);
 void smp_call_function_interrupt(void);
-void smp_message_pass(int target, int msg, unsigned long data, int wait);
 static int __smp_call_function(void (*func) (void *info), void *info, int wait, int target);
@@ -174,7 +172,6 @@
 void smp_send_stop(void)
 {
 	smp_call_function(stop_this_cpu, NULL, 1, 0);
-	smp_num_cpus = 1;
 }
 
 /*
@@ -212,7 +209,9 @@
  * hardware interrupt handler or from a bottom half handler.
  */
 {
-	if (smp_num_cpus <= 1)
+	/* FIXME: get cpu lock with hotplug cpus, or change this to
+	   bitmask. --RR */
+	if (num_online_cpus() <= 1)
 		return 0;
 	return __smp_call_function(func, info, wait, MSG_ALL_BUT_SELF);
 }
@@ -226,9 +225,9 @@
 	int ncpus = 1;
 
 	if (target == MSG_ALL_BUT_SELF)
-		ncpus = smp_num_cpus - 1;
+		ncpus = num_online_cpus() - 1;
 	else if (target == MSG_ALL)
-		ncpus = smp_num_cpus;
+		ncpus = num_online_cpus();
 
 	data.func = func;
 	data.info = info;
@@ -298,7 +297,6 @@
 	struct task_struct *p;
 
 	printk("Entering SMP Mode...\n");
-	smp_num_cpus = 1;
 	smp_store_cpu_info(0);
 	cpu_online_map = 1UL;
@@ -375,7 +373,6 @@
 			sprintf(buf, "found cpu %d", i);
 			if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
 			printk("Processor %d found.\n", i);
-			smp_num_cpus++;
 		} else {
 			char buf[32];
 			sprintf(buf, "didn't find cpu %d", i);
@@ -387,7 +384,8 @@
 	/* Setup CPU 0 last (important) */
 	smp_ops->setup_cpu(0);
 
-	if (smp_num_cpus < 2)
+	/* FIXME: Not with hotplug CPUS --RR */
+	if (num_online_cpus() < 2)
 		smp_tb_synchronized = 1;
 }
@@ -413,7 +411,7 @@
 	for (pass = 2; pass < 2+PASSES; pass++){
 		if (cpu == 0){
 			mb();
-			for (i = j = 1; i < smp_num_cpus; i++, j++){
+			for (i = j = 1; i < NR_CPUS; i++, j++){
 				/* skip stuck cpus */
 				while (!cpu_callin_map[j])
 					++j;
@@ -487,7 +485,8 @@
 	 *
 	 * NOTE2: this code doesn't seem to work on > 2 cpus. -- paulus/BenH
 	 */
-	if (!smp_tb_synchronized && smp_num_cpus == 2) {
+	/* FIXME: This doesn't work with hotplug CPUs --RR */
+	if (!smp_tb_synchronized && num_online_cpus() == 2) {
 		unsigned long flags;
 		__save_and_cli(flags);
 		smp_software_tb_sync(0);
@@ -501,24 +500,18 @@
 	smp_store_cpu_info(cpu);
 	set_dec(tb_ticks_per_jiffy);
+	/* Set online before we acknowledge. */
+	set_bit(cpu, &cpu_online_map);
+	wmb();
 	cpu_callin_map[cpu] = 1;
 
 	smp_ops->setup_cpu(cpu);
-	/*
-	 * This cpu is now "online". Only set them online
-	 * before they enter the loop below since write access
-	 * to the below variable is _not_ guaranteed to be
-	 * atomic.
-	 *   -- Cort
-	 */
-	cpu_online_map |= 1UL << smp_processor_id();
-
 	while (!smp_commenced)
 		barrier();
 
 	/* see smp_commence for more info */
-	if (!smp_tb_synchronized && smp_num_cpus == 2) {
+	if (!smp_tb_synchronized && num_online_cpus() == 2) {
 		smp_software_tb_sync(cpu);
 	}
 	__sti();
diff -Nru a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
--- a/arch/ppc/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ppc/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -70,6 +70,9 @@
 #include
+/* XXX false sharing with below?
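+ * (i.e. jiffies_64, written on every timer tick, may end up in the same
+ * cache line as the hot per-cpu array below, and the line could then
+ * bounce between CPUs)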
+ */
+u64 jiffies_64;
+
 unsigned long disarm_decr[NR_CPUS];
 extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
diff -Nru a/arch/ppc/platforms/chrp_smp.c b/arch/ppc/platforms/chrp_smp.c
--- a/arch/ppc/platforms/chrp_smp.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ppc/platforms/chrp_smp.c	Tue Jun 18 19:12:02 2002
@@ -63,9 +63,10 @@
 	static atomic_t ready = ATOMIC_INIT(1);
 	static volatile int frozen = 0;
+	/* FIXME: Hotplug cpu breaks all this --RR */
 	if (cpu_nr == 0) {
 		/* wait for all the others */
-		while (atomic_read(&ready) < smp_num_cpus)
+		while (atomic_read(&ready) < num_online_cpus())
 			barrier();
 		atomic_set(&ready, 1);
 		/* freeze the timebase */
@@ -75,7 +76,7 @@
 		/* XXX assumes this is not a 601 */
 		set_tb(0, 0);
 		last_jiffy_stamp(0) = 0;
-		while (atomic_read(&ready) < smp_num_cpus)
+		while (atomic_read(&ready) < num_online_cpus())
 			barrier();
 		/* thaw the timebase again */
 		call_rtas("thaw-time-base", 0, 1, NULL);
diff -Nru a/arch/ppc/platforms/iSeries_smp.c b/arch/ppc/platforms/iSeries_smp.c
--- a/arch/ppc/platforms/iSeries_smp.c	Tue Jun 18 19:12:01 2002
+++ b/arch/ppc/platforms/iSeries_smp.c	Tue Jun 18 19:12:01 2002
@@ -50,7 +50,7 @@
 	int cpu = smp_processor_id();
 	int msg;
-	if ( smp_num_cpus < 2 )
+	if ( num_online_cpus() < 2 )
 		return;
 	for ( msg = 0; msg < 4; ++msg )
@@ -62,7 +62,10 @@
 static void smp_iSeries_message_pass(int target, int msg, unsigned long data, int wait)
 {
 	int i;
-	for (i = 0; i < smp_num_cpus; ++i) {
+	for (i = 0; i < NR_CPUS; ++i) {
+		if (!cpu_online(i))
+			continue;
+
 		if ( (target == MSG_ALL) || (target == i) || ((target == MSG_ALL_BUT_SELF) && (i != smp_processor_id())) ) {
diff -Nru a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c
--- a/arch/ppc/platforms/pmac_smp.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ppc/platforms/pmac_smp.c	Tue Jun 18 19:12:02 2002
@@ -282,7 +282,7 @@
 	/* clear interrupt */
 	psurge_clr_ipi(cpu);
-	if (smp_num_cpus < 2)
+	if (num_online_cpus() < 2)
 		return;
 	/* make sure there is a message there */
@@ -302,10 +302,12 @@
 {
 	int i;
-	if (smp_num_cpus < 2)
+	if (num_online_cpus() < 2)
 		return;
-	for (i = 0; i < smp_num_cpus; i++) {
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_online(i))
+			continue;
 		if (target == MSG_ALL || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) || target == i) {
@@ -497,7 +499,7 @@
 {
 	if (cpu_nr == 0) {
-		if (smp_num_cpus < 2)
+		if (num_online_cpus() < 2)
 			return;
 		/* reset the entry point so if we get another intr we won't
 		 * try to startup again */
diff -Nru a/arch/ppc64/Makefile b/arch/ppc64/Makefile
--- a/arch/ppc64/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/ppc64/Makefile	Tue Jun 18 19:12:01 2002
@@ -66,5 +66,5 @@
 archmrproper:
-archdep:
-	$(MAKEBOOT) fastdep
+prepare:
+	$(MAKEBOOT) dep
diff -Nru a/arch/ppc64/boot/Makefile b/arch/ppc64/boot/Makefile
--- a/arch/ppc64/boot/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/ppc64/boot/Makefile	Tue Jun 18 19:12:02 2002
@@ -121,9 +121,6 @@
 clean:
 	rm -f piggyback note addnote $(OBJS) zImage zImage.initrd vmlinux.gz no_initrd.o imagesize.c addSystemMap vmlinux.sm addRamDisk vmlinux.initrd vmlinux.sminitrd
-fastdep:
-	$(TOPDIR)/scripts/mkdep *.[Sch] > .depend
-
 dep:
 	$(CPP) $(CPPFLAGS) -M *.S *.c > .depend
diff -Nru a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
--- a/arch/ppc64/kernel/sys_ppc32.c	Tue Jun 18 19:12:02 2002
+++ b/arch/ppc64/kernel/sys_ppc32.c	Tue Jun 18 19:12:02 2002
@@ -82,47 +82,6 @@
  */
 #define MSR_USERCHANGE	(MSR_FE0 | MSR_FE1)
-/* In order to reduce some races, while at the same time doing additional
- * checking and hopefully speeding things up, we copy filenames to the
- * kernel data space before using them..
- *
- * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
- */
-static inline int do_getname32(const char *filename, char *page)
-{
-	int retval;
-
-	/* 32bit pointer will be always far below TASK_SIZE :)) */
-	retval = strncpy_from_user((char *)page, (char *)filename, PAGE_SIZE);
-	if (retval > 0) {
-		if (retval < PAGE_SIZE)
-			return 0;
-		return -ENAMETOOLONG;
-	} else if (!retval)
-		retval = -ENOENT;
-	return retval;
-}
-
-char * getname32(const char *filename)
-{
-	char *tmp, *result;
-
-	result = ERR_PTR(-ENOMEM);
-	tmp = __getname();
-	if (tmp) {
-		int retval = do_getname32(filename, tmp);
-
-		result = tmp;
-		if (retval < 0) {
-			putname(tmp);
-			result = ERR_PTR(retval);
-		}
-	}
-	return result;
-}
-
-
-
 extern asmlinkage long sys_utime(char * filename, struct utimbuf * times);
 
 struct utimbuf32 {
@@ -142,7 +101,7 @@
 		return sys_utime(filename, NULL);
 	if (get_user(t.actime, &times->actime) || __get_user(t.modtime, &times->modtime))
 		return -EFAULT;
-	filenam = getname32(filename);
+	filenam = getname(filename);
 
 	ret = PTR_ERR(filenam);
 	if (!IS_ERR(filenam)) {
@@ -937,7 +896,7 @@
 	PPCDBG(PPCDBG_SYS32X, "sys32_statfs - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
-	pth = getname32 (path);
+	pth = getname (path);
 	ret = PTR_ERR(pth);
 	if (!IS_ERR(pth)) {
 		set_fs (KERNEL_DS);
diff -Nru a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
--- a/arch/ppc64/kernel/time.c	Tue Jun 18 19:12:03 2002
+++ b/arch/ppc64/kernel/time.c	Tue Jun 18 19:12:03 2002
@@ -64,6 +64,8 @@
 void smp_local_timer_interrupt(struct pt_regs *);
+u64 jiffies_64;
+
 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;
 extern rwlock_t xtime_lock;
diff -Nru a/arch/s390/Makefile b/arch/s390/Makefile
--- a/arch/s390/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/s390/Makefile	Tue Jun 18 19:12:01 2002
@@ -58,8 +58,5 @@
 archmrproper:
-archdep:
-	@$(MAKEBOOT) dep
-
 install: vmlinux
	@$(MAKEBOOT) BOOTIMAGE=image install
diff -Nru a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
--- a/arch/s390/boot/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/s390/boot/Makefile	Tue Jun 18 19:12:02 2002
@@ -22,8 +22,6 @@
 listing: ../../../vmlinux
 	$(OBJDUMP) --disassemble --disassemble-all --disassemble-zeroes --reloc $(TOPDIR)/vmlinux > listing
-dep:
-
 clean:
 	rm -f image listing iplfba.boot ipleckd.boot ipldump.boot
diff -Nru a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
--- a/arch/s390/kernel/time.c	Tue Jun 18 19:12:01 2002
+++ b/arch/s390/kernel/time.c	Tue Jun 18 19:12:01 2002
@@ -39,6 +39,8 @@
 #define TICK_SIZE tick
+u64 jiffies_64;
+
 static ext_int_info_t ext_int_info_timer;
 static uint64_t init_timer_cc;
diff -Nru a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
--- a/arch/s390/math-emu/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/s390/math-emu/Makefile	Tue Jun 18 19:12:02 2002
@@ -6,6 +6,7 @@
 obj-$(CONFIG_MATHEMU) := math.o qrnnd.o
 EXTRA_CFLAGS = -I. -I$(TOPDIR)/include/math-emu -w
+EXTRA_AFLAGS := -traditional
 
 include $(TOPDIR)/Rules.make
diff -Nru a/arch/s390/mm/ioremap.c b/arch/s390/mm/ioremap.c
--- a/arch/s390/mm/ioremap.c	Tue Jun 18 19:12:01 2002
+++ b/arch/s390/mm/ioremap.c	Tue Jun 18 19:12:01 2002
@@ -14,6 +14,7 @@
 */
 #include
+#include
 #include
 #include
 #include
diff -Nru a/arch/s390x/Makefile b/arch/s390x/Makefile
--- a/arch/s390x/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/s390x/Makefile	Tue Jun 18 19:12:02 2002
@@ -56,6 +56,3 @@
 	$(MAKE) -C arch/$(ARCH)/kernel clean
 archmrproper:
-
-archdep:
-	@$(MAKEBOOT) dep
diff -Nru a/arch/s390x/boot/Makefile b/arch/s390x/boot/Makefile
--- a/arch/s390x/boot/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/s390x/boot/Makefile	Tue Jun 18 19:12:02 2002
@@ -24,8 +24,6 @@
 listing: ../../../vmlinux
 	$(OBJDUMP) --disassemble --disassemble-all --disassemble-zeroes --reloc $(TOPDIR)/vmlinux > listing
-dep:
-
 clean:
 	rm -f image listing iplfba.boot ipleckd.boot ipldump.boot
diff -Nru a/arch/s390x/kernel/time.c b/arch/s390x/kernel/time.c
--- a/arch/s390x/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/s390x/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -39,6 +39,8 @@
 #define TICK_SIZE tick
+u64 jiffies_64;
+
 static ext_int_info_t ext_int_info_timer;
 static uint64_t init_timer_cc;
diff -Nru a/arch/s390x/kernel/wrapper32.S b/arch/s390x/kernel/wrapper32.S
--- a/arch/s390x/kernel/wrapper32.S	Tue Jun 18 19:12:02 2002
+++ b/arch/s390x/kernel/wrapper32.S	Tue Jun 18 19:12:02 2002
@@ -1112,6 +1112,8 @@
 sys32_futex_wrapper:
	llgtr	%r2,%r2		# void *
	lgfr	%r3,%r3		# int
+	lgfr	%r4,%r4		# int
+	llgtr	%r5,%r5		# struct timespec *
	jg	sys_futex	# branch to system call
 
 .globl	sys32_setxattr_wrapper
diff -Nru a/arch/s390x/mm/ioremap.c b/arch/s390x/mm/ioremap.c
--- a/arch/s390x/mm/ioremap.c	Tue Jun 18 19:12:02 2002
+++ b/arch/s390x/mm/ioremap.c	Tue Jun 18 19:12:02 2002
@@ -14,6 +14,7 @@
 */
 #include
+#include
 #include
 #include
 #include
diff -Nru a/arch/sh/Makefile b/arch/sh/Makefile
--- a/arch/sh/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/sh/Makefile	Tue Jun 18 19:12:02 2002
@@ -94,6 +94,3 @@
 archmrproper:
 	rm -f arch/sh/vmlinux.lds
-
-archdep:
-	@$(MAKEBOOT) dep
diff -Nru a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
--- a/arch/sh/boot/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/sh/boot/Makefile	Tue Jun 18 19:12:01 2002
@@ -25,8 +25,6 @@
 zinstall: zImage
	sh -x ./install.sh $(KERNELRELEASE) zImage $(TOPDIR)/System.map "$(INSTALL_PATH)"
-dep:
-
 clean:
	rm -f tools/build
	rm -f setup bootsect zImage compressed/vmlinux.out
diff -Nru a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
--- a/arch/sh/kernel/time.c	Tue Jun 18 19:12:01 2002
+++ b/arch/sh/kernel/time.c	Tue Jun 18 19:12:01 2002
@@ -70,6 +70,8 @@
 #endif /* CONFIG_CPU_SUBTYPE_ST40STB1 */
 #endif /* __sh3__ or __SH4__ */
+u64 jiffies_64;
+
 extern rwlock_t xtime_lock;
 extern unsigned long wall_jiffies;
 #define TICK_SIZE tick
diff -Nru a/arch/sparc/Makefile b/arch/sparc/Makefile
--- a/arch/sparc/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/sparc/Makefile	Tue Jun 18 19:12:01 2002
@@ -58,9 +58,9 @@
 archmrproper:
 	rm -f $(TOPDIR)/include/asm-sparc/asm_offsets.h
-archdep: check_asm
+prepare: check_asm
-check_asm: include/linux/version.h
+check_asm: include/linux/version.h include/linux/asm include/config/MARKER
 	$(MAKE) -C arch/sparc/kernel check_asm
 
 tftpboot.img:
diff -Nru a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
--- a/arch/sparc/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/sparc/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -43,6 +43,8 @@
 extern rwlock_t xtime_lock;
+u64 jiffies_64;
+
 enum sparc_clock_type sp_clock_typ;
 spinlock_t mostek_lock = SPIN_LOCK_UNLOCKED;
 unsigned long mstk48t02_regs = 0UL;
diff -Nru a/arch/sparc64/Makefile b/arch/sparc64/Makefile
--- a/arch/sparc64/Makefile	Tue Jun 18 19:12:03 2002
+++ b/arch/sparc64/Makefile	Tue Jun 18 19:12:03 2002
@@ -85,7 +85,5 @@
 archmrproper:
-archdep:
-
 tftpboot.img:
 	$(MAKE) -C arch/sparc64/boot tftpboot.img
diff -Nru a/arch/sparc64/boot/Makefile b/arch/sparc64/boot/Makefile
--- a/arch/sparc64/boot/Makefile	Tue Jun 18 19:12:01 2002
+++ b/arch/sparc64/boot/Makefile	Tue Jun 18 19:12:01 2002
@@ -18,6 +18,3 @@
 piggyback: piggyback.c
 	$(HOSTCC) $(HOSTCFLAGS) -o piggyback piggyback.c
-
-dep:
-
diff -Nru a/arch/sparc64/defconfig b/arch/sparc64/defconfig
--- a/arch/sparc64/defconfig	Tue Jun 18 19:12:02 2002
+++ b/arch/sparc64/defconfig	Tue Jun 18 19:12:02 2002
@@ -19,7 +19,7 @@
 # Loadable module support
 #
 CONFIG_MODULES=y
-CONFIG_MODVERSIONS=y
+# CONFIG_MODVERSIONS is not set
 CONFIG_KMOD=y
 
 #
@@ -96,12 +96,10 @@
 #
 CONFIG_FB=y
 CONFIG_DUMMY_CONSOLE=y
-# CONFIG_FB_RIVA is not set
 # CONFIG_FB_CLGEN is not set
-CONFIG_FB_PM2=y
-# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
-CONFIG_FB_PM2_PCI=y
+# CONFIG_FB_PM2 is not set
 # CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_RIVA is not set
 # CONFIG_FB_MATROX is not set
 CONFIG_FB_ATY=y
 # CONFIG_FB_ATY_GX is not set
@@ -112,6 +110,8 @@
 # CONFIG_FB_NEOMAGIC is not set
 # CONFIG_FB_3DFX is not set
 # CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_PM3 is not set
 CONFIG_FB_SBUS=y
 CONFIG_FB_CREATOR=y
 CONFIG_FB_CGSIX=y
@@ -215,7 +215,8 @@
 # CONFIG_BRIDGE is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_LLC is not set
+CONFIG_LLC=m
+CONFIG_LLC_UI=y
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
@@ -292,7 +293,7 @@
 # CONFIG_BLK_DEV_IDE_TCQ_DEFAULT is not set
 # CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
 # CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_AEC62XX_TUNING is not set
+# CONFIG_AEC6280_BURST is not set
 CONFIG_BLK_DEV_ALI15X3=y
 # CONFIG_WDC_ALI15X3 is not set
 # CONFIG_BLK_DEV_AMD74XX is not set
@@ -315,6 +316,7 @@
 # CONFIG_BLK_DEV_SL82C105 is not set
 # CONFIG_IDE_CHIPSETS is not set
 # CONFIG_IDEDMA_IVB is not set
+CONFIG_ATAPI=y
 CONFIG_IDEDMA_AUTO=y
 # CONFIG_BLK_DEV_ATARAID is not set
 # CONFIG_BLK_DEV_ATARAID_PDC is not set
@@ -539,7 +541,7 @@
 # CONFIG_WAN is not set
 
 #
-# "Tulip" family network device support
+# Tulip family network device support
 #
 CONFIG_NET_TULIP=y
 CONFIG_DE2104X=m
@@ -617,7 +619,6 @@
 # CONFIG_QUOTA is not set
 # CONFIG_QFMT_V1 is not set
 # CONFIG_QFMT_V2 is not set
-# CONFIG_QIFACE_COMPAT is not set
 CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 # CONFIG_REISERFS_FS is not set
@@ -781,14 +782,13 @@
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=m
 # CONFIG_USB_UHCI_HCD_ALT is not set
-CONFIG_USB_UHCI=y
-# CONFIG_USB_UHCI_ALT is not set
 
 #
 # USB Device Class drivers
 #
 # CONFIG_USB_AUDIO is not set
 CONFIG_USB_BLUETOOTH_TTY=m
+# CONFIG_USB_MIDI is not set
 CONFIG_USB_ACM=m
 CONFIG_USB_PRINTER=m
 CONFIG_USB_STORAGE=m
@@ -808,6 +808,7 @@
 CONFIG_USB_HID=y
 CONFIG_USB_HIDINPUT=y
 CONFIG_USB_HIDDEV=y
+# CONFIG_USB_AIPTEK is not set
 CONFIG_USB_WACOM=m
 
 #
@@ -821,15 +822,15 @@
 #
 # USB Multimedia devices
 #
-CONFIG_USB_DABUSB=m
-CONFIG_USB_VICAM=m
-CONFIG_USB_DSBR=m
-CONFIG_USB_IBMCAM=m
-CONFIG_USB_KONICAWC=m
-CONFIG_USB_OV511=m
-CONFIG_USB_PWC=m
-CONFIG_USB_SE401=m
-CONFIG_USB_STV680=m
+# CONFIG_USB_DABUSB is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_OV511 is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_STV680 is not set
 
 #
 # USB Network adaptors
diff -Nru a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
--- a/arch/sparc64/kernel/entry.S	Tue Jun 18 19:12:01 2002
+++ b/arch/sparc64/kernel/entry.S	Tue Jun 18 19:12:01 2002
@@ -1436,10 +1436,11 @@
	 * %o7 for us. Check performance counter stuff too.
	 */
	andn	%o7, _TIF_NEWCHILD, %l0
+	stx	%l0, [%g6 + TI_FLAGS]
 #if CONFIG_SMP || CONFIG_PREEMPT
	call	schedule_tail
+	 mov	%g5, %o0
 #endif
-	stx	%l0, [%g6 + TI_FLAGS]
	andcc	%l0, _TIF_PERFCTR, %g0
	be,pt	%icc, 1f
	 nop
diff -Nru a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
--- a/arch/sparc64/kernel/power.c	Tue Jun 18 19:12:03 2002
+++ b/arch/sparc64/kernel/power.c	Tue Jun 18 19:12:03 2002
@@ -34,20 +34,28 @@
 #endif /* CONFIG_PCI */
 
 extern void machine_halt(void);
+extern void machine_alt_power_off(void);
+static void (*poweroff_method)(void) = machine_alt_power_off;
 
 extern int serial_console;
 
 void machine_power_off(void)
 {
+	if (!serial_console) {
 #ifdef CONFIG_PCI
-	if (power_reg != 0UL && !serial_console) {
-		/* Both register bits seem to have the
-		 * same effect, so until I figure out
-		 * what the difference is...
-		 */
-		writel(POWER_COURTESY_OFF | POWER_SYSTEM_OFF, power_reg);
-	}
+		if (power_reg != 0UL) {
+			/* Both register bits seem to have the
+			 * same effect, so until I figure out
+			 * what the difference is...
+			 */
+			writel(POWER_COURTESY_OFF | POWER_SYSTEM_OFF, power_reg);
+		} else
 #endif /* CONFIG_PCI */
+		if (poweroff_method != NULL) {
+			poweroff_method();
+			/* not reached */
+		}
+	}
 	machine_halt();
 }
@@ -98,6 +106,7 @@
 found:
 	power_reg = (unsigned long)ioremap(edev->resource[0].start, 0x4);
 	printk("power: Control reg at %016lx ... ", power_reg);
+	poweroff_method = machine_halt; /* able to use the standard halt */
 	if (edev->irqs[0] != PCI_IRQ_NONE) {
 		if (kernel_thread(powerd, 0, CLONE_FS) < 0) {
 			printk("Failed to start power daemon.\n");
diff -Nru a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
--- a/arch/sparc64/kernel/process.c	Tue Jun 18 19:12:02 2002
+++ b/arch/sparc64/kernel/process.c	Tue Jun 18 19:12:02 2002
@@ -142,6 +142,21 @@
 	panic("Halt failed!");
 }
 
+void machine_alt_power_off(void)
+{
+	sti();
+	mdelay(8);
+	cli();
+#ifdef CONFIG_SUN_CONSOLE
+	if (!serial_console && prom_palette)
+		prom_palette(1);
+#endif
+	if (prom_keyboard)
+		prom_keyboard();
+	prom_halt_power_off();
+	panic("Power-off failed!");
+}
+
 void machine_restart(char * cmd)
 {
 	char *p;
diff -Nru a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
--- a/arch/sparc64/kernel/time.c	Tue Jun 18 19:12:02 2002
+++ b/arch/sparc64/kernel/time.c	Tue Jun 18 19:12:02 2002
@@ -44,6 +44,8 @@
 unsigned long ds1287_regs = 0UL;
 #endif
 
+u64 jiffies_64;
+
 static unsigned long mstk48t08_regs = 0UL;
 static unsigned long mstk48t59_regs = 0UL;
diff -Nru a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
--- a/arch/sparc64/prom/misc.c	Tue Jun 18 19:12:03 2002
+++ b/arch/sparc64/prom/misc.c	Tue Jun 18 19:12:03 2002
@@ -97,6 +97,19 @@
 	goto again; /* PROM is out to get me -DaveM */
 }
 
+void
+prom_halt_power_off(void)
+{
+#ifdef CONFIG_SMP
+	smp_promstop_others();
+	udelay(8000);
+#endif
+	p1275_cmd ("SUNW,power-off", P1275_INOUT(0,0));
+
+	/* if nothing else helps, we just halt */
+	prom_halt ();
+}
+
 /* Set prom sync handler to call function 'funcp'. */
 void
 prom_setcallback(callback_func_t funcp)
diff -Nru a/arch/x86_64/Makefile b/arch/x86_64/Makefile
--- a/arch/x86_64/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/x86_64/Makefile	Tue Jun 18 19:12:02 2002
@@ -43,18 +43,12 @@
 CFLAGS += -pipe
 # this makes reading assembly source easier
 CFLAGS += -fno-reorder-blocks
-# needed for later gcc 3.1
 CFLAGS += -finline-limit=2000
-# needed for earlier gcc 3.1
-#CFLAGS += -fno-strength-reduce
 #CFLAGS += -g
-
-# prevent gcc from keeping the stack 16 byte aligned (FIXME)
-#CFLAGS += -mpreferred-stack-boundary=2
-
 HEAD := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
-SUBDIRS := arch/x86_64/tools $(SUBDIRS) arch/x86_64/kernel arch/x86_64/mm arch/x86_64/lib
+SUBDIRS += arch/x86_64/kernel arch/x86_64/mm arch/x86_64/lib
 CORE_FILES := arch/x86_64/kernel/kernel.o $(CORE_FILES)
 CORE_FILES += arch/x86_64/mm/mm.o
 LIBS := $(TOPDIR)/arch/x86_64/lib/lib.a $(LIBS)
@@ -76,38 +70,43 @@
 vmlinux: arch/x86_64/vmlinux.lds
 
 .PHONY: zImage bzImage compressed zlilo bzlilo zdisk bzdisk install \
-	clean archclean archmrproper archdep checkoffset
-
-checkoffset: FORCE include/asm
-	make -C arch/$(ARCH)/tools $(TOPDIR)/include/asm-x86_64/offset.h
+	clean archclean archmrproper
-bzImage: checkoffset vmlinux
+bzImage: vmlinux
 	@$(MAKEBOOT) bzImage
-bzImage-padded: checkoffset vmlinux
+bzImage-padded: vmlinux
 	@$(MAKEBOOT) bzImage-padded
 tmp:
 	@$(MAKEBOOT) BOOTIMAGE=bzImage zlilo
-bzlilo: checkoffset vmlinux
+bzlilo: vmlinux
 	@$(MAKEBOOT) BOOTIMAGE=bzImage zlilo
-bzdisk: checkoffset vmlinux
+bzdisk: vmlinux
 	@$(MAKEBOOT) BOOTIMAGE=bzImage zdisk
-install: checkoffset vmlinux
+install: vmlinux
 	@$(MAKEBOOT) BOOTIMAGE=bzImage install
 archclean:
 	@$(MAKEBOOT) clean
-	@$(MAKE) -C $(TOPDIR)/arch/x86_64/tools clean
 archmrproper:
-	rm -f $(TOPDIR)/arch/x86_64/tools/offset.h
-	rm -f $(TOPDIR)/arch/x86_64/tools/offset.tmp
-	rm -f $(TOPDIR)/include/asm-x86_64/offset.h
-
-archdep:
-	@$(MAKE) -C $(TOPDIR)/arch/x86_64/tools all
-	@$(MAKEBOOT) dep
+
+
+prepare: include/asm-$(ARCH)/offset.h
+
+arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+	include/config/MARKER
+
+include/asm-$(ARCH)/offset.h.tmp: arch/$(ARCH)/kernel/asm-offsets.s
+	@$(generate-asm-offsets.h) < $< > $@
+
+include/asm-$(ARCH)/offset.h: include/asm-$(ARCH)/offset.h.tmp
+	@echo -n ' Generating $@'
+	@$(update-if-changed)
+
+CLEAN_FILES += include/asm-$(ARCH)/offset.h.tmp \
+	include/asm-$(ARCH)/offset.h
\ No newline at end of file
diff -Nru a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
--- a/arch/x86_64/boot/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/x86_64/boot/Makefile	Tue Jun 18 19:12:02 2002
@@ -21,10 +21,6 @@
 SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
-# If you want the RAM disk device, define this to be the size in blocks.
-
-RAMDISK := -DRAMDISK=512
-
 # ---------------------------------------------------------------------------
 
 BOOT_INCL = $(TOPDIR)/include/linux/config.h \
@@ -100,12 +96,8 @@
 bsetup.s: setup.S video.S Makefile $(BOOT_INCL) $(TOPDIR)/include/linux/version.h $(TOPDIR)/include/linux/compile.h
 	$(IA32_CPP) $(CPPFLAGS) -D__BIG_KERNEL__ -D__ASSEMBLY__ -traditional $(SVGA_MODE) $(RAMDISK) $< -o $@
-dep:
-
 clean:
 	rm -f tools/build
 	rm -f setup bootsect zImage compressed/vmlinux.out
 	rm -f bsetup bbootsect bzImage compressed/bvmlinux.out
 	@$(MAKE) -C compressed clean
-
-
diff -Nru a/arch/x86_64/config.in b/arch/x86_64/config.in
--- a/arch/x86_64/config.in	Tue Jun 18 19:12:02 2002
+++ b/arch/x86_64/config.in	Tue Jun 18 19:12:02 2002
@@ -47,8 +47,7 @@
 define_bool CONFIG_X86_IO_APIC y
 define_bool CONFIG_X86_LOCAL_APIC y
-#currently broken:
-#bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
+bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
 bool 'Symmetric multi-processing support' CONFIG_SMP
 if [ "$CONFIG_SMP" = "n" ]; then
 	bool 'Preemptible Kernel' CONFIG_PREEMPT
@@ -226,6 +225,7 @@
 	bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
 	bool ' Additional run-time checks' CONFIG_CHECKING
 	bool ' Debug __init statements' CONFIG_INIT_DEBUG
+	bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
 fi
 endmenu
diff -Nru a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
--- a/arch/x86_64/ia32/Makefile	Tue Jun 18 19:12:02 2002
+++ b/arch/x86_64/ia32/Makefile	Tue Jun 18 19:12:02 2002
@@ -9,8 +9,9 @@
 all: ia32.o
 
 O_TARGET := ia32.o
-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
-	ia32_binfmt.o fpu32.o socket32.o ptrace32.o
+obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_ioctl.o \
+	ia32_signal.o \
+	ia32_binfmt.o fpu32.o socket32.o ptrace32.o ipc32.o
 
 clean::
diff -Nru a/arch/x86_64/ia32/ipc32.c b/arch/x86_64/ia32/ipc32.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/arch/x86_64/ia32/ipc32.c	Tue Jun 18 19:12:03 2002
@@ -0,0 +1,645 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+/*
+ * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation..
+ *
+ * This is really horribly ugly.
+ */
+
+struct msgbuf32 {
+	s32 mtype;
+	char mtext[1];
+};
+
+struct ipc_perm32 {
+	int key;
+	__kernel_uid_t32 uid;
+	__kernel_gid_t32 gid;
+	__kernel_uid_t32 cuid;
+	__kernel_gid_t32 cgid;
+	unsigned short mode;
+	unsigned short seq;
+};
+
+struct ipc64_perm32 {
+	unsigned key;
+	__kernel_uid32_t32 uid;
+	__kernel_gid32_t32 gid;
+	__kernel_uid32_t32 cuid;
+	__kernel_gid32_t32 cgid;
+	unsigned short mode;
+	unsigned short __pad1;
+	unsigned short seq;
+	unsigned short __pad2;
+	unsigned int unused1;
+	unsigned int unused2;
+};
+
+struct semid_ds32 {
+	struct ipc_perm32 sem_perm;	/* permissions .. see ipc.h */
+	__kernel_time_t32 sem_otime;	/* last semop time */
+	__kernel_time_t32 sem_ctime;	/* last change time */
+	u32 sem_base;			/* ptr to first semaphore in array */
+	u32 sem_pending;		/* pending operations to be processed */
+	u32 sem_pending_last;		/* last pending operation */
+	u32 undo;			/* undo requests on this array */
+	unsigned short sem_nsems;	/* no. of semaphores in array */
+};
+
+struct semid64_ds32 {
+	struct ipc64_perm32 sem_perm;
+	__kernel_time_t32 sem_otime;
+	unsigned int __unused1;
+	__kernel_time_t32 sem_ctime;
+	unsigned int __unused2;
+	unsigned int sem_nsems;
+	unsigned int __unused3;
+	unsigned int __unused4;
+};
+
+struct msqid_ds32 {
+	struct ipc_perm32 msg_perm;
+	u32 msg_first;
+	u32 msg_last;
+	__kernel_time_t32 msg_stime;
+	__kernel_time_t32 msg_rtime;
+	__kernel_time_t32 msg_ctime;
+	u32 wwait;
+	u32 rwait;
+	unsigned short msg_cbytes;
+	unsigned short msg_qnum;
+	unsigned short msg_qbytes;
+	__kernel_ipc_pid_t32 msg_lspid;
+	__kernel_ipc_pid_t32 msg_lrpid;
+};
+
+struct msqid64_ds32 {
+	struct ipc64_perm32 msg_perm;
+	__kernel_time_t32 msg_stime;
+	unsigned int __unused1;
+	__kernel_time_t32 msg_rtime;
+	unsigned int __unused2;
+	__kernel_time_t32 msg_ctime;
+	unsigned int __unused3;
+	unsigned int msg_cbytes;
+	unsigned int msg_qnum;
+	unsigned int msg_qbytes;
+	__kernel_pid_t32 msg_lspid;
+	__kernel_pid_t32 msg_lrpid;
+	unsigned int __unused4;
+	unsigned int __unused5;
+};
+
+struct shmid_ds32 {
+	struct ipc_perm32 shm_perm;
+	int shm_segsz;
+	__kernel_time_t32 shm_atime;
+	__kernel_time_t32 shm_dtime;
+	__kernel_time_t32 shm_ctime;
+	__kernel_ipc_pid_t32 shm_cpid;
+	__kernel_ipc_pid_t32 shm_lpid;
+	unsigned short shm_nattch;
+};
+
+struct shmid64_ds32 {
+	struct ipc64_perm32 shm_perm;
+	__kernel_size_t32 shm_segsz;
+	__kernel_time_t32 shm_atime;
+	unsigned int __unused1;
+	__kernel_time_t32 shm_dtime;
+	unsigned int __unused2;
+	__kernel_time_t32 shm_ctime;
+	unsigned int __unused3;
+	__kernel_pid_t32 shm_cpid;
+	__kernel_pid_t32 shm_lpid;
+	unsigned int shm_nattch;
+	unsigned int __unused4;
+	unsigned int __unused5;
+};
+
+struct shminfo64_32 {
+	unsigned int shmmax;
+	unsigned int shmmin;
+	unsigned int shmmni;
+	unsigned int shmseg;
+	unsigned int shmall;
+	unsigned int __unused1;
+	unsigned int __unused2;
+	unsigned int __unused3;
+	unsigned int __unused4;
+};
+
+struct shm_info32 {
+	int used_ids;
+	u32 shm_tot, shm_rss, shm_swp;
+	u32 swap_attempts, swap_successes;
+};
+
+struct ipc_kludge {
+	struct msgbuf *msgp;
+	int msgtyp;
+};
+
+
+#define A(__x)		((unsigned long)(__x))
+#define AA(__x)		((unsigned long)(__x))
+
+#define SEMOP		1
+#define SEMGET		2
+#define SEMCTL		3
+#define MSGSND		11
+#define MSGRCV		12
+#define MSGGET		13
+#define MSGCTL		14
+#define SHMAT		21
+#define SHMDT		22
+#define SHMGET		23
+#define SHMCTL		24
+
+#define IPCOP_MASK(__x)	(1UL << (__x))
+
+static int
+ipc_parse_version32 (int *cmd)
+{
+	if (*cmd & IPC_64) {
+		*cmd ^= IPC_64;
+		return IPC_64;
+	} else {
+		return IPC_OLD;
+	}
+}
+
+static int
+semctl32 (int first, int second, int third, void *uptr)
+{
+	union semun fourth;
+	u32 pad;
+	int err = 0, err2;
+	struct semid64_ds s;
+	mm_segment_t old_fs;
+	int version = ipc_parse_version32(&third);
+
+	if (!uptr)
+		return -EINVAL;
+	if (get_user(pad, (u32 *)uptr))
+		return -EFAULT;
+	if (third == SETVAL)
+		fourth.val = (int)pad;
+	else
+		fourth.__pad = (void *)A(pad);
+	switch (third) {
+	case IPC_INFO:
+	case IPC_RMID:
+	case IPC_SET:
+	case SEM_INFO:
+	case GETVAL:
+	case GETPID:
+	case GETNCNT:
+	case GETZCNT:
+	case GETALL:
+	case SETVAL:
+	case SETALL:
+		err = sys_semctl(first, second, third, fourth);
+		break;
+
+	case IPC_STAT:
+	case SEM_STAT:
+		fourth.__pad = &s;
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = sys_semctl(first, second|IPC_64, third, fourth);
+		set_fs(old_fs);
+
+		if (version == IPC_64) {
+			struct semid64_ds32 *usp64 = (struct semid64_ds32 *) A(pad);
+
+			if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
+				err = -EFAULT;
+				break;
+			}
+			err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
+			err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
+			err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
+			err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
+			err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
+			err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
+			err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
+			err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
+			err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
+			err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
+		} else {
+			struct semid_ds32 *usp32 = (struct semid_ds32 *) A(pad);
+
+			if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
+				err = -EFAULT;
+				break;
+			}
+			err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
+			err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
+			err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
+			err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
+			err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
+			err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
+			err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
+			err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
+			err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
+			err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
+		}
+		if (err2)
+			err = -EFAULT;
+		break;
+	}
+	return err;
+}
+
+static int
+do_sys32_msgsnd (int first, int second, int third, void *uptr)
+{
+	struct msgbuf *p = kmalloc(second + sizeof(struct msgbuf) + 4, GFP_USER);
+	struct msgbuf32 *up = (struct msgbuf32 *)uptr;
+	mm_segment_t old_fs;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+	err = get_user(p->mtype, &up->mtype);
+	err |= copy_from_user(p->mtext, &up->mtext, second);
+	if (err)
+		goto out;
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sys_msgsnd(first, p, second, third);
+	set_fs(old_fs);
+ out:
+	kfree(p);
+	return err;
+}
+
+static int
+do_sys32_msgrcv (int first, int second, int msgtyp, int third, int version, void *uptr)
+{
+	struct msgbuf32 *up;
+	struct msgbuf *p;
+	mm_segment_t old_fs;
+	int err;
+
+	if (!version) {
+		struct ipc_kludge *uipck = (struct ipc_kludge *)uptr;
+		struct ipc_kludge ipck;
+
+		err = -EINVAL;
+		if (!uptr)
+			goto out;
+		err = -EFAULT;
+		if (copy_from_user(&ipck, uipck, sizeof(struct ipc_kludge)))
+			goto out;
+		uptr = (void *)A(ipck.msgp);
+		msgtyp = ipck.msgtyp;
+	}
+	err = -ENOMEM;
+	p = kmalloc(second + sizeof(struct msgbuf) + 4, GFP_USER);
+	if (!p)
+		goto out;
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sys_msgrcv(first, p, second + 4, msgtyp, third);
+	set_fs(old_fs);
+	if (err < 0)
+		goto free_then_out;
+	up = (struct msgbuf32 *)uptr;
+	if (put_user(p->mtype, &up->mtype) || copy_to_user(&up->mtext, p->mtext, err))
+		err = -EFAULT;
+free_then_out:
+	kfree(p);
+out:
+	return err;
+}
+
+static int
+msgctl32 (int first, int second, void *uptr)
+{
+	int err = -EINVAL, err2;
+	struct msqid_ds m;
+	struct msqid64_ds m64;
+	struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr;
+	struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr;
+	mm_segment_t old_fs;
+	int version = ipc_parse_version32(&second);
+
+	switch (second) {
+	case IPC_INFO:
+	case IPC_RMID:
+	case MSG_INFO:
+		err = sys_msgctl(first, second, (struct msqid_ds *)uptr);
+		break;
+
+	case IPC_SET:
+		if (version == IPC_64) {
+			err = get_user(m.msg_perm.uid, &up64->msg_perm.uid);
+			err |= get_user(m.msg_perm.gid, &up64->msg_perm.gid);
+			err |= get_user(m.msg_perm.mode, &up64->msg_perm.mode);
+			err |= get_user(m.msg_qbytes, &up64->msg_qbytes);
+		} else {
+			err = get_user(m.msg_perm.uid, &up32->msg_perm.uid);
+			err |= get_user(m.msg_perm.gid, &up32->msg_perm.gid);
+			err |= get_user(m.msg_perm.mode, &up32->msg_perm.mode);
+			err |= get_user(m.msg_qbytes, &up32->msg_qbytes);
+		}
+		if (err)
+			break;
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = sys_msgctl(first, second, &m);
+		set_fs(old_fs);
+		break;
+
+	case IPC_STAT:
+	case MSG_STAT:
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = sys_msgctl(first, second|IPC_64, (void *) &m64);
+		set_fs(old_fs);
+
+		if (version == IPC_64) {
+			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
+				err = -EFAULT;
+				break;
+			}
+			err2 = __put_user(m64.msg_perm.key, &up64->msg_perm.key);
+			err2 |= __put_user(m64.msg_perm.uid, &up64->msg_perm.uid);
+			err2 |= __put_user(m64.msg_perm.gid, &up64->msg_perm.gid);
+			err2 |= __put_user(m64.msg_perm.cuid, &up64->msg_perm.cuid);
+			err2 |= __put_user(m64.msg_perm.cgid, &up64->msg_perm.cgid);
+			err2 |= __put_user(m64.msg_perm.mode, &up64->msg_perm.mode);
+			err2 |= __put_user(m64.msg_perm.seq, &up64->msg_perm.seq);
+			err2 |= __put_user(m64.msg_stime, &up64->msg_stime);
+			err2 |= __put_user(m64.msg_rtime, &up64->msg_rtime);
+			err2 |= __put_user(m64.msg_ctime, &up64->msg_ctime);
+			err2 |= __put_user(m64.msg_cbytes, &up64->msg_cbytes);
+			err2 |= __put_user(m64.msg_qnum, &up64->msg_qnum);
+			err2 |= __put_user(m64.msg_qbytes, &up64->msg_qbytes);
+			err2 |= __put_user(m64.msg_lspid, &up64->msg_lspid);
+			err2 |= __put_user(m64.msg_lrpid, &up64->msg_lrpid);
+			if (err2)
+				err = -EFAULT;
+		} else {
+			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
+				err = -EFAULT;
+				break;
+			}
+			err2 = __put_user(m64.msg_perm.key, &up32->msg_perm.key);
+			err2 |= __put_user(m64.msg_perm.uid, &up32->msg_perm.uid);
+			err2 |= __put_user(m64.msg_perm.gid, &up32->msg_perm.gid);
+			err2 |= __put_user(m64.msg_perm.cuid, &up32->msg_perm.cuid);
+			err2 |= __put_user(m64.msg_perm.cgid, &up32->msg_perm.cgid);
+			err2 |= __put_user(m64.msg_perm.mode, &up32->msg_perm.mode);
+			err2 |= __put_user(m64.msg_perm.seq, &up32->msg_perm.seq);
+			err2 |= __put_user(m64.msg_stime, &up32->msg_stime);
+			err2 |= __put_user(m64.msg_rtime, &up32->msg_rtime);
+			err2 |= __put_user(m64.msg_ctime, &up32->msg_ctime);
+			err2 |= __put_user(m64.msg_cbytes, &up32->msg_cbytes);
+			err2 |= __put_user(m64.msg_qnum, &up32->msg_qnum);
+			err2 |= __put_user(m64.msg_qbytes, &up32->msg_qbytes);
+			err2 |= __put_user(m64.msg_lspid, &up32->msg_lspid);
+			err2 |= __put_user(m64.msg_lrpid, &up32->msg_lrpid);
+			if (err2)
+				err = -EFAULT;
+		}
+		break;
+	}
+	return err;
+}
+
+static int
+shmat32 (int first, int second, int third, int version, void *uptr)
+{
+	unsigned long raddr;
+	u32 *uaddr = (u32 *)A((u32)third);
+	int err;
+
+	if (version == 1)
+		return -EINVAL;	/* iBCS2 emulator entry point: unsupported */
+	err = sys_shmat(first, uptr, second, &raddr);
+	if (err)
+		return err;
+	return put_user(raddr, uaddr);
+}
+
+static int put_shmid64(struct shmid64_ds *s64p, void *uptr, int version)
+{
+	int err2;
+#define s64 (*s64p)
+	if (version == IPC_64) {
+		struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
+
+		if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64)))
+			return -EFAULT;
+
+		err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
+		err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
+		err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
+		err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
+		err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
+		err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
+		err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
+		err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
+		err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
+		err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
+		err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
+		err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
+		err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
+		err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
+	} else {
+		struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
+
+		if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32)))
+			return -EFAULT;
+
+		err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
+		err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
+		err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
+		err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
+		err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
+		err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
+		err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
+		err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
+		err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
+		err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
+		err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
+		err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
+		err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
+		err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
+	}
+#undef s64
+	return err2 ? -EFAULT : 0;
+}
+static int
+shmctl32 (int first, int second, void *uptr)
+{
+	int err = -EFAULT, err2;
+	struct shmid_ds s;
+	struct shmid64_ds s64;
+	mm_segment_t old_fs;
+	struct shm_info32 *uip = (struct shm_info32 *)uptr;
+	struct shm_info si;
+	int version = ipc_parse_version32(&second);
+	struct shminfo64 smi;
+	struct shminfo *usi32 = (struct shminfo *) uptr;
+	struct shminfo64_32 *usi64 = (struct shminfo64_32 *) uptr;
+
+	switch (second) {
+	case IPC_INFO:
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = sys_shmctl(first, second|IPC_64, (struct shmid_ds *)&smi);
+		set_fs(old_fs);
+
+		if (version == IPC_64) {
+			if (!access_ok(VERIFY_WRITE, usi64, sizeof(*usi64))) {
+				err = -EFAULT;
+				break;
+			}
+			err2 = __put_user(smi.shmmax, &usi64->shmmax);
+			err2 |= __put_user(smi.shmmin, &usi64->shmmin);
+			err2 |= __put_user(smi.shmmni, &usi64->shmmni);
+			err2 |= __put_user(smi.shmseg, &usi64->shmseg);
+			err2 |= __put_user(smi.shmall, &usi64->shmall);
+		} else {
+			if (!access_ok(VERIFY_WRITE, usi32, sizeof(*usi32))) {
+				err = -EFAULT;
+				break;
+			}
+			err2 = __put_user(smi.shmmax, &usi32->shmmax);
+			err2 |= __put_user(smi.shmmin, &usi32->shmmin);
+			err2 |= __put_user(smi.shmmni, &usi32->shmmni);
+			err2 |= __put_user(smi.shmseg, &usi32->shmseg);
+			err2 |= __put_user(smi.shmall, &usi32->shmall);
+		}
+		if (err2)
+			err = -EFAULT;
+		break;
+
+	case IPC_RMID:
+	case SHM_LOCK:
+	case SHM_UNLOCK:
+		err = sys_shmctl(first, second, (struct shmid_ds *)uptr);
+		break;
+
+	case IPC_SET:
+		if (version == IPC_64) {
+			struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
+			err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
+			err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
+			err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
+		} else {
+			struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
+			err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
+			err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
+			err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
+		}
+		if (err)
+			break;
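+		/*
+		 * Hand the native syscall a kernel buffer: with
+		 * set_fs(KERNEL_DS) in force, sys_shmctl()'s user-copy
+		 * checks accept the kernel-space &s argument.
+		 */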
+ old_fs = get_fs(); + set_fs(KERNEL_DS); + err = sys_shmctl(first, second, &s); + set_fs(old_fs); + break; + + case IPC_STAT: + case SHM_STAT: + old_fs = get_fs(); + set_fs(KERNEL_DS); + err = sys_shmctl(first, second|IPC_64, (void *) &s64); + set_fs(old_fs); + + if (err < 0) + break; + err2 = put_shmid64(&s64, uptr, version); + if (err2) + err = err2; + break; + + case SHM_INFO: + old_fs = get_fs(); + set_fs(KERNEL_DS); + err = sys_shmctl(first, second, (void *)&si); + set_fs(old_fs); + if (err < 0) + break; + + if (!access_ok(VERIFY_WRITE, uip, sizeof(*uip))) { + err = -EFAULT; + break; + } + err2 = __put_user(si.used_ids, &uip->used_ids); + err2 |= __put_user(si.shm_tot, &uip->shm_tot); + err2 |= __put_user(si.shm_rss, &uip->shm_rss); + err2 |= __put_user(si.shm_swp, &uip->shm_swp); + err2 |= __put_user(si.swap_attempts, &uip->swap_attempts); + err2 |= __put_user(si.swap_successes, &uip->swap_successes); + if (err2) + err = -EFAULT; + break; + + } + return err; +} + +asmlinkage long +sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) +{ + int version; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + switch (call) { + case SEMOP: + /* struct sembuf is the same on 32 and 64bit :)) */ + return sys_semop(first, (struct sembuf *)AA(ptr), second); + case SEMGET: + return sys_semget(first, second, third); + case SEMCTL: + return semctl32(first, second, third, (void *)AA(ptr)); + + case MSGSND: + return do_sys32_msgsnd(first, second, third, (void *)AA(ptr)); + case MSGRCV: + return do_sys32_msgrcv(first, second, fifth, third, version, (void *)AA(ptr)); + case MSGGET: + return sys_msgget((key_t) first, second); + case MSGCTL: + return msgctl32(first, second, (void *)AA(ptr)); + + case SHMAT: + return shmat32(first, second, third, version, (void *)AA(ptr)); + break; + case SHMDT: + return sys_shmdt((char *)AA(ptr)); + case SHMGET: + return sys_shmget(first, second, third); + case SHMCTL: + return shmctl32(first, second, (void *)AA(ptr)); + + default: + return -EINVAL; + } + return -EINVAL; +} + diff -Nru a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c --- a/arch/x86_64/ia32/sys_ia32.c Tue Jun 18 19:12:02 2002 +++ b/arch/x86_64/ia32/sys_ia32.c Tue Jun 18 19:12:02 2002 @@ -1119,422 +1119,6 @@ } /* - * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.. - * - * This is really horribly ugly. - */ - -struct msgbuf32 { s32 mtype; char mtext[1]; }; - -struct ipc_perm32 -{ - key_t key; - __kernel_uid_t32 uid; - __kernel_gid_t32 gid; - __kernel_uid_t32 cuid; - __kernel_gid_t32 cgid; - __kernel_mode_t32 mode; - unsigned short seq; -}; - -struct semid_ds32 { - struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t32 sem_otime; /* last semop time */ - __kernel_time_t32 sem_ctime; /* last change time */ - u32 sem_base; /* ptr to first semaphore in array */ - u32 sem_pending; /* pending operations to be processed */ - u32 sem_pending_last; /* last pending operation */ - u32 undo; /* undo requests on this array */ - unsigned short sem_nsems; /* no. 
of semaphores in array */ -}; - -struct msqid_ds32 -{ - struct ipc_perm32 msg_perm; - u32 msg_first; - u32 msg_last; - __kernel_time_t32 msg_stime; - __kernel_time_t32 msg_rtime; - __kernel_time_t32 msg_ctime; - u32 wwait; - u32 rwait; - unsigned short msg_cbytes; - unsigned short msg_qnum; - unsigned short msg_qbytes; - __kernel_ipc_pid_t32 msg_lspid; - __kernel_ipc_pid_t32 msg_lrpid; -}; - -struct shmid_ds32 { - struct ipc_perm32 shm_perm; - int shm_segsz; - __kernel_time_t32 shm_atime; - __kernel_time_t32 shm_dtime; - __kernel_time_t32 shm_ctime; - __kernel_ipc_pid_t32 shm_cpid; - __kernel_ipc_pid_t32 shm_lpid; - unsigned short shm_nattch; -}; - -#define IPCOP_MASK(__x) (1UL << (__x)) - -static int -do_sys32_semctl(int first, int second, int third, void *uptr) -{ - union semun fourth; - u32 pad; - int err; - struct semid64_ds s; - struct semid_ds32 *usp; - mm_segment_t old_fs; - - if (!uptr) - return -EINVAL; - err = -EFAULT; - if (get_user (pad, (u32 *)uptr)) - return err; - if(third == SETVAL) - fourth.val = (int)pad; - else - fourth.__pad = (void *)A(pad); - - switch (third) { - - case IPC_INFO: - case IPC_RMID: - case IPC_SET: - case SEM_INFO: - case GETVAL: - case GETPID: - case GETNCNT: - case GETZCNT: - case GETALL: - case SETVAL: - case SETALL: - err = sys_semctl (first, second, third, fourth); - break; - - case IPC_STAT: - case SEM_STAT: - usp = (struct semid_ds32 *)A(pad); - fourth.__pad = &s; - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_semctl (first, second, third, fourth); - set_fs (old_fs); - if (verify_area(VERIFY_WRITE, usp, sizeof(struct semid_ds32)) || - __put_user(s.sem_perm.key, &usp->sem_perm.key) || - __put_user(s.sem_perm.uid, &usp->sem_perm.uid) || - __put_user(s.sem_perm.gid, &usp->sem_perm.gid) || - __put_user(s.sem_perm.cuid, &usp->sem_perm.cuid) || - __put_user (s.sem_perm.cgid, &usp->sem_perm.cgid) || - __put_user (s.sem_perm.mode, &usp->sem_perm.mode) || - __put_user (s.sem_perm.seq, &usp->sem_perm.seq) || - __put_user (s.sem_otime, &usp->sem_otime) || - __put_user (s.sem_ctime, &usp->sem_ctime) || - __put_user (s.sem_nsems, &usp->sem_nsems)) - return -EFAULT; - break; - - } - - return err; -} - -static int -do_sys32_msgsnd (int first, int second, int third, void *uptr) -{ - struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf) - + 4, GFP_USER); - struct msgbuf32 *up = (struct msgbuf32 *)uptr; - mm_segment_t old_fs; - int err; - - if (!p) - return -ENOMEM; - err = verify_area(VERIFY_READ, up, sizeof(struct msgbuf32)); - if (err) - goto out; - err = __get_user (p->mtype, &up->mtype); - err |= __copy_from_user (p->mtext, &up->mtext, second); - if (err) - goto out; - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_msgsnd (first, p, second, third); - set_fs (old_fs); -out: - kfree (p); - return err; -} - -static int -do_sys32_msgrcv (int first, int second, int msgtyp, int third, - int version, void *uptr) -{ - struct msgbuf32 *up; - struct msgbuf *p; - mm_segment_t old_fs; - int err; - - if (!version) { - struct ipc_kludge *uipck = (struct ipc_kludge *)uptr; - struct ipc_kludge ipck; - - err = -EINVAL; - if (!uptr) - goto out; - err = -EFAULT; - if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge))) - goto out; - uptr = (void *)A(ipck.msgp); - msgtyp = ipck.msgtyp; - } - err = -ENOMEM; - p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER); - if (!p) - goto out; - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_msgrcv (first, p, second + 4, msgtyp, third); - set_fs (old_fs); - if (err < 0) - goto free_then_out; - up = 
(struct msgbuf32 *)uptr; - if (verify_area(VERIFY_WRITE, up, sizeof(struct msgbuf32)) || - __put_user (p->mtype, &up->mtype) || - __copy_to_user (&up->mtext, p->mtext, err)) - err = -EFAULT; -free_then_out: - kfree (p); -out: - return err; -} - -static int -do_sys32_msgctl (int first, int second, void *uptr) -{ - int err = -EINVAL; - struct msqid_ds m; - struct msqid64_ds m64; - struct msqid_ds32 *up = (struct msqid_ds32 *)uptr; - mm_segment_t old_fs; - - switch (second) { - - case IPC_INFO: - case IPC_RMID: - case MSG_INFO: - err = sys_msgctl (first, second, (struct msqid_ds *)uptr); - break; - - case IPC_SET: - err = verify_area(VERIFY_READ, up, sizeof(struct msqid_ds32)); - if (err) - break; - err = __get_user (m.msg_perm.uid, &up->msg_perm.uid); - err |= __get_user (m.msg_perm.gid, &up->msg_perm.gid); - err |= __get_user (m.msg_perm.mode, &up->msg_perm.mode); - err |= __get_user (m.msg_qbytes, &up->msg_qbytes); - if (err) - break; - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_msgctl (first, second, &m); - set_fs (old_fs); - break; - - case IPC_STAT: - case MSG_STAT: - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_msgctl (first, second, (void *) &m64); - set_fs (old_fs); - if (verify_area(VERIFY_WRITE, up, sizeof(struct msqid_ds32)) || - __put_user (m64.msg_perm.key, &up->msg_perm.key) || - __put_user(m64.msg_perm.uid, &up->msg_perm.uid) || - __put_user(m64.msg_perm.gid, &up->msg_perm.gid) || - __put_user(m64.msg_perm.cuid, &up->msg_perm.cuid) || - __put_user(m64.msg_perm.cgid, &up->msg_perm.cgid) || - __put_user(m64.msg_perm.mode, &up->msg_perm.mode) || - __put_user(m64.msg_perm.seq, &up->msg_perm.seq) || - __put_user(m64.msg_stime, &up->msg_stime) || - __put_user(m64.msg_rtime, &up->msg_rtime) || - __put_user(m64.msg_ctime, &up->msg_ctime) || - __put_user(m64.msg_cbytes, &up->msg_cbytes) || - __put_user(m64.msg_qnum, &up->msg_qnum) || - __put_user(m64.msg_qbytes, &up->msg_qbytes) || - __put_user(m64.msg_lspid, &up->msg_lspid) || - __put_user(m64.msg_lrpid, &up->msg_lrpid)) - return -EFAULT; - break; - - } - - return err; -} - -static int -do_sys32_shmat (int first, int second, int third, int version, void *uptr) -{ - unsigned long raddr; - u32 *uaddr = (u32 *)A((u32)third); - int err = -EINVAL; - - if (version == 1) - return err; - err = sys_shmat (first, uptr, second, &raddr); - if (err) - return err; - err = put_user (raddr, uaddr); - return err; -} - -static int -do_sys32_shmctl (int first, int second, void *uptr) -{ - int err = -EFAULT; - struct shmid_ds s; - struct shmid64_ds s64; - struct shmid_ds32 *up = (struct shmid_ds32 *)uptr; - mm_segment_t old_fs; - struct shm_info32 { - int used_ids; - u32 shm_tot, shm_rss, shm_swp; - u32 swap_attempts, swap_successes; - } *uip = (struct shm_info32 *)uptr; - struct shm_info si; - - switch (second) { - - case IPC_INFO: - case IPC_RMID: - case SHM_LOCK: - case SHM_UNLOCK: - err = sys_shmctl (first, second, (struct shmid_ds *)uptr); - break; - case IPC_SET: - err = verify_area(VERIFY_READ, up, sizeof(struct shmid_ds32)); - if (err) - break; - err = __get_user (s.shm_perm.uid, &up->shm_perm.uid); - err |= __get_user (s.shm_perm.gid, &up->shm_perm.gid); - err |= __get_user (s.shm_perm.mode, &up->shm_perm.mode); - if (err) - break; - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_shmctl (first, second, &s); - set_fs (old_fs); - break; - - case IPC_STAT: - case SHM_STAT: - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_shmctl (first, second, (void *) &s64); - set_fs (old_fs); - if (err < 0) - break; - if 
(verify_area(VERIFY_WRITE, up, sizeof(struct shmid_ds32)) || - __put_user (s64.shm_perm.key, &up->shm_perm.key) || - __put_user (s64.shm_perm.uid, &up->shm_perm.uid) || - __put_user (s64.shm_perm.gid, &up->shm_perm.gid) || - __put_user (s64.shm_perm.cuid, &up->shm_perm.cuid) || - __put_user (s64.shm_perm.cgid, &up->shm_perm.cgid) || - __put_user (s64.shm_perm.mode, &up->shm_perm.mode) || - __put_user (s64.shm_perm.seq, &up->shm_perm.seq) || - __put_user (s64.shm_atime, &up->shm_atime) || - __put_user (s64.shm_dtime, &up->shm_dtime) || - __put_user (s64.shm_ctime, &up->shm_ctime) || - __put_user (s64.shm_segsz, &up->shm_segsz) || - __put_user (s64.shm_nattch, &up->shm_nattch) || - __put_user (s64.shm_cpid, &up->shm_cpid) || - __put_user (s64.shm_lpid, &up->shm_lpid)) - return -EFAULT; - break; - - case SHM_INFO: - old_fs = get_fs (); - set_fs (KERNEL_DS); - err = sys_shmctl (first, second, (void *)&si); - set_fs (old_fs); - if (err < 0) - break; - if (verify_area(VERIFY_WRITE, uip, sizeof(struct shm_info32)) || - __put_user (si.used_ids, &uip->used_ids) || - __put_user (si.shm_tot, &uip->shm_tot) || - __put_user (si.shm_rss, &uip->shm_rss) || - __put_user (si.shm_swp, &uip->shm_swp) || - __put_user (si.swap_attempts, &uip->swap_attempts) || - __put_user (si.swap_successes, &uip->swap_successes)) - return -EFAULT; - break; - - } - return err; -} - -asmlinkage long -sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) -{ - int version, err; - - version = call >> 16; /* hack for backward compatibility */ - call &= 0xffff; - - switch (call) { - - case SEMOP: - /* struct sembuf is the same on 32 and 64bit :)) */ - err = sys_semop (first, (struct sembuf *)AA(ptr), - second); - break; - case SEMGET: - err = sys_semget (first, second, third); - break; - case SEMCTL: - err = do_sys32_semctl (first, second, third, - (void *)AA(ptr)); - break; - - case MSGSND: - err = do_sys32_msgsnd (first, second, third, - (void *)AA(ptr)); - break; - case MSGRCV: - err = do_sys32_msgrcv (first, second, fifth, third, - version, (void *)AA(ptr)); - break; - case MSGGET: - err = sys_msgget ((key_t) first, second); - break; - case MSGCTL: - err = do_sys32_msgctl (first, second, (void *)AA(ptr)); - break; - - case SHMAT: - err = do_sys32_shmat (first, second, third, - version, (void *)AA(ptr)); - break; - case SHMDT: - err = sys_shmdt ((char *)AA(ptr)); - break; - case SHMGET: - err = sys_shmget (first, second, third); - break; - case SHMCTL: - err = do_sys32_shmctl (first, second, (void *)AA(ptr)); - break; - default: - err = -EINVAL; - break; - } - - return err; -} - -/* * sys_time() can be implemented in user-level using * sys_gettimeofday(). IA64 did this but i386 Linux did not * so we have to implement this system call here. diff -Nru a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/arch/x86_64/kernel/asm-offsets.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,46 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. 
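The do_sys32_*() helpers above all follow the same 32-bit emulation pattern: read the 32-bit layout from user space, call the native 64-bit syscall on a kernel-resident temporary under set_fs(KERNEL_DS), then copy the result back field by field. A minimal sketch of that thunk, with hypothetical native_op() and result structures standing in for the real syscall and types:

#include <asm/uaccess.h>	/* get_fs/set_fs, KERNEL_DS, copy_to_user */
#include <linux/errno.h>

struct res64 { long a; long b; };	/* native 64-bit layout */
struct res32 { int a; int b; };		/* what 32-bit userland expects */

extern int native_op(struct res64 *res);	/* hypothetical 64-bit syscall body */

static int compat_thunk(struct res32 *ures)
{
	struct res64 kres;
	struct res32 r32;
	mm_segment_t old_fs = get_fs();
	int err;

	set_fs(KERNEL_DS);		/* let the native call accept a kernel pointer */
	err = native_op(&kres);
	set_fs(old_fs);
	if (err < 0)
		return err;

	r32.a = kres.a;			/* narrow field by field, as above */
	r32.b = kres.b;
	return copy_to_user(ures, &r32, sizeof(r32)) ? -EFAULT : err;
}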
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +int main(void) +{ +#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry)) + ENTRY(state); + ENTRY(flags); + ENTRY(thread); + BLANK(); +#undef ENTRY +#define ENTRY(entry) DEFINE(threadinfo__ ## entry, offsetof(struct thread_info, entry)) + ENTRY(flags); + ENTRY(addr_limit); + ENTRY(preempt_count); + BLANK(); +#undef ENTRY +#define ENTRY(entry) DEFINE(pda__ ## entry, offsetof(struct x8664_pda, entry)) + ENTRY(kernelstack); + ENTRY(oldrsp); + ENTRY(pcurrent); + ENTRY(irqrsp); + ENTRY(irqcount); + ENTRY(cpunumber); + ENTRY(irqstackptr); + BLANK(); +#undef ENTRY + return 0; +} diff -Nru a/arch/x86_64/kernel/ioport.c b/arch/x86_64/kernel/ioport.c --- a/arch/x86_64/kernel/ioport.c Tue Jun 18 19:12:02 2002 +++ b/arch/x86_64/kernel/ioport.c Tue Jun 18 19:12:02 2002 @@ -14,6 +14,7 @@ #include #include #include +#include /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ static void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value) @@ -61,27 +62,19 @@ return -EINVAL; if (turn_on && !capable(CAP_SYS_RAWIO)) return -EPERM; - /* - * If it's the first ioperm() call in this thread's lifetime, set the - * IO bitmap up. ioperm() is much less timing critical than clone(), - * this is why we delay this operation until now: - */ - if (!t->ioperm) { - /* - * just in case ... - */ - memset(t->io_bitmap,0xff,(IO_BITMAP_SIZE+1)*4); - t->ioperm = 1; - /* - * this activates it in the TSS - */ + + if (!t->io_bitmap_ptr) { + t->io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL); + if (!t->io_bitmap_ptr) + return -ENOMEM; + memset(t->io_bitmap_ptr,0xff,(IO_BITMAP_SIZE+1)*4); tss->io_map_base = IO_BITMAP_OFFSET; } /* * do it in the per-thread copy and in the TSS ... */ - set_bitmap((unsigned long *) t->io_bitmap, from, num, !turn_on); + set_bitmap((unsigned long *) t->io_bitmap_ptr, from, num, !turn_on); set_bitmap((unsigned long *) tss->io_bitmap, from, num, !turn_on); return 0; diff -Nru a/arch/x86_64/kernel/mtrr.c b/arch/x86_64/kernel/mtrr.c --- a/arch/x86_64/kernel/mtrr.c Tue Jun 18 19:12:02 2002 +++ b/arch/x86_64/kernel/mtrr.c Tue Jun 18 19:12:02 2002 @@ -19,10 +19,14 @@ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. (For earlier history, see arch/i386/kernel/mtrr.c) - September 2001 Dave Jones + v2.00 September 2001 Dave Jones Initial rewrite for x86-64. - + Removal of non-Intel style MTRR code. + v2.01 June 2002 Dave Jones + Removal of redundant abstraction layer. + 64-bit fixes. 
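The asm-offsets.c generator added above is never executed: DEFINE() plants a "->" marker line carrying the offset as an immediate operand in the compiler's assembly output, and the build post-processes the .s file into a header of #defines that assembly code can include. Since only -S compilation is involved, the trick cross-compiles cleanly, which is why it can replace the old arch/x86_64/tools/offset.c program removed later in this patch. A small user-space illustration of the mechanism:

#include <stddef.h>

struct example { int state; long flags; };

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	/* compile with "gcc -S"; the output contains a line such as
	 *   ->example_flags $8 offsetof(struct example, flags)
	 * which a sed/awk pass rewrites into: #define example_flags 8 */
	DEFINE(example_flags, offsetof(struct example, flags));
	return 0;
}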
*/ + #include #include #include @@ -60,35 +64,19 @@ #include #include -#define MTRR_VERSION "2.00 (20020207)" +#define MTRR_VERSION "2.01 (20020605)" #define TRUE 1 #define FALSE 0 -#define MTRRcap_MSR 0x0fe -#define MTRRdefType_MSR 0x2ff - -#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) -#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) +#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) +#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) #define NUM_FIXED_RANGES 88 -#define MTRRfix64K_00000_MSR 0x250 -#define MTRRfix16K_80000_MSR 0x258 -#define MTRRfix16K_A0000_MSR 0x259 -#define MTRRfix4K_C0000_MSR 0x268 -#define MTRRfix4K_C8000_MSR 0x269 -#define MTRRfix4K_D0000_MSR 0x26a -#define MTRRfix4K_D8000_MSR 0x26b -#define MTRRfix4K_E0000_MSR 0x26c -#define MTRRfix4K_E8000_MSR 0x26d -#define MTRRfix4K_F0000_MSR 0x26e -#define MTRRfix4K_F8000_MSR 0x26f -#ifdef CONFIG_SMP #define MTRR_CHANGE_MASK_FIXED 0x01 #define MTRR_CHANGE_MASK_VARIABLE 0x02 #define MTRR_CHANGE_MASK_DEFTYPE 0x04 -#endif typedef u8 mtrr_type; @@ -97,49 +85,43 @@ #ifdef CONFIG_SMP #define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type) #else -#define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \ - TRUE) +#define set_mtrr(reg,base,size,type) set_mtrr_up (reg, base, size, type, TRUE) #endif #if defined(CONFIG_PROC_FS) || defined(CONFIG_DEVFS_FS) #define USERSPACE_INTERFACE #endif -#ifndef USERSPACE_INTERFACE -#define compute_ascii() while (0) -#endif - #ifdef USERSPACE_INTERFACE static char *ascii_buffer; static unsigned int ascii_buf_bytes; -#endif -static unsigned int *usage_table; -static DECLARE_MUTEX (main_lock); - -/* Private functions */ -#ifdef USERSPACE_INTERFACE static void compute_ascii (void); +#else +#define compute_ascii() while (0) #endif +static unsigned int *usage_table; +static DECLARE_MUTEX (mtrr_lock); + struct set_mtrr_context { - unsigned long flags; - unsigned long deftype_lo; - unsigned long deftype_hi; - unsigned long cr4val; + u32 deftype_lo; + u32 deftype_hi; + u64 flags; + u64 cr4val; }; /* Put the processor into a state where MTRRs can be safely set */ static void set_mtrr_prepare (struct set_mtrr_context *ctxt) { - unsigned long cr0; + u64 cr0; /* Disable interrupts locally */ __save_flags(ctxt->flags); __cli(); /* Save value of CR4 and clear Page Global Enable (bit 7) */ - if (cpu_has_ge) { + if (cpu_has_pge) { ctxt->cr4val = read_cr4(); write_cr4(ctxt->cr4val & ~(1UL << 7)); } @@ -152,8 +134,8 @@ wbinvd(); /* Disable MTRRs, and set the default type to uncached */ - rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); - wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi); + rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); + wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi); } @@ -164,7 +146,7 @@ wbinvd(); /* Restore MTRRdefType */ - wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); + wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); /* Enable caches */ write_cr0(read_cr0() & 0xbfffffff); @@ -181,9 +163,9 @@ /* This function returns the number of variable MTRRs */ static unsigned int get_num_var_ranges (void) { - unsigned long config, dummy; + u32 config, dummy; - rdmsr (MTRRcap_MSR, config, dummy); + rdmsr (MSR_MTRRcap, config, dummy); return (config & 0xff); } @@ -191,21 +173,21 @@ /* Returns non-zero if we have the write-combining memory type */ static int have_wrcomb (void) { - unsigned long config, dummy; + u32 config, dummy; - rdmsr (MTRRcap_MSR, config, dummy); + 
rdmsr (MSR_MTRRcap, config, dummy); return (config & (1 << 10)); } -static u32 size_or_mask, size_and_mask; +static u64 size_or_mask, size_and_mask; -static void get_mtrr (unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type * type) +static void get_mtrr (unsigned int reg, u64 *base, u32 *size, mtrr_type * type) { - unsigned long mask_lo, mask_hi, base_lo, base_hi; + u32 mask_lo, mask_hi, base_lo, base_hi; + u64 newsize; - rdmsr (MTRRphysMask_MSR (reg), mask_lo, mask_hi); + rdmsr (MSR_MTRRphysMask(reg), mask_lo, mask_hi); if ((mask_lo & 0x800) == 0) { /* Invalid (i.e. free) range */ *base = 0; @@ -214,32 +196,29 @@ return; } - rdmsr (MTRRphysBase_MSR (reg), base_lo, base_hi); + rdmsr (MSR_MTRRphysBase(reg), base_lo, base_hi); /* Work out the shifted address mask. */ - mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) - | mask_lo >> PAGE_SHIFT; - - /* This works correctly if size is a power of two, i.e. a - contiguous range. */ - *size = -mask_lo; + newsize = (u64) mask_hi << 32 | (mask_lo & ~0x800); + newsize = ~newsize+1; + *size = (u32) newsize >> PAGE_SHIFT; *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; *type = base_lo & 0xff; } -static void set_mtrr_up (unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type, int do_safe) -/* [SUMMARY] Set variable MTRR register on the local CPU. - The register to set. - The base address of the region. - The size of the region. If this is 0 the region is disabled. - The type of the region. - If TRUE, do the change safely. If FALSE, safety measures should - be done externally. - [RETURNS] Nothing. -*/ +/* + * Set variable MTRR register on the local CPU. + * The register to set. + * The base address of the region. + * The size of the region. If this is 0 the region is disabled. + * The type of the region. + * If TRUE, do the change safely. If FALSE, safety measures should + * be done externally. + */ +static void set_mtrr_up (unsigned int reg, u64 base, + u32 size, mtrr_type type, int do_safe) { struct set_mtrr_context ctxt; @@ -249,12 +228,12 @@ if (size == 0) { /* The invalid bit is kept in the mask, so we simply clear the relevant mask register to disable a range. */ - wrmsr (MTRRphysMask_MSR (reg), 0, 0); + wrmsr (MSR_MTRRphysMask(reg), 0, 0); } else { - wrmsr (MTRRphysBase_MSR (reg), base << PAGE_SHIFT | type, + wrmsr (MSR_MTRRphysBase(reg), base << PAGE_SHIFT | type, (base & size_and_mask) >> (32 - PAGE_SHIFT)); - wrmsr (MTRRphysMask_MSR (reg), -size << PAGE_SHIFT | 0x800, - (-size & size_and_mask) >> (32 - PAGE_SHIFT)); + wrmsr (MSR_MTRRphysMask(reg), (-size-1) << PAGE_SHIFT | 0x800, + ((-size-1) & size_and_mask) >> (32 - PAGE_SHIFT)); } if (do_safe) set_mtrr_done (&ctxt); @@ -264,41 +243,40 @@ #ifdef CONFIG_SMP struct mtrr_var_range { - unsigned long base_lo; - unsigned long base_hi; - unsigned long mask_lo; - unsigned long mask_hi; + u32 base_lo; + u32 base_hi; + u32 mask_lo; + u32 mask_hi; }; /* Get the MSR pair relating to a var range */ static void __init get_mtrr_var_range (unsigned int index, struct mtrr_var_range *vr) { - rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi); - rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi); + rdmsr (MSR_MTRRphysBase(index), vr->base_lo, vr->base_hi); + rdmsr (MSR_MTRRphysMask(index), vr->mask_lo, vr->mask_hi); } /* Set the MSR pair relating to a var range. 
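The new size computation in get_mtrr() above recovers a region's size from the PhysMask MSR by two's-complement negation: for a power-of-two region the hardware mask is ~(size - 1), so ~mask + 1 yields the size again, which is exactly what the newsize arithmetic does before shifting down to pages. A stand-alone check of the identity:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 1ULL << 27;	/* a 128 MiB, power-of-two region */
	uint64_t mask = ~(size - 1);	/* what MTRRphysMask would hold (valid bit ignored) */

	assert((~mask + 1) == size);	/* the negation trick used by get_mtrr() */
	return 0;
}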
Returns TRUE if changes are made */ -static int __init -set_mtrr_var_range_testing (unsigned int index, struct mtrr_var_range *vr) +static int __init set_mtrr_var_range_testing (unsigned int index, + struct mtrr_var_range *vr) { - unsigned int lo, hi; + u32 lo, hi; int changed = FALSE; - rdmsr (MTRRphysBase_MSR (index), lo, hi); - if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) - || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) { - wrmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi); + rdmsr (MSR_MTRRphysBase(index), lo, hi); + if ((vr->base_lo & 0xfffff0ff) != (lo & 0xfffff0ff) + || (vr->base_hi & 0x000fffff) != (hi & 0x000fffff)) { + wrmsr (MSR_MTRRphysBase(index), vr->base_lo, vr->base_hi); changed = TRUE; } - rdmsr (MTRRphysMask_MSR (index), lo, hi); - - if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL) - || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) { - wrmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi); + rdmsr (MSR_MTRRphysMask(index), lo, hi); + if ((vr->mask_lo & 0xfffff800) != (lo & 0xfffff800) + || (vr->mask_hi & 0x000fffff) != (hi & 0x000fffff)) { + wrmsr (MSR_MTRRphysMask(index), vr->mask_lo, vr->mask_hi); changed = TRUE; } return changed; @@ -307,45 +285,50 @@ static void __init get_fixed_ranges (mtrr_type * frs) { - unsigned long *p = (unsigned long *) frs; + u32 *p = (u32 *) frs; int i; - rdmsr (MTRRfix64K_00000_MSR, p[0], p[1]); + rdmsr (MSR_MTRRfix64K_00000, p[0], p[1]); for (i = 0; i < 2; i++) - rdmsr (MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]); + rdmsr (MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]); for (i = 0; i < 8; i++) - rdmsr (MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]); + rdmsr (MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]); } static int __init set_fixed_ranges_testing (mtrr_type * frs) { - unsigned long *p = (unsigned long *) frs; + u32 *p = (u32 *) frs; int changed = FALSE; int i; - unsigned long lo, hi; + u32 lo, hi; - rdmsr (MTRRfix64K_00000_MSR, lo, hi); + printk (KERN_INFO "mtrr: rdmsr 64K_00000\n"); + rdmsr (MSR_MTRRfix64K_00000, lo, hi); if (p[0] != lo || p[1] != hi) { - wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]); + printk (KERN_INFO "mtrr: Writing %x:%x to 64K MSR. lohi were %x:%x\n", p[0], p[1], lo, hi); + wrmsr (MSR_MTRRfix64K_00000, p[0], p[1]); changed = TRUE; } + printk (KERN_INFO "mtrr: rdmsr 16K_80000\n"); for (i = 0; i < 2; i++) { - rdmsr (MTRRfix16K_80000_MSR + i, lo, hi); + rdmsr (MSR_MTRRfix16K_80000 + i, lo, hi); if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) { - wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i * 2], - p[3 + i * 2]); + printk (KERN_INFO "mtrr: Writing %x:%x to 16K MSR%d. lohi were %x:%x\n", p[2 + i * 2], p[3 + i * 2], i, lo, hi ); + wrmsr (MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]); changed = TRUE; } } + printk (KERN_INFO "mtrr: rdmsr 4K_C0000\n"); for (i = 0; i < 8; i++) { - rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi); + rdmsr (MSR_MTRRfix4K_C0000 + i, lo, hi); + printk (KERN_INFO "mtrr: MTRRfix4K_C0000+%d = %x:%x\n", i, lo, hi); if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) { - wrmsr (MTRRfix4K_C0000_MSR + i, p[6 + i * 2], - p[7 + i * 2]); + printk (KERN_INFO "mtrr: Writing %x:%x to 4K MSR%d. 
lohi were %x:%x\n", p[6 + i * 2], p[7 + i * 2], i, lo, hi); + wrmsr (MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]); changed = TRUE; } } @@ -357,8 +340,8 @@ unsigned int num_var_ranges; struct mtrr_var_range *var_ranges; mtrr_type fixed_ranges[NUM_FIXED_RANGES]; - unsigned char enabled; mtrr_type def_type; + unsigned char enabled; }; @@ -367,9 +350,9 @@ { unsigned int nvrs, i; struct mtrr_var_range *vrs; - unsigned long lo, dummy; + u32 lo, dummy; - nvrs = state->num_var_ranges = get_num_var_ranges (); + nvrs = state->num_var_ranges = get_num_var_ranges(); vrs = state->var_ranges = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL); if (vrs == NULL) @@ -379,7 +362,7 @@ get_mtrr_var_range (i, &vrs[i]); get_fixed_ranges (state->fixed_ranges); - rdmsr (MTRRdefType_MSR, lo, dummy); + rdmsr (MSR_MTRRdefType, lo, dummy); state->def_type = (lo & 0xff); state->enabled = (lo & 0xc00) >> 10; } @@ -393,17 +376,18 @@ } -static unsigned long __init set_mtrr_state (struct mtrr_state *state, +/* + * Set the MTRR state for this CPU. + * The MTRR state information to read. + * Some relevant CPU context. + * [NOTE] The CPU must already be in a safe state for MTRR changes. + * [RETURNS] 0 if no changes made, else a mask indication what was changed. + */ +static u64 __init set_mtrr_state (struct mtrr_state *state, struct set_mtrr_context *ctxt) -/* [SUMMARY] Set the MTRR state for this CPU. - The MTRR state information to read. - Some relevant CPU context. - [NOTE] The CPU must already be in a safe state for MTRR changes. - [RETURNS] 0 if no changes made, else a mask indication what was changed. -*/ { unsigned int i; - unsigned long change_mask = 0; + u64 change_mask = 0; for (i = 0; i < state->num_var_ranges; i++) if (set_mtrr_var_range_testing (i, &state->var_ranges[i])) @@ -428,16 +412,16 @@ static volatile int wait_barrier_cache_enable = FALSE; struct set_mtrr_data { - unsigned long smp_base; - unsigned long smp_size; + u64 smp_base; + u32 smp_size; unsigned int smp_reg; mtrr_type smp_type; }; +/* + * Synchronisation handler. Executed by "other" CPUs. + */ static void ipi_handler (void *info) -/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. - [RETURNS] Nothing. -*/ { struct set_mtrr_data *data = info; struct set_mtrr_context ctxt; @@ -449,7 +433,7 @@ barrier (); /* The master has cleared me to execute */ - (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size, + set_mtrr_up (data->smp_reg, data->smp_base, data->smp_size, data->smp_type, FALSE); /* Notify master CPU that I've executed the function */ @@ -462,8 +446,7 @@ } -static void set_mtrr_smp (unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) +static void set_mtrr_smp (unsigned int reg, u64 base, u32 size, mtrr_type type) { struct set_mtrr_data data; struct set_mtrr_context ctxt; @@ -490,7 +473,7 @@ /* Set up for completion wait and then release other CPUs to change MTRRs */ atomic_set (&undone_count, smp_num_cpus - 1); wait_barrier_execute = FALSE; - (*set_mtrr_up) (reg, base, size, type, FALSE); + set_mtrr_up (reg, base, size, type, FALSE); /* Now wait for other CPUs to complete the function */ while (atomic_read (&undone_count) > 0) @@ -505,7 +488,7 @@ /* Some BIOS's are fucked and don't set all MTRRs the same! 
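ipi_handler() and set_mtrr_smp() above form a rendezvous: every CPU must rewrite its MTRRs while caches are disabled, and nobody may proceed until all of them have finished. Reduced to its two shared variables (the real code adds a second, symmetric barrier before re-enabling caches), the handshake looks roughly like this, assuming the atomic_t and barrier() definitions mtrr.c already pulls in:

static atomic_t undone_count;
static volatile int wait_barrier_execute = 1;	/* TRUE until the master releases us */

static void master_cpu(int nr_other_cpus)
{
	atomic_set(&undone_count, nr_other_cpus);
	wait_barrier_execute = 0;	/* release the CPUs spinning in the IPI */
	/* ... reprogram this CPU's MTRRs ... */
	while (atomic_read(&undone_count) > 0)
		barrier();		/* wait for every other CPU to finish */
}

static void other_cpu(void)		/* runs from the IPI handler */
{
	while (wait_barrier_execute)
		barrier();		/* spin until the master releases us */
	/* ... reprogram this CPU's MTRRs ... */
	atomic_dec(&undone_count);	/* report completion to the master */
}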
*/ -static void __init mtrr_state_warn (unsigned long mask) +static void __init mtrr_state_warn (u32 mask) { if (!mask) return; @@ -521,7 +504,7 @@ #endif /* CONFIG_SMP */ -static char inline * attrib_to_str (int x) +static inline char * attrib_to_str (int x) { return (x <= 6) ? mtrr_strings[x] : "?"; } @@ -551,21 +534,20 @@ } -static int generic_get_free_region (unsigned long base, - unsigned long size) -/* [SUMMARY] Get a free MTRR. - The starting (base) address of the region. - The size (in bytes) of the region. - [RETURNS] The index of the region on success, else -1 on error. +/* + * Get a free MTRR. + * returns the index of the region on success, else -1 on error. */ +static int get_free_region(void) { int i, max; mtrr_type ltype; - unsigned long lbase, lsize; + u64 lbase; + u32 lsize; max = get_num_var_ranges (); for (i = 0; i < max; ++i) { - (*get_mtrr) (i, &lbase, &lsize, <ype); + get_mtrr (i, &lbase, &lsize, <ype); if (lsize == 0) return i; } @@ -573,22 +555,19 @@ } -static int (*get_free_region) (unsigned long base, - unsigned long size) = generic_get_free_region; - /** * mtrr_add_page - Add a memory type region * @base: Physical base address of region in pages (4 KB) * @size: Physical size of region in pages (4 KB) * @type: Type of MTRR desired * @increment: If this is true do usage counting on the region + * Returns The MTRR register on success, else a negative number + * indicating the error code. * - * Memory type region registers control the caching on newer Intel and - * non Intel processors. This function allows drivers to request an - * MTRR is added. The details and hardware specifics of each processor's - * implementation are hidden from the caller, but nevertheless the - * caller should expect to need to provide a power of two size on an - * equivalent power of two boundary. + * Memory type region registers control the caching on newer + * processors. This function allows drivers to request an MTRR is added. + * The caller should expect to need to provide a power of two size on + * an equivalent power of two boundary. * * If the region cannot be added either because all regions are in use * or the CPU cannot support it a negative value is returned. On success @@ -596,42 +575,28 @@ * as a cookie only. * * On a multiprocessor machine the changes are made to all processors. - * This is required on x86 by the Intel processors. * * The available types are * * %MTRR_TYPE_UNCACHABLE - No caching - * * %MTRR_TYPE_WRBACK - Write data back in bursts whenever - * * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts - * * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes * * BUGS: Needs a quiet flag for the cases where drivers do not mind * failures and do not wish system log messages to be sent. */ -int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, char increment) +int mtrr_add_page (u64 base, u32 size, unsigned int type, char increment) { -/* [SUMMARY] Add an MTRR entry. - The starting (base, in pages) address of the region. - The size of the region. (in pages) - The type of the new region. - If true and the region already exists, the usage count will be - incremented. - [RETURNS] The MTRR register on success, else a negative number indicating - the error code. - [NOTE] This routine uses a spinlock. 
-*/ int i, max; mtrr_type ltype; - unsigned long lbase, lsize, last; + u64 lbase, last; + u32 lsize; if (base + size < 0x100) { printk (KERN_WARNING - "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n", + "mtrr: cannot set region below 1 MiB (0x%lx000,0x%x000)\n", base, size); return -EINVAL; } @@ -644,7 +609,7 @@ if (lbase != last) { printk (KERN_WARNING - "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", + "mtrr: base(0x%lx000) is not aligned on a size(0x%x000) boundary\n", base, size); return -EINVAL; } @@ -655,7 +620,7 @@ } /* If the type is WC, check that this processor supports it */ - if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb ()) { + if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { printk (KERN_WARNING "mtrr: your processor doesn't support write-combining\n"); return -ENOSYS; @@ -669,9 +634,9 @@ increment = increment ? 1 : 0; max = get_num_var_ranges (); /* Search for existing MTRR */ - down (&main_lock); + down (&mtrr_lock); for (i = 0; i < max; ++i) { - (*get_mtrr) (i, &lbase, &lsize, <ype); + get_mtrr (i, &lbase, &lsize, <ype); if (base >= lbase + lsize) continue; if ((base < lbase) && (base + size <= lbase)) @@ -679,41 +644,41 @@ /* At this point we know there is some kind of overlap/enclosure */ if ((base < lbase) || (base + size > lbase + lsize)) { - up (&main_lock); + up (&mtrr_lock); printk (KERN_WARNING - "mtrr: 0x%lx000,0x%lx000 overlaps existing" - " 0x%lx000,0x%lx000\n", base, size, lbase, - lsize); + "mtrr: 0x%lx000,0x%x000 overlaps existing" + " 0x%lx000,0x%x000\n", base, size, lbase, lsize); return -EINVAL; } /* New region is enclosed by an existing region */ if (ltype != type) { if (type == MTRR_TYPE_UNCACHABLE) continue; - up (&main_lock); + up (&mtrr_lock); printk - ("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", - base, size, attrib_to_str (ltype), + ("mtrr: type mismatch for %lx000,%x000 old: %s new: %s\n", + base, size, + attrib_to_str (ltype), attrib_to_str (type)); return -EINVAL; } if (increment) ++usage_table[i]; compute_ascii (); - up (&main_lock); + up (&mtrr_lock); return i; } /* Search for an empty MTRR */ - i = (*get_free_region) (base, size); + i = get_free_region(); if (i < 0) { - up (&main_lock); + up (&mtrr_lock); printk ("mtrr: no more MTRRs available\n"); return i; } set_mtrr (i, base, size, type); usage_table[i] = 1; compute_ascii (); - up (&main_lock); + up (&mtrr_lock); return i; } @@ -724,13 +689,13 @@ * @size: Physical size of region * @type: Type of MTRR desired * @increment: If this is true do usage counting on the region + * Return the MTRR register on success, else a negative numbe + * indicating the error code. * - * Memory type region registers control the caching on newer Intel and - * non Intel processors. This function allows drivers to request an - * MTRR is added. The details and hardware specifics of each processor's - * implementation are hidden from the caller, but nevertheless the - * caller should expect to need to provide a power of two size on an - * equivalent power of two boundary. + * Memory type region registers control the caching on newer processors. + * This function allows drivers to request an MTRR is added. + * The caller should expect to need to provide a power of two size on + * an equivalent power of two boundary. * * If the region cannot be added either because all regions are in use * or the CPU cannot support it a negative value is returned. 
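The search loop in mtrr_add_page() above distinguishes three cases for the requested range against each existing register: disjoint (keep scanning), fully enclosed (reuse the entry and bump its usage count), and partial overlap (rejected with -EINVAL). The same tests as a stand-alone predicate, with both ranges expressed in pages:

enum overlap { DISJOINT, ENCLOSED, PARTIAL };

static enum overlap classify(unsigned long base, unsigned long size,
			     unsigned long lbase, unsigned long lsize)
{
	if (base >= lbase + lsize || base + size <= lbase)
		return DISJOINT;	/* no page in common, keep scanning */
	if (base >= lbase && base + size <= lbase + lsize)
		return ENCLOSED;	/* candidate for sharing the register */
	return PARTIAL;			/* mtrr_add_page() returns -EINVAL */
}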
On success @@ -743,33 +708,19 @@ * The available types are * * %MTRR_TYPE_UNCACHABLE - No caching - * * %MTRR_TYPE_WRBACK - Write data back in bursts whenever - * * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts - * * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes * * BUGS: Needs a quiet flag for the cases where drivers do not mind * failures and do not wish system log messages to be sent. */ -int mtrr_add (unsigned long base, unsigned long size, unsigned int type, - char increment) +int mtrr_add (u64 base, u32 size, unsigned int type, char increment) { -/* [SUMMARY] Add an MTRR entry. - The starting (base) address of the region. - The size (in bytes) of the region. - The type of the new region. - If true and the region already exists, the usage count will be - incremented. - [RETURNS] The MTRR register on success, else a negative number indicating - the error code. -*/ - if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { printk ("mtrr: size and base must be multiples of 4 kiB\n"); - printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base); + printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base); return -EINVAL; } return mtrr_add_page (base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, @@ -792,55 +743,46 @@ * code. */ -int mtrr_del_page (int reg, unsigned long base, unsigned long size) -/* [SUMMARY] Delete MTRR/decrement usage count. - The register. If this is less than 0 then <> and <> must - be supplied. - The base address of the region. This is ignored if <> is >= 0. - The size of the region. This is ignored if <> is >= 0. - [RETURNS] The register on success, else a negative number indicating - the error code. - [NOTE] This routine uses a spinlock. -*/ +int mtrr_del_page (int reg, u64 base, u32 size) { int i, max; mtrr_type ltype; - unsigned long lbase, lsize; + u64 lbase; + u32 lsize; max = get_num_var_ranges (); - down (&main_lock); + down (&mtrr_lock); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) { - (*get_mtrr) (i, &lbase, &lsize, <ype); + get_mtrr (i, &lbase, &lsize, <ype); if (lbase == base && lsize == size) { reg = i; break; } } if (reg < 0) { - up (&main_lock); - printk ("mtrr: no MTRR for %lx000,%lx000 found\n", base, - size); + up (&mtrr_lock); + printk ("mtrr: no MTRR for %lx000,%x000 found\n", base, size); return -EINVAL; } } if (reg >= max) { - up (&main_lock); + up (&mtrr_lock); printk ("mtrr: register: %d too big\n", reg); return -EINVAL; } - (*get_mtrr) (reg, &lbase, &lsize, <ype); + get_mtrr (reg, &lbase, &lsize, <ype); if (lsize < 1) { - up (&main_lock); + up (&mtrr_lock); printk ("mtrr: MTRR %d not used\n", reg); return -EINVAL; } if (usage_table[reg] < 1) { - up (&main_lock); + up (&mtrr_lock); printk ("mtrr: reg: %d has count=0\n", reg); return -EINVAL; } @@ -848,7 +790,7 @@ if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0); compute_ascii (); - up (&main_lock); + up (&mtrr_lock); return reg; } @@ -868,19 +810,11 @@ * code. */ -int mtrr_del (int reg, unsigned long base, unsigned long size) -/* [SUMMARY] Delete MTRR/decrement usage count. - The register. If this is less than 0 then <> and <> must - be supplied. - The base address of the region. This is ignored if <> is >= 0. - The size of the region. This is ignored if <> is >= 0. - [RETURNS] The register on success, else a negative number indicating - the error code. 
-*/ +int mtrr_del (int reg, u64 base, u32 size) { if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { printk ("mtrr: size and base must be multiples of 4 kiB\n"); - printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base); + printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base); return -EINVAL; } return mtrr_del_page (reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); @@ -889,8 +823,8 @@ #ifdef USERSPACE_INTERFACE -static int mtrr_file_add (unsigned long base, unsigned long size, - unsigned int type, char increment, struct file *file, int page) +static int mtrr_file_add (u64 base, u32 size, unsigned int type, + struct file *file, int page) { int reg, max; unsigned int *fcount = file->private_data; @@ -910,7 +844,7 @@ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { printk ("mtrr: size and base must be multiples of 4 kiB\n"); - printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base); + printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base); return -EINVAL; } base >>= PAGE_SHIFT; @@ -925,7 +859,7 @@ } -static int mtrr_file_del (unsigned long base, unsigned long size, +static int mtrr_file_del (u64 base, u32 size, struct file *file, int page) { int reg; @@ -935,7 +869,7 @@ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { printk ("mtrr: size and base must be multiples of 4 kiB\n"); - printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base); + printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base); return -EINVAL; } base >>= PAGE_SHIFT; @@ -977,9 +911,9 @@ "disable=%d" */ { - int i, err; - unsigned long reg; - unsigned long long base, size; + int i, err, reg; + u64 base; + u32 size; char *ptr; char line[LINE_SIZE]; @@ -1027,7 +961,7 @@ if ((base & 0xfff) || (size & 0xfff)) { printk ("mtrr: size and base must be multiples of 4 kiB\n"); - printk ("mtrr: size: 0x%Lx base: 0x%Lx\n", size, base); + printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base); return -EINVAL; } @@ -1046,9 +980,7 @@ continue; base >>= PAGE_SHIFT; size >>= PAGE_SHIFT; - err = - mtrr_add_page ((unsigned long) base, (unsigned long) size, - i, 1); + err = mtrr_add_page ((u64) base, size, i, 1); if (err < 0) return err; return len; @@ -1076,7 +1008,7 @@ if (copy_from_user (&sentry, (void *) arg, sizeof sentry)) return -EFAULT; err = - mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, + mtrr_file_add (sentry.base, sentry.size, sentry.type, file, 0); if (err < 0) return err; @@ -1117,7 +1049,7 @@ return -EFAULT; if (gentry.regnum >= get_num_var_ranges ()) return -EINVAL; - (*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type); + get_mtrr (gentry.regnum, &gentry.base, &gentry.size, &type); /* Hide entries that go above 4GB */ if (gentry.base + gentry.size > 0x100000 @@ -1139,7 +1071,7 @@ if (copy_from_user (&sentry, (void *) arg, sizeof sentry)) return -EFAULT; err = - mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, + mtrr_file_add (sentry.base, sentry.size, sentry.type, file, 1); if (err < 0) return err; @@ -1180,7 +1112,7 @@ return -EFAULT; if (gentry.regnum >= get_num_var_ranges ()) return -EINVAL; - (*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type); + get_mtrr (gentry.regnum, &gentry.base, &gentry.size, &type); gentry.type = type; if (copy_to_user ((void *) arg, &gentry, sizeof gentry)) @@ -1199,7 +1131,6 @@ if (fcount == NULL) return 0; - lock_kernel (); max = get_num_var_ranges (); for (i = 0; i < max; ++i) { while (fcount[i] > 0) { @@ -1208,7 +1139,6 @@ --fcount[i]; } } - unlock_kernel (); kfree (fcount); file->private_data = NULL; return 0; @@ -1234,12 +1164,13 @@ char 
factor; int i, max; mtrr_type type; - unsigned long base, size; + u64 base; + u32 size; ascii_buf_bytes = 0; max = get_num_var_ranges (); for (i = 0; i < max; i++) { - (*get_mtrr) (i, &base, &size, &type); + get_mtrr (i, &base, &size, &type); if (size == 0) usage_table[i] = 0; else { @@ -1253,11 +1184,10 @@ } sprintf (ascii_buffer + ascii_buf_bytes, - "reg%02i: base=0x%05lx000 (%4liMB), size=%4li%cB: %s, count=%d\n", + "reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n", i, base, base >> (20 - PAGE_SHIFT), size, factor, attrib_to_str (type), usage_table[i]); - ascii_buf_bytes += - strlen (ascii_buffer + ascii_buf_bytes); + ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes); } } devfs_set_file_size (devfs_handle, ascii_buf_bytes); @@ -1283,22 +1213,16 @@ if ((cpuid_eax (0x80000000) >= 0x80000008)) { u32 phys_addr; phys_addr = cpuid_eax (0x80000008) & 0xff; - size_or_mask = - ~((1 << (phys_addr - PAGE_SHIFT)) - 1); - size_and_mask = ~size_or_mask & 0xfff00000; - } else { - /* FIXME: This is to make it work on Athlon during debugging. */ - size_or_mask = 0xff000000; /* 36 bits */ - size_and_mask = 0x00f00000; + size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1); + size_and_mask = ~size_or_mask & 0xfffffffffff00000; } - printk ("mtrr: detected mtrr type: x86-64\n"); } } #ifdef CONFIG_SMP -static volatile unsigned long smp_changes_mask __initdata = 0; +static volatile u32 smp_changes_mask __initdata = 0; static struct mtrr_state smp_mtrr_state __initdata = { 0, 0 }; void __init mtrr_init_boot_cpu (void) @@ -1310,7 +1234,8 @@ void __init mtrr_init_secondary_cpu (void) { - unsigned long mask, count; + u64 mask; + int count; struct set_mtrr_context ctxt; /* Note that this is not ideal, since the cache is only flushed/disabled @@ -1357,4 +1282,3 @@ init_table (); return 0; } - diff -Nru a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c --- a/arch/x86_64/kernel/process.c Tue Jun 18 19:12:01 2002 +++ b/arch/x86_64/kernel/process.c Tue Jun 18 19:12:01 2002 @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -320,9 +321,6 @@ printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); } -#define __STR(x) #x -#define __STR2(x) __STR(x) - extern void load_gs_index(unsigned); /* @@ -330,7 +328,13 @@ */ void exit_thread(void) { - /* nothing to do ... */ + struct task_struct *me = current; + if (me->thread.io_bitmap_ptr) { + kfree(me->thread.io_bitmap_ptr); + me->thread.io_bitmap_ptr = NULL; + (init_tss + smp_processor_id())->io_map_base = + INVALID_IO_BITMAP_OFFSET; + } } void flush_thread(void) @@ -392,6 +396,14 @@ unlazy_fpu(current); p->thread.i387 = current->thread.i387; + if (unlikely(me->thread.io_bitmap_ptr != NULL)) { + p->thread.io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL); + if (!p->thread.io_bitmap_ptr) + return -ENOMEM; + memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, + (IO_BITMAP_SIZE+1)*4); + } + return 0; } @@ -491,21 +503,14 @@ /* * Handle the IO bitmap */ - if (unlikely(prev->ioperm || next->ioperm)) { - if (next->ioperm) { + if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) { + if (next->io_bitmap_ptr) { /* * 4 cachelines copy ... not good, but not that * bad either. Anyone got something better? * This only affects processes which use ioperm(). - * [Putting the TSSs into 4k-tlb mapped regions - * and playing VM tricks to switch the IO bitmap - * is not really acceptable.] 
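Together with the ioport.c hunk earlier in this patch, the per-thread IO bitmap is no longer embedded in thread_struct but kmalloc()ed on first use, so three lifecycle points now manage it: the first ioperm() allocates, copy_thread() above deep-copies the parent's bitmap for the child, and exit_thread() frees it and invalidates the TSS offset. A sketch of the allocation half, assuming the new io_bitmap_ptr field (thread_struct and IO_BITMAP_SIZE as in <asm/processor.h>):

#include <linux/slab.h>		/* kmalloc, GFP_KERNEL */
#include <linux/string.h>	/* memset */

static int thread_get_io_bitmap(struct thread_struct *t)
{
	if (t->io_bitmap_ptr)
		return 0;			/* already allocated */
	t->io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE + 1) * 4, GFP_KERNEL);
	if (!t->io_bitmap_ptr)
		return -ENOMEM;
	/* all ones == every port denied until ioperm() clears bits */
	memset(t->io_bitmap_ptr, 0xff, (IO_BITMAP_SIZE + 1) * 4);
	return 0;
}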
- * On x86-64 we could put multiple bitmaps into - * the GDT and just switch offsets - * This would require ugly special cases on overflow - * though -AK */ - memcpy(tss->io_bitmap, next->io_bitmap, + memcpy(tss->io_bitmap, next->io_bitmap_ptr, IO_BITMAP_SIZE*sizeof(u32)); tss->io_map_base = IO_BITMAP_OFFSET; } else { diff -Nru a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c --- a/arch/x86_64/kernel/setup64.c Tue Jun 18 19:12:02 2002 +++ b/arch/x86_64/kernel/setup64.c Tue Jun 18 19:12:02 2002 @@ -91,6 +91,9 @@ pda->me = pda; pda->cpudata_offset = 0; + pda->active_mm = &init_mm; + pda->mmu_state = 0; + asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); wrmsrl(MSR_GS_BASE, cpu_pda + cpu); } diff -Nru a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c --- a/arch/x86_64/kernel/signal.c Tue Jun 18 19:12:01 2002 +++ b/arch/x86_64/kernel/signal.c Tue Jun 18 19:12:01 2002 @@ -84,7 +84,6 @@ char *pretcode; struct ucontext uc; struct siginfo info; - struct _fpstate fpstate; }; static int @@ -186,8 +185,7 @@ */ static int -setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate, - struct pt_regs *regs, unsigned long mask) +setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask) { int tmp, err = 0; struct task_struct *me = current; @@ -221,20 +219,17 @@ err |= __put_user(mask, &sc->oldmask); err |= __put_user(me->thread.cr2, &sc->cr2); - tmp = save_i387(fpstate); - if (tmp < 0) - err = 1; - else - err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); - return err; } /* * Determine which stack to use.. */ -static inline struct rt_sigframe * -get_sigframe(struct k_sigaction *ka, struct pt_regs * regs) + +#define round_down(p, r) ((void *) ((unsigned long)((p) - (r) + 1) & ~((r)-1))) + +static void * +get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) { unsigned long rsp; @@ -247,22 +242,34 @@ rsp = current->sas_ss_sp + current->sas_ss_size; } - rsp = (rsp - sizeof(struct _fpstate)) & ~(15UL); - rsp -= offsetof(struct rt_sigframe, fpstate); - - return (struct rt_sigframe *) rsp; + return round_down(rsp - size, 16); } static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { - struct rt_sigframe *frame; + struct rt_sigframe *frame = NULL; + struct _fpstate *fp = NULL; int err = 0; - frame = get_sigframe(ka, regs); + if (current->used_math) { + fp = get_stack(ka, regs, sizeof(struct _fpstate)); + frame = round_down((char *)fp - sizeof(struct rt_sigframe), 16) - 8; - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) { goto give_sigsegv; + } + + if (save_i387(fp) < 0) + err |= -1; + } + + if (!frame) + frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; + + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) { + goto give_sigsegv; + } if (ka->sa.sa_flags & SA_SIGINFO) { err |= copy_siginfo_to_user(&frame->info, info); @@ -278,14 +285,10 @@ err |= __put_user(sas_ss_flags(regs->rsp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, - regs, set->sig[0]); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); + err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - if (err) { - goto give_sigsegv; - } - /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ /* x86-64 should always use SA_RESTORER. */ @@ -297,7 +300,6 @@ } if (err) { - printk("fault 3\n"); goto give_sigsegv; } @@ -305,7 +307,6 @@ printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax); #endif - /* Set up registers for signal handler */ { struct exec_domain *ed = current_thread_info()->exec_domain; @@ -320,8 +321,9 @@ next argument after the signal number on the stack. */ regs->rsi = (unsigned long)&frame->info; regs->rdx = (unsigned long)&frame->uc; - regs->rsp = (unsigned long) frame; regs->rip = (unsigned long) ka->sa.sa_handler; + + regs->rsp = (unsigned long)frame; set_fs(USER_DS); regs->eflags &= ~TF_MASK; diff -Nru a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c --- a/arch/x86_64/kernel/smp.c Tue Jun 18 19:12:02 2002 +++ b/arch/x86_64/kernel/smp.c Tue Jun 18 19:12:02 2002 @@ -25,8 +25,6 @@ /* The 'big kernel lock' */ spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; -struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { &init_mm, 0 }}; - /* * the following functions deal with sending IPIs between CPUs. * @@ -147,9 +145,9 @@ */ static void inline leave_mm (unsigned long cpu) { - if (cpu_tlbstate[cpu].state == TLBSTATE_OK) + if (read_pda(mmu_state) == TLBSTATE_OK) BUG(); - clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask); + clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask); __flush_tlb(); } @@ -164,18 +162,18 @@ * the other cpus, but smp_invalidate_interrupt ignore flush ipis * for the wrong mm, and in the worst case we perform a superflous * tlb flush. - * 1a2) set cpu_tlbstate to TLBSTATE_OK + * 1a2) set cpu mmu_state to TLBSTATE_OK * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 * was in lazy tlb mode. - * 1a3) update cpu_tlbstate[].active_mm + * 1a3) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1b) thread switch without mm change - * cpu_tlbstate[].active_mm is correct, cpu0 already handles + * cpu active_mm is correct, cpu0 already handles * flush ipis. - * 1b1) set cpu_tlbstate to TLBSTATE_OK + * 1b1) set cpu mmu_state to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. @@ -188,7 +186,7 @@ * runs in kernel space, the cpu could load tlb entries for user space * pages. * - * The good news is that cpu_tlbstate is local to each cpu, no + * The good news is that cpu mmu_state is local to each cpu, no * write/read ordering problems. 
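In the signal.c changes above, the FP save area no longer lives inside struct rt_sigframe: get_stack() carves an aligned block straight out of the user stack, and the frame itself lands at round_down(fp - sizeof(frame), 16) - 8, so that on handler entry %rsp is 8 mod 16, just as after a normal call instruction (the return address, pretcode, sits at the top of the frame). The mask trick behind the alignment, checked in isolation with a simplified variant of the macro:

#include <assert.h>
#include <stdint.h>

/* simplified: greatest multiple of r that is <= x, for power-of-two r */
#define align_down(x, r) ((uintptr_t)(x) & ~((uintptr_t)(r) - 1))

int main(void)
{
	uintptr_t rsp = 0x7fffffffe4c9;			/* arbitrary user %rsp */
	uintptr_t fp = align_down(rsp - 512, 16);	/* block for the FP state */

	assert(fp % 16 == 0);		/* fxsave requires 16-byte alignment */
	assert(fp <= rsp - 512);	/* the block stays below the old %rsp */
	return 0;
}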
*/ @@ -216,8 +214,8 @@ * BUG(); */ - if (flush_mm == cpu_tlbstate[cpu].active_mm) { - if (cpu_tlbstate[cpu].state == TLBSTATE_OK) { + if (flush_mm == read_pda(active_mm)) { + if (read_pda(mmu_state) == TLBSTATE_OK) { if (flush_va == FLUSH_ALL) local_flush_tlb(); else @@ -335,7 +333,7 @@ unsigned long cpu = smp_processor_id(); __flush_tlb_all(); - if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY) + if (read_pda(mmu_state) == TLBSTATE_LAZY) leave_mm(cpu); } diff -Nru a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c --- a/arch/x86_64/kernel/vsyscall.c Tue Jun 18 19:12:03 2002 +++ b/arch/x86_64/kernel/vsyscall.c Tue Jun 18 19:12:03 2002 @@ -47,7 +47,7 @@ #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) -#define NO_VSYSCALL 1 +//#define NO_VSYSCALL 1 #ifdef NO_VSYSCALL #include diff -Nru a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c --- a/arch/x86_64/kernel/x8664_ksyms.c Tue Jun 18 19:12:01 2002 +++ b/arch/x86_64/kernel/x8664_ksyms.c Tue Jun 18 19:12:01 2002 @@ -189,3 +189,5 @@ void out_of_line_bug(void); EXPORT_SYMBOL(out_of_line_bug); + +EXPORT_SYMBOL(init_level4_pgt); diff -Nru a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile --- a/arch/x86_64/lib/Makefile Tue Jun 18 19:12:01 2002 +++ b/arch/x86_64/lib/Makefile Tue Jun 18 19:12:01 2002 @@ -12,7 +12,7 @@ thunk.o io.o clear_page.o copy_page.o obj-y += memcpy.o obj-y += memmove.o -#obj-y += memset.o +obj-y += memset.o obj-y += copy_user.o export-objs := io.o csum-wrappers.o csum-partial.o diff -Nru a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S --- a/arch/x86_64/lib/memset.S Tue Jun 18 19:12:02 2002 +++ b/arch/x86_64/lib/memset.S Tue Jun 18 19:12:02 2002 @@ -1,6 +1,4 @@ -/* Copyright 2002 Andi Kleen, SuSE Labs */ - - // #define FIX_ALIGNMENT 1 +/* Copyright 2002 Andi Kleen */ /* * ISO C memset - set a memory block to a byte value. @@ -11,51 +9,51 @@ * * rax original destination */ - .globl ____memset + .globl __memset + .globl memset .p2align -____memset: - movq %rdi,%r10 /* save destination for return address */ - movq %rdx,%r11 /* save count */ +memset: +__memset: + movq %rdi,%r10 + movq %rdx,%r11 /* expand byte value */ - movzbl %sil,%ecx /* zero extend char value */ - movabs $0x0101010101010101,%rax /* expansion pattern */ - mul %rcx /* expand with rax, clobbers rdx */ + movzbl %sil,%ecx + movabs $0x0101010101010101,%rax + mul %rcx /* with rax, clobbers rdx */ -#ifdef FIX_ALIGNMENT /* align dst */ movl %edi,%r9d - andl $7,%r9d /* test unaligned bits */ + andl $7,%r9d jnz bad_alignment after_bad_alignment: -#endif - movq %r11,%rcx /* restore count */ - shrq $6,%rcx /* divide by 64 */ - jz handle_tail /* block smaller than 64 bytes? */ - movl $64,%r8d /* CSE loop block size */ + movq %r11,%rcx + movl $64,%r8d + shrq $6,%rcx + jz handle_tail loop_64: - movnti %rax,0*8(%rdi) - movnti %rax,1*8(%rdi) - movnti %rax,2*8(%rdi) - movnti %rax,3*8(%rdi) - movnti %rax,4*8(%rdi) - movnti %rax,5*8(%rdi) - movnti %rax,6*8(%rdi) - movnti %rax,7*8(%rdi) /* clear 64 byte blocks */ - addq %r8,%rdi /* increase pointer by 64 bytes */ - loop loop_64 /* decrement rcx and if not zero loop */ + movnti %rax,(%rdi) + movnti %rax,8(%rdi) + movnti %rax,16(%rdi) + movnti %rax,24(%rdi) + movnti %rax,32(%rdi) + movnti %rax,40(%rdi) + movnti %rax,48(%rdi) + movnti %rax,56(%rdi) + addq %r8,%rdi + loop loop_64 /* Handle tail in loops. The loops should be faster than hard to predict jump tables. 
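The rewritten memset above expands the fill byte into all eight byte lanes of %rax with a single multiply: zero-extend the byte, then multiply by 0x0101010101010101 so each 0x01 digit deposits one copy; the main loop then streams the pattern with movnti, non-temporal stores that bypass the cache for large fills. The expansion step in C:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t c = 0xAB;		/* the fill byte, as in %sil */
	uint64_t pattern = (uint64_t)c * 0x0101010101010101ULL;

	assert(pattern == 0xABABABABABABABABULL);	/* one copy per byte lane */
	return 0;
}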
*/ handle_tail: movl %r11d,%ecx - andl $63,%ecx - shrl $3,%ecx + andl $63&(~7),%ecx jz handle_7 + shrl $3,%ecx loop_8: - movnti %rax,(%rdi) /* long words */ + movnti %rax,(%rdi) addq $8,%rdi loop loop_8 @@ -64,22 +62,20 @@ andl $7,%ecx jz ende loop_1: - movb %al,(%rdi) /* bytes */ - incq %rdi + movb %al,(%rdi) + addq $1,%rdi loop loop_1 ende: movq %r10,%rax ret -#ifdef FIX_ALIGNMENT bad_alignment: - andq $-8,%r11 /* shorter than 8 bytes */ - jz handle_7 /* if yes handle it in the tail code */ - movnti %rax,(%rdi) /* unaligned store of 8 bytes */ + cmpq $7,%r11 + jbe handle_7 + movnti %rax,(%rdi) /* unaligned store */ movq $8,%r8 - subq %r9,%r8 /* compute alignment (8-misalignment) */ - addq %r8,%rdi /* fix destination */ - subq %r8,%r11 /* fix count */ + subq %r9,%r8 + addq %r8,%rdi + subq %r8,%r11 jmp after_bad_alignment -#endif diff -Nru a/arch/x86_64/tools/Makefile b/arch/x86_64/tools/Makefile --- a/arch/x86_64/tools/Makefile Tue Jun 18 19:12:01 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,29 +0,0 @@ - -TARGET = $(TOPDIR)/include/asm-x86_64/offset.h - -all: - -mrproper: - -fastdep: $(TARGET) - -.PHONY: all - -$(TARGET): offset.h - cmp -s $^ $@ || (cp $^ $(TARGET).new && mv $(TARGET).new $(TARGET)) - -.PHONY : offset.h all modules modules_install - -offset.h: offset.sed offset.c FORCE - $(CC) $(CFLAGS) -S -o offset.tmp offset.c - sed -n -f offset.sed < offset.tmp > offset.h - -clean: - rm -f offset.[hs] $(TARGET).new offset.tmp - -mrproper: - rm -f offset.[hs] $(TARGET) - rm -f $(TARGET) - -include $(TOPDIR)/Rules.make - diff -Nru a/arch/x86_64/tools/offset.c b/arch/x86_64/tools/offset.c --- a/arch/x86_64/tools/offset.c Tue Jun 18 19:12:02 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,49 +0,0 @@ -/* Written 2000 by Andi Kleen */ -/* This program is never executed, just its assembly is examined for offsets - (this trick is needed to get cross compiling right) */ -/* $Id: offset.c,v 1.13 2002/01/08 15:19:57 ak Exp $ */ -#define ASM_OFFSET_H 1 -#ifndef __KERNEL__ -#define __KERNEL__ -#endif -#include -#include -#include -#include -#include -#include -#include -#include - -#define output(x) asm volatile ("--- " x) -#define outconst(x,y) asm volatile ("--- " x : : "i" (y)) - -int main(void) -{ - output("/* Auto generated by arch/../tools/offset.c at " __DATE__ ". Do not edit. 
*/\n"); - output("#ifndef ASM_OFFSET_H\n"); - output("#define ASM_OFFSET_H 1\n"); - -#define ENTRY(entry) outconst("#define tsk_" #entry " %0", offsetof(struct task_struct, entry)) - ENTRY(state); - ENTRY(flags); - ENTRY(thread); -#undef ENTRY -#define ENTRY(entry) outconst("#define threadinfo_" #entry " %0", offsetof(struct thread_info, entry)) - ENTRY(flags); - ENTRY(addr_limit); - ENTRY(preempt_count); -#undef ENTRY -#define ENTRY(entry) outconst("#define pda_" #entry " %0", offsetof(struct x8664_pda, entry)) - ENTRY(kernelstack); - ENTRY(oldrsp); - ENTRY(pcurrent); - ENTRY(irqrsp); - ENTRY(irqcount); - ENTRY(cpunumber); - ENTRY(irqstackptr); -#undef ENTRY - output("#endif\n"); - - return(0); -} diff -Nru a/arch/x86_64/tools/offset.sed b/arch/x86_64/tools/offset.sed --- a/arch/x86_64/tools/offset.sed Tue Jun 18 19:12:01 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,7 +0,0 @@ -/---/ { - s/---// - s/\$// - s/^ // - s/^ // - p -} diff -Nru a/drivers/acpi/osl.c b/drivers/acpi/osl.c --- a/drivers/acpi/osl.c Tue Jun 18 19:12:03 2002 +++ b/drivers/acpi/osl.c Tue Jun 18 19:12:03 2002 @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "acpi.h" diff -Nru a/drivers/acpi/processor.c b/drivers/acpi/processor.c --- a/drivers/acpi/processor.c Tue Jun 18 19:12:02 2002 +++ b/drivers/acpi/processor.c Tue Jun 18 19:12:02 2002 @@ -2060,8 +2060,9 @@ return_VALUE(-EINVAL); #ifdef CONFIG_SMP - if (smp_num_cpus > 1) - errata.smp = smp_num_cpus; + /* FIXME: What should this be? -- RR */ + if (num_online_cpus() > 1) + errata.smp = num_online_cpus(); #endif acpi_processor_errata(pr); diff -Nru a/drivers/block/DAC960.c b/drivers/block/DAC960.c --- a/drivers/block/DAC960.c Tue Jun 18 19:12:03 2002 +++ b/drivers/block/DAC960.c Tue Jun 18 19:12:03 2002 @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/block/cciss.c b/drivers/block/cciss.c --- a/drivers/block/cciss.c Tue Jun 18 19:12:03 2002 +++ b/drivers/block/cciss.c Tue Jun 18 19:12:03 2002 @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c --- a/drivers/block/cpqarray.c Tue Jun 18 19:12:01 2002 +++ b/drivers/block/cpqarray.c Tue Jun 18 19:12:01 2002 @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/block/elevator.c b/drivers/block/elevator.c --- a/drivers/block/elevator.c Tue Jun 18 19:12:03 2002 +++ b/drivers/block/elevator.c Tue Jun 18 19:12:03 2002 @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -424,3 +425,5 @@ EXPORT_SYMBOL(__elv_add_request); EXPORT_SYMBOL(__elv_next_request); EXPORT_SYMBOL(elv_remove_request); +EXPORT_SYMBOL(elevator_exit); +EXPORT_SYMBOL(elevator_init); diff -Nru a/drivers/block/floppy.c b/drivers/block/floppy.c --- a/drivers/block/floppy.c Tue Jun 18 19:12:01 2002 +++ b/drivers/block/floppy.c Tue Jun 18 19:12:01 2002 @@ -165,6 +165,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c --- a/drivers/block/ll_rw_blk.c Tue Jun 18 19:12:01 2002 +++ b/drivers/block/ll_rw_blk.c Tue Jun 18 19:12:01 2002 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -160,6 +161,7 @@ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); init_waitqueue_head(&q->queue_wait); + INIT_LIST_HEAD(&q->plug_list); } /** @@ -2002,8 +2004,8 @@ queue_nr_requests = (total_ram >> 8) & ~15; /* One per 
quarter-megabyte */ if (queue_nr_requests < 32) queue_nr_requests = 32; - if (queue_nr_requests > 512) - queue_nr_requests = 512; + if (queue_nr_requests > 256) + queue_nr_requests = 256; /* * Batch frees according to queue length diff -Nru a/drivers/block/loop.c b/drivers/block/loop.c --- a/drivers/block/loop.c Tue Jun 18 19:12:01 2002 +++ b/drivers/block/loop.c Tue Jun 18 19:12:01 2002 @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include @@ -168,6 +169,15 @@ } +static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf, + char *lbuf, int size, int rblock) +{ + if (!lo->transfer) + return 0; + + return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock); +} + static int do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos) { @@ -454,20 +464,43 @@ out_bh: bio->bi_sector = rbh->bi_sector + (lo->lo_offset >> 9); bio->bi_rw = rbh->bi_rw; - spin_lock_irq(&lo->lo_lock); bio->bi_bdev = lo->lo_device; - spin_unlock_irq(&lo->lo_lock); return bio; } -static int loop_make_request(request_queue_t *q, struct bio *rbh) +static int +bio_transfer(struct loop_device *lo, struct bio *to_bio, + struct bio *from_bio) +{ + unsigned long IV = loop_get_iv(lo, from_bio->bi_sector); + struct bio_vec *from_bvec, *to_bvec; + char *vto, *vfrom; + int ret = 0, i; + + __bio_for_each_segment(from_bvec, from_bio, i, 0) { + to_bvec = &to_bio->bi_io_vec[i]; + + kmap(from_bvec->bv_page); + kmap(to_bvec->bv_page); + vfrom = page_address(from_bvec->bv_page) + from_bvec->bv_offset; + vto = page_address(to_bvec->bv_page) + to_bvec->bv_offset; + ret |= lo_do_transfer(lo, bio_data_dir(to_bio), vto, vfrom, + from_bvec->bv_len, IV); + kunmap(from_bvec->bv_page); + kunmap(to_bvec->bv_page); + } + + return ret; +} + +static int loop_make_request(request_queue_t *q, struct bio *old_bio) { - struct bio *bh = NULL; + struct bio *new_bio = NULL; struct loop_device *lo; unsigned long IV; - int rw = bio_rw(rbh); - int unit = minor(to_kdev_t(rbh->bi_bdev->bd_dev)); + int rw = bio_rw(old_bio); + int unit = minor(to_kdev_t(old_bio->bi_bdev->bd_dev)); if (unit >= max_loop) goto out; @@ -489,60 +522,41 @@ goto err; } - blk_queue_bounce(q, &rbh); + blk_queue_bounce(q, &old_bio); /* * file backed, queue for loop_thread to handle */ if (lo->lo_flags & LO_FLAGS_DO_BMAP) { - loop_add_bio(lo, rbh); + loop_add_bio(lo, old_bio); return 0; } /* * piggy old buffer on original, and submit for I/O */ - bh = loop_get_buffer(lo, rbh); - IV = loop_get_iv(lo, rbh->bi_sector); + new_bio = loop_get_buffer(lo, old_bio); + IV = loop_get_iv(lo, old_bio->bi_sector); if (rw == WRITE) { - if (lo_do_transfer(lo, WRITE, bio_data(bh), bio_data(rbh), - bh->bi_size, IV)) + if (bio_transfer(lo, new_bio, old_bio)) goto err; } - generic_make_request(bh); + generic_make_request(new_bio); return 0; err: if (atomic_dec_and_test(&lo->lo_pending)) up(&lo->lo_bh_mutex); - loop_put_buffer(bh); + loop_put_buffer(new_bio); out: - bio_io_error(rbh); + bio_io_error(old_bio); return 0; inactive: spin_unlock_irq(&lo->lo_lock); goto out; } -static int do_bio_blockbacked(struct loop_device *lo, struct bio *bio, - struct bio *rbh) -{ - unsigned long IV = loop_get_iv(lo, rbh->bi_sector); - struct bio_vec *from; - char *vto, *vfrom; - int ret = 0, i; - - bio_for_each_segment(from, rbh, i) { - vfrom = page_address(from->bv_page) + from->bv_offset; - vto = page_address(bio->bi_io_vec[i].bv_page) + bio->bi_io_vec[i].bv_offset; - ret |= lo_do_transfer(lo, bio_data_dir(bio), vto, vfrom, - from->bv_len, IV); - } - - return ret; -} 
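Note that the do_bio_blockbacked() function removed here survives as bio_transfer() earlier in this patch, which additionally kmap()s both pages before invoking the transfer hook, so the hook can also touch pages that live in highmem. The core of that per-segment pattern, with transform() as a placeholder for lo->transfer:

#include <linux/highmem.h>	/* kmap/kunmap, page_address */

extern int transform(char *dst, char *src, unsigned int len);	/* stands in for lo->transfer */

static int transform_pages(struct page *dst, unsigned int dst_off,
			   struct page *src, unsigned int src_off,
			   unsigned int len)
{
	char *vdst, *vsrc;
	int ret;

	kmap(dst);		/* pin a kernel mapping for highmem pages */
	kmap(src);
	vdst = page_address(dst) + dst_off;
	vsrc = page_address(src) + src_off;
	ret = transform(vdst, vsrc, len);
	kunmap(src);
	kunmap(dst);
	return ret;
}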
- static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) { int ret; @@ -556,7 +570,7 @@ } else { struct bio *rbh = bio->bi_private; - ret = do_bio_blockbacked(lo, bio, rbh); + ret = bio_transfer(lo, bio, rbh); bio_endio(rbh, !ret); loop_put_buffer(bio); @@ -588,10 +602,8 @@ set_user_nice(current, -20); - spin_lock_irq(&lo->lo_lock); lo->lo_state = Lo_bound; atomic_inc(&lo->lo_pending); - spin_unlock_irq(&lo->lo_lock); /* * up sem, we are running diff -Nru a/drivers/block/nbd.c b/drivers/block/nbd.c --- a/drivers/block/nbd.c Tue Jun 18 19:12:02 2002 +++ b/drivers/block/nbd.c Tue Jun 18 19:12:02 2002 @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/block/rd.c b/drivers/block/rd.c --- a/drivers/block/rd.c Tue Jun 18 19:12:02 2002 +++ b/drivers/block/rd.c Tue Jun 18 19:12:02 2002 @@ -45,6 +45,8 @@ #include #include #include +#include +#include #include #include #include diff -Nru a/drivers/block/umem.c b/drivers/block/umem.c --- a/drivers/block/umem.c Tue Jun 18 19:12:02 2002 +++ b/drivers/block/umem.c Tue Jun 18 19:12:02 2002 @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -128,6 +129,8 @@ */ struct bio *bio, *currentbio, **biotail; + request_queue_t queue; + struct mm_page { dma_addr_t page_dma; struct mm_dma_desc *desc; @@ -141,8 +144,6 @@ struct tasklet_struct tasklet; unsigned int dma_status; - struct tq_struct plug_tq; - struct { int good; int warned; @@ -292,7 +293,7 @@ * Whenever IO on the active page completes, the Ready page is activated * and the ex-Active page is clean out and made Ready. * Otherwise the Ready page is only activated when it becomes full, or - * when mm_unplug_device is called via run_task_queue(&tq_disk). + * when mm_unplug_device is called via blk_run_queues(). * * If a request arrives while both pages a full, it is queued, and b_rdev is * overloaded to record whether it was a read or a write. @@ -340,8 +341,9 @@ offset = ((char*)desc) - ((char*)page->desc); writel(cpu_to_le32((page->page_dma+offset)&0xffffffff), card->csr_remap + DMA_DESCRIPTOR_ADDR); - /* if sizeof(dma_addr_t) == 32, this will generate a warning, sorry */ - writel(cpu_to_le32((page->page_dma)>>32), + /* Force the value to u64 before shifting otherwise >> 32 is undefined C + * and on some ports will do nothing ! 
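
That comment is worth unpacking. When dma_addr_t is a 32-bit type, page_dma >> 32 shifts a value by the full width of its promoted type, which ISO C leaves undefined; x86, for one, masks the shift count to its low five bits, so the expression quietly evaluates to page_dma >> 0 instead of 0. Widening the operand to u64 first makes the shift well-defined on every port, which is exactly what the cast in the hunk line below does. A small user-space illustration, with a stand-in variable and the dubious shift left commented out so the program itself stays defined:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dma32 = 0x12345678u;	/* a 32-bit dma_addr_t stand-in */

	/*
	 * Undefined behaviour: the shift count equals the operand width.
	 * On x86 the CPU masks the count, so this would typically print
	 * 12345678 rather than the 0 the author expected:
	 *
	 *	printf("%x\n", dma32 >> 32);
	 */

	/* Well-defined: widen first, then shift.  Prints 0. */
	printf("%x\n", (unsigned)(((uint64_t)dma32) >> 32));
	return 0;
}
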
*/ + writel(cpu_to_le32(((u64)page->page_dma)>>32), card->csr_remap + DMA_DESCRIPTOR_ADDR + 4); /* Go, go, go */ @@ -383,10 +385,12 @@ static void mm_unplug_device(void *data) { - struct cardinfo *card = data; + request_queue_t *q = data; + struct cardinfo *card = q->queuedata; spin_lock_bh(&card->lock); - activate(card); + if (blk_remove_plug(q)) + activate(card); spin_unlock_bh(&card->lock); } @@ -564,8 +568,7 @@ */ static int mm_make_request(request_queue_t *q, struct bio *bio) { - struct cardinfo *card = &cards[DEVICE_NR( - bio->bi_bdev->bd_dev)]; + struct cardinfo *card = q->queuedata; PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size); /* set uptodate now, and clear it if there are any errors */ @@ -575,9 +578,9 @@ *card->biotail = bio; bio->bi_next = NULL; card->biotail = &bio->bi_next; + blk_plug_device(q); spin_unlock_bh(&card->lock); - queue_task(&card->plug_tq, &tq_disk); return 0; } @@ -1064,11 +1067,12 @@ card->bio = NULL; card->biotail = &card->bio; + blk_queue_make_request(&card->queue, mm_make_request); + card->queue.queuedata = card; + card->queue.unplug_fn = mm_unplug_device; + tasklet_init(&card->tasklet, process_page, (unsigned long)card); - card->plug_tq.sync = 0; - card->plug_tq.routine = &mm_unplug_device; - card->plug_tq.data = card; card->check_batteries = 0; mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY); @@ -1236,6 +1240,17 @@ -- mm_init ----------------------------------------------------------------------------------- */ + +static request_queue_t * mm_queue_proc(kdev_t dev) +{ + int c = DEVICE_NR(kdev_val(dev)); + + if (c < MM_MAXCARDS) + return &cards[c].queue; + else + return BLK_DEFAULT_QUEUE(MAJOR_NR); +} + int __init mm_init(void) { int retval, i; @@ -1275,10 +1290,8 @@ mm_gendisk.part = mm_partitions; mm_gendisk.nr_real = num_cards; + blk_dev[MAJOR_NR].queue = mm_queue_proc; add_gendisk(&mm_gendisk); - - blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), - mm_make_request); blk_size[MAJOR_NR] = mm_gendisk.sizes; for (i = 0; i < num_cards; i++) { diff -Nru a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h --- a/drivers/char/agp/agp.h Tue Jun 18 19:12:02 2002 +++ b/drivers/char/agp/agp.h Tue Jun 18 19:12:02 2002 @@ -118,8 +118,8 @@ int (*remove_memory) (agp_memory *, off_t, int); agp_memory *(*alloc_by_type) (size_t, int); void (*free_by_type) (agp_memory *); - unsigned long (*agp_alloc_page) (void); - void (*agp_destroy_page) (unsigned long); + void *(*agp_alloc_page) (void); + void (*agp_destroy_page) (void *); int (*suspend)(void); void (*resume)(void); diff -Nru a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c --- a/drivers/char/agp/agpgart_be.c Tue Jun 18 19:12:01 2002 +++ b/drivers/char/agp/agpgart_be.c Tue Jun 18 19:12:01 2002 @@ -22,6 +22,8 @@ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. */ #include #include @@ -43,6 +45,7 @@ #include #include #include +#include #include #include "agp.h" @@ -59,56 +62,28 @@ EXPORT_SYMBOL(agp_backend_acquire); EXPORT_SYMBOL(agp_backend_release); -static void flush_cache(void); - static struct agp_bridge_data agp_bridge; static int agp_try_unsupported __initdata = 0; - -static inline void flush_cache(void) -{ -#if defined(__i386__) || defined(__x86_64__) - asm volatile ("wbinvd":::"memory"); -#elif defined(__alpha__) || defined(__ia64__) || defined(__sparc__) - /* ??? 
I wonder if we'll really need to flush caches, or if the - core logic can manage to keep the system coherent. The ARM - speaks only of using `cflush' to get things in memory in - preparation for power failure. - - If we do need to call `cflush', we'll need a target page, - as we can only flush one page at a time. - - Ditto for IA-64. --davidm 00/08/07 */ - mb(); -#else -#error "Please define flush_cache." -#endif -} - #ifdef CONFIG_SMP -static atomic_t cpus_waiting; - static void ipi_handler(void *null) { - flush_cache(); - atomic_dec(&cpus_waiting); - while (atomic_read(&cpus_waiting) > 0) - barrier(); + flush_agp_cache(); } static void smp_flush_cache(void) { - atomic_set(&cpus_waiting, smp_num_cpus - 1); - if (smp_call_function(ipi_handler, NULL, 1, 0) != 0) + if (smp_call_function(ipi_handler, NULL, 1, 1) != 0) panic(PFX "timed out waiting for the other CPUs!\n"); - flush_cache(); - while (atomic_read(&cpus_waiting) > 0) - barrier(); + flush_agp_cache(); } #define global_cache_flush smp_flush_cache #else /* CONFIG_SMP */ -#define global_cache_flush flush_cache -#endif /* CONFIG_SMP */ +static void global_cache_flush(void) +{ + flush_agp_cache(); +} +#endif /* !CONFIG_SMP */ int agp_backend_acquire(void) { @@ -208,8 +183,7 @@ if (curr->page_count != 0) { for (i = 0; i < curr->page_count; i++) { curr->memory[i] &= ~(0x00000fff); - agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(curr->memory[i])); + agp_bridge.agp_destroy_page(phys_to_virt(curr->memory[i])); } } agp_free_key(curr->key); @@ -252,21 +226,22 @@ MOD_DEC_USE_COUNT; return NULL; } + for (i = 0; i < page_count; i++) { - new->memory[i] = agp_bridge.agp_alloc_page(); + void *addr = agp_bridge.agp_alloc_page(); - if (new->memory[i] == 0) { + if (addr == NULL) { /* Free this structure */ agp_free_memory(new); return NULL; } new->memory[i] = - agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[i]), - type); + agp_bridge.mask_memory(virt_to_phys(addr), type); new->page_count++; } + flush_agp_mappings(); + return new; } @@ -561,6 +536,7 @@ agp_bridge.current_size; break; } + temp = agp_bridge.current_size; } else { agp_bridge.aperture_size_idx = i; } @@ -761,7 +737,7 @@ * against a maximum value. */ -static unsigned long agp_generic_alloc_page(void) +static void *agp_generic_alloc_page(void) { struct page * page; @@ -769,24 +745,26 @@ if (page == NULL) return 0; + map_page_into_agp(page); + get_page(page); SetPageLocked(page); atomic_inc(&agp_bridge.current_memory_agp); - return (unsigned long)page_address(page); + return page_address(page); } -static void agp_generic_destroy_page(unsigned long addr) +static void agp_generic_destroy_page(void *addr) { - void *pt = (void *) addr; struct page *page; - if (pt == NULL) + if (addr == NULL) return; - page = virt_to_page(pt); + page = virt_to_page(addr); + unmap_page_from_agp(page); put_page(page); unlock_page(page); - free_page((unsigned long) pt); + free_page((unsigned long)addr); atomic_dec(&agp_bridge.current_memory_agp); } @@ -993,6 +971,7 @@ return new; } if(type == AGP_PHYS_MEMORY) { + void *addr; /* The I810 requires a physical address to program * it's mouse pointer into hardware. 
However the * Xserver still writes to it through the agp @@ -1007,17 +986,14 @@ return NULL; } MOD_INC_USE_COUNT; - new->memory[0] = agp_bridge.agp_alloc_page(); + addr = agp_bridge.agp_alloc_page(); - if (new->memory[0] == 0) { + if (addr == NULL) { /* Free this structure */ agp_free_memory(new); return NULL; } - new->memory[0] = - agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[0]), - type); + new->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr), type); new->page_count = 1; new->num_scratch_pages = 1; new->type = AGP_PHYS_MEMORY; @@ -1032,7 +1008,7 @@ { agp_free_key(curr->key); if(curr->type == AGP_PHYS_MEMORY) { - agp_bridge.agp_destroy_page((unsigned long) + agp_bridge.agp_destroy_page( phys_to_virt(curr->memory[0])); vfree(curr->memory); } @@ -1291,7 +1267,7 @@ if (type == AGP_DCACHE_MEMORY) return(NULL); if (type == AGP_PHYS_MEMORY) { - unsigned long physical; + void *addr; /* The i830 requires a physical address to program * it's mouse pointer into hardware. However the @@ -1306,19 +1282,18 @@ if (nw == NULL) return(NULL); MOD_INC_USE_COUNT; - nw->memory[0] = agp_bridge.agp_alloc_page(); - physical = nw->memory[0]; - if (nw->memory[0] == 0) { + addr = agp_bridge.agp_alloc_page(); + if (addr == NULL) { /* free this structure */ agp_free_memory(nw); return(NULL); } - nw->memory[0] = agp_bridge.mask_memory(virt_to_phys((void *) nw->memory[0]),type); + nw->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr),type); nw->page_count = 1; nw->num_scratch_pages = 1; nw->type = AGP_PHYS_MEMORY; - nw->physical = virt_to_phys((void *) physical); + nw->physical = virt_to_phys(addr); return(nw); } @@ -1849,16 +1824,17 @@ * Let's just hope nobody counts on the allocated AGP memory being there * before bind time (I don't think current drivers do)... */ -static unsigned long intel_i460_alloc_page(void) +static void * intel_i460_alloc_page(void) { if (intel_i460_cpk) return agp_generic_alloc_page(); /* Returning NULL would cause problems */ - return ~0UL; + /* AK: really dubious code. 
*/ + return (void *)~0UL; } -static void intel_i460_destroy_page(unsigned long page) +static void intel_i460_destroy_page(void *page) { if (intel_i460_cpk) agp_generic_destroy_page(page); @@ -3298,38 +3274,29 @@ } } -static unsigned long ali_alloc_page(void) +static void *ali_alloc_page(void) { - struct page *page; - u32 temp; + void *adr = agp_generic_alloc_page(); + unsigned temp; - page = alloc_page(GFP_KERNEL); - if (page == NULL) + if (adr == 0) return 0; - get_page(page); - SetPageLocked(page); - atomic_inc(&agp_bridge.current_memory_agp); - - global_cache_flush(); - if (agp_bridge.type == ALI_M1541) { pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - virt_to_phys(page_address(page))) | + virt_to_phys(adr)) | ALI_CACHE_FLUSH_EN )); } - return (unsigned long)page_address(page); + return adr; } -static void ali_destroy_page(unsigned long addr) +static void ali_destroy_page(void * addr) { u32 temp; - void *pt = (void *) addr; - struct page *page; - if (pt == NULL) + if (addr == NULL) return; global_cache_flush(); @@ -3338,15 +3305,11 @@ pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - virt_to_phys((void *)pt)) | + virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN)); } - page = virt_to_page(pt); - put_page(page); - unlock_page(page); - free_page((unsigned long) pt); - atomic_dec(&agp_bridge.current_memory_agp); + agp_generic_destroy_page(addr); } /* Setup function */ @@ -5011,15 +4974,15 @@ } if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page = agp_bridge.agp_alloc_page(); + void *addr; + addr = agp_bridge.agp_alloc_page(); - if (agp_bridge.scratch_page == 0) { + if (addr == NULL) { printk(KERN_ERR PFX "unable to get memory for " "scratch page.\n"); return -ENOMEM; } - agp_bridge.scratch_page = - virt_to_phys((void *) agp_bridge.scratch_page); + agp_bridge.scratch_page = virt_to_phys(addr); agp_bridge.scratch_page = agp_bridge.mask_memory(agp_bridge.scratch_page, 0); } @@ -5064,8 +5027,7 @@ err_out: if (agp_bridge.needs_scratch_page == TRUE) { agp_bridge.scratch_page &= ~(0x00000fff); - agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(agp_bridge.scratch_page)); + agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page)); } if (got_gatt) agp_bridge.free_gatt_table(); @@ -5084,8 +5046,7 @@ if (agp_bridge.needs_scratch_page == TRUE) { agp_bridge.scratch_page &= ~(0x00000fff); - agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(agp_bridge.scratch_page)); + agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page)); } } diff -Nru a/drivers/char/random.c b/drivers/char/random.c --- a/drivers/char/random.c Tue Jun 18 19:12:01 2002 +++ b/drivers/char/random.c Tue Jun 18 19:12:01 2002 @@ -252,6 +252,7 @@ #include #include #include +#include #include #include diff -Nru a/drivers/char/rio/func.h b/drivers/char/rio/func.h --- a/drivers/char/rio/func.h Tue Jun 18 19:12:02 2002 +++ b/drivers/char/rio/func.h Tue Jun 18 19:12:02 2002 @@ -33,6 +33,8 @@ #ifndef __func_h_def #define __func_h_def +#include + #ifdef SCCS_LABELS #ifndef lint static char *_func_h_sccs_ = "@(#)func.h 1.3"; diff -Nru a/drivers/ide/ioctl.c b/drivers/ide/ioctl.c --- a/drivers/ide/ioctl.c Tue Jun 18 19:12:02 2002 +++ b/drivers/ide/ioctl.c Tue Jun 18 19:12:02 2002 @@ -345,8 +345,9 @@ if (!arg) { if (ide_spin_wait_hwgroup(drive)) return -EBUSY; - else 
- return 0; + /* Do nothing, just unlock */ + spin_unlock_irq(drive->channel->lock); + return 0; } return do_cmd_ioctl(drive, arg); diff -Nru a/drivers/ide/tcq.c b/drivers/ide/tcq.c --- a/drivers/ide/tcq.c Tue Jun 18 19:12:03 2002 +++ b/drivers/ide/tcq.c Tue Jun 18 19:12:03 2002 @@ -175,13 +175,8 @@ tcq_invalidate_queue(drive); } -static void set_irq(struct ata_device *drive, ata_handler_t *handler) +static void __set_irq(struct ata_channel *ch, ata_handler_t *handler) { - struct ata_channel *ch = drive->channel; - unsigned long flags; - - spin_lock_irqsave(ch->lock, flags); - /* * always just bump the timer for now, the timeout handling will * have to be changed to be per-command @@ -194,7 +189,15 @@ ch->timer.data = (unsigned long) ch->drive; mod_timer(&ch->timer, jiffies + 5 * HZ); ch->handler = handler; +} + +static void set_irq(struct ata_device *drive, ata_handler_t *handler) +{ + struct ata_channel *ch = drive->channel; + unsigned long flags; + spin_lock_irqsave(ch->lock, flags); + __set_irq(ch, handler); spin_unlock_irqrestore(ch->lock, flags); } @@ -230,8 +233,10 @@ */ static ide_startstop_t service(struct ata_device *drive, struct request *rq) { - u8 feat; - u8 stat; + struct ata_channel *ch = drive->channel; + ide_startstop_t ret; + unsigned long flags; + u8 feat, stat; int tag; TCQ_PRINTK("%s: started service\n", drive->name); @@ -291,9 +296,12 @@ TCQ_PRINTK("%s: stat %x, feat %x\n", __FUNCTION__, stat, feat); + spin_lock_irqsave(ch->lock, flags); + rq = blk_queue_find_tag(&drive->queue, tag); if (!rq) { printk(KERN_ERR"%s: missing request for tag %d\n", __FUNCTION__, tag); + spin_unlock_irqrestore(ch->lock, flags); return ide_stopped; } @@ -304,7 +312,10 @@ * interrupt to indicate end of transfer, release is not allowed */ TCQ_PRINTK("%s: starting command %x\n", __FUNCTION__, stat); - return udma_tcq_start(drive, rq); + + ret = udma_tcq_start(drive, rq); + spin_unlock_irqrestore(ch->lock, flags); + return ret; } static ide_startstop_t check_service(struct ata_device *drive, struct request *rq) @@ -538,7 +549,7 @@ if (ata_start_dma(drive, rq)) return ide_stopped; - set_irq(drive, ide_dmaq_intr); + __set_irq(ch, ide_dmaq_intr); udma_start(drive, rq); return ide_started; @@ -590,7 +601,7 @@ if ((feat = GET_FEAT()) & NSEC_REL) { drive->immed_rel++; drive->rq = NULL; - set_irq(drive, ide_dmaq_intr); + __set_irq(drive->channel, ide_dmaq_intr); TCQ_PRINTK("REL in queued_start\n"); diff -Nru a/drivers/md/linear.c b/drivers/md/linear.c --- a/drivers/md/linear.c Tue Jun 18 19:12:02 2002 +++ b/drivers/md/linear.c Tue Jun 18 19:12:02 2002 @@ -1,6 +1,6 @@ /* linear.c : Multiple Devices driver for Linux - Copyright (C) 1994-96 Marc ZYNGIER + Copyright (C) 1994-96 Marc ZYNGIER or @@ -20,7 +20,7 @@ #include #include - +#include #include #define MAJOR_NR MD_MAJOR @@ -33,39 +33,45 @@ linear_conf_t *conf; struct linear_hash *table; mdk_rdev_t *rdev; - int size, i, j, nb_zone; + int size, i, nb_zone, cnt; unsigned int curr_offset; + struct list_head *tmp; MOD_INC_USE_COUNT; conf = kmalloc (sizeof (*conf), GFP_KERNEL); if (!conf) goto out; + memset(conf, 0, sizeof(*conf)); mddev->private = conf; - if (md_check_ordering(mddev)) { - printk("linear: disks are not ordered, aborting!\n"); - goto out; - } /* * Find the smallest device. 
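
With md_check_ordering() gone, the loop just below can no longer assume the rdevs arrive in slot order: each device's own this_disk.raid_disk index says where it belongs, so the code must reject out-of-range and duplicate slots and count the devices to catch missing ones. raid0's create_strip_zones() grows the same idiom further on. Reduced to its essentials, with illustrative types rather than kernel ones:

struct item {
	int	slot;		/* stands in for rdev->sb->this_disk.raid_disk */
};

static int place_items(struct item **table, int nslots,
		       struct item *in, int nin)
{
	int i, cnt = 0;

	for (i = 0; i < nslots; i++)
		table[i] = NULL;

	for (i = 0; i < nin; i++) {
		int j = in[i].slot;

		if (j < 0 || j >= nslots || table[j])
			return -1;	/* bad numbering or duplicate slot */
		table[j] = &in[i];
		cnt++;
	}

	return cnt == nslots ? 0 : -1;	/* some slot was never filled */
}

Validating while inserting lets one pass replace the separate ordering and completeness checks the old helper performed.
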
*/ conf->smallest = NULL; - curr_offset = 0; - ITERATE_RDEV_ORDERED(mddev,rdev,j) { + cnt = 0; + ITERATE_RDEV(mddev,rdev,tmp) { + int j = rdev->sb->this_disk.raid_disk; dev_info_t *disk = conf->disks + j; + if (j < 0 || j > mddev->sb->raid_disks || disk->bdev) { + printk("linear: disk numbering problem. Aborting!\n"); + goto out; + } + disk->dev = rdev->dev; disk->bdev = rdev->bdev; atomic_inc(&rdev->bdev->bd_count); disk->size = rdev->size; - disk->offset = curr_offset; - - curr_offset += disk->size; if (!conf->smallest || (disk->size < conf->smallest->size)) conf->smallest = disk; + cnt++; + } + if (cnt != mddev->sb->raid_disks) { + printk("linear: not enough drives present. Aborting!\n"); + goto out; } nb_zone = conf->nr_zones = @@ -81,10 +87,13 @@ * Here we generate the linear hash table */ table = conf->hash_table; - i = 0; size = 0; - for (j = 0; j < mddev->nb_dev; j++) { - dev_info_t *disk = conf->disks + j; + curr_offset = 0; + for (i = 0; i < cnt; i++) { + dev_info_t *disk = conf->disks + i; + + disk->offset = curr_offset; + curr_offset += disk->size; if (size < 0) { table[-1].dev1 = disk; @@ -130,12 +139,13 @@ return 0; } -static int linear_make_request (mddev_t *mddev, int rw, struct bio *bio) +static int linear_make_request (request_queue_t *q, struct bio *bio) { - linear_conf_t *conf = mddev_to_conf(mddev); - struct linear_hash *hash; - dev_info_t *tmp_dev; - long block; + mddev_t *mddev = q->queuedata; + linear_conf_t *conf = mddev_to_conf(mddev); + struct linear_hash *hash; + dev_info_t *tmp_dev; + long block; block = bio->bi_sector >> 1; hash = conf->hash_table + (block / conf->smallest->size); @@ -186,7 +196,7 @@ } sz += sprintf(page+sz, "\n"); #endif - sz += sprintf(page+sz, " %dk rounding", mddev->param.chunk_size/1024); + sz += sprintf(page+sz, " %dk rounding", mddev->sb->chunk_size/1024); return sz; } diff -Nru a/drivers/md/lvm-snap.c b/drivers/md/lvm-snap.c --- a/drivers/md/lvm-snap.c Tue Jun 18 19:12:03 2002 +++ b/drivers/md/lvm-snap.c Tue Jun 18 19:12:03 2002 @@ -224,7 +224,7 @@ for (i = 0; i < nr; i++) { - bh = get_hash_table(dev, start++, blksize); + bh = find_get_block(dev, start++, blksize); if (bh) bforget(bh); } diff -Nru a/drivers/md/lvm.c b/drivers/md/lvm.c --- a/drivers/md/lvm.c Tue Jun 18 19:12:02 2002 +++ b/drivers/md/lvm.c Tue Jun 18 19:12:02 2002 @@ -209,6 +209,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/md/md.c b/drivers/md/md.c --- a/drivers/md/md.c Tue Jun 18 19:12:02 2002 +++ b/drivers/md/md.c Tue Jun 18 19:12:02 2002 @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -106,7 +107,7 @@ * subsystems want to have a pre-defined structure */ struct hd_struct md_hd_struct[MAX_MD_DEVS]; -static int md_maxreadahead[MAX_MD_DEVS]; +static void md_recover_arrays(void); static mdk_thread_t *md_recovery_thread; int md_size[MAX_MD_DEVS]; @@ -128,93 +129,111 @@ /* * Enables to iterate over all existing md arrays + * all_mddevs_lock protects this list as well as mddev_map. */ static LIST_HEAD(all_mddevs); +static spinlock_t all_mddevs_lock = SPIN_LOCK_UNLOCKED; + /* - * The mapping between kdev and mddev is not necessary a simple - * one! Eg. HSM uses several sub-devices to implement Logical - * Volumes. All these sub-devices map to the same mddev. + * iterates through all used mddevs in the system. + * We take care to grab the all_mddevs_lock whenever navigating + * the list, and to always hold a refcount when unlocked. 
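
The comma-operator macro defined just below is dense, so here is the discipline it encodes, open-coded with a pthread mutex standing in for all_mddevs_lock and a hypothetical node type:

#include <pthread.h>

struct node {
	struct node	*next;
	int		refs;		/* stands in for mddev->active */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node list_head = { &list_head, 0 };	/* circular sentinel */

static void for_each_node(void (*fn)(struct node *))
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	for (n = list_head.next; n != &list_head; n = n->next) {
		n->refs++;			/* pin before dropping the lock */
		pthread_mutex_unlock(&list_lock);
		fn(n);				/* may sleep; the node cannot vanish */
		pthread_mutex_lock(&list_lock);
		n->refs--;			/* real code: mddev_put() */
	}
	pthread_mutex_unlock(&list_lock);
}

The real macro is stricter still: it pins the next node before releasing the previous one, so at least one reference is held at every instant, and mddev_put() frees a node only once it is unreferenced, has no superblock and holds no disks.
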
+ * Any code which breaks out of this loop while own + * a reference to the current mddev and must mddev_put it. */ -dev_mapping_t mddev_map[MAX_MD_DEVS]; - -void add_mddev_mapping(mddev_t * mddev, kdev_t dev, void *data) -{ - unsigned int minor = minor(dev); +#define ITERATE_MDDEV(mddev,tmp) \ + \ + for (spin_lock(&all_mddevs_lock), \ + (tmp = all_mddevs.next), \ + (mddev = NULL); \ + (void)(tmp != &all_mddevs && \ + mddev_get(list_entry(tmp, mddev_t, all_mddevs))),\ + spin_unlock(&all_mddevs_lock), \ + (mddev ? mddev_put(mddev):(void)NULL), \ + (mddev = list_entry(tmp, mddev_t, all_mddevs)), \ + (tmp != &all_mddevs); \ + spin_lock(&all_mddevs_lock), \ + (tmp = tmp->next) \ + ) - if (major(dev) != MD_MAJOR) { - MD_BUG(); - return; - } - if (mddev_map[minor].mddev) { - MD_BUG(); - return; - } - mddev_map[minor].mddev = mddev; - mddev_map[minor].data = data; -} +static mddev_t *mddev_map[MAX_MD_DEVS]; -void del_mddev_mapping(mddev_t * mddev, kdev_t dev) +static int md_fail_request (request_queue_t *q, struct bio *bio) { - unsigned int minor = minor(dev); - - if (major(dev) != MD_MAJOR) { - MD_BUG(); - return; - } - if (mddev_map[minor].mddev != mddev) { - MD_BUG(); - return; - } - mddev_map[minor].mddev = NULL; - mddev_map[minor].data = NULL; + bio_io_error(bio); + return 0; } -static int md_make_request (request_queue_t *q, struct bio *bio) +static inline mddev_t *mddev_get(mddev_t *mddev) { - mddev_t *mddev = kdev_to_mddev(to_kdev_t(bio->bi_bdev->bd_dev)); - - if (mddev && mddev->pers) - return mddev->pers->make_request(mddev, bio_rw(bio), bio); - else { - bio_io_error(bio); - return 0; - } + atomic_inc(&mddev->active); + return mddev; } -static mddev_t * alloc_mddev(kdev_t dev) +static void mddev_put(mddev_t *mddev) { - mddev_t *mddev; - - if (major(dev) != MD_MAJOR) { - MD_BUG(); - return 0; + if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) + return; + if (!mddev->sb && list_empty(&mddev->disks)) { + list_del(&mddev->all_mddevs); + mddev_map[mdidx(mddev)] = NULL; + kfree(mddev); + MOD_DEC_USE_COUNT; + } + spin_unlock(&all_mddevs_lock); +} + +static mddev_t * mddev_find(int unit) +{ + mddev_t *mddev, *new = NULL; + + retry: + spin_lock(&all_mddevs_lock); + if (mddev_map[unit]) { + mddev = mddev_get(mddev_map[unit]); + spin_unlock(&all_mddevs_lock); + if (new) + kfree(new); + return mddev; + } + if (new) { + mddev_map[unit] = new; + list_add(&new->all_mddevs, &all_mddevs); + spin_unlock(&all_mddevs_lock); + MOD_INC_USE_COUNT; + return new; } - mddev = (mddev_t *) kmalloc(sizeof(*mddev), GFP_KERNEL); - if (!mddev) + spin_unlock(&all_mddevs_lock); + + new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) return NULL; - memset(mddev, 0, sizeof(*mddev)); + memset(new, 0, sizeof(*new)); - mddev->__minor = minor(dev); - init_MUTEX(&mddev->reconfig_sem); - init_MUTEX(&mddev->recovery_sem); - init_MUTEX(&mddev->resync_sem); - INIT_LIST_HEAD(&mddev->disks); - INIT_LIST_HEAD(&mddev->all_mddevs); - atomic_set(&mddev->active, 0); + new->__minor = unit; + init_MUTEX(&new->reconfig_sem); + INIT_LIST_HEAD(&new->disks); + INIT_LIST_HEAD(&new->all_mddevs); + atomic_set(&new->active, 1); - /* - * The 'base' mddev is the one with data NULL. - * personalities can create additional mddevs - * if necessary. 
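
mddev_find() above is the classic optimistic find-or-create: look up under the lock; if nothing is there, allocate with no locks held (kmalloc may sleep), then retake the lock and either publish the allocation or discard it because another opener raced us to the slot. The same shape in miniature, with hypothetical names and libc calls standing in for kmalloc and the spinlock:

#include <stdlib.h>
#include <pthread.h>

#define MAX_UNITS 16

struct obj {
	int	refs;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table[MAX_UNITS];

static struct obj *obj_find(int unit)
{
	struct obj *o, *new = NULL;

retry:
	pthread_mutex_lock(&table_lock);
	if ((o = table[unit]) != NULL) {
		o->refs++;
		pthread_mutex_unlock(&table_lock);
		free(new);		/* lost the race, or never allocated */
		return o;
	}
	if (new) {
		table[unit] = new;	/* won the race: publish */
		pthread_mutex_unlock(&table_lock);
		return new;
	}
	pthread_mutex_unlock(&table_lock);

	new = calloc(1, sizeof(*new));	/* may block: no locks held here */
	if (!new)
		return NULL;
	new->refs = 1;			/* the caller's reference */
	goto retry;
}

The lookup test comes before the publish test, so a racing allocation is simply freed rather than leaked or doubly installed.
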
- */ - add_mddev_mapping(mddev, dev, 0); - list_add(&mddev->all_mddevs, &all_mddevs); + goto retry; +} - MOD_INC_USE_COUNT; +static inline int mddev_lock(mddev_t * mddev) +{ + return down_interruptible(&mddev->reconfig_sem); +} - return mddev; +static inline int mddev_trylock(mddev_t * mddev) +{ + return down_trylock(&mddev->reconfig_sem); +} + +static inline void mddev_unlock(mddev_t * mddev) +{ + up(&mddev->reconfig_sem); } mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) @@ -248,13 +267,12 @@ struct gendisk *hd; static char nomem [] = ""; dev_name_t *dname; - struct list_head *tmp = device_names.next; + struct list_head *tmp; - while (tmp != &device_names) { + list_for_each(tmp, &device_names) { dname = list_entry(tmp, dev_name_t, list); if (kdev_same(dname->dev, dev)) return dname->name; - tmp = tmp->next; } dname = (dev_name_t *) kmalloc(sizeof(*dname), GFP_KERNEL); @@ -274,7 +292,6 @@ } dname->dev = dev; - INIT_LIST_HEAD(&dname->list); list_add(&dname->list, &device_names); return dname->name; @@ -325,69 +342,6 @@ return 0; } -/* - * We check wether all devices are numbered from 0 to nb_dev-1. The - * order is guaranteed even after device name changes. - * - * Some personalities (raid0, linear) use this. Personalities that - * provide data have to be able to deal with loss of individual - * disks, so they do their checking themselves. - */ -int md_check_ordering(mddev_t *mddev) -{ - int i, c; - mdk_rdev_t *rdev; - struct list_head *tmp; - - /* - * First, all devices must be fully functional - */ - ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->faulty) { - printk(KERN_ERR "md: md%d's device %s faulty, aborting.\n", - mdidx(mddev), partition_name(rdev->dev)); - goto abort; - } - } - - c = 0; - ITERATE_RDEV(mddev,rdev,tmp) { - c++; - } - if (c != mddev->nb_dev) { - MD_BUG(); - goto abort; - } - if (mddev->nb_dev != mddev->sb->raid_disks) { - printk(KERN_ERR "md: md%d, array needs %d disks, has %d, aborting.\n", - mdidx(mddev), mddev->sb->raid_disks, mddev->nb_dev); - goto abort; - } - /* - * Now the numbering check - */ - for (i = 0; i < mddev->nb_dev; i++) { - c = 0; - ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->desc_nr == i) - c++; - } - if (!c) { - printk(KERN_ERR "md: md%d, missing disk #%d, aborting.\n", - mdidx(mddev), i); - goto abort; - } - if (c > 1) { - printk(KERN_ERR "md: md%d, too many disks #%d, aborting.\n", - mdidx(mddev), i); - goto abort; - } - } - return 0; -abort: - return 1; -} - static void remove_descriptor(mdp_disk_t *disk, mdp_super_t *sb) { if (disk_active(disk)) { @@ -617,8 +571,7 @@ list_add(&rdev->same_set, &mddev->disks); rdev->mddev = mddev; - mddev->nb_dev++; - printk(KERN_INFO "md: bind<%s,%d>\n", partition_name(rdev->dev), mddev->nb_dev); + printk(KERN_INFO "md: bind<%s>\n", partition_name(rdev->dev)); } static void unbind_rdev_from_array(mdk_rdev_t * rdev) @@ -627,11 +580,8 @@ MD_BUG(); return; } - list_del(&rdev->same_set); - INIT_LIST_HEAD(&rdev->same_set); - rdev->mddev->nb_dev--; - printk(KERN_INFO "md: unbind<%s,%d>\n", partition_name(rdev->dev), - rdev->mddev->nb_dev); + list_del_init(&rdev->same_set); + printk(KERN_INFO "md: unbind<%s>\n", partition_name(rdev->dev)); rdev->mddev = NULL; } @@ -681,13 +631,11 @@ MD_BUG(); unlock_rdev(rdev); free_disk_sb(rdev); - list_del(&rdev->all); - INIT_LIST_HEAD(&rdev->all); - if (rdev->pending.next != &rdev->pending) { + list_del_init(&rdev->all); + if (!list_empty(&rdev->pending)) { printk(KERN_INFO "md: (%s was pending)\n", partition_name(rdev->dev)); - list_del(&rdev->pending); - 
INIT_LIST_HEAD(&rdev->pending); + list_del_init(&rdev->pending); } #ifndef MODULE md_autodetect_dev(rdev->dev); @@ -721,7 +669,7 @@ } kick_rdev_from_array(rdev); } - if (mddev->nb_dev) + if (!list_empty(&mddev->disks)) MD_BUG(); } @@ -735,21 +683,6 @@ export_array(mddev); md_size[mdidx(mddev)] = 0; md_hd_struct[mdidx(mddev)].nr_sects = 0; - - /* - * Make sure nobody else is using this mddev - * (careful, we rely on the global kernel lock here) - */ - while (atomic_read(&mddev->resync_sem.count) != 1) - schedule(); - while (atomic_read(&mddev->recovery_sem.count) != 1) - schedule(); - - del_mddev_mapping(mddev, mk_kdev(MD_MAJOR, mdidx(mddev))); - list_del(&mddev->all_mddevs); - INIT_LIST_HEAD(&mddev->all_mddevs); - kfree(mddev); - MOD_DEC_USE_COUNT; } #undef BAD_CSUM @@ -891,12 +824,10 @@ struct list_head *tmp; mdk_rdev_t *rdev; - tmp = all_raid_disks.next; - while (tmp != &all_raid_disks) { + list_for_each(tmp, &all_raid_disks) { rdev = list_entry(tmp, mdk_rdev_t, all); if (kdev_same(rdev->dev, dev)) return rdev; - tmp = tmp->next; } return NULL; } @@ -992,12 +923,13 @@ return 0; } -int md_update_sb(mddev_t * mddev) +void __md_update_sb(mddev_t * mddev) { int err, count = 100; struct list_head *tmp; mdk_rdev_t *rdev; + mddev->sb_dirty = 0; repeat: mddev->sb->utime = CURRENT_TIME; if (!(++mddev->sb->events_lo)) @@ -1019,7 +951,7 @@ * nonpersistent superblocks */ if (mddev->sb->not_persistent) - return 0; + return; printk(KERN_INFO "md: updating md%d RAID superblock on device\n", mdidx(mddev)); @@ -1047,9 +979,18 @@ } printk(KERN_ERR "md: excessive errors occurred during superblock update, exiting\n"); } - return 0; } +void md_update_sb(mddev_t *mddev) +{ + if (mddev_lock(mddev)) + return; + if (mddev->sb_dirty) + __md_update_sb(mddev); + mddev_unlock(mddev); +} + + /* * Import a device. If 'on_disk', then sanity check the superblock * @@ -1121,6 +1062,7 @@ } list_add(&rdev->all, &all_raid_disks); INIT_LIST_HEAD(&rdev->pending); + INIT_LIST_HEAD(&rdev->same_set); if (rdev->faulty && rdev->sb) free_disk_sb(rdev); @@ -1573,7 +1515,6 @@ if (sb->level == -3) readahead = 0; } - md_maxreadahead[mdidx(mddev)] = readahead; printk(KERN_INFO "md%d: max total readahead window set to %ldk\n", mdidx(mddev), readahead*(PAGE_SIZE/1024)); @@ -1604,7 +1545,7 @@ mdk_rdev_t *rdev; - if (!mddev->nb_dev) { + if (list_empty(&mddev->disks)) { MD_BUG(); return -EINVAL; } @@ -1629,9 +1570,6 @@ chunk_size = mddev->sb->chunk_size; pnum = level_to_pers(mddev->sb->level); - mddev->param.chunk_size = chunk_size; - mddev->param.personality = pnum; - if ((pnum != MULTIPATH) && (pnum != RAID1)) { if (!chunk_size) { /* @@ -1711,6 +1649,9 @@ } mddev->pers = pers[pnum]; + blk_queue_make_request(&mddev->queue, mddev->pers->make_request); + mddev->queue.queuedata = mddev; + err = mddev->pers->run(mddev); if (err) { printk(KERN_ERR "md: pers->run() failed ...\n"); @@ -1718,9 +1659,15 @@ return -EINVAL; } - mddev->sb->state &= ~(1 << MD_SB_CLEAN); - md_update_sb(mddev); + mddev->in_sync = (mddev->sb->state & (1<pers->sync_request) + mddev->sb->state &= ~(1 << MD_SB_CLEAN); + __md_update_sb(mddev); + md_recover_arrays(); /* * md_size has units of 1K blocks, which are * twice as large as sectors. 
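
The two lines added above, blk_queue_make_request(&mddev->queue, ...) and mddev->queue.queuedata = mddev, are the pattern this patch applies throughout (umem earlier; linear, multipath, raid0 and raid1 below): each device owns its request queue, the make_request function receives that queue as its first argument, and the driver object comes back out of q->queuedata instead of being looked up by decoding the minor number through a global map such as the old mddev_map. Boiled down, with hypothetical driver names around the block API the patch itself uses:

struct mydev {
	request_queue_t	queue;
	/* ... per-device state ... */
};

static int my_make_request(request_queue_t *q, struct bio *bio)
{
	struct mydev *dev = q->queuedata;	/* no minor() decoding */

	/* ... stage or remap bio against dev ... */
	return 0;
}

static void my_init_one(struct mydev *dev)
{
	blk_queue_make_request(&dev->queue, my_make_request);
	dev->queue.queuedata = dev;
}

A blk_dev[major].queue hook (mm_queue_proc, md_queue_proc) then maps a kdev_t to the right per-device queue for the block layer.
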
@@ -1735,21 +1682,21 @@ #undef TOO_BIG_CHUNKSIZE #undef BAD_CHUNKSIZE -#define OUT(x) do { err = (x); goto out; } while (0) - static int restart_array(mddev_t *mddev) { - int err = 0; + int err; /* * Complain if it has no devices */ - if (!mddev->nb_dev) - OUT(-ENXIO); + err = -ENXIO; + if (list_empty(&mddev->disks)) + goto out; if (mddev->pers) { + err = -EBUSY; if (!mddev->ro) - OUT(-EBUSY); + goto out; mddev->ro = 0; set_device_ro(mddev_to_kdev(mddev), 0); @@ -1760,8 +1707,7 @@ * Kick recovery or resync if necessary */ md_recover_arrays(); - if (mddev->pers->restart_resync) - mddev->pers->restart_resync(mddev); + err = 0; } else { printk(KERN_ERR "md: md%d has no personality assigned.\n", mdidx(mddev)); @@ -1779,49 +1725,43 @@ static int do_md_stop(mddev_t * mddev, int ro) { - int err = 0, resync_interrupted = 0; + int err = 0; kdev_t dev = mddev_to_kdev(mddev); if (atomic_read(&mddev->active)>1) { printk(STILL_IN_USE, mdidx(mddev)); - OUT(-EBUSY); + err = -EBUSY; + goto out; } if (mddev->pers) { - /* - * It is safe to call stop here, it only frees private - * data. Also, it tells us if a device is unstoppable - * (eg. resyncing is in progress) - */ - if (mddev->pers->stop_resync) - if (mddev->pers->stop_resync(mddev)) - resync_interrupted = 1; - - if (mddev->recovery_running) - md_interrupt_thread(md_recovery_thread); - - /* - * This synchronizes with signal delivery to the - * resync or reconstruction thread. It also nicely - * hangs the process if some reconstruction has not - * finished. - */ - down(&mddev->recovery_sem); - up(&mddev->recovery_sem); + if (mddev->sync_thread) { + if (mddev->recovery_running > 0) + mddev->recovery_running = -EINTR; + md_unregister_thread(mddev->sync_thread); + mddev->sync_thread = NULL; + if (mddev->spare) { + mddev->pers->diskop(mddev, &mddev->spare, + DISKOP_SPARE_INACTIVE); + mddev->spare = NULL; + } + } invalidate_device(dev, 1); if (ro) { + err = -ENXIO; if (mddev->ro) - OUT(-ENXIO); + goto out; mddev->ro = 1; } else { if (mddev->ro) set_device_ro(dev, 0); if (mddev->pers->stop(mddev)) { + err = -EBUSY; if (mddev->ro) set_device_ro(dev, 1); - OUT(-EBUSY); + goto out; } if (mddev->ro) mddev->ro = 0; @@ -1831,11 +1771,11 @@ * mark it clean only if there was no resync * interrupted. */ - if (!mddev->recovery_running && !resync_interrupted) { + if (mddev->in_sync) { printk(KERN_INFO "md: marking sb clean...\n"); mddev->sb->state |= 1 << MD_SB_CLEAN; } - md_update_sb(mddev); + __md_update_sb(mddev); } if (ro) set_device_ro(dev, 1); @@ -1847,15 +1787,13 @@ if (!ro) { printk(KERN_INFO "md: md%d stopped.\n", mdidx(mddev)); free_mddev(mddev); - } else printk(KERN_INFO "md: md%d switched to read-only mode.\n", mdidx(mddev)); + err = 0; out: return err; } -#undef OUT - /* * We have to safely support old arrays too. */ @@ -1876,7 +1814,7 @@ struct list_head *tmp; int err; - if (mddev->disks.prev == &mddev->disks) { + if (list_empty(&mddev->disks)) { MD_BUG(); return; } @@ -1911,17 +1849,15 @@ * * If "unit" is allocated, then bump its reference count */ -static void autorun_devices(kdev_t countdev) +static void autorun_devices(void) { struct list_head candidates; struct list_head *tmp; mdk_rdev_t *rdev0, *rdev; mddev_t *mddev; - kdev_t md_kdev; - printk(KERN_INFO "md: autorun ...\n"); - while (pending_raid_disks.next != &pending_raid_disks) { + while (!list_empty(&pending_raid_disks)) { rdev0 = list_entry(pending_raid_disks.next, mdk_rdev_t, pending); @@ -1945,29 +1881,34 @@ * mostly sane superblocks. It's time to allocate the * mddev. 
*/ - md_kdev = mk_kdev(MD_MAJOR, rdev0->sb->md_minor); - mddev = kdev_to_mddev(md_kdev); - if (mddev) { - printk(KERN_WARNING "md: md%d already running, cannot run %s\n", - mdidx(mddev), partition_name(rdev0->dev)); - ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) - export_rdev(rdev); - continue; - } - mddev = alloc_mddev(md_kdev); + + mddev = mddev_find(rdev0->sb->md_minor); if (!mddev) { printk(KERN_ERR "md: cannot allocate memory for md drive.\n"); break; } - if (kdev_same(md_kdev, countdev)) - atomic_inc(&mddev->active); - printk(KERN_INFO "md: created md%d\n", mdidx(mddev)); - ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) { - bind_rdev_to_array(rdev, mddev); - list_del(&rdev->pending); - INIT_LIST_HEAD(&rdev->pending); + if (mddev_lock(mddev)) + printk(KERN_WARNING "md: md%d locked, cannot run\n", + mdidx(mddev)); + else if (mddev->sb || !list_empty(&mddev->disks)) { + printk(KERN_WARNING "md: md%d already running, cannot run %s\n", + mdidx(mddev), partition_name(rdev0->dev)); + mddev_unlock(mddev); + } else { + printk(KERN_INFO "md: created md%d\n", mdidx(mddev)); + ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) { + bind_rdev_to_array(rdev, mddev); + list_del_init(&rdev->pending); + } + autorun_array(mddev); + mddev_unlock(mddev); } - autorun_array(mddev); + /* on success, candidates will be empty, on error + * it wont... + */ + ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) + export_rdev(rdev); + mddev_put(mddev); } printk(KERN_INFO "md: ... autorun DONE.\n"); } @@ -2004,7 +1945,7 @@ #define AUTORUNNING KERN_INFO \ "md: auto-running md%d.\n" -static int autostart_array(kdev_t startdev, kdev_t countdev) +static int autostart_array(kdev_t startdev) { int err = -EINVAL, i; mdp_super_t *sb = NULL; @@ -2064,7 +2005,7 @@ /* * possibly return codes */ - autorun_devices(countdev); + autorun_devices(); return 0; abort: @@ -2190,7 +2131,7 @@ MD_BUG(); return -EINVAL; } - if (mddev->nb_dev) { + if (!list_empty(&mddev->disks)) { mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, mdk_rdev_t, same_set); if (!uuid_equal(rdev0, rdev)) { @@ -2345,8 +2286,7 @@ remove_descriptor(disk, mddev->sb); kick_rdev_from_array(rdev); - mddev->sb_dirty = 1; - md_update_sb(mddev); + __md_update_sb(mddev); return 0; busy: @@ -2457,9 +2397,7 @@ mddev->sb->spare_disks++; mddev->sb->working_disks++; - mddev->sb_dirty = 1; - - md_update_sb(mddev); + __md_update_sb(mddev); /* * Kick recovery, maybe this spare has to be added to the @@ -2519,36 +2457,6 @@ } #undef SET_SB -static int set_disk_info(mddev_t * mddev, void * arg) -{ - printk(KERN_INFO "md: not yet"); - return -EINVAL; -} - -static int clear_array(mddev_t * mddev) -{ - printk(KERN_INFO "md: not yet"); - return -EINVAL; -} - -static int write_raid_info(mddev_t * mddev) -{ - printk(KERN_INFO "md: not yet"); - return -EINVAL; -} - -static int protect_array(mddev_t * mddev) -{ - printk(KERN_INFO "md: not yet"); - return -EINVAL; -} - -static int unprotect_array(mddev_t * mddev) -{ - printk(KERN_INFO "md: not yet"); - return -EINVAL; -} - static int set_disk_faulty(mddev_t *mddev, kdev_t dev) { mdk_rdev_t *rdev; @@ -2594,7 +2502,7 @@ case PRINT_RAID_DEBUG: err = 0; md_print_devices(); - goto done_unlock; + goto done; #ifndef MODULE case RAID_AUTORUN: @@ -2631,40 +2539,30 @@ * Commands creating/starting a new array: */ - mddev = kdev_to_mddev(dev); + mddev = inode->i_bdev->bd_inode->u.generic_ip; - switch (cmd) - { - case SET_ARRAY_INFO: - case START_ARRAY: - if (mddev) { - printk(KERN_WARNING "md: array md%d already exists!\n", - mdidx(mddev)); - err = 
-EEXIST; - goto abort; - } - default:; + if (!mddev) { + BUG(); + goto abort; } + + err = mddev_lock(mddev); + if (err) { + printk(KERN_INFO "md: ioctl lock interrupted, reason %d, cmd %d\n", + err, cmd); + goto abort; + } + switch (cmd) { case SET_ARRAY_INFO: - mddev = alloc_mddev(dev); - if (!mddev) { - err = -ENOMEM; - goto abort; - } - atomic_inc(&mddev->active); - /* - * alloc_mddev() should possibly self-lock. - */ - err = lock_mddev(mddev); - if (err) { - printk(KERN_WARNING "md: ioctl, reason %d, cmd %d\n", - err, cmd); - goto abort; + if (!list_empty(&mddev->disks)) { + printk(KERN_WARNING "md: array md%d already has disks!\n", + mdidx(mddev)); + err = -EBUSY; + goto abort_unlock; } - if (mddev->sb) { printk(KERN_WARNING "md: array md%d already has a superblock!\n", mdidx(mddev)); @@ -2689,13 +2587,13 @@ /* * possibly make it lock the array ... */ - err = autostart_array(val_to_kdev(arg), dev); + err = autostart_array(val_to_kdev(arg)); if (err) { printk(KERN_WARNING "md: autostart %s failed!\n", partition_name(val_to_kdev(arg))); - goto abort; + goto abort_unlock; } - goto done; + goto done_unlock; default:; } @@ -2703,16 +2601,6 @@ /* * Commands querying/configuring an existing array: */ - - if (!mddev) { - err = -ENODEV; - goto abort; - } - err = lock_mddev(mddev); - if (err) { - printk(KERN_INFO "md: ioctl lock interrupted, reason %d, cmd %d\n",err, cmd); - goto abort; - } /* if we don't have a superblock yet, only ADD_NEW_DISK or STOP_ARRAY is allowed */ if (!mddev->sb && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) { err = -ENODEV; @@ -2737,8 +2625,7 @@ goto done_unlock; case STOP_ARRAY: - if (!(err = do_md_stop (mddev, 0))) - mddev = NULL; + err = do_md_stop (mddev, 0); goto done_unlock; case STOP_ARRAY_RO: @@ -2783,10 +2670,6 @@ switch (cmd) { - case CLEAR_ARRAY: - err = clear_array(mddev); - goto done_unlock; - case ADD_NEW_DISK: { mdu_disk_info_t info; @@ -2807,35 +2690,12 @@ err = hot_add_disk(mddev, val_to_kdev(arg)); goto done_unlock; - case SET_DISK_INFO: - err = set_disk_info(mddev, (void *)arg); - goto done_unlock; - - case WRITE_RAID_INFO: - err = write_raid_info(mddev); - goto done_unlock; - - case UNPROTECT_ARRAY: - err = unprotect_array(mddev); - goto done_unlock; - - case PROTECT_ARRAY: - err = protect_array(mddev); - goto done_unlock; - case SET_DISK_FAULTY: err = set_disk_faulty(mddev, val_to_kdev(arg)); goto done_unlock; case RUN_ARRAY: { -/* The data is never used.... - mdu_param_t param; - err = copy_from_user(¶m, (mdu_param_t *)arg, - sizeof(param)); - if (err) - goto abort_unlock; -*/ err = do_md_run (mddev); /* * we have to clean up the mess if @@ -2844,8 +2704,7 @@ */ if (err) { mddev->sb_dirty = 0; - if (!do_md_stop (mddev, 0)) - mddev = NULL; + do_md_stop (mddev, 0); } goto done_unlock; } @@ -2860,8 +2719,7 @@ done_unlock: abort_unlock: - if (mddev) - unlock_mddev(mddev); + mddev_unlock(mddev); return err; done: @@ -2874,19 +2732,34 @@ static int md_open(struct inode *inode, struct file *file) { /* - * Always succeed, but increment the usage count + * Succeed if we can find or allocate a mddev structure. 
*/ - mddev_t *mddev = kdev_to_mddev(inode->i_rdev); - if (mddev) - atomic_inc(&mddev->active); - return (0); + mddev_t *mddev = mddev_find(minor(inode->i_rdev)); + int err = -ENOMEM; + + if (!mddev) + goto out; + + if ((err = mddev_lock(mddev))) + goto put; + + err = 0; + mddev_unlock(mddev); + inode->i_bdev->bd_inode->u.generic_ip = mddev_get(mddev); + put: + mddev_put(mddev); + out: + return err; } static int md_release(struct inode *inode, struct file * file) { - mddev_t *mddev = kdev_to_mddev(inode->i_rdev); - if (mddev) - atomic_dec(&mddev->active); + mddev_t *mddev = inode->i_bdev->bd_inode->u.generic_ip; + + if (!mddev) + BUG(); + mddev_put(mddev); + return 0; } @@ -2917,6 +2790,7 @@ */ daemonize(); + reparent_to_init(); sprintf(current->comm, thread->name); current->exit_signal = SIGCHLD; @@ -2940,17 +2814,10 @@ complete(thread->event); while (thread->run) { void (*run)(void *data); - DECLARE_WAITQUEUE(wait, current); - add_wait_queue(&thread->wqueue, &wait); - set_task_state(current, TASK_INTERRUPTIBLE); - if (!test_bit(THREAD_WAKEUP, &thread->flags)) { - dprintk("md: thread %p went to sleep.\n", thread); - schedule(); - dprintk("md: thread %p woke up.\n", thread); - } - current->state = TASK_RUNNING; - remove_wait_queue(&thread->wqueue, &wait); + wait_event_interruptible(thread->wqueue, + test_bit(THREAD_WAKEUP, &thread->flags)); + clear_bit(THREAD_WAKEUP, &thread->flags); run = thread->run; @@ -3025,7 +2892,7 @@ kfree(thread); } -void md_recover_arrays(void) +static void md_recover_arrays(void) { if (!md_recovery_thread) { MD_BUG(); @@ -3041,7 +2908,7 @@ kdev_t rdev = to_kdev_t(bdev->bd_dev); dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", - major(dev),minor(dev),major(rdev),minor(rdev), + MD_MAJOR,mdidx(mddev),major(rdev),minor(rdev), __builtin_return_address(0),__builtin_return_address(1), __builtin_return_address(2),__builtin_return_address(3)); @@ -3054,17 +2921,14 @@ return 0; if (!mddev->pers->error_handler || mddev->pers->error_handler(mddev,rdev) <= 0) { - free_disk_sb(rrdev); rrdev->faulty = 1; } else return 1; /* * if recovery was running, stop it now. */ - if (mddev->pers->stop_resync) - mddev->pers->stop_resync(mddev); - if (mddev->recovery_running) - md_interrupt_thread(md_recovery_thread); + if (mddev->recovery_running) + mddev->recovery_running = -EIO; md_recover_arrays(); return 0; @@ -3079,7 +2943,7 @@ sz += sprintf(page + sz, "unused devices: "); ITERATE_RDEV_ALL(rdev,tmp) { - if (!rdev->same_set.next && !rdev->same_set.prev) { + if (list_empty(&rdev->same_set)) { /* * The device is not yet used by any array. */ @@ -3122,18 +2986,9 @@ sz += sprintf(page + sz, "."); sz += sprintf(page + sz, "] "); } - if (!mddev->recovery_running) - /* - * true resync - */ - sz += sprintf(page + sz, " resync =%3lu.%lu%% (%lu/%lu)", - res/10, res % 10, resync, max_blocks); - else - /* - * recovery ... - */ - sz += sprintf(page + sz, " recovery =%3lu.%lu%% (%lu/%lu)", - res/10, res % 10, resync, max_blocks); + sz += sprintf(page + sz, " %s =%3lu.%lu%% (%lu/%lu)", + (mddev->spare ? "recovery" : "resync"), + res/10, res % 10, resync, max_blocks); /* * We do not want to overflow, so the order of operands and @@ -3171,7 +3026,7 @@ sz += sprintf(page+sz, "\n"); - ITERATE_MDDEV(mddev,tmp) { + ITERATE_MDDEV(mddev,tmp) if (mddev_lock(mddev)==0) { sz += sprintf(page + sz, "md%d : %sactive", mdidx(mddev), mddev->pers ? 
"" : "in"); if (mddev->pers) { @@ -3191,7 +3046,7 @@ size += rdev->size; } - if (mddev->nb_dev) { + if (!list_empty(&mddev->disks)) { if (mddev->pers) sz += sprintf(page + sz, "\n %d blocks", md_size[mdidx(mddev)]); @@ -3201,19 +3056,20 @@ if (!mddev->pers) { sz += sprintf(page+sz, "\n"); + mddev_unlock(mddev); continue; } sz += mddev->pers->status (page+sz, mddev); sz += sprintf(page+sz, "\n "); - if (mddev->curr_resync) { + if (mddev->curr_resync > 1) sz += status_resync (page+sz, mddev); - } else { - if (atomic_read(&mddev->resync_sem.count) != 1) + else if (mddev->curr_resync == 1) sz += sprintf(page + sz, " resync=DELAYED"); - } + sz += sprintf(page + sz, "\n"); + mddev_unlock(mddev); } sz += status_unused(page + sz); @@ -3314,60 +3170,70 @@ return idle; } -DECLARE_WAIT_QUEUE_HEAD(resync_wait); - void md_done_sync(mddev_t *mddev, int blocks, int ok) { /* another "blocks" (512byte) blocks have been synced */ atomic_sub(blocks, &mddev->recovery_active); wake_up(&mddev->recovery_wait); if (!ok) { + mddev->recovery_running = -EIO; + md_recover_arrays(); // stop recovery, signal do_sync .... } } + +DECLARE_WAIT_QUEUE_HEAD(resync_wait); + #define SYNC_MARKS 10 #define SYNC_MARK_STEP (3*HZ) -int md_do_sync(mddev_t *mddev, mdp_disk_t *spare) +static void md_do_sync(void *data) { + mddev_t *mddev = data; mddev_t *mddev2; unsigned int max_sectors, currspeed = 0, - j, window, err, serialize; + j, window, err; unsigned long mark[SYNC_MARKS]; unsigned long mark_cnt[SYNC_MARKS]; int last_mark,m; struct list_head *tmp; unsigned long last_check; + /* just incase thread restarts... */ + if (mddev->recovery_running <= 0) + return; - err = down_interruptible(&mddev->resync_sem); - if (err) - goto out_nolock; + /* we overload curr_resync somewhat here. + * 0 == not engaged in resync at all + * 2 == checking that there is no conflict with another sync + * 1 == like 2, but have yielded to allow conflicting resync to + * commense + * other == active in resync - this many blocks + */ + do { + mddev->curr_resync = 2; -recheck: - serialize = 0; - ITERATE_MDDEV(mddev2,tmp) { - if (mddev2 == mddev) - continue; - if (mddev2->curr_resync && match_mddev_units(mddev,mddev2)) { - printk(KERN_INFO "md: delaying resync of md%d until md%d " - "has finished resync (they share one or more physical units)\n", - mdidx(mddev), mdidx(mddev2)); - serialize = 1; - break; - } - } - if (serialize) { - interruptible_sleep_on(&resync_wait); - if (signal_pending(current)) { - flush_curr_signals(); - err = -EINTR; - goto out; + ITERATE_MDDEV(mddev2,tmp) { + if (mddev2 == mddev) + continue; + if (mddev2->curr_resync && + match_mddev_units(mddev,mddev2)) { + printk(KERN_INFO "md: delaying resync of md%d until md%d " + "has finished resync (they share one or more physical units)\n", + mdidx(mddev), mdidx(mddev2)); + if (mddev < mddev2) /* arbitrarily yield */ + mddev->curr_resync = 1; + if (wait_event_interruptible(resync_wait, + mddev2->curr_resync < 2)) { + flush_curr_signals(); + err = -EINTR; + mddev_put(mddev2); + goto out; + } + } } - goto recheck; - } + } while (mddev->curr_resync < 2); - mddev->curr_resync = 1; max_sectors = mddev->sb->size << 1; printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev)); @@ -3405,7 +3271,7 @@ } atomic_add(sectors, &mddev->recovery_active); j += sectors; - mddev->curr_resync = j; + if (j>1) mddev->curr_resync = j; if (last_check + window > j) continue; @@ -3431,7 +3297,6 @@ /* * got a signal, exit. */ - mddev->curr_resync = 0; printk(KERN_INFO "md: md_do_sync() got signal ... 
exiting\n"); flush_curr_signals(); err = -EINTR; @@ -3466,106 +3331,116 @@ */ out: wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); - up(&mddev->resync_sem); -out_nolock: + /* tell personality that we are finished */ + mddev->pers->sync_request(mddev, max_sectors, 1); + mddev->curr_resync = 0; - wake_up(&resync_wait); - return err; + if (err) + mddev->recovery_running = err; + if (mddev->recovery_running > 0) + mddev->recovery_running = 0; + if (mddev->recovery_running == 0) + mddev->in_sync = 1; + md_recover_arrays(); } /* - * This is a kernel thread which syncs a spare disk with the active array - * - * the amount of foolproofing might seem to be a tad excessive, but an - * early (not so error-safe) version of raid1syncd synced the first 0.5 gigs - * of my root partition with the first 0.5 gigs of my /home partition ... so - * i'm a bit nervous ;) + * This is the kernel thread that watches all md arrays for re-sync action + * that might be needed. + * It does not do any resync itself, but rather "forks" off other threads + * to do that as needed. + * When it is determined that resync is needed, we set "->recovery_running" and + * create a thread at ->sync_thread. + * When the thread finishes is clears recovery_running (or set and error) + * and wakeup up this thread which will reap the thread and finish up. */ void md_do_recovery(void *data) { - int err; mddev_t *mddev; mdp_super_t *sb; - mdp_disk_t *spare; struct list_head *tmp; - printk(KERN_INFO "md: recovery thread got woken up ...\n"); -restart: - ITERATE_MDDEV(mddev,tmp) { + dprintk(KERN_INFO "md: recovery thread got woken up ...\n"); + + ITERATE_MDDEV(mddev,tmp) if (mddev_lock(mddev)==0) { sb = mddev->sb; - if (!sb) - continue; - if (mddev->recovery_running) - continue; - if (sb->active_disks == sb->raid_disks) - continue; - if (!sb->spare_disks) { - printk(KERN_ERR "md%d: no spare disk to reconstruct array! " - "-- continuing in degraded mode\n", mdidx(mddev)); - continue; - } - /* - * now here we get the spare and resync it. - */ - spare = get_spare(mddev); - if (!spare) - continue; - printk(KERN_INFO "md%d: resyncing spare disk %s to replace failed disk\n", - mdidx(mddev), partition_name(mk_kdev(spare->major,spare->minor))); - if (!mddev->pers->diskop) - continue; - if (mddev->pers->diskop(mddev, &spare, DISKOP_SPARE_WRITE)) - continue; - down(&mddev->recovery_sem); - mddev->recovery_running = 1; - err = md_do_sync(mddev, spare); - if (err == -EIO) { - printk(KERN_INFO "md%d: spare disk %s failed, skipping to next spare.\n", - mdidx(mddev), partition_name(mk_kdev(spare->major,spare->minor))); - if (!disk_faulty(spare)) { - mddev->pers->diskop(mddev,&spare,DISKOP_SPARE_INACTIVE); - mark_disk_faulty(spare); - mark_disk_nonsync(spare); - mark_disk_inactive(spare); - sb->spare_disks--; - sb->working_disks--; - sb->failed_disks++; + if (!sb || !mddev->pers || !mddev->pers->diskop || mddev->ro) + goto unlock; + if (mddev->recovery_running > 0) + /* resync/recovery still happening */ + goto unlock; + if (mddev->sync_thread) { + /* resync has finished, collect result */ + md_unregister_thread(mddev->sync_thread); + mddev->sync_thread = NULL; + if (mddev->recovery_running < 0) { + /* some sort of failure. 
+ * If we were doing a reconstruction, + * we need to retrieve the spare + */ + if (mddev->spare) { + mddev->pers->diskop(mddev, &mddev->spare, + DISKOP_SPARE_INACTIVE); + mddev->spare = NULL; + } + } else { + /* success...*/ + if (mddev->spare) { + mddev->pers->diskop(mddev, &mddev->spare, + DISKOP_SPARE_ACTIVE); + mark_disk_sync(mddev->spare); + mark_disk_active(mddev->spare); + sb->active_disks++; + sb->spare_disks--; + mddev->spare = NULL; + } } - } else - if (disk_faulty(spare)) - mddev->pers->diskop(mddev, &spare, - DISKOP_SPARE_INACTIVE); - if (err == -EINTR || err == -ENOMEM) { - /* - * Recovery got interrupted, or ran out of mem ... - * signal back that we have finished using the array. - */ - mddev->pers->diskop(mddev, &spare, - DISKOP_SPARE_INACTIVE); - up(&mddev->recovery_sem); + __md_update_sb(mddev); mddev->recovery_running = 0; - continue; - } else { + wake_up(&resync_wait); + goto unlock; + } + if (mddev->recovery_running) { + /* that's odd.. */ mddev->recovery_running = 0; - up(&mddev->recovery_sem); + wake_up(&resync_wait); } - if (!disk_faulty(spare)) { - /* - * the SPARE_ACTIVE diskop possibly changes the - * pointer too - */ - mddev->pers->diskop(mddev, &spare, DISKOP_SPARE_ACTIVE); - mark_disk_sync(spare); - mark_disk_active(spare); - sb->active_disks++; - sb->spare_disks--; + + if (sb->active_disks < sb->raid_disks) { + mddev->spare = get_spare(mddev); + if (!mddev->spare) + printk(KERN_ERR "md%d: no spare disk to reconstruct array! " + "-- continuing in degraded mode\n", mdidx(mddev)); + else + printk(KERN_INFO "md%d: resyncing spare disk %s to replace failed disk\n", + mdidx(mddev), partition_name(mk_kdev(mddev->spare->major,mddev->spare->minor))); + } + if (!mddev->spare && mddev->in_sync) { + /* nothing we can do ... */ + goto unlock; + } + if (mddev->pers->sync_request) { + mddev->sync_thread = md_register_thread(md_do_sync, + mddev, + "md_resync"); + if (!mddev->sync_thread) { + printk(KERN_ERR "md%d: could not start resync thread...\n", mdidx(mddev)); + if (mddev->spare) + mddev->pers->diskop(mddev, &mddev->spare, DISKOP_SPARE_INACTIVE); + mddev->spare = NULL; + mddev->recovery_running = 0; + } else { + if (mddev->spare) + mddev->pers->diskop(mddev, &mddev->spare, DISKOP_SPARE_WRITE); + mddev->recovery_running = 1; + md_wakeup_thread(mddev->sync_thread); + } } - mddev->sb_dirty = 1; - md_update_sb(mddev); - goto restart; + unlock: + mddev_unlock(mddev); } - printk(KERN_INFO "md: recovery thread finished ...\n"); + dprintk(KERN_INFO "md: recovery thread finished ...\n"); } @@ -3581,7 +3456,8 @@ return NOTIFY_DONE; ITERATE_MDDEV(mddev,tmp) - do_md_stop (mddev, 1); + if (mddev_trylock(mddev)==0) + do_md_stop (mddev, 1); /* * certain more exotic SCSI devices are known to be * volatile wrt too early system reboots. While the @@ -3605,7 +3481,6 @@ for(i = 0; i < MAX_MD_DEVS; i++) { md_size[i] = 0; - md_maxreadahead[i] = 32; } blk_size[MAJOR_NR] = md_size; @@ -3616,6 +3491,18 @@ #endif } +request_queue_t * md_queue_proc(kdev_t dev) +{ + mddev_t *mddev = mddev_find(minor(dev)); + request_queue_t *q = BLK_DEFAULT_QUEUE(MAJOR_NR); + if (!mddev || atomic_read(&mddev->active)<2) + BUG(); + if (mddev->pers) + q = &mddev->queue; + mddev_put(mddev); /* the caller must hold a reference... 
*/ + return q; +} + int __init md_init(void) { static char * name = "mdrecoveryd"; @@ -3640,8 +3527,9 @@ S_IFBLK | S_IRUSR | S_IWUSR, &md_fops, NULL); } - /* forward all md request to md_make_request */ - blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_make_request); + /* all requests on an uninitialised device get failed... */ + blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_fail_request); + blk_dev[MAJOR_NR].queue = md_queue_proc; add_gendisk(&md_gendisk); @@ -3719,7 +3607,7 @@ } dev_cnt = 0; - autorun_devices(to_kdev_t(-1)); + autorun_devices(); } static struct { @@ -3858,17 +3746,27 @@ if (!md_setup_args.device_set[minor]) continue; - if (mddev_map[minor].mddev) { + printk(KERN_INFO "md: Loading md%d: %s\n", minor, md_setup_args.device_names[minor]); + + mddev = mddev_find(minor); + if (!mddev) { + printk(KERN_ERR "md: kmalloc failed - cannot start array %d\n", minor); + continue; + } + if (mddev_lock(mddev)) { printk(KERN_WARNING - "md: Ignoring md=%d, already autodetected. (Use raid=noautodetect)\n", + "md: Ignoring md=%d, cannot lock!\n", minor); + mddev_put(mddev); continue; } - printk(KERN_INFO "md: Loading md%d: %s\n", minor, md_setup_args.device_names[minor]); - mddev = alloc_mddev(mk_kdev(MD_MAJOR,minor)); - if (!mddev) { - printk(KERN_ERR "md: kmalloc failed - cannot start array %d\n", minor); + if (mddev->sb || !list_empty(&mddev->disks)) { + printk(KERN_WARNING + "md: Ignoring md=%d, already autodetected. (Use raid=noautodetect)\n", + minor); + mddev_unlock(mddev); + mddev_put(mddev); continue; } if (md_setup_args.pers[minor]) { @@ -3922,6 +3820,8 @@ do_md_stop(mddev, 0); printk(KERN_WARNING "md: starting md%d failed\n", minor); } + mddev_unlock(mddev); + mddev_put(mddev); } } @@ -3972,9 +3872,10 @@ static void free_device_names(void) { - while (device_names.next != &device_names) { - struct list_head *tmp = device_names.next; - list_del(tmp); + while (!list_empty(&device_names)) { + struct dname *tmp = list_entry(device_names.next, + dev_name_t, list); + list_del(&tmp->list); kfree(tmp); } } @@ -4005,10 +3906,8 @@ EXPORT_SYMBOL(unregister_md_personality); EXPORT_SYMBOL(partition_name); EXPORT_SYMBOL(md_error); -EXPORT_SYMBOL(md_do_sync); EXPORT_SYMBOL(md_sync_acct); EXPORT_SYMBOL(md_done_sync); -EXPORT_SYMBOL(md_recover_arrays); EXPORT_SYMBOL(md_register_thread); EXPORT_SYMBOL(md_unregister_thread); EXPORT_SYMBOL(md_update_sb); @@ -4016,7 +3915,5 @@ EXPORT_SYMBOL(md_print_devices); EXPORT_SYMBOL(find_rdev_nr); EXPORT_SYMBOL(md_interrupt_thread); -EXPORT_SYMBOL(mddev_map); -EXPORT_SYMBOL(md_check_ordering); EXPORT_SYMBOL(get_spare); MODULE_LICENSE("GPL"); diff -Nru a/drivers/md/multipath.c b/drivers/md/multipath.c --- a/drivers/md/multipath.c Tue Jun 18 19:12:02 2002 +++ b/drivers/md/multipath.c Tue Jun 18 19:12:02 2002 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -243,27 +244,19 @@ return 0; } -static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio) +static int multipath_make_request (request_queue_t *q, struct bio * bio) { + mddev_t *mddev = q->queuedata; multipath_conf_t *conf = mddev_to_conf(mddev); struct bio *real_bio; struct multipath_bh * mp_bh; struct multipath_info *multipath; -/* - * make_request() can abort the operation when READA is being - * used and no empty request is available. - * - * Currently, just replace the command with READ/WRITE. 
- */ - if (rw == READA) - rw = READ; - mp_bh = multipath_alloc_mpbh (conf); mp_bh->master_bio = bio; mp_bh->mddev = mddev; - mp_bh->cmd = rw; + mp_bh->cmd = bio_data_dir(bio); /* * read balancing logic: @@ -272,7 +265,7 @@ real_bio = bio_clone(bio, GFP_NOIO); real_bio->bi_bdev = multipath->bdev; - real_bio->bi_rw = rw; + real_bio->bi_rw = bio_data_dir(bio); real_bio->bi_end_io = multipath_end_request; real_bio->bi_private = mp_bh; mp_bh->bio = real_bio; @@ -707,7 +700,6 @@ mddev = mp_bh->mddev; if (mddev->sb_dirty) { printk(KERN_INFO "dirty sb detected, updating.\n"); - mddev->sb_dirty = 0; md_update_sb(mddev); } bio = mp_bh->bio; diff -Nru a/drivers/md/raid0.c b/drivers/md/raid0.c --- a/drivers/md/raid0.c Tue Jun 18 19:12:02 2002 +++ b/drivers/md/raid0.c Tue Jun 18 19:12:02 2002 @@ -20,6 +20,7 @@ #include #include +#include #define MAJOR_NR MD_MAJOR #define MD_DRIVER @@ -28,21 +29,26 @@ static int create_strip_zones (mddev_t *mddev) { - int i, c, j, j1, j2; + int i, c, j; unsigned long current_offset, curr_zone_offset; raid0_conf_t *conf = mddev_to_conf(mddev); mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev; + struct list_head *tmp1, *tmp2; + struct strip_zone *zone; + int cnt; /* * The number of 'same size groups' */ conf->nr_strip_zones = 0; - ITERATE_RDEV_ORDERED(mddev,rdev1,j1) { + ITERATE_RDEV(mddev,rdev1,tmp1) { printk("raid0: looking at %s\n", partition_name(rdev1->dev)); c = 0; - ITERATE_RDEV_ORDERED(mddev,rdev2,j2) { - printk("raid0: comparing %s(%ld) with %s(%ld)\n", partition_name(rdev1->dev), rdev1->size, partition_name(rdev2->dev), rdev2->size); + ITERATE_RDEV(mddev,rdev2,tmp2) { + printk("raid0: comparing %s(%ld) with %s(%ld)\n", + partition_name(rdev1->dev), rdev1->size, + partition_name(rdev2->dev), rdev2->size); if (rdev2 == rdev1) { printk("raid0: END\n"); break; @@ -50,7 +56,7 @@ if (rdev2->size == rdev1->size) { /* - * Not unique, dont count it as a new + * Not unique, don't count it as a new * group */ printk("raid0: EQUAL\n"); @@ -65,29 +71,62 @@ printk("raid0: %d zones\n", conf->nr_strip_zones); } } - printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); + printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); conf->strip_zone = vmalloc(sizeof(struct strip_zone)* conf->nr_strip_zones); if (!conf->strip_zone) return 1; + memset(conf->strip_zone, 0,sizeof(struct strip_zone)* + conf->nr_strip_zones); + /* The first zone must contain all devices, so here we check that + * there is a proper alignment of slots to devices and find them all + */ + zone = &conf->strip_zone[0]; + cnt = 0; + smallest = NULL; + ITERATE_RDEV(mddev, rdev1, tmp1) { + int j = rdev1->sb->this_disk.raid_disk; + + if (j < 0 || j >= mddev->sb->raid_disks) { + printk("raid0: bad disk number %d - aborting!\n", j); + goto abort; + } + if (zone->dev[j]) { + printk("raid0: multiple devices for %d - aborting!\n", j); + goto abort; + } + zone->dev[j] = rdev1; + if (!smallest || (rdev1->size < smallest->size)) + smallest = rdev1; + cnt++; + } + if (cnt != mddev->sb->raid_disks) { + printk("raid0: too few disks (%d of %d) - aborting!\n", cnt, + mddev->sb->raid_disks); + goto abort; + } + zone->nb_dev = cnt; + zone->size = smallest->size * cnt; + zone->zone_offset = 0; + + conf->smallest = zone; + current_offset = smallest->size; + curr_zone_offset = zone->size; - conf->smallest = NULL; - current_offset = 0; - curr_zone_offset = 0; - - for (i = 0; i < conf->nr_strip_zones; i++) + /* now do the other zones */ + for (i = 1; i < conf->nr_strip_zones; i++) { - struct strip_zone *zone = conf->strip_zone + i; + zone = 
conf->strip_zone + i; printk("raid0: zone %d\n", i); zone->dev_offset = current_offset; smallest = NULL; c = 0; - ITERATE_RDEV_ORDERED(mddev,rdev,j) { - + for (j=0; j<mddev->sb->raid_disks; j++) { + rdev = conf->strip_zone[0].dev[j]; printk("raid0: checking %s ...", partition_name(rdev->dev)); if (rdev->size > current_offset) { @@ -117,6 +156,9 @@ } printk("raid0: done.\n"); return 0; + abort: + vfree(conf->strip_zone); + return 1; } static int raid0_run (mddev_t *mddev) @@ -131,11 +173,6 @@ goto out; mddev->private = (void *)conf; - if (md_check_ordering(mddev)) { - printk("raid0: disks are not ordered, aborting!\n"); - goto out_free_conf; - } - if (create_strip_zones (mddev)) goto out_free_conf; @@ -224,8 +261,9 @@ * Of course, those facts may not be valid anymore (and surely won't...) * Hey guys, there's some work out there ;-) */ -static int raid0_make_request (mddev_t *mddev, int rw, struct bio *bio) +static int raid0_make_request (request_queue_t *q, struct bio *bio) { + mddev_t *mddev = q->queuedata; unsigned int sect_in_chunk, chunksize_bits, chunk_size; raid0_conf_t *conf = mddev_to_conf(mddev); struct raid0_hash *hash; @@ -233,7 +271,7 @@ mdk_rdev_t *tmp_dev; unsigned long chunk, block, rsect; - chunk_size = mddev->param.chunk_size >> 10; + chunk_size = mddev->sb->chunk_size >> 10; chunksize_bits = ffz(~chunk_size); block = bio->bi_sector >> 1; hash = conf->hash_table + block / conf->smallest->size; @@ -322,7 +360,7 @@ conf->strip_zone[j].size); } #endif - sz += sprintf(page + sz, " %dk chunks", mddev->param.chunk_size/1024); + sz += sprintf(page + sz, " %dk chunks", mddev->sb->chunk_size/1024); return sz; } diff -Nru a/drivers/md/raid1.c b/drivers/md/raid1.c --- a/drivers/md/raid1.c Tue Jun 18 19:12:01 2002 +++ b/drivers/md/raid1.c Tue Jun 18 19:12:01 2002 @@ -23,6 +23,7 @@ */ #include +#include #define MAJOR_NR MD_MAJOR #define MD_DRIVER @@ -333,7 +334,7 @@ * device if no resync is going on, or below the resync window. * We take the first readable disk when above the resync window. */ - if (conf->resync_mirrors && (this_sector + sectors >= conf->next_resync)) { + if (!conf->mddev->in_sync && (this_sector + sectors >= conf->next_resync)) { /* make sure that disk is operational */ new_disk = 0; while (!conf->mirrors[new_disk].operational || conf->mirrors[new_disk].write_only) { @@ -433,8 +434,9 @@ spin_unlock_irq(&conf->resync_lock); } -static int make_request(mddev_t *mddev, int rw, struct bio * bio) +static int make_request(request_queue_t *q, struct bio * bio) { + mddev_t *mddev = q->queuedata; conf_t *conf = mddev_to_conf(mddev); mirror_info_t *mirror; r1bio_t *r1_bio; @@ -455,20 +457,16 @@ * make_request() can abort the operation when READA is being * used and no empty request is available. * - * Currently, just replace the command with READ. 
*/ - if (rw == READA) - rw = READ; - r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); r1_bio->master_bio = bio; r1_bio->mddev = mddev; r1_bio->sector = bio->bi_sector; - r1_bio->cmd = rw; + r1_bio->cmd = bio_data_dir(bio); - if (rw == READ) { + if (r1_bio->cmd == READ) { /* * read balancing logic: */ @@ -482,7 +480,7 @@ read_bio->bi_sector = r1_bio->sector; read_bio->bi_bdev = mirror->bdev; read_bio->bi_end_io = end_request; - read_bio->bi_rw = rw; + read_bio->bi_rw = r1_bio->cmd; read_bio->bi_private = r1_bio; generic_make_request(read_bio); @@ -506,7 +504,7 @@ mbio->bi_sector = r1_bio->sector; mbio->bi_bdev = conf->mirrors[i].bdev; mbio->bi_end_io = end_request; - mbio->bi_rw = rw; + mbio->bi_rw = r1_bio->cmd; mbio->bi_private = r1_bio; sum_bios++; @@ -655,6 +653,9 @@ if (conf->barrier) BUG(); if (waitqueue_active(&conf->wait_idle)) BUG(); if (waitqueue_active(&conf->wait_resume)) BUG(); + + mempool_destroy(conf->r1buf_pool); + conf->r1buf_pool = NULL; } static int diskop(mddev_t *mddev, mdp_disk_t **d, int state) @@ -771,7 +772,6 @@ * Deactivate a spare disk: */ case DISKOP_SPARE_INACTIVE: - close_sync(conf); sdisk = conf->mirrors + spare_disk; sdisk->operational = 0; sdisk->write_only = 0; @@ -784,7 +784,6 @@ * property) */ case DISKOP_SPARE_ACTIVE: - close_sync(conf); sdisk = conf->mirrors + spare_disk; fdisk = conf->mirrors + failed_disk; @@ -918,10 +917,6 @@ } abort: spin_unlock_irq(&conf->device_lock); - if (state == DISKOP_SPARE_ACTIVE || state == DISKOP_SPARE_INACTIVE) { - mempool_destroy(conf->r1buf_pool); - conf->r1buf_pool = NULL; - } print_conf(conf); return err; @@ -1011,7 +1006,7 @@ * we read from here, no need to write */ continue; - if (i < conf->raid_disks && !conf->resync_mirrors) + if (i < conf->raid_disks && mddev->in_sync) /* * don't need to write this we are just rebuilding */ @@ -1087,7 +1082,6 @@ conf = mddev_to_conf(mddev); if (mddev->sb_dirty) { printk(KERN_INFO "raid1: dirty sb detected, updating.\n"); - mddev->sb_dirty = 0; md_update_sb(mddev); } bio = r1_bio->master_bio; @@ -1117,31 +1111,6 @@ spin_unlock_irqrestore(&retry_list_lock, flags); } -/* - * Private kernel thread to reconstruct mirrors after an unclean - * shutdown. - */ -static void raid1syncd(void *data) -{ - conf_t *conf = data; - mddev_t *mddev = conf->mddev; - - if (!conf->resync_mirrors) - return; - if (conf->resync_mirrors == 2) - return; - down(&mddev->recovery_sem); - if (!md_do_sync(mddev, NULL)) { - /* - * Only if everything went Ok. - */ - conf->resync_mirrors = 0; - } - - close_sync(conf); - - up(&mddev->recovery_sem); -} static int init_resync(conf_t *conf) { @@ -1176,9 +1145,16 @@ sector_t max_sector, nr_sectors; int disk, partial; - if (!sector_nr) + if (sector_nr == 0) if (init_resync(conf)) return -ENOMEM; + + max_sector = mddev->sb->size << 1; + if (sector_nr >= max_sector) { + close_sync(conf); + return 0; + } + /* * If there is non-resync activity waiting for us then * put in a delay to throttle resync. 
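The hunks above give raid1's sync_request() a self-contained life cycle now that the private raid1syncd thread is gone: the first call (sector_nr == 0) sets up the r1buf pool, and any call at or past the array size calls close_sync() and returns 0 to tell the generic resync loop it is finished. The following is a minimal userspace sketch of that calling contract, not kernel code; all toy_* names are hypothetical stand-ins.

/* Toy model of the sync_request contract: sector 0 initializes state,
 * reaching max_sector tears it down, and the return value is the number
 * of sectors handled (0 = finished, negative = error). */
#include <stdio.h>
#include <stdlib.h>

#define RESYNC_SECTORS 128	/* stand-in for RESYNC_BLOCK_SIZE >> 9 */

struct toy_conf { int pool_ready; };

static long toy_sync_request(struct toy_conf *conf, unsigned long sector_nr,
			     unsigned long max_sector)
{
	if (sector_nr == 0)
		conf->pool_ready = 1;		/* init_resync() analogue */
	if (sector_nr >= max_sector) {
		conf->pool_ready = 0;		/* close_sync() analogue */
		return 0;			/* nothing left to do */
	}
	if (!conf->pool_ready)
		return -1;
	/* ... a real implementation would issue reads/writes here ... */
	if (max_sector - sector_nr < RESYNC_SECTORS)
		return max_sector - sector_nr;	/* partial final window */
	return RESYNC_SECTORS;
}

int main(void)
{
	struct toy_conf conf = { 0 };
	unsigned long sector = 0, max_sector = 1000;
	long n;

	/* The driver loop keeps calling with increasing sector numbers. */
	while ((n = toy_sync_request(&conf, sector, max_sector)) > 0)
		sector += n;
	printf("resynced %lu of %lu sectors\n", sector, max_sector);
	return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}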
@@ -1215,10 +1191,6 @@ r1_bio->sector = sector_nr; r1_bio->cmd = SPECIAL; - max_sector = mddev->sb->size << 1; - if (sector_nr >= max_sector) - BUG(); - bio = r1_bio->master_bio; nr_sectors = RESYNC_BLOCK_SIZE >> 9; if (max_sector - sector_nr < nr_sectors) @@ -1301,7 +1273,6 @@ mdp_disk_t *descriptor; mdk_rdev_t *rdev; struct list_head *tmp; - int start_recovery = 0; MOD_INC_USE_COUNT; @@ -1453,10 +1424,6 @@ conf->last_used = j; - if (conf->working_disks != sb->raid_disks) { - printk(KERN_ALERT "raid1: md%d, not all disks are operational -- trying to recover array\n", mdidx(mddev)); - start_recovery = 1; - } { const char * name = "raid1d"; @@ -1468,20 +1435,6 @@ } } - if (!start_recovery && !(sb->state & (1 << MD_SB_CLEAN)) && - (conf->working_disks > 1)) { - const char * name = "raid1syncd"; - - conf->resync_thread = md_register_thread(raid1syncd, conf, name); - if (!conf->resync_thread) { - printk(THREAD_ERROR, mdidx(mddev)); - goto out_free_conf; - } - - printk(START_RESYNC, mdidx(mddev)); - conf->resync_mirrors = 1; - md_wakeup_thread(conf->resync_thread); - } /* * Regenerate the "device is in sync with the raid set" bit for @@ -1498,10 +1451,6 @@ } sb->active_disks = conf->working_disks; - if (start_recovery) - md_recover_arrays(); - - printk(ARRAY_IS_ACTIVE, mdidx(mddev), sb->active_disks, sb->raid_disks); /* * Ok, everything is just fine now @@ -1521,47 +1470,12 @@ return -EIO; } -static int stop_resync(mddev_t *mddev) -{ - conf_t *conf = mddev_to_conf(mddev); - - if (conf->resync_thread) { - if (conf->resync_mirrors) { - conf->resync_mirrors = 2; - md_interrupt_thread(conf->resync_thread); - - printk(KERN_INFO "raid1: mirror resync was not fully finished, restarting next time.\n"); - return 1; - } - return 0; - } - return 0; -} - -static int restart_resync(mddev_t *mddev) -{ - conf_t *conf = mddev_to_conf(mddev); - - if (conf->resync_mirrors) { - if (!conf->resync_thread) { - MD_BUG(); - return 0; - } - conf->resync_mirrors = 1; - md_wakeup_thread(conf->resync_thread); - return 1; - } - return 0; -} - static int stop(mddev_t *mddev) { conf_t *conf = mddev_to_conf(mddev); int i; md_unregister_thread(conf->thread); - if (conf->resync_thread) - md_unregister_thread(conf->resync_thread); if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); for (i = 0; i < MD_SB_DISKS; i++) @@ -1582,8 +1496,6 @@ status: status, error_handler: error, diskop: diskop, - stop_resync: stop_resync, - restart_resync: restart_resync, sync_request: sync_request }; diff -Nru a/drivers/md/raid5.c b/drivers/md/raid5.c --- a/drivers/md/raid5.c Tue Jun 18 19:12:02 2002 +++ b/drivers/md/raid5.c Tue Jun 18 19:12:02 2002 @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -633,7 +634,6 @@ else page_offset = (signed)(sector - bio->bi_sector) * -512; bio_for_each_segment(bvl, bio, i) { - char *ba = __bio_kmap(bio, i); int len = bio_iovec_idx(bio,i)->bv_len; int clen; int b_offset = 0; @@ -648,13 +648,16 @@ clen = STRIPE_SIZE - page_offset; else clen = len; - if (len > 0) { + if (clen > 0) { + char *ba = __bio_kmap(bio, i); if (frombio) memcpy(pa+page_offset, ba+b_offset, clen); else memcpy(ba+b_offset, pa+page_offset, clen); - } - __bio_kunmap(bio, i); + __bio_kunmap(bio, i); + } + if (clen < len) /* hit end of page */ + break; page_offset += len; } } @@ -809,6 +812,8 @@ spin_unlock_irq(&conf->device_lock); spin_unlock(&sh->lock); + PRINTK("added bi b#%lu to stripe s#%lu, disk %d.\n", bi->bi_sector, sh->sector, dd_idx); + if (forwrite) { /* check if page is coverred */ sector_t sector = 
sh->dev[dd_idx].sector; @@ -822,8 +827,6 @@ if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); } - - PRINTK("added bi b#%lu to stripe s#%lu, disk %d.\n", bi->bi_sector, sh->sector, dd_idx); } @@ -1035,7 +1038,7 @@ ) && !test_bit(R5_UPTODATE, &dev->flags)) { if (conf->disks[i].operational -/* && !(conf->resync_parity && i == sh->pd_idx) */ +/* && !(!mddev->insync && i == sh->pd_idx) */ ) rmw++; else rmw += 2*disks; /* cannot read it */ @@ -1225,14 +1228,15 @@ } static void raid5_unplug_device(void *data) { - raid5_conf_t *conf = (raid5_conf_t *)data; + request_queue_t *q = data; + mddev_t *mddev = q->queuedata; + raid5_conf_t *conf = mddev_to_conf(mddev); unsigned long flags; spin_lock_irqsave(&conf->device_lock, flags); - raid5_activate_delayed(conf); - - conf->plugged = 0; + if (blk_remove_plug(q)) + raid5_activate_delayed(conf); md_wakeup_thread(conf->thread); spin_unlock_irqrestore(&conf->device_lock, flags); @@ -1241,31 +1245,21 @@ static inline void raid5_plug_device(raid5_conf_t *conf) { spin_lock_irq(&conf->device_lock); - if (list_empty(&conf->delayed_list)) - if (!conf->plugged) { - conf->plugged = 1; - queue_task(&conf->plug_tq, &tq_disk); - } + blk_plug_device(&conf->mddev->queue); spin_unlock_irq(&conf->device_lock); } -static int make_request (mddev_t *mddev, int rw, struct bio * bi) +static int make_request (request_queue_t *q, struct bio * bi) { - raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + mddev_t *mddev = q->queuedata; + raid5_conf_t *conf = mddev_to_conf(mddev); const unsigned int raid_disks = conf->raid_disks; const unsigned int data_disks = raid_disks - 1; unsigned int dd_idx, pd_idx; sector_t new_sector; sector_t logical_sector, last_sector; - int read_ahead = 0; - struct stripe_head *sh; - if (rw == READA) { - rw = READ; - read_ahead=1; - } - logical_sector = bi->bi_sector & ~(STRIPE_SECTORS-1); last_sector = bi->bi_sector + (bi->bi_size>>9); @@ -1280,10 +1274,10 @@ PRINTK("raid5: make_request, sector %ul logical %ul\n", new_sector, logical_sector); - sh = get_active_stripe(conf, new_sector, pd_idx, read_ahead); + sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK)); if (sh) { - add_stripe_bio(sh, bi, dd_idx, rw); + add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK)); raid5_plug_device(conf); handle_stripe(sh); @@ -1310,6 +1304,10 @@ int raid_disks = conf->raid_disks; int data_disks = raid_disks-1; + if (sector_nr >= mddev->sb->size <<1) + /* just being told to finish up .. nothing to do */ + return 0; + first_sector = raid5_compute_sector(stripe*data_disks*sectors_per_chunk + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf); sh = get_active_stripe(conf, sector_nr, pd_idx, 0); @@ -1342,17 +1340,15 @@ handled = 0; - if (mddev->sb_dirty) { - mddev->sb_dirty = 0; + if (mddev->sb_dirty) md_update_sb(mddev); - } spin_lock_irq(&conf->device_lock); while (1) { struct list_head *first; if (list_empty(&conf->handle_list) && atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && - !conf->plugged && + !blk_queue_plugged(&mddev->queue) && !list_empty(&conf->delayed_list)) raid5_activate_delayed(conf); @@ -1381,31 +1377,6 @@ PRINTK("--- raid5d inactive\n"); } -/* - * Private kernel thread for parity reconstruction after an unclean - * shutdown. Reconstruction on spare drives in case of a failed drive - * is done by the generic mdsyncd. 
- */ -static void raid5syncd (void *data) -{ - raid5_conf_t *conf = data; - mddev_t *mddev = conf->mddev; - - if (!conf->resync_parity) - return; - if (conf->resync_parity == 2) - return; - down(&mddev->recovery_sem); - if (md_do_sync(mddev,NULL)) { - up(&mddev->recovery_sem); - printk("raid5: resync aborted!\n"); - return; - } - conf->resync_parity = 0; - up(&mddev->recovery_sem); - printk("raid5: resync finished.\n"); -} - static int run (mddev_t *mddev) { raid5_conf_t *conf; @@ -1415,7 +1386,6 @@ mdk_rdev_t *rdev; struct disk_info *disk; struct list_head *tmp; - int start_recovery = 0; MOD_INC_USE_COUNT; @@ -1443,10 +1413,7 @@ atomic_set(&conf->active_stripes, 0); atomic_set(&conf->preread_active_stripes, 0); - conf->plugged = 0; - conf->plug_tq.sync = 0; - conf->plug_tq.routine = &raid5_unplug_device; - conf->plug_tq.data = conf; + mddev->queue.unplug_fn = raid5_unplug_device; PRINTK("raid5: run(md%d) called.\n", mdidx(mddev)); @@ -1570,9 +1537,10 @@ goto abort; } - if (conf->working_disks != sb->raid_disks) { - printk(KERN_ALERT "raid5: md%d, not all disks are operational -- trying to recover array\n", mdidx(mddev)); - start_recovery = 1; + if (conf->failed_disks == 1 && + !(sb->state & (1<max_nr_stripes * (sizeof(struct stripe_head) + - conf->raid_disks * ((sizeof(struct buffer_head) + PAGE_SIZE))) / 1024; + conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; if (grow_stripes(conf, conf->max_nr_stripes)) { printk(KERN_ERR "raid5: couldn't allocate %dkB for buffers\n", memory); shrink_stripes(conf); + md_unregister_thread(conf->thread); goto abort; } else printk(KERN_INFO "raid5: allocated %dkB for md%d\n", memory, mdidx(mddev)); @@ -1614,23 +1583,6 @@ else printk(KERN_ALERT "raid5: raid level %d set md%d active with %d out of %d devices, algorithm %d\n", conf->level, mdidx(mddev), sb->active_disks, sb->raid_disks, conf->algorithm); - if (!start_recovery && !(sb->state & (1 << MD_SB_CLEAN))) { - const char * name = "raid5syncd"; - - conf->resync_thread = md_register_thread(raid5syncd, conf,name); - if (!conf->resync_thread) { - printk(KERN_ERR "raid5: couldn't allocate thread for md%d\n", mdidx(mddev)); - goto abort; - } - - printk("raid5: raid set md%d not clean; reconstructing parity\n", mdidx(mddev)); - conf->resync_parity = 1; - md_wakeup_thread(conf->resync_thread); - } - - print_raid5_conf(conf); - if (start_recovery) - md_recover_arrays(); print_raid5_conf(conf); /* Ok, everything is just fine now */ @@ -1649,48 +1601,12 @@ return -EIO; } -static int stop_resync (mddev_t *mddev) -{ - raid5_conf_t *conf = mddev_to_conf(mddev); - mdk_thread_t *thread = conf->resync_thread; - - if (thread) { - if (conf->resync_parity) { - conf->resync_parity = 2; - md_interrupt_thread(thread); - printk(KERN_INFO "raid5: parity resync was not fully finished, restarting next time.\n"); - return 1; - } - return 0; - } - return 0; -} - -static int restart_resync (mddev_t *mddev) -{ - raid5_conf_t *conf = mddev_to_conf(mddev); - - if (conf->resync_parity) { - if (!conf->resync_thread) { - MD_BUG(); - return 0; - } - printk("raid5: waking up raid5resync.\n"); - conf->resync_parity = 1; - md_wakeup_thread(conf->resync_thread); - return 1; - } else - printk("raid5: no restart-resync needed.\n"); - return 0; -} static int stop (mddev_t *mddev) { raid5_conf_t *conf = (raid5_conf_t *) mddev->private; - if (conf->resync_thread) - md_unregister_thread(conf->resync_thread); md_unregister_thread(conf->thread); shrink_stripes(conf); free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); @@ 
-2065,8 +1981,6 @@ status: status, error_handler: error, diskop: diskop, - stop_resync: stop_resync, - restart_resync: restart_resync, sync_request: sync_request }; diff -Nru a/drivers/net/aironet4500_core.c b/drivers/net/aironet4500_core.c --- a/drivers/net/aironet4500_core.c Tue Jun 18 19:12:01 2002 +++ b/drivers/net/aironet4500_core.c Tue Jun 18 19:12:01 2002 @@ -2669,10 +2669,8 @@ * but without it card gets screwed up */ #ifdef CONFIG_SMP - if(smp_num_cpus > 1){ both_bap_lock = 1; bap_setup_spinlock = 1; - } #endif //awc_dump_registers(dev); diff -Nru a/drivers/net/tg3.c b/drivers/net/tg3.c --- a/drivers/net/tg3.c Tue Jun 18 19:12:02 2002 +++ b/drivers/net/tg3.c Tue Jun 18 19:12:02 2002 @@ -52,8 +52,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "0.98" -#define DRV_MODULE_RELDATE "Mar 28, 2002" +#define DRV_MODULE_VERSION "0.99" +#define DRV_MODULE_RELDATE "Jun 11, 2002" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -971,7 +971,9 @@ tw32(MAC_MODE, tp->mac_mode); - if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { + if (tp->tg3_flags & + (TG3_FLAG_USE_LINKCHG_REG | + TG3_FLAG_POLL_SERDES)) { /* Polled via timer. */ tw32(MAC_EVENT, 0); } else { @@ -1065,6 +1067,7 @@ #define ANEG_TIMER_ENAB 2 #define ANEG_FAILED -1 +#define ANEG_STATE_SETTLE_TIME 10000 static int tg3_fiber_aneg_smachine(struct tg3 *tp, struct tg3_fiber_aneginfo *ap) @@ -1093,8 +1096,10 @@ ap->ability_match = 0; ap->ability_match_count = 0; } else { - if (++ap->ability_match_count > 1) + if (++ap->ability_match_count > 1) { ap->ability_match = 1; + ap->ability_match_cfg = rx_cfg_reg; + } } if (rx_cfg_reg & ANEG_CFG_ACK) ap->ack_match = 1; @@ -1151,10 +1156,11 @@ /* fallthru */ case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; - if (delta > 100000) + if (delta > ANEG_STATE_SETTLE_TIME) { ap->state = ANEG_STATE_ABILITY_DETECT_INIT; - else + } else { ret = ANEG_TIMER_ENAB; + } break; case ANEG_STATE_DISABLE_LINK_OK: @@ -1167,12 +1173,14 @@ tw32(MAC_TX_AUTO_NEG, ap->txconfig); tp->mac_mode |= MAC_MODE_SEND_CONFIGS; tw32(MAC_MODE, tp->mac_mode); + ap->state = ANEG_STATE_ABILITY_DETECT; break; case ANEG_STATE_ABILITY_DETECT: - if (ap->ability_match != 0 && ap->rxconfig != 0) + if (ap->ability_match != 0 && ap->rxconfig != 0) { ap->state = ANEG_STATE_ACK_DETECT_INIT; + } break; case ANEG_STATE_ACK_DETECT_INIT: @@ -1180,16 +1188,18 @@ tw32(MAC_TX_AUTO_NEG, ap->txconfig); tp->mac_mode |= MAC_MODE_SEND_CONFIGS; tw32(MAC_MODE, tp->mac_mode); + ap->state = ANEG_STATE_ACK_DETECT; /* fallthru */ case ANEG_STATE_ACK_DETECT: if (ap->ack_match != 0) { if ((ap->rxconfig & ~ANEG_CFG_ACK) == - (ap->ability_match_cfg & ~ANEG_CFG_ACK)) + (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { ap->state = ANEG_STATE_COMPLETE_ACK_INIT; - else + } else { ap->state = ANEG_STATE_AN_ENABLE; + } } else if (ap->ability_match != 0 && ap->rxconfig == 0) { ap->state = ANEG_STATE_AN_ENABLE; @@ -1245,15 +1255,16 @@ break; } delta = ap->cur_time - ap->link_time; - if (delta > 100000) { + if (delta > ANEG_STATE_SETTLE_TIME) { if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { ap->state = ANEG_STATE_IDLE_DETECT_INIT; } else { - if ((ap->txconfig & 0x0080) == 0 && - !(ap->flags & MR_NP_RX)) + if ((ap->txconfig & ANEG_CFG_NP) == 0 && + !(ap->flags & MR_NP_RX)) { ap->state = ANEG_STATE_IDLE_DETECT_INIT; - else + } else { ret = ANEG_FAILED; + } } } break; @@ -1262,6 +1273,7 @@ ap->link_time = ap->cur_time; tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; tw32(MAC_MODE, tp->mac_mode); + ap->state = ANEG_STATE_IDLE_DETECT; 
ret = ANEG_TIMER_ENAB; break; @@ -1273,7 +1285,7 @@ break; } delta = ap->cur_time - ap->link_time; - if (delta > 100000) { + if (delta > ANEG_STATE_SETTLE_TIME) { /* XXX another gem from the Broadcom driver :( */ ap->state = ANEG_STATE_LINK_OK; } @@ -1302,9 +1314,18 @@ static int tg3_setup_fiber_phy(struct tg3 *tp) { + u32 orig_pause_cfg; + u16 orig_active_speed; + u8 orig_active_duplex; int current_link_up; int i; + orig_pause_cfg = + (tp->tg3_flags & (TG3_FLAG_RX_PAUSE | + TG3_FLAG_TX_PAUSE)); + orig_active_speed = tp->link_config.active_speed; + orig_active_duplex = tp->link_config.active_duplex; + tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; tw32(MAC_MODE, tp->mac_mode); @@ -1317,7 +1338,7 @@ tg3_writephy(tp, 0x16, 0x8007); /* SW reset */ - tg3_writephy(tp, 0x00, 0x8000); + tg3_writephy(tp, MII_BMCR, BMCR_RESET); /* Wait for reset to complete. */ /* XXX schedule_timeout() ... */ @@ -1353,53 +1374,51 @@ tg3_writephy(tp, 0x10, 0x8011); } - /* Enable link change interrupt. */ - tw32(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + /* Enable link change interrupt unless serdes polling. */ + if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES)) + tw32(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + else + tw32(MAC_EVENT, 0); current_link_up = 0; if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) { - if (tp->link_config.autoneg == AUTONEG_ENABLE) { + if (tp->link_config.autoneg == AUTONEG_ENABLE && + !(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) { struct tg3_fiber_aneginfo aninfo; int status = ANEG_FAILED; + unsigned int tick; + u32 tmp; memset(&aninfo, 0, sizeof(aninfo)); aninfo.flags |= (MR_AN_ENABLE); - for (i = 0; i < 6; i++) { - unsigned int tick; - u32 tmp; - - tw32(MAC_TX_AUTO_NEG, 0); + tw32(MAC_TX_AUTO_NEG, 0); - tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; - tw32(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); - udelay(20); - - tw32(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); - - aninfo.state = ANEG_STATE_UNKNOWN; - aninfo.cur_time = 0; - tick = 0; - while (++tick < 95000) { - status = tg3_fiber_aneg_smachine(tp, &aninfo); - if (status == ANEG_DONE || - status == ANEG_FAILED) - break; - - udelay(1); - } + tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + tw32(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); + udelay(20); + + tw32(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); + + aninfo.state = ANEG_STATE_UNKNOWN; + aninfo.cur_time = 0; + tick = 0; + while (++tick < 195000) { + status = tg3_fiber_aneg_smachine(tp, &aninfo); if (status == ANEG_DONE || status == ANEG_FAILED) break; + + udelay(1); } tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; tw32(MAC_MODE, tp->mac_mode); if (status == ANEG_DONE && - (aninfo.flags & MR_AN_COMPLETE) && - (aninfo.flags & MR_LINK_OK) && - (aninfo.flags & MR_LP_ADV_FULL_DUPLEX)) { + (aninfo.flags & + (MR_AN_COMPLETE | MR_LINK_OK | + MR_LP_ADV_FULL_DUPLEX))) { u32 local_adv, remote_adv; local_adv = ADVERTISE_PAUSE_CAP; @@ -1411,6 +1430,8 @@ tg3_setup_flow_control(tp, local_adv, remote_adv); + tp->tg3_flags |= + TG3_FLAG_GOT_SERDES_FLOWCTL; current_link_up = 1; } for (i = 0; i < 60; i++) { @@ -1425,6 +1446,10 @@ MAC_STATUS_CFG_CHANGED)) == 0) break; } + if (current_link_up == 0 && + (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) { + current_link_up = 1; + } } else { /* Forcing 1000FD link up. 
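(taken when autonegotiation is off or serdes flow control has already been negotiated, so the forced full-duplex gigabit settings are used as-is)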
*/ current_link_up = 1; @@ -1439,11 +1464,12 @@ (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); for (i = 0; i < 100; i++) { + udelay(20); tw32(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED)); - udelay(5); + udelay(20); if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED)) == 0) @@ -1467,6 +1493,14 @@ else netif_carrier_off(tp->dev); tg3_link_report(tp); + } else { + u32 now_pause_cfg = + tp->tg3_flags & (TG3_FLAG_RX_PAUSE | + TG3_FLAG_TX_PAUSE); + if (orig_pause_cfg != now_pause_cfg || + orig_active_speed != tp->link_config.active_speed || + orig_active_duplex != tp->link_config.active_duplex) + tg3_link_report(tp); } if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) { @@ -1644,7 +1678,7 @@ * tg3_alloc_rx_skb for full details. */ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, - int src_idx, int dest_idx_unmasked) + int src_idx, u32 dest_idx_unmasked) { struct tg3_rx_buffer_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; @@ -1965,7 +1999,9 @@ struct tg3_hw_status *sblk = tp->hw_status; int did_pkts; - if (!(tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG)) { + if (!(tp->tg3_flags & + (TG3_FLAG_USE_LINKCHG_REG | + TG3_FLAG_POLL_SERDES))) { if (sblk->status & SD_STATUS_LINK_CHG) { sblk->status = SD_STATUS_UPDATED | (sblk->status & ~SD_STATUS_LINK_CHG); @@ -2838,7 +2874,7 @@ return -ENOMEM; } -#define MAX_WAIT_CNT 10000 +#define MAX_WAIT_CNT 1000 /* To stop a block, clear the enable bit and poll till it * clears. tp->lock is held. @@ -3807,6 +3843,26 @@ if (phy_event) tg3_setup_phy(tp); + } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { + u32 mac_stat = tr32(MAC_STATUS); + int need_setup = 0; + + if (netif_carrier_ok(tp->dev) && + (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { + need_setup = 1; + } + if (! netif_carrier_ok(tp->dev) && + (mac_stat & MAC_STATUS_PCS_SYNCED)) { + need_setup = 1; + } + if (need_setup) { + tw32(MAC_MODE, + (tp->mac_mode & + ~MAC_MODE_PORT_MODE_MASK)); + udelay(40); + tw32(MAC_MODE, tp->mac_mode); + tg3_setup_phy(tp); + } } tp->timer_counter = tp->timer_multiplier; @@ -4162,7 +4218,10 @@ tg3_halt(tp); tg3_free_rings(tp); - tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; + tp->tg3_flags &= + ~(TG3_FLAG_INIT_COMPLETE | + TG3_FLAG_GOT_SERDES_FLOWCTL); + netif_carrier_off(tp->dev); spin_unlock_irq(&tp->lock); @@ -5731,6 +5790,12 @@ tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | TG3_FLAG_USE_LINKCHG_REG); } + + /* For all SERDES we poll the MAC status register. */ + if (tp->phy_id == PHY_ID_SERDES) + tp->tg3_flags |= TG3_FLAG_POLL_SERDES; + else + tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; /* 5700 BX chips need to have their TX producer index mailboxes * written twice to workaround a bug. 
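The tg3 hunks above replace the magic 100000-tick delta with the named ANEG_STATE_SETTLE_TIME (now 10000) and make each state assignment explicit before a fallthrough, so the settle test reads the same in every state. A minimal userspace sketch of that settle-time pattern follows; the toy_* names are hypothetical stand-ins for the driver's ap->cur_time/ap->link_time bookkeeping, not tg3's types.

/* Toy model of the settle-time test: a named constant in the same tick
 * units as the poll counter, instead of a bare magic number. */
#include <stdio.h>

#define SETTLE_TIME 10000	/* ticks, like ANEG_STATE_SETTLE_TIME */

struct toy_aneg {
	unsigned long cur_time;   /* advanced once per polling tick */
	unsigned long link_time;  /* tick at which the state was entered */
};

static int settled(const struct toy_aneg *ap)
{
	/* Same shape as "delta = ap->cur_time - ap->link_time" above. */
	return ap->cur_time - ap->link_time > SETTLE_TIME;
}

int main(void)
{
	struct toy_aneg ap = { .cur_time = 0, .link_time = 0 };

	while (!settled(&ap))
		ap.cur_time++;	/* one udelay(1) poll iteration */
	printf("state may advance after %lu ticks\n", ap.cur_time);
	return 0;
}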
diff -Nru a/drivers/net/tg3.h b/drivers/net/tg3.h --- a/drivers/net/tg3.h Tue Jun 18 19:12:01 2002 +++ b/drivers/net/tg3.h Tue Jun 18 19:12:01 2002 @@ -1126,7 +1126,7 @@ #define GRC_MISC_CFG_BOARD_ID_5702FE 0x00004000 #define GRC_MISC_CFG_BOARD_ID_5703 0x00000000 #define GRC_MISC_CFG_BOARD_ID_5703S 0x00002000 -#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00001800 +#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000 #define GRC_LOCAL_CTRL 0x00006808 #define GRC_LCLCTRL_INT_ACTIVE 0x00000001 #define GRC_LCLCTRL_CLEARINT 0x00000002 @@ -1769,6 +1769,7 @@ #define TG3_FLAG_USE_MI_INTERRUPT 0x00000010 #define TG3_FLAG_ADAPTIVE_RX 0x00000020 #define TG3_FLAG_ADAPTIVE_TX 0x00000040 +#define TG3_FLAG_POLL_SERDES 0x00000080 #define TG3_FLAG_PHY_RESET_ON_INIT 0x00000100 #define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200 #define TG3_FLAG_TAGGED_IRQ_STATUS 0x00000400 @@ -1790,6 +1791,7 @@ #define TG3_FLAG_PAUSE_RX 0x04000000 #define TG3_FLAG_PAUSE_TX 0x08000000 #define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 +#define TG3_FLAG_GOT_SERDES_FLOWCTL 0x20000000 #define TG3_FLAG_INIT_COMPLETE 0x80000000 u32 msg_enable; diff -Nru a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c --- a/drivers/pci/pci-driver.c Tue Jun 18 19:12:02 2002 +++ b/drivers/pci/pci-driver.c Tue Jun 18 19:12:02 2002 @@ -210,3 +210,4 @@ EXPORT_SYMBOL(pci_register_driver); EXPORT_SYMBOL(pci_unregister_driver); EXPORT_SYMBOL(pci_dev_driver); +EXPORT_SYMBOL(pci_bus_type); diff -Nru a/drivers/pcmcia/pci_socket.c b/drivers/pcmcia/pci_socket.c --- a/drivers/pcmcia/pci_socket.c Tue Jun 18 19:12:03 2002 +++ b/drivers/pcmcia/pci_socket.c Tue Jun 18 19:12:03 2002 @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff -Nru a/drivers/pcmcia/yenta.c b/drivers/pcmcia/yenta.c --- a/drivers/pcmcia/yenta.c Tue Jun 18 19:12:02 2002 +++ b/drivers/pcmcia/yenta.c Tue Jun 18 19:12:02 2002 @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/s390/Config.in b/drivers/s390/Config.in --- a/drivers/s390/Config.in Tue Jun 18 19:12:02 2002 +++ b/drivers/s390/Config.in Tue Jun 18 19:12:02 2002 @@ -17,18 +17,18 @@ dep_tristate ' Support for ECKD Disks' CONFIG_DASD_ECKD $CONFIG_DASD if [ "$CONFIG_DASD_ECKD" = "m" ]; then bool ' Automatic activation of ECKD module' CONFIG_DASD_AUTO_ECKD - fi; + fi dep_tristate ' Support for FBA Disks' CONFIG_DASD_FBA $CONFIG_DASD if [ "$CONFIG_DASD_FBA" = "m" ]; then bool ' Automatic activation of FBA module' CONFIG_DASD_AUTO_FBA - fi; + fi # dep_tristate ' Support for CKD Disks' CONFIG_DASD_CKD $CONFIG_DASD if [ "$CONFIG_ARCH_S390X" != "y" ]; then dep_tristate ' Support for DIAG access to CMS reserved Disks' CONFIG_DASD_DIAG $CONFIG_DASD if [ "$CONFIG_DASD_DIAG" = "m" ]; then bool ' Automatic activation of DIAG module' CONFIG_DASD_AUTO_DIAG - fi; - fi; + fi + fi fi endmenu diff -Nru a/drivers/s390/Makefile b/drivers/s390/Makefile --- a/drivers/s390/Makefile Tue Jun 18 19:12:02 2002 +++ b/drivers/s390/Makefile Tue Jun 18 19:12:02 2002 @@ -7,6 +7,6 @@ obj-$(CONFIG_QDIO) += qdio.o obj-y += s390mach.o s390dyn.o sysinfo.o -obj-y += block/ char/ misc/ net/ scsi/ cio/ +obj-y += block/ char/ misc/ net/ cio/ include $(TOPDIR)/Rules.make diff -Nru a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c --- a/drivers/s390/block/dasd.c Tue Jun 18 19:12:02 2002 +++ b/drivers/s390/block/dasd.c Tue Jun 18 19:12:02 2002 @@ -1573,6 +1573,7 @@ static inline void __dasd_process_blk_queue(dasd_device_t * device) { + struct block_device *bdev; request_queue_t *queue; struct list_head *l; struct 
request *req; @@ -1601,11 +1602,14 @@ if (cqr->status == DASD_CQR_QUEUED) nr_queued++; } + bdev = bdget(kdev_t_to_nr(device->kdev)); + if (!bdev) + return; while (!blk_queue_plugged(queue) && !blk_queue_empty(queue) && nr_queued < DASD_CHANQ_MAX_SIZE) { req = elv_next_request(queue); - if (is_read_only(device->kdev) && rq_data_dir(req) == WRITE) { + if (bdev_read_only(bdev) && rq_data_dir(req) == WRITE) { DBF_EVENT(DBF_ERR, "(%04x) Rejecting write request %p", device->devinfo.devno, req); @@ -1632,6 +1636,7 @@ dasd_profile_start(device, cqr, req); nr_queued++; } + bdput(bdev); } /* diff -Nru a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c --- a/drivers/s390/block/dasd_genhd.c Tue Jun 18 19:12:03 2002 +++ b/drivers/s390/block/dasd_genhd.c Tue Jun 18 19:12:03 2002 @@ -65,7 +65,7 @@ { struct major_info *mi; struct hd_struct *gd_part; - devfs_handle_t *gd_de_arr, *gd_label_arr; + devfs_handle_t *gd_de_arr; int *gd_sizes; char *gd_flags; int new_major, rc; @@ -78,14 +78,12 @@ gd_de_arr = kmalloc(DASD_PER_MAJOR * sizeof(devfs_handle_t), GFP_KERNEL); gd_flags = kmalloc(DASD_PER_MAJOR * sizeof(char), GFP_KERNEL); - gd_label_arr = kmalloc(DASD_PER_MAJOR * sizeof(devfs_handle_t), - GFP_KERNEL); gd_part = kmalloc(sizeof (struct hd_struct) << MINORBITS, GFP_ATOMIC); gd_sizes = kmalloc(sizeof(int) << MINORBITS, GFP_ATOMIC); /* Check if one of the allocations failed. */ if (mi == NULL || gd_de_arr == NULL || gd_flags == NULL || - gd_label_arr == NULL || gd_part == NULL || gd_sizes == NULL) { + gd_part == NULL || gd_sizes == NULL) { MESSAGE(KERN_WARNING, "%s", "Cannot get memory to allocate another " "major number"); @@ -114,14 +112,12 @@ mi->gendisk.fops = &dasd_device_operations; mi->gendisk.de_arr = gd_de_arr; mi->gendisk.flags = gd_flags; - mi->gendisk.label_arr = gd_label_arr; mi->gendisk.part = gd_part; mi->gendisk.sizes = gd_sizes; /* Initialize the gendisk arrays. */ memset(gd_de_arr, 0, DASD_PER_MAJOR * sizeof(devfs_handle_t)); memset(gd_flags, 0, DASD_PER_MAJOR * sizeof (char)); - memset(gd_label_arr, 0, DASD_PER_MAJOR * sizeof(devfs_handle_t)); memset(gd_part, 0, sizeof (struct hd_struct) << MINORBITS); memset(gd_sizes, 0, sizeof(int) << MINORBITS); @@ -143,7 +139,6 @@ /* We rely on kfree to do the != NULL check. */ kfree(gd_sizes); kfree(gd_part); - kfree(gd_label_arr); kfree(gd_flags); kfree(gd_de_arr); kfree(mi); @@ -182,7 +177,6 @@ /* Free memory. */ kfree(bs); kfree(mi->gendisk.part); - kfree(mi->gendisk.label_arr); kfree(mi->gendisk.flags); kfree(mi->gendisk.de_arr); kfree(mi); diff -Nru a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c --- a/drivers/s390/block/dasd_proc.c Tue Jun 18 19:12:02 2002 +++ b/drivers/s390/block/dasd_proc.c Tue Jun 18 19:12:02 2002 @@ -93,7 +93,7 @@ size_t user_len, loff_t * offset) { char *buffer, *str; - int add_or_set, device_or_range; + int add_or_set; int from, to, features; buffer = dasd_get_user_string(user_buf, user_len); @@ -109,15 +109,11 @@ goto out_error; for (str = str + 4; isspace(*str); str++); - /* Scan for "device " or "range=". */ - if (strncmp(str, "device", 6) == 0 && isspace(str[6])) { - device_or_range = 0; + /* Scan for "device " and "range=" and ignore it. This is sick. 
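Both are legacy keywords: they are skipped when present, and the device number range that follows is parsed the same way regardless.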
*/ + if (strncmp(str, "device", 6) == 0 && isspace(str[6])) + for (str = str + 6; isspace(*str); str++); + if (strncmp(str, "range=", 6) == 0) for (str = str + 6; isspace(*str); str++); - } else if (strncmp(str, "range=", 6) == 0) { - device_or_range = 1; - str = str + 6; - } else - goto out_error; /* Scan device number range and feature string. */ to = from = dasd_devno(str, &str); diff -Nru a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c --- a/drivers/s390/block/xpram.c Tue Jun 18 19:12:03 2002 +++ b/drivers/s390/block/xpram.c Tue Jun 18 19:12:03 2002 @@ -1,4 +1,3 @@ - /* * Xpram.c -- the S/390 expanded memory RAM-disk * @@ -8,137 +7,67 @@ * * Author of XPRAM specific coding: Reinhard Buendgen * buendgen@de.ibm.com + * Rewrite for 2.5: Martin Schwidefsky * * External interfaces: * Interfaces to linux kernel - * xpram_setup: read kernel parameters (see init/main.c) - * xpram_init: initialize device driver (see drivers/block/ll_rw_blk.c) - * Module interfaces - * init_module - * cleanup_module + * xpram_setup: read kernel parameters * Device specific file operations * xpram_iotcl * xpram_open * xpram_release * - * "ad-hoc" partitioning: + * "ad-hoc" partitioning: * the expanded memory can be partitioned among several devices * (with different minors). The partitioning set up can be * set by kernel or module parameters (int devs & int sizes[]) * - * module parameters: devs= and sizes= - * kernel parameters: xpram_parts= - * note: I did not succeed in parsing numbers - * for module parameters of type string "s" ?!? - * - * Other kenel files/modules affected(gerp for "xpram" or "XPRAM": - * drivers/s390/Config.in - * drivers/s390/block/Makefile - * include/linux/blk.h - * include/linux/major.h - * init/main.c - * drivers/block//ll_rw_blk.c - * - * * Potential future improvements: - * request clustering: first coding started not yet tested or integrated - * I doubt that it really pays off * generic hard disk support to replace ad-hoc partitioning - * - * Tested with 2.2.14 (under VM) */ -#ifdef MODULE -# ifndef __KERNEL__ -# define __KERNEL__ -# endif -# define __NO_VERSION__ /* don't define kernel_version in module.h */ -#endif /* MODULE */ - #include #include - -#ifdef MODULE -char kernel_version [] = UTS_RELEASE; -#endif - -#include -#include -#include -#include /* printk() */ -#include /* kmalloc() */ -#include -#include /* everything... 
*/ -#include /* error codes */ -#include -#include /* size_t */ #include /* isdigit, isxdigit */ -#include /* O_ACCMODE */ -#include /* HDIO_GETGEO */ - -#include /* cli(), *_flags */ -#include /* put_user */ - -#define MAJOR_NR xpram_major /* force definitions on in blk.h */ -int xpram_major; /* must be declared before including blk.h */ -devfs_handle_t xpram_devfs_handle; - -#define DEVICE_NR(device) MINOR(device) /* xpram has no partition bits */ -#define DEVICE_NO_RANDOM /* no entropy to contribute */ -#define DEVICE_OFF(d) /* do-nothing */ - +#include +#include +#include #include +#include +#include /* HDIO_GETGEO */ +#include +#include -#include "xpram.h" /* local definitions */ - -__setup("xpram_parts=", xpram_setup); - -/* - define the debug levels: - - 0 No debugging output to console or syslog - - 1 Log internal errors to syslog, ignore check conditions - - 2 Log internal errors and check conditions to syslog - - 3 Log internal errors to console, log check conditions to syslog - - 4 Log internal errors and check conditions to console - - 5 panic on internal errors, log check conditions to console - - 6 panic on both, internal errors and check conditions - */ -#define XPRAM_DEBUG 4 +#define XPRAM_NAME "xpram" +#define XPRAM_DEVS 1 /* one partition */ +#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ + +#define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x) +#define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x) +#define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x) +#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) + +static struct device xpram_sys_device = { + name: "S/390 expanded memory RAM disk", + bus_id: "xpram", +}; -#define PRINTK_HEADER XPRAM_NAME - -#if XPRAM_DEBUG > 0 -#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER "debug:" x ) -#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER "info:" x ) -#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER "warning:" x ) -#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER "error:" x ) -#define PRINT_FATAL(x...) panic ( PRINTK_HEADER "panic:"x ) -#else -#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER "debug:" x ) -#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER "info:" x ) -#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER "warning:" x ) -#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER "error:" x ) -#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER "panic:" x ) -#endif +typedef struct { + unsigned long size; /* size of xpram segment in pages */ + unsigned long offset; /* start page of xpram segment */ +} xpram_device_t; + +static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; +static int xpram_sizes[XPRAM_MAX_DEVS]; +static unsigned long xpram_pages; +static int xpram_devs; +static devfs_handle_t xpram_devfs_handle; /* - * Non-prefixed symbols are static. They are meant to be assigned at - * load time. Prefixed symbols are not static, so they can be used in - * debugging. They are hidden anyways by register_symtab() unless - * XPRAM_DEBUG is defined. + * Parameter parsing functions. 
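+ * (the kernel command line is handled by xpram_setup() below; when built as a module, the devs/sizes module parameters are used instead)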
*/ - -static int major = XPRAM_MAJOR; -static int devs = XPRAM_DEVS; -static int sizes[XPRAM_MAX_DEVS] = { 0, }; -static int blksize = XPRAM_BLKSIZE; -static int hardsect = XPRAM_HARDSECT; - -int xpram_devs; -int xpram_blksize, xpram_hardsect; -int xpram_mem_avail = 0; -unsigned long xpram_sizes[XPRAM_MAX_DEVS]; - +static int devs = XPRAM_DEVS; +static unsigned long sizes[XPRAM_MAX_DEVS]; MODULE_PARM(devs,"i"); MODULE_PARM(sizes,"1-" __MODULE_STRING(XPRAM_MAX_DEVS) "i"); @@ -150,512 +79,301 @@ "All devices with size 0 equally partition the " "remaining space on the expanded strorage not " "claimed by explicit sizes\n"); +MODULE_LICENSE("GPL"); - - -/* The following items are obtained through kmalloc() in init_module() */ - -Xpram_Dev *xpram_devices = NULL; -int *xpram_offsets = NULL; /* partition offsets */ - -#define MIN(x,y) ((x) < (y) ? (x) : (y)) -#define MAX(x,y) ((x) > (y) ? (x) : (y)) - -/* - * compute nearest multiple of 4 , argument must be non-negative - * the macros used depends on XPRAM_KB_IN_PG = 4 - */ - -#define NEXT4(x) ((x & 0x3) ? (x+4-(x &0x3)) : (x)) /* increment if needed */ -#define LAST4(x) ((x & 0x3) ? (x-4+(x & 0x3)) : (x)) /* decrement if needed */ - -#if 0 /* this is probably not faster than the previous code */ -#define NEXT4(x) ((((x-1)>>2)>>2)+4) /* increment if needed */ -#define LAST4(x) (((x+3)>>2)<<2) /* decrement if needed */ -#endif - -/* integer formats */ -#define XPRAM_INVALF -1 /* invalid */ -#define XPRAM_HEXF 0 /* hexadecimal */ -#define XPRAM_DECF 1 /* decimal */ - -/* - * parsing operations (needed for kernel parameter parsing) - */ - -/* ------------------------------------------------------------------------- - * sets the string pointer after the next comma - * - * argument: strptr pointer to string - * side effect: strptr points to endof string or to position of the next - * comma - * ------------------------------------------------------------------------*/ -static void -xpram_scan_to_next_comma (char **strptr) -{ - while ( ((**strptr) != ',') && (**strptr) ) - (*strptr)++; -} - -/* ------------------------------------------------------------------------- - * interpret character as hex-digit +#ifndef MODULE +/* + * Parses the kernel parameters given in the kernel parameter line. + * The expected format is + * [","]* + * where + * devices is a positive integer that initializes xpram_devs + * each size is a non-negative integer possibly followed by a + * magnitude (k,K,m,M,g,G), the list of sizes initialises + * xpram_sizes * - * argument: c charcter - * result: c interpreted as hex-digit - * note: can be used to read digits for any base <= 16 - * ------------------------------------------------------------------------*/ -static int -xpram_get_hexdigit (char c) -{ - if ((c >= '0') && (c <= '9')) - return c - '0'; - if ((c >= 'a') && (c <= 'f')) - return c + 10 - 'a'; - if ((c >= 'A') && (c <= 'F')) - return c + 10 - 'A'; - return -1; -} - -/*-------------------------------------------------------------------------- - * Check format of unsigned integer + * Arguments + * str: substring of kernel parameter line that contains xprams + * kernel parameters. 
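+ * (i.e. the text following the "xpram_parts=" prefix, as registered by __setup() below)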
* - * Argument: strptr pointer to string - * result: -1 if strptr does not start with a digit - * (does not start an integer) - * 0 if strptr starts a positive hex-integer with "0x" - * 1 if strptr start a positive decimal integer + * Result 0 on success, -EINVAL else -- only for Version > 2.3 * - * side effect: if strptr start a positive hex-integer then strptr is - * set to the character after the "0x" - *-------------------------------------------------------------------------*/ -static int -xpram_int_format(char **strptr) + * Side effects + * the global variabls devs is set to the value of + * and sizes[i] is set to the i-th + * partition size (if provided). A parsing error of a value + * results in this value being set to -EINVAL. + */ +static int __init xpram_setup (char *str) { - if ( !isdigit(**strptr) ) - return XPRAM_INVALF; - if ( (**strptr == '0') - && ( (*((*strptr)+1) == 'x') || (*((*strptr) +1) == 'X') ) - && isdigit(*((*strptr)+3)) ) { - *strptr=(*strptr)+2; - return XPRAM_HEXF; - } else return XPRAM_DECF; -} - -/*-------------------------------------------------------------------------- - * Read non-negative decimal integer - * - * Argument: strptr pointer to string starting with a non-negative integer - * in decimal format - * result: the value of theinitial integer pointed to by strptr - * - * side effect: strptr is set to the first character following the integer - *-------------------------------------------------------------------------*/ + char *cp; + int i; -static int -xpram_read_decint (char ** strptr) -{ - int res=0; - while ( isdigit(**strptr) ) { - res = (res*10) + xpram_get_hexdigit(**strptr); - (*strptr)++; - } - return res; + devs = simple_strtoul(str, &cp, 10); + if (cp <= str || devs > XPRAM_MAX_DEVS) + return 0; + for (i = 0; (i < devs) && (*cp++ == ','); i++) { + sizes[i] = simple_strtoul(cp, &cp, 10); + if (*cp == 'g' || *cp == 'G') { + sizes[i] <<= 20; + cp++; + } else if (*cp == 'm' || *cp == 'M') { + sizes[i] <<= 10; + cp++; + } else if (*cp == 'k' || *cp == 'K') + cp++; + while (isspace(*cp)) cp++; + } + if (*cp == ',' && i >= devs) + PRINT_WARN("partition sizes list has too many entries.\n"); + else if (*cp != 0) + PRINT_WARN("ignored '%s' at end of parameter string.\n", cp); + return 1; } -/*-------------------------------------------------------------------------- - * Read non-negative hex-integer - * - * Argument: strptr pointer to string starting with a non-negative integer - * in hexformat (without "0x" prefix) - * result: the value of the initial integer pointed to by strptr - * - * side effect: strptr is set to the first character following the integer - *-------------------------------------------------------------------------*/ +__setup("xpram_parts=", xpram_setup); +#endif -static int -xpram_read_hexint (char ** strptr) -{ - int res=0; - while ( isxdigit(**strptr) ) { - res = (res<<4) + xpram_get_hexdigit(**strptr); - (*strptr)++; - } - return res; +/* + * Copy expanded memory page (4kB) into main memory + * Arguments + * page_addr: address of target page + * xpage_index: index of expandeded memory page + * Return value + * 0: if operation succeeds + * -EIO: if pgin failed + * -ENXIO: if xpram has vanished + */ +static int xpram_page_in (unsigned long page_addr, unsigned long xpage_index) +{ + int cc; + + __asm__ __volatile( + " lhi %0,2\n" /* return unused cc 2 if pgin traps */ + " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */ + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" +#ifndef CONFIG_ARCH_S390X + ".section __ex_table,\"a\"\n" + " .align 
4\n" + " .long 0b,1b\n" + ".previous" +#else + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 0b,1b\n" + ".previous" +#endif + : "=&d" (cc) + : "a" (__pa(page_addr)), "a" (xpage_index) + : "cc" ); + if (cc == 3) + return -ENXIO; + if (cc == 2) { + PRINT_ERR("expanded storage lost!\n"); + return -ENXIO; + } + if (cc == 1) { + PRINT_ERR("page in failed for page index %ld.\n", + xpage_index); + return -EIO; + } + return 0; } -/*-------------------------------------------------------------------------- - * Read non-negative integer - * - * Argument: strptr pointer to string starting with a non-negative integer - (either in decimal- or in hex-format - * result: the value of the initial integer pointed to by strptr - * in case of a parsing error the result is -EINVAL - * - * side effect: strptr is set to the first character following the integer - *-------------------------------------------------------------------------*/ -static int -xpram_read_int (char ** strptr) -{ - switch ( xpram_int_format(strptr) ) { - case XPRAM_INVALF: return -EINVAL; - case XPRAM_HEXF: return xpram_read_hexint(strptr); - case XPRAM_DECF: return xpram_read_decint(strptr); - default: return -EINVAL; +/* + * Copy a 4kB page of main memory to an expanded memory page + * Arguments + * page_addr: address of source page + * xpage_index: index of expandeded memory page + * Return value + * 0: if operation succeeds + * -EIO: if pgout failed + * -ENXIO: if xpram has vanished + */ +static long xpram_page_out (unsigned long page_addr, unsigned long xpage_index) +{ + int cc; + + __asm__ __volatile( + " lhi %0,2\n" /* return unused cc 2 if pgout traps */ + " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */ + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" +#ifndef CONFIG_ARCH_S390X + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 0b,1b\n" + ".previous" +#else + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 0b,1b\n" + ".previous" +#endif + : "=&d" (cc) + : "a" (__pa(page_addr)), "a" (xpage_index) + : "cc" ); + if (cc == 3) + return -ENXIO; + if (cc == 2) { + PRINT_ERR("expanded storage lost!\n"); + return -ENXIO; } -} - -/*-------------------------------------------------------------------------- - * Read size - * - * Argument: strptr pointer to string starting with a non-negative integer - * followed optionally by a size modifier: - * k or K for kilo (default), - * m or M for mega - * g or G for giga - * result: the value of the initial integer pointed to by strptr - * multiplied by the modifier value devided by 1024 - * in case of a parsing error the result is -EINVAL - * - * side effect: strptr is set to the first character following the size - *-------------------------------------------------------------------------*/ - -static int -xpram_read_size (char ** strptr) -{ - int res; - - res=xpram_read_int(strptr); - if ( res < 0 )return res; - switch ( **strptr ) { - case 'g': - case 'G': res=res*1024; - case 'm': - case 'M': res=res*1024; - case 'k' : - case 'K' : (* strptr)++; + if (cc == 1) { + PRINT_ERR("page out failed for page index %ld.\n", + xpage_index); + return -EIO; } - - return res; + return 0; } +/* + * Check if xpram is available. + */ +static int __init xpram_present(void) +{ + unsigned long mem_page; + int rc; -/*-------------------------------------------------------------------------- - * Read tail of comma separated size list ",i1,i2,...,in" - * - * Arguments:strptr pointer to string. 
It is assumed that the string has - * the format (",")* - * maxl integer describing the maximal number of elements in the - list pointed to by strptr, max must be > 0. - * ilist array of dimension >= maxl of integers to be modified - * - * result: -EINVAL if the list is longer than maxl - * 0 otherwise - * - * side effects: for j=1,...,n ilist[ij] is set to the value of ij if it is - * a valid non-negative integer and to -EINVAL otherwise - * if no comma is found where it is expected an entry in - * ilist is set to -EINVAL - *-------------------------------------------------------------------------*/ -static int -xpram_read_size_list_tail (char ** strptr, int maxl, int * ilist) -{ - int i=0; - char *str = *strptr; - int res=0; - - while ( (*str == ',') && (i < maxl) ) { - str++; - ilist[i] = xpram_read_size(&str); - if ( ilist[i] == -EINVAL ) { - xpram_scan_to_next_comma(&str); - res = -EINVAL; - } - i++; - } - return res; -#if 0 /* be lenient about trailing stuff */ - if ( *str != 0 && *str != ' ' ) { - ilist[MAX(i-1,0)] = -EINVAL; - return -EINVAL; - } else return 0; -#endif + mem_page = (unsigned long) __get_free_page(GFP_KERNEL); + rc = xpram_page_in(mem_page, 0); + free_page(mem_page); + return rc ? -ENXIO : 0; } - /* - * expanded memory operations + * Return index of the last available xpram page. */ - - -/*--------------------------------------------------------------------*/ -/* Copy expanded memory page (4kB) into main memory */ -/* Arguments */ -/* page_addr: address of target page */ -/* xpage_index: index of expandeded memory page */ -/* Return value */ -/* 0: if operation succeeds */ -/* non-0: otherwise */ -/*--------------------------------------------------------------------*/ -long xpram_page_in (unsigned long page_addr, unsigned long xpage_index) +static unsigned long __init xpram_highest_page_index(void) { - int cc=0; - unsigned long real_page_addr = __pa(page_addr); -#ifndef CONFIG_ARCH_S390X - __asm__ __volatile__ ( - " lr 1,%1 \n" /* r1 = real_page_addr */ - " lr 2,%2 \n" /* r2 = xpage_index */ - " .long 0xb22e0012 \n" /* pgin r1,r2 */ - /* copy page from expanded memory */ - "0: ipm %0 \n" /* save status (cc & program mask */ - " srl %0,28 \n" /* cc into least significant bits */ - "1: \n" /* we are done */ - ".section .fixup,\"ax\"\n" /* start of fix up section */ - "2: lhi %0,2 \n" /* return unused condition code 2 */ - " bras 1,3f \n" /* safe label 1: in r1 and goto 3 */ - " .long 1b \n" /* literal containing label 1 */ - "3: l 1,0(1) \n" /* load label 1 address into r1 */ - " br 1 \n" /* goto label 1 (across sections) */ - ".previous \n" /* back in text section */ - ".section __ex_table,\"a\"\n" /* start __extable */ - " .align 4 \n" - " .long 0b,2b \n" /* failure point 0, fixup code 2 */ - ".previous \n" - : "=d" (cc) : "d" (real_page_addr), "d" (xpage_index) : "cc", "1", "2" - ); -#else /* CONFIG_ARCH_S390X */ - __asm__ __volatile__ ( - " lgr 1,%1 \n" /* r1 = real_page_addr */ - " lgr 2,%2 \n" /* r2 = xpage_index */ - " .long 0xb22e0012 \n" /* pgin r1,r2 */ - /* copy page from expanded memory */ - "0: ipm %0 \n" /* save status (cc & program mask */ - " srl %0,28 \n" /* cc into least significant bits */ - "1: \n" /* we are done */ - ".section .fixup,\"ax\"\n" /* start of fix up section */ - "2: lghi %0,2 \n" /* return unused condition code 2 */ - " jg 1b \n" /* goto label 1 above */ - ".previous \n" /* back in text section */ - ".section __ex_table,\"a\"\n" /* start __extable */ - " .align 8 \n" - " .quad 0b,2b \n" /* failure point 0, fixup code 2 */ - 
".previous \n" - : "=d" (cc) : "d" (real_page_addr), "d" (xpage_index) : "cc", "1", "2" - ); -#endif /* CONFIG_ARCH_S390X */ - switch (cc) { - case 0: return 0; - case 1: return -EIO; - case 2: return -ENXIO; - case 3: return -ENXIO; - default: return -EIO; /* should not happen */ - }; -} + unsigned long page_index, add_bit; + unsigned long mem_page; -/*--------------------------------------------------------------------*/ -/* Copy a 4kB page of main memory to an expanded memory page */ -/* Arguments */ -/* page_addr: address of source page */ -/* xpage_index: index of expandeded memory page */ -/* Return value */ -/* 0: if operation succeeds */ -/* non-0: otherwise */ -/*--------------------------------------------------------------------*/ -long xpram_page_out (unsigned long page_addr, unsigned long xpage_index) -{ - int cc=0; - unsigned long real_page_addr = __pa(page_addr); -#ifndef CONFIG_ARCH_S390X - __asm__ __volatile__ ( - " lr 1,%1 \n" /* r1 = mem_page */ - " lr 2,%2 \n" /* r2 = rpi */ - " .long 0xb22f0012 \n" /* pgout r1,r2 */ - /* copy page from expanded memory */ - "0: ipm %0 \n" /* save status (cc & program mask */ - " srl %0,28 \n" /* cc into least significant bits */ - "1: \n" /* we are done */ - ".section .fixup,\"ax\"\n" /* start of fix up section */ - "2: lhi %0,2 \n" /* return unused condition code 2 */ - " bras 1,3f \n" /* safe label 1: in r1 and goto 3 */ - " .long 1b \n" /* literal containing label 1 */ - "3: l 1,0(1) \n" /* load label 1 address into r1 */ - " br 1 \n" /* goto label 1 (across sections) */ - ".previous \n" /* back in text section */ - ".section __ex_table,\"a\"\n" /* start __extable */ - " .align 4 \n" - " .long 0b,2b \n" /* failure point 0, fixup code 2 */ - ".previous \n" - : "=d" (cc) : "d" (real_page_addr), "d" (xpage_index) : "cc", "1", "2" - ); -#else /* CONFIG_ARCH_S390X */ - __asm__ __volatile__ ( - " lgr 1,%1 \n" /* r1 = mem_page */ - " lgr 2,%2 \n" /* r2 = rpi */ - " .long 0xb22f0012 \n" /* pgout r1,r2 */ - /* copy page from expanded memory */ - "0: ipm %0 \n" /* save status (cc & program mask */ - " srl %0,28 \n" /* cc into least significant bits */ - "1: \n" /* we are done */ - ".section .fixup,\"ax\"\n" /* start of fix up section */ - "2: lghi %0,2 \n" /* return unused condition code 2 */ - " jg 1b \n" /* goto label 1 above */ - ".previous \n" /* back in text section */ - ".section __ex_table,\"a\"\n" /* start __extable */ - " .align 8 \n" - " .quad 0b,2b \n" /* failure point 0, fixup code 2 */ - ".previous \n" - : "=d" (cc) : "d" (real_page_addr), "d" (xpage_index) : "cc", "1", "2" - ); -#endif /* CONFIG_ARCH_S390X */ - switch (cc) { - case 0: return 0; - case 1: return -EIO; - case 2: { PRINT_ERR("expanded storage lost!\n"); return -ENXIO; } - case 3: return -ENXIO; - default: return -EIO; /* should not happen */ - } -} + mem_page = (unsigned long) __get_free_page(GFP_KERNEL); -/*--------------------------------------------------------------------*/ -/* Measure expanded memory */ -/* Return value */ -/* size of expanded memory in kB (must be a multipe of 4) */ -/*--------------------------------------------------------------------*/ -int xpram_size(void) -{ - int cc=0; - unsigned long base=0; - unsigned long po, pi, rpi; /* page index order, page index */ - - unsigned long mem_page = __get_free_page(GFP_KERNEL); - - /* for po=0,1,2,... 
try to move in page number base+(2^po)-1 */ - pi=1; - for (po=0; po <= 32; po++) { /* pi = 2^po */ - cc=xpram_page_in(mem_page,base+pi-1); - if ( cc ) break; - pi <<= 1; - } - if ( cc && (po < 31 ) ) { - pi >>=1; - base += pi; - pi >>=1; - for ( ; pi > 0; pi >>= 1) { - rpi = pi - 1; - cc=xpram_page_in(mem_page,base+rpi); - if ( !cc ) base += pi; - } + page_index = 0; + add_bit = 1ULL << (sizeof(unsigned long)*8 - 1); + while (add_bit > 0) { + if (xpram_page_in(mem_page, page_index | add_bit) == 0) + page_index |= add_bit; + add_bit >>= 1; } - + free_page (mem_page); - if ( cc && (po < 31) ) - return (XPRAM_KB_IN_PG * base); - else /* return maximal value possible */ - return INT_MAX; + return page_index; } /* - * Open and close + * Block device make request function. */ - -int xpram_open (struct inode *inode, struct file *filp) +static int xpram_make_request(request_queue_t *q, struct bio *bio) { - Xpram_Dev *dev; /* device information */ - int num = MINOR(inode->i_rdev); - - - if (num >= xpram_devs) return -ENODEV; - dev = xpram_devices + num; - - PRINT_DEBUG("calling xpram_open for device %d\n",num); - PRINT_DEBUG(" size %dkB, name %s, usage: %d\n", - dev->size,dev->device_name, atomic_read(&(dev->usage))); + xpram_device_t *xdev; + struct bio_vec *bvec; + unsigned long index; + unsigned long page_addr; + unsigned long bytes; + int i; - atomic_inc(&(dev->usage)); - return 0; /* success */ + if (MINOR(bio->bi_bdev->bd_dev) >= xpram_devs) + /* No such device. */ + goto fail; + xdev = xpram_devices + MINOR(bio->bi_bdev->bd_dev); + if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) + /* Request is not page-aligned. */ + goto fail; + if ((bio->bi_size >> 12) > xdev->size) + /* Request exceeds device size. */ + goto fail; + index = (bio->bi_sector >> 3) + xdev->offset; + bio_for_each_segment(bvec, bio, i) { + page_addr = (unsigned long) + kmap(bvec->bv_page) + bvec->bv_offset; + bytes = bvec->bv_len; + if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) + /* More paranoia. */ + goto fail; + while (bytes > 0) { + if (bio_data_dir(bio) == READ) { + if (xpram_page_in(page_addr, index) != 0) + goto fail; + } else { + if (xpram_page_out(page_addr, index) != 0) + goto fail; + } + page_addr += 4096; + bytes -= 4096; + index++; + } + } + set_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_end_io(bio); + return 0; +fail: + bio_io_error(bio); + return 0; } -int xpram_release (struct inode *inode, struct file *filp) +/* + * The file operations + */ +static int xpram_open (struct inode *inode, struct file *filp) { - Xpram_Dev *dev = xpram_devices + MINOR(inode->i_rdev); - - PRINT_DEBUG("calling xpram_release for device %d (size %dkB, usage: %d)\n",MINOR(inode->i_rdev) ,dev->size,atomic_read(&(dev->usage))); - - /* - * If the device is closed for the last time, start a timer - * to release RAM in half a minute. 
The function and argument - * for the timer have been setup in init_module() - */ - if (!atomic_dec_return(&(dev->usage))) { - /* but flush it right now */ - /* Everything is already flushed by caller -- AV */ - } - return(0); + if (minor(inode->i_rdev) >= xpram_devs) + return -ENODEV; + return 0; } -/* - * The ioctl() implementation - */ - -int xpram_ioctl (struct inode *inode, struct file *filp, +static int xpram_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - int err, size; - struct hd_geometry *geo = (struct hd_geometry *)arg; + struct hd_geometry *geo; + unsigned long size; + int idx; - PRINT_DEBUG("ioctl 0x%x 0x%lx\n", cmd, arg); - switch(cmd) { - - case BLKGETSIZE: /* 0x1260 */ + if ((!inode) || kdev_none(inode->i_rdev)) + return -EINVAL; + idx = minor(inode->i_rdev); + if (idx >= xpram_devs) + return -ENODEV; + switch (cmd) { + case BLKGETSIZE: /* Return the device size, expressed in sectors */ - return put_user( 1024* xpram_sizes[MINOR(inode->i_rdev)] - / XPRAM_SOFTSECT, - (unsigned long *) arg); - + return put_user(xpram_sizes[idx] << 1, (unsigned long *) arg); case BLKGETSIZE64: - return put_user( (u64)(1024* xpram_sizes[MINOR(inode->i_rdev)] - / XPRAM_SOFTSECT) << 9, - (u64 *) arg); - - case BLKFLSBUF: /* flush, 0x1261 */ - fsync_bdev(inode->i_bdev); - if ( capable(CAP_SYS_ADMIN) )invalidate_bdev(inode->i_bdev, 0); - return 0; - - case BLKRRPART: /* re-read partition table: can't do it, 0x1259 */ + /* Return the device size, expressed in bytes */ + return put_user((u64) xpram_sizes[idx] << 10, (u64 *) arg); + case BLKFLSBUF: + return blk_ioctl(((struct inode *) inode)->i_bdev, cmd, arg); + case BLKRRPART: + /* re-read partition table: can't do it */ return -EINVAL; - case HDIO_GETGEO: /* * get geometry: we have to fake one... trim the size to a * multiple of 64 (32k): tell we have 16 sectors, 4 heads, * whatever cylinders. Tell also that data starts at sector. 4. 
*/ - size = xpram_mem_avail * 1024 / XPRAM_SOFTSECT; - /* size = xpram_mem_avail * 1024 / xpram_hardsect; */ - size &= ~0x3f; /* multiple of 64 */ - if (geo==NULL) return -EINVAL; - /* - * err=verify_area_20(VERIFY_WRITE, geo, sizeof(*geo)); - * if (err) return err; - */ - + geo = (struct hd_geometry *) arg; + if (geo == NULL) + return -EINVAL; + size = (xpram_pages * 8) & ~0x3f; put_user(size >> 6, &geo->cylinders); - put_user( 4, &geo->heads); - put_user( 16, &geo->sectors); - put_user( 4, &geo->start); - + put_user(4, &geo->heads); + put_user(16, &geo->sectors); + put_user(4, &geo->start); return 0; + default: + return -EINVAL; } - - return -EINVAL; /* unknown command */ } -/* - * The file operations - */ -struct block_device_operations xpram_devops = +static struct block_device_operations xpram_devops = { owner: THIS_MODULE, ioctl: xpram_ioctl, @@ -664,416 +382,149 @@ }; /* - * Block-driver specific functions - */ - -void xpram_request(request_queue_t * queue) -{ - Xpram_Dev *device; - /* u8 *ptr; */ - /* int size; */ - - unsigned long page_no; /* expanded memory page number */ - unsigned long sects_to_copy; /* number of sectors to be copied */ - char * buffer; /* local pointer into buffer cache */ - int dev_no; /* device number of request */ - int fault; /* faulty access to expanded memory */ - struct request * current_req; /* working request */ - - while(1) { - if (blk_queue_empty(QUEUE)) - return; - - fault=0; - current_req = CURRENT; - dev_no = DEVICE_NR(current_req->rq_dev); - /* Check if the minor number is in range */ - if ( dev_no > xpram_devs ) { - static int count = 0; - if (count++ < 5) /* print the message at most five times */ - PRINT_WARN(" request for unknown device\n"); - end_request(CURRENT, 0); - continue; - } - - /* pointer to device structure, from the global array */ - device = xpram_devices + dev_no; - sects_to_copy = current_req->current_nr_sectors; - /* does request exceed size of device ? */ - if ( XPRAM_SEC2KB(sects_to_copy) > xpram_sizes[dev_no] ) { - PRINT_WARN(" request past end of device\n"); - end_request(CURRENT, 0); - continue; - } - - /* Does request start at page boundery? -- paranoia */ -#if 0 - PRINT_DEBUG(" req %lx, sect %lx, to copy %lx, buf addr %lx\n", (unsigned long) current_req, current_req->sector, sects_to_copy, (unsigned long) current_req->buffer); -#endif - buffer = current_req->buffer; -#if XPRAM_SEC_IN_PG != 1 - /* Does request start at an expanded storage page boundery? */ - if ( current_req->sector & (XPRAM_SEC_IN_PG - 1) ) { - PRINT_WARN(" request does not start at an expanded storage page boundery\n"); - PRINT_WARN(" referenced sector: %ld\n",current_req->sector); - end_request(CURRENT, 0); - continue; - } - /* Does request refere to partial expanded storage pages? */ - if ( sects_to_copy & (XPRAM_SEC_IN_PG - 1) ) { - PRINT_WARN(" request referes to a partial expanded storage page\n"); - end_request(CURRENT, 0); - continue; - } -#endif /* XPRAM_SEC_IN_PG != 1 */ - /* Is request buffer aligned with kernel pages? */ - if ( ((unsigned long)buffer) & (XPRAM_PGSIZE-1) ) { - PRINT_WARN(" request buffer is not aligned with kernel pages\n"); - end_request(CURRENT, 0); - continue; - } - - /* which page of expanded storage is affected first? 
*/ - page_no = (xpram_offsets[dev_no] >> XPRAM_KB_IN_PG_ORDER) - + (current_req->sector >> XPRAM_SEC_IN_PG_ORDER); - -#if 0 - PRINT_DEBUG("request: %d ( dev %d, copy %d sectors, at page %d ) \n", current_req->cmd,dev_no,sects_to_copy,page_no); -#endif - - switch(current_req->cmd) { - case READ: - do { - if ( (fault=xpram_page_in((unsigned long)buffer,page_no)) ) { - PRINT_WARN("xpram(dev %d): page in failed for page %ld.\n",dev_no,page_no); - break; - } - sects_to_copy -= XPRAM_SEC_IN_PG; - buffer += XPRAM_PGSIZE; - page_no++; - } while ( sects_to_copy > 0 ); - break; - case WRITE: - do { - if ( (fault=xpram_page_out((unsigned long)buffer,page_no)) - ) { - PRINT_WARN("xpram(dev %d): page out failed for page %ld.\n",dev_no,page_no); - break; - } - sects_to_copy -= XPRAM_SEC_IN_PG; - buffer += XPRAM_PGSIZE; - page_no++; - } while ( sects_to_copy > 0 ); - break; - default: - /* can't happen */ - end_request(CURRENT, 0); - continue; - } - if ( fault ) end_request(CURRENT, 0); - else end_request(CURRENT, 1); /* success */ - } -} - -/* - * Kernel interfaces + * Setup xpram_sizes array. */ - -/* - * Parses the kernel parameters given in the kernel parameter line. - * The expected format is - * [","]* - * where - * devices is a positive integer that initializes xpram_devs - * each size is a non-negative integer possibly followed by a - * magnitude (k,K,m,M,g,G), the list of sizes initialises - * xpram_sizes - * - * Arguments - * str: substring of kernel parameter line that contains xprams - * kernel parameters. - * ints: not used -- not in Version > 2.3 any more - * - * Result 0 on success, -EINVAl else -- only for Version > 2.3 - * - * Side effects - * the global variabls devs is set to the value of - * and sizes[i] is set to the i-th - * partition size (if provided). A parsing error of a value - * results in this value being set to -EINVAL. - */ -int xpram_setup (char *str) +static int __init xpram_setup_sizes(unsigned long pages) { - devs = xpram_read_int(&str); - if ( devs != -EINVAL ) - if ( xpram_read_size_list_tail(&str,devs,sizes) < 0 ) { - PRINT_ERR("error while reading xpram parameters.\n"); - return -EINVAL; - } - else - return 0; - else - return -EINVAL; -} - -/* - * initialize xpram device driver - * - * Result: 0 ok - * negative number: negative error code - */ + unsigned long mem_needed; + unsigned long mem_auto; + int mem_auto_no; + int i; -int xpram_init(void) -{ - int result, i; - int mem_usable; /* net size of expanded memory */ - int mem_needed=0; /* size of expanded memory needed to fullfill - * requirements of non-zero parameters in sizes - */ - - int mem_auto_no=0; /* number of (implicit) zero parameters in sizes */ - int mem_auto; /* automatically determined device size */ - int minor_length; /* store the length of a minor (w/o '\0') */ - int minor_thresh; /* threshhold for minor lenght */ - - request_queue_t *q; /* request queue */ - - /* - * Copy the (static) cfg variables to public prefixed ones to allow - * snoozing with a debugger. - */ - - xpram_blksize = blksize; - xpram_hardsect = hardsect; - - PRINT_INFO("initializing: %s\n",""); - /* check arguments */ - xpram_major = major; - if ( (devs <= 0) || (devs > XPRAM_MAX_DEVS) ) { + /* Check number of devices. 
*/ + if (devs <= 0 || devs > XPRAM_MAX_DEVS) { PRINT_ERR("invalid number %d of devices\n",devs); - PRINT_ERR("Giving up xpram\n"); return -EINVAL; } - xpram_devs = devs; - for (i=0; i < xpram_devs; i++) { - if ( sizes[i] < 0 ) { - PRINT_ERR("Invalid partition size %d kB\n",xpram_sizes[i]); - PRINT_ERR("Giving up xpram\n"); - return -EINVAL; - } else { - xpram_sizes[i] = NEXT4(sizes[i]); /* page align */ - if ( sizes[i] ) mem_needed += xpram_sizes[i]; - else mem_auto_no++; - } - } - - PRINT_DEBUG(" major %d \n", xpram_major); - PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs); - for (i=0; i < xpram_devs; i++) { - if ( sizes[i] ) - PRINT_INFO(" size of partition %d: %d kB\n", i, xpram_sizes[i]); - else - PRINT_INFO(" size of partition %d to be set automatically\n",i); - } - PRINT_DEBUG(" memory needed (for sized partitions): %d kB\n", mem_needed); - PRINT_DEBUG(" partitions to be sized automatically: %d\n", mem_auto_no); - -#if 0 - /* Hardsect can't be changed :( */ - /* I try it any way. Yet I must distinguish - * between hardsects (to be changed to 4096) - * and soft sectors, hard-coded for buffer - * sizes within the requests - */ - if (hardsect != 512) { - PRINT_ERR("Can't change hardsect size\n"); - hardsect = xpram_hardsect = 512; - } -#endif - PRINT_INFO(" hardsector size: %dB \n",xpram_hardsect); + xpram_devs = devs; /* - * Register your major, and accept a dynamic number + * Copy sizes array to xpram_sizes and align partition + * sizes to page boundary. */ - result = devfs_register_blkdev(xpram_major, "xpram", &xpram_devops); - if (result < 0) { - PRINT_ERR("Can't get major %d\n",xpram_major); - PRINT_ERR("Giving up xpram\n"); - return result; + mem_needed = 0; + mem_auto_no = 0; + for (i = 0; i < xpram_devs; i++) { + xpram_sizes[i] = (sizes[i] + 3) & -4UL; + if (xpram_sizes[i]) + mem_needed += xpram_sizes[i]; + else + mem_auto_no++; } - xpram_devfs_handle = devfs_mk_dir (NULL, "slram", NULL); - devfs_register_series (xpram_devfs_handle, "%u", XPRAM_MAX_DEVS, - DEVFS_FL_DEFAULT, XPRAM_MAJOR, 0, - S_IFBLK | S_IRUSR | S_IWUSR, - &xpram_devops, NULL); - if (xpram_major == 0) xpram_major = result; /* dynamic */ - major = xpram_major; /* Use `major' later on to save typing */ - - result = -ENOMEM; /* for the possible errors */ - - /* - * measure expanded memory - */ - - xpram_mem_avail = xpram_size(); - if (!xpram_mem_avail) { - PRINT_ERR("No or not enough expanded memory available\n"); - PRINT_ERR("Giving up xpram\n"); - result = -ENODEV; - goto fail_malloc; + + PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs); + for (i = 0; i < xpram_devs; i++) { + if (xpram_sizes[i]) + PRINT_INFO(" size of partition %d: %d kB\n", + i, xpram_sizes[i]); + else + PRINT_INFO(" size of partition %d to be set " + "automatically\n",i); } - PRINT_INFO(" %d kB expanded memory found.\n",xpram_mem_avail ); + PRINT_DEBUG(" memory needed (for sized partitions): %ld kB\n", + mem_needed); + PRINT_DEBUG(" partitions to be sized automatically: %d\n", + mem_auto_no); - /* - * Assign the other needed values: request, size, blksize, - * hardsect. All the minor devices feature the same value. - * Note that `xpram' defines all of them to allow testing non-default - * values. A real device could well avoid setting values in global - * arrays if it uses the default values. 
- */ - - q = BLK_DEFAULT_QUEUE(major); - blk_init_queue (q, xpram_request); - blk_queue_hardsect_size(q, xpram_hardsect); - - /* we want to have XPRAM_UNUSED blocks security buffer between devices */ - mem_usable=xpram_mem_avail-(XPRAM_UNUSED*(xpram_devs-1)); - if ( mem_needed > mem_usable ) { + if (mem_needed > pages * 4) { PRINT_ERR("Not enough expanded memory available\n"); - PRINT_ERR("Giving up xpram\n"); - goto fail_malloc; + return -EINVAL; } /* * partitioning: * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB - * else: ; all partitions i with xpram_sizesxpram_size[i] + * else: ; all partitions with zero xpram_sizes[i] * partition equally the remaining space */ + if (mem_auto_no) { + mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; + PRINT_INFO(" automatically determined " + "partition size: %ld kB\n", mem_auto); + for (i = 0; i < xpram_devs; i++) + if (xpram_sizes[i] == 0) + xpram_sizes[i] = mem_auto; + } + return 0; +} + +static int __init xpram_setup_blkdev(void) +{ + request_queue_t *q; + unsigned long offset; + int i, rc; - if ( mem_auto_no ) { - mem_auto=LAST4((mem_usable-mem_needed)/mem_auto_no); - PRINT_INFO(" automatically determined partition size: %d kB\n", mem_auto); - for (i=0; i < xpram_devs; i++) - if (xpram_sizes[i] == 0) xpram_sizes[i] = mem_auto; + /* + * Register xpram major. + */ + rc = devfs_register_blkdev(XPRAM_MAJOR, XPRAM_NAME, &xpram_devops); + if (rc < 0) { + PRINT_ERR("Can't get xpram major %d\n", XPRAM_MAJOR); + return rc; } - blk_size[major] = xpram_sizes; - xpram_offsets = kmalloc(xpram_devs * sizeof(int), GFP_KERNEL); - if (!xpram_offsets) { - PRINT_ERR("Not enough memory for xpram_offsets\n"); - PRINT_ERR("Giving up xpram\n"); - goto fail_malloc; - } - xpram_offsets[0] = 0; - for (i=1; i < xpram_devs; i++) - xpram_offsets[i] = xpram_offsets[i-1] + xpram_sizes[i-1] + XPRAM_UNUSED; - -#if 0 - for (i=0; i < xpram_devs; i++) - PRINT_DEBUG(" device(%d) offset = %d kB, size = %d kB\n",i, xpram_offsets[i], xpram_sizes[i]); -#endif + xpram_devfs_handle = devfs_mk_dir (NULL, "slram", NULL); + devfs_register_series (xpram_devfs_handle, "%u", xpram_devs, + DEVFS_FL_DEFAULT, XPRAM_MAJOR, 0, + S_IFBLK | S_IRUSR | S_IWUSR, + &xpram_devops, NULL); - /* - * allocate the devices -- we can't have them static, as the number - * can be specified at load time + /* + * Assign the other needed values: make request function, sizes and + * hardsect size. All the minor devices feature the same value. 
*/ + q = BLK_DEFAULT_QUEUE(XPRAM_MAJOR); + blk_queue_make_request(q,xpram_make_request); + blk_queue_hardsect_size(q, 4096); + blk_size[XPRAM_MAJOR] = xpram_sizes; - xpram_devices = kmalloc(xpram_devs * sizeof (Xpram_Dev), GFP_KERNEL); - if (!xpram_devices) { - PRINT_ERR("Not enough memory for xpram_devices\n"); - PRINT_ERR("Giving up xpram\n"); - goto fail_malloc_devices; - } - memset(xpram_devices, 0, xpram_devs * sizeof (Xpram_Dev)); - minor_length = 1; - minor_thresh = 10; - for (i=0; i < xpram_devs; i++) { - /* data and usage remain zeroed */ - xpram_devices[i].size = xpram_sizes[i]; /* size in kB not in bytes */ - atomic_set(&(xpram_devices[i].usage),0); - if (i == minor_thresh) { - minor_length++; - minor_thresh *= 10; - } - xpram_devices[i].device_name = - kmalloc(1 + strlen(XPRAM_DEVICE_NAME_PREFIX) + minor_length,GFP_KERNEL); - if ( xpram_devices[i].device_name == NULL ) { - PRINT_ERR("Not enough memory for xpram_devices[%d].device_name\n",i); - PRINT_ERR("Giving up xpram\n"); - goto fail_devfs_register; - } - sprintf(xpram_devices[i].device_name,XPRAM_DEVICE_NAME_PREFIX "%d",i); - - PRINT_DEBUG("initializing xpram_open for device %d\n",i); - PRINT_DEBUG(" size %dkB, name %s, usage: %d\n", - xpram_devices[i].size,xpram_devices[i].device_name, atomic_read(&(xpram_devices[i].usage))); - -#if 0 /* WHY? */ - xpram_devices[i].devfs_entry = - devfs_register(NULL /* devfs root dir */, - xpram_devices[i].device_name, 0, - 0 /* flags */, - XPRAM_MAJOR,i, - 0755 /* access mode */, - 0 /* uid */, 0 /* gid */, - &xpram_devops, - (void *) &(xpram_devices[i]) - ); - if ( xpram_devices[i].devfs_entry == NULL ) { - PRINT_ERR("devfs system registry failed\n"); - PRINT_ERR("Giving up xpram\n"); - goto fail_devfs_register; - } -#endif /* WHY? */ + /* + * Setup device structures. + */ + offset = 0; + for (i = 0; i < xpram_devs; i++) { + xpram_devices[i].size = xpram_sizes[i] / 4; + xpram_devices[i].offset = offset; + offset += xpram_devices[i].size; } - return 0; /* succeed */ - - /* clean up memory in case of failures */ - fail_devfs_register: - for (i=0; i < xpram_devs; i++) { - if ( xpram_devices[i].device_name ) - kfree(xpram_devices[i].device_name); - } - kfree(xpram_devices); - kfree (xpram_offsets); - fail_malloc_devices: - fail_malloc: - /* ??? unregister_chrdev(major, "xpram"); */ - unregister_blkdev(major, "xpram"); - return result; + return 0; } /* - * Finally, the module stuff + * Finally, the init/exit functions. */ - -int init_module(void) +static void __exit xpram_exit(void) { - int rc = 0; - - PRINT_INFO ("trying to load module\n"); - rc = xpram_init (); - if (rc == 0) { - PRINT_INFO ("Module loaded successfully\n"); - } else { - PRINT_WARN ("Module load returned rc=%d\n", rc); - } - return rc; + blk_clear(XPRAM_MAJOR); + devfs_unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); + devfs_unregister(xpram_devfs_handle); + unregister_sys_device(&xpram_sys_device); } -void cleanup_module(void) +static int __init xpram_init(void) { - int i; - - /* first of all, reset all the data structures */ - kfree(xpram_offsets); - blk_clear(major); + int rc; - /* finally, the usual cleanup */ - devfs_unregister(xpram_devfs_handle); - if (devfs_unregister_blkdev(MAJOR_NR, "xpram")) - printk(KERN_WARNING "xpram: cannot unregister blkdev\n"); - kfree(xpram_devices); + /* Find out size of expanded memory. 
*/ + if (xpram_present() != 0) { + PRINT_WARN("No expanded memory available\n"); + return -ENODEV; + } + xpram_pages = xpram_highest_page_index(); + PRINT_INFO(" %li pages expanded memory found (%li KB).\n", + xpram_pages, xpram_pages*4); + rc = xpram_setup_sizes(xpram_pages); + if (rc) + return rc; + rc = register_sys_device(&xpram_sys_device); + if (rc) + return rc; + rc = xpram_setup_blkdev(); + if (rc) + unregister_sys_device(&xpram_sys_device); + return rc; } + +module_init(xpram_init); +module_exit(xpram_exit); diff -Nru a/drivers/s390/block/xpram.h b/drivers/s390/block/xpram.h --- a/drivers/s390/block/xpram.h Tue Jun 18 19:12:02 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,70 +0,0 @@ - -/* - * xpram.h -- definitions for the char module - * - *********/ - - -#include -#include -#include - -/* version dependencies have been confined to a separate file */ - -/* - * Macros to help debugging - */ - -#define XPRAM_NAME "xpram" /* name of device/module */ -#define XPRAM_DEVICE_NAME_PREFIX "slram" /* Prefix device name for major 35 */ -#define XPRAM_DEVS 1 /* one partition */ -#define XPRAM_RAHEAD 8 /* no real read ahead */ -#define XPRAM_PGSIZE 4096 /* page size of (expanded) mememory pages - * according to S/390 architecture - */ -#define XPRAM_BLKSIZE XPRAM_PGSIZE /* must be equalt to page size ! */ -#define XPRAM_HARDSECT XPRAM_PGSIZE /* FIXME -- we have to deal with both - * this hard sect size and in some cases - * hard coded 512 bytes which I call - * soft sects: - */ -#define XPRAM_SOFTSECT 512 -#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ -#define XPRAM_MAX_DEVS1 33 /* maximal number of devices (partitions) +1 */ - -/* The following macros depend on the sizes above */ - -#define XPRAM_KB_IN_PG 4 /* 4 kBs per page */ -#define XPRAM_KB_IN_PG_ORDER 2 /* 2^? kBs per page */ - -/* Eventhough XPRAM_HARDSECT is set to 4k some data structures use hard - * coded 512 byte sa sector size - */ -#define XPRAM_SEC2KB(x) ((x >> 1) + (x & 1)) /* modifier used to compute size - in kB from number of sectors */ -#define XPRAM_SEC_IN_PG 8 /* 8 sectors per page */ -#define XPRAM_SEC_IN_PG_ORDER 3 /* 2^? sectors per page */ - -#define XPRAM_UNUSED 40 /* unused space between devices, - * in kB, i.e. - * must be a multiple of 4 - */ -/* - * The xpram device is removable: if it is left closed for more than - * half a minute, it is removed. 
Thus use a usage count and a - * kernel timer - */ - -typedef struct Xpram_Dev { - int size; /* size in KB not in Byte - RB - */ - atomic_t usage; - char * device_name; /* device name prefix in devfs */ - devfs_handle_t devfs_entry; /* handle needed to unregister dev from devfs */ - u8 *data; -} Xpram_Dev; - -/* 2.2: void xpram_setup (char *, int *); */ -/* begin 2.3 */ -int xpram_setup (char *); -/* end 2.3 */ -int xpram_init(void); diff -Nru a/drivers/s390/qdio.c b/drivers/s390/qdio.c --- a/drivers/s390/qdio.c Tue Jun 18 19:12:01 2002 +++ b/drivers/s390/qdio.c Tue Jun 18 19:12:01 2002 @@ -61,9 +61,7 @@ MODULE_AUTHOR("Utz Bacher "); MODULE_DESCRIPTION("QDIO base support version 2, " \ "Copyright 2000 IBM Corporation"); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12)) MODULE_LICENSE("GPL"); -#endif /******************** HERE WE GO ***********************************/ @@ -1584,7 +1582,7 @@ kfree(irq_ptr->input_qs[i]); next: - if (!irq_ptr->output_qs[i]) goto next2; + if (!irq_ptr->output_qs[i]) continue; available=0; if (!irq_ptr->output_qs[i]->is_0copy_sbals_q) for (j=0;joutput_qs[i]->slib) kfree(irq_ptr->output_qs[i]->slib); kfree(irq_ptr->output_qs[i]); -next2: + } if (irq_ptr->qdr) kfree(irq_ptr->qdr); kfree(irq_ptr); @@ -2191,21 +2189,12 @@ { int cc; -#ifdef QDIO_32_BIT - asm volatile ( - ".insn rre,0xb25f0000,%1,0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) : "d" (chsc_area) : "cc" - ); -#else /* QDIO_32_BIT */ asm volatile ( ".insn rre,0xb25f0000,%1,0 \n\t" "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) : "d" (chsc_area) : "cc" ); -#endif /* QDIO_32_BIT */ return cc; } diff -Nru a/drivers/scsi/README.st b/drivers/scsi/README.st --- a/drivers/scsi/README.st Tue Jun 18 19:12:01 2002 +++ b/drivers/scsi/README.st Tue Jun 18 19:12:01 2002 @@ -2,7 +2,7 @@ The driver is currently maintained by Kai M{kisara (email Kai.Makisara@metla.fi) -Last modified: Tue Jan 22 21:08:57 2002 by makisara +Last modified: Tue Jun 18 18:13:50 2002 by makisara BASICS @@ -105,15 +105,19 @@ BUFFERING -The driver uses tape buffers allocated either at system initialization -or at run-time when needed. One buffer is used for each open tape -device. The size of the buffers is selectable at compile and/or boot -time. The buffers are used to store the data being transferred to/from -the SCSI adapter. The following buffering options are selectable at -compile time and/or at run time (via ioctl): +The driver uses tape buffers allocated at run-time when needed. The +buffer is freed when the device file is closed. One buffer is used for each +open tape device. + +The size of the buffers is always at least one tape block. In fixed +block mode, the minimum buffer size is defined (in 1024 byte units) by +ST_FIXED_BUFFER_BLOCKS. With a small block size this allows buffering of +several blocks and using one SCSI read or write to transfer all of the +blocks. Buffering of data across write calls in fixed block mode is +allowed if ST_BUFFER_WRITES is non-zero. Buffer allocation uses chunks of +memory having sizes 2^n * (page size). Because of this the actual +buffer size may be larger than the minimum allowable buffer size. -Buffering of data across write calls in fixed block mode (define -ST_BUFFER_WRITES). Asynchronous writing. Writing the buffer contents to the tape is started and the write call returns immediately. The status is checked @@ -127,30 +131,6 @@ attempted even if the user does not want to get all of the data at this read command. 
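(A concrete illustration of the 2^n chunking mentioned under BUFFERING above: the stand-alone sketch below, which is illustrative only and not part of st.c, rounds a requested byte count up to the next 2^n * PAGE_SIZE chunk. The helper name st_chunk_round_up is invented for this example, and the real driver may split a buffer across several scatter/gather segments instead of using one chunk.)

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* round a byte count up to the next 2^n * PAGE_SIZE chunk */
static unsigned long st_chunk_round_up(unsigned long size)
{
	unsigned long b_size = PAGE_SIZE;

	while (b_size < size)
		b_size <<= 1;
	return b_size;
}

int main(void)
{
	/* a 30 kB request (e.g. the default write threshold) */
	printf("%lu -> %lu\n", 30 * 1024UL, st_chunk_round_up(30 * 1024UL));
	return 0;
}

Asking for 30 kB thus yields a 32 kB allocation; the surplus is simply usable extra buffer space.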
Should be disabled for those drives that don't like a filemark to truncate a read request or that don't like backspacing. - -The buffer size is defined (in 1024 byte units) by ST_BUFFER_BLOCKS or -at boot time. If this size is not large enough, the driver tries to -temporarily enlarge the buffer. Buffer allocation uses chunks of -memory having sizes 2^n * (page size). Because of this the actual -buffer size may be larger than the buffer size specified with -ST_BUFFER_BLOCKS. - -A small number of buffers are allocated at driver initialisation. The -maximum number of these buffers is defined by ST_MAX_BUFFERS. The -maximum can be changed with kernel or module startup options. One -buffer is allocated for each drive detected when the driver is -initialized up to the maximum. - -The driver tries to allocate new buffers at run-time if -necessary. These buffers are freed after use. If the maximum number of -initial buffers is set to zero, all buffer allocation is done at -run-time. The advantage of run-time allocation is that memory is not -wasted for buffers not being used. The disadvantage is that there may -not be memory available at the time when a buffer is needed for the -first time (once a buffer is allocated, it is not released). This risk -should not be big if the tape drive is connected to a PCI adapter that -supports scatter/gather (the allocation is not limited to "DMA memory" -and the buffer can be composed of several fragments). The threshold for triggering asynchronous write in fixed block mode is defined by ST_WRITE_THRESHOLD. This may be optimized for each diff -Nru a/drivers/scsi/constants.c b/drivers/scsi/constants.c --- a/drivers/scsi/constants.c Tue Jun 18 19:12:02 2002 +++ b/drivers/scsi/constants.c Tue Jun 18 19:12:02 2002 @@ -993,10 +993,13 @@ } #if !(CONSTANTS & CONST_SENSE) + { + int i; printk("Raw sense data:"); for (i = 0; i < s; ++i) printk("0x%02x ", sense_buffer[i]); printk("\n"); + } #endif } diff -Nru a/drivers/scsi/cpqfcTSinit.c b/drivers/scsi/cpqfcTSinit.c --- a/drivers/scsi/cpqfcTSinit.c Tue Jun 18 19:12:01 2002 +++ b/drivers/scsi/cpqfcTSinit.c Tue Jun 18 19:12:01 2002 @@ -39,6 +39,7 @@ #include #include #include +#include #include // request_region() prototype #include // ioremap() //#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,7) diff -Nru a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c --- a/drivers/scsi/scsi.c Tue Jun 18 19:12:02 2002 +++ b/drivers/scsi/scsi.c Tue Jun 18 19:12:02 2002 @@ -1089,6 +1089,9 @@ SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n")); } +void scsi_tasklet_func(unsigned long); +static DECLARE_TASKLET(scsi_tasklet, scsi_tasklet_func, 0); + /* * This function is the mid-level interrupt routine, which decides how * to handle error conditions. Each invocation of this function must @@ -1186,7 +1189,7 @@ /* * Mark the bottom half handler to be run. */ - mark_bh(SCSI_BH); + tasklet_hi_schedule(&scsi_tasklet); } /* @@ -1212,7 +1215,7 @@ * half queue. Thus the only time we hold the lock here is when * we wish to atomically remove the contents of the queue. */ -void scsi_bottom_half_handler(void) +void scsi_tasklet_func(unsigned long ignore) { Scsi_Cmnd *SCpnt; Scsi_Cmnd *SCnext; @@ -2544,11 +2547,6 @@ if (scsihosts) printk(KERN_INFO "scsi: host order: %s\n", scsihosts); scsi_host_no_init (scsihosts); - /* - * This is where the processing takes place for most everything - * when commands are completed. 
- */ - init_bh(SCSI_BH, scsi_bottom_half_handler); return 0; } @@ -2558,7 +2556,7 @@ Scsi_Host_Name *shn, *shn2 = NULL; int i; - remove_bh(SCSI_BH); + tasklet_kill(&scsi_tasklet); devfs_unregister (scsi_devfs_handle); for (shn = scsi_host_no_list;shn;shn = shn->next) { diff -Nru a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c --- a/drivers/scsi/scsi_lib.c Tue Jun 18 19:12:02 2002 +++ b/drivers/scsi/scsi_lib.c Tue Jun 18 19:12:02 2002 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/scsi/sd.c b/drivers/scsi/sd.c --- a/drivers/scsi/sd.c Tue Jun 18 19:12:02 2002 +++ b/drivers/scsi/sd.c Tue Jun 18 19:12:02 2002 @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/scsi/sr.c b/drivers/scsi/sr.c --- a/drivers/scsi/sr.c Tue Jun 18 19:12:02 2002 +++ b/drivers/scsi/sr.c Tue Jun 18 19:12:02 2002 @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include diff -Nru a/drivers/scsi/st.c b/drivers/scsi/st.c --- a/drivers/scsi/st.c Tue Jun 18 19:12:02 2002 +++ b/drivers/scsi/st.c Tue Jun 18 19:12:02 2002 @@ -12,13 +12,13 @@ Copyright 1992 - 2002 Kai Makisara email Kai.Makisara@metla.fi - Last modified: Tue Feb 5 21:25:55 2002 by makisara + Last modified: Sat Jun 15 13:01:56 2002 by makisara Some small formal changes - aeb, 950809 Last modified: 18-JAN-1998 Richard Gooch Devfs support */ -static char *verstr = "20020205"; +static char *verstr = "20020615"; #include @@ -69,7 +69,6 @@ static int buffer_kbs; static int write_threshold_kbs; -static int max_buffers = (-1); static int max_sg_segs; MODULE_AUTHOR("Kai Makisara"); @@ -80,8 +79,6 @@ MODULE_PARM_DESC(buffer_kbs, "Default driver buffer size (KB; 32)"); MODULE_PARM(write_threshold_kbs, "i"); MODULE_PARM_DESC(write_threshold_kbs, "Asynchronous write threshold (KB; 30)"); -MODULE_PARM(max_buffers, "i"); -MODULE_PARM_DESC(max_buffers, "Maximum number of buffer allocated at initialisation (4)"); MODULE_PARM(max_sg_segs, "i"); MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (32)"); @@ -97,9 +94,6 @@ "write_threshold_kbs", &write_threshold_kbs }, { - "max_buffers", &max_buffers - }, - { "max_sg_segs", &max_sg_segs } }; @@ -108,12 +102,12 @@ /* The default definitions have been moved to st_options.h */ -#define ST_BUFFER_SIZE (ST_BUFFER_BLOCKS * ST_KILOBYTE) +#define ST_FIXED_BUFFER_SIZE (ST_FIXED_BUFFER_BLOCKS * ST_KILOBYTE) #define ST_WRITE_THRESHOLD (ST_WRITE_THRESHOLD_BLOCKS * ST_KILOBYTE) /* The buffer size should fit into the 24 bits for length in the 6-byte SCSI read and write commands. */ -#if ST_BUFFER_SIZE >= (2 << 24 - 1) +#if ST_FIXED_BUFFER_SIZE >= (2 << 24 - 1) #error "Buffer size should not exceed (2 << 24 - 1) bytes!" 
#endif @@ -121,7 +115,7 @@ #define MAX_RETRIES 0 #define MAX_WRITE_RETRIES 0 -#define MAX_READY_RETRIES 5 +#define MAX_READY_RETRIES 0 #define NO_TAPE NOT_READY #define ST_TIMEOUT (900 * HZ) @@ -137,18 +131,15 @@ #define ST_DEV_ARR_LUMP 6 static rwlock_t st_dev_arr_lock = RW_LOCK_UNLOCKED; -static int st_nbr_buffers; -static ST_buffer **st_buffers = NULL; -static int st_buffer_size = ST_BUFFER_SIZE; +static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE; static int st_write_threshold = ST_WRITE_THRESHOLD; -static int st_max_buffers = ST_MAX_BUFFERS; static int st_max_sg_segs = ST_MAX_SG; static Scsi_Tape **scsi_tapes = NULL; static int modes_defined; -static ST_buffer *new_tape_buffer(int, int, int); +static ST_buffer *new_tape_buffer(int, int); static int enlarge_buffer(ST_buffer *, int, int); static void normalize_buffer(ST_buffer *); static int append_to_buffer(const char *, ST_buffer *, int); @@ -914,8 +905,7 @@ module count. */ static int st_open(struct inode *inode, struct file *filp) { - int i, need_dma_buffer; - int retval = (-EIO); + int i, retval = (-EIO); Scsi_Tape *STp; ST_partstat *STps; int dev = TAPE_NR(inode->i_rdev); @@ -945,38 +935,15 @@ goto err_out; } - /* Allocate a buffer for this user */ - need_dma_buffer = STp->restr_dma; - write_lock(&st_dev_arr_lock); - for (i = 0; i < st_nbr_buffers; i++) - if (!st_buffers[i]->in_use && - (!need_dma_buffer || st_buffers[i]->dma)) { - STp->buffer = st_buffers[i]; - (STp->buffer)->in_use = 1; - break; - } - write_unlock(&st_dev_arr_lock); - if (i >= st_nbr_buffers) { - STp->buffer = new_tape_buffer(FALSE, need_dma_buffer, TRUE); - if (STp->buffer == NULL) { - printk(KERN_WARNING "st%d: Can't allocate tape buffer.\n", dev); - retval = (-EBUSY); - goto err_out; - } + /* See that we have at least a one page buffer available */ + if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) { + printk(KERN_WARNING "st%d: Can't allocate tape buffer.\n", dev); + retval = (-EOVERFLOW); + goto err_out; } (STp->buffer)->writing = 0; (STp->buffer)->syscall_result = 0; - (STp->buffer)->use_sg = STp->device->host->sg_tablesize; - - /* Compute the usable buffer size for this SCSI adapter */ - if (!(STp->buffer)->use_sg) - (STp->buffer)->buffer_size = (STp->buffer)->sg[0].length; - else { - for (i = 0, (STp->buffer)->buffer_size = 0; i < (STp->buffer)->use_sg && - i < (STp->buffer)->sg_segs; i++) - (STp->buffer)->buffer_size += (STp->buffer)->sg[i].length; - } STp->write_prot = ((filp->f_flags & O_ACCMODE) == O_RDONLY); @@ -999,10 +966,7 @@ return 0; err_out: - if (STp->buffer != NULL) { - (STp->buffer)->in_use = 0; - STp->buffer = NULL; - } + normalize_buffer(STp->buffer); STp->in_use = 0; STp->device->access_count--; if (STp->device->host->hostt->module) @@ -1149,16 +1113,8 @@ if (STp->door_locked == ST_LOCKED_AUTO) st_int_ioctl(STp, MTUNLOCK, 0); - if (STp->buffer != NULL) { - normalize_buffer(STp->buffer); - write_lock(&st_dev_arr_lock); - (STp->buffer)->in_use = 0; - STp->buffer = NULL; - } - else { - write_lock(&st_dev_arr_lock); - } - + normalize_buffer(STp->buffer); + write_lock(&st_dev_arr_lock); STp->in_use = 0; write_unlock(&st_dev_arr_lock); STp->device->access_count--; @@ -1168,31 +1124,11 @@ return result; } - -/* Write command */ -static ssize_t - st_write(struct file *filp, const char *buf, size_t count, loff_t * ppos) +/* The checks common to both reading and writing */ +static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t *ppos) { - struct inode *inode = filp->f_dentry->d_inode; - ssize_t total; - 
ssize_t i, do_count, blks, transfer; + int bufsize; ssize_t retval = 0; - int write_threshold; - int doing_write = 0; - unsigned char cmd[MAX_COMMAND_SIZE]; - const char *b_point; - Scsi_Request *SRpnt = NULL; - Scsi_Tape *STp; - ST_mode *STm; - ST_partstat *STps; - int dev = TAPE_NR(inode->i_rdev); - - read_lock(&st_dev_arr_lock); - STp = scsi_tapes[dev]; - read_unlock(&st_dev_arr_lock); - - if (down_interruptible(&STp->lock)) - return -ERESTARTSYS; /* * If we are in the middle of error recovery, don't let anyone @@ -1219,13 +1155,11 @@ goto out; } - STm = &(STp->modes[STp->current_mode]); - if (!STm->defined) { + if (! STp->modes[STp->current_mode].defined) { retval = (-ENXIO); goto out; } - if (count == 0) - goto out; + /* * If there was a bus reset, block further access @@ -1236,30 +1170,20 @@ goto out; } + if (count == 0) + goto out; + DEB( if (!STp->in_use) { + int dev = TAPE_NR(filp->f_dentry->d_inode->i_rdev); printk(ST_DEB_MSG "st%d: Incorrect device.\n", dev); retval = (-EIO); goto out; } ) /* end DEB */ - /* Write must be integral number of blocks */ - if (STp->block_size != 0 && (count % STp->block_size) != 0) { - printk(KERN_WARNING "st%d: Write not multiple of tape block size.\n", - dev); - retval = (-EINVAL); - goto out; - } - if (STp->can_partitions && (retval = update_partition(STp)) < 0) goto out; - STps = &(STp->ps[STp->partition]); - - if (STp->write_prot) { - retval = (-EACCES); - goto out; - } if (STp->block_size == 0) { if (STp->max_block > 0 && @@ -1273,19 +1197,73 @@ goto out; } } - if ((STp->buffer)->buffer_blocks < 1) { - /* Fixed block mode with too small buffer */ - if (!enlarge_buffer(STp->buffer, STp->block_size, STp->restr_dma)) { + else { + /* Fixed block mode with too small buffer? */ + bufsize = STp->block_size > st_fixed_buffer_size ? 
+ STp->block_size : st_fixed_buffer_size; + if ((STp->buffer)->buffer_size < bufsize && + !enlarge_buffer(STp->buffer, bufsize, STp->restr_dma)) { retval = (-EOVERFLOW); goto out; } - (STp->buffer)->buffer_blocks = 1; + (STp->buffer)->buffer_blocks = bufsize / STp->block_size; } if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !st_int_ioctl(STp, MTLOCK, 0)) STp->door_locked = ST_LOCKED_AUTO; + out: + return retval; +} + + +/* Write command */ +static ssize_t + st_write(struct file *filp, const char *buf, size_t count, loff_t * ppos) +{ + struct inode *inode = filp->f_dentry->d_inode; + ssize_t total; + ssize_t i, do_count, blks, transfer; + ssize_t retval; + int write_threshold; + int doing_write = 0; + unsigned char cmd[MAX_COMMAND_SIZE]; + const char *b_point; + Scsi_Request *SRpnt = NULL; + Scsi_Tape *STp; + ST_mode *STm; + ST_partstat *STps; + int dev = TAPE_NR(inode->i_rdev); + + read_lock(&st_dev_arr_lock); + STp = scsi_tapes[dev]; + read_unlock(&st_dev_arr_lock); + + if (down_interruptible(&STp->lock)) + return -ERESTARTSYS; + + retval = rw_checks(STp, filp, count, ppos); + if (retval || count == 0) + goto out; + + /* Write must be integral number of blocks */ + if (STp->block_size != 0 && (count % STp->block_size) != 0) { + printk(KERN_WARNING "st%d: Write not multiple of tape block size.\n", + dev); + retval = (-EINVAL); + goto out; + } + + STm = &(STp->modes[STp->current_mode]); + STps = &(STp->ps[STp->partition]); + + if (STp->write_prot) { + retval = (-EACCES); + goto out; + } + + if (STps->rw == ST_READING) { retval = flush_buffer(STp, 0); if (retval) @@ -1718,77 +1696,17 @@ if (down_interruptible(&STp->lock)) return -ERESTARTSYS; - /* - * If we are in the middle of error recovery, don't let anyone - * else try and use this device. Also, if error recovery fails, it - * may try and take the device offline, in which case all further - * access to the device is prohibited. - */ - if (!scsi_block_when_processing_errors(STp->device)) { - retval = (-ENXIO); - goto out; - } - - if (ppos != &filp->f_pos) { - /* "A request was outside the capabilities of the device." 
*/ - retval = (-ENXIO); + retval = rw_checks(STp, filp, count, ppos); + if (retval || count == 0) goto out; - } - if (STp->ready != ST_READY) { - if (STp->ready == ST_NO_TAPE) - retval = (-ENOMEDIUM); - else - retval = (-EIO); - goto out; - } STm = &(STp->modes[STp->current_mode]); - if (!STm->defined) { - retval = (-ENXIO); - goto out; - } - DEB( - if (!STp->in_use) { - printk(ST_DEB_MSG "st%d: Incorrect device.\n", dev); - retval = (-EIO); - goto out; - } ) /* end DEB */ - - if (STp->can_partitions && - (retval = update_partition(STp)) < 0) - goto out; - - if (STp->block_size == 0) { - if (STp->max_block > 0 && - (count < STp->min_block || count > STp->max_block)) { - retval = (-EINVAL); - goto out; - } - if (count > (STp->buffer)->buffer_size && - !enlarge_buffer(STp->buffer, count, STp->restr_dma)) { - retval = (-EOVERFLOW); - goto out; - } - } - if ((STp->buffer)->buffer_blocks < 1) { - /* Fixed block mode with too small buffer */ - if (!enlarge_buffer(STp->buffer, STp->block_size, STp->restr_dma)) { - retval = (-EOVERFLOW); - goto out; - } - (STp->buffer)->buffer_blocks = 1; - } - if (!(STm->do_read_ahead) && STp->block_size != 0 && (count % STp->block_size) != 0) { retval = (-EINVAL); /* Read must be integral number of blocks */ goto out; } - if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && - !st_int_ioctl(STp, MTLOCK, 0)) - STp->door_locked = ST_LOCKED_AUTO; - STps = &(STp->ps[STp->partition]); if (STps->rw == ST_WRITING) { retval = flush_buffer(STp, 0); @@ -1986,7 +1904,7 @@ st_log_options(STp, STm, dev); } else if (code == MT_ST_WRITE_THRESHOLD) { value = (options & ~MT_ST_OPTIONS) * ST_KILOBYTE; - if (value < 1 || value > st_buffer_size) { + if (value < 1 || value > st_fixed_buffer_size) { printk(KERN_WARNING "st%d: Write threshold %d too small or too large.\n", dev, value); @@ -2289,8 +2207,10 @@ if (!retval) { /* SCSI command successful */ - if (!load_code) + if (!load_code) { STp->rew_at_close = 0; + STp->ready = ST_NO_TAPE; + } else { STp->rew_at_close = STp->autorew_dev; retval = check_tape(STp, filp); @@ -2619,10 +2539,14 @@ ioctl_result = st_int_ioctl(STp, MTBSF, 1); if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { + int old_block_size = STp->block_size; STp->block_size = arg & MT_ST_BLKSIZE_MASK; - if (STp->block_size != 0) + if (STp->block_size != 0) { + if (old_block_size == 0) + normalize_buffer(STp->buffer); (STp->buffer)->buffer_blocks = (STp->buffer)->buffer_size / STp->block_size; + } (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; if (cmd_in == SET_DENS_AND_BLK) STp->density = arg >> MT_ST_DENSITY_SHIFT; @@ -3372,18 +3296,11 @@ /* Try to allocate a new tape buffer. Calling function must not hold dev_arr_lock. 
*/ static ST_buffer * - new_tape_buffer(int from_initialization, int need_dma, int in_use) + new_tape_buffer(int from_initialization, int need_dma) { - int i, priority, b_size, order, got = 0, segs = 0; + int i, priority, got = 0, segs = 0; ST_buffer *tb; - read_lock(&st_dev_arr_lock); - if (st_nbr_buffers >= st_template.dev_max) { - read_unlock(&st_dev_arr_lock); - return NULL; /* Should never happen */ - } - read_unlock(&st_dev_arr_lock); - if (from_initialization) priority = GFP_ATOMIC; else @@ -3391,85 +3308,19 @@ i = sizeof(ST_buffer) + (st_max_sg_segs - 1) * sizeof(struct scatterlist); tb = kmalloc(i, priority); - if (tb) { - if (need_dma) - priority |= GFP_DMA; - - /* Try to allocate the first segment up to ST_FIRST_ORDER and the - others big enough to reach the goal */ - for (b_size = PAGE_SIZE, order=0; - b_size < st_buffer_size && order < ST_FIRST_ORDER; - order++, b_size *= 2) - ; - for ( ; b_size >= PAGE_SIZE; order--, b_size /= 2) { - tb->sg[0].page = alloc_pages(priority, order); - tb->sg[0].offset = 0; - if (tb->sg[0].page != NULL) { - tb->sg[0].length = b_size; - break; - } - } - if (tb->sg[segs].page == NULL) { - kfree(tb); - tb = NULL; - } else { /* Got something, continue */ - - for (b_size = PAGE_SIZE, order=0; - st_buffer_size > - tb->sg[0].length + (ST_FIRST_SG - 1) * b_size; - order++, b_size *= 2) - ; - for (segs = 1, got = tb->sg[0].length; - got < st_buffer_size && segs < ST_FIRST_SG;) { - tb->sg[segs].page = alloc_pages(priority, order); - tb->sg[segs].offset = 0; - if (tb->sg[segs].page == NULL) { - if (st_buffer_size - got <= - (ST_FIRST_SG - segs) * b_size / 2) { - b_size /= 2; /* Large enough for the - rest of the buffers */ - order--; - continue; - } - tb->sg_segs = segs; - tb->orig_sg_segs = 0; - DEB(tb->buffer_size = got); - normalize_buffer(tb); - kfree(tb); - tb = NULL; - break; - } - tb->sg[segs].length = b_size; - got += b_size; - segs++; - } - } - } - if (!tb) { - printk(KERN_NOTICE "st: Can't allocate new tape buffer (nbr %d).\n", - st_nbr_buffers); + printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n"); return NULL; } tb->sg_segs = tb->orig_sg_segs = segs; - tb->b_data = page_address(tb->sg[0].page); + if (segs > 0) + tb->b_data = page_address(tb->sg[0].page); - DEBC(printk(ST_DEB_MSG - "st: Allocated tape buffer %d (%d bytes, %d segments, dma: %d, a: %p).\n", - st_nbr_buffers, got, tb->sg_segs, need_dma, tb->b_data); - printk(ST_DEB_MSG - "st: segment sizes: first %d, last %d bytes.\n", - tb->sg[0].length, tb->sg[segs - 1].length); - ) - tb->in_use = in_use; + tb->in_use = TRUE; tb->dma = need_dma; tb->buffer_size = got; tb->writing = 0; - write_lock(&st_dev_arr_lock); - st_buffers[st_nbr_buffers++] = tb; - write_unlock(&st_dev_arr_lock); - return tb; } @@ -3479,6 +3330,9 @@ { int segs, nbr, max_segs, b_size, priority, order, got; + if (new_size <= STbuffer->buffer_size) + return TRUE; + normalize_buffer(STbuffer); max_segs = STbuffer->use_sg; @@ -3492,13 +3346,14 @@ if (need_dma) priority |= GFP_DMA; for (b_size = PAGE_SIZE, order=0; - b_size * nbr < new_size - STbuffer->buffer_size; + b_size < new_size - STbuffer->buffer_size; order++, b_size *= 2) ; /* empty */ for (segs = STbuffer->sg_segs, got = STbuffer->buffer_size; segs < max_segs && got < new_size;) { STbuffer->sg[segs].page = alloc_pages(priority, order); + /* printk("st: allocated %x, order %d\n", STbuffer->sg[segs].page, order); */ STbuffer->sg[segs].offset = 0; if (STbuffer->sg[segs].page == NULL) { if (new_size - got <= (max_segs - segs) * b_size / 2) { @@ -3518,9 +3373,10 @@ 
STbuffer->buffer_size = got; segs++; } + STbuffer->b_data = page_address(STbuffer->sg[0].page); DEBC(printk(ST_DEB_MSG - "st: Succeeded to enlarge buffer to %d bytes (segs %d->%d, %d).\n", - got, STbuffer->orig_sg_segs, STbuffer->sg_segs, b_size)); + "st: Succeeded to enlarge buffer at %p to %d bytes (segs %d->%d, %d).\n", + STbuffer, got, STbuffer->orig_sg_segs, STbuffer->sg_segs, b_size)); return TRUE; } @@ -3535,14 +3391,14 @@ for (b_size=PAGE_SIZE, order=0; b_size < STbuffer->sg[i].length; order++, b_size *= 2) ; /* empty */ + /* printk("st: freeing %x, order %d\n", STbuffer->sg[i].page, order); */ __free_pages(STbuffer->sg[i].page, order); STbuffer->buffer_size -= STbuffer->sg[i].length; } DEB( if (debugging && STbuffer->orig_sg_segs < STbuffer->sg_segs) printk(ST_DEB_MSG "st: Buffer at %p normalized to %d bytes (segs %d).\n", - page_address(STbuffer->sg[0].page), STbuffer->buffer_size, - STbuffer->sg_segs); + STbuffer, STbuffer->buffer_size, STbuffer->sg_segs); ) /* end DEB */ STbuffer->sg_segs = STbuffer->orig_sg_segs; } @@ -3619,18 +3475,16 @@ static void validate_options(void) { if (buffer_kbs > 0) - st_buffer_size = buffer_kbs * ST_KILOBYTE; + st_fixed_buffer_size = buffer_kbs * ST_KILOBYTE; if (write_threshold_kbs > 0) st_write_threshold = write_threshold_kbs * ST_KILOBYTE; else if (buffer_kbs > 0) - st_write_threshold = st_buffer_size - 2048; - if (st_write_threshold > st_buffer_size) { - st_write_threshold = st_buffer_size; + st_write_threshold = st_fixed_buffer_size - 2048; + if (st_write_threshold > st_fixed_buffer_size) { + st_write_threshold = st_fixed_buffer_size; printk(KERN_WARNING "st: write_threshold limited to %d bytes.\n", st_write_threshold); } - if (max_buffers >= 0) - st_max_buffers = max_buffers; if (max_sg_segs >= ST_FIRST_SG) st_max_sg_segs = max_sg_segs; } @@ -3694,7 +3548,8 @@ Scsi_Tape *tpnt; ST_mode *STm; ST_partstat *STps; - int i, mode, target_nbr, dev_num; + ST_buffer *buffer; + int i, mode, dev_num; char *stp; if (SDp->type != TYPE_TAPE) @@ -3707,6 +3562,12 @@ return 1; } + buffer = new_tape_buffer(TRUE, (SDp->host)->unchecked_isa_dma); + if (buffer == NULL) { + printk(KERN_ERR "st: Can't allocate new tape buffer. Device not attached.\n"); + return 1; + } + write_lock(&st_dev_arr_lock); if (st_template.nr_dev >= st_template.dev_max) { Scsi_Tape **tmp_da; @@ -3745,14 +3606,6 @@ } scsi_tapes = tmp_da; - memset(tmp_ba, 0, tmp_dev_max * sizeof(ST_buffer *)); - if (st_buffers != NULL) { - memcpy(tmp_ba, st_buffers, - st_template.dev_max * sizeof(ST_buffer *)); - kfree(st_buffers); - } - st_buffers = tmp_ba; - st_template.dev_max = tmp_dev_max; } @@ -3799,6 +3652,9 @@ else tpnt->tape_type = MT_ISSCSI2; + buffer->use_sg = tpnt->device->host->sg_tablesize; + tpnt->buffer = buffer; + tpnt->inited = 0; tpnt->devt = mk_kdev(SCSI_TAPE_MAJOR, i); tpnt->dirty = 0; @@ -3858,18 +3714,6 @@ "Attached scsi tape st%d at scsi%d, channel %d, id %d, lun %d\n", dev_num, SDp->host->host_no, SDp->channel, SDp->id, SDp->lun); - /* See if we need to allocate more static buffers */ - target_nbr = st_template.nr_dev; - if (target_nbr > st_max_buffers) - target_nbr = st_max_buffers; - for (i=st_nbr_buffers; i < target_nbr; i++) - if (!new_tape_buffer(TRUE, TRUE, FALSE)) { - printk(KERN_INFO "st: Unable to allocate new static buffer.\n"); - break; - } - /* If the previous allocation fails, we will try again when the buffer is - really needed. 
*/ - return 0; }; @@ -3897,6 +3741,11 @@ devfs_unregister (tpnt->de_n[mode]); tpnt->de_n[mode] = NULL; } + if (tpnt->buffer) { + tpnt->buffer->orig_sg_segs = 0; + normalize_buffer(tpnt->buffer); + kfree(tpnt->buffer); + } kfree(tpnt); scsi_tapes[i] = 0; SDp->attached--; @@ -3916,10 +3765,10 @@ validate_options(); printk(KERN_INFO - "st: Version %s, bufsize %d, wrt %d, " - "max init. bufs %d, s/g segs %d\n", - verstr, st_buffer_size, st_write_threshold, - st_max_buffers, st_max_sg_segs); + "st: Version %s, fixed bufsize %d, wrt %d, " + "s/g segs %d\n", + verstr, st_fixed_buffer_size, st_write_threshold, + st_max_sg_segs); if (devfs_register_chrdev(SCSI_TAPE_MAJOR, "st", &st_fops) >= 0) return scsi_register_device(&st_template); @@ -3939,16 +3788,6 @@ if (scsi_tapes[i]) kfree(scsi_tapes[i]); kfree(scsi_tapes); - if (st_buffers != NULL) { - for (i = 0; i < st_nbr_buffers; i++) { - if (st_buffers[i] != NULL) { - st_buffers[i]->orig_sg_segs = 0; - normalize_buffer(st_buffers[i]); - kfree(st_buffers[i]); - } - } - kfree(st_buffers); - } } st_template.dev_max = 0; printk(KERN_INFO "st: Unloaded.\n"); diff -Nru a/drivers/scsi/st_options.h b/drivers/scsi/st_options.h --- a/drivers/scsi/st_options.h Tue Jun 18 19:12:02 2002 +++ b/drivers/scsi/st_options.h Tue Jun 18 19:12:02 2002 @@ -3,7 +3,7 @@ Copyright 1995-2000 Kai Makisara. - Last modified: Tue Jan 22 21:52:34 2002 by makisara + Last modified: Sun May 5 15:09:56 2002 by makisara */ #ifndef _ST_OPTIONS_H @@ -30,22 +30,17 @@ SENSE. */ #define ST_DEFAULT_BLOCK 0 -/* The tape driver buffer size in kilobytes. Must be non-zero. */ -#define ST_BUFFER_BLOCKS 32 +/* The minimum tape driver buffer size in kilobytes in fixed block mode. + Must be non-zero. */ +#define ST_FIXED_BUFFER_BLOCKS 32 /* The number of kilobytes of data in the buffer that triggers an asynchronous write in fixed block mode. See also ST_ASYNC_WRITES below. */ #define ST_WRITE_THRESHOLD_BLOCKS 30 -/* The maximum number of tape buffers the driver tries to allocate at - driver initialisation. The number is also constrained by the number - of drives detected. If more buffers are needed, they are allocated - at run time and freed after use. */ -#define ST_MAX_BUFFERS 4 - /* Maximum number of scatter/gather segments */ -#define ST_MAX_SG 16 +#define ST_MAX_SG 64 /* The number of scatter/gather segments to allocate at first try (must be smaller or equal to the maximum). 
*/ diff -Nru a/drivers/usb/class/usb-midi.c b/drivers/usb/class/usb-midi.c --- a/drivers/usb/class/usb-midi.c Tue Jun 18 19:12:01 2002 +++ b/drivers/usb/class/usb-midi.c Tue Jun 18 19:12:01 2002 @@ -106,9 +106,7 @@ MODULE_AUTHOR("NAGANO Daisuke "); MODULE_DESCRIPTION("USB-MIDI driver"); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14) MODULE_LICENSE("GPL"); -#endif /* ------------------------------------------------------------------------- */ diff -Nru a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h --- a/drivers/usb/core/hub.h Tue Jun 18 19:12:01 2002 +++ b/drivers/usb/core/hub.h Tue Jun 18 19:12:01 2002 @@ -9,6 +9,7 @@ */ #include +#include #include /* likely()/unlikely() */ /* diff -Nru a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c --- a/drivers/usb/host/ohci-dbg.c Tue Jun 18 19:12:02 2002 +++ b/drivers/usb/host/ohci-dbg.c Tue Jun 18 19:12:02 2002 @@ -74,9 +74,9 @@ static inline struct ed * dma_to_ed (struct ohci_hcd *hc, dma_addr_t ed_dma); -#ifdef OHCI_VERBOSE_DEBUG /* print non-empty branches of the periodic ed tree */ -void ohci_dump_periodic (struct ohci_hcd *ohci, char *label) +static void __attribute__ ((unused)) +ohci_dump_periodic (struct ohci_hcd *ohci, char *label) { int i, j; u32 *ed_p; @@ -101,7 +101,6 @@ printk (KERN_DEBUG "%s, ohci %s, empty periodic schedule\n", label, ohci->hcd.self.bus_name); } -#endif static void ohci_dump_intr_mask (char *label, __u32 mask) { @@ -241,6 +240,97 @@ ohci_dump_roothub (controller, 1); } +static void ohci_dump_td (char *label, struct td *td) +{ + u32 tmp = le32_to_cpup (&td->hwINFO); + + dbg ("%s td %p; urb %p index %d; hw next td %08x", + label, td, + td->urb, td->index, + le32_to_cpup (&td->hwNextTD)); + if ((tmp & TD_ISO) == 0) { + char *toggle, *pid; + u32 cbp, be; + + switch (tmp & TD_T) { + case TD_T_DATA0: toggle = "DATA0"; break; + case TD_T_DATA1: toggle = "DATA1"; break; + case TD_T_TOGGLE: toggle = "(CARRY)"; break; + default: toggle = "(?)"; break; + } + switch (tmp & TD_DP) { + case TD_DP_SETUP: pid = "SETUP"; break; + case TD_DP_IN: pid = "IN"; break; + case TD_DP_OUT: pid = "OUT"; break; + default: pid = "(bad pid)"; break; + } + dbg (" info %08x CC=%x %s DI=%d %s %s", tmp, + TD_CC_GET(tmp), /* EC, */ toggle, + (tmp & TD_DI) >> 21, pid, + (tmp & TD_R) ? "R" : ""); + cbp = le32_to_cpup (&td->hwCBP); + be = le32_to_cpup (&td->hwBE); + dbg (" cbp %08x be %08x (len %d)", cbp, be, + cbp ? (be + 1 - cbp) : 0); + } else { + unsigned i; + dbg (" info %08x CC=%x DI=%d START=%04x", tmp, + TD_CC_GET(tmp), /* FC, */ + (tmp & TD_DI) >> 21, + tmp & 0x0000ffff); + dbg (" bp0 %08x be %08x", + le32_to_cpup (&td->hwCBP) & ~0x0fff, + le32_to_cpup (&td->hwBE)); + for (i = 0; i < MAXPSW; i++) { + dbg (" psw [%d] = %2x", i, + le16_to_cpu (td->hwPSW [i])); + } + } +} + +/* caller MUST own hcd spinlock if verbose is set! */ +static void __attribute__((unused)) +ohci_dump_ed (struct ohci_hcd *ohci, char *label, struct ed *ed, int verbose) +{ + u32 tmp = ed->hwINFO; + char *type = ""; + + dbg ("%s: %s, ed %p state 0x%x type %d; next ed %08x", + ohci->hcd.self.bus_name, label, + ed, ed->state, ed->type, + le32_to_cpup (&ed->hwNextED)); + switch (tmp & (ED_IN|ED_OUT)) { + case ED_OUT: type = "-OUT"; break; + case ED_IN: type = "-IN"; break; + /* else from TDs ... control */ + } + dbg (" info %08x MAX=%d%s%s%s EP=%d%s DEV=%d", le32_to_cpu (tmp), + 0x0fff & (le32_to_cpu (tmp) >> 16), + (tmp & ED_ISO) ? " ISO" : "", + (tmp & ED_SKIP) ? " SKIP" : "", + (tmp & ED_LOWSPEED) ? 
" LOW" : "", + 0x000f & (le32_to_cpu (tmp) >> 7), + type, + 0x007f & le32_to_cpu (tmp)); + dbg (" tds: head %08x%s%s tail %08x%s", + tmp = le32_to_cpup (&ed->hwHeadP), + (ed->hwHeadP & ED_H) ? " HALT" : "", + (ed->hwHeadP & ED_C) ? " CARRY" : "", + le32_to_cpup (&ed->hwTailP), + verbose ? "" : " (not listing)"); + if (verbose) { + struct list_head *tmp; + + /* use ed->td_list because HC concurrently modifies + * hwNextTD as it accumulates ed_donelist. + */ + list_for_each (tmp, &ed->td_list) { + struct td *td; + td = list_entry (tmp, struct td, td_list); + ohci_dump_td (" ->", td); + } + } +} #endif diff -Nru a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c --- a/drivers/usb/host/ohci-hcd.c Tue Jun 18 19:12:01 2002 +++ b/drivers/usb/host/ohci-hcd.c Tue Jun 18 19:12:01 2002 @@ -100,7 +100,7 @@ * - lots more testing!! */ -#define DRIVER_VERSION "2002-Jun-10" +#define DRIVER_VERSION "2002-Jun-15" #define DRIVER_AUTHOR "Roman Weissgaerber , David Brownell" #define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver" @@ -145,8 +145,8 @@ urb_print (urb, "SUB", usb_pipein (pipe)); #endif - /* every endpoint has a ed, locate and fill it */ - if (! (ed = ep_add_ed (urb->dev, pipe, urb->interval, 1, mem_flags))) + /* every endpoint has a ed, locate and maybe (re)initialize it */ + if (! (ed = ed_get (ohci, urb->dev, pipe, urb->interval))) return -ENOMEM; /* for the private part of the URB we need the number of TDs (size) */ @@ -498,6 +498,7 @@ struct ohci_regs *regs = ohci->regs; int ints; + /* we can eliminate a (slow) readl() if _only_ WDH caused this irq */ if ((ohci->hcca->done_head != 0) && ! (le32_to_cpup (&ohci->hcca->done_head) & 0x01)) { ints = OHCI_INTR_WDH; diff -Nru a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c --- a/drivers/usb/host/ohci-mem.c Tue Jun 18 19:12:02 2002 +++ b/drivers/usb/host/ohci-mem.c Tue Jun 18 19:12:02 2002 @@ -221,6 +221,7 @@ ed = pci_pool_alloc (hc->ed_cache, mem_flags, &dma); if (ed) { memset (ed, 0, sizeof (*ed)); + INIT_LIST_HEAD (&ed->td_list); ed->dma = dma; /* hash it for later reverse mapping */ if (!hash_add_ed (hc, ed, mem_flags)) { diff -Nru a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c --- a/drivers/usb/host/ohci-q.c Tue Jun 18 19:12:03 2002 +++ b/drivers/usb/host/ohci-q.c Tue Jun 18 19:12:03 2002 @@ -131,8 +131,9 @@ /* search for the right branch to insert an interrupt ed into the int tree * do some load balancing; - * returns the branch and - * sets the interval to interval = 2^integer (ld (interval)) + * returns the branch + * FIXME allow for failure, when there's no bandwidth left; + * and consider iso loads too */ static int ep_int_balance (struct ohci_hcd *ohci, int interval, int load) { @@ -152,19 +153,6 @@ /*-------------------------------------------------------------------------*/ -/* 2^int ( ld (inter)) */ - -static int ep_2_n_interval (int inter) -{ - int i; - - for (i = 0; ((inter >> i) > 1 ) && (i < 5); i++) - continue; - return 1 << i; -} - -/*-------------------------------------------------------------------------*/ - /* the int tree is a binary tree * in order to process it sequentially the indexes of the branches have * to be mapped the mapping reverses the bits of a word of num_bits length @@ -230,8 +218,7 @@ case PIPE_INTERRUPT: load = ed->intriso.intr_info.int_load; - interval = ep_2_n_interval (ed->intriso.intr_info.int_period); - ed->interval = interval; + interval = ed->interval; int_branch = ep_int_balance (ohci, interval, load); ed->intriso.intr_info.int_branch = int_branch; @@ -301,6 
+288,7 @@ * just the link to the ed is unlinked. * the link from the ed still points to another operational ed or 0 * so the HC can eventually finish the processing of the unlinked ed + * caller guarantees the ED has no active TDs. */ static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed) { @@ -387,84 +375,99 @@ /*-------------------------------------------------------------------------*/ -/* (re)init an endpoint; this _should_ be done once at the - * usb_set_configuration command, but the USB stack is a bit stateless - * so we do it at every transaction. - * if the state of the ed is ED_NEW then a dummy td is added and the - * state is changed to ED_UNLINK - * in all other cases the state is left unchanged - * the ed info fields are set even though most of them should - * not change +/* get and maybe (re)init an endpoint. init _should_ be done only as part + * of usb_set_configuration() or usb_set_interface() ... but the USB stack + * isn't very stateful, so we re-init whenever the HC isn't looking. */ -static struct ed *ep_add_ed ( +static struct ed *ed_get ( + struct ohci_hcd *ohci, struct usb_device *udev, unsigned int pipe, - int interval, - int load, - int mem_flags + int interval ) { - struct ohci_hcd *ohci = hcd_to_ohci (udev->bus->hcpriv); + int is_out = !usb_pipein (pipe); + int type = usb_pipetype (pipe); + int bus_msecs = 0; struct hcd_dev *dev = (struct hcd_dev *) udev->hcpriv; - struct td *td; struct ed *ed; unsigned ep; unsigned long flags; - spin_lock_irqsave (&ohci->lock, flags); - ep = usb_pipeendpoint (pipe) << 1; - if (!usb_pipecontrol (pipe) && usb_pipeout (pipe)) + if (type != PIPE_CONTROL && is_out) ep |= 1; + if (type == PIPE_INTERRUPT) + bus_msecs = usb_calc_bus_time (udev->speed, !is_out, 0, + usb_maxpacket (udev, pipe, is_out)) / 1000; + + spin_lock_irqsave (&ohci->lock, flags); + if (!(ed = dev->ep [ep])) { ed = ed_alloc (ohci, SLAB_ATOMIC); if (!ed) { /* out of memory */ - spin_unlock_irqrestore (&ohci->lock, flags); - return NULL; + goto done; } dev->ep [ep] = ed; } if (ed->state & ED_URB_DEL) { /* pending unlink request */ - spin_unlock_irqrestore (&ohci->lock, flags); - return NULL; + ed = 0; + goto done; } if (ed->state == ED_NEW) { + struct td *td; + ed->hwINFO = ED_SKIP; /* dummy td; end of td list for ed */ td = td_alloc (ohci, SLAB_ATOMIC); if (!td) { /* out of memory */ - spin_unlock_irqrestore (&ohci->lock, flags); - return NULL; + ed = 0; + goto done; } ed->dummy = td; ed->hwTailP = cpu_to_le32 (td->td_dma); ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */ ed->state = ED_UNLINK; - ed->type = usb_pipetype (pipe); + ed->type = type; } -// FIXME: don't do this if it's linked to the HC, or without knowing it's -// safe to clobber state/mode info tied to (previous) config/altsetting. -// (but dev0/ep0, used by set_address, must get clobbered) - - ed->hwINFO = cpu_to_le32 (usb_pipedevice (pipe) - | usb_pipeendpoint (pipe) << 7 - | (usb_pipeisoc (pipe)? 0x8000: 0) - | (usb_pipecontrol (pipe) - ? 0: (usb_pipeout (pipe)? 0x800: 0x1000)) - | (udev->speed == USB_SPEED_LOW) << 13 - | usb_maxpacket (udev, pipe, usb_pipeout (pipe)) - << 16); - - if (ed->type == PIPE_INTERRUPT && ed->state == ED_UNLINK) { - ed->intriso.intr_info.int_period = interval; - ed->intriso.intr_info.int_load = load; - } + /* FIXME: Don't do this without knowing it's safe to clobber this + * state/mode info. Currently the upper layers don't support such + * guarantees; we're lucky changing config/altsetting is rare. 
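The hunk just below assembles the ED's first control dword by OR-ing fields together. Purely as an illustration (this helper does not exist in the driver), the OHCI bit layout it relies on can be written out as a standalone function; it also shows why the ohci.h hunk further down corrects ED_IN to 0x02 << 11: the direction field is only two bits wide at [12:11], so the old 0x10 << 11 value collided with the isochronous-format bit (bit 15).

#include <stdint.h>

/* Hypothetical helper mirroring the field layout ed_get() packs below
 * (OHCI ED dword 0): function address [6:0], endpoint number [10:7],
 * direction [12:11] (01 = OUT, 10 = IN, 00/11 = take PID from the TDs),
 * low speed [13], skip [14], isochronous format [15], max packet [26:16].
 */
static uint32_t pack_ed_info(unsigned dev, unsigned ep, int is_out,
			     int is_ctrl, int lowspeed, int iso,
			     unsigned maxpacket)
{
	uint32_t info = (dev & 0x7f) | ((ep & 0x0f) << 7);

	if (!is_ctrl)			/* control EDs leave D == 00 */
		info |= (is_out ? 0x01u : 0x02u) << 11;
	if (lowspeed)
		info |= 1u << 13;
	if (iso)
		info |= 1u << 15;
	info |= (maxpacket & 0x7ff) << 16;
	return info;			/* byteswapping to LE is a separate step */
}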
+ */ + if (ed->state == ED_UNLINK) { + u32 info; + info = usb_pipedevice (pipe); + info |= (ep >> 1) << 7; + info |= usb_maxpacket (udev, pipe, is_out) << 16; + info = cpu_to_le32 (info); + if (udev->speed == USB_SPEED_LOW) + info |= ED_LOWSPEED; + /* control transfers store pids in tds */ + if (type != PIPE_CONTROL) { + info |= is_out ? ED_OUT : ED_IN; + if (type == PIPE_ISOCHRONOUS) + info |= ED_ISO; + if (type == PIPE_INTERRUPT) { + ed->intriso.intr_info.int_load = bus_msecs; + if (interval > 32) + interval = 32; + } + } + ed->hwINFO = info; + + /* value ignored except on periodic EDs, where + * we know it's already a power of 2 + */ + ed->interval = interval; + } + +done: spin_unlock_irqrestore (&ohci->lock, flags); return ed; } @@ -736,8 +739,8 @@ urb->iso_frame_desc [td->index].status = cc_to_error [cc]; if (cc != 0) - dbg (" urb %p iso TD %d len %d CC %d", - urb, td->index, dlen, cc); + dbg (" urb %p iso TD %p (%d) len %d CC %d", + urb, td, 1 + td->index, dlen, cc); /* BULK, INT, CONTROL ... drivers see aggregate length/status, * except that "setup" bytes aren't counted and "short" transfers @@ -776,9 +779,13 @@ - td->data_dma; } +#ifdef VERBOSE_DEBUG if (cc != 0) - dbg (" urb %p TD %d CC %d, len=%d", - urb, td->index, cc, urb->actual_length); + dbg (" urb %p TD %p (%d) CC %d, len=%d/%d", + urb, td, 1 + td->index, cc, + urb->actual_length, + urb->transfer_buffer_length); +#endif } } @@ -812,8 +819,8 @@ if (urb_priv && ((td_list->index + 1) < urb_priv->length)) { #ifdef OHCI_VERBOSE_DEBUG - dbg ("urb %p TD %d of %d, patch ED", - td_list->urb, + dbg ("urb %p TD %p (%d/%d), patch ED", + td_list->urb, td_list, 1 + td_list->index, urb_priv->length); #endif diff -Nru a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h --- a/drivers/usb/host/ohci.h Tue Jun 18 19:12:03 2002 +++ b/drivers/usb/host/ohci.h Tue Jun 18 19:12:03 2002 @@ -19,7 +19,7 @@ #define ED_SKIP __constant_cpu_to_le32(1 << 14) #define ED_LOWSPEED __constant_cpu_to_le32(1 << 13) #define ED_OUT __constant_cpu_to_le32(0x01 << 11) -#define ED_IN __constant_cpu_to_le32(0x10 << 11) +#define ED_IN __constant_cpu_to_le32(0x02 << 11) __u32 hwTailP; /* tail of TD list */ __u32 hwHeadP; /* head of TD list */ #define ED_C __constant_cpu_to_le32(0x02) /* toggle carry */ @@ -30,24 +30,24 @@ dma_addr_t dma; /* addr of ED */ struct ed *ed_prev; /* for non-interrupt EDs */ struct td *dummy; + struct list_head td_list; /* "shadow list" of our TDs */ + + u8 state; /* ED_{NEW,UNLINK,OPER} */ +#define ED_NEW 0x00 /* unused, no dummy td */ +#define ED_UNLINK 0x01 /* dummy td, maybe linked to hc */ +#define ED_OPER 0x02 /* dummy td, _is_ linked to hc */ +#define ED_URB_DEL 0x08 /* for unlinking; masked in */ u8 type; /* PIPE_{BULK,...} */ - u8 interval; /* interrupt, isochronous */ + u16 interval; /* interrupt, isochronous */ union { struct intr_info { /* interrupt */ - u8 int_period; u8 int_branch; u8 int_load; } intr_info; u16 last_iso; /* isochronous */ } intriso; - u8 state; /* ED_{NEW,UNLINK,OPER} */ -#define ED_NEW 0x00 /* unused, no dummy td */ -#define ED_UNLINK 0x01 /* dummy td, maybe linked to hc */ -#define ED_OPER 0x02 /* dummy td, _is_ linked to hc */ -#define ED_URB_DEL 0x08 /* for unlinking; masked in */ - /* HC may see EDs on rm_list until next frame (frame_no == tick) */ u16 tick; struct ed *ed_rm_list; @@ -108,6 +108,8 @@ dma_addr_t td_dma; /* addr of this TD */ dma_addr_t data_dma; /* addr of data it points to */ + + struct list_head td_list; /* "shadow list", TDs on same ED */ } __attribute__ ((aligned(32))); /* c/b/i need 16; 
only iso needs 32 */ #define TD_MASK ((u32)~0x1f) /* strip hw status in low addr bits */ diff -Nru a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c --- a/drivers/usb/net/kaweth.c Tue Jun 18 19:12:02 2002 +++ b/drivers/usb/net/kaweth.c Tue Jun 18 19:12:03 2002 @@ -220,10 +220,11 @@ struct urb *rx_urb; struct urb *tx_urb; struct urb *irq_urb; + + struct sk_buff *tx_skb; __u8 *firmware_buf; __u8 scratch[KAWETH_SCRATCH_SIZE]; - __u8 tx_buf[KAWETH_BUF_SIZE]; __u8 rx_buf[KAWETH_BUF_SIZE]; __u8 intbuffer[INTBUFFERSIZE]; __u16 packet_filter_bitmap; @@ -650,11 +651,13 @@ static void kaweth_usb_transmit_complete(struct urb *urb) { struct kaweth_device *kaweth = urb->context; + struct sk_buff *skb = kaweth->tx_skb; if (unlikely(urb->status != 0)) kaweth_dbg("%s: TX status %d.", kaweth->net->name, urb->status); netif_wake_queue(kaweth->net); + dev_kfree_skb(skb); } /**************************************************************** @@ -663,7 +666,7 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net) { struct kaweth_device *kaweth = net->priv; - int count = skb->len; + char *private_header; int res; @@ -679,15 +682,30 @@ kaweth_async_set_rx_mode(kaweth); netif_stop_queue(net); - *((__u16 *)kaweth->tx_buf) = cpu_to_le16(skb->len); + /* We now decide whether we can put our special header into the sk_buff */ + if (skb_cloned(skb) || skb_headroom(skb) < 2) { + /* no such luck - we make our own */ + struct sk_buff *copied_skb; + copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC); + dev_kfree_skb_any(skb); + skb = copied_skb; + if (!copied_skb) { + kaweth->stats.tx_errors++; + netif_start_queue(net); + spin_unlock(&kaweth->device_lock); + return 0; + } + } - memcpy(kaweth->tx_buf + 2, skb->data, skb->len); + private_header = __skb_push(skb, 2); + *(__u16 *)private_header = cpu_to_le16(skb->len); + kaweth->tx_skb = skb; FILL_BULK_URB(kaweth->tx_urb, kaweth->dev, usb_sndbulkpipe(kaweth->dev, 2), + private_header, + skb->len, - kaweth->tx_buf, - count + 2, kaweth_usb_transmit_complete, kaweth); kaweth->end = 0; @@ -699,6 +717,7 @@ kaweth->stats.tx_errors++; netif_start_queue(net); + dev_kfree_skb(skb); } else { @@ -706,8 +725,6 @@ kaweth->stats.tx_bytes += skb->len; net->trans_start = jiffies; } - - dev_kfree_skb(skb); spin_unlock(&kaweth->device_lock); diff -Nru a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c --- a/drivers/usb/storage/scsiglue.c Tue Jun 18 19:12:02 2002 +++ b/drivers/usb/storage/scsiglue.c Tue Jun 18 19:12:02 2002 @@ -51,12 +51,6 @@ #include -/* - * kernel thread actions - */ - -#define US_ACT_COMMAND 1 -#define US_ACT_EXIT 5 /*********************************************************************** * Host functions @@ -204,7 +198,7 @@ US_DEBUGP("device_reset() called\n" ); /* if the device was removed, then we're already reset */ - if (atomic_read(&us->sm_state) == US_STATE_DETACHED) + if (!test_bit(DEV_ATTACHED, &us->bitflags)) return SUCCESS; scsi_unlock(srb->host); @@ -235,7 +229,7 @@ US_DEBUGP("bus_reset() called\n"); /* if the device has been removed, this worked */ - if (atomic_read(&us->sm_state) == US_STATE_DETACHED) { + if (!test_bit(DEV_ATTACHED, &us->bitflags)) { US_DEBUGP("-- device removed already\n"); return SUCCESS; } @@ -337,8 +331,8 @@ /* show the GUID of the device */ SPRINTF(" GUID: " GUID_FORMAT "\n", GUID_ARGS(us->guid)); - SPRINTF(" Attached: %s\n", (atomic_read(&us->sm_state) == - US_STATE_DETACHED) ? "Yes" : "No"); + SPRINTF(" Attached: %s\n", (test_bit(DEV_ATTACHED, &us->bitflags) + ? 
"Yes" : "No")); /* * Calculate start of next buffer, and return value. diff -Nru a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c --- a/drivers/usb/storage/usb.c Tue Jun 18 19:12:02 2002 +++ b/drivers/usb/storage/usb.c Tue Jun 18 19:12:02 2002 @@ -99,13 +99,6 @@ static int my_host_number; -/* - * kernel thread actions - */ - -#define US_ACT_COMMAND 1 -#define US_ACT_EXIT 5 - /* The list of structures and the protective lock for them */ struct us_data *us_list; struct semaphore us_list_semaphore; @@ -426,7 +419,7 @@ down(&(us->dev_semaphore)); /* our device has gone - pretend not ready */ - if (atomic_read(&us->device_state) == US_STATE_DETACHED) { + if (!test_bit(DEV_ATTACHED, &us->bitflags)) { US_DEBUGP("Request is for removed device\n"); /* For REQUEST_SENSE, it's the data. But * for anything else, it should look like @@ -450,7 +443,7 @@ sizeof(usb_stor_sense_notready)); us->srb->result = CHECK_CONDITION << 1; } - } else { /* atomic_read(&us->device_state) == STATE_DETACHED */ + } else { /* test_bit(DEV_ATTACHED, &us->bitflags) */ /* Handle those devices which need us to fake * their inquiry data */ @@ -557,9 +550,8 @@ unsigned int flags; struct us_unusual_dev *unusual_dev; struct us_data *ss = NULL; -#ifdef CONFIG_USB_STORAGE_SDDR09 int result; -#endif + int new_device = 0; /* these are temporary copies -- we test on these, then put them * in the us-data structure @@ -570,13 +562,13 @@ u8 subclass = 0; u8 protocol = 0; - /* the altsettting on the interface we're probing that matched our + /* the altsetting on the interface we're probing that matched our * usb_match_id table */ struct usb_interface *intf = dev->actconfig->interface; struct usb_interface_descriptor *altsetting = intf[ifnum].altsetting + intf[ifnum].act_altsetting; - US_DEBUGP("act_altsettting is %d\n", intf[ifnum].act_altsetting); + US_DEBUGP("act_altsetting is %d\n", intf[ifnum].act_altsetting); /* clear the temporary strings */ memset(mf, 0, sizeof(mf)); @@ -663,7 +655,7 @@ return NULL; } - /* At this point, we're committed to using the device */ + /* At this point, we've decided to try to use the device */ usb_get_dev(dev); /* clear the GUID and fetch the strings */ @@ -696,7 +688,8 @@ */ ss = us_list; while ((ss != NULL) && - ((ss->pusb_dev) || !GUID_EQUAL(guid, ss->guid))) + (test_bit(DEV_ATTACHED, &ss->bitflags) || + !GUID_EQUAL(guid, ss->guid))) ss = ss->next; if (ss != NULL) { @@ -710,29 +703,23 @@ /* establish the connection to the new device upon reconnect */ ss->ifnum = ifnum; ss->pusb_dev = dev; - atomic_set(&ss->device_state, US_STATE_ATTACHED); + set_bit(DEV_ATTACHED, &ss->bitflags); /* copy over the endpoint data */ - if (ep_in) - ss->ep_in = ep_in->bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK; - if (ep_out) - ss->ep_out = ep_out->bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK; + ss->ep_in = ep_in->bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK; + ss->ep_out = ep_out->bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK; ss->ep_int = ep_int; /* allocate an IRQ callback if one is needed */ - if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss)) { - usb_put_dev(dev); - return NULL; - } + if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss)) + goto BadDevice; /* allocate the URB we're going to use */ ss->current_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!ss->current_urb) { - usb_put_dev(dev); - return NULL; - } + if (!ss->current_urb) + goto BadDevice; /* Re-Initialize the device if it needs it */ if (unusual_dev && unusual_dev->initFunction) @@ -752,14 +739,12 @@ return NULL; } memset(ss, 0, 
sizeof(struct us_data)); + new_device = 1; /* allocate the URB we're going to use */ ss->current_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!ss->current_urb) { - kfree(ss); - usb_put_dev(dev); - return NULL; - } + if (!ss->current_urb) + goto BadDevice; /* Initialize the mutexes only when the struct is new */ init_completion(&(ss->notify)); @@ -776,12 +761,10 @@ ss->unusual_dev = unusual_dev; /* copy over the endpoint data */ - if (ep_in) - ss->ep_in = ep_in->bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK; - if (ep_out) - ss->ep_out = ep_out->bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK; + ss->ep_in = ep_in->bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK; + ss->ep_out = ep_out->bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK; ss->ep_int = ep_int; /* establish the connection to the new device */ @@ -904,12 +887,8 @@ #endif default: - ss->transport_name = "Unknown"; - kfree(ss->current_urb); - kfree(ss); - usb_put_dev(dev); - return NULL; - break; + /* ss->transport_name = "Unknown"; */ + goto BadDevice; } US_DEBUGP("Transport: %s\n", ss->transport_name); @@ -959,22 +938,14 @@ #endif default: - ss->protocol_name = "Unknown"; - kfree(ss->current_urb); - kfree(ss); - usb_put_dev(dev); - return NULL; - break; + /* ss->protocol_name = "Unknown"; */ + goto BadDevice; } US_DEBUGP("Protocol: %s\n", ss->protocol_name); /* allocate an IRQ callback if one is needed */ - if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss)) { - kfree(ss->current_urb); - kfree(ss); - usb_put_dev(dev); - return NULL; - } + if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss)) + goto BadDevice; /* * Since this is a new device, we need to generate a scsi @@ -1001,16 +972,13 @@ /* start up our control thread */ atomic_set(&ss->sm_state, US_STATE_IDLE); - atomic_set(&ss->device_state, US_STATE_ATTACHED); + set_bit(DEV_ATTACHED, &ss->bitflags); ss->pid = kernel_thread(usb_stor_control_thread, ss, CLONE_VM); if (ss->pid < 0) { printk(KERN_WARNING USB_STORAGE "Unable to start control thread\n"); - kfree(ss->current_urb); - kfree(ss); - usb_put_dev(dev); - return NULL; + goto BadDevice; } /* wait for the thread to start */ @@ -1018,7 +986,17 @@ /* now register - our detect function will be called */ ss->htmplt.module = THIS_MODULE; - scsi_register_host(&(ss->htmplt)); + result = scsi_register_host(&(ss->htmplt)); + if (result) { + printk(KERN_WARNING USB_STORAGE + "Unable to register the scsi host\n"); + + /* tell the control thread to exit */ + ss->action = US_ACT_EXIT; + up(&ss->sema); + wait_for_completion(&ss->notify); + goto BadDevice; + } /* lock access to the data structures */ down(&us_list_semaphore); @@ -1038,6 +1016,31 @@ /* return a pointer for the disconnect function */ return ss; + + /* we come here if there are any problems */ + BadDevice: + US_DEBUGP("storage_probe() failed\n"); + down(&ss->irq_urb_sem); + if (ss->irq_urb) { + usb_unlink_urb(ss->irq_urb); + usb_free_urb(ss->irq_urb); + ss->irq_urb = NULL; + } + up(&ss->irq_urb_sem); + if (ss->current_urb) { + usb_unlink_urb(ss->current_urb); + usb_free_urb(ss->current_urb); + ss->current_urb = NULL; + } + + clear_bit(DEV_ATTACHED, &ss->bitflags); + ss->pusb_dev = NULL; + if (new_device) + kfree(ss); + else + up(&ss->dev_semaphore); + usb_put_dev(dev); + return NULL; } /* Handle a disconnect event from the USB core */ @@ -1078,7 +1081,7 @@ /* mark the device as gone */ usb_put_dev(ss->pusb_dev); ss->pusb_dev = NULL; - atomic_set(&ss->sm_state, US_STATE_DETACHED); + clear_bit(DEV_ATTACHED, &ss->bitflags); /* unlock access to the device data structure */ 
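The usb-storage hunks above and below replace the dedicated atomic device_state with a single DEV_ATTACHED bit in the existing bitflags word, so attach state is set, cleared and tested with the same bit operations as IP_WANTED. A rough userspace analogue of the pattern, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

#define DEV_ATTACHED_BIT (1u << 2)	/* stand-in for the DEV_ATTACHED bit */

struct fake_us_data {
	atomic_uint bitflags;		/* all single-bit flags share one word */
};

static void mark_attached(struct fake_us_data *us)
{
	atomic_fetch_or(&us->bitflags, DEV_ATTACHED_BIT);
}

static void mark_detached(struct fake_us_data *us)
{
	atomic_fetch_and(&us->bitflags, ~DEV_ATTACHED_BIT);
}

static bool is_attached(struct fake_us_data *us)
{
	return atomic_load(&us->bitflags) & DEV_ATTACHED_BIT;
}

Keeping all the single-bit flags in one word means a disconnect racing with probe flips only the bit it owns, which is the property the set_bit/clear_bit/test_bit calls rely on.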
up(&(ss->dev_semaphore)); diff -Nru a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h --- a/drivers/usb/storage/usb.h Tue Jun 18 19:12:02 2002 +++ b/drivers/usb/storage/usb.h Tue Jun 18 19:12:02 2002 @@ -103,9 +103,10 @@ #define US_FL_SCM_MULT_TARG 0x00000020 /* supports multiple targets */ #define US_FL_FIX_INQUIRY 0x00000040 /* INQUIRY response needs fixing */ -/* device attached/detached states */ -#define US_STATE_DETACHED 1 -#define US_STATE_ATTACHED 2 + +/* kernel thread actions */ +#define US_ACT_COMMAND 1 +#define US_ACT_EXIT 5 /* processing state machine states */ #define US_STATE_IDLE 1 @@ -127,10 +128,9 @@ /* The device we're working with * It's important to note: * (o) you must hold dev_semaphore to change pusb_dev - * (o) device_state should change whenever pusb_dev does + * (o) DEV_ATTACHED in bitflags should change whenever pusb_dev does */ struct semaphore dev_semaphore; /* protect pusb_dev */ - atomic_t device_state; /* attached or detached */ struct usb_device *pusb_dev; /* this usb_device */ unsigned int flags; /* from filter initially */ @@ -174,6 +174,7 @@ struct semaphore ip_waitq; /* for CBI interrupts */ unsigned long bitflags; /* single-bit flags: */ #define IP_WANTED 1 /* is an IRQ expected? */ +#define DEV_ATTACHED 2 /* is the dev. attached?*/ /* interrupt communications data */ struct semaphore irq_urb_sem; /* to protect irq_urb */ diff -Nru a/drivers/video/fbcon.c b/drivers/video/fbcon.c --- a/drivers/video/fbcon.c Tue Jun 18 19:12:01 2002 +++ b/drivers/video/fbcon.c Tue Jun 18 19:12:01 2002 @@ -2177,7 +2177,7 @@ if (p->fb_info->fbops->fb_rasterimg) p->fb_info->fbops->fb_rasterimg(p->fb_info, 1); - for (x = 0; x < smp_num_cpus * (LOGO_W + 8) && + for (x = 0; x < num_online_cpus() * (LOGO_W + 8) && x < p->var.xres - (LOGO_W + 8); x += (LOGO_W + 8)) { #if defined(CONFIG_FBCON_CFB16) || defined(CONFIG_FBCON_CFB24) || \ diff -Nru a/fs/bio.c b/fs/bio.c --- a/fs/bio.c Tue Jun 18 19:12:01 2002 +++ b/fs/bio.c Tue Jun 18 19:12:01 2002 @@ -17,6 +17,7 @@ * */ #include +#include #include #include #include @@ -284,8 +285,8 @@ vto = kmap(bbv->bv_page); } else { local_irq_save(flags); - vfrom = kmap_atomic(bv->bv_page, KM_BIO_IRQ); - vto = kmap_atomic(bbv->bv_page, KM_BIO_IRQ); + vfrom = kmap_atomic(bv->bv_page, KM_BIO_SRC_IRQ); + vto = kmap_atomic(bbv->bv_page, KM_BIO_DST_IRQ); } memcpy(vto + bbv->bv_offset, vfrom + bv->bv_offset, bv->bv_len); @@ -293,8 +294,8 @@ kunmap(bbv->bv_page); kunmap(bv->bv_page); } else { - kunmap_atomic(vto, KM_BIO_IRQ); - kunmap_atomic(vfrom, KM_BIO_IRQ); + kunmap_atomic(vto, KM_BIO_DST_IRQ); + kunmap_atomic(vfrom, KM_BIO_SRC_IRQ); local_irq_restore(flags); } } diff -Nru a/fs/buffer.c b/fs/buffer.c --- a/fs/buffer.c Tue Jun 18 19:12:01 2002 +++ b/fs/buffer.c Tue Jun 18 19:12:01 2002 @@ -152,14 +152,16 @@ { if (page_has_buffers(page)) buffer_error(); - set_page_buffers(page, head); page_cache_get(page); + SetPagePrivate(page); + page->private = (unsigned long)head; } static inline void __clear_page_buffers(struct page *page) { - clear_page_buffers(page); + ClearPagePrivate(page); + page->private = 0; page_cache_release(page); } @@ -376,7 +378,7 @@ } /* - * Various filesystems appear to want __get_hash_table to be non-blocking. + * Various filesystems appear to want __find_get_block to be non-blocking. * But it's the page lock which protects the buffers. To get around this, * we get exclusion from try_to_free_buffers with the blockdev mapping's * private_lock. @@ -387,7 +389,7 @@ * private_lock is contended then so is mapping->page_lock). 
*/ struct buffer_head * -__get_hash_table(struct block_device *bdev, sector_t block, int unused) +__find_get_block(struct block_device *bdev, sector_t block, int unused) { struct inode *bd_inode = bdev->bd_inode; struct address_space *bd_mapping = bd_inode->i_mapping; @@ -492,7 +494,7 @@ } /* - * I/O completion handler for block_read_full_page() and brw_page() - pages + * I/O completion handler for block_read_full_page() - pages * which come unlocked at the end of I/O. */ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) @@ -542,14 +544,6 @@ */ if (page_uptodate && !PageError(page)) SetPageUptodate(page); - - /* - * swap page handling is a bit hacky. A standalone completion handler - * for swapout pages would fix that up. swapin can use this function. - */ - if (PageSwapCache(page) && PageWriteback(page)) - end_page_writeback(page); - unlock_page(page); return; @@ -856,8 +850,9 @@ if (mapping->assoc_mapping != buffer_mapping) BUG(); } - buffer_insert_list(&buffer_mapping->private_lock, - bh, &mapping->private_list); + if (list_empty(&bh->b_assoc_buffers)) + buffer_insert_list(&buffer_mapping->private_lock, + bh, &mapping->private_list); } EXPORT_SYMBOL(mark_buffer_dirty_inode); @@ -952,12 +947,12 @@ * the size of each buffer.. Use the bh->b_this_page linked list to * follow the buffers created. Return NULL if unable to create more * buffers. - * The async flag is used to differentiate async IO (paging, swapping) - * from ordinary buffer allocations, and only async requests are allowed - * to sleep waiting for buffer heads. + * + * The retry flag is used to differentiate async IO (paging, swapping) + * which may not fail from ordinary buffer allocations. */ static struct buffer_head * -create_buffers(struct page * page, unsigned long size, int async) +create_buffers(struct page * page, unsigned long size, int retry) { struct buffer_head *bh, *head; long offset; @@ -966,7 +961,7 @@ head = NULL; offset = PAGE_SIZE; while ((offset -= size) >= 0) { - bh = alloc_buffer_head(async); + bh = alloc_buffer_head(); if (!bh) goto no_grow; @@ -1003,7 +998,7 @@ * become available. But we don't want tasks sleeping with * partially complete buffers, so all were released above. */ - if (!async) + if (!retry) return NULL; /* We're _really_ low on memory. Now we just @@ -1096,7 +1091,7 @@ /* * Link the page to the buffers and initialise them. Take the - * lock to be atomic wrt __get_hash_table(), which does not + * lock to be atomic wrt __find_get_block(), which does not * run under the page lock. */ spin_lock(&inode->i_mapping->private_lock); @@ -1169,7 +1164,7 @@ for (;;) { struct buffer_head * bh; - bh = __get_hash_table(bdev, block, size); + bh = __find_get_block(bdev, block, size); if (bh) { touch_buffer(bh); return bh; @@ -1218,7 +1213,7 @@ { if (!buffer_uptodate(bh)) buffer_error(); - if (!test_set_buffer_dirty(bh)) + if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) __set_page_dirty_nobuffers(bh->b_page); } @@ -1243,10 +1238,17 @@ * bforget() is like brelse(), except it discards any * potentially dirty data. 
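Note the small but deliberate change to mark_buffer_dirty() above: a plain buffer_dirty() test now precedes test_set_buffer_dirty(), so the common already-dirty case skips the locked read-modify-write and the cacheline dirtying it causes. A compilable sketch of the idiom, with made-up names:

#include <stdatomic.h>
#include <stdbool.h>

#define DIRTY_BIT 1u			/* hypothetical flag bit */

/* Returns true only on the clean -> dirty transition, which is the
 * only case where the caller needs to redirty the page.
 */
static bool mark_dirty(atomic_uint *state)
{
	if (atomic_load_explicit(state, memory_order_relaxed) & DIRTY_BIT)
		return false;		/* already dirty: no atomic op needed */
	return !(atomic_fetch_or(state, DIRTY_BIT) & DIRTY_BIT);
}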
*/ -void __bforget(struct buffer_head * buf) +void __bforget(struct buffer_head *bh) { - clear_buffer_dirty(buf); - __brelse(buf); + clear_buffer_dirty(bh); + if (!list_empty(&bh->b_assoc_buffers)) { + struct address_space *buffer_mapping = bh->b_page->mapping; + + spin_lock(&buffer_mapping->private_lock); + list_del_init(&bh->b_assoc_buffers); + spin_unlock(&buffer_mapping->private_lock); + } + __brelse(bh); } /** @@ -1359,11 +1361,11 @@ { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; + int ret = 1; - if (!PageLocked(page)) - BUG(); + BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) - return 1; + goto out; head = page_buffers(page); bh = head; @@ -1385,12 +1387,10 @@ * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. */ - if (offset == 0) { - if (!try_to_release_page(page, 0)) - return 0; - } - - return 1; + if (offset == 0) + ret = try_to_release_page(page, 0); +out: + return ret; } EXPORT_SYMBOL(block_invalidatepage); @@ -1449,7 +1449,7 @@ { struct buffer_head *old_bh; - old_bh = __get_hash_table(bdev, block, 0); + old_bh = __find_get_block(bdev, block, 0); if (old_bh) { #if 0 /* This happens. Later. */ if (buffer_dirty(old_bh)) @@ -2266,68 +2266,6 @@ } /* - * Start I/O on a page. - * This function expects the page to be locked and may return - * before I/O is complete. You then have to check page->locked - * and page->uptodate. - * - * FIXME: we need a swapper_inode->get_block function to remove - * some of the bmap kludges and interface ugliness here. - * - * NOTE: unlike file pages, swap pages are locked while under writeout. - * This is to throttle processes which reuse their swapcache pages while - * they are under writeout, and to ensure that there is no I/O going on - * when the page has been successfully locked. Functions such as - * free_swap_and_cache() need to guarantee that there is no I/O in progress - * because they will be freeing up swap blocks, which may then be reused. - * - * Swap pages are also marked PageWriteback when they are being written - * so that memory allocators will throttle on them. - */ -int brw_page(int rw, struct page *page, - struct block_device *bdev, sector_t b[], int size) -{ - struct buffer_head *head, *bh; - - BUG_ON(!PageLocked(page)); - - if (!page_has_buffers(page)) - create_empty_buffers(page, size, 0); - head = bh = page_buffers(page); - - /* Stage 1: lock all the buffers */ - do { - lock_buffer(bh); - bh->b_blocknr = *(b++); - bh->b_bdev = bdev; - set_buffer_mapped(bh); - if (rw == WRITE) { - set_buffer_uptodate(bh); - clear_buffer_dirty(bh); - } - /* - * Swap pages are locked during writeout, so use - * buffer_async_read in strange ways. - */ - mark_buffer_async_read(bh); - bh = bh->b_this_page; - } while (bh != head); - - if (rw == WRITE) { - BUG_ON(PageWriteback(page)); - SetPageWriteback(page); - } - - /* Stage 2: start the IO */ - do { - struct buffer_head *next = bh->b_this_page; - submit_bh(rw, bh); - bh = next; - } while (bh != head); - return 0; -} - -/* * Sanity checks for try_to_free_buffers. 
*/ static void check_ttfb_buffer(struct page *page, struct buffer_head *bh) @@ -2456,7 +2394,7 @@ static kmem_cache_t *bh_cachep; static mempool_t *bh_mempool; -struct buffer_head *alloc_buffer_head(int async) +struct buffer_head *alloc_buffer_head(void) { return mempool_alloc(bh_mempool, GFP_NOFS); } diff -Nru a/fs/coda/dir.c b/fs/coda/dir.c --- a/fs/coda/dir.c Tue Jun 18 19:12:01 2002 +++ b/fs/coda/dir.c Tue Jun 18 19:12:01 2002 @@ -147,20 +147,25 @@ int coda_permission(struct inode *inode, int mask) { - int error; + int error = 0; if (!mask) return 0; + lock_kernel(); + coda_vfs_stat.permission++; if (coda_cache_check(inode, mask)) - return 0; + goto out; error = venus_access(inode->i_sb, coda_i2f(inode), mask); if (!error) coda_cache_enter(inode, mask); + + out: + unlock_kernel(); return error; } diff -Nru a/fs/ext3/balloc.c b/fs/ext3/balloc.c --- a/fs/ext3/balloc.c Tue Jun 18 19:12:01 2002 +++ b/fs/ext3/balloc.c Tue Jun 18 19:12:01 2002 @@ -352,7 +352,7 @@ #ifdef CONFIG_JBD_DEBUG { struct buffer_head *debug_bh; - debug_bh = sb_get_hash_table(sb, block + i); + debug_bh = sb_find_get_block(sb, block + i); if (debug_bh) { BUFFER_TRACE(debug_bh, "Deleted!"); if (!bh2jh(bitmap_bh)->b_committed_data) @@ -701,7 +701,7 @@ struct buffer_head *debug_bh; /* Record bitmap buffer state in the newly allocated block */ - debug_bh = sb_get_hash_table(sb, tmp); + debug_bh = sb_find_get_block(sb, tmp); if (debug_bh) { BUFFER_TRACE(debug_bh, "state when allocated"); BUFFER_TRACE2(debug_bh, bh, "bitmap state"); diff -Nru a/fs/ext3/inode.c b/fs/ext3/inode.c --- a/fs/ext3/inode.c Tue Jun 18 19:12:02 2002 +++ b/fs/ext3/inode.c Tue Jun 18 19:12:02 2002 @@ -1650,7 +1650,7 @@ struct buffer_head *bh; *p = 0; - bh = sb_get_hash_table(inode->i_sb, nr); + bh = sb_find_get_block(inode->i_sb, nr); ext3_forget(handle, 0, inode, bh, nr); } } diff -Nru a/fs/file_table.c b/fs/file_table.c --- a/fs/file_table.c Tue Jun 18 19:12:01 2002 +++ b/fs/file_table.c Tue Jun 18 19:12:01 2002 @@ -100,31 +100,38 @@ void fput(struct file * file) { + if (atomic_dec_and_test(&file->f_count)) + __fput(file); +} + +/* __fput is called from task context when aio completion releases the last + * last use of a struct file *. Do not use otherwise. 
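The fput() rework above is the standard drop-the-reference-then-tear-down split: the fast path only decrements, and every release action now lives in one function that other contexts (here, aio completion) can call directly on the final use. The generic shape, with hypothetical names:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int count;
	/* ... the dentry/vfsmount/inode analogues would live here ... */
};

static void obj_release(struct obj *o)	/* plays the role of __fput() */
{
	/* all teardown is concentrated here */
	free(o);
}

static void obj_put(struct obj *o)	/* plays the role of fput() */
{
	if (atomic_fetch_sub(&o->count, 1) == 1)	/* old value 1: last ref */
		obj_release(o);
}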
+ */ +void __fput(struct file * file) +{ struct dentry * dentry = file->f_dentry; struct vfsmount * mnt = file->f_vfsmnt; struct inode * inode = dentry->d_inode; - if (atomic_dec_and_test(&file->f_count)) { - locks_remove_flock(file); + locks_remove_flock(file); - if (file->f_iobuf) - free_kiovec(1, &file->f_iobuf); + if (file->f_iobuf) + free_kiovec(1, &file->f_iobuf); - if (file->f_op && file->f_op->release) - file->f_op->release(inode, file); - fops_put(file->f_op); - if (file->f_mode & FMODE_WRITE) - put_write_access(inode); - file_list_lock(); - file->f_dentry = NULL; - file->f_vfsmnt = NULL; - list_del(&file->f_list); - list_add(&file->f_list, &free_list); - files_stat.nr_free_files++; - file_list_unlock(); - dput(dentry); - mntput(mnt); - } + if (file->f_op && file->f_op->release) + file->f_op->release(inode, file); + fops_put(file->f_op); + if (file->f_mode & FMODE_WRITE) + put_write_access(inode); + file_list_lock(); + file->f_dentry = NULL; + file->f_vfsmnt = NULL; + list_del(&file->f_list); + list_add(&file->f_list, &free_list); + files_stat.nr_free_files++; + file_list_unlock(); + dput(dentry); + mntput(mnt); } struct file * fget(unsigned int fd) diff -Nru a/fs/inode.c b/fs/inode.c --- a/fs/inode.c Tue Jun 18 19:12:03 2002 +++ b/fs/inode.c Tue Jun 18 19:12:03 2002 @@ -913,16 +913,6 @@ return res; } -static inline void do_atime_update(struct inode *inode) -{ - unsigned long time = CURRENT_TIME; - if (inode->i_atime != time) { - inode->i_atime = time; - mark_inode_dirty_sync(inode); - } -} - - /** * update_atime - update the access time * @inode: inode accessed @@ -932,15 +922,19 @@ * as well as the "noatime" flag and inode specific "noatime" markers. */ -void update_atime (struct inode *inode) +void update_atime(struct inode *inode) { if (inode->i_atime == CURRENT_TIME) return; - if ( IS_NOATIME (inode) ) return; - if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return; - if ( IS_RDONLY (inode) ) return; - do_atime_update(inode); -} /* End Function update_atime */ + if (IS_NOATIME(inode)) + return; + if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode)) + return; + if (IS_RDONLY(inode)) + return; + inode->i_atime = CURRENT_TIME; + mark_inode_dirty_sync(inode); +} int inode_needs_sync(struct inode *inode) { diff -Nru a/fs/intermezzo/dir.c b/fs/intermezzo/dir.c --- a/fs/intermezzo/dir.c Tue Jun 18 19:12:02 2002 +++ b/fs/intermezzo/dir.c Tue Jun 18 19:12:02 2002 @@ -785,13 +785,15 @@ { unsigned short mode = inode->i_mode; struct presto_cache *cache; - int rc; + int rc = 0; + lock_kernel(); ENTRY; + if ( presto_can_ilookup() && !(mask & S_IWOTH)) { CDEBUG(D_CACHE, "ilookup on %ld OK\n", inode->i_ino); - EXIT; - return 0; + EXIT; + goto out; } cache = presto_get_cache(inode); @@ -803,25 +805,22 @@ if ( S_ISREG(mode) && fiops && fiops->permission ) { EXIT; - return fiops->permission(inode, mask); + rc = fiops->permission(inode, mask); + goto out; } if ( S_ISDIR(mode) && diops && diops->permission ) { EXIT; - return diops->permission(inode, mask); + rc = diops->permission(inode, mask); + goto out; } } - /* The cache filesystem doesn't have its own permission function, - * but we don't want to duplicate the VFS code here. In order - * to avoid looping from permission calling this function again, - * we temporarily override the permission operation while we call - * the VFS permission function. 
- */ - inode->i_op->permission = NULL; - rc = permission(inode, mask); - inode->i_op->permission = &presto_permission; + rc = vfs_permission(inode, mask); EXIT; + + out: + unlock_kernel(); return rc; } diff -Nru a/fs/intermezzo/vfs.c b/fs/intermezzo/vfs.c --- a/fs/intermezzo/vfs.c Tue Jun 18 19:12:02 2002 +++ b/fs/intermezzo/vfs.c Tue Jun 18 19:12:02 2002 @@ -407,11 +407,11 @@ mode &= S_IALLUGO; mode |= S_IFREG; - down(&dir->d_inode->i_zombie); + down(&dir->d_inode->i_sem); error = presto_reserve_space(fset->fset_cache, PRESTO_REQHIGH); if (error) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -495,7 +495,7 @@ presto_trans_commit(fset, handle); exit_pre_lock: presto_release_space(fset->fset_cache, PRESTO_REQHIGH); - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -583,11 +583,11 @@ struct presto_version new_link_ver; void *handle; - down(&dir->d_inode->i_zombie); + down(&dir->d_inode->i_sem); error = presto_reserve_space(fset->fset_cache, PRESTO_REQHIGH); if (error) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } error = -ENOENT; @@ -662,7 +662,7 @@ presto_trans_commit(fset, handle); exit_lock: presto_release_space(fset->fset_cache, PRESTO_REQHIGH); - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -728,11 +728,11 @@ int do_kml = 0, do_expect =0; int linkno = 0; ENTRY; - down(&dir->d_inode->i_zombie); + down(&dir->d_inode->i_sem); error = may_delete(dir->d_inode, dentry, 0); if (error) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -740,14 +740,14 @@ iops = filter_c2cdiops(fset->fset_cache->cache_filter); if (!iops->unlink) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } error = presto_reserve_space(fset->fset_cache, PRESTO_REQLOW); if (error) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -757,7 +757,7 @@ if ( IS_ERR(handle) ) { presto_release_space(fset->fset_cache, PRESTO_REQLOW); printk("ERROR: presto_do_unlink: no space for transaction. 
Tell Peter.\n"); - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return -ENOSPC; } DQUOT_INIT(dir->d_inode); @@ -792,7 +792,7 @@ goto exit; } - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); if (error) { EXIT; goto exit; @@ -882,12 +882,12 @@ void *handle; ENTRY; - down(&dir->d_inode->i_zombie); + down(&dir->d_inode->i_sem); /* record + max path len + space to free */ error = presto_reserve_space(fset->fset_cache, PRESTO_REQHIGH + 4096); if (error) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -965,7 +965,7 @@ presto_trans_commit(fset, handle); exit_lock: presto_release_space(fset->fset_cache, PRESTO_REQHIGH + 4096); - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -1043,12 +1043,12 @@ void *handle; ENTRY; - down(&dir->d_inode->i_zombie); + down(&dir->d_inode->i_sem); /* one journal record + directory block + room for removals*/ error = presto_reserve_space(fset->fset_cache, PRESTO_REQHIGH + 4096); if (error) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -1129,7 +1129,7 @@ presto_trans_commit(fset, handle); exit_lock: presto_release_space(fset->fset_cache, PRESTO_REQHIGH + 4096); - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -1241,7 +1241,7 @@ do_kml = presto_do_kml(info, dir->d_inode); do_expect = presto_do_expect(info, dir->d_inode); - double_down(&dir->d_inode->i_zombie, &dentry->d_inode->i_zombie); + double_down(&dir->d_inode->i_sem, &dentry->d_inode->i_sem); d_unhash(dentry); if (IS_DEADDIR(dir->d_inode)) error = -ENOENT; @@ -1257,7 +1257,7 @@ ATTR_CTIME | ATTR_MTIME); } } - double_up(&dir->d_inode->i_zombie, &dentry->d_inode->i_zombie); + double_up(&dir->d_inode->i_sem, &dentry->d_inode->i_sem); if (!error) d_delete(dentry); dput(dentry); @@ -1343,12 +1343,12 @@ ENTRY; - down(&dir->d_inode->i_zombie); + down(&dir->d_inode->i_sem); /* one KML entry */ error = presto_reserve_space(fset->fset_cache, PRESTO_REQHIGH); if (error) { EXIT; - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -1429,7 +1429,7 @@ unlock_kernel(); exit_lock: presto_release_space(fset->fset_cache, PRESTO_REQHIGH); - up(&dir->d_inode->i_zombie); + up(&dir->d_inode->i_sem); return error; } @@ -1624,13 +1624,13 @@ goto out_unlock; target = new_dentry->d_inode; if (target) { /* Hastur! Hastur! Hastur! 
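The double_down()/triple_down() helpers used below take two or three semaphores at once; the 2.4 VFS avoided deadlock in them by always acquiring in a fixed (address) order, which is what lets two concurrent cross-directory renames grab overlapping lock sets safely. A minimal pthread sketch of that discipline (an assumed rendering of the idea, not the kernel's helpers):

#include <pthread.h>
#include <stdint.h>

static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* sort by address */
		pthread_mutex_t *t = a; a = b; b = t;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}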
*/ - triple_down(&old_dir->i_zombie, - &new_dir->i_zombie, - &target->i_zombie); + triple_down(&old_dir->i_sem, + &new_dir->i_sem, + &target->i_sem); d_unhash(new_dentry); } else - double_down(&old_dir->i_zombie, - &new_dir->i_zombie); + double_down(&old_dir->i_sem, + &new_dir->i_sem); if (IS_DEADDIR(old_dir)||IS_DEADDIR(new_dir)) error = -ENOENT; else if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) @@ -1641,15 +1641,15 @@ if (target) { if (!error) target->i_flags |= S_DEAD; - triple_up(&old_dir->i_zombie, - &new_dir->i_zombie, - &target->i_zombie); + triple_up(&old_dir->i_sem, + &new_dir->i_sem, + &target->i_sem); if (d_unhashed(new_dentry)) d_rehash(new_dentry); dput(new_dentry); } else - double_up(&old_dir->i_zombie, - &new_dir->i_zombie); + double_up(&old_dir->i_sem, + &new_dir->i_sem); if (!error) d_move(old_dentry,new_dentry); @@ -1689,13 +1689,13 @@ DQUOT_INIT(old_dir); DQUOT_INIT(new_dir); - double_down(&old_dir->i_zombie, &new_dir->i_zombie); + double_down(&old_dir->i_sem, &new_dir->i_sem); if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) error = -EBUSY; else error = do_rename(fset, old_parent, old_dentry, new_parent, new_dentry, info); - double_up(&old_dir->i_zombie, &new_dir->i_zombie); + double_up(&old_dir->i_sem, &new_dir->i_sem); if (error) return error; /* The following d_move() should become unconditional */ diff -Nru a/fs/jbd/commit.c b/fs/jbd/commit.c --- a/fs/jbd/commit.c Tue Jun 18 19:12:02 2002 +++ b/fs/jbd/commit.c Tue Jun 18 19:12:02 2002 @@ -659,6 +659,20 @@ * there's no point in keeping a checkpoint record for * it. */ bh = jh2bh(jh); + + /* A buffer which has been freed while still being + * journaled by a previous transaction may end up still + * being dirty here, but we want to avoid writing back + * that buffer in the future now that the last use has + * been committed. That's not only a performance gain, + * it also stops aliasing problems if the buffer is left + * behind for writeback and gets reallocated for another + * use in a different page. */ + if (buffer_freed(bh)) { + clear_buffer_freed(bh); + clear_buffer_jbddirty(bh); + } + if (buffer_jdirty(bh)) { JBUFFER_TRACE(jh, "add to new checkpointing trans"); __journal_insert_checkpoint(jh, commit_transaction); diff -Nru a/fs/jbd/journal.c b/fs/jbd/journal.c --- a/fs/jbd/journal.c Tue Jun 18 19:12:01 2002 +++ b/fs/jbd/journal.c Tue Jun 18 19:12:01 2002 @@ -463,7 +463,7 @@ * Right, time to make up the new buffer_head. */ do { - new_bh = alloc_buffer_head(0); + new_bh = alloc_buffer_head(); if (!new_bh) { printk (KERN_NOTICE "%s: ENOMEM at alloc_buffer_head, " "trying again.\n", __FUNCTION__); diff -Nru a/fs/jbd/revoke.c b/fs/jbd/revoke.c --- a/fs/jbd/revoke.c Tue Jun 18 19:12:01 2002 +++ b/fs/jbd/revoke.c Tue Jun 18 19:12:01 2002 @@ -293,7 +293,7 @@ bh = bh_in; if (!bh) { - bh = __get_hash_table(bdev, blocknr, journal->j_blocksize); + bh = __find_get_block(bdev, blocknr, journal->j_blocksize); if (bh) BUFFER_TRACE(bh, "found on hash"); } @@ -303,7 +303,7 @@ /* If there is a different buffer_head lying around in * memory anywhere... */ - bh2 = __get_hash_table(bdev, blocknr, journal->j_blocksize); + bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize); if (bh2) { /* ... and it has RevokeValid status... */ if ((bh2 != bh) && @@ -407,7 +407,7 @@ * state machine will get very upset later on. 
*/ if (need_cancel) { struct buffer_head *bh2; - bh2 = __get_hash_table(bh->b_bdev, bh->b_blocknr, bh->b_size); + bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size); if (bh2) { if (bh2 != bh) clear_bit(BH_Revoked, &bh2->b_state); diff -Nru a/fs/jbd/transaction.c b/fs/jbd/transaction.c --- a/fs/jbd/transaction.c Tue Jun 18 19:12:02 2002 +++ b/fs/jbd/transaction.c Tue Jun 18 19:12:02 2002 @@ -1601,8 +1601,7 @@ * * Returns non-zero iff we were able to free the journal_head. */ -static int __journal_try_to_free_buffer(struct buffer_head *bh, - int *locked_or_dirty) +static inline int __journal_try_to_free_buffer(struct buffer_head *bh) { struct journal_head *jh; @@ -1610,12 +1609,7 @@ jh = bh2jh(bh); - if (buffer_locked(bh) || buffer_dirty(bh)) { - *locked_or_dirty = 1; - goto out; - } - - if (!buffer_uptodate(bh)) /* AKPM: why? */ + if (buffer_locked(bh) || buffer_dirty(bh)) goto out; if (jh->b_next_transaction != 0) @@ -1630,8 +1624,7 @@ __journal_remove_journal_head(bh); __brelse(bh); } - } - else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) { + } else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) { /* written-back checkpointed metadata buffer */ if (jh->b_jlist == BJ_None) { JBUFFER_TRACE(jh, "remove from checkpoint list"); @@ -1647,10 +1640,8 @@ } /* - * journal_try_to_free_buffers(). For all the buffers on this page, - * if they are fully written out ordered data, move them onto BUF_CLEAN - * so try_to_free_buffers() can reap them. Called with lru_list_lock - * not held. Does its own locking. + * journal_try_to_free_buffers(). Try to remove all this page's buffers + * from the journal. * * This complicates JBD locking somewhat. We aren't protected by the * BKL here. We wish to remove the buffer from its committing or @@ -1669,50 +1660,28 @@ * journal_try_to_free_buffer() is changing its state. But that * cannot happen because we never reallocate freed data as metadata * while the data is part of a transaction. Yes? - * - * This function returns non-zero if we wish try_to_free_buffers() - * to be called. We do this is the page is releasable by try_to_free_buffers(). - * We also do it if the page has locked or dirty buffers and the caller wants - * us to perform sync or async writeout. */ int journal_try_to_free_buffers(journal_t *journal, - struct page *page, int gfp_mask) + struct page *page, int unused_gfp_mask) { + struct buffer_head *head; struct buffer_head *bh; - struct buffer_head *tmp; - int locked_or_dirty = 0; - int call_ttfb = 1; - int ret; + int ret = 0; J_ASSERT(PageLocked(page)); - bh = page_buffers(page); - tmp = bh; + head = page_buffers(page); + bh = head; spin_lock(&journal_datalist_lock); do { - struct buffer_head *p = tmp; - - tmp = tmp->b_this_page; - if (buffer_jbd(p)) - if (!__journal_try_to_free_buffer(p, &locked_or_dirty)) - call_ttfb = 0; - } while (tmp != bh); + if (buffer_jbd(bh) && !__journal_try_to_free_buffer(bh)) { + spin_unlock(&journal_datalist_lock); + goto busy; + } + } while ((bh = bh->b_this_page) != head); spin_unlock(&journal_datalist_lock); - - if (!(gfp_mask & (__GFP_IO|__GFP_WAIT))) - goto out; - if (!locked_or_dirty) - goto out; - /* - * The VM wants us to do writeout, or to block on IO, or both. - * So we allow try_to_free_buffers to be called even if the page - * still has journalled buffers. 
- */ - call_ttfb = 1; -out: - ret = 0; - if (call_ttfb) - ret = try_to_free_buffers(page); + ret = try_to_free_buffers(page); +busy: return ret; } @@ -1861,6 +1830,7 @@ * running transaction if that is set, but nothing * else. */ JBUFFER_TRACE(jh, "on committing transaction"); + set_buffer_freed(bh); if (jh->b_next_transaction) { J_ASSERT(jh->b_next_transaction == journal->j_running_transaction); diff -Nru a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c --- a/fs/jfs/jfs_logmgr.c Tue Jun 18 19:12:02 2002 +++ b/fs/jfs/jfs_logmgr.c Tue Jun 18 19:12:02 2002 @@ -65,6 +65,7 @@ #include #include #include /* for sync_blockdev() */ +#include #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" diff -Nru a/fs/locks.c b/fs/locks.c --- a/fs/locks.c Tue Jun 18 19:12:02 2002 +++ b/fs/locks.c Tue Jun 18 19:12:02 2002 @@ -119,6 +119,7 @@ #include #include #include +#include #include #include @@ -1020,6 +1021,46 @@ return error; } +/* We already had a lease on this file; just change its type */ +static int lease_modify(struct file_lock **before, int arg) +{ + struct file_lock *fl = *before; + int error = assign_type(fl, arg); + + if (error) + return error; + locks_wake_up_blocks(fl); + if (arg == F_UNLCK) { + struct file *filp = fl->fl_file; + + filp->f_owner.pid = 0; + filp->f_owner.uid = 0; + filp->f_owner.euid = 0; + filp->f_owner.signum = 0; + locks_delete_lock(before); + } + return 0; +} + +static void time_out_leases(struct inode *inode) +{ + struct file_lock **before; + struct file_lock *fl; + + before = &inode->i_flock; + while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) { + if ((fl->fl_break_time == 0) + || time_before(jiffies, fl->fl_break_time)) { + before = &fl->fl_next; + continue; + } + printk(KERN_INFO "lease broken - owner pid = %d\n", fl->fl_pid); + lease_modify(before, fl->fl_type & ~F_INPROGRESS); + if (fl == *before) /* lease_modify may have freed fl */ + before = &fl->fl_next; + } +} + /** * __get_lease - revoke all outstanding leases on file * @inode: the inode of the file to return @@ -1036,34 +1077,30 @@ struct file_lock *new_fl, *flock; struct file_lock *fl; int alloc_err; + unsigned long break_time; + int i_have_this_lease = 0; - alloc_err = lease_alloc(NULL, 0, &new_fl); + alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK, + &new_fl); lock_kernel(); + + time_out_leases(inode); + flock = inode->i_flock; - if (flock->fl_type & F_INPROGRESS) { - if ((mode & O_NONBLOCK) - || (flock->fl_owner == current->files)) { - error = -EWOULDBLOCK; - goto out; - } - if (alloc_err != 0) { - error = alloc_err; - goto out; - } - do { - error = locks_block_on(flock, new_fl); - if (error != 0) - goto out; - flock = inode->i_flock; - if (!(flock && IS_LEASE(flock))) - goto out; - } while (flock->fl_type & F_INPROGRESS); - } + if ((flock == NULL) || !IS_LEASE(flock)) + goto out; + + for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) + if (fl->fl_owner == current->files) + i_have_this_lease = 1; if (mode & FMODE_WRITE) { /* If we want write access, we have to revoke any lease. */ future = F_UNLCK | F_INPROGRESS; + } else if (flock->fl_type & F_INPROGRESS) { + /* If the lease is already being broken, we just leave it */ + future = flock->fl_type; } else if (flock->fl_type & F_WRLCK) { /* Downgrade the exclusive lease to a read-only lease. 
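time_out_leases() above leans on a sentinel: fl_break_time == 0 means "no deadline", so the code below that arms a deadline is careful never to hand out a real value of 0. A compilable sketch of the convention (jiffies and HZ are stand-ins for the kernel's tick counter):

#include <stdbool.h>

static unsigned long jiffies;		/* stand-in tick counter */
#define HZ 100

static unsigned long break_deadline(unsigned int break_secs)
{
	unsigned long t = 0;

	if (break_secs > 0) {
		t = jiffies + (unsigned long)break_secs * HZ;
		if (t == 0)		/* wrapped exactly onto the sentinel */
			t++;
	}
	return t;			/* 0 == never times out */
}

static bool lease_overdue(unsigned long deadline)
{
	/* wrap-safe comparison, in the style of the jiffies macros */
	return deadline != 0 && (long)(jiffies - deadline) >= 0;
}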
*/ future = F_RDLCK | F_INPROGRESS; @@ -1072,38 +1109,48 @@ goto out; } - if (alloc_err && (flock->fl_owner != current->files)) { + if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) { error = alloc_err; goto out; } - fl = flock; - do { - fl->fl_type = future; - fl = fl->fl_next; - } while (fl != NULL && IS_LEASE(fl)); + break_time = 0; + if (lease_break_time > 0) { + break_time = jiffies + lease_break_time * HZ; + if (break_time == 0) + break_time++; /* so that 0 means no break time */ + } - kill_fasync(&flock->fl_fasync, SIGIO, POLL_MSG); + for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) { + if (fl->fl_type != future) { + fl->fl_type = future; + fl->fl_break_time = break_time; + kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); + } + } - if ((mode & O_NONBLOCK) || (flock->fl_owner == current->files)) { + if (i_have_this_lease || (mode & O_NONBLOCK)) { error = -EWOULDBLOCK; goto out; } - if (lease_break_time > 0) - error = lease_break_time * HZ; - else - error = 0; restart: - error = locks_block_on_timeout(flock, new_fl, error); - if (error == 0) { - /* We timed out. Unilaterally break the lease. */ - locks_delete_lock(&inode->i_flock); - printk(KERN_WARNING "lease timed out\n"); - } else if (error > 0) { - flock = inode->i_flock; - if (flock && IS_LEASE(flock)) - goto restart; + break_time = flock->fl_break_time; + if (break_time != 0) { + break_time -= jiffies; + if (break_time == 0) + break_time++; + } + error = locks_block_on_timeout(flock, new_fl, break_time); + if (error >= 0) { + if (error == 0) + time_out_leases(inode); + /* Wait for the next lease that has not been broken yet */ + for (flock = inode->i_flock; flock && IS_LEASE(flock); + flock = flock->fl_next) { + if (flock->fl_type & F_INPROGRESS) + goto restart; + } error = 0; } @@ -1135,45 +1182,40 @@ * @filp: the file * * The value returned by this function will be one of + * (if no lease break is pending): * - * %F_RDLCK to indicate a read-only (type II) lease is held. + * %F_RDLCK to indicate a shared lease is held. * * %F_WRLCK to indicate an exclusive lease is held. * - * XXX: sfr & i disagree over whether F_INPROGRESS + * %F_UNLCK to indicate no lease is held. + * + * (if a lease break is pending): + * + * %F_RDLCK to indicate an exclusive lease needs to be + * changed to a shared lease (or removed). + * + * %F_UNLCK to indicate the lease needs to be removed. + * + * XXX: sfr & willy disagree over whether F_INPROGRESS * should be returned to userspace. 
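Seen from userspace, the semantics documented above come down to two fcntl commands. A minimal sketch (the path is made up and error handling is trimmed; taking a lease requires owning the file or having the right capability):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/leased-file", O_RDONLY);

	if (fd < 0)
		return 1;
	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)	/* take a read lease */
		perror("F_SETLEASE");
	/* F_GETLEASE returns F_RDLCK, F_WRLCK or F_UNLCK; while a break is
	 * pending it reports the type the lease is being downgraded to. */
	printf("lease state: %d\n", fcntl(fd, F_GETLEASE));
	close(fd);
	return 0;
}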
*/ int fcntl_getlease(struct file *filp) { struct file_lock *fl; - - fl = filp->f_dentry->d_inode->i_flock; - if ((fl == NULL) || !IS_LEASE(fl)) - return F_UNLCK; - return fl->fl_type & ~F_INPROGRESS; -} + int type = F_UNLCK; -/* We already had a lease on this file; just change its type */ -static int lease_modify(struct file_lock **before, int arg, int fd, struct file *filp) -{ - struct file_lock *fl = *before; - int error = assign_type(fl, arg); - if (error < 0) - goto out; - - locks_wake_up_blocks(fl); - - if (arg == F_UNLCK) { - filp->f_owner.pid = 0; - filp->f_owner.uid = 0; - filp->f_owner.euid = 0; - filp->f_owner.signum = 0; - locks_delete_lock(before); - fasync_helper(fd, filp, 0, &fl->fl_fasync); + lock_kernel(); + time_out_leases(filp->f_dentry->d_inode); + for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl); + fl = fl->fl_next) { + if (fl->fl_file == filp) { + type = fl->fl_type & ~F_INPROGRESS; + break; + } } - -out: - return error; + unlock_kernel(); + return type; } /** @@ -1201,50 +1243,59 @@ if (!S_ISREG(inode->i_mode)) return -EINVAL; + lock_kernel(); + + time_out_leases(inode); + /* * FIXME: What about F_RDLCK and files open for writing? */ + error = -EAGAIN; if ((arg == F_WRLCK) && ((atomic_read(&dentry->d_count) > 1) || (atomic_read(&inode->i_count) > 1))) - return -EAGAIN; - - before = &inode->i_flock; - - lock_kernel(); + goto out_unlock; - while ((fl = *before) != NULL) { - if (!IS_LEASE(fl)) - break; + /* + * At this point, we know that if there is an exclusive + * lease on this file, then we hold it on this filp + * (otherwise our open of this file would have blocked). + * And if we are trying to acquire an exclusive lease, + * then the file is not open by anyone (including us) + * except for this filp. + */ + for (before = &inode->i_flock; + ((fl = *before) != NULL) && IS_LEASE(fl); + before = &fl->fl_next) { if (fl->fl_file == filp) my_before = before; - else if (fl->fl_type & F_WRLCK) + else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) + /* + * Someone is in the process of opening this + * file for writing so we may not take an + * exclusive lease on it. 
+ */ wrlease_count++; else rdlease_count++; - before = &fl->fl_next; } if ((arg == F_RDLCK && (wrlease_count > 0)) || - (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) { - error = -EAGAIN; + (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) goto out_unlock; - } if (my_before != NULL) { - error = lease_modify(my_before, arg, fd, filp); + error = lease_modify(my_before, arg); goto out_unlock; } - if (arg == F_UNLCK) { - error = 0; + error = 0; + if (arg == F_UNLCK) goto out_unlock; - } - if (!leases_enable) { - error = -EINVAL; + error = -EINVAL; + if (!leases_enable) goto out_unlock; - } error = lease_alloc(filp, arg, &fl); if (error) @@ -1616,9 +1667,15 @@ before = &inode->i_flock; while ((fl = *before) != NULL) { - if ((IS_FLOCK(fl) || IS_LEASE(fl)) && (fl->fl_file == filp)) { - locks_delete_lock(before); - continue; + if (fl->fl_file == filp) { + if (IS_FLOCK(fl)) { + locks_delete_lock(before); + continue; + } + if (IS_LEASE(fl)) { + lease_modify(before, F_UNLCK); + continue; + } } before = &fl->fl_next; } @@ -1673,7 +1730,13 @@ out += sprintf(out, "FLOCK ADVISORY "); } } else if (IS_LEASE(fl)) { - out += sprintf(out, "LEASE MANDATORY "); + out += sprintf(out, "LEASE "); + if (fl->fl_type & F_INPROGRESS) + out += sprintf(out, "BREAKING "); + else if (fl->fl_file) + out += sprintf(out, "ACTIVE "); + else + out += sprintf(out, "BREAKER "); } else { out += sprintf(out, "UNKNOWN UNKNOWN "); } @@ -1684,7 +1747,9 @@ : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); } else { out += sprintf(out, "%s ", - (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); + (fl->fl_type & F_INPROGRESS) + ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " + : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); } out += sprintf(out, "%d %s:%ld ", fl->fl_pid, diff -Nru a/fs/namei.c b/fs/namei.c --- a/fs/namei.c Tue Jun 18 19:12:02 2002 +++ b/fs/namei.c Tue Jun 18 19:12:02 2002 @@ -204,13 +204,8 @@ int permission(struct inode * inode,int mask) { - if (inode->i_op && inode->i_op->permission) { - int retval; - lock_kernel(); - retval = inode->i_op->permission(inode, mask); - unlock_kernel(); - return retval; - } + if (inode->i_op && inode->i_op->permission) + return inode->i_op->permission(inode, mask); return vfs_permission(inode, mask); } @@ -833,22 +828,6 @@ } nd->mnt = mntget(current->fs->rootmnt); nd->dentry = dget(current->fs->root); - read_unlock(¤t->fs->lock); - return 1; -} - -/* SMP-safe */ -int path_init(const char *name, unsigned int flags, struct nameidata *nd) -{ - nd->last_type = LAST_ROOT; /* if there are only slashes... 
*/ - nd->old_mnt = NULL; - nd->old_dentry = NULL; - nd->flags = flags; - if (*name=='/') - return walk_init_root(name,nd); - read_lock(¤t->fs->lock); - nd->mnt = mntget(current->fs->pwdmnt); - nd->dentry = dget(current->fs->pwd); read_unlock(¤t->fs->lock); return 1; } diff -Nru a/fs/nfs/dir.c b/fs/nfs/dir.c --- a/fs/nfs/dir.c Tue Jun 18 19:12:02 2002 +++ b/fs/nfs/dir.c Tue Jun 18 19:12:02 2002 @@ -1123,12 +1123,16 @@ && error != -EACCES) goto out; + lock_kernel(); + error = NFS_PROTO(inode)->access(inode, mask, 0); if (error == -EACCES && NFS_CLIENT(inode)->cl_droppriv && current->uid != 0 && current->gid != 0 && (current->fsuid != current->uid || current->fsgid != current->gid)) error = NFS_PROTO(inode)->access(inode, mask, 1); + + unlock_kernel(); out: return error; diff -Nru a/fs/ntfs/aops.c b/fs/ntfs/aops.c --- a/fs/ntfs/aops.c Tue Jun 18 19:12:03 2002 +++ b/fs/ntfs/aops.c Tue Jun 18 19:12:03 2002 @@ -61,10 +61,10 @@ if (file_ofs < ni->initialized_size) ofs = ni->initialized_size - file_ofs; - addr = kmap_atomic(page, KM_BIO_IRQ); + addr = kmap_atomic(page, KM_BIO_SRC_IRQ); memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); flush_dcache_page(page); - kunmap_atomic(addr, KM_BIO_IRQ); + kunmap_atomic(addr, KM_BIO_SRC_IRQ); } } else SetPageError(page); @@ -363,10 +363,10 @@ if (file_ofs < vol->mftbmp_initialized_size) ofs = vol->mftbmp_initialized_size - file_ofs; - addr = kmap_atomic(page, KM_BIO_IRQ); + addr = kmap_atomic(page, KM_BIO_SRC_IRQ); memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); flush_dcache_page(page); - kunmap_atomic(addr, KM_BIO_IRQ); + kunmap_atomic(addr, KM_BIO_SRC_IRQ); } } else SetPageError(page); @@ -559,10 +559,10 @@ if (file_ofs < ni->initialized_size) ofs = ni->initialized_size - file_ofs; - addr = kmap_atomic(page, KM_BIO_IRQ); + addr = kmap_atomic(page, KM_BIO_SRC_IRQ); memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); flush_dcache_page(page); - kunmap_atomic(addr, KM_BIO_IRQ); + kunmap_atomic(addr, KM_BIO_SRC_IRQ); } } else SetPageError(page); @@ -593,7 +593,7 @@ rec_size = ni->_IDM(index_block_size); recs = PAGE_CACHE_SIZE / rec_size; - addr = kmap_atomic(page, KM_BIO_IRQ); + addr = kmap_atomic(page, KM_BIO_SRC_IRQ); for (i = 0; i < recs; i++) { if (!post_read_mst_fixup((NTFS_RECORD*)(addr + i * rec_size), rec_size)) @@ -607,7 +607,7 @@ ni->_IDM(index_block_size_bits)) + i)); } flush_dcache_page(page); - kunmap_atomic(addr, KM_BIO_IRQ); + kunmap_atomic(addr, KM_BIO_SRC_IRQ); if (likely(!nr_err && recs)) SetPageUptodate(page); else { diff -Nru a/fs/ntfs/compress.c b/fs/ntfs/compress.c --- a/fs/ntfs/compress.c Tue Jun 18 19:12:02 2002 +++ b/fs/ntfs/compress.c Tue Jun 18 19:12:02 2002 @@ -50,14 +50,15 @@ } ntfs_compression_constants; /** - * ntfs_compression_buffers - per-CPU buffers for the decompression engine. + * ntfs_compression_buffer - one buffer for the decompression engine. */ -static u8 **ntfs_compression_buffers = NULL; +static u8 *ntfs_compression_buffer = NULL; + +/* This spinlock which protects it */ +static spinlock_t ntfs_cb_lock = SPIN_LOCK_UNLOCKED; /** - * allocate_compression_buffers - allocate the per-CPU decompression buffers - * - * Allocate the per-CPU buffers for the decompression engine. + * allocate_compression_buffers - allocate the decompression buffers * * Caller has to hold the ntfs_lock semaphore. 
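The compress.c rework above trades the per-CPU buffer array for a single vmalloc'd buffer guarded by ntfs_cb_lock: less memory and simpler setup, at the price of serializing decompression (and of not being allowed to sleep while the lock is held). The shape of the pattern in a userspace sketch, names hypothetical:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char *cb;		/* the one shared scratch buffer */

static int cb_init(size_t size)
{
	cb = malloc(size);
	return cb ? 0 : -1;
}

static unsigned char *cb_grab(void)	/* serializes all users */
{
	pthread_mutex_lock(&cb_lock);
	return cb;
}

static void cb_release(void)
{
	pthread_mutex_unlock(&cb_lock);
}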
* @@ -67,30 +68,16 @@ { int i, j; - BUG_ON(ntfs_compression_buffers); + BUG_ON(ntfs_compression_buffer); - ntfs_compression_buffers = (u8**)kmalloc(smp_num_cpus * sizeof(u8*), - GFP_KERNEL); - if (!ntfs_compression_buffers) + ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE); + if (!ntfs_compression_buffer) return -ENOMEM; - for (i = 0; i < smp_num_cpus; i++) { - ntfs_compression_buffers[i] = (u8*)vmalloc(NTFS_MAX_CB_SIZE); - if (!ntfs_compression_buffers[i]) - break; - } - if (i == smp_num_cpus) - return 0; - /* Allocation failed, cleanup and return error. */ - for (j = 0; j < i; j++) - vfree(ntfs_compression_buffers[j]); - kfree(ntfs_compression_buffers); - return -ENOMEM; + return 0; } /** - * free_compression_buffers - free the per-CPU decompression buffers - * - * Free the per-CPU buffers used by the decompression engine. + * free_compression_buffers - free the decompression buffers * * Caller has to hold the ntfs_lock semaphore. */ @@ -98,12 +85,9 @@ { int i; - BUG_ON(!ntfs_compression_buffers); - - for (i = 0; i < smp_num_cpus; i++) - vfree(ntfs_compression_buffers[i]); - kfree(ntfs_compression_buffers); - ntfs_compression_buffers = NULL; + BUG_ON(!ntfs_compression_buffer); + vfree(ntfs_compression_buffer); + ntfs_compression_buffer = NULL; } /** @@ -188,8 +172,8 @@ ntfs_debug("Completed. Returning success (0)."); err = 0; return_error: - /* We can sleep from now on, so we reenable preemption. */ - preempt_enable(); + /* We can sleep from now on, so we drop lock. */ + spin_unlock(&ntfs_cb_lock); /* Second stage: finalize completed pages. */ for (i = 0; i < nr_completed_pages; i++) { int di = completed_pages[i]; @@ -607,12 +591,10 @@ } /* - * Get the compression buffer corresponding to the current CPU. We must - * not sleep any more until we are finished with the compression buffer. - * If on a preemptible kernel, now disable preemption. - */ - preempt_disable(); - cb = ntfs_compression_buffers[smp_processor_id()]; + * Get the compression buffer. We must not sleep any more + * until we are finished with it. */ + spin_lock(&ntfs_cb_lock); + cb = ntfs_compression_buffer; BUG_ON(!cb); @@ -647,8 +629,8 @@ if (vcn == start_vcn - cb_clusters) { /* Sparse cb, zero out page range overlapping the cb. */ ntfs_debug("Found sparse compression block."); - /* We can sleep from now on, so we reenable preemption. */ - preempt_enable(); + /* We can sleep from now on, so we drop lock. */ + spin_unlock(&ntfs_cb_lock); if (cb_max_ofs) cb_max_page--; for (; cur_page < cb_max_page; cur_page++) { @@ -729,8 +711,8 @@ cb_pos += cb_max_ofs - cur_ofs; cur_ofs = cb_max_ofs; } - /* We can sleep from now on, so we reenable preemption. */ - preempt_enable(); + /* We can sleep from now on, so drop lock. */ + spin_unlock(&ntfs_cb_lock); /* Second stage: finalize pages. */ for (; cur2_page < cb_max_page; cur2_page++) { page = pages[cur2_page]; @@ -759,9 +741,8 @@ cb_max_page, cb_max_ofs, xpage, &xpage_done, cb_pos, cb_size - (cb_pos - cb)); /* - * We can sleep from now on, preemption already reenabled by - * ntfs_decompess. - */ + * We can sleep from now on, lock already dropped by + * ntfs_decompress. */ if (err) { ntfs_error(vol->sb, "ntfs_decompress() failed in inode " "0x%Lx with error code %i. 
Skipping " diff -Nru a/fs/ntfs/super.c b/fs/ntfs/super.c --- a/fs/ntfs/super.c Tue Jun 18 19:12:01 2002 +++ b/fs/ntfs/super.c Tue Jun 18 19:12:01 2002 @@ -1615,7 +1615,7 @@ if (vol->cluster_size <= 4096 && !ntfs_nr_compression_users++) { result = allocate_compression_buffers(); if (result) { - ntfs_error(NULL, "Failed to allocate per CPU buffers " + ntfs_error(NULL, "Failed to allocate buffers " "for compression engine."); ntfs_nr_compression_users--; up(&ntfs_lock); diff -Nru a/fs/partitions/Makefile b/fs/partitions/Makefile --- a/fs/partitions/Makefile Tue Jun 18 19:12:02 2002 +++ b/fs/partitions/Makefile Tue Jun 18 19:12:02 2002 @@ -2,7 +2,7 @@ # Makefile for the linux kernel. # -export-objs := check.o ibm.o msdos.o +export-objs := check.o msdos.o obj-y := check.o diff -Nru a/fs/partitions/check.c b/fs/partitions/check.c --- a/fs/partitions/check.c Tue Jun 18 19:12:02 2002 +++ b/fs/partitions/check.c Tue Jun 18 19:12:02 2002 @@ -83,13 +83,17 @@ /* * This is ucking fugly but its probably the best thing for 2.4.x - * Take it as a clear reminder than we should put the device name + * Take it as a clear reminder that: 1) we should put the device name * generation in the object kdev_t points to in 2.5. + * and 2) ioctls better work on half-opened devices. */ #ifdef CONFIG_ARCH_S390 int (*genhd_dasd_name)(char*,int,int,struct gendisk*) = NULL; +int (*genhd_dasd_ioctl)(struct inode *inp, struct file *filp, + unsigned int no, unsigned long data); EXPORT_SYMBOL(genhd_dasd_name); +EXPORT_SYMBOL(genhd_dasd_ioctl); #endif /* diff -Nru a/fs/partitions/ibm.c b/fs/partitions/ibm.c --- a/fs/partitions/ibm.c Tue Jun 18 19:12:03 2002 +++ b/fs/partitions/ibm.c Tue Jun 18 19:12:03 2002 @@ -8,6 +8,7 @@ * History of changes (starts July 2000) * 07/10/00 Fixed detection of CMS formatted disks * 02/13/00 VTOC partition support added + * 12/27/01 fixed PL030593 (CMS reserved minidisk not detected on 64 bit) */ #include @@ -29,47 +30,6 @@ #include "check.h" #include -typedef enum { - ibm_partition_lnx1 = 0, - ibm_partition_vol1 = 1, - ibm_partition_cms1 = 2, - ibm_partition_none = 3 -} ibm_partition_t; - -static char* part_names[] = { [ibm_partition_lnx1] = "LNX1", - [ibm_partition_vol1] = "VOL1", - [ibm_partition_cms1] = "CMS1", - [ibm_partition_none] = "(nonl)" -}; - -static ibm_partition_t -get_partition_type ( char * type ) -{ - int i; - for ( i = 0; i < 3; i ++) { - if ( ! strncmp (type,part_names[i],4) ) - break; - } - return i; -} - -/* - * add the two default partitions - * - whole dasd - * - whole dasd without "offset" - */ -static inline void -two_partitions(struct gendisk *hd, - int minor, - int blocksize, - int offset, - int size) { - - add_gd_partition( hd, minor, 0, size); - add_gd_partition( hd, minor+1, offset*blocksize, size-offset*blocksize); -} - - /* * compute the block number from a * cyl-cyl-head-head structure @@ -92,115 +52,186 @@ ptr->b; } +/* + * We used to use ioctl_by_bdev in early 2.4, but it broke + * between 2.4.9 and 2.4.18 somewhere. 
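+ * Instead the partition code calls back into the DASD driver through the + * genhd_dasd_ioctl hook declared below; the driver is expected to fill the + * hook in when it initializes, roughly like this sketch (dasd_ioctl as the + * name of the driver's ioctl entry point is an assumption here): + * + * genhd_dasd_ioctl = dasd_ioctl; + *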
+ */ +extern int (*genhd_dasd_ioctl)(struct inode *inp, struct file *filp, + unsigned int no, unsigned long data); + +static int +ibm_ioctl_unopened(struct block_device *bdev, unsigned cmd, unsigned long arg) +{ + int res; + mm_segment_t old_fs = get_fs(); + + if (genhd_dasd_ioctl == NULL) + return -ENODEV; +#if 0 + lock_kernel(); + if (bdev->bd_op->owner) + __MOD_INC_USE_COUNT(bdev->bd_op->owner); + unlock_kernel(); +#endif + set_fs(KERNEL_DS); + res = (*genhd_dasd_ioctl)(bdev->bd_inode, NULL, cmd, arg); + set_fs(old_fs); +#if 0 + lock_kernel(); + if (bdev->bd_op->owner) + __MOD_DEC_USE_COUNT(bdev->bd_op->owner); + unlock_kernel(); +#endif + return res; +} + +/* + * Check an IBM DASD volume label and build the partition table from it. + */ int ibm_partition(struct gendisk *hd, struct block_device *bdev, - unsigned long first_sector, int first_part_minor) + unsigned long first_sector, int first_part_minor) { - Sector sect, sect2; - unsigned char *data; - ibm_partition_t partition_type; + int blocksize, offset, size; + dasd_information_t *info; + struct hd_geometry *geo; char type[5] = {0,}; char name[7] = {0,}; - struct hd_geometry *geo; - int blocksize; - int offset=0, size=0, psize=0, counter=0; - unsigned int blk; - format1_label_t f1; - volume_label_t vlabel; - dasd_information_t *info; - kdev_t dev = to_kdev_t(bdev->bd_dev); + volume_label_t *vlabel; + unsigned char *data; + Sector sect; if ( first_sector != 0 ) BUG(); - info = (struct dasd_information_t *)kmalloc(sizeof(dasd_information_t), - GFP_KERNEL); - if ( info == NULL ) - return 0; - if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)(info))) - return 0; - geo = (struct hd_geometry *)kmalloc(sizeof(struct hd_geometry), - GFP_KERNEL); - if ( geo == NULL ) - return 0; - if (ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo); - return 0; - blocksize = bdev_hardsect_size(bdev) >> 9; + if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL) + goto out_noinfo; + if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) + goto out_nogeo; + if ((vlabel = kmalloc(sizeof(volume_label_t), GFP_KERNEL)) == NULL) + goto out_novlab; - data = read_dev_sector(bdev, inode->label_block*blocksize, &sect); - if (!data) - return 0; + if (ibm_ioctl_unopened(bdev, BIODASDINFO, (unsigned long)info) != 0 || + ibm_ioctl_unopened(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) + goto out_noioctl; + + if ((blocksize = bdev_hardsect_size(bdev)) <= 0) + goto out_badsect; + + /* + * Get volume label, extract name and type. 
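+ * The label is stored in EBCDIC on disk, which is why type and name are + * run through EBCASC() below before being compared against "CMS1", "VOL1" + * and "LNX1".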
+ */ + data = read_dev_sector(bdev, info->label_block*(blocksize/512), &sect); + if (data == NULL) + goto out_readerr; strncpy (type, data, 4); - if ((!info->FBA_layout) && (!strcmp(info->type,"ECKD"))) { - strncpy ( name, data + 8, 6); - } else { - strncpy ( name, data + 4, 6); - } - memcpy (&vlabel, data, sizeof(volume_label_t)); + if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) + strncpy(name, data + 8, 6); + else + strncpy(name, data + 4, 6); + memcpy (vlabel, data, sizeof(volume_label_t)); + put_dev_sector(sect); - EBCASC(type,4); - EBCASC(name,6); - - partition_type = get_partition_type(type); - printk ( "%4s/%8s:",part_names[partition_type],name); - switch ( partition_type ) { - case ibm_partition_cms1: - if (* ((long *)data + 13) != 0) { + EBCASC(type, 4); + EBCASC(name, 6); + + /* + * Three different types: CMS1, VOL1 and LNX1/unlabeled + */ + if (strncmp(type, "CMS1", 4) == 0) { + /* + * VM style CMS1 labeled disk + */ + int *label = (int *) data; + + if (label[13] != 0) { + printk("CMS1/%8s(MDSK):", name); /* disk is reserved minidisk */ - long *label=(long*)data; - blocksize = label[3]>>9; + blocksize = label[3]; offset = label[13]; - size = (label[7]-1)*blocksize; - printk ("(MDSK)"); + size = (label[7] - 1)*(blocksize >> 9); } else { + printk("CMS1/%8s:", name); offset = (info->label_block + 1); - size = hd -> sizes[MINOR(dev)]<<1; + size = bdev->bd_inode->i_size >> 9; } - two_partitions( hd, MINOR(dev), blocksize, offset, size); - break; - case ibm_partition_lnx1: - case ibm_partition_none: - offset = (info->label_block + 1); - size = hd -> sizes[MINOR(dev)]<<1; - two_partitions( hd, MINOR(dev), blocksize, offset, size); - break; - case ibm_partition_vol1: - size = hd -> sizes[MINOR(dev)]<<1; - add_gd_partition(hd, MINOR(dev), 0, size); - - /* get block number and read then first format1 label */ - blk = cchhb2blk(&vlabel.vtoc, geo) + 1; - data = read_dev_sector(bdev, blk * blocksize, &sect2); - if (data) { - memcpy (&f1, data, sizeof(format1_label_t)); - put_dev_sector(sect2); - } - - while (f1.DS1FMTID == _ascebc['1']) { + // add_gd_partition(hd, first_part_minor - 1, 0, size); + add_gd_partition(hd, first_part_minor, + offset*(blocksize >> 9), + size-offset*(blocksize >> 9)); + } else if (strncmp(type, "VOL1", 4) == 0) { + /* + * New style VOL1 labeled disk + */ + unsigned int blk; + int counter; + + printk("VOL1/%8s:", name); + + /* get block number and read then go through format1 labels */ + blk = cchhb2blk(&vlabel->vtoc, geo) + 1; + counter = 0; + while ((data = read_dev_sector(bdev, blk*(blocksize/512), + &sect)) != NULL) { + format1_label_t f1; + + memcpy(&f1, data, sizeof(format1_label_t)); + put_dev_sector(sect); + + /* skip FMT4 / FMT5 / FMT7 labels */ + if (f1.DS1FMTID == _ascebc['4'] + || f1.DS1FMTID == _ascebc['5'] + || f1.DS1FMTID == _ascebc['7']) { + blk++; + continue; + } + + /* only FMT1 valid at this point */ + if (f1.DS1FMTID != _ascebc['1']) + break; + + /* OK, we got valid partition data */ offset = cchh2blk(&f1.DS1EXT1.llimit, geo); - psize = cchh2blk(&f1.DS1EXT1.ulimit, geo) - + size = cchh2blk(&f1.DS1EXT1.ulimit, geo) - offset + geo->sectors; - + if (counter >= (1 << hd->minor_shift)) + break; + add_gd_partition(hd, first_part_minor + counter, + offset * (blocksize >> 9), + size * (blocksize >> 9)); counter++; - add_gd_partition(hd, MINOR(dev) + counter, - offset * blocksize, - psize * blocksize); - blk++; - data = read_dev_sector(bdev, blk * blocksize, &sect2); - if (data) { - memcpy (&f1, data, sizeof(format1_label_t)); - put_dev_sector(sect2); - } } - 
break; - default: - add_gd_partition( hd, MINOR(dev), 0, 0); - add_gd_partition( hd, MINOR(dev) + 1, 0, 0); + } else { + /* + * Old style LNX1 or unlabeled disk + */ + if (strncmp(type, "LNX1", 4) == 0) + printk ("LNX1/%8s:", name); + else + printk("(nonl)/%8s:", name); + offset = (info->label_block + 1); + size = (bdev->bd_inode->i_size >> 9); + // add_gd_partition(hd, first_part_minor - 1, 0, size); + add_gd_partition(hd, first_part_minor, + offset*(blocksize >> 9), + size-offset*(blocksize >> 9)); } - - printk ( "\n" ); - put_dev_sector(sect); + + printk("\n"); + kfree(vlabel); + kfree(geo); + kfree(info); return 1; + +out_readerr: +out_badsect: +out_noioctl: + kfree(vlabel); +out_novlab: + kfree(geo); +out_nogeo: + kfree(info); +out_noinfo: + return 0; } diff -Nru a/fs/proc/array.c b/fs/proc/array.c --- a/fs/proc/array.c Tue Jun 18 19:12:01 2002 +++ b/fs/proc/array.c Tue Jun 18 19:12:01 2002 @@ -695,12 +695,14 @@ task->times.tms_utime, task->times.tms_stime); - for (i = 0 ; i < smp_num_cpus; i++) + for (i = 0 ; i < NR_CPUS; i++) { + if (cpu_online(i)) len += sprintf(buffer + len, "cpu%d %lu %lu\n", i, - task->per_cpu_utime[cpu_logical_map(i)], - task->per_cpu_stime[cpu_logical_map(i)]); + task->per_cpu_utime[i], + task->per_cpu_stime[i]); + } return len; } #endif diff -Nru a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c --- a/fs/proc/proc_misc.c Tue Jun 18 19:12:01 2002 +++ b/fs/proc/proc_misc.c Tue Jun 18 19:12:01 2002 @@ -281,29 +281,32 @@ unsigned int sum = 0, user = 0, nice = 0, system = 0; int major, disk; - for (i = 0 ; i < smp_num_cpus; i++) { - int cpu = cpu_logical_map(i), j; + for (i = 0 ; i < NR_CPUS; i++) { + int j; - user += kstat.per_cpu_user[cpu]; - nice += kstat.per_cpu_nice[cpu]; - system += kstat.per_cpu_system[cpu]; + if(!cpu_online(i)) continue; + user += kstat.per_cpu_user[i]; + nice += kstat.per_cpu_nice[i]; + system += kstat.per_cpu_system[i]; #if !defined(CONFIG_ARCH_S390) for (j = 0 ; j < NR_IRQS ; j++) - sum += kstat.irqs[cpu][j]; + sum += kstat.irqs[i][j]; #endif } len = sprintf(page, "cpu %u %u %u %lu\n", user, nice, system, - jif * smp_num_cpus - (user + nice + system)); - for (i = 0 ; i < smp_num_cpus; i++) + jif * num_online_cpus() - (user + nice + system)); + for (i = 0 ; i < NR_CPUS; i++){ + if (!cpu_online(i)) continue; len += sprintf(page + len, "cpu%d %u %u %u %lu\n", i, - kstat.per_cpu_user[cpu_logical_map(i)], - kstat.per_cpu_nice[cpu_logical_map(i)], - kstat.per_cpu_system[cpu_logical_map(i)], - jif - ( kstat.per_cpu_user[cpu_logical_map(i)] \ - + kstat.per_cpu_nice[cpu_logical_map(i)] \ - + kstat.per_cpu_system[cpu_logical_map(i)])); + kstat.per_cpu_user[i], + kstat.per_cpu_nice[i], + kstat.per_cpu_system[i], + jif - ( kstat.per_cpu_user[i] \ + + kstat.per_cpu_nice[i] \ + + kstat.per_cpu_system[i])); + } len += sprintf(page + len, "page %u %u\n" "swap %u %u\n" diff -Nru a/fs/qnx4/fsync.c b/fs/qnx4/fsync.c --- a/fs/qnx4/fsync.c Tue Jun 18 19:12:02 2002 +++ b/fs/qnx4/fsync.c Tue Jun 18 19:12:02 2002 @@ -37,7 +37,7 @@ if (!*block) return 0; tmp = *block; - bh = sb_get_hash_table(inode->i_sb, *block); + bh = sb_find_get_block(inode->i_sb, *block); if (!bh) return 0; if (*block != tmp) { diff -Nru a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c --- a/fs/reiserfs/fix_node.c Tue Jun 18 19:12:02 2002 +++ b/fs/reiserfs/fix_node.c Tue Jun 18 19:12:02 2002 @@ -920,7 +920,7 @@ /* Get left neighbor block number. */ n_left_neighbor_blocknr = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position); /* Look for the left neighbor in the cache. 
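(sb_find_get_block() is the renamed sb_get_hash_table(): a pure page-cache lookup that performs no I/O and returns either NULL or a buffer_head with an extra reference that the caller must drop with brelse().) 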
*/ - if ( (left = sb_get_hash_table(p_s_sb, n_left_neighbor_blocknr)) ) { + if ( (left = sb_find_get_block(p_s_sb, n_left_neighbor_blocknr)) ) { RFALSE( buffer_uptodate (left) && ! B_IS_IN_TREE(left), "vs-8170: left neighbor (%b %z) is not in the tree", left, left); diff -Nru a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c --- a/fs/reiserfs/journal.c Tue Jun 18 19:12:02 2002 +++ b/fs/reiserfs/journal.c Tue Jun 18 19:12:02 2002 @@ -689,7 +689,7 @@ count = 0 ; for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */ bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) % SB_ONDISK_JOURNAL_SIZE(s); - tbh = journal_get_hash_table(s, bn) ; + tbh = journal_find_get_block(s, bn) ; /* kill this sanity check */ if (count > (orig_commit_left + 2)) { @@ -718,7 +718,7 @@ for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */ bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ; - tbh = journal_get_hash_table(s, bn) ; + tbh = journal_find_get_block(s, bn) ; wait_on_buffer(tbh) ; if (!buffer_uptodate(tbh)) { @@ -2764,7 +2764,7 @@ int cleaned = 0 ; if (reiserfs_dont_log(th->t_super)) { - bh = sb_get_hash_table(p_s_sb, blocknr) ; + bh = sb_find_get_block(p_s_sb, blocknr) ; if (bh && buffer_dirty (bh)) { printk ("journal_mark_freed(dont_log): dirty buffer on hash list: %lx %ld\n", bh->b_state, blocknr); BUG (); @@ -2772,7 +2772,7 @@ brelse (bh); return 0 ; } - bh = sb_get_hash_table(p_s_sb, blocknr) ; + bh = sb_find_get_block(p_s_sb, blocknr) ; /* if it is journal new, we just remove it from this transaction */ if (bh && buffer_journal_new(bh)) { mark_buffer_notjournal_new(bh) ; diff -Nru a/fs/ufs/truncate.c b/fs/ufs/truncate.c --- a/fs/ufs/truncate.c Tue Jun 18 19:12:02 2002 +++ b/fs/ufs/truncate.c Tue Jun 18 19:12:02 2002 @@ -117,7 +117,7 @@ frag1 = ufs_fragnum (frag1); frag2 = ufs_fragnum (frag2); for (j = frag1; j < frag2; j++) { - bh = sb_get_hash_table (sb, tmp + j); + bh = sb_find_get_block (sb, tmp + j); if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) { retry = 1; brelse (bh); @@ -140,7 +140,7 @@ if (!tmp) continue; for (j = 0; j < uspi->s_fpb; j++) { - bh = sb_get_hash_table(sb, tmp + j); + bh = sb_find_get_block(sb, tmp + j); if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) { retry = 1; brelse (bh); @@ -179,7 +179,7 @@ ufs_panic(sb, "ufs_truncate_direct", "internal error"); frag4 = ufs_fragnum (frag4); for (j = 0; j < frag4; j++) { - bh = sb_get_hash_table (sb, tmp + j); + bh = sb_find_get_block (sb, tmp + j); if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) { retry = 1; brelse (bh); @@ -238,7 +238,7 @@ if (!tmp) continue; for (j = 0; j < uspi->s_fpb; j++) { - bh = sb_get_hash_table(sb, tmp + j); + bh = sb_find_get_block(sb, tmp + j); if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) { retry = 1; brelse (bh); diff -Nru a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-alpha/agp.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,11 @@ +#ifndef AGP_H +#define AGP_H 1 + +/* dummy for now */ + +#define map_page_into_agp(page) +#define unmap_page_from_agp(page) +#define flush_agp_mappings() +#define flush_agp_cache() mb() + +#endif diff -Nru a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h --- a/include/asm-generic/tlb.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-generic/tlb.h Tue Jun 18 19:12:01 2002 @@ -54,7 +54,7 @@ tlb->mm = mm; /* Use 
fast mode if only one CPU is online */ - tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL; + tlb->nr = num_online_cpus() > 1 ? 0UL : ~0UL; tlb->fullmm = full_mm_flush; tlb->freed = 0; diff -Nru a/include/asm-i386/agp.h b/include/asm-i386/agp.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-i386/agp.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,23 @@ +#ifndef AGP_H +#define AGP_H 1 + +#include + +/* + * Functions to keep the agpgart mappings coherent with the MMU. + * The GART gives the CPU a physical alias of pages in memory. The alias region is + * mapped uncacheable. Make sure there are no conflicting mappings + * with different cachability attributes for the same page. This avoids + * data corruption on some CPUs. + */ + +#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) +#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL) +#define flush_agp_mappings() global_flush_tlb() + +/* Could use CLFLUSH here if the cpu supports it. But then it would + need to be called for each cacheline of the whole page so it may not be + worth it. Would need a page for it. */ +#define flush_agp_cache() asm volatile("wbinvd":::"memory") + +#endif diff -Nru a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h --- a/include/asm-i386/cacheflush.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-i386/cacheflush.h Tue Jun 18 19:12:01 2002 @@ -15,4 +15,7 @@ #define flush_icache_page(vma,pg) do { } while (0) #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +void global_flush_tlb(void); +int change_page_attr(struct page *page, int numpages, pgprot_t prot); + #endif /* _I386_CACHEFLUSH_H */ diff -Nru a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h --- a/include/asm-i386/hardirq.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-i386/hardirq.h Tue Jun 18 19:12:03 2002 @@ -51,7 +51,7 @@ { int i; - for (i = 0; i < smp_num_cpus; i++) + for (i = 0; i < NR_CPUS; i++) if (local_irq_count(i)) return 1; return 0; diff -Nru a/include/asm-i386/io.h b/include/asm-i386/io.h --- a/include/asm-i386/io.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-i386/io.h Tue Jun 18 19:12:02 2002 @@ -121,31 +121,7 @@ return __ioremap(offset, size, 0); } -/** - * ioremap_nocache - map bus memory into CPU space - * @offset: bus address of the memory - * @size: size of the resource to map - * - * ioremap_nocache performs a platform specific sequence of operations to - * make bus memory CPU accessible via the readb/readw/readl/writeb/ - * writew/writel functions and the other mmio helpers. The returned - * address is not guaranteed to be usable directly as a virtual - * address. - * - * This version of ioremap ensures that the memory is marked uncachable - * on the CPU as well as honouring existing caching rules from things like - * the PCI bus. Note that there are other caches and buffers on many - * busses. 
In paticular driver authors should read up on PCI writes - * - * It's useful if some control registers are in such an area and - * write combining or read caching is not desirable: - */ - -static inline void * ioremap_nocache (unsigned long offset, unsigned long size) -{ - return __ioremap(offset, size, _PAGE_PCD); -} - +extern void * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap(void *addr); /* diff -Nru a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h --- a/include/asm-i386/kmap_types.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-i386/kmap_types.h Tue Jun 18 19:12:03 2002 @@ -15,10 +15,11 @@ D(2) KM_SKB_DATA_SOFTIRQ, D(3) KM_USER0, D(4) KM_USER1, -D(5) KM_BIO_IRQ, -D(6) KM_PTE0, -D(7) KM_PTE1, -D(8) KM_TYPE_NR +D(5) KM_BIO_SRC_IRQ, +D(6) KM_BIO_DST_IRQ, +D(7) KM_PTE0, +D(8) KM_PTE1, +D(9) KM_TYPE_NR }; #undef D diff -Nru a/include/asm-i386/page.h b/include/asm-i386/page.h --- a/include/asm-i386/page.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-i386/page.h Tue Jun 18 19:12:01 2002 @@ -6,6 +6,9 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) +#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) + #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff -Nru a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h --- a/include/asm-i386/pgtable-2level.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-i386/pgtable-2level.h Tue Jun 18 19:12:01 2002 @@ -40,6 +40,7 @@ * hook is made available. */ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) /* * (pmds are folded into pgds so this doesnt get actually called, * but the define is needed for a generic inline function.) diff -Nru a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h --- a/include/asm-i386/pgtable-3level.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-i386/pgtable-3level.h Tue Jun 18 19:12:03 2002 @@ -49,6 +49,8 @@ smp_wmb(); ptep->pte_low = pte.pte_low; } +#define set_pte_atomic(pteptr,pteval) \ + set_64bit((unsigned long long *)(pteptr),pte_val(pteval)) #define set_pmd(pmdptr,pmdval) \ set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval)) #define set_pgd(pgdptr,pgdval) \ diff -Nru a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h --- a/include/asm-i386/pgtable.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-i386/pgtable.h Tue Jun 18 19:12:03 2002 @@ -237,6 +237,9 @@ #define pmd_page(pmd) \ (mem_map + (pmd_val(pmd) >> PAGE_SHIFT)) +#define pmd_large(pmd) \ + ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) + /* to find an entry in a page-table-directory. */ #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) diff -Nru a/include/asm-i386/smp.h b/include/asm-i386/smp.h --- a/include/asm-i386/smp.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-i386/smp.h Tue Jun 18 19:12:01 2002 @@ -69,20 +69,6 @@ extern void zap_low_mappings (void); /* - * On x86 all CPUs are mapped 1:1 to the APIC space. - * This simplifies scheduling and IPI sending and - * compresses data structures. - */ -static inline int cpu_logical_map(int cpu) -{ - return cpu; -} -static inline int cpu_number_map(int cpu) -{ - return cpu; -} - -/* * Some lowlevel functions might want to know about * the real APIC ID <-> CPU # mapping. */ @@ -104,8 +90,22 @@ * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. 
*/ - #define smp_processor_id() (current_thread_info()->cpu) + +#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) + +extern inline unsigned int num_online_cpus(void) +{ + return hweight32(cpu_online_map); +} + +extern inline int any_online_cpu(unsigned int mask) +{ + if (mask & cpu_online_map) + return __ffs(mask & cpu_online_map); + + return -1; +} static __inline int hard_smp_processor_id(void) { diff -Nru a/include/asm-i386/xor.h b/include/asm-i386/xor.h --- a/include/asm-i386/xor.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-i386/xor.h Tue Jun 18 19:12:01 2002 @@ -76,9 +76,9 @@ " addl $128, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "r" (lines), - "r" (p1), "r" (p2) + : "+r" (lines), + "+r" (p1), "+r" (p2) + : : "memory"); FPU_RESTORE; @@ -126,9 +126,9 @@ " addl $128, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "r" (lines), - "r" (p1), "r" (p2), "r" (p3) + : "+r" (lines), + "+r" (p1), "+r" (p2), "+r" (p3) + : : "memory"); FPU_RESTORE; @@ -181,14 +181,15 @@ " addl $128, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "r" (lines), - "r" (p1), "r" (p2), "r" (p3), "r" (p4) + : "+r" (lines), + "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) + : : "memory"); FPU_RESTORE; } + static void xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) @@ -198,7 +199,11 @@ FPU_SAVE; + /* need to save/restore p4/p5 manually otherwise gcc's 10 argument + limit gets exceeded (+ counts as two arguments) */ __asm__ __volatile__ ( + " pushl %4\n" + " pushl %5\n" #undef BLOCK #define BLOCK(i) \ LD(i,0) \ @@ -241,9 +246,11 @@ " addl $128, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "g" (lines), - "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) + " popl %5\n" + " popl %4\n" + : "+r" (lines), + "+r" (p1), "+r" (p2), "+r" (p3) + : "r" (p4), "r" (p5) : "memory"); FPU_RESTORE; @@ -297,9 +304,9 @@ " addl $64, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "r" (lines), - "r" (p1), "r" (p2) + : "+r" (lines), + "+r" (p1), "+r" (p2) + : : "memory"); FPU_RESTORE; @@ -355,9 +362,9 @@ " addl $64, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "r" (lines), - "r" (p1), "r" (p2), "r" (p3) + : "+r" (lines), + "+r" (p1), "+r" (p2), "+r" (p3) + : : "memory" ); FPU_RESTORE; @@ -422,9 +429,9 @@ " addl $64, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "r" (lines), - "r" (p1), "r" (p2), "r" (p3), "r" (p4) + : "+r" (lines), + "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) + : : "memory"); FPU_RESTORE; @@ -439,7 +446,10 @@ FPU_SAVE; + /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */ __asm__ __volatile__ ( + " pushl %4\n" + " pushl %5\n" " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -498,9 +508,11 @@ " addl $64, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "g" (lines), - "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) + " popl %5\n" + " popl %4\n" + : "+g" (lines), + "+r" (p1), "+r" (p2), "+r" (p3) + : "r" (p4), "r" (p5) : "memory"); FPU_RESTORE; @@ -554,6 +566,8 @@ : "r" (cr0), "r" (xmm_save) \ : "memory") +#define ALIGN16 __attribute__((aligned(16))) + #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" @@ -575,7 +589,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 8; - char xmm_save[16*4]; + char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; @@ -616,9 +630,9 @@ " addl $256, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" + : "+r" (lines), + "+r" (p1), "+r" (p2) : - : "r" (lines), - "r" (p1), "r" (p2) : "memory"); 
XMMS_RESTORE; @@ -629,7 +643,7 @@ unsigned long *p3) { unsigned long lines = bytes >> 8; - char xmm_save[16*4]; + char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; @@ -677,9 +691,9 @@ " addl $256, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" + : "+r" (lines), + "+r" (p1), "+r"(p2), "+r"(p3) : - : "r" (lines), - "r" (p1), "r"(p2), "r"(p3) : "memory" ); XMMS_RESTORE; @@ -690,7 +704,7 @@ unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 8; - char xmm_save[16*4]; + char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; @@ -745,9 +759,9 @@ " addl $256, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" + : "+r" (lines), + "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : - : "r" (lines), - "r" (p1), "r" (p2), "r" (p3), "r" (p4) : "memory" ); XMMS_RESTORE; @@ -758,12 +772,15 @@ unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 8; - char xmm_save[16*4]; + char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; + /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */ __asm__ __volatile__ ( + " pushl %4\n" + " pushl %5\n" #undef BLOCK #define BLOCK(i) \ PF1(i) \ @@ -820,9 +837,11 @@ " addl $256, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" - : - : "r" (lines), - "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) + " popl %5\n" + " popl %4\n" + : "+r" (lines), + "+r" (p1), "+r" (p2), "+r" (p3) + : "r" (p4), "r" (p5) : "memory"); XMMS_RESTORE; diff -Nru a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-ia64/agp.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,11 @@ +#ifndef AGP_H +#define AGP_H 1 + +/* dummy for now */ + +#define map_page_into_agp(page) +#define unmap_page_from_agp(page) +#define flush_agp_mappings() +#define flush_agp_cache() mb() + +#endif diff -Nru a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h --- a/include/asm-ia64/hardirq.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-ia64/hardirq.h Tue Jun 18 19:12:02 2002 @@ -58,7 +58,7 @@ { int i; - for (i = 0; i < smp_num_cpus; i++) + for (i = 0; i < NR_CPUS; i++) if (irq_count(i)) return 1; return 0; diff -Nru a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h --- a/include/asm-ia64/smp.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-ia64/smp.h Tue Jun 18 19:12:02 2002 @@ -39,15 +39,26 @@ extern volatile unsigned long cpu_online_map; extern unsigned long ipi_base_addr; extern unsigned char smp_int_redirect; -extern int smp_num_cpus; extern volatile int ia64_cpu_to_sapicid[]; #define cpu_physical_id(i) ia64_cpu_to_sapicid[i] -#define cpu_number_map(i) (i) -#define cpu_logical_map(i) (i) extern unsigned long ap_wakeup_vector; +#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) +extern inline unsigned int num_online_cpus(void) +{ + return hweight64(cpu_online_map); +} + +extern inline int any_online_cpu(unsigned int mask) +{ + if (mask & cpu_online_map) + return __ffs(mask & cpu_online_map); + + return -1; +} + /* * Function to map hard smp processor id to logical id. Slow, so * don't use this in performance-critical code. 
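 * With smp_num_cpus and cpu_logical_map() gone, CPU numbers may be sparse, * so code that walks the processors now visits all NR_CPUS slots and skips * the holes, the pattern used throughout this patch: * * for (i = 0; i < NR_CPUS; i++) { * if (!cpu_online(i)) * continue; * ... per-cpu work ... * } *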
@@ -57,7 +68,7 @@ { int i; - for (i = 0; i < smp_num_cpus; ++i) + for (i = 0; i < NR_CPUS; ++i) if (cpu_physical_id(i) == (__u32) cpuid) break; return i; @@ -107,6 +118,11 @@ lid.bits = ia64_get_lid(); return lid.f.id << 8 | lid.f.eid; } + +/* Upping and downing of CPUs */ +extern int __cpu_disable(void); +extern void __cpu_die(unsigned int cpu); +extern int __cpu_up(unsigned int cpu); #define NO_PROC_ID 0xffffffff /* no processor magic marker */ diff -Nru a/include/asm-ppc/hardirq.h b/include/asm-ppc/hardirq.h --- a/include/asm-ppc/hardirq.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-ppc/hardirq.h Tue Jun 18 19:12:01 2002 @@ -56,7 +56,7 @@ { int i; - for (i = 0; i < smp_num_cpus; i++) + for (i = 0; i < NR_CPUS; i++) if (local_irq_count(i)) return 1; return 0; } diff -Nru a/include/asm-ppc/kmap_types.h b/include/asm-ppc/kmap_types.h --- a/include/asm-ppc/kmap_types.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-ppc/kmap_types.h Tue Jun 18 19:12:01 2002 @@ -11,7 +11,8 @@ KM_SKB_DATA_SOFTIRQ, KM_USER0, KM_USER1, - KM_BIO_IRQ, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, KM_PTE0, KM_PTE1, KM_TYPE_NR diff -Nru a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h --- a/include/asm-ppc/smp.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-ppc/smp.h Tue Jun 18 19:12:01 2002 @@ -15,6 +15,7 @@ #include #include +#include #ifdef CONFIG_SMP @@ -44,11 +45,22 @@ #define NO_PROC_ID 0xFF /* No processor magic marker */ #define PROC_CHANGE_PENALTY 20 -/* 1 to 1 mapping on PPC -- Cort */ -#define cpu_logical_map(cpu) (cpu) -#define cpu_number_map(x) (x) - #define smp_processor_id() (current_thread_info()->cpu) + +#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) + +extern inline unsigned int num_online_cpus(void) +{ + return hweight32(cpu_online_map); +} + +extern inline int any_online_cpu(unsigned int mask) +{ + if (mask & cpu_online_map) + return __ffs(mask & cpu_online_map); + + return -1; +} extern int smp_hw_index[]; #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) diff -Nru a/include/asm-s390/system.h b/include/asm-s390/system.h --- a/include/asm-s390/system.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-s390/system.h Tue Jun 18 19:12:02 2002 @@ -18,8 +18,12 @@ #endif #include -#define prepare_to_switch() do { } while(0) -#define switch_to(prev,next) do { \ +#define prepare_arch_schedule(prev) do { } while (0) +#define finish_arch_schedule(prev) do { } while (0) +#define prepare_arch_switch(rq) do { } while (0) +#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock) + +#define switch_to(prev,next,last) do { \ if (prev == next) \ break; \ save_fp_regs1(&prev->thread.fp_regs); \ diff -Nru a/include/asm-s390x/system.h b/include/asm-s390x/system.h --- a/include/asm-s390x/system.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-s390x/system.h Tue Jun 18 19:12:03 2002 @@ -18,8 +18,12 @@ #endif #include -#define prepare_to_switch() do { } while(0) -#define switch_to(prev,next) do { \ +#define prepare_arch_schedule(prev) do { } while (0) +#define finish_arch_schedule(prev) do { } while (0) +#define prepare_arch_switch(rq) do { } while (0) +#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock) + +#define switch_to(prev,next,last) do { \ if (prev == next) \ break; \ save_fp_regs(&prev->thread.fp_regs); \ diff -Nru a/include/asm-sparc/kmap_types.h b/include/asm-sparc/kmap_types.h --- a/include/asm-sparc/kmap_types.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-sparc/kmap_types.h Tue Jun 18 19:12:02 2002 @@ -7,7 +7,8 @@ KM_SKB_DATA_SOFTIRQ, KM_USER0, KM_USER1, - KM_BIO_IRQ, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, 
KM_TYPE_NR }; diff -Nru a/include/asm-sparc64/agp.h b/include/asm-sparc64/agp.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-sparc64/agp.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,11 @@ +#ifndef AGP_H +#define AGP_H 1 + +/* dummy for now */ + +#define map_page_into_agp(page) +#define unmap_page_from_agp(page) +#define flush_agp_mappings() +#define flush_agp_cache() mb() + +#endif diff -Nru a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h --- a/include/asm-sparc64/oplib.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-sparc64/oplib.h Tue Jun 18 19:12:03 2002 @@ -113,6 +113,9 @@ */ extern void prom_halt(void) __attribute__ ((noreturn)); +/* Halt and power-off the machine. */ +extern void prom_halt_power_off(void) __attribute__ ((noreturn)); + /* Set the PROM 'sync' callback function to the passed function pointer. * When the user gives the 'sync' command at the prom prompt while the * kernel is still active, the prom will call this routine. diff -Nru a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h --- a/include/asm-sparc64/system.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-sparc64/system.h Tue Jun 18 19:12:01 2002 @@ -143,7 +143,14 @@ #define flush_user_windows flushw_user #define flush_register_windows flushw_all -#define prepare_to_switch flushw_all + +#define prepare_arch_schedule(prev) task_lock(prev) +#define finish_arch_schedule(prev) task_unlock(prev) +#define prepare_arch_switch(rq) \ +do { spin_unlock(&(rq)->lock); \ + flushw_all(); \ +} while (0) +#define finish_arch_switch(rq) __sti() #ifndef CONFIG_DEBUG_SPINLOCK #define CHECK_LOCKS(PREV) do { } while(0) @@ -172,61 +179,61 @@ * not preserve it's value. Hairy, but it lets us remove 2 loads * and 2 stores in this critical code path. -DaveM */ -#define switch_to(prev, next) \ -do { CHECK_LOCKS(prev); \ - if (test_thread_flag(TIF_PERFCTR)) { \ - unsigned long __tmp; \ - read_pcr(__tmp); \ - current_thread_info()->pcr_reg = __tmp; \ - read_pic(__tmp); \ - current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp); \ - current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \ - } \ - save_and_clear_fpu(); \ - /* If you are tempted to conditionalize the following */ \ - /* so that ASI is only written if it changes, think again. 
*/ \ - __asm__ __volatile__("wr %%g0, %0, %%asi" \ - : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS])); \ - __asm__ __volatile__( \ - "mov %%g6, %%g5\n\t" \ - "wrpr %%g0, 0x95, %%pstate\n\t" \ - "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ - "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ - "rdpr %%wstate, %%o5\n\t" \ - "stx %%o6, [%%g6 + %2]\n\t" \ - "stb %%o5, [%%g6 + %1]\n\t" \ - "rdpr %%cwp, %%o5\n\t" \ - "stb %%o5, [%%g6 + %4]\n\t" \ - "mov %0, %%g6\n\t" \ - "ldub [%0 + %4], %%g1\n\t" \ - "wrpr %%g1, %%cwp\n\t" \ - "ldx [%%g6 + %2], %%o6\n\t" \ - "ldub [%%g6 + %1], %%o5\n\t" \ - "ldx [%%g6 + %3], %%o7\n\t" \ - "mov %%g6, %%l2\n\t" \ - "wrpr %%o5, 0x0, %%wstate\n\t" \ - "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ - "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ - "wrpr %%g0, 0x94, %%pstate\n\t" \ - "mov %%l2, %%g6\n\t" \ - "ldx [%%g6 + %6], %%g4\n\t" \ - "wrpr %%g0, 0x96, %%pstate\n\t" \ - "andcc %%o7, %5, %%g0\n\t" \ - "bne,pn %%icc, ret_from_syscall\n\t" \ - " nop\n\t" \ - : /* no outputs */ \ - : "r" (next->thread_info), \ - "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \ - "i" (_TIF_NEWCHILD), "i" (TI_TASK) \ - : "cc", "g1", "g2", "g3", "g5", "g7", \ - "l2", "l3", "l4", "l5", "l6", "l7", \ - "i0", "i1", "i2", "i3", "i4", "i5", \ - "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ - /* If you fuck with this, update ret_from_syscall code too. */ \ - if (test_thread_flag(TIF_PERFCTR)) { \ - write_pcr(current_thread_info()->pcr_reg); \ - reset_pic(); \ - } \ +#define switch_to(prev, next, last) \ +do { CHECK_LOCKS(prev); \ + if (test_thread_flag(TIF_PERFCTR)) { \ + unsigned long __tmp; \ + read_pcr(__tmp); \ + current_thread_info()->pcr_reg = __tmp; \ + read_pic(__tmp); \ + current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\ + current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \ + } \ + save_and_clear_fpu(); \ + /* If you are tempted to conditionalize the following */ \ + /* so that ASI is only written if it changes, think again. */ \ + __asm__ __volatile__("wr %%g0, %0, %%asi" \ + : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\ + __asm__ __volatile__( \ + "mov %%g4, %%g5\n\t" \ + "wrpr %%g0, 0x95, %%pstate\n\t" \ + "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ + "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ + "rdpr %%wstate, %%o5\n\t" \ + "stx %%o6, [%%g6 + %3]\n\t" \ + "stb %%o5, [%%g6 + %2]\n\t" \ + "rdpr %%cwp, %%o5\n\t" \ + "stb %%o5, [%%g6 + %5]\n\t" \ + "mov %1, %%g6\n\t" \ + "ldub [%1 + %5], %%g1\n\t" \ + "wrpr %%g1, %%cwp\n\t" \ + "ldx [%%g6 + %3], %%o6\n\t" \ + "ldub [%%g6 + %2], %%o5\n\t" \ + "ldx [%%g6 + %4], %%o7\n\t" \ + "mov %%g6, %%l2\n\t" \ + "wrpr %%o5, 0x0, %%wstate\n\t" \ + "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ + "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ + "wrpr %%g0, 0x94, %%pstate\n\t" \ + "mov %%l2, %%g6\n\t" \ + "ldx [%%g6 + %7], %%g4\n\t" \ + "wrpr %%g0, 0x96, %%pstate\n\t" \ + "andcc %%o7, %6, %%g0\n\t" \ + "bne,pn %%icc, ret_from_syscall\n\t" \ + " mov %%g5, %0\n\t" \ + : "=&r" (last) \ + : "0" (next->thread_info), \ + "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \ + "i" (_TIF_NEWCHILD), "i" (TI_TASK) \ + : "cc", "g1", "g2", "g3", "g5", "g7", \ + "l2", "l3", "l4", "l5", "l6", "l7", \ + "i0", "i1", "i2", "i3", "i4", "i5", \ + "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ + /* If you fuck with this, update ret_from_syscall code too. 
*/ \ + if (test_thread_flag(TIF_PERFCTR)) { \ + write_pcr(current_thread_info()->pcr_reg); \ + reset_pic(); \ + } \ } while(0) extern __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) diff -Nru a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-x86_64/agp.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,23 @@ +#ifndef AGP_H +#define AGP_H 1 + +#include + +/* + * Functions to keep the agpgart mappings coherent. + * The GART gives the CPU a physical alias of memory. The alias is + * mapped uncacheable. Make sure there are no conflicting mappings + * with different cachability attributes for the same page. + */ + +#define map_page_into_agp(page) \ + change_page_attr(page, __pgprot(__PAGE_KERNEL | _PAGE_PCD)) +#define unmap_page_from_agp(page) change_page_attr(page, PAGE_KERNEL) +#define flush_agp_mappings() global_flush_tlb() + +/* Could use CLFLUSH here if the cpu supports it. But then it would + need to be called for each cacheline of the whole page so it may not be + worth it. Would need a page for it. */ +#define flush_agp_cache() asm volatile("wbinvd":::"memory") + +#endif diff -Nru a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h --- a/include/asm-x86_64/cacheflush.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-x86_64/cacheflush.h Tue Jun 18 19:12:01 2002 @@ -15,4 +15,7 @@ #define flush_icache_page(vma,pg) do { } while (0) #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +void global_flush_tlb(void); +int change_page_attr(struct page *page, int numpages, pgprot_t prot); + #endif /* _I386_CACHEFLUSH_H */ diff -Nru a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h --- a/include/asm-x86_64/i387.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-x86_64/i387.h Tue Jun 18 19:12:02 2002 @@ -16,10 +16,21 @@ #include #include #include +#include extern void fpu_init(void); extern void init_fpu(void); int save_i387(struct _fpstate *buf); + +static inline int need_signal_i387(struct task_struct *me) +{ + if (!me->used_math) + return 0; + me->used_math = 0; + if (!test_thread_flag(TIF_USEDFPU)) + return 0; + return 1; +} /* * FPU lazy state save handling... diff -Nru a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h --- a/include/asm-x86_64/ia32.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-x86_64/ia32.h Tue Jun 18 19:12:03 2002 @@ -18,7 +18,9 @@ typedef int __kernel_pid_t32; typedef unsigned short __kernel_ipc_pid_t32; typedef unsigned short __kernel_uid_t32; +typedef unsigned __kernel_uid32_t32; typedef unsigned short __kernel_gid_t32; +typedef unsigned __kernel_gid32_t32; typedef unsigned short __kernel_dev_t32; typedef unsigned int __kernel_ino_t32; typedef unsigned short __kernel_mode_t32; diff -Nru a/include/asm-x86_64/ipc.h b/include/asm-x86_64/ipc.h --- a/include/asm-x86_64/ipc.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-x86_64/ipc.h Tue Jun 18 19:12:01 2002 @@ -1,34 +1,6 @@ #ifndef __i386_IPC_H__ #define __i386_IPC_H__ -/* - * These are used to wrap system calls on x86. - * - * See arch/i386/kernel/sys_i386.c for ugly details.. 
- * - * (on x86-64 only used for 32bit emulation) - */ - -struct ipc_kludge { - struct msgbuf *msgp; - long msgtyp; -}; - -#define SEMOP 1 -#define SEMGET 2 -#define SEMCTL 3 -#define MSGSND 11 -#define MSGRCV 12 -#define MSGGET 13 -#define MSGCTL 14 -#define SHMAT 21 -#define SHMDT 22 -#define SHMGET 23 -#define SHMCTL 24 - -/* Used by the DIPC package, try and avoid reusing it */ -#define DIPC 25 - -#define IPCCALL(version,op) ((version)<<16 | (op)) +/* dummy */ #endif diff -Nru a/include/asm-x86_64/kmap_types.h b/include/asm-x86_64/kmap_types.h --- a/include/asm-x86_64/kmap_types.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-x86_64/kmap_types.h Tue Jun 18 19:12:03 2002 @@ -7,7 +7,8 @@ KM_SKB_DATA_SOFTIRQ, KM_USER0, KM_USER1, - KM_BIO_IRQ, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, KM_TYPE_NR }; diff -Nru a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h --- a/include/asm-x86_64/mmu_context.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-x86_64/mmu_context.h Tue Jun 18 19:12:02 2002 @@ -19,8 +19,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) { - if(cpu_tlbstate[cpu].state == TLBSTATE_OK) - cpu_tlbstate[cpu].state = TLBSTATE_LAZY; + if (read_pda(mmu_state) == TLBSTATE_OK) + write_pda(mmu_state, TLBSTATE_LAZY); } #else static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) @@ -35,8 +35,8 @@ /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); #ifdef CONFIG_SMP - cpu_tlbstate[cpu].state = TLBSTATE_OK; - cpu_tlbstate[cpu].active_mm = next; + write_pda(mmu_state, TLBSTATE_OK); + write_pda(active_mm, next); #endif set_bit(cpu, &next->cpu_vm_mask); /* Re-load page tables */ @@ -48,8 +48,8 @@ } #ifdef CONFIG_SMP else { - cpu_tlbstate[cpu].state = TLBSTATE_OK; - if(cpu_tlbstate[cpu].active_mm != next) + write_pda(mmu_state, TLBSTATE_OK); + if (read_pda(active_mm) != next) out_of_line_bug(); if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled diff -Nru a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h --- a/include/asm-x86_64/msr.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-x86_64/msr.h Tue Jun 18 19:12:03 2002 @@ -95,6 +95,7 @@ #define MSR_IA32_PERFCTR0 0xc1 #define MSR_IA32_PERFCTR1 0xc2 +#define MSR_MTRRcap 0x0fe #define MSR_IA32_BBL_CR_CTL 0x119 #define MSR_IA32_MCG_CAP 0x179 @@ -110,6 +111,19 @@ #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de +#define MSR_MTRRfix64K_00000 0x250 +#define MSR_MTRRfix16K_80000 0x258 +#define MSR_MTRRfix16K_A0000 0x259 +#define MSR_MTRRfix4K_C0000 0x268 +#define MSR_MTRRfix4K_C8000 0x269 +#define MSR_MTRRfix4K_D0000 0x26a +#define MSR_MTRRfix4K_D8000 0x26b +#define MSR_MTRRfix4K_E0000 0x26c +#define MSR_MTRRfix4K_E8000 0x26d +#define MSR_MTRRfix4K_F0000 0x26e +#define MSR_MTRRfix4K_F8000 0x26f +#define MSR_MTRRdefType 0x2ff + #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MC0_ADDR 0x402 @@ -170,12 +184,5 @@ #define MSR_IA32_APICBASE_BSP (1<<8) #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_BASE (0xfffff<<12) - - -#define MSR_IA32_THERM_CONTROL 0x19a -#define MSR_IA32_THERM_INTERRUPT 0x19b -#define MSR_IA32_THERM_STATUS 0x19c -#define MSR_IA32_MISC_ENABLE 0x1a0 - #endif diff -Nru a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h --- a/include/asm-x86_64/mtrr.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-x86_64/mtrr.h Tue Jun 18 19:12:01 2002 @@ -30,16 +30,16 @@ struct mtrr_sentry { - unsigned long base; /* 
Base address */ - unsigned long size; /* Size of region */ + __u64 base; /* Base address */ + __u32 size; /* Size of region */ unsigned int type; /* Type of region */ }; struct mtrr_gentry { + __u64 base; /* Base address */ + __u32 size; /* Size of region */ unsigned int regnum; /* Register number */ - unsigned long base; /* Base address */ - unsigned long size; /* Size of region */ unsigned int type; /* Type of region */ }; @@ -81,46 +81,38 @@ #ifdef __KERNEL__ /* The following functions are for use by other drivers */ -# ifdef CONFIG_MTRR -extern int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, char increment); -extern int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, char increment); -extern int mtrr_del (int reg, unsigned long base, unsigned long size); -extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); -extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); -# else -static __inline__ int mtrr_add (unsigned long base, unsigned long size, +#ifdef CONFIG_MTRR +extern int mtrr_add (__u64 base, __u32 size, unsigned int type, char increment); +extern int mtrr_add_page (__u64 base, __u32 size, unsigned int type, char increment); +extern int mtrr_del (int reg, __u64 base, __u32 size); +extern int mtrr_del_page (int reg, __u64 base, __u32 size); +#else +static __inline__ int mtrr_add (__u64 base, __u32 size, unsigned int type, char increment) { return -ENODEV; } -static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, +static __inline__ int mtrr_add_page (__u64 base, __u32 size, unsigned int type, char increment) { return -ENODEV; } -static __inline__ int mtrr_del (int reg, unsigned long base, - unsigned long size) +static __inline__ int mtrr_del (int reg, __u64 base, __u32 size) { return -ENODEV; } -static __inline__ int mtrr_del_page (int reg, unsigned long base, - unsigned long size) +static __inline__ int mtrr_del_page (int reg, __u64 base, __u32 size) { return -ENODEV; } - -static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} - -# endif +#endif /* The following functions are for initialisation: don't use them! */ extern int mtrr_init (void); -# if defined(CONFIG_SMP) && defined(CONFIG_MTRR) +#if defined(CONFIG_SMP) && defined(CONFIG_MTRR) extern void mtrr_init_boot_cpu (void); extern void mtrr_init_secondary_cpu (void); -# endif +#endif #endif diff -Nru a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h --- a/include/asm-x86_64/pda.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-x86_64/pda.h Tue Jun 18 19:12:02 2002 @@ -22,6 +22,8 @@ unsigned int __local_bh_count; unsigned int __nmi_count; /* arch dependent */ struct task_struct * __ksoftirqd_task; /* waitqueue is too large */ + struct mm_struct *active_mm; + int mmu_state; } ____cacheline_aligned; #define PDA_STACKOFFSET (5*8) diff -Nru a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h --- a/include/asm-x86_64/processor.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-x86_64/processor.h Tue Jun 18 19:12:03 2002 @@ -45,21 +45,12 @@ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; __u8 x86_mask; - /* We know that wp_works_ok = 1, hlt_works_ok = 1, hard_math = 1, - etc... 
*/ - char wp_works_ok; /* It doesn't on 386's */ - char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ - char hard_math; - char rfu; int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ __u32 x86_capability[NCAPINTS]; char x86_vendor_id[16]; char x86_model_id[64]; int x86_cache_size; /* in KB - valid for CPUS which support this call */ - int fdiv_bug; - int f00f_bug; - int coma_bug; unsigned long loops_per_jiffy; } ____cacheline_aligned; @@ -323,7 +314,7 @@ /* IO permissions. the bitmap could be moved into the GDT, that would make switch faster for a limited number of ioperm using tasks. -AK */ int ioperm; - u32 io_bitmap[IO_BITMAP_SIZE+1]; + u32 *io_bitmap_ptr; }; #define INIT_THREAD { \ diff -Nru a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h --- a/include/asm-x86_64/spinlock.h Tue Jun 18 19:12:03 2002 +++ b/include/asm-x86_64/spinlock.h Tue Jun 18 19:12:03 2002 @@ -15,7 +15,7 @@ typedef struct { volatile unsigned int lock; -#ifdef CONFIG_DEBUG_SPINLOCK +#if SPINLOCK_DEBUG unsigned magic; #endif } spinlock_t; @@ -39,7 +39,7 @@ * We make no fairness assumptions. They have a cost. */ -#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0) +#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0) #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) #define spin_lock_string \ @@ -62,7 +62,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) { - char oldval; + signed char oldval; __asm__ __volatile__( "xchgb %b0,%1" :"=q" (oldval), "=m" (lock->lock) diff -Nru a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h --- a/include/asm-x86_64/string.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-x86_64/string.h Tue Jun 18 19:12:02 2002 @@ -40,18 +40,9 @@ __ret = __builtin_memcpy((dst),(src),__len); \ __ret; }) -#if 0 + #define __HAVE_ARCH_MEMSET -extern void *__memset(void *mem, int val, size_t len); -#define memset(dst,val,len) \ - ({ size_t __len = (len); \ - void *__ret; \ - if (__builtin_constant_p(len) && __len >= 64) \ - __ret = __memset((dst),(val),__len); \ - else \ - __ret = __builtin_memset((dst),(val),__len); \ - __ret; }) -#endif +#define memset __builtin_memset #define __HAVE_ARCH_MEMMOVE void * memmove(void * dest,const void *src,size_t count); diff -Nru a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-x86_64/suspend.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,6 @@ +#ifndef SUSPEND_H +#define SUSPEND_H 1 + +/* dummy for now */ + +#endif diff -Nru a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h --- a/include/asm-x86_64/system.h Tue Jun 18 19:12:02 2002 +++ b/include/asm-x86_64/system.h Tue Jun 18 19:12:02 2002 @@ -13,7 +13,10 @@ #define LOCK_PREFIX "" #endif -#define prepare_to_switch() do {} while(0) +#define prepare_arch_schedule(prev) do { } while(0) +#define finish_arch_schedule(prev) do { } while(0) +#define prepare_arch_switch(rq) do { } while(0) +#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock) #define __STR(x) #x #define STR(x) __STR(x) @@ -41,7 +44,7 @@ __POP(rax) __POP(r15) __POP(r14) __POP(r13) __POP(r12) __POP(r11) __POP(r10) \ __POP(r9) __POP(r8) -#define switch_to(prev,next) \ +#define switch_to(prev,next,last) \ asm volatile(SAVE_CONTEXT \ "movq %%rsp,%[prevrsp]\n\t" \ "movq %[nextrsp],%%rsp\n\t" \ diff -Nru a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h --- a/include/asm-x86_64/timex.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-x86_64/timex.h Tue Jun 18 19:12:01 2002 @@ -48,6 +48,4 
@@ extern unsigned int cpu_khz; -#define ARCH_HAS_JIFFIES_64 - #endif diff -Nru a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h --- a/include/asm-x86_64/tlbflush.h Tue Jun 18 19:12:01 2002 +++ b/include/asm-x86_64/tlbflush.h Tue Jun 18 19:12:01 2002 @@ -106,15 +106,6 @@ #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 -struct tlb_state -{ - struct mm_struct *active_mm; - int state; - char __cacheline_padding[24]; -}; -extern struct tlb_state cpu_tlbstate[NR_CPUS]; - - #endif #define flush_tlb_kernel_range(start, end) flush_tlb_all() diff -Nru a/include/linux/auto_fs.h b/include/linux/auto_fs.h --- a/include/linux/auto_fs.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/auto_fs.h Tue Jun 18 19:12:01 2002 @@ -45,7 +45,8 @@ * If so, 32-bit user-space code should be backwards compatible. */ -#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) || defined(__powerpc__) +#if defined(__sparc__) || defined(__mips__) || defined(__x86_64) \ + || defined(__powerpc__) || defined(__s390__) typedef unsigned int autofs_wqt_t; #else typedef unsigned long autofs_wqt_t; diff -Nru a/include/linux/bio.h b/include/linux/bio.h --- a/include/linux/bio.h Tue Jun 18 19:12:03 2002 +++ b/include/linux/bio.h Tue Jun 18 19:12:03 2002 @@ -21,6 +21,8 @@ #define __LINUX_BIO_H #include +#include + /* Platforms may set this to teach the BIO layer about IOMMU hardware. */ #include #ifndef BIO_VMERGE_BOUNDARY @@ -47,9 +49,6 @@ unsigned int bv_offset; }; -/* - * weee, c forward decl... - */ struct bio; typedef void (bio_end_io_t) (struct bio *); typedef void (bio_destructor_t) (struct bio *); @@ -205,5 +204,50 @@ extern inline void bio_init(struct bio *); extern int bio_ioctl(kdev_t, unsigned int, unsigned long); + +#ifdef CONFIG_HIGHMEM +/* + * remember to add offset! and never ever reenable interrupts between a + * bio_kmap_irq and bio_kunmap_irq!! + * + * This function MUST be inlined - it plays with the CPU interrupt flags. + * Hence the `extern inline'. 
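+ * A typical caller looks like this sketch, where dst and len stand in for + * the caller's destination buffer and length: + * + * unsigned long flags; + * char *data = bio_kmap_irq(bio, &flags); + * memcpy(dst, data, len); + * bio_kunmap_irq(data, &flags); + *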
+ */ +extern inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags) +{ + unsigned long addr; + + __save_flags(*flags); + + /* + * could be low + */ + if (!PageHighMem(bio_page(bio))) + return bio_data(bio); + + /* + * it's a highmem page + */ + __cli(); + addr = (unsigned long) kmap_atomic(bio_page(bio), KM_BIO_SRC_IRQ); + + if (addr & ~PAGE_MASK) + BUG(); + + return (char *) addr + bio_offset(bio); +} + +extern inline void bio_kunmap_irq(char *buffer, unsigned long *flags) +{ + unsigned long ptr = (unsigned long) buffer & PAGE_MASK; + + kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ); + __restore_flags(*flags); +} + +#else +#define bio_kmap_irq(bio, flags) (bio_data(bio)) +#define bio_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) +#endif #endif /* __LINUX_BIO_H */ diff -Nru a/include/linux/blkdev.h b/include/linux/blkdev.h --- a/include/linux/blkdev.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/blkdev.h Tue Jun 18 19:12:02 2002 @@ -246,12 +246,7 @@ #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD) extern int init_emergency_isa_pool(void); -extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig); - -extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio) -{ - create_bounce(q->bounce_pfn, q->bounce_gfp, bio); -} +void blk_queue_bounce(request_queue_t *q, struct bio **bio); #define rq_for_each_bio(bio, rq) \ if ((rq->bio)) \ @@ -328,11 +323,11 @@ /* * tag stuff */ -#define blk_queue_tag_request(q, tag) ((q)->queue_tags->tag_index[(tag)]) #define blk_queue_tag_depth(q) ((q)->queue_tags->busy) #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) #define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED) extern int blk_queue_start_tag(request_queue_t *, struct request *); +extern struct request *blk_queue_find_tag(request_queue_t *, int); extern void blk_queue_end_tag(request_queue_t *, struct request *); extern int blk_queue_init_tags(request_queue_t *, int); extern void blk_queue_free_tags(request_queue_t *); diff -Nru a/include/linux/brlock.h b/include/linux/brlock.h --- a/include/linux/brlock.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/brlock.h Tue Jun 18 19:12:02 2002 @@ -28,13 +28,15 @@ * load-locked/store-conditional cpus (ALPHA/MIPS/PPC) and * compare-and-swap cpus (Sparc64). So we control which * implementation to use with a __BRLOCK_USE_ATOMICS define. -DaveM + * + * Added BR_LLC_LOCK for use in net/core/ext8022.c -acme */ /* Register bigreader lock indices here. */ enum brlock_indices { BR_GLOBALIRQ_LOCK, BR_NETPROTO_LOCK, - + BR_LLC_LOCK, __BR_END }; diff -Nru a/include/linux/buffer_head.h b/include/linux/buffer_head.h --- a/include/linux/buffer_head.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/buffer_head.h Tue Jun 18 19:12:01 2002 @@ -108,12 +108,7 @@ BUFFER_FNS(Async_Write, async_write) BUFFER_FNS(Boundary, boundary) -/* - * FIXME: this is used only by bh_kmap, which is used only by RAID5. 
- * Move all that stuff into raid5.c - */ #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) - #define touch_buffer(bh) mark_page_accessed(bh->b_page) /* If we *know* page->private refers to buffer_heads */ @@ -124,16 +119,6 @@ ((struct buffer_head *)(page)->private); \ }) #define page_has_buffers(page) PagePrivate(page) -#define set_page_buffers(page, buffers) \ - do { \ - SetPagePrivate(page); \ - page->private = (unsigned long)buffers; \ - } while (0) -#define clear_page_buffers(page) \ - do { \ - ClearPagePrivate(page); \ - page->private = 0; \ - } while (0) #define invalidate_buffers(dev) __invalidate_buffers((dev), 0) #define destroy_buffers(dev) __invalidate_buffers((dev), 1) @@ -175,15 +160,14 @@ int fsync_bdev(struct block_device *); int fsync_super(struct super_block *); int fsync_no_super(struct block_device *); -struct buffer_head *__get_hash_table(struct block_device *, sector_t, int); +struct buffer_head *__find_get_block(struct block_device *, sector_t, int); struct buffer_head * __getblk(struct block_device *, sector_t, int); void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); struct buffer_head * __bread(struct block_device *, int, int); void wakeup_bdflush(void); -struct buffer_head *alloc_buffer_head(int async); +struct buffer_head *alloc_buffer_head(void); void free_buffer_head(struct buffer_head * bh); -int brw_page(int, struct page *, struct block_device *, sector_t [], int); void FASTCALL(unlock_buffer(struct buffer_head *bh)); /* @@ -270,9 +254,9 @@ } static inline struct buffer_head * -sb_get_hash_table(struct super_block *sb, int block) +sb_find_get_block(struct super_block *sb, int block) { - return __get_hash_table(sb->s_bdev, block, sb->s_blocksize); + return __find_get_block(sb->s_bdev, block, sb->s_blocksize); } static inline void diff -Nru a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/linux/dqblk_xfs.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 1995-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef _LINUX_XQM_H +#define _LINUX_XQM_H + +#include + +/* + * Disk quota - quotactl(2) commands for the XFS Quota Manager (XQM). 
+ */ + +#define XQM_CMD(x) (('X'<<8)+(x)) /* note: forms first QCMD argument */ +#define Q_XQUOTAON XQM_CMD(0x1) /* enable accounting/enforcement */ +#define Q_XQUOTAOFF XQM_CMD(0x2) /* disable accounting/enforcement */ +#define Q_XGETQUOTA XQM_CMD(0x3) /* get disk limits and usage */ +#define Q_XSETQLIM XQM_CMD(0x4) /* set disk limits */ +#define Q_XGETQSTAT XQM_CMD(0x5) /* get quota subsystem status */ +#define Q_XQUOTARM XQM_CMD(0x6) /* free disk space used by dquots */ +
+/* + * fs_disk_quota structure: + * + * This contains the current quota information regarding a user/proj/group. + * It is 64-bit aligned, and all the blk units are in BBs (Basic Blocks) of + * 512 bytes. + */
+#define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ +typedef struct fs_disk_quota { + __s8 d_version; /* version of this structure */ + __s8 d_flags; /* XFS_{USER,PROJ,GROUP}_QUOTA */ + __u16 d_fieldmask; /* field specifier */ + __u32 d_id; /* user, project, or group ID */ + __u64 d_blk_hardlimit;/* absolute limit on disk blks */ + __u64 d_blk_softlimit;/* preferred limit on disk blks */ + __u64 d_ino_hardlimit;/* maximum # allocated inodes */ + __u64 d_ino_softlimit;/* preferred inode limit */ + __u64 d_bcount; /* # disk blocks owned by the user */ + __u64 d_icount; /* # inodes owned by the user */ + __s32 d_itimer; /* zero if within inode limits */ + /* if not, we refuse service */ + __s32 d_btimer; /* similar to above; for disk blocks */ + __u16 d_iwarns; /* # warnings issued wrt num inodes */ + __u16 d_bwarns; /* # warnings issued wrt disk blocks */ + __s32 d_padding2; /* padding2 - for future use */ + __u64 d_rtb_hardlimit;/* absolute limit on realtime blks */ + __u64 d_rtb_softlimit;/* preferred limit on RT disk blks */ + __u64 d_rtbcount; /* # realtime blocks owned */ + __s32 d_rtbtimer; /* similar to above; for RT disk blks */ + __u16 d_rtbwarns; /* # warnings issued wrt RT disk blks */ + __s16 d_padding3; /* padding3 - for future use */ + char d_padding4[8]; /* yet more padding */ +} fs_disk_quota_t; +
+/* + * These fields are sent to Q_XSETQLIM to specify fields that need to change. + */
+#define FS_DQ_ISOFT (1<<0) +#define FS_DQ_IHARD (1<<1) +#define FS_DQ_BSOFT (1<<2) +#define FS_DQ_BHARD (1<<3) +#define FS_DQ_RTBSOFT (1<<4) +#define FS_DQ_RTBHARD (1<<5) +#define FS_DQ_LIMIT_MASK (FS_DQ_ISOFT | FS_DQ_IHARD | FS_DQ_BSOFT | \ + FS_DQ_BHARD | FS_DQ_RTBSOFT | FS_DQ_RTBHARD)
+/* + * These timers can only be set in the superuser's dquot. For others, timers are + * automatically started and stopped. The superuser's timer values set the limits + * for the rest. In case these values are zero, the DQ_{F,B}TIMELIMIT values + * defined below are used. + * These values also apply only to the d_fieldmask field for Q_XSETQLIM. + */
+#define FS_DQ_BTIMER (1<<6) +#define FS_DQ_ITIMER (1<<7) +#define FS_DQ_RTBTIMER (1<<8) +#define FS_DQ_TIMER_MASK (FS_DQ_BTIMER | FS_DQ_ITIMER | FS_DQ_RTBTIMER) +
+/* + * The following constants define the default amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). These may be modified by the quotactl(2) + * system call with the Q_XSETQLIM command. + */
+#define DQ_FTIMELIMIT (7 * 24*60*60) /* 1 week */ +#define DQ_BTIMELIMIT (7 * 24*60*60) /* 1 week */ +
+/* + * Various flags related to quotactl(2). Only relevant to XFS filesystems. 
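+ *
+ * As an illustration of how the FS_DQ_* masks above are used, a userspace
+ * sketch of raising a user's block limits might look like this (QCMD() and
+ * USRQUOTA come from the generic quota headers; the device path and uid are
+ * assumptions of the example, and error handling is omitted):
+ *
+ *	struct fs_disk_quota d = { 0, };
+ *
+ *	d.d_version = FS_DQUOT_VERSION;
+ *	d.d_id = uid;
+ *	d.d_blk_softlimit = 10000;	(in BBs, i.e. 512-byte blocks)
+ *	d.d_blk_hardlimit = 20000;
+ *	d.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
+ *	quotactl(QCMD(Q_XSETQLIM, USRQUOTA), "/dev/hda1", uid, (caddr_t)&d);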
+ */ +#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ +#define XFS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ +#define XFS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ +#define XFS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ +
+#define XFS_USER_QUOTA (1<<0) /* user quota type */ +#define XFS_PROJ_QUOTA (1<<1) /* (IRIX) project quota type */ +#define XFS_GROUP_QUOTA (1<<2) /* group quota type */ +
+/* + * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. + * Provides a centralized way to get meta information about the quota subsystem, + * e.g. space taken up for user and group quotas, number of dquots currently + * incore. + */
+#define FS_QSTAT_VERSION 1 /* fs_quota_stat.qs_version */ +
+/* + * Some basic information about 'quota files'. + */
+typedef struct fs_qfilestat { + __u64 qfs_ino; /* inode number */ + __u64 qfs_nblks; /* number of BBs 512-byte-blks */ + __u32 qfs_nextents; /* number of extents */ +} fs_qfilestat_t; +
+typedef struct fs_quota_stat { + __s8 qs_version; /* version number for future changes */ + __u16 qs_flags; /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ + __s8 qs_pad; /* unused */ + fs_qfilestat_t qs_uquota; /* user quota storage information */ + fs_qfilestat_t qs_gquota; /* group quota storage information */ + __u32 qs_incoredqs; /* number of dquots incore */ + __s32 qs_btimelimit; /* limit for blks timer */ + __s32 qs_itimelimit; /* limit for inodes timer */ + __s32 qs_rtbtimelimit;/* limit for rt blks timer */ + __u16 qs_bwarnlimit; /* limit for num warnings */ + __u16 qs_iwarnlimit; /* limit for num warnings */ +} fs_quota_stat_t; +
+#endif /* _LINUX_XQM_H */
diff -Nru a/include/linux/file.h b/include/linux/file.h --- a/include/linux/file.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/file.h Tue Jun 18 19:12:01 2002
@@ -33,6 +33,7 @@ struct file * fd_array[NR_OPEN_DEFAULT]; }; +extern void FASTCALL(__fput(struct file *)); extern void FASTCALL(fput(struct file *)); extern struct file * FASTCALL(fget(unsigned int fd)); extern void FASTCALL(set_close_on_exec(unsigned int fd, int flag));
diff -Nru a/include/linux/fs.h b/include/linux/fs.h --- a/include/linux/fs.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/fs.h Tue Jun 18 19:12:02 2002
@@ -554,6 +554,7 @@ void (*fl_remove)(struct file_lock *); /* lock removal callback */ struct fasync_struct * fl_fasync; /* for lease break notifications */ + unsigned long fl_break_time; /* for nonblocking lease breaks */ union { struct nfs_lock_info nfs_fl;
diff -Nru a/include/linux/highmem.h b/include/linux/highmem.h --- a/include/linux/highmem.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/highmem.h Tue Jun 18 19:12:02 2002
@@ -2,7 +2,6 @@ #define _LINUX_HIGHMEM_H #include -#include #include #include
@@ -15,45 +14,8 @@ /* declarations for linux/mm/highmem.c */ unsigned int nr_free_highpages(void); -extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig); extern void check_highmem_ptes(void); -/* - * remember to add offset! and never ever reenable interrupts between a - * bio_kmap_irq and bio_kunmap_irq!! 
- */ -static inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags) -{ - unsigned long addr; - - __save_flags(*flags); - - /* - * could be low - */ - if (!PageHighMem(bio_page(bio))) - return bio_data(bio); - - /* - * it's a highmem page - */ - __cli(); - addr = (unsigned long) kmap_atomic(bio_page(bio), KM_BIO_IRQ); - - if (addr & ~PAGE_MASK) - BUG(); - - return (char *) addr + bio_offset(bio); -} - -static inline void bio_kunmap_irq(char *buffer, unsigned long *flags) -{ - unsigned long ptr = (unsigned long) buffer & PAGE_MASK; - - kunmap_atomic((void *) ptr, KM_BIO_IRQ); - __restore_flags(*flags); -} - #else /* CONFIG_HIGHMEM */ static inline unsigned int nr_free_highpages(void) { return 0; } @@ -64,12 +26,6 @@ #define kmap_atomic(page,idx) kmap(page) #define kunmap_atomic(page,idx) kunmap(page) - -#define bh_kmap(bh) ((bh)->b_data) -#define bh_kunmap(bh) do { } while (0) - -#define bio_kmap_irq(bio, flags) (bio_data(bio)) -#define bio_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) #endif /* CONFIG_HIGHMEM */ diff -Nru a/include/linux/ide.h b/include/linux/ide.h --- a/include/linux/ide.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/ide.h Tue Jun 18 19:12:02 2002 @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff -Nru a/include/linux/intermezzo_psdev.h b/include/linux/intermezzo_psdev.h --- a/include/linux/intermezzo_psdev.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/intermezzo_psdev.h Tue Jun 18 19:12:02 2002 @@ -47,7 +47,7 @@ }; #define ISLENTO(minor) (current->pid == upc_comms[minor].uc_pid \ - || current->p_pptr->pid == upc_comms[minor].uc_pid) + || current->parent->pid == upc_comms[minor].uc_pid) extern struct upc_comm upc_comms[MAX_PRESTODEV]; diff -Nru a/include/linux/jbd.h b/include/linux/jbd.h --- a/include/linux/jbd.h Tue Jun 18 19:12:03 2002 +++ b/include/linux/jbd.h Tue Jun 18 19:12:03 2002 @@ -238,6 +238,7 @@ BUFFER_FNS(JBD, jbd) BUFFER_FNS(JBDDirty, jbddirty) TAS_BUFFER_FNS(JBDDirty, jbddirty) +BUFFER_FNS(Freed, freed) static inline struct buffer_head *jh2bh(struct journal_head *jh) { diff -Nru a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h --- a/include/linux/kernel_stat.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/kernel_stat.h Tue Jun 18 19:12:02 2002 @@ -43,8 +43,8 @@ { int i, sum=0; - for (i = 0 ; i < smp_num_cpus ; i++) - sum += kstat.irqs[cpu_logical_map(i)][irq]; + for (i = 0 ; i < NR_CPUS ; i++) + sum += kstat.irqs[i][irq]; return sum; } diff -Nru a/include/linux/llc.h b/include/linux/llc.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/linux/llc.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,102 @@ +#ifndef __LINUX_LLC_H +#define __LINUX_LLC_H +/* + * IEEE 802.2 User Interface SAPs for Linux, data structures and indicators. + * + * Copyright (c) 2001 by Jay Schulist + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#define __LLC_SOCK_SIZE__ 28 /* sizeof(sockaddr_llc), word align. */ +struct sockaddr_llc { + sa_family_t sllc_family; /* AF_LLC */ + sa_family_t sllc_arphrd; /* ARPHRD_ETHER */ + unsigned char sllc_test; + unsigned char sllc_xid; + unsigned char sllc_ua; /* UA data, only for SOCK_STREAM. 
*/ + unsigned char sllc_dsap; + unsigned char sllc_ssap; + unsigned char sllc_dmac[IFHWADDRLEN]; + unsigned char sllc_smac[IFHWADDRLEN]; + unsigned char sllc_mmac[IFHWADDRLEN]; + unsigned char __pad[__LLC_SOCK_SIZE__ - sizeof(sa_family_t) * 2 - + sizeof(unsigned char) * 5 - IFHWADDRLEN * 3]; +}; +
+/* sockopt definitions. */ +enum llc_sockopts { + LLC_OPT_UNKNOWN = 0, + LLC_OPT_RETRY, /* max retrans attempts. */ + LLC_OPT_SIZE, /* max PDU size (octets). */ + LLC_OPT_ACK_TMR_EXP, /* ack expire time (secs). */ + LLC_OPT_P_TMR_EXP, /* pf cycle expire time (secs). */ + LLC_OPT_REJ_TMR_EXP, /* rej sent expire time (secs). */ + LLC_OPT_BUSY_TMR_EXP, /* busy state expire time (secs). */ + LLC_OPT_TX_WIN, /* tx window size. */ + LLC_OPT_RX_WIN, /* rx window size. */ + LLC_OPT_MAX +}; +
+#define LLC_OPT_MAX_RETRY 100 +#define LLC_OPT_MAX_SIZE 4196 +#define LLC_OPT_MAX_WIN 127 +#define LLC_OPT_MAX_ACK_TMR_EXP 60 +#define LLC_OPT_MAX_P_TMR_EXP 60 +#define LLC_OPT_MAX_REJ_TMR_EXP 60 +#define LLC_OPT_MAX_BUSY_TMR_EXP 60 +
+/* LLC SAP types. */ +#define LLC_SAP_NULL 0x00 /* NULL SAP. */ +#define LLC_SAP_LLC 0x02 /* LLC Sublayer Management. */ +#define LLC_SAP_SNA 0x04 /* SNA Path Control. */ +#define LLC_SAP_PNM 0x0E /* Proway Network Management. */ +#define LLC_SAP_IP 0x06 /* TCP/IP. */ +#define LLC_SAP_BSPAN 0x42 /* Bridge Spanning Tree Proto */ +#define LLC_SAP_MMS 0x4E /* Manufacturing Message Srv. */ +#define LLC_SAP_8208 0x7E /* ISO 8208 */ +#define LLC_SAP_3COM 0x80 /* 3COM. */ +#define LLC_SAP_PRO 0x8E /* Proway Active Station List */ +#define LLC_SAP_SNAP 0xAA /* SNAP. */ +#define LLC_SAP_BANYAN 0xBC /* Banyan. */ +#define LLC_SAP_IPX 0xE0 /* IPX/SPX. */ +#define LLC_SAP_NETBEUI 0xF0 /* NetBEUI. */ +#define LLC_SAP_LANMGR 0xF4 /* LanManager. */ +#define LLC_SAP_IMPL 0xF8 /* IMPL */ +#define LLC_SAP_DISC 0xFC /* Discovery */ +#define LLC_SAP_OSI 0xFE /* OSI Network Layers. */ +#define LLC_SAP_LAR 0xDC /* LAN Address Resolution */ +#define LLC_SAP_RM 0xD4 /* Resource Management */ +#define LLC_SAP_GLOBAL 0xFF /* Global SAP. 
*/ + +#ifdef __KERNEL__ +#define LLC_SAP_DYN_START 0xC0 +#define LLC_SAP_DYN_STOP 0xDE +#define LLC_SAP_DYN_TRIES 4 + +struct sock; + +struct llc_ui_opt { + u16 link; /* network layer link number */ + struct llc_sap *sap; /* pointer to parent SAP */ + struct sock *core_sk; + struct net_device *dev; /* device to send to remote */ + struct sockaddr_llc addr; /* address sock is bound to */ +}; + +#define llc_ui_sk(__sk) ((struct llc_ui_opt *)(__sk)->protinfo) +#define llc_ui_skb_cb(__skb) ((struct sockaddr_llc *)&((__skb)->cb[0])) + +#ifdef CONFIG_LLC_UI +extern int llc_ui_init(void); +extern void llc_ui_exit(void); +#else +#define llc_ui_init() +#define llc_ui_exit() +#endif +#endif /* __KERNEL__ */ +#endif /* __LINUX_LLC_H */ diff -Nru a/include/linux/loop.h b/include/linux/loop.h --- a/include/linux/loop.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/loop.h Tue Jun 18 19:12:02 2002 @@ -62,14 +62,6 @@ char *raw_buf, char *loop_buf, int size, int real_block); -static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf, - char *lbuf, int size, int rblock) -{ - if (!lo->transfer) - return 0; - - return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock); -} #endif /* __KERNEL__ */ /* diff -Nru a/include/linux/namei.h b/include/linux/namei.h --- a/include/linux/namei.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/namei.h Tue Jun 18 19:12:02 2002 @@ -40,7 +40,6 @@ __user_walk(name, LOOKUP_FOLLOW, nd) #define user_path_walk_link(name,nd) \ __user_walk(name, 0, nd) -extern int FASTCALL(path_init(const char *, unsigned, struct nameidata *)); extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *)); extern int FASTCALL(path_walk(const char *, struct nameidata *)); extern int FASTCALL(link_path_walk(const char *, struct nameidata *)); diff -Nru a/include/linux/quota.h b/include/linux/quota.h --- a/include/linux/quota.h Tue Jun 18 19:12:03 2002 +++ b/include/linux/quota.h Tue Jun 18 19:12:03 2002 @@ -134,7 +134,7 @@ #ifdef __KERNEL__ -#include +#include #include #include diff -Nru a/include/linux/raid/md.h b/include/linux/raid/md.h --- a/include/linux/raid/md.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/raid/md.h Tue Jun 18 19:12:01 2002 @@ -63,8 +63,6 @@ extern int md_size[MAX_MD_DEVS]; extern struct hd_struct md_hd_struct[MAX_MD_DEVS]; -extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data); -extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev); extern char * partition_name (kdev_t dev); extern inline char * bdev_partition_name (struct block_device *bdev) { @@ -77,14 +75,9 @@ extern void md_unregister_thread (mdk_thread_t *thread); extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_interrupt_thread (mdk_thread_t *thread); -extern int md_update_sb (mddev_t *mddev); -extern int md_do_sync(mddev_t *mddev, mdp_disk_t *spare); +extern void md_update_sb (mddev_t *mddev); extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_sync_acct(kdev_t dev, unsigned long nr_sectors); -extern void md_recover_arrays (void); -extern int md_check_ordering (mddev_t *mddev); -extern int md_notify_reboot(struct notifier_block *this, - unsigned long code, void *x); extern int md_error (mddev_t *mddev, struct block_device *bdev); extern int md_run_setup(void); diff -Nru a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h --- a/include/linux/raid/md_k.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/raid/md_k.h Tue Jun 18 19:12:01 2002 @@ -65,24 +65,6 @@ #define MAX_MD_DEVS (1< 0 under reconfig_sem + */ int recovery_running; + 
int in_sync; /* known to not need resync */ struct semaphore reconfig_sem; - struct semaphore recovery_sem; - struct semaphore resync_sem; atomic_t active; + mdp_disk_t *spare; atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; + request_queue_t queue; /* for plugging ... */ + struct list_head all_mddevs; }; struct mdk_personality_s { char *name; - int (*make_request)(mddev_t *mddev, int rw, struct bio *bio); + int (*make_request)(request_queue_t *q, struct bio *bio); int (*run)(mddev_t *mddev); int (*stop)(mddev_t *mddev); int (*status)(char *page, mddev_t *mddev);
@@ -237,9 +226,6 @@ * SPARE_ACTIVE expects such a change) */ int (*diskop) (mddev_t *mddev, mdp_disk_t **descriptor, int state); - - int (*stop_resync)(mddev_t *mddev); - int (*restart_resync)(mddev_t *mddev); int (*sync_request)(mddev_t *mddev, sector_t sector_nr, int go_faster); };
@@ -279,13 +265,6 @@ #define ITERATE_RDEV(mddev,rdev,tmp) \ ITERATE_RDEV_GENERIC((mddev)->disks,same_set,rdev,tmp) -/* - * Same as above, but assumes that the device has rdev->desc_nr numbered - * from 0 to mddev->nb_dev, and iterates through rdevs in ascending order. - */ -#define ITERATE_RDEV_ORDERED(mddev,rdev,i) \ - for (i = 0; rdev = find_rdev_nr(mddev, i), i < mddev->nb_dev; i++) - /* * Iterates through all 'RAID managed disks'
@@ -298,26 +277,6 @@ */ #define ITERATE_RDEV_PENDING(rdev,tmp) \ ITERATE_RDEV_GENERIC(pending_raid_disks,pending,rdev,tmp) - -/* - * iterates through all used mddevs in the system. - */ -#define ITERATE_MDDEV(mddev,tmp) \ - \ - for (tmp = all_mddevs.next; \ - mddev = list_entry(tmp, mddev_t, all_mddevs), \ - tmp = tmp->next, tmp->prev != &all_mddevs \ - ; ) - -static inline int lock_mddev (mddev_t * mddev) -{ - return down_interruptible(&mddev->reconfig_sem); -} - -static inline void unlock_mddev (mddev_t * mddev) -{ - up(&mddev->reconfig_sem); -} #define xchg_values(x,y) do { __typeof__(x) __tmp = x; \ x = y; y = __tmp; } while (0)
diff -Nru a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h --- a/include/linux/raid/raid1.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/raid/raid1.h Tue Jun 18 19:12:02 2002
@@ -33,8 +33,7 @@ int working_disks; int last_used; sector_t next_seq_sect; - mdk_thread_t *thread, *resync_thread; - int resync_mirrors; + mdk_thread_t *thread; mirror_info_t *spare; spinlock_t device_lock;
diff -Nru a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h --- a/include/linux/raid/raid5.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/raid/raid5.h Tue Jun 18 19:12:02 2002
@@ -3,6 +3,7 @@ #include #include +#include /* *
@@ -176,7 +177,7 @@ * is put on a "delayed" queue until there are no stripes currently * in a pre-read phase. Further, if the "delayed" queue is empty when * a stripe is put on it then we "plug" the queue and do not process it - * until an unplg call is made. (the tq_disk list is run). + * until an unplug call is made. (blk_run_queues is run). * * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add * it to the count of prereading stripes. 
@@ -204,12 +205,11 @@ struct raid5_private_data { struct stripe_head **stripe_hashtbl; mddev_t *mddev; - mdk_thread_t *thread, *resync_thread; + mdk_thread_t *thread; struct disk_info disks[MD_SB_DISKS]; struct disk_info *spare; int chunk_size, level, algorithm; int raid_disks, working_disks, failed_disks; - int resync_parity; int max_nr_stripes; struct list_head handle_list; /* stripes needing handling */ @@ -228,9 +228,6 @@ * waiting for 25% to be free */ spinlock_t device_lock; - - int plugged; - struct tq_struct plug_tq; }; typedef struct raid5_private_data raid5_conf_t; diff -Nru a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h --- a/include/linux/reiserfs_fs.h Tue Jun 18 19:12:03 2002 +++ b/include/linux/reiserfs_fs.h Tue Jun 18 19:12:03 2002 @@ -1651,7 +1651,7 @@ #define JOURNAL_BUFFER(j,n) ((j)->j_ap_blocks[((j)->j_start + (n)) % JOURNAL_BLOCK_COUNT]) // We need these to make journal.c code more readable -#define journal_get_hash_table(s, block) __get_hash_table(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize) +#define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize) #define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize) #define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize) diff -Nru a/include/linux/sched.h b/include/linux/sched.h --- a/include/linux/sched.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/sched.h Tue Jun 18 19:12:01 2002 @@ -7,7 +7,6 @@ #include #include -#include #include #include #include @@ -160,7 +159,6 @@ extern signed long FASTCALL(schedule_timeout(signed long timeout)); asmlinkage void schedule(void); -extern int schedule_task(struct tq_struct *task); extern void flush_scheduled_tasks(void); extern int start_context_thread(void); extern int current_is_keventd(void); diff -Nru a/include/linux/skbuff.h b/include/linux/skbuff.h --- a/include/linux/skbuff.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/skbuff.h Tue Jun 18 19:12:01 2002 @@ -10,7 +10,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ - + #ifndef _LINUX_SKBUFF_H #define _LINUX_SKBUFF_H @@ -35,10 +35,13 @@ #define CHECKSUM_HW 1 #define CHECKSUM_UNNECESSARY 2 -#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1)) -#define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1)) -#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0)) -#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2)) +#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ + ~(SMP_CACHE_BYTES - 1)) +#define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \ + sizeof(struct skb_shared_info)) & \ + ~(SMP_CACHE_BYTES - 1)) +#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) +#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) /* A. Checksumming of received packets by device. * @@ -79,7 +82,7 @@ */ #ifdef __i386__ -#define NET_CALLER(arg) (*(((void**)&arg)-1)) +#define NET_CALLER(arg) (*(((void **)&arg) - 1)) #else #define NET_CALLER(arg) __builtin_return_address(0) #endif @@ -97,8 +100,8 @@ struct sk_buff_head { /* These two members must be first. 
*/ - struct sk_buff * next; - struct sk_buff * prev; + struct sk_buff *next; + struct sk_buff *prev; __u32 qlen; spinlock_t lock; @@ -110,8 +113,7 @@ typedef struct skb_frag_struct skb_frag_t; -struct skb_frag_struct -{ +struct skb_frag_struct { struct page *page; __u16 page_offset; __u16 size; @@ -127,19 +129,54 @@ skb_frag_t frags[MAX_SKB_FRAGS]; }; +/** + * struct sk_buff - socket buffer + * @next: Next buffer in list + * @prev: Previous buffer in list + * @list: List we are on + * @sk: Socket we are owned by + * @stamp: Time we arrived + * @dev: Device we arrived on/are leaving by + * @h: Transport layer header + * @nh: Network layer header + * @mac: Link layer header + * @dst: FIXME: Describe this field + * @cb: Control buffer. Free for use by every layer. Put private vars here + * @len: Length of actual data + * @data_len: Data length + * @csum: Checksum + * @__unused: Dead field, may be reused + * @cloned: Head may be cloned (check refcnt to be sure) + * @pkt_type: Packet class + * @ip_summed: Driver fed us an IP checksum + * @priority: Packet queueing priority + * @users: User count - see {datagram,tcp}.c + * @protocol: Packet protocol from driver + * @security: Security level of packet + * @truesize: Buffer size + * @head: Head of buffer + * @data: Data head pointer + * @tail: Tail pointer + * @end: End pointer + * @destructor: Destruct function + * @nfmark: Can be used for communication between hooks + * @nfcache: Cache info + * @nfct: Associated connection, if any + * @nf_debug: Netfilter debugging + * @tc_index: Traffic control index + */ + struct sk_buff { /* These two members must be first. */ - struct sk_buff * next; /* Next buffer in list */ - struct sk_buff * prev; /* Previous buffer in list */ + struct sk_buff *next; + struct sk_buff *prev; + + struct sk_buff_head *list; + struct sock *sk; + struct timeval stamp; + struct net_device *dev; - struct sk_buff_head * list; /* List we are on */ - struct sock *sk; /* Socket we are owned by */ - struct timeval stamp; /* Time we arrived */ - struct net_device *dev; /* Device we arrived on/are leaving by */ - - /* Transport layer header */ - union - { + union { struct tcphdr *th; struct udphdr *uh; struct icmphdr *icmph; @@ -149,72 +186,63 @@ unsigned char *raw; } h; - /* Network layer header */ - union - { + union { struct iphdr *iph; struct ipv6hdr *ipv6h; struct arphdr *arph; struct ipxhdr *ipxh; unsigned char *raw; } nh; - - /* Link layer header */ - union - { + + union { struct ethhdr *ethernet; unsigned char *raw; } mac; - struct dst_entry *dst; + struct dst_entry *dst; - /* + /* * This is the control buffer. It is free to use for every * layer. Please put your private variables there. If you * want to keep them across layers you have to do a skb_clone() * first. This is owned by whoever has the skb queued ATM. - */ - char cb[48]; + */ + char cb[48]; - unsigned int len; /* Length of actual data */ - unsigned int data_len; - unsigned int csum; /* Checksum */ - unsigned char __unused, /* Dead field, may be reused */ - cloned, /* head may be cloned (check refcnt to be sure). */ - pkt_type, /* Packet class */ - ip_summed; /* Driver fed us an IP checksum */ - __u32 priority; /* Packet queueing priority */ - atomic_t users; /* User count - see datagram.c,tcp.c */ - unsigned short protocol; /* Packet protocol from driver. 
*/ - unsigned short security; /* Security level of packet */ - unsigned int truesize; /* Buffer size */ - - unsigned char *head; /* Head of buffer */ - unsigned char *data; /* Data head pointer */ - unsigned char *tail; /* Tail pointer */ - unsigned char *end; /* End pointer */ + unsigned int len, + data_len, + csum; + unsigned char __unused, + cloned, + pkt_type, + ip_summed; + __u32 priority; + atomic_t users; + unsigned short protocol, + security; + unsigned int truesize; + + unsigned char *head, + *data, + *tail, + *end; - void (*destructor)(struct sk_buff *); /* Destruct function */ + void (*destructor)(struct sk_buff *skb); #ifdef CONFIG_NETFILTER - /* Can be used for communication between hooks. */ - unsigned long nfmark; - /* Cache info */ - __u32 nfcache; - /* Associated connection, if any */ - struct nf_ct_info *nfct; + unsigned long nfmark; + __u32 nfcache; + struct nf_ct_info *nfct; #ifdef CONFIG_NETFILTER_DEBUG - unsigned int nf_debug; + unsigned int nf_debug; #endif -#endif /*CONFIG_NETFILTER*/ - +#endif /* CONFIG_NETFILTER */ #if defined(CONFIG_HIPPI) - union{ - __u32 ifield; + union { + __u32 ifield; } private; #endif - #ifdef CONFIG_NET_SCHED - __u32 tc_index; /* traffic control index */ + __u32 tc_index; /* traffic control index */ #endif }; @@ -229,21 +257,24 @@ #include -extern void __kfree_skb(struct sk_buff *skb); -extern struct sk_buff * alloc_skb(unsigned int size, int priority); -extern void kfree_skbmem(struct sk_buff *skb); -extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); -extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority); -extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask); -extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask); -extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); -extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb, - int newheadroom, - int newtailroom, - int priority); +extern void __kfree_skb(struct sk_buff *skb); +extern struct sk_buff *alloc_skb(unsigned int size, int priority); +extern void kfree_skbmem(struct sk_buff *skb); +extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority); +extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority); +extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask); +extern int pskb_expand_head(struct sk_buff *skb, + int nhead, int ntail, int gfp_mask); +extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, + unsigned int headroom); +extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, + int newheadroom, int newtailroom, + int priority); #define dev_kfree_skb(a) kfree_skb(a) -extern void skb_over_panic(struct sk_buff *skb, int len, void *here); -extern void skb_under_panic(struct sk_buff *skb, int len, void *here); +extern void skb_over_panic(struct sk_buff *skb, int len, + void *here); +extern void skb_under_panic(struct sk_buff *skb, int len, + void *here); /* Internal */ #define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) @@ -254,10 +285,9 @@ * * Returns true if the queue is empty, false otherwise. */ - static inline int skb_queue_empty(struct sk_buff_head *list) { - return (list->next == (struct sk_buff *) list); + return list->next == (struct sk_buff *)list; } /** @@ -267,7 +297,6 @@ * Makes another reference to a socket buffer and returns a pointer * to the buffer. 
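 *
 * A typical pattern (an illustrative sketch, not a prescription): take a
 * second reference before queueing the buffer elsewhere, so that both
 * holders may call kfree_skb() independently:
 *
 *	skb_queue_tail(&q, skb_get(skb));
 *	...
 *	kfree_skb(skb);		(releases only our reference)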
*/ - static inline struct sk_buff *skb_get(struct sk_buff *skb) { atomic_inc(&skb->users);
@@ -275,10 +304,10 @@ } /* - * If users==1, we are the only owner and are can avoid redundant + * If users == 1, we are the only owner and can avoid redundant * atomic change. */ - + /** * kfree_skb - free an sk_buff * @skb: buffer to free * * Drop a reference to the buffer and free it if the usage count has * hit zero. */ - static inline void kfree_skb(struct sk_buff *skb) { if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
@@ -297,7 +325,7 @@ static inline void kfree_skb_fast(struct sk_buff *skb) { if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users)) - kfree_skbmem(skb); + kfree_skbmem(skb); } /**
@@ -308,7 +336,6 @@ * one of multiple shared copies of the buffer. Cloned buffers are * shared data so must not be written to under normal circumstances. */ - static inline int skb_cloned(struct sk_buff *skb) { return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
@@ -321,17 +348,16 @@ * Returns true if more than one person has a reference to this * buffer. */ - static inline int skb_shared(struct sk_buff *skb) { - return (atomic_read(&skb->users) != 1); + return atomic_read(&skb->users) != 1; } -/** +/** * skb_share_check - check if buffer is shared and if so clone it * @skb: buffer to check * @pri: priority for memory allocation - * + * * If the buffer is shared the buffer is cloned and the old copy * drops a reference. A new clone with a single reference is returned. * If the buffer is not shared the original buffer is returned. When
@@ -340,26 +366,23 @@ * * NULL is returned on a memory allocation failure. */ - static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) { if (skb_shared(skb)) { - struct sk_buff *nskb; - nskb = skb_clone(skb, pri); + struct sk_buff *nskb = skb_clone(skb, pri); kfree_skb(skb); - return nskb; + skb = nskb; } return skb; } - /* * Copy shared buffers into a new sk_buff. We effectively do COW on * packets to handle cases where we have a local reader and forward * and a couple of other messy ones. The normal one is tcpdumping * a packet that's being forwarded. */ - + /** * skb_unshare - make a copy of a shared buffer * @skb: buffer to check
@@ -373,15 +396,14 @@ * * %NULL is returned on a memory allocation failure. */ - static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri) { - struct sk_buff *nskb; - if(!skb_cloned(skb)) - return skb; - nskb=skb_copy(skb, pri); - kfree_skb(skb); /* Free our shared copy */ - return nskb; + if (skb_cloned(skb)) { + struct sk_buff *nskb = skb_copy(skb, pri); + kfree_skb(skb); /* Free our shared copy */ + skb = nskb; + } + return skb; } /**
@@ -397,7 +419,6 @@ * The reference count is not incremented and the reference is therefore * volatile. Use with caution. */ - static inline struct sk_buff *skb_peek(struct sk_buff_head *list_) { struct sk_buff *list = ((struct sk_buff *)list_)->next;
@@ -419,7 +440,6 @@ * The reference count is not incremented and the reference is therefore * volatile. Use with caution. */ - static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_) { struct sk_buff *list = ((struct sk_buff *)list_)->prev;
@@ -432,19 +452,17 @@ * skb_queue_len - get queue length * @list_: list to measure * - * Return the length of an &sk_buff queue. + * Return the length of an &sk_buff queue. 
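+ *
+ * These queue primitives are typically used together along these lines
+ * (an illustrative sketch only; the non-underscored variants take the
+ * queue lock internally):
+ *
+ *	struct sk_buff_head q;
+ *
+ *	skb_queue_head_init(&q);
+ *	skb_queue_tail(&q, skb);
+ *	while ((skb = skb_dequeue(&q)) != NULL)
+ *		kfree_skb(skb);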
*/ - static inline __u32 skb_queue_len(struct sk_buff_head *list_) { - return(list_->qlen); + return list_->qlen; } static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); - list->prev = (struct sk_buff *)list; - list->next = (struct sk_buff *)list; + list->prev = list->next = (struct sk_buff *)list; list->qlen = 0; } @@ -464,9 +482,9 @@ * and you must therefore hold required locks before calling it. * * A buffer cannot be placed on two lists at the same time. - */ - -static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) + */ +static inline void __skb_queue_head(struct sk_buff_head *list, + struct sk_buff *newsk) { struct sk_buff *prev, *next; @@ -476,8 +494,7 @@ next = prev->next; newsk->next = next; newsk->prev = prev; - next->prev = newsk; - prev->next = newsk; + next->prev = prev->next = newsk; } @@ -491,9 +508,9 @@ * safely. * * A buffer cannot be placed on two lists at the same time. - */ - -static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) + */ +static inline void skb_queue_head(struct sk_buff_head *list, + struct sk_buff *newsk) { unsigned long flags; @@ -511,10 +528,9 @@ * and you must therefore hold required locks before calling it. * * A buffer cannot be placed on two lists at the same time. - */ - - -static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) + */ +static inline void __skb_queue_tail(struct sk_buff_head *list, + struct sk_buff *newsk) { struct sk_buff *prev, *next; @@ -524,8 +540,7 @@ prev = next->prev; newsk->next = next; newsk->prev = prev; - next->prev = newsk; - prev->next = newsk; + next->prev = prev->next = newsk; } /** @@ -538,9 +553,9 @@ * safely. * * A buffer cannot be placed on two lists at the same time. - */ - -static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) + */ +static inline void skb_queue_tail(struct sk_buff_head *list, + struct sk_buff *newsk) { unsigned long flags; @@ -557,7 +572,6 @@ * so must be used with appropriate locks held only. The head item is * returned or %NULL if the list is empty. */ - static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) { struct sk_buff *next, *prev, *result; @@ -566,13 +580,12 @@ next = prev->next; result = NULL; if (next != prev) { - result = next; - next = next->next; + result = next; + next = next->next; list->qlen--; - next->prev = prev; - prev->next = next; - result->next = NULL; - result->prev = NULL; + next->prev = prev; + prev->next = next; + result->next = result->prev = NULL; result->list = NULL; } return result; @@ -603,13 +616,12 @@ */ static inline void __skb_insert(struct sk_buff *newsk, - struct sk_buff * prev, struct sk_buff *next, - struct sk_buff_head * list) + struct sk_buff *prev, struct sk_buff *next, + struct sk_buff_head *list) { newsk->next = next; newsk->prev = prev; - next->prev = newsk; - prev->next = newsk; + next->prev = prev->next = newsk; newsk->list = list; list->qlen++; } @@ -666,17 +678,15 @@ * remove sk_buff from list. _Must_ be called atomically, and with * the list known.. 
*/ - static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { - struct sk_buff * next, * prev; + struct sk_buff *next, *prev; list->qlen--; - next = skb->next; - prev = skb->prev; - skb->next = NULL; - skb->prev = NULL; - skb->list = NULL; + next = skb->next; + prev = skb->prev; + skb->next = skb->prev = NULL; + skb->list = NULL; next->prev = prev; prev->next = next; } @@ -687,22 +697,21 @@ * * Place a packet after a given packet in a list. The list locks are taken * and this function is atomic with respect to other list locked calls - * - * Works even without knowing the list it is sitting on, which can be - * handy at times. It also means that THE LIST MUST EXIST when you + * + * Works even without knowing the list it is sitting on, which can be + * handy at times. It also means that THE LIST MUST EXIST when you * unlink. Thus a list must have its contents unlinked before it is * destroyed. */ - static inline void skb_unlink(struct sk_buff *skb) { struct sk_buff_head *list = skb->list; - if(list) { + if (list) { unsigned long flags; spin_lock_irqsave(&list->lock, flags); - if(skb->list == list) + if (skb->list == list) __skb_unlink(skb, skb->list); spin_unlock_irqrestore(&list->lock, flags); } @@ -718,10 +727,9 @@ * so must be used with appropriate locks held only. The tail item is * returned or %NULL if the list is empty. */ - static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) { - struct sk_buff *skb = skb_peek_tail(list); + struct sk_buff *skb = skb_peek_tail(list); if (skb) __skb_unlink(skb, list); return skb; @@ -735,7 +743,6 @@ * may be used safely with other locking list functions. The tail item is * returned or %NULL if the list is empty. */ - static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) { unsigned long flags; @@ -757,83 +764,81 @@ return skb->len - skb->data_len; } -#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) BUG(); } while (0) -#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) BUG(); } while (0) -#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) BUG(); } while (0) +#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) \ + BUG(); } while (0) +#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) \ + BUG(); } while (0) +#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) \ + BUG(); } while (0) /* * Add data to an sk_buff */ - static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) { - unsigned char *tmp=skb->tail; + unsigned char *tmp = skb->tail; SKB_LINEAR_ASSERT(skb); - skb->tail+=len; - skb->len+=len; + skb->tail += len; + skb->len += len; return tmp; } /** * skb_put - add data to a buffer - * @skb: buffer to use + * @skb: buffer to use * @len: amount of data to add * * This function extends the used data area of the buffer. If this would * exceed the total buffer size the kernel will panic. A pointer to the * first byte of the extra data is returned. 
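 *
 * Together with skb_reserve() this is how a frame is normally built up
 * (sketch; headroom, len and data are assumptions of the example):
 *
 *	skb = alloc_skb(len + headroom, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, headroom);	(leave room to push headers later)
 *	memcpy(skb_put(skb, len), data, len);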
*/ - static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len) { - unsigned char *tmp=skb->tail; + unsigned char *tmp = skb->tail; SKB_LINEAR_ASSERT(skb); - skb->tail+=len; - skb->len+=len; - if(skb->tail>skb->end) { + skb->tail += len; + skb->len += len; + if (skb->tail > skb->end) skb_over_panic(skb, len, current_text_addr()); - } return tmp; } static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) { - skb->data-=len; - skb->len+=len; + skb->data -= len; + skb->len += len; return skb->data; } /** * skb_push - add data to the start of a buffer - * @skb: buffer to use + * @skb: buffer to use * @len: amount of data to add * * This function extends the used data area of the buffer at the buffer * start. If this would exceed the total buffer headroom the kernel will * panic. A pointer to the first byte of the extra data is returned. */ - static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len) { - skb->data-=len; - skb->len+=len; - if(skb->data<skb->head) { + skb->data -= len; + skb->len += len; + if (skb->data < skb->head) skb_under_panic(skb, len, current_text_addr()); - } return skb->data; } static inline char *__skb_pull(struct sk_buff *skb, unsigned int len) { - skb->len-=len; + skb->len -= len; if (skb->len < skb->data_len) BUG(); - return skb->data+=len; + return skb->data += len; } /** * skb_pull - remove data from the start of a buffer - * @skb: buffer to use + * @skb: buffer to use * @len: amount of data to remove * * This function removes data from the start of a buffer, returning
@@ -841,30 +846,25 @@ * is returned. Once the data has been pulled future pushes will overwrite * the old data. */ - -static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len) -{ - if (len > skb->len) - return NULL; - return __skb_pull(skb,len); +static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) +{ + return (len > skb->len) ? NULL : __skb_pull(skb, len); } -extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta); +extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && - __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL) + !__pskb_pull_tail(skb, len-skb_headlen(skb))) return NULL; skb->len -= len; - return skb->data += len; + return skb->data += len; } -static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len) -{ - if (len > skb->len) - return NULL; - return __pskb_pull(skb,len); +static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) +{ + return (len > skb->len) ? NULL : __pskb_pull(skb, len); } static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
@@ -873,7 +873,7 @@ return 1; if (len > skb->len) return 0; - return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL); + return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; } /**
@@ -882,10 +882,9 @@ * * Return the number of bytes of free space at the head of an &sk_buff. */ - static inline int skb_headroom(const struct sk_buff *skb) { - return skb->data-skb->head; + return skb->data - skb->head; } /**
@@ -894,10 +893,9 @@ * * Return the number of bytes of free space at the tail of an sk_buff */ - static inline int skb_tailroom(const struct sk_buff *skb) { - return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail; + return skb_is_nonlinear(skb) ? 
0 : skb->end - skb->tail; } /**
@@ -908,11 +906,10 @@ * Increase the headroom of an empty &sk_buff by reducing the tail * room. This is only allowed for an empty buffer. */ - static inline void skb_reserve(struct sk_buff *skb, unsigned int len) { - skb->data+=len; - skb->tail+=len; + skb->data += len; + skb->tail += len; } extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
@@ -920,11 +917,10 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) { if (!skb->data_len) { - skb->len = len; - skb->tail = skb->data+len; - } else { + skb->len = len; + skb->tail = skb->data + len; + } else ___pskb_trim(skb, len, 0); - } } /**
@@ -935,31 +931,26 @@ * Cut the length of a buffer down by removing data from the tail. If * the buffer is already under the length specified it is not modified. */ - static inline void skb_trim(struct sk_buff *skb, unsigned int len) { - if (skb->len > len) { + if (skb->len > len) __skb_trim(skb, len); - } } static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) { if (!skb->data_len) { - skb->len = len; + skb->len = len; skb->tail = skb->data+len; return 0; - } else { - return ___pskb_trim(skb, len, 1); } + return ___pskb_trim(skb, len, 1); } static inline int pskb_trim(struct sk_buff *skb, unsigned int len) { - if (len < skb->len) - return __pskb_trim(skb, len); - return 0; + return (len < skb->len) ? __pskb_trim(skb, len) : 0; } /**
@@ -970,47 +961,41 @@ * destructor function and make the @skb unowned. The buffer continues * to exist but is no longer charged to its former owner. */ - - static inline void skb_orphan(struct sk_buff *skb) { if (skb->destructor) skb->destructor(skb); skb->destructor = NULL; - skb->sk = NULL; + skb->sk = NULL; } /** - * skb_purge - empty a list + * skb_queue_purge - empty a list * @list: list to empty * * Delete all buffers on an &sk_buff list. Each buffer is removed from * the list and one reference dropped. This function takes the list * lock and is atomic with respect to other list locking functions. */ - - static inline void skb_queue_purge(struct sk_buff_head *list) { struct sk_buff *skb; - while ((skb=skb_dequeue(list))!=NULL) + while ((skb = skb_dequeue(list)) != NULL) kfree_skb(skb); } /** - * __skb_purge - empty a list + * __skb_queue_purge - empty a list * @list: list to empty * * Delete all buffers on an &sk_buff list. Each buffer is removed from * the list and one reference dropped. This function does not take the * list lock and the caller must hold the relevant locks to use it. */ - - static inline void __skb_queue_purge(struct sk_buff_head *list) { struct sk_buff *skb; - while ((skb=__skb_dequeue(list))!=NULL) + while ((skb = __skb_dequeue(list)) != NULL) kfree_skb(skb); }
@@ -1026,15 +1011,12 @@ * * %NULL is returned if there is no free memory. */ - static inline struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask) { - struct sk_buff *skb; - - skb = alloc_skb(length+16, gfp_mask); + struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); if (skb) - skb_reserve(skb,16); + skb_reserve(skb, 16); return skb; }
@@ -1050,7 +1032,6 @@ * %NULL is returned if there is no free memory. Although this function * allocates memory it can be called from an interrupt. */ - static inline struct sk_buff *dev_alloc_skb(unsigned int length) { return __dev_alloc_skb(length, GFP_ATOMIC);
@@ -1068,9 +1049,7 @@ * The result is skb with writable area skb->head...skb->tail * and at least @headroom of space at head. 
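 *
 * Callers typically invoke it just before modifying packet headers in
 * place (sketch; dev is assumed to be the output device):
 *
 *	if (skb_cow(skb, dev->hard_header_len))
 *		goto drop;
 *	(skb->data may now be written safely)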
*/ - -static inline int -skb_cow(struct sk_buff *skb, unsigned int headroom) +static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) { int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb); @@ -1078,7 +1057,7 @@ delta = 0; if (delta || skb_cloned(skb)) - return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC); + return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC); return 0; } @@ -1088,7 +1067,8 @@ * @gfp: allocation mode * * If there is no free memory -ENOMEM is returned, otherwise zero - * is returned and the old skb data released. */ + * is returned and the old skb data released. + */ int skb_linearize(struct sk_buff *skb, int gfp); static inline void *kmap_skb_frag(const skb_frag_t *frag) @@ -1113,34 +1093,45 @@ #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ (skb != (struct sk_buff *)(queue)); \ - skb=skb->next) + skb = skb->next) -extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err); -extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); -extern int skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size); -extern int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size); -extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump); -extern int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov); -extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb); - -extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum); -extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); -extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum); -extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); +extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, + int noblock, int *err); +extern unsigned int datagram_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); +extern int skb_copy_datagram(const struct sk_buff *from, + int offset, char *to, int size); +extern int skb_copy_datagram_iovec(const struct sk_buff *from, + int offset, struct iovec *to, + int size); +extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, + int offset, u8 *to, int len, + unsigned int *csump); +extern int skb_copy_and_csum_datagram_iovec(const + struct sk_buff *skb, + int hlen, + struct iovec *iov); +extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); +extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, + int len, unsigned int csum); +extern int skb_copy_bits(const struct sk_buff *skb, int offset, + void *to, int len); +extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, + int offset, u8 *to, int len, + unsigned int csum); +extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); extern void skb_init(void); extern void skb_add_mtu(int mtu); #ifdef CONFIG_NETFILTER -static inline void -nf_conntrack_put(struct nf_ct_info *nfct) +static inline void nf_conntrack_put(struct nf_ct_info *nfct) { if (nfct && atomic_dec_and_test(&nfct->master->use)) nfct->master->destroy(nfct->master); } -static inline void -nf_conntrack_get(struct nf_ct_info *nfct) +static inline void nf_conntrack_get(struct nf_ct_info *nfct) { if (nfct) atomic_inc(&nfct->master->use); diff 
-Nru a/include/linux/smp.h b/include/linux/smp.h --- a/include/linux/smp.h Tue Jun 18 19:12:02 2002 +++ b/include/linux/smp.h Tue Jun 18 19:12:02 2002 @@ -57,8 +57,6 @@ */ extern int smp_threads_ready; -extern int smp_num_cpus; - extern volatile unsigned long smp_msg_data; extern volatile int smp_src_cpu; extern volatile int smp_msg_id; @@ -79,19 +77,17 @@ * These macros fold the SMP functionality into a single CPU system */ -#define smp_num_cpus 1 #define smp_processor_id() 0 #define hard_smp_processor_id() 0 #define smp_threads_ready 1 #ifndef CONFIG_PREEMPT #define kernel_lock() #endif -#define cpu_logical_map(cpu) 0 -#define cpu_number_map(cpu) 0 #define smp_call_function(func,info,retry,wait) ({ 0; }) -#define cpu_online_map 1 static inline void smp_send_reschedule(int cpu) { } static inline void smp_send_reschedule_all(void) { } +#define cpu_online(cpu) 1 +#define num_online_cpus() 1 #define __per_cpu_data #define per_cpu(var, cpu) var #define this_cpu(var) var diff -Nru a/include/linux/swap.h b/include/linux/swap.h --- a/include/linux/swap.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/swap.h Tue Jun 18 19:12:01 2002 @@ -5,6 +5,7 @@ #include #include #include +#include #include #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ @@ -62,6 +63,21 @@ #ifdef __KERNEL__ /* + * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of + * disk blocks. A list of swap extents maps the entire swapfile. (Where the + * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart + * from setup, they're handled identically. + * + * We always assume that blocks are of size PAGE_SIZE. + */ +struct swap_extent { + struct list_head list; + pgoff_t start_page; + pgoff_t nr_pages; + sector_t start_block; +}; + +/* * Max bad pages in the new format.. */ #define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x) @@ -83,11 +99,17 @@ /* * The in-memory structure used to track swap areas. + * extent_list.prev points at the lowest-index extent. That list is + * sorted. 
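+ *
+ * Conceptually, mapping a swap page index onto a disk block walks that
+ * sorted list (an illustrative sketch of the lookup only; the real
+ * map_swap_page() also caches curr_swap_extent, and sis stands for the
+ * swap_info_struct being searched):
+ *
+ *	struct list_head *lh;
+ *	struct swap_extent *se;
+ *
+ *	list_for_each(lh, &sis->extent_list) {
+ *		se = list_entry(lh, struct swap_extent, list);
+ *		if (offset < se->start_page + se->nr_pages)
+ *			return se->start_block + (offset - se->start_page);
+ *	}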
*/ struct swap_info_struct { unsigned int flags; spinlock_t sdev_lock; struct file *swap_file; + struct block_device *bdev; + struct list_head extent_list; + int nr_extents; + struct swap_extent *curr_swap_extent; unsigned old_block_size; unsigned short * swap_map; unsigned int lowest_bit;
@@ -134,8 +156,9 @@ extern int FASTCALL(try_to_free_pages(zone_t *, unsigned int, unsigned int)); /* linux/mm/page_io.c */ -extern void rw_swap_page(int, struct page *); -extern void rw_swap_page_nolock(int, swp_entry_t, char *); +int swap_readpage(struct file *file, struct page *page); +int swap_writepage(struct page *page); +int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page); /* linux/mm/page_alloc.c */
@@ -163,12 +186,13 @@ extern struct swap_info_struct swap_info[]; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); -extern void get_swaphandle_info(swp_entry_t, unsigned long *, struct inode **); extern int swap_duplicate(swp_entry_t); -extern int swap_count(struct page *); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern void swap_free(swp_entry_t); extern void free_swap_and_cache(swp_entry_t); +sector_t map_swap_page(struct swap_info_struct *p, pgoff_t offset); +struct swap_info_struct *get_swap_info_struct(unsigned type); + struct swap_list_t { int head; /* head of priority-ordered swapfile list */ int next; /* swapfile to be used next */
diff -Nru a/include/linux/sysctl.h b/include/linux/sysctl.h --- a/include/linux/sysctl.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/sysctl.h Tue Jun 18 19:12:01 2002
@@ -130,16 +130,21 @@ /* CTL_VM names: */ enum { - VM_SWAPCTL=1, /* struct: Set vm swapping control */ - VM_SWAPOUT=2, /* int: Linear or sqrt() swapout for hogs */ - VM_FREEPG=3, /* struct: Set free page thresholds */ + VM_UNUSED1=1, /* was: struct: Set vm swapping control */ + VM_UNUSED2=2, /* was: int: Linear or sqrt() swapout for hogs */ + VM_UNUSED3=3, /* was: struct: Set free page thresholds */ VM_BDFLUSH_UNUSED=4, /* Spare */ VM_OVERCOMMIT_MEMORY=5, /* Turn off the virtual memory safety limit */ - VM_BUFFERMEM=6, /* struct: Set buffer memory thresholds */ - VM_PAGECACHE=7, /* struct: Set cache memory thresholds */ + VM_UNUSED4=6, /* was: struct: Set buffer memory thresholds */ + VM_UNUSED5=7, /* was: struct: Set cache memory thresholds */ VM_PAGERDAEMON=8, /* struct: Control kswapd behaviour */ - VM_PGT_CACHE=9, /* struct: Set page table cache parameters */ - VM_PAGE_CLUSTER=10 /* int: set number of pages to swap together */ + VM_UNUSED6=9, /* was: struct: Set page table cache parameters */ + VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */ + VM_DIRTY_BACKGROUND=11, /* dirty_background_ratio */ + VM_DIRTY_ASYNC=12, /* dirty_async_ratio */ + VM_DIRTY_SYNC=13, /* dirty_sync_ratio */ + VM_DIRTY_WB_CS=14, /* dirty_writeback_centisecs */ + VM_DIRTY_EXPIRE_CS=15, /* dirty_expire_centisecs */ };
diff -Nru a/include/linux/timer.h b/include/linux/timer.h --- a/include/linux/timer.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/timer.h Tue Jun 18 19:12:01 2002
@@ -25,10 +25,8 @@ #ifdef CONFIG_SMP extern int del_timer_sync(struct timer_list * timer); -extern void sync_timers(void); #else #define del_timer_sync(t) del_timer(t) -#define sync_timers() do { } while (0) #endif /*
diff -Nru a/include/linux/tqueue.h b/include/linux/tqueue.h --- a/include/linux/tqueue.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/tqueue.h Tue Jun 18 19:12:01 2002
@@ -110,6 +110,9 @@ return ret; } +/* Schedule a tq to run in process context */ 
+extern int schedule_task(struct tq_struct *task); + /* * Call all "bottom halfs" on a given list. */ diff -Nru a/include/linux/vmalloc.h b/include/linux/vmalloc.h --- a/include/linux/vmalloc.h Tue Jun 18 19:12:03 2002 +++ b/include/linux/vmalloc.h Tue Jun 18 19:12:03 2002 @@ -13,6 +13,7 @@ unsigned long flags; void * addr; unsigned long size; + unsigned long phys_addr; struct vm_struct * next; }; @@ -23,6 +24,8 @@ extern void vmfree_area_pages(unsigned long address, unsigned long size); extern int vmalloc_area_pages(unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot); +extern struct vm_struct *remove_kernel_area(void *addr); + /* * Various ways to allocate pages. */ diff -Nru a/include/linux/wait.h b/include/linux/wait.h --- a/include/linux/wait.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/wait.h Tue Jun 18 19:12:01 2002 @@ -19,13 +19,17 @@ #include #include +typedef struct __wait_queue wait_queue_t; +typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync); +extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync); + struct __wait_queue { unsigned int flags; #define WQ_FLAG_EXCLUSIVE 0x01 struct task_struct * task; + wait_queue_func_t func; struct list_head task_list; }; -typedef struct __wait_queue wait_queue_t; struct __wait_queue_head { spinlock_t lock; @@ -40,13 +44,14 @@ #define __WAITQUEUE_INITIALIZER(name, tsk) { \ task: tsk, \ + func: default_wake_function, \ task_list: { NULL, NULL } } #define DECLARE_WAITQUEUE(name, tsk) \ wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ - lock: SPIN_LOCK_UNLOCKED, \ + lock: SPIN_LOCK_UNLOCKED, \ task_list: { &(name).task_list, &(name).task_list } } #define DECLARE_WAIT_QUEUE_HEAD(name) \ @@ -62,6 +67,15 @@ { q->flags = 0; q->task = p; + q->func = default_wake_function; +} + +static inline void init_waitqueue_func_entry(wait_queue_t *q, + wait_queue_func_t func) +{ + q->flags = 0; + q->task = NULL; + q->func = func; } static inline int waitqueue_active(wait_queue_head_t *q) @@ -88,6 +102,22 @@ { list_del(&old->task_list); } + +#define add_wait_queue_cond(q, wait, cond) \ + ({ \ + unsigned long flags; \ + int _raced = 0; \ + spin_lock_irqsave(&(q)->lock, flags); \ + (wait)->flags = 0; \ + __add_wait_queue((q), (wait)); \ + rmb(); \ + if (!(cond)) { \ + _raced = 1; \ + __remove_wait_queue((q), (wait)); \ + } \ + spin_unlock_irqrestore(&(q)->lock, flags); \ + _raced; \ + }) #endif /* __KERNEL__ */ diff -Nru a/include/linux/writeback.h b/include/linux/writeback.h --- a/include/linux/writeback.h Tue Jun 18 19:12:01 2002 +++ b/include/linux/writeback.h Tue Jun 18 19:12:01 2002 @@ -45,6 +45,12 @@ /* * mm/page-writeback.c */ +extern int dirty_background_ratio; +extern int dirty_async_ratio; +extern int dirty_sync_ratio; +extern int dirty_writeback_centisecs; +extern int dirty_expire_centisecs; + void balance_dirty_pages(struct address_space *mapping); void balance_dirty_pages_ratelimited(struct address_space *mapping); int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); diff -Nru a/include/linux/xqm.h b/include/linux/xqm.h --- a/include/linux/xqm.h Tue Jun 18 19:12:03 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,159 +0,0 @@ -/* - * Copyright (c) 1995-2001 Silicon Graphics, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2.1 of the GNU Lesser General Public License - * as published by the Free Software Foundation.
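The wait.h changes above make the wake-up behaviour of a waiter pluggable: default_wake_function() preserves the old task wake-up semantics, while init_waitqueue_func_entry() installs a caller-supplied callback. A minimal sketch, assuming the convention that the callback's return value reports whether a waiter was handled; all example_ names are illustrative, not part of the patch:

	/* Sketch: count wake-ups instead of waking a task. */
	static atomic_t example_wakeups = ATOMIC_INIT(0);
	static wait_queue_t example_wait;

	static int example_wake(wait_queue_t *wait, unsigned mode, int sync)
	{
		atomic_inc(&example_wakeups);
		return 1;	/* one waiter handled */
	}

	static void example_watch(wait_queue_head_t *q)
	{
		init_waitqueue_func_entry(&example_wait, example_wake);
		add_wait_queue(q, &example_wait);
	}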
- * - * This program is distributed in the hope that it would be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * Further, this software is distributed without any warranty that it is - * free of the rightful claim of any third person regarding infringement - * or the like. Any license provided herein, whether implied or - * otherwise, applies only to this software file. Patent licenses, if - * any, provided herein do not apply to combinations of this program with - * other software, or any other product whatsoever. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, - * USA. - * - * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, - * Mountain View, CA 94043, or: - * - * http://www.sgi.com - * - * For further information regarding this notice, see: - * - * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ - */ -#ifndef _LINUX_XQM_H -#define _LINUX_XQM_H - -#include - -/* - * Disk quota - quotactl(2) commands for the XFS Quota Manager (XQM). - */ - -#define XQM_CMD(x) (('X'<<8)+(x)) /* note: forms first QCMD argument */ -#define Q_XQUOTAON XQM_CMD(0x1) /* enable accounting/enforcement */ -#define Q_XQUOTAOFF XQM_CMD(0x2) /* disable accounting/enforcement */ -#define Q_XGETQUOTA XQM_CMD(0x3) /* get disk limits and usage */ -#define Q_XSETQLIM XQM_CMD(0x4) /* set disk limits */ -#define Q_XGETQSTAT XQM_CMD(0x5) /* get quota subsystem status */ -#define Q_XQUOTARM XQM_CMD(0x6) /* free disk space used by dquots */ - -/* - * fs_disk_quota structure: - * - * This contains the current quota information regarding a user/proj/group. - * It is 64-bit aligned, and all the blk units are in BBs (Basic Blocks) of - * 512 bytes. - */ -#define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ -typedef struct fs_disk_quota { - __s8 d_version; /* version of this structure */ - __s8 d_flags; /* XFS_{USER,PROJ,GROUP}_QUOTA */ - __u16 d_fieldmask; /* field specifier */ - __u32 d_id; /* user, project, or group ID */ - __u64 d_blk_hardlimit;/* absolute limit on disk blks */ - __u64 d_blk_softlimit;/* preferred limit on disk blks */ - __u64 d_ino_hardlimit;/* maximum # allocated inodes */ - __u64 d_ino_softlimit;/* preferred inode limit */ - __u64 d_bcount; /* # disk blocks owned by the user */ - __u64 d_icount; /* # inodes owned by the user */ - __s32 d_itimer; /* zero if within inode limits */ - /* if not, we refuse service */ - __s32 d_btimer; /* similar to above; for disk blocks */ - __u16 d_iwarns; /* # warnings issued wrt num inodes */ - __u16 d_bwarns; /* # warnings issued wrt disk blocks */ - __s32 d_padding2; /* padding2 - for future use */ - __u64 d_rtb_hardlimit;/* absolute limit on realtime blks */ - __u64 d_rtb_softlimit;/* preferred limit on RT disk blks */ - __u64 d_rtbcount; /* # realtime blocks owned */ - __s32 d_rtbtimer; /* similar to above; for RT disk blks */ - __u16 d_rtbwarns; /* # warnings issued wrt RT disk blks */ - __s16 d_padding3; /* padding3 - for future use */ - char d_padding4[8]; /* yet more padding */ -} fs_disk_quota_t; - -/* - * These fields are sent to Q_XSETQLIM to specify fields that need to change. 
- */ -#define FS_DQ_ISOFT (1<<0) -#define FS_DQ_IHARD (1<<1) -#define FS_DQ_BSOFT (1<<2) -#define FS_DQ_BHARD (1<<3) -#define FS_DQ_RTBSOFT (1<<4) -#define FS_DQ_RTBHARD (1<<5) -#define FS_DQ_LIMIT_MASK (FS_DQ_ISOFT | FS_DQ_IHARD | FS_DQ_BSOFT | \ - FS_DQ_BHARD | FS_DQ_RTBSOFT | FS_DQ_RTBHARD) -/* - * These timers can only be set in super user's dquot. For others, timers are - * automatically started and stopped. Superusers timer values set the limits - * for the rest. In case these values are zero, the DQ_{F,B}TIMELIMIT values - * defined below are used. - * These values also apply only to the d_fieldmask field for Q_XSETQLIM. - */ -#define FS_DQ_BTIMER (1<<6) -#define FS_DQ_ITIMER (1<<7) -#define FS_DQ_RTBTIMER (1<<8) -#define FS_DQ_TIMER_MASK (FS_DQ_BTIMER | FS_DQ_ITIMER | FS_DQ_RTBTIMER) - -/* - * The following constants define the default amount of time given a user - * before the soft limits are treated as hard limits (usually resulting - * in an allocation failure). These may be modified by the quotactl(2) - * system call with the Q_XSETQLIM command. - */ -#define DQ_FTIMELIMIT (7 * 24*60*60) /* 1 week */ -#define DQ_BTIMELIMIT (7 * 24*60*60) /* 1 week */ - -/* - * Various flags related to quotactl(2). Only relevant to XFS filesystems. - */ -#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ -#define XFS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ -#define XFS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ -#define XFS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ - -#define XFS_USER_QUOTA (1<<0) /* user quota type */ -#define XFS_PROJ_QUOTA (1<<1) /* (IRIX) project quota type */ -#define XFS_GROUP_QUOTA (1<<2) /* group quota type */ - -/* - * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. - * Provides a centralized way to get meta infomation about the quota subsystem. - * eg. space taken up for user and group quotas, number of dquots currently - * incore. - */ -#define FS_QSTAT_VERSION 1 /* fs_quota_stat.qs_version */ - -/* - * Some basic infomation about 'quota files'. 
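For context on the interface this hunk removes from include/linux: user space drives Q_XSETQLIM by filling a fs_disk_quota and flagging the valid members in d_fieldmask with the FS_DQ_* masks above. A user-space sketch; the header location, device path, uid and limit values are all illustrative only:

	/* Sketch: raise only the block limits for one user.
	 * Assumes <sys/quota.h> for quotactl()/QCMD(); path, id and
	 * limits are made up. */
	#include <sys/quota.h>

	static int example_set_blk_limits(void)
	{
		struct fs_disk_quota d = { 0 };

		d.d_version       = FS_DQUOT_VERSION;
		d.d_flags         = XFS_USER_QUOTA;
		d.d_id            = 1000;
		d.d_blk_softlimit = 1024 * 1024;	/* 512-byte BBs */
		d.d_blk_hardlimit = 2048 * 1024;
		d.d_fieldmask     = FS_DQ_BSOFT | FS_DQ_BHARD;

		return quotactl(QCMD(Q_XSETQLIM, USRQUOTA), "/dev/sda1",
				1000, (void *)&d);
	}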
- */ -typedef struct fs_qfilestat { - __u64 qfs_ino; /* inode number */ - __u64 qfs_nblks; /* number of BBs 512-byte-blks */ - __u32 qfs_nextents; /* number of extents */ -} fs_qfilestat_t; - -typedef struct fs_quota_stat { - __s8 qs_version; /* version number for future changes */ - __u16 qs_flags; /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ - __s8 qs_pad; /* unused */ - fs_qfilestat_t qs_uquota; /* user quota storage information */ - fs_qfilestat_t qs_gquota; /* group quota storage information */ - __u32 qs_incoredqs; /* number of dquots incore */ - __s32 qs_btimelimit; /* limit for blks timer */ - __s32 qs_itimelimit; /* limit for inodes timer */ - __s32 qs_rtbtimelimit;/* limit for rt blks timer */ - __u16 qs_bwarnlimit; /* limit for num warnings */ - __u16 qs_iwarnlimit; /* limit for num warnings */ -} fs_quota_stat_t; - -#endif /* _LINUX_XQM_H */ diff -Nru a/include/net/datalink.h b/include/net/datalink.h --- a/include/net/datalink.h Tue Jun 18 19:12:02 2002 +++ b/include/net/datalink.h Tue Jun 18 19:12:02 2002 @@ -2,15 +2,24 @@ #define _NET_INET_DATALINK_H_ struct datalink_proto { - unsigned short type_len; - unsigned char type[8]; - const char *string_name; - unsigned short header_length; - int (*rcvfunc)(struct sk_buff *, struct net_device *, - struct packet_type *); - void (*datalink_header)(struct datalink_proto *, struct sk_buff *, - unsigned char *); - struct datalink_proto *next; + unsigned short type_len; + unsigned char type[8]; + const char *string_name; + + union { + struct llc_pinfo *llc; + } ll_pinfo; + + struct llc_sc_info *llc_sc; + struct sock *sock; + + unsigned short header_length; + + int (*rcvfunc)(struct sk_buff *, struct net_device *, + struct packet_type *); + void (*datalink_header)(struct datalink_proto *, struct sk_buff *, + unsigned char *); + struct datalink_proto *next; }; #endif diff -Nru a/include/net/llc_actn.h b/include/net/llc_actn.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_actn.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,48 @@ +#ifndef LLC_ACTN_H +#define LLC_ACTN_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. 
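The action constants and the llc_station_action_t typedef that follow suggest table-driven dispatch: a state transition carries an array of action handlers which the station state machine runs in order. A sketch of such a dispatcher, assuming (the header does not say so itself) a NULL-terminated array and a zero-on-success return convention:

	/* Sketch: run one transition's station actions in order. */
	static int example_run_station_actions(struct llc_station *station,
					       struct llc_station_state_ev *ev,
					       llc_station_action_t *actions)
	{
		int rc = 0;

		for (; *actions && !rc; actions++)
			rc = (*actions)(station, ev);
		return rc;
	}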
+ */ +/* Station component state transition actions */ +#define LLC_STATION_AC_START_ACK_TMR 1 +#define LLC_STATION_AC_SET_RETRY_CNT_0 2 +#define LLC_STATION_AC_INC_RETRY_CNT_BY_1 3 +#define LLC_STATION_AC_SET_XID_R_CNT_0 4 +#define LLC_STATION_AC_INC_XID_R_CNT_BY_1 5 +#define LLC_STATION_AC_SEND_NULL_DSAP_XID_C 6 +#define LLC_STATION_AC_SEND_XID_R 7 +#define LLC_STATION_AC_SEND_TEST_R 8 +#define LLC_STATION_AC_REPORT_STATUS 9 + +/* All station state event action functions look like this */ +typedef int (*llc_station_action_t)(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_start_ack_timer(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_set_retry_cnt_0(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_inc_retry_cnt_by_1(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_set_xid_r_cnt_0(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_inc_xid_r_cnt_by_1(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_send_null_dsap_xid_c(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_send_xid_r(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_send_test_r(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_station_ac_report_status(struct llc_station *station, + struct llc_station_state_ev *ev); +#endif /* LLC_ACTN_H */ diff -Nru a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_c_ac.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,254 @@ +#ifndef LLC_C_AC_H +#define LLC_C_AC_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details.
+ */ +/* Connection component state transition actions */ +/* + * Connection state transition actions + * (Fb = F bit; Pb = P bit; Xb = X bit) + */ +#define LLC_CONN_AC_CLR_REMOTE_BUSY 1 +#define LLC_CONN_AC_CONN_IND 2 +#define LLC_CONN_AC_CONN_CONFIRM 3 +#define LLC_CONN_AC_DATA_IND 4 +#define LLC_CONN_AC_DISC_IND 5 +#define LLC_CONN_AC_RESET_IND 6 +#define LLC_CONN_AC_RESET_CONFIRM 7 +#define LLC_CONN_AC_REPORT_STATUS 8 +#define LLC_CONN_AC_CLR_REMOTE_BUSY_IF_Fb_EQ_1 9 +#define LLC_CONN_AC_STOP_REJ_TMR_IF_DATA_FLAG_EQ_2 10 +#define LLC_CONN_AC_SEND_DISC_CMD_Pb_SET_X 11 +#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_Pb 12 +#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_1 13 +#define LLC_CONN_AC_SEND_DM_RSP_Fb_SET_F_FLAG 14 +#define LLC_CONN_AC_SEND_FRMR_RSP_Fb_SET_X 15 +#define LLC_CONN_AC_RESEND_FRMR_RSP_Fb_SET_0 16 +#define LLC_CONN_AC_RESEND_FRMR_RSP_Fb_SET_Pb 17 +#define LLC_CONN_AC_SEND_I_CMD_Pb_SET_1 18 +#define LLC_CONN_AC_RESEND_I_CMD_Pb_SET_1 19 +#define LLC_CONN_AC_RESEND_I_CMD_Pb_SET_1_OR_SEND_RR 20 +#define LLC_CONN_AC_SEND_I_XXX_Xb_SET_0 21 +#define LLC_CONN_AC_RESEND_I_XXX_Xb_SET_0 22 +#define LLC_CONN_AC_RESEND_I_XXX_Xb_SET_0_OR_SEND_RR 23 +#define LLC_CONN_AC_RESEND_I_RSP_Fb_SET_1 24 +#define LLC_CONN_AC_SEND_REJ_CMD_Pb_SET_1 25 +#define LLC_CONN_AC_SEND_REJ_RSP_Fb_SET_1 26 +#define LLC_CONN_AC_SEND_REJ_XXX_Xb_SET_0 27 +#define LLC_CONN_AC_SEND_RNR_CMD_Pb_SET_1 28 +#define LLC_CONN_AC_SEND_RNR_RSP_Fb_SET_1 29 +#define LLC_CONN_AC_SEND_RNR_XXX_Xb_SET_0 30 +#define LLC_CONN_AC_SET_REMOTE_BUSY 31 +#define LLC_CONN_AC_OPTIONAL_SEND_RNR_XXX_Xb_SET_0 32 +#define LLC_CONN_AC_SEND_RR_CMD_Pb_SET_1 33 +#define LLC_CONN_AC_SEND_ACK_CMD_Pb_SET_1 34 +#define LLC_CONN_AC_SEND_RR_RSP_Fb_SET_1 35 +#define LLC_CONN_AC_SEND_ACK_RSP_Fb_SET_1 36 +#define LLC_CONN_AC_SEND_RR_XXX_Xb_SET_0 37 +#define LLC_CONN_AC_SEND_ACK_XXX_Xb_SET_0 38 +#define LLC_CONN_AC_SEND_SABME_CMD_Pb_SET_X 39 +#define LLC_CONN_AC_SEND_UA_RSP_Fb_SET_Pb 40 +#define LLC_CONN_AC_SEND_UA_RSP_Fb_SET_F_FLAG 41 +#define LLC_CONN_AC_S_FLAG_SET_0 42 +#define LLC_CONN_AC_S_FLAG_SET_1 43 +#define LLC_CONN_AC_START_P_TMR 44 +#define LLC_CONN_AC_START_ACK_TMR 45 +#define LLC_CONN_AC_START_REJ_TMR 46 +#define LLC_CONN_AC_START_ACK_TMR_IF_NOT_RUNNING 47 +#define LLC_CONN_AC_STOP_ACK_TMR 48 +#define LLC_CONN_AC_STOP_P_TMR 49 +#define LLC_CONN_AC_STOP_REJ_TMR 50 +#define LLC_CONN_AC_STOP_ALL_TMRS 51 +#define LLC_CONN_AC_STOP_OTHER_TMRS 52 +#define LLC_CONN_AC_UPDATE_Nr_RECEIVED 53 +#define LLC_CONN_AC_UPDATE_P_FLAG 54 +#define LLC_CONN_AC_DATA_FLAG_SET_2 55 +#define LLC_CONN_AC_DATA_FLAG_SET_0 56 +#define LLC_CONN_AC_DATA_FLAG_SET_1 57 +#define LLC_CONN_AC_DATA_FLAG_SET_1_IF_DATA_FLAG_EQ_0 58 +#define LLC_CONN_AC_P_FLAG_SET_0 59 +#define LLC_CONN_AC_P_FLAG_SET_P 60 +#define LLC_CONN_AC_REMOTE_BUSY_SET_0 61 +#define LLC_CONN_AC_RETRY_CNT_SET_0 62 +#define LLC_CONN_AC_RETRY_CNT_INC_BY_1 63 +#define LLC_CONN_AC_Vr_SET_0 64 +#define LLC_CONN_AC_Vr_INC_BY_1 65 +#define LLC_CONN_AC_Vs_SET_0 66 +#define LLC_CONN_AC_Vs_SET_Nr 67 +#define LLC_CONN_AC_F_FLAG_SET_P 68 +#define LLC_CONN_AC_STOP_SENDACK_TMR 70 +#define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71 + +typedef int (*llc_conn_action_t)(struct sock *sk, struct llc_conn_state_ev *ev); + +extern int llc_conn_ac_clear_remote_busy(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_conn_ind(struct sock *sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_conn_confirm(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_data_ind(struct sock* sk, struct 
llc_conn_state_ev *ev); +extern int llc_conn_ac_disc_ind(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_rst_ind(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_rst_confirm(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_report_status(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_dm_rsp_f_set_f_flag(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_i_cmd_p_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_resend_i_cmd_p_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_resend_i_cmd_p_set_1_or_send_rr(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_remote_busy(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_ack_cmd_p_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_ua_rsp_f_set_f_flag(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int 
llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_s_flag_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_s_flag_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_start_p_timer(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_start_ack_timer(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_start_rej_timer(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_stop_ack_timer(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_stop_p_timer(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_stop_rej_timer(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_stop_all_timers(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_stop_other_timers(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_upd_nr_received(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_upd_p_flag(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_data_flag_2(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_data_flag_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_data_flag_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_p_flag_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_p_flag_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_vr_0(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_vs_0(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_rst_vs(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_upd_vs(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_set_f_flag_p(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_disc(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_reset(struct sock* sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ac_disc_confirm(struct sock* sk, struct llc_conn_state_ev *ev); +extern u8 llc_circular_between(u8 a, u8 b, u8 c); +extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_inc_npta_value(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, + struct 
llc_conn_state_ev *ev); +extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ac_send_i_as_ack(struct sock* sk, + struct llc_conn_state_ev *ev); +#endif /* LLC_C_AC_H */ diff -Nru a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_c_ev.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,323 @@ +#ifndef LLC_C_EV_H +#define LLC_C_EV_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +/* Connection component state transition event qualifiers */ +/* Types of events (possible values in 'ev->type') */ +#define LLC_CONN_EV_TYPE_SIMPLE 1 +#define LLC_CONN_EV_TYPE_CONDITION 2 +#define LLC_CONN_EV_TYPE_PRIM 3 +#define LLC_CONN_EV_TYPE_PDU 4 /* command/response PDU */ +#define LLC_CONN_EV_TYPE_ACK_TMR 5 +#define LLC_CONN_EV_TYPE_P_TMR 6 +#define LLC_CONN_EV_TYPE_REJ_TMR 7 +#define LLC_CONN_EV_TYPE_BUSY_TMR 8 +#define LLC_CONN_EV_TYPE_RPT_STATUS 9 +#define LLC_CONN_EV_TYPE_SENDACK_TMR 10 + +#define NBR_CONN_EV 5 +/* Connection events which cause state transitions when fully qualified */ + +#define LLC_CONN_EV_CONN_REQ 1 +#define LLC_CONN_EV_CONN_RESP 2 +#define LLC_CONN_EV_DATA_REQ 3 +#define LLC_CONN_EV_DISC_REQ 4 +#define LLC_CONN_EV_RESET_REQ 5 +#define LLC_CONN_EV_RESET_RESP 6 +#define LLC_CONN_EV_LOCAL_BUSY_DETECTED 7 +#define LLC_CONN_EV_LOCAL_BUSY_CLEARED 8 +#define LLC_CONN_EV_RX_BAD_PDU 9 +#define LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X 10 +#define LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X 11 +#define LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X 12 +#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X 13 +#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_UNEXPD_Ns 14 +#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns 15 +#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X 16 +#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns 17 +#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns 18 +#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_X 19 +#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X 20 +#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_X 21 +#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_X 22 +#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_X 23 +#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_X 24 +#define LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X 25 +#define LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X 26 +#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X 27 +#define LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X 28 +#define LLC_CONN_EV_RX_XXX_YYY 29 +#define LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr 30 +#define LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr 31 +#define LLC_CONN_EV_P_TMR_EXP 32 +#define LLC_CONN_EV_ACK_TMR_EXP 33 +#define LLC_CONN_EV_REJ_TMR_EXP 34 +#define LLC_CONN_EV_BUSY_TMR_EXP 35 +#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 36 +#define LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_0 37 +#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns 38 +#define 
LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns 39 +#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns 40 +#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns 41 +#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 42 +#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 43 +#define LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 44 +#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 45 +#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 46 +#define LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 47 +#define LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 48 +#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 49 +#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 50 +#define LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 51 +#define LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 52 +#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 53 +#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 54 +#define LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 55 +#define LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 56 +#define LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 57 +#define LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 58 +#define LLC_CONN_EV_TX_BUFF_FULL 59 + +#define LLC_CONN_EV_INIT_P_F_CYCLE 100 +/* + * Connection event qualifiers; for some events a certain combination of + * these qualifiers must be TRUE before event recognized valid for state; + * these constants act as indexes into the Event Qualifier function + * table + */ +#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_1 1 +#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_0 2 +#define LLC_CONN_EV_QFY_DATA_FLAG_EQ_2 3 +#define LLC_CONN_EV_QFY_P_FLAG_EQ_1 4 +#define LLC_CONN_EV_QFY_P_FLAG_EQ_0 5 +#define LLC_CONN_EV_QFY_P_FLAG_EQ_Fbit 6 +#define LLC_CONN_EV_QFY_REMOTE_BUSY_EQ_0 7 +#define LLC_CONN_EV_QFY_RETRY_CNT_LT_N2 8 +#define LLC_CONN_EV_QFY_RETRY_CNT_GTE_N2 9 +#define LLC_CONN_EV_QFY_S_FLAG_EQ_1 10 +#define LLC_CONN_EV_QFY_S_FLAG_EQ_0 11 +#define LLC_CONN_EV_QFY_INIT_P_F_CYCLE 12 + +/* Event data interface; what is sent in an event package */ +/* Event LLC_CONN_EV_TYPE_SIMPLE interface */ +struct llc_conn_ev_simple_if { + u8 ev; +}; + +/* Event LLC_CONN_EV_TYPE_PRIM interface */ +struct llc_conn_ev_prim_if { + u8 prim; /* connect, disconnect, reset, ... */ + u8 type; /* request, indicate, response, conf */ + struct llc_prim_if_block *data; +}; + +/* Event LLC_CONN_EV_TYPE_PDU interface */ +struct llc_conn_ev_pdu_if { + u8 ev; + u8 reason; + struct sk_buff *skb; +}; + +/* Event interface for timer-generated events */ +struct llc_conn_ev_tmr_if { + struct sock *sk; + u32 component_handle; + void *timer_specific; +}; + +struct llc_conn_ev_rpt_sts_if { + u8 status; +}; + +union llc_conn_ev_if { + struct llc_conn_ev_simple_if a; /* 'a' for simple, easy ... 
*/ + struct llc_conn_ev_prim_if prim; + struct llc_conn_ev_pdu_if pdu; + struct llc_conn_ev_tmr_if tmr; + struct llc_conn_ev_rpt_sts_if rsts; /* report status */ +}; + +struct llc_conn_state_ev { + u8 type; + u8 status; + u8 flag; + struct llc_prim_if_block *ind_prim; + struct llc_prim_if_block *cfm_prim; + union llc_conn_ev_if data; +}; + +typedef int (*llc_conn_ev_t)(struct sock *sk, struct llc_conn_state_ev *ev); +typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, + struct llc_conn_state_ev *ev); + +extern int llc_conn_ev_conn_req(struct sock *sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ev_conn_resp(struct sock *sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ev_data_req(struct sock *sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ev_disc_req(struct sock *sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rst_req(struct sock *sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rst_resp(struct sock *sk, struct llc_conn_state_ev *ev); +extern int llc_conn_ev_local_busy_detected(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_local_busy_cleared(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_xxx_yyy(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_p_tmr_exp(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_any_tmr_exp(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, + struct llc_conn_state_ev *ev); +/* NOT_USED functions and their variations */ +extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_xxx_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, 
+ struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_rx_any_frame(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_tx_buffer_full(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, + struct llc_conn_state_ev *ev); + +/* Available connection action qualifiers */ +extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_s_flag_eq_0(struct 
sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_init_p_f_cycle(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_impossible(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_received(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, + struct llc_conn_state_ev *ev); +extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, + struct llc_conn_state_ev *ev); +#endif /* LLC_C_EV_H */ diff -Nru a/include/net/llc_c_st.h b/include/net/llc_c_st.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_c_st.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,48 @@ +#ifndef LLC_C_ST_H +#define LLC_C_ST_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +/* Connection component state management */ +/* connection states */ +#define LLC_CONN_OUT_OF_SVC 0 /* prior to allocation */ + +#define LLC_CONN_STATE_ADM 1 /* disc, initial state */ +#define LLC_CONN_STATE_SETUP 2 /* disconnected state */ +#define LLC_CONN_STATE_NORMAL 3 /* connected state */ +#define LLC_CONN_STATE_BUSY 4 /* connected state */ +#define LLC_CONN_STATE_REJ 5 /* connected state */ +#define LLC_CONN_STATE_AWAIT 6 /* connected state */ +#define LLC_CONN_STATE_AWAIT_BUSY 7 /* connected state */ +#define LLC_CONN_STATE_AWAIT_REJ 8 /* connected state */ +#define LLC_CONN_STATE_D_CONN 9 /* disconnected state */ +#define LLC_CONN_STATE_RESET 10 /* disconnected state */ +#define LLC_CONN_STATE_ERROR 11 /* disconnected state */ +#define LLC_CONN_STATE_TEMP 12 /* disconnected state */ + +#define NBR_CONN_STATES 12 /* size of state table */ +#define NO_STATE_CHANGE 100 + +/* Connection state table structure */ +struct llc_conn_state_trans { + llc_conn_ev_t ev; + u8 next_state; + llc_conn_ev_qfyr_t *ev_qualifiers; + llc_conn_action_t *ev_actions; +}; + +struct llc_conn_state { + u8 current_state; + struct llc_conn_state_trans **transitions; +}; + +extern struct llc_conn_state llc_conn_state_table[]; +#endif /* LLC_C_ST_H */ diff -Nru a/include/net/llc_conn.h b/include/net/llc_conn.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_conn.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,155 @@ +#ifndef LLC_CONN_H +#define LLC_CONN_H +/* + * Copyright (c) 1997 by Procom Technology, Inc. 
+ * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include + +#undef DEBUG_LLC_CONN_ALLOC + +struct llc_timer { + struct timer_list timer; + u8 running; /* timer is running or no */ + u16 expire; /* timer expire time */ +}; + +struct llc_opt { + struct list_head node; /* entry in sap->sk_list.list */ + struct sock *sk; /* sock that has this llc_opt */ + void *handler; /* for upper layers usage */ + u8 state; /* state of connection */ + struct llc_sap *sap; /* pointer to parent SAP */ + struct llc_addr laddr; /* lsap/mac pair */ + struct llc_addr daddr; /* dsap/mac pair */ + struct net_device *dev; /* device to send to remote */ + u8 retry_count; /* number of retries */ + u8 ack_must_be_send; + u8 first_pdu_Ns; + u8 npta; + struct llc_timer ack_timer; + struct llc_timer pf_cycle_timer; + struct llc_timer rej_sent_timer; + struct llc_timer busy_state_timer; /* ind busy clr at remote LLC */ + u8 vS; /* seq# next in-seq I-PDU tx'd*/ + u8 vR; /* seq# next in-seq I-PDU rx'd*/ + u32 n2; /* max nbr re-tx's for timeout*/ + u32 n1; /* max nbr octets in I PDU */ + u8 k; /* tx window size; max = 127 */ + u8 rw; /* rx window size; max = 127 */ + u8 p_flag; /* state flags */ + u8 f_flag; + u8 s_flag; + u8 data_flag; + u8 remote_busy_flag; + u8 cause_flag; + struct sk_buff_head pdu_unack_q; /* PDUs sent/waiting ack */ + u16 link; /* network layer link number */ + u8 X; /* a temporary variable */ + u8 ack_pf; /* this flag indicates what is + the P-bit of acknowledge */ + u8 failed_data_req; /* recognize that already exist a + failed llc_data_req_handler + (tx_buffer_full or unacceptable + state) */ + u8 dec_step; + u8 inc_cntr; + u8 dec_cntr; + u8 connect_step; + u8 last_nr; /* NR of last pdu received */ + u32 rx_pdu_hdr; /* used for saving header of last pdu + received and caused sending FRMR.
+ Used for resending FRMR */ +#ifdef DEBUG_LLC_CONN_ALLOC + char *f_alloc, /* function that allocated this connection */ + *f_free; /* function that freed this connection */ + int l_alloc, /* line that allocated this connection */ + l_free; /* line that freed this connection */ +#endif +}; + +#define llc_sk(__sk) ((struct llc_opt *)(__sk)->protinfo) + +struct llc_conn_state_ev; + +extern struct sock *__llc_sock_alloc(void); +extern void __llc_sock_free(struct sock *sk, u8 free); + +#ifdef DEBUG_LLC_CONN_ALLOC +#define dump_stack() printk(KERN_INFO "call trace: %p, %p, %p\n", \ + __builtin_return_address(0), \ + __builtin_return_address(1), \ + __builtin_return_address(2)); +#define llc_sock_alloc() ({ \ + struct sock *__sk = __llc_sock_alloc(); \ + if (__sk) { \ + llc_sk(__sk)->f_alloc = __FUNCTION__; \ + llc_sk(__sk)->l_alloc = __LINE__; \ + } \ + __sk;}) +#define __llc_sock_assert(__sk) \ + if (llc_sk(__sk)->f_free) { \ + printk(KERN_ERR \ + "%p conn (alloc'd @ %s(%d)) " \ + "already freed @ %s(%d) " \ + "being used again @ %s(%d)\n", \ + llc_sk(__sk), \ + llc_sk(__sk)->f_alloc, llc_sk(__sk)->l_alloc, \ + llc_sk(__sk)->f_free, llc_sk(__sk)->l_free, \ + __FUNCTION__, __LINE__); \ + dump_stack(); +#define llc_sock_free(__sk) \ +{ \ + __llc_sock_assert(__sk) \ + } else { \ + __llc_sock_free(__sk, 0); \ + llc_sk(__sk)->f_free = __FUNCTION__; \ + llc_sk(__sk)->l_free = __LINE__; \ + } \ +} +#define llc_sock_assert(__sk) \ +{ \ + __llc_sock_assert(__sk); \ + return; } \ +} +#define llc_sock_assert_ret(__sk, __ret) \ +{ \ + __llc_sock_assert(__sk); \ + return __ret; } \ +} +#else /* DEBUG_LLC_CONN_ALLOC */ +#define llc_sock_alloc() __llc_sock_alloc() +#define llc_sock_free(__sk) __llc_sock_free(__sk, 1) +#define llc_sock_assert(__sk) +#define llc_sock_assert_ret(__sk, __ret) +#endif /* DEBUG_LLC_CONN_ALLOC */ + +extern void llc_sock_reset(struct sock *sk); +extern int llc_sock_init(struct sock *sk); + +/* Access to a connection */ +extern struct llc_conn_state_ev *llc_conn_alloc_ev(struct sock *sk); +extern int llc_conn_send_ev(struct sock *sk, struct llc_conn_state_ev *ev); +extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); +extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb, + struct llc_conn_state_ev *ev); +extern void llc_conn_free_ev(struct llc_conn_state_ev *ev); +extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, + u8 first_p_bit); +extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, + u8 first_f_bit); +extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, + u16 *how_many_unacked); +extern struct sock *llc_find_sock(struct llc_sap *sap, struct llc_addr *daddr, + struct llc_addr *laddr); +extern u8 llc_data_accept_state(u8 state); +extern void llc_build_offset_table(void); +#endif /* LLC_CONN_H */ diff -Nru a/include/net/llc_evnt.h b/include/net/llc_evnt.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_evnt.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,93 @@ +#ifndef LLC_EVNT_H +#define LLC_EVNT_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details.
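Putting the llc_c_st.h and llc_c_ac.h declarations above together: a connection transition should fire only when its event recognizer and all of its qualifiers accept the event, after which the actions run in sequence. The zero-means-accepted convention and NULL-terminated arrays below are assumptions for illustration, not guarantees made by these headers:

	/* Sketch: attempt one connection state transition. */
	static int example_try_transition(struct sock *sk,
					  struct llc_conn_state_ev *ev,
					  struct llc_conn_state_trans *trans)
	{
		llc_conn_ev_qfyr_t *q;
		llc_conn_action_t *a;
		int rc = trans->ev(sk, ev);	/* 0: event matches */

		for (q = trans->ev_qualifiers; !rc && q && *q; q++)
			rc = (*q)(sk, ev);	/* 0: qualifier holds */
		for (a = trans->ev_actions; !rc && a && *a; a++)
			rc = (*a)(sk, ev);	/* perform the actions */
		return rc;
	}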
+ */ +/* Station component state transition events */ +/* Types of events (possible values in 'ev->type') */ +#define LLC_STATION_EV_TYPE_SIMPLE 1 +#define LLC_STATION_EV_TYPE_CONDITION 2 +#define LLC_STATION_EV_TYPE_PRIM 3 +#define LLC_STATION_EV_TYPE_PDU 4 /* command/response PDU */ +#define LLC_STATION_EV_TYPE_ACK_TMR 5 +#define LLC_STATION_EV_TYPE_RPT_STATUS 6 + +/* Events */ +#define LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK 1 +#define LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK 2 +#define LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY 3 +#define LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY 4 +#define LLC_STATION_EV_RX_NULL_DSAP_XID_C 5 +#define LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ 6 +#define LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ 7 +#define LLC_STATION_EV_RX_NULL_DSAP_TEST_C 8 +#define LLC_STATION_EV_DISABLE_REQ 9 + +/* Interfaces for various types of supported events */ +struct llc_stat_ev_simple_if { + u8 ev; +}; + +struct llc_stat_ev_prim_if { + u8 prim; /* connect, disconnect, reset, ... */ + u8 type; /* request, indicate, response, confirm */ +}; + +struct llc_stat_ev_pdu_if { + u8 reason; + struct sk_buff *skb; +}; + +struct llc_stat_ev_tmr_if { + void *timer_specific; +}; + +struct llc_stat_ev_rpt_sts_if { + u8 status; +}; + +union llc_stat_ev_if { + struct llc_stat_ev_simple_if a; /* 'a' for simple, easy ... */ + struct llc_stat_ev_prim_if prim; + struct llc_stat_ev_pdu_if pdu; + struct llc_stat_ev_tmr_if tmr; + struct llc_stat_ev_rpt_sts_if rsts; /* report status */ +}; + +struct llc_station_state_ev { + u8 type; + union llc_stat_ev_if data; + struct list_head node; /* node in station->ev_q.list */ +}; + +typedef int (*llc_station_ev_t)(struct llc_station *station, + struct llc_station_state_ev *ev); + +extern int llc_stat_ev_enable_with_dup_addr_check(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_enable_without_dup_addr_check(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct llc_station * + station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_rx_null_dsap_xid_c(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_rx_null_dsap_test_c(struct llc_station *station, + struct llc_station_state_ev *ev); +extern int llc_stat_ev_disable_req(struct llc_station *station, + struct llc_station_state_ev *ev); +#endif /* LLC_EVNT_H */ diff -Nru a/include/net/llc_if.h b/include/net/llc_if.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_if.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,155 @@ +#ifndef LLC_IF_H +#define LLC_IF_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. 
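The station event structures just defined pair with the allocation and queueing helpers that llc_main.h declares later in this patch. A sketch of how a received PDU might be turned into a station event; error handling is omitted and the example_ function is illustrative only:

	/* Sketch: queue a received frame as a station PDU event. */
	static void example_post_station_pdu(struct llc_station *station,
					     struct sk_buff *skb)
	{
		struct llc_station_state_ev *ev = llc_station_alloc_ev(station);

		if (ev) {
			ev->type = LLC_STATION_EV_TYPE_PDU;
			ev->data.pdu.skb = skb;
			llc_station_send_ev(station, ev);
		}
	}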
+ */ +/* Defines LLC interface to network layer */ +/* Available primitives */ +#include + +#define LLC_DATAUNIT_PRIM 0 +#define LLC_CONN_PRIM 1 +#define LLC_DATA_PRIM 2 +#define LLC_DISC_PRIM 3 +#define LLC_RESET_PRIM 4 +#define LLC_FLOWCONTROL_PRIM 5 +#define LLC_DISABLE_PRIM 6 +#define LLC_XID_PRIM 7 +#define LLC_TEST_PRIM 8 +#define LLC_SAP_ACTIVATION 9 +#define LLC_SAP_DEACTIVATION 10 + +#define LLC_NBR_PRIMITIVES 11 + +#define LLC_IND 1 +#define LLC_CONFIRM 2 + +/* Primitive type */ +#define LLC_PRIM_TYPE_REQ 1 +#define LLC_PRIM_TYPE_IND 2 +#define LLC_PRIM_TYPE_RESP 3 +#define LLC_PRIM_TYPE_CONFIRM 4 + +/* Reset reasons, remote entity or local LLC */ +#define LLC_RESET_REASON_REMOTE 1 +#define LLC_RESET_REASON_LOCAL 2 + +/* Disconnect reasons */ +#define LLC_DISC_REASON_RX_DM_RSP_PDU 0 +#define LLC_DISC_REASON_RX_DISC_CMD_PDU 1 +#define LLC_DISC_REASON_ACK_TMR_EXP 2 + +/* Confirm reasons */ +#define LLC_STATUS_CONN 0 /* connect confirm & reset confirm */ +#define LLC_STATUS_DISC 1 /* connect confirm & reset confirm */ +#define LLC_STATUS_FAILED 2 /* connect confirm & reset confirm */ +#define LLC_STATUS_IMPOSSIBLE 3 /* connect confirm */ +#define LLC_STATUS_RECEIVED 4 /* data conn */ +#define LLC_STATUS_REMOTE_BUSY 5 /* data conn */ +#define LLC_STATUS_REFUSE 6 /* data conn */ +#define LLC_STATUS_CONFLICT 7 /* disconnect conn */ +#define LLC_STATUS_RESET_DONE 8 /* */ + +/* Structures and types */ +/* SAP/MAC Address pair */ +struct llc_addr { + u8 lsap; + u8 mac[IFHWADDRLEN]; +}; + +/* Primitive-specific data */ +struct llc_prim_conn { + struct llc_addr saddr; /* used by request only */ + struct llc_addr daddr; /* used by request only */ + u8 status; /* reason for failure */ + u8 pri; /* service_class */ + struct net_device *dev; + struct sock *sk; /* returned from REQUEST */ + void *handler; /* upper layer use, + stored in llc_opt->handler */ + u16 link; + struct sk_buff *skb; /* received SABME */ +}; + +struct llc_prim_disc { + struct sock *sk; + u16 link; + u8 reason; /* not used by request */ +}; + +struct llc_prim_reset { + struct sock *sk; + u16 link; + u8 reason; /* used only by indicate */ +}; + +struct llc_prim_flow_ctrl { + struct sock *sk; + u16 link; + u32 amount; +}; + +struct llc_prim_data { + struct sock *sk; + u16 link; + u8 pri; + struct sk_buff *skb; /* pointer to frame */ + u8 status; /* reason */ +}; + + /* Sending data in connection-less mode */ +struct llc_prim_unit_data { + struct llc_addr saddr; + struct llc_addr daddr; + u8 pri; + struct sk_buff *skb; /* pointer to frame */ + u8 lfb; /* largest frame bit (TR) */ +}; + +struct llc_prim_xid { + struct llc_addr saddr; + struct llc_addr daddr; + u8 pri; + struct sk_buff *skb; +}; + +struct llc_prim_test { + struct llc_addr saddr; + struct llc_addr daddr; + u8 pri; + struct sk_buff *skb; /* pointer to frame */ +}; + +union llc_u_prim_data { + struct llc_prim_conn conn; + struct llc_prim_disc disc; + struct llc_prim_reset res; + struct llc_prim_flow_ctrl fc; + struct llc_prim_data data; /* data */ + struct llc_prim_unit_data udata; /* unit data */ + struct llc_prim_xid xid; + struct llc_prim_test test; +}; + +struct llc_sap; + +/* Information block passed with all called primitives */ +struct llc_prim_if_block { + struct llc_sap *sap; + u8 prim; + union llc_u_prim_data *data; +}; +typedef int (*llc_prim_call_t)(struct llc_prim_if_block *prim_if); + +extern struct llc_sap *llc_sap_open(llc_prim_call_t network_indicate, + llc_prim_call_t network_confirm, u8 lsap); +extern void llc_sap_close(struct llc_sap *sap);
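Usage implied by the interface above: a network layer binds to an LSAP by handing llc_sap_open() its indicate and confirm callbacks and releases it again with llc_sap_close(). The LSAP value, the callback bodies and the error code below are illustrative only:

	/* Sketch: bind a protocol to one link-layer SAP. */
	static int example_indicate(struct llc_prim_if_block *prim_if)
	{
		return 0;	/* accept all indications */
	}

	static int example_confirm(struct llc_prim_if_block *prim_if)
	{
		return 0;
	}

	static struct llc_sap *example_sap;

	static int example_bind(void)
	{
		example_sap = llc_sap_open(example_indicate,
					   example_confirm, 0x42);
		return example_sap ? 0 : -EBUSY;
	}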
+#endif /* LLC_IF_H */ diff -Nru a/include/net/llc_mac.h b/include/net/llc_mac.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_mac.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,23 @@ +#ifndef LLC_MAC_H +#define LLC_MAC_H +/* + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +/* Defines MAC-layer interface to LLC layer */ +extern int mac_send_pdu(struct sk_buff *skb); +extern int mac_indicate(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt); +extern struct net_device *mac_dev_peer(struct net_device *current_dev, + int type, u8 *mac); +extern int llc_pdu_router(struct llc_sap *sap, struct sock *sk, + struct sk_buff *skb, u8 type); +extern u16 lan_hdrs_init(struct sk_buff *skb, u8 *sa, u8 *da); +#endif /* LLC_MAC_H */ diff -Nru a/include/net/llc_main.h b/include/net/llc_main.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_main.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,68 @@ +#ifndef LLC_MAIN_H +#define LLC_MAIN_H +/* + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#define LLC_EVENT 1 +#define LLC_PACKET 2 +#define LLC_TYPE_1 1 +#define LLC_TYPE_2 2 +#define LLC_P_TIME 2 +#define LLC_ACK_TIME 3 +#define LLC_REJ_TIME 3 +#define LLC_BUSY_TIME 3 +#define LLC_SENDACK_TIME 50 +#define LLC_DEST_INVALID 0 /* Invalid LLC PDU type */ +#define LLC_DEST_SAP 1 /* Type 1 goes here */ +#define LLC_DEST_CONN 2 /* Type 2 goes here */ + +/* LLC Layer global default parameters */ + +#define LLC_GLOBAL_DEFAULT_MAX_NBR_SAPS 4 +#define LLC_GLOBAL_DEFAULT_MAX_NBR_CONNS 64 + +extern struct llc_prim_if_block llc_ind_prim, llc_cfm_prim; + +/* LLC station component (SAP and connection resource manager) */ +/* Station component; one per adapter */ +struct llc_station { + u8 state; /* state of station */ + u8 xid_r_count; /* XID response PDU counter */ + struct timer_list ack_timer; + u8 ack_tmr_running; /* 1 or 0 */ + u8 retry_count; + u8 maximum_retry; + u8 mac_sa[6]; /* MAC source address */ + struct { + spinlock_t lock; + struct list_head list; + } sap_list; /* list of related SAPs */ + struct { + spinlock_t lock; + struct list_head list; + } ev_q; /* events entering state mach. 
*/ + struct sk_buff_head mac_pdu_q; /* PDUs ready to send to MAC */ +}; +struct llc_station_state_ev; + +extern struct llc_sap *llc_sap_alloc(void); +extern void llc_sap_save(struct llc_sap *sap); +extern void llc_free_sap(struct llc_sap *sap); +extern struct llc_sap *llc_sap_find(u8 lsap); +extern struct llc_station *llc_station_get(void); +extern struct llc_station_state_ev * + llc_station_alloc_ev(struct llc_station *station); +extern void llc_station_send_ev(struct llc_station *station, + struct llc_station_state_ev *ev); +extern void llc_station_send_pdu(struct llc_station *station, + struct sk_buff *skb); +extern struct sk_buff *llc_alloc_frame(void); +#endif /* LLC_MAIN_H */ diff -Nru a/include/net/llc_pdu.h b/include/net/llc_pdu.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_pdu.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,255 @@ +#ifndef LLC_PDU_H +#define LLC_PDU_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +/* LLC PDU structure */ +/* Lengths of frame formats */ +#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ +#define LLC_PDU_LEN_S 4 +#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ +/* Known SAP addresses */ +#define LLC_GLOBAL_SAP 0xFF +#define LLC_NULL_SAP 0x00 /* not network-layer visible */ +#define LLC_MGMT_INDIV 0x02 /* station LLC mgmt indiv addr */ +#define LLC_MGMT_GRP 0x03 /* station LLC mgmt group addr */ +#define LLC_RDE_SAP 0xA6 /* route ... */ + +/* SAP field bit masks */ +#define LLC_ISO_RESERVED_SAP 0x02 +#define LLC_SAP_GROUP_DSAP 0x01 +#define LLC_SAP_RESP_SSAP 0x01 + +/* Group/individual DSAP indicator is DSAP field */ +#define LLC_PDU_GROUP_DSAP_MASK 0x01 +#define LLC_PDU_IS_GROUP_DSAP(pdu) \ + ((pdu->dsap & LLC_PDU_GROUP_DSAP_MASK) ? 0 : 1) +#define LLC_PDU_IS_INDIV_DSAP(pdu) \ + (!(pdu->dsap & LLC_PDU_GROUP_DSAP_MASK) ? 0 : 1) + +/* Command/response PDU indicator in SSAP field */ +#define LLC_PDU_CMD_RSP_MASK 0x01 +#define LLC_PDU_CMD 0 +#define LLC_PDU_RSP 1 +#define LLC_PDU_IS_CMD(pdu) ((pdu->ssap & LLC_PDU_RSP) ? 1 : 0) +#define LLC_PDU_IS_RSP(pdu) ((pdu->ssap & LLC_PDU_RSP) ? 0 : 1) + +/* Get PDU type from 2 lowest-order bits of control field first byte */ +#define LLC_PDU_TYPE_I_MASK 0x01 /* 16-bit control field */ +#define LLC_PDU_TYPE_S_MASK 0x03 +#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */ +#define LLC_PDU_TYPE_MASK 0x03 + +#define LLC_PDU_TYPE_I 0 /* first bit */ +#define LLC_PDU_TYPE_S 1 /* first two bits */ +#define LLC_PDU_TYPE_U 3 /* first two bits */ + +#define LLC_PDU_TYPE_IS_I(pdu) \ + ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 0 : 1) + +#define LLC_PDU_TYPE_IS_U(pdu) \ + (((pdu->ctrl_1 & LLC_PDU_TYPE_U_MASK) == LLC_PDU_TYPE_U) ? 0 : 1) + +#define LLC_PDU_TYPE_IS_S(pdu) \ + (((pdu->ctrl_1 & LLC_PDU_TYPE_S_MASK) == LLC_PDU_TYPE_S) ? 0 : 1) + +/* U-format PDU control field masks */ +#define LLC_U_PF_BIT_MASK 0x10 /* P/F bit mask */ +#define LLC_U_PF_IS_1(pdu) ((pdu->ctrl_1 & LLC_U_PF_BIT_MASK) ? 0 : 1) +#define LLC_U_PF_IS_0(pdu) ((!(pdu->ctrl_1 & LLC_U_PF_BIT_MASK)) ? 
1 : 0) + +#define LLC_U_PDU_CMD_MASK 0xEC /* cmd/rsp mask */ +#define LLC_U_PDU_CMD(pdu) (pdu->ctrl_1 & LLC_U_PDU_CMD_MASK) +#define LLC_U_PDU_RSP(pdu) (pdu->ctrl_1 & LLC_U_PDU_CMD_MASK) + +#define LLC_1_PDU_CMD_UI 0x00 /* Type 1 cmds/rsps */ +#define LLC_1_PDU_CMD_XID 0xAC +#define LLC_1_PDU_CMD_TEST 0xE0 + +#define LLC_2_PDU_CMD_SABME 0x6C /* Type 2 cmds/rsps */ +#define LLC_2_PDU_CMD_DISC 0x40 +#define LLC_2_PDU_RSP_UA 0x60 +#define LLC_2_PDU_RSP_DM 0x0C +#define LLC_2_PDU_RSP_FRMR 0x84 + +/* Type 1 operations */ + +/* XID information field bit masks */ + +/* LLC format identifier (byte 1) */ +#define LLC_XID_FMT_ID 0x81 /* first byte must be this */ + +/* LLC types/classes identifier (byte 2) */ +#define LLC_XID_CLASS_ZEROS_MASK 0xE0 /* these must be zeros */ +#define LLC_XID_CLASS_MASK 0x1F /* AND with byte to get below */ + +#define LLC_XID_NULL_CLASS_1 0x01 /* if NULL LSAP...use these */ +#define LLC_XID_NULL_CLASS_2 0x03 +#define LLC_XID_NULL_CLASS_3 0x05 +#define LLC_XID_NULL_CLASS_4 0x07 + +#define LLC_XID_NNULL_TYPE_1 0x01 /* if non-NULL LSAP...use these */ +#define LLC_XID_NNULL_TYPE_2 0x02 +#define LLC_XID_NNULL_TYPE_3 0x04 +#define LLC_XID_NNULL_TYPE_1_2 0x03 +#define LLC_XID_NNULL_TYPE_1_3 0x05 +#define LLC_XID_NNULL_TYPE_2_3 0x06 +#define LLC_XID_NNULL_ALL 0x07 + +/* Sender Receive Window (byte 3) */ +#define LLC_XID_RW_MASK 0xFE /* AND with value to get below */ + +#define LLC_XID_MIN_RW 0x02 /* lowest-order bit always zero */ + +/* Type 2 operations */ + +#define LLC_2_SEQ_NBR_MODULO ((u8) 128) + +/* I-PDU masks ('ctrl' is I-PDU control word) */ +#define LLC_I_GET_NS(pdu) (u8)((pdu->ctrl_1 & 0xFE) >> 1) +#define LLC_I_GET_NR(pdu) (u8)((pdu->ctrl_2 & 0xFE) >> 1) + +#define LLC_I_PF_BIT_MASK 0x01 + +#define LLC_I_PF_IS_0(pdu) ((!(pdu->ctrl_2 & LLC_I_PF_BIT_MASK)) ? 1 : 0) +#define LLC_I_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_I_PF_BIT_MASK) ? 1 : 0) + +/* S-PDU supervisory commands and responses */ + +#define LLC_S_PDU_CMD_MASK 0x0C +#define LLC_S_PDU_CMD(pdu) (pdu->ctrl_1 & LLC_S_PDU_CMD_MASK) +#define LLC_S_PDU_RSP(pdu) (pdu->ctrl_1 & LLC_S_PDU_CMD_MASK) + +#define LLC_2_PDU_CMD_RR 0x00 /* rx ready cmd */ +#define LLC_2_PDU_RSP_RR 0x00 /* rx ready rsp */ +#define LLC_2_PDU_CMD_REJ 0x08 /* reject PDU cmd */ +#define LLC_2_PDU_RSP_REJ 0x08 /* reject PDU rsp */ +#define LLC_2_PDU_CMD_RNR 0x04 /* rx not ready cmd */ +#define LLC_2_PDU_RSP_RNR 0x04 /* rx not ready rsp */ + +#define LLC_S_PF_BIT_MASK 0x01 +#define LLC_S_PF_IS_0(pdu) ((!(pdu->ctrl_2 & LLC_S_PF_BIT_MASK)) ? 1 : 0) +#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) + +#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) +#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) + +/* FRMR information field macros */ + +#define FRMR_INFO_LENGTH 5 /* 5 bytes of information */ + +/* + * info is pointer to FRMR info field structure; 'rej_ctrl' is byte pointer + * (if U-PDU) or word pointer to rejected PDU control field + */ +#define FRMR_INFO_SET_REJ_CNTRL(info,rej_ctrl) \ + info->rej_pdu_ctrl = ((*((u8 *) rej_ctrl) & \ + LLC_PDU_TYPE_U) != LLC_PDU_TYPE_U ?
\ + (u16)*((u16 *) rej_ctrl) : \ + (((u16) *((u8 *) rej_ctrl)) & 0x00FF)) + +/* + * Info is pointer to FRMR info field structure; 'vs' is a byte containing + * send state variable value in low-order 7 bits (insure the lowest-order + * bit remains zero (0)) + */ +#define FRMR_INFO_SET_Vs(info,vs) (info->curr_ssv = (((u8) vs) << 1)) +#define FRMR_INFO_SET_Vr(info,vr) (info->curr_rsv = (((u8) vr) << 1)) + +/* + * Info is pointer to FRMR info field structure; 'cr' is a byte containing + * the C/R bit value in the low-order bit + */ +#define FRMR_INFO_SET_C_R_BIT(info, cr) (info->curr_rsv |= (((u8) cr) & 0x01)) + +/* + * In the remaining five macros, 'info' is pointer to FRMR info field + * structure; 'ind' is a byte containing the bit value to set in the + * lowest-order bit + */ +#define FRMR_INFO_SET_INVALID_PDU_CTRL_IND(info, ind) \ + (info->ind_bits = ((info->ind_bits & 0xFE) | (((u8) ind) & 0x01))) + +#define FRMR_INFO_SET_INVALID_PDU_INFO_IND(info, ind) \ + (info->ind_bits = ( (info->ind_bits & 0xFD) | (((u8) ind) & 0x02))) + +#define FRMR_INFO_SET_PDU_INFO_2LONG_IND(info, ind) \ + (info->ind_bits = ( (info->ind_bits & 0xFB) | (((u8) ind) & 0x04))) + +#define FRMR_INFO_SET_PDU_INVALID_Nr_IND(info, ind) \ + (info->ind_bits = ( (info->ind_bits & 0xF7) | (((u8) ind) & 0x08))) + +#define FRMR_INFO_SET_PDU_INVALID_Ns_IND(info, ind) \ + (info->ind_bits = ( (info->ind_bits & 0xEF) | (((u8) ind) & 0x10))) + +/* Sequence-numbered PDU format (4 bytes in length) */ +typedef struct llc_pdu_sn { + u8 dsap; + u8 ssap; + u8 ctrl_1; + u8 ctrl_2; +} llc_pdu_sn_t; + +/* Un-numbered PDU format (3 bytes in length) */ +typedef struct llc_pdu_un { + u8 dsap; + u8 ssap; + u8 ctrl_1; +} llc_pdu_un_t; + +/* LLC Type 1 XID command/response information fields format */ +typedef struct llc_xid_info { + u8 fmt_id; /* always 0x81 for LLC */ + u8 type; /* different if NULL/non-NULL LSAP */ + u8 rw; /* sender receive window */ +} llc_xid_info_t; + +/* LLC Type 2 FRMR response information field format */ +typedef struct llc_frmr_info { + u16 rej_pdu_ctrl; /* bits 1-8 if U-PDU */ + u8 curr_ssv; /* current send state variable val */ + u8 curr_rsv; /* current receive state variable */ + u8 ind_bits; /* indicator bits set with macro */ +} llc_frmr_info_t; + +extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); +extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); +extern int llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit); +extern int llc_pdu_decode_cr_bit(struct sk_buff *skb, u8 *cr_bit); +extern int llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa); +extern int llc_pdu_decode_da(struct sk_buff *skb, u8 *ds); +extern int llc_pdu_decode_dsap(struct sk_buff *skb, u8 *dsap); +extern int llc_pdu_decode_ssap(struct sk_buff *skb, u8 *ssap); +extern int llc_decode_pdu_type(struct sk_buff *skb, u8 *destination); +extern void llc_pdu_header_init(struct sk_buff *skb, u8 pdu_type, u8 ssap, + u8 dsap, u8 cr); +extern int llc_pdu_init_as_ui_cmd(struct sk_buff *skb); +extern int llc_pdu_init_as_xid_cmd(struct sk_buff *skb, u8 svcs_supported, + u8 rx_window); +extern int llc_pdu_init_as_test_cmd(struct sk_buff *skb); +extern int llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit); +extern int llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr); +extern int llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); +extern int llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); +extern int llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); +extern int
llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit); +extern int llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit); +extern int llc_pdu_init_as_xid_rsp(struct sk_buff *skb, u8 svcs_supported, + u8 rx_window); +extern int llc_pdu_init_as_test_rsp(struct sk_buff *skb, + struct sk_buff *ev_skb); +extern int llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, llc_pdu_sn_t *prev_pdu, + u8 f_bit, u8 vs, u8 vr, u8 vzyxw); +extern int llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); +extern int llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); +extern int llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); +extern int llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit); +#endif /* LLC_PDU_H */ diff -Nru a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_s_ac.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,47 @@ +#ifndef LLC_S_AC_H +#define LLC_S_AC_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +/* SAP component actions */ +#define SAP_ACT_UNITDATA_IND 1 +#define SAP_ACT_SEND_UI 2 +#define SAP_ACT_SEND_XID_C 3 +#define SAP_ACT_SEND_XID_R 4 +#define SAP_ACT_SEND_TEST_C 5 +#define SAP_ACT_SEND_TEST_R 6 +#define SAP_ACT_REPORT_STATUS 7 +#define SAP_ACT_XID_IND 8 +#define SAP_ACT_TEST_IND 9 + +/* All action functions must look like this */ +typedef int (*llc_sap_action_t)(struct llc_sap *sap, + struct llc_sap_state_ev *ev); + +extern int llc_sap_action_unitdata_ind(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_send_ui(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_send_xid_c(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_send_xid_r(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_send_test_c(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_send_test_r(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_report_status(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_xid_ind(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_action_test_ind(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +#endif /* LLC_S_AC_H */ diff -Nru a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_s_ev.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,101 @@ +#ifndef LLC_S_EV_H +#define LLC_S_EV_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. 
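
All SAP action routines share the llc_sap_action_t signature above precisely so that a transition can carry an array of them. A plausible dispatch helper, as a sketch only; the NULL-terminated array convention is an assumption here:

static int llc_sap_run_actions(struct llc_sap *sap,
			       struct llc_sap_state_ev *ev,
			       llc_sap_action_t *actions)
{
	int rc = 0;

	/* fire every action bound to the matched transition */
	for (; *actions; actions++)
		rc |= (*actions)(sap, ev);
	return rc;
}
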
+ */ +/* Defines SAP component events */ +/* Types of events (possible values in 'ev->type') */ +#define LLC_SAP_EV_TYPE_SIMPLE 1 +#define LLC_SAP_EV_TYPE_CONDITION 2 +#define LLC_SAP_EV_TYPE_PRIM 3 +#define LLC_SAP_EV_TYPE_PDU 4 /* command/response PDU */ +#define LLC_SAP_EV_TYPE_ACK_TMR 5 +#define LLC_SAP_EV_TYPE_RPT_STATUS 6 + +#define LLC_SAP_EV_ACTIVATION_REQ 1 +#define LLC_SAP_EV_RX_UI 2 +#define LLC_SAP_EV_UNITDATA_REQ 3 +#define LLC_SAP_EV_XID_REQ 4 +#define LLC_SAP_EV_RX_XID_C 5 +#define LLC_SAP_EV_RX_XID_R 6 +#define LLC_SAP_EV_TEST_REQ 7 +#define LLC_SAP_EV_RX_TEST_C 8 +#define LLC_SAP_EV_RX_TEST_R 9 +#define LLC_SAP_EV_DEACTIVATION_REQ 10 + +/* Interfaces for various types of supported events */ +struct llc_sap_ev_simple_if { + u8 ev; +}; + +struct llc_prim_if_block; + +struct llc_sap_ev_prim_if { + u8 prim; /* connect, disconnect, reset, ... */ + u8 type; /* request, indicate, response, conf */ + struct llc_prim_if_block *data; +}; + +struct llc_sap_ev_pdu_if { + u8 ev; + u8 reason; + struct sk_buff *skb; +}; + +struct llc_sap_ev_tmr_if { + void *timer_specific; +}; + +struct llc_sap_ev_rpt_sts_if { + u8 status; +}; + +union llc_sap_ev_if { + struct llc_sap_ev_simple_if a; /* 'a' for simple, easy ... */ + struct llc_sap_ev_prim_if prim; + struct llc_sap_ev_pdu_if pdu; + struct llc_sap_ev_tmr_if tmr; + struct llc_sap_ev_rpt_sts_if rsts; /* report status */ +}; + +struct llc_prim_if_block; + +struct llc_sap_state_ev { + u8 type; + u8 ind_cfm_flag; + struct llc_prim_if_block *prim; + union llc_sap_ev_if data; +}; + +struct llc_sap; + +typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct llc_sap_state_ev *ev); + +extern int llc_sap_ev_activation_req(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct llc_sap_state_ev *ev); +extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_xid_req(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_test_req(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +extern int llc_sap_ev_deactivation_req(struct llc_sap *sap, + struct llc_sap_state_ev *ev); +#endif /* LLC_S_EV_H */ diff -Nru a/include/net/llc_s_st.h b/include/net/llc_s_st.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_s_st.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,34 @@ +#ifndef LLC_S_ST_H +#define LLC_S_ST_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. 
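
The union keeps a single event object usable for every event class; ev->type says which member is live. An illustrative constructor for the simplest class (no such helper exists in the patch itself):

static void llc_sap_fill_simple_ev(struct llc_sap_state_ev *ev, u8 which)
{
	ev->type      = LLC_SAP_EV_TYPE_SIMPLE;
	ev->data.a.ev = which;	/* e.g. LLC_SAP_EV_ACTIVATION_REQ */
}
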
+ */ +/* Defines SAP component states */ + +#define LLC_SAP_STATE_INACTIVE 1 +#define LLC_SAP_STATE_ACTIVE 2 +#define LLC_NBR_SAP_STATES 2 /* size of state table */ +/* structures and types */ +/* SAP state table structure */ +struct llc_sap_state_trans { + llc_sap_ev_t ev; + u8 next_state; + llc_sap_action_t *ev_actions; +}; + +struct llc_sap_state { + u8 curr_state; + struct llc_sap_state_trans **transitions; +}; + +/* only access to SAP state table */ +extern struct llc_sap_state llc_sap_state_table[LLC_NBR_SAP_STATES]; +#endif /* LLC_S_ST_H */ diff -Nru a/include/net/llc_sap.h b/include/net/llc_sap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_sap.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,42 @@ +#ifndef LLC_SAP_H +#define LLC_SAP_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +/* Defines the SAP component */ +struct llc_sap { + u8 state; + struct llc_station *parent_station; + u8 p_bit; /* only lowest-order bit used */ + u8 f_bit; /* only lowest-order bit used */ + llc_prim_call_t req; /* provided by LLC layer */ + llc_prim_call_t resp; /* provided by LLC layer */ + llc_prim_call_t ind; /* provided by network layer */ + llc_prim_call_t conf; /* provided by network layer */ + struct llc_addr laddr; /* SAP value in this 'lsap' */ + struct list_head node; /* entry in station sap_list */ + struct { + spinlock_t lock; + struct list_head list; + } sk_list; /* LLC sockets this one manages */ + struct sk_buff_head mac_pdu_q; /* PDUs ready to send to MAC */ +}; +struct llc_sap_state_ev; + +extern void llc_sap_assign_sock(struct llc_sap *sap, struct sock *sk); +extern void llc_sap_unassign_sock(struct llc_sap *sap, struct sock *sk); +extern void llc_sap_send_ev(struct llc_sap *sap, struct llc_sap_state_ev *ev); +extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb, + struct llc_sap_state_ev *ev); +extern void llc_sap_send_pdu(struct llc_sap *sap, struct sk_buff *skb); +extern struct llc_sap_state_ev *llc_sap_alloc_ev(struct llc_sap *sap); +#endif /* LLC_SAP_H */ diff -Nru a/include/net/llc_stat.h b/include/net/llc_stat.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/net/llc_stat.h Tue Jun 18 19:12:03 2002 @@ -0,0 +1,35 @@ +#ifndef LLC_STAT_H +#define LLC_STAT_H +/* + * Copyright (c) 1997 by Procom Technology,Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. 
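
The SAP and station state tables have the same shape: index by current state, probe candidate transitions with their recognizer, then run the bound actions. One plausible stepping loop for the SAP side, assuming states are numbered from 1 as the defines suggest, transition lists are NULL-terminated, and recognizers return 0 on a match; llc_sap_run_actions() is the helper sketched earlier:

static int llc_sap_step(struct llc_sap *sap, struct llc_sap_state_ev *ev)
{
	struct llc_sap_state *st = &llc_sap_state_table[sap->state - 1];
	struct llc_sap_state_trans **t;

	for (t = st->transitions; *t; t++) {
		if (!(*t)->ev(sap, ev)) {	/* recognizer matched */
			llc_sap_run_actions(sap, ev, (*t)->ev_actions);
			sap->state = (*t)->next_state;
			return 0;
		}
	}
	return 1;	/* event not expected in this state */
}
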
+ */ +/* Station component state table */ +/* Station component states */ +#define LLC_STATION_STATE_DOWN 1 /* initial state */ +#define LLC_STATION_STATE_DUP_ADDR_CHK 2 +#define LLC_STATION_STATE_UP 3 + +#define LLC_NBR_STATION_STATES 3 /* size of state table */ + +/* Station component state table structure */ +struct llc_station_state_trans { + llc_station_ev_t ev; + u8 next_state; + llc_station_action_t *ev_actions; +}; + +struct llc_station_state { + u8 curr_state; + struct llc_station_state_trans **transitions; +}; + +extern struct llc_station_state llc_station_state_table[LLC_NBR_STATION_STATES]; +#endif /* LLC_STAT_H */ diff -Nru a/include/net/p8022.h b/include/net/p8022.h --- a/include/net/p8022.h Tue Jun 18 19:12:02 2002 +++ b/include/net/p8022.h Tue Jun 18 19:12:02 2002 @@ -1,7 +1,9 @@ #ifndef _NET_P8022_H #define _NET_P8022_H - -extern struct datalink_proto *register_8022_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *)); +extern struct datalink_proto *register_8022_client(unsigned char type, + int (*rcvfunc) + (struct sk_buff *, + struct net_device *, + struct packet_type *)); extern void unregister_8022_client(unsigned char type); - #endif diff -Nru a/init/main.c b/init/main.c --- a/init/main.c Tue Jun 18 19:12:01 2002 +++ b/init/main.c Tue Jun 18 19:12:01 2002 @@ -34,7 +34,6 @@ #if defined(CONFIG_ARCH_S390) #include -#include #endif #ifdef CONFIG_MTRR @@ -393,9 +392,6 @@ buffer_init(); vfs_caches_init(mempages); radix_tree_init(); -#if defined(CONFIG_ARCH_S390) - ccwcache_init(); -#endif signals_init(); #ifdef CONFIG_PROC_FS proc_root_init(); diff -Nru a/kernel/context.c b/kernel/context.c --- a/kernel/context.c Tue Jun 18 19:12:02 2002 +++ b/kernel/context.c Tue Jun 18 19:12:02 2002 @@ -20,6 +20,7 @@ #include #include #include +#include static DECLARE_TASK_QUEUE(tq_context); static DECLARE_WAIT_QUEUE_HEAD(context_task_wq); diff -Nru a/kernel/fork.c b/kernel/fork.c --- a/kernel/fork.c Tue Jun 18 19:12:01 2002 +++ b/kernel/fork.c Tue Jun 18 19:12:01 2002 @@ -693,9 +693,8 @@ int i; /* ?? should we just memset this ?? 
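
This fork.c hunk is one instance of the conversion that runs through the whole patch: smp_num_cpus plus cpu_logical_map() gave a dense 0..n-1 numbering, while the new code iterates raw processor ids up to NR_CPUS and skips holes with cpu_online(). Schematically (wipe_stats() is a stand-in, not a real function; and since this particular loop only zeroes two arrays, the answer to the question in the comment is that a memset of each array would indeed do):

	int i;

	/* old style: dense logical indices */
	for (i = 0; i < smp_num_cpus; i++)
		wipe_stats(cpu_logical_map(i));

	/* new style: sparse ids, skipping offline CPUs where it matters */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		wipe_stats(i);
	}
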
*/ - for(i = 0; i < smp_num_cpus; i++) - p->per_cpu_utime[cpu_logical_map(i)] = - p->per_cpu_stime[cpu_logical_map(i)] = 0; + for(i = 0; i < NR_CPUS; i++) + p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0; spin_lock_init(&p->sigmask_lock); } #endif diff -Nru a/kernel/kmod.c b/kernel/kmod.c --- a/kernel/kmod.c Tue Jun 18 19:12:02 2002 +++ b/kernel/kmod.c Tue Jun 18 19:12:02 2002 @@ -28,6 +28,7 @@ #include #include #include +#include #include diff -Nru a/kernel/ksyms.c b/kernel/ksyms.c --- a/kernel/ksyms.c Tue Jun 18 19:12:01 2002 +++ b/kernel/ksyms.c Tue Jun 18 19:12:01 2002 @@ -120,7 +120,7 @@ EXPORT_SYMBOL(find_vma); EXPORT_SYMBOL(get_unmapped_area); EXPORT_SYMBOL(init_mm); -EXPORT_SYMBOL(create_bounce); +EXPORT_SYMBOL(blk_queue_bounce); #ifdef CONFIG_HIGHMEM EXPORT_SYMBOL(kmap_high); EXPORT_SYMBOL(kunmap_high); @@ -148,7 +148,6 @@ EXPORT_SYMBOL(follow_down); EXPORT_SYMBOL(lookup_mnt); EXPORT_SYMBOL(path_lookup); -EXPORT_SYMBOL(path_init); EXPORT_SYMBOL(path_walk); EXPORT_SYMBOL(path_release); EXPORT_SYMBOL(__user_walk); @@ -388,9 +387,7 @@ EXPORT_SYMBOL(del_timer); EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(free_irq); -#if !defined(CONFIG_ARCH_S390) -EXPORT_SYMBOL(irq_stat); /* No separate irq_stat for s390, it is part of PSA */ -#endif +EXPORT_SYMBOL(irq_stat); /* waitqueue handling */ EXPORT_SYMBOL(add_wait_queue); @@ -551,7 +548,7 @@ EXPORT_SYMBOL(fsync_buffers_list); EXPORT_SYMBOL(clear_inode); EXPORT_SYMBOL(init_special_inode); -EXPORT_SYMBOL(__get_hash_table); +EXPORT_SYMBOL(__find_get_block); EXPORT_SYMBOL(new_inode); EXPORT_SYMBOL(__insert_inode_hash); EXPORT_SYMBOL(remove_inode_hash); @@ -559,7 +556,6 @@ EXPORT_SYMBOL(make_bad_inode); EXPORT_SYMBOL(is_bad_inode); EXPORT_SYMBOL(event); -EXPORT_SYMBOL(brw_page); #ifdef CONFIG_UID16 EXPORT_SYMBOL(overflowuid); diff -Nru a/kernel/printk.c b/kernel/printk.c --- a/kernel/printk.c Tue Jun 18 19:12:02 2002 +++ b/kernel/printk.c Tue Jun 18 19:12:02 2002 @@ -553,7 +553,14 @@ { struct console *c; - acquire_console_sem(); + /* + * Try to get the console semaphore. If someone else owns it + * we have to return without unblanking because console_unblank + * may be called in interrupt context. + */ + if (down_trylock(&console_sem) != 0) + return; + console_may_schedule = 0; for (c = console_drivers; c != NULL; c = c->next) if ((c->flags & CON_ENABLED) && c->unblank) c->unblank(); diff -Nru a/kernel/sched.c b/kernel/sched.c --- a/kernel/sched.c Tue Jun 18 19:12:02 2002 +++ b/kernel/sched.c Tue Jun 18 19:12:02 2002 @@ -6,14 +6,14 @@ * Copyright (C) 1991-2002 Linus Torvalds * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and - * make semaphores SMP safe + * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: - * hybrid priority-list and round-robin design with - * an array-switch method of distributing timeslices - * and per-CPU runqueues. Additional code by Davide - * Libenzi, Robert Love, and Rusty Russell. + * hybrid priority-list and round-robin design with + * an array-switch method of distributing timeslices + * and per-CPU runqueues. Additional code by Davide + * Libenzi, Robert Love, and Rusty Russell. */ #include @@ -180,11 +180,14 @@ /* * rq_lock - lock a given runqueue and disable interrupts. 
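
The console_unblank() fix above is the standard shape for touching a semaphore-guarded resource from a context that cannot sleep. Generalized, with illustrative names:

static DECLARE_MUTEX(res_sem);

void poke_resource(void)		/* may run in interrupt context */
{
	if (down_trylock(&res_sem))
		return;			/* someone else holds it; must not block */
	touch_resource();		/* hypothetical worker */
	up(&res_sem);
}
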
*/ -static inline runqueue_t *rq_lock(runqueue_t *rq) +static inline runqueue_t *this_rq_lock(void) { + runqueue_t *rq; + local_irq_disable(); rq = this_rq(); spin_lock(&rq->lock); + return rq; } @@ -358,12 +361,17 @@ rq = task_rq_lock(p, &flags); old_state = p->state; if (!p->array) { - if (unlikely(sync && (rq->curr != p))) { - if (p->thread_info->cpu != smp_processor_id()) { - p->thread_info->cpu = smp_processor_id(); - task_rq_unlock(rq, &flags); - goto repeat_lock_task; - } + /* + * Fast-migrate the task if it's not running or runnable + * currently. Do not violate hard affinity. + */ + if (unlikely(sync && (rq->curr != p) && + (p->thread_info->cpu != smp_processor_id()) && + (p->cpus_allowed & (1UL << smp_processor_id())))) { + + p->thread_info->cpu = smp_processor_id(); + task_rq_unlock(rq, &flags); + goto repeat_lock_task; } if (old_state == TASK_UNINTERRUPTIBLE) rq->nr_uninterruptible--; @@ -388,9 +396,7 @@ void wake_up_forked_process(task_t * p) { - runqueue_t *rq; - - rq = rq_lock(rq); + runqueue_t *rq = this_rq_lock(); p->state = TASK_RUNNING; if (!rt_task(p)) { @@ -469,8 +475,8 @@ { unsigned long i, sum = 0; - for (i = 0; i < smp_num_cpus; i++) - sum += cpu_rq(cpu_logical_map(i))->nr_running; + for (i = 0; i < NR_CPUS; i++) + sum += cpu_rq(i)->nr_running; return sum; } @@ -479,8 +485,8 @@ { unsigned long i, sum = 0; - for (i = 0; i < smp_num_cpus; i++) - sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible; + for (i = 0; i < NR_CPUS; i++) + sum += cpu_rq(i)->nr_uninterruptible; return sum; } @@ -489,8 +495,8 @@ { unsigned long i, sum = 0; - for (i = 0; i < smp_num_cpus; i++) - sum += cpu_rq(cpu_logical_map(i))->nr_switches; + for (i = 0; i < NR_CPUS; i++) + sum += cpu_rq(i)->nr_switches; return sum; } @@ -565,15 +571,16 @@ busiest = NULL; max_load = 1; - for (i = 0; i < smp_num_cpus; i++) { - int logical = cpu_logical_map(i); + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; - rq_src = cpu_rq(logical); - if (idle || (rq_src->nr_running < this_rq->prev_nr_running[logical])) + rq_src = cpu_rq(i); + if (idle || (rq_src->nr_running < this_rq->prev_nr_running[i])) load = rq_src->nr_running; else - load = this_rq->prev_nr_running[logical]; - this_rq->prev_nr_running[logical] = rq_src->nr_running; + load = this_rq->prev_nr_running[i]; + this_rq->prev_nr_running[i] = rq_src->nr_running; if ((load > max_load) && (rq_src != this_rq)) { busiest = rq_src; @@ -797,7 +804,8 @@ list_t *queue; int idx; - BUG_ON(in_interrupt()); + if (unlikely(in_interrupt())) + BUG(); #if CONFIG_DEBUG_HIGHMEM check_highmem_ptes(); @@ -905,6 +913,12 @@ } #endif /* CONFIG_PREEMPT */ +int default_wake_function(wait_queue_t *curr, unsigned mode, int sync) +{ + task_t *p = curr->task; + return ((p->state & mode) && try_to_wake_up(p, sync)); +} + /* * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just * wake everything up. 
If it's an exclusive wakeup (nr_exclusive == small +ve @@ -916,18 +930,17 @@ */ static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync) { - struct list_head *tmp; - unsigned int state; - wait_queue_t *curr; - task_t *p; + struct list_head *tmp, *next; - list_for_each(tmp, &q->task_list) { + list_for_each_safe(tmp, next, &q->task_list) { + wait_queue_t *curr; + unsigned flags; curr = list_entry(tmp, wait_queue_t, task_list); - p = curr->task; - state = p->state; - if ((state & mode) && try_to_wake_up(p, sync) && - ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)) - break; + flags = curr->flags; + if (curr->func(curr, mode, sync) && + (flags & WQ_FLAG_EXCLUSIVE) && + !--nr_exclusive) + break; } } @@ -1158,13 +1171,12 @@ static int setscheduler(pid_t pid, int policy, struct sched_param *param) { struct sched_param lp; + int retval = -EINVAL; prio_array_t *array; unsigned long flags; runqueue_t *rq; - int retval; task_t *p; - retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; @@ -1251,10 +1263,9 @@ asmlinkage long sys_sched_getscheduler(pid_t pid) { + int retval = -EINVAL; task_t *p; - int retval; - retval = -EINVAL; if (pid < 0) goto out_nounlock; @@ -1271,11 +1282,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param) { - task_t *p; struct sched_param lp; - int retval; + int retval = -EINVAL; + task_t *p; - retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; @@ -1310,8 +1320,8 @@ unsigned long *user_mask_ptr) { unsigned long new_mask; - task_t *p; int retval; + task_t *p; if (len < sizeof(new_mask)) return -EINVAL; @@ -1361,13 +1371,12 @@ asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long *user_mask_ptr) { - unsigned long mask; unsigned int real_len; - task_t *p; + unsigned long mask; int retval; + task_t *p; real_len = sizeof(mask); - if (len < real_len) return -EINVAL; @@ -1392,25 +1401,35 @@ asmlinkage long sys_sched_yield(void) { - runqueue_t *rq; - prio_array_t *array; - - rq = rq_lock(rq); + runqueue_t *rq = this_rq_lock(); + prio_array_t *array = current->array; /* - * Decrease the yielding task's priority by one, to avoid - * livelocks. This priority loss is temporary, it's recovered - * once the current timeslice expires. + * There are three levels of how a yielding task will give up + * the current CPU: * - * If priority is already MAX_PRIO-1 then we still - * roundrobin the task within the runlist. - */ - array = current->array; - /* - * If the task has reached maximum priority (or is a RT task) - * then just requeue the task to the end of the runqueue: + * #1 - it decreases its priority by one. This priority loss is + * temporary, it's recovered once the current timeslice + * expires. + * + * #2 - once it has reached the lowest priority level, + * it will give up timeslices one by one. (We do not + * want to give them up all at once, it's gradual, + * to protect the casual yield()er.) + * + * #3 - once all timeslices are gone we put the process into + * the expired array. + * + * (special rule: RT tasks do not lose any priority, they just + * roundrobin on their current priority level.) 
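
The __wake_up_common() rewrite above calls through curr->func instead of waking the task directly, which turns wait-queue entries into general callbacks; default_wake_function() restores the old behaviour. A sketch of a non-default waiter, assuming the new func member of wait_queue_t may be assigned directly:

static unsigned long wakeup_count;	/* hypothetical statistic */

static int counting_wake_function(wait_queue_t *curr, unsigned mode, int sync)
{
	wakeup_count++;
	return default_wake_function(curr, mode, sync);
}

static void add_counting_waiter(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->task = current;
	wait->func = counting_wake_function;
	add_wait_queue(q, wait);
}
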
*/ - if (likely(current->prio == MAX_PRIO-1 || rt_task(current))) { + if (likely(current->prio == MAX_PRIO-1)) { + if (current->time_slice <= 1) { + dequeue_task(current, rq->active); + enqueue_task(current, rq->expired); + } else + current->time_slice--; + } else if (unlikely(rt_task(current))) { list_del(¤t->run_list); list_add_tail(¤t->run_list, array->queue + current->prio); } else { @@ -1461,9 +1480,9 @@ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval) { + int retval = -EINVAL; struct timespec t; task_t *p; - int retval = -EINVAL; if (pid < 0) goto out_nounlock; @@ -1758,7 +1777,7 @@ static int migration_thread(void * bind_cpu) { - int cpu = cpu_logical_map((int) (long) bind_cpu); + int cpu = (int) (long) bind_cpu; struct sched_param param = { sched_priority: MAX_RT_PRIO-1 }; runqueue_t *rq; int ret; @@ -1766,12 +1785,15 @@ daemonize(); sigfillset(¤t->blocked); set_fs(KERNEL_DS); + + /* FIXME: First CPU may not be zero, but this crap code + vanishes with hotplug cpu patch anyway. --RR */ /* * The first migration thread is started on CPU #0. This one can * migrate the other migration threads to their destination CPUs. */ if (cpu != 0) { - while (!cpu_rq(cpu_logical_map(0))->migration_thread) + while (!cpu_rq(0)->migration_thread) yield(); set_cpus_allowed(current, 1UL << cpu); } @@ -1835,16 +1857,21 @@ { int cpu; - current->cpus_allowed = 1UL << cpu_logical_map(0); - for (cpu = 0; cpu < smp_num_cpus; cpu++) { + current->cpus_allowed = 1UL << 0; + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu)) + continue; if (kernel_thread(migration_thread, (void *) (long) cpu, CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) BUG(); } current->cpus_allowed = -1L; - for (cpu = 0; cpu < smp_num_cpus; cpu++) - while (!cpu_rq(cpu_logical_map(cpu))->migration_thread) + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu)) + continue; + while (!cpu_rq(cpu)->migration_thread) schedule_timeout(2); + } } -#endif /* CONFIG_SMP */ +#endif diff -Nru a/kernel/signal.c b/kernel/signal.c --- a/kernel/signal.c Tue Jun 18 19:12:02 2002 +++ b/kernel/signal.c Tue Jun 18 19:12:02 2002 @@ -1043,37 +1043,58 @@ int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from) { + int err; + if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; if (from->si_code < 0) - return __copy_to_user(to, from, sizeof(siginfo_t)); - else { - int err; - - /* If you change siginfo_t structure, please be sure - this code is fixed accordingly. - It should never copy any pad contained in the structure - to avoid security leaks, but must copy the generic - 3 ints plus the relevant union member. */ - err = __put_user(from->si_signo, &to->si_signo); - err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); - /* First 32bits of unions are always present. */ + return __copy_to_user(to, from, sizeof(siginfo_t)) + ? -EFAULT : 0; + /* + * If you change siginfo_t structure, please be sure + * this code is fixed accordingly. + * It should never copy any pad contained in the structure + * to avoid security leaks, but must copy the generic + * 3 ints plus the relevant union member. 
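
The point of the per-member copy is that only the fields meaningful for the given si_code ever reach userspace; padding never leaves the kernel. From the receiving side it looks like this (ordinary userspace C, illustration only):

#include <signal.h>
#include <stdio.h>

static void on_child(int sig, siginfo_t *si, void *ctx)
{
	/* __SI_CHLD: si_pid, si_uid, si_status, si_utime, si_stime are valid */
	printf("child %d exited, status %d\n", (int) si->si_pid, si->si_status);
}

int main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = on_child;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGCHLD, &sa, NULL);
}
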
+ */ + err = __put_user(from->si_signo, &to->si_signo); + err |= __put_user(from->si_errno, &to->si_errno); + err |= __put_user((short)from->si_code, &to->si_code); + switch (from->si_code & __SI_MASK) { + case __SI_KILL: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + break; + case __SI_TIMER: + err |= __put_user(from->si_timer1, &to->si_timer1); + err |= __put_user(from->si_timer2, &to->si_timer2); + break; + case __SI_POLL: + err |= __put_user(from->si_band, &to->si_band); + err |= __put_user(from->si_fd, &to->si_fd); + break; + case __SI_FAULT: + err |= __put_user(from->si_addr, &to->si_addr); + break; + case __SI_CHLD: + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + err |= __put_user(from->si_status, &to->si_status); + err |= __put_user(from->si_utime, &to->si_utime); + err |= __put_user(from->si_stime, &to->si_stime); + break; + case __SI_RT: /* This is not generated by the kernel as of now. */ + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); + err |= __put_user(from->si_int, &to->si_int); + err |= __put_user(from->si_ptr, &to->si_ptr); + break; + default: /* this is just in case for now ... */ err |= __put_user(from->si_pid, &to->si_pid); - switch (from->si_code >> 16) { - case __SI_FAULT >> 16: - break; - case __SI_CHLD >> 16: - err |= __put_user(from->si_utime, &to->si_utime); - err |= __put_user(from->si_stime, &to->si_stime); - err |= __put_user(from->si_status, &to->si_status); - default: - err |= __put_user(from->si_uid, &to->si_uid); - break; - /* case __SI_RT: This is not generated by the kernel as of now. */ - } - return err; + err |= __put_user(from->si_uid, &to->si_uid); + break; } + return err; } #endif diff -Nru a/kernel/softirq.c b/kernel/softirq.c --- a/kernel/softirq.c Tue Jun 18 19:12:02 2002 +++ b/kernel/softirq.c Tue Jun 18 19:12:02 2002 @@ -363,8 +363,7 @@ static int ksoftirqd(void * __bind_cpu) { - int bind_cpu = (int) (long) __bind_cpu; - int cpu = cpu_logical_map(bind_cpu); + int cpu = (int) (long) __bind_cpu; daemonize(); set_user_nice(current, 19); @@ -376,7 +375,7 @@ if (smp_processor_id() != cpu) BUG(); - sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu); + sprintf(current->comm, "ksoftirqd_CPU%d", cpu); __set_current_state(TASK_INTERRUPTIBLE); mb(); @@ -402,13 +401,16 @@ { int cpu; - for (cpu = 0; cpu < smp_num_cpus; cpu++) + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu)) + continue; if (kernel_thread(ksoftirqd, (void *) (long) cpu, CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) printk("spawn_ksoftirqd() failed for cpu %d\n", cpu); else - while (!ksoftirqd_task(cpu_logical_map(cpu))) + while (!ksoftirqd_task(cpu)) yield(); + } return 0; } diff -Nru a/kernel/suspend.c b/kernel/suspend.c --- a/kernel/suspend.c Tue Jun 18 19:12:02 2002 +++ b/kernel/suspend.c Tue Jun 18 19:12:02 2002 @@ -281,7 +281,8 @@ sh->num_physpages = num_physpages; strncpy(sh->machine, system_utsname.machine, 8); strncpy(sh->version, system_utsname.version, 20); - sh->num_cpus = smp_num_cpus; + /* FIXME: Is this bogus? 
--RR */ + sh->num_cpus = num_online_cpus(); sh->page_size = PAGE_SIZE; sh->suspend_pagedir = pagedir_nosave; if (pagedir_save != pagedir_nosave) @@ -319,14 +320,15 @@ { swp_entry_t entry; union diskpage *cur; - - cur = (union diskpage *)get_free_page(GFP_ATOMIC); - if (!cur) + struct page *page; + + page = alloc_page(GFP_ATOMIC); + if (!page) panic("Out of memory in mark_swapfiles"); + cur = page_address(page); /* XXX: this is dirty hack to get first page of swap file */ entry = swp_entry(root_swap, 0); - lock_page(virt_to_page((unsigned long)cur)); - rw_swap_page_nolock(READ, entry, (char *) cur); + rw_swap_page_sync(READ, entry, page); if (mode == MARK_SWAP_RESUME) { if (!memcmp("SUSP1R",cur->swh.magic.magic,6)) @@ -344,10 +346,8 @@ cur->link.next = prev; /* prev is the first/last swap page of the resume area */ /* link.next lies *no more* in last 4 bytes of magic */ } - lock_page(virt_to_page((unsigned long)cur)); - rw_swap_page_nolock(WRITE, entry, (char *)cur); - - free_page((unsigned long)cur); + rw_swap_page_sync(WRITE, entry, page); + __free_page(page); } static void read_swapfiles(void) /* This is called before saving image */ @@ -408,6 +408,7 @@ int nr_pgdir_pages = SUSPEND_PD_PAGES(nr_copy_pages); union diskpage *cur, *buffer = (union diskpage *)get_free_page(GFP_ATOMIC); unsigned long address; + struct page *page; PRINTS( "Writing data to swap (%d pages): ", nr_copy_pages ); for (i=0; iaddress; - lock_page(virt_to_page(address)); - { - long dummy1; - struct inode *suspend_file; - get_swaphandle_info(entry, &dummy1, &suspend_file); - } - rw_swap_page_nolock(WRITE, entry, (char *) address); + page = virt_to_page(address); + rw_swap_page_sync(WRITE, entry, page); (pagedir_nosave+i)->swap_address = entry; } PRINTK(" done\n"); @@ -451,8 +447,8 @@ if (PAGE_SIZE % sizeof(struct pbe)) panic("I need PAGE_SIZE to be integer multiple of struct pbe, otherwise next assignment could damage pagedir"); cur->link.next = prev; - lock_page(virt_to_page((unsigned long)cur)); - rw_swap_page_nolock(WRITE, entry, (char *) cur); + page = virt_to_page((unsigned long)cur); + rw_swap_page_sync(WRITE, entry, page); prev = entry; } PRINTK(", header"); @@ -472,8 +468,8 @@ cur->link.next = prev; - lock_page(virt_to_page((unsigned long)cur)); - rw_swap_page_nolock(WRITE, entry, (char *) cur); + page = virt_to_page((unsigned long)cur); + rw_swap_page_sync(WRITE, entry, page); prev = entry; PRINTK( ", signature" ); @@ -1009,7 +1005,7 @@ return sanity_check_failed("Incorrect machine type"); if(strncmp(sh->version, system_utsname.version, 20)) return sanity_check_failed("Incorrect version"); - if(sh->num_cpus != smp_num_cpus) + if(sh->num_cpus != num_online_cpus()) return sanity_check_failed("Incorrect number of cpus"); if(sh->page_size != PAGE_SIZE) return sanity_check_failed("Incorrect PAGE_SIZE"); diff -Nru a/kernel/sys.c b/kernel/sys.c --- a/kernel/sys.c Tue Jun 18 19:12:01 2002 +++ b/kernel/sys.c Tue Jun 18 19:12:01 2002 @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff -Nru a/kernel/sysctl.c b/kernel/sysctl.c --- a/kernel/sysctl.c Tue Jun 18 19:12:01 2002 +++ b/kernel/sysctl.c Tue Jun 18 19:12:01 2002 @@ -31,6 +31,7 @@ #include #include #include +#include #include @@ -264,6 +265,19 @@ &pager_daemon, sizeof(pager_daemon_t), 0644, NULL, &proc_dointvec}, {VM_PAGE_CLUSTER, "page-cluster", &page_cluster, sizeof(int), 0644, NULL, &proc_dointvec}, + {VM_DIRTY_BACKGROUND, "dirty_background_ratio", + &dirty_background_ratio, sizeof(dirty_background_ratio), + 0644, NULL, &proc_dointvec}, + 
{VM_DIRTY_ASYNC, "dirty_async_ratio", &dirty_async_ratio, + sizeof(dirty_async_ratio), 0644, NULL, &proc_dointvec}, + {VM_DIRTY_SYNC, "dirty_sync_ratio", &dirty_sync_ratio, + sizeof(dirty_sync_ratio), 0644, NULL, &proc_dointvec}, + {VM_DIRTY_WB_CS, "dirty_writeback_centisecs", + &dirty_writeback_centisecs, sizeof(dirty_writeback_centisecs), 0644, + NULL, &proc_dointvec}, + {VM_DIRTY_EXPIRE_CS, "dirty_expire_centisecs", + &dirty_expire_centisecs, sizeof(dirty_expire_centisecs), 0644, + NULL, &proc_dointvec}, {0} }; diff -Nru a/kernel/timer.c b/kernel/timer.c --- a/kernel/timer.c Tue Jun 18 19:12:02 2002 +++ b/kernel/timer.c Tue Jun 18 19:12:02 2002 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -69,11 +70,11 @@ extern int do_setitimer(int, struct itimerval *, struct itimerval *); /* - * The 64-bit value is not volatile - you MUST NOT read it + * The 64-bit jiffies value is not atomic - you MUST NOT read it * without holding read_lock_irq(&xtime_lock). * jiffies is defined in the linker script... */ -u64 jiffies_64; + unsigned int * prof_buffer; unsigned long prof_len; @@ -231,11 +232,6 @@ } #ifdef CONFIG_SMP -void sync_timers(void) -{ - spin_unlock_wait(&global_bh_lock); -} - /* * SMP specific function to delete periodic timer. * Caller must disable by some means restarting the timer diff -Nru a/lib/brlock.c b/lib/brlock.c --- a/lib/brlock.c Tue Jun 18 19:12:01 2002 +++ b/lib/brlock.c Tue Jun 18 19:12:01 2002 @@ -24,16 +24,16 @@ { int i; - for (i = 0; i < smp_num_cpus; i++) - write_lock(&__brlock_array[cpu_logical_map(i)][idx]); + for (i = 0; i < NR_CPUS; i++) + write_lock(&__brlock_array[i][idx]); } void __br_write_unlock (enum brlock_indices idx) { int i; - for (i = 0; i < smp_num_cpus; i++) - write_unlock(&__brlock_array[cpu_logical_map(i)][idx]); + for (i = 0; i < NR_CPUS; i++) + write_unlock(&__brlock_array[i][idx]); } #else /* ! __BRLOCK_USE_ATOMICS */ @@ -50,8 +50,8 @@ again: spin_lock(&__br_write_locks[idx].lock); - for (i = 0; i < smp_num_cpus; i++) - if (__brlock_array[cpu_logical_map(i)][idx] != 0) { + for (i = 0; i < NR_CPUS; i++) + if (__brlock_array[i][idx] != 0) { spin_unlock(&__br_write_locks[idx].lock); barrier(); cpu_relax(); diff -Nru a/lib/radix-tree.c b/lib/radix-tree.c --- a/lib/radix-tree.c Tue Jun 18 19:12:02 2002 +++ b/lib/radix-tree.c Tue Jun 18 19:12:02 2002 @@ -29,7 +29,7 @@ /* * Radix tree node definition. */ -#define RADIX_TREE_MAP_SHIFT 7 +#define RADIX_TREE_MAP_SHIFT 6 #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) diff -Nru a/mm/filemap.c b/mm/filemap.c --- a/mm/filemap.c Tue Jun 18 19:12:02 2002 +++ b/mm/filemap.c Tue Jun 18 19:12:02 2002 @@ -445,8 +445,10 @@ { /* Only activate on memory-pressure, not fsync.. */ if (current->flags & PF_MEMALLOC) { - activate_page(page); - SetPageReferenced(page); + if (!PageActive(page)) + activate_page(page); + if (!PageReferenced(page)) + SetPageReferenced(page); } /* Set the page dirty again, unlock */ @@ -868,55 +870,35 @@ * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should * be safe to call while holding the lock for another page. + * + * Clear __GFP_FS when allocating the page to avoid recursion into the fs + * and deadlock against the caller's locked page. 
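
Clearing __GFP_FS matters because an allocation made under memory pressure may recurse into the filesystem to clean pages, and the caller of grab_cache_page_nowait() may already hold a locked page in that same filesystem. The idiom in isolation (the helper name is made up):

static struct page *alloc_pagecache_page_nofs(struct address_space *mapping)
{
	/* never re-enter the fs from this allocation */
	return alloc_pages(mapping->gfp_mask & ~__GFP_FS, 0);
}
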
*/ -struct page *grab_cache_page_nowait(struct address_space *mapping, unsigned long index) +struct page * +grab_cache_page_nowait(struct address_space *mapping, unsigned long index) { - struct page *page; - - page = find_get_page(mapping, index); - - if ( page ) { - if ( !TestSetPageLocked(page) ) { - /* Page found and locked */ - /* This test is overly paranoid, but what the heck... */ - if ( unlikely(page->mapping != mapping || page->index != index) ) { - /* Someone reallocated this page under us. */ - unlock_page(page); - page_cache_release(page); - return NULL; - } else { - return page; - } - } else { - /* Page locked by someone else */ - page_cache_release(page); - return NULL; - } - } - - page = page_cache_alloc(mapping); - if (unlikely(!page)) - return NULL; /* Failed to allocate a page */ + struct page *page = find_get_page(mapping, index); - if (unlikely(add_to_page_cache_unique(page, mapping, index))) { - /* - * Someone else grabbed the page already, or - * failed to allocate a radix-tree node - */ + if (page) { + if (!TestSetPageLocked(page)) + return page; page_cache_release(page); return NULL; } - + page = alloc_pages(mapping->gfp_mask & ~__GFP_FS, 0); + if (page && add_to_page_cache_unique(page, mapping, index)) { + page_cache_release(page); + page = NULL; + } return page; } /* * Mark a page as having seen activity. * - * If it was already so marked, move it - * to the active queue and drop the referenced - * bit. Otherwise, just mark it for future - * action.. + * inactive,unreferenced -> inactive,referenced + * inactive,referenced -> active,unreferenced + * active,unreferenced -> active,referenced */ void mark_page_accessed(struct page *page) { @@ -924,10 +906,9 @@ activate_page(page); ClearPageReferenced(page); return; + } else if (!PageReferenced(page)) { + SetPageReferenced(page); } - - /* Mark the page referenced, AFTER checking for previous usage.. */ - SetPageReferenced(page); } /* @@ -2286,7 +2267,8 @@ } } kunmap(page); - SetPageReferenced(page); + if (!PageReferenced(page)) + SetPageReferenced(page); unlock_page(page); page_cache_release(page); if (status < 0) diff -Nru a/mm/highmem.c b/mm/highmem.c --- a/mm/highmem.c Tue Jun 18 19:12:01 2002 +++ b/mm/highmem.c Tue Jun 18 19:12:01 2002 @@ -17,6 +17,7 @@ */ #include +#include #include #include #include @@ -347,13 +348,15 @@ return __bounce_end_io_read(bio, isa_page_pool); } -void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig) +void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) { struct page *page; struct bio *bio = NULL; int i, rw = bio_data_dir(*bio_orig), bio_gfp; struct bio_vec *to, *from; mempool_t *pool; + unsigned long pfn = q->bounce_pfn; + int gfp = q->bounce_gfp; BUG_ON((*bio_orig)->bi_idx); diff -Nru a/mm/msync.c b/mm/msync.c --- a/mm/msync.c Tue Jun 18 19:12:02 2002 +++ b/mm/msync.c Tue Jun 18 19:12:02 2002 @@ -169,7 +169,7 @@ { unsigned long end; struct vm_area_struct * vma; - int unmapped_error, error = -EINVAL; + int unmapped_error, error = -ENOMEM; down_read(¤t->mm->mmap_sem); if (start & ~PAGE_MASK) @@ -185,18 +185,18 @@ goto out; /* * If the interval [start,end) covers some unmapped address ranges, - * just ignore them, but return -EFAULT at the end. + * just ignore them, but return -ENOMEM at the end. */ vma = find_vma(current->mm, start); unmapped_error = 0; for (;;) { /* Still start < end. */ - error = -EFAULT; + error = -ENOMEM; if (!vma) goto out; /* Here start < vma->vm_end. 
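
The msync() error-code change aligns the syscall with POSIX: a hole in the requested range now reports ENOMEM rather than EFAULT. Seen from userspace (illustrative):

#include <errno.h>
#include <sys/mman.h>

int sync_region(void *addr, size_t len)
{
	if (msync(addr, len, MS_SYNC) < 0 && errno == ENOMEM)
		return -1;	/* part of [addr, addr+len) was unmapped */
	return 0;
}
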
*/ if (start < vma->vm_start) { - unmapped_error = -EFAULT; + unmapped_error = -ENOMEM; start = vma->vm_start; } /* Here vma->vm_start <= start < vma->vm_end. */ @@ -220,5 +220,3 @@ up_read(¤t->mm->mmap_sem); return error; } - - diff -Nru a/mm/page-writeback.c b/mm/page-writeback.c --- a/mm/page-writeback.c Tue Jun 18 19:12:01 2002 +++ b/mm/page-writeback.c Tue Jun 18 19:12:01 2002 @@ -26,29 +26,56 @@ * The maximum number of pages to writeout in a single bdflush/kupdate * operation. We do this so we don't hold I_LOCK against an inode for * enormous amounts of time, which would block a userspace task which has - * been forced to throttle against that inode. + * been forced to throttle against that inode. Also, the code reevaluates + * the dirty each time it has written this many pages. */ #define MAX_WRITEBACK_PAGES 1024 /* - * Memory thresholds, in percentages - * FIXME: expose these via /proc or whatever. + * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited + * will look to see if it needs to force writeback or throttling. Probably + * should be scaled by memory size. + */ +#define RATELIMIT_PAGES 1000 + +/* + * When balance_dirty_pages decides that the caller needs to perform some + * non-background writeback, this is how many pages it will attempt to write. + * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably + * large amounts of I/O are submitted. + */ +#define SYNC_WRITEBACK_PAGES 1500 + + +/* + * Dirty memory thresholds, in percentages */ /* * Start background writeback (via pdflush) at this level */ -static int dirty_background_ratio = 40; +int dirty_background_ratio = 40; /* * The generator of dirty data starts async writeback at this level */ -static int dirty_async_ratio = 50; +int dirty_async_ratio = 50; /* * The generator of dirty data performs sync writeout at this level */ -static int dirty_sync_ratio = 60; +int dirty_sync_ratio = 60; + +/* + * The interval between `kupdate'-style writebacks. + */ +int dirty_writeback_centisecs = 5 * 100; + +/* + * The largest amount of time for which data is allowed to remain dirty + */ +int dirty_expire_centisecs = 30 * 100; + static void background_writeout(unsigned long _min_pages); @@ -84,12 +111,12 @@ sync_thresh = (dirty_sync_ratio * tot) / 100; if (dirty_and_writeback > sync_thresh) { - int nr_to_write = 1500; + int nr_to_write = SYNC_WRITEBACK_PAGES; writeback_unlocked_inodes(&nr_to_write, WB_SYNC_LAST, NULL); get_page_state(&ps); } else if (dirty_and_writeback > async_thresh) { - int nr_to_write = 1500; + int nr_to_write = SYNC_WRITEBACK_PAGES; writeback_unlocked_inodes(&nr_to_write, WB_SYNC_NONE, NULL); get_page_state(&ps); @@ -118,7 +145,7 @@ int cpu; cpu = get_cpu(); - if (ratelimits[cpu].count++ >= 1000) { + if (ratelimits[cpu].count++ >= RATELIMIT_PAGES) { ratelimits[cpu].count = 0; put_cpu(); balance_dirty_pages(mapping); @@ -162,17 +189,6 @@ pdflush_operation(background_writeout, ps.nr_dirty); } -/* - * The interval between `kupdate'-style writebacks. - * - * Traditional kupdate writes back data which is 30-35 seconds old. - * This one does that, but it also writes back just 1/6th of the dirty - * data. This is to avoid great I/O storms. - * - * We chunk the writes up and yield, to permit any throttled page-allocators - * to perform their I/O against a large file. - */ -static int wb_writeback_jifs = 5 * HZ; static struct timer_list wb_timer; /* @@ -183,9 +199,9 @@ * just walks the superblock inode list, writing back any inodes which are * older than a specific point in time. 
* - * Try to run once per wb_writeback_jifs jiffies. But if a writeback event - * takes longer than a wb_writeback_jifs interval, then leave a one-second - * gap. + * Try to run once per dirty_writeback_centisecs. But if a writeback event + * takes longer than a dirty_writeback_centisecs interval, then leave a + * one-second gap. * * older_than_this takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings. @@ -201,9 +217,9 @@ sync_supers(); get_page_state(&ps); - oldest_jif = jiffies - 30*HZ; + oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100; start_jif = jiffies; - next_jif = start_jif + wb_writeback_jifs; + next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100; nr_to_write = ps.nr_dirty; writeback_unlocked_inodes(&nr_to_write, WB_SYNC_NONE, &oldest_jif); blk_run_queues(); @@ -223,7 +239,7 @@ static int __init wb_timer_init(void) { init_timer(&wb_timer); - wb_timer.expires = jiffies + wb_writeback_jifs; + wb_timer.expires = jiffies + (dirty_writeback_centisecs * HZ) / 100; wb_timer.data = 0; wb_timer.function = wb_timer_fn; add_timer(&wb_timer); diff -Nru a/mm/page_alloc.c b/mm/page_alloc.c --- a/mm/page_alloc.c Tue Jun 18 19:12:01 2002 +++ b/mm/page_alloc.c Tue Jun 18 19:12:01 2002 @@ -574,10 +574,13 @@ int pcpu; memset(ret, 0, sizeof(*ret)); - for (pcpu = 0; pcpu < smp_num_cpus; pcpu++) { + for (pcpu = 0; pcpu < NR_CPUS; pcpu++) { struct page_state *ps; - ps = &page_states[cpu_logical_map(pcpu)]; + if (!cpu_online(pcpu)) + continue; + + ps = &page_states[pcpu]; ret->nr_dirty += ps->nr_dirty; ret->nr_writeback += ps->nr_writeback; ret->nr_pagecache += ps->nr_pagecache; diff -Nru a/mm/page_io.c b/mm/page_io.c --- a/mm/page_io.c Tue Jun 18 19:12:01 2002 +++ b/mm/page_io.c Tue Jun 18 19:12:01 2002 @@ -14,112 +14,163 @@ #include #include #include -#include -#include /* for brw_page() */ - +#include +#include #include +#include -/* - * Reads or writes a swap page. - * wait=1: start I/O and wait for completion. wait=0: start asynchronous I/O. - * - * Important prevention of race condition: the caller *must* atomically - * create a unique swap cache entry for this swap page before calling - * rw_swap_page, and must lock that page. By ensuring that there is a - * single page of memory reserved for the swap entry, the normal VM page - * lock on that page also doubles as a lock on swap entries. Having only - * one lock to deal with per swap entry (rather than locking swap and memory - * independently) also makes it easier to make certain swapping operations - * atomic, which is particularly important when we are trying to ensure - * that shared pages stay shared while being swapped. 
- */ +static int +swap_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + struct swap_info_struct *sis; + swp_entry_t entry; -static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page) + entry.val = iblock; + sis = get_swap_info_struct(swp_type(entry)); + bh_result->b_bdev = sis->bdev; + bh_result->b_blocknr = map_swap_page(sis, swp_offset(entry)); + bh_result->b_size = PAGE_SIZE; + set_buffer_mapped(bh_result); + return 0; +} + +static struct bio * +get_swap_bio(int gfp_flags, struct page *page, bio_end_io_t end_io) { - unsigned long offset; - sector_t zones[PAGE_SIZE/512]; - int zones_used; - int block_size; - struct inode *swapf = 0; - struct block_device *bdev; + struct bio *bio; + struct buffer_head bh; - if (rw == READ) { + bio = bio_alloc(gfp_flags, 1); + if (bio) { + swap_get_block(NULL, page->index, &bh, 1); + bio->bi_sector = bh.b_blocknr * (PAGE_SIZE >> 9); + bio->bi_bdev = bh.b_bdev; + bio->bi_io_vec[0].bv_page = page; + bio->bi_io_vec[0].bv_len = PAGE_SIZE; + bio->bi_io_vec[0].bv_offset = 0; + bio->bi_vcnt = 1; + bio->bi_idx = 0; + bio->bi_size = PAGE_SIZE; + bio->bi_end_io = end_io; + } + return bio; +} + +static void end_swap_bio_write(struct bio *bio) +{ + const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct page *page = bio->bi_io_vec[0].bv_page; + + if (!uptodate) + SetPageError(page); + end_page_writeback(page); + bio_put(bio); +} + +static void end_swap_bio_read(struct bio *bio) +{ + const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct page *page = bio->bi_io_vec[0].bv_page; + + if (!uptodate) { + SetPageError(page); ClearPageUptodate(page); - kstat.pswpin++; - } else - kstat.pswpout++; - - get_swaphandle_info(entry, &offset, &swapf); - bdev = swapf->i_bdev; - if (bdev) { - zones[0] = offset; - zones_used = 1; - block_size = PAGE_SIZE; } else { - int i, j; - unsigned int block = offset - << (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits); - - block_size = swapf->i_sb->s_blocksize; - for (i=0, j=0; j< PAGE_SIZE ; i++, j += block_size) - if (!(zones[i] = bmap(swapf,block++))) { - printk("rw_swap_page: bad swap file\n"); - return 0; - } - zones_used = i; - bdev = swapf->i_sb->s_bdev; - } - - /* block_size == PAGE_SIZE/zones_used */ - brw_page(rw, page, bdev, zones, block_size); - - /* Note! For consistency we do all of the logic, - * decrementing the page count, and unlocking the page in the - * swap lock map - in the IO completion handler. - */ - return 1; + SetPageUptodate(page); + } + unlock_page(page); + bio_put(bio); } /* - * A simple wrapper so the base function doesn't need to enforce - * that all swap pages go through the swap cache! We verify that: - * - the page is locked - * - it's marked as being swap-cache - * - it's associated with the swap inode + * We may have stale swap cache pages in memory: notice + * them here and get rid of the unnecessary final write. 
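
A bio carries its own completion: submit_bio() merely queues the request, and the end_io handlers above both finish the page state and drop the final bio reference. A caller wanting synchronous behaviour therefore waits on the page, not the bio. A sketch built on the new entry points:

static int write_swap_page_and_wait(struct page *page)	/* page is locked */
{
	int err = swap_writepage(page);	/* unlocks; I/O now in flight */

	if (!err)
		wait_on_page_writeback(page);
	return err;
}
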
*/ -void rw_swap_page(int rw, struct page *page) +int swap_writepage(struct page *page) { - swp_entry_t entry; + struct bio *bio; + int ret = 0; - entry.val = page->index; - - if (!PageLocked(page)) - PAGE_BUG(page); - if (!PageSwapCache(page)) - PAGE_BUG(page); - if (!rw_swap_page_base(rw, entry, page)) + if (remove_exclusive_swap_page(page)) { unlock_page(page); + goto out; + } + bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); + if (bio == NULL) { + ret = -ENOMEM; + goto out; + } + kstat.pswpout++; + SetPageWriteback(page); + unlock_page(page); + submit_bio(WRITE, bio); +out: + return ret; +} + +int swap_readpage(struct file *file, struct page *page) +{ + struct bio *bio; + int ret = 0; + + ClearPageUptodate(page); + bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); + if (bio == NULL) { + ret = -ENOMEM; + goto out; + } + kstat.pswpin++; + submit_bio(READ, bio); +out: + return ret; +} +/* + * swapper_space doesn't have a real inode, so it gets a special vm_writeback() + * so we don't need swap special cases in generic_vm_writeback(). + * + * Swap pages are PageLocked and PageWriteback while under writeout so that + * memory allocators will throttle against them. + */ +static int swap_vm_writeback(struct page *page, int *nr_to_write) +{ + struct address_space *mapping = page->mapping; + + unlock_page(page); + return generic_writepages(mapping, nr_to_write); } +struct address_space_operations swap_aops = { + vm_writeback: swap_vm_writeback, + writepage: swap_writepage, + readpage: swap_readpage, + sync_page: block_sync_page, + set_page_dirty: __set_page_dirty_nobuffers, +}; + /* - * The swap lock map insists that pages be in the page cache! - * Therefore we can't use it. Later when we can remove the need for the - * lock map and we can reduce the number of functions exported. + * A scruffy utility function to read or write an arbitrary swap page + * and wait on the I/O. */ -void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf) +int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page) { - struct page *page = virt_to_page(buf); - - if (!PageLocked(page)) - PAGE_BUG(page); - if (page->mapping) - PAGE_BUG(page); - /* needs sync_page to wait I/O completation */ + int ret; + + lock_page(page); + + BUG_ON(page->mapping); page->mapping = &swapper_space; - if (rw_swap_page_base(rw, entry, page)) - lock_page(page); - if (page_has_buffers(page) && !try_to_free_buffers(page)) - PAGE_BUG(page); + page->index = entry.val; + + if (rw == READ) { + ret = swap_readpage(NULL, page); + wait_on_page_locked(page); + } else { + ret = swap_writepage(page); + wait_on_page_writeback(page); + } page->mapping = NULL; - unlock_page(page); + if (ret == 0 && (!PageUptodate(page) || PageError(page))) + ret = -EIO; + return ret; } diff -Nru a/mm/shmem.c b/mm/shmem.c --- a/mm/shmem.c Tue Jun 18 19:12:02 2002 +++ b/mm/shmem.c Tue Jun 18 19:12:02 2002 @@ -426,15 +426,22 @@ swap_free(entry); ptr[offset] = (swp_entry_t) {0}; - while (inode && move_from_swap_cache(page, idx, inode->i_mapping)) { + while (inode && (PageWriteback(page) || + move_from_swap_cache(page, idx, inode->i_mapping))) { /* * Yield for kswapd, and try again - but we're still * holding the page lock - ugh! fix this up later on. * Beware of inode being unlinked or truncated: just * leave try_to_unuse to delete_from_swap_cache if so. + * + * AKPM: We now wait on writeback too. Note that it's + * the page lock which prevents new writeback from starting. 
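
rw_swap_page_sync() gives callers such as the suspend code a single synchronous entry point; usage follows the alloc/rw/free shape already visible in kernel/suspend.c above. A sketch:

static int read_swap_header(int root_swap, struct page *page)
{
	/* entry 0 is the first page of the swap area; returns -EIO on failure */
	return rw_swap_page_sync(READ, swp_entry(root_swap, 0), page);
}
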
*/ spin_unlock(&info->lock); - yield(); + if (PageWriteback(page)) + wait_on_page_writeback(page); + else + yield(); spin_lock(&info->lock); ptr = shmem_swp_entry(info, idx, 0); if (IS_ERR(ptr)) @@ -594,9 +601,14 @@ } /* We have to do this with page locked to prevent races */ - if (TestSetPageLocked(page)) + if (TestSetPageLocked(page)) goto wait_retry; - + if (PageWriteback(page)) { + spin_unlock(&info->lock); + wait_on_page_writeback(page); + unlock_page(page); + goto repeat; + } error = move_from_swap_cache(page, idx, mapping); if (error < 0) { unlock_page(page); @@ -651,7 +663,7 @@ return ERR_PTR(-ENOSPC); wait_retry: - spin_unlock (&info->lock); + spin_unlock(&info->lock); wait_on_page_locked(page); page_cache_release(page); goto repeat; diff -Nru a/mm/slab.c b/mm/slab.c --- a/mm/slab.c Tue Jun 18 19:12:03 2002 +++ b/mm/slab.c Tue Jun 18 19:12:03 2002 @@ -941,8 +941,8 @@ down(&cache_chain_sem); smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); - for (i = 0; i < smp_num_cpus; i++) { - cpucache_t* ccold = new.new[cpu_logical_map(i)]; + for (i = 0; i < NR_CPUS; i++) { + cpucache_t* ccold = new.new[i]; if (!ccold || (ccold->avail == 0)) continue; local_irq_disable(); @@ -1675,16 +1675,18 @@ memset(&new.new,0,sizeof(new.new)); if (limit) { - for (i = 0; i< smp_num_cpus; i++) { + for (i = 0; i < NR_CPUS; i++) { cpucache_t* ccnew; ccnew = kmalloc(sizeof(void*)*limit+ sizeof(cpucache_t), GFP_KERNEL); - if (!ccnew) - goto oom; + if (!ccnew) { + for (i--; i >= 0; i--) kfree(new.new[i]); + return -ENOMEM; + } ccnew->limit = limit; ccnew->avail = 0; - new.new[cpu_logical_map(i)] = ccnew; + new.new[i] = ccnew; } } new.cachep = cachep; @@ -1694,8 +1696,8 @@ smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); - for (i = 0; i < smp_num_cpus; i++) { - cpucache_t* ccold = new.new[cpu_logical_map(i)]; + for (i = 0; i < NR_CPUS; i++) { + cpucache_t* ccold = new.new[i]; if (!ccold) continue; local_irq_disable(); @@ -1704,10 +1706,6 @@ kfree(ccold); } return 0; -oom: - for (i--; i >= 0; i--) - kfree(new.new[cpu_logical_map(i)]); - return -ENOMEM; } static void enable_cpucache (kmem_cache_t *cachep) diff -Nru a/mm/swap_state.c b/mm/swap_state.c --- a/mm/swap_state.c Tue Jun 18 19:12:02 2002 +++ b/mm/swap_state.c Tue Jun 18 19:12:02 2002 @@ -14,54 +14,27 @@ #include #include #include -#include /* block_sync_page()/try_to_free_buffers() */ +#include /* block_sync_page() */ #include /* - * We may have stale swap cache pages in memory: notice - * them here and get rid of the unnecessary final write. - */ -static int swap_writepage(struct page *page) -{ - if (remove_exclusive_swap_page(page)) { - unlock_page(page); - return 0; - } - rw_swap_page(WRITE, page); - return 0; -} - -/* - * swapper_space doesn't have a real inode, so it gets a special vm_writeback() - * so we don't need swap special cases in generic_vm_writeback(). - * - * Swap pages are PageLocked and PageWriteback while under writeout so that - * memory allocators will throttle against them. - */ -static int swap_vm_writeback(struct page *page, int *nr_to_write) -{ - struct address_space *mapping = page->mapping; - - unlock_page(page); - return generic_writepages(mapping, nr_to_write); -} - -static struct address_space_operations swap_aops = { - vm_writeback: swap_vm_writeback, - writepage: swap_writepage, - sync_page: block_sync_page, - set_page_dirty: __set_page_dirty_nobuffers, -}; - -/* * swapper_inode doesn't do anything much. It is really only here to * avoid some special-casing in other parts of the kernel. 
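
The slab changes in this hunk replace the dense cpu_logical_map() walk with a plain scan of all NR_CPUS slots; the same pattern appears further down in net/core/dev.c with an explicit cpu_online() test. A tiny sketch of the new style, where possibly-sparse CPU numbering is handled by skipping empty slots (NR_CPUS and the online mask are invented values):

#include <stdio.h>

#define NR_CPUS 8

static unsigned long online_mask = 0x2d;	/* CPUs 0, 2, 3, 5 online */

static int cpu_online(int cpu)
{
	return (online_mask >> cpu) & 1;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;	/* holes are fine, just skip them */
		printf("visit per-CPU data for cpu %d\n", i);
	}
	return 0;
}
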
+ * + * We set i_size to "infinity" to keep the page I/O functions happy. The swap + * block allocator makes sure that allocations are in-range. A strange + * number is chosen to prevent various arith overflows elsewhere. For example, + * `lblock' in block_read_full_page(). */ static struct inode swapper_inode = { - i_mapping: &swapper_space, + i_mapping: &swapper_space, + i_size: PAGE_SIZE * 0xffffffffLL, + i_blkbits: PAGE_SHIFT, }; +extern struct address_space_operations swap_aops; + struct address_space swapper_space = { page_tree: RADIX_TREE_INIT(GFP_ATOMIC), page_lock: RW_LOCK_UNLOCKED, @@ -131,10 +104,9 @@ */ void __delete_from_swap_cache(struct page *page) { - if (!PageLocked(page)) - BUG(); - if (!PageSwapCache(page)) - BUG(); + BUG_ON(!PageLocked(page)); + BUG_ON(!PageSwapCache(page)); + BUG_ON(PageWriteback(page)); ClearPageDirty(page); __remove_inode_page(page); INC_CACHE_INFO(del_total); @@ -150,14 +122,9 @@ { swp_entry_t entry; - /* - * I/O should have completed and nobody can have a ref against the - * page's buffers - */ BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); - if (page_has_buffers(page) && !try_to_free_buffers(page)) - BUG(); + BUG_ON(page_has_buffers(page)); entry.val = page->index; @@ -223,16 +190,9 @@ void **pslot; int err; - /* - * Drop the buffers now, before taking the page_lock. Because - * mapping->private_lock nests outside mapping->page_lock. - * This "must" succeed. The page is locked and all I/O has completed - * and nobody else has a ref against its buffers. - */ BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); - if (page_has_buffers(page) && !try_to_free_buffers(page)) - BUG(); + BUG_ON(page_has_buffers(page)); write_lock(&swapper_space.page_lock); write_lock(&mapping->page_lock); @@ -362,7 +322,7 @@ /* * Initiate read into locked page and return. */ - rw_swap_page(READ, new_page); + swap_readpage(NULL, new_page); return new_page; } } while (err != -ENOENT && err != -ENOMEM); diff -Nru a/mm/swapfile.c b/mm/swapfile.c --- a/mm/swapfile.c Tue Jun 18 19:12:02 2002 +++ b/mm/swapfile.c Tue Jun 18 19:12:02 2002 @@ -16,7 +16,7 @@ #include #include #include -#include /* for try_to_free_buffers() */ +#include #include #include @@ -294,11 +294,14 @@ struct swap_info_struct * p; swp_entry_t entry; - if (!PageLocked(page)) - BUG(); + BUG_ON(page_has_buffers(page)); + BUG_ON(!PageLocked(page)); + if (!PageSwapCache(page)) return 0; - if (page_count(page) - !!PagePrivate(page) != 2) /* 2: us + cache */ + if (PageWriteback(page)) + return 0; + if (page_count(page) != 2) /* 2: us + cache */ return 0; entry.val = page->index; @@ -311,13 +314,8 @@ if (p->swap_map[swp_offset(entry)] == 1) { /* Recheck the page count with the pagecache lock held.. */ write_lock(&swapper_space.page_lock); - if (page_count(page) - !!PagePrivate(page) == 2) { + if ((page_count(page) == 2) && !PageWriteback(page)) { __delete_from_swap_cache(page); - /* - * NOTE: if/when swap gets buffer/page coherency - * like other mappings, we'll need to mark the buffers - * dirty here too. set_page_dirty(). - */ SetPageDirty(page); retval = 1; } @@ -326,9 +324,6 @@ swap_info_put(p); if (retval) { - BUG_ON(PageWriteback(page)); - if (page_has_buffers(page) && !try_to_free_buffers(page)) - BUG(); swap_free(entry); page_cache_release(page); } @@ -352,9 +347,13 @@ swap_info_put(p); } if (page) { + int one_user; + + BUG_ON(page_has_buffers(page)); page_cache_get(page); + one_user = (page_count(page) == 2); /* Only cache user (+us), or swap space full? Free it! 
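
The page_count(page) == 2 test reads as "one reference held by us, one by the swap cache, nobody else", and its result is only trusted once it has been repeated with the pagecache lock held. A small sketch of that check/recheck pattern (all names invented):

#include <stdio.h>

struct fake_page {
	int count;		/* 2 == us + swap cache, nobody else */
	int writeback;
};

static int exclusively_ours(const struct fake_page *p)
{
	return p->count == 2 && !p->writeback;
}

int main(void)
{
	struct fake_page p = { 2, 0 };

	if (exclusively_ours(&p)) {
		/* take the pagecache lock here ... */
		if (exclusively_ours(&p))	/* recheck under the lock */
			printf("safe to drop the swap cache copy\n");
		/* ... and release it */
	}
	return 0;
}
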
*/ - if (page_count(page) - !!PagePrivate(page) == 2 || vm_swap_full()) { + if (!PageWriteback(page) && (one_user || vm_swap_full())) { delete_from_swap_cache(page); SetPageDirty(page); } @@ -606,6 +605,7 @@ wait_on_page_locked(page); wait_on_page_writeback(page); lock_page(page); + wait_on_page_writeback(page); /* * Remove all references to entry, without blocking. @@ -685,11 +685,13 @@ * Note shmem_unuse already deleted its from swap cache. */ if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) { - rw_swap_page(WRITE, page); + swap_writepage(page); lock_page(page); } - if (PageSwapCache(page)) + if (PageSwapCache(page)) { + wait_on_page_writeback(page); delete_from_swap_cache(page); + } /* * So we could skip searching mms once swap count went @@ -717,6 +719,207 @@ return retval; } +/* + * Use this swapdev's extent info to locate the (PAGE_SIZE) block which + * corresponds to page offset `offset'. + */ +sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) +{ + struct swap_extent *se = sis->curr_swap_extent; + struct swap_extent *start_se = se; + + for ( ; ; ) { + struct list_head *lh; + + if (se->start_page <= offset && + offset < (se->start_page + se->nr_pages)) { + return se->start_block + (offset - se->start_page); + } + lh = se->list.prev; + if (lh == &sis->extent_list) + lh = lh->prev; + se = list_entry(lh, struct swap_extent, list); + sis->curr_swap_extent = se; + BUG_ON(se == start_se); /* It *must* be present */ + } +} + +/* + * Free all of a swapdev's extent information + */ +static void destroy_swap_extents(struct swap_info_struct *sis) +{ + while (!list_empty(&sis->extent_list)) { + struct swap_extent *se; + + se = list_entry(sis->extent_list.next, + struct swap_extent, list); + list_del(&se->list); + kfree(se); + } + sis->nr_extents = 0; +} + +/* + * Add a block range (and the corresponding page range) into this swapdev's + * extent list. The extent list is kept sorted in block order. + * + * This function rather assumes that it is called in ascending sector_t order. + * It doesn't look for extent coalescing opportunities. + */ +static int +add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, + unsigned long nr_pages, sector_t start_block) +{ + struct swap_extent *se; + struct swap_extent *new_se; + struct list_head *lh; + + lh = sis->extent_list.next; /* The highest-addressed block */ + while (lh != &sis->extent_list) { + se = list_entry(lh, struct swap_extent, list); + if (se->start_block + se->nr_pages == start_block) { + /* Merge it */ + se->nr_pages += nr_pages; + return 0; + } + lh = lh->next; + } + + /* + * No merge. Insert a new extent, preserving ordering. + */ + new_se = kmalloc(sizeof(*se), GFP_KERNEL); + if (new_se == NULL) + return -ENOMEM; + new_se->start_page = start_page; + new_se->nr_pages = nr_pages; + new_se->start_block = start_block; + + lh = sis->extent_list.prev; /* The lowest block */ + while (lh != &sis->extent_list) { + se = list_entry(lh, struct swap_extent, list); + if (se->start_block > start_block) + break; + lh = lh->prev; + } + list_add_tail(&new_se->list, lh); + sis->nr_extents++; + return 0; +} + +/* + * A `swap extent' is a simple thing which maps a contiguous range of pages + * onto a contiguous range of disk blocks. An ordered list of swap extents + * is built at swapon time and is then used at swap_writepage/swap_readpage + * time for locating where on disk a page belongs. + * + * If the swapfile is an S_ISBLK block device, a single extent is installed. 
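
add_swap_extent() grows the last extent when the new run continues it on disk and only allocates a fresh list entry otherwise; the mixed page/block arithmetic in the merge test works because swap extents are kept in PAGE_SIZE blocks, so one page equals one block. A compact user-space sketch of the merge-or-append step, using a flat array instead of the kernel's list_head:

#include <stdio.h>

struct extent {
	unsigned long start_page;	/* offset into the swap area */
	unsigned long nr_pages;
	unsigned long long start_block;	/* on disk, PAGE_SIZE units */
};

static struct extent list[64];
static int nr_extents;

static int add_extent(unsigned long start_page, unsigned long nr_pages,
		      unsigned long long start_block)
{
	if (nr_extents) {
		struct extent *last = &list[nr_extents - 1];

		if (last->start_block + last->nr_pages == start_block) {
			last->nr_pages += nr_pages;	/* merge */
			return 0;
		}
	}
	if (nr_extents == 64)
		return -1;		/* table full (demo only) */
	list[nr_extents].start_page = start_page;
	list[nr_extents].nr_pages = nr_pages;
	list[nr_extents].start_block = start_block;
	nr_extents++;
	return 0;
}

int main(void)
{
	add_extent(0, 1, 100);
	add_extent(1, 1, 101);	/* contiguous: merges into the first */
	add_extent(2, 1, 200);	/* discontiguous: new extent */
	printf("%d extents (expect 2)\n", nr_extents);
	return 0;
}
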
+ * This is done so that the main operating code can treat S_ISBLK and S_ISREG + * swap files identically. + * + * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap + * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK + * swapfiles are handled *identically* after swapon time. + * + * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks + * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If + * some stray blocks are found which do not fall within the PAGE_SIZE alignment + * requirements, they are simply tossed out - we will never use those blocks + * for swapping. + * + * The amount of disk space which a single swap extent represents varies. + * Typically it is in the 1-4 megabyte range. So we can have hundreds of + * extents in the list. To avoid much list walking, we cache the previous + * search location in `curr_swap_extent', and start new searches from there. + * This is extremely effective. The average number of iterations in + * map_swap_page() has been measured at about 0.3 per page. - akpm. + */ +static int setup_swap_extents(struct swap_info_struct *sis) +{ + struct inode *inode; + unsigned blocks_per_page; + unsigned long page_no; + unsigned blkbits; + sector_t probe_block; + sector_t last_block; + int ret; + + inode = sis->swap_file->f_dentry->d_inode; + if (S_ISBLK(inode->i_mode)) { + ret = add_swap_extent(sis, 0, sis->max, 0); + goto done; + } + + blkbits = inode->i_blkbits; + blocks_per_page = PAGE_SIZE >> blkbits; + + /* + * Map all the blocks into the extent list. This code doesn't try + * to be very smart. + */ + probe_block = 0; + page_no = 0; + last_block = inode->i_size >> blkbits; + while ((probe_block + blocks_per_page) <= last_block && + page_no < sis->max) { + unsigned block_in_page; + sector_t first_block; + + first_block = bmap(inode, probe_block); + if (first_block == 0) + goto bad_bmap; + + /* + * It must be PAGE_SIZE aligned on-disk + */ + if (first_block & (blocks_per_page - 1)) { + probe_block++; + goto reprobe; + } + + for (block_in_page = 1; block_in_page < blocks_per_page; + block_in_page++) { + sector_t block; + + block = bmap(inode, probe_block + block_in_page); + if (block == 0) + goto bad_bmap; + if (block != first_block + block_in_page) { + /* Discontiguity */ + probe_block++; + goto reprobe; + } + } + + /* + * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks + */ + ret = add_swap_extent(sis, page_no, 1, + first_block >> (PAGE_SHIFT - blkbits)); + if (ret) + goto out; + page_no++; + probe_block += blocks_per_page; +reprobe: + continue; + } + ret = 0; + if (page_no == 0) + ret = -EINVAL; + sis->max = page_no; + sis->highest_bit = page_no - 1; +done: + sis->curr_swap_extent = list_entry(sis->extent_list.prev, + struct swap_extent, list); + goto out; +bad_bmap: + printk(KERN_ERR "swapon: swapfile has holes\n"); + ret = -EINVAL; +out: + return ret; +} + asmlinkage long sys_swapoff(const char * specialfile) { struct swap_info_struct * p = NULL; @@ -733,7 +936,6 @@ if (err) goto out; - lock_kernel(); prev = -1; swap_list_lock(); for (type = swap_list.head; type >= 0; type = swap_info[type].next) { @@ -763,9 +965,7 @@ total_swap_pages -= p->pages; p->flags &= ~SWP_WRITEOK; swap_list_unlock(); - unlock_kernel(); err = try_to_unuse(type); - lock_kernel(); if (err) { /* re-insert swap space back into swap_list */ swap_list_lock(); @@ -791,6 +991,7 @@ swap_map = p->swap_map; p->swap_map = NULL; p->flags = 0; + destroy_swap_extents(p); swap_device_unlock(p); 
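
Also worth restating from setup_swap_extents(): a page of an S_ISREG swapfile is usable only when bmap() shows a disk-contiguous run of blocks whose start is PAGE_SIZE-aligned on disk; in the patch itself a hole aborts swapon entirely, while misaligned or discontiguous runs are merely skipped. A user-space sketch of the per-page test, with fake_bmap() standing in for bmap() and made-up block numbers:

#include <stdio.h>

#define BLOCKS_PER_PAGE 4	/* e.g. 4K pages over 1K fs blocks */

/* file block -> disk block; returning 0 would mean a hole */
static unsigned long fake_bmap(unsigned long file_block)
{
	return 1000 + file_block;	/* a perfectly contiguous file */
}

static int page_is_usable(unsigned long probe_block)
{
	unsigned long first = fake_bmap(probe_block);
	unsigned int i;

	if (first == 0)
		return 0;			/* hole */
	if (first & (BLOCKS_PER_PAGE - 1))
		return 0;			/* not PAGE_SIZE aligned */
	for (i = 1; i < BLOCKS_PER_PAGE; i++)
		if (fake_bmap(probe_block + i) != first + i)
			return 0;		/* discontiguous */
	return 1;
}

int main(void)
{
	printf("page at file block 0: %s\n",
	       page_is_usable(0) ? "usable" : "skipped");
	return 0;
}
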
swap_list_unlock(); vfree(swap_map); @@ -804,7 +1005,6 @@ err = 0; out_dput: - unlock_kernel(); path_release(&nd); out: return err; @@ -858,12 +1058,12 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags) { struct swap_info_struct * p; - char *name; + char *name = NULL; struct block_device *bdev = NULL; struct file *swap_file = NULL; struct address_space *mapping; unsigned int type; - int i, j, prev; + int i, prev; int error; static int least_priority = 0; union swap_header *swap_header = 0; @@ -872,10 +1072,10 @@ unsigned long maxpages = 1; int swapfilesize; unsigned short *swap_map; - + struct page *page = NULL; + if (!capable(CAP_SYS_ADMIN)) return -EPERM; - lock_kernel(); swap_list_lock(); p = swap_info; for (type = 0 ; type < nr_swapfiles ; type++,p++) @@ -888,7 +1088,9 @@ } if (type >= nr_swapfiles) nr_swapfiles = type+1; + INIT_LIST_HEAD(&p->extent_list); p->flags = SWP_USED; + p->nr_extents = 0; p->swap_file = NULL; p->old_block_size = 0; p->swap_map = NULL; @@ -909,7 +1111,6 @@ if (IS_ERR(name)) goto bad_swap_2; swap_file = filp_open(name, O_RDWR, 0); - putname(name); error = PTR_ERR(swap_file); if (IS_ERR(swap_file)) { swap_file = NULL; @@ -931,8 +1132,12 @@ PAGE_SIZE); if (error < 0) goto bad_swap; - } else if (!S_ISREG(swap_file->f_dentry->d_inode->i_mode)) + p->bdev = bdev; + } else if (S_ISREG(swap_file->f_dentry->d_inode->i_mode)) { + p->bdev = swap_file->f_dentry->d_inode->i_sb->s_bdev; + } else { goto bad_swap; + } mapping = swap_file->f_dentry->d_inode->i_mapping; swapfilesize = mapping->host->i_size >> PAGE_SHIFT; @@ -946,15 +1151,20 @@ goto bad_swap; } - swap_header = (void *) __get_free_page(GFP_USER); - if (!swap_header) { - printk("Unable to start swapping: out of memory :-)\n"); - error = -ENOMEM; + /* + * Read the swap header. + */ + page = read_cache_page(mapping, 0, + (filler_t *)mapping->a_ops->readpage, swap_file); + if (IS_ERR(page)) { + error = PTR_ERR(page); goto bad_swap; } - - lock_page(virt_to_page(swap_header)); - rw_swap_page_nolock(READ, swp_entry(type,0), (char *) swap_header); + wait_on_page_locked(page); + if (!PageUptodate(page)) + goto bad_swap; + kmap(page); + swap_header = page_address(page); if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10)) swap_header_version = 1; @@ -968,33 +1178,10 @@ switch (swap_header_version) { case 1: - memset(((char *) swap_header)+PAGE_SIZE-10,0,10); - j = 0; - p->lowest_bit = 0; - p->highest_bit = 0; - for (i = 1 ; i < 8*PAGE_SIZE ; i++) { - if (test_bit(i,(unsigned long *) swap_header)) { - if (!p->lowest_bit) - p->lowest_bit = i; - p->highest_bit = i; - maxpages = i+1; - j++; - } - } - nr_good_pages = j; - p->swap_map = vmalloc(maxpages * sizeof(short)); - if (!p->swap_map) { - error = -ENOMEM; - goto bad_swap; - } - for (i = 1 ; i < maxpages ; i++) { - if (test_bit(i,(unsigned long *) swap_header)) - p->swap_map[i] = 0; - else - p->swap_map[i] = SWAP_MAP_BAD; - } - break; - + printk(KERN_ERR "version 0 swap is no longer supported. 
" + "Use mkswap -v1 %s\n", name); + error = -EINVAL; + goto bad_swap; case 2: /* Check the swap header's sub-version and the size of the swap file and bad block lists */ @@ -1050,15 +1237,20 @@ goto bad_swap; } p->swap_map[0] = SWAP_MAP_BAD; + p->max = maxpages; + p->pages = nr_good_pages; + + if (setup_swap_extents(p)) + goto bad_swap; + swap_list_lock(); swap_device_lock(p); - p->max = maxpages; p->flags = SWP_ACTIVE; - p->pages = nr_good_pages; nr_swap_pages += nr_good_pages; total_swap_pages += nr_good_pages; - printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n", - nr_good_pages<<(PAGE_SHIFT-10), p->prio); + printk(KERN_INFO "Adding %dk swap on %s. Priority:%d extents:%d\n", + nr_good_pages<<(PAGE_SHIFT-10), name, + p->prio, p->nr_extents); /* insert swap space into swap_list: */ prev = -1; @@ -1092,14 +1284,18 @@ if (!(swap_flags & SWAP_FLAG_PREFER)) ++least_priority; swap_list_unlock(); + destroy_swap_extents(p); if (swap_map) vfree(swap_map); if (swap_file && !IS_ERR(swap_file)) filp_close(swap_file, NULL); out: - if (swap_header) - free_page((long) swap_header); - unlock_kernel(); + if (page && !IS_ERR(page)) { + kunmap(page); + page_cache_release(page); + } + if (name) + putname(name); return error; } @@ -1168,78 +1364,10 @@ goto out; } -/* - * Page lock needs to be held in all cases to prevent races with - * swap file deletion. - */ -int swap_count(struct page *page) -{ - struct swap_info_struct * p; - unsigned long offset, type; - swp_entry_t entry; - int retval = 0; - - entry.val = page->index; - if (!entry.val) - goto bad_entry; - type = swp_type(entry); - if (type >= nr_swapfiles) - goto bad_file; - p = type + swap_info; - offset = swp_offset(entry); - if (offset >= p->max) - goto bad_offset; - if (!p->swap_map[offset]) - goto bad_unused; - retval = p->swap_map[offset]; -out: - return retval; - -bad_entry: - printk(KERN_ERR "swap_count: null entry!\n"); - goto out; -bad_file: - printk(KERN_ERR "swap_count: %s%08lx\n", Bad_file, entry.val); - goto out; -bad_offset: - printk(KERN_ERR "swap_count: %s%08lx\n", Bad_offset, entry.val); - goto out; -bad_unused: - printk(KERN_ERR "swap_count: %s%08lx\n", Unused_offset, entry.val); - goto out; -} - -/* - * Prior swap_duplicate protects against swap device deletion. 
- */ -void get_swaphandle_info(swp_entry_t entry, unsigned long *offset, - struct inode **swapf) +struct swap_info_struct * +get_swap_info_struct(unsigned type) { - unsigned long type; - struct swap_info_struct *p; - - type = swp_type(entry); - if (type >= nr_swapfiles) { - printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val); - return; - } - - p = &swap_info[type]; - *offset = swp_offset(entry); - if (*offset >= p->max && *offset != 0) { - printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset, entry.val); - return; - } - if (p->swap_map && !p->swap_map[*offset]) { - printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset, entry.val); - return; - } - if (!(p->flags & SWP_USED)) { - printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file, entry.val); - return; - } - - *swapf = p->swap_file->f_dentry->d_inode; + return &swap_info[type]; } /* diff -Nru a/mm/vmalloc.c b/mm/vmalloc.c --- a/mm/vmalloc.c Tue Jun 18 19:12:01 2002 +++ b/mm/vmalloc.c Tue Jun 18 19:12:01 2002 @@ -195,6 +195,7 @@ if (addr > VMALLOC_END-size) goto out; } + area->phys_addr = 0; area->flags = flags; area->addr = (void *)addr; area->size = size; @@ -209,9 +210,25 @@ return NULL; } -void vfree(void * addr) +struct vm_struct *remove_kernel_area(void *addr) { struct vm_struct **p, *tmp; + write_lock(&vmlist_lock); + for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { + if (tmp->addr == addr) { + *p = tmp->next; + write_unlock(&vmlist_lock); + return tmp; + } + + } + write_unlock(&vmlist_lock); + return NULL; +} + +void vfree(void * addr) +{ + struct vm_struct *tmp; if (!addr) return; @@ -219,17 +236,12 @@ printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr); return; } - write_lock(&vmlist_lock); - for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { - if (tmp->addr == addr) { - *p = tmp->next; + tmp = remove_kernel_area(addr); + if (tmp) { vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size); - write_unlock(&vmlist_lock); kfree(tmp); return; } - } - write_unlock(&vmlist_lock); printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr); } diff -Nru a/mm/vmscan.c b/mm/vmscan.c --- a/mm/vmscan.c Tue Jun 18 19:12:01 2002 +++ b/mm/vmscan.c Tue Jun 18 19:12:01 2002 @@ -392,7 +392,8 @@ spin_lock(&pagemap_lru_lock); while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) { - struct page * page; + struct page *page; + int may_enter_fs; if (need_resched()) { spin_unlock(&pagemap_lru_lock); @@ -427,10 +428,17 @@ goto page_mapped; /* + * swap activity never enters the filesystem and is safe + * for GFP_NOFS allocations. + */ + may_enter_fs = (gfp_mask & __GFP_FS) || + (PageSwapCache(page) && (gfp_mask & __GFP_IO)); + + /* * IO in progress? Leave it at the back of the list. */ if (unlikely(PageWriteback(page))) { - if (gfp_mask & __GFP_FS) { + if (may_enter_fs) { page_cache_get(page); spin_unlock(&pagemap_lru_lock); wait_on_page_writeback(page); @@ -451,7 +459,7 @@ mapping = page->mapping; if (PageDirty(page) && is_page_cache_freeable(page) && - page->mapping && (gfp_mask & __GFP_FS)) { + page->mapping && may_enter_fs) { /* * It is not critical here to write it only if * the page is unmapped beause any direct writer @@ -480,6 +488,15 @@ * If the page has buffers, try to free the buffer mappings * associated with this page. If we succeed we try to free * the page as well. + * + * We do this even if the page is PageDirty(). + * try_to_release_page() does not perform I/O, but it is + * possible for a page to have PageDirty set, but it is actually + * clean (all its buffers are clean). 
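
The may_enter_fs computation deserves a second look: swapout never calls into a filesystem, so a GFP_NOFS allocator may still start or wait on swap I/O as long as __GFP_IO is allowed. A sketch of the predicate, with invented flag values:

#include <stdio.h>

#define __GFP_IO 0x40
#define __GFP_FS 0x80

static int may_enter_fs(unsigned int gfp_mask, int page_is_swapcache)
{
	return (gfp_mask & __GFP_FS) ||
	       (page_is_swapcache && (gfp_mask & __GFP_IO));
}

int main(void)
{
	unsigned int gfp_nofs = __GFP_IO;	/* I/O ok, no fs re-entry */

	printf("file page, GFP_NOFS: %d (expect 0)\n",
	       may_enter_fs(gfp_nofs, 0));
	printf("swap page, GFP_NOFS: %d (expect 1)\n",
	       may_enter_fs(gfp_nofs, 1));
	return 0;
}
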
This happens if the + * buffers were written out directly, with submit_bh(). ext3 + * will do this, as well as the blockdev mapping. + * try_to_release_page() will discover that cleanness and will + * drop the buffers and mark the page clean - it can be freed. */ if (PagePrivate(page)) { spin_unlock(&pagemap_lru_lock); diff -Nru a/net/802/p8022.c b/net/802/p8022.c --- a/net/802/p8022.c Tue Jun 18 19:12:02 2002 +++ b/net/802/p8022.c Tue Jun 18 19:12:02 2002 @@ -11,11 +11,10 @@ * matches. The control byte is ignored and handling of such items * is up to the routine passed the frame. * - * Unlike the 802.3 datalink we have a list of 802.2 entries as there - * are multiple protocols to demux. The list is currently short (3 or - * 4 entries at most). The current demux assumes this. + * Unlike the 802.3 datalink we have a list of 802.2 entries as + * there are multiple protocols to demux. The list is currently + * short (3 or 4 entries at most). The current demux assumes this. */ - #include #include #include @@ -25,8 +24,13 @@ #include #include -static struct datalink_proto *p8022_list = NULL; +extern void llc_register_sap(unsigned char sap, + int (*rcvfunc)(struct sk_buff *, + struct net_device *, + struct packet_type *)); +extern void llc_unregister_sap(unsigned char sap); +static struct datalink_proto *p8022_list; /* * We don't handle the loopback SAP stuff, the extended * 802.2 command set, multicast SAP identifiers and non UI @@ -34,91 +38,68 @@ * IP and Appletalk phase 2. See the llc_* routines for * support libraries if your protocol needs these. */ - static struct datalink_proto *find_8022_client(unsigned char type) { - struct datalink_proto *proto; - - for (proto = p8022_list; - ((proto != NULL) && (*(proto->type) != type)); - proto = proto->next) - ; + struct datalink_proto *proto = p8022_list; + while (proto && *(proto->type) != type) + proto = proto->next; return proto; } -int p8022_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) +int p8022_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt) { - struct datalink_proto *proto; + struct datalink_proto *proto; + int rc = 0; proto = find_8022_client(*(skb->h.raw)); - if (proto != NULL) - { - skb->h.raw += 3; - skb->nh.raw += 3; - skb_pull(skb,3); - return proto->rcvfunc(skb, dev, pt); + if (!proto) { + skb->sk = NULL; + kfree_skb(skb); + goto out; } - - skb->sk = NULL; - kfree_skb(skb); - return 0; + skb->h.raw += 3; + skb->nh.raw += 3; + skb_pull(skb, 3); + rc = proto->rcvfunc(skb, dev, pt); +out: return rc; } static void p8022_datalink_header(struct datalink_proto *dl, - struct sk_buff *skb, unsigned char *dest_node) + struct sk_buff *skb, unsigned char *dest_node) { - struct net_device *dev = skb->dev; - unsigned char *rawp; + struct net_device *dev = skb->dev; + unsigned char *rawp = skb_push(skb, 3); - rawp = skb_push(skb,3); *rawp++ = dl->type[0]; *rawp++ = dl->type[0]; - *rawp = 0x03; /* UI */ + *rawp = 0x03; /* UI */ dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); } -static struct packet_type p8022_packet_type = -{ - 0, /* MUTTER ntohs(ETH_P_8022),*/ - NULL, /* All devices */ - p8022_rcv, - NULL, - NULL, -}; - -EXPORT_SYMBOL(register_8022_client); -EXPORT_SYMBOL(unregister_8022_client); - -static int __init p8022_init(void) -{ - p8022_packet_type.type=htons(ETH_P_802_2); - dev_add_pack(&p8022_packet_type); - return 0; -} - -module_init(p8022_init); - -struct datalink_proto *register_8022_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct net_device 
*, struct packet_type *)) -{ - struct datalink_proto *proto; - - if (find_8022_client(type) != NULL) - return NULL; - - proto = (struct datalink_proto *) kmalloc(sizeof(*proto), GFP_ATOMIC); - if (proto != NULL) { - proto->type[0] = type; - proto->type_len = 1; - proto->rcvfunc = rcvfunc; - proto->header_length = 3; - proto->datalink_header = p8022_datalink_header; - proto->string_name = "802.2"; - proto->next = p8022_list; - p8022_list = proto; +struct datalink_proto *register_8022_client(unsigned char type, + int (*rcvfunc)(struct sk_buff *, + struct net_device *, + struct packet_type *)) +{ + struct datalink_proto *proto = NULL; + + if (find_8022_client(type)) + goto out; + proto = kmalloc(sizeof(*proto), GFP_ATOMIC); + if (proto) { + proto->type[0] = type; + proto->type_len = 1; + proto->rcvfunc = rcvfunc; + proto->header_length = 3; + proto->datalink_header = p8022_datalink_header; + proto->string_name = "802.2"; + proto->next = p8022_list; + p8022_list = proto; + llc_register_sap(type, p8022_rcv); } - - return proto; +out: return proto; } void unregister_8022_client(unsigned char type) @@ -128,17 +109,18 @@ save_flags(flags); cli(); - - while ((tmp = *clients) != NULL) - { + while (*clients) { + tmp = *clients; if (tmp->type[0] == type) { *clients = tmp->next; kfree(tmp); + llc_unregister_sap(type); break; - } else { - clients = &tmp->next; } + clients = &tmp->next; } - restore_flags(flags); } + +EXPORT_SYMBOL(register_8022_client); +EXPORT_SYMBOL(unregister_8022_client); diff -Nru a/net/Config.in b/net/Config.in --- a/net/Config.in Tue Jun 18 19:12:02 2002 +++ b/net/Config.in Tue Jun 18 19:12:02 2002 @@ -64,11 +64,12 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25 tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB - bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC + tristate 'ANSI/IEEE 802.2 Data link layer protocol' CONFIG_LLC + if [ "$CONFIG_LLC" != "n" ]; then + # When NETBEUI is added the following line will be a tristate + define_bool CONFIG_LLC_UI y + fi bool 'Frame Diverter (EXPERIMENTAL)' CONFIG_NET_DIVERT -# if [ "$CONFIG_LLC" = "y" ]; then -# bool ' Netbeui (EXPERIMENTAL)' CONFIG_NETBEUI -# fi if [ "$CONFIG_INET" = "y" ]; then tristate 'Acorn Econet/AUN protocols (EXPERIMENTAL)' CONFIG_ECONET if [ "$CONFIG_ECONET" != "n" ]; then diff -Nru a/net/Makefile b/net/Makefile --- a/net/Makefile Tue Jun 18 19:12:02 2002 +++ b/net/Makefile Tue Jun 18 19:12:02 2002 @@ -7,6 +7,7 @@ O_TARGET := network.o + export-objs := netsyms.o obj-y := socket.o core/ @@ -42,6 +43,7 @@ obj-$(CONFIG_DECNET) += decnet/ obj-$(CONFIG_ECONET) += econet/ obj-$(CONFIG_VLAN_8021Q) += 8021q/ +obj-$(CONFIG_LLC) += llc/ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_MODULES) += netsyms.o diff -Nru a/net/core/Makefile b/net/core/Makefile --- a/net/core/Makefile Tue Jun 18 19:12:02 2002 +++ b/net/core/Makefile Tue Jun 18 19:12:02 2002 @@ -2,7 +2,7 @@ # Makefile for the Linux networking core. 
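
The reworked p8022.c keeps at most one registered client per 802.2 SAP value and demuxes each incoming frame to it, freeing frames for unclaimed SAPs. A user-space sketch of that registry, using a flat array instead of the kernel's linked list and a simplified handler type (0xe0, the IPX SAP, serves as the example):

#include <stdio.h>

typedef void (*rcv_fn)(const char *frame);

static rcv_fn clients[256];

static int register_client(unsigned char type, rcv_fn fn)
{
	if (clients[type])
		return -1;	/* SAP already claimed, as in the kernel */
	clients[type] = fn;
	return 0;
}

static void demux(unsigned char type, const char *frame)
{
	if (clients[type])
		clients[type](frame);
	/* no client: the kernel frees the skb at this point */
}

static void ipx_rcv(const char *frame)
{
	printf("IPX got: %s\n", frame);
}

int main(void)
{
	register_client(0xe0, ipx_rcv);
	demux(0xe0, "hello");
	demux(0x42, "silently dropped");
	return 0;
}
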
# -export-objs := netfilter.o profile.o +export-objs := ext8022.o netfilter.o profile.o obj-y := sock.o skbuff.o iovec.o datagram.o scm.o @@ -15,6 +15,10 @@ obj-$(CONFIG_FILTER) += filter.o obj-$(CONFIG_NET) += dev.o dev_mcast.o dst.o neighbour.o rtnetlink.o utils.o + +ifneq ($(CONFIG_LLC),n) +obj-y += ext8022.o +endif obj-$(CONFIG_NETFILTER) += netfilter.o obj-$(CONFIG_NET_DIVERT) += dv.o diff -Nru a/net/core/datagram.c b/net/core/datagram.c --- a/net/core/datagram.c Tue Jun 18 19:12:02 2002 +++ b/net/core/datagram.c Tue Jun 18 19:12:02 2002 @@ -1,19 +1,28 @@ /* * SUCS NET3: * - * Generic datagram handling routines. These are generic for all protocols. Possibly a generic IP version on top - * of these would make sense. Not tonight however 8-). - * This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and NetROM layer all have identical poll code and mostly - * identical recvmsg() code. So we share it here. The poll was shared before but buried in udp.c so I moved it. + * Generic datagram handling routines. These are generic for all + * protocols. Possibly a generic IP version on top of these would + * make sense. Not tonight however 8-). + * This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and + * NetROM layer all have identical poll code and mostly + * identical recvmsg() code. So we share it here. The poll was + * shared before but buried in udp.c so I moved it. * - * Authors: Alan Cox . (datagram_poll() from old udp.c code) + * Authors: Alan Cox . (datagram_poll() from old + * udp.c code) * * Fixes: - * Alan Cox : NULL return from skb_peek_copy() understood - * Alan Cox : Rewrote skb_read_datagram to avoid the skb_peek_copy stuff. - * Alan Cox : Added support for SOCK_SEQPACKET. IPX can no longer use the SO_TYPE hack but - * AX.25 now works right, and SPX is feasible. - * Alan Cox : Fixed write poll of non IP protocol crash. + * Alan Cox : NULL return from skb_peek_copy() + * understood + * Alan Cox : Rewrote skb_read_datagram to avoid the + * skb_peek_copy stuff. + * Alan Cox : Added support for SOCK_SEQPACKET. + * IPX can no longer use the SO_TYPE hack + * but AX.25 now works right, and SPX is + * feasible. + * Alan Cox : Fixed write poll of non IP protocol + * crash. * Florian La Roche: Changed for my new skbuff handling. * Darryl Miles : Fixed non-blocking SOCK_SEQPACKET. * Linus Torvalds : BSD semantic fixes. @@ -48,18 +57,15 @@ /* * Is a socket 'connection oriented' ? */ - static inline int connection_based(struct sock *sk) { - return (sk->type==SOCK_SEQPACKET || sk->type==SOCK_STREAM); + return sk->type == SOCK_SEQPACKET || sk->type == SOCK_STREAM; } - /* * Wait for a packet.. */ - -static int wait_for_packet(struct sock * sk, int *err, long *timeo_p) +static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) { int error; @@ -74,51 +80,57 @@ goto out_err; if (!skb_queue_empty(&sk->receive_queue)) - goto ready; + goto out; /* Socket shut down? */ if (sk->shutdown & RCV_SHUTDOWN) goto out_noerr; - /* Sequenced packets can come disconnected. If so we report the problem */ + /* Sequenced packets can come disconnected. 
+ * If so we report the problem + */ error = -ENOTCONN; - if(connection_based(sk) && !(sk->state==TCP_ESTABLISHED || sk->state==TCP_LISTEN)) + if (connection_based(sk) && !(sk->state == TCP_ESTABLISHED || + sk->state == TCP_LISTEN)) goto out_err; /* handle signals */ if (signal_pending(current)) goto interrupted; + error = 0; *timeo_p = schedule_timeout(*timeo_p); - -ready: +out: current->state = TASK_RUNNING; remove_wait_queue(sk->sleep, &wait); - return 0; - + return error; interrupted: error = sock_intr_errno(*timeo_p); out_err: *err = error; -out: - current->state = TASK_RUNNING; - remove_wait_queue(sk->sleep, &wait); - return error; + goto out; out_noerr: *err = 0; error = 1; goto out; } -/* - * Get a datagram skbuff, understands the peeking, nonblocking wakeups and possible - * races. This replaces identical code in packet,raw and udp, as well as the IPX - * AX.25 and Appletalk. It also finally fixes the long standing peek and read - * race for datagram sockets. If you alter this routine remember it must be - * re-entrant. +/** + * skb_recv_datagram - Receive a datagram skbuff + * @sk - socket + * @flags - MSG_ flags + * @noblock - blocking operation? + * @err - error code returned + * + * Get a datagram skbuff, understands the peeking, nonblocking wakeups + * and possible races. This replaces identical code in packet, raw and + * udp, as well as the IPX AX.25 and Appletalk. It also finally fixes + * the long standing peek and read race for datagram sockets. If you + * alter this routine remember it must be re-entrant. * * This function will lock the socket if a skb is returned, so the caller - * needs to unlock the socket in that case (usually by calling skb_free_datagram) + * needs to unlock the socket in that case (usually by calling + * skb_free_datagram) * * * It does not lock socket since today. This function is * * free of race conditions. This measure should/can improve @@ -132,36 +144,35 @@ * quite explicitly by POSIX 1003.1g, don't change them without having * the standard around please. */ - -struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err) +struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, + int noblock, int *err) { - int error; struct sk_buff *skb; long timeo; - /* Caller is allowed not to check sk->err before skb_recv_datagram() */ - error = sock_error(sk); + int error = sock_error(sk); + if (error) goto no_packet; timeo = sock_rcvtimeo(sk, noblock); do { - /* Again only user level code calls this function, so nothing interrupt level - will suddenly eat the receive_queue. - - Look at current nfs client by the way... - However, this function was corrent in any case. 8) + /* Again only user level code calls this function, so nothing + * interrupt level will suddenly eat the receive_queue. + * + * Look at current nfs client by the way... + * However, this function was corrent in any case. 
8) */ - if (flags & MSG_PEEK) - { + if (flags & MSG_PEEK) { unsigned long cpu_flags; spin_lock_irqsave(&sk->receive_queue.lock, cpu_flags); skb = skb_peek(&sk->receive_queue); - if(skb!=NULL) + if (skb) atomic_inc(&skb->users); - spin_unlock_irqrestore(&sk->receive_queue.lock, cpu_flags); + spin_unlock_irqrestore(&sk->receive_queue.lock, + cpu_flags); } else skb = skb_dequeue(&sk->receive_queue); @@ -173,7 +184,7 @@ if (!timeo) goto no_packet; - } while (wait_for_packet(sk, err, &timeo) == 0); + } while (!wait_for_packet(sk, err, &timeo)); return NULL; @@ -182,7 +193,7 @@ return NULL; } -void skb_free_datagram(struct sock * sk, struct sk_buff *skb) +void skb_free_datagram(struct sock *sk, struct sk_buff *skb) { kfree_skb(skb); } @@ -190,26 +201,33 @@ /* * Copy a datagram to a linear buffer. */ - int skb_copy_datagram(const struct sk_buff *skb, int offset, char *to, int size) { - struct iovec iov = { to, size }; + struct iovec iov = { + iov_base: to, + iov_len: size, + }; return skb_copy_datagram_iovec(skb, offset, &iov, size); } -/* - * Copy a datagram to an iovec. +/** + * skb_copy_datagram_iovec - Copy a datagram to an iovec. + * @skb - buffer to copy + * @offset - offset in the buffer to start copying from + * @iovec - io vector to copy to + * @len - amount of data to copy from buffer to iovec + * * Note: the iovec is modified during the copy. */ -int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, struct iovec *to, - int len) +int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, + struct iovec *to, int len) { - int i, copy; int start = skb->len - skb->data_len; + int i, copy = start - offset; /* Copy header. */ - if ((copy = start-offset) > 0) { + if (copy > 0) { if (copy > len) copy = len; if (memcpy_toiovec(to, skb->data + offset, copy)) @@ -220,13 +238,13 @@ } /* Copy paged appendix. Hmm... why does this look so complicated? */ - for (i=0; inr_frags; i++) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { int err; u8 *vaddr; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -236,7 +254,7 @@ copy = len; vaddr = kmap(page); err = memcpy_toiovec(to, vaddr + frag->page_offset + - offset-start, copy); + offset - start, copy); kunmap(page); if (err) goto fault; @@ -248,18 +266,20 @@ } if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list; + struct sk_buff *list = skb_shinfo(skb)->frag_list; - for (list = skb_shinfo(skb)->frag_list; list; list=list->next) { + for (; list; list = list->next) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + list->len; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { if (copy > len) copy = len; - if (skb_copy_datagram_iovec(list, offset-start, to, copy)) + if (skb_copy_datagram_iovec(list, + offset - start, + to, copy)) goto fault; if ((len -= copy) == 0) return 0; @@ -268,25 +288,27 @@ start = end; } } - if (len == 0) + if (!len) return 0; fault: return -EFAULT; } -int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump) +int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, + u8 *to, int len, unsigned int *csump) { - int i, copy; int start = skb->len - skb->data_len; int pos = 0; + int i, copy = start - offset; /* Copy header. 
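
Every copy loop in skb_copy_datagram_iovec() follows the same `copy = end - offset' bookkeeping: walk the segments in order, clip each one against the requested [offset, offset+len) window, and stop once len is exhausted. A self-contained sketch of that arithmetic over an invented two-segment buffer:

#include <stdio.h>
#include <string.h>

struct seg {
	const char *data;
	int len;
};

static int copy_from_segs(const struct seg *segs, int nsegs, int offset,
			  char *to, int len)
{
	int start = 0, i;

	for (i = 0; i < nsegs && len > 0; i++) {
		int end = start + segs[i].len;
		int copy = end - offset;

		if (copy > 0) {
			if (copy > len)
				copy = len;
			memcpy(to, segs[i].data + (offset - start), copy);
			to += copy;
			offset += copy;
			len -= copy;
		}
		start = end;
	}
	return len ? -1 : 0;	/* ran out of data: the -EFAULT analogue */
}

int main(void)
{
	const struct seg segs[] = { { "head", 4 }, { "frag", 4 } };
	char out[6] = "";

	copy_from_segs(segs, 2, 2, out, 5);
	printf("copied: %s (expect adfra)\n", out);
	return 0;
}
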
*/ - if ((copy = start-offset) > 0) { + if (copy > 0) { int err = 0; if (copy > len) copy = len; - *csump = csum_and_copy_to_user(skb->data+offset, to, copy, *csump, &err); + *csump = csum_and_copy_to_user(skb->data + offset, to, copy, + *csump, &err); if (err) goto fault; if ((len -= copy) == 0) @@ -296,13 +318,13 @@ pos = copy; } - for (i=0; inr_frags; i++) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { unsigned int csum2; int err = 0; u8 *vaddr; @@ -312,8 +334,10 @@ if (copy > len) copy = len; vaddr = kmap(page); - csum2 = csum_and_copy_to_user(vaddr + frag->page_offset + - offset-start, to, copy, 0, &err); + csum2 = csum_and_copy_to_user(vaddr + + frag->page_offset + + offset - start, + to, copy, 0, &err); kunmap(page); if (err) goto fault; @@ -328,19 +352,22 @@ } if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list; + struct sk_buff *list = skb_shinfo(skb)->frag_list; - for (list = skb_shinfo(skb)->frag_list; list; list=list->next) { + for (; list; list=list->next) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + list->len; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { unsigned int csum2 = 0; if (copy > len) copy = len; - if (skb_copy_and_csum_datagram(list, offset-start, to, copy, &csum2)) + if (skb_copy_and_csum_datagram(list, + offset - start, + to, copy, + &csum2)) goto fault; *csump = csum_block_add(*csump, csum2, pos); if ((len -= copy) == 0) @@ -352,39 +379,48 @@ start = end; } } - if (len == 0) + if (!len) return 0; fault: return -EFAULT; } -/* Copy and checkum skb to user iovec. Caller _must_ check that - skb will fit to this iovec. - - Returns: 0 - success. - -EINVAL - checksum failure. - -EFAULT - fault during copy. Beware, in this case iovec can be - modified! +/** + * skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec. + * @skb - skbuff + * @hlen - hardware length + * @iovec - io vector + * + * Caller _must_ check that skb will fit to this iovec. + * + * Returns: 0 - success. + * -EINVAL - checksum failure. + * -EFAULT - fault during copy. Beware, in this case iovec + * can be modified! */ - -int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov) +int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, + int hlen, struct iovec *iov) { unsigned int csum; int chunk = skb->len - hlen; - /* Skip filled elements. Pretty silly, look at memcpy_toiovec, though 8) */ - while (iov->iov_len == 0) + /* Skip filled elements. 
+ * Pretty silly, look at memcpy_toiovec, though 8) + */ + while (!iov->iov_len) iov++; if (iov->iov_len < chunk) { - if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk+hlen, skb->csum))) + if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen, + skb->csum))) goto csum_error; if (skb_copy_datagram_iovec(skb, hlen, iov, chunk)) goto fault; } else { csum = csum_partial(skb->data, hlen, skb->csum); - if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, chunk, &csum)) + if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, + chunk, &csum)) goto fault; if ((unsigned short)csum_fold(csum)) goto csum_error; @@ -392,17 +428,18 @@ iov->iov_base += chunk; } return 0; - csum_error: return -EINVAL; - fault: return -EFAULT; } - - -/* +/** + * datagram_poll - generic datagram poll + * @file - file struct + * @sock - socket + * @wait - poll table + * * Datagram poll: Again totally generic. This also handles * sequenced packet sockets providing the socket receive queue * is only ever holding data ready to receive. @@ -411,8 +448,8 @@ * and you use a different write policy from sock_writeable() * then please supply your own write_space callback. */ - -unsigned int datagram_poll(struct file * file, struct socket *sock, poll_table *wait) +unsigned int datagram_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; unsigned int mask; @@ -427,12 +464,13 @@ mask |= POLLHUP; /* readable? */ - if (!skb_queue_empty(&sk->receive_queue) || (sk->shutdown&RCV_SHUTDOWN)) + if (!skb_queue_empty(&sk->receive_queue) || + (sk->shutdown & RCV_SHUTDOWN)) mask |= POLLIN | POLLRDNORM; /* Connection-based need to check for termination and startup */ if (connection_based(sk)) { - if (sk->state==TCP_CLOSE) + if (sk->state == TCP_CLOSE) mask |= POLLHUP; /* connection hasn't started yet? */ if (sk->state == TCP_SYN_SENT) diff -Nru a/net/core/dev.c b/net/core/dev.c --- a/net/core/dev.c Tue Jun 18 19:12:02 2002 +++ b/net/core/dev.c Tue Jun 18 19:12:02 2002 @@ -1817,11 +1817,13 @@ static int dev_proc_stats(char *buffer, char **start, off_t offset, int length, int *eof, void *data) { - int i, lcpu; + int i; int len = 0; - for (lcpu = 0; lcpu < smp_num_cpus; lcpu++) { - i = cpu_logical_map(lcpu); + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; + len += sprintf(buffer + len, "%08x %08x %08x %08x %08x %08x " "%08x %08x %08x\n", netdev_rx_stat[i].total, diff -Nru a/net/core/ext8022.c b/net/core/ext8022.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/core/ext8022.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,76 @@ +/* + * (ext8022.c) + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. 
+ */ +#include +#include +#include +#include + +typedef int (*func_type)(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt); +static int llc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *); + +static func_type llc_sap_table[128]; +static int llc_users; + +static struct packet_type llc_packet_type = { + type: __constant_htons(ETH_P_802_2), + func: llc_rcv, +}; +static struct packet_type llc_tr_packet_type = { + type: __constant_htons(ETH_P_TR_802_2), + func: llc_rcv, +}; + +static int llc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt) +{ + unsigned char n = (*(skb->h.raw)) >> 1; + + br_read_lock(BR_LLC_LOCK); + if (llc_sap_table[n]) + llc_sap_table[n](skb, dev, pt); + else + kfree_skb(skb); + br_read_unlock(BR_LLC_LOCK); + return 0; +} + +void llc_register_sap(unsigned char sap, func_type rcvfunc) +{ + sap >>= 1; + br_write_lock_bh(BR_LLC_LOCK); + llc_sap_table[sap] = rcvfunc; + if (!llc_users) { + dev_add_pack(&llc_packet_type); + dev_add_pack(&llc_tr_packet_type); + } + llc_users++; + br_write_unlock_bh(BR_LLC_LOCK); +} + +void llc_unregister_sap(unsigned char sap) +{ + sap >>= 1; + br_write_lock_bh(BR_LLC_LOCK); + llc_sap_table[sap] = NULL; + if (!--llc_users) { + dev_remove_pack(&llc_packet_type); + dev_remove_pack(&llc_tr_packet_type); + } + br_write_unlock_bh(BR_LLC_LOCK); +} + +EXPORT_SYMBOL(llc_register_sap); +EXPORT_SYMBOL(llc_unregister_sap); diff -Nru a/net/core/neighbour.c b/net/core/neighbour.c --- a/net/core/neighbour.c Tue Jun 18 19:12:01 2002 +++ b/net/core/neighbour.c Tue Jun 18 19:12:01 2002 @@ -64,7 +64,7 @@ cache. - If the entry requires some non-trivial actions, increase its reference count and release table lock. - + Neighbour entries are protected: - with reference count. 
- with rwlock neigh->lock @@ -101,7 +101,7 @@ unsigned long neigh_rand_reach_time(unsigned long base) { - return (net_random() % base) + (base>>1); + return (net_random() % base) + (base >> 1); } @@ -110,7 +110,7 @@ int shrunk = 0; int i; - for (i=0; i<=NEIGH_HASHMASK; i++) { + for (i = 0; i <= NEIGH_HASHMASK; i++) { struct neighbour *n, **np; np = &tbl->hash_buckets[i]; @@ -128,12 +128,12 @@ */ write_lock(&n->lock); if (atomic_read(&n->refcnt) == 1 && - !(n->nud_state&NUD_PERMANENT) && + !(n->nud_state & NUD_PERMANENT) && (n->nud_state != NUD_INCOMPLETE || jiffies - n->used > n->parms->retrans_time)) { - *np = n->next; + *np = n->next; n->dead = 1; - shrunk = 1; + shrunk = 1; write_unlock(&n->lock); neigh_release(n); continue; @@ -143,18 +143,17 @@ } write_unlock_bh(&tbl->lock); } - + tbl->last_flush = jiffies; return shrunk; } static int neigh_del_timer(struct neighbour *n) { - if (n->nud_state & NUD_IN_TIMER) { - if (del_timer(&n->timer)) { - neigh_release(n); - return 1; - } + if ((n->nud_state & NUD_IN_TIMER) && + del_timer(&n->timer)) { + neigh_release(n); + return 1; } return 0; } @@ -175,10 +174,9 @@ write_lock_bh(&tbl->lock); - for (i=0; i<=NEIGH_HASHMASK; i++) { - struct neighbour *n, **np; + for (i = 0; i <= NEIGH_HASHMASK; i++) { + struct neighbour *n, **np = &tbl->hash_buckets[i]; - np = &tbl->hash_buckets[i]; while ((n = *np) != NULL) { if (dev && n->dev != dev) { np = &n->next; @@ -202,7 +200,7 @@ n->parms = &tbl->parms; skb_queue_purge(&n->arp_queue); n->output = neigh_blackhole; - if (n->nud_state&NUD_VALID) + if (n->nud_state & NUD_VALID) n->nud_state = NUD_NOARP; else n->nud_state = NUD_NONE; @@ -223,38 +221,39 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl) { - struct neighbour *n; + struct neighbour *n = NULL; unsigned long now = jiffies; if (tbl->entries > tbl->gc_thresh3 || (tbl->entries > tbl->gc_thresh2 && - now - tbl->last_flush > 5*HZ)) { - if (neigh_forced_gc(tbl) == 0 && + now - tbl->last_flush > 5 * HZ)) { + if (!neigh_forced_gc(tbl) && tbl->entries > tbl->gc_thresh3) - return NULL; + goto out; } n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC); - if (n == NULL) - return NULL; + if (!n) + goto out; memset(n, 0, tbl->entry_size); skb_queue_head_init(&n->arp_queue); - n->lock = RW_LOCK_UNLOCKED; - n->updated = n->used = now; - n->nud_state = NUD_NONE; - n->output = neigh_blackhole; - n->parms = &tbl->parms; + n->lock = RW_LOCK_UNLOCKED; + n->updated = n->used = now; + n->nud_state = NUD_NONE; + n->output = neigh_blackhole; + n->parms = &tbl->parms; init_timer(&n->timer); n->timer.function = neigh_timer_handler; - n->timer.data = (unsigned long)n; + n->timer.data = (unsigned long)n; tbl->stats.allocs++; neigh_glbl_allocs++; tbl->entries++; - n->tbl = tbl; + n->tbl = tbl; atomic_set(&n->refcnt, 1); - n->dead = 1; + n->dead = 1; +out: return n; } @@ -262,15 +261,12 @@ struct net_device *dev) { struct neighbour *n; - u32 hash_val; int key_len = tbl->key_len; - - hash_val = tbl->hash(pkey, dev); + u32 hash_val = tbl->hash(pkey, dev); read_lock_bh(&tbl->lock); for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { - if (dev == n->dev && - memcmp(n->primary_key, pkey, key_len) == 0) { + if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { neigh_hold(n); break; } @@ -279,17 +275,18 @@ return n; } -struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey, - struct net_device *dev) +struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, + struct net_device *dev) { - struct neighbour *n, *n1; u32 hash_val; 
int key_len = tbl->key_len; int error; + struct neighbour *n1, *rc, *n = neigh_alloc(tbl); - n = neigh_alloc(tbl); - if (n == NULL) - return ERR_PTR(-ENOBUFS); + if (!n) { + rc = ERR_PTR(-ENOBUFS); + goto out; + } memcpy(n->primary_key, pkey, key_len); n->dev = dev; @@ -297,29 +294,28 @@ /* Protocol specific setup. */ if (tbl->constructor && (error = tbl->constructor(n)) < 0) { - neigh_release(n); - return ERR_PTR(error); + rc = ERR_PTR(error); + goto out_neigh_release; } /* Device specific setup. */ if (n->parms->neigh_setup && (error = n->parms->neigh_setup(n)) < 0) { - neigh_release(n); - return ERR_PTR(error); + rc = ERR_PTR(error); + goto out_neigh_release; } - n->confirmed = jiffies - (n->parms->base_reachable_time<<1); + n->confirmed = jiffies - (n->parms->base_reachable_time << 1); hash_val = tbl->hash(pkey, dev); write_lock_bh(&tbl->lock); for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) { - if (dev == n1->dev && - memcmp(n1->primary_key, pkey, key_len) == 0) { + if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { neigh_hold(n1); write_unlock_bh(&tbl->lock); - neigh_release(n); - return n1; + rc = n1; + goto out_neigh_release; } } @@ -329,69 +325,77 @@ neigh_hold(n); write_unlock_bh(&tbl->lock); NEIGH_PRINTK2("neigh %p is created.\n", n); - return n; + rc = n; +out: + return rc; +out_neigh_release: + neigh_release(n); + goto out; } struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) { struct pneigh_entry *n; - u32 hash_val; int key_len = tbl->key_len; + u32 hash_val = *(u32 *)(pkey + key_len - 4); - hash_val = *(u32*)(pkey + key_len - 4); - hash_val ^= (hash_val>>16); - hash_val ^= hash_val>>8; - hash_val ^= hash_val>>4; + hash_val ^= (hash_val >> 16); + hash_val ^= hash_val >> 8; + hash_val ^= hash_val >> 4; hash_val &= PNEIGH_HASHMASK; read_lock_bh(&tbl->lock); for (n = tbl->phash_buckets[hash_val]; n; n = n->next) { - if (memcmp(n->key, pkey, key_len) == 0 && + if (!memcmp(n->key, pkey, key_len) && (n->dev == dev || !n->dev)) { read_unlock_bh(&tbl->lock); - return n; + goto out; } } read_unlock_bh(&tbl->lock); + n = NULL; if (!creat) - return NULL; + goto out; n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); - if (n == NULL) - return NULL; + if (!n) + goto out; memcpy(n->key, pkey, key_len); n->dev = dev; if (tbl->pconstructor && tbl->pconstructor(n)) { kfree(n); - return NULL; + n = NULL; + goto out; } write_lock_bh(&tbl->lock); n->next = tbl->phash_buckets[hash_val]; tbl->phash_buckets[hash_val] = n; write_unlock_bh(&tbl->lock); +out: return n; } -int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct net_device *dev) +int pneigh_delete(struct neigh_table *tbl, const void *pkey, + struct net_device *dev) { struct pneigh_entry *n, **np; - u32 hash_val; int key_len = tbl->key_len; + u32 hash_val = *(u32 *)(pkey + key_len - 4); - hash_val = *(u32*)(pkey + key_len - 4); - hash_val ^= (hash_val>>16); - hash_val ^= hash_val>>8; - hash_val ^= hash_val>>4; + hash_val ^= (hash_val >> 16); + hash_val ^= hash_val >> 8; + hash_val ^= hash_val >> 4; hash_val &= PNEIGH_HASHMASK; - for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) { - if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) { + for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; + np = &n->next) { + if (!memcmp(n->key, pkey, key_len) && n->dev == dev) { write_lock_bh(&tbl->lock); *np = n->next; write_unlock_bh(&tbl->lock); @@ -409,10 +413,10 @@ struct pneigh_entry *n, **np; u32 h; - for (h=0; 
h<=PNEIGH_HASHMASK; h++) { - np = &tbl->phash_buckets[h]; - while ((n=*np) != NULL) { - if (n->dev == dev || dev == NULL) { + for (h = 0; h <= PNEIGH_HASHMASK; h++) { + np = &tbl->phash_buckets[h]; + while ((n = *np) != NULL) { + if (!dev || n->dev == dev) { *np = n->next; if (tbl->pdestructor) tbl->pdestructor(n); @@ -431,17 +435,18 @@ * */ void neigh_destroy(struct neighbour *neigh) -{ +{ struct hh_cache *hh; if (!neigh->dead) { - printk("Destroying alive neighbour %p from %08lx\n", neigh, - *(((unsigned long*)&neigh)-1)); + printk(KERN_WARNING + "Destroying alive neighbour %p from %08lx\n", neigh, + *(((unsigned long *)&neigh) - 1)); return; } if (neigh_del_timer(neigh)) - printk("Impossible event.\n"); + printk(KERN_WARNING "Impossible event.\n"); while ((hh = neigh->hh) != NULL) { neigh->hh = hh->hh_next; @@ -519,14 +524,14 @@ unsigned long now = jiffies; u8 state = n->nud_state; - if (state&(NUD_NOARP|NUD_PERMANENT)) + if (state & (NUD_NOARP | NUD_PERMANENT)) return; - if (state&NUD_REACHABLE) { + if (state & NUD_REACHABLE) { if (now - n->confirmed > n->parms->reachable_time) { n->nud_state = NUD_STALE; neigh_suspect(n); } - } else if (state&NUD_VALID) { + } else if (state & NUD_VALID) { if (now - n->confirmed < n->parms->reachable_time) { neigh_del_timer(n); n->nud_state = NUD_REACHABLE; @@ -537,7 +542,7 @@ static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg) { - struct neigh_table *tbl = (struct neigh_table*)arg; + struct neigh_table *tbl = (struct neigh_table *)arg; unsigned long now = jiffies; int i; @@ -547,15 +552,16 @@ /* * periodicly recompute ReachableTime from random function */ - - if (now - tbl->last_rand > 300*HZ) { + + if (now - tbl->last_rand > 300 * HZ) { struct neigh_parms *p; tbl->last_rand = now; - for (p=&tbl->parms; p; p = p->next) - p->reachable_time = neigh_rand_reach_time(p->base_reachable_time); + for (p = &tbl->parms; p; p = p->next) + p->reachable_time = + neigh_rand_reach_time(p->base_reachable_time); } - for (i=0; i <= NEIGH_HASHMASK; i++) { + for (i = 0; i <= NEIGH_HASHMASK; i++) { struct neighbour *n, **np; np = &tbl->hash_buckets[i]; @@ -565,7 +571,7 @@ write_lock(&n->lock); state = n->nud_state; - if (state&(NUD_PERMANENT|NUD_IN_TIMER)) { + if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { write_unlock(&n->lock); goto next_elt; } @@ -574,7 +580,8 @@ n->used = n->confirmed; if (atomic_read(&n->refcnt) == 1 && - (state == NUD_FAILED || now - n->used > n->parms->gc_staletime)) { + (state == NUD_FAILED || + now - n->used > n->parms->gc_staletime)) { *np = n->next; n->dead = 1; write_unlock(&n->lock); @@ -582,7 +589,7 @@ continue; } - if (n->nud_state&NUD_REACHABLE && + if (n->nud_state & NUD_REACHABLE && now - n->confirmed > n->parms->reachable_time) { n->nud_state = NUD_STALE; neigh_suspect(n); @@ -601,8 +608,8 @@ #ifdef CONFIG_SMP static void neigh_periodic_timer(unsigned long arg) { - struct neigh_table *tbl = (struct neigh_table*)arg; - + struct neigh_table *tbl = (struct neigh_table *)arg; + tasklet_schedule(&tbl->gc_task); } #endif @@ -616,10 +623,10 @@ /* Called when a timer expires for a neighbour entry. 
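
The hash folding in pneigh_lookup() and pneigh_delete() takes the low 32 bits of the key and xor-shifts them down before masking to a bucket index, so that all key bits influence the bucket. A sketch of the folding, with an invented mask width:

#include <stdio.h>
#include <stdint.h>

#define HASHMASK 0xf	/* stand-in for PNEIGH_HASHMASK */

static unsigned int fold(uint32_t hash_val)
{
	hash_val ^= hash_val >> 16;
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	return hash_val & HASHMASK;
}

int main(void)
{
	printf("bucket for 0x12345678: %u\n", fold(0x12345678));
	printf("bucket for 0x12345679: %u\n", fold(0x12345679));
	return 0;
}
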
*/ -static void neigh_timer_handler(unsigned long arg) +static void neigh_timer_handler(unsigned long arg) { unsigned long now = jiffies; - struct neighbour *neigh = (struct neighbour*)arg; + struct neighbour *neigh = (struct neighbour *)arg; unsigned state; int notify = 0; @@ -627,14 +634,14 @@ state = neigh->nud_state; - if (!(state&NUD_IN_TIMER)) { + if (!(state & NUD_IN_TIMER)) { #ifndef CONFIG_SMP - printk("neigh: timer & !nud_in_timer\n"); + printk(KERN_WARNING "neigh: timer & !nud_in_timer\n"); #endif goto out; } - if ((state&NUD_VALID) && + if ((state & NUD_VALID) && now - neigh->confirmed < neigh->parms->reachable_time) { neigh->nud_state = NUD_REACHABLE; NEIGH_PRINTK2("neigh %p is still alive.\n", neigh); @@ -657,10 +664,11 @@ /* It is very thin place. report_unreachable is very complicated routine. Particularly, it can hit the same neighbour entry! - + So that, we try to be accurate and avoid dead loop. --ANK */ - while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) { + while (neigh->nud_state == NUD_FAILED && + (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { write_unlock(&neigh->lock); neigh->ops->error_report(neigh, skb); write_lock(&neigh->lock); @@ -688,61 +696,69 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) { + int rc; + write_lock_bh(&neigh->lock); - if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) { - if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) { - if (neigh->parms->mcast_probes + neigh->parms->app_probes) { - atomic_set(&neigh->probes, neigh->parms->ucast_probes); - neigh->nud_state = NUD_INCOMPLETE; - neigh_hold(neigh); - neigh->timer.expires = jiffies + neigh->parms->retrans_time; - add_timer(&neigh->timer); - write_unlock_bh(&neigh->lock); - neigh->ops->solicit(neigh, skb); - atomic_inc(&neigh->probes); - write_lock_bh(&neigh->lock); - } else { - neigh->nud_state = NUD_FAILED; - write_unlock_bh(&neigh->lock); - - if (skb) - kfree_skb(skb); - return 1; - } - } - if (neigh->nud_state == NUD_INCOMPLETE) { - if (skb) { - if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) { - struct sk_buff *buff; - buff = neigh->arp_queue.next; - __skb_unlink(buff, &neigh->arp_queue); - kfree_skb(buff); - } - __skb_queue_tail(&neigh->arp_queue, skb); - } + + rc = 0; + if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) + goto out_unlock_bh; + + if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { + if (neigh->parms->mcast_probes + neigh->parms->app_probes) { + atomic_set(&neigh->probes, neigh->parms->ucast_probes); + neigh->nud_state = NUD_INCOMPLETE; + neigh_hold(neigh); + neigh->timer.expires = jiffies + + neigh->parms->retrans_time; + add_timer(&neigh->timer); + write_unlock_bh(&neigh->lock); + neigh->ops->solicit(neigh, skb); + atomic_inc(&neigh->probes); + write_lock_bh(&neigh->lock); + } else { + neigh->nud_state = NUD_FAILED; write_unlock_bh(&neigh->lock); + + if (skb) + kfree_skb(skb); return 1; } - if (neigh->nud_state == NUD_STALE) { - NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); - neigh_hold(neigh); - neigh->nud_state = NUD_DELAY; - neigh->timer.expires = jiffies + neigh->parms->delay_probe_time; - add_timer(&neigh->timer); + } + + if (neigh->nud_state == NUD_INCOMPLETE) { + if (skb) { + if (skb_queue_len(&neigh->arp_queue) >= + neigh->parms->queue_len) { + struct sk_buff *buff; + buff = neigh->arp_queue.next; + __skb_unlink(buff, &neigh->arp_queue); + kfree_skb(buff); + } + __skb_queue_tail(&neigh->arp_queue, skb); } + rc = 1; + } else if (neigh->nud_state == 
NUD_STALE) { + NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); + neigh_hold(neigh); + neigh->nud_state = NUD_DELAY; + neigh->timer.expires = jiffies + neigh->parms->delay_probe_time; + add_timer(&neigh->timer); + rc = 0; } +out_unlock_bh: write_unlock_bh(&neigh->lock); - return 0; + return rc; } static __inline__ void neigh_update_hhs(struct neighbour *neigh) { struct hh_cache *hh; - void (*update)(struct hh_cache*, struct net_device*, unsigned char*) = + void (*update)(struct hh_cache*, struct net_device*, unsigned char *) = neigh->dev->header_cache_update; if (update) { - for (hh=neigh->hh; hh; hh=hh->hh_next) { + for (hh = neigh->hh; hh; hh = hh->hh_next) { write_lock_bh(&hh->hh_lock); update(hh, neigh->dev, neigh->ha); write_unlock_bh(&hh->hh_lock); @@ -755,38 +771,45 @@ /* Generic update routine. -- lladdr is new lladdr or NULL, if it is not supplied. -- new is new state. - -- override==1 allows to override existing lladdr, if it is different. - -- arp==0 means that the change is administrative. + -- override == 1 allows to override existing lladdr, if it is different. + -- arp == 0 means that the change is administrative. Caller MUST hold reference count on the entry. */ -int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp) +int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, + int override, int arp) { u8 old; int err; +#ifdef CONFIG_ARPD int notify = 0; - struct net_device *dev = neigh->dev; +#endif + struct net_device *dev; write_lock_bh(&neigh->lock); - old = neigh->nud_state; - err = -EPERM; - if (arp && (old&(NUD_NOARP|NUD_PERMANENT))) + dev = neigh->dev; + old = neigh->nud_state; + err = -EPERM; + + if (arp && (old & (NUD_NOARP | NUD_PERMANENT))) goto out; - if (!(new&NUD_VALID)) { + if (!(new & NUD_VALID)) { neigh_del_timer(neigh); - if (old&NUD_CONNECTED) + if (old & NUD_CONNECTED) neigh_suspect(neigh); neigh->nud_state = new; err = 0; - notify = old&NUD_VALID; +#ifdef CONFIG_ARPD + notify = old & NUD_VALID; +#endif goto out; } /* Compare new lladdr with cached one */ - if (dev->addr_len == 0) { + if (!dev->addr_len) { /* First case: device needs no address. */ lladdr = neigh->ha; } else if (lladdr) { @@ -795,8 +818,8 @@ - compare new & old - if they are different, check override flag */ - if (old&NUD_VALID) { - if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0) + if (old & NUD_VALID) { + if (!memcmp(lladdr, neigh->ha, dev->addr_len)) lladdr = neigh->ha; else if (!override) goto out; @@ -806,14 +829,14 @@ use it, otherwise discard the request. */ err = -EINVAL; - if (!(old&NUD_VALID)) + if (!(old & NUD_VALID)) goto out; lladdr = neigh->ha; } neigh_sync(neigh); old = neigh->nud_state; - if (new&NUD_CONNECTED) + if (new & NUD_CONNECTED) neigh->confirmed = jiffies; neigh->updated = jiffies; @@ -821,35 +844,35 @@ do not change entry state, if new one is STALE. 
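Read together, the rules above mean a protocol receive path typically confirms an entry along these lines; a hedged sketch, with arp_tbl standing in for the caller's table and sip/sha for the sender's protocol and hardware addresses (not code from this patch):

	struct neighbour *n = neigh_lookup(&arp_tbl, &sip, dev);

	if (n) {
		/* arp == 1: ordinary protocol update, must not touch
		 * NOARP/PERMANENT entries; override == 0: keep an existing,
		 * different cached lladdr
		 */
		neigh_update(n, sha, NUD_REACHABLE, 0, 1);
		neigh_release(n);
	}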
*/ err = 0; - if (old&NUD_VALID) { - if (lladdr == neigh->ha) - if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED))) - goto out; - } + if ((old & NUD_VALID) && lladdr == neigh->ha && + (new == old || (new == NUD_STALE && (old & NUD_CONNECTED)))) + goto out; + neigh_del_timer(neigh); neigh->nud_state = new; if (lladdr != neigh->ha) { memcpy(&neigh->ha, lladdr, dev->addr_len); neigh_update_hhs(neigh); - if (!(new&NUD_CONNECTED)) - neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1); + if (!(new & NUD_CONNECTED)) + neigh->confirmed = jiffies - + (neigh->parms->base_reachable_time << 1); #ifdef CONFIG_ARPD notify = 1; #endif } if (new == old) goto out; - if (new&NUD_CONNECTED) + if (new & NUD_CONNECTED) neigh_connect(neigh); else neigh_suspect(neigh); - if (!(old&NUD_VALID)) { + if (!(old & NUD_VALID)) { struct sk_buff *skb; /* Again: avoid dead loop if something went wrong */ - while (neigh->nud_state&NUD_VALID && - (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) { + while (neigh->nud_state & NUD_VALID && + (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { struct neighbour *n1 = neigh; write_unlock_bh(&neigh->lock); /* On shaper/eql skb->dst->neighbour != neigh :( */ @@ -869,24 +892,24 @@ return err; } -struct neighbour * neigh_event_ns(struct neigh_table *tbl, - u8 *lladdr, void *saddr, - struct net_device *dev) +struct neighbour *neigh_event_ns(struct neigh_table *tbl, + u8 *lladdr, void *saddr, + struct net_device *dev) { - struct neighbour *neigh; - - neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len); + struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev, + lladdr || !dev->addr_len); if (neigh) neigh_update(neigh, lladdr, NUD_STALE, 1, 1); return neigh; } -static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol) +static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, + u16 protocol) { - struct hh_cache *hh = NULL; + struct hh_cache *hh; struct net_device *dev = dst->dev; - for (hh=n->hh; hh; hh = hh->hh_next) + for (hh = n->hh; hh; hh = hh->hh_next) if (hh->hh_type == protocol) break; @@ -902,8 +925,8 @@ } else { atomic_inc(&hh->hh_refcnt); hh->hh_next = n->hh; - n->hh = hh; - if (n->nud_state&NUD_CONNECTED) + n->hh = hh; + if (n->nud_state & NUD_CONNECTED) hh->hh_output = n->ops->hh_output; else hh->hh_output = n->ops->output; @@ -927,7 +950,8 @@ __skb_pull(skb, skb->nh.raw - skb->data); if (dev->hard_header && - dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 && + dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, + skb->len) < 0 && dev->rebuild_header(skb)) return 0; @@ -940,37 +964,43 @@ { struct dst_entry *dst = skb->dst; struct neighbour *neigh; + int rc = 0; if (!dst || !(neigh = dst->neighbour)) goto discard; __skb_pull(skb, skb->nh.raw - skb->data); - if (neigh_event_send(neigh, skb) == 0) { + if (!neigh_event_send(neigh, skb)) { int err; struct net_device *dev = neigh->dev; - if (dev->hard_header_cache && dst->hh == NULL) { + if (dev->hard_header_cache && !dst->hh) { write_lock_bh(&neigh->lock); - if (dst->hh == NULL) + if (!dst->hh) neigh_hh_init(neigh, dst, dst->ops->protocol); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len); + err = dev->hard_header(skb, dev, ntohs(skb->protocol), + neigh->ha, NULL, skb->len); write_unlock_bh(&neigh->lock); } else { read_lock_bh(&neigh->lock); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len); + err = dev->hard_header(skb, dev, ntohs(skb->protocol), 
+ neigh->ha, NULL, skb->len); read_unlock_bh(&neigh->lock); } if (err >= 0) - return neigh->ops->queue_xmit(skb); - kfree_skb(skb); - return -EINVAL; + rc = neigh->ops->queue_xmit(skb); + else + goto out_kfree_skb; } - return 0; - +out: + return rc; discard: - NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL); + NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", + dst, dst ? dst->neighbour : NULL); +out_kfree_skb: + rc = -EINVAL; kfree_skb(skb); - return -EINVAL; + goto out; } /* As fast as possible without hh cache */ @@ -985,12 +1015,16 @@ __skb_pull(skb, skb->nh.raw - skb->data); read_lock_bh(&neigh->lock); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len); + err = dev->hard_header(skb, dev, ntohs(skb->protocol), + neigh->ha, NULL, skb->len); read_unlock_bh(&neigh->lock); if (err >= 0) - return neigh->ops->queue_xmit(skb); - kfree_skb(skb); - return -EINVAL; + err = neigh->ops->queue_xmit(skb); + else { + err = -EINVAL; + kfree_skb(skb); + } + return err; } static void neigh_proxy_process(unsigned long arg) @@ -1004,7 +1038,7 @@ skb = tbl->proxy_queue.next; - while (skb != (struct sk_buff*)&tbl->proxy_queue) { + while (skb != (struct sk_buff *)&tbl->proxy_queue) { struct sk_buff *back = skb; long tdif = back->stamp.tv_usec - now; @@ -1031,13 +1065,13 @@ struct sk_buff *skb) { unsigned long now = jiffies; - long sched_next = net_random()%p->proxy_delay; + long sched_next = net_random() % p->proxy_delay; if (tbl->proxy_queue.qlen > p->proxy_qlen) { kfree_skb(skb); return; } - skb->stamp.tv_sec = 0; + skb->stamp.tv_sec = 0; skb->stamp.tv_usec = now + sched_next; spin_lock(&tbl->proxy_queue.lock); @@ -1055,22 +1089,22 @@ } -struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl) +struct neigh_parms *neigh_parms_alloc(struct net_device *dev, + struct neigh_table *tbl) { - struct neigh_parms *p; - p = kmalloc(sizeof(*p), GFP_KERNEL); + struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL); + if (p) { memcpy(p, &tbl->parms, sizeof(*p)); - p->tbl = tbl; - p->reachable_time = neigh_rand_reach_time(p->base_reachable_time); - if (dev && dev->neigh_setup) { - if (dev->neigh_setup(dev, p)) { - kfree(p); - return NULL; - } + p->tbl = tbl; + p->reachable_time = + neigh_rand_reach_time(p->base_reachable_time); + if (dev && dev->neigh_setup && dev->neigh_setup(dev, p)) { + kfree(p); + return NULL; } write_lock_bh(&tbl->lock); - p->next = tbl->parms.next; + p->next = tbl->parms.next; tbl->parms.next = p; write_unlock_bh(&tbl->lock); } @@ -1080,8 +1114,8 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) { struct neigh_parms **p; - - if (parms == NULL || parms == &tbl->parms) + + if (!parms || parms == &tbl->parms) return; write_lock_bh(&tbl->lock); for (p = &tbl->parms.next; *p; p = &(*p)->next) { @@ -1104,34 +1138,37 @@ { unsigned long now = jiffies; - tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time); + tbl->parms.reachable_time = + neigh_rand_reach_time(tbl->parms.base_reachable_time); - if (tbl->kmem_cachep == NULL) + if (!tbl->kmem_cachep) tbl->kmem_cachep = kmem_cache_create(tbl->id, - (tbl->entry_size+15)&~15, + (tbl->entry_size + + 15) & ~15, 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - #ifdef CONFIG_SMP - tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), (unsigned long)tbl); + tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), + (unsigned long)tbl); #endif init_timer(&tbl->gc_timer); - tbl->lock = 
RW_LOCK_UNLOCKED; - tbl->gc_timer.data = (unsigned long)tbl; + tbl->lock = RW_LOCK_UNLOCKED; + tbl->gc_timer.data = (unsigned long)tbl; tbl->gc_timer.function = neigh_periodic_timer; - tbl->gc_timer.expires = now + tbl->gc_interval + tbl->parms.reachable_time; + tbl->gc_timer.expires = now + tbl->gc_interval + + tbl->parms.reachable_time; add_timer(&tbl->gc_timer); init_timer(&tbl->proxy_timer); - tbl->proxy_timer.data = (unsigned long)tbl; + tbl->proxy_timer.data = (unsigned long)tbl; tbl->proxy_timer.function = neigh_proxy_process; skb_queue_head_init(&tbl->proxy_queue); tbl->last_flush = now; - tbl->last_rand = now + tbl->parms.reachable_time*20; + tbl->last_rand = now + tbl->parms.reachable_time * 20; write_lock(&neigh_tbl_lock); - tbl->next = neigh_tables; - neigh_tables = tbl; + tbl->next = neigh_tables; + neigh_tables = tbl; write_unlock(&neigh_tbl_lock); } @@ -1167,15 +1204,14 @@ struct rtattr **nda = arg; struct neigh_table *tbl; struct net_device *dev = NULL; - int err = 0; + int err = -ENODEV; - if (ndm->ndm_ifindex) { - if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL) - return -ENODEV; - } + if (ndm->ndm_ifindex && + (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL) + goto out; read_lock(&neigh_tbl_lock); - for (tbl=neigh_tables; tbl; tbl = tbl->next) { + for (tbl = neigh_tables; tbl; tbl = tbl->next) { struct neighbour *n; if (tbl->family != ndm->ndm_family) @@ -1183,34 +1219,33 @@ read_unlock(&neigh_tbl_lock); err = -EINVAL; - if (nda[NDA_DST-1] == NULL || - nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len)) - goto out; + if (!nda[NDA_DST - 1] || + nda[NDA_DST - 1]->rta_len != RTA_LENGTH(tbl->key_len)) + goto out_dev_put; - if (ndm->ndm_flags&NTF_PROXY) { - err = pneigh_delete(tbl, RTA_DATA(nda[NDA_DST-1]), dev); - goto out; + if (ndm->ndm_flags & NTF_PROXY) { + err = pneigh_delete(tbl, + RTA_DATA(nda[NDA_DST - 1]), dev); + goto out_dev_put; } - if (dev == NULL) - return -EINVAL; + if (!dev) + goto out; - n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev); + n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST - 1]), dev); if (n) { err = neigh_update(n, NULL, NUD_FAILED, 1, 0); neigh_release(n); } -out: - if (dev) - dev_put(dev); - return err; + goto out_dev_put; } read_unlock(&neigh_tbl_lock); - + err = -EADDRNOTAVAIL; +out_dev_put: if (dev) dev_put(dev); - - return -EADDRNOTAVAIL; +out: + return err; } int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) @@ -1219,15 +1254,14 @@ struct rtattr **nda = arg; struct neigh_table *tbl; struct net_device *dev = NULL; + int err = -ENODEV; - if (ndm->ndm_ifindex) { - if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL) - return -ENODEV; - } + if (ndm->ndm_ifindex && + (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL) + goto out; read_lock(&neigh_tbl_lock); - for (tbl=neigh_tables; tbl; tbl = tbl->next) { - int err = 0; + for (tbl = neigh_tables; tbl; tbl = tbl->next) { int override = 1; struct neighbour *n; @@ -1236,53 +1270,57 @@ read_unlock(&neigh_tbl_lock); err = -EINVAL; - if (nda[NDA_DST-1] == NULL || - nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len)) - goto out; - if (ndm->ndm_flags&NTF_PROXY) { + if (!nda[NDA_DST - 1] || + nda[NDA_DST - 1]->rta_len != RTA_LENGTH(tbl->key_len)) + goto out_dev_put; + if (ndm->ndm_flags & NTF_PROXY) { err = -ENOBUFS; - if (pneigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 1)) + if (pneigh_lookup(tbl, + RTA_DATA(nda[NDA_DST - 1]), dev, 1)) err = 0; - goto out; + goto out_dev_put; } - if (dev == NULL) - return -EINVAL; err = -EINVAL; - if (nda[NDA_LLADDR-1] != NULL && - 
nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len)) + if (!dev) goto out; + if (nda[NDA_LLADDR - 1] && + nda[NDA_LLADDR - 1]->rta_len != RTA_LENGTH(dev->addr_len)) + goto out_dev_put; err = 0; - n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev); + n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST - 1]), dev); if (n) { - if (nlh->nlmsg_flags&NLM_F_EXCL) + if (nlh->nlmsg_flags & NLM_F_EXCL) err = -EEXIST; - override = nlh->nlmsg_flags&NLM_F_REPLACE; - } else if (!(nlh->nlmsg_flags&NLM_F_CREATE)) + override = nlh->nlmsg_flags & NLM_F_REPLACE; + } else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) err = -ENOENT; else { - n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST-1]), dev); + n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST - 1]), + dev); if (IS_ERR(n)) { err = PTR_ERR(n); n = NULL; } } - if (err == 0) { - err = neigh_update(n, nda[NDA_LLADDR-1] ? RTA_DATA(nda[NDA_LLADDR-1]) : NULL, + if (!err) { + err = neigh_update(n, nda[NDA_LLADDR - 1] ? + RTA_DATA(nda[NDA_LLADDR - 1]) : + NULL, ndm->ndm_state, override, 0); } if (n) neigh_release(n); -out: - if (dev) - dev_put(dev); - return err; + goto out_dev_put; } - read_unlock(&neigh_tbl_lock); + read_unlock(&neigh_tbl_lock); + err = -EADDRNOTAVAIL; +out_dev_put: if (dev) dev_put(dev); - return -EADDRNOTAVAIL; +out: + return err; } @@ -1290,32 +1328,31 @@ u32 pid, u32 seq, int event) { unsigned long now = jiffies; - struct ndmsg *ndm; - struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb->tail; struct nda_cacheinfo ci; int locked = 0; + struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, event, + sizeof(struct ndmsg)); + struct ndmsg *ndm = NLMSG_DATA(nlh); - nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ndm)); - ndm = NLMSG_DATA(nlh); - ndm->ndm_family = n->ops->family; - ndm->ndm_flags = n->flags; - ndm->ndm_type = n->type; + ndm->ndm_family = n->ops->family; + ndm->ndm_flags = n->flags; + ndm->ndm_type = n->type; ndm->ndm_ifindex = n->dev->ifindex; RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key); read_lock_bh(&n->lock); - locked=1; - ndm->ndm_state = n->nud_state; - if (n->nud_state&NUD_VALID) + locked = 1; + ndm->ndm_state = n->nud_state; + if (n->nud_state & NUD_VALID) RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha); - ci.ndm_used = now - n->used; + ci.ndm_used = now - n->used; ci.ndm_confirmed = now - n->confirmed; - ci.ndm_updated = now - n->updated; - ci.ndm_refcnt = atomic_read(&n->refcnt) - 1; + ci.ndm_updated = now - n->updated; + ci.ndm_refcnt = atomic_read(&n->refcnt) - 1; read_unlock_bh(&n->lock); - locked=0; + locked = 0; RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb->tail - b; return skb->len; nlmsg_failure: @@ -1327,73 +1364,70 @@ } -static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb) +static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, + struct netlink_callback *cb) { struct neighbour *n; - int h, s_h; - int idx, s_idx; + int rc, h, s_h = cb->args[1]; + int idx, s_idx = idx = cb->args[2]; - s_h = cb->args[1]; - s_idx = idx = cb->args[2]; - for (h=0; h <= NEIGH_HASHMASK; h++) { - if (h < s_h) continue; + for (h = 0; h <= NEIGH_HASHMASK; h++) { + if (h < s_h) + continue; if (h > s_h) s_idx = 0; read_lock_bh(&tbl->lock); - for (n = tbl->hash_buckets[h], idx = 0; n; - n = n->next, idx++) { + for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) { if (idx < s_idx) continue; if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 
0) { + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH) <= 0) { read_unlock_bh(&tbl->lock); - cb->args[1] = h; - cb->args[2] = idx; - return -1; + rc = -1; + goto out; } } read_unlock_bh(&tbl->lock); } - + rc = skb->len; +out: cb->args[1] = h; cb->args[2] = idx; - return skb->len; + return rc; } int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) { - int t; - int s_t; struct neigh_table *tbl; - int family = ((struct rtgenmsg*)NLMSG_DATA(cb->nlh))->rtgen_family; + int t, family, s_t; + read_lock(&neigh_tbl_lock); + family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family; s_t = cb->args[0]; - read_lock(&neigh_tbl_lock); - for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) { - if (t < s_t) continue; - if (family && tbl->family != family) + for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) { + if (t < s_t || (family && tbl->family != family)) continue; if (t > s_t) - memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); - if (neigh_dump_table(tbl, skb, cb) < 0) + memset(&cb->args[1], 0, sizeof(cb->args) - + sizeof(cb->args[0])); + if (neigh_dump_table(tbl, skb, cb) < 0) break; } read_unlock(&neigh_tbl_lock); cb->args[0] = t; - return skb->len; } #ifdef CONFIG_ARPD void neigh_app_ns(struct neighbour *n) { - struct sk_buff *skb; struct nlmsghdr *nlh; - int size = NLMSG_SPACE(sizeof(struct ndmsg)+256); + int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256); + struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC); - skb = alloc_skb(size, GFP_ATOMIC); if (!skb) return; @@ -1401,19 +1435,18 @@ kfree_skb(skb); return; } - nlh = (struct nlmsghdr*)skb->data; - nlh->nlmsg_flags = NLM_F_REQUEST; + nlh = (struct nlmsghdr *)skb->data; + nlh->nlmsg_flags = NLM_F_REQUEST; NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH; netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC); } static void neigh_app_notify(struct neighbour *n) { - struct sk_buff *skb; - struct nlmsghdr *nlh; - int size = NLMSG_SPACE(sizeof(struct ndmsg)+256); + struct nlmsghdr *nlh; + int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256); + struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC); - skb = alloc_skb(size, GFP_ATOMIC); if (!skb) return; @@ -1421,7 +1454,7 @@ kfree_skb(skb); return; } - nlh = (struct nlmsghdr*)skb->data; + nlh = (struct nlmsghdr *)skb->data; NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH; netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC); } @@ -1430,91 +1463,173 @@ #ifdef CONFIG_SYSCTL -struct neigh_sysctl_table -{ +struct neigh_sysctl_table { struct ctl_table_header *sysctl_header; - ctl_table neigh_vars[17]; - ctl_table neigh_dev[2]; - ctl_table neigh_neigh_dir[2]; - ctl_table neigh_proto_dir[2]; - ctl_table neigh_root_dir[2]; + ctl_table neigh_vars[17]; + ctl_table neigh_dev[2]; + ctl_table neigh_neigh_dir[2]; + ctl_table neigh_proto_dir[2]; + ctl_table neigh_root_dir[2]; } neigh_sysctl_template = { - NULL, - {{NET_NEIGH_MCAST_SOLICIT, "mcast_solicit", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_UCAST_SOLICIT, "ucast_solicit", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_APP_SOLICIT, "app_solicit", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_RETRANS_TIME, "retrans_time", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_REACHABLE_TIME, "base_reachable_time", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec_jiffies}, - {NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec_jiffies}, - {NET_NEIGH_GC_STALE_TIME, "gc_stale_time", - NULL, sizeof(int), 0644, NULL, - 
&proc_dointvec_jiffies}, - {NET_NEIGH_UNRES_QLEN, "unres_qlen", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_PROXY_QLEN, "proxy_qlen", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_ANYCAST_DELAY, "anycast_delay", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_PROXY_DELAY, "proxy_delay", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_LOCKTIME, "locktime", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_GC_INTERVAL, "gc_interval", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec_jiffies}, - {NET_NEIGH_GC_THRESH1, "gc_thresh1", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_GC_THRESH2, "gc_thresh2", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {NET_NEIGH_GC_THRESH3, "gc_thresh3", - NULL, sizeof(int), 0644, NULL, - &proc_dointvec}, - {0}}, - - {{NET_PROTO_CONF_DEFAULT, "default", NULL, 0, 0555, NULL},{0}}, - {{0, "neigh", NULL, 0, 0555, NULL},{0}}, - {{0, NULL, NULL, 0, 0555, NULL},{0}}, - {{CTL_NET, "net", NULL, 0, 0555, NULL},{0}} + neigh_vars: { + { + ctl_name: NET_NEIGH_MCAST_SOLICIT, + procname: "mcast_solicit", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_UCAST_SOLICIT, + procname: "ucast_solicit", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_APP_SOLICIT, + procname: "app_solicit", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_RETRANS_TIME, + procname: "retrans_time", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_REACHABLE_TIME, + procname: "base_reachable_time", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec_jiffies, + }, + { + ctl_name: NET_NEIGH_DELAY_PROBE_TIME, + procname: "delay_first_probe_time", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec_jiffies, + }, + { + ctl_name: NET_NEIGH_GC_STALE_TIME, + procname: "gc_stale_time", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec_jiffies, + }, + { + ctl_name: NET_NEIGH_UNRES_QLEN, + procname: "unres_qlen", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_PROXY_QLEN, + procname: "proxy_qlen", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_ANYCAST_DELAY, + procname: "anycast_delay", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_PROXY_DELAY, + procname: "proxy_delay", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_LOCKTIME, + procname: "locktime", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_GC_INTERVAL, + procname: "gc_interval", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec_jiffies, + }, + { + ctl_name: NET_NEIGH_GC_THRESH1, + procname: "gc_thresh1", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_GC_THRESH2, + procname: "gc_thresh2", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + { + ctl_name: NET_NEIGH_GC_THRESH3, + procname: "gc_thresh3", + maxlen: sizeof(int), + mode: 0644, + proc_handler: &proc_dointvec, + }, + }, + neigh_dev: { + { + ctl_name: NET_PROTO_CONF_DEFAULT, + procname: "default", + mode: 0555, + }, + }, + neigh_neigh_dir: { + { + procname: "neigh", + mode: 0555, + }, + 
}, + neigh_proto_dir: { + { + mode: 0555, + }, + }, + neigh_root_dir: { + { + ctl_name: CTL_NET, + procname: "net", + mode: 0555, + }, + }, }; int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, int p_id, int pdev_id, char *p_name) { - struct neigh_sysctl_table *t; + struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL); - t = kmalloc(sizeof(*t), GFP_KERNEL); - if (t == NULL) + if (!t) return -ENOBUFS; memcpy(t, &neigh_sysctl_template, sizeof(*t)); - t->neigh_vars[0].data = &p->mcast_probes; - t->neigh_vars[1].data = &p->ucast_probes; - t->neigh_vars[2].data = &p->app_probes; - t->neigh_vars[3].data = &p->retrans_time; - t->neigh_vars[4].data = &p->base_reachable_time; - t->neigh_vars[5].data = &p->delay_probe_time; - t->neigh_vars[6].data = &p->gc_staletime; - t->neigh_vars[7].data = &p->queue_len; - t->neigh_vars[8].data = &p->proxy_qlen; - t->neigh_vars[9].data = &p->anycast_delay; + t->neigh_vars[0].data = &p->mcast_probes; + t->neigh_vars[1].data = &p->ucast_probes; + t->neigh_vars[2].data = &p->app_probes; + t->neigh_vars[3].data = &p->retrans_time; + t->neigh_vars[4].data = &p->base_reachable_time; + t->neigh_vars[5].data = &p->delay_probe_time; + t->neigh_vars[6].data = &p->gc_staletime; + t->neigh_vars[7].data = &p->queue_len; + t->neigh_vars[8].data = &p->proxy_qlen; + t->neigh_vars[9].data = &p->anycast_delay; t->neigh_vars[10].data = &p->proxy_delay; t->neigh_vars[11].data = &p->locktime; if (dev) { @@ -1522,23 +1637,23 @@ t->neigh_dev[0].ctl_name = dev->ifindex; memset(&t->neigh_vars[12], 0, sizeof(ctl_table)); } else { - t->neigh_vars[12].data = (int*)(p+1); - t->neigh_vars[13].data = (int*)(p+1) + 1; - t->neigh_vars[14].data = (int*)(p+1) + 2; - t->neigh_vars[15].data = (int*)(p+1) + 3; + t->neigh_vars[12].data = (int *)(p + 1); + t->neigh_vars[13].data = (int *)(p + 1) + 1; + t->neigh_vars[14].data = (int *)(p + 1) + 2; + t->neigh_vars[15].data = (int *)(p + 1) + 3; } t->neigh_neigh_dir[0].ctl_name = pdev_id; t->neigh_proto_dir[0].procname = p_name; t->neigh_proto_dir[0].ctl_name = p_id; - t->neigh_dev[0].child = t->neigh_vars; - t->neigh_neigh_dir[0].child = t->neigh_dev; - t->neigh_proto_dir[0].child = t->neigh_neigh_dir; - t->neigh_root_dir[0].child = t->neigh_proto_dir; + t->neigh_dev[0].child = t->neigh_vars; + t->neigh_neigh_dir[0].child = t->neigh_dev; + t->neigh_proto_dir[0].child = t->neigh_neigh_dir; + t->neigh_root_dir[0].child = t->neigh_proto_dir; t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0); - if (t->sysctl_header == NULL) { + if (!t->sysctl_header) { kfree(t); return -ENOBUFS; } diff -Nru a/net/core/skbuff.c b/net/core/skbuff.c --- a/net/core/skbuff.c Tue Jun 18 19:12:01 2002 +++ b/net/core/skbuff.c Tue Jun 18 19:12:01 2002 @@ -6,8 +6,9 @@ * * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $ * - * Fixes: - * Alan Cox : Fixed the worst of the load balancer bugs. + * Fixes: + * Alan Cox : Fixed the worst of the load + * balancer bugs. * Dave Platt : Interrupt stacking fix. * Richard Kooijman : Timestamp fixes. * Alan Cox : Changed buffer format. @@ -21,8 +22,8 @@ * Andi Kleen : slabified it. * * NOTE: - * The __skb_ routines should be called with interrupts - * disabled, or you better be *real* sure that the operation is atomic + * The __skb_ routines should be called with interrupts + * disabled, or you better be *real* sure that the operation is atomic * with respect to whatever list is being frobbed (e.g. via lock_sock() * or via disabling bottom half handlers, etc). 
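That NOTE is the contract for every __skb_ helper in this file. A minimal sketch of the two calling styles, assuming a struct sk_buff_head q protected by its built-in lock, just as tbl->proxy_queue is used in the neighbour code above:

	skb_queue_tail(&q, skb);		/* locked variant, safe as-is */

	spin_lock_irqsave(&q.lock, flags);
	__skb_queue_tail(&q, skb);		/* lockless variant, caller holds the lock */
	spin_unlock_irqrestore(&q.lock, flags);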
* @@ -73,7 +74,7 @@ /* * Keep out-of-line to prevent kernel bloat. * __builtin_return_address is not used because it is not always - * reliable. + * reliable. */ /** @@ -84,10 +85,9 @@ * * Out of line support code for skb_put(). Not user callable. */ - void skb_over_panic(struct sk_buff *skb, int sz, void *here) { - printk("skput:over: %p:%d put:%d dev:%s", + printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s", here, skb->len, sz, skb->dev ? skb->dev->name : ""); BUG(); } @@ -100,12 +100,11 @@ * * Out of line support code for skb_push(). Not user callable. */ - void skb_under_panic(struct sk_buff *skb, int sz, void *here) { - printk("skput:under: %p:%d put:%d dev:%s", - here, skb->len, sz, skb->dev ? skb->dev->name : ""); + printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s", + here, skb->len, sz, skb->dev ? skb->dev->name : ""); BUG(); } @@ -132,6 +131,7 @@ unsigned long flags; local_irq_save(flags); + list = &skb_head_pool[smp_processor_id()].list; if (skb_queue_len(list) < sysctl_hot_list_len) { @@ -149,7 +149,7 @@ /* Allocate a new skbuff. We do this ourselves so we can fill in a few * 'private' fields and also do memory statistics to find all the * [BEEP] leaks. - * + * */ /** @@ -164,14 +164,13 @@ * Buffers may only be allocated from interrupts using a @gfp_mask of * %GFP_ATOMIC. */ - -struct sk_buff *alloc_skb(unsigned int size,int gfp_mask) +struct sk_buff *alloc_skb(unsigned int size, int gfp_mask) { struct sk_buff *skb; u8 *data; if (in_interrupt() && (gfp_mask & __GFP_WAIT)) { - static int count = 0; + static int count; if (++count < 5) { printk(KERN_ERR "alloc_skb called nonatomically " "from interrupt %p\n", NET_CALLER(size)); @@ -182,76 +181,74 @@ /* Get the HEAD */ skb = skb_head_from_pool(); - if (skb == NULL) { - skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA); - if (skb == NULL) - goto nohead; + if (!skb) { + skb = kmem_cache_alloc(skbuff_head_cache, + gfp_mask & ~__GFP_DMA); + if (!skb) + goto out; } /* Get the DATA. Size must match skb_add_mtu(). */ size = SKB_DATA_ALIGN(size); data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); - if (data == NULL) + if (!data) goto nodata; - /* XXX: does not include slab overhead */ + /* XXX: does not include slab overhead */ skb->truesize = size + sizeof(struct sk_buff); /* Load the data pointers. */ - skb->head = data; - skb->data = data; - skb->tail = data; - skb->end = data + size; + skb->head = skb->data = skb->tail = data; + skb->end = data + size; /* Set up other state */ - skb->len = 0; - skb->cloned = 0; + skb->len = 0; + skb->cloned = 0; skb->data_len = 0; - atomic_set(&skb->users, 1); + atomic_set(&skb->users, 1); atomic_set(&(skb_shinfo(skb)->dataref), 1); - skb_shinfo(skb)->nr_frags = 0; + skb_shinfo(skb)->nr_frags = 0; skb_shinfo(skb)->frag_list = NULL; +out: return skb; - nodata: skb_head_to_pool(skb); -nohead: - return NULL; + skb = NULL; + goto out; } /* - * Slab constructor for a skb head. - */ -static inline void skb_headerinit(void *p, kmem_cache_t *cache, + * Slab constructor for a skb head. 
+ */ +static inline void skb_headerinit(void *p, kmem_cache_t *cache, unsigned long flags) { struct sk_buff *skb = p; - skb->next = NULL; - skb->prev = NULL; - skb->list = NULL; - skb->sk = NULL; - skb->stamp.tv_sec=0; /* No idea about time */ - skb->dev = NULL; - skb->dst = NULL; + skb->next = skb->prev = NULL; + skb->list = NULL; + skb->sk = NULL; + skb->stamp.tv_sec = 0; /* No idea about time */ + skb->dev = NULL; + skb->dst = NULL; memset(skb->cb, 0, sizeof(skb->cb)); - skb->pkt_type = PACKET_HOST; /* Default type */ - skb->ip_summed = 0; - skb->priority = 0; - skb->security = 0; /* By default packets are insecure */ - skb->destructor = NULL; + skb->pkt_type = PACKET_HOST; /* Default type */ + skb->ip_summed = 0; + skb->priority = 0; + skb->security = 0; /* By default packets are insecure */ + skb->destructor = NULL; #ifdef CONFIG_NETFILTER - skb->nfmark = skb->nfcache = 0; - skb->nfct = NULL; + skb->nfmark = skb->nfcache = 0; + skb->nfct = NULL; #ifdef CONFIG_NETFILTER_DEBUG - skb->nf_debug = 0; + skb->nf_debug = 0; #endif #endif #ifdef CONFIG_NET_SCHED - skb->tc_index = 0; + skb->tc_index = 0; #endif } @@ -272,7 +269,7 @@ { struct sk_buff *list; - for (list = skb_shinfo(skb)->frag_list; list; list=list->next) + for (list = skb_shinfo(skb)->frag_list; list; list = list->next) skb_get(list); } @@ -294,7 +291,7 @@ } /* - * Free an skbuff by memory without cleaning the state. + * Free an skbuff by memory without cleaning the state. */ void kfree_skbmem(struct sk_buff *skb) { @@ -303,10 +300,10 @@ } /** - * __kfree_skb - private function + * __kfree_skb - private function * @skb: buffer * - * Free an sk_buff. Release anything attached to the buffer. + * Free an sk_buff. Release anything attached to the buffer. * Clean the state. This is an internal helper function. Users should * always call kfree_skb */ @@ -321,10 +318,9 @@ dst_release(skb->dst); if(skb->destructor) { - if (in_irq()) { - printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n", - NET_CALLER(skb)); - } + if (in_irq()) + printk(KERN_WARNING "Warning: kfree_skb on " + "hard IRQ %p\n", NET_CALLER(skb)); skb->destructor(skb); } #ifdef CONFIG_NETFILTER @@ -341,18 +337,17 @@ * * Duplicate an &sk_buff. The new one is not owned by a socket. Both * copies share the same packet data but not structure. The new - * buffer has a reference count of 1. If the allocation fails the + * buffer has a reference count of 1. If the allocation fails the * function returns %NULL otherwise the new buffer is returned. - * + * * If this function is called from an interrupt gfp_mask() must be * %GFP_ATOMIC. 
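As a usage sketch of the contract just described (mirror_dev is hypothetical; GFP_ATOMIC is assumed because cloning is commonly done in softirq context):

	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone) {
		/* the struct is private, the packet data is still shared */
		clone->dev = mirror_dev;
		dev_queue_xmit(clone);
	}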
*/ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) { - struct sk_buff *n; + struct sk_buff *n = skb_head_from_pool(); - n = skb_head_from_pool(); if (!n) { n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); if (!n) @@ -418,32 +413,32 @@ */ unsigned long offset = new->data - old->data; - new->list=NULL; - new->sk=NULL; - new->dev=old->dev; - new->priority=old->priority; - new->protocol=old->protocol; - new->dst=dst_clone(old->dst); - new->h.raw=old->h.raw+offset; - new->nh.raw=old->nh.raw+offset; - new->mac.raw=old->mac.raw+offset; + new->list = NULL; + new->sk = NULL; + new->dev = old->dev; + new->priority = old->priority; + new->protocol = old->protocol; + new->dst = dst_clone(old->dst); + new->h.raw = old->h.raw + offset; + new->nh.raw = old->nh.raw + offset; + new->mac.raw = old->mac.raw + offset; memcpy(new->cb, old->cb, sizeof(old->cb)); atomic_set(&new->users, 1); - new->pkt_type=old->pkt_type; - new->stamp=old->stamp; + new->pkt_type = old->pkt_type; + new->stamp = old->stamp; new->destructor = NULL; - new->security=old->security; + new->security = old->security; #ifdef CONFIG_NETFILTER - new->nfmark=old->nfmark; - new->nfcache=old->nfcache; - new->nfct=old->nfct; + new->nfmark = old->nfmark; + new->nfcache = old->nfcache; + new->nfct = old->nfct; nf_conntrack_get(new->nfct); #ifdef CONFIG_NETFILTER_DEBUG - new->nf_debug=old->nf_debug; + new->nf_debug = old->nf_debug; #endif #endif #ifdef CONFIG_NET_SCHED - new->tc_index = old->tc_index; + new->tc_index = old->tc_index; #endif } @@ -453,7 +448,7 @@ * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and its data. This is used when the - * caller wishes to modify the data and needs a private copy of the + * caller wishes to modify the data and needs a private copy of the * data to alter. Returns %NULL on failure or the pointer to the buffer * on success. The returned buffer has a reference count of 1. * @@ -463,31 +458,29 @@ * function is not recommended for use in circumstances when only * header is going to be modified. Use pskb_copy() instead. 
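The distinction drawn above amounts to a small decision rule; a sketch, not taken from this patch:

	/* payload bytes will be rewritten: deep-copy everything */
	nskb = skb_copy(skb, GFP_ATOMIC);

	/* only protocol headers will be edited: copy the header part,
	 * keep sharing the paged fragments
	 */
	nskb = pskb_copy(skb, GFP_ATOMIC);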
*/ - + struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask) { - struct sk_buff *n; - int headerlen = skb->data-skb->head; - + int headerlen = skb->data - skb->head; /* * Allocate the copy buffer */ - n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask); - if(n==NULL) + struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len, + gfp_mask); + if (!n) return NULL; /* Set the data pointer */ - skb_reserve(n,headerlen); + skb_reserve(n, headerlen); /* Set the tail pointer and length */ - skb_put(n,skb->len); - n->csum = skb->csum; + skb_put(n, skb->len); + n->csum = skb->csum; n->ip_summed = skb->ip_summed; - if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len)) + if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) BUG(); copy_skb_header(n, skb); - return n; } @@ -498,7 +491,7 @@ u8 *data; long offset; int headerlen = skb->data - skb->head; - int expand = (skb->tail+skb->data_len) - skb->end; + int expand = (skb->tail + skb->data_len) - skb->end; if (skb_shared(skb)) BUG(); @@ -506,14 +499,14 @@ if (expand <= 0) expand = 0; - size = (skb->end - skb->head + expand); + size = skb->end - skb->head + expand; size = SKB_DATA_ALIGN(size); data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); - if (data == NULL) + if (!data) return -ENOMEM; /* Copy entire thing */ - if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len)) + if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len)) BUG(); /* Offset between the two in bytes */ @@ -526,22 +519,22 @@ skb->end = data + size; /* Set up new pointers */ - skb->h.raw += offset; - skb->nh.raw += offset; + skb->h.raw += offset; + skb->nh.raw += offset; skb->mac.raw += offset; - skb->tail += offset; - skb->data += offset; + skb->tail += offset; + skb->data += offset; /* Set up shinfo */ atomic_set(&(skb_shinfo(skb)->dataref), 1); - skb_shinfo(skb)->nr_frags = 0; + skb_shinfo(skb)->nr_frags = 0; skb_shinfo(skb)->frag_list = NULL; /* We are no longer a clone, even if we were. */ - skb->cloned = 0; + skb->cloned = 0; - skb->tail += skb->data_len; - skb->data_len = 0; + skb->tail += skb->data_len; + skb->data_len = 0; return 0; } @@ -561,26 +554,25 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask) { - struct sk_buff *n; - /* * Allocate the copy buffer */ - n=alloc_skb(skb->end - skb->head, gfp_mask); - if(n==NULL) - return NULL; + struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask); + + if (!n) + goto out; /* Set the data pointer */ - skb_reserve(n,skb->data-skb->head); + skb_reserve(n, skb->data - skb->head); /* Set the tail pointer and length */ - skb_put(n,skb_headlen(skb)); + skb_put(n, skb_headlen(skb)); /* Copy the bytes */ memcpy(n->data, skb->data, n->len); - n->csum = skb->csum; + n->csum = skb->csum; n->ip_summed = skb->ip_summed; - n->data_len = skb->data_len; - n->len = skb->len; + n->data_len = skb->data_len; + n->len = skb->len; if (skb_shinfo(skb)->nr_frags) { int i; @@ -598,7 +590,7 @@ } copy_skb_header(n, skb); - +out: return n; } @@ -631,15 +623,15 @@ size = SKB_DATA_ALIGN(size); data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); - if (data == NULL) + if (!data) goto nodata; /* Copy only real data... and, alas, header. This should be * optimized for the cases when header is void. 
*/ - memcpy(data+nhead, skb->head, skb->tail-skb->head); - memcpy(data+size, skb->end, sizeof(struct skb_shared_info)); + memcpy(data + nhead, skb->head, skb->tail - skb->head); + memcpy(data + size, skb->end, sizeof(struct skb_shared_info)); - for (i=0; i<skb_shinfo(skb)->nr_frags; i++) + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) get_page(skb_shinfo(skb)->frags[i].page); if (skb_shinfo(skb)->frag_list) @@ -647,17 +639,16 @@ skb_release_data(skb); - off = (data+nhead) - skb->head; - - skb->head = data; - skb->end = data+size; + off = (data + nhead) - skb->head; - skb->data += off; - skb->tail += off; + skb->head = data; + skb->end = data + size; + skb->data += off; + skb->tail += off; skb->mac.raw += off; - skb->h.raw += off; - skb->nh.raw += off; - skb->cloned = 0; + skb->h.raw += off; + skb->nh.raw += off; + skb->cloned = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); return 0; @@ -667,22 +658,22 @@ /* Make private copy of skb with writable head and some headroom */ -struct sk_buff * -skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) +struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) { struct sk_buff *skb2; int delta = headroom - skb_headroom(skb); if (delta <= 0) - return pskb_copy(skb, GFP_ATOMIC); - - skb2 = skb_clone(skb, GFP_ATOMIC); - if (skb2 == NULL || - !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) - return skb2; - - kfree_skb(skb2); - return NULL; + skb2 = pskb_copy(skb, GFP_ATOMIC); + else { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, + GFP_ATOMIC)) { + kfree_skb(skb2); + skb2 = NULL; + } + } + return skb2; } @@ -693,10 +684,10 @@ * @newtailroom: new free bytes at tail * @gfp_mask: allocation priority * - * Make a copy of both an &sk_buff and its data and while doing so + * Make a copy of both an &sk_buff and its data and while doing so * allocate additional space. * - * This is used when the caller wishes to modify the data and needs a + * This is used when the caller wishes to modify the data and needs a * private copy of the data to alter as well as more space for new fields. * Returns %NULL on failure or the pointer to the buffer * on success. The returned buffer has a reference count of 1. @@ -704,34 +695,28 @@ * You must pass %GFP_ATOMIC as the allocation priority if this function * is called from an interrupt. */ - - struct sk_buff *skb_copy_expand(const struct sk_buff *skb, - int newheadroom, - int newtailroom, - int gfp_mask) + int newheadroom, int newtailroom, int gfp_mask) { - struct sk_buff *n; - /* * Allocate the copy buffer */ - - n=alloc_skb(newheadroom + skb->len + newtailroom, - gfp_mask); - if(n==NULL) + struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom, + gfp_mask); + if (!n) return NULL; - skb_reserve(n,newheadroom); + skb_reserve(n, newheadroom); /* Set the tail pointer and length */ - skb_put(n,skb->len); + skb_put(n, skb->len); /* Copy the data only.
*/ if (skb_copy_bits(skb, 0, n->data, skb->len)) BUG(); copy_skb_header(n, skb); + return n; } @@ -746,7 +731,7 @@ int nfrags = skb_shinfo(skb)->nr_frags; int i; - for (i=0; i<nfrags; i++) { + for (i = 0; i < nfrags; i++) { int end = offset + skb_shinfo(skb)->frags[i].size; if (end > len) { if (skb_cloned(skb)) { @@ -759,7 +744,7 @@ put_page(skb_shinfo(skb)->frags[i].page); skb_shinfo(skb)->nr_frags--; } else { - skb_shinfo(skb)->frags[i].size = len-offset; + skb_shinfo(skb)->frags[i].size = len - offset; } } offset = end; @@ -767,17 +752,17 @@ if (offset < len) { skb->data_len -= skb->len - len; - skb->len = len; + skb->len = len; } else { if (len <= skb_headlen(skb)) { - skb->len = len; + skb->len = len; skb->data_len = 0; - skb->tail = skb->data + len; + skb->tail = skb->data + len; if (skb_shinfo(skb)->frag_list && !skb_cloned(skb)) skb_drop_fraglist(skb); } else { skb->data_len -= skb->len - len; - skb->len = len; + skb->len = len; } } @@ -785,7 +770,7 @@ } /** - * __pskb_pull_tail - advance tail of skb header + * __pskb_pull_tail - advance tail of skb header * @skb: buffer to reallocate * @delta: number of bytes to advance tail * @@ -809,18 +794,17 @@ * * It is pretty complicated. Luckily, it is called only in exceptional cases. */ -unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta) +unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) { - int i, k, eat; - /* If skb has not enough free space at tail, get new one * plus 128 bytes for future expansions. If we have enough * room at tail, reallocate without expansion only if skb is cloned. */ - eat = (skb->tail+delta) - skb->end; + int i, k, eat = (skb->tail + delta) - skb->end; if (eat > 0 || skb_cloned(skb)) { - if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC)) + if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, + GFP_ATOMIC)) return NULL; } @@ -830,12 +814,12 @@ /* Optimization: no fragments, no reasons to preestimate * size of pulled pages. Superb. */ - if (skb_shinfo(skb)->frag_list == NULL) + if (!skb_shinfo(skb)->frag_list) goto pull_pages; /* Estimate size of pulled pages. */ eat = delta; - for (i=0; i<skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { if (skb_shinfo(skb)->frags[i].size >= eat) goto pull_pages; eat -= skb_shinfo(skb)->frags[i].size; @@ -854,7 +838,7 @@ struct sk_buff *insp = NULL; do { - if (list == NULL) + if (!list) BUG(); if (list->len <= eat) { @@ -868,7 +852,7 @@ if (skb_shared(list)) { /* Sucks! We need to fork list. :-( */ clone = skb_clone(list, GFP_ATOMIC); - if (clone == NULL) + if (!clone) return NULL; insp = list->next; list = clone; @@ -877,7 +861,7 @@ * problems. */ insp = list; } - if (pskb_pull(list, eat) == NULL) { + if (!pskb_pull(list, eat)) { if (clone) kfree_skb(clone); return NULL; @@ -902,7 +886,7 @@ pull_pages: eat = delta; k = 0; - for (i=0; i<skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { if (skb_shinfo(skb)->frags[i].size <= eat) { put_page(skb_shinfo(skb)->frags[i].page); eat -= skb_shinfo(skb)->frags[i].size; @@ -918,7 +902,7 @@ } skb_shinfo(skb)->nr_frags = k; - skb->tail += delta; + skb->tail += delta; skb->data_len -= delta; return skb->tail; @@ -931,68 +915,70 @@ int i, copy; int start = skb->len - skb->data_len; - if (offset > (int)skb->len-len) + if (offset > (int)skb->len - len) goto fault; /* Copy header.
*/ - if ((copy = start-offset) > 0) { + if ((copy = start - offset) > 0) { if (copy > len) copy = len; memcpy(to, skb->data + offset, copy); if ((len -= copy) == 0) return 0; offset += copy; - to += copy; + to += copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { u8 *vaddr; if (copy > len) copy = len; vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); - memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+ - offset-start, copy); + memcpy(to, + vaddr + skb_shinfo(skb)->frags[i].page_offset+ + offset - start, copy); kunmap_skb_frag(vaddr); if ((len -= copy) == 0) return 0; offset += copy; - to += copy; + to += copy; } start = end; } if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list; + struct sk_buff *list = skb_shinfo(skb)->frag_list; - for (list = skb_shinfo(skb)->frag_list; list; list=list->next) { + for (; list; list = list->next) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + list->len; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { if (copy > len) copy = len; - if (skb_copy_bits(list, offset-start, to, copy)) + if (skb_copy_bits(list, offset - start, + to, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; - to += copy; + to += copy; } start = end; } } - if (len == 0) + if (!len) return 0; fault: @@ -1001,30 +987,31 @@ /* Checksum skb data. */ -unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum) +unsigned int skb_checksum(const struct sk_buff *skb, int offset, + int len, unsigned int csum) { - int i, copy; int start = skb->len - skb->data_len; + int i, copy = start - offset; int pos = 0; /* Checksum header. 
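skb_copy_bits(), whose body ends just above, is how callers read data that may live in paged fragments rather than in the linear area; a hedged sketch of pulling a transport header into a local buffer (assumes skb->h.raw was set earlier by the protocol code):

	struct tcphdr th;

	/* offset is relative to skb->data; returns 0 or a negative error */
	if (skb_copy_bits(skb, skb->h.raw - skb->data, &th, sizeof(th)) < 0)
		goto drop;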
*/ - if ((copy = start-offset) > 0) { + if (copy > 0) { if (copy > len) copy = len; - csum = csum_partial(skb->data+offset, copy, csum); + csum = csum_partial(skb->data + offset, copy, csum); if ((len -= copy) == 0) return csum; offset += copy; - pos = copy; + pos = copy; } - for (i=0; i<skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { unsigned int csum2; u8 *vaddr; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -1033,74 +1020,76 @@ copy = len; vaddr = kmap_skb_frag(frag); csum2 = csum_partial(vaddr + frag->page_offset + - offset-start, copy, 0); + offset - start, copy, 0); kunmap_skb_frag(vaddr); csum = csum_block_add(csum, csum2, pos); if (!(len -= copy)) return csum; offset += copy; - pos += copy; + pos += copy; } start = end; } if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list; + struct sk_buff *list = skb_shinfo(skb)->frag_list; - for (list = skb_shinfo(skb)->frag_list; list; list=list->next) { + for (; list; list = list->next) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + list->len; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { unsigned int csum2; if (copy > len) copy = len; - csum2 = skb_checksum(list, offset-start, copy, 0); + csum2 = skb_checksum(list, offset - start, + copy, 0); csum = csum_block_add(csum, csum2, pos); if ((len -= copy) == 0) return csum; offset += copy; - pos += copy; + pos += copy; } start = end; } } - if (len == 0) - return csum; + if (len) + BUG(); - BUG(); return csum; } /* Both of above in one bottle. */ -unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum) +unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, + u8 *to, int len, unsigned int csum) { - int i, copy; int start = skb->len - skb->data_len; + int i, copy = start - offset; int pos = 0; /* Copy header.
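A caller-side sketch of skb_checksum() as rewritten above: seed it with a pseudo-header sum and fold the result; pseudo is a hypothetical precomputed value and csum_fold() is the usual folding helper (not part of this patch):

	unsigned int csum = skb_checksum(skb, 0, skb->len, pseudo);

	if (csum_fold(csum))		/* non-zero: packet is corrupt */
		goto csum_error;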
*/ - if ((copy = start-offset) > 0) { + if (copy > 0) { if (copy > len) copy = len; - csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum); + csum = csum_partial_copy_nocheck(skb->data + offset, to, + copy, csum); if ((len -= copy) == 0) return csum; offset += copy; - to += copy; - pos = copy; + to += copy; + pos = copy; } - for (i=0; i<skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { unsigned int csum2; u8 *vaddr; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -1108,47 +1097,49 @@ if (copy > len) copy = len; vaddr = kmap_skb_frag(frag); - csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset + - offset-start, to, copy, 0); + csum2 = csum_partial_copy_nocheck(vaddr + + frag->page_offset + + offset - start, to, + copy, 0); kunmap_skb_frag(vaddr); csum = csum_block_add(csum, csum2, pos); if (!(len -= copy)) return csum; offset += copy; - to += copy; - pos += copy; + to += copy; + pos += copy; } start = end; } if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list; + struct sk_buff *list = skb_shinfo(skb)->frag_list; - for (list = skb_shinfo(skb)->frag_list; list; list=list->next) { + for (; list; list = list->next) { unsigned int csum2; int end; - BUG_TRAP(start <= offset+len); + BUG_TRAP(start <= offset + len); end = start + list->len; - if ((copy = end-offset) > 0) { + if ((copy = end - offset) > 0) { if (copy > len) copy = len; - csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0); + csum2 = skb_copy_and_csum_bits(list, + offset - start, + to, copy, 0); csum = csum_block_add(csum, csum2, pos); if ((len -= copy) == 0) return csum; offset += copy; - to += copy; - pos += copy; + to += copy; + pos += copy; } start = end; } } - if (len == 0) - return csum; - - BUG(); + if (len) + BUG(); return csum; } @@ -1169,8 +1160,8 @@ csum = 0; if (csstart != skb->len) - csum = skb_copy_and_csum_bits(skb, csstart, to+csstart, - skb->len-csstart, 0); + csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, + skb->len - csstart, 0); if (skb->ip_summed == CHECKSUM_HW) { long csstuff = csstart + skb->csum; @@ -1180,7 +1171,7 @@ } #if 0 -/* +/* * Tune the memory allocator for a new MTU size.
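The hunk just above belongs to skb_copy_and_csum_dev(), which exists so a driver without scatter-gather support can copy and checksum in a single pass; a sketch of the calling convention, where to is a driver-owned linear transmit buffer (hypothetical):

	/* copies skb->len bytes into 'to', checksumming from the offset
	 * implied by skb->csum when ip_summed == CHECKSUM_HW
	 */
	skb_copy_and_csum_dev(skb, to);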
*/ void skb_add_mtu(int mtu) @@ -1204,6 +1195,6 @@ if (!skbuff_head_cache) panic("cannot create skbuff cache"); - for (i=0; i<NR_CPUS; i++) + for (i = 0; i < NR_CPUS; i++) skb_queue_head_init(&skb_head_pool[i].list); } diff -Nru a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c --- a/net/ipv4/netfilter/arp_tables.c Tue Jun 18 19:12:02 2002 +++ b/net/ipv4/netfilter/arp_tables.c Tue Jun 18 19:12:02 2002 @@ ... @@ read_lock_bh(&table->lock); table_base = (void *)table->private->entries + TABLE_OFFSET(table->private, - cpu_number_map(smp_processor_id())); + smp_processor_id()); e = get_entry(table_base, table->private->hook_entry[hook]); back = get_entry(table_base, table->private->underflow[hook]); @@ -705,7 +705,7 @@ } /* And one copy for every other CPU */ - for (i = 1; i < smp_num_cpus; i++) { + for (i = 1; i < NR_CPUS; i++) { memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, newinfo->entries, SMP_ALIGN(newinfo->size)); @@ -756,7 +756,7 @@ unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < smp_num_cpus; cpu++) { + for (cpu = 0; cpu < NR_CPUS; cpu++) { i = 0; ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -874,7 +874,7 @@ return -ENOMEM; newinfo = vmalloc(sizeof(struct arpt_table_info) - + SMP_ALIGN(tmp.size) * smp_num_cpus); + + SMP_ALIGN(tmp.size) * NR_CPUS); if (!newinfo) return -ENOMEM; @@ -1143,7 +1143,7 @@ MOD_INC_USE_COUNT; newinfo = vmalloc(sizeof(struct arpt_table_info) - + SMP_ALIGN(table->table->size) * smp_num_cpus); + + SMP_ALIGN(table->table->size) * NR_CPUS); if (!newinfo) { ret = -ENOMEM; MOD_DEC_USE_COUNT; diff -Nru a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c --- a/net/ipv4/netfilter/ip_tables.c Tue Jun 18 19:12:02 2002 +++ b/net/ipv4/netfilter/ip_tables.c Tue Jun 18 19:12:02 2002 @@ -288,8 +288,7 @@ read_lock_bh(&table->lock); IP_NF_ASSERT(table->valid_hooks & (1 << hook)); table_base = (void *)table->private->entries - + TABLE_OFFSET(table->private, - cpu_number_map(smp_processor_id())); + + TABLE_OFFSET(table->private, smp_processor_id()); e = get_entry(table_base, table->private->hook_entry[hook]); #ifdef CONFIG_NETFILTER_DEBUG @@ -865,7 +864,7 @@ } /* And one copy for every other CPU */ - for (i = 1; i < smp_num_cpus; i++) { + for (i = 1; i < NR_CPUS; i++) { memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, newinfo->entries, SMP_ALIGN(newinfo->size)); @@ -887,7 +886,7 @@ struct ipt_entry *table_base; unsigned int i; - for (i = 0; i < smp_num_cpus; i++) { + for (i = 0; i < NR_CPUS; i++) { table_base = (void *)newinfo->entries + TABLE_OFFSET(newinfo, i); @@ -934,7 +933,7 @@ unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < smp_num_cpus; cpu++) { + for (cpu = 0; cpu < NR_CPUS; cpu++) { i = 0; IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -1072,7 +1071,7 @@ return -ENOMEM; newinfo = vmalloc(sizeof(struct ipt_table_info) - + SMP_ALIGN(tmp.size) * smp_num_cpus); + + SMP_ALIGN(tmp.size) * NR_CPUS); if (!newinfo) return -ENOMEM; @@ -1385,7 +1384,7 @@ MOD_INC_USE_COUNT; newinfo = vmalloc(sizeof(struct ipt_table_info) - + SMP_ALIGN(table->table->size) * smp_num_cpus); + + SMP_ALIGN(table->table->size) * NR_CPUS); if (!newinfo) { ret = -ENOMEM; MOD_DEC_USE_COUNT; diff -Nru a/net/ipv4/netfilter/ipchains_core.c b/net/ipv4/netfilter/ipchains_core.c --- a/net/ipv4/netfilter/ipchains_core.c Tue Jun 18 19:12:02 2002 +++ b/net/ipv4/netfilter/ipchains_core.c Tue Jun 18 19:12:02 2002 @@ -125,8 +125,8 @@ * UP. * * For backchains and counters, we use an array, indexed by - * [cpu_number_map[smp_processor_id()]*2 + !in_interrupt()]; the array is of - * size [smp_num_cpus*2]. For v2.0, smp_num_cpus is effectively 1. So, + * [smp_processor_id()*2 + !in_interrupt()]; the array is of + * size [NR_CPUS*2]. For v2.0, NR_CPUS is effectively 1.
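This comment also records the cost of the conversion made throughout these files: smp_num_cpus loop bounds become NR_CPUS, so array slots exist even for CPUs that are absent, and any walker that aggregates per-CPU data must skip offline CPUs, exactly as the rewritten ip_rt_acct code below does. A generic sketch of the pattern (per_cpu_count is a hypothetical array):

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))		/* slot exists, CPU may not */
			continue;
		total += per_cpu_count[cpu];
	}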
So, * confident of uniqueness, we modify counters even though we only * have a read lock (to read the counters, you need a write lock, * though). */ @@ -151,11 +151,11 @@ #endif #ifdef CONFIG_SMP -#define SLOT_NUMBER() (cpu_number_map(smp_processor_id())*2 + !in_interrupt()) +#define SLOT_NUMBER() (smp_processor_id()*2 + !in_interrupt()) #else /* !SMP */ #define SLOT_NUMBER() (!in_interrupt()) #endif /* CONFIG_SMP */ -#define NUM_SLOTS (smp_num_cpus*2) +#define NUM_SLOTS (NR_CPUS*2) #define SIZEOF_STRUCT_IP_CHAIN (sizeof(struct ip_chain) \ + NUM_SLOTS*sizeof(struct ip_reent)) @@ -1122,7 +1122,7 @@ label->chain = NULL; label->refcount = ref; label->policy = policy; - for (i = 0; i < smp_num_cpus*2; i++) { + for (i = 0; i < NUM_SLOTS; i++) { label->reent[i].counters.pcnt = label->reent[i].counters.bcnt = 0; label->reent[i].prevchain = NULL; diff -Nru a/net/ipv4/proc.c b/net/ipv4/proc.c --- a/net/ipv4/proc.c Tue Jun 18 19:12:01 2002 +++ b/net/ipv4/proc.c Tue Jun 18 19:12:01 2002 @@ -55,8 +55,8 @@ int res = 0; int cpu; - for (cpu=0; cpu<smp_num_cpus; cpu++) - res += proto->stats[cpu_logical_map(cpu)].inuse; + for (cpu=0; cpu<NR_CPUS; cpu++) + res += proto->stats[cpu].inuse; return res; } @@ -103,9 +103,9 @@ sz /= sizeof(unsigned long); - for (i=0; i<smp_num_cpus; i++) { - res += begin[(cpu_logical_map(i)*2 + 0)*sz + nr]; - res += begin[(cpu_logical_map(i)*2 + 1)*sz + nr]; + for (i=0; i<NR_CPUS; i++) { + res += begin[(i*2 + 0)*sz + nr]; + res += begin[(i*2 + 1)*sz + nr]; } return res; } @@ ... @@ - if (length > 0) { - u32 *dst = (u32*)buffer; - u32 *src = (u32*)(((u8*)ip_rt_acct) + offset); - - memcpy(dst, src, length); - -#ifdef CONFIG_SMP - if (smp_num_cpus > 1 || cpu_logical_map(0) != 0) { - int i; - int cnt = length / 4; - for (i = 0; i < smp_num_cpus; i++) { - int cpu = cpu_logical_map(i); - int k; - - if (cpu == 0) - continue; - - src = (u32*)(((u8*)ip_rt_acct) + offset + - cpu * 256 * sizeof(struct ip_rt_acct)); + /* Copy first cpu. */ + *start = buffer; + memcpy(buffer, IP_RT_ACCT_CPU(0), length); - for (k = 0; k < cnt; k++) - dst[k] += src[k]; - } - } -#endif - return length; + /* Add the other cpus in, one int at a time */ + for (i = 1; i < NR_CPUS; i++) { + unsigned int j; + if (!cpu_online(i)) + continue; + for (j = 0; j < length/4; j++) + ((u32*)buffer)[j] += ((u32*)IP_RT_ACCT_CPU(i))[j]; } - return 0; + return length; } #endif diff -Nru a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c --- a/net/ipv6/netfilter/ip6_tables.c Tue Jun 18 19:12:03 2002 +++ b/net/ipv6/netfilter/ip6_tables.c Tue Jun 18 19:12:03 2002 @@ -336,8 +336,7 @@ read_lock_bh(&table->lock); IP_NF_ASSERT(table->valid_hooks & (1 << hook)); table_base = (void *)table->private->entries - + TABLE_OFFSET(table->private, - cpu_number_map(smp_processor_id())); + + TABLE_OFFSET(table->private, smp_processor_id()); e = get_entry(table_base, table->private->hook_entry[hook]); #ifdef CONFIG_NETFILTER_DEBUG @@ -913,7 +912,7 @@ } /* And one copy for every other CPU */ - for (i = 1; i < smp_num_cpus; i++) { + for (i = 1; i < NR_CPUS; i++) { memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, newinfo->entries, SMP_ALIGN(newinfo->size)); @@ -935,7 +934,7 @@ struct ip6t_entry *table_base; unsigned int i; - for (i = 0; i < smp_num_cpus; i++) { + for (i = 0; i < NR_CPUS; i++) { table_base = (void *)newinfo->entries + TABLE_OFFSET(newinfo, i); @@ -982,7 +981,7 @@ unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < smp_num_cpus; cpu++) { + for (cpu = 0; cpu < NR_CPUS; cpu++) { i = 0; IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -1116,7 +1115,7 @@ return -ENOMEM; newinfo = vmalloc(sizeof(struct ip6t_table_info) - + SMP_ALIGN(tmp.size) * smp_num_cpus); + + SMP_ALIGN(tmp.size) * NR_CPUS); if (!newinfo) return -ENOMEM; @@ -1429,7 +1428,7 @@ MOD_INC_USE_COUNT; newinfo = vmalloc(sizeof(struct ip6t_table_info) - +
diff -Nru a/net/ipv6/proc.c b/net/ipv6/proc.c --- a/net/ipv6/proc.c Tue Jun 18 19:12:02 2002 +++ b/net/ipv6/proc.c Tue Jun 18 19:12:02 2002 @@ -31,8 +31,8 @@ int res = 0; int cpu; - for (cpu=0; cpu<smp_num_cpus; cpu++) res += proto->stats[cpu_logical_map(cpu)].inuse; + for (cpu=0; cpu<NR_CPUS; cpu++) res += proto->stats[cpu].inuse; return res; } @@ -140,9 +140,9 @@ unsigned long res = 0; int i; - for (i=0; i<smp_num_cpus; i++) { [...] + for (i=0; i<NR_CPUS; i++) { [...] diff -Nru a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c [...] + newtcp6sk = (struct tcp6_sock *)newsk; + newtcp6sk->pinet6 = &newtcp6sk->inet6; + newinet = inet_sk(newsk); newnp = inet6_sk(newsk); newtp = tcp_sk(newsk); + memcpy(newnp, np, sizeof(struct ipv6_pinfo)); + ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), newinet->daddr); @@ -1336,9 +1342,15 @@ ip6_dst_store(newsk, dst, NULL); sk->route_caps = dst->dev->features&~NETIF_F_IP_CSUM; + newtcp6sk = (struct tcp6_sock *)newsk; + newtcp6sk->pinet6 = &newtcp6sk->inet6; + newtp = tcp_sk(newsk); newinet = inet_sk(newsk); newnp = inet6_sk(newsk); + + memcpy(newnp, np, sizeof(struct ipv6_pinfo)); + ipv6_addr_copy(&newnp->daddr, &req->af.v6_req.rmt_addr); ipv6_addr_copy(&newnp->saddr, &req->af.v6_req.loc_addr); ipv6_addr_copy(&newnp->rcv_saddr, &req->af.v6_req.loc_addr); diff -Nru a/net/llc/Makefile b/net/llc/Makefile --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/Makefile Tue Jun 18 19:12:03 2002 @@ -0,0 +1,38 @@ +########################################################################### +# Makefile for the Linux 802.2 LLC (fully-functional) layer. +# +# Note 1! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definition is now in the main makefile... +# +# Copyright (c) 1997 by Procom Technology, Inc. +# 2001 by Arnaldo Carvalho de Melo +# +# This program can be redistributed or modified under the terms of the +# GNU General Public License as published by the Free Software Foundation. +# This program is distributed without any warranty or implied warranty +# of merchantability or fitness for a particular purpose. +# +# See the GNU General Public License for more details. +########################################################################### + +O_TARGET := llc.o + +obj-y := llc_if.o llc_c_ev.o llc_c_ac.o llc_mac.o llc_sap.o llc_s_st.o \ + llc_main.o llc_s_ac.o llc_conn.o llc_c_st.o llc_stat.o llc_actn.o \ + llc_s_ev.o llc_evnt.o llc_pdu.o + +ifeq ($(CONFIG_LLC_UI),y) + obj-y += llc_sock.o +endif + +# Objects that export symbols. +export-objs := llc_if.o + +ifeq ($(CONFIG_LLC),m) + obj-m += $(O_TARGET) +endif + +include $(TOPDIR)/Rules.make diff -Nru a/net/llc/llc_actn.c b/net/llc/llc_actn.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_actn.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,150 @@ +/* + * llc_actn.c - Implementation of actions of station component of LLC + * + * Description: + * Functions in this module are the implementation of station component + * actions. Details of the actions can be found in the IEEE-802.2 standard + * document. All functions have one station and one event as input argument. + * All of them return 0 on success and 1 otherwise. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include + +static void llc_station_ack_tmr_callback(unsigned long timeout_data); + +int llc_station_ac_start_ack_timer(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + del_timer(&station->ack_timer); + station->ack_timer.expires = jiffies + LLC_ACK_TIME * HZ; + station->ack_timer.data = (unsigned long)station; + station->ack_timer.function = llc_station_ack_tmr_callback; + add_timer(&station->ack_timer); + station->ack_tmr_running = 1; + return 0; +} + +int llc_station_ac_set_retry_cnt_0(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + station->retry_count = 0; + return 0; +} + +int llc_station_ac_inc_retry_cnt_by_1(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + station->retry_count++; + return 0; +} + +int llc_station_ac_set_xid_r_cnt_0(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + station->xid_r_count = 0; + return 0; +} + +int llc_station_ac_inc_xid_r_cnt_by_1(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + station->xid_r_count++; + return 0; +} + +int llc_station_ac_send_null_dsap_xid_c(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (!skb) + goto out; + rc = 0; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, 0, 0, LLC_PDU_CMD); + llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 127); + lan_hdrs_init(skb, station->mac_sa, station->mac_sa); + llc_station_send_pdu(station, skb); +out: + return rc; +} + +int llc_station_ac_send_xid_r(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + u8 mac_da[ETH_ALEN], dsap; + int rc = 1; + struct sk_buff *ev_skb; + struct sk_buff* skb = llc_alloc_frame(); + + if (!skb) + goto out; + rc = 0; + ev_skb = ev->data.pdu.skb; + skb->dev = ev_skb->dev; + llc_pdu_decode_sa(ev_skb, mac_da); + llc_pdu_decode_ssap(ev_skb, &dsap); + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); + llc_pdu_init_as_xid_rsp(skb, LLC_XID_NULL_CLASS_2, 127); + lan_hdrs_init(skb, station->mac_sa, mac_da); + llc_station_send_pdu(station, skb); +out: + return rc; +} + +int llc_station_ac_send_test_r(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + u8 mac_da[ETH_ALEN], dsap; + int rc = 1; + struct sk_buff *ev_skb; + struct sk_buff *skb = llc_alloc_frame(); + + if (!skb) + goto out; + rc = 0; + ev_skb = ev->data.pdu.skb; + skb->dev = ev_skb->dev; + llc_pdu_decode_sa(ev_skb, mac_da); + llc_pdu_decode_ssap(ev_skb, &dsap); + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); + llc_pdu_init_as_test_rsp(skb, ev_skb); + lan_hdrs_init(skb, station->mac_sa, mac_da); + llc_station_send_pdu(station, skb); +out: + return rc; +} + +int llc_station_ac_report_status(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + return 0; +} + +static void llc_station_ack_tmr_callback(unsigned long timeout_data) +{ + struct llc_station *station = (struct llc_station *)timeout_data; + struct llc_station_state_ev *ev; + + station->ack_tmr_running = 0; + ev = llc_station_alloc_ev(station); + if (ev) { + ev->type = LLC_STATION_EV_TYPE_ACK_TMR; + ev->data.tmr.timer_specific = NULL; + llc_station_send_ev(station, ev); + } +} diff -Nru a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c --- /dev/null Wed Dec 31 16:00:00 
1969 +++ b/net/llc/llc_c_ac.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,1648 @@ +/* + * llc_c_ac.c - actions performed during connection state transition. + * + * Description: + * Functions in this module are implementation of connection component actions + * Details of actions can be found in IEEE-802.2 standard document. + * All functions have one connection and one event as input argument. All of + * them return 0 On success and 1 otherwise. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data); +static void llc_conn_ack_tmr_cb(unsigned long timeout_data); +static void llc_conn_rej_tmr_cb(unsigned long timeout_data); +static void llc_conn_busy_tmr_cb(unsigned long timeout_data); +static int llc_conn_ac_inc_vs_by_1(struct sock *sk, + struct llc_conn_state_ev *ev); +static void llc_process_tmr_ev(struct sock *sk, struct llc_conn_state_ev *ev); +static int llc_conn_ac_data_confirm(struct sock *sk, + struct llc_conn_state_ev *ev); + +#define INCORRECT 0 + +int llc_conn_ac_clear_remote_busy(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + if (llc->remote_busy_flag) { + u8 nr; + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + llc->remote_busy_flag = 0; + del_timer(&llc->busy_state_timer.timer); + llc->busy_state_timer.running = 0; + nr = LLC_I_GET_NR(rx_pdu); + llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); + } + return 0; +} + +int llc_conn_ac_conn_ind(struct sock *sk, struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = ev->data.pdu.skb; + union llc_u_prim_data *prim_data = llc_ind_prim.data; + struct llc_prim_if_block *prim = &llc_ind_prim; + struct llc_sap *sap; + struct llc_opt *llc = llc_sk(sk); + + llc_pdu_decode_dsap(skb, &prim_data->conn.daddr.lsap); + sap = llc_sap_find(prim_data->conn.daddr.lsap); + if (sap) { + llc_pdu_decode_sa(skb, llc->daddr.mac); + llc_pdu_decode_da(skb, llc->laddr.mac); + llc->dev = skb->dev; + prim_data->conn.pri = 0; + prim_data->conn.sk = sk; + prim_data->conn.dev = skb->dev; + memcpy(&prim_data->conn.daddr, &llc->laddr, sizeof(llc->laddr)); + memcpy(&prim_data->conn.saddr, &llc->daddr, sizeof(llc->daddr)); + prim->data = prim_data; + prim->prim = LLC_CONN_PRIM; + prim->sap = llc->sap; + ev->flag = 1; + ev->ind_prim = prim; + rc = 0; + } + return rc; +} + +int llc_conn_ac_conn_confirm(struct sock *sk, struct llc_conn_state_ev *ev) +{ + union llc_u_prim_data *prim_data = llc_cfm_prim.data; + struct sk_buff *skb = ev->data.pdu.skb; + /* FIXME: wtf, this is global, so the whole thing is really + * non reentrant... 
+ */ + struct llc_prim_if_block *prim = &llc_cfm_prim; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + prim_data->conn.sk = sk; + prim_data->conn.pri = 0; + prim_data->conn.status = ev->status; + prim_data->conn.link = llc->link; + if (skb) + prim_data->conn.dev = skb->dev; + else + printk(KERN_ERR __FUNCTION__ "ev->data.pdu.skb == NULL\n"); + prim->data = prim_data; + prim->prim = LLC_CONN_PRIM; + prim->sap = sap; + ev->flag = 1; + ev->cfm_prim = prim; + return 0; +} + +static int llc_conn_ac_data_confirm(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + struct llc_prim_if_block *prim = &llc_cfm_prim; + union llc_u_prim_data *prim_data = llc_cfm_prim.data; + + prim_data->data.sk = sk; + prim_data->data.pri = 0; + prim_data->data.link = llc_sk(sk)->link; + prim_data->data.status = LLC_STATUS_RECEIVED; + prim_data->data.skb = NULL; + prim->data = prim_data; + prim->prim = LLC_DATA_PRIM; + prim->sap = llc_sk(sk)->sap; + ev->flag = 1; + ev->cfm_prim = prim; + return 0; +} + +int llc_conn_ac_data_ind(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_conn_rtn_pdu(sk, ev->data.pdu.skb, ev); + return 0; +} + +int llc_conn_ac_disc_ind(struct sock *sk, struct llc_conn_state_ev *ev) +{ + u8 reason = 0; + int rc = 1; + union llc_u_prim_data *prim_data = llc_ind_prim.data; + struct llc_prim_if_block *prim = &llc_ind_prim; + + if (ev->type == LLC_CONN_EV_TYPE_PDU) { + llc_pdu_un_t *rx_pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_IS_RSP(rx_pdu) && + !LLC_PDU_TYPE_IS_U(rx_pdu) && + LLC_U_PDU_RSP(rx_pdu) == LLC_2_PDU_RSP_DM) { + reason = LLC_DISC_REASON_RX_DM_RSP_PDU; + rc = 0; + } else if (!LLC_PDU_IS_CMD(rx_pdu) && + !LLC_PDU_TYPE_IS_U(rx_pdu) && + LLC_U_PDU_CMD(rx_pdu) == LLC_2_PDU_CMD_DISC) { + reason = LLC_DISC_REASON_RX_DISC_CMD_PDU; + rc = 0; + } + } else if (ev->type == LLC_CONN_EV_TYPE_ACK_TMR) { + reason = LLC_DISC_REASON_ACK_TMR_EXP; + rc = 0; + } else { + reason = 0; + rc = 1; + } + if (!rc) { + prim_data->disc.sk = sk; + prim_data->disc.reason = reason; + prim_data->disc.link = llc_sk(sk)->link; + prim->data = prim_data; + prim->prim = LLC_DISC_PRIM; + prim->sap = llc_sk(sk)->sap; + ev->flag = 1; + ev->ind_prim = prim; + } + return rc; +} + +int llc_conn_ac_disc_confirm(struct sock *sk, struct llc_conn_state_ev *ev) +{ + union llc_u_prim_data *prim_data = llc_cfm_prim.data; + struct llc_prim_if_block *prim = &llc_cfm_prim; + + prim_data->disc.sk = sk; + prim_data->disc.reason = ev->status; + prim_data->disc.link = llc_sk(sk)->link; + prim->data = prim_data; + prim->prim = LLC_DISC_PRIM; + prim->sap = llc_sk(sk)->sap; + ev->flag = 1; + ev->cfm_prim = prim; + return 0; +} + +int llc_conn_ac_rst_ind(struct sock *sk, struct llc_conn_state_ev *ev) +{ + u8 reason = 0; + int rc = 1; + llc_pdu_un_t *rx_pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + union llc_u_prim_data *prim_data = llc_ind_prim.data; + struct llc_prim_if_block *prim = &llc_ind_prim; + struct llc_opt *llc = llc_sk(sk); + + switch (ev->type) { + case LLC_CONN_EV_TYPE_PDU: + if (!LLC_PDU_IS_RSP(rx_pdu) && + !LLC_PDU_TYPE_IS_U(rx_pdu) && + LLC_U_PDU_RSP(rx_pdu) == LLC_2_PDU_RSP_FRMR) { + reason = LLC_RESET_REASON_LOCAL; + rc = 0; + } else if (!LLC_PDU_IS_CMD(rx_pdu) && + !LLC_PDU_TYPE_IS_U(rx_pdu) && + LLC_U_PDU_CMD(rx_pdu) == + LLC_2_PDU_CMD_SABME) { + reason = LLC_RESET_REASON_REMOTE; + rc = 0; + } else { + reason = 0; + rc = 1; + } + break; + case LLC_CONN_EV_TYPE_ACK_TMR: + case LLC_CONN_EV_TYPE_P_TMR: + case LLC_CONN_EV_TYPE_REJ_TMR: + case LLC_CONN_EV_TYPE_BUSY_TMR: + if 
(llc->retry_count > llc->n2) { + reason = LLC_RESET_REASON_LOCAL; + rc = 0; + } else + rc = 1; + break; + } + if (!rc) { + prim_data->res.sk = sk; + prim_data->res.reason = reason; + prim_data->res.link = llc->link; + prim->data = prim_data; + prim->prim = LLC_RESET_PRIM; + prim->sap = llc->sap; + ev->flag = 1; + ev->ind_prim = prim; + } + return rc; +} + +int llc_conn_ac_rst_confirm(struct sock *sk, struct llc_conn_state_ev *ev) +{ + union llc_u_prim_data *prim_data = llc_cfm_prim.data; + struct llc_prim_if_block *prim = &llc_cfm_prim; + + prim_data->res.sk = sk; + prim_data->res.link = llc_sk(sk)->link; + prim->data = prim_data; + prim->prim = LLC_RESET_PRIM; + prim->sap = llc_sk(sk)->sap; + ev->flag = 1; + ev->cfm_prim = prim; + return 0; +} + +int llc_conn_ac_report_status(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return 0; +} + +int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_IS_RSP(rx_pdu) && + !LLC_PDU_TYPE_IS_I(rx_pdu) && + !LLC_I_PF_IS_1(rx_pdu) && llc_sk(sk)->ack_pf) + llc_conn_ac_clear_remote_busy(sk, ev); + return 0; +} + +int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + if (llc_sk(sk)->data_flag == 2) { + del_timer(&llc_sk(sk)->rej_sent_timer.timer); + llc_sk(sk)->rej_sent_timer.running = 0; + } + return 0; +} + +int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + u8 p_bit = 1; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_disc_cmd(skb, p_bit); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + llc_conn_ac_set_p_flag_1(sk, ev); + return rc; +} + +int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + struct sk_buff *rx_skb = ev->data.pdu.skb; + u8 f_bit; + + skb->dev = llc->dev; + llc_pdu_decode_pf_bit(rx_skb, &f_bit); + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_dm_rsp(skb, f_bit); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 1; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_dm_rsp(skb, f_bit); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_dm_rsp_f_set_f_flag(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = llc->f_flag; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_dm_rsp(skb, f_bit); + 
lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 f_bit; + int rc = 1; + struct sk_buff *skb, *ev_skb = ev->data.pdu.skb; + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev_skb->nh.raw; + struct llc_opt *llc = llc_sk(sk); + + llc->rx_pdu_hdr = (u32)*((u32 *)rx_pdu); + if (!LLC_PDU_IS_CMD(rx_pdu)) + llc_pdu_decode_pf_bit(ev_skb, &f_bit); + else + f_bit = 0; + skb = llc_alloc_frame(); + if (skb) { + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_frmr_rsp(skb, rx_pdu, f_bit, llc->vS, + llc->vR, INCORRECT); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + u8 f_bit = 0; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)&llc->rx_pdu_hdr; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_frmr_rsp(skb, rx_pdu, f_bit, llc->vS, + llc->vR, INCORRECT); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 f_bit; + int rc = 1; + struct sk_buff *skb; + + llc_pdu_decode_pf_bit(ev->data.pdu.skb, &f_bit); + skb = llc_alloc_frame(); + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_frmr_rsp(skb, rx_pdu, f_bit, llc->vS, + llc->vR, INCORRECT); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 p_bit = 1; + struct sk_buff *skb = ev->data.prim.data->data->data.skb; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_i_cmd(skb, p_bit, llc->vS, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, ev); + return 0; +} + +int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 p_bit = 0; + struct sk_buff *skb = ev->data.prim.data->data->data.skb; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_i_cmd(skb, p_bit, llc->vS, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, ev); + return 0; +} + +int llc_conn_ac_resend_i_cmd_p_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + u8 nr = LLC_I_GET_NR(rx_pdu); + + llc_conn_resend_i_pdu_as_cmd(sk, nr, 1); + return 0; +} + +int 
llc_conn_ac_resend_i_cmd_p_set_1_or_send_rr(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + u8 nr = LLC_I_GET_NR(rx_pdu); + int rc = llc_conn_ac_send_rr_cmd_p_set_1(sk, ev); + + if (!rc) + llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); + return rc; +} + +int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 p_bit = 0; + struct sk_buff *skb = ev->data.prim.data->data->data.skb; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_i_cmd(skb, p_bit, llc->vS, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, ev); + return 0; +} + +int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 nr = LLC_I_GET_NR(rx_pdu); + + llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); + return 0; +} + +int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 nr; + u8 f_bit = 0; + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + if (rc) { + nr = LLC_I_GET_NR(rx_pdu); + rc = 0; + llc_conn_resend_i_pdu_as_cmd(sk, nr, f_bit); + } + return rc; +} + +int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 nr = LLC_I_GET_NR(rx_pdu); + + llc_conn_resend_i_pdu_as_rsp(sk, nr, 1); + return 0; +} + +int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 p_bit = 1; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_rej_cmd(skb, p_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + u8 f_bit = 1; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rej_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 0; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rej_rsp(skb, f_bit, llc->vR); + 
lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 p_bit = 1; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_rnr_cmd(skb, p_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 1; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rnr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + u8 f_bit = 0; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rnr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_set_remote_busy(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + if (!llc->remote_busy_flag) { + llc->remote_busy_flag = 1; + llc->busy_state_timer.timer.expires = jiffies + + llc->busy_state_timer.expire * HZ; + llc->busy_state_timer.timer.data = (unsigned long)sk; + llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb; + add_timer(&llc->busy_state_timer.timer); + llc->busy_state_timer.running = 1; + } + return 0; +} + +int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 0; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rnr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + u8 p_bit = 1; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_rr_cmd(skb, p_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_ack_cmd_p_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + u8 p_bit = 1; + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + 
llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_rr_cmd(skb, p_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 1; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 1; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 0; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = 0; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + struct llc_opt *llc = llc_sk(sk); + u8 p_bit = 1; + + if (skb) { + struct llc_sap *sap = llc->sap; + u8 *dmac = llc->daddr.mac; + + if (llc->dev->flags & IFF_LOOPBACK) + dmac = llc->dev->dev_addr; + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_sabme_cmd(skb, p_bit); + lan_hdrs_init(skb, llc->dev->dev_addr, dmac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + llc->p_flag = p_bit; + return rc; +} + +int llc_conn_ac_send_ua_rsp_f_set_f_flag(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = llc->f_flag; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_ua_rsp(skb, f_bit); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return 
rc; +} + +int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 f_bit; + int rc = 1; + struct sk_buff *rx_skb = ev->data.pdu.skb; + struct sk_buff *skb; + + llc_pdu_decode_pf_bit(rx_skb, &f_bit); + skb = llc_alloc_frame(); + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_ua_rsp(skb, f_bit); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +int llc_conn_ac_set_s_flag_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->s_flag = 0; + return 0; +} + +int llc_conn_ac_set_s_flag_1(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->s_flag = 1; + return 0; +} + +int llc_conn_ac_start_p_timer(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + llc->p_flag = 1; + del_timer(&llc->pf_cycle_timer.timer); + llc->pf_cycle_timer.timer.expires = jiffies + + llc->pf_cycle_timer.expire * HZ; + llc->pf_cycle_timer.timer.data = (unsigned long)sk; + llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb; + add_timer(&llc->pf_cycle_timer.timer); + llc->pf_cycle_timer.running = 1; + return 0; +} + +/** + * llc_conn_ac_send_ack_if_needed - check if ack is needed + * @sk: current connection structure + * @ev: current event + * + * Checks the number of received PDUs which have not been acknowledged + * yet. If that number reaches "npta" (Number of PDUs To Acknowledge), + * an RR response is sent as an acknowledgement for them. Returns 0 for + * success, 1 otherwise. + */ +int llc_conn_ac_send_ack_if_needed(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u8 pf_bit; + struct sk_buff *skb = ev->data.pdu.skb; + struct llc_opt *llc = llc_sk(sk); + + llc_pdu_decode_pf_bit(skb, &pf_bit); + llc->ack_pf |= pf_bit & 1; + if (!llc->ack_must_be_send) { + llc->first_pdu_Ns = llc->vR; + llc->ack_must_be_send = 1; + llc->ack_pf = pf_bit & 1; + } + if (((llc->vR - llc->first_pdu_Ns + 129) % 128) >= llc->npta) { + llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, ev); + llc->ack_must_be_send = 0; + llc->ack_pf = 0; + llc_conn_ac_inc_npta_value(sk, ev); + } + return 0; +} + +/** + * llc_conn_ac_rst_sendack_flag - resets ack_must_be_send flag + * @sk: current connection structure + * @ev: current event + * + * This action resets the ack_must_be_send flag of the given connection; + * this flag indicates whether there is any received PDU which has not + * been acknowledged yet. Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_rst_sendack_flag(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->ack_must_be_send = llc_sk(sk)->ack_pf = 0; + return 0; +} +
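One step in llc_conn_ac_send_ack_if_needed() above is easy to misread: the `(llc->vR - llc->first_pdu_Ns + 129) % 128' test. V(R) is a mod-128 sequence number, and in C the `%' operator on a negative difference yields a negative result, so the constant 129 (= 128 + 1) both restores the wrapped distance and, as the constants work out, biases it by one. A standalone illustration (ordinary userspace C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* first_pdu_Ns was 126 when the run of unacknowledged PDUs
		 * began; after receiving 126, 127, 0 and 1, vR is now 2.
		 */
		int first_pdu_ns = 126, vr = 2;

		printf("naive: %d\n", (vr - first_pdu_ns) % 128);	/* prints -124 */
		printf("safe:  %d\n", (vr - first_pdu_ns + 129) % 128);	/* prints 5 */
		return 0;
	}

When the safe form reaches npta, the RR response goes out and both ack_must_be_send and ack_pf are cleared.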
+/** + * llc_conn_ac_send_i_rsp_f_set_ackpf - acknowledge received PDUs + * @sk: current connection structure + * @ev: current event + * + * Sends an I response PDU with the f-bit set to the ack_pf flag, as an + * acknowledgement for all received PDUs which have not been acknowledged + * yet. The ack_pf flag is set to one when a PDU with the p-bit set to + * one is received. Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + struct sk_buff *skb = ev->data.prim.data->data->data.skb; + struct llc_opt *llc = llc_sk(sk); + u8 p_bit = llc->ack_pf; + struct llc_sap *sap = llc->sap; + + llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_i_cmd(skb, p_bit, llc->vS, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, ev); + return 0; +} + +/** + * llc_conn_ac_send_i_as_ack - sends an I-format PDU to acknowledge rx PDUs + * @sk: current connection structure. + * @ev: current event. + * + * This action sends an I-format PDU as an acknowledgement for any + * received PDUs which have not been acknowledged yet. Riding the + * acknowledgement on an outgoing data frame reduces the number of + * separate acknowledgements; this technique is called piggybacking. + * Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_send_i_as_ack(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + if (llc->ack_must_be_send) { + llc_conn_ac_send_i_rsp_f_set_ackpf(sk, ev); + llc->ack_must_be_send = 0; + llc->ack_pf = 0; + } else + llc_conn_ac_send_i_cmd_p_set_0(sk, ev); + return 0; +} + +/** + * llc_conn_ac_send_rr_rsp_f_set_ackpf - ack all rx PDUs not yet acked + * @sk: current connection structure. + * @ev: current event. + * + * This action sends an RR response with the f-bit set to the ack_pf + * flag, as an acknowledgement for all received PDUs which have not been + * acknowledged yet, if there are any. The ack_pf flag indicates whether + * a PDU has been received with the p-bit set to one. Returns 0 for + * success, 1 otherwise. + */ +int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + int rc = 1; + struct sk_buff *skb = llc_alloc_frame(); + + if (skb) { + struct llc_opt *llc = llc_sk(sk); + struct llc_sap *sap = llc->sap; + u8 f_bit = llc->ack_pf; + + skb->dev = llc->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_S, sap->laddr.lsap, + llc->daddr.lsap, LLC_PDU_RSP); + llc_pdu_init_as_rr_rsp(skb, f_bit, llc->vR); + lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac); + rc = 0; + llc_conn_send_pdu(sk, skb); + } + return rc; +} + +/** + * llc_conn_ac_inc_npta_value - tries to make value of npta greater + * @sk: current connection structure. + * @ev: current event. + * + * Every "inc_cntr" calls of this action, "npta" increases by one. This + * action tries to make the value of "npta" as large as possible, since + * increasing "npta" decreases the number of acknowledgements sent. + * Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_inc_npta_value(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + if (!llc->inc_cntr) { + llc->dec_step = 0; + llc->dec_cntr = llc->inc_cntr = 2; + ++llc->npta; + if (llc->npta > 127) + llc->npta = 127; + } else + --llc->inc_cntr; + return 0; +} +
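llc_conn_ac_inc_npta_value() above and the two adjust actions that follow form a small feedback loop on npta: coalesce more PDUs per acknowledgement while things go well, back off when the peer has to prod us with RR/RNR. A toy model of the heuristic, assuming only what the code shows (both counters re-armed to 2, npta clamped to 127; not kernel code):

	struct npta_model {
		unsigned int npta, inc_cntr, dec_cntr;
	};

	/* cf. llc_conn_ac_inc_npta_value: npta creeps up every second
	 * coalesced acknowledgement.
	 */
	static void on_ack_sent(struct npta_model *m)
	{
		if (!m->inc_cntr) {
			m->inc_cntr = m->dec_cntr = 2;
			if (++m->npta > 127)
				m->npta = 127;
		} else
			--m->inc_cntr;
	}

	/* cf. the adjust_npta actions below: npta steps back down every
	 * second RR/RNR poll from the peer.
	 */
	static void on_poll_rcvd(struct npta_model *m)
	{
		if (!m->dec_cntr) {
			m->inc_cntr = m->dec_cntr = 2;
			if (m->npta > 0)
				--m->npta;
		} else
			--m->dec_cntr;
	}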
+/** + * llc_conn_ac_adjust_npta_by_rr - decreases "npta" by one + * @sk: current connection structure. + * @ev: current event. + * + * After receiving an RR command "dec_cntr" times, this action decreases + * "npta" by one. Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + if (!llc->connect_step && !llc->remote_busy_flag) { + if (!llc->dec_step) { + if (!llc->dec_cntr) { + llc->inc_cntr = llc->dec_cntr = 2; + if (llc->npta > 0) + llc->npta = llc->npta - 1; + } else + llc->dec_cntr -= 1; + } + } else + llc->connect_step = 0; + return 0; +} + +/** + * llc_conn_ac_adjust_npta_by_rnr - decreases "npta" by one + * @sk: current connection structure. + * @ev: current event. + * + * After receiving an RNR command "dec_cntr" times, this action decreases + * "npta" by one. Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + if (llc->remote_busy_flag) + if (!llc->dec_step) { + if (!llc->dec_cntr) { + llc->inc_cntr = llc->dec_cntr = 2; + if (llc->npta > 0) + --llc->npta; + } else + --llc->dec_cntr; + } + return 0; +} + +/** + * llc_conn_ac_dec_tx_win_size - decreases tx window size + * @sk: current connection structure. + * @ev: current event. + * + * After a REJ command or response is received, the transmit window size + * is decreased by the number of PDUs which are still outstanding. + * Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + u8 unacked_pdu = skb_queue_len(&llc->pdu_unack_q); + + llc->k -= unacked_pdu; + if (llc->k < 2) + llc->k = 2; + return 0; +} +
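llc_conn_ac_dec_tx_win_size() above and llc_conn_ac_inc_tx_win_size() below are the usual shrink-on-loss, grow-on-ack pair. Stripped of the socket plumbing (and using int rather than the kernel's u8 for clarity), the logic is just:

	/* k is the transmit window, kept within the [2, 128] range that
	 * mod-128 sequence numbers allow.
	 */
	static void tx_win_on_rej(int *k, int outstanding_pdus)
	{
		*k -= outstanding_pdus;
		if (*k < 2)
			*k = 2;
	}

	static void tx_win_on_rr_f1(int *k)
	{
		if (++*k > 128)
			*k = 128;
	}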
+/** + * llc_conn_ac_inc_tx_win_size - tx window size is inc by 1 + * @sk: current connection structure. + * @ev: current event. + * + * After receiving an RR response with the f-bit set to one, the transmit + * window size is increased by one. Returns 0 for success, 1 otherwise. + */ +int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + llc->k += 1; + if (llc->k > 128) + llc->k = 128; + return 0; +} + +int llc_conn_ac_stop_all_timers(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + del_timer(&llc->pf_cycle_timer.timer); + llc->pf_cycle_timer.running = 0; + del_timer(&llc->ack_timer.timer); + llc->ack_timer.running = 0; + del_timer(&llc->rej_sent_timer.timer); + llc->rej_sent_timer.running = 0; + del_timer(&llc->busy_state_timer.timer); + llc->busy_state_timer.running = 0; + llc->ack_must_be_send = 0; + llc->ack_pf = 0; + return 0; +} + +int llc_conn_ac_stop_other_timers(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + del_timer(&llc->rej_sent_timer.timer); + llc->rej_sent_timer.running = 0; + del_timer(&llc->pf_cycle_timer.timer); + llc->pf_cycle_timer.running = 0; + del_timer(&llc->busy_state_timer.timer); + llc->busy_state_timer.running = 0; + llc->ack_must_be_send = 0; + llc->ack_pf = 0; + return 0; +} + +int llc_conn_ac_start_ack_timer(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + del_timer(&llc->ack_timer.timer); + llc->ack_timer.timer.expires = jiffies + llc->ack_timer.expire * HZ; + llc->ack_timer.timer.data = (unsigned long)sk; + llc->ack_timer.timer.function = llc_conn_ack_tmr_cb; + add_timer(&llc->ack_timer.timer); + llc->ack_timer.running = 1; + return 0; +} + +int llc_conn_ac_start_rej_timer(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + del_timer(&llc->rej_sent_timer.timer); + llc->rej_sent_timer.timer.expires = jiffies + + llc->rej_sent_timer.expire * HZ; + llc->rej_sent_timer.timer.data = (unsigned long)sk; + llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb; + add_timer(&llc->rej_sent_timer.timer); + llc->rej_sent_timer.running = 1; + return 0; +} + +int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + if (!llc->ack_timer.running) { + llc->ack_timer.timer.expires = jiffies + + llc->ack_timer.expire * HZ; + llc->ack_timer.timer.data = (unsigned long)sk; + llc->ack_timer.timer.function = llc_conn_ack_tmr_cb; + add_timer(&llc->ack_timer.timer); + llc->ack_timer.running = 1; + } + return 0; +} + +int llc_conn_ac_stop_ack_timer(struct sock *sk, struct llc_conn_state_ev *ev) +{ + del_timer(&llc_sk(sk)->ack_timer.timer); + llc_sk(sk)->ack_timer.running = 0; + return 0; +} + +int llc_conn_ac_stop_p_timer(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct llc_opt *llc = llc_sk(sk); + + del_timer(&llc->pf_cycle_timer.timer); + llc->pf_cycle_timer.running = 0; + llc->p_flag = 0; + return 0; +} + +int llc_conn_ac_stop_rej_timer(struct sock *sk, struct llc_conn_state_ev *ev) +{ + del_timer(&llc_sk(sk)->rej_sent_timer.timer); + llc_sk(sk)->rej_sent_timer.running = 0; + return 0; +} + +int llc_conn_ac_upd_nr_received(struct sock *sk, struct llc_conn_state_ev *ev) +{ + int acked; + u16 unacked = 0; + u8 fbit; + struct sk_buff *skb = ev->data.pdu.skb; + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)skb->nh.raw; + struct llc_opt *llc = llc_sk(sk); + + llc->last_nr = PDU_SUPV_GET_Nr(rx_pdu); + acked = llc_conn_remove_acked_pdus(sk, llc->last_nr, &unacked); + /* On loopback we don't queue I frames in unack_pdu_q queue.
*/ + if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) { + llc->retry_count = 0; + del_timer(&llc->ack_timer.timer); + llc->ack_timer.running = 0; + if (llc->failed_data_req) { + /* We previously refused data from the upper layer + * (tx_window full or unacceptable state). Now we can + * send data again and must inform the upper layer. + */ + llc->failed_data_req = 0; + llc_conn_ac_data_confirm(sk, ev); + } + if (unacked) { + llc->ack_timer.timer.expires = jiffies + + llc->ack_timer.expire * HZ; + llc->ack_timer.timer.data = (unsigned long)sk; + llc->ack_timer.timer.function = llc_conn_ack_tmr_cb; + add_timer(&llc->ack_timer.timer); + llc->ack_timer.running = 1; + } + } else if (llc->failed_data_req) { + llc_pdu_decode_pf_bit(skb, &fbit); + if (fbit == 1) { + llc->failed_data_req = 0; + llc_conn_ac_data_confirm(sk, ev); + } + } + return 0; +} + +int llc_conn_ac_upd_p_flag(struct sock *sk, struct llc_conn_state_ev *ev) +{ + struct sk_buff *skb = ev->data.pdu.skb; + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)skb->nh.raw; + u8 f_bit; + + if (!LLC_PDU_IS_RSP(rx_pdu) && + !llc_pdu_decode_pf_bit(skb, &f_bit) && f_bit) { + llc_sk(sk)->p_flag = 0; + llc_conn_ac_stop_p_timer(sk, ev); + } + return 0; +} + +int llc_conn_ac_set_data_flag_2(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->data_flag = 2; + return 0; +} + +int llc_conn_ac_set_data_flag_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->data_flag = 0; + return 0; +} + +int llc_conn_ac_set_data_flag_1(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->data_flag = 1; + return 0; +} + +int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + if (!llc_sk(sk)->data_flag) + llc_sk(sk)->data_flag = 1; + return 0; +} + +int llc_conn_ac_set_p_flag_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->p_flag = 0; + return 0; +} + +int llc_conn_ac_set_p_flag_1(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->p_flag = 1; + return 0; +} + +int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->remote_busy_flag = 0; + return 0; +} + +int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->cause_flag = 0; + return 0; +} + +int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->cause_flag = 1; + return 0; +} + +int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->retry_count = 0; + return 0; +} + +int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->retry_count++; + return 0; +} + +int llc_conn_ac_set_vr_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->vR = 0; + return 0; +} + +int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->vR = PDU_GET_NEXT_Vr(llc_sk(sk)->vR); + return 0; +} + +int llc_conn_ac_set_vs_0(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->vS = 0; + return 0; +} + +int llc_conn_ac_set_vs_nr(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->vS = llc_sk(sk)->last_nr; + return 0; +} + +int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % 128; + return 0; +} + +int llc_conn_ac_set_f_flag_p(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_pdu_decode_pf_bit(ev->data.pdu.skb, &llc_sk(sk)->f_flag); + return 0; +} + +void llc_conn_pf_cycle_tmr_cb(unsigned long
timeout_data) +{ + struct sock *sk = (struct sock *)timeout_data; + struct llc_conn_state_ev *ev; + + llc_sk(sk)->pf_cycle_timer.running = 0; + ev = llc_conn_alloc_ev(sk); + if (ev) { + ev->type = LLC_CONN_EV_TYPE_P_TMR; + ev->data.tmr.timer_specific = NULL; + llc_process_tmr_ev(sk, ev); + } +} + +static void llc_conn_busy_tmr_cb(unsigned long timeout_data) +{ + struct sock *sk = (struct sock *)timeout_data; + struct llc_conn_state_ev *ev; + + llc_sk(sk)->busy_state_timer.running = 0; + ev = llc_conn_alloc_ev(sk); + if (ev) { + ev->type = LLC_CONN_EV_TYPE_BUSY_TMR; + ev->data.tmr.timer_specific = NULL; + llc_process_tmr_ev(sk, ev); + } +} + +void llc_conn_ack_tmr_cb(unsigned long timeout_data) +{ + struct sock *sk = (struct sock *)timeout_data; + struct llc_conn_state_ev *ev; + + llc_sk(sk)->ack_timer.running = 0; + ev = llc_conn_alloc_ev(sk); + if (ev) { + ev->type = LLC_CONN_EV_TYPE_ACK_TMR; + ev->data.tmr.timer_specific = NULL; + llc_process_tmr_ev(sk, ev); + } +} + +static void llc_conn_rej_tmr_cb(unsigned long timeout_data) +{ + struct sock *sk = (struct sock *)timeout_data; + struct llc_conn_state_ev *ev; + + llc_sk(sk)->rej_sent_timer.running = 0; + ev = llc_conn_alloc_ev(sk); + if (ev) { + ev->type = LLC_CONN_EV_TYPE_REJ_TMR; + ev->data.tmr.timer_specific = NULL; + llc_process_tmr_ev(sk, ev); + } +} + +int llc_conn_ac_rst_vs(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sk(sk)->X = llc_sk(sk)->vS; + llc_conn_ac_set_vs_nr(sk, ev); + return 0; +} + +int llc_conn_ac_upd_vs(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *rx_pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 nr = PDU_SUPV_GET_Nr(rx_pdu); + + if (llc_circular_between(llc_sk(sk)->vS, nr, llc_sk(sk)->X)) + llc_conn_ac_set_vs_nr(sk, ev); + return 0; +} + +/* + * Non-standard actions; these are not contained in the IEEE + * specification; they are for our own use. + */ +/** + * llc_conn_disc - removes connection from SAP list and frees it + * @sk: closed connection + * @ev: occurred event + * + * Returns 2, to indicate to the state machine that the connection was + * freed. + */ +int llc_conn_disc(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sap_unassign_sock(llc_sk(sk)->sap, sk); + llc_sock_free(sk); + return 2; +} + +/** + * llc_conn_reset - resets connection + * @sk: connection being reset. + * @ev: occurred event. + * + * Stops all timers, empties all queues and resets all flags. + */ +int llc_conn_reset(struct sock *sk, struct llc_conn_state_ev *ev) +{ + llc_sock_reset(sk); + return 0; +} + +/** + * llc_circular_between - designates whether b is between a and c + * @a: lower bound + * @b: element to see if it is between a and c + * @c: upper bound + * + * This function determines whether b is between a and c in the circular + * sequence-number space (for example, 0 is between 127 and 1). Returns 1 + * if b is between a and c, 0 otherwise. + */ +u8 llc_circular_between(u8 a, u8 b, u8 c) +{ + b = b - a; + c = c - a; + return b <= c; +} +
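llc_circular_between() above is the workhorse behind every window check in this layer; the two unsigned subtractions do all the wraparound handling. Two worked cases (standalone C; the local copy below merely restates the function for illustration):

	#include <assert.h>

	static unsigned char circular_between(unsigned char a, unsigned char b,
					      unsigned char c)
	{
		b = b - a;	/* u8 arithmetic: negative differences wrap mod 256 */
		c = c - a;
		return b <= c;
	}

	int main(void)
	{
		/* The example from the comment: 0 is between 127 and 1,
		 * since (0 - 127) wraps to 129 and (1 - 127) to 130.
		 */
		assert(circular_between(127, 0, 1) == 1);

		/* A miss: 20 is not between 10 and 15, since 10 <= 5 fails. */
		assert(circular_between(10, 20, 15) == 0);
		return 0;
	}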
+/** + * llc_process_tmr_ev - timer backend + * @sk: active connection + * @ev: occurred event + * + * This function is called from the timer callback functions. When the + * connection is busy (sending a data frame), a timer expiration event + * must be queued; otherwise the event can be sent directly to the + * connection state machine. Queued events are processed by the + * process_rxframes_events function after the data frame has been sent. + */ +static void llc_process_tmr_ev(struct sock *sk, struct llc_conn_state_ev *ev) +{ + bh_lock_sock(sk); + if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { + printk(KERN_WARNING "timer called on closed connection\n"); + llc_conn_free_ev(ev); + goto out; + } + if (!sk->lock.users) + llc_conn_send_ev(sk, ev); + else { + struct sk_buff *skb = alloc_skb(1, GFP_ATOMIC); + + if (skb) { + skb->cb[0] = LLC_EVENT; + skb->data = (void *)ev; + sk_add_backlog(sk, skb); + } else + llc_conn_free_ev(ev); + } +out: + bh_unlock_sock(sk); +} diff -Nru a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_c_ev.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,873 @@ +/* + * llc_c_ev.c - Connection component state transition event qualifiers + * + * A 'state' consists of a number of possible event matching functions, + * the actions associated with each being executed when that event is + * matched; a 'state machine' accepts events in a serial fashion from an + * event queue. Each event is passed to each successive event matching + * function until a match is made (the event matching function returns + * success, or '0') or the list of event matching functions is exhausted. + * If a match is made, the actions associated with the event are executed + * and the state is changed to that event's transition state. Before some + * events are recognized, even after a match has been made, a certain + * number of 'event qualifier' functions must also be executed. If these + * all execute successfully, then the event is finally executed. + * + * These event functions must return 0 for success, to show a matched + * event, or 1 if the event does not match. Event qualifier functions + * must return 0 for success or a non-zero value for failure. Each + * function is simply responsible for verifying one single thing and + * returning either a success or a failure. + * + * All of the following event functions are described in the IEEE 802.2 + * LLC protocol standard document, except for two functions that we + * added; those are explained in their own comments below. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include [...] +#include [...] +#include [...] +#include [...] +#include [...] +#include [...] + +#if 0 +#define dprintk(args...) printk(KERN_DEBUG args) +#else +#define dprintk(args...) +#endif + +extern u16 llc_circular_between(u8 a, u8 b, u8 c); + +/** + * llc_util_ns_inside_rx_window - check if sequence number is in rx window + * @ns: sequence number of received pdu. + * @vr: sequence number which receiver expects to receive. + * @rw: receive window size of receiver. + * + * Checks whether the sequence number of a received PDU is in the range of + * the receive window. Returns 0 for success, 1 otherwise. + */ +static u16 llc_util_ns_inside_rx_window(u8 ns, u8 vr, u8 rw) +{ + return !llc_circular_between(vr, ns, + (vr + rw - 1) % LLC_2_SEQ_NBR_MODULO); +} +
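The header comment of llc_c_ev.c above pins down the dispatch discipline precisely; as a reading aid, the loop it implies looks roughly like this (illustrative types and names only; the real transition tables live in the connection state headers this file is built against):

	typedef int (*llc_ev_fn)(struct sock *sk, struct llc_conn_state_ev *ev);

	struct example_transition {
		llc_ev_fn match;	/* event matching function, 0 on match */
		llc_ev_fn *qualifiers;	/* NULL-terminated, all must return 0 */
		llc_ev_fn *actions;	/* NULL-terminated, run in order */
		int next_state;
	};

	static int example_dispatch(struct sock *sk, struct llc_conn_state_ev *ev,
				    struct example_transition *t, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			llc_ev_fn *fn;

			if (t[i].match(sk, ev))
				continue;		/* not this event, try next */
			for (fn = t[i].qualifiers; fn && *fn; fn++)
				if ((*fn)(sk, ev))
					return -1;	/* qualifier failed */
			for (fn = t[i].actions; fn && *fn; fn++)
				(*fn)(sk, ev);
			return t[i].next_state;
		}
		return -1;			/* event not recognized */
	}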
+/** + * llc_util_nr_inside_tx_window - check if sequence number is in tx window + * @sk: current connection. + * @nr: N(R) of received PDU. + * + * This routine checks whether the N(R) of a received PDU is in the range + * of the transmit window; in other words, whether the received PDU + * acknowledges some outstanding PDUs that are in the transmit window. + * Returns 0 for success, 1 otherwise. + */ +static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr) +{ + u8 nr1, nr2; + struct sk_buff *skb; + llc_pdu_sn_t *pdu; + struct llc_opt *llc = llc_sk(sk); + int rc = 0; + + if (llc->dev->flags & IFF_LOOPBACK) + goto out; + rc = 1; + if (!skb_queue_len(&llc->pdu_unack_q)) + goto out; + skb = skb_peek(&llc->pdu_unack_q); + pdu = (llc_pdu_sn_t *)skb->nh.raw; + nr1 = LLC_I_GET_NS(pdu); + skb = skb_peek_tail(&llc->pdu_unack_q); + pdu = (llc_pdu_sn_t *)skb->nh.raw; + nr2 = LLC_I_GET_NS(pdu); + rc = !llc_circular_between(nr1, nr, (nr2 + 1) % LLC_2_SEQ_NBR_MODULO); +out: + return rc; +} + +int llc_conn_ev_conn_req(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->data.prim.prim == LLC_CONN_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 0 : 1; +} + +int llc_conn_ev_conn_resp(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->data.prim.prim == LLC_CONN_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_RESP ? 0 : 1; +} + +int llc_conn_ev_data_req(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->data.prim.prim == LLC_DATA_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 0 : 1; +} + +int llc_conn_ev_disc_req(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->data.prim.prim == LLC_DISC_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 0 : 1; +} + +int llc_conn_ev_rst_req(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->data.prim.prim == LLC_RESET_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 0 : 1; +} + +int llc_conn_ev_rst_resp(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->data.prim.prim == LLC_RESET_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_RESP ? 0 : 1; +} + +int llc_conn_ev_local_busy_detected(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + return ev->type == LLC_CONN_EV_TYPE_SIMPLE && + ev->data.a.ev == LLC_CONN_EV_LOCAL_BUSY_DETECTED ? 0 : 1; +} + +int llc_conn_ev_local_busy_cleared(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + return ev->type == LLC_CONN_EV_TYPE_SIMPLE && + ev->data.a.ev == LLC_CONN_EV_LOCAL_BUSY_CLEARED ? 0 : 1; +} + +int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return 1; +} + +int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC ? 0 : 1; +} + +int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM ? 0 : 1; +} + +int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR ? 0 : 1; +} + +int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_0(pdu) && + LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ?
0 : 1; +} + +int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_1(pdu) && + LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; +} + +int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vr = llc_sk(sk)->vR; + u8 ns = LLC_I_GET_NS(pdu); + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_0(pdu) && ns != vr && + !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; +} + +int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vr = llc_sk(sk)->vR; + u8 ns = LLC_I_GET_NS(pdu); + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_1(pdu) && ns != vr && + !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; +} + +int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t * pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vr = llc_sk(sk)->vR; + u8 ns = LLC_I_GET_NS(pdu); + u16 rc = !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && ns != vr && + llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; + if (!rc) + dprintk(KERN_WARNING "rx_i_cmd_p_bit_set_x_inval_ns matched," + "state = %d, ns = %d, vr = %d\n", + llc_sk(sk)->state, ns, vr); + return rc; +} + +int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_0(pdu) && + LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; +} + +int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_1(pdu) && + LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; +} + +int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; +} + +int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vr = llc_sk(sk)->vR; + u8 ns = LLC_I_GET_NS(pdu); + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_0(pdu) && ns != vr && + !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; +} + +int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vr = llc_sk(sk)->vR; + u8 ns = LLC_I_GET_NS(pdu); + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && + !LLC_I_PF_IS_1(pdu) && ns != vr && + !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 
0 : 1; +} + +int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vr = llc_sk(sk)->vR; + u8 ns = LLC_I_GET_NS(pdu); + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && ns != vr && + !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; +} + +int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vr = llc_sk(sk)->vR; + u8 ns = LLC_I_GET_NS(pdu); + u16 rc = !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_I(pdu) && ns != vr && + llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; + if (!rc) + dprintk(KERN_WARNING "conn_ev_rx_i_rsp_fbit_set_x_inval_ns " + "matched : state = %d, ns = %d, vr = %d\n", + llc_sk(sk)->state, ns, vr); + return rc; +} + +int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_0(pdu) && + LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_REJ ? 0 : 1; +} + +int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_1(pdu) && + LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_REJ ? 0 : 1; +} + +int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_0(pdu) && + LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1; +} + +int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_1(pdu) && + LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1; +} + +int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1; +} + +int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_0(pdu) && + LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RNR ? 0 : 1; +} + +int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_1(pdu) && + LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RNR ? 0 : 1; +} + +int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_0(pdu) && + LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RNR ? 0 : 1; +} + +int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_1(pdu) && + LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RNR ? 
0 : 1; +} + +int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_0(pdu) && + LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RR ? 0 : 1; +} + +int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_1(pdu) && + LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RR ? 0 : 1; +} + +int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_0(pdu) && + LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RR ? 0 : 1; +} + +int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_S(pdu) && + !LLC_S_PF_IS_1(pdu) && + LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RR ? 0 : 1; +} + +int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_CMD(pdu) && !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME ? 0 : 1; +} + +int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return !LLC_PDU_IS_RSP(pdu) && !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_UA ? 0 : 1; +} + +int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_IS_CMD(pdu)) { + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) { + if (!LLC_I_PF_IS_1(pdu)) + rc = 0; + } else if (!LLC_PDU_TYPE_IS_U(pdu) && !LLC_U_PF_IS_1(pdu)) + rc = 0; + } + return rc; +} + +int llc_conn_ev_rx_xxx_cmd_pbit_set_0(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_IS_CMD(pdu)) { + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) { + if (!LLC_I_PF_IS_0(pdu)) + rc = 0; + } else if (!LLC_PDU_TYPE_IS_U(pdu)) + switch (LLC_U_PDU_CMD(pdu)) { + case LLC_2_PDU_CMD_SABME: + case LLC_2_PDU_CMD_DISC: + if (!LLC_U_PF_IS_0(pdu)) + rc = 0; + break; + } + } + return rc; +} + +int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_IS_CMD(pdu)) { + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) + rc = 0; + else if (!LLC_PDU_TYPE_IS_U(pdu)) + switch (LLC_U_PDU_CMD(pdu)) { + case LLC_2_PDU_CMD_SABME: + case LLC_2_PDU_CMD_DISC: + rc = 0; + break; + } + } + return rc; +} + +int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_IS_RSP(pdu)) { + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) { + if (!LLC_I_PF_IS_1(pdu)) + rc = 0; + } else if (!LLC_PDU_TYPE_IS_U(pdu)) + switch (LLC_U_PDU_RSP(pdu)) { + case LLC_2_PDU_RSP_UA: + case LLC_2_PDU_RSP_DM: + case LLC_2_PDU_RSP_FRMR: + if (!LLC_U_PF_IS_1(pdu)) + rc = 0; + 
break; + } + } + return rc; +} + +int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_IS_RSP(pdu)) { + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) + rc = 0; + else if (!LLC_PDU_TYPE_IS_U(pdu)) + switch (LLC_U_PDU_RSP(pdu)) { + case LLC_2_PDU_RSP_UA: + case LLC_2_PDU_RSP_DM: + case LLC_2_PDU_RSP_FRMR: + rc = 0; + break; + } + } + + return rc; +} + +int llc_conn_ev_rx_xxx_yyy(struct sock *sk, struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) + rc = 0; + else if (!LLC_PDU_TYPE_IS_U(pdu)) + switch (LLC_U_PDU_CMD(pdu)) { + case LLC_2_PDU_CMD_SABME: + case LLC_2_PDU_CMD_DISC: + case LLC_2_PDU_RSP_UA: + case LLC_2_PDU_RSP_DM: + case LLC_2_PDU_RSP_FRMR: + rc = 0; + break; + } + return rc; +} + +int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vs = llc_sk(sk)->vS; + u8 nr = LLC_I_GET_NR(pdu); + + if (!LLC_PDU_IS_CMD(pdu)) { + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) { + if (nr != vs && + llc_util_nr_inside_tx_window(sk, nr)) { + dprintk(KERN_ERR "conn_ev_rx_zzz_cmd_inv_nr " + "matched, state = %d, vs = %d, " + "nr = %d\n", llc_sk(sk)->state, vs, nr); + rc = 0; + } + } + } + return rc; +} + +int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, + struct llc_conn_state_ev *ev) +{ + u16 rc = 1; + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw; + u8 vs = llc_sk(sk)->vS; + u8 nr = LLC_I_GET_NR(pdu); + + if (!LLC_PDU_IS_RSP(pdu)) { + if (!LLC_PDU_TYPE_IS_I(pdu) || !LLC_PDU_TYPE_IS_S(pdu)) { + if (nr != vs && + llc_util_nr_inside_tx_window(sk, nr)) { + rc = 0; + dprintk(KERN_ERR "conn_ev_rx_zzz_fbit_set" + "_x_inval_nr matched, state = %d, " + "vs = %d, nr = %d\n", + llc_sk(sk)->state, vs, nr); + } + } + } + return rc; +} + +int llc_conn_ev_rx_any_frame(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return 0; +} + +int llc_conn_ev_p_tmr_exp(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->type != LLC_CONN_EV_TYPE_P_TMR; +} + +int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->type != LLC_CONN_EV_TYPE_ACK_TMR; +} + +int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->type != LLC_CONN_EV_TYPE_REJ_TMR; +} + +int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->type != LLC_CONN_EV_TYPE_BUSY_TMR; +} + +int llc_conn_ev_any_tmr_exp(struct sock *sk, struct llc_conn_state_ev *ev) +{ + + return ev->type == LLC_CONN_EV_TYPE_P_TMR || + ev->type == LLC_CONN_EV_TYPE_ACK_TMR || + ev->type == LLC_CONN_EV_TYPE_REJ_TMR || + ev->type == LLC_CONN_EV_TYPE_BUSY_TMR ? 0 : 1; +} + +int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return 1; +} + +int llc_conn_ev_tx_buffer_full(struct sock *sk, struct llc_conn_state_ev *ev) +{ + return ev->type == LLC_CONN_EV_TYPE_SIMPLE && + ev->data.a.ev == LLC_CONN_EV_TX_BUFF_FULL ? 
0 : 1;
+}
+
+/* Event qualifier functions
+ *
+ * These functions simply verify the value of a state flag associated with
+ * the connection and return 0 for success or a non-zero value for failure,
+ * i.e. when the flag does not have the value the transition expects.
+ */
+int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk,
+				    struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->data_flag != 1;
+}
+
+int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk,
+				    struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->data_flag;
+}
+
+int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk,
+				    struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->data_flag != 2;
+}
+
+int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk,
+				 struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->p_flag != 1;
+}
+
+/**
+ * llc_conn_ev_qlfy_last_frame_eq_1 - checks if frame is last in tx window
+ * @sk: current connection structure.
+ * @ev: current event.
+ *
+ * This function determines whether the frame being sent is the last frame
+ * of the transmit window. It is used for sending the last frame of the
+ * transmit window as an I-format command with the p-bit set to one.
+ * Returns 0 if the frame is the last frame, 1 otherwise.
+ */
+int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk,
+				     struct llc_conn_state_ev *ev)
+{
+	return !(skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k);
+}
+
+/**
+ * llc_conn_ev_qlfy_last_frame_eq_0 - checks if frame isn't last in tx window
+ * @sk: current connection structure.
+ * @ev: current event.
+ *
+ * This function determines whether the frame being sent is not the last
+ * frame of the transmit window. Returns 0 if the frame isn't the last
+ * frame, 1 otherwise.
+ */
+int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk,
+				     struct llc_conn_state_ev *ev)
+{
+	return skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k;
+}
+
+int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->p_flag;
+}
+
+int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct llc_conn_state_ev *ev)
+{
+	u8 f_bit;
+	struct sk_buff *skb;
+
+	if (ev->type == LLC_CONN_EV_TYPE_PDU)
+		skb = ev->data.pdu.skb;
+	else
+		skb = ev->data.prim.data->data->conn.skb;
+	llc_pdu_decode_pf_bit(skb, &f_bit);
+	return llc_sk(sk)->p_flag == f_bit ?
0 : 1;
+}
+
+int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk,
+				      struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->remote_busy_flag;
+}
+
+int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk,
+				      struct llc_conn_state_ev *ev)
+{
+	return !llc_sk(sk)->remote_busy_flag;
+}
+
+int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk,
+				     struct llc_conn_state_ev *ev)
+{
+	return !(llc_sk(sk)->retry_count < llc_sk(sk)->n2);
+}
+
+int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk,
+				      struct llc_conn_state_ev *ev)
+{
+	return !(llc_sk(sk)->retry_count >= llc_sk(sk)->n2);
+}
+
+int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct llc_conn_state_ev *ev)
+{
+	return !llc_sk(sk)->s_flag;
+}
+
+int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->s_flag;
+}
+
+int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk,
+				     struct llc_conn_state_ev *ev)
+{
+	return !llc_sk(sk)->cause_flag;
+}
+
+int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk,
+				     struct llc_conn_state_ev *ev)
+{
+	return llc_sk(sk)->cause_flag;
+}
+
+int llc_conn_ev_qlfy_init_p_f_cycle(struct sock *sk,
+				    struct llc_conn_state_ev *ev)
+{
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_conn(struct sock *sk,
+				     struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_CONN;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_disc(struct sock *sk,
+				     struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_DISC;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_impossible(struct sock *sk,
+					   struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_IMPOSSIBLE;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_failed(struct sock *sk,
+				       struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_FAILED;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
+					    struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_REMOTE_BUSY;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_received(struct sock *sk,
+					 struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_RECEIVED;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk,
+				       struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_REFUSE;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk,
+					 struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_CONFLICT;
+	return 0;
+}
+
+int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
+					 struct llc_conn_state_ev *ev)
+{
+	ev->status = LLC_STATUS_RESET_DONE;
+	return 0;
+}
diff -Nru a/net/llc/llc_c_st.c b/net/llc/llc_c_st.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/net/llc/llc_c_st.c	Tue Jun 18 19:12:03 2002
@@ -0,0 +1,4946 @@
+/*
+ * llc_c_st.c - This module contains the state transitions of the connection
+ * component.
+ *
+ * Descriptions of the event functions and actions are in the 802.2 LLC
+ * standard, and in the "llc_c_ac.c" and "llc_c_ev.c" modules.
+ *
+ * Copyright (c) 1997 by Procom Technology, Inc.
+ *		 2001, 2002 by Arnaldo Carvalho de Melo
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
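+ *
+ * Each state is described by an array of pointers to struct
+ * llc_conn_state_trans, one per possible transition. A transition names
+ * an event matching function (.ev), the state entered when it matches
+ * (.next_state), an optional NULL-terminated list of event qualifiers
+ * and a NULL-terminated list of actions, all defined in llc_c_ev.c and
+ * llc_c_ac.c. As a rough, illustrative sketch only (the real dispatcher
+ * lives elsewhere in this patch and the names below are made up), a
+ * state machine core walks such an array like this:
+ *
+ *	for (i = 0; table[i]->ev; i++)
+ *		if (!table[i]->ev(sk, ev) &&
+ *		    qualifiers_pass(sk, ev, table[i]->ev_qualifiers))
+ *			return run_actions_and_enter_state(sk, ev, table[i]);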
+ */ +#include +#include +#include +#include +#include +#include + +#define NONE NULL + +/* COMMON CONNECTION STATE transitions + * Common transitions for + * LLC_CONN_STATE_NORMAL, + * LLC_CONN_STATE_BUSY, + * LLC_CONN_STATE_REJ, + * LLC_CONN_STATE_AWAIT, + * LLC_CONN_STATE_AWAIT_BUSY and + * LLC_CONN_STATE_AWAIT_REJ states + */ +/* State transitions for LLC_CONN_EV_DISC_REQ event */ +static llc_conn_action_t llc_common_actions_1[] = { + [0] = llc_conn_ac_send_disc_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_set_cause_flag_1, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_1 = { + .ev = llc_conn_ev_disc_req, + .next_state = LLC_CONN_STATE_D_CONN, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_1, +}; + +/* State transitions for LLC_CONN_EV_RESET_REQ event */ +static llc_conn_action_t llc_common_actions_2[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_set_cause_flag_1, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_2 = { + .ev = llc_conn_ev_rst_req, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_2, +}; + +/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_common_actions_3[] = { + [0] = llc_conn_ac_stop_all_timers, + [1] = llc_conn_ac_set_vs_0, + [2] = llc_conn_ac_set_vr_0, + [3] = llc_conn_ac_send_ua_rsp_f_set_p, + [4] = llc_conn_ac_rst_ind, + [5] = llc_conn_ac_set_p_flag_0, + [6] = llc_conn_ac_set_remote_busy_0, + [7] = llc_conn_reset, + [8] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_3 = { + .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_common_actions_4[] = { + [0] = llc_conn_ac_stop_all_timers, + [1] = llc_conn_ac_send_ua_rsp_f_set_p, + [2] = llc_conn_ac_disc_ind, + [3] = llc_conn_disc, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_4 = { + .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */ +static llc_conn_action_t llc_common_actions_5[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_rst_ind, + [5] = llc_conn_ac_set_cause_flag_0, + [6] = llc_conn_reset, + [7] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_5 = { + .ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_5, +}; + +/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ +static llc_conn_action_t llc_common_actions_6[] = { + [0] = llc_conn_ac_disc_ind, + [1] = llc_conn_ac_stop_all_timers, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_6 = { + .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_6, +}; + +/* 
State transitions for LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr event */ +static llc_conn_action_t llc_common_actions_7a[] = { + [0] = llc_conn_ac_send_frmr_rsp_f_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_7a = { + .ev = llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr, + .next_state = LLC_CONN_STATE_ERROR, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_7a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns event */ +static llc_conn_action_t llc_common_actions_7b[] = { + [0] = llc_conn_ac_send_frmr_rsp_f_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_7b = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns, + .next_state = LLC_CONN_STATE_ERROR, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_7b, +}; + +/* State transitions for LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr event */ +static llc_conn_action_t llc_common_actions_8a[] = { + [0] = llc_conn_ac_send_frmr_rsp_f_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_8a = { + .ev = llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr, + .next_state = LLC_CONN_STATE_ERROR, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_8a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns event */ +static llc_conn_action_t llc_common_actions_8b[] = { + [0] = llc_conn_ac_send_frmr_rsp_f_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_8b = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns, + .next_state = LLC_CONN_STATE_ERROR, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_8b, +}; + +/* State transitions for LLC_CONN_EV_RX_BAD_PDU event */ +static llc_conn_action_t llc_common_actions_8c[] = { + [0] = llc_conn_ac_send_frmr_rsp_f_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_8c = { + .ev = llc_conn_ev_rx_bad_pdu, + .next_state = LLC_CONN_STATE_ERROR, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_8c, +}; + +/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */ +static llc_conn_action_t llc_common_actions_9[] = { + [0] = llc_conn_ac_send_frmr_rsp_f_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_9 = { + .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ERROR, + .ev_qualifiers = NONE, + .ev_actions = llc_common_actions_9, +}; + +/* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 event */ +#if 0 +static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_10[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_common_actions_10[] = { + [0] = llc_conn_ac_send_frmr_rsp_f_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = NULL, +}; + 
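+/*
+ * NB: this transition (the qualifier and action arrays above and the
+ * struct below) is compiled out by the surrounding #if 0; the table
+ * slot that would point at it is filled with the dummy
+ * llc_common_state_trans_n instead - see the commented-out [54] entry
+ * in llc_normal_state_transitions below.
+ */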
+static struct llc_conn_state_trans llc_common_state_trans_10 = { + .ev = llc_conn_ev_rx_xxx_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_ERROR, + .ev_qualifiers = llc_common_ev_qfyrs_10, + .ev_actions = llc_common_actions_10, +}; +#endif + +/* State transitions for LLC_CONN_EV_P_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11a[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_common_actions_11a[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_set_cause_flag_0, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_11a = { + .ev = llc_conn_ev_p_tmr_exp, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = llc_common_ev_qfyrs_11a, + .ev_actions = llc_common_actions_11a, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11b[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_common_actions_11b[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_set_cause_flag_0, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_11b = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = llc_common_ev_qfyrs_11b, + .ev_actions = llc_common_actions_11b, +}; + +/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11c[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_common_actions_11c[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_set_cause_flag_0, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_11c = { + .ev = llc_conn_ev_rej_tmr_exp, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = llc_common_ev_qfyrs_11c, + .ev_actions = llc_common_actions_11c, +}; + +/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11d[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_common_actions_11d[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_stop_other_timers, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_set_cause_flag_0, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_common_state_trans_11d = { + .ev = llc_conn_ev_busy_tmr_exp, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = llc_common_ev_qfyrs_11d, + .ev_actions = llc_common_actions_11d, +}; + +/* + * Common dummy state transition; must be last entry for all state + * transition groups - it'll be on .bss, so will be zeroed. 
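+ * Being zero-filled means its .ev member is NULL, which is what a
+ * walker of these transition arrays can test to detect the end of the
+ * array (cf. the illustrative loop in this file's header comment).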
+ */ +static struct llc_conn_state_trans llc_common_state_trans_n; + +/* LLC_CONN_STATE_ADM transitions */ +/* State transitions for LLC_CONN_EV_CONN_REQ event */ +static llc_conn_action_t llc_adm_actions_1[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_set_retry_cnt_0, + [3] = llc_conn_ac_set_s_flag_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_adm_state_trans_1 = { + .ev = llc_conn_ev_conn_req, + .next_state = LLC_CONN_STATE_SETUP, + .ev_qualifiers = NONE, + .ev_actions = llc_adm_actions_1, +}; + +/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_adm_actions_2[] = { + [0] = llc_conn_ac_send_ua_rsp_f_set_p, + [1] = llc_conn_ac_set_vs_0, + [2] = llc_conn_ac_set_vr_0, + [3] = llc_conn_ac_set_retry_cnt_0, + [4] = llc_conn_ac_set_p_flag_0, + [5] = llc_conn_ac_set_remote_busy_0, + [6] = llc_conn_ac_conn_ind, + [7] = NULL, +}; + +static struct llc_conn_state_trans llc_adm_state_trans_2 = { + .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_adm_actions_2, +}; + +/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_adm_actions_3[] = { + [0] = llc_conn_ac_send_dm_rsp_f_set_p, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_adm_state_trans_3 = { + .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = NONE, + .ev_actions = llc_adm_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_adm_actions_4[] = { + [0] = llc_conn_ac_send_dm_rsp_f_set_1, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_adm_state_trans_4 = { + .ev = llc_conn_ev_rx_xxx_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = NONE, + .ev_actions = llc_adm_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_XXX_YYY event */ +static llc_conn_action_t llc_adm_actions_5[] = { + [0] = llc_conn_disc, + [1] = NULL, +}; + +static struct llc_conn_state_trans llc_adm_state_trans_5 = { + .ev = llc_conn_ev_rx_any_frame, + .next_state = LLC_CONN_OUT_OF_SVC, + .ev_qualifiers = NONE, + .ev_actions = llc_adm_actions_5, +}; + +/* + * Array of pointers; + * one to each transition + */ +static struct llc_conn_state_trans *llc_adm_state_transitions[] = { + [0] = &llc_adm_state_trans_1, /* Request */ + [1] = &llc_common_state_trans_n, + [2] = &llc_common_state_trans_n, /* local_busy */ + [3] = &llc_common_state_trans_n, /* init_pf_cycle */ + [4] = &llc_common_state_trans_n, /* timer */ + [5] = &llc_adm_state_trans_2, /* Receive frame */ + [6] = &llc_adm_state_trans_3, + [7] = &llc_adm_state_trans_4, + [8] = &llc_adm_state_trans_5, + [9] = &llc_common_state_trans_n, +}; + +/* LLC_CONN_STATE_SETUP transitions */ +/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_setup_actions_1[] = { + [0] = llc_conn_ac_send_ua_rsp_f_set_p, + [1] = llc_conn_ac_set_vs_0, + [2] = llc_conn_ac_set_vr_0, + [3] = llc_conn_ac_set_s_flag_1, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_setup_state_trans_1 = { + .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_SETUP, + .ev_qualifiers = NONE, + .ev_actions = llc_setup_actions_1, +}; + +/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */ +static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_2[] = { 
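+	/*
+	 * Every qualifier in this NULL-terminated list must return 0 for
+	 * the transition to be taken; the llc_conn_ev_qlfy_set_status_*()
+	 * entries always return 0 and are listed purely for their side
+	 * effect of recording the status to report on the event.
+	 */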
+ [0] = llc_conn_ev_qlfy_p_flag_eq_f, + [1] = llc_conn_ev_qlfy_set_status_conn, + [2] = NULL, +}; + +static llc_conn_action_t llc_setup_actions_2[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_ac_set_vs_0, + [2] = llc_conn_ac_set_vr_0, + [3] = llc_conn_ac_upd_p_flag, + [4] = llc_conn_ac_set_remote_busy_0, + [5] = llc_conn_ac_conn_confirm, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_setup_state_trans_2 = { + .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_setup_ev_qfyrs_2, + .ev_actions = llc_setup_actions_2, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_3[] = { + [0] = llc_conn_ev_qlfy_s_flag_eq_1, + [1] = llc_conn_ev_qlfy_set_status_conn, + [2] = NULL, +}; + +static llc_conn_action_t llc_setup_actions_3[] = { + [0] = llc_conn_ac_set_p_flag_0, + [1] = llc_conn_ac_set_remote_busy_0, + [2] = llc_conn_ac_conn_confirm, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_setup_state_trans_3 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_setup_ev_qfyrs_3, + .ev_actions = llc_setup_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ +static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_4[] = { + [0] = llc_conn_ev_qlfy_set_status_disc, + [1] = NULL, +}; + +static llc_conn_action_t llc_setup_actions_4[] = { + [0] = llc_conn_ac_send_dm_rsp_f_set_p, + [1] = llc_conn_ac_stop_ack_timer, + [2] = llc_conn_ac_conn_confirm, + [3] = llc_conn_disc, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_setup_state_trans_4 = { + .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_setup_ev_qfyrs_4, + .ev_actions = llc_setup_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ +static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_5[] = { + [0] = llc_conn_ev_qlfy_set_status_disc, + [1] = NULL, +}; + +static llc_conn_action_t llc_setup_actions_5[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_ac_conn_confirm, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_setup_state_trans_5 = { + .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_setup_ev_qfyrs_5, + .ev_actions = llc_setup_actions_5, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_7[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [1] = llc_conn_ev_qlfy_s_flag_eq_0, + [2] = NULL, +}; + +static llc_conn_action_t llc_setup_actions_7[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_inc_retry_cnt_by_1, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_setup_state_trans_7 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_SETUP, + .ev_qualifiers = llc_setup_ev_qfyrs_7, + .ev_actions = llc_setup_actions_7, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_8[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = llc_conn_ev_qlfy_s_flag_eq_0, + [2] = llc_conn_ev_qlfy_set_status_failed, + [3] = NULL, +}; + +static llc_conn_action_t llc_setup_actions_8[] = { + [0] = llc_conn_ac_conn_confirm, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_setup_state_trans_8 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_ADM, 
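+	/* the connect attempt gives up here: the ack timer expired after
+	 * N2 retries with the S-flag still 0, so confirm LLC_STATUS_FAILED
+	 * to the user and go back to ADM */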
+ .ev_qualifiers = llc_setup_ev_qfyrs_8, + .ev_actions = llc_setup_actions_8, +}; + +/* + * Array of pointers; + * one to each transition + */ +static struct llc_conn_state_trans *llc_setup_state_transitions[] = { + [0] = &llc_common_state_trans_n, /* Request */ + [1] = &llc_common_state_trans_n, /* local busy */ + [2] = &llc_common_state_trans_n, /* init_pf_cycle */ + [3] = &llc_setup_state_trans_3, /* Timer */ + [4] = &llc_setup_state_trans_7, + [5] = &llc_setup_state_trans_8, + [6] = &llc_common_state_trans_n, + [7] = &llc_setup_state_trans_1, /* Receive frame */ + [8] = &llc_setup_state_trans_2, + [9] = &llc_setup_state_trans_4, + [10] = &llc_setup_state_trans_5, + [11] = &llc_common_state_trans_n, +}; + +/* LLC_CONN_STATE_NORMAL transitions */ +/* State transitions for LLC_CONN_EV_DATA_REQ event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_1[] = { + [0] = llc_conn_ev_qlfy_remote_busy_eq_0, + [1] = llc_conn_ev_qlfy_p_flag_eq_0, + [2] = llc_conn_ev_qlfy_last_frame_eq_0, + [3] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_1[] = { + [0] = llc_conn_ac_send_i_as_ack, + [1] = llc_conn_ac_start_ack_tmr_if_not_running, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_1 = { + .ev = llc_conn_ev_data_req, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_1, + .ev_actions = llc_normal_actions_1, +}; + +/* State transitions for LLC_CONN_EV_DATA_REQ event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2[] = { + [0] = llc_conn_ev_qlfy_remote_busy_eq_0, + [1] = llc_conn_ev_qlfy_p_flag_eq_0, + [2] = llc_conn_ev_qlfy_last_frame_eq_1, + [3] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_2[] = { + [0] = llc_conn_ac_send_i_cmd_p_set_1, + [1] = llc_conn_ac_start_p_timer, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_2 = { + .ev = llc_conn_ev_data_req, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_2, + .ev_actions = llc_normal_actions_2, +}; + +/* State transitions for LLC_CONN_EV_DATA_REQ event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = { + [0] = llc_conn_ev_qlfy_remote_busy_eq_1, + [1] = llc_conn_ev_qlfy_set_status_remote_busy, + [2] = NULL, +}; + +/* just one member, NULL, .bss zeroes it */ +static llc_conn_action_t llc_normal_actions_2_1[1]; + +static struct llc_conn_state_trans llc_normal_state_trans_2_1 = { + .ev = llc_conn_ev_data_req, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_2_1, + .ev_actions = llc_normal_actions_2_1, +}; + +/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_3[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_3[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rnr_xxx_x_set_0, + [2] = llc_conn_ac_set_data_flag_0, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_3 = { + .ev = llc_conn_ev_local_busy_detected, + .next_state = LLC_CONN_STATE_BUSY, + .ev_qualifiers = llc_normal_ev_qfyrs_3, + .ev_actions = llc_normal_actions_3, +}; + +/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_4[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_4[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rnr_xxx_x_set_0, + [2] = llc_conn_ac_set_data_flag_0, + [3] = NULL, +}; + +static struct 
llc_conn_state_trans llc_normal_state_trans_4 = { + .ev = llc_conn_ev_local_busy_detected, + .next_state = LLC_CONN_STATE_BUSY, + .ev_qualifiers = llc_normal_ev_qfyrs_4, + .ev_actions = llc_normal_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5a[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_5a[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rej_xxx_x_set_0, + [2] = llc_conn_ac_upd_nr_received, + [3] = llc_conn_ac_upd_p_flag, + [4] = llc_conn_ac_start_rej_timer, + [5] = llc_conn_ac_clear_remote_busy_if_f_eq_1, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_5a = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = llc_normal_ev_qfyrs_5a, + .ev_actions = llc_normal_actions_5a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5b[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_5b[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rej_xxx_x_set_0, + [2] = llc_conn_ac_upd_nr_received, + [3] = llc_conn_ac_upd_p_flag, + [4] = llc_conn_ac_start_rej_timer, + [5] = llc_conn_ac_clear_remote_busy_if_f_eq_1, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_5b = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = llc_normal_ev_qfyrs_5b, + .ev_actions = llc_normal_actions_5b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5c[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_5c[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rej_xxx_x_set_0, + [2] = llc_conn_ac_upd_nr_received, + [3] = llc_conn_ac_upd_p_flag, + [4] = llc_conn_ac_start_rej_timer, + [5] = llc_conn_ac_clear_remote_busy_if_f_eq_1, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_5c = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = llc_normal_ev_qfyrs_5c, + .ev_actions = llc_normal_actions_5c, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6a[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_6a[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rej_xxx_x_set_0, + [2] = llc_conn_ac_upd_nr_received, + [3] = llc_conn_ac_start_rej_timer, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_6a = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = llc_normal_ev_qfyrs_6a, + .ev_actions = llc_normal_actions_6a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6b[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_6b[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rej_xxx_x_set_0, + [2] = llc_conn_ac_upd_nr_received, + [3] = llc_conn_ac_start_rej_timer, + [4] = NULL, +}; + +static struct llc_conn_state_trans 
llc_normal_state_trans_6b = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = llc_normal_ev_qfyrs_6b, + .ev_actions = llc_normal_actions_6b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ +static llc_conn_action_t llc_normal_actions_7[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rej_rsp_f_set_1, + [2] = llc_conn_ac_upd_nr_received, + [3] = llc_conn_ac_start_rej_timer, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_7 = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_7, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8a[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_f, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_8[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_data_ind, + [2] = llc_conn_ac_upd_p_flag, + [3] = llc_conn_ac_upd_nr_received, + [4] = llc_conn_ac_clear_remote_busy_if_f_eq_1, + [5] = llc_conn_ac_send_ack_if_needed, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_8a = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_8a, + .ev_actions = llc_normal_actions_8, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_8b = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_8b, + .ev_actions = llc_normal_actions_8, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9a[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_9a[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_data_ind, + [3] = llc_conn_ac_send_ack_if_needed, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_9a = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_9a, + .ev_actions = llc_normal_actions_9a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9b[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_9b[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_data_ind, + [3] = llc_conn_ac_send_ack_if_needed, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_9b = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_9b, + .ev_actions = llc_normal_actions_9b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_normal_actions_10[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_send_ack_rsp_f_set_1, + [2] = llc_conn_ac_rst_sendack_flag, + [3] = llc_conn_ac_upd_nr_received, + [4] = llc_conn_ac_data_ind, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_10 = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, + .next_state = 
LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_10, +}; + +/* State transitions for * LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_normal_actions_11a[] = { + [0] = llc_conn_ac_upd_p_flag, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_11a = { + .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_11a, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_normal_actions_11b[] = { + [0] = llc_conn_ac_upd_p_flag, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_11b = { + .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_11b, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_11c[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_11c[] = { + [0] = llc_conn_ac_upd_p_flag, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_inc_tx_win_size, + [3] = llc_conn_ac_clear_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_11c = { + .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_11c, + .ev_actions = llc_normal_actions_11c, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_normal_actions_12[] = { + [0] = llc_conn_ac_send_ack_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_adjust_npta_by_rr, + [3] = llc_conn_ac_rst_sendack_flag, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_12 = { + .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_12, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_normal_actions_13a[] = { + [0] = llc_conn_ac_upd_p_flag, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_set_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_13a = { + .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_13a, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_normal_actions_13b[] = { + [0] = llc_conn_ac_upd_p_flag, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_set_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_13b = { + .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_13b, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_13c[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_13c[] = { + [0] = llc_conn_ac_upd_p_flag, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_set_remote_busy, + [3] = NULL, +}; + +static struct 
llc_conn_state_trans llc_normal_state_trans_13c = { + .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_13c, + .ev_actions = llc_normal_actions_13c, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_normal_actions_14[] = { + [0] = llc_conn_ac_send_rr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_adjust_npta_by_rnr, + [3] = llc_conn_ac_rst_sendack_flag, + [4] = llc_conn_ac_set_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_14 = { + .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_14, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15a[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_15a[] = { + [0] = llc_conn_ac_set_vs_nr, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_p_flag, + [3] = llc_conn_ac_dec_tx_win_size, + [4] = llc_conn_ac_resend_i_xxx_x_set_0, + [5] = llc_conn_ac_clear_remote_busy, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_15a = { + .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_15a, + .ev_actions = llc_normal_actions_15a, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15b[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_f, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_15b[] = { + [0] = llc_conn_ac_set_vs_nr, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_p_flag, + [3] = llc_conn_ac_dec_tx_win_size, + [4] = llc_conn_ac_resend_i_xxx_x_set_0, + [5] = llc_conn_ac_clear_remote_busy, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_15b = { + .ev = llc_conn_ev_rx_rej_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_15b, + .ev_actions = llc_normal_actions_15b, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16a[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_16a[] = { + [0] = llc_conn_ac_set_vs_nr, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_dec_tx_win_size, + [3] = llc_conn_ac_resend_i_xxx_x_set_0, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_16a = { + .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_16a, + .ev_actions = llc_normal_actions_16a, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16b[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_16b[] = { + [0] = llc_conn_ac_set_vs_nr, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_dec_tx_win_size, + [3] = llc_conn_ac_resend_i_xxx_x_set_0, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_16b = { + .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_16b, + .ev_actions = 
llc_normal_actions_16b, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_normal_actions_17[] = { + [0] = llc_conn_ac_set_vs_nr, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_dec_tx_win_size, + [3] = llc_conn_ac_resend_i_rsp_f_set_1, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_17 = { + .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_normal_actions_17, +}; + +/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_18[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_18[] = { + [0] = llc_conn_ac_send_rr_cmd_p_set_1, + [1] = llc_conn_ac_start_p_timer, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_18 = { + .ev = llc_conn_ev_init_p_f_cycle, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_normal_ev_qfyrs_18, + .ev_actions = llc_normal_actions_18, +}; + +/* State transitions for LLC_CONN_EV_P_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_19[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_19[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rr_cmd_p_set_1, + [2] = llc_conn_ac_rst_vs, + [3] = llc_conn_ac_start_p_timer, + [4] = llc_conn_ac_inc_retry_cnt_by_1, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_19 = { + .ev = llc_conn_ev_p_tmr_exp, + .next_state = LLC_CONN_STATE_AWAIT, + .ev_qualifiers = llc_normal_ev_qfyrs_19, + .ev_actions = llc_normal_actions_19, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20a[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [2] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_20a[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rr_cmd_p_set_1, + [2] = llc_conn_ac_rst_vs, + [3] = llc_conn_ac_start_p_timer, + [4] = llc_conn_ac_inc_retry_cnt_by_1, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_20a = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_AWAIT, + .ev_qualifiers = llc_normal_ev_qfyrs_20a, + .ev_actions = llc_normal_actions_20a, +}; + +/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20b[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [2] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_20b[] = { + [0] = llc_conn_ac_rst_sendack_flag, + [1] = llc_conn_ac_send_rr_cmd_p_set_1, + [2] = llc_conn_ac_rst_vs, + [3] = llc_conn_ac_start_p_timer, + [4] = llc_conn_ac_inc_retry_cnt_by_1, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_normal_state_trans_20b = { + .ev = llc_conn_ev_busy_tmr_exp, + .next_state = LLC_CONN_STATE_AWAIT, + .ev_qualifiers = llc_normal_ev_qfyrs_20b, + .ev_actions = llc_normal_actions_20b, +}; + +/* State transitions for LLC_CONN_EV_TX_BUFF_FULL event */ +static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_21[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_normal_actions_21[] = { + [0] = llc_conn_ac_send_rr_cmd_p_set_1, + [1] = llc_conn_ac_start_p_timer, + [2] = NULL, +}; + +static struct llc_conn_state_trans 
+
+/*
+ * Array of pointers;
+ * one to each transition
+ */
+static struct llc_conn_state_trans *llc_normal_state_transitions[] = {
+	[0] = &llc_normal_state_trans_1,	/* Requests */
+	[1] = &llc_normal_state_trans_2,
+	[2] = &llc_normal_state_trans_2_1,
+	[3] = &llc_common_state_trans_1,
+	[4] = &llc_common_state_trans_2,
+	[5] = &llc_common_state_trans_n,
+	[6] = &llc_normal_state_trans_21,
+	[7] = &llc_normal_state_trans_3,	/* Local busy */
+	[8] = &llc_normal_state_trans_4,
+	[9] = &llc_common_state_trans_n,
+	[10] = &llc_normal_state_trans_18,	/* Init pf cycle */
+	[11] = &llc_common_state_trans_n,
+	[12] = &llc_common_state_trans_11a,	/* Timers */
+	[13] = &llc_common_state_trans_11b,
+	[14] = &llc_common_state_trans_11c,
+	[15] = &llc_common_state_trans_11d,
+	[16] = &llc_normal_state_trans_19,
+	[17] = &llc_normal_state_trans_20a,
+	[18] = &llc_normal_state_trans_20b,
+	[19] = &llc_common_state_trans_n,
+	[20] = &llc_normal_state_trans_8b,	/* Receive frames */
+	[21] = &llc_normal_state_trans_9b,
+	[22] = &llc_normal_state_trans_10,
+	[23] = &llc_normal_state_trans_11b,
+	[24] = &llc_normal_state_trans_11c,
+	[25] = &llc_normal_state_trans_5a,
+	[26] = &llc_normal_state_trans_5b,
+	[27] = &llc_normal_state_trans_5c,
+	[28] = &llc_normal_state_trans_6a,
+	[29] = &llc_normal_state_trans_6b,
+	[30] = &llc_normal_state_trans_7,
+	[31] = &llc_normal_state_trans_8a,
+	[32] = &llc_normal_state_trans_9a,
+	[33] = &llc_normal_state_trans_11a,
+	[34] = &llc_normal_state_trans_12,
+	[35] = &llc_normal_state_trans_13a,
+	[36] = &llc_normal_state_trans_13b,
+	[37] = &llc_normal_state_trans_13c,
+	[38] = &llc_normal_state_trans_14,
+	[39] = &llc_normal_state_trans_15a,
+	[40] = &llc_normal_state_trans_15b,
+	[41] = &llc_normal_state_trans_16a,
+	[42] = &llc_normal_state_trans_16b,
+	[43] = &llc_normal_state_trans_17,
+	[44] = &llc_common_state_trans_3,
+	[45] = &llc_common_state_trans_4,
+	[46] = &llc_common_state_trans_5,
+	[47] = &llc_common_state_trans_6,
+	[48] = &llc_common_state_trans_7a,
+	[49] = &llc_common_state_trans_7b,
+	[50] = &llc_common_state_trans_8a,
+	[51] = &llc_common_state_trans_8b,
+	[52] = &llc_common_state_trans_8c,
+	[53] = &llc_common_state_trans_9,
+	/* [54] = &llc_common_state_trans_10, */
+	[54] = &llc_common_state_trans_n,
+};
+
+/* LLC_CONN_STATE_BUSY transitions */
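+/*
+ * In the BUSY state the local receiver cannot accept I PDUs, so they
+ * are answered with RNR.  data_flag records what happened meanwhile:
+ * 0 - nothing was lost, 1 - an I PDU was discarded and a REJ must be
+ * sent once the busy condition clears, 2 - a REJ was already
+ * outstanding when the busy condition was detected.
+ */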
+/* State transitions for LLC_CONN_EV_DATA_REQ event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_1[] = {
+	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_1[] = {
+	[0] = llc_conn_ac_send_i_xxx_x_set_0,
+	[1] = llc_conn_ac_start_ack_tmr_if_not_running,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_1 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_1,
+	.ev_actions = llc_busy_actions_1,
+};
+
+/* State transitions for LLC_CONN_EV_DATA_REQ event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2[] = {
+	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_2[] = {
+	[0] = llc_conn_ac_send_i_xxx_x_set_0,
+	[1] = llc_conn_ac_start_ack_tmr_if_not_running,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_2 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_2,
+	.ev_actions = llc_busy_actions_2,
+};
+
+/* State transitions for LLC_CONN_EV_DATA_REQ event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = {
+	[0] = llc_conn_ev_qlfy_remote_busy_eq_1,
+	[1] = llc_conn_ev_qlfy_set_status_remote_busy,
+	[2] = NULL,
+};
+
+/* just one member, NULL, .bss zeroes it */
+static llc_conn_action_t llc_busy_actions_2_1[1];
+
+static struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_2_1,
+	.ev_actions = llc_busy_actions_2_1,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_3[] = {
+	[0] = llc_conn_ev_qlfy_data_flag_eq_1,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_3[] = {
+	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
+	[1] = llc_conn_ac_start_rej_timer,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_3 = {
+	.ev = llc_conn_ev_local_busy_cleared,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_busy_ev_qfyrs_3,
+	.ev_actions = llc_busy_actions_3,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_4[] = {
+	[0] = llc_conn_ev_qlfy_data_flag_eq_1,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_4[] = {
+	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
+	[1] = llc_conn_ac_start_rej_timer,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_4 = {
+	.ev = llc_conn_ev_local_busy_cleared,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_busy_ev_qfyrs_4,
+	.ev_actions = llc_busy_actions_4,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_5[] = {
+	[0] = llc_conn_ev_qlfy_data_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_5[] = {
+	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_5 = {
+	.ev = llc_conn_ev_local_busy_cleared,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = llc_busy_ev_qfyrs_5,
+	.ev_actions = llc_busy_actions_5,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_6[] = {
+	[0] = llc_conn_ev_qlfy_data_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_6[] = {
+	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_6 = {
+	.ev = llc_conn_ev_local_busy_cleared,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = llc_busy_ev_qfyrs_6,
+	.ev_actions = llc_busy_actions_6,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_7[] = {
+	[0] = llc_conn_ev_qlfy_data_flag_eq_2,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_7[] = {
+	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_7 = {
+	.ev = llc_conn_ev_local_busy_cleared,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_busy_ev_qfyrs_7,
+	.ev_actions = llc_busy_actions_7,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_8[] = {
+	[0] = llc_conn_ev_qlfy_data_flag_eq_2,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_8[] = {
+	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_8 = {
+	.ev = llc_conn_ev_local_busy_cleared,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_busy_ev_qfyrs_8,
+	.ev_actions = llc_busy_actions_8,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_9a[] = {
+	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[1] = llc_conn_ac_upd_p_flag,
+	[2] = llc_conn_ac_upd_nr_received,
+	[3] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
+	[4] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_9a = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_9a,
+	.ev_actions = llc_busy_actions_9a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_9b[] = {
+	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[1] = llc_conn_ac_upd_p_flag,
+	[2] = llc_conn_ac_upd_nr_received,
+	[3] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
+	[4] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_9b = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_9b,
+	.ev_actions = llc_busy_actions_9b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_10a[] = {
+	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_10a = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_10a,
+	.ev_actions = llc_busy_actions_10a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_10b[] = {
+	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_10b = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_10b,
+	.ev_actions = llc_busy_actions_10b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
+static llc_conn_action_t llc_busy_actions_11[] = {
+	[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_11 = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_11,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_busy_actions_12[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_send_rnr_rsp_f_set_1,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
+	[5] = llc_conn_ac_set_data_flag_0,
+	[6] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_12 = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_12,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_13a[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[4] = llc_conn_ac_upd_nr_received,
+	[5] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
+	[6] = llc_conn_ac_set_data_flag_0,
+	[7] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[8] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_13a = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_13a,
+	.ev_actions = llc_busy_actions_13a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_13b[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[4] = llc_conn_ac_upd_nr_received,
+	[5] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
+	[6] = llc_conn_ac_set_data_flag_0,
+	[7] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[8] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_13b = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_13b,
+	.ev_actions = llc_busy_actions_13b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_14a[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
+	[5] = llc_conn_ac_set_data_flag_0,
+	[6] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_14a = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_14a,
+	.ev_actions = llc_busy_actions_14a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_14b[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
+	[5] = llc_conn_ac_set_data_flag_0,
+	[6] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_14b = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_14b,
+	.ev_actions = llc_busy_actions_14b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_busy_actions_15a[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_15a = {
+	.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_15a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_busy_actions_15b[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_15b = {
+	.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_15b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_15c[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_15c[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_15c = {
+	.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_15c,
+	.ev_actions = llc_busy_actions_15c,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_busy_actions_16[] = {
+	[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_16 = {
+	.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_16,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_busy_actions_17a[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_17a = {
+	.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_17a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_busy_actions_17b[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_17b = {
+	.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_17b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_17c[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_17c[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_17c = {
+	.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_17c,
+	.ev_actions = llc_busy_actions_17c,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_busy_actions_18[] = {
+	[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_18 = {
+	.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_18,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_19a[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[4] = llc_conn_ac_clear_remote_busy,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_19a = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_19a,
+	.ev_actions = llc_busy_actions_19a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_19b[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[4] = llc_conn_ac_clear_remote_busy,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_19b = {
+	.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_19b,
+	.ev_actions = llc_busy_actions_19b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_20a[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[3] = llc_conn_ac_clear_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_20a = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_20a,
+	.ev_actions = llc_busy_actions_20a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_20b[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[3] = llc_conn_ac_clear_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_20b = {
+	.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_20b,
+	.ev_actions = llc_busy_actions_20b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_busy_actions_21[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_send_rnr_rsp_f_set_1,
+	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[4] = llc_conn_ac_clear_remote_busy,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_21 = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_busy_actions_21,
+};
+
+/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_22[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_22[] = {
+	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_22 = {
+	.ev = llc_conn_ev_init_p_f_cycle,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_22,
+	.ev_actions = llc_busy_actions_22,
+};
+
+/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_23[] = {
+	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_23[] = {
+	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
+	[1] = llc_conn_ac_rst_vs,
+	[2] = llc_conn_ac_start_p_timer,
+	[3] = llc_conn_ac_inc_retry_cnt_by_1,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_23 = {
+	.ev = llc_conn_ev_p_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_23,
+	.ev_actions = llc_busy_actions_23,
+};
+
+/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_24a[] = {
+	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_inc_retry_cnt_by_1,
+	[3] = llc_conn_ac_rst_vs,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_24a = {
+	.ev = llc_conn_ev_ack_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_24a,
+	.ev_actions = llc_busy_actions_24a,
+};
+
+/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_24b[] = {
+	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_inc_retry_cnt_by_1,
+	[3] = llc_conn_ac_rst_vs,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_24b = {
+	.ev = llc_conn_ev_busy_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_24b,
+	.ev_actions = llc_busy_actions_24b,
+};
+
+/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_25[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_25[] = {
+	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_inc_retry_cnt_by_1,
+	[3] = llc_conn_ac_rst_vs,
+	[4] = llc_conn_ac_set_data_flag_1,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_25 = {
+	.ev = llc_conn_ev_rej_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_25,
+	.ev_actions = llc_busy_actions_25,
+};
+
+/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_26[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_busy_actions_26[] = {
+	[0] = llc_conn_ac_set_data_flag_1,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_busy_state_trans_26 = {
+	.ev = llc_conn_ev_rej_tmr_exp,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_busy_ev_qfyrs_26,
+	.ev_actions = llc_busy_actions_26,
+};
+
+/*
+ * Array of pointers;
+ * one to each transition
+ */
+static struct llc_conn_state_trans *llc_busy_state_transitions[] = {
+	[0] = &llc_common_state_trans_1,	/* Request */
+	[1] = &llc_common_state_trans_2,
+	[2] = &llc_busy_state_trans_1,
+	[3] = &llc_busy_state_trans_2,
+	[4] = &llc_busy_state_trans_2_1,
+	[5] = &llc_common_state_trans_n,
+	[6] = &llc_busy_state_trans_3,	/* Local busy */
+	[7] = &llc_busy_state_trans_4,
+	[8] = &llc_busy_state_trans_5,
+	[9] = &llc_busy_state_trans_6,
+	[10] = &llc_busy_state_trans_7,
+	[11] = &llc_busy_state_trans_8,
+	[12] = &llc_common_state_trans_n,
+	[13] = &llc_busy_state_trans_22,	/* Initiate PF cycle */
+	[14] = &llc_common_state_trans_n,
+	[15] = &llc_common_state_trans_11a,	/* Timer */
+	[16] = &llc_common_state_trans_11b,
+	[17] = &llc_common_state_trans_11c,
+	[18] = &llc_common_state_trans_11d,
+	[19] = &llc_busy_state_trans_23,
+	[20] = &llc_busy_state_trans_24a,
+	[21] = &llc_busy_state_trans_24b,
+	[22] = &llc_busy_state_trans_25,
+	[23] = &llc_busy_state_trans_26,
+	[24] = &llc_common_state_trans_n,
+	[25] = &llc_busy_state_trans_9a,	/* Receive frame */
+	[26] = &llc_busy_state_trans_9b,
+	[27] = &llc_busy_state_trans_10a,
+	[28] = &llc_busy_state_trans_10b,
+	[29] = &llc_busy_state_trans_11,
+	[30] = &llc_busy_state_trans_12,
+	[31] = &llc_busy_state_trans_13a,
+	[32] = &llc_busy_state_trans_13b,
+	[33] = &llc_busy_state_trans_14a,
+	[34] = &llc_busy_state_trans_14b,
+	[35] = &llc_busy_state_trans_15a,
+	[36] = &llc_busy_state_trans_15b,
+	[37] = &llc_busy_state_trans_15c,
+	[38] = &llc_busy_state_trans_16,
+	[39] = &llc_busy_state_trans_17a,
+	[40] = &llc_busy_state_trans_17b,
+	[41] = &llc_busy_state_trans_17c,
+	[42] = &llc_busy_state_trans_18,
+	[43] = &llc_busy_state_trans_19a,
+	[44] = &llc_busy_state_trans_19b,
+	[45] = &llc_busy_state_trans_20a,
+	[46] = &llc_busy_state_trans_20b,
+	[47] = &llc_busy_state_trans_21,
+	[48] = &llc_common_state_trans_3,
+	[49] = &llc_common_state_trans_4,
+	[50] = &llc_common_state_trans_5,
+	[51] = &llc_common_state_trans_6,
+	[52] = &llc_common_state_trans_7a,
+	[53] = &llc_common_state_trans_7b,
+	[54] = &llc_common_state_trans_8a,
+	[55] = &llc_common_state_trans_8b,
+	[56] = &llc_common_state_trans_8c,
+	[57] = &llc_common_state_trans_9,
+	/* [58] = &llc_common_state_trans_10, */
+	[58] = &llc_common_state_trans_n,
+};
+
+/* LLC_CONN_STATE_REJ transitions */
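+/*
+ * In the REJ state a REJ has been sent and the rej timer is running;
+ * the state is left for NORMAL once the peer retransmits the expected
+ * I PDU.
+ */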
+/* State transitions for LLC_CONN_EV_DATA_REQ event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_1[] = {
+	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_1[] = {
+	[0] = llc_conn_ac_send_i_xxx_x_set_0,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_1 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_1,
+	.ev_actions = llc_reject_actions_1,
+};
+
+/* State transitions for LLC_CONN_EV_DATA_REQ event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2[] = {
+	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
+	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_2[] = {
+	[0] = llc_conn_ac_send_i_xxx_x_set_0,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_2 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_2,
+	.ev_actions = llc_reject_actions_2,
+};
+
+/* State transitions for LLC_CONN_EV_DATA_REQ event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = {
+	[0] = llc_conn_ev_qlfy_remote_busy_eq_1,
+	[1] = llc_conn_ev_qlfy_set_status_remote_busy,
+	[2] = NULL,
+};
+
+/* just one member, NULL, .bss zeroes it */
+static llc_conn_action_t llc_reject_actions_2_1[1];
+
+static struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_2_1,
+	.ev_actions = llc_reject_actions_2_1,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_3[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_3[] = {
+	[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
+	[1] = llc_conn_ac_set_data_flag_2,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_3 = {
+	.ev = llc_conn_ev_local_busy_detected,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_reject_ev_qfyrs_3,
+	.ev_actions = llc_reject_actions_3,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_4[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_4[] = {
+	[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
+	[1] = llc_conn_ac_set_data_flag_2,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_4 = {
+	.ev = llc_conn_ev_local_busy_detected,
+	.next_state = LLC_CONN_STATE_BUSY,
+	.ev_qualifiers = llc_reject_ev_qfyrs_4,
+	.ev_actions = llc_reject_actions_4,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
+static llc_conn_action_t llc_reject_actions_5a[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_p_flag,
+	[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_5a = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_5a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
+static llc_conn_action_t llc_reject_actions_5b[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_p_flag,
+	[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_5b = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_5b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_5c[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_5c[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_p_flag,
+	[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_5c = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_5c,
+	.ev_actions = llc_reject_actions_5c,
+};
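+
+/*
+ * While the REJ is outstanding, further I PDUs with an unexpected N(s)
+ * (transitions 5a-5c above) only update the acknowledgement state; no
+ * additional REJ is sent.
+ */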
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
+static llc_conn_action_t llc_reject_actions_6[] = {
+	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_6 = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_6,
+};
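+
+/*
+ * Transitions 7-9: the expected I PDU finally arrives, so deliver it,
+ * stop the rej timer and fall back to the NORMAL state.
+ */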
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_7a[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_send_ack_xxx_x_set_0,
+	[4] = llc_conn_ac_upd_nr_received,
+	[5] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[6] = llc_conn_ac_stop_rej_timer,
+	[7] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_7a = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = llc_reject_ev_qfyrs_7a,
+	.ev_actions = llc_reject_actions_7a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_7b[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_send_ack_xxx_x_set_0,
+	[4] = llc_conn_ac_upd_nr_received,
+	[5] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
+	[6] = llc_conn_ac_stop_rej_timer,
+	[7] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_7b = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = llc_reject_ev_qfyrs_7b,
+	.ev_actions = llc_reject_actions_7b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_8a[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_send_ack_xxx_x_set_0,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_stop_rej_timer,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_8a = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = llc_reject_ev_qfyrs_8a,
+	.ev_actions = llc_reject_actions_8a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_8b[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_send_ack_xxx_x_set_0,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_stop_rej_timer,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_8b = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = llc_reject_ev_qfyrs_8b,
+	.ev_actions = llc_reject_actions_8b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_reject_actions_9[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_send_ack_rsp_f_set_1,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_stop_rej_timer,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_9 = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_9,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_reject_actions_10a[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_10a = {
+	.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_10a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_reject_actions_10b[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_10b = {
+	.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_10b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_10c[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_10c[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_10c = {
+	.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_10c,
+	.ev_actions = llc_reject_actions_10c,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_reject_actions_11[] = {
+	[0] = llc_conn_ac_send_ack_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_11 = {
+	.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_11,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_reject_actions_12a[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_12a = {
+	.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_12a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_reject_actions_12b[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_12b = {
+	.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_12b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_12c[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_12c[] = {
+	[0] = llc_conn_ac_upd_p_flag,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_12c = {
+	.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_12c,
+	.ev_actions = llc_reject_actions_12c,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_reject_actions_13[] = {
+	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_13 = {
+	.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_13,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_14a[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[4] = llc_conn_ac_clear_remote_busy,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_14a = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_14a,
+	.ev_actions = llc_reject_actions_14a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_14b[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_p_flag,
+	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[4] = llc_conn_ac_clear_remote_busy,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_14b = {
+	.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_14b,
+	.ev_actions = llc_reject_actions_14b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_15a[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[3] = llc_conn_ac_clear_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_15a = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_15a,
+	.ev_actions = llc_reject_actions_15a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_15b[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[3] = llc_conn_ac_clear_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_15b = {
+	.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_15b,
+	.ev_actions = llc_reject_actions_15b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_reject_actions_16[] = {
+	[0] = llc_conn_ac_set_vs_nr,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_resend_i_rsp_f_set_1,
+	[3] = llc_conn_ac_clear_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_16 = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_reject_actions_16,
+};
+
+/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_17[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_17[] = {
+	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_17 = {
+	.ev = llc_conn_ev_init_p_f_cycle,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_17,
+	.ev_actions = llc_reject_actions_17,
+};
+
+/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_18[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_18[] = {
+	[0] = llc_conn_ac_send_rej_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_start_rej_timer,
+	[3] = llc_conn_ac_inc_retry_cnt_by_1,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_18 = {
+	.ev = llc_conn_ev_rej_tmr_exp,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_18,
+	.ev_actions = llc_reject_actions_18,
+};
+
+/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_19[] = {
+	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_19[] = {
+	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_start_rej_timer,
+	[3] = llc_conn_ac_inc_retry_cnt_by_1,
+	[4] = llc_conn_ac_rst_vs,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_19 = {
+	.ev = llc_conn_ev_p_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_19,
+	.ev_actions = llc_reject_actions_19,
+};
+
+/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20a[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_20a[] = {
+	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_start_rej_timer,
+	[3] = llc_conn_ac_inc_retry_cnt_by_1,
+	[4] = llc_conn_ac_rst_vs,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_20a = {
+	.ev = llc_conn_ev_ack_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_20a,
+	.ev_actions = llc_reject_actions_20a,
+};
+
+/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20b[] = {
+	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
+	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[2] = NULL,
+};
+
+static llc_conn_action_t llc_reject_actions_20b[] = {
+	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_start_rej_timer,
+	[3] = llc_conn_ac_inc_retry_cnt_by_1,
+	[4] = llc_conn_ac_rst_vs,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_reject_state_trans_20b = {
+	.ev = llc_conn_ev_busy_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT_REJ,
+	.ev_qualifiers = llc_reject_ev_qfyrs_20b,
+	.ev_actions = llc_reject_actions_20b,
+};
+
+/*
+ * Array of pointers;
+ * one to each transition
+ */
+static struct llc_conn_state_trans *llc_reject_state_transitions[] = {
+	[0] = &llc_common_state_trans_1,	/* Request */
+	[1] = &llc_common_state_trans_2,
+	[2] = &llc_common_state_trans_n,
+	[3] = &llc_reject_state_trans_1,
+	[4] = &llc_reject_state_trans_2,
+	[5] = &llc_reject_state_trans_2_1,
+	[6] = &llc_reject_state_trans_3,	/* Local busy */
+	[7] = &llc_reject_state_trans_4,
+	[8] = &llc_common_state_trans_n,
+	[9] = &llc_reject_state_trans_17,	/* Initiate PF cycle */
+	[10] = &llc_common_state_trans_n,
+	[11] = &llc_common_state_trans_11a,	/* Timer */
+	[12] = &llc_common_state_trans_11b,
+	[13] = &llc_common_state_trans_11c,
+	[14] = &llc_common_state_trans_11d,
+	[15] = &llc_reject_state_trans_18,
+	[16] = &llc_reject_state_trans_19,
+	[17] = &llc_reject_state_trans_20a,
+	[18] = &llc_reject_state_trans_20b,
+	[19] = &llc_common_state_trans_n,
+	[20] = &llc_common_state_trans_3,	/* Receive frame */
+	[21] = &llc_common_state_trans_4,
+	[22] = &llc_common_state_trans_5,
+	[23] = &llc_common_state_trans_6,
+	[24] = &llc_common_state_trans_7a,
+	[25] = &llc_common_state_trans_7b,
+	[26] = &llc_common_state_trans_8a,
+	[27] = &llc_common_state_trans_8b,
+	[28] = &llc_common_state_trans_8c,
+	[29] = &llc_common_state_trans_9,
+	/* [30] = &llc_common_state_trans_10, */
+	[30] = &llc_reject_state_trans_5a,
+	[31] = &llc_reject_state_trans_5b,
+	[32] = &llc_reject_state_trans_5c,
+	[33] = &llc_reject_state_trans_6,
+	[34] = &llc_reject_state_trans_7a,
+	[35] = &llc_reject_state_trans_7b,
+	[36] = &llc_reject_state_trans_8a,
+	[37] = &llc_reject_state_trans_8b,
+	[38] = &llc_reject_state_trans_9,
+	[39] = &llc_reject_state_trans_10a,
+	[40] = &llc_reject_state_trans_10b,
+	[41] = &llc_reject_state_trans_10c,
+	[42] = &llc_reject_state_trans_11,
+	[43] = &llc_reject_state_trans_12a,
+	[44] = &llc_reject_state_trans_12b,
+	[45] = &llc_reject_state_trans_12c,
+	[46] = &llc_reject_state_trans_13,
+	[47] = &llc_reject_state_trans_14a,
+	[48] = &llc_reject_state_trans_14b,
+	[49] = &llc_reject_state_trans_15a,
+	[50] = &llc_reject_state_trans_15b,
+	[51] = &llc_reject_state_trans_16,
+	[52] = &llc_common_state_trans_n,
+};
+
+/* LLC_CONN_STATE_AWAIT transitions */
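+/*
+ * The AWAIT states are entered after a command PDU with the P bit set
+ * has been sent: new data requests are refused (set_status_refuse)
+ * until the matching F-bit response closes the P/F cycle.
+ */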
+/* State transitions for LLC_CONN_EV_DATA_REQ event */
+static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = {
+	[0] = llc_conn_ev_qlfy_set_status_refuse,
+	[1] = NULL,
+};
+
+/* just one member, NULL, .bss zeroes it */
+static llc_conn_action_t llc_await_actions_1_0[1];
+
+static struct llc_conn_state_trans llc_await_state_trans_1_0 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = llc_await_ev_qfyrs_1_0,
+	.ev_actions = llc_await_actions_1_0,
+};
+
+/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
+static llc_conn_action_t llc_await_actions_1[] = {
+	[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
+	[1] = llc_conn_ac_set_data_flag_0,
+	[2] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_1 = {
+	.ev = llc_conn_ev_local_busy_detected,
+	.next_state = LLC_CONN_STATE_AWAIT_BUSY,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_1,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
+static llc_conn_action_t llc_await_actions_2[] = {
+	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_vs,
+	[3] = llc_conn_ac_stop_p_timer,
+	[4] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[5] = llc_conn_ac_start_rej_timer,
+	[6] = llc_conn_ac_clear_remote_busy,
+	[7] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_2 = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
+	.next_state = LLC_CONN_STATE_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_2,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
+static llc_conn_action_t llc_await_actions_3a[] = {
+	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_vs,
+	[3] = llc_conn_ac_start_rej_timer,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_3a = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
+	.next_state = LLC_CONN_STATE_AWAIT_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_3a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
+static llc_conn_action_t llc_await_actions_3b[] = {
+	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_vs,
+	[3] = llc_conn_ac_start_rej_timer,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_3b = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
+	.next_state = LLC_CONN_STATE_AWAIT_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_3b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
+static llc_conn_action_t llc_await_actions_4[] = {
+	[0] = llc_conn_ac_send_rej_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_vs,
+	[3] = llc_conn_ac_start_rej_timer,
+	[4] = llc_conn_ac_start_p_timer,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_4 = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
+	.next_state = LLC_CONN_STATE_AWAIT_REJ,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_4,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_5[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_stop_p_timer,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_upd_vs,
+	[5] = llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr,
+	[6] = llc_conn_ac_clear_remote_busy,
+	[7] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_5 = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_5,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_6a[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_send_rr_xxx_x_set_0,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_upd_vs,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_6a = {
+	.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_6a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_6b[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_send_rr_xxx_x_set_0,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_upd_vs,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_6b = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_6b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_7[] = {
+	[0] = llc_conn_ac_inc_vr_by_1,
+	[1] = llc_conn_ac_data_ind,
+	[2] = llc_conn_ac_send_rr_rsp_f_set_1,
+	[3] = llc_conn_ac_upd_nr_received,
+	[4] = llc_conn_ac_upd_vs,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_7 = {
+	.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_7,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_8a[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_stop_p_timer,
+	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[4] = llc_conn_ac_clear_remote_busy,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_8a = {
+	.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_8a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_8b[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_stop_p_timer,
+	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
+	[4] = llc_conn_ac_clear_remote_busy,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_8b = {
+	.ev = llc_conn_ev_rx_rej_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_8b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_9a[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_9a = {
+	.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_9a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_9b[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_9b = {
+	.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_9b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_9c[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_9c = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_9c,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_9d[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_clear_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_9d = {
+	.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_9d,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_10a[] = {
+	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_vs,
+	[3] = llc_conn_ac_clear_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_10a = {
+	.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_10a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_10b[] = {
+	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_vs,
+	[3] = llc_conn_ac_clear_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_10b = {
+	.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_10b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_11[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_stop_p_timer,
+	[3] = llc_conn_ac_set_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_11 = {
+	.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
+	.next_state = LLC_CONN_STATE_NORMAL,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_11,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_12a[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_12a = {
+	.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_12a,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
+static llc_conn_action_t llc_await_actions_12b[] = {
+	[0] = llc_conn_ac_upd_nr_received,
+	[1] = llc_conn_ac_upd_vs,
+	[2] = llc_conn_ac_set_remote_busy,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_12b = {
+	.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_12b,
+};
+
+/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
+static llc_conn_action_t llc_await_actions_13[] = {
+	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
+	[1] = llc_conn_ac_upd_nr_received,
+	[2] = llc_conn_ac_upd_vs,
+	[3] = llc_conn_ac_set_remote_busy,
+	[4] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_13 = {
+	.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_await_actions_13,
+};
+
+/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_14[] = {
+	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_await_actions_14[] = {
+	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
+	[1] = llc_conn_ac_start_p_timer,
+	[2] = llc_conn_ac_inc_retry_cnt_by_1,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_await_state_trans_14 = {
+	.ev = llc_conn_ev_p_tmr_exp,
+	.next_state = LLC_CONN_STATE_AWAIT,
+	.ev_qualifiers = llc_await_ev_qfyrs_14,
+	.ev_actions = llc_await_actions_14,
+};
llc_await_ev_qfyrs_14, + .ev_actions = llc_await_actions_14, +}; + +/* + * Array of pointers; + * one to each transition + */ +static struct llc_conn_state_trans *llc_await_state_transitions[] = { + [0] = &llc_common_state_trans_1, /* Request */ + [1] = &llc_common_state_trans_2, + [2] = &llc_await_state_trans_1_0, + [3] = &llc_common_state_trans_n, + [4] = &llc_await_state_trans_1, /* Local busy */ + [5] = &llc_common_state_trans_n, + [6] = &llc_common_state_trans_n, /* Initiate PF Cycle */ + [7] = &llc_common_state_trans_11a, /* Timer */ + [8] = &llc_common_state_trans_11b, + [9] = &llc_common_state_trans_11c, + [10] = &llc_common_state_trans_11d, + [11] = &llc_await_state_trans_14, + [12] = &llc_common_state_trans_n, + [13] = &llc_common_state_trans_3, /* Receive frame */ + [14] = &llc_common_state_trans_4, + [15] = &llc_common_state_trans_5, + [16] = &llc_common_state_trans_6, + [17] = &llc_common_state_trans_7a, + [18] = &llc_common_state_trans_7b, + [19] = &llc_common_state_trans_8a, + [20] = &llc_common_state_trans_8b, + [21] = &llc_common_state_trans_8c, + [22] = &llc_common_state_trans_9, + /* [23] = &llc_common_state_trans_10, */ + [23] = &llc_await_state_trans_2, + [24] = &llc_await_state_trans_3a, + [25] = &llc_await_state_trans_3b, + [26] = &llc_await_state_trans_4, + [27] = &llc_await_state_trans_5, + [28] = &llc_await_state_trans_6a, + [29] = &llc_await_state_trans_6b, + [30] = &llc_await_state_trans_7, + [31] = &llc_await_state_trans_8a, + [32] = &llc_await_state_trans_8b, + [33] = &llc_await_state_trans_9a, + [34] = &llc_await_state_trans_9b, + [35] = &llc_await_state_trans_9c, + [36] = &llc_await_state_trans_9d, + [37] = &llc_await_state_trans_10a, + [38] = &llc_await_state_trans_10b, + [39] = &llc_await_state_trans_11, + [40] = &llc_await_state_trans_12a, + [41] = &llc_await_state_trans_12b, + [42] = &llc_await_state_trans_13, + [43] = &llc_common_state_trans_n, +}; + +/* LLC_CONN_STATE_AWAIT_BUSY transitions */ +/* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */ +static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = { + [0] = llc_conn_ev_qlfy_set_status_refuse, + [1] = NULL, +}; + +/* just one member, NULL, .bss zeroes it */ +static llc_conn_action_t llc_await_busy_actions_1_0[1]; + +static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = { + .ev = llc_conn_ev_data_req, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = llc_await_busy_ev_qfyrs_1_0, + .ev_actions = llc_await_busy_actions_1_0, +}; + +/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ +static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1[] = { + [0] = llc_conn_ev_qlfy_data_flag_eq_1, + [1] = NULL, +}; + +static llc_conn_action_t llc_await_busy_actions_1[] = { + [0] = llc_conn_ac_send_rej_xxx_x_set_0, + [1] = llc_conn_ac_start_rej_timer, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_1 = { + .ev = llc_conn_ev_local_busy_cleared, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = llc_await_busy_ev_qfyrs_1, + .ev_actions = llc_await_busy_actions_1, +}; + +/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ +static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_2[] = { + [0] = llc_conn_ev_qlfy_data_flag_eq_0, + [1] = NULL, +}; + +static llc_conn_action_t llc_await_busy_actions_2[] = { + [0] = llc_conn_ac_send_rr_xxx_x_set_0, + [1] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_2 = { + .ev = llc_conn_ev_local_busy_cleared, + .next_state = LLC_CONN_STATE_AWAIT, + .ev_qualifiers = 
llc_await_busy_ev_qfyrs_2, + .ev_actions = llc_await_busy_actions_2, +}; + +/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ +static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_3[] = { + [0] = llc_conn_ev_qlfy_data_flag_eq_2, + [1] = NULL, +}; + +static llc_conn_action_t llc_await_busy_actions_3[] = { + [0] = llc_conn_ac_send_rr_xxx_x_set_0, + [1] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_3 = { + .ev = llc_conn_ev_local_busy_cleared, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = llc_await_busy_ev_qfyrs_3, + .ev_actions = llc_await_busy_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_busy_actions_4[] = { + [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_stop_p_timer, + [4] = llc_conn_ac_set_data_flag_1, + [5] = llc_conn_ac_clear_remote_busy, + [6] = llc_conn_ac_resend_i_xxx_x_set_0, + [7] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_4 = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, + .next_state = LLC_CONN_STATE_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_busy_actions_5a[] = { + [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_set_data_flag_1, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_5a = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_5a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_busy_actions_5b[] = { + [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_set_data_flag_1, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_5b = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_5b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_busy_actions_6[] = { + [0] = llc_conn_ac_send_rnr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_set_data_flag_1, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_6 = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_6, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_7[] = { + [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, + [1] = llc_conn_ac_inc_vr_by_1, + [2] = llc_conn_ac_data_ind, + [3] = llc_conn_ac_stop_p_timer, + [4] = llc_conn_ac_upd_nr_received, + [5] = llc_conn_ac_upd_vs, + [6] = llc_conn_ac_set_data_flag_0, + [7] = llc_conn_ac_clear_remote_busy, + [8] = llc_conn_ac_resend_i_xxx_x_set_0, + [9] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_7 = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_BUSY, + .ev_qualifiers = NONE, + .ev_actions = 
llc_await_busy_actions_7, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_8a[] = { + [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, + [1] = llc_conn_ac_inc_vr_by_1, + [2] = llc_conn_ac_data_ind, + [3] = llc_conn_ac_upd_nr_received, + [4] = llc_conn_ac_upd_vs, + [5] = llc_conn_ac_set_data_flag_0, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_8a = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_8a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_8b[] = { + [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, + [1] = llc_conn_ac_inc_vr_by_1, + [2] = llc_conn_ac_data_ind, + [3] = llc_conn_ac_upd_nr_received, + [4] = llc_conn_ac_upd_vs, + [5] = llc_conn_ac_set_data_flag_0, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_8b = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_8b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_9[] = { + [0] = llc_conn_ac_send_rnr_rsp_f_set_1, + [1] = llc_conn_ac_inc_vr_by_1, + [2] = llc_conn_ac_data_ind, + [3] = llc_conn_ac_upd_nr_received, + [4] = llc_conn_ac_upd_vs, + [5] = llc_conn_ac_set_data_flag_0, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_9 = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_9, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_10a[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_resend_i_xxx_x_set_0, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_10a = { + .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_10a, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_10b[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_resend_i_xxx_x_set_0, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_10b = { + .ev = llc_conn_ev_rx_rej_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_10b, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_11a[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_11a = { + .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_11a, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_11b[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = 
llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_11b = { + .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_11b, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_11c[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_11c = { + .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_11c, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_11d[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_11d = { + .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_11d, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_12a[] = { + [0] = llc_conn_ac_send_rnr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_clear_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_12a = { + .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_12a, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_12b[] = { + [0] = llc_conn_ac_send_rnr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_clear_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_12b = { + .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_12b, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_13[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_set_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_13 = { + .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_13, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_14a[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_set_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_14a = { + .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_14a, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_busy_actions_14b[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_set_remote_busy, + [3] = 
NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_14b = { + .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_14b, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_busy_actions_15[] = { + [0] = llc_conn_ac_send_rnr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_set_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_15 = { + .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_busy_actions_15, +}; + +/* State transitions for LLC_CONN_EV_P_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_16[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_await_busy_actions_16[] = { + [0] = llc_conn_ac_send_rnr_cmd_p_set_1, + [1] = llc_conn_ac_start_p_timer, + [2] = llc_conn_ac_inc_retry_cnt_by_1, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_busy_state_trans_16 = { + .ev = llc_conn_ev_p_tmr_exp, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = llc_await_busy_ev_qfyrs_16, + .ev_actions = llc_await_busy_actions_16, +}; + +/* + * Array of pointers; + * one to each transition + */ +static struct llc_conn_state_trans *llc_await_busy_state_transitions[] = { + [0] = &llc_common_state_trans_1, /* Request */ + [1] = &llc_common_state_trans_2, + [2] = &llc_await_busy_state_trans_1_0, + [3] = &llc_common_state_trans_n, + [4] = &llc_await_busy_state_trans_1, /* Local busy */ + [5] = &llc_await_busy_state_trans_2, + [6] = &llc_await_busy_state_trans_3, + [7] = &llc_common_state_trans_n, + [8] = &llc_common_state_trans_n, /* Initiate PF cycle */ + [9] = &llc_common_state_trans_11a, /* Timer */ + [10] = &llc_common_state_trans_11b, + [11] = &llc_common_state_trans_11c, + [12] = &llc_common_state_trans_11d, + [13] = &llc_await_busy_state_trans_16, + [14] = &llc_common_state_trans_n, + [15] = &llc_await_busy_state_trans_4, /* Receive frame */ + [16] = &llc_await_busy_state_trans_5a, + [17] = &llc_await_busy_state_trans_5b, + [18] = &llc_await_busy_state_trans_6, + [19] = &llc_await_busy_state_trans_7, + [20] = &llc_await_busy_state_trans_8a, + [21] = &llc_await_busy_state_trans_8b, + [22] = &llc_await_busy_state_trans_9, + [23] = &llc_await_busy_state_trans_10a, + [24] = &llc_await_busy_state_trans_10b, + [25] = &llc_await_busy_state_trans_11a, + [26] = &llc_await_busy_state_trans_11b, + [27] = &llc_await_busy_state_trans_11c, + [28] = &llc_await_busy_state_trans_11d, + [29] = &llc_await_busy_state_trans_12a, + [30] = &llc_await_busy_state_trans_12b, + [31] = &llc_await_busy_state_trans_13, + [32] = &llc_await_busy_state_trans_14a, + [33] = &llc_await_busy_state_trans_14b, + [34] = &llc_await_busy_state_trans_15, + [35] = &llc_common_state_trans_3, + [36] = &llc_common_state_trans_4, + [37] = &llc_common_state_trans_5, + [38] = &llc_common_state_trans_6, + [39] = &llc_common_state_trans_7a, + [40] = &llc_common_state_trans_7b, + [41] = &llc_common_state_trans_8a, + [42] = &llc_common_state_trans_8b, + [43] = &llc_common_state_trans_8c, + [44] = &llc_common_state_trans_9, + /* [45] = &llc_common_state_trans_10, */ + [45] = &llc_common_state_trans_n, +}; + +/* ----------------- LLC_CONN_STATE_AWAIT_REJ transitions --------------- */ +/* State transitions for 
LLC_CONN_EV_DATA_CONN_REQ event */ +static llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = { + [0] = llc_conn_ev_qlfy_set_status_refuse, + [1] = NULL, +}; + +/* just one member, NULL, .bss zeroes it */ +static llc_conn_action_t llc_await_reject_actions_1_0[1]; + +static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = { + .ev = llc_conn_ev_data_req, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = llc_await_reject_ev_qfyrs_1_0, + .ev_actions = llc_await_reject_actions_1_0, +}; + +/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ +static llc_conn_action_t llc_await_rejct_actions_1[] = { + [0] = llc_conn_ac_send_rnr_xxx_x_set_0, + [1] = llc_conn_ac_set_data_flag_2, + [2] = NULL +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_1 = { + .ev = llc_conn_ev_local_busy_detected, + .next_state = LLC_CONN_STATE_AWAIT_BUSY, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_1, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_rejct_actions_2a[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = NULL +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_2a = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_2a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_rejct_actions_2b[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = NULL +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_2b = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_2b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_rejct_actions_3[] = { + [0] = llc_conn_ac_send_rr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = NULL +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_3 = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_4[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_data_ind, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_stop_rej_timer, + [4] = llc_conn_ac_upd_nr_received, + [5] = llc_conn_ac_upd_vs, + [6] = llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr, + [7] = llc_conn_ac_clear_remote_busy, + [8] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_4 = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_5a[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_data_ind, + [2] = llc_conn_ac_send_rr_xxx_x_set_0, + [3] = llc_conn_ac_stop_rej_timer, + [4] = llc_conn_ac_upd_nr_received, + [5] = llc_conn_ac_upd_vs, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_5a = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT, + 
.ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_5a, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_5b[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_data_ind, + [2] = llc_conn_ac_send_rr_xxx_x_set_0, + [3] = llc_conn_ac_stop_rej_timer, + [4] = llc_conn_ac_upd_nr_received, + [5] = llc_conn_ac_upd_vs, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_5b = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_5b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_6[] = { + [0] = llc_conn_ac_inc_vr_by_1, + [1] = llc_conn_ac_data_ind, + [2] = llc_conn_ac_send_rr_rsp_f_set_1, + [3] = llc_conn_ac_stop_rej_timer, + [4] = llc_conn_ac_upd_nr_received, + [5] = llc_conn_ac_upd_vs, + [6] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_6 = { + .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_6, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_7a[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_resend_i_xxx_x_set_0, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_7a = { + .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_7a, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_7b[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_resend_i_xxx_x_set_0, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_7b = { + .ev = llc_conn_ev_rx_rej_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_7b, +}; + +/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ +static llc_conn_action_t llc_await_rejct_actions_7c[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_resend_i_xxx_x_set_0, + [4] = llc_conn_ac_clear_remote_busy, + [5] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_7c = { + .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_7c, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_8a[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_8a = { + .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_8a, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_8b[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = 
llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_8b = { + .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_8b, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_8c[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_8c = { + .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_8c, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_8d[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_clear_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_8d = { + .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_8d, +}; + +/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_9a[] = { + [0] = llc_conn_ac_send_rr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_clear_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_9a = { + .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_9a, +}; + +/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_9b[] = { + [0] = llc_conn_ac_send_rr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_clear_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_9b = { + .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_9b, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_10[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_stop_p_timer, + [3] = llc_conn_ac_set_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_10 = { + .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, + .next_state = LLC_CONN_STATE_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_10, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_11a[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_set_remote_busy, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_11a = { + .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_11a, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ +static llc_conn_action_t llc_await_rejct_actions_11b[] = { + [0] = llc_conn_ac_upd_nr_received, + [1] = llc_conn_ac_upd_vs, + [2] = llc_conn_ac_set_remote_busy, + [3] = 
NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_11b = { + .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_11b, +}; + +/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ +static llc_conn_action_t llc_await_rejct_actions_12[] = { + [0] = llc_conn_ac_send_rr_rsp_f_set_1, + [1] = llc_conn_ac_upd_nr_received, + [2] = llc_conn_ac_upd_vs, + [3] = llc_conn_ac_set_remote_busy, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_12 = { + .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = NONE, + .ev_actions = llc_await_rejct_actions_12, +}; + +/* State transitions for LLC_CONN_EV_P_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_await_rejct_ev_qfyrs_13[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_await_rejct_actions_13[] = { + [0] = llc_conn_ac_send_rej_cmd_p_set_1, + [1] = llc_conn_ac_stop_p_timer, + [2] = llc_conn_ac_inc_retry_cnt_by_1, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = { + .ev = llc_conn_ev_p_tmr_exp, + .next_state = LLC_CONN_STATE_AWAIT_REJ, + .ev_qualifiers = llc_await_rejct_ev_qfyrs_13, + .ev_actions = llc_await_rejct_actions_13, +}; + +/* + * Array of pointers; + * one to each transition + */ +static struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = { + [0] = &llc_await_reject_state_trans_1_0, + [1] = &llc_common_state_trans_1, /* requests */ + [2] = &llc_common_state_trans_2, + [3] = &llc_common_state_trans_n, + [4] = &llc_await_rejct_state_trans_1, /* local busy */ + [5] = &llc_common_state_trans_n, + [6] = &llc_common_state_trans_n, /* Initiate PF cycle */ + [7] = &llc_await_rejct_state_trans_13, /* timers */ + [8] = &llc_common_state_trans_11a, + [9] = &llc_common_state_trans_11b, + [10] = &llc_common_state_trans_11c, + [11] = &llc_common_state_trans_11d, + [12] = &llc_common_state_trans_n, + [13] = &llc_await_rejct_state_trans_2a, /* receive frames */ + [14] = &llc_await_rejct_state_trans_2b, + [15] = &llc_await_rejct_state_trans_3, + [16] = &llc_await_rejct_state_trans_4, + [17] = &llc_await_rejct_state_trans_5a, + [18] = &llc_await_rejct_state_trans_5b, + [19] = &llc_await_rejct_state_trans_6, + [20] = &llc_await_rejct_state_trans_7a, + [21] = &llc_await_rejct_state_trans_7b, + [22] = &llc_await_rejct_state_trans_7c, + [23] = &llc_await_rejct_state_trans_8a, + [24] = &llc_await_rejct_state_trans_8b, + [25] = &llc_await_rejct_state_trans_8c, + [26] = &llc_await_rejct_state_trans_8d, + [27] = &llc_await_rejct_state_trans_9a, + [28] = &llc_await_rejct_state_trans_9b, + [29] = &llc_await_rejct_state_trans_10, + [30] = &llc_await_rejct_state_trans_11a, + [31] = &llc_await_rejct_state_trans_11b, + [32] = &llc_await_rejct_state_trans_12, + [33] = &llc_common_state_trans_3, + [34] = &llc_common_state_trans_4, + [35] = &llc_common_state_trans_5, + [36] = &llc_common_state_trans_6, + [37] = &llc_common_state_trans_7a, + [38] = &llc_common_state_trans_7b, + [39] = &llc_common_state_trans_8a, + [40] = &llc_common_state_trans_8b, + [41] = &llc_common_state_trans_8c, + [42] = &llc_common_state_trans_9, + /* [43] = &llc_common_state_trans_10, */ + [43] = &llc_common_state_trans_n, +}; + +/* LLC_CONN_STATE_D_CONN transitions */ +/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event, + * cause_flag = 1 */ +static llc_conn_ev_qfyr_t 
llc_d_conn_ev_qfyrs_1[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_1, + [1] = llc_conn_ev_qlfy_set_status_conflict, + [2] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_1[] = { + [0] = llc_conn_ac_send_dm_rsp_f_set_p, + [1] = llc_conn_ac_stop_ack_timer, + [2] = llc_conn_ac_disc_confirm, + [3] = llc_conn_disc, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_1 = { + .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_d_conn_ev_qfyrs_1, + .ev_actions = llc_d_conn_actions_1, +}; + +/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event, + * cause_flag = 0 + */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1_1[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_0, + [1] = llc_conn_ev_qlfy_set_status_conflict, + [2] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_1_1[] = { + [0] = llc_conn_ac_send_dm_rsp_f_set_p, + [1] = llc_conn_ac_stop_ack_timer, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = { + .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_d_conn_ev_qfyrs_1_1, + .ev_actions = llc_d_conn_actions_1_1, +}; + +/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, + * cause_flag = 1 + */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_f, + [1] = llc_conn_ev_qlfy_cause_flag_eq_1, + [2] = llc_conn_ev_qlfy_set_status_disc, + [3] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_2[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_ac_disc_confirm, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_2 = { + .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_d_conn_ev_qfyrs_2, + .ev_actions = llc_d_conn_actions_2, +}; + +/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, + * cause_flag = 0 + */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2_1[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_f, + [1] = llc_conn_ev_qlfy_cause_flag_eq_0, + [2] = llc_conn_ev_qlfy_set_status_disc, + [3] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_2_1[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = { + .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_d_conn_ev_qfyrs_2_1, + .ev_actions = llc_d_conn_actions_2_1, +}; + +/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_d_conn_actions_3[] = { + [0] = llc_conn_ac_send_ua_rsp_f_set_p, + [1] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_3 = { + .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_D_CONN, + .ev_qualifiers = NONE, + .ev_actions = llc_d_conn_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, + * cause_flag = 1 + */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_1, + [1] = llc_conn_ev_qlfy_set_status_disc, + [2] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_4[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_ac_disc_confirm, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_4 = { + .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + 
.ev_qualifiers = llc_d_conn_ev_qfyrs_4, + .ev_actions = llc_d_conn_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, + * cause_flag = 0 + */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4_1[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_0, + [1] = llc_conn_ev_qlfy_set_status_disc, + [2] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_4_1[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = { + .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_d_conn_ev_qfyrs_4_1, + .ev_actions = llc_d_conn_actions_4_1, +}; + +/* + * State transition for + * LLC_CONN_EV_DATA_CONN_REQ event + */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = { + [0] = llc_conn_ev_qlfy_set_status_refuse, + [1] = NULL, +}; + +/* just one member, NULL, .bss zeroes it */ +static llc_conn_action_t llc_d_conn_actions_5[1]; + +static struct llc_conn_state_trans llc_d_conn_state_trans_5 = { + .ev = llc_conn_ev_data_req, + .next_state = LLC_CONN_STATE_D_CONN, + .ev_qualifiers = llc_d_conn_ev_qfyrs_5, + .ev_actions = llc_d_conn_actions_5, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_6[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [1] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_6[] = { + [0] = llc_conn_ac_send_disc_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_inc_retry_cnt_by_1, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_6 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_D_CONN, + .ev_qualifiers = llc_d_conn_ev_qfyrs_6, + .ev_actions = llc_d_conn_actions_6, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 1 */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_7[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = llc_conn_ev_qlfy_cause_flag_eq_1, + [2] = llc_conn_ev_qlfy_set_status_failed, + [3] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_7[] = { + [0] = llc_conn_ac_disc_confirm, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_7 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_d_conn_ev_qfyrs_7, + .ev_actions = llc_d_conn_actions_7, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 0 */ +static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_8[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = llc_conn_ev_qlfy_cause_flag_eq_0, + [2] = llc_conn_ev_qlfy_set_status_failed, + [3] = NULL, +}; + +static llc_conn_action_t llc_d_conn_actions_8[] = { + [0] = llc_conn_disc, + [1] = NULL, +}; + +static struct llc_conn_state_trans llc_d_conn_state_trans_8 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_d_conn_ev_qfyrs_8, + .ev_actions = llc_d_conn_actions_8, +}; + +/* + * Array of pointers; + * one to each transition + */ +static struct llc_conn_state_trans *llc_d_conn_state_transitions[] = { + [0] = &llc_d_conn_state_trans_5, /* Request */ + [1] = &llc_common_state_trans_n, + [2] = &llc_common_state_trans_n, /* Local busy */ + [3] = &llc_common_state_trans_n, /* Initiate PF cycle */ + [4] = &llc_d_conn_state_trans_6, /* Timer */ + [5] = &llc_d_conn_state_trans_7, + [6] = &llc_d_conn_state_trans_8, + [7] = &llc_common_state_trans_n, + [8] = &llc_d_conn_state_trans_1, /* Receive 
frame */ + [9] = &llc_d_conn_state_trans_1_1, + [10] = &llc_d_conn_state_trans_2, + [11] = &llc_d_conn_state_trans_2_1, + [12] = &llc_d_conn_state_trans_3, + [13] = &llc_d_conn_state_trans_4, + [14] = &llc_d_conn_state_trans_4_1, + [15] = &llc_common_state_trans_n, +}; + +/* LLC_CONN_STATE_RESET transitions */ +/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_rst_actions_1[] = { + [0] = llc_conn_ac_set_vs_0, + [1] = llc_conn_ac_set_vr_0, + [2] = llc_conn_ac_set_s_flag_1, + [3] = llc_conn_ac_send_ua_rsp_f_set_p, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_1 = { + .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = NONE, + .ev_actions = llc_rst_actions_1, +}; + +/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, + * cause_flag = 1 + */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_f, + [1] = llc_conn_ev_qlfy_cause_flag_eq_1, + [2] = llc_conn_ev_qlfy_set_status_conn, + [3] = NULL, +}; + +static llc_conn_action_t llc_rst_actions_2[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_ac_set_vs_0, + [2] = llc_conn_ac_set_vr_0, + [3] = llc_conn_ac_upd_p_flag, + [4] = llc_conn_ac_rst_confirm, + [5] = llc_conn_ac_set_remote_busy_0, + [6] = llc_conn_reset, + [7] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_2 = { + .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_rst_ev_qfyrs_2, + .ev_actions = llc_rst_actions_2, +}; + +/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, + * cause_flag = 0 + */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2_1[] = { + [0] = llc_conn_ev_qlfy_p_flag_eq_f, + [1] = llc_conn_ev_qlfy_cause_flag_eq_0, + [2] = llc_conn_ev_qlfy_set_status_rst_done, + [3] = NULL, +}; + +static llc_conn_action_t llc_rst_actions_2_1[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_ac_set_vs_0, + [2] = llc_conn_ac_set_vr_0, + [3] = llc_conn_ac_upd_p_flag, + [4] = llc_conn_ac_rst_confirm, + [5] = llc_conn_ac_set_remote_busy_0, + [6] = llc_conn_reset, + [7] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_2_1 = { + .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_rst_ev_qfyrs_2_1, + .ev_actions = llc_rst_actions_2_1, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_3[] = { + [0] = llc_conn_ev_qlfy_s_flag_eq_1, + [1] = llc_conn_ev_qlfy_set_status_rst_done, + [2] = NULL, +}; + +static llc_conn_action_t llc_rst_actions_3[] = { + [0] = llc_conn_ac_set_p_flag_0, + [1] = llc_conn_ac_set_remote_busy_0, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_3 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = llc_rst_ev_qfyrs_3, + .ev_actions = llc_rst_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event, + * cause_flag = 1 + */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_1, + [1] = llc_conn_ev_qlfy_set_status_disc, + [2] = NULL, +}; +static llc_conn_action_t llc_rst_actions_4[] = { + [0] = llc_conn_ac_send_dm_rsp_f_set_p, + [1] = llc_conn_ac_disc_ind, + [2] = llc_conn_ac_stop_ack_timer, + [3] = llc_conn_disc, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_4 = { + .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, + .next_state = 
LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_rst_ev_qfyrs_4, + .ev_actions = llc_rst_actions_4, +}; + +/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event, + * cause_flag = 0 + */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4_1[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_0, + [1] = llc_conn_ev_qlfy_set_status_refuse, + [2] = NULL, +}; + +static llc_conn_action_t llc_rst_actions_4_1[] = { + [0] = llc_conn_ac_send_dm_rsp_f_set_p, + [1] = llc_conn_ac_stop_ack_timer, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_4_1 = { + .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_rst_ev_qfyrs_4_1, + .ev_actions = llc_rst_actions_4_1, +}; + +/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, + * cause_flag = 1 + */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_1, + [1] = llc_conn_ev_qlfy_set_status_disc, + [2] = NULL, +}; + +static llc_conn_action_t llc_rst_actions_5[] = { + [0] = llc_conn_ac_disc_ind, + [1] = llc_conn_ac_stop_ack_timer, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_5 = { + .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_rst_ev_qfyrs_5, + .ev_actions = llc_rst_actions_5, +}; + +/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, + * cause_flag = 0 + */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5_1[] = { + [0] = llc_conn_ev_qlfy_cause_flag_eq_0, + [1] = llc_conn_ev_qlfy_set_status_refuse, + [2] = NULL, +}; + +static llc_conn_action_t llc_rst_actions_5_1[] = { + [0] = llc_conn_ac_stop_ack_timer, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_5_1 = { + .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_rst_ev_qfyrs_5_1, + .ev_actions = llc_rst_actions_5_1, +}; + +/* State transitions for DATA_CONN_REQ event */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = { + [0] = llc_conn_ev_qlfy_set_status_refuse, + [1] = NULL, +}; + +/* just one member, NULL, .bss zeroes it */ +static llc_conn_action_t llc_rst_actions_6[1]; + +static struct llc_conn_state_trans llc_rst_state_trans_6 = { + .ev = llc_conn_ev_data_req, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = llc_rst_ev_qfyrs_6, + .ev_actions = llc_rst_actions_6, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_7[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, + [1] = llc_conn_ev_qlfy_s_flag_eq_0, + [2] = NULL, +}; + +static llc_conn_action_t llc_rst_actions_7[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_inc_retry_cnt_by_1, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_7 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_RESET, + .ev_qualifiers = llc_rst_ev_qfyrs_7, + .ev_actions = llc_rst_actions_7, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = llc_conn_ev_qlfy_s_flag_eq_0, + [2] = llc_conn_ev_qlfy_cause_flag_eq_1, + [3] = llc_conn_ev_qlfy_set_status_failed, + [4] = NULL, +}; +static llc_conn_action_t llc_rst_actions_8[] = { + [0] = llc_conn_ac_disc_ind, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_8 = { + 
.ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_rst_ev_qfyrs_8, + .ev_actions = llc_rst_actions_8, +}; + +/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ +static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8_1[] = { + [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, + [1] = llc_conn_ev_qlfy_s_flag_eq_0, + [2] = llc_conn_ev_qlfy_cause_flag_eq_0, + [3] = llc_conn_ev_qlfy_set_status_failed, + [4] = NULL, +}; +static llc_conn_action_t llc_rst_actions_8_1[] = { + [0] = llc_conn_ac_disc_ind, + [1] = llc_conn_disc, + [2] = NULL, +}; + +static struct llc_conn_state_trans llc_rst_state_trans_8_1 = { + .ev = llc_conn_ev_ack_tmr_exp, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = llc_rst_ev_qfyrs_8_1, + .ev_actions = llc_rst_actions_8_1, +}; + +/* + * Array of pointers; + * one to each transition + */ +static struct llc_conn_state_trans *llc_rst_state_transitions[] = { + [0] = &llc_rst_state_trans_6, /* Request */ + [1] = &llc_common_state_trans_n, + [2] = &llc_common_state_trans_n, /* Local busy */ + [3] = &llc_common_state_trans_n, /* Initiate PF cycle */ + [4] = &llc_rst_state_trans_3, /* Timer */ + [5] = &llc_rst_state_trans_7, + [6] = &llc_rst_state_trans_8, + [7] = &llc_rst_state_trans_8_1, + [8] = &llc_common_state_trans_n, + [9] = &llc_rst_state_trans_1, /* Receive frame */ + [10] = &llc_rst_state_trans_2, + [11] = &llc_rst_state_trans_2_1, + [12] = &llc_rst_state_trans_4, + [13] = &llc_rst_state_trans_4_1, + [14] = &llc_rst_state_trans_5, + [15] = &llc_rst_state_trans_5_1, + [16] = &llc_common_state_trans_n, +}; + +/* LLC_CONN_STATE_ERROR transitions */ +/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_error_actions_1[] = { + [0] = llc_conn_ac_set_vs_0, + [1] = llc_conn_ac_set_vr_0, + [2] = llc_conn_ac_send_ua_rsp_f_set_p, + [3] = llc_conn_ac_rst_ind, + [4] = llc_conn_ac_set_p_flag_0, + [5] = llc_conn_ac_set_remote_busy_0, + [6] = llc_conn_ac_stop_ack_timer, + [7] = llc_conn_reset, + [8] = NULL, +}; + +static struct llc_conn_state_trans llc_error_state_trans_1 = { + .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_NORMAL, + .ev_qualifiers = NONE, + .ev_actions = llc_error_actions_1, +}; + +/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ +static llc_conn_action_t llc_error_actions_2[] = { + [0] = llc_conn_ac_send_ua_rsp_f_set_p, + [1] = llc_conn_ac_disc_ind, + [2] = llc_conn_ac_stop_ack_timer, + [3] = llc_conn_disc, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_error_state_trans_2 = { + .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = NONE, + .ev_actions = llc_error_actions_2, +}; + +/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ +static llc_conn_action_t llc_error_actions_3[] = { + [0] = llc_conn_ac_disc_ind, + [1] = llc_conn_ac_stop_ack_timer, + [2] = llc_conn_disc, + [3] = NULL, +}; + +static struct llc_conn_state_trans llc_error_state_trans_3 = { + .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, + .next_state = LLC_CONN_STATE_ADM, + .ev_qualifiers = NONE, + .ev_actions = llc_error_actions_3, +}; + +/* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */ +static llc_conn_action_t llc_error_actions_4[] = { + [0] = llc_conn_ac_send_sabme_cmd_p_set_x, + [1] = llc_conn_ac_start_ack_timer, + [2] = llc_conn_ac_set_retry_cnt_0, + [3] = llc_conn_ac_set_cause_flag_0, + [4] = NULL, +}; + +static struct llc_conn_state_trans llc_error_state_trans_4 = { + .ev = 
llc_conn_ev_rx_frmr_rsp_fbit_set_x,
+	.next_state = LLC_CONN_STATE_RESET,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_error_actions_4,
+};
+
+/* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X event */
+static llc_conn_action_t llc_error_actions_5[] = {
+	[0] = llc_conn_ac_resend_frmr_rsp_f_set_p,
+	[1] = NULL,
+};
+
+static struct llc_conn_state_trans llc_error_state_trans_5 = {
+	.ev = llc_conn_ev_rx_xxx_cmd_pbit_set_x,
+	.next_state = LLC_CONN_STATE_ERROR,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_error_actions_5,
+};
+
+/* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X event */
+static struct llc_conn_state_trans llc_error_state_trans_6 = {
+	.ev = llc_conn_ev_rx_xxx_rsp_fbit_set_x,
+	.next_state = LLC_CONN_STATE_ERROR,
+	.ev_qualifiers = NONE,
+	.ev_actions = NONE,
+};
+
+/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_7[] = {
+	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_error_actions_7[] = {
+	[0] = llc_conn_ac_resend_frmr_rsp_f_set_0,
+	[1] = llc_conn_ac_start_ack_timer,
+	[2] = llc_conn_ac_inc_retry_cnt_by_1,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_error_state_trans_7 = {
+	.ev = llc_conn_ev_ack_tmr_exp,
+	.next_state = LLC_CONN_STATE_ERROR,
+	.ev_qualifiers = llc_error_ev_qfyrs_7,
+	.ev_actions = llc_error_actions_7,
+};
+
+/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
+static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_8[] = {
+	[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
+	[1] = NULL,
+};
+
+static llc_conn_action_t llc_error_actions_8[] = {
+	[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
+	[1] = llc_conn_ac_set_s_flag_0,
+	[2] = llc_conn_ac_start_ack_timer,
+	[3] = llc_conn_ac_set_retry_cnt_0,
+	[4] = llc_conn_ac_set_cause_flag_0,
+	[5] = NULL,
+};
+
+static struct llc_conn_state_trans llc_error_state_trans_8 = {
+	.ev = llc_conn_ev_ack_tmr_exp,
+	.next_state = LLC_CONN_STATE_RESET,
+	.ev_qualifiers = llc_error_ev_qfyrs_8,
+	.ev_actions = llc_error_actions_8,
+};
+
+/* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
+static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = {
+	[0] = llc_conn_ev_qlfy_set_status_refuse,
+	[1] = NULL,
+};
+
+/* just one member, NULL, .bss zeroes it */
+static llc_conn_action_t llc_error_actions_9[1];
+
+static struct llc_conn_state_trans llc_error_state_trans_9 = {
+	.ev = llc_conn_ev_data_req,
+	.next_state = LLC_CONN_STATE_ERROR,
+	.ev_qualifiers = llc_error_ev_qfyrs_9,
+	.ev_actions = llc_error_actions_9,
+};
+
+/*
+ * Array of pointers;
+ * one to each transition
+ */
+static struct llc_conn_state_trans *llc_error_state_transitions[] = {
+	[0] = &llc_error_state_trans_9,	 /* Request */
+	[1] = &llc_common_state_trans_n,
+	[2] = &llc_common_state_trans_n, /* Local busy */
+	[3] = &llc_common_state_trans_n, /* Initiate PF cycle */
+	[4] = &llc_error_state_trans_7,	 /* Timer */
+	[5] = &llc_error_state_trans_8,
+	[6] = &llc_common_state_trans_n,
+	[7] = &llc_error_state_trans_1,	 /* Receive frame */
+	[8] = &llc_error_state_trans_2,
+	[9] = &llc_error_state_trans_3,
+	[10] = &llc_error_state_trans_4,
+	[11] = &llc_error_state_trans_5,
+	[12] = &llc_error_state_trans_6,
+	[13] = &llc_common_state_trans_n,
+};
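+
+/*
+ * Illustrative sketch, not part of the original tables: the dispatcher
+ * in net/llc/llc_conn.c walks an array such as the one above entry by
+ * entry until some transition's ->ev recognizer accepts the event;
+ * recognizers return 0 on a match, so the walk is essentially
+ *
+ *	struct llc_conn_state_trans **t;
+ *
+ *	for (t = llc_error_state_transitions; (*t)->ev; t++)
+ *		if (!(*t)->ev(sk, ev))
+ *			break;
+ *
+ * after which *t is the candidate transition. See llc_qualify_conn_ev()
+ * below, which is the real loop: it starts at a per-event-type offset
+ * and also runs the ->ev_qualifiers checks before accepting *t.
+ */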
+
+/* LLC_CONN_STATE_TEMP transitions */
+/* State transitions for LLC_CONN_EV_DISC_REQ event */
+static llc_conn_action_t llc_temp_actions_1[] = {
+	[0] = llc_conn_ac_stop_all_timers,
+	[1] = llc_conn_ac_send_disc_cmd_p_set_x,
+	[2] = llc_conn_disc,
+	[3] = NULL,
+};
+
+static struct llc_conn_state_trans llc_temp_state_trans_1 = {
+	.ev = llc_conn_ev_disc_req,
+	.next_state = LLC_CONN_STATE_ADM,
+	.ev_qualifiers = NONE,
+	.ev_actions = llc_temp_actions_1,
+};
+
+/*
+ * Array of pointers;
+ * one to each transition
+ */
+static struct llc_conn_state_trans *llc_temp_state_transitions[] = {
+	[0] = &llc_temp_state_trans_1,	 /* requests */
+	[1] = &llc_common_state_trans_n,
+	[2] = &llc_common_state_trans_n, /* local busy */
+	[3] = &llc_common_state_trans_n, /* init_pf_cycle */
+	[4] = &llc_common_state_trans_n, /* timer */
+	[5] = &llc_common_state_trans_n, /* receive */
+};
+
+/* Connection State Transition Table */
+struct llc_conn_state llc_conn_state_table[] = {
+	{
+		current_state:	LLC_CONN_STATE_ADM,
+		transitions:	llc_adm_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_SETUP,
+		transitions:	llc_setup_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_NORMAL,
+		transitions:	llc_normal_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_BUSY,
+		transitions:	llc_busy_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_REJ,
+		transitions:	llc_reject_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_AWAIT,
+		transitions:	llc_await_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_AWAIT_BUSY,
+		transitions:	llc_await_busy_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_AWAIT_REJ,
+		transitions:	llc_await_rejct_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_D_CONN,
+		transitions:	llc_d_conn_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_RESET,
+		transitions:	llc_rst_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_ERROR,
+		transitions:	llc_error_state_transitions,
+	},
+	{
+		current_state:	LLC_CONN_STATE_TEMP,
+		transitions:	llc_temp_state_transitions,
+	},
+};
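+
+/*
+ * Indexing sketch, illustrative only: connection states are numbered
+ * from 1, so the row for a socket in state `state` is
+ *
+ *	struct llc_conn_state *cs = &llc_conn_state_table[state - 1];
+ *	struct llc_conn_state_trans **trans = cs->transitions;
+ *
+ * which is exactly the llc_conn_state_table[llc_sk(sk)->state - 1]
+ * lookup performed by llc_qualify_conn_ev() in net/llc/llc_conn.c.
+ */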
diff -Nru a/net/llc/llc_conn.c b/net/llc/llc_conn.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/net/llc/llc_conn.c	Tue Jun 18 19:12:03 2002
@@ -0,0 +1,531 @@
+/*
+ * llc_conn.c - Driver routines for connection component.
+ *
+ * Copyright (c) 1997 by Procom Technology, Inc.
+ *		 2001 by Arnaldo Carvalho de Melo
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static int llc_find_offset(int state, int ev_type);
+static void llc_conn_send_pdus(struct sock *sk);
+static int llc_conn_service(struct sock *sk, struct llc_conn_state_ev *ev);
+static int llc_exec_conn_trans_actions(struct sock *sk,
+				       struct llc_conn_state_trans *trans,
+				       struct llc_conn_state_ev *ev);
+static struct llc_conn_state_trans *
+	llc_qualify_conn_ev(struct sock *sk, struct llc_conn_state_ev *ev);
+
+/* Offset table on connection states transition diagram */
+static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV];
+
+/**
+ * llc_conn_alloc_ev - allocates an event
+ * @sk: socket the event is associated with
+ *
+ * Returns pointer to the allocated event on success, %NULL on failure.
+ */
+struct llc_conn_state_ev *llc_conn_alloc_ev(struct sock *sk)
+{
+	struct llc_conn_state_ev *ev = NULL;
+
+	/* verify connection is valid, active and open */
+	if (llc_sk(sk)->state != LLC_CONN_OUT_OF_SVC) {
+		/* get event structure to build a station event */
+		ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
+		if (ev)
+			memset(ev, 0, sizeof(*ev));
+	}
+	return ev;
+}
+
+/**
+ * llc_conn_send_ev - sends event to connection state machine
+ * @sk: connection
+ * @ev: occurred event
+ *
+ * Sends an event to the connection state machine. After processing the
+ * event (executing its actions and changing state), the upper layer will
+ * be indicated or confirmed, if needed. Returns 0 for success, 1 for
+ * failure. The socket lock has to be held before calling this function.
+ */
+int llc_conn_send_ev(struct sock *sk, struct llc_conn_state_ev *ev)
+{
+	/* sending event to state machine */
+	int rc = llc_conn_service(sk, ev);
+	struct llc_opt *llc = llc_sk(sk);
+	u8 flag = ev->flag;
+	struct llc_prim_if_block *ind_prim = ev->ind_prim;
+	struct llc_prim_if_block *cfm_prim = ev->cfm_prim;
+
+	llc_conn_free_ev(ev);
+#ifdef THIS_BREAKS_DISCONNECT_NOTIFICATION_BADLY
+	/* check if the connection was freed by the state machine by
+	 * means of llc_conn_disc */
+	if (rc == 2) {
+		printk(KERN_INFO __FUNCTION__ ": rc == 2\n");
+		rc = -ECONNABORTED;
+		goto out;
+	}
+#endif /* THIS_BREAKS_DISCONNECT_NOTIFICATION_BADLY */
+	if (!flag)	/* indicate or confirm not required */
+		goto out;
+	rc = 0;
+	if (ind_prim)	/* indication required */
+		llc->sap->ind(ind_prim);
+	if (!cfm_prim)	/* confirmation not required */
+		goto out;
+	/* data confirm has preconditions */
+	if (cfm_prim->prim != LLC_DATA_PRIM) {
+		llc->sap->conf(cfm_prim);
+		goto out;
+	}
+	if (!llc_data_accept_state(llc->state)) {
+		/* In this state, we can send I pdu */
+		/* FIXME: check if we don't need to see if sk->lock.users != 0
+		 * is needed here
+		 */
+		rc = llc->sap->conf(cfm_prim);
+		if (rc) /* confirmation wasn't accepted by the upper layer */
+			llc->failed_data_req = 1;
+	} else
+		llc->failed_data_req = 1;
+out:
+	return rc;
+}
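+
+/*
+ * Sketch of the notification contract, illustrative only: an action that
+ * wants the upper layer notified fills the event in before the call
+ * above inspects it, as llc_conn_rtn_pdu() below does for received data:
+ *
+ *	ev->flag     = 1;
+ *	ev->ind_prim = prim;
+ *
+ * flag set means a notification is required and ind_prim is what gets
+ * delivered through llc->sap->ind(); with flag left at 0 both the
+ * ->ind() and ->conf() calls are skipped.
+ */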
+ */
+void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
+{
+	struct sk_buff *skb;
+	llc_pdu_sn_t *pdu;
+	u16 nbr_unack_pdus;
+	u8 howmany_resend = 0;
+
+	llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus);
+	if (!nbr_unack_pdus)
+		goto out;
+	/* process unack PDUs only if unack queue is not empty; remove
+	 * appropriate PDUs, fix them up, and put them on mac_pdu_q.
+	 */
+	while ((skb = skb_dequeue(&llc_sk(sk)->pdu_unack_q)) != NULL) {
+		pdu = (llc_pdu_sn_t *)skb->nh.raw;
+		llc_pdu_set_cmd_rsp(skb, LLC_PDU_CMD);
+		llc_pdu_set_pf_bit(skb, first_p_bit);
+		skb_queue_tail(&sk->write_queue, skb);
+		first_p_bit = 0;
+		llc_sk(sk)->vS = LLC_I_GET_NS(pdu);
+		howmany_resend++;
+	}
+	if (howmany_resend > 0)
+		llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % LLC_2_SEQ_NBR_MODULO;
+	/* any PDUs to re-send are queued up; start sending to MAC */
+	llc_conn_send_pdus(sk);
+out:;
+}
+
+/**
+ * llc_conn_resend_i_pdu_as_rsp - Resend all unacknowledged I PDUs
+ * @sk: active connection.
+ * @nr: NR
+ * @first_f_bit: f_bit value of first pdu.
+ *
+ * Resend all unacknowledged I PDUs, starting with the NR; send the first
+ * as a response PDU with F bit equal to first_f_bit; if more than one,
+ * send subsequent ones as response PDUs with F bit equal to zero (0).
+ */
+void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
+{
+	struct sk_buff *skb;
+	llc_pdu_sn_t *pdu;
+	u16 nbr_unack_pdus;
+	u8 howmany_resend = 0;
+
+	llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus);
+	if (!nbr_unack_pdus)
+		goto out;
+	/* process unack PDUs only if unack queue is not empty; remove
+	 * appropriate PDUs, fix them up, and put them on mac_pdu_q
+	 */
+	while ((skb = skb_dequeue(&llc_sk(sk)->pdu_unack_q)) != NULL) {
+		pdu = (llc_pdu_sn_t *)skb->nh.raw;
+		llc_pdu_set_cmd_rsp(skb, LLC_PDU_RSP);
+		llc_pdu_set_pf_bit(skb, first_f_bit);
+		skb_queue_tail(&sk->write_queue, skb);
+		first_f_bit = 0;
+		llc_sk(sk)->vS = LLC_I_GET_NS(pdu);
+		howmany_resend++;
+	}
+	if (howmany_resend > 0)
+		llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % LLC_2_SEQ_NBR_MODULO;
+	/* any PDUs to re-send are queued up; start sending to MAC */
+	llc_conn_send_pdus(sk);
+out:;
+}
+
+/**
+ * llc_conn_remove_acked_pdus - Removes acknowledged pdus from tx queue
+ * @sk: active connection
+ * @nr: NR
+ * @how_many_unacked: size of pdu_unack_q after removing acked pdus
+ *
+ * Removes acknowledged pdus from the transmit queue (pdu_unack_q).
+ * Returns the number of pdus that were removed from the queue.
+ */
+int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked)
+{
+	int pdu_pos, i;
+	struct sk_buff *skb;
+	llc_pdu_sn_t *pdu;
+	int nbr_acked = 0;
+	int q_len = skb_queue_len(&llc_sk(sk)->pdu_unack_q);
+
+	if (!q_len)
+		goto out;
+	skb = skb_peek(&llc_sk(sk)->pdu_unack_q);
+	pdu = (llc_pdu_sn_t *)skb->nh.raw;
+
+	/* finding position of last acked pdu in queue */
+	pdu_pos = ((int)LLC_2_SEQ_NBR_MODULO + (int)nr -
+		   (int)LLC_I_GET_NS(pdu)) % LLC_2_SEQ_NBR_MODULO;
+
+	for (i = 0; i < pdu_pos && i < q_len; i++) {
+		skb = skb_dequeue(&llc_sk(sk)->pdu_unack_q);
+		if (skb)
+			kfree_skb(skb);
+		nbr_acked++;
+	}
+out:
+	*how_many_unacked = skb_queue_len(&llc_sk(sk)->pdu_unack_q);
+	return nbr_acked;
+}
+
+/**
+ * llc_conn_send_pdus - Sends queued PDUs
+ * @sk: active connection
+ *
+ * Sends queued pdus to the MAC layer for transmission.
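+ *
+ * Note that LLC_PDU_TYPE_IS_I() here follows the same 0-on-match
+ * convention as the event match functions in llc_evnt.c, so the first
+ * test below keeps I PDUs (except on loopback devices) on pdu_unack_q
+ * for a possible later retransmission, while all other PDU types are
+ * freed once handed to the MAC layer.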
+ */
+static void llc_conn_send_pdus(struct sock *sk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&sk->write_queue)) != NULL) {
+		llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw;
+
+		if (!LLC_PDU_TYPE_IS_I(pdu) &&
+		    !(skb->dev->flags & IFF_LOOPBACK))
+			skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
+		mac_send_pdu(skb);
+		if (LLC_PDU_TYPE_IS_I(pdu) ||
+		    (skb->dev && skb->dev->flags & IFF_LOOPBACK))
+			kfree_skb(skb);
+	}
+}
+
+/**
+ * llc_conn_free_ev - free event
+ * @ev: event to free
+ *
+ * Free allocated event.
+ */
+void llc_conn_free_ev(struct llc_conn_state_ev *ev)
+{
+	if (ev->type == LLC_CONN_EV_TYPE_PDU) {
+		/* free the frame that is bound to this event */
+		llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)ev->data.pdu.skb->nh.raw;
+
+		if (LLC_PDU_TYPE_IS_I(pdu) || !ev->flag || !ev->ind_prim)
+			kfree_skb(ev->data.pdu.skb);
+	}
+	/* free event structure to free list of the same */
+	kfree(ev);
+}
+
+/**
+ * llc_conn_service - finds transition and changes state of connection
+ * @sk: connection
+ * @ev: happened event
+ *
+ * This function finds the transition that matches the happened event,
+ * then executes the related actions and finally changes the state of
+ * the connection. Returns 0 for success, 1 for failure.
+ */
+static int llc_conn_service(struct sock *sk, struct llc_conn_state_ev *ev)
+{
+	int rc = 1;
+	struct llc_conn_state_trans *trans;
+
+	if (llc_sk(sk)->state > NBR_CONN_STATES)
+		goto out;
+	rc = 0;
+	trans = llc_qualify_conn_ev(sk, ev);
+	if (trans) {
+		rc = llc_exec_conn_trans_actions(sk, trans, ev);
+		if (!rc && trans->next_state != NO_STATE_CHANGE)
+			llc_sk(sk)->state = trans->next_state;
+	}
+out:
+	return rc;
+}
+
+/**
+ * llc_qualify_conn_ev - finds transition for event
+ * @sk: connection
+ * @ev: happened event
+ *
+ * This function finds the transition that matches the happened event.
+ * Returns pointer to found transition on success, %NULL otherwise.
+ */
+static struct llc_conn_state_trans *
+	llc_qualify_conn_ev(struct sock *sk, struct llc_conn_state_ev *ev)
+{
+	struct llc_conn_state_trans **next_trans;
+	llc_conn_ev_qfyr_t *next_qualifier;
+	struct llc_conn_state *curr_state =
+				&llc_conn_state_table[llc_sk(sk)->state - 1];
+
+	/* search thru events for this state until
+	 * list exhausted or until no more
+	 */
+	for (next_trans = curr_state->transitions +
+		llc_find_offset(llc_sk(sk)->state - 1, ev->type);
+	     (*next_trans)->ev; next_trans++) {
+		if (!((*next_trans)->ev)(sk, ev)) {
+			/* got POSSIBLE event match; the event may require
+			 * qualification based on the values of a number of
+			 * state flags; if all qualifications are met (i.e.,
+			 * if all qualifying functions return success, or 0)
+			 * then this is THE event we're looking for
+			 */
+			for (next_qualifier = (*next_trans)->ev_qualifiers;
+			     next_qualifier && *next_qualifier &&
+			     !(*next_qualifier)(sk, ev); next_qualifier++)
+				/* nothing */;
+			if (!next_qualifier || !*next_qualifier)
+				/* all qualifiers executed successfully; this
+				 * is our transition; return it so we can
+				 * perform the associated actions & change
+				 * the state
+				 */
+				return *next_trans;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * llc_exec_conn_trans_actions - executes related actions
+ * @sk: connection
+ * @trans: transition whose actions must be performed
+ * @ev: happened event
+ *
+ * Executes the actions related to the happened event.
Returns 0 for
+ * success, 1 to indicate failure of at least one action or 2 if the
+ * connection was freed (llc_conn_disc was called).
+ */
+static int llc_exec_conn_trans_actions(struct sock *sk,
+				       struct llc_conn_state_trans *trans,
+				       struct llc_conn_state_ev *ev)
+{
+	int rc = 0;
+	llc_conn_action_t *next_action;
+
+	for (next_action = trans->ev_actions;
+	     next_action && *next_action; next_action++) {
+		int rc2 = (*next_action)(sk, ev);
+
+		if (rc2 == 2) {
+			rc = rc2;
+			break;
+		} else if (rc2)
+			rc = 1;
+	}
+	return rc;
+}
+
+/**
+ * llc_find_sock - Finds connection in sap for the remote/local sap/mac
+ * @sap: SAP
+ * @daddr: address of remote LLC (MAC + SAP)
+ * @laddr: address of local LLC (MAC + SAP)
+ *
+ * Searches the connection list of the SAP and finds the connection using
+ * the remote mac, remote sap, local mac, and local sap. Returns a pointer
+ * to the connection if found, %NULL otherwise.
+ */
+struct sock *llc_find_sock(struct llc_sap *sap, struct llc_addr *daddr,
+			   struct llc_addr *laddr)
+{
+	struct sock *rc = NULL;
+	struct list_head *entry;
+
+	spin_lock_bh(&sap->sk_list.lock);
+	if (list_empty(&sap->sk_list.list))
+		goto out;
+	list_for_each(entry, &sap->sk_list.list) {
+		struct llc_opt *llc = list_entry(entry, struct llc_opt, node);
+
+		if (llc->laddr.lsap == laddr->lsap &&
+		    llc->daddr.lsap == daddr->lsap &&
+		    !memcmp(llc->laddr.mac, laddr->mac, ETH_ALEN) &&
+		    !memcmp(llc->daddr.mac, daddr->mac, ETH_ALEN)) {
+			rc = llc->sk;
+			break;
+		}
+	}
+	if (rc)
+		sock_hold(rc);
+out:
+	spin_unlock_bh(&sap->sk_list.lock);
+	return rc;
+}
+
+/**
+ * llc_data_accept_state - designates if in this state data can be sent.
+ * @state: state of connection.
+ *
+ * Returns 0 if data can be sent, 1 otherwise.
+ */
+u8 llc_data_accept_state(u8 state)
+{
+	if (state != LLC_CONN_STATE_NORMAL && state != LLC_CONN_STATE_BUSY &&
+	    state != LLC_CONN_STATE_REJ)
+		return 1;	/* data_conn_refuse */
+	return 0;
+}
+
+/**
+ * find_next_offset - finds offset for next category of transitions
+ * @state: state table.
+ * @offset: start offset.
+ *
+ * Finds the offset of the next category of transitions in the transition
+ * table by counting the transitions in the category that starts at
+ * @offset. Returns that count (the terminating entry is accounted for by
+ * the caller).
+ */
+u16 find_next_offset(struct llc_conn_state *state, u16 offset)
+{
+	u16 cnt = 0;
+	struct llc_conn_state_trans **next_trans;
+
+	for (next_trans = state->transitions + offset;
+	     (*next_trans)->ev; next_trans++)
+		++cnt;
+	return cnt;
+}
+
+/**
+ * llc_build_offset_table - builds offset table of connection
+ *
+ * Fills offset table of connection state transition table
+ * (llc_offset_table).
+ */
+void __init llc_build_offset_table(void)
+{
+	struct llc_conn_state *curr_state;
+	int state, ev_type, next_offset;
+
+	memset(llc_offset_table, 0, sizeof(llc_offset_table));
+	for (state = 0; state < NBR_CONN_STATES; state++) {
+		curr_state = &llc_conn_state_table[state];
+		next_offset = 0;
+		for (ev_type = 0; ev_type < NBR_CONN_EV; ev_type++) {
+			llc_offset_table[state][ev_type] = next_offset;
+			next_offset += find_next_offset(curr_state,
+							next_offset) + 1;
+		}
+	}
+}
+
+/**
+ * llc_find_offset - finds start offset of category of transitions
+ * @state: state of connection
+ * @ev_type: type of happened event
+ *
+ * Finds start offset of desired category of transitions. Returns the
+ * desired start offset.
+ */
+static int llc_find_offset(int state, int ev_type)
+{
+	int rc = 0;
+	/* at this stage, llc_offset_table[..][2] is not important. it is for
+	 * init_pf_cycle and I don't know what it is.
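	 *
	 * Worked example: for an incoming PDU event this returns
	 * llc_offset_table[state][4], the start of the "receive"
	 * category; for the TEMP state of the connection state table
	 * that is index 5 of llc_temp_state_transitions, the entry
	 * commented "receive".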
+ */ + switch (ev_type) { + case LLC_CONN_EV_TYPE_PRIM: + rc = llc_offset_table[state][0]; break; + case LLC_CONN_EV_TYPE_PDU: + rc = llc_offset_table[state][4]; break; + case LLC_CONN_EV_TYPE_SIMPLE: + rc = llc_offset_table[state][1]; break; + case LLC_CONN_EV_TYPE_P_TMR: + case LLC_CONN_EV_TYPE_ACK_TMR: + case LLC_CONN_EV_TYPE_REJ_TMR: + case LLC_CONN_EV_TYPE_BUSY_TMR: + rc = llc_offset_table[state][3]; break; + } + return rc; +} diff -Nru a/net/llc/llc_evnt.c b/net/llc/llc_evnt.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_evnt.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,112 @@ +/* + * llc_evnt.c - LLC station component event match functions + * Description : + * Functions in this module are implementation of station component events. + * Details of events can be found in IEEE-802.2 standard document. + * All functions have one station and one event as input argument. All of + * them return 0 On success and 1 otherwise. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include + +int llc_stat_ev_enable_with_dup_addr_check(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + return ev->type == LLC_STATION_EV_TYPE_SIMPLE && + ev->data.a.ev == + LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK ? 0 : 1; +} + +int llc_stat_ev_enable_without_dup_addr_check(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + return ev->type == LLC_STATION_EV_TYPE_SIMPLE && + ev->data.a.ev == + LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK ? 0 : 1; +} + +int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + return ev->type == LLC_STATION_EV_TYPE_ACK_TMR && + station->retry_count < station->maximum_retry ? 0 : 1; +} + +int llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + return ev->type == LLC_STATION_EV_TYPE_ACK_TMR && + station->retry_count == station->maximum_retry ? 0 : 1; +} + +int llc_stat_ev_rx_null_dsap_xid_c(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_STATION_EV_TYPE_PDU && + !LLC_PDU_IS_CMD(pdu) && /* command PDU */ + !LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID && + !pdu->dsap ? 0 : 1; /* NULL DSAP value */ +} + +int llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_STATION_EV_TYPE_PDU && + !LLC_PDU_IS_RSP(pdu) && /* response PDU */ + !LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ + LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID && + !pdu->dsap && /* NULL DSAP value */ + !station->xid_r_count ? 
0 : 1; +} + +int llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_STATION_EV_TYPE_PDU && + !LLC_PDU_IS_RSP(pdu) && /* response PDU */ + !LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ + LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID && + !pdu->dsap && /* NULL DSAP value */ + station->xid_r_count == 1 ? 0 : 1; +} + +int llc_stat_ev_rx_null_dsap_test_c(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_STATION_EV_TYPE_PDU && + !LLC_PDU_IS_CMD(pdu) && /* command PDU */ + !LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST && + !pdu->dsap ? 0 : 1; /* NULL DSAP */ +} + +int llc_stat_ev_disable_req(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + return ev->type == LLC_STATION_EV_TYPE_PRIM && + ev->data.prim.prim == LLC_DISABLE_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 0 : 1; +} diff -Nru a/net/llc/llc_if.c b/net/llc/llc_if.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_if.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,530 @@ +/* + * llc_if.c - Defines LLC interface to upper layer + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int llc_sap_req(struct llc_prim_if_block *prim); +static int llc_unitdata_req_handler(struct llc_prim_if_block *prim); +static int llc_test_req_handler(struct llc_prim_if_block *prim); +static int llc_xid_req_handler(struct llc_prim_if_block *prim); +static int llc_data_req_handler(struct llc_prim_if_block *prim); +static int llc_conn_req_handler(struct llc_prim_if_block *prim); +static int llc_disc_req_handler(struct llc_prim_if_block *prim); +static int llc_rst_req_handler(struct llc_prim_if_block *prim); +static int llc_flowcontrol_req_handler(struct llc_prim_if_block *prim); +static int llc_sap_resp(struct llc_prim_if_block *prim); +static int llc_conn_rsp_handler(struct llc_prim_if_block *prim); +static int llc_rst_rsp_handler(struct llc_prim_if_block *prim); +static int llc_no_rsp_handler(struct llc_prim_if_block *prim); + +extern void llc_register_sap(unsigned char sap, + int (*rcvfunc)(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt)); +extern void llc_unregister_sap(unsigned char sap); + +/* table of request handler functions */ +static llc_prim_call_t llc_req_prim[LLC_NBR_PRIMITIVES] = { + [LLC_DATAUNIT_PRIM] = llc_unitdata_req_handler, + [LLC_CONN_PRIM] = llc_conn_req_handler, + [LLC_DATA_PRIM] = llc_data_req_handler, + [LLC_DISC_PRIM] = llc_disc_req_handler, + [LLC_RESET_PRIM] = llc_rst_req_handler, + [LLC_FLOWCONTROL_PRIM] = llc_flowcontrol_req_handler, + [LLC_XID_PRIM] = llc_xid_req_handler, + [LLC_TEST_PRIM] = llc_test_req_handler, +}; + +/* table of response handler functions */ +static llc_prim_call_t llc_resp_prim[LLC_NBR_PRIMITIVES] = { + [LLC_DATAUNIT_PRIM] = 
llc_no_rsp_handler,
+	[LLC_CONN_PRIM]		= llc_conn_rsp_handler,
+	[LLC_DATA_PRIM]		= llc_no_rsp_handler,
+	[LLC_DISC_PRIM]		= llc_no_rsp_handler,
+	[LLC_RESET_PRIM]	= llc_rst_rsp_handler,
+	[LLC_FLOWCONTROL_PRIM]	= llc_no_rsp_handler,
+};
+
+/**
+ * llc_sap_open - open interface to the upper layers.
+ * @nw_indicate: pointer to indicate function of upper layer.
+ * @nw_confirm: pointer to confirm function of upper layer.
+ * @lsap: SAP number.
+ *
+ * Interface function to the upper layer. Each user that wants to get a
+ * SAP (for example NetBEUI) should call this function. Returns the
+ * allocated SAP on success, %NULL on failure.
+ */
+struct llc_sap *llc_sap_open(llc_prim_call_t nw_indicate,
+			     llc_prim_call_t nw_confirm, u8 lsap)
+{
+	/* verify this SAP is not already open; if so, return error */
+	struct llc_sap *sap;
+
+	MOD_INC_USE_COUNT;
+	sap = llc_sap_find(lsap);
+	if (sap) { /* SAP already exists */
+		sap = NULL;
+		goto err;
+	}
+	/* sap requested does not yet exist */
+	sap = llc_sap_alloc();
+	if (!sap)
+		goto err;
+	/* allocated a SAP; initialize it and clear out its memory pool */
+	sap->laddr.lsap = lsap;
+	sap->req = llc_sap_req;
+	sap->resp = llc_sap_resp;
+	sap->ind = nw_indicate;
+	sap->conf = nw_confirm;
+	sap->parent_station = llc_station_get();
+	/* initialized SAP; add it to list of SAPs this station manages */
+	llc_sap_save(sap);
+	llc_register_sap(lsap, mac_indicate);
+out:
+	return sap;
+err:
+	MOD_DEC_USE_COUNT;
+	goto out;
+}
+
+/**
+ * llc_sap_close - close interface for upper layers.
+ * @sap: SAP to be closed.
+ *
+ * Close interface function to the upper layer. Each user that wants to
+ * close an open SAP (for example NetBEUI) should call this function.
+ */
+void llc_sap_close(struct llc_sap *sap)
+{
+	llc_unregister_sap(sap->laddr.lsap);
+	llc_free_sap(sap);
+	MOD_DEC_USE_COUNT;
+}
+
+/**
+ * llc_sap_req - Request interface for upper layers
+ * @prim: pointer to structure that contains service parameters.
+ *
+ * Request interface function to the upper layer. Each user that wants to
+ * request a service from LLC must call this function. Details of the
+ * requested service are defined in the input argument (prim). Returns 0
+ * for success, 1 otherwise.
+ */
+static int llc_sap_req(struct llc_prim_if_block *prim)
+{
+	int rc = 1;
+
+	if (prim->prim > 8 || prim->prim == 6) {
+		printk(KERN_ERR __FUNCTION__ ": invalid primitive %d\n",
+			prim->prim);
+		goto out;
+	}
+	/* receive REQUEST primitive from network layer; call the appropriate
+	 * primitive handler which then packages it up as an event and sends it
+	 * to the SAP or CONNECTION event handler
+	 */
+	if (prim->prim < LLC_NBR_PRIMITIVES)
+		/* valid primitive; call the function to handle it */
+		rc = llc_req_prim[prim->prim](prim);
+out:
+	return rc;
+}
+
+/**
+ * llc_unitdata_req_handler - unitdata request interface for upper layers
+ * @prim: pointer to structure that contains service parameters
+ *
+ * The upper layer calls this function when it wants to send data using
+ * connection-less mode communication (UI pdu). Returns 0 for
+ * success, 1 otherwise.
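+ *
+ * A calling sketch (hypothetical upper layer; prim, prim_data and
+ * my_lsap are the caller's own, filled in beforehand, following the
+ * pattern used by llc_rtn_all_conns() in llc_main.c):
+ *
+ *	prim.prim = LLC_DATAUNIT_PRIM;
+ *	prim.data = &prim_data;
+ *	prim_data.udata.saddr.lsap = my_lsap;
+ *	rc = sap->req(&prim);	/* dispatches to this handler */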
+ */
+static int llc_unitdata_req_handler(struct llc_prim_if_block *prim)
+{
+	int rc = 1;
+	struct llc_sap_state_ev *ev;
+	/* accept data frame from network layer to be sent using connection-
+	 * less mode communication; timeout/retries handled by network layer;
+	 * package primitive as an event and send to SAP event handler
+	 */
+	struct llc_sap *sap = llc_sap_find(prim->data->udata.saddr.lsap);
+
+	if (!sap)
+		goto out;
+	ev = llc_sap_alloc_ev(sap);
+	if (!ev)
+		goto out;
+	ev->type = LLC_SAP_EV_TYPE_PRIM;
+	ev->data.prim.prim = LLC_DATAUNIT_PRIM;
+	ev->data.prim.type = LLC_PRIM_TYPE_REQ;
+	ev->data.prim.data = prim;
+	rc = 0;
+	llc_sap_send_ev(sap, ev);
+out:
+	return rc;
+}
+
+/**
+ * llc_test_req_handler - TEST interface for upper layers.
+ * @prim: pointer to structure that contains service parameters.
+ *
+ * This function is called when the upper layer wants to send a TEST pdu.
+ * Returns 0 for success, 1 otherwise.
+ */
+static int llc_test_req_handler(struct llc_prim_if_block *prim)
+{
+	int rc = 1;
+	struct llc_sap_state_ev *ev;
+	/* package primitive as an event and send to SAP event handler */
+	struct llc_sap *sap = llc_sap_find(prim->data->udata.saddr.lsap);
+	if (!sap)
+		goto out;
+	ev = llc_sap_alloc_ev(sap);
+	if (!ev)
+		goto out;
+	ev->type = LLC_SAP_EV_TYPE_PRIM;
+	ev->data.prim.prim = LLC_TEST_PRIM;
+	ev->data.prim.type = LLC_PRIM_TYPE_REQ;
+	ev->data.prim.data = prim;
+	rc = 0;
+	llc_sap_send_ev(sap, ev);
+out:
+	return rc;
+}
+
+/**
+ * llc_xid_req_handler - XID interface for upper layers
+ * @prim: pointer to structure that contains service parameters.
+ *
+ * This function is called when the upper layer wants to send an XID pdu.
+ * Returns 0 for success, 1 otherwise.
+ */
+static int llc_xid_req_handler(struct llc_prim_if_block *prim)
+{
+	int rc = 1;
+	struct llc_sap_state_ev *ev;
+	/* package primitive as an event and send to SAP event handler */
+	struct llc_sap *sap = llc_sap_find(prim->data->udata.saddr.lsap);
+
+	if (!sap)
+		goto out;
+	ev = llc_sap_alloc_ev(sap);
+	if (!ev)
+		goto out;
+	ev->type = LLC_SAP_EV_TYPE_PRIM;
+	ev->data.prim.prim = LLC_XID_PRIM;
+	ev->data.prim.type = LLC_PRIM_TYPE_REQ;
+	ev->data.prim.data = prim;
+	rc = 0;
+	llc_sap_send_ev(sap, ev);
+out:
+	return rc;
+}
+
+/**
+ * llc_data_req_handler - Connection data sending for upper layers.
+ * @prim: pointer to structure that contains service parameters
+ *
+ * This function is called when the upper layer wants to send data using
+ * connection oriented communication mode. While sending data, the
+ * connection will be locked and received frames and expired timers will
+ * be queued. Returns 0 for success, -ECONNABORTED when the connection is
+ * already closed and -EBUSY when sending data is not permitted in this
+ * state or LLC has sent an I pdu with the p bit set to 1 and is waiting
+ * for its response.
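+ *
+ * A hypothetical caller sketch (illustrative only; requeue_and_retry
+ * and teardown stand in for the caller's own logic):
+ *
+ *	rc = sap->req(prim);		/* prim->prim == LLC_DATA_PRIM */
+ *	if (rc == -EBUSY)
+ *		requeue_and_retry(skb);	/* connection is flow controlled */
+ *	else if (rc == -ECONNABORTED)
+ *		teardown(sk);		/* connection is gone */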
+ */ +static int llc_data_req_handler(struct llc_prim_if_block *prim) +{ + struct llc_conn_state_ev *ev; + int rc = -ECONNABORTED; + /* accept data frame from network layer to be sent using connection + * mode communication; timeout/retries handled by this layer; + * package primitive as an event and send to connection event handler + */ + struct sock *sk = prim->data->data.sk; + struct llc_opt *llc = llc_sk(sk); + + lock_sock(sk); + if (llc->state == LLC_CONN_STATE_ADM) + goto out; + rc = -EBUSY; + if (llc_data_accept_state(llc->state)) { /* data_conn_refuse */ + llc->failed_data_req = 1; + goto out; + } + if (llc->p_flag) { + llc->failed_data_req = 1; + goto out; + } + rc = -ENOMEM; + ev = llc_conn_alloc_ev(sk); + if (ev) { + ev->type = LLC_CONN_EV_TYPE_PRIM; + ev->data.prim.prim = LLC_DATA_PRIM; + ev->data.prim.type = LLC_PRIM_TYPE_REQ; + ev->data.prim.data = prim; + prim->data->data.skb->dev = llc->dev; + rc = llc_conn_send_ev(sk, ev); + } +out: + release_sock(sk); + return rc; +} + +/** + * confirm_impossible - Informs upper layer about failed connection + * @prim: pointer to structure that contains confirmation data. + * + * Informs upper layer about failing in connection establishment. This + * function is called by llc_conn_req_handler. + */ +static void confirm_impossible(struct llc_prim_if_block *prim) +{ + prim->data->conn.status = LLC_STATUS_IMPOSSIBLE; + prim->sap->conf(prim); +} + +/** + * llc_conn_req_handler - Called by upper layer to establish a conn + * @prim: pointer to structure that contains service parameters. + * + * Upper layer calls this to establish an LLC connection with a remote + * machine. this function packages a proper event and sends it connection + * component state machine. Success or failure of connection + * establishment will inform to upper layer via calling it's confirm + * function and passing proper information. + */ +static int llc_conn_req_handler(struct llc_prim_if_block *prim) +{ + int rc = -EBUSY; + struct llc_opt *llc; + struct llc_sap *sap = prim->sap; + struct llc_conn_state_ev *ev; + struct net_device *ddev = mac_dev_peer(prim->data->conn.dev, + prim->data->conn.dev->type, + prim->data->conn.daddr.mac), + *sdev = (ddev->flags & IFF_LOOPBACK) ? 
+ ddev : prim->data->conn.dev; + struct llc_addr laddr, daddr; + /* network layer supplies addressing required to establish connection; + * package as an event and send it to the connection event handler + */ + struct sock *sk; + + memcpy(laddr.mac, sdev->dev_addr, sizeof(laddr.mac)); + laddr.lsap = prim->data->conn.saddr.lsap; + memcpy(daddr.mac, ddev->dev_addr, sizeof(daddr.mac)); + daddr.lsap = prim->data->conn.daddr.lsap; + sk = llc_find_sock(sap, &daddr, &laddr); + if (sk) { + confirm_impossible(prim); + goto out_put; + } + rc = -ENOMEM; + if (prim->data->conn.sk) { + sk = prim->data->conn.sk; + if (llc_sock_init(sk)) + goto out; + } else { + sk = llc_sock_alloc(); + if (!sk) { + confirm_impossible(prim); + goto out; + } + prim->data->conn.sk = sk; + } + sock_hold(sk); + lock_sock(sk); + /* assign new connection to it's SAP */ + llc_sap_assign_sock(sap, sk); + llc = llc_sk(sk); + memcpy(&llc->daddr, &daddr, sizeof(llc->daddr)); + memcpy(&llc->laddr, &laddr, sizeof(llc->laddr)); + llc->dev = ddev; + llc->link = prim->data->conn.link; + llc->handler = prim->data->conn.handler; + ev = llc_conn_alloc_ev(sk); + if (ev) { + ev->type = LLC_CONN_EV_TYPE_PRIM; + ev->data.prim.prim = LLC_CONN_PRIM; + ev->data.prim.type = LLC_PRIM_TYPE_REQ; + ev->data.prim.data = prim; + rc = llc_conn_send_ev(sk, ev); + } + if (rc) { + llc_sap_unassign_sock(sap, sk); + llc_sock_free(sk); + confirm_impossible(prim); + } + release_sock(sk); +out_put: + sock_put(sk); +out: + return rc; +} + +/** + * llc_disc_req_handler - Called by upper layer to close a connection + * @prim: pointer to structure that contains service parameters. + * + * Upper layer calls this when it wants to close an established LLC + * connection with a remote machine. this function packages a proper event + * and sends it to connection component state machine. Returns 0 for + * success, 1 otherwise. + */ +static int llc_disc_req_handler(struct llc_prim_if_block *prim) +{ + u16 rc = 1; + struct llc_conn_state_ev *ev; + struct sock* sk = prim->data->disc.sk; + + sock_hold(sk); + lock_sock(sk); + if (llc_sk(sk)->state == LLC_CONN_STATE_ADM || + llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) + goto out; + /* postpone unassigning the connection from its SAP and returning the + * connection until all ACTIONs have been completely executed + */ + ev = llc_conn_alloc_ev(sk); + if (!ev) + goto out; + ev->type = LLC_CONN_EV_TYPE_PRIM; + ev->data.prim.prim = LLC_DISC_PRIM; + ev->data.prim.type = LLC_PRIM_TYPE_REQ; + ev->data.prim.data = prim; + rc = llc_conn_send_ev(sk, ev); +out: + release_sock(sk); + sock_put(sk); + return rc; +} + +/** + * llc_rst_req_handler - Resets an established LLC connection + * @prim: pointer to structure that contains service parameters. + * + * Called when upper layer wants to reset an established LLC connection + * with a remote machine. this function packages a proper event and sends + * it to connection component state machine. Returns 0 for success, 1 + * otherwise. + */ +static int llc_rst_req_handler(struct llc_prim_if_block *prim) +{ + int rc = 1; + struct sock *sk = prim->data->res.sk; + struct llc_conn_state_ev *ev; + + lock_sock(sk); + ev = llc_conn_alloc_ev(sk); + if (ev) { + ev->type = LLC_CONN_EV_TYPE_PRIM; + ev->data.prim.prim = LLC_RESET_PRIM; + ev->data.prim.type = LLC_PRIM_TYPE_REQ; + ev->data.prim.data = prim; + rc = llc_conn_send_ev(sk, ev); + } + release_sock(sk); + return rc; +} + +/* We don't support flow control. 
The original code from procom has
+ * some bits, but for now I'm cleaning this
+ */
+static int llc_flowcontrol_req_handler(struct llc_prim_if_block *prim)
+{
+	return 1;
+}
+
+/**
+ * llc_sap_resp - Sends response to peer
+ * @prim: pointer to structure that contains service parameters
+ *
+ * This function is an interface function to the upper layer. Each user
+ * that wants to respond to an indication can call this function via
+ * sap_resp with proper service parameters. Returns 0 for success, 1
+ * otherwise.
+ */
+static int llc_sap_resp(struct llc_prim_if_block *prim)
+{
+	u16 rc = 1;
+	/* network layer RESPONSE primitive received; package primitive
+	 * as an event and send it to the connection event handler
+	 */
+	if (prim->prim < LLC_NBR_PRIMITIVES)
+		/* valid primitive; call the function to handle it */
+		rc = llc_resp_prim[prim->prim](prim);
+	return rc;
+}
+
+/**
+ * llc_conn_rsp_handler - Response to connect indication
+ * @prim: pointer to structure that contains response info.
+ *
+ * Response to connect indication.
+ */
+static int llc_conn_rsp_handler(struct llc_prim_if_block *prim)
+{
+	struct sock *sk = prim->data->conn.sk;
+
+	llc_sk(sk)->link = prim->data->conn.link;
+	return 0;
+}
+
+/**
+ * llc_rst_rsp_handler - Response to RESET indication
+ * @prim: pointer to structure that contains response info
+ *
+ * Returns 0 for success, 1 otherwise
+ */
+static int llc_rst_rsp_handler(struct llc_prim_if_block *prim)
+{
+	int rc = 1;
+	/* network layer supplies connection handle; map it to a connection;
+	 * package as event and send it to connection event handler
+	 */
+	struct sock *sk = prim->data->res.sk;
+	struct llc_conn_state_ev *ev = llc_conn_alloc_ev(sk);
+
+	if (ev) {
+		ev->type = LLC_CONN_EV_TYPE_PRIM;
+		ev->data.prim.prim = LLC_RESET_PRIM;
+		ev->data.prim.type = LLC_PRIM_TYPE_RESP;
+		ev->data.prim.data = prim;
+		rc = llc_conn_send_ev(sk, ev);
+	}
+	return rc;
+}
+
+static int llc_no_rsp_handler(struct llc_prim_if_block *prim)
+{
+	return 0;
+}
+
+EXPORT_SYMBOL(llc_sap_open);
+EXPORT_SYMBOL(llc_sap_close);
diff -Nru a/net/llc/llc_mac.c b/net/llc/llc_mac.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/net/llc/llc_mac.c	Tue Jun 18 19:12:03 2002
@@ -0,0 +1,317 @@
+/*
+ * llc_mac.c - Manages interface between LLC and MAC
+ *
+ * Copyright (c) 1997 by Procom Technology, Inc.
+ *		 2001 by Arnaldo Carvalho de Melo
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef CONFIG_TR
+extern void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh,
+			    struct net_device *dev);
+#endif
+/* function prototypes */
+static void fix_up_incoming_skb(struct sk_buff *skb);
+
+/**
+ * mac_send_pdu - Sends PDU to specific device.
+ * @skb: pdu which must be sent
+ *
+ * If the module is not initialized then it returns failure, else it
+ * figures out where to direct this PDU. Sends the PDU to a specific
+ * device; at this point a device must have been assigned to the PDU,
+ * otherwise it can't be transmitted. A PDU sent to the MAC layer is
+ * free to be re-sent at a later time. Returns
+ * 0 on success, -1 on failure.
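+ *
+ * Note that the skb is cloned before dev_queue_xmit(), so the caller
+ * keeps ownership of the original and may hand it to this function
+ * again for a retransmission (see llc_conn_send_pdus() in llc_conn.c).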
+ */ +int mac_send_pdu(struct sk_buff *skb) +{ + struct sk_buff *skb2; + int pri = GFP_ATOMIC, rc = -1; + + if (!skb->dev) { + printk(KERN_ERR __FUNCTION__ ": skb->dev == NULL!"); + goto out; + } + if (skb->sk) + pri = (int)skb->sk->priority; + skb2 = skb_clone(skb, pri); + if (!skb2) + goto out; + rc = 0; + dev_queue_xmit(skb2); +out: + return rc; +} + +/** + * mac_indicate - 802.2 entry point from net lower layers + * @skb: received pdu + * @dev: device that receive pdu + * @pt: packet type + * + * When the system receives a 802.2 frame this function is called. It + * checks SAP and connection of received pdu and passes frame to + * llc_pdu_router for sending to proper state machine. If frame is + * related to a busy connection (a connection is sending data now), + * function queues this frame in connection's backlog. + */ +int mac_indicate(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt) +{ + struct llc_sap *sap; + llc_pdu_sn_t *pdu; + u8 dest; + + /* When the interface is in promisc. mode, drop all the crap that it + * receives, do not try to analyse it. + */ + if (skb->pkt_type == PACKET_OTHERHOST) { + printk(KERN_INFO __FUNCTION__ ": PACKET_OTHERHOST\n"); + goto drop; + } + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto out; + fix_up_incoming_skb(skb); + pdu = (llc_pdu_sn_t *)skb->nh.raw; + if (!pdu->dsap) { /* NULL DSAP, refer to station */ + llc_pdu_router(NULL, NULL, skb, 0); + goto out; + } + sap = llc_sap_find(pdu->dsap); + if (!sap) /* unknown SAP */ + goto drop; + llc_decode_pdu_type(skb, &dest); + if (dest == LLC_DEST_SAP) /* type 1 services */ + llc_pdu_router(sap, NULL, skb, LLC_TYPE_1); + else if (dest == LLC_DEST_CONN) { + struct llc_addr saddr, daddr; + struct sock *sk; + + llc_pdu_decode_sa(skb, saddr.mac); + llc_pdu_decode_ssap(skb, &saddr.lsap); + llc_pdu_decode_da(skb, daddr.mac); + llc_pdu_decode_dsap(skb, &daddr.lsap); + + sk = llc_find_sock(sap, &saddr, &daddr); + if (!sk) { /* didn't find an active connection; allocate a + * connection to use; associate it with this SAP + */ + sk = llc_sock_alloc(); + if (!sk) + goto drop; + memcpy(&llc_sk(sk)->daddr, &saddr, sizeof(saddr)); + llc_sap_assign_sock(sap, sk); + sock_hold(sk); + } + bh_lock_sock(sk); + if (!sk->lock.users) { + /* FIXME: Check this on SMP as it is now calling + * llc_pdu_router _with_ the lock held. + * Old comment: + * With the current code one can't call + * llc_pdu_router with the socket lock held, cause + * it'll route the pdu to the upper layers and it can + * reenter llc and in llc_req_prim will try to grab + * the same lock, maybe we should use spin_trylock_bh + * in the llc_req_prim (llc_data_req_handler, etc) and + * add the request to the backlog, well see... + */ + llc_pdu_router(llc_sk(sk)->sap, sk, skb, LLC_TYPE_2); + bh_unlock_sock(sk); + } else { + skb->cb[0] = LLC_PACKET; + sk_add_backlog(sk, skb); + bh_unlock_sock(sk); + } + sock_put(sk); + } else /* unknown or not supported pdu */ + goto drop; +out: + return 0; +drop: + kfree_skb(skb); + goto out; +} + +/** + * fix_up_incoming_skb - initializes skb pointers + * @skb: This argument points to incoming skb + * + * Initializes internal skb pointer to start of network layer by deriving + * length of LLC header; finds length of LLC control field in LLC header + * by looking at the two lowest-order bits of the first control field + * byte; field is either 3 or 4 bytes long. 
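+ *
+ * For example: U format PDUs have the two low-order bits of the first
+ * control byte set to 11 and carry a one byte control field, so the
+ * header pulled below is DSAP + SSAP + 1 = 3 bytes; I and S PDUs use a
+ * two byte control field, giving a 4 byte header.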
+ */ +static void fix_up_incoming_skb(struct sk_buff *skb) +{ + u8 llc_len = 2; + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->data; + + if ((pdu->ctrl_1 & LLC_PDU_TYPE_MASK) == LLC_PDU_TYPE_U) + llc_len = 1; + llc_len += 2; + skb_pull(skb, llc_len); + if (skb->protocol == htons(ETH_P_802_2)) { + u16 pdulen = ((struct ethhdr *)skb->mac.raw)->h_proto, + data_size = ntohs(pdulen) - llc_len; + + skb_trim(skb, data_size); + } +} + +/** + * llc_pdu_router - routes received pdus to the upper layers + * @sap: current sap component structure. + * @sk: current connection structure. + * @frame: received frame. + * @type: type of received frame, that is LLC_TYPE_1 or LLC_TYPE_2 + * + * Queues received PDUs from LLC_MAC PDU receive queue until queue is + * empty; examines LLC header to determine the destination of PDU, if DSAP + * is NULL then data unit destined for station else frame destined for SAP + * or connection; finds a matching open SAP, if one, forwards the packet + * to it; if no matching SAP, drops the packet. Returns 0 or the return of + * llc_conn_send_ev (that may well result in the connection being + * destroyed) + */ +int llc_pdu_router(struct llc_sap *sap, struct sock* sk, + struct sk_buff *skb, u8 type) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + int rc = 0; + + if (!pdu->dsap) { + struct llc_station *station = llc_station_get(); + struct llc_station_state_ev *stat_ev = + llc_station_alloc_ev(station); + if (stat_ev) { + stat_ev->type = LLC_STATION_EV_TYPE_PDU; + stat_ev->data.pdu.skb = skb; + stat_ev->data.pdu.reason = 0; + llc_station_send_ev(station, stat_ev); + } + } else if (type == LLC_TYPE_1) { + struct llc_sap_state_ev *sap_ev = llc_sap_alloc_ev(sap); + + if (sap_ev) { + sap_ev->type = LLC_SAP_EV_TYPE_PDU; + sap_ev->data.pdu.skb = skb; + sap_ev->data.pdu.reason = 0; + llc_sap_send_ev(sap, sap_ev); + } + } else if (type == LLC_TYPE_2) { + struct llc_conn_state_ev *conn_ev = llc_conn_alloc_ev(sk); + struct llc_opt *llc = llc_sk(sk); + + if (!llc->dev) + llc->dev = skb->dev; + if (conn_ev) { + conn_ev->type = LLC_CONN_EV_TYPE_PDU; + conn_ev->data.pdu.skb = skb; + conn_ev->data.pdu.reason = 0; + rc = llc_conn_send_ev(sk, conn_ev); + } + } + return rc; +} + +/** + * lan_hdrs_init - fills MAC header fields + * @skb: Address of the frame to initialize its MAC header + * @sa: The MAC source address + * @da: The MAC destination address + * + * Fills MAC header fields, depending on MAC type. Returns 0, If MAC type + * is a valid type and initialization completes correctly 1, otherwise. 
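+ *
+ * Illustrative call (Ethernet case; llc is the connection's struct
+ * llc_opt, addresses are 6 byte arrays, free_skb is a hypothetical
+ * error label):
+ *
+ *	if (lan_hdrs_init(skb, llc->dev->dev_addr, llc->daddr.mac))
+ *		goto free_skb;	/* unsupported MAC type */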
+ */ +u16 lan_hdrs_init(struct sk_buff *skb, u8 *sa, u8 *da) +{ + u8 *saddr; + u8 *daddr; + u16 rc = 0; + + switch (skb->dev->type) { +#ifdef CONFIG_TR + case ARPHRD_IEEE802_TR: { + struct trh_hdr *trh = (struct trh_hdr *) + skb_push(skb, sizeof(*trh)); + struct net_device *dev = skb->dev; + + trh->ac = AC; + trh->fc = LLC_FRAME; + if (sa) + memcpy(trh->saddr, sa, dev->addr_len); + else + memset(trh->saddr, 0, dev->addr_len); + if (da) { + memcpy(trh->daddr, da, dev->addr_len); + tr_source_route(skb, trh, dev); + } + skb->mac.raw = skb->data; + break; + } +#endif + case ARPHRD_ETHER: + case ARPHRD_LOOPBACK: { + unsigned short len = skb->len; + + skb->mac.raw = skb_push(skb, sizeof(struct ethhdr)); + memset(skb->mac.raw, 0, sizeof(struct ethhdr)); + ((struct ethhdr *)skb->mac.raw)->h_proto = htons(len); + daddr = ((struct ethhdr *)skb->mac.raw)->h_dest; + saddr = ((struct ethhdr *)skb->mac.raw)->h_source; + memcpy(daddr, da, ETH_ALEN); + memcpy(saddr, sa, ETH_ALEN); + break; + } + default: + printk(KERN_WARNING "Unknown DEVICE type : %d\n", + skb->dev->type); + rc = 1; + } + return rc; +} + +/** + * mac_dev_peer - search the appropriate dev to send packets to peer + * @current_dev - Current device suggested by upper layer + * @type - hardware type + * @mac - mac address + * + * Check if the we should use loopback to send packets, i.e., if the + * dmac belongs to one of the local interfaces, returning the pointer + * to the loopback &net_device struct or the current_dev if it is not + * local. + */ +struct net_device *mac_dev_peer(struct net_device *current_dev, int type, + u8 *mac) +{ + struct net_device *dev; + + rtnl_lock(); + dev = dev_getbyhwaddr(type, mac); + if (dev) + dev = __dev_get_by_name("lo"); + rtnl_unlock(); + return dev ? : current_dev; +} diff -Nru a/net/llc/llc_main.c b/net/llc/llc_main.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_main.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,641 @@ +/* + * llc_main.c - This module contains main functions to manage station, saps + * and connections of the LLC. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. 
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* static function prototypes */
+static void llc_station_service_events(struct llc_station *station);
+static void llc_station_free_ev(struct llc_station *station,
+				struct llc_station_state_ev *ev);
+static void llc_station_send_pdus(struct llc_station *station);
+static u16 llc_station_next_state(struct llc_station *station,
+				  struct llc_station_state_ev *ev);
+static u16 llc_exec_station_trans_actions(struct llc_station *station,
+					  struct llc_station_state_trans *trans,
+					  struct llc_station_state_ev *ev);
+static struct llc_station_state_trans *
+	llc_find_station_trans(struct llc_station *station,
+			       struct llc_station_state_ev *ev);
+static int llc_rtn_all_conns(struct llc_sap *sap);
+
+extern void llc_register_sap(unsigned char sap,
+			     int (*rcvfunc)(struct sk_buff *skb,
+					    struct net_device *dev,
+					    struct packet_type *pt));
+extern void llc_unregister_sap(unsigned char sap);
+
+static struct llc_station llc_main_station;	/* only one of its kind */
+struct llc_prim_if_block llc_ind_prim, llc_cfm_prim;
+static union llc_u_prim_data llc_ind_data_prim, llc_cfm_data_prim;
+
+/**
+ * llc_sap_alloc - allocates and initializes sap.
+ *
+ * Allocates and initializes a sap.
+ */
+struct llc_sap *llc_sap_alloc(void)
+{
+	struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC);
+
+	if (sap) {
+		memset(sap, 0, sizeof(*sap));
+		sap->state = LLC_SAP_STATE_ACTIVE;
+		memcpy(sap->laddr.mac, llc_main_station.mac_sa, ETH_ALEN);
+		spin_lock_init(&sap->sk_list.lock);
+		INIT_LIST_HEAD(&sap->sk_list.list);
+		skb_queue_head_init(&sap->mac_pdu_q);
+	}
+	return sap;
+}
+
+/**
+ * llc_free_sap - frees a sap
+ * @sap: Address of the sap
+ *
+ * Frees all associated connections (if any), removes this sap from
+ * the list of saps in the station and then frees the memory for this sap.
+ */
+void llc_free_sap(struct llc_sap *sap)
+{
+	struct llc_station *station = sap->parent_station;
+
+	llc_rtn_all_conns(sap);
+	spin_lock_bh(&station->sap_list.lock);
+	list_del(&sap->node);
+	spin_unlock_bh(&station->sap_list.lock);
+	kfree(sap);
+}
+
+/**
+ * llc_sap_save - add sap to station list
+ * @sap: Address of the sap
+ *
+ * Adds a sap to the LLC's station sap list.
+ */
+void llc_sap_save(struct llc_sap *sap)
+{
+	spin_lock_bh(&llc_main_station.sap_list.lock);
+	list_add_tail(&sap->node, &llc_main_station.sap_list.list);
+	spin_unlock_bh(&llc_main_station.sap_list.lock);
+}
+
+/**
+ * llc_sap_find - searches for a SAP in station
+ * @sap_value: sap to be found
+ *
+ * Searches for a sap in the sap list of the LLC's station based on the
+ * sap ID. Returns the sap or %NULL if not found.
+ */
+struct llc_sap *llc_sap_find(u8 sap_value)
+{
+	struct llc_sap* sap = NULL;
+	struct list_head *entry;
+
+	spin_lock_bh(&llc_main_station.sap_list.lock);
+	list_for_each(entry, &llc_main_station.sap_list.list) {
+		sap = list_entry(entry, struct llc_sap, node);
+		if (sap->laddr.lsap == sap_value)
+			break;
+	}
+	if (entry == &llc_main_station.sap_list.list) /* not found */
+		sap = NULL;
+	spin_unlock_bh(&llc_main_station.sap_list.lock);
+	return sap;
+}
+
+/**
+ * llc_backlog_rcv - Processes rx frames and expired timers.
+ * @sk: LLC sock (p8022 connection)
+ * @skb: queued rx frame or event
+ *
+ * This function processes frames that were received and timers that
+ * expired while an I pdu was being sent (refer to data_req_handler).
+ * Frames are queued by the mac_indicate function (llc_mac.c) and timers
+ * by the timer callback functions (llc_c_ac.c).
+ */
+static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	int rc = 0;
+	struct llc_opt *llc = llc_sk(sk);
+
+	if (skb->cb[0] == LLC_PACKET) {
+		if (llc->state > 1) /* not closed */
+			rc = llc_pdu_router(llc->sap, sk, skb, LLC_TYPE_2);
+		else
+			kfree_skb(skb);
+	} else if (skb->cb[0] == LLC_EVENT) {
+		struct llc_conn_state_ev *ev =
+				(struct llc_conn_state_ev *)skb->data;
+		/* timer expiration event */
+		if (llc->state > 1) /* not closed */
+			rc = llc_conn_send_ev(sk, ev);
+		else
+			llc_conn_free_ev(ev);
+		kfree_skb(skb);
+	}
+	return rc;
+}
+
+/**
+ * llc_sock_init - Initialize a socket with default llc values.
+ * @sk: socket to initialize.
+ */
+int llc_sock_init(struct sock* sk)
+{
+	struct llc_opt *llc = kmalloc(sizeof(*llc), GFP_ATOMIC);
+	int rc = -ENOMEM;
+
+	if (!llc)
+		goto out;
+	memset(llc, 0, sizeof(*llc));
+	rc = 0;
+	llc->sk = sk;
+	llc->state = LLC_CONN_STATE_ADM;
+	llc->inc_cntr = llc->dec_cntr = 2;
+	llc->dec_step = llc->connect_step = 1;
+	llc->ack_timer.expire = LLC_ACK_TIME;
+	llc->pf_cycle_timer.expire = LLC_P_TIME;
+	llc->rej_sent_timer.expire = LLC_REJ_TIME;
+	llc->busy_state_timer.expire = LLC_BUSY_TIME;
+	llc->n2 = 2;	/* max retransmit */
+	llc->k = 2;	/* tx win size, will adjust dynam */
+	llc->rw = 128;	/* rx win size (opt and equal to
+			 * tx_win of remote LLC)
+			 */
+	skb_queue_head_init(&llc->pdu_unack_q);
+	sk->backlog_rcv = llc_backlog_rcv;
+	llc_sk(sk) = llc;
+out:
+	return rc;
+}
+
+/**
+ * __llc_sock_alloc - Allocates an LLC sock
+ *
+ * Allocates an LLC sock and initializes it. Returns the new LLC sock
+ * or %NULL if there's no memory available for one.
+ */
+struct sock *__llc_sock_alloc(void)
+{
+	struct sock *sk = sk_alloc(PF_LLC, GFP_ATOMIC, 1, NULL);
+
+	if (!sk)
+		goto out;
+	if (llc_sock_init(sk))
+		goto outsk;
+	sock_init_data(NULL, sk);
+out:
+	return sk;
+outsk:
+	sk_free(sk);
+	sk = NULL;
+	goto out;
+}
+
+/**
+ * __llc_sock_free - Frees an LLC socket
+ * @sk: socket to free
+ * @free: if nonzero, release the socket with sock_put()
+ *
+ * Frees an LLC socket.
+ */
+void __llc_sock_free(struct sock *sk, u8 free)
+{
+	struct llc_opt *llc = llc_sk(sk);
+
+	llc->state = LLC_CONN_OUT_OF_SVC;
+	/* stop all (possibly) running timers */
+	llc_conn_ac_stop_all_timers(sk, NULL);
+	/* handle return of frames on lists */
+	printk(KERN_INFO __FUNCTION__ ": unackq=%d, txq=%d\n",
+		skb_queue_len(&llc->pdu_unack_q),
+		skb_queue_len(&sk->write_queue));
+	skb_queue_purge(&sk->write_queue);
+	skb_queue_purge(&llc->pdu_unack_q);
+	if (free)
+		sock_put(sk);
+}
+
+/**
+ * llc_sock_reset - resets a connection
+ * @sk: LLC socket to reset
+ *
+ * Resets a connection to its initial state. Stops its timers
+ * and frees any frames in the queues of the connection.
+ */ +void llc_sock_reset(struct sock *sk) +{ + struct llc_opt *llc = llc_sk(sk); + + llc_conn_ac_stop_all_timers(sk, NULL); + skb_queue_purge(&sk->write_queue); + skb_queue_purge(&llc->pdu_unack_q); + llc->remote_busy_flag = 0; + llc->cause_flag = 0; + llc->retry_count = 0; + llc->p_flag = 0; + llc->f_flag = 0; + llc->s_flag = 0; + llc->ack_pf = 0; + llc->first_pdu_Ns = 0; + llc->ack_must_be_send = 0; + llc->dec_step = 1; + llc->inc_cntr = 2; + llc->dec_cntr = 2; + llc->X = 0; + llc->failed_data_req = 0 ; + llc->last_nr = 0; +} + +/** + * llc_rtn_all_conns - Closes all connections of a sap + * @sap: sap to close its connections + * + * Closes all connections of a sap. Returns 0 if all actions complete + * successfully, nonzero otherwise + */ +static int llc_rtn_all_conns(struct llc_sap *sap) +{ + int rc = 0; + union llc_u_prim_data prim_data; + struct llc_prim_if_block prim; + struct list_head *entry, *tmp; + + spin_lock_bh(&sap->sk_list.lock); + if (list_empty(&sap->sk_list.list)) + goto out; + list_for_each_safe(entry, tmp, &sap->sk_list.list) { + struct llc_opt *llc = list_entry(entry, struct llc_opt, node); + + prim.sap = sap; + prim_data.disc.sk = llc->sk; + prim.prim = LLC_DISC_PRIM; + prim.data = &prim_data; + llc->state = LLC_CONN_STATE_TEMP; + if (sap->req(&prim)) + rc = 1; + } +out: + spin_unlock_bh(&sap->sk_list.lock); + return rc; +} + +/** + * llc_station_get - get addr of global station. + * + * Returns address of a place to copy the global station to it. + */ +struct llc_station *llc_station_get(void) +{ + return &llc_main_station; +} + +/** + * llc_station_alloc_ev - allocates an event + * @station: Address of the station + * + * Allocates an event in this station. Returns the allocated event on + * success, %NULL otherwise. + */ +struct llc_station_state_ev *llc_station_alloc_ev(struct llc_station *station) +{ + struct llc_station_state_ev *ev = kmalloc(sizeof(*ev), GFP_ATOMIC); + + if (ev) + memset(ev, 0, sizeof(*ev)); + return ev; +} + +/** + * llc_station_send_ev: queue event and try to process queue. + * @station: Address of the station + * @ev: Address of the event + * + * Queues an event (on the station event queue) for handling by the + * station state machine and attempts to process any queued-up events. + */ +void llc_station_send_ev(struct llc_station *station, + struct llc_station_state_ev *ev) +{ + spin_lock_bh(&station->ev_q.lock); + list_add_tail(&ev->node, &station->ev_q.list); + llc_station_service_events(station); + spin_unlock_bh(&station->ev_q.lock); +} + +/** + * llc_station_send_pdu - queues PDU to send + * @station: Address of the station + * @skb: Address of the PDU + * + * Queues a PDU to send to the MAC layer. + */ +void llc_station_send_pdu(struct llc_station *station, struct sk_buff *skb) +{ + skb_queue_tail(&station->mac_pdu_q, skb); + llc_station_send_pdus(station); +} + +/** + * llc_station_send_pdus - tries to send queued PDUs + * @station: Address of the station + * + * Tries to send any PDUs queued in the station mac_pdu_q to the MAC + * layer. + */ +static void llc_station_send_pdus(struct llc_station *station) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&station->mac_pdu_q)) != NULL) { + int rc = mac_send_pdu(skb); + + kfree_skb(skb); + if (rc) + break; + } +} + +/** + * llc_station_free_ev - frees an event + * @station: Address of the station + * @event: Address of the event + * + * Frees an event. 
+ */
+static void llc_station_free_ev(struct llc_station *station,
+				struct llc_station_state_ev *ev)
+{
+	struct sk_buff *skb = ev->data.pdu.skb;
+
+	if (ev->type == LLC_STATION_EV_TYPE_PDU)
+		kfree_skb(skb);
+	kfree(ev);
+}
+
+/**
+ * llc_station_service_events - service events in the queue
+ * @station: Address of the station
+ *
+ * Get an event from the station event queue (if any); attempt to service
+ * it; if serviced, get the next event (if any) on the event queue; if
+ * not serviced, re-queue it on the event queue and attempt to service
+ * the next event; when all events in the queue have been serviced, we
+ * are finished; if no transition to a different state took place, events
+ * are serviced just once; if we transitioned to a new state, all events
+ * are serviced again. Caller must hold station->ev_q.lock.
+ */
+static void llc_station_service_events(struct llc_station *station)
+{
+	struct llc_station_state_ev *ev;
+	struct list_head *entry, *tmp;
+
+	list_for_each_safe(entry, tmp, &station->ev_q.list) {
+		ev = list_entry(entry, struct llc_station_state_ev, node);
+		list_del(&ev->node);
+		llc_station_next_state(station, ev);
+	}
+}
+
+/**
+ * llc_station_next_state - processes event and goes to the next state
+ * @station: Address of the station
+ * @ev: Address of the event
+ *
+ * Processes an event, executes any transitions related to that event and
+ * updates the state of the station.
+ */
+static u16 llc_station_next_state(struct llc_station *station,
+				  struct llc_station_state_ev *ev)
+{
+	u16 rc = 1;
+	struct llc_station_state_trans *trans;
+
+	if (station->state > LLC_NBR_STATION_STATES)
+		goto out;
+	trans = llc_find_station_trans(station, ev);
+	if (trans) {
+		/* got the state to which we next transition; perform the
+		 * actions associated with this transition before actually
+		 * transitioning to the next state
+		 */
+		rc = llc_exec_station_trans_actions(station, trans, ev);
+		if (!rc)
+			/* transition station to next state if all actions
+			 * execute successfully; done; wait for next event
+			 */
+			station->state = trans->next_state;
+	} else
+		/* event not recognized in current state; re-queue it for
+		 * processing again at a later time; return failure
+		 */
+		rc = 0;
+out:
+	llc_station_free_ev(station, ev);
+	return rc;
+}
+
+/**
+ * llc_find_station_trans - finds transition for this event
+ * @station: Address of the station
+ * @ev: Address of the event
+ *
+ * Searches through the events of the current state of the station until
+ * the list is exhausted or it is obvious that the event is not valid for
+ * the current state. Returns the address of the transition if found,
+ * %NULL otherwise.
+ */
+static struct llc_station_state_trans *
+	llc_find_station_trans(struct llc_station *station,
+			       struct llc_station_state_ev *ev)
+{
+	int i = 0;
+	struct llc_station_state_trans *rc = NULL;
+	struct llc_station_state_trans **next_trans;
+	struct llc_station_state *curr_state =
+				&llc_station_state_table[station->state - 1];
+
+	for (next_trans = curr_state->transitions; next_trans[i]->ev; i++)
+		if (!next_trans[i]->ev(station, ev)) {
+			rc = next_trans[i];
+			break;
+		}
+	return rc;
+}
+
+/**
+ * llc_exec_station_trans_actions - executes actions for transition
+ * @station: Address of the station
+ * @trans: Address of the transition
+ * @ev: Address of the event that caused the transition
+ *
+ * Executes the actions of a transition of the station state machine.
+ * Returns 0 if all actions complete successfully, nonzero otherwise.
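+ *
+ * Unlike llc_exec_conn_trans_actions() in llc_conn.c there is no
+ * special return value of 2 here: station actions cannot free the
+ * station, so a plain pass/fail result is enough.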
+ */ +static u16 llc_exec_station_trans_actions(struct llc_station *station, + struct llc_station_state_trans *trans, + struct llc_station_state_ev *ev) +{ + u16 rc = 0; + llc_station_action_t *next_action; + + for (next_action = trans->ev_actions; + next_action && *next_action; next_action++) + if ((*next_action)(station, ev)) + rc = 1; + return rc; +} + +/** + * llc_alloc_frame - allocates sk_buff for frame + * + * Allocates an sk_buff for frame and initializes sk_buff fields. + * Returns allocated skb or %NULL when out of memory. + */ +struct sk_buff *llc_alloc_frame(void) +{ + struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC); + + if (skb) { + skb_reserve(skb, 50); + skb->nh.raw = skb->h.raw = skb->data; + skb->protocol = htons(ETH_P_802_2); + skb->dev = dev_base->next; + skb->mac.raw = skb->head; + } + return skb; +} + +static int llc_proc_get_info(char *bf, char **start, off_t offset, int length) +{ + struct llc_opt *llc; + struct list_head *sap_entry, *llc_entry; + off_t begin = 0, pos = 0; + int len = 0; + + spin_lock_bh(&llc_main_station.sap_list.lock); + list_for_each(sap_entry, &llc_main_station.sap_list.list) { + struct llc_sap *sap = list_entry(sap_entry, struct llc_sap, + node); + + len += sprintf(bf + len, "lsap=%d\n", sap->laddr.lsap); + spin_lock_bh(&sap->sk_list.lock); + if (list_empty(&sap->sk_list.list)) { + len += sprintf(bf + len, "no connections\n"); + goto unlock; + } + len += sprintf(bf + len, + "connection list:\nstate retr txwin rxwin\n"); + list_for_each(llc_entry, &sap->sk_list.list) { + llc = list_entry(llc_entry, struct llc_opt, node); + len += sprintf(bf + len, " %-5d%-5d%-6d%-5d\n", + llc->state, llc->retry_count, llc->k, + llc->rw); + } +unlock: + spin_unlock_bh(&sap->sk_list.lock); + pos = begin + len; + if (pos < offset) { + len = 0; /* Keep dumping into the buffer start */ + begin = pos; + } + if (pos > offset + length) /* We have dumped enough */ + break; + } + spin_unlock_bh(&llc_main_station.sap_list.lock); + + /* The data in question runs from begin to begin + len */ + *start = bf + (offset - begin); /* Start of wanted data */ + len -= (offset - begin); /* Remove unwanted header data from length */ + return len; +} + +static char llc_banner[] __initdata = + KERN_INFO "LLC 2.0 by Procom, 1997, Arnaldo C. 
Melo, 2001\n" + KERN_INFO "NET4.0 IEEE 802.2 extended support\n"; +static char llc_error_msg[] __initdata = + KERN_ERR "LLC install NOT successful.\n"; + +static int __init llc_init(void) +{ + u16 rc = 0; + struct llc_station_state_ev *ev; + + printk(llc_banner); + INIT_LIST_HEAD(&llc_main_station.ev_q.list); + spin_lock_init(&llc_main_station.ev_q.lock); + INIT_LIST_HEAD(&llc_main_station.sap_list.list); + spin_lock_init(&llc_main_station.sap_list.lock); + skb_queue_head_init(&llc_main_station.mac_pdu_q); + ev = kmalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) + goto err; + memset(ev, 0, sizeof(*ev)); + if(dev_base->next) + memcpy(llc_main_station.mac_sa, dev_base->next->dev_addr, ETH_ALEN); + else + memset(llc_main_station.mac_sa, 0, ETH_ALEN); + llc_main_station.ack_timer.expires = jiffies + 3 * HZ; + /* initialize the station component */ + llc_register_sap(0, mac_indicate); + llc_main_station.maximum_retry = 1; + llc_main_station.state = LLC_STATION_STATE_DOWN; + ev->type = LLC_STATION_EV_TYPE_SIMPLE; + ev->data.a.ev = LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK; + rc = llc_station_next_state(&llc_main_station, ev); + llc_build_offset_table(); + llc_ind_prim.data = &llc_ind_data_prim; + llc_cfm_prim.data = &llc_cfm_data_prim; + proc_net_create("802.2", 0, llc_proc_get_info); + llc_ui_init(); +out: + return rc; +err: + printk(llc_error_msg); + rc = 1; + goto out; +} + +static void __exit llc_exit(void) +{ + llc_ui_exit(); + llc_unregister_sap(0); + proc_net_remove("802.2"); +} + +module_init(llc_init); +module_exit(llc_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Procom, 1997, Arnaldo C. Melo, Jay Schullist, 2001"); +MODULE_DESCRIPTION("LLC 2.0, NET4.0 IEEE 802.2 extended support"); diff -Nru a/net/llc/llc_pdu.c b/net/llc/llc_pdu.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_pdu.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,654 @@ +/* + * llc_pdu.c - access to PDU internals + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include + +static int llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type); +static int llc_get_llc_hdr_length(u8 pdu_type); +static u8 llc_pdu_get_pf_bit(llc_pdu_sn_t *pdu); + +/** + * llc_pdu_header_init - initializes pdu header + * @skb: input skb that header must be set into it. + * @pdu_type: type of PDU (U, I or S). + * @ssap: source sap. + * @dsap: destination sap. + * @cr: command/response bit (0 or 1). + * + * This function sets DSAP, SSAP and command/Response bit in LLC header. + */ +void llc_pdu_header_init(struct sk_buff *skb, u8 pdu_type, u8 ssap, + u8 dsap, u8 cr) +{ + llc_pdu_un_t *p; + + skb->nh.raw = skb_push(skb, llc_get_llc_hdr_length(pdu_type)); + p = (llc_pdu_un_t *)skb->nh.raw; + p->dsap = dsap; + p->ssap = ssap; + p->ssap |= cr; +} + +void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type) +{ + ((llc_pdu_un_t *)skb->nh.raw)->ssap |= pdu_type; +} + +/** + * pdu_set_pf_bit - sets poll/final bit in LLC header + * @pdu_frame: input frame that p/f bit must be set into it. + * @bit_value: poll/final bit (0 or 1). + * + * This function sets poll/final bit in LLC header (based on type of PDU). 
+ * In I or S PDUs the p/f bit is the rightmost bit of the fourth header
+ * byte; in U PDUs it is the fifth bit of the third byte.
+ */
+void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value)
+{
+	u8 pdu_type;
+
+	if (llc_pdu_decode_pdu_type(skb, &pdu_type))
+		goto out;
+	switch (pdu_type) {
+	case LLC_PDU_TYPE_I:
+	case LLC_PDU_TYPE_S:
+		((llc_pdu_sn_t *)skb->nh.raw)->ctrl_2 =
+			(((llc_pdu_sn_t *)skb->nh.raw)->ctrl_2 & 0xFE) |
+			bit_value;
+		break;
+	case LLC_PDU_TYPE_U:
+		((llc_pdu_un_t *)skb->nh.raw)->ctrl_1 =
+			(((llc_pdu_un_t *)skb->nh.raw)->ctrl_1 & 0xEF) |
+			(bit_value << 4);
+		break;
+	}
+out:;
+}
+
+/**
+ * llc_pdu_decode_pf_bit - extracts poll/final bit from LLC header
+ * @skb: input skb from which the p/f bit is extracted
+ * @pf_bit: poll/final bit (0 or 1)
+ *
+ * This function extracts the poll/final bit from the LLC header (based
+ * on the type of PDU). In I or S PDUs the p/f bit is the rightmost bit
+ * of the fourth header byte; in U PDUs it is the fifth bit of the
+ * third byte.
+ */
+int llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit)
+{
+	u8 pdu_type;
+	int rc = llc_pdu_decode_pdu_type(skb, &pdu_type);
+
+	if (rc)
+		goto out;
+	switch (pdu_type) {
+	case LLC_PDU_TYPE_I:
+	case LLC_PDU_TYPE_S:
+		*pf_bit = ((llc_pdu_sn_t *)skb->nh.raw)->ctrl_2 &
+			  LLC_S_PF_BIT_MASK;
+		break;
+	case LLC_PDU_TYPE_U:
+		*pf_bit = (((llc_pdu_un_t *)skb->nh.raw)->ctrl_1 &
+			   LLC_U_PF_BIT_MASK) >> 4;
+		break;
+	}
+out:
+	return rc;
+}
+
+/**
+ * llc_pdu_decode_cr_bit - extracts command/response bit from LLC header
+ * @skb: input skb from which the c/r bit is extracted.
+ * @cr_bit: command/response bit (0 or 1).
+ *
+ * This function extracts the command/response bit from the LLC header.
+ * This bit is the rightmost bit of the SSAP.
+ */
+int llc_pdu_decode_cr_bit(struct sk_buff *skb, u8 *cr_bit)
+{
+	*cr_bit = ((llc_pdu_un_t *)skb->nh.raw)->ssap & LLC_PDU_CMD_RSP_MASK;
+	return 0;
+}
+
+/**
+ * llc_pdu_decode_sa - extracts source address (MAC) of input frame
+ * @skb: input skb from which the source address is extracted.
+ * @sa: pointer to source address (6 byte array).
+ *
+ * This function extracts the source address (MAC) of the input frame.
+ */
+int llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+{
+	if (skb->protocol == ntohs(ETH_P_802_2))
+		memcpy(sa, ((struct ethhdr *)skb->mac.raw)->h_source, ETH_ALEN);
+	else if (skb->protocol == ntohs(ETH_P_TR_802_2))
+		memcpy(sa, ((struct trh_hdr *)skb->mac.raw)->saddr, ETH_ALEN);
+	return 0;
+}
+
+/**
+ * llc_pdu_decode_da - extracts dest address of input frame
+ * @skb: input skb from which the destination address is extracted
+ * @da: pointer to destination address (6 byte array).
+ *
+ * This function extracts the destination address (MAC) of the input
+ * frame.
+ */
+int llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
+{
+	if (skb->protocol == ntohs(ETH_P_802_2))
+		memcpy(da, ((struct ethhdr *)skb->mac.raw)->h_dest, ETH_ALEN);
+	else if (skb->protocol == ntohs(ETH_P_TR_802_2))
+		memcpy(da, ((struct trh_hdr *)skb->mac.raw)->daddr, ETH_ALEN);
+	return 0;
+}
+
+/**
+ * llc_pdu_decode_dsap - extracts dest SAP of input frame
+ * @skb: input skb from which the destination SAP is extracted.
+ * @dsap: destination SAP (output argument).
+ *
+ * This function extracts the destination SAP of the input frame. The
+ * rightmost bit of the DSAP designates an individual or group SAP.
+ */
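The masking above is easier to check on raw bytes. Below is a minimal
user-space sketch of the same arithmetic; the mask values 0x01 for
LLC_S_PF_BIT_MASK and 0x10 for LLC_U_PF_BIT_MASK are assumptions
inferred from the >> 4 shifts in this file, not copied from the headers.

#include <stdio.h>
#include <stdint.h>

#define S_PF_MASK 0x01	/* assumed value of LLC_S_PF_BIT_MASK */
#define U_PF_MASK 0x10	/* assumed value of LLC_U_PF_BIT_MASK */

/* Set p/f in the second control byte of an I/S PDU (rightmost bit). */
static uint8_t set_pf_is(uint8_t ctrl_2, int pf)
{
	return (ctrl_2 & 0xFE) | (pf & S_PF_MASK);
}

/* Set p/f in the single control byte of a U PDU (fifth bit). */
static uint8_t set_pf_u(uint8_t ctrl_1, int pf)
{
	return (ctrl_1 & ~U_PF_MASK) | ((pf & 1) << 4);
}

int main(void)
{
	printf("I/S: %02x\n", set_pf_is(0xAC, 1));	/* -> ad */
	printf("U:   %02x\n", set_pf_u(0x03, 1));	/* -> 13 */
	return 0;
}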
+int llc_pdu_decode_dsap(struct sk_buff *skb, u8 *dsap)
+{
+	*dsap = ((llc_pdu_un_t *)skb->nh.raw)->dsap & 0xFE;
+	return 0;
+}
+
+/**
+ * llc_pdu_decode_ssap - extracts source SAP of input frame
+ * @skb: input skb from which the source SAP is extracted.
+ * @ssap: source SAP (output argument).
+ *
+ * This function extracts the source SAP of the input frame. The
+ * rightmost bit of the SSAP is the command/response bit.
+ */
+int llc_pdu_decode_ssap(struct sk_buff *skb, u8 *ssap)
+{
+	*ssap = ((llc_pdu_un_t *)skb->nh.raw)->ssap & 0xFE;
+	return 0;
+}
+
+/**
+ * llc_pdu_init_as_ui_cmd - sets LLC header as UI PDU
+ * @skb: input skb that the header is built into.
+ *
+ * This function sets the third byte of the LLC header as a UI PDU.
+ */
+int llc_pdu_init_as_ui_cmd(struct sk_buff *skb)
+{
+	llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw;
+
+	pdu->ctrl_1 = LLC_PDU_TYPE_U;
+	pdu->ctrl_1 |= LLC_1_PDU_CMD_UI;
+	return 0;
+}
+
+/**
+ * llc_pdu_init_as_xid_cmd - sets bytes 3 thru 6 of LLC header as XID
+ * @skb: input skb that the header is built into.
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
+ *
+ * This function sets the third, fourth, fifth and sixth bytes of the
+ * LLC header as an XID PDU.
+ */
+int llc_pdu_init_as_xid_cmd(struct sk_buff *skb, u8 svcs_supported,
+			    u8 rx_window)
+{
+	llc_xid_info_t *xid_info;
+	llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw;
+
+	pdu->ctrl_1 = LLC_PDU_TYPE_U;
+	pdu->ctrl_1 |= LLC_1_PDU_CMD_XID;
+	pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+	xid_info = (llc_xid_info_t *)(((u8 *)&pdu->ctrl_1) + 1);
+	xid_info->fmt_id = LLC_XID_FMT_ID;	/* 0x81 */
+	xid_info->type	 = svcs_supported;
+	xid_info->rw	 = rx_window << 1;	/* size of receive window */
+	skb_put(skb, 3);
+	return 0;
+}
+
+/**
+ * llc_pdu_init_as_test_cmd - sets PDU as TEST
+ * @skb: Address of the skb to build
+ *
+ * Sets a PDU as TEST
+ */
+int llc_pdu_init_as_test_cmd(struct sk_buff *skb)
+{
+	llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw;
+
+	pdu->ctrl_1 = LLC_PDU_TYPE_U;
+	pdu->ctrl_1 |= LLC_1_PDU_CMD_TEST;
+	pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
+	return 0;
+}
+
+/**
+ * llc_pdu_init_as_disc_cmd - Builds DISC PDU
+ * @skb: Address of the skb to build
+ * @p_bit: The P bit to set in the PDU
+ *
+ * Builds a pdu frame as a DISC command.
+ */
+int llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit)
+{
+	llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw;
+
+	pdu->ctrl_1 = LLC_PDU_TYPE_U;
+	pdu->ctrl_1 |= LLC_2_PDU_CMD_DISC;
+	pdu->ctrl_1 |= ((p_bit & 1) << 4) & LLC_U_PF_BIT_MASK;
+	return 0;
+}
+
+/**
+ * llc_pdu_init_as_i_cmd - builds I pdu
+ * @skb: Address of the skb to build
+ * @p_bit: The P bit to set in the PDU
+ * @ns: The sequence number of the data PDU
+ * @nr: The seq. number of the expected I PDU from the remote
+ *
+ * Builds a pdu frame as an I command.
+ */
+int llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr)
+{
+	llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw;
+
+	pdu->ctrl_1 = LLC_PDU_TYPE_I;
+	pdu->ctrl_2 = 0;
+	pdu->ctrl_2 |= (p_bit & LLC_I_PF_BIT_MASK); /* p/f bit */
+	pdu->ctrl_1 |= (ns << 1) & 0xFE; /* set N(S) in bits 2..8 */
+	pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
+	return 0;
+}
+
+/**
+ * llc_pdu_init_as_rej_cmd - builds REJ PDU
+ * @skb: Address of the skb to build
+ * @p_bit: The P bit to set in the PDU
+ * @nr: The seq. number of the expected I PDU from the remote
+ *
+ * Builds a pdu frame as a REJ command.
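The I-format control field packed by llc_pdu_init_as_i_cmd above puts
N(S) in the upper seven bits of the first control byte and N(R) in the
upper seven bits of the second; the low bits hold the I-PDU marker and
the P/F bit. A small self-checking sketch of that packing, assuming
LLC_PDU_TYPE_I expands to 0, which the bit-0 test in
llc_pdu_decode_pdu_type suggests:

#include <assert.h>
#include <stdint.h>

struct i_ctrl { uint8_t ctrl_1, ctrl_2; };

/* Pack an I-format control field the way llc_pdu_init_as_i_cmd does. */
static struct i_ctrl pack_i(uint8_t ns, uint8_t nr, int p)
{
	struct i_ctrl c;

	c.ctrl_1 = 0;			/* LLC_PDU_TYPE_I: bit 0 clear */
	c.ctrl_1 |= (ns << 1) & 0xFE;	/* N(S) in bits 2..8 */
	c.ctrl_2 = p & 0x01;		/* P/F in bit 9 */
	c.ctrl_2 |= (nr << 1) & 0xFE;	/* N(R) in bits 10..16 */
	return c;
}

int main(void)
{
	struct i_ctrl c = pack_i(5, 3, 1);

	assert(c.ctrl_1 == 0x0A && c.ctrl_2 == 0x07);
	/* unpacking is the reverse shift */
	assert((c.ctrl_1 >> 1) == 5 && (c.ctrl_2 >> 1) == 3);
	return 0;
}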
+ */ +int llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_S; + pdu->ctrl_1 |= LLC_2_PDU_CMD_REJ; + pdu->ctrl_2 = 0; + pdu->ctrl_2 |= p_bit & LLC_S_PF_BIT_MASK; + pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ + pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ + return 0; +} + +/** + * pdu_init_as_rnr_cmd - builds RNR pdu + * @skb: Address of the skb to build + * @p_bit: The P bit to set in the PDU + * @nr: The seq. number of the expected I PDU from the remote + * + * Builds a pdu frame as an RNR command. + */ +int llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_S; + pdu->ctrl_1 |= LLC_2_PDU_CMD_RNR; + pdu->ctrl_2 = 0; + pdu->ctrl_2 |= p_bit & LLC_S_PF_BIT_MASK; + pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ + pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ + return 0; +} + +/** + * pdu_init_as_rr_cmd - Builds RR pdu + * @skb: Address of the skb to build + * @p_bit: The P bit to set in the PDU + * @nr: The seq. number of the expected I PDU from the remote + * + * Builds a pdu frame as an RR command. + */ +int llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_S; + pdu->ctrl_1 |= LLC_2_PDU_CMD_RR; + pdu->ctrl_2 = p_bit & LLC_S_PF_BIT_MASK; + pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ + pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ + return 0; +} + +/** + * pdu_init_as_sabme_cmd - builds SABME pdu + * @skb: Address of the skb to build + * @p_bit: The P bit to set in the PDU + * + * Builds a pdu frame as an SABME command. + */ +int llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_U; + pdu->ctrl_1 |= LLC_2_PDU_CMD_SABME; + pdu->ctrl_1 |= ((p_bit & 1) << 4) & LLC_U_PF_BIT_MASK; + return 0; +} + +/** + * pdu_init_as_dm_rsp - builds DM response pdu + * @skb: Address of the skb to build + * @f_bit: The F bit to set in the PDU + * + * Builds a pdu frame as a DM response. + */ +int llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_U; + pdu->ctrl_1 |= LLC_2_PDU_RSP_DM; + pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK; + return 0; +} + +/** + * pdu_init_as_xid_rsp - builds XID response PDU + * @skb: Address of the skb to build + * @svcs_supported: The class of the LLC (I or II) + * @rx_window: The size of the receive window of the LLC + * + * Builds a pdu frame as an XID response. + */ +int llc_pdu_init_as_xid_rsp(struct sk_buff *skb, u8 svcs_supported, + u8 rx_window) +{ + llc_xid_info_t *xid_info; + llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_U; + pdu->ctrl_1 |= LLC_1_PDU_CMD_XID; + pdu->ctrl_1 |= LLC_U_PF_BIT_MASK; + + xid_info = (llc_xid_info_t *)(((u8 *)&pdu->ctrl_1) + 1); + xid_info->fmt_id = LLC_XID_FMT_ID; + xid_info->type = svcs_supported; + xid_info->rw = rx_window << 1; + skb_put(skb, 3); + return 0; +} + +/** + * pdu_init_as_test_rsp - build TEST response PDU + * @skb: Address of the skb to build + * @ev_skb: The received TEST command PDU frame + * + * Builds a pdu frame as a TEST response. 
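llc_pdu_init_as_xid_rsp above, like its _cmd twin, appends a three-byte
XID information field after the U control byte. A sketch of just that
field; the 0x81 format identifier comes from the /* 0x81 */ comment in
this file, and reading the rw << 1 shift as "low bit reserved" is my
interpretation:

#include <stdio.h>
#include <stdint.h>

/* Build the 3-byte XID information field that follows the U control
 * byte: format identifier, services byte, receive window shifted
 * left by one.
 */
static void build_xid_info(uint8_t info[3], uint8_t svcs, uint8_t rw)
{
	info[0] = 0x81;		/* LLC_XID_FMT_ID */
	info[1] = svcs;		/* class of services supported */
	info[2] = rw << 1;	/* receive window, low bit unused */
}

int main(void)
{
	uint8_t info[3];

	build_xid_info(info, 0x01, 7);	/* hypothetical class/window */
	printf("%02x %02x %02x\n", info[0], info[1], info[2]);
	return 0;
}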
+ */ +int llc_pdu_init_as_test_rsp(struct sk_buff *skb, struct sk_buff *ev_skb) +{ + int dsize; + llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_U; + pdu->ctrl_1 |= LLC_1_PDU_CMD_TEST; + pdu->ctrl_1 |= LLC_U_PF_BIT_MASK; + if (ev_skb->protocol == ntohs(ETH_P_802_2)) { + dsize = ntohs(((struct ethhdr *)ev_skb->mac.raw)->h_proto) - 3; + memcpy(((u8 *)skb->nh.raw) + 3, + ((u8 *)ev_skb->nh.raw) + 3, dsize); + skb_put(skb, dsize); + } + return 0; +} + +/** + * pdu_init_as_frmr_rsp - builds FRMR response PDU + * @pdu_frame: Address of the frame to build + * @prev_pdu: The rejected PDU frame + * @f_bit: The F bit to set in the PDU + * @vs: tx state vari value for the data link conn at the rejecting LLC + * @vr: rx state var value for the data link conn at the rejecting LLC + * @vzyxw: completely described in the IEEE Std 802.2 document (Pg 55) + * + * Builds a pdu frame as a FRMR response. + */ +int llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, llc_pdu_sn_t *prev_pdu, + u8 f_bit, u8 vs, u8 vr, u8 vzyxw) +{ + llc_frmr_info_t *frmr_info; + u8 prev_pf = 0; + u8 *ctrl; + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_U; + pdu->ctrl_1 |= LLC_2_PDU_RSP_FRMR; + pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK; + + frmr_info = (llc_frmr_info_t *)&pdu->ctrl_2; + ctrl = (u8 *)&prev_pdu->ctrl_1; + FRMR_INFO_SET_REJ_CNTRL(frmr_info,ctrl); + FRMR_INFO_SET_Vs(frmr_info, vs); + FRMR_INFO_SET_Vr(frmr_info, vr); + prev_pf = llc_pdu_get_pf_bit(prev_pdu); + FRMR_INFO_SET_C_R_BIT(frmr_info, prev_pf); + FRMR_INFO_SET_INVALID_PDU_CTRL_IND(frmr_info, vzyxw); + FRMR_INFO_SET_INVALID_PDU_INFO_IND(frmr_info, vzyxw); + FRMR_INFO_SET_PDU_INFO_2LONG_IND(frmr_info, vzyxw); + FRMR_INFO_SET_PDU_INVALID_Nr_IND(frmr_info, vzyxw); + FRMR_INFO_SET_PDU_INVALID_Ns_IND(frmr_info, vzyxw); + skb_put(skb, 5); + return 0; +} + +/** + * pdu_init_as_rr_rsp - builds RR response pdu + * @skb: Address of the skb to build + * @f_bit: The F bit to set in the PDU + * @nr: The seq. number of the expected data PDU from the remote + * + * Builds a pdu frame as an RR response. + */ +int llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_S; + pdu->ctrl_1 |= LLC_2_PDU_RSP_RR; + pdu->ctrl_2 = 0; + pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK; + pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ + pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ + return 0; +} + +/** + * pdu_init_as_rej_rsp - builds REJ response pdu + * @skb: Address of the skb to build + * @f_bit: The F bit to set in the PDU + * @nr: The seq. number of the expected data PDU from the remote + * + * Builds a pdu frame as a REJ response. + */ +int llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_S; + pdu->ctrl_1 |= LLC_2_PDU_RSP_REJ; + pdu->ctrl_2 = 0; + pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK; + pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ + pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ + return 0; +} + +/** + * pdu_init_as_rnr_rsp - builds RNR response pdu + * @pdu_frame: Address of the frame to build + * @f_bit: The F bit to set in the PDU + * @nr: The seq. number of the expected data PDU from the remote + * + * Builds a pdu frame as an RNR response. 
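The dsize computation in llc_pdu_init_as_test_rsp above works because,
for 802.2 frames, ethhdr.h_proto carries the 802.3 payload length
rather than an ethertype, so the information field to echo back is
that length minus the three-byte U-format header. The arithmetic in
isolation:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* For an 802.2 frame the "protocol" field of the Ethernet header is
 * really the 802.3 length (<= 1500); the TEST information field is
 * whatever follows the 3-byte LLC header.
 */
static int test_info_len(uint16_t h_proto_be)
{
	return ntohs(h_proto_be) - 3;
}

int main(void)
{
	/* hypothetical 46-byte 802.3 payload */
	printf("info bytes to echo: %d\n", test_info_len(htons(46)));
	return 0;
}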
+ */ +int llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) +{ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_S; + pdu->ctrl_1 |= LLC_2_PDU_RSP_RNR; + pdu->ctrl_2 = 0; + pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK; + pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ + pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ + return 0; +} + +/** + * pdu_init_as_ua_rsp - builds UA response pdu + * @skb: Address of the frame to build + * @f_bit: The F bit to set in the PDU + * + * Builds a pdu frame as a UA response. + */ +int llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw; + + pdu->ctrl_1 = LLC_PDU_TYPE_U; + pdu->ctrl_1 |= LLC_2_PDU_RSP_UA; + pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK; + return 0; +} + +/** + * llc_pdu_decode_pdu_type - designates PDU type + * @skb: input skb that type of it must be designated. + * @type: type of PDU (output argument). + * + * This function designates type of PDU (I,S or U). + */ +static int llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)skb->nh.raw; + + if (pdu->ctrl_1 & 1) { + if ((pdu->ctrl_1 & LLC_PDU_TYPE_U) == LLC_PDU_TYPE_U) + *type = LLC_PDU_TYPE_U; + else + *type = LLC_PDU_TYPE_S; + } else + *type = LLC_PDU_TYPE_I; + return 0; +} + +/** + * llc_decode_pdu_type - designates component LLC must handle for PDU + * @skb: input skb + * @dest: destination component + * + * This function designates which component of LLC must handle this PDU. + */ +int llc_decode_pdu_type(struct sk_buff *skb, u8 *dest) +{ + u8 type = LLC_DEST_CONN; /* I-PDU or S-PDU type */ + llc_pdu_sn_t *pdu = (llc_pdu_sn_t *)skb->nh.raw; + + if ((pdu->ctrl_1 & LLC_PDU_TYPE_MASK) != LLC_PDU_TYPE_U) + goto out; + switch (LLC_U_PDU_CMD(pdu)) { + case LLC_1_PDU_CMD_XID: + case LLC_1_PDU_CMD_UI: + case LLC_1_PDU_CMD_TEST: + type = LLC_DEST_SAP; + break; + case LLC_2_PDU_CMD_SABME: + case LLC_2_PDU_CMD_DISC: + case LLC_2_PDU_RSP_UA: + case LLC_2_PDU_RSP_DM: + case LLC_2_PDU_RSP_FRMR: + break; + default: + type = LLC_DEST_INVALID; + break; + } +out: + *dest = type; + return 0; +} + +/** + * get_llc_hdr_len - designates LLC header length + * @pdu_type: type of PDU. + * + * This function designates LLC header length of PDU. header length for I + * and S PDU is 4 and for U is 3 bytes. Returns the length of header. + */ +static int llc_get_llc_hdr_length(u8 pdu_type) +{ + int rtn_val = 0; + + switch (pdu_type) { + case LLC_PDU_TYPE_I: + case LLC_PDU_TYPE_S: + rtn_val = 4; + break; + case LLC_PDU_TYPE_U: + rtn_val = 3; + break; + } + return rtn_val; +} + +/** + * llc_pdu_get_pf_bit - extracts p/f bit of input PDU + * @pdu: pointer to LLC header. + * + * This function extracts p/f bit of input PDU. at first examines type of + * PDU and then extracts p/f bit. Returns the p/f bit. 
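llc_pdu_decode_pdu_type, llc_decode_pdu_type and llc_get_llc_hdr_length
above all hinge on the two low bits of the first control byte: bit 0
clear means I, "01" means S, "11" means U, and only I and S PDUs carry
a second control byte. A sketch under the assumption that
LLC_PDU_TYPE_U is the two-bit pattern 0x03, which the
(ctrl_1 & LLC_PDU_TYPE_U) == LLC_PDU_TYPE_U test implies:

#include <stdio.h>
#include <stdint.h>

enum pdu_type { PDU_I, PDU_S, PDU_U };

/* Mirror llc_pdu_decode_pdu_type: bit 0 clear means I; otherwise the
 * second bit separates S ("01") from U ("11").
 */
static enum pdu_type classify(uint8_t ctrl_1)
{
	if (!(ctrl_1 & 1))
		return PDU_I;
	return (ctrl_1 & 0x03) == 0x03 ? PDU_U : PDU_S;
}

/* I and S PDUs have a 16-bit control field, U PDUs an 8-bit one. */
static int hdr_len(enum pdu_type t)
{
	return t == PDU_U ? 3 : 4;
}

int main(void)
{
	printf("0x00 -> %d byte header\n", hdr_len(classify(0x00))); /* I: 4 */
	printf("0x01 -> %d byte header\n", hdr_len(classify(0x01))); /* S: 4 */
	printf("0x03 -> %d byte header\n", hdr_len(classify(0x03))); /* U: 3 */
	return 0;
}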
+ */ +static u8 llc_pdu_get_pf_bit(llc_pdu_sn_t *pdu) +{ + u8 pdu_type; + u8 pf_bit = 0; + + if (pdu->ctrl_1 & 1) { + if ((pdu->ctrl_1 & LLC_PDU_TYPE_U) == LLC_PDU_TYPE_U) + pdu_type = LLC_PDU_TYPE_U; + else + pdu_type = LLC_PDU_TYPE_S; + } else + pdu_type = LLC_PDU_TYPE_I; + switch (pdu_type) { + case LLC_PDU_TYPE_I: + case LLC_PDU_TYPE_S: + pf_bit = pdu->ctrl_2 & LLC_S_PF_BIT_MASK; + break; + case LLC_PDU_TYPE_U: + pf_bit = (pdu->ctrl_1 & LLC_U_PF_BIT_MASK) >> 4; + break; + } + return pf_bit; +} diff -Nru a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_s_ac.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,227 @@ +/* + * llc_s_ac.c - actions performed during sap state transition. + * + * Description : + * Functions in this module are implementation of sap component actions. + * Details of actions can be found in IEEE-802.2 standard document. + * All functions have one sap and one event as input argument. All of + * them return 0 On success and 1 otherwise. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include + +/** + * llc_sap_action_unit_data_ind - forward UI PDU to network layer + * @sap: SAP + * @ev: the event to forward + * + * Received a UI PDU from MAC layer; forward to network layer as a + * UNITDATA INDICATION; verify our event is the kind we expect + */ +int llc_sap_action_unitdata_ind(struct llc_sap *sap, + struct llc_sap_state_ev *ev) +{ + llc_sap_rtn_pdu(sap, ev->data.pdu.skb, ev); + return 0; +} + +/** + * llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer + * @sap: SAP + * @ev: the event to send + * + * Sends a UI PDU to the MAC layer in response to a UNITDATA REQUEST + * primitive from the network layer. Verifies event is a primitive type of + * event. Verify the primitive is a UNITDATA REQUEST. + */ +int llc_sap_action_send_ui(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + struct llc_prim_if_block *prim = ev->data.prim.data; + struct llc_prim_unit_data *prim_data = &prim->data->udata; + struct sk_buff *skb = prim->data->udata.skb; + int rc; + + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, prim_data->saddr.lsap, + prim_data->daddr.lsap, LLC_PDU_CMD); + rc = llc_pdu_init_as_ui_cmd(skb); + if (rc) + goto out; + rc = lan_hdrs_init(skb, prim_data->saddr.mac, prim_data->daddr.mac); + if (!rc) + llc_sap_send_pdu(sap, skb); +out: + return rc; +} + +/** + * llc_sap_action_send_xid_c - send XID PDU as response to XID REQ + * @sap: SAP + * @ev: the event to send + * + * Send a XID command PDU to MAC layer in response to a XID REQUEST + * primitive from the network layer. Verify event is a primitive type + * event. Verify the primitive is a XID REQUEST. 
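All the llc_sap_action_* functions in this file share one signature
precisely so that transitions can list them in NULL-terminated arrays
and run them generically, as llc_exec_station_trans_actions did earlier
and llc_exec_sap_trans_actions does in llc_sap.c below. A stripped-down
sketch of that dispatch, with stand-in names:

#include <stdio.h>

struct ctx;				/* stands in for station/SAP */
typedef int (*action_t)(struct ctx *, void *ev);

/* Run a NULL-terminated action list; any non-zero return marks the
 * whole transition as failed, but later actions still run.
 */
static int exec_actions(struct ctx *c, action_t *actions, void *ev)
{
	int rc = 0;
	action_t *a;

	for (a = actions; a && *a; a++)
		if ((*a)(c, ev))
			rc = 1;
	return rc;
}

static int say_hi(struct ctx *c, void *ev)
{
	puts("hi");
	return 0;
}

int main(void)
{
	action_t list[] = { say_hi, say_hi, NULL };

	return exec_actions(NULL, list, NULL);
}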
+ */ +int llc_sap_action_send_xid_c(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + struct llc_prim_if_block *prim = ev->data.prim.data; + struct llc_prim_xid *prim_data = &prim->data->xid; + struct sk_buff *skb = prim_data->skb; + int rc; + + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, prim_data->saddr.lsap, + prim_data->daddr.lsap, LLC_PDU_CMD); + rc = llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); + if (rc) + goto out; + rc = lan_hdrs_init(skb, prim_data->saddr.mac, prim_data->daddr.mac); + if (!rc) + llc_sap_send_pdu(sap, skb); +out: + return rc; +} + +/** + * llc_sap_action_send_xid_r - send XID PDU resp to MAC for received XID + * @sap: SAP + * @ev: the event to send + * + * Send XID response PDU to MAC in response to an earlier received XID + * command PDU. Verify event is a PDU type event + */ +int llc_sap_action_send_xid_r(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + u8 mac_da[ETH_ALEN], mac_sa[ETH_ALEN], dsap; + int rc = 1; + struct sk_buff *ev_skb = ev->data.pdu.skb; + struct sk_buff *skb; + + llc_pdu_decode_sa(ev_skb, mac_da); + llc_pdu_decode_da(ev_skb, mac_sa); + llc_pdu_decode_ssap(ev_skb, &dsap); + skb = llc_alloc_frame(); + if (!skb) + goto out; + skb->dev = ev_skb->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, + LLC_PDU_RSP); + rc = llc_pdu_init_as_xid_rsp(skb, LLC_XID_NULL_CLASS_2, 0); + if (rc) + goto out; + rc = lan_hdrs_init(skb, mac_sa, mac_da); + if (!rc) + llc_sap_send_pdu(sap, skb); +out: + return rc; +} + +/** + * llc_sap_action_send_test_c - send TEST PDU to MAC in resp to TEST REQ + * @sap: SAP + * @ev: the event to send + * + * Send a TEST command PDU to the MAC layer in response to a TEST REQUEST + * primitive from the network layer. Verify event is a primitive type + * event; verify the primitive is a TEST REQUEST. + */ +int llc_sap_action_send_test_c(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + struct llc_prim_if_block *prim = ev->data.prim.data; + struct llc_prim_test *prim_data = &prim->data->test; + struct sk_buff *skb = prim_data->skb; + int rc; + + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, prim_data->saddr.lsap, + prim_data->daddr.lsap, LLC_PDU_CMD); + rc = llc_pdu_init_as_test_cmd(skb); + if (rc) + goto out; + rc = lan_hdrs_init(skb, prim_data->saddr.mac, prim_data->daddr.mac); + if (!rc) + llc_sap_send_pdu(sap, skb); +out: + return rc; +} + +int llc_sap_action_send_test_r(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + u8 mac_da[ETH_ALEN], mac_sa[ETH_ALEN], dsap; + int rc = 1; + struct sk_buff *ev_skb = ev->data.pdu.skb; + struct sk_buff *skb; + + llc_pdu_decode_sa(ev_skb, mac_da); + llc_pdu_decode_da(ev_skb, mac_sa); + llc_pdu_decode_ssap(ev_skb, &dsap); + skb = llc_alloc_frame(); + if (!skb) + goto out; + skb->dev = ev_skb->dev; + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, + LLC_PDU_RSP); + rc = llc_pdu_init_as_test_rsp(skb, ev_skb); + if (rc) + goto out; + rc = lan_hdrs_init(skb, mac_sa, mac_da); + if (!rc) + llc_sap_send_pdu(sap, skb); +out: + return rc; +} + +/** + * llc_sap_action_report_status - report data link status to layer mgmt + * @sap: SAP + * @ev: the event to send + * + * Report data link status to layer management. Verify our event is the + * kind we expect. 
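Note the deliberate crossover in llc_sap_action_send_xid_r and
llc_sap_action_send_test_r above: the received frame's source address
is decoded into mac_da and its destination into mac_sa, because a
reply travels the reverse path. The same swap in isolation:

#include <stdio.h>
#include <string.h>

#define MAC_LEN 6

/* When answering a received XID/TEST command the roles invert: the
 * frame's source MAC becomes our destination and vice versa.
 */
static void reply_addrs(const unsigned char *rx_sa,
			const unsigned char *rx_da,
			unsigned char *tx_sa, unsigned char *tx_da)
{
	memcpy(tx_da, rx_sa, MAC_LEN);	/* answer goes back to the sender */
	memcpy(tx_sa, rx_da, MAC_LEN);	/* from the address it was sent to */
}

int main(void)
{
	unsigned char sa[] = { 0, 1, 2, 3, 4, 5 };
	unsigned char da[] = { 6, 7, 8, 9, 10, 11 };
	unsigned char tx_sa[MAC_LEN], tx_da[MAC_LEN];

	reply_addrs(sa, da, tx_sa, tx_da);
	printf("reply dest starts with %02x\n", tx_da[0]);	/* 00 */
	return 0;
}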
+ */ +int llc_sap_action_report_status(struct llc_sap *sap, + struct llc_sap_state_ev *ev) +{ + return 0; +} + +/** + * llc_sap_action_xid_ind - send XID PDU resp to net layer via XID IND + * @sap: SAP + * @ev: the event to send + * + * Send a XID response PDU to the network layer via a XID INDICATION + * primitive. + */ +int llc_sap_action_xid_ind(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + llc_sap_rtn_pdu(sap, ev->data.pdu.skb, ev); + return 0; +} + +/** + * llc_sap_action_test_ind - send TEST PDU to net layer via TEST IND + * @sap: SAP + * @ev: the event to send + * + * Send a TEST response PDU to the network layer via a TEST INDICATION + * primitive. Verify our event is a PDU type event. + */ +int llc_sap_action_test_ind(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + llc_sap_rtn_pdu(sap, ev->data.pdu.skb, ev); + return 0; +} diff -Nru a/net/llc/llc_s_ev.c b/net/llc/llc_s_ev.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_s_ev.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,101 @@ +/* + * llc_s_ev.c - Defines SAP component events + * + * The followed event functions are SAP component events which are described + * in 802.2 LLC protocol standard document. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include + +int llc_sap_ev_activation_req(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + return ev->type == LLC_SAP_EV_TYPE_SIMPLE && + ev->data.a.ev == LLC_SAP_EV_ACTIVATION_REQ ? 0 : 1; +} + +int llc_sap_ev_rx_ui(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_SAP_EV_TYPE_PDU && !LLC_PDU_IS_CMD(pdu) && + !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_UI ? 0 : 1; +} + +int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + return ev->type == LLC_SAP_EV_TYPE_PRIM && + ev->data.prim.prim == LLC_DATAUNIT_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 0 : 1; + +} + +int llc_sap_ev_xid_req(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + return ev->type == LLC_SAP_EV_TYPE_PRIM && + ev->data.prim.prim == LLC_XID_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 0 : 1; +} + +int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_SAP_EV_TYPE_PDU && !LLC_PDU_IS_CMD(pdu) && + !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; +} + +int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_SAP_EV_TYPE_PDU && !LLC_PDU_IS_RSP(pdu) && + !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; +} + +int llc_sap_ev_test_req(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + return ev->type == LLC_SAP_EV_TYPE_PRIM && + ev->data.prim.prim == LLC_TEST_PRIM && + ev->data.prim.type == LLC_PRIM_TYPE_REQ ? 
0 : 1; +} + +int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_SAP_EV_TYPE_PDU && !LLC_PDU_IS_CMD(pdu) && + !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1; +} + +int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + return ev->type == LLC_SAP_EV_TYPE_PDU && !LLC_PDU_IS_RSP(pdu) && + !LLC_PDU_TYPE_IS_U(pdu) && + LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1; +} + +int llc_sap_ev_deactivation_req(struct llc_sap *sap, + struct llc_sap_state_ev *ev) +{ + return ev->type == LLC_SAP_EV_TYPE_SIMPLE && + ev->data.a.ev == LLC_SAP_EV_DEACTIVATION_REQ ? 0 : 1; +} diff -Nru a/net/llc/llc_s_st.c b/net/llc/llc_s_st.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_s_st.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,183 @@ +/* + * llc_s_st.c - Defines SAP component state machine transitions. + * + * The followed transitions are SAP component state machine transitions + * which are described in 802.2 LLC protocol standard document. + * + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include + +/* dummy last-transition indicator; common to all state transition groups + * last entry for this state + * all members are zeros, .bss zeroes it + */ +static struct llc_sap_state_trans llc_sap_state_trans_n; + +/* state LLC_SAP_STATE_INACTIVE transition for + * LLC_SAP_EV_ACTIVATION_REQ event + */ +static llc_sap_action_t llc_sap_inactive_state_actions_1[] = { + [0] = llc_sap_action_report_status, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = { + .ev = llc_sap_ev_activation_req, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_inactive_state_actions_1, +}; + +/* array of pointers; one to each transition */ +static struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = { + [0] = &llc_sap_inactive_state_trans_1, + [1] = &llc_sap_state_trans_n, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_UI event */ +static llc_sap_action_t llc_sap_active_state_actions_1[] = { + [0] = llc_sap_action_unitdata_ind, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_1 = { + .ev = llc_sap_ev_rx_ui, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_1, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_UNITDATA_REQ event */ +static llc_sap_action_t llc_sap_active_state_actions_2[] = { + [0] = llc_sap_action_send_ui, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_2 = { + .ev = llc_sap_ev_unitdata_req, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_2, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_XID_REQ event */ +static llc_sap_action_t llc_sap_active_state_actions_3[] = { + [0] = llc_sap_action_send_xid_c, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_3 = { + .ev = llc_sap_ev_xid_req, + 
.next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_3, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_C event */ +static llc_sap_action_t llc_sap_active_state_actions_4[] = { + [0] = llc_sap_action_send_xid_r, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_4 = { + .ev = llc_sap_ev_rx_xid_c, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_4, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_R event */ +static llc_sap_action_t llc_sap_active_state_actions_5[] = { + [0] = llc_sap_action_xid_ind, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_5 = { + .ev = llc_sap_ev_rx_xid_r, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_5, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_TEST_REQ event */ +static llc_sap_action_t llc_sap_active_state_actions_6[] = { + [0] = llc_sap_action_send_test_c, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_6 = { + .ev = llc_sap_ev_test_req, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_6, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_C event */ +static llc_sap_action_t llc_sap_active_state_actions_7[] = { + [0] = llc_sap_action_send_test_r, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_7 = { + .ev = llc_sap_ev_rx_test_c, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_7 +}; + +/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_R event */ +static llc_sap_action_t llc_sap_active_state_actions_8[] = { + [0] = llc_sap_action_test_ind, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_8 = { + .ev = llc_sap_ev_rx_test_r, + .next_state = LLC_SAP_STATE_ACTIVE, + .ev_actions = llc_sap_active_state_actions_8, +}; + +/* state LLC_SAP_STATE_ACTIVE transition for + * LLC_SAP_EV_DEACTIVATION_REQ event + */ +static llc_sap_action_t llc_sap_active_state_actions_9[] = { + [0] = llc_sap_action_report_status, + [1] = NULL, +}; + +static struct llc_sap_state_trans llc_sap_active_state_trans_9 = { + .ev = llc_sap_ev_deactivation_req, + .next_state = LLC_SAP_STATE_INACTIVE, + .ev_actions = llc_sap_active_state_actions_9 +}; + +/* array of pointers; one to each transition */ +static struct llc_sap_state_trans *llc_sap_active_state_transitions[] = { + [0] = &llc_sap_active_state_trans_2, + [1] = &llc_sap_active_state_trans_1, + [2] = &llc_sap_active_state_trans_3, + [3] = &llc_sap_active_state_trans_4, + [4] = &llc_sap_active_state_trans_5, + [5] = &llc_sap_active_state_trans_6, + [6] = &llc_sap_active_state_trans_7, + [7] = &llc_sap_active_state_trans_8, + [8] = &llc_sap_active_state_trans_9, + [9] = &llc_sap_state_trans_n, +}; + +/* SAP state transition table */ +struct llc_sap_state llc_sap_state_table[] = { + { + curr_state: LLC_SAP_STATE_INACTIVE, + transitions: llc_sap_inactive_state_transitions, + }, + { + curr_state: LLC_SAP_STATE_ACTIVE, + transitions: llc_sap_active_state_transitions, + }, +}; diff -Nru a/net/llc/llc_sap.c b/net/llc/llc_sap.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_sap.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,262 @@ +/* + * llc_sap.c - driver routines for SAP component. + * + * Copyright (c) 1997 by Procom Technology, Inc. 
+ * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void llc_sap_free_ev(struct llc_sap *sap, struct llc_sap_state_ev *ev); +static int llc_sap_next_state(struct llc_sap *sap, struct llc_sap_state_ev *ev); +static int llc_exec_sap_trans_actions(struct llc_sap *sap, + struct llc_sap_state_trans *trans, + struct llc_sap_state_ev *ev); +static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap, + struct llc_sap_state_ev *ev); + +/** + * llc_sap_assign_sock - adds a connection to a SAP + * @sap: pointer to SAP. + * @conn: pointer to connection. + * + * This function adds a connection to connection_list of a SAP. + */ +void llc_sap_assign_sock(struct llc_sap *sap, struct sock *sk) +{ + spin_lock_bh(&sap->sk_list.lock); + llc_sk(sk)->sap = sap; + list_add_tail(&llc_sk(sk)->node, &sap->sk_list.list); + sock_hold(sk); + spin_unlock_bh(&sap->sk_list.lock); +} + +/** + * llc_sap_unassign_sock - removes a connection from SAP + * @sap: SAP + * @sk: pointer to connection + * + * This function removes a connection from connection_list of a SAP. + * List locking is performed by caller (rtn_all_conns). + */ +void llc_sap_unassign_sock(struct llc_sap *sap, struct sock *sk) +{ + spin_lock_bh(&sap->sk_list.lock); + list_del(&llc_sk(sk)->node); + sock_put(sk); + spin_unlock_bh(&sap->sk_list.lock); +} + +/** + * llc_sap_alloc_ev - allocates sap event + * @sap: pointer to SAP + * @ev: allocated event (output argument) + * + * Returns the allocated sap event or %NULL when out of memory. + */ +struct llc_sap_state_ev *llc_sap_alloc_ev(struct llc_sap *sap) +{ + struct llc_sap_state_ev *ev = kmalloc(sizeof(*ev), GFP_ATOMIC); + + if (ev) + memset(ev, 0, sizeof(*ev)); + return ev; +} + +/** + * llc_sap_send_ev - sends event to SAP state machine + * @sap: pointer to SAP + * @ev: pointer to occurred event + * + * After executing actions of the event, upper layer will be indicated + * if needed(on receiving an UI frame). + */ +void llc_sap_send_ev(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + struct llc_prim_if_block *prim; + u8 flag; + + llc_sap_next_state(sap, ev); + flag = ev->ind_cfm_flag; + prim = ev->prim; + if (flag == LLC_IND) { + skb_get(ev->data.pdu.skb); + sap->ind(prim); + } + llc_sap_free_ev(sap, ev); +} + +/** + * llc_sap_rtn_pdu - Informs upper layer on rx of an UI, XID or TEST pdu. 
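llc_sap_assign_sock and llc_sap_unassign_sock above follow the usual
kernel ownership rule: membership in the SAP's list counts as one
socket reference, taken with sock_hold() on insert and dropped with
sock_put() on removal, all under the BH-disabling list lock. A
user-space cartoon of that invariant, with a plain counter standing in
for the sock refcount and the locking elided:

#include <assert.h>

struct conn {
	int refcnt;
	struct conn *next;
};

/* List membership owns one reference: taken on insert, dropped on
 * removal.  The kernel code wraps both in spin_lock_bh().
 */
static void list_add_ref(struct conn **head, struct conn *c)
{
	c->next = *head;
	*head = c;
	c->refcnt++;			/* sock_hold() */
}

static void list_del_ref(struct conn **head, struct conn *c)
{
	for (; *head; head = &(*head)->next)
		if (*head == c) {
			*head = c->next;
			c->refcnt--;	/* sock_put() */
			return;
		}
}

int main(void)
{
	struct conn c = { .refcnt = 1 };	/* creator's reference */
	struct conn *head = 0;

	list_add_ref(&head, &c);
	assert(c.refcnt == 2);
	list_del_ref(&head, &c);
	assert(c.refcnt == 1 && !head);
	return 0;
}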
+ * @sap: pointer to SAP + * @skb: received pdu + * @ev: pointer to occurred event + */ +void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb, + struct llc_sap_state_ev *ev) +{ + llc_pdu_un_t *pdu; + struct llc_prim_if_block *prim = &llc_ind_prim; + union llc_u_prim_data *prim_data = llc_ind_prim.data; + u8 lfb; + + llc_pdu_decode_sa(skb, prim_data->udata.saddr.mac); + llc_pdu_decode_da(skb, prim_data->udata.daddr.mac); + llc_pdu_decode_dsap(skb, &prim_data->udata.daddr.lsap); + llc_pdu_decode_ssap(skb, &prim_data->udata.saddr.lsap); + prim_data->udata.pri = 0; + prim_data->udata.skb = skb; + pdu = (llc_pdu_un_t *)skb->nh.raw; + switch (LLC_U_PDU_RSP(pdu)) { + case LLC_1_PDU_CMD_TEST: + prim->prim = LLC_TEST_PRIM; + break; + case LLC_1_PDU_CMD_XID: + prim->prim = LLC_XID_PRIM; + break; + case LLC_1_PDU_CMD_UI: + if (skb->protocol == ntohs(ETH_P_TR_802_2)) { + if (((struct trh_hdr *)skb->mac.raw)->rcf) { + lfb = ntohs(((struct trh_hdr *) + skb->mac.raw)->rcf) & + 0x0070; + prim_data->udata.lfb = lfb >> 4; + } else { + lfb = 0xFF; + prim_data->udata.lfb = 0xFF; + } + } + prim->prim = LLC_DATAUNIT_PRIM; + break; + } + prim->data = prim_data; + prim->sap = sap; + ev->ind_cfm_flag = LLC_IND; + ev->prim = prim; +} + +/** + * llc_sap_send_pdu - Sends a frame to MAC layer for transmition + * @sap: pointer to SAP + * @skb: pdu that must be sent + */ +void llc_sap_send_pdu(struct llc_sap *sap, struct sk_buff *skb) +{ + mac_send_pdu(skb); + kfree_skb(skb); +} + +/** + * llc_sap_free_ev - frees an sap event + * @sap: pointer to SAP + * @ev: released event + */ +static void llc_sap_free_ev(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + if (ev->type == LLC_SAP_EV_TYPE_PDU) { + llc_pdu_un_t *pdu = (llc_pdu_un_t *)ev->data.pdu.skb->nh.raw; + + if (LLC_U_PDU_CMD(pdu) != LLC_1_PDU_CMD_UI) + kfree_skb(ev->data.pdu.skb); + } + kfree(ev); +} + +/** + * llc_sap_next_state - finds transition, execs actions & change SAP state + * @sap: pointer to SAP + * @ev: happened event + * + * This function finds transition that matches with happened event, then + * executes related actions and finally changes state of SAP. It returns + * 0 on success and 1 for failure. + */ +static int llc_sap_next_state(struct llc_sap *sap, struct llc_sap_state_ev *ev) +{ + int rc = 1; + struct llc_sap_state_trans *trans; + + if (sap->state <= LLC_NBR_SAP_STATES) { + trans = llc_find_sap_trans(sap, ev); + if (trans) { + /* got the state to which we next transition; perform + * the actions associated with this transition before + * actually transitioning to the next state + */ + rc = llc_exec_sap_trans_actions(sap, trans, ev); + if (!rc) + /* transition SAP to next state if all actions + * execute successfully + */ + sap->state = trans->next_state; + } + } + return rc; +} + +/** + * llc_find_sap_trans - finds transition for event + * @sap: pointer to SAP + * @ev: happened event + * + * This function finds transition that matches with happened event. + * Returns the pointer to found transition on success or %NULL for + * failure. 
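For token-ring UI frames, llc_sap_rtn_pdu above digs the largest-frame
(LF) bits out of the routing control field with the 0x0070 mask and
falls back to 0xFF when no source route is present. The same
extraction in isolation (treating 0xFF as the "no RIF" marker, as the
code does):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Pull the largest-frame bits out of a token-ring routing control
 * field; 0xFF means no source route was present.
 */
static uint8_t rcf_lfb(uint16_t rcf_be)
{
	if (!rcf_be)
		return 0xFF;
	return (ntohs(rcf_be) & 0x0070) >> 4;
}

int main(void)
{
	printf("lfb=%u\n", rcf_lfb(htons(0x0030)));	/* -> 3 */
	printf("lfb=%u\n", rcf_lfb(0));			/* -> 255 */
	return 0;
}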
+ */ +static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap, + struct llc_sap_state_ev* ev) +{ + int i = 0; + struct llc_sap_state_trans *rc = NULL; + struct llc_sap_state_trans **next_trans; + struct llc_sap_state *curr_state = &llc_sap_state_table[sap->state - 1]; + /* search thru events for this state until list exhausted or until + * its obvious the event is not valid for the current state + */ + for (next_trans = curr_state->transitions; next_trans [i]->ev; i++) + if (!next_trans[i]->ev(sap, ev)) { + /* got event match; return it */ + rc = next_trans[i]; + break; + } + return rc; +} + +/** + * llc_exec_sap_trans_actions - execute actions related to event + * @sap: pointer to SAP + * @trans: pointer to transition that it's actions must be performed + * @ev: happened event. + * + * This function executes actions that is related to happened event. + * Returns 0 for success and 1 for failure of at least one action. + */ +static int llc_exec_sap_trans_actions(struct llc_sap *sap, + struct llc_sap_state_trans *trans, + struct llc_sap_state_ev *ev) +{ + int rc = 0; + llc_sap_action_t *next_action; + + for (next_action = trans->ev_actions; + next_action && *next_action; next_action++) + if ((*next_action)(sap, ev)) + rc = 1; + return rc; +} diff -Nru a/net/llc/llc_sock.c b/net/llc/llc_sock.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_sock.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,1780 @@ +/* + * llc_sock.c - LLC User Interface SAPs + * Description: + * Functions in this module are implementation of socket based llc + * communications for the Linux operating system. Support of llc class + * one and class two is provided via SOCK_DGRAM and SOCK_STREAM + * respectively. + * + * An llc2 connection is (mac + sap), only one llc2 sap connection + * is allowed per mac. Though one sap may have multiple mac + sap + * connections. + * + * Copyright (c) 2001 by Jay Schulist + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define dprintk(format, a...) printk(KERN_INFO __FUNCTION__ ": " format, ##a) + +/* remember: uninitialized global data is zeroed because its in .bss */ +static u16 llc_ui_sap_last_autoport = LLC_SAP_DYN_START; +static u16 llc_ui_sap_link_no_max[256]; +static u8 llc_ui_addrany[IFHWADDRLEN]; +static struct sockaddr_llc llc_ui_addrnull; +static struct proto_ops llc_ui_ops; +static struct sock *llc_ui_sockets; +static rwlock_t llc_ui_sockets_lock = RW_LOCK_UNLOCKED; + +static int llc_ui_indicate(struct llc_prim_if_block *prim); +static int llc_ui_confirm(struct llc_prim_if_block *prim); +static int llc_ui_wait_for_conn(struct sock *sk, int seconds); +static int llc_ui_wait_for_disc(struct sock *sk, int seconds); + +/** + * llc_ui_next_link_no - return the next unused link number for a sap + * @sap: Address of sap to get link number from. + * + * Return the next unused link number for a given sap. 
+ */ +static inline u16 llc_ui_next_link_no(int sap) +{ + return llc_ui_sap_link_no_max[sap]++; +} + +/** + * llc_ui_mac_match - determines if two mac addresses are the same + * @mac1: First mac address to compare. + * @mac2: Second mac address to compare. + * + * Determines if two given mac address are the same. Returns 0 if there + * is not a complete match up to len, 1 if a complete match up to len is + * found. + */ +static inline u8 llc_ui_mac_match(u8 *mac1, u8 *mac2) +{ + return !memcmp(mac1, mac2, IFHWADDRLEN); +} + +/** + * llc_ui_mac_null - determines if a address is a null mac address + * @mac: Mac address to test if null. + * + * Determines if a given address is a null mac address. Returns 0 if the + * address is not a null mac, 1 if the address is a null mac. + */ +static inline u8 llc_ui_mac_null(u8 *mac) +{ + return !memcmp(mac, llc_ui_addrany, IFHWADDRLEN); +} + +/** + * llc_ui_addr_null - determines if a address structure is null + * @addr: Address to test if null. + */ +static inline u8 llc_ui_addr_null(struct sockaddr_llc *addr) +{ + return !memcmp(addr, &llc_ui_addrnull, sizeof(*addr)); +} + +/** + * llc_ui_protocol_type - return eth protocol for ARP header type + * @arphrd: ARP header type. + * + * Given an ARP header type return the corresponding ethernet protocol. + * Returns 0 if ARP header type not supported or the corresponding + * ethernet protocol type. + */ +static inline u16 llc_ui_protocol_type(u16 arphrd) +{ + u16 rc = htons(ETH_P_802_2); + + if (arphrd == ARPHRD_IEEE802_TR) + rc = htons(ETH_P_TR_802_2); + return rc; +} + +/** + * llc_ui_header_len - return length of llc header based on operation + * @sk: Socket which contains a valid llc socket type. + * @addr: Complete sockaddr_llc structure received from the user. + * + * Provide the length of the llc header depending on what kind of + * operation the user would like to perform and the type of socket. + * Returns the correct llc header length. + */ +static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) +{ + u8 rc = LLC_PDU_LEN_U; + + if (addr->sllc_test || addr->sllc_xid) + rc = LLC_PDU_LEN_U; + else if (sk->type == SOCK_STREAM) + rc = LLC_PDU_LEN_I; + return rc; +} + +/** + * llc_ui_send_conn - send connect command for new llc2 connection + * @sap : Sap the socket is bound to. + * @addr: Source and destination fields provided by the user. + * @dev : Device which this connection should use. + * @link: Link number to assign to this connection. + * + * Send a connect command to the llc layer for a new llc2 connection. + * Returns 0 upon success, non-zero if action didn't succeed. + */ +static int llc_ui_send_conn(struct sock *sk, struct llc_sap *sap, + struct sockaddr_llc *addr, + struct net_device *dev, int link) +{ + struct llc_ui_opt *llc_ui = llc_ui_sk(sk); + union llc_u_prim_data prim_data; + struct llc_prim_if_block prim; + + prim.data = &prim_data; + prim.sap = sap; + prim.prim = LLC_CONN_PRIM; + prim_data.conn.dev = dev; + prim_data.conn.link = link; + prim_data.conn.sk = NULL; + prim_data.conn.handler = sk; + prim_data.conn.pri = 0; + prim_data.conn.saddr.lsap = llc_ui->addr.sllc_ssap; + prim_data.conn.daddr.lsap = addr->sllc_dsap; + memcpy(prim_data.conn.saddr.mac, dev->dev_addr, IFHWADDRLEN); + memcpy(prim_data.conn.daddr.mac, addr->sllc_dmac, IFHWADDRLEN); + return sap->req(&prim); +} + +/** + * llc_ui_send_disc - send disc command to llc layer + * @sk: Socket with valid llc information. 
+ * + * Send a disconnect command to the llc layer for an established + * llc2 connection. + * Returns 0 upon success, non-zero if action did not succeed. + */ +static int llc_ui_send_disc(struct sock *sk) +{ + struct llc_ui_opt *llc_ui = llc_ui_sk(sk); + union llc_u_prim_data prim_data; + struct llc_prim_if_block prim; + int rc = 0; + + if (sk->type != SOCK_STREAM || sk->state != TCP_ESTABLISHED) + goto out; + sk->state = TCP_CLOSING; + prim.data = &prim_data; + prim.sap = llc_ui->sap; + prim.prim = LLC_DISC_PRIM; + prim_data.disc.sk = llc_ui->core_sk; + prim_data.disc.link = llc_ui->link; + rc = llc_ui->sap->req(&prim); +out: + return rc; +} + +/** + * llc_ui_send_data - send data via reliable llc2 connection + * @sap: Sap the socket is bound to. + * @sk: Connection the socket is using. + * @skb: Data the user wishes to send. + * @addr: Source and destination fields provided by the user. + * + * Send data via reliable llc2 connection. + * Returns 0 upon success, non-zero if action did not succeed. + */ +static int llc_ui_send_data(struct llc_sap *sap, struct sock* sk, + struct sk_buff *skb, struct sockaddr_llc *addr) +{ + union llc_u_prim_data prim_data; + struct llc_prim_if_block prim; + struct llc_ui_opt* llc_ui = llc_ui_sk(sk); + struct llc_opt* llc_core = llc_sk(llc_ui->core_sk); + int rc; + + prim.data = &prim_data; + prim.sap = sap; + prim.prim = LLC_DATA_PRIM; + prim_data.data.skb = skb; + prim_data.data.pri = 0; + prim_data.data.sk = llc_ui->core_sk; + skb->protocol = llc_ui_protocol_type(addr->sllc_arphrd); + sock_hold(sk); +try: + rc = sap->req(&prim); + if (rc != -EBUSY) + goto out; + rc = wait_event_interruptible(sk->socket->wait, !llc_ui->core_sk || + !llc_core->failed_data_req); + if (!rc) + goto try; + if (!llc_ui->core_sk) + rc = -ENOTCONN; +out: + sock_put(sk); + return rc; +} + +/** + * llc_ui_send_llc1 - send llc1 prim data block to llc layer. + * @sap : Sap the socket is bound to. + * @skb : Data the user wishes to send. + * @addr : Source and destination fields provided by the user. + * @primitive: Action the llc layer should perform. + * + * Send an llc1 primitive data block to the llc layer for processing. + * This function is used for test, xid and unit_data messages. + * Returns 0 upon success, non-zero if action did not succeed. + */ +static int llc_ui_send_llc1(struct llc_sap *sap, struct sk_buff *skb, + struct sockaddr_llc *addr, int primitive) +{ + union llc_u_prim_data prim_data; + struct llc_prim_if_block prim; + + prim.data = &prim_data; + prim.sap = sap; + prim.prim = primitive; + prim_data.test.skb = skb; + prim_data.test.saddr.lsap = sap->laddr.lsap; + prim_data.test.daddr.lsap = addr->sllc_dsap; + skb->protocol = llc_ui_protocol_type(addr->sllc_arphrd); + memcpy(prim_data.test.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN); + memcpy(prim_data.test.daddr.mac, addr->sllc_dmac, IFHWADDRLEN); + return sap->req(&prim); +} + +/** + * llc_ui_find_sap - returns sap struct that matches sap number specified + * @sap: Sap number to search for. + * + * Search the local socket list and return the first instance of the sap + * structure which matches the sap number the user specified. + * Returns llc_sap upon match, %NULL otherwise. 
+ */ +static inline struct llc_sap *llc_ui_find_sap(u8 sap) +{ + struct sock *sk; + struct llc_sap *s = NULL; + + read_lock_bh(&llc_ui_sockets_lock); + for (sk = llc_ui_sockets; sk; sk = sk->next) { + struct llc_ui_opt *llc_ui = llc_ui_sk(sk); + + if (!llc_ui->sap) + continue; + if (llc_ui->sap->laddr.lsap == sap) { + s = llc_ui->sap; + break; + } + } + read_unlock_bh(&llc_ui_sockets_lock); + return s; +} + +static struct sock *__llc_ui_find_sk_by_exact(struct llc_addr *laddr, + struct llc_addr *daddr) +{ + struct sock *sk; + + for (sk = llc_ui_sockets; sk; sk = sk->next) { + struct llc_ui_opt *llc_ui = llc_ui_sk(sk); + + if (llc_ui->addr.sllc_ssap == laddr->lsap && + llc_ui->addr.sllc_dsap == daddr->lsap && + llc_ui_mac_null(llc_ui->addr.sllc_mmac) && + llc_ui_mac_match(llc_ui->addr.sllc_smac, laddr->mac) && + llc_ui_mac_match(llc_ui->addr.sllc_dmac, daddr->mac)) + break; + } + return sk; +} + +/** + * __llc_ui_find_sk_by_addr - return socket matching local mac + sap. + * @addr: Local address to match. + * + * Search the local socket list and return the socket which has a matching + * local (mac + sap) address (allows null mac). This search will work on + * unconnected and connected sockets, though find_by_link_no is recommend + * for connected sockets. + * Returns sock upon match, %NULL otherwise. + */ +static struct sock *__llc_ui_find_sk_by_addr(struct llc_addr *laddr, + struct llc_addr *daddr, + struct net_device *dev) +{ + struct sock *sk, *tmp_sk; + + for (sk = llc_ui_sockets; sk; sk = sk->next) { + struct llc_ui_opt *llc_ui = llc_ui_sk(sk); + + if (llc_ui->addr.sllc_ssap != laddr->lsap) + continue; + if (llc_ui_mac_null(llc_ui->addr.sllc_smac)) { + if (!llc_ui_mac_null(llc_ui->addr.sllc_mmac) && + !llc_ui_mac_match(llc_ui->addr.sllc_mmac, + laddr->mac)) + continue; + break; + } + if (dev && !llc_ui_mac_null(llc_ui->addr.sllc_mmac) && + llc_ui_mac_match(llc_ui->addr.sllc_mmac, laddr->mac) && + llc_ui_mac_match(llc_ui->addr.sllc_smac, dev->dev_addr)) + break; + if (dev->flags & IFF_LOOPBACK) + break; + if (!llc_ui_mac_match(llc_ui->addr.sllc_smac, laddr->mac)) + continue; + tmp_sk = __llc_ui_find_sk_by_exact(laddr, daddr); + if (tmp_sk) { + sk = tmp_sk; + break; + } + if (llc_ui_mac_null(llc_ui->addr.sllc_dmac)) + break; + } + return sk; +} + +static struct sock *llc_ui_find_sk_by_addr(struct llc_addr *addr, + struct llc_addr *daddr, + struct net_device *dev) +{ + struct sock *sk; + + read_lock(&llc_ui_sockets_lock); + sk = __llc_ui_find_sk_by_addr(addr, daddr, dev); + if (sk) + sock_hold(sk); + read_unlock(&llc_ui_sockets_lock); + return sk; +} + +static struct sock *llc_ui_bh_find_sk_by_addr(struct llc_addr *addr, + struct llc_addr *daddr, + struct net_device *dev) +{ + struct sock *sk; + + read_lock_bh(&llc_ui_sockets_lock); + sk = __llc_ui_find_sk_by_addr(addr, daddr, dev); + if (sk) + sock_hold(sk); + read_unlock_bh(&llc_ui_sockets_lock); + return sk; +} + +/** + * llc_ui_insert_socket - insert socket into list + * @sk: Socket to insert. + * + * Insert a socket into the local llc socket list. + */ +static inline void llc_ui_insert_socket(struct sock *sk) +{ + write_lock_bh(&llc_ui_sockets_lock); + sk->next = llc_ui_sockets; + if (sk->next) + llc_ui_sockets->pprev = &sk->next; + llc_ui_sockets = sk; + sk->pprev = &llc_ui_sockets; + sock_hold(sk); + write_unlock_bh(&llc_ui_sockets_lock); +} + +/** + * llc_ui_remove_socket - remove socket from list + * @sk: Socket to remove. + * + * Remove a socket from the local llc socket list. 
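__llc_ui_find_sk_by_addr above treats an all-zero bound MAC as a
wildcard, which is what lets a socket bind to "an entire sap". A
reduced model of the match rule; the real function layers
multicast-MAC, loopback and exact-pair checks on top of this:

#include <stdio.h>
#include <string.h>

#define MAC_LEN 6

static const unsigned char mac_any[MAC_LEN];	/* all zeros = wildcard */

static int mac_null(const unsigned char *m)
{
	return !memcmp(m, mac_any, MAC_LEN);
}

/* A socket bound with a null source MAC accepts any local MAC;
 * otherwise the MACs must match exactly, and the SAP always must.
 */
static int laddr_matches(const unsigned char *bound_mac, unsigned char bound_sap,
			 const unsigned char *mac, unsigned char sap)
{
	if (bound_sap != sap)
		return 0;
	return mac_null(bound_mac) || !memcmp(bound_mac, mac, MAC_LEN);
}

int main(void)
{
	unsigned char mac[] = { 0, 1, 2, 3, 4, 5 };

	printf("%d\n", laddr_matches(mac_any, 0x42, mac, 0x42));	/* 1 */
	printf("%d\n", laddr_matches(mac, 0x42, mac, 0x44));		/* 0 */
	return 0;
}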
+ */
+static inline void llc_ui_remove_socket(struct sock *sk)
+{
+	write_lock_bh(&llc_ui_sockets_lock);
+	if (sk->pprev) {
+		if (sk->next)
+			sk->next->pprev = sk->pprev;
+		*sk->pprev = sk->next;
+		sk->pprev = NULL;
+		/* this only makes sense if the socket was inserted on the
+		 * list, if sk->pprev is NULL it wasn't
+		 */
+		sock_put(sk);
+	}
+	write_unlock_bh(&llc_ui_sockets_lock);
+}
+
+/**
+ * llc_ui_destroy_sk - destroy socket
+ * @sk: Socket which is to be destroyed.
+ *
+ * Really destroy the socket.
+ */
+static void llc_ui_destroy_sk(struct sock *sk)
+{
+	skb_queue_purge(&sk->receive_queue);
+	skb_queue_purge(&sk->write_queue);
+	sock_put(sk);
+	MOD_DEC_USE_COUNT;
+}
+
+/**
+ * llc_ui_destroy_timer - try to destroy socket again
+ * @data: Socket which is to be destroyed.
+ *
+ * Attempt to destroy a socket which was previously destroyed but
+ * was still in use at the time.
+ */
+static void llc_ui_destroy_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+
+	if (!atomic_read(&sk->wmem_alloc) &&
+	    !atomic_read(&sk->rmem_alloc) && sk->dead)
+		llc_ui_destroy_sk(sk);
+	else {
+		sk->timer.expires = jiffies + SOCK_DESTROY_TIME;
+		add_timer(&sk->timer);
+	}
+}
+
+/**
+ * llc_ui_create - alloc and init a new llc_ui socket
+ * @sock: Socket to initialize and attach allocated sk to.
+ * @protocol: Unused.
+ *
+ * Allocate and initialize a new llc_ui socket, validate the user wants a
+ * socket type we have available.
+ * Returns 0 upon success, negative upon failure.
+ */
+static int llc_ui_create(struct socket *sock, int protocol)
+{
+	struct sock *sk;
+	struct llc_ui_opt *llc_ui;
+	int rc = -ESOCKTNOSUPPORT;
+
+	MOD_INC_USE_COUNT;
+	if (sock->type != SOCK_DGRAM && sock->type != SOCK_STREAM)
+		goto decmod;
+	rc = -ENOMEM;
+	sk = sk_alloc(PF_LLC, GFP_KERNEL, 1, NULL);
+	if (!sk)
+		goto decmod;
+	llc_ui = kmalloc(sizeof(*llc_ui), GFP_KERNEL);
+	if (!llc_ui)
+		goto outsk;
+	memset(llc_ui, 0, sizeof(*llc_ui));
+	rc = 0;
+	sock_init_data(sock, sk);
+	llc_ui_sk(sk) = llc_ui;
+	sock->ops = &llc_ui_ops;
+out:
+	return rc;
+outsk:
+	sk_free(sk);
+decmod:
+	MOD_DEC_USE_COUNT;
+	goto out;
+}
+
+/**
+ * llc_ui_release - shutdown socket
+ * @sock: Socket to release.
+ *
+ * Shutdown and deallocate an existing socket.
+ */
+static int llc_ui_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct llc_ui_opt *llc_ui;
+
+	if (!sk)
+		goto out;
+	llc_ui = llc_ui_sk(sk);
+	if (llc_ui->core_sk && !llc_ui_send_disc(sk))
+		llc_ui_wait_for_disc(sk, 255);
+	llc_ui_remove_socket(sk);
+	if (llc_ui->sap && !llc_ui_find_sap(llc_ui->sap->laddr.lsap))
+		llc_sap_close(llc_ui->sap);
+	dprintk("rxq=%d, txq=%d\n", skb_queue_len(&sk->receive_queue),
+		skb_queue_len(&sk->write_queue));
+	sock_orphan(sk);
+	sock->sk = NULL;
+	if (!atomic_read(&sk->wmem_alloc) &&
+	    !atomic_read(&sk->rmem_alloc) && sk->dead)
+		llc_ui_destroy_sk(sk);
+	else {
+		init_timer(&sk->timer);
+		sk->timer.expires = jiffies + SOCK_DESTROY_TIME;
+		sk->timer.function = llc_ui_destroy_timer;
+		sk->timer.data = (unsigned long)sk;
+		add_timer(&sk->timer);
+	}
+out:
+	return 0;
+}
+
+/**
+ * llc_ui_autoport - provide dynamically allocated SAP number
+ *
+ * Provide the caller with a dynamically allocated SAP number according
+ * to the rules that are set in this function. Returns 0 upon failure,
+ * the SAP number otherwise.
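A compact model of the llc_ui_autoport logic that follows: scan a
dynamic window for a free SAP, remember where the scan stopped, then
wrap and retry a bounded number of times. The range constants here are
hypothetical stand-ins for LLC_SAP_DYN_START/STOP/TRIES, and reading
the i += 2 step as "individual (even) SAPs only" is my inference:

#include <stdio.h>

#define DYN_START 0xC0	/* hypothetical stand-ins for the   */
#define DYN_STOP  0xDE	/* LLC_SAP_DYN_* range constants    */
#define DYN_TRIES 2

static int sap_busy(int sap);	/* would be llc_ui_find_sap() != NULL */

static int last = DYN_START;	/* where the previous scan stopped */

static int autoport(void)
{
	int i, tries;

	for (tries = 0; tries < DYN_TRIES; tries++) {
		for (i = last; i < DYN_STOP; i += 2)
			if (!sap_busy(i)) {
				last = i + 2;
				return i;
			}
		last = DYN_START;	/* wrap and rescan once */
	}
	return 0;			/* range exhausted */
}

static int sap_busy(int sap)
{
	return sap < 0xC4;		/* pretend two SAPs are taken */
}

int main(void)
{
	printf("got sap 0x%x\n", autoport());	/* -> 0xc4 */
	return 0;
}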
+ */ +static int llc_ui_autoport(void) +{ + struct llc_sap *sap; + int i, tries = 0; + + while (tries < LLC_SAP_DYN_TRIES) { + for (i = llc_ui_sap_last_autoport; + i < LLC_SAP_DYN_STOP; i += 2) { + sap = llc_ui_find_sap(i); + if (!sap) { + llc_ui_sap_last_autoport = i + 2; + goto out; + } + } + llc_ui_sap_last_autoport = LLC_SAP_DYN_START; + tries++; + } + i = 0; +out: + return i; +} + +/** + * llc_ui_autobind - Bind a socket to a specific address. + * @sk: Socket to bind an address to. + * @addr: Address the user wants the socket bound to. + * + * Bind a socket to a specific address. For llc a user is able to bind to + * a specific sap only or mac + sap. If the user only specifies a sap and + * a null dmac (all zeros) the user is attempting to bind to an entire + * sap. This will stop anyone else on the local system from using that + * sap. If someone else has a mac + sap open the bind to null + sap will + * fail. + * If the user desires to bind to a specific mac + sap, it is possible to + * have multiple sap connections via multiple macs. + * Bind and autobind for that matter must enforce the correct sap usage + * otherwise all hell will break loose. + * Returns: 0 upon success, negative otherwise. + */ +static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) +{ + struct sock *sk = sock->sk; + struct llc_ui_opt *llc_ui = llc_ui_sk(sk); + struct llc_sap *sap; + struct net_device *dev = NULL; + int rc = -EINVAL; + + if (!sk->zapped) + goto out; + /* bind to a specific mac, optional. */ + if (!llc_ui_mac_null(addr->sllc_smac)) { + rtnl_lock(); + dev = dev_getbyhwaddr(addr->sllc_arphrd, addr->sllc_smac); + rtnl_unlock(); + rc = -ENETUNREACH; + if (!dev) + goto out; + llc_ui->dev = dev; + } + /* bind to a specific sap, optional. */ + if (!addr->sllc_ssap) { + rc = -EUSERS; + addr->sllc_ssap = llc_ui_autoport(); + if (!addr->sllc_ssap) + goto out; + } + sap = llc_ui_find_sap(addr->sllc_ssap); + if (!sap) { + sap = llc_sap_open(llc_ui_indicate, llc_ui_confirm, + addr->sllc_ssap); + rc = -EBUSY; /* some other network layer is using the sap */ + if (!sap) + goto out; + } else { + struct llc_addr laddr, daddr; + struct sock *ask; + + rc = -EUSERS; /* can't get exclusive use of sap */ + if (!dev && llc_ui_mac_null(addr->sllc_mmac)) + goto out; + memset(&laddr, 0, sizeof(laddr)); + memset(&daddr, 0, sizeof(daddr)); + if (!llc_ui_mac_null(addr->sllc_mmac)) { + if (sk->type != SOCK_DGRAM) { + rc = -EOPNOTSUPP; + goto out; + } + memcpy(laddr.mac, addr->sllc_mmac, IFHWADDRLEN); + } else + memcpy(laddr.mac, addr->sllc_smac, IFHWADDRLEN); + laddr.lsap = addr->sllc_ssap; + rc = -EADDRINUSE; /* mac + sap clash. */ + ask = llc_ui_bh_find_sk_by_addr(&laddr, &daddr, dev); + if (ask) { + sock_put(ask); + goto out; + } + } + memcpy(&llc_ui->addr, addr, sizeof(*addr)); + llc_ui->sap = sap; + rc = sk->zapped = 0; + llc_ui_insert_socket(sk); +out: + return rc; +} + +/** + * llc_ui_bind - bind a socket to a specific address. + * @sock: Socket to bind an address to. + * @uaddr: Address the user wants the socket bound to. + * @addrlen: Length of the uaddr structure. + * + * Bind a socket to a specific address. For llc a user is able to bind to + * a specific sap only or mac + sap. If the user only specifies a sap and + * a null dmac (all zeros) the user is attempting to bind to an entire + * sap. This will stop anyone else on the local system from using that + * sap. If someone else has a mac + sap open the bind to null + sap will + * fail. 
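From user space the bind semantics described above look roughly like
the sketch below. It is a sketch only: it presumes AF_LLC,
ARPHRD_ETHER and this patch's struct sockaddr_llc layout are visible
to applications, and the header path is a guess.

#include <string.h>
#include <sys/socket.h>
#include <net/if_arp.h>		/* ARPHRD_ETHER */
#include <linux/llc.h>		/* struct sockaddr_llc (assumed path) */

/* Bind a datagram LLC socket to local SAP 0x42 on a specific MAC;
 * leaving sllc_smac all zeros would instead claim the entire SAP.
 */
int bind_llc(int fd, const unsigned char *my_mac)
{
	struct sockaddr_llc addr;

	memset(&addr, 0, sizeof(addr));
	addr.sllc_family = AF_LLC;
	addr.sllc_arphrd = ARPHRD_ETHER;
	addr.sllc_ssap = 0x42;			/* local SAP */
	memcpy(addr.sllc_smac, my_mac, 6);	/* specific MAC, not null */
	return bind(fd, (struct sockaddr *)&addr, sizeof(addr));
}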
+ * If the user desires to bind to a specific mac + sap, it is possible to
+ * have multiple sap connections via multiple macs.
+ * Bind and autobind for that matter must enforce the correct sap usage,
+ * otherwise all hell will break loose.
+ * Returns: 0 upon success, negative otherwise.
+ */
+static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
+{
+	struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
+	struct sock *sk = sock->sk;
+	int rc = -EINVAL;
+
+	if (!sk->zapped || addrlen != sizeof(*addr))
+		goto out;
+	rc = -EAFNOSUPPORT;
+	if (addr->sllc_family != AF_LLC)
+		goto out;
+	/* use autobind, to avoid code replication. */
+	rc = llc_ui_autobind(sock, addr);
+out:
+	return rc;
+}
+
+/**
+ * llc_ui_shutdown - shutdown a connected llc2 socket.
+ * @sock: Socket to shutdown.
+ * @how: What part of the socket to shutdown.
+ *
+ * Shutdown a connected llc2 socket. Currently this function only supports
+ * shutting down both sends and receives (how == 2); shutting down only
+ * half of the connection could be supported later, but is not right now.
+ * Returns: 0 upon success, negative otherwise.
+ */
+static int llc_ui_shutdown(struct socket *sock, int how)
+{
+	struct sock *sk = sock->sk;
+	int rc = -ENOTCONN;
+
+	lock_sock(sk);
+	if (sk->state != TCP_ESTABLISHED)
+		goto out;
+	rc = -EINVAL;
+	if (how != 2)
+		goto out;
+	rc = llc_ui_send_disc(sk);
+	if (!rc)
+		llc_ui_wait_for_disc(sk, 255);
+	/* Wake up anyone sleeping in poll */
+	sk->state_change(sk);
+out:
+	release_sock(sk);
+	return rc;
+}
+
+/**
+ * llc_ui_connect - Connect to a remote llc2 mac + sap.
+ * @sock: Socket which will be connected to the remote destination.
+ * @uaddr: Remote and possibly the local address of the new connection.
+ * @addrlen: Size of uaddr structure.
+ * @flags: Operational flags specified by the user.
+ *
+ * Connect to a remote llc2 mac + sap. The caller must specify the
+ * destination mac + sap to connect to. If the user previously called
+ * bind(2) with a source mac, it does not need to be specified again.
+ * This function will autobind if the user did not previously call bind.
+ * Returns: 0 upon success, negative otherwise.
+ */
+static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
+			  int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct llc_ui_opt *llc_ui = llc_ui_sk(sk);
+	struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
+	struct net_device *dev;
+	int rc = -EINVAL;
+
+	lock_sock(sk);
+	if (addrlen != sizeof(*addr))
+		goto out;
+	rc = -EAFNOSUPPORT;
+	if (addr->sllc_family != AF_LLC)
+		goto out;
+	/* bind connection to sap if user hasn't done it. */
+	if (sk->zapped) {
+		/* bind to sap with null dev, exclusive */
+		rc = llc_ui_autobind(sock, addr);
+		if (rc)
+			goto out;
+	}
+	if (!llc_ui->dev) {
+		rtnl_lock();
+		dev = dev_getbyhwaddr(addr->sllc_arphrd, addr->sllc_smac);
+		rtnl_unlock();
+		rc = -ENETUNREACH;
+		if (!dev)
+			goto out;
+	} else
+		dev = llc_ui->dev;
+	rc = -EINVAL;
+	if (sk->type != SOCK_STREAM)
+		goto out;
+	rc = -EALREADY;
+	if (sock->state == SS_CONNECTING)
+		goto out;
+	sock->state = SS_CONNECTING;
+	sk->state = TCP_SYN_SENT;
+	llc_ui->link = llc_ui_next_link_no(llc_ui->sap->laddr.lsap);
+	rc = llc_ui_send_conn(sk, llc_ui->sap, addr, dev, llc_ui->link);
+	if (rc) {
+		sock->state = SS_UNCONNECTED;
+		sk->state = TCP_CLOSE;
+		goto out;
+	}
+	rc = llc_ui_wait_for_conn(sk, 255);
+out:
+	release_sock(sk);
+	return rc;
+}
+
+/**
+ * llc_ui_listen - allow a normal socket to accept incoming connections
+ * @sock: Socket to allow incoming connections on.
+ * @backlog: Number of connections to queue.
+ *
+ * Allow a normal socket to accept incoming connections.
+ * Returns 0 upon success, negative otherwise.
+ */
+static int llc_ui_listen(struct socket *sock, int backlog)
+{
+	struct sock *sk = sock->sk;
+	int rc = -EINVAL;
+
+	lock_sock(sk);
+	if (sock->state != SS_UNCONNECTED)
+		goto out;
+	rc = -EOPNOTSUPP;
+	if (sk->type != SOCK_STREAM && sk->type != SOCK_SEQPACKET)
+		goto out;
+	rc = -EAGAIN;
+	if (sk->zapped)
+		goto out;
+	rc = 0;
+	if (!(unsigned)backlog)	/* BSDism */
+		backlog = 1;
+	if ((unsigned)backlog > SOMAXCONN)
+		backlog = SOMAXCONN;
+	sk->max_ack_backlog = backlog;
+	if (sk->state != TCP_LISTEN) {
+		sk->ack_backlog = 0;
+		sk->state = TCP_LISTEN;
+	}
+	sk->socket->flags |= __SO_ACCEPTCON;
+out:
+	release_sock(sk);
+	return rc;
+}
+
+static int llc_ui_wait_for_disc(struct sock *sk, int seconds)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	int rc, timeout = seconds * HZ;
+
+	add_wait_queue_exclusive(sk->sleep, &wait);
+	for (;;) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		rc = 0;
+		if (sk->state != TCP_CLOSE)
+			timeout = schedule_timeout(timeout);
+		else
+			break;
+		rc = -ERESTARTSYS;
+		if (signal_pending(current))
+			break;
+		rc = -EAGAIN;
+		if (!timeout)
+			break;
+	}
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(sk->sleep, &wait);
+	return rc;
+}
+
+static int llc_ui_wait_for_conn(struct sock *sk, int seconds)
+{
+	struct llc_ui_opt *llc_ui = llc_ui_sk(sk);
+	DECLARE_WAITQUEUE(wait, current);
+	int rc, timeout = seconds * HZ;
+
+	add_wait_queue_exclusive(sk->sleep, &wait);
+	for (;;) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		rc = 0;
+		if (sk->state != TCP_ESTABLISHED)
+			timeout = schedule_timeout(timeout);
+		if (sk->state == TCP_ESTABLISHED) {
+			if (!llc_ui->core_sk)
+				rc = -EAGAIN;
+			break;
+		}
+		rc = -EAGAIN;
+		if (sk->state == TCP_CLOSE)
+			break;
+		rc = -ERESTARTSYS;
+		if (signal_pending(current))
+			break;
+		rc = -EAGAIN;
+		if (!timeout)
+			break;
+	}
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(sk->sleep, &wait);
+	return rc;
+}
+
+/**
+ * llc_ui_accept - accept a new incoming connection.
+ * @sock: Socket which connections arrive on.
+ * @newsock: Socket to move incoming connection to.
+ * @flags: User specified operational flags.
+ *
+ * Accept a new incoming connection.
+ * Returns 0 upon success, negative otherwise.
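+ *
+ * Example (user-space sketch; fd is a bound PF_LLC stream socket and
+ * handle_connection() a hypothetical helper, error handling omitted):
+ *
+ *	listen(fd, 5);
+ *	for (;;) {
+ *		int conn = accept(fd, NULL, NULL);
+ *
+ *		if (conn < 0)
+ *			break;
+ *		handle_connection(conn);
+ *	}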
+ */
+static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	struct sock *sk = sock->sk, *newsk;
+	struct llc_ui_opt *llc_ui, *newllc_ui;
+	struct llc_opt *newllc_core;
+	struct sk_buff *skb;
+	int rc = -EOPNOTSUPP;
+
+	lock_sock(sk);
+	if (sk->type != SOCK_SEQPACKET && sk->type != SOCK_STREAM)
+		goto out;
+	rc = -EINVAL;
+	if (sock->state != SS_UNCONNECTED || sk->state != TCP_LISTEN)
+		goto out;
+	/* wait for a connection to arrive. */
+	do {
+		skb = skb_dequeue(&sk->receive_queue);
+		if (!skb) {
+			rc = -EWOULDBLOCK;
+			if (flags & O_NONBLOCK)
+				goto out;
+			interruptible_sleep_on(sk->sleep);
+			rc = -ERESTARTSYS;
+			if (signal_pending(current))
+				goto out;
+		}
+	} while (!skb);
+
+	rc = -EINVAL;
+	if (!skb->sk)
+		goto frees;
+	/* attach connection to a new socket. */
+	rc = llc_ui_create(newsock, sk->protocol);
+	if (rc)
+		goto frees;
+	rc = 0;
+	newsk = newsock->sk;
+	newsk->pair = NULL;
+	newsk->socket = newsock;
+	newsk->sleep = &newsock->wait;
+	newsk->zapped = 0;
+	newsk->state = TCP_ESTABLISHED;
+	newsock->state = SS_CONNECTED;
+	llc_ui = llc_ui_sk(sk);
+	newllc_ui = llc_ui_sk(newsk);
+	newllc_ui->sap = llc_ui->sap;
+	newllc_ui->dev = llc_ui->dev;
+	newllc_ui->core_sk = skb->sk;
+	newllc_core = llc_sk(newllc_ui->core_sk);
+	newllc_ui->link = newllc_core->link;
+	newllc_core->handler = newsk;
+	memcpy(&newllc_ui->addr, &llc_ui->addr, sizeof(newllc_ui->addr));
+	memcpy(newllc_ui->addr.sllc_dmac, newllc_core->daddr.mac, IFHWADDRLEN);
+	newllc_ui->addr.sllc_dsap = newllc_core->daddr.lsap;
+
+	/* put original socket back into a clean listen state. */
+	sk->state = TCP_LISTEN;
+	sk->ack_backlog--;
+	llc_ui_insert_socket(newsk);
+	skb->sk = NULL;
+frees:
+	kfree_skb(skb);
+out:
+	release_sock(sk);
+	return rc;
+}
+
+/**
+ * llc_ui_recvmsg - copy received data to the socket user.
+ * @sock: Socket to copy data from.
+ * @msg: Various user space related information.
+ * @size: Size of user buffer.
+ * @flags: User specified flags.
+ * @scm: Unknown.
+ *
+ * Copy received data to the socket user.
+ * Returns non-negative upon success, negative otherwise.
+ */
+static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, int size,
+			  int flags, struct scm_cookie *scm)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name;
+	struct sk_buff *skb;
+	int rc = -ENOMEM, copied = 0;
+	int noblock = flags & MSG_DONTWAIT;
+
+	lock_sock(sk);
+	skb = skb_recv_datagram(sk, flags, noblock, &rc);
+	if (!skb)
+		goto out;
+	copied = skb->len;
+	if (copied > size) {
+		copied = size;
+		msg->msg_flags |= MSG_TRUNC;
+	}
+	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	if (rc)
+		goto dgram_free;
+	if (uaddr)
+		memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
+	msg->msg_namelen = sizeof(*uaddr);
+dgram_free:
+	skb_free_datagram(sk, skb);	/* Free the datagram. */
+out:
+	release_sock(sk);
+	return rc ? : copied;
+}
+
+/**
+ * llc_ui_sendmsg - Transmit data provided by the socket user.
+ * @sock: Socket to transmit data from.
+ * @msg: Various user related information.
+ * @len: Length of data to transmit.
+ * @scm: Unknown.
+ *
+ * Transmit data provided by the socket user.
+ * Returns non-negative upon success, negative otherwise.
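+ *
+ * Example: the skb built here carries dev->hard_header_len plus
+ * llc_ui_header_len() bytes of headers in front of the user data, and
+ * anything that would exceed dev->mtu is rejected with -EMSGSIZE.
+ * A user-space datagram sender (sketch; fd, buf, len and dst are
+ * hypothetical, dst filled in as for bind(2) but naming the remote
+ * mac + sap):
+ *
+ *	if (sendto(fd, buf, len, 0,
+ *		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
+ *		perror("sendto");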
+ */
+static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, int len,
+			  struct scm_cookie *scm)
+{
+	struct sock *sk = sock->sk;
+	struct llc_ui_opt *llc_ui = llc_ui_sk(sk);
+	struct sockaddr_llc *addr = (struct sockaddr_llc *)msg->msg_name;
+	int flags = msg->msg_flags;
+	struct net_device *dev;
+	struct sk_buff *skb;
+	int rc = -EOPNOTSUPP, size = 0;
+
+	lock_sock(sk);
+	if (flags & ~MSG_DONTWAIT)
+		goto release;
+	rc = -EINVAL;
+	if (addr) {
+		if (msg->msg_namelen < sizeof(*addr))
+			goto release;
+	} else {
+		if (llc_ui_addr_null(&llc_ui->addr))
+			goto release;
+		addr = &llc_ui->addr;
+	}
+	/* must bind connection to sap if user hasn't done it. */
+	if (sk->zapped) {
+		/* bind to sap with null dev, exclusive. */
+		rc = llc_ui_autobind(sock, addr);
+		if (rc)
+			goto release;
+	}
+	if (!llc_ui->dev) {
+		rtnl_lock();
+		dev = dev_getbyhwaddr(addr->sllc_arphrd, addr->sllc_smac);
+		rtnl_unlock();
+		rc = -ENETUNREACH;
+		if (!dev)
+			goto release;
+	} else
+		dev = llc_ui->dev;
+	size = dev->hard_header_len + len + llc_ui_header_len(sk, addr);
+	rc = -EMSGSIZE;
+	if (size > dev->mtu)
+		goto release;
+	skb = sock_alloc_send_skb(sk, size, flags & MSG_DONTWAIT, &rc);
+	if (!skb)
+		goto release;
+	skb->sk = sk;
+	skb->dev = dev;
+	skb_reserve(skb, dev->hard_header_len + llc_ui_header_len(sk, addr));
+	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+	if (rc)
+		goto out;	/* free the skb below instead of leaking it */
+	if (addr->sllc_test) {
+		rc = llc_ui_send_llc1(llc_ui->sap, skb, addr, LLC_TEST_PRIM);
+		goto out;
+	}
+	if (addr->sllc_xid) {
+		rc = llc_ui_send_llc1(llc_ui->sap, skb, addr, LLC_XID_PRIM);
+		goto out;
+	}
+	if (sk->type == SOCK_DGRAM || addr->sllc_ua) {
+		rc = llc_ui_send_llc1(llc_ui->sap, skb, addr, LLC_DATAUNIT_PRIM);
+		goto out;
+	}
+	rc = -ENOPROTOOPT;
+	if (!(sk->type == SOCK_STREAM && !addr->sllc_ua))
+		goto out;
+	rc = -ENOTCONN;
+	if (!llc_ui->core_sk)
+		goto out;
+	rc = llc_ui_send_data(llc_ui->sap, sk, skb, addr);
+out:
+	if (rc)
+		skb_free_datagram(sk, skb);
+release:
+	release_sock(sk);
+	return rc ? : len;
+}
+
+/**
+ * llc_ui_getname - return the address info of a socket
+ * @sock: Socket to get address of.
+ * @uaddr: Address structure to return information.
+ * @uaddrlen: Length of address structure.
+ * @peer: Does user want local or remote address information.
+ *
+ * Return the address information of a socket.
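+ *
+ * Example (user-space sketch; fd is hypothetical): after a successful
+ * connect(2) the peer's mac + sap can be read back:
+ *
+ *	struct sockaddr_llc peer;
+ *	socklen_t plen = sizeof(peer);
+ *
+ *	if (!getpeername(fd, (struct sockaddr *)&peer, &plen))
+ *		printf("peer sap 0x%02x\n", peer.sllc_dsap);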
+ */
+static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
+			  int *uaddrlen, int peer)
+{
+	struct sockaddr_llc sllc;
+	struct sock *sk = sock->sk;
+	struct llc_ui_opt *llc_ui = llc_ui_sk(sk);
+	int rc = 0;
+
+	lock_sock(sk);
+	if (sk->zapped)
+		goto out;
+	*uaddrlen = sizeof(sllc);
+	memset(&sllc, 0, sizeof(sllc));	/* fields not set below stay zeroed */
+	if (peer) {
+		rc = -ENOTCONN;
+		if (sk->state != TCP_ESTABLISHED)
+			goto out;
+		if (llc_ui->dev)
+			sllc.sllc_arphrd = llc_ui->dev->type;
+		sllc.sllc_dsap = llc_sk(llc_ui->core_sk)->daddr.lsap;
+		memcpy(&sllc.sllc_dmac, &llc_sk(llc_ui->core_sk)->daddr.mac,
+		       IFHWADDRLEN);
+	} else {
+		rc = -EINVAL;
+		if (!llc_ui->sap)
+			goto out;
+		sllc.sllc_ssap = llc_ui->sap->laddr.lsap;
+
+		if (llc_ui->dev) {
+			sllc.sllc_arphrd = llc_ui->dev->type;
+			memcpy(&sllc.sllc_smac, &llc_ui->dev->dev_addr,
+			       IFHWADDRLEN);
+		}
+	}
+	rc = 0;
+	sllc.sllc_family = AF_LLC;
+	memcpy(uaddr, &sllc, sizeof(sllc));
+out:
+	release_sock(sk);
+	return rc;
+}
+
+/**
+ * llc_ui_ioctl - io controls for PF_LLC
+ * @sock: Socket to get/set info
+ * @cmd: command
+ * @arg: optional argument for cmd
+ *
+ * get/set info on llc sockets
+ */
+static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
+			unsigned long arg)
+{
+	return dev_ioctl(cmd, (void *)arg);
+}
+
+/**
+ * llc_ui_setsockopt - set various connection specific parameters.
+ * @sock: Socket to set options on.
+ * @level: Socket level user is requesting operations on.
+ * @optname: Operation name.
+ * @optval: User provided operation data.
+ * @optlen: Length of optval.
+ *
+ * Set various connection specific parameters.
+ */
+static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
+			     char *optval, int optlen)
+{
+	struct sock *sk = sock->sk;
+	struct llc_ui_opt *llc_ui = llc_ui_sk(sk);
+	struct llc_opt *llc_core;
+	int rc = -EINVAL, opt;
+
+	lock_sock(sk);
+	if (level != SOL_LLC || optlen != sizeof(int))
+		goto out;
+	rc = -ENOTCONN;
+	if (!llc_ui->core_sk)
+		goto out;
+	rc = get_user(opt, (int *)optval);
+	if (rc)
+		goto out;
+	rc = -EINVAL;
+	llc_core = llc_sk(llc_ui->core_sk);
+	switch (optname) {
+	case LLC_OPT_RETRY:
+		if (opt > LLC_OPT_MAX_RETRY)
+			goto out;
+		llc_core->n2 = opt;
+		break;
+	case LLC_OPT_SIZE:
+		if (opt > LLC_OPT_MAX_SIZE)
+			goto out;
+		llc_core->n1 = opt;
+		break;
+	case LLC_OPT_ACK_TMR_EXP:
+		if (opt > LLC_OPT_MAX_ACK_TMR_EXP)
+			goto out;
+		llc_core->ack_timer.expire = opt;
+		break;
+	case LLC_OPT_P_TMR_EXP:
+		if (opt > LLC_OPT_MAX_P_TMR_EXP)
+			goto out;
+		llc_core->pf_cycle_timer.expire = opt;
+		break;
+	case LLC_OPT_REJ_TMR_EXP:
+		if (opt > LLC_OPT_MAX_REJ_TMR_EXP)
+			goto out;
+		llc_core->rej_sent_timer.expire = opt;
+		break;
+	case LLC_OPT_BUSY_TMR_EXP:
+		if (opt > LLC_OPT_MAX_BUSY_TMR_EXP)
+			goto out;
+		llc_core->busy_state_timer.expire = opt;
+		break;
+	case LLC_OPT_TX_WIN:
+		if (opt > LLC_OPT_MAX_WIN)
+			goto out;
+		llc_core->k = opt;
+		break;
+	case LLC_OPT_RX_WIN:
+		if (opt > LLC_OPT_MAX_WIN)
+			goto out;
+		llc_core->rw = opt;
+		break;
+	default:
+		rc = -ENOPROTOOPT;
+		goto out;
+	}
+	rc = 0;
+out:
+	release_sock(sk);
+	return rc;
+}
+
+/**
+ * llc_ui_getsockopt - get connection specific socket info
+ * @sock: Socket to get information from.
+ * @level: Socket level user is requesting operations on.
+ * @optname: Operation name.
+ * @optval: Variable to return operation data in.
+ * @optlen: Length of optval.
+ *
+ * Get connection specific socket information.
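+ *
+ * Example (user-space sketch, pairing with llc_ui_setsockopt() above;
+ * the window value 7 is arbitrary, and both calls need a connected
+ * socket since they operate on the attached core socket):
+ *
+ *	int win = 7;
+ *	socklen_t len = sizeof(win);
+ *
+ *	setsockopt(fd, SOL_LLC, LLC_OPT_TX_WIN, &win, sizeof(win));
+ *	getsockopt(fd, SOL_LLC, LLC_OPT_RX_WIN, &win, &len);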
+ */ +static int llc_ui_getsockopt(struct socket *sock, int level, int optname, + char *optval, int *optlen) +{ + struct sock *sk = sock->sk; + struct llc_ui_opt *llc_ui = llc_ui_sk(sk); + struct llc_opt *llc_core; + int val = 0, len = 0, rc = -EINVAL; + + lock_sock(sk); + if (level != SOL_LLC) + goto out; + rc = -ENOTCONN; + if (!llc_ui->core_sk) + goto out; + rc = get_user(len, optlen); + if (rc) + goto out; + rc = -EINVAL; + if (len != sizeof(int)) + goto out; + llc_core = llc_sk(llc_ui->core_sk); + switch (optname) { + case LLC_OPT_RETRY: + val = llc_core->n2; break; + case LLC_OPT_SIZE: + val = llc_core->n1; break; + case LLC_OPT_ACK_TMR_EXP: + val = llc_core->ack_timer.expire; break; + case LLC_OPT_P_TMR_EXP: + val = llc_core->pf_cycle_timer.expire; break; + case LLC_OPT_REJ_TMR_EXP: + val = llc_core->rej_sent_timer.expire; break; + case LLC_OPT_BUSY_TMR_EXP: + val = llc_core->busy_state_timer.expire; break; + case LLC_OPT_TX_WIN: + val = llc_core->k; break; + case LLC_OPT_RX_WIN: + val = llc_core->rw; break; + default: + rc = -ENOPROTOOPT; + goto out; + } + rc = 0; + if (put_user(len, optlen) || copy_to_user(optval, &val, len)) + rc = -EFAULT; +out: + release_sock(sk); + return rc; +} + +/** + * llc_ui_ind_test - handle TEST indication + * @prim: Primitive block provided by the llc layer. + * + * handle TEST indication. + */ +static void llc_ui_ind_test(struct llc_prim_if_block *prim) +{ + struct llc_prim_test *prim_data = &prim->data->test; + struct sk_buff *skb = prim_data->skb; + struct sockaddr_llc *llc_ui = llc_ui_skb_cb(skb); + struct sock *sk = llc_ui_find_sk_by_addr(&prim_data->daddr, + &prim_data->saddr, skb->dev); + if (!sk) + goto out; + if (sk->state == TCP_LISTEN) + goto out_put; + /* save primitive for use by the user. */ + llc_ui->sllc_family = AF_LLC; + llc_ui->sllc_arphrd = skb->dev->type; + llc_ui->sllc_test = 1; + llc_ui->sllc_xid = 0; + llc_ui->sllc_ua = 0; + llc_ui->sllc_dsap = prim_data->daddr.lsap; + memcpy(llc_ui->sllc_dmac, prim_data->daddr.mac, IFHWADDRLEN); + llc_ui->sllc_ssap = prim_data->saddr.lsap; + memcpy(llc_ui->sllc_smac, prim_data->saddr.mac, IFHWADDRLEN); + /* queue skb to the user. */ + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); +out_put: + sock_put(sk); +out:; +} + +/** + * llc_ui_ind_xid - handle XID indication + * @prim: Primitive block provided by the llc layer. + * + * handle XID indication. + */ +static void llc_ui_ind_xid(struct llc_prim_if_block *prim) +{ + struct llc_prim_xid *prim_data = &prim->data->xid; + struct sk_buff *skb = prim_data->skb; + struct sockaddr_llc *llc_ui = llc_ui_skb_cb(skb); + struct sock *sk = llc_ui_find_sk_by_addr(&prim_data->daddr, + &prim_data->saddr, skb->dev); + if (!sk) + goto out; + if (sk->state == TCP_LISTEN) + goto out_put; + /* save primitive for use by the user. */ + llc_ui->sllc_family = AF_LLC; + llc_ui->sllc_arphrd = 0; + llc_ui->sllc_test = 0; + llc_ui->sllc_xid = 1; + llc_ui->sllc_ua = 0; + llc_ui->sllc_dsap = prim_data->daddr.lsap; + memcpy(llc_ui->sllc_dmac, prim_data->daddr.mac, IFHWADDRLEN); + llc_ui->sllc_ssap = prim_data->saddr.lsap; + memcpy(llc_ui->sllc_smac, prim_data->saddr.mac, IFHWADDRLEN); + /* queue skb to the user. */ + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); +out_put: + sock_put(sk); +out:; +} + +/** + * llc_ui_ind_dataunit - handle DATAUNIT indication + * @prim: Primitive block provided by the llc layer. + * + * handle DATAUNIT indication. 
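+ *
+ * Example: the sockaddr_llc saved into the skb control block here is
+ * what llc_ui_recvmsg() later returns as msg_name, so a receiver can
+ * tell connectionless data apart from TEST/XID traffic (user-space
+ * sketch; fd, buf and process_dataunit() are hypothetical):
+ *
+ *	struct sockaddr_llc from;
+ *	socklen_t flen = sizeof(from);
+ *	int n = recvfrom(fd, buf, sizeof(buf), 0,
+ *			 (struct sockaddr *)&from, &flen);
+ *
+ *	if (n >= 0 && from.sllc_ua)
+ *		process_dataunit(buf, n, from.sllc_smac);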
+ */
+static void llc_ui_ind_dataunit(struct llc_prim_if_block *prim)
+{
+	struct llc_prim_unit_data *prim_data = &prim->data->udata;
+	struct sk_buff *skb = prim_data->skb;
+	struct sockaddr_llc *llc_ui = llc_ui_skb_cb(skb);
+	struct sock *sk = llc_ui_find_sk_by_addr(&prim_data->daddr,
+						 &prim_data->saddr, skb->dev);
+	if (!sk)
+		goto out;
+	if (sk->state == TCP_LISTEN)
+		goto out_put;
+	/* save primitive for use by the user. */
+	llc_ui->sllc_family = AF_LLC;
+	llc_ui->sllc_arphrd = skb->dev->type;
+	llc_ui->sllc_test = 0;
+	llc_ui->sllc_xid = 0;
+	llc_ui->sllc_ua = 1;
+	llc_ui->sllc_dsap = prim_data->daddr.lsap;
+	memcpy(llc_ui->sllc_dmac, prim_data->daddr.mac, IFHWADDRLEN);
+	llc_ui->sllc_ssap = prim_data->saddr.lsap;
+	memcpy(llc_ui->sllc_smac, prim_data->saddr.mac, IFHWADDRLEN);
+	/* queue skb to the user. */
+	if (sock_queue_rcv_skb(sk, skb))
+		kfree_skb(skb);
+out_put:
+	sock_put(sk);
+out:;
+}
+
+/**
+ * llc_ui_ind_conn - handle CONNECT indication
+ * @prim: Primitive block provided by the llc layer.
+ *
+ * handle CONNECT indication.
+ */
+static void llc_ui_ind_conn(struct llc_prim_if_block *prim)
+{
+	struct llc_prim_conn *prim_data = &prim->data->conn;
+	struct sock* sk;
+	struct sk_buff *skb2;
+
+	llc_sk(prim_data->sk)->laddr.lsap = prim->sap->laddr.lsap;
+	sk = llc_ui_find_sk_by_addr(&llc_sk(prim_data->sk)->laddr,
+				    &prim_data->saddr, prim_data->dev);
+	if (!sk) {
+		dprintk("llc_ui_find_sk_by_addr failed\n");
+		goto out;
+	}
+	if (sk->type != SOCK_STREAM || sk->state != TCP_LISTEN)
+		goto out_put;
+	if (prim->data->conn.status)
+		goto out_put; /* bad status. */
+	/* give this connection a link number. */
+	llc_sk(prim_data->sk)->link =
+		llc_ui_next_link_no(llc_sk(prim_data->sk)->laddr.lsap);
+	skb2 = alloc_skb(0, GFP_ATOMIC);
+	if (!skb2)
+		goto out_put;
+	skb2->sk = prim_data->sk;
+	skb_queue_tail(&sk->receive_queue, skb2);
+	sk->state_change(sk);
+out_put:
+	sock_put(sk);
+out:;
+}
+
+/**
+ * llc_ui_ind_data - handle DATA indication
+ * @prim: Primitive block provided by the llc layer.
+ *
+ * handle DATA indication.
+ */
+static void llc_ui_ind_data(struct llc_prim_if_block *prim)
+{
+	struct llc_prim_data *prim_data = &prim->data->data;
+	struct sk_buff *skb = prim_data->skb;
+	struct sockaddr_llc *llc_ui = llc_ui_skb_cb(skb);
+	struct sock* sk = llc_sk(prim_data->sk)->handler;
+
+	if (!sk)
+		goto out;
+	sock_hold(sk);
+	if (sk->type != SOCK_STREAM || sk->state != TCP_ESTABLISHED)
+		goto out_put;
+	/* save primitive for use by the user. */
+	llc_ui->sllc_family = AF_LLC;
+	llc_ui->sllc_arphrd = skb->dev->type;
+	llc_ui->sllc_test = 0;
+	llc_ui->sllc_xid = 0;
+	llc_ui->sllc_ua = 0;
+	llc_ui->sllc_dsap = llc_ui_sk(sk)->sap->laddr.lsap;
+	memcpy(llc_ui->sllc_dmac, llc_sk(prim_data->sk)->laddr.mac,
+	       IFHWADDRLEN);
+	llc_ui->sllc_ssap = llc_sk(prim_data->sk)->daddr.lsap;
+	memcpy(llc_ui->sllc_smac, llc_sk(prim_data->sk)->daddr.mac,
+	       IFHWADDRLEN);
+	/* queue skb to the user. */
+	if (sock_queue_rcv_skb(sk, skb)) {
+		dprintk("sock_queue_rcv_skb failed!\n");
+		kfree_skb(skb);
+	}
+out_put:
+	sock_put(sk);
+out:;
+}
+
+/**
+ * llc_ui_ind_disc - handle DISC indication
+ * @prim: Primitive block provided by the llc layer.
+ *
+ * handle DISC indication.
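+ *
+ * Example: once this has run the socket behaves like any dead stream
+ * socket; anyone blocked in poll(2) or recv(2) is woken through
+ * sk->state_change(), and with core_sk cleared a later send attempt
+ * fails (user-space sketch):
+ *
+ *	if (send(fd, buf, len, 0) < 0 && errno == ENOTCONN)
+ *		close(fd);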
+ */ +static void llc_ui_ind_disc(struct llc_prim_if_block *prim) +{ + struct llc_prim_disc *prim_data = &prim->data->disc; + struct sock* sk = llc_sk(prim_data->sk)->handler; + + if (!sk) + goto out; + sock_hold(sk); + if (sk->type != SOCK_STREAM || sk->state != TCP_ESTABLISHED) + goto out_put; + llc_ui_sk(sk)->core_sk = NULL; + sk->shutdown = SHUTDOWN_MASK; + sk->socket->state = SS_UNCONNECTED; + sk->state = TCP_CLOSE; + if (!sk->dead) { + sk->state_change(sk); + sk->dead = 1; + } +out_put: + sock_put(sk); +out:; +} + +/** + * llc_ui_indicate - LLC user interface hook into the LLC layer. + * @prim: Primitive block provided by the llc layer. + * + * LLC user interface hook into the LLC layer, every llc_ui sap references + * this function as its indicate handler. + * Always returns 0 to indicate reception of primitive. + */ +static int llc_ui_indicate(struct llc_prim_if_block *prim) +{ + switch (prim->prim) { + case LLC_TEST_PRIM: + llc_ui_ind_test(prim); break; + case LLC_XID_PRIM: + llc_ui_ind_xid(prim); break; + case LLC_DATAUNIT_PRIM: + llc_ui_ind_dataunit(prim); break; + case LLC_CONN_PRIM: + llc_ui_ind_conn(prim); break; + case LLC_DATA_PRIM: + llc_ui_ind_data(prim); break; + case LLC_DISC_PRIM: + llc_ui_ind_disc(prim); break; + case LLC_RESET_PRIM: + case LLC_FLOWCONTROL_PRIM: + default: break; + } + return 0; +} + +/** + * llc_ui_conf_conn - handle CONN confirm. + * @prim: Primitive block provided by the llc layer. + * + * handle CONN confirm. + */ +static void llc_ui_conf_conn(struct llc_prim_if_block *prim) +{ + struct llc_prim_conn *prim_data = &prim->data->conn; + struct llc_opt *llc_core = llc_sk(prim_data->sk); + struct llc_ui_opt *llc_ui = llc_ui_sk(prim_data->sk); + struct sock* sk = llc_core->handler; + + if (!sk) { + dprintk("llc_core->handler == NULL!\n"); + goto out; + } + sock_hold(sk); + if (sk->type != SOCK_STREAM || sk->state != TCP_SYN_SENT) + goto out_put; + if (!prim->data->conn.status) { + sk->socket->state = SS_CONNECTED; + sk->state = TCP_ESTABLISHED; + llc_ui->core_sk = prim_data->sk; + } else { + dprintk("prim->data->conn.status = %d\n", + prim->data->conn.status); + sk->socket->state = SS_UNCONNECTED; + sk->state = TCP_CLOSE; + llc_ui->core_sk = NULL; + } + sk->state_change(sk); +out_put: + sock_put(sk); +out:; +} + +/** + * llc_ui_conf_data - handle DATA confirm. + * @prim: Primitive block provided by the llc layer. + * + * handle DATA confirm. + */ +static void llc_ui_conf_data(struct llc_prim_if_block *prim) +{ + struct llc_prim_data *prim_data = &prim->data->data; + struct sock* sk = llc_sk(prim_data->sk)->handler; + + if (sk) + wake_up(sk->sleep); +} + +/** + * llc_ui_conf_disc - handle DISC confirm. + * @prim: Primitive block provided by the llc layer. + * + * handle DISC confirm. + */ +static void llc_ui_conf_disc(struct llc_prim_if_block *prim) +{ + struct llc_prim_disc *prim_data = &prim->data->disc; + struct sock* sk = llc_sk(prim_data->sk)->handler; + + if (!sk) + goto out; + sock_hold(sk); + if (sk->type != SOCK_STREAM || sk->state != TCP_CLOSING) + goto out_put; + llc_ui_sk(sk)->core_sk = NULL; + sk->socket->state = SS_UNCONNECTED; + sk->state = TCP_CLOSE; + sk->state_change(sk); +out_put: + sock_put(sk); +out:; +} + +/** + * llc_ui_confirm - LLC user interface hook into the LLC layer + * @prim: Primitive block provided by the llc layer. + * + * LLC user interface hook into the LLC layer, every llc_ui sap references + * this function as its confirm handler. + * Always returns 0 to indicate reception of primitive. 
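+ *
+ * Example: the hook is registered together with llc_ui_indicate()
+ * whenever a SAP is opened on behalf of an llc_ui socket, as
+ * llc_ui_autobind() does:
+ *
+ *	sap = llc_sap_open(llc_ui_indicate, llc_ui_confirm,
+ *			   addr->sllc_ssap);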
+ */
+static int llc_ui_confirm(struct llc_prim_if_block *prim)
+{
+	switch (prim->prim) {
+	case LLC_CONN_PRIM:
+		llc_ui_conf_conn(prim); break;
+	case LLC_DATA_PRIM:
+		llc_ui_conf_data(prim); break;
+	case LLC_DISC_PRIM:
+		llc_ui_conf_disc(prim); break;
+	case LLC_RESET_PRIM: break;
+	default:
+		printk(KERN_ERR __FUNCTION__ ": unknown prim %d\n",
+		       prim->prim);
+		break;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+/**
+ * llc_ui_get_info - return info to procfs
+ * @buffer: where to put the formatted output
+ * @start: starting from
+ * @offset: offset into buffer.
+ * @length: size of the buffer
+ *
+ * Output the local llc_ui socket list to the caller.
+ * Returns the length of data written to the buffer.
+ */
+static int llc_ui_get_info(char *buffer, char **start, off_t offset, int length)
+{
+	off_t pos = 0;
+	off_t begin = 0;
+	struct sock *s;
+	int len = sprintf(buffer, "SocketID SKt Mc local_mac_sap\t "
+			  "remote_mac_sap\t tx_queue rx_queue st uid "
+			  "link_no\n");
+	/* Output the LLC socket data for the /proc filesystem */
+	read_lock_bh(&llc_ui_sockets_lock);
+	for (s = llc_ui_sockets; s; s = s->next) {
+		struct llc_ui_opt *llc_ui = llc_ui_sk(s);
+		len += sprintf(buffer + len, "%p %02X %02X ", s, s->type,
+			       !llc_ui_mac_null(llc_ui->addr.sllc_mmac));
+		if (llc_ui->sap) {
+			if (llc_ui->dev &&
+			    llc_ui_mac_null(llc_ui->addr.sllc_mmac))
+				len += sprintf(buffer + len,
+					       "%02X:%02X:%02X:%02X:%02X:%02X",
+					       llc_ui->dev->dev_addr[0],
+					       llc_ui->dev->dev_addr[1],
+					       llc_ui->dev->dev_addr[2],
+					       llc_ui->dev->dev_addr[3],
+					       llc_ui->dev->dev_addr[4],
+					       llc_ui->dev->dev_addr[5]);
+			else {
+				if (!llc_ui_mac_null(llc_ui->addr.sllc_mmac))
+					len += sprintf(buffer + len,
+						"%02X:%02X:%02X:%02X:%02X:%02X",
+						llc_ui->addr.sllc_mmac[0],
+						llc_ui->addr.sllc_mmac[1],
+						llc_ui->addr.sllc_mmac[2],
+						llc_ui->addr.sllc_mmac[3],
+						llc_ui->addr.sllc_mmac[4],
+						llc_ui->addr.sllc_mmac[5]);
+				else
+					len += sprintf(buffer + len,
+						       "00:00:00:00:00:00");
+			}
+			len += sprintf(buffer + len, "@%02X ",
+				       llc_ui->sap->laddr.lsap);
+		} else
+			len += sprintf(buffer + len, "00:00:00:00:00:00@00 ");
+		len += sprintf(buffer + len,
+			       "%02X:%02X:%02X:%02X:%02X:%02X@%02X "
+			       "%08X:%08X %02X %-3d ",
+			       llc_ui->addr.sllc_dmac[0],
+			       llc_ui->addr.sllc_dmac[1],
+			       llc_ui->addr.sllc_dmac[2],
+			       llc_ui->addr.sllc_dmac[3],
+			       llc_ui->addr.sllc_dmac[4],
+			       llc_ui->addr.sllc_dmac[5],
+			       llc_ui->addr.sllc_dsap,
+			       atomic_read(&s->wmem_alloc),
+			       atomic_read(&s->rmem_alloc), s->state,
+			       SOCK_INODE(s->socket)->i_uid);
+		if (llc_ui->core_sk)
+			len += sprintf(buffer + len, "%-7d\n",
+				       llc_sk(llc_ui->core_sk)->link);
+		else
+			len += sprintf(buffer + len, "no_link\n");
+		/* If we are still dumping unwanted data, discard the record */
+		pos = begin + len;
+
+		if (pos < offset) {
+			len = 0;	/* Keep dumping into the buffer start */
+			begin = pos;
+		}
+		if (pos > offset + length)	/* We have dumped enough */
+			break;
+	}
+	read_unlock_bh(&llc_ui_sockets_lock);
+
+	/* The data in question runs from begin to begin + len */
+	*start = buffer + offset - begin;	/* Start of wanted data */
+	len -= offset - begin;	/* Remove unwanted header data from length */
+	if (len > length)
+		len = length;	/* Remove unwanted tail data from length */
+	return len;
+}
+#endif /* CONFIG_PROC_FS */
+
+static struct net_proto_family llc_ui_family_ops = {
+	.family = PF_LLC,
+	.create = llc_ui_create,
+};
+
+static struct proto_ops SOCKOPS_WRAPPED(llc_ui_ops) = {
+	.family = PF_LLC,
+	.release = llc_ui_release,
+	.bind = llc_ui_bind,
+	.connect = llc_ui_connect,
+	.socketpair =
sock_no_socketpair, + .accept = llc_ui_accept, + .getname = llc_ui_getname, + .poll = datagram_poll, + .ioctl = llc_ui_ioctl, + .listen = llc_ui_listen, + .shutdown = llc_ui_shutdown, + .setsockopt = llc_ui_setsockopt, + .getsockopt = llc_ui_getsockopt, + .sendmsg = llc_ui_sendmsg, + .recvmsg = llc_ui_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +}; + +#include +SOCKOPS_WRAP(llc_ui, PF_LLC); + +static char llc_ui_banner[] __initdata = + KERN_INFO "NET4.0 IEEE 802.2 User Interface SAPs, Jay Schulist, 2001\n"; + +int __init llc_ui_init(void) +{ + llc_ui_sap_last_autoport = LLC_SAP_DYN_START; + sock_register(&llc_ui_family_ops); + proc_net_create("llc", 0, llc_ui_get_info); + printk(llc_ui_banner); + return 0; +} + +void __exit llc_ui_exit(void) +{ + proc_net_remove("llc"); + sock_unregister(PF_LLC); +} diff -Nru a/net/llc/llc_stat.c b/net/llc/llc_stat.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/net/llc/llc_stat.c Tue Jun 18 19:12:03 2002 @@ -0,0 +1,218 @@ +/* + * llc_stat.c - Implementation of LLC station component state machine + * transitions + * Copyright (c) 1997 by Procom Technology, Inc. + * 2001 by Arnaldo Carvalho de Melo + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include + +/* COMMON STATION STATE transitions */ + +/* dummy last-transition indicator; common to all state transition groups + * last entry for this state + * all members are zeros, .bss zeroes it + */ +static struct llc_station_state_trans llc_stat_state_trans_n; + +/* DOWN STATE transitions */ + +/* state transition for LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK event */ +static llc_station_action_t llc_stat_down_state_actions_1[] = { + [0] = llc_station_ac_start_ack_timer, + [1] = llc_station_ac_set_retry_cnt_0, + [2] = llc_station_ac_set_xid_r_cnt_0, + [3] = llc_station_ac_send_null_dsap_xid_c, + [4] = NULL, +}; + +static struct llc_station_state_trans llc_stat_down_state_trans_1 = { + .ev = llc_stat_ev_enable_with_dup_addr_check, + .next_state = LLC_STATION_STATE_DUP_ADDR_CHK, + .ev_actions = llc_stat_down_state_actions_1, +}; + +/* state transition for LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK event */ +static llc_station_action_t llc_stat_down_state_actions_2[] = { + [0] = llc_station_ac_report_status, /* STATION UP */ + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_down_state_trans_2 = { + .ev = llc_stat_ev_enable_without_dup_addr_check, + .next_state = LLC_STATION_STATE_UP, + .ev_actions = llc_stat_down_state_actions_2, +}; + +/* array of pointers; one to each transition */ +static struct llc_station_state_trans *llc_stat_dwn_state_trans[] = { + [0] = &llc_stat_down_state_trans_1, + [1] = &llc_stat_down_state_trans_2, + [2] = &llc_stat_state_trans_n, +}; + +/* UP STATE transitions */ +/* state transition for LLC_STATION_EV_DISABLE_REQ event */ +static llc_station_action_t llc_stat_up_state_actions_1[] = { + [0] = llc_station_ac_report_status, /* STATION DOWN */ + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_up_state_trans_1 = { + .ev = llc_stat_ev_disable_req, + .next_state = LLC_STATION_STATE_DOWN, + .ev_actions = llc_stat_up_state_actions_1, +}; + +/* state transition for 
LLC_STATION_EV_RX_NULL_DSAP_XID_C event */ +static llc_station_action_t llc_stat_up_state_actions_2[] = { + [0] = llc_station_ac_send_xid_r, + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_up_state_trans_2 = { + .ev = llc_stat_ev_rx_null_dsap_xid_c, + .next_state = LLC_STATION_STATE_UP, + .ev_actions = llc_stat_up_state_actions_2, +}; + +/* state transition for LLC_STATION_EV_RX_NULL_DSAP_TEST_C event */ +static llc_station_action_t llc_stat_up_state_actions_3[] = { + [0] = llc_station_ac_send_test_r, + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_up_state_trans_3 = { + .ev = llc_stat_ev_rx_null_dsap_test_c, + .next_state = LLC_STATION_STATE_UP, + .ev_actions = llc_stat_up_state_actions_3, +}; + +/* array of pointers; one to each transition */ +static struct llc_station_state_trans *llc_stat_up_state_trans [] = { + [0] = &llc_stat_up_state_trans_1, + [1] = &llc_stat_up_state_trans_2, + [2] = &llc_stat_up_state_trans_3, + [3] = &llc_stat_state_trans_n, +}; + +/* DUP ADDR CHK STATE transitions */ +/* state transition for LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ + * event + */ +static llc_station_action_t llc_stat_dupaddr_state_actions_1[] = { + [0] = llc_station_ac_inc_xid_r_cnt_by_1, + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_dupaddr_state_trans_1 = { + .ev = llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq, + .next_state = LLC_STATION_STATE_DUP_ADDR_CHK, + .ev_actions = llc_stat_dupaddr_state_actions_1, +}; + +/* state transition for LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ + * event + */ +static llc_station_action_t llc_stat_dupaddr_state_actions_2[] = { + [0] = llc_station_ac_report_status, /* DUPLICATE ADDRESS FOUND */ + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_dupaddr_state_trans_2 = { + .ev = llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq, + .next_state = LLC_STATION_STATE_DOWN, + .ev_actions = llc_stat_dupaddr_state_actions_2, +}; + +/* state transition for LLC_STATION_EV_RX_NULL_DSAP_XID_C event */ +static llc_station_action_t llc_stat_dupaddr_state_actions_3[] = { + [0] = llc_station_ac_send_xid_r, + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_dupaddr_state_trans_3 = { + .ev = llc_stat_ev_rx_null_dsap_xid_c, + .next_state = LLC_STATION_STATE_DUP_ADDR_CHK, + .ev_actions = llc_stat_dupaddr_state_actions_3, +}; + +/* state transition for LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY + * event + */ +static llc_station_action_t llc_stat_dupaddr_state_actions_4[] = { + [0] = llc_station_ac_start_ack_timer, + [1] = llc_station_ac_inc_retry_cnt_by_1, + [2] = llc_station_ac_set_xid_r_cnt_0, + [3] = llc_station_ac_send_null_dsap_xid_c, + [4] = NULL, +}; + +static struct llc_station_state_trans llc_stat_dupaddr_state_trans_4 = { + .ev = llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry, + .next_state = LLC_STATION_STATE_DUP_ADDR_CHK, + .ev_actions = llc_stat_dupaddr_state_actions_4 +}; + +/* state transition for LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY + * event + */ +static llc_station_action_t llc_stat_dupaddr_state_actions_5[] = { + [0] = llc_station_ac_report_status, /* STATION UP */ + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_dupaddr_state_trans_5 = { + .ev = llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry, + .next_state = LLC_STATION_STATE_UP, + .ev_actions = llc_stat_dupaddr_state_actions_5, +}; + +/* state transition for LLC_STATION_EV_DISABLE_REQ event */ +static llc_station_action_t llc_stat_dupaddr_state_actions_6[] = { + [0] 
= llc_station_ac_report_status, /* STATION DOWN */ + [1] = NULL, +}; + +static struct llc_station_state_trans llc_stat_dupaddr_state_trans_6 = { + .ev = llc_stat_ev_disable_req, + .next_state = LLC_STATION_STATE_DOWN, + .ev_actions = llc_stat_dupaddr_state_actions_6, +}; + +/* array of pointers; one to each transition */ +static struct llc_station_state_trans *llc_stat_dupaddr_state_trans[] = { + [0] = &llc_stat_dupaddr_state_trans_6, /* Request */ + [1] = &llc_stat_dupaddr_state_trans_4, /* Timer */ + [2] = &llc_stat_dupaddr_state_trans_5, + [3] = &llc_stat_dupaddr_state_trans_1, /* Receive frame */ + [4] = &llc_stat_dupaddr_state_trans_2, + [5] = &llc_stat_dupaddr_state_trans_3, + [6] = &llc_stat_state_trans_n +}; + +struct llc_station_state llc_station_state_table[LLC_NBR_STATION_STATES] = { + { + .curr_state = LLC_STATION_STATE_DOWN, + .transitions = llc_stat_dwn_state_trans, + }, + { + .curr_state = LLC_STATION_STATE_DUP_ADDR_CHK, + .transitions = llc_stat_dupaddr_state_trans, + }, + { + .curr_state = LLC_STATION_STATE_UP, + .transitions = llc_stat_up_state_trans, + } +}; diff -Nru a/net/netsyms.c b/net/netsyms.c --- a/net/netsyms.c Tue Jun 18 19:12:02 2002 +++ b/net/netsyms.c Tue Jun 18 19:12:02 2002 @@ -444,6 +444,7 @@ #endif /* CONFIG_INET */ #ifdef CONFIG_TR +EXPORT_SYMBOL(tr_source_route); EXPORT_SYMBOL(tr_type_trans); #endif @@ -462,6 +463,7 @@ EXPORT_SYMBOL(__dev_get_by_index); EXPORT_SYMBOL(dev_get_by_name); EXPORT_SYMBOL(__dev_get_by_name); +EXPORT_SYMBOL(dev_getbyhwaddr); EXPORT_SYMBOL(netdev_finish_unregister); EXPORT_SYMBOL(netdev_set_master); EXPORT_SYMBOL(eth_type_trans); diff -Nru a/net/socket.c b/net/socket.c --- a/net/socket.c Tue Jun 18 19:12:02 2002 +++ b/net/socket.c Tue Jun 18 19:12:02 2002 @@ -1750,8 +1750,8 @@ int len, cpu; int counter = 0; - for (cpu=0; cpu #include #include +#include #include #include #define SNDRV_GET_ID