## Automatically generated incremental diff ## From: linux-2.5.70-bk13 ## To: linux-2.5.70-bk14 ## Robot: $Id: make-incremental-diff,v 1.11 2002/02/20 02:59:33 hpa Exp $ diff -urN linux-2.5.70-bk13/Documentation/DocBook/Makefile linux-2.5.70-bk14/Documentation/DocBook/Makefile --- linux-2.5.70-bk13/Documentation/DocBook/Makefile 2003-06-09 04:41:53.000000000 -0700 +++ linux-2.5.70-bk14/Documentation/DocBook/Makefile 2003-06-09 04:42:01.000000000 -0700 @@ -39,8 +39,8 @@ ### #External programs used -KERNELDOC=$(objtree)/scripts/kernel-doc -DOCPROC=$(objtree)/scripts/docproc +KERNELDOC = scripts/kernel-doc +DOCPROC = scripts/docproc ### # DOCPROC is used for two purposes: @@ -50,14 +50,14 @@ # The following rules are used to generate the .sgml documentation # required to generate the final targets. (ps, pdf, html). quiet_cmd_docproc = DOCPROC $@ -cmd_docproc = $(DOCPROC) doc $< >$@ + cmd_docproc = $(DOCPROC) doc $< >$@ define rule_docproc - set -e - $(if $($(quiet)cmd_$(1)),echo ' $($(quiet)cmd_$(1))';) - $(cmd_$(1)); \ - ( \ - echo 'cmd_$@ := $(cmd_$(1))'; \ - echo $@: `$(DOCPROC) depend $<`; \ + set -e; \ + $(if $($(quiet)cmd_$(1)),echo ' $($(quiet)cmd_$(1))';) \ + $(cmd_$(1)); \ + ( \ + echo 'cmd_$@ := $(cmd_$(1))'; \ + echo $@: `$(DOCPROC) depend $<`; \ ) > $(dir $@).$(notdir $@).cmd endef @@ -96,41 +96,55 @@ ### # Rules to generate postscript, PDF and HTML # db2html creates a directory. Generate a html file used for timestamp + +quiet_cmd_db2ps = DB2PS $@ + cmd_db2ps = db2ps -o $(dir $@) $< %.ps : %.sgml @(which db2ps > /dev/null 2>&1) || \ (echo "*** You need to install DocBook stylesheets ***"; \ exit 1) - $(call do_cmd,DB2PS $@,db2ps -o $(dir $@) $<) + $(call cmd,db2ps) +quiet_cmd_db2pdf = DB2PDF $@ + cmd_db2pdf = db2pdf -o $(dir $@) $< %.pdf : %.sgml @(which db2pdf > /dev/null 2>&1) || \ (echo "*** You need to install DocBook stylesheets ***"; \ exit 1) - $(call do_cmd,DB2PDF $@,db2pdf -o $(dir $@) $<) + $(call cmd,db2pdf) + +quiet_cmd_db2html = DB2HTML $@ + cmd_db2html = db2html -o $(patsubst %.html,%,$@) $< && \ + echo ' \ + Goto $(patsubst %.html,%,$(notdir $@))

' > $@ %.html: %.sgml @(which db2html > /dev/null 2>&1) || \ (echo "*** You need to install DocBook stylesheets ***"; \ exit 1) @rm -rf $@ $(patsubst %.html,%,$@) - $(call do_cmd,DB2HTML $@,db2html -o $(patsubst %.html,%,$@) $< && \ - echo '\ - Goto $(patsubst %.html,%,$(notdir $@))

' > $@) + $(call cmd,db2html) @if [ ! -z "$(PNG-$(basename $(notdir $@)))" ]; then \ cp $(PNG-$(basename $(notdir $@))) $(patsubst %.html,%,$@); fi ### # Rules to generate postscripts and PNG imgages from .fig format files +quiet_cmd_fig2eps = FIG2EPS $@ + cmd_fig2eps = fig2dev -Leps $< $@ + %.eps: %.fig - $(call do_cmd,FIG2DEV -Leps $@,fig2dev -Leps $< $@) + $(call cmd,fig2eps) + +quiet_cmd_fig2png = FIG2PNG $@ + cmd_fig2png = fig2dev -Lpng $< $@ %.png: %.fig - $(call do_cmd,FIG2DEV -Lpng $@,fig2dev -Lpng $< $@) + $(call cmd,fig2png) ### # Rule to convert a .c file to inline SGML documentation %.sgml: %.c - @echo ' Generating $@' + @echo ' GEN $@' @( \ echo ""; \ expand --tabs=8 < $< | \ diff -urN linux-2.5.70-bk13/Documentation/DocBook/kernel-api.tmpl linux-2.5.70-bk14/Documentation/DocBook/kernel-api.tmpl --- linux-2.5.70-bk13/Documentation/DocBook/kernel-api.tmpl 2003-05-26 18:00:20.000000000 -0700 +++ linux-2.5.70-bk14/Documentation/DocBook/kernel-api.tmpl 2003-06-09 04:42:01.000000000 -0700 @@ -79,6 +79,7 @@ String Manipulation !Ilib/string.c +!Elib/string.c Bit Operations !Iinclude/asm-i386/bitops.h @@ -176,7 +177,7 @@ !Edrivers/pci/pci.c PCI Hotplug Support Library -!Edrivers/hotplug/pci_hotplug_core.c +!Edrivers/pci/hotplug/pci_hotplug_core.c MCA Architecture MCA Device Functions diff -urN linux-2.5.70-bk13/Documentation/kbuild/makefiles.txt linux-2.5.70-bk14/Documentation/kbuild/makefiles.txt --- linux-2.5.70-bk13/Documentation/kbuild/makefiles.txt 2003-05-26 18:00:45.000000000 -0700 +++ linux-2.5.70-bk14/Documentation/kbuild/makefiles.txt 2003-06-09 04:42:01.000000000 -0700 @@ -11,7 +11,7 @@ --- 3.2 Built-in object goals - obj-y --- 3.3 Loadable module goals - obj-m --- 3.4 Objects which export symbols - --- 3.5 Library file goals - L_TARGET + --- 3.5 Library file goals - lib-y --- 3.6 Descending down in directories --- 3.7 Compilation flags --- 3.8 Command line dependency @@ -214,20 +214,33 @@ modules exporting symbols. See also Documentation/modules.txt. ---- 3.5 Library file goals - L_TARGET +--- 3.5 Library file goals - lib-y - Instead of building a built-in.o file, you may also - build an archive which again contains objects listed in $(obj-y). - This is normally not necessary and only used in lib/ and - arch/$(ARCH)/lib directories. - Only the name lib.a is allowed. + Objects listed with obj-* is used for modules or + are combined in a built-in.o for that specific directory. + There is also the possibility to list objects that will + be included in a library, lib.a. + All objects listed with lib-y are combined in a single + library for that directory. + Objects that are listed in obj-y and additional listed in + lib-y will not be included in the library, since they will anyway + be accessible. + For consistency objects listed in lib-m will be included in lib.a. + + Note that the same kbuild makefile may list files to be built-in + and to be part of a library. Therefore the same directory + may contain both a built-in.o and a lib.a file. Example: #arch/i386/lib/Makefile - L_TARGET := lib.a - obj-y := checksum.o delay.o + lib-y := checksum.o delay.o This will create a library lib.a based on checksum.o and delay.o. + For kbuild to actually recognize that there is a lib.a being build + the directory shall be listed in libs-y. + See also "6.3 List directories to visit when descending". + + Usage of lib-y is normally restricted to lib/ and arch/*/lib. 
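(Illustrative sketch, not part of the patch: the two fragments below show how the lib-y list from section 3.5 pairs with the libs-y list referred to in section 6.3. The arch/$(ARCH) paths are placeholders; only checksum.o and delay.o come from the example in the text above.)

	#arch/$(ARCH)/lib/Makefile
	# Objects listed here are archived into lib.a for this directory.
	# Anything also listed in obj-y is linked into built-in.o and is
	# therefore left out of the archive.
	lib-y := checksum.o delay.o

	#arch/$(ARCH)/Makefile
	# The directory must also be listed in libs-y so the top-level
	# Makefile knows where a lib.a archive can be found.
	libs-y += arch/$(ARCH)/lib/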
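(Illustrative sketch, not part of the patch: the Documentation/DocBook/Makefile hunks earlier in this diff replace the old do_cmd calls with the standard kbuild quiet_cmd_*/cmd_* pairs invoked via $(call cmd,...). The rule below shows the bare pattern only inside a kbuild makefile; foo, foo-tool and the .in/.out suffixes are invented names.)

	# In quiet builds kbuild echoes $(quiet_cmd_foo) as a short status
	# line and then runs $(cmd_foo); with V=1 the full command is shown.
	quiet_cmd_foo = FOO     $@
	      cmd_foo = foo-tool -o $@ $<

	%.out: %.in
		$(call cmd,foo)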
--- 3.6 Descending down in directories @@ -727,7 +740,7 @@ head-y, init-y, core-y, libs-y, drivers-y, net-y $(head-y) list objects to be linked first in vmlinux. - $(libs-y) list directories where a libs.a archive can be located. + $(libs-y) list directories where a lib.a archive can be located. The rest list directories where a built-in.o object file can be located. $(init-y) objects will be located after $(head-y). diff -urN linux-2.5.70-bk13/MAINTAINERS linux-2.5.70-bk14/MAINTAINERS --- linux-2.5.70-bk13/MAINTAINERS 2003-06-09 04:41:53.000000000 -0700 +++ linux-2.5.70-bk14/MAINTAINERS 2003-06-09 04:42:02.000000000 -0700 @@ -395,27 +395,6 @@ L: pcihpd-discuss@lists.sourceforge.net S: Supported -COMPAQ FIBRE CHANNEL 64-bit/66MHz PCI non-intelligent HBA -P: Amy Vanzant-Hodge -M: Amy Vanzant-Hodge (fibrechannel@compaq.com) -L: compaqandlinux@cpqlin.van-dijk.net -W: ftp.compaq.com/pub/products/drivers/linux -S: Supported - -COMPAQ SMART2 RAID DRIVER -P: Charles White -M: Charles White -L: compaqandlinux@cpqlin.van-dijk.net -W: ftp.compaq.com/pub/products/drivers/linux -S: Supported - -COMPAQ SMART CISS RAID DRIVER -P: Charles White -M: Charles White -L: compaqandlinux@cpqlin.van-dijk.net -W: ftp.compaq.com/pub/products/drivers/linux -S: Supported - COMPUTONE INTELLIPORT MULTIPORT CARD P: Michael H. Warfield M: Michael H. Warfield @@ -792,6 +771,27 @@ L: linux-hippi@sunsite.dk S: Maintained +HP (was COMPAQ) FIBRE CHANNEL 64-bit/66MHz PCI non-intelligent HBA +P: Stephen Cameron +M: arrays@hp.com +M: steve.cameron@hp.com +L: cpqfc-discuss@lists.sourceforge.net +S: Odd Fixes + +HP (was COMPAQ) SMART2 RAID DRIVER +P: Stephen Cameron +M: arrays@hp.com +M: steve.cameron@hp.com +L: cpqarray-discuss@lists.sourceforge.net +S: Odd Fixes + +HP (was COMPAQ) SMART CISS RAID DRIVER +P: Stephen Cameron +M: arrays@hp.com +M: steve.cameron@hp.com +L: cciss-discuss@lists.sourceforge.net +S: Supported + HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series P: Jaroslav Kysela M: perex@suse.cz diff -urN linux-2.5.70-bk13/Makefile linux-2.5.70-bk14/Makefile --- linux-2.5.70-bk13/Makefile 2003-06-09 04:41:53.000000000 -0700 +++ linux-2.5.70-bk14/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 5 SUBLEVEL = 70 -EXTRAVERSION = -bk13 +EXTRAVERSION = -bk14 # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff -urN linux-2.5.70-bk13/arch/alpha/kernel/asm-offsets.c linux-2.5.70-bk14/arch/alpha/kernel/asm-offsets.c --- linux-2.5.70-bk13/arch/alpha/kernel/asm-offsets.c 2003-05-26 18:01:01.000000000 -0700 +++ linux-2.5.70-bk14/arch/alpha/kernel/asm-offsets.c 2003-06-09 04:42:02.000000000 -0700 @@ -31,6 +31,7 @@ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); BLANK(); + DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs)); DEFINE(PT_PTRACED, PT_PTRACED); DEFINE(CLONE_VM, CLONE_VM); DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); diff -urN linux-2.5.70-bk13/arch/alpha/kernel/head.S linux-2.5.70-bk14/arch/alpha/kernel/head.S --- linux-2.5.70-bk13/arch/alpha/kernel/head.S 2003-05-26 18:00:20.000000000 -0700 +++ linux-2.5.70-bk14/arch/alpha/kernel/head.S 2003-06-09 04:42:02.000000000 -0700 @@ -9,6 +9,7 @@ #include #include +#include .globl swapper_pg_dir .globl _stext @@ -25,7 +26,7 @@ /* We need to get current_task_info loaded up... */ lda $8,init_thread_union /* ... and find our stack ... */ - lda $30,0x4000($8) + lda $30,0x4000 - SIZEOF_PT_REGS($8) /* ... and then we can start the kernel. 
*/ jsr $26,start_kernel call_pal PAL_halt diff -urN linux-2.5.70-bk13/arch/alpha/kernel/systbls.S linux-2.5.70-bk14/arch/alpha/kernel/systbls.S --- linux-2.5.70-bk13/arch/alpha/kernel/systbls.S 2003-06-09 04:41:53.000000000 -0700 +++ linux-2.5.70-bk14/arch/alpha/kernel/systbls.S 2003-06-09 04:42:02.000000000 -0700 @@ -442,6 +442,7 @@ .quad sys_clock_gettime /* 420 */ .quad sys_clock_getres .quad sys_clock_nanosleep + .quad sys_semtimedop .size sys_call_table, . - sys_call_table .type sys_call_table, @object diff -urN linux-2.5.70-bk13/arch/alpha/kernel/traps.c linux-2.5.70-bk14/arch/alpha/kernel/traps.c --- linux-2.5.70-bk13/arch/alpha/kernel/traps.c 2003-05-26 18:00:46.000000000 -0700 +++ linux-2.5.70-bk14/arch/alpha/kernel/traps.c 2003-06-09 04:42:02.000000000 -0700 @@ -485,9 +485,9 @@ " extwh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -505,9 +505,9 @@ " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -525,9 +525,9 @@ " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -554,13 +554,13 @@ "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -584,13 +584,13 @@ "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -614,13 +614,13 @@ "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n\t" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -845,9 +845,9 @@ " extwh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -865,9 +865,9 @@ " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -885,9 +885,9 @@ " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -905,9 +905,9 @@ " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda 
%1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -925,9 +925,9 @@ " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -954,13 +954,13 @@ "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -988,13 +988,13 @@ "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -1022,13 +1022,13 @@ "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n\t" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), diff -urN linux-2.5.70-bk13/arch/alpha/lib/csum_partial_copy.c linux-2.5.70-bk14/arch/alpha/lib/csum_partial_copy.c --- linux-2.5.70-bk13/arch/alpha/lib/csum_partial_copy.c 2003-05-26 18:00:40.000000000 -0700 +++ linux-2.5.70-bk14/arch/alpha/lib/csum_partial_copy.c 2003-06-09 04:42:02.000000000 -0700 @@ -46,7 +46,7 @@ "1: ldq_u %0,%2\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ - " .gprel32 1b\n" \ + " .long 1b - .\n" \ " lda %0,2b-1b(%1)\n" \ ".previous" \ : "=r"(x), "=r"(__guu_err) \ @@ -61,7 +61,7 @@ "1: stq_u %2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ - " .gprel32 1b" \ + " .long 1b - ." \ " lda $31,2b-1b(%0)\n" \ ".previous" \ : "=r"(__puu_err) \ diff -urN linux-2.5.70-bk13/arch/alpha/lib/memmove.S linux-2.5.70-bk14/arch/alpha/lib/memmove.S --- linux-2.5.70-bk13/arch/alpha/lib/memmove.S 2003-05-26 18:00:40.000000000 -0700 +++ linux-2.5.70-bk14/arch/alpha/lib/memmove.S 2003-06-09 04:42:02.000000000 -0700 @@ -12,6 +12,15 @@ .text .align 4 + .globl bcopy + .ent bcopy +bcopy: + mov $16,$0 + mov $17,$16 + mov $0,$17 + .end bcopy + + .align 4 .globl memmove .ent memmove memmove: diff -urN linux-2.5.70-bk13/arch/arm26/ACKNOWLEDGEMENTS linux-2.5.70-bk14/arch/arm26/ACKNOWLEDGEMENTS --- linux-2.5.70-bk13/arch/arm26/ACKNOWLEDGEMENTS 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/ACKNOWLEDGEMENTS 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,27 @@ +The work in this architecture (ARM26) is that of a great many people. + +This is what has happened: + +I [Ian Molton] have been trying to repair the ARM26 architecture support, but it has become an impossible task whilst it is still merged with the ARM32 (arch/arm) code. The ARM26 code is too different to be sensible to keep with the ARM32 code now, and Russell King really doesnt have the time to maintain the ARM26 code. Add to that that most ARM32 developers dont know about or care about ARM26 when writing patches, and you have a reall mess. + +As a result, I've split it off into a new architecture of its own. 
I've named it arm26 since these CPUs have only a 26 bit address space, unlike the other ARMs. + +The upheaval in moving around so many source files and chopping out vasty ammounts of cruft was enormous, and the copyright of many files is sometimes unclear. Because of this, I am writing this, in order that no-one is left out / misaccredited / blamed for any of the code. + +People I KNOW have made major contributions to the code: + +David Alan Gilbert (former maintainer of ARM26 bits) +Philip Blundell +Russell King +Keith Owens + +Currently maintaing the code are + +Ian Molton (Maintainer / Archimedes) +John Appleby (kernel / A5K) + +If anyone has a problem with attributions in header files / source files, please do contact me to straighten things out. + +Ian Molton (aka spyro) - ARM26 maintainer +spyro@f2s.com + diff -urN linux-2.5.70-bk13/arch/arm26/Config.help linux-2.5.70-bk14/arch/arm26/Config.help --- linux-2.5.70-bk13/arch/arm26/Config.help 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/Config.help 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,387 @@ +CONFIG_ARM + The ARM series is a line of low-power-consumption RISC chip designs + licensed by ARM ltd and targeted at embedded applications. + +CONFIG_IDE + If you say Y here, your kernel will be able to manage low cost mass + storage units such as ATA/(E)IDE and ATAPI units. The most common + cases are IDE hard drives and ATAPI CD-ROM drives. + + If your system is pure SCSI and doesn't use these interfaces, you + can say N here. + + Integrated Disk Electronics (IDE aka ATA-1) is a connecting standard + for mass storage units such as hard disks. It was designed by + Western Digital and Compaq Computer in 1984. It was then named + ST506. Quite a number of disks use the IDE interface. + + AT Attachment (ATA) is the superset of the IDE specifications. + ST506 was also called ATA-1. + + Fast-IDE is ATA-2 (also named Fast ATA), Enhanced IDE (EIDE) is + ATA-3. It provides support for larger disks (up to 8.4GB by means of + the LBA standard), more disks (4 instead of 2) and for other mass + storage units such as tapes and cdrom. UDMA/33 (aka UltraDMA/33) is + ATA-4 and provides faster (and more CPU friendly) transfer modes + than previous PIO (Programmed processor Input/Output) from previous + ATA/IDE standards by means of fast DMA controllers. + + ATA Packet Interface (ATAPI) is a protocol used by EIDE tape and + CD-ROM drives, similar in many respects to the SCSI protocol. + + SMART IDE (Self Monitoring, Analysis and Reporting Technology) was + designed in order to prevent data corruption and disk crash by + detecting pre hardware failure conditions (heat, access time, and + the like...). Disks built since June 1995 may follow this standard. + The kernel itself don't manage this; however there are quite a + number of user programs such as smart that can query the status of + SMART parameters disk. + + If you want to compile this driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called ide.o. + + For further information, please read . + + If unsure, say Y. + +CONFIG_ISA + Find out whether you have ISA slots on your motherboard. ISA is the + name of a bus system, i.e. the way the CPU talks to the other stuff + inside your box. Other bus systems are PCI, EISA, MicroChannel + (MCA) or VESA. ISA is an older system, now being displaced by PCI; + newer boards don't support it. If you have ISA, say Y, otherwise N. 
+ +CONFIG_PREEMPT + This option reduces the latency of the kernel when reacting to + real-time or interactive events by allowing a low priority process to + be preempted even if it is in kernel mode executing a system call. + This allows applications to run more reliably even when the system is + under load. + + Say Y here if you are building a kernel for a desktop, embedded + or real-time system. Say N if you are unsure. + +CONFIG_MCA + MicroChannel Architecture is found in some IBM PS/2 machines and + laptops. It is a bus system similar to PCI or ISA. See + (and especially the web page given + there) before attempting to build an MCA bus kernel. + +CONFIG_EISA + The Extended Industry Standard Architecture (EISA) bus was + developed as an open alternative to the IBM MicroChannel bus. + + The EISA bus provided some of the features of the IBM MicroChannel + bus while maintaining backward compatibility with cards made for + the older ISA bus. The EISA bus saw limited use between 1988 and + 1995 when it was made obsolete by the PCI bus. + + Say Y here if you are building a kernel for an EISA-based machine. + + Otherwise, say N. + +CONFIG_HOTPLUG + Say Y here if you want to plug devices into your computer while + the system is running, and be able to use them quickly. In many + cases, the devices can likewise be unplugged at any time too. + + One well known example of this is PCMCIA- or PC-cards, credit-card + size devices such as network cards, modems or hard drives which are + plugged into slots found on all modern laptop computers. Another + example, used on modern desktops as well as laptops, is USB. + + Enable HOTPLUG and KMOD, and build a modular kernel. Get agent + software (at ) and install it. + Then your kernel will automatically call out to a user mode "policy + agent" (/sbin/hotplug) to load modules and set up software needed + to use devices as you hotplug them. + +CONFIG_KCORE_ELF + If you enabled support for /proc file system then the file + /proc/kcore will contain the kernel core image. This can be used + in gdb: + + $ cd /usr/src/linux ; gdb vmlinux /proc/kcore + + You have two choices here: ELF and A.OUT. Selecting ELF will make + /proc/kcore appear in ELF core format as defined by the Executable + and Linking Format specification. Selecting A.OUT will choose the + old "a.out" format which may be necessary for some old versions + of binutils or on some architectures. + + This is especially useful if you have compiled the kernel with the + "-g" option to preserve debugging information. It is mainly used + for examining kernel data structures on the live kernel so if you + don't understand what this means or are not a kernel hacker, just + leave it at its default value ELF. + +CONFIG_KCORE_AOUT + Not necessary unless you're using a very out-of-date binutils + version. You probably want KCORE_ELF. + +CONFIG_BINFMT_ELF + ELF (Executable and Linkable Format) is a format for libraries and + executables used across different architectures and operating + systems. Saying Y here will enable your kernel to run ELF binaries + and enlarge it by about 13 KB. ELF support under Linux has now all + but replaced the traditional Linux a.out formats (QMAGIC and ZMAGIC) + because it is portable (this does *not* mean that you will be able + to run executables from different architectures or operating systems + however) and makes building run-time libraries very easy. Many new + executables are distributed solely in ELF format. You definitely + want to say Y here. 
+ + Information about ELF is contained in the ELF HOWTO available from + . + + If you find that after upgrading from Linux kernel 1.2 and saying Y + here, you still can't run any ELF binaries (they just crash), then + you'll have to install the newest ELF runtime libraries, including + ld.so (check the file for location and + latest version). + + If you want to compile this as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called binfmt_elf.o. Saying M or N here is dangerous because + some crucial programs on your system might be in ELF format. + +CONFIG_BINFMT_AOUT + A.out (Assembler.OUTput) is a set of formats for libraries and + executables used in the earliest versions of UNIX. Linux used the + a.out formats QMAGIC and ZMAGIC until they were replaced with the + ELF format. + + As more and more programs are converted to ELF, the use for a.out + will gradually diminish. If you disable this option it will reduce + your kernel by one page. This is not much and by itself does not + warrant removing support. However its removal is a good idea if you + wish to ensure that absolutely none of your programs will use this + older executable format. If you don't know what to answer at this + point then answer Y. If someone told you "You need a kernel with + QMAGIC support" then you'll have to say Y here. You may answer M to + compile a.out support as a module and later load the module when you + want to use a program or library in a.out format. The module will be + called binfmt_aout.o. Saying M or N here is dangerous though, + because some crucial programs on your system might still be in A.OUT + format. + +CONFIG_BINFMT_MISC + If you say Y here, it will be possible to plug wrapper-driven binary + formats into the kernel. You will like this especially when you use + programs that need an interpreter to run like Java, Python or + Emacs-Lisp. It's also useful if you often run DOS executables under + the Linux DOS emulator DOSEMU (read the DOSEMU-HOWTO, available from + ). Once you have + registered such a binary class with the kernel, you can start one of + those programs simply by typing in its name at a shell prompt; Linux + will automatically feed it to the correct interpreter. + + You can do other nice things, too. Read the file + to learn how to use this + feature, and for information about how + to include Java support. + + You must say Y to "/proc file system support" (CONFIG_PROC_FS) to + use this part of the kernel. + + You may say M here for module support and later load the module when + you have use for it; the module is called binfmt_misc.o. If you + don't know what to answer at this point, say Y. + +CONFIG_SCSI + If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or + any other SCSI device under Linux, say Y and make sure that you know + the name of your SCSI host adapter (the card inside your computer + that "speaks" the SCSI protocol, also called SCSI controller), + because you will be asked for it. + + You also need to say Y here if you want support for the parallel + port version of the 100 MB IOMEGA ZIP drive. + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called scsi_mod.o. If you want to compile it as + a module, say M here and read and + . 
However, do not compile this as a + module if your root file system (the one containing the directory /) + is located on a SCSI device. + +CONFIG_NETDEVICES + You can say N here if you don't intend to connect your Linux box to + any other computer at all or if all your connections will be over a + telephone line with a modem either via UUCP (UUCP is a protocol to + forward mail and news between unix hosts over telephone lines; read + the UUCP-HOWTO, available from + ) or dialing up a shell + account or a BBS, even using term (term is a program which gives you + almost full Internet connectivity if you have a regular dial up + shell account on some Internet connected Unix computer. Read + ). + + You'll have to say Y if your computer contains a network card that + you want to use under Linux (make sure you know its name because you + will be asked for it and read the Ethernet-HOWTO (especially if you + plan to use more than one network card under Linux)) or if you want + to use SLIP (Serial Line Internet Protocol is the protocol used to + send Internet traffic over telephone lines or null modem cables) or + CSLIP (compressed SLIP) or PPP (Point to Point Protocol, a better + and newer replacement for SLIP) or PLIP (Parallel Line Internet + Protocol is mainly used to create a mini network by connecting the + parallel ports of two local machines) or AX.25/KISS (protocol for + sending Internet traffic over amateur radio links). + + Make sure to read the NET-3-HOWTO. Eventually, you will have to read + Olaf Kirch's excellent and free book "Network Administrator's + Guide", to be found in . If + unsure, say Y. + +CONFIG_MAGIC_SYSRQ + If you say Y here, you will have some control over the system even + if the system crashes for example during kernel debugging (e.g., you + will be able to flush the buffer cache to disk, reboot the system + immediately or dump some status information). This is accomplished + by pressing various keys while holding SysRq (Alt+PrintScreen). It + also works on a serial console (on PC hardware at least), if you + send a BREAK and then within 5 seconds a command keypress. The + keys are documented in . Don't say Y + unless you really know what this hack does. + +CONFIG_ARCH_ARCA5K + This selects support for 'ARM26' CPUs (ARM 2 and 3) + +CONFIG_ARCH_A5K + Say Y here to to support the Acorn A5000. Linux can support the + internal IDE disk and CD-ROM interface, serial and parallel port, + and the floppy drive. Note that on some A5000s the floppy is + plugged into the wrong socket on the motherboard. + +CONFIG_ARCH_ARC + The Acorn Archimedes was an personal computer based on an 8MHz ARM2 + processor, released in 1987. It supported 512K of RAM and 2 800K + floppy disks. Picture and more detailed specifications at + . + +CONFIG_PAGESIZE_16 + Say Y here if your Archimedes or A5000 system has only 2MB of + memory, otherwise say N. The resulting kernel will not run on a + machine with 4MB of memory. + +CONFIG_FPE_NWFPE + Say Y to include the NWFPE floating point emulator in the kernel. + This is necessary to run most binaries. Linux does not currently + support floating point hardware so you need to say Y here even if + your machine has an FPA or floating point co-processor podule. + + It is also possible to say M to build the emulator as a module + (nwfpe.o) or indeed to leave it out altogether. However, unless you + know what you are doing this can easily render your machine + unbootable. Saying Y is the safe option. 
+ + You may say N here if you are going to load the Acorn FPEmulator + early in the bootup. + +CONFIG_FPE_FASTFPE + Say Y here to include the FAST floating point emulator in the kernel. + This is an experimental much faster emulator which now also has full + precision for the mantissa. It does not support any exceptions. + It is very simple, and approximately 3-6 times faster than NWFPE. + + It should be sufficient for most programs. It may be not suitable + for scientific calculations, but you have to check this for yourself. + If you do not feel you need a faster FP emulation you should better + choose NWFPE. + + It is also possible to say M to build the emulator as a module + (fastfpe.o). But keep in mind that you should only load the FP + emulator early in the bootup. You should never change from NWFPE to + FASTFPE or vice versa in an active system! + +CONFIG_DEBUG_ERRORS + This option controls verbose debugging information which can be + printed when the kernel detects an internal error. This debugging + information is useful to kernel hackers when tracking down problems, + but mostly meaningless to other people. It's safe to say Y unless + you are concerned with the code size or don't want to see these + messages. + +CONFIG_NO_FRAME_POINTER + If you say Y here, the resulting kernel will be slightly smaller and + faster. However, when a problem occurs with the kernel, the + information that is reported is severely limited. Most people + should say N here. + +CONFIG_DEBUG_USER + When a user program crashes due to an exception, the kernel can + print a brief message explaining what the problem was. This is + sometimes helpful for debugging but serves no purpose on a + production system. Most people should say N here. + +CONFIG_DEBUG_INFO + Say Y here to include source-level debugging information in the + `vmlinux' binary image. This is handy if you want to use gdb or + addr2line to debug the kernel. It has no impact on the in-memory + footprint of the running kernel but it can increase the amount of + time and disk space needed for compilation of the kernel. If in + doubt say N. + +CONFIG_DEBUG_LL + Say Y here to include definitions of printascii, printchar, printhex + in the kernel. This is helpful if you are debugging code that + executes before the console is initialized. + +CONFIG_NO_PGT_CACHE + Normally the kernel maintains a `quicklist' of preallocated + pagetable structures in order to increase performance. On machines + with very few pages this may however be a loss. Say Y here to + disable the pgtable cache. + +CONFIG_ARTHUR + Say Y here to include the kernel code necessary if you want to run + Acorn RISC OS/Arthur binaries under Linux. This code is still very + experimental; if this sounds frightening, say N and sleep in peace. + You can also say M here to compile this support as a module (which + will be called arthur.o). + +CONFIG_CMDLINE + On some architectures (EBSA110 and CATS), there is currently no way + for the boot loader to pass arguments to the kernel. For these + architectures, you should supply some command-line options at build + time by entering them here. As a minimum, you should specify the + memory size and the root device (e.g., mem=64M root=/dev/nfs). + +CONFIG_DEBUG_KERNEL + Say Y here if you are developing drivers or trying to debug and + identify kernel problems. + +CONFIG_DEBUG_SLAB + Say Y here to have the kernel do limited verification on memory + allocation as well as poisoning memory on free to catch use of freed + memory. 
+ +CONFIG_DEBUG_SPINLOCK + Say Y here and build SMP to catch missing spinlock initialization + and certain other kinds of spinlock errors commonly made. This is + best used in conjunction with the NMI watchdog so that spinlock + deadlocks are also debuggable. + +CONFIG_DEBUG_BUGVERBOSE + Say Y here to make BUG() panics output the file name and line number + of the BUG call as well as the EIP and oops trace. This aids + debugging but costs about 70-100K of memory. + +CONFIG_ZBOOT_ROM + Say Y here if you intend to execute your compressed kernel image (zImage) + directly from ROM or flash. If unsure, say N. + +CONFIG_ZBOOT_ROM_TEXT + The base address for zImage. Unless you have special requirements, you + should not change this value. + +CONFIG_ZBOOT_ROM_BSS + The base address of 64KiB of read/write memory, which must be available + while the decompressor is running. Unless you have special requirements, + you should not change this value. + diff -urN linux-2.5.70-bk13/arch/arm26/Kconfig linux-2.5.70-bk14/arch/arm26/Kconfig --- linux-2.5.70-bk13/arch/arm26/Kconfig 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/Kconfig 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,572 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# + +mainmenu "Linux Kernel Configuration" + +config ARM + bool + default y + help + The ARM series is a line of low-power-consumption RISC chip designs + licensed by ARM ltd and targeted at embedded applications and + handhelds such as the Compaq IPAQ. ARM-based PCs are no longer + manufactured, but legacy ARM-based PC hardware remains popular in + Europe. There is an ARM Linux project with a web page at + . + +config ARCH_ARCA5K + bool + default y + +config MMU + bool + default y + +config ARCH_ACORN + bool + default y + +config CPU_26 + bool + default y + +config FIQ + bool + default y + +# 9 = 512 pages 8 = 256 pages 7 = 128 pages +config FORCE_MAX_ZONEORDER + int + default 9 + +config UID16 + bool + default y + +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config RWSEM_XCHGADD_ALGORITHM + bool + +config GENERIC_BUST_SPINLOCK + bool + +config GENERIC_ISA_DMA + bool + +source "init/Kconfig" + + +menu "System Type" + +comment "Archimedes/A5000 Implementations (select only ONE)" + +config ARCH_ARC + bool "Archimedes" + depends on ARCH_ARCA5K + help + The Acorn Archimedes was an personal computer based on an 8K ARM2 + processor, released in 1987. It supported 512K of RAM and 2 800K + floppy disks. Picture and more detailed specifications at + . + +config ARCH_A5K + bool "A5000" + depends on ARCH_ARCA5K + help + Say Y here to to support the Acorn A5000. Linux can support the + internal IDE disk and CD-ROM interface, serial and parallel port, + and the floppy drive. Note that on some A5000s the floppy is + plugged into the wrong socket on the motherboard. + +config PAGESIZE_16 + bool "2MB physical memory" + depends on ARCH_ARCA5K + help + Say Y here if your Archimedes or A5000 system has only 2MB of + memory, otherwise say N. The resulting kernel will not run on a + machine with 4MB of memory. +endmenu + +menu "General setup" + +# Compressed boot loader in ROM. Yes, we really want to ask about +# TEXT and BSS so we preserve their values in the config files. +config ZBOOT_ROM + bool "Compressed boot loader in ROM/flash" + help + Say Y here if you intend to execute your compressed kernel image (zImage) + directly from ROM or flash. If unsure, say N. 
+ +config ZBOOT_ROM_TEXT + hex "Compressed ROM boot loader base address" + default "0" + help + The base address for zImage. Unless you have special requirements, you + should not change this value. + +config ZBOOT_ROM_BSS + hex "Compressed ROM boot loader BSS address" + default "0" + help + The base address of 64KiB of read/write memory, which must be available + while the decompressor is running. Unless you have special requirements, + you should not change this value. + +config HOTPLUG + bool "Support for hot-pluggable devices" + ---help--- + Say Y here if you want to plug devices into your computer while + the system is running, and be able to use them quickly. In many + cases, the devices can likewise be unplugged at any time too. + + One well known example of this is PCMCIA- or PC-cards, credit-card + size devices such as network cards, modems or hard drives which are + plugged into slots found on all modern laptop computers. Another + example, used on modern desktops as well as laptops, is USB. + + Enable HOTPLUG and KMOD, and build a modular kernel. Get agent + software (at ) and install it. + Then your kernel will automatically call out to a user mode "policy + agent" (/sbin/hotplug) to load modules and set up software needed + to use devices as you hotplug them. + +comment "At least one math emulation must be selected" + +config FPE_NWFPE + tristate "NWFPE math emulation" + ---help--- + Say Y to include the NWFPE floating point emulator in the kernel. + This is necessary to run most binaries. Linux does not currently + support floating point hardware so you need to say Y here even if + your machine has an FPA or floating point co-processor podule. + + It is also possible to say M to build the emulator as a module + (nwfpe) or indeed to leave it out altogether. However, unless you + know what you are doing this can easily render your machine + unbootable. Saying Y is the safe option. + + You may say N here if you are going to load the Acorn FPEmulator + early in the bootup. + +choice + prompt "Kernel core (/proc/kcore) format" + default KCORE_ELF + +config KCORE_ELF + bool "ELF" + ---help--- + If you enabled support for /proc file system then the file + /proc/kcore will contain the kernel core image. This can be used + in gdb: + + $ cd /usr/src/linux ; gdb vmlinux /proc/kcore + + You have two choices here: ELF and A.OUT. Selecting ELF will make + /proc/kcore appear in ELF core format as defined by the Executable + and Linking Format specification. Selecting A.OUT will choose the + old "a.out" format which may be necessary for some old versions + of binutils or on some architectures. + + This is especially useful if you have compiled the kernel with the + "-g" option to preserve debugging information. It is mainly used + for examining kernel data structures on the live kernel so if you + don't understand what this means or are not a kernel hacker, just + leave it at its default value ELF. + +config KCORE_AOUT + bool "A.OUT" + help + Not necessary unless you're using a very out-of-date binutils + version. You probably want KCORE_ELF. + +endchoice + +config BINFMT_AOUT + tristate "Kernel support for a.out binaries" + ---help--- + A.out (Assembler.OUTput) is a set of formats for libraries and + executables used in the earliest versions of UNIX. Linux used the + a.out formats QMAGIC and ZMAGIC until they were replaced with the + ELF format. + + As more and more programs are converted to ELF, the use for a.out + will gradually diminish. 
If you disable this option it will reduce + your kernel by one page. This is not much and by itself does not + warrant removing support. However its removal is a good idea if you + wish to ensure that absolutely none of your programs will use this + older executable format. If you don't know what to answer at this + point then answer Y. If someone told you "You need a kernel with + QMAGIC support" then you'll have to say Y here. You may answer M to + compile a.out support as a module and later load the module when you + want to use a program or library in a.out format. The module will be + called binfmt_aout. Saying M or N here is dangerous though, + because some crucial programs on your system might still be in A.OUT + format. + +config BINFMT_ELF + tristate "Kernel support for ELF binaries" + ---help--- + ELF (Executable and Linkable Format) is a format for libraries and + executables used across different architectures and operating + systems. Saying Y here will enable your kernel to run ELF binaries + and enlarge it by about 13 KB. ELF support under Linux has now all + but replaced the traditional Linux a.out formats (QMAGIC and ZMAGIC) + because it is portable (this does *not* mean that you will be able + to run executables from different architectures or operating systems + however) and makes building run-time libraries very easy. Many new + executables are distributed solely in ELF format. You definitely + want to say Y here. + + Information about ELF is contained in the ELF HOWTO available from + . + + If you find that after upgrading from Linux kernel 1.2 and saying Y + here, you still can't run any ELF binaries (they just crash), then + you'll have to install the newest ELF runtime libraries, including + ld.so (check the file for location and + latest version). + + If you want to compile this as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called binfmt_elf. Saying M or N here is dangerous because + some crucial programs on your system might be in ELF format. + +config BINFMT_MISC + tristate "Kernel support for MISC binaries" + ---help--- + If you say Y here, it will be possible to plug wrapper-driven binary + formats into the kernel. You will like this especially when you use + programs that need an interpreter to run like Java, Python or + Emacs-Lisp. It's also useful if you often run DOS executables under + the Linux DOS emulator DOSEMU (read the DOSEMU-HOWTO, available from + ). Once you have + registered such a binary class with the kernel, you can start one of + those programs simply by typing in its name at a shell prompt; Linux + will automatically feed it to the correct interpreter. + + You can do other nice things, too. Read the file + to learn how to use this + feature, and for information about how + to include Java support. + + You must say Y to "/proc file system support" (CONFIG_PROC_FS) to + use this part of the kernel. + + You may say M here for module support and later load the module when + you have use for it; the module is called binfmt_misc. If you + don't know what to answer at this point, say Y. + +config PREEMPT + bool "Preemptible Kernel (EXPERIMENTAL)" + depends on CPU_32 && EXPERIMENTAL + help + This option reduces the latency of the kernel when reacting to + real-time or interactive events by allowing a low priority process to + be preempted even if it is in kernel mode executing a system call. 
+ This allows applications to run more reliably even when the system is + under load. + + Say Y here if you are building a kernel for a desktop, embedded + or real-time system. Say N if you are unsure. + +config ARTHUR + tristate "RISC OS personality" + depends on CPU_32 + help + Say Y here to include the kernel code necessary if you want to run + Acorn RISC OS/Arthur binaries under Linux. This code is still very + experimental; if this sounds frightening, say N and sleep in peace. + You can also say M here to compile this support as a module (which + will be called arthur). + +config CMDLINE + string "Default kernel command string" + default "" + help + On some architectures (EBSA110 and CATS), there is currently no way + for the boot loader to pass arguments to the kernel. For these + architectures, you should supply some command-line options at build + time by entering them here. As a minimum, you should specify the + memory size and the root device (e.g., mem=64M root=/dev/nfs). + +endmenu + +source "drivers/parport/Kconfig" + +source "drivers/pnp/Kconfig" + +source "drivers/block/Kconfig" + +source "drivers/md/Kconfig" + +source "net/Kconfig" + +source "net/irda/Kconfig" + +menu "ATA/ATAPI/MFM/RLL support" + +config IDE + tristate "ATA/ATAPI/MFM/RLL support" + ---help--- + If you say Y here, your kernel will be able to manage low cost mass + storage units such as ATA/(E)IDE and ATAPI units. The most common + cases are IDE hard drives and ATAPI CD-ROM drives. + + If your system is pure SCSI and doesn't use these interfaces, you + can say N here. + + Integrated Disk Electronics (IDE aka ATA-1) is a connecting standard + for mass storage units such as hard disks. It was designed by + Western Digital and Compaq Computer in 1984. It was then named + ST506. Quite a number of disks use the IDE interface. + + AT Attachment (ATA) is the superset of the IDE specifications. + ST506 was also called ATA-1. + + Fast-IDE is ATA-2 (also named Fast ATA), Enhanced IDE (EIDE) is + ATA-3. It provides support for larger disks (up to 8.4GB by means of + the LBA standard), more disks (4 instead of 2) and for other mass + storage units such as tapes and cdrom. UDMA/33 (aka UltraDMA/33) is + ATA-4 and provides faster (and more CPU friendly) transfer modes + than previous PIO (Programmed processor Input/Output) from previous + ATA/IDE standards by means of fast DMA controllers. + + ATA Packet Interface (ATAPI) is a protocol used by EIDE tape and + CD-ROM drives, similar in many respects to the SCSI protocol. + + SMART IDE (Self Monitoring, Analysis and Reporting Technology) was + designed in order to prevent data corruption and disk crash by + detecting pre hardware failure conditions (heat, access time, and + the like...). Disks built since June 1995 may follow this standard. + The kernel itself don't manage this; however there are quite a + number of user programs such as smart that can query the status of + SMART parameters disk. + + If you want to compile this driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called ide. + + For further information, please read . + + If unsure, say Y. 
+ +source "drivers/ide/Kconfig" + +endmenu + + +menu "SCSI support" + +config SCSI + tristate "SCSI support" + ---help--- + If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or + any other SCSI device under Linux, say Y and make sure that you know + the name of your SCSI host adapter (the card inside your computer + that "speaks" the SCSI protocol, also called SCSI controller), + because you will be asked for it. + + You also need to say Y here if you want support for the parallel + port version of the 100 MB IOMEGA ZIP drive. + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called scsi_mod. If you want to compile it as + a module, say M here and read and + . However, do not compile this as a + module if your root file system (the one containing the directory /) + is located on a SCSI device. + +source "drivers/scsi/Kconfig" + +endmenu + +source "drivers/isdn/Kconfig" + +# +# input before char - char/joystick depends on it. As does USB. +# +source "drivers/input/Kconfig" + +source "drivers/char/Kconfig" + +config KBDMOUSE + bool + depends on ARCH_ACORN && BUSMOUSE=y + default y + +source "drivers/media/Kconfig" + +source "fs/Kconfig" + +source "drivers/video/Kconfig" + +menu "Sound" + depends on ARCH_ACORN + +config SOUND + tristate "Sound card support" + ---help--- + If you have a sound card in your computer, i.e. if it can say more + than an occasional beep, say Y. Be sure to have all the information + about your sound card and its configuration down (I/O port, + interrupt and DMA channel), because you will be asked for it. + + You want to read the Sound-HOWTO, available from + . General information about + the modular sound system is contained in the files + . The file + contains some slightly + outdated but still useful information as well. + + If you have a PnP sound card and you want to configure it at boot + time using the ISA PnP tools (read + ), then you need to + compile the sound card support as a module ( = code which can be + inserted in and removed from the running kernel whenever you want) + and load that module after the PnP configuration is finished. To do + this, say M here and read as well + as ; the module will be + called soundcore. + + I'm told that even without a sound card, you can make your computer + say more than an occasional beep, by programming the PC speaker. + Kernel patches and supporting utilities to do that are in the pcsp + package, available at . + +source "sound/Kconfig" + +endmenu + +source "drivers/misc/Kconfig" + +source "drivers/usb/Kconfig" + +source "net/bluetooth/Kconfig" + + +menu "Kernel hacking" + +# RMK wants arm kernels compiled with frame pointers so hardwire this to y. +# If you know what you are doing and are willing to live without stack +# traces, you can get a slightly smaller kernel by setting this option to +# n, but then RMK will have to kill you ;). +config FRAME_POINTER + bool + default y + help + If you say N here, the resulting kernel will be slightly smaller and + faster. However, when a problem occurs with the kernel, the + information that is reported is severely limited. Most people + should say Y here. + +config DEBUG_USER + bool "Verbose user fault messages" + help + When a user program crashes due to an exception, the kernel can + print a brief message explaining what the problem was. This is + sometimes helpful for debugging but serves no purpose on a + production system. 
Most people should say N here. + +config DEBUG_INFO + bool "Include GDB debugging information in kernel binary" + help + Say Y here to include source-level debugging information in the + `vmlinux' binary image. This is handy if you want to use gdb or + addr2line to debug the kernel. It has no impact on the in-memory + footprint of the running kernel but it can increase the amount of + time and disk space needed for compilation of the kernel. If in + doubt say N. + +config DEBUG_KERNEL + bool "Kernel debugging" + help + Say Y here if you are developing drivers or trying to debug and + identify kernel problems. + +config DEBUG_SLAB + bool "Debug memory allocations" + depends on DEBUG_KERNEL + help + Say Y here to have the kernel do limited verification on memory + allocation as well as poisoning memory on free to catch use of freed + memory. + +config MAGIC_SYSRQ + bool "Magic SysRq key" + depends on DEBUG_KERNEL + help + If you say Y here, you will have some control over the system even + if the system crashes for example during kernel debugging (e.g., you + will be able to flush the buffer cache to disk, reboot the system + immediately or dump some status information). This is accomplished + by pressing various keys while holding SysRq (Alt+PrintScreen). It + also works on a serial console (on PC hardware at least), if you + send a BREAK and then within 5 seconds a command keypress. The + keys are documented in . Don't say Y + unless you really know what this hack does. + +config DEBUG_SPINLOCK + bool "Spinlock debugging" + depends on DEBUG_KERNEL + help + Say Y here and build SMP to catch missing spinlock initialization + and certain other kinds of spinlock errors commonly made. This is + best used in conjunction with the NMI watchdog so that spinlock + deadlocks are also debuggable. + +config DEBUG_WAITQ + bool "Wait queue debugging" + depends on DEBUG_KERNEL + +config DEBUG_BUGVERBOSE + bool "Verbose BUG() reporting (adds 70K)" + depends on DEBUG_KERNEL + help + Say Y here to make BUG() panics output the file name and line number + of the BUG call as well as the EIP and oops trace. This aids + debugging but costs about 70-100K of memory. + +config DEBUG_ERRORS + bool "Verbose kernel error messages" + depends on DEBUG_KERNEL + help + This option controls verbose debugging information which can be + printed when the kernel detects an internal error. This debugging + information is useful to kernel hackers when tracking down problems, + but mostly meaningless to other people. It's safe to say Y unless + you are concerned with the code size or don't want to see these + messages. + +config KALLSYMS + bool "Load all symbols for debugging/kksymoops" + depends on DEBUG_KERNEL + help + Say Y here to let the kernel print out symbolic crash information and + symbolic stack backtraces. This increases the size of the kernel + somewhat, as all symbols have to be loaded into the kernel image. + +# These options are only for real kernel hackers who want to get their hands dirty. +config DEBUG_LL + bool "Kernel low-level debugging functions" + depends on DEBUG_KERNEL + help + Say Y here to include definitions of printascii, printchar, printhex + in the kernel. This is helpful if you are debugging code that + executes before the console is initialized. 
+ +endmenu + +source "security/Kconfig" + +source "crypto/Kconfig" + +source "lib/Kconfig" + diff -urN linux-2.5.70-bk13/arch/arm26/Makefile linux-2.5.70-bk14/arch/arm26/Makefile --- linux-2.5.70-bk13/arch/arm26/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,126 @@ +# +# arch/arm26/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995-2001 by Russell King + +LDFLAGS_vmlinux :=-p -X +LDFLAGS_BLOB :=--format binary +AFLAGS_vmlinux.lds.o = -DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR) +OBJCOPYFLAGS :=-O binary -R .note -R .comment -S +GZFLAGS :=-9 +#CFLAGS +=-pipe +CFLAGS :=$(CFLAGS:-O2=-Os) + +ifeq ($(CONFIG_FRAME_POINTER),y) +CFLAGS +=-fno-omit-frame-pointer -mno-sched-prolog +endif + +ifeq ($(CONFIG_DEBUG_INFO),y) +CFLAGS +=-g +endif + +# Force -mno-fpu to be passed to the assembler. Some versions of gcc don't +# do this with -msoft-float +CFLAGS_BOOT :=-mapcs-26 -mcpu=arm3 -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm +CFLAGS +=-mapcs-26 -mcpu=arm3 -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm +AFLAGS +=-mapcs-26 -mcpu=arm3 -mno-fpu -msoft-float -Wa,-mno-fpu + +#Default value +DATAADDR := . + +ifeq ($(CONFIG_CPU_26),y) +head-y := arch/arm26/machine/head.o arch/arm26/kernel/init_task.o +LDFLAGS_BLOB += --oformat elf32-littlearm + ifeq ($(CONFIG_ROM_KERNEL),y) + DATAADDR := 0x02080000 + textaddr-y := 0x03800000 + else + textaddr-y := 0x02080000 + endif +endif + +TEXTADDR := $(textaddr-y) +ifeq ($(incdir-y),) +incdir-y := +endif +INCDIR := + +export MACHINE TEXTADDR GZFLAGS CFLAGS_BOOT + +# If we have a machine-specific directory, then include it in the build. +core-y += arch/arm26/kernel/ arch/arm26/mm/ arch/arm26/machine/ +core-$(CONFIG_FPE_NWFPE) += arch/arm26/nwfpe/ + +libs-y += arch/arm26/lib/ + +# Default target when executing plain make +all: zImage + +boot := arch/arm26/boot + +prepare: include/asm-$(ARCH)/asm_offsets.h +CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h + + +.PHONY: maketools FORCE +maketools: FORCE + + +# Convert bzImage to zImage +bzImage: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/zImage + +zImage Image bootpImage: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +zinstall install: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $@ + +# We use MRPROPER_FILES and CLEAN_FILES now +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + +# My testing targets (that short circuit a few dependencies) +zImg:; $(Q)$(MAKE) $(build)=$(boot) $(boot)/zImage +Img:; $(Q)$(MAKE) $(build)=$(boot) $(boot)/Image +bp:; $(Q)$(MAKE) $(build)=$(boot) $(boot)/bootpImage +i:; $(Q)$(MAKE) $(build)=$(boot) install +zi:; $(Q)$(MAKE) $(build)=$(boot) zinstall + +# +# Configuration targets. 
Use these to select a +# configuration for your architecture +%_config: + @( \ + CFG=$(@:_config=); \ + if [ -f arch/arm26/def-configs/$$CFG ]; then \ + [ -f .config ] && mv -f .config .config.old; \ + cp arch/arm26/def-configs/$$CFG .config; \ + echo "*** Default configuration for $$CFG installed"; \ + echo "*** Next, you may run 'make oldconfig'"; \ + else \ + echo "$$CFG does not exist"; \ + fi; \ + ) + +arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \ + include/config/MARKER + +include/asm-$(ARCH)/asm_offsets.h: arch/$(ARCH)/kernel/asm-offsets.s + $(call filechk,gen-asm-offsets) + +define archhelp + echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' + echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' + echo ' bootpImage - Combined zImage and initial RAM disk' + echo ' initrd - Create an initial image' + echo ' install - Install uncompressed kernel' + echo ' zinstall - Install compressed kernel' + echo ' Install using (your) ~/bin/installkernel or' + echo ' (distribution) /sbin/installkernel or' + echo ' install to $$(INSTALL_PATH) and run lilo' +endef diff -urN linux-2.5.70-bk13/arch/arm26/boot/Makefile linux-2.5.70-bk14/arch/arm26/boot/Makefile --- linux-2.5.70-bk13/arch/arm26/boot/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,69 @@ +# +# arch/arm/boot/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995-2002 Russell King +# + +# Note: the following conditions must always be true: +# ZRELADDR == virt_to_phys(TEXTADDR) +# PARAMS_PHYS must be with 4MB of ZRELADDR +# INITRD_PHYS must be in RAM + + zreladdr-y := 0x02080000 +params_phys-y := 0x0207c000 +initrd_phys-y := 0x02180000 + +ZRELADDR := 0x02080000 +ZTEXTADDR := 0x0207c000 +PARAMS_PHYS := $(params_phys-y) +INITRD_PHYS := 0x02180000 + +# We now have a PIC decompressor implementation. Decompressors running +# from RAM should not define ZTEXTADDR. Decompressors running directly +# from ROM or Flash must define ZTEXTADDR (preferably via the config) +# FIXME: Previous assignment to ztextaddr-y is lost here. 
See SHARK +ifeq ($(CONFIG_ZBOOT_ROM),y) +ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) +ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) +else +ZTEXTADDR := 0 +ZBSSADDR := ALIGN(4) +endif + +export ZTEXTADDR ZBSSADDR ZRELADDR INITRD_PHYS PARAMS_PHYS + +targets := Image zImage bootpImage + +$(obj)/Image: vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +$(obj)/zImage: $(obj)/compressed/vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +$(obj)/compressed/vmlinux: vmlinux FORCE + $(Q)$(MAKE) $(build)=$(obj)/compressed $@ + +.PHONY: initrd +initrd: + @test "$(INITRD_PHYS)" != "" || \ + (echo This machine does not support INITRD; exit -1) + @test "$(INITRD)" != "" || \ + (echo You must specify INITRD; exit -1) + +install: $(obj)/Image + $(CONFIG_SHELL) $(obj)/install.sh \ + $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) \ + $(obj)/Image System.map "$(INSTALL_PATH)" + +zinstall: $(obj)/zImage + $(CONFIG_SHELL) $(obj)/install.sh \ + $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) \ + $(obj)/zImage System.map "$(INSTALL_PATH)" + +subdir- := compressed diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/Makefile linux-2.5.70-bk14/arch/arm26/boot/compressed/Makefile --- linux-2.5.70-bk13/arch/arm26/boot/compressed/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,50 @@ +# +# linux/arch/arm26/boot/compressed/Makefile +# +# create a compressed vmlinuz image from the original vmlinux +# +# Note! ZTEXTADDR, ZBSSADDR and ZRELADDR are now exported +# from arch/arm26/boot/Makefile +# + +HEAD = head.o +OBJS = misc.o +FONTC = drivers/video/console/font_acorn_8x8.c + +OBJS += ll_char_wr.o font.o +CFLAGS_misc.o := -DPARAMS_PHYS=$(PARAMS_PHYS) + +targets := vmlinux vmlinux.lds piggy piggy.gz piggy.o font.o head.o $(OBJS) + +SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/LOAD_ADDR/$(ZRELADDR)/;s/BSS_START/$(ZBSSADDR)/ + +EXTRA_CFLAGS := $(CFLAGS_BOOT) -fpic +EXTRA_AFLAGS := -traditional + +LDFLAGS_vmlinux := -p -X \ + $(shell $(CC) $(CFLAGS)) -T + +$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.o \ + $(addprefix $(obj)/, $(OBJS)) FORCE + $(call if_changed,ld) + @: + + +$(obj)/piggy: vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/piggy.gz: $(obj)/piggy FORCE + $(call if_changed,gzip) + +LDFLAGS_piggy.o := -r -b binary +$(obj)/piggy.o: $(obj)/piggy.gz FORCE + $(call if_changed,ld) + +$(obj)/font.o: $(FONTC) + $(CC) $(CFLAGS) -Dstatic= -c $(FONTC) -o $(obj)/font.o + +$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in Makefile arch/arm26/boot/Makefile .config + @sed "$(SEDFLAGS)" < $< > $@ + +$(obj)/misc.o: $(obj)/misc.c $(obj)/uncompress.h lib/inflate.c + diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/head.S linux-2.5.70-bk14/arch/arm26/boot/compressed/head.S --- linux-2.5.70-bk13/arch/arm26/boot/compressed/head.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/head.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,517 @@ +/* + * linux/arch/arm/boot/compressed/head.S + * + * Copyright (C) 1996-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + +/* + * Debugging stuff + * + * Note that these macros must not contain any code which is not + * 100% relocatable. Any attempt to do so will result in a crash. 
+ * Please select one of the following when turning on debugging. + */ + + .macro kputc,val + mov r0, \val + bl putc + .endm + + .macro kphex,val,len + mov r0, \val + mov r1, #\len + bl phex + .endm + + .macro debug_reloc_start + .endm + + .macro debug_reloc_end + .endm + + .section ".start", #alloc, #execinstr +/* + * sort out different calling conventions + */ + .align +start: + .type start,#function + .rept 8 + mov r0, r0 + .endr + + b 1f + .word 0x016f2818 @ Magic numbers to help the loader + .word start @ absolute load/run zImage address + .word _edata @ zImage end address +1: mov r7, r1 @ save architecture ID + mov r8, #0 @ save r0 + teqp pc, #0x0c000003 @ turn off interrupts + + .text + adr r0, LC0 + ldmia r0, {r1, r2, r3, r4, r5, r6, ip, sp} + subs r0, r0, r1 @ calculate the delta offset + + teq r0, #0 @ if delta is zero, we're + beq not_relocated @ running at the address we + @ were linked at. + + add r2, r2, r0 @ different address, so we + add r3, r3, r0 @ need to fix up various + add r5, r5, r0 @ pointers. + add r6, r6, r0 + add ip, ip, r0 + add sp, sp, r0 + +1: ldr r1, [r6, #0] @ relocate entries in the GOT + add r1, r1, r0 @ table. This fixes up the + str r1, [r6], #4 @ C references. + cmp r6, ip + blo 1b + +not_relocated: mov r0, #0 +1: str r0, [r2], #4 @ clear bss + str r0, [r2], #4 + str r0, [r2], #4 + str r0, [r2], #4 + cmp r2, r3 + blo 1b + + bl cache_on + + mov r1, sp @ malloc space above stack + add r2, sp, #0x10000 @ 64k max + +/* + * Check to see if we will overwrite ourselves. + * r4 = final kernel address + * r5 = start of this image + * r2 = end of malloc space (and therefore this image) + * We basically want: + * r4 >= r2 -> OK + * r4 + image length <= r5 -> OK + */ + cmp r4, r2 + bhs wont_overwrite + add r0, r4, #4096*1024 @ 4MB largest kernel size + cmp r0, r5 + bls wont_overwrite + + mov r5, r2 @ decompress after malloc space + mov r0, r5 + mov r3, r7 + bl decompress_kernel + + add r0, r0, #127 + bic r0, r0, #127 @ align the kernel length +/* + * r0 = decompressed kernel length + * r1-r3 = unused + * r4 = kernel execution address + * r5 = decompressed kernel start + * r6 = processor ID + * r7 = architecture ID + * r8-r14 = unused + */ + add r1, r5, r0 @ end of decompressed kernel + adr r2, reloc_start + ldr r3, LC1 + add r3, r2, r3 +1: ldmia r2!, {r8 - r13} @ copy relocation code + stmia r1!, {r8 - r13} + ldmia r2!, {r8 - r13} + stmia r1!, {r8 - r13} + cmp r2, r3 + blo 1b + + bl cache_clean_flush + add pc, r5, r0 @ call relocation code + +/* + * We're not in danger of overwriting ourselves. Do this the simple way. + * + * r4 = kernel execution address + * r7 = architecture ID + */ +wont_overwrite: mov r0, r4 + mov r3, r7 + bl decompress_kernel + b call_kernel + + .type LC0, #object +LC0: .word LC0 @ r1 + .word __bss_start @ r2 + .word _end @ r3 + .word _load_addr @ r4 + .word _start @ r5 + .word _got_start @ r6 + .word _got_end @ ip + .word user_stack+4096 @ sp +LC1: .word reloc_end - reloc_start + .size LC0, . - LC0 + +/* + * Turn on the cache. We need to setup some page tables so that we + * can have both the I and D caches on. + * + * We place the page tables 16k down from the kernel execution address, + * and we hope that nothing else is using it. If we're using it, we + * will go pop! 
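The self-overwrite test above ("r4 >= r2 -> OK, r4 + image length <= r5 -> OK") reads as follows in C, using the register roles from the comment and the 4MB worst-case kernel size from the assembly. This is a sketch only; the function name is illustrative:

#include <stdbool.h>

#define MAX_KERNEL_SIZE (4096 * 1024UL)	/* "4MB largest kernel size" */

/* r4 = final kernel address, r5 = start of this image,
 * r2 = end of malloc space (and therefore end of this image) */
bool must_relocate(unsigned long r4, unsigned long r5, unsigned long r2)
{
	if (r4 >= r2)				/* kernel lands entirely above us */
		return false;
	if (r4 + MAX_KERNEL_SIZE <= r5)		/* kernel ends below us */
		return false;
	return true;	/* overlap: decompress past malloc space, then relocate */
}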
+ * + * On entry, + * r4 = kernel execution address + * r6 = processor ID + * r7 = architecture number + * r8 = run-time address of "start" + * On exit, + * r1, r2, r3, r8, r9, r12 corrupted + * This routine must preserve: + * r4, r5, r6, r7 + */ + .align 5 +cache_on: mov r3, #8 @ cache_on function + b call_cache_fn + +__setup_mmu: sub r3, r4, #16384 @ Page directory size + bic r3, r3, #0xff @ Align the pointer + bic r3, r3, #0x3f00 +/* + * Initialise the page tables, turning on the cacheable and bufferable + * bits for the RAM area only. + */ + mov r0, r3 + mov r8, r0, lsr #18 + mov r8, r8, lsl #18 @ start of RAM + add r9, r8, #0x10000000 @ a reasonable RAM size + mov r1, #0x12 + orr r1, r1, #3 << 10 + add r2, r3, #16384 +1: cmp r1, r8 @ if virt > start of RAM + orrhs r1, r1, #0x0c @ set cacheable, bufferable + cmp r1, r9 @ if virt > end of RAM + bichs r1, r1, #0x0c @ clear cacheable, bufferable + str r1, [r0], #4 @ 1:1 mapping + add r1, r1, #1048576 + teq r0, r2 + bne 1b +/* + * If ever we are running from Flash, then we surely want the cache + * to be enabled also for our execution instance... We map 2MB of it + * so there is no map overlap problem for up to 1 MB compressed kernel. + * If the execution is in RAM then we would only be duplicating the above. + */ + mov r1, #0x1e + orr r1, r1, #3 << 10 + mov r2, pc, lsr #20 + orr r1, r1, r2, lsl #20 + add r0, r3, r2, lsl #2 + str r1, [r0], #4 + add r1, r1, #1048576 + str r1, [r0] + mov pc, lr + +__armv4_cache_on: + mov r12, lr + bl __setup_mmu + mov r0, #0 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs + mrc p15, 0, r0, c1, c0, 0 @ read control reg + orr r0, r0, #0x1000 @ I-cache enable + orr r0, r0, #0x0030 + b __common_cache_on + +__arm6_cache_on: + mov r12, lr + bl __setup_mmu + mov r0, #0 + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 + mov r0, #0x30 +__common_cache_on: +#ifndef DEBUG + orr r0, r0, #0x000d @ Write buffer, mmu +#endif + mov r1, #-1 + mcr p15, 0, r3, c2, c0, 0 @ load page table pointer + mcr p15, 0, r1, c3, c0, 0 @ load domain access control + mcr p15, 0, r0, c1, c0, 0 @ load control register + mov pc, r12 + +/* + * All code following this line is relocatable. It is relocated by + * the above code to the end of the decompressed kernel image and + * executed there. During this time, we have no stacks. + * + * r0 = decompressed kernel length + * r1-r3 = unused + * r4 = kernel execution address + * r5 = decompressed kernel start + * r6 = processor ID + * r7 = architecture ID + * r8-r14 = unused + */ + .align 5 +reloc_start: add r8, r5, r0 + debug_reloc_start + mov r1, r4 +1: + .rept 4 + ldmia r5!, {r0, r2, r3, r9 - r13} @ relocate kernel + stmia r1!, {r0, r2, r3, r9 - r13} + .endr + + cmp r5, r8 + blo 1b + debug_reloc_end + +call_kernel: bl cache_clean_flush + bl cache_off + mov r0, #0 + mov r1, r7 @ restore architecture number + mov pc, r4 @ call kernel + +/* + * Here follow the relocatable cache support functions for the + * various processors. This is a generic hook for locating an + * entry and jumping to an instruction at the specified offset + * from the start of the block. Please note this is all position + * independent code. 
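For reference, the page-table fill in __setup_mmu above corresponds roughly to this C: 4096 one-megabyte 1:1 section entries (descriptor bits 0x12 | 3<<10), with the cacheable/bufferable bits 0x0c set only inside the assumed 256MB RAM window. A sketch under those assumptions; the function and parameter names are illustrative:

/* pgdir points at the 16KB page directory (4096 four-byte entries). */
void setup_mmu_sketch(unsigned long *pgdir,
		      unsigned long ram_start, unsigned long ram_end)
{
	unsigned long addr = 0;
	int i;

	for (i = 0; i < 4096; i++, addr += 0x100000) {
		unsigned long desc = addr | 0x12 | (3 << 10);	/* 1:1 section */

		if (addr >= ram_start && addr < ram_end)
			desc |= 0x0c;		/* cacheable + bufferable for RAM */
		pgdir[i] = desc;
	}
}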
+ * + * r1 = corrupted + * r2 = corrupted + * r3 = block offset + * r6 = corrupted + * r12 = corrupted + */ + +call_cache_fn: adr r12, proc_types + mrc p15, 0, r6, c0, c0 @ get processor ID +1: ldr r1, [r12, #0] @ get value + ldr r2, [r12, #4] @ get mask + eor r1, r1, r6 @ (real ^ match) + tst r1, r2 @ & mask + addeq pc, r12, r3 @ call cache function + add r12, r12, #4*5 + b 1b + +/* + * Table for cache operations. This is basically: + * - CPU ID match + * - CPU ID mask + * - 'cache on' method instruction + * - 'cache off' method instruction + * - 'cache flush' method instruction + * + * We match an entry using: ((real_id ^ match) & mask) == 0 + * + * Writethrough caches generally only need 'on' and 'off' + * methods. Writeback caches _must_ have the flush method + * defined. + */ + .type proc_types,#object +proc_types: + .word 0x41560600 @ ARM6/610 + .word 0xffffffe0 + b __arm6_cache_off @ works, but slow + b __arm6_cache_off + mov pc, lr +@ b __arm6_cache_on @ untested +@ b __arm6_cache_off +@ b __armv3_cache_flush + + .word 0x41007000 @ ARM7/710 + .word 0xfff8fe00 + b __arm7_cache_off + b __arm7_cache_off + mov pc, lr + + .word 0x41807200 @ ARM720T (writethrough) + .word 0xffffff00 + b __armv4_cache_on + b __armv4_cache_off + mov pc, lr + + .word 0x41129200 @ ARM920T + .word 0xff00fff0 + b __armv4_cache_on + b __armv4_cache_off + b __armv4_cache_flush + + .word 0x4401a100 @ sa110 / sa1100 + .word 0xffffffe0 + b __armv4_cache_on + b __armv4_cache_off + b __armv4_cache_flush + + .word 0x6901b110 @ sa1110 + .word 0xfffffff0 + b __armv4_cache_on + b __armv4_cache_off + b __armv4_cache_flush + + .word 0x69050000 @ xscale + .word 0xffff0000 + b __armv4_cache_on + b __armv4_cache_off + b __armv4_cache_flush + + .word 0 @ unrecognised type + .word 0 + mov pc, lr + mov pc, lr + mov pc, lr + + .size proc_types, . - proc_types + +/* + * Turn off the Cache and MMU. ARMv3 does not support + * reading the control register, but ARMv4 does. + * + * On entry, r6 = processor ID + * On exit, r0, r1, r2, r3, r12 corrupted + * This routine must preserve: r4, r6, r7 + */ + .align 5 +cache_off: mov r3, #12 @ cache_off function + b call_cache_fn + +__armv4_cache_off: + mrc p15, 0, r0, c1, c0 + bic r0, r0, #0x000d + mcr p15, 0, r0, c1, c0 @ turn MMU and cache off + mov r0, #0 + mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4 + mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4 + mov pc, lr + +__arm6_cache_off: + mov r0, #0x00000030 @ ARM6 control reg. + b __armv3_cache_off + +__arm7_cache_off: + mov r0, #0x00000070 @ ARM7 control reg. + b __armv3_cache_off + +__armv3_cache_off: + mcr p15, 0, r0, c1, c0, 0 @ turn MMU and cache off + mov r0, #0 + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 + mov pc, lr + +/* + * Clean and flush the cache to maintain consistency. + * + * On entry, + * r6 = processor ID + * On exit, + * r1, r2, r3, r12 corrupted + * This routine must preserve: + * r0, r4, r5, r6, r7 + */ + .align 5 +cache_clean_flush: + mov r3, #16 + b call_cache_fn + +__armv4_cache_flush: + bic r1, pc, #31 + add r2, r1, #65536 @ 2x the largest dcache size +1: ldr r12, [r1], #32 @ s/w flush D cache + teq r1, r2 + bne 1b + + mcr p15, 0, r1, c7, c7, 0 @ flush I cache + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mov pc, lr + +__armv3_cache_flush: + mov r1, #0 + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + mov pc, lr + +/* + * Various debugging routines for printing hex characters and + * memory, which again must be relocatable. 
+ */ +#ifdef DEBUG + .type phexbuf,#object +phexbuf: .space 12 + .size phexbuf, . - phexbuf + +phex: adr r3, phexbuf + mov r2, #0 + strb r2, [r3, r1] +1: subs r1, r1, #1 + movmi r0, r3 + bmi puts + and r2, r0, #15 + mov r0, r0, lsr #4 + cmp r2, #10 + addge r2, r2, #7 + add r2, r2, #'0' + strb r2, [r3, r1] + b 1b + +puts: loadsp r3 +1: ldrb r2, [r0], #1 + teq r2, #0 + moveq pc, lr +2: writeb r2 + mov r1, #0x00020000 +3: subs r1, r1, #1 + bne 3b + teq r2, #'\n' + moveq r2, #'\r' + beq 2b + teq r0, #0 + bne 1b + mov pc, lr +putc: + mov r2, r0 + mov r0, #0 + loadsp r3 + b 2b + +memdump: mov r12, r0 + mov r10, lr + mov r11, #0 +2: mov r0, r11, lsl #2 + add r0, r0, r12 + mov r1, #8 + bl phex + mov r0, #':' + bl putc +1: mov r0, #' ' + bl putc + ldr r0, [r12, r11, lsl #2] + mov r1, #8 + bl phex + and r0, r11, #7 + teq r0, #3 + moveq r0, #' ' + bleq putc + and r0, r11, #7 + add r11, r11, #1 + teq r0, #7 + bne 1b + mov r0, #'\n' + bl putc + cmp r11, #64 + blt 2b + mov pc, r10 +#endif + +reloc_end: + + .align + .section ".stack", "aw" +user_stack: .space 4096 diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/hw-bse.c linux-2.5.70-bk14/arch/arm26/boot/compressed/hw-bse.c --- linux-2.5.70-bk13/arch/arm26/boot/compressed/hw-bse.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/hw-bse.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,74 @@ +/* + * Bright Star Engineering Inc. + * + * code for readng parameters from the + * parameter blocks of the boot block + * flash memory + * + */ + +static int strcmp(const char *s1, const char *s2) +{ + while (*s1 != '\0' && *s1 == *s2) + { + s1++; + s2++; + } + + return (*(unsigned char *) s1) - (*(unsigned char *) s2); +} + +struct pblk_t { + char type; + unsigned short size; +}; + +static char *bse_getflashparam(char *name) { + unsigned int esize; + char *q,*r; + unsigned char *p,*e; + struct pblk_t *thepb = (struct pblk_t *) 0x00004000; + struct pblk_t *altpb = (struct pblk_t *) 0x00006000; + if (thepb->type&1) { + if (altpb->type&1) { + /* no valid param block */ + return (char*)0; + } else { + /* altpb is valid */ + struct pblk_t *tmp; + tmp = thepb; + thepb = altpb; + altpb = tmp; + } + } + p = (char*)thepb + sizeof(struct pblk_t); + e = p + thepb->size; + while (p < e) { + q = p; + esize = *p; + if (esize == 0xFF) break; + if (esize == 0) break; + if (esize > 127) { + esize = (esize&0x7F)<<8 | p[1]; + q++; + } + q++; + r=q; + if (*r && ((name == 0) || (!strcmp(name,r)))) { + while (*q++) ; + return q; + } + p+=esize; + } + return (char*)0; +} + +void bse_setup(void) { + /* extract the linux cmdline from flash */ + char *name=bse_getflashparam("linuxboot"); + char *x = (char *)0xc0000100; + if (name) { + while (*name) *x++=*name++; + } + *x=0; +} diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/ll_char_wr.S linux-2.5.70-bk14/arch/arm26/boot/compressed/ll_char_wr.S --- linux-2.5.70-bk13/arch/arm26/boot/compressed/ll_char_wr.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/ll_char_wr.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,162 @@ +/* + * linux/arch/arm/lib/ll_char_wr.S + * + * Copyright (C) 1995, 1996 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Speedups & 1bpp code (C) 1996 Philip Blundell & Russell King. + * + * 10-04-96 RMK Various cleanups & reduced register usage. 
+ * 08-04-98 RMK Shifts re-ordered + */ + +@ Regs: [] = corruptible +@ {} = used +@ () = do not use + +#include +#include + .text + +#define BOLD 0x01 +#define ITALIC 0x02 +#define UNDERLINE 0x04 +#define FLASH 0x08 +#define INVERSE 0x10 + +LC0: .word bytes_per_char_h + .word video_size_row + .word acorndata_8x8 + .word con_charconvtable + +ENTRY(ll_write_char) + stmfd sp!, {r4 - r7, lr} +@ +@ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc) +@ + eor ip, r1, #UNDERLINE << 9 +/* + * calculate colours + */ + tst r1, #INVERSE << 9 + moveq r2, r1, lsr #16 + moveq r3, r1, lsr #24 + movne r2, r1, lsr #24 + movne r3, r1, lsr #16 + and r3, r3, #255 + and r2, r2, #255 +/* + * calculate offset into character table + */ + mov r1, r1, lsl #23 + mov r1, r1, lsr #20 +/* + * calculate offset required for each row [maybe I should make this an argument to this fn. + * Have to see what the register usage is like in the calling routines. + */ + adr r4, LC0 + ldmia r4, {r4, r5, r6, lr} + ldr r4, [r4] + ldr r5, [r5] +/* + * Go to resolution-dependent routine... + */ + cmp r4, #4 + blt Lrow1bpp + eor r2, r3, r2 @ Create eor mask to change colour from bg + orr r3, r3, r3, lsl #8 @ to fg. + orr r3, r3, r3, lsl #16 + add r0, r0, r5, lsl #3 @ Move to bottom of character + add r1, r1, #7 + ldrb r7, [r6, r1] + tst ip, #UNDERLINE << 9 + eoreq r7, r7, #255 + teq r4, #8 + beq Lrow8bpplp +@ +@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) +@ + orr r3, r3, r3, lsl #4 +Lrow4bpplp: ldr r7, [lr, r7, lsl #2] + mul r7, r2, r7 + tst r1, #7 @ avoid using r7 directly after + eor ip, r3, r7 + str ip, [r0, -r5]! + LOADREGS(eqfd, sp!, {r4 - r7, pc}) + sub r1, r1, #1 + ldrb r7, [r6, r1] + ldr r7, [lr, r7, lsl #2] + mul r7, r2, r7 + tst r1, #7 @ avoid using r7 directly after + eor ip, r3, r7 + str ip, [r0, -r5]! 
+ subne r1, r1, #1 + ldrneb r7, [r6, r1] + bne Lrow4bpplp + LOADREGS(fd, sp!, {r4 - r7, pc}) + +@ +@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) +@ +Lrow8bpplp: mov ip, r7, lsr #4 + ldr ip, [lr, ip, lsl #2] + mul r4, r2, ip + and ip, r7, #15 @ avoid r4 + ldr ip, [lr, ip, lsl #2] @ avoid r4 + mul ip, r2, ip @ avoid r4 + eor r4, r3, r4 @ avoid ip + tst r1, #7 @ avoid ip + sub r0, r0, r5 @ avoid ip + eor ip, r3, ip + stmia r0, {r4, ip} + LOADREGS(eqfd, sp!, {r4 - r7, pc}) + sub r1, r1, #1 + ldrb r7, [r6, r1] + mov ip, r7, lsr #4 + ldr ip, [lr, ip, lsl #2] + mul r4, r2, ip + and ip, r7, #15 @ avoid r4 + ldr ip, [lr, ip, lsl #2] @ avoid r4 + mul ip, r2, ip @ avoid r4 + eor r4, r3, r4 @ avoid ip + tst r1, #7 @ avoid ip + sub r0, r0, r5 @ avoid ip + eor ip, r3, ip + stmia r0, {r4, ip} + subne r1, r1, #1 + ldrneb r7, [r6, r1] + bne Lrow8bpplp + LOADREGS(fd, sp!, {r4 - r7, pc}) + +@ +@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc) +@ +Lrow1bpp: add r6, r6, r1 + ldmia r6, {r4, r7} + tst ip, #INVERSE << 9 + mvnne r4, r4 + mvnne r7, r7 + strb r4, [r0], r5 + mov r4, r4, lsr #8 + strb r4, [r0], r5 + mov r4, r4, lsr #8 + strb r4, [r0], r5 + mov r4, r4, lsr #8 + strb r4, [r0], r5 + strb r7, [r0], r5 + mov r7, r7, lsr #8 + strb r7, [r0], r5 + mov r7, r7, lsr #8 + strb r7, [r0], r5 + mov r7, r7, lsr #8 + tst ip, #UNDERLINE << 9 + mvneq r7, r7 + strb r7, [r0], r5 + LOADREGS(fd, sp!, {r4 - r7, pc}) + + .bss +ENTRY(con_charconvtable) + .space 1024 diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/misc.c linux-2.5.70-bk14/arch/arm26/boot/compressed/misc.c --- linux-2.5.70-bk13/arch/arm26/boot/compressed/misc.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/misc.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,316 @@ +/* + * misc.c + * + * This is a collection of several routines from gzip-1.0.3 + * adapted for Linux. + * + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + * + * Modified for ARM Linux by Russell King + * + * Nicolas Pitre 1999/04/14 : + * For this code to run directly from Flash, all constant variables must + * be marked with 'const' and all other variables initialized at run-time + * only. This way all non constant variables will end up in the bss segment, + * which should point to addresses in RAM and cleared to 0 on start. + * This allows for a much quicker boot time. + */ + +unsigned int __machine_arch_type; + +#include + +#include +#include "uncompress.h" + +#ifdef STANDALONE_DEBUG +#define puts printf +#endif + +#define __ptr_t void * + +/* + * Optimised C version of memzero for the ARM. 
+ */ +void __memzero (__ptr_t s, size_t n) +{ + union { void *vp; unsigned long *ulp; unsigned char *ucp; } u; + int i; + + u.vp = s; + + for (i = n >> 5; i > 0; i--) { + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + } + + if (n & 1 << 4) { + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + *u.ulp++ = 0; + } + + if (n & 1 << 3) { + *u.ulp++ = 0; + *u.ulp++ = 0; + } + + if (n & 1 << 2) + *u.ulp++ = 0; + + if (n & 1 << 1) { + *u.ucp++ = 0; + *u.ucp++ = 0; + } + + if (n & 1) + *u.ucp++ = 0; +} + +static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src, + size_t __n) +{ + int i = 0; + unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src; + + for (i = __n >> 3; i > 0; i--) { + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + } + + if (__n & 1 << 2) { + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + } + + if (__n & 1 << 1) { + *d++ = *s++; + *d++ = *s++; + } + + if (__n & 1) + *d++ = *s++; + + return __dest; +} + +/* + * gzip delarations + */ +#define OF(args) args +#define STATIC static + +typedef unsigned char uch; +typedef unsigned short ush; +typedef unsigned long ulg; + +#define WSIZE 0x8000 /* Window size must be at least 32k, */ + /* and a power of two */ + +static uch *inbuf; /* input buffer */ +static uch window[WSIZE]; /* Sliding window buffer */ + +static unsigned insize; /* valid bytes in inbuf */ +static unsigned inptr; /* index of next byte to be processed in inbuf */ +static unsigned outcnt; /* bytes in output buffer */ + +/* gzip flag byte */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ +#define RESERVED 0xC0 /* bit 6,7: reserved */ + +#define get_byte() (inptr < insize ? 
inbuf[inptr++] : fill_inbuf()) + +/* Diagnostic functions */ +#ifdef DEBUG +# define Assert(cond,msg) {if(!(cond)) error(msg);} +# define Trace(x) fprintf x +# define Tracev(x) {if (verbose) fprintf x ;} +# define Tracevv(x) {if (verbose>1) fprintf x ;} +# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} +# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + +static int fill_inbuf(void); +static void flush_window(void); +static void error(char *m); +static void gzip_mark(void **); +static void gzip_release(void **); + +extern char input_data[]; +extern char input_data_end[]; + +static uch *output_data; +static ulg output_ptr; +static ulg bytes_out; + +static void *malloc(int size); +static void free(void *where); +static void error(char *m); +static void gzip_mark(void **); +static void gzip_release(void **); + +static void puts(const char *); + +extern int end; +static ulg free_mem_ptr; +static ulg free_mem_ptr_end; + +#define HEAP_SIZE 0x2000 + +#include "../../../../lib/inflate.c" + +#ifndef STANDALONE_DEBUG +static void *malloc(int size) +{ + void *p; + + if (size <0) error("Malloc error\n"); + if (free_mem_ptr <= 0) error("Memory error\n"); + + free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ + + p = (void *)free_mem_ptr; + free_mem_ptr += size; + + if (free_mem_ptr >= free_mem_ptr_end) + error("Out of memory"); + return p; +} + +static void free(void *where) +{ /* gzip_mark & gzip_release do the free */ +} + +static void gzip_mark(void **ptr) +{ + arch_decomp_wdog(); + *ptr = (void *) free_mem_ptr; +} + +static void gzip_release(void **ptr) +{ + arch_decomp_wdog(); + free_mem_ptr = (long) *ptr; +} +#else +static void gzip_mark(void **ptr) +{ +} + +static void gzip_release(void **ptr) +{ +} +#endif + +/* =========================================================================== + * Fill the input buffer. This is called only when the buffer is empty + * and at least one byte is really needed. + */ +int fill_inbuf(void) +{ + if (insize != 0) + error("ran out of input data\n"); + + inbuf = input_data; + insize = &input_data_end[0] - &input_data[0]; + + inptr = 1; + return inbuf[0]; +} + +/* =========================================================================== + * Write the output window window[0..outcnt-1] and update crc and bytes_out. + * (Used for the decompressed data only.) 
+ */ +void flush_window(void) +{ + ulg c = crc; + unsigned n; + uch *in, *out, ch; + + in = window; + out = &output_data[output_ptr]; + for (n = 0; n < outcnt; n++) { + ch = *out++ = *in++; + c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); + } + crc = c; + bytes_out += (ulg)outcnt; + output_ptr += (ulg)outcnt; + outcnt = 0; + puts("."); +} + +static void error(char *x) +{ + int ptr; + + puts("\n\n"); + puts(x); + puts("\n\n -- System halted"); + + while(1); /* Halt */ +} + +#ifndef STANDALONE_DEBUG + +ulg +decompress_kernel(ulg output_start, ulg free_mem_ptr_p, ulg free_mem_ptr_end_p, + int arch_id) +{ + output_data = (uch *)output_start; /* Points to kernel start */ + free_mem_ptr = free_mem_ptr_p; + free_mem_ptr_end = free_mem_ptr_end_p; + __machine_arch_type = arch_id; + + arch_decomp_setup(); + + makecrc(); + puts("Uncompressing Linux..."); + gunzip(); + puts(" done, booting the kernel.\n"); + return output_ptr; +} +#else + +char output_buffer[1500*1024]; + +int main() +{ + output_data = output_buffer; + + makecrc(); + puts("Uncompressing Linux..."); + gunzip(); + puts("done.\n"); + return 0; +} +#endif + diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/ofw-shark.c linux-2.5.70-bk14/arch/arm26/boot/compressed/ofw-shark.c --- linux-2.5.70-bk13/arch/arm26/boot/compressed/ofw-shark.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/ofw-shark.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,258 @@ +/* + * linux/arch/arm/boot/compressed/ofw-shark.c + * + * by Alexander Schulz + * + * This file is used to get some basic information + * about the memory layout of the shark we are running + * on. Memory is usually divided in blocks a 8 MB. + * And bootargs are copied from OpenFirmware. + */ + + +#include +#include +#include +#include + + +asmlinkage void +create_params (unsigned long *buffer) +{ + /* Is there a better address? Also change in mach-shark/core.c */ + struct tag *tag = (struct tag *) 0x08003000; + int j,i,m,k,nr_banks,size; + unsigned char *c; + + /* Head of the taglist */ + tag->hdr.tag = ATAG_CORE; + tag->hdr.size = tag_size(tag_core); + tag->u.core.flags = FLAG_READONLY; + tag->u.core.pagesize = PAGE_SIZE; + tag->u.core.rootdev = 0; + + /* Build up one tagged block for each memory region */ + size=0; + nr_banks=(unsigned int) buffer[0]; + for (j=0;jhdr.tag = ATAG_MEM; + tag->hdr.size = tag_size(tag_mem32); + tag->u.mem.size = buffer[2*k+2]; + tag->u.mem.start = buffer[2*k+1]; + + size += buffer[2*k+2]; + + buffer[2*k+1]=0xffffffff; /* mark as copied */ + } + + /* The command line */ + tag = tag_next(tag); + tag->hdr.tag = ATAG_CMDLINE; + + c=(unsigned char *)(&buffer[34]); + j=0; + while (*c) tag->u.cmdline.cmdline[j++]=*c++; + + tag->u.cmdline.cmdline[j]=0; + tag->hdr.size = (j + 7 + sizeof(struct tag_header)) >> 2; + + /* Hardware revision */ + tag = tag_next(tag); + tag->hdr.tag = ATAG_REVISION; + tag->hdr.size = tag_size(tag_revision); + tag->u.revision.rev = ((unsigned char) buffer[33])-'0'; + + /* End of the taglist */ + tag = tag_next(tag); + tag->hdr.tag = 0; + tag->hdr.size = 0; +} + + +typedef int (*ofw_handle_t)(void *); + +/* Everything below is called with a wrong MMU setting. + * This means: no string constants, no initialization of + * arrays, no global variables! 
This is ugly but I didn't + * want to write this in assembler :-) + */ + +int +of_decode_int(const unsigned char *p) +{ + unsigned int i = *p++ << 8; + i = (i + *p++) << 8; + i = (i + *p++) << 8; + return (i + *p); +} + +int +OF_finddevice(ofw_handle_t openfirmware, char *name) +{ + unsigned int args[8]; + char service[12]; + + service[0]='f'; + service[1]='i'; + service[2]='n'; + service[3]='d'; + service[4]='d'; + service[5]='e'; + service[6]='v'; + service[7]='i'; + service[8]='c'; + service[9]='e'; + service[10]='\0'; + + args[0]=(unsigned int)service; + args[1]=1; + args[2]=1; + args[3]=(unsigned int)name; + + if (openfirmware(args) == -1) + return -1; + return args[4]; +} + +int +OF_getproplen(ofw_handle_t openfirmware, int handle, char *prop) +{ + unsigned int args[8]; + char service[12]; + + service[0]='g'; + service[1]='e'; + service[2]='t'; + service[3]='p'; + service[4]='r'; + service[5]='o'; + service[6]='p'; + service[7]='l'; + service[8]='e'; + service[9]='n'; + service[10]='\0'; + + args[0] = (unsigned int)service; + args[1] = 2; + args[2] = 1; + args[3] = (unsigned int)handle; + args[4] = (unsigned int)prop; + + if (openfirmware(args) == -1) + return -1; + return args[5]; +} + +int +OF_getprop(ofw_handle_t openfirmware, int handle, char *prop, void *buf, unsigned int buflen) +{ + unsigned int args[8]; + char service[8]; + + service[0]='g'; + service[1]='e'; + service[2]='t'; + service[3]='p'; + service[4]='r'; + service[5]='o'; + service[6]='p'; + service[7]='\0'; + + args[0] = (unsigned int)service; + args[1] = 4; + args[2] = 1; + args[3] = (unsigned int)handle; + args[4] = (unsigned int)prop; + args[5] = (unsigned int)buf; + args[6] = buflen; + + if (openfirmware(args) == -1) + return -1; + return args[7]; +} + +asmlinkage void ofw_init(ofw_handle_t o, int *nomr, int *pointer) +{ + int phandle,i,mem_len,buffer[32]; + char temp[15]; + + temp[0]='/'; + temp[1]='m'; + temp[2]='e'; + temp[3]='m'; + temp[4]='o'; + temp[5]='r'; + temp[6]='y'; + temp[7]='\0'; + + phandle=OF_finddevice(o,temp); + + temp[0]='r'; + temp[1]='e'; + temp[2]='g'; + temp[3]='\0'; + + mem_len = OF_getproplen(o,phandle, temp); + OF_getprop(o,phandle, temp, buffer, mem_len); + *nomr=mem_len >> 3; + + for (i=0; i<=mem_len/4; i++) pointer[i]=of_decode_int((const unsigned char *)&buffer[i]); + + temp[0]='/'; + temp[1]='c'; + temp[2]='h'; + temp[3]='o'; + temp[4]='s'; + temp[5]='e'; + temp[6]='n'; + temp[7]='\0'; + + phandle=OF_finddevice(o,temp); + + temp[0]='b'; + temp[1]='o'; + temp[2]='o'; + temp[3]='t'; + temp[4]='a'; + temp[5]='r'; + temp[6]='g'; + temp[7]='s'; + temp[8]='\0'; + + mem_len = OF_getproplen(o,phandle, temp); + OF_getprop(o,phandle, temp, buffer, mem_len); + if (mem_len > 128) mem_len=128; + for (i=0; i<=mem_len/4; i++) pointer[i+33]=buffer[i]; + pointer[i+33]=0; + + temp[0]='/'; + temp[1]='\0'; + phandle=OF_finddevice(o,temp); + temp[0]='b'; + temp[1]='a'; + temp[2]='n'; + temp[3]='n'; + temp[4]='e'; + temp[5]='r'; + temp[6]='-'; + temp[7]='n'; + temp[8]='a'; + temp[9]='m'; + temp[10]='e'; + temp[11]='\0'; + mem_len = OF_getproplen(o,phandle, temp); + OF_getprop(o,phandle, temp, buffer, mem_len); + (unsigned char) pointer[32] = ((unsigned char *) buffer)[mem_len-2]; +} diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/uncompress.h linux-2.5.70-bk14/arch/arm26/boot/compressed/uncompress.h --- linux-2.5.70-bk13/arch/arm26/boot/compressed/uncompress.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/uncompress.h 2003-06-09 04:42:02.000000000 -0700 @@ 
-0,0 +1,111 @@ +/* + * linux/include/asm-arm/arch-arc/uncompress.h + * + * Copyright (C) 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#define VIDMEM ((char *)0x02000000) + +int video_num_columns, video_num_lines, video_size_row; +int white, bytes_per_char_h; +extern unsigned long con_charconvtable[256]; + +struct param_struct { + unsigned long page_size; + unsigned long nr_pages; + unsigned long ramdisk_size; + unsigned long mountrootrdonly; + unsigned long rootdev; + unsigned long video_num_cols; + unsigned long video_num_rows; + unsigned long video_x; + unsigned long video_y; + unsigned long memc_control_reg; + unsigned char sounddefault; + unsigned char adfsdrives; + unsigned char bytes_per_char_h; + unsigned char bytes_per_char_v; + unsigned long unused[256/4-11]; +}; + +static struct param_struct *params = (struct param_struct *)0x0207c000; + +/* + * This does not append a newline + */ +static void puts(const char *s) +{ + extern void ll_write_char(char *, unsigned long); + int x,y; + unsigned char c; + char *ptr; + + x = params->video_x; + y = params->video_y; + + while ( ( c = *(unsigned char *)s++ ) != '\0' ) { + if ( c == '\n' ) { + x = 0; + if ( ++y >= video_num_lines ) { + y--; + } + } else { + ptr = VIDMEM + ((y*video_num_columns*params->bytes_per_char_v+x)*bytes_per_char_h); + ll_write_char(ptr, c|(white<<16)); + if ( ++x >= video_num_columns ) { + x = 0; + if ( ++y >= video_num_lines ) { + y--; + } + } + } + } + + params->video_x = x; + params->video_y = y; +} + +static void error(char *x); + +/* + * Setup for decompression + */ +static void arch_decomp_setup(void) +{ + int i; + + video_num_lines = params->video_num_rows; + video_num_columns = params->video_num_cols; + bytes_per_char_h = params->bytes_per_char_h; + video_size_row = video_num_columns * bytes_per_char_h; + if (bytes_per_char_h == 4) + for (i = 0; i < 256; i++) + con_charconvtable[i] = + (i & 128 ? 1 << 0 : 0) | + (i & 64 ? 1 << 4 : 0) | + (i & 32 ? 1 << 8 : 0) | + (i & 16 ? 1 << 12 : 0) | + (i & 8 ? 1 << 16 : 0) | + (i & 4 ? 1 << 20 : 0) | + (i & 2 ? 1 << 24 : 0) | + (i & 1 ? 1 << 28 : 0); + else + for (i = 0; i < 16; i++) + con_charconvtable[i] = + (i & 8 ? 1 << 0 : 0) | + (i & 4 ? 1 << 8 : 0) | + (i & 2 ? 1 << 16 : 0) | + (i & 1 ? 1 << 24 : 0); + + white = bytes_per_char_h == 8 ? 0xfc : 7; + + if (params->nr_pages * params->page_size < 4096*1024) error("<4M of mem\n"); +} + +/* + * nothing to do + */ +#define arch_decomp_wdog() diff -urN linux-2.5.70-bk13/arch/arm26/boot/compressed/vmlinux.lds.in linux-2.5.70-bk14/arch/arm26/boot/compressed/vmlinux.lds.in --- linux-2.5.70-bk13/arch/arm26/boot/compressed/vmlinux.lds.in 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/compressed/vmlinux.lds.in 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,60 @@ +/* + * linux/arch/arm/boot/compressed/vmlinux.lds.in + * + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + . = LOAD_ADDR; + _load_addr = .; + + . 
= TEXT_START; + _text = .; + + .text : { + _start = .; + *(.start) + *(.text) + *(.fixup) + *(.gnu.warning) + *(.rodata) + *(.rodata.*) + *(.glue_7) + *(.glue_7t) + input_data = .; + arch/arm26/boot/compressed/piggy.o + input_data_end = .; + . = ALIGN(4); + } + + _etext = .; + + _got_start = .; + .got : { *(.got) } + _got_end = .; + .got.plt : { *(.got.plt) } + .data : { *(.data) } + _edata = .; + + . = BSS_START; + __bss_start = .; + .bss : { *(.bss) } + _end = .; + + .stack (NOLOAD) : { *(.stack) } + + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } +} + diff -urN linux-2.5.70-bk13/arch/arm26/boot/install.sh linux-2.5.70-bk14/arch/arm26/boot/install.sh --- linux-2.5.70-bk13/arch/arm26/boot/install.sh 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/boot/install.sh 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,62 @@ +#!/bin/sh +# +# arch/arm/boot/install.sh +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995 by Linus Torvalds +# +# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin +# Adapted from code in arch/i386/boot/install.sh by Russell King +# Stolen from arch/arm/boot/install.sh by Ian Molton +# +# "make install" script for arm architecture +# +# Arguments: +# $1 - kernel version +# $2 - kernel image file +# $3 - kernel map file +# $4 - default install path (blank if root directory) +# + +# User may have a custom install script + +if [ -x /sbin/installkernel ]; then + exec /sbin/installkernel "$@" +fi + +if [ "$2" = "zImage" ]; then +# Compressed install + echo "Installing compressed kernel" + if [ -f $4/vmlinuz-$1 ]; then + mv $4/vmlinuz-$1 $4/vmlinuz.old + fi + + if [ -f $4/System.map-$1 ]; then + mv $4/System.map-$1 $4/System.old + fi + + cat $2 > $4/vmlinuz-$1 + cp $3 $4/System.map-$1 +else +# Normal install + echo "Installing normal kernel" + if [ -f $4/vmlinux-$1 ]; then + mv $4/vmlinux-$1 $4/vmlinux.old + fi + + if [ -f $4/System.map ]; then + mv $4/System.map $4/System.old + fi + + cat $2 > $4/vmlinux-$1 + cp $3 $4/System.map +fi + +if [ -x /sbin/loadmap ]; then + /sbin/loadmap --rdev /dev/ima +else + echo "You have to install it yourself" +fi diff -urN linux-2.5.70-bk13/arch/arm26/config.in linux-2.5.70-bk14/arch/arm26/config.in --- linux-2.5.70-bk13/arch/arm26/config.in 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/config.in 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,151 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/config-language.txt. 
+# +mainmenu_name "Linux Kernel Configuration" + +define_bool CONFIG_ARM y +define_bool CONFIG_EISA n +define_bool CONFIG_SBUS n +define_bool CONFIG_MCA n +define_bool CONFIG_UID16 y +define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y +define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n +define_bool CONFIG_GENERIC_BUST_SPINLOCK n +define_bool CONFIG_GENERIC_ISA_DMA n + +source init/Config.in + +mainmenu_option next_comment +comment 'System Type' + +define_bool CONFIG_ARCH_ARCA5K +bool ' Archimedes' CONFIG_ARCH_ARC +bool ' A5000' CONFIG_ARCH_A5K + +# Definitions to make life easier +define_bool CONFIG_ARCH_ACORN y +define_bool CONFIG_CPU_32 n +define_bool CONFIG_CPU_26 y +bool '2MB physical memory' CONFIG_PAGESIZE_16 + +endmenu + +mainmenu_option next_comment +comment 'General setup' + +define_bool CONFIG_FIQ y + +# Compressed boot loader in ROM. Yes, we really want to ask about +# TEXT and BSS so we preserve their values in the config files. +bool 'Compressed boot loader in ROM/flash' CONFIG_ZBOOT_ROM +hex 'Compressed ROM boot loader base address' CONFIG_ZBOOT_ROM_TEXT 0 +hex 'Compressed ROM boot loader BSS address' CONFIG_ZBOOT_ROM_BSS 0 + +comment 'At least one math emulation must be selected' +define_bool CONFIG_FPE_NWFPE y +choice 'Kernel core (/proc/kcore) format' \ + "ELF CONFIG_KCORE_ELF \ + A.OUT CONFIG_KCORE_AOUT" ELF +tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT +tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF +tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC +string 'Default kernel command string' CONFIG_CMDLINE "" + +define_bool CONFIG_ALIGNMENT_TRAP n +endmenu + +source drivers/parport/Config.in +source drivers/pnp/Config.in +source drivers/block/Config.in +source drivers/md/Config.in +source drivers/acorn/block/Config.in + +if [ "$CONFIG_NET" = "y" ]; then + source net/Config.in + + mainmenu_option next_comment + comment 'Network device support' + + bool 'Network device support' CONFIG_NETDEVICES + if [ "$CONFIG_NETDEVICES" = "y" ]; then + source drivers/net/Config.in + fi + endmenu +fi + +mainmenu_option next_comment +comment 'ATA/ATAPI/MFM/RLL support' + +tristate 'ATA/ATAPI/MFM/RLL support' CONFIG_IDE + +if [ "$CONFIG_IDE" != "n" ]; then + source drivers/ide/Config.in +else + define_bool CONFIG_BLK_DEV_HD n +fi +endmenu + +mainmenu_option next_comment +comment 'SCSI support' + +tristate 'SCSI support' CONFIG_SCSI +endmenu + +source drivers/isdn/Config.in + +# +# input before char - char/joystick depends on it. As does USB. +# +source drivers/input/Config.in + +source drivers/char/Config.in +if [ "$CONFIG_BUSMOUSE" = "y" ]; then + define_bool CONFIG_KBDMOUSE y +fi + +source drivers/media/Config.in + +source fs/Config.in + +if [ "$CONFIG_VT" = "y" ]; then + mainmenu_option next_comment + comment 'Console drivers' + source drivers/video/Config.in + endmenu +fi + +mainmenu_option next_comment +comment 'Sound' + +tristate 'Sound card support' CONFIG_SOUND +if [ "$CONFIG_SOUND" != "n" ]; then + source sound/Config.in +fi +endmenu + +source drivers/misc/Config.in +source drivers/usb/Config.in + +mainmenu_option next_comment +comment 'Kernel hacking' + +# Always compile kernel with framepointer (until 2.4 real comes out) +# Bug reports aren't much use without this. 
+bool 'Compile kernel without frame pointer' CONFIG_NO_FRAME_POINTER +bool 'Verbose user fault messages' CONFIG_DEBUG_USER +bool 'Include debugging information in kernel binary' CONFIG_DEBUG_INFO + +bool 'Kernel debugging' CONFIG_DEBUG_KERNEL +dep_bool ' Debug memory allocations' CONFIG_DEBUG_SLAB $CONFIG_DEBUG_KERNEL +dep_bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ $CONFIG_DEBUG_KERNEL +dep_bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK $CONFIG_DEBUG_KERNEL +dep_bool ' Wait queue debugging' CONFIG_DEBUG_WAITQ $CONFIG_DEBUG_KERNEL +dep_bool ' Verbose BUG() reporting (adds 70K)' CONFIG_DEBUG_BUGVERBOSE $CONFIG_DEBUG_KERNEL +dep_bool ' Verbose kernel error messages' CONFIG_DEBUG_ERRORS $CONFIG_DEBUG_KERNEL +# These options are only for real kernel hackers who want to get their hands dirty. +dep_bool ' Kernel low-level debugging functions' CONFIG_DEBUG_LL $CONFIG_DEBUG_KERNEL +endmenu + +source security/Config.in +source lib/Config.in diff -urN linux-2.5.70-bk13/arch/arm26/defconfig linux-2.5.70-bk14/arch/arm26/defconfig --- linux-2.5.70-bk13/arch/arm26/defconfig 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/defconfig 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,367 @@ +# +# Automatically generated by make menuconfig: don't edit +# +CONFIG_ARM=y +# CONFIG_EISA is not set +# CONFIG_SBUS is not set +# CONFIG_MCA is not set +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +# CONFIG_GENERIC_BUST_SPINLOCK is not set +# CONFIG_GENERIC_ISA_DMA is not set + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y + +# +# General setup +# +# CONFIG_NET is not set +# CONFIG_SYSVIPC is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_SYSCTL is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System Type +# +CONFIG_ARCH_ARC=y +# CONFIG_ARCH_A5K is not set +CONFIG_ARCH_ACORN=y +# CONFIG_CPU_32 is not set +CONFIG_CPU_26=y +# CONFIG_PAGESIZE_16 is not set + +# +# General setup +# +CONFIG_FIQ=y +# CONFIG_ZBOOT_ROM is not set +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZBOOT_ROM_BSS=0 +CONFIG_FPE_NWFPE=y +CONFIG_KCORE_ELF=y +# CONFIG_KCORE_AOUT is not set +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_ELF is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_CMDLINE="" +# CONFIG_ALIGNMENT_TRAP is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play configuration +# +# CONFIG_PNP is not set +# CONFIG_ISAPNP is not set +# CONFIG_PNPBIOS is not set + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_XD is not set +# CONFIG_PARIDE is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_CISS_SCSI_TAPE is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_BLK_DEV_INITRD is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set +# CONFIG_BLK_DEV_MD is not set +# CONFIG_MD_LINEAR is not set +# CONFIG_MD_RAID0 is not set +# CONFIG_MD_RAID1 is not set +# CONFIG_MD_RAID5 is not set +# CONFIG_MD_MULTIPATH is not set +# CONFIG_BLK_DEV_LVM is not set + +# +# Acorn-specific block devices +# +# CONFIG_BLK_DEV_FD1772 is not set +# CONFIG_BLK_DEV_MFM is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI support +# +# CONFIG_SCSI is not set + +# +# ISDN subsystem +# + +# +# Input device support +# +# CONFIG_INPUT 
is not set +# CONFIG_INPUT_KEYBDEV is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_TSLIBDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set +# CONFIG_INPUT_UINPUT is not set +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_GAMEPORT_NS558 is not set +# CONFIG_GAMEPORT_L4 is not set +# CONFIG_GAMEPORT_EMU10K1 is not set +# CONFIG_GAMEPORT_VORTEX is not set +# CONFIG_GAMEPORT_FM801 is not set +# CONFIG_GAMEPORT_CS461x is not set +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_ACORN is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set +# CONFIG_SERIAL_8250_CONSOLE is not set +# CONFIG_SERIAL_8250_CS is not set +# CONFIG_SERIAL_8250_EXTENDED is not set +# CONFIG_SERIAL_8250_MANY_PORTS is not set +# CONFIG_SERIAL_8250_SHARE_IRQ is not set +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_MULTIPORT is not set +# CONFIG_SERIAL_8250_RSA is not set +# CONFIG_ATOMWIDE_SERIAL is not set +# CONFIG_DUALSP_SERIAL is not set +# CONFIG_SERIAL_ANAKIN is not set +# CONFIG_SERIAL_ANAKIN_CONSOLE is not set +# CONFIG_SERIAL_AMBA is not set +# CONFIG_SERIAL_AMBA_CONSOLE is not set +# CONFIG_SERIAL_CLPS711X is not set +# CONFIG_SERIAL_CLPS711X_CONSOLE is not set +# CONFIG_SERIAL_CLPS711X_OLD_NAME is not set +# CONFIG_SERIAL_21285 is not set +# CONFIG_SERIAL_21285_OLD is not set +# CONFIG_SERIAL_21285_CONSOLE is not set +# CONFIG_SERIAL_UART00 is not set +# CONFIG_SERIAL_UART00_CONSOLE is not set +# CONFIG_SERIAL_SA1100 is not set +# CONFIG_SERIAL_SA1100_CONSOLE is not set +# CONFIG_UNIX98_PTYS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_PHILIPSPAR is not set +# CONFIG_I2C_ELV is not set +# CONFIG_I2C_VELLEMAN is not set +CONFIG_I2C_ALGOPCF=y +# CONFIG_I2C_ELEKTOR is not set +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_PROC is not set + +# +# L3 serial bus support +# +# CONFIG_L3 is not set +# CONFIG_L3_ALGOBIT is not set +# CONFIG_L3_BIT_SA1100_GPIO is not set +# CONFIG_L3_SA1111 is not set +# CONFIG_BIT_SA1100_GPIO is not set + +# +# Mice +# +# CONFIG_BUSMOUSE is not set +# CONFIG_PSMOUSE is not set +# CONFIG_QIC02_TAPE is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# File systems +# +# CONFIG_QUOTA is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_ADFS_FS is not set +# CONFIG_ADFS_FS_RW is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_JBD_DEBUG is not set +# CONFIG_FAT_FS is not set +# CONFIG_MSDOS_FS is not set +# CONFIG_UMSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# 
CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_TMPFS is not set +CONFIG_RAMFS=y +# CONFIG_ISO9660_FS is not set +# CONFIG_JOLIET is not set +# CONFIG_ZISOFS is not set +# CONFIG_JFS_FS is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS_DEBUG is not set +# CONFIG_HPFS_FS is not set +CONFIG_PROC_FS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVFS_MOUNT is not set +# CONFIG_DEVFS_DEBUG is not set +# CONFIG_DEVPTS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX4FS_RW is not set +# CONFIG_ROMFS_FS is not set +CONFIG_EXT2_FS=y +# CONFIG_SYSV_FS is not set +# CONFIG_UDF_FS is not set +# CONFIG_UDF_RW is not set +# CONFIG_UFS_FS is not set +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_NCPFS_NLS is not set +# CONFIG_SMB_FS is not set +# CONFIG_ZISOFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +CONFIG_ACORN_PARTITION=y +# CONFIG_ACORN_PARTITION_EESOX is not set +# CONFIG_ACORN_PARTITION_ICS is not set +CONFIG_ACORN_PARTITION_ADFS=y +# CONFIG_ACORN_PARTITION_POWERTEC is not set +CONFIG_ACORN_PARTITION_RISCIX=y +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set +# CONFIG_SMB_NLS is not set +# CONFIG_NLS is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Multimedia Capabilities Port drivers +# +# CONFIG_MCP is not set +# CONFIG_MCP_SA1100 is not set +# CONFIG_MCP_UCB1200 is not set +# CONFIG_MCP_UCB1200_AUDIO is not set +# CONFIG_MCP_UCB1200_TS is not set + +# +# Console Switches +# +# CONFIG_SWITCHES is not set +# CONFIG_SWITCHES_SA1100 is not set +# CONFIG_SWITCHES_UCB1X00 is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# Kernel hacking +# +# CONFIG_NO_FRAME_POINTER is not set +CONFIG_DEBUG_USER=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SLAB=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_WAITQ=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_LL=y + +# +# Security options +# +CONFIG_SECURITY_CAPABILITIES=y + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_ZLIB_INFLATE is not set +# CONFIG_ZLIB_DEFLATE is not set diff -urN linux-2.5.70-bk13/arch/arm26/kernel/Makefile linux-2.5.70-bk14/arch/arm26/kernel/Makefile --- linux-2.5.70-bk13/arch/arm26/kernel/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,18 @@ +# +# Makefile for the linux kernel. +# + +ENTRY_OBJ = entry.o + +# Object file lists. 
+ +obj-y := arch.o compat.o dma.o entry.o irq.o \ + process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \ + time.o traps.o ecard.o time-acorn.o dma.o \ + ecard.o fiq.o time.o + +obj-$(CONFIG_FIQ) += fiq.o +obj-$(CONFIG_MODULES) += armksyms.o + +extra-y := init_task.o + diff -urN linux-2.5.70-bk13/arch/arm26/kernel/arch.c linux-2.5.70-bk14/arch/arm26/kernel/arch.c --- linux-2.5.70-bk13/arch/arm26/kernel/arch.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/arch.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,30 @@ +/* + * linux/arch/arm/kernel/arch.c + * + * Architecture specific fixups. + */ +#include +#include +#include + +#include +#include +#include +#include +#include + +unsigned int vram_size; + + +unsigned int memc_ctrl_reg; +unsigned int number_mfm_drives; + +static int __init parse_tag_acorn(const struct tag *tag) +{ + memc_ctrl_reg = tag->u.acorn.memc_control_reg; + number_mfm_drives = tag->u.acorn.adfsdrives; + return 0; +} + +__tagtable(ATAG_ACORN, parse_tag_acorn); + diff -urN linux-2.5.70-bk13/arch/arm26/kernel/armksyms.c linux-2.5.70-bk14/arch/arm26/kernel/armksyms.c --- linux-2.5.70-bk13/arch/arm26/kernel/armksyms.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/armksyms.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,233 @@ +/* + * linux/arch/arm26/kernel/armksyms.c + * + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include +#include + +extern void dump_thread(struct pt_regs *, struct user *); +extern int dump_fpu(struct pt_regs *, struct user_fp_struct *); +extern void inswb(unsigned int port, void *to, int len); +extern void outswb(unsigned int port, const void *to, int len); + +extern void __bad_xchg(volatile void *ptr, int size); + +/* + * syscalls + */ +extern int sys_write(int, const char *, int); +extern int sys_read(int, char *, int); +extern int sys_lseek(int, off_t, int); +extern int sys_exit(int); + +/* + * libgcc functions - functions that are used internally by the + * compiler... (prototypes are not correct though, but that + * doesn't really matter since they're not versioned). + */ +extern void __ashldi3(void); +extern void __ashrdi3(void); +extern void __divsi3(void); +extern void __lshrdi3(void); +extern void __modsi3(void); +extern void __muldi3(void); +extern void __ucmpdi2(void); +extern void __udivdi3(void); +extern void __umoddi3(void); +extern void __udivmoddi4(void); +extern void __udivsi3(void); +extern void __umodsi3(void); +extern void abort(void); + +extern void ret_from_exception(void); +extern void fpundefinstr(void); +extern void fp_enter(void); + +/* + * This has a special calling convention; it doesn't + * modify any of the usual registers, except for LR. + */ +extern void __do_softirq(void); + +#define EXPORT_SYMBOL_ALIAS(sym,orig) \ + const char __kstrtab_##sym[] \ + __attribute__((section(".kstrtab"))) = \ + __MODULE_STRING(sym); \ + const struct module_symbol __ksymtab_##sym \ + __attribute__((section("__ksymtab"))) = \ + { (unsigned long)&orig, __kstrtab_##sym }; + +/* + * floating point math emulator support. 
+ * These symbols will never change their calling convention... + */ +EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter); +EXPORT_SYMBOL_ALIAS(fp_printk,printk); +EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig); + +EXPORT_SYMBOL(fpundefinstr); +EXPORT_SYMBOL(ret_from_exception); + +#ifdef CONFIG_VT +EXPORT_SYMBOL(kd_mksound); +#endif + +EXPORT_SYMBOL_NOVERS(__do_softirq); + + /* platform dependent support */ +EXPORT_SYMBOL(dump_thread); +EXPORT_SYMBOL(dump_fpu); +EXPORT_SYMBOL(udelay); +EXPORT_SYMBOL(kernel_thread); +EXPORT_SYMBOL(system_rev); +EXPORT_SYMBOL(system_serial_low); +EXPORT_SYMBOL(system_serial_high); +#ifdef CONFIG_DEBUG_BUGVERBOSE +EXPORT_SYMBOL(__bug); +#endif +EXPORT_SYMBOL(__bad_xchg); +EXPORT_SYMBOL(__readwrite_bug); +EXPORT_SYMBOL(enable_irq); +EXPORT_SYMBOL(disable_irq); +EXPORT_SYMBOL(set_irq_type); +EXPORT_SYMBOL(pm_idle); +EXPORT_SYMBOL(pm_power_off); + + /* processor dependencies */ +EXPORT_SYMBOL(__machine_arch_type); + + /* networking */ +EXPORT_SYMBOL(csum_partial_copy_nocheck); +EXPORT_SYMBOL(__csum_ipv6_magic); + + /* io */ +#ifndef __raw_readsb +EXPORT_SYMBOL_NOVERS(__raw_readsb); +#endif +#ifndef __raw_readsw +EXPORT_SYMBOL_NOVERS(__raw_readsw); +#endif +#ifndef __raw_readsl +EXPORT_SYMBOL_NOVERS(__raw_readsl); +#endif +#ifndef __raw_writesb +EXPORT_SYMBOL_NOVERS(__raw_writesb); +#endif +#ifndef __raw_writesw +EXPORT_SYMBOL_NOVERS(__raw_writesw); +#endif +#ifndef __raw_writesl +EXPORT_SYMBOL_NOVERS(__raw_writesl); +#endif + + /* string / mem functions */ +EXPORT_SYMBOL_NOVERS(strcpy); +EXPORT_SYMBOL_NOVERS(strncpy); +EXPORT_SYMBOL_NOVERS(strcat); +EXPORT_SYMBOL_NOVERS(strncat); +EXPORT_SYMBOL_NOVERS(strcmp); +EXPORT_SYMBOL_NOVERS(strncmp); +EXPORT_SYMBOL_NOVERS(strchr); +EXPORT_SYMBOL_NOVERS(strlen); +EXPORT_SYMBOL_NOVERS(strnlen); +EXPORT_SYMBOL_NOVERS(strpbrk); +EXPORT_SYMBOL_NOVERS(strrchr); +EXPORT_SYMBOL_NOVERS(strstr); +EXPORT_SYMBOL_NOVERS(memset); +EXPORT_SYMBOL_NOVERS(memcpy); +EXPORT_SYMBOL_NOVERS(memmove); +EXPORT_SYMBOL_NOVERS(memcmp); +EXPORT_SYMBOL_NOVERS(memscan); +EXPORT_SYMBOL_NOVERS(__memzero); + + /* user mem (segment) */ +EXPORT_SYMBOL(uaccess_kernel); +EXPORT_SYMBOL(uaccess_user); + +EXPORT_SYMBOL_NOVERS(__get_user_1); +EXPORT_SYMBOL_NOVERS(__get_user_2); +EXPORT_SYMBOL_NOVERS(__get_user_4); +EXPORT_SYMBOL_NOVERS(__get_user_8); + +EXPORT_SYMBOL_NOVERS(__put_user_1); +EXPORT_SYMBOL_NOVERS(__put_user_2); +EXPORT_SYMBOL_NOVERS(__put_user_4); +EXPORT_SYMBOL_NOVERS(__put_user_8); + + /* gcc lib functions */ +EXPORT_SYMBOL_NOVERS(__ashldi3); +EXPORT_SYMBOL_NOVERS(__ashrdi3); +EXPORT_SYMBOL_NOVERS(__divsi3); +EXPORT_SYMBOL_NOVERS(__lshrdi3); +EXPORT_SYMBOL_NOVERS(__modsi3); +EXPORT_SYMBOL_NOVERS(__muldi3); +EXPORT_SYMBOL_NOVERS(__ucmpdi2); +EXPORT_SYMBOL_NOVERS(__udivdi3); +EXPORT_SYMBOL_NOVERS(__umoddi3); +EXPORT_SYMBOL_NOVERS(__udivmoddi4); +EXPORT_SYMBOL_NOVERS(__udivsi3); +EXPORT_SYMBOL_NOVERS(__umodsi3); + + /* bitops */ +EXPORT_SYMBOL(_set_bit_le); +EXPORT_SYMBOL(_test_and_set_bit_le); +EXPORT_SYMBOL(_clear_bit_le); +EXPORT_SYMBOL(_test_and_clear_bit_le); +EXPORT_SYMBOL(_change_bit_le); +EXPORT_SYMBOL(_test_and_change_bit_le); +EXPORT_SYMBOL(_find_first_zero_bit_le); +EXPORT_SYMBOL(_find_next_zero_bit_le); + + /* elf */ +EXPORT_SYMBOL(elf_platform); +EXPORT_SYMBOL(elf_hwcap); + + /* syscalls */ +EXPORT_SYMBOL(sys_write); +EXPORT_SYMBOL(sys_read); +EXPORT_SYMBOL(sys_lseek); +EXPORT_SYMBOL(sys_open); +EXPORT_SYMBOL(sys_exit); +EXPORT_SYMBOL(sys_wait4); + + /* semaphores */ +EXPORT_SYMBOL_NOVERS(__down_failed); 
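The EXPORT_SYMBOL_ALIAS() macro defined above publishes an already-existing function in the module symbol table under a second name, which is how printk and send_sig become visible to the floating point emulator as fp_printk and fp_send_sig. A minimal stand-alone C sketch of the same idea follows; the simplified struct, the symtab[] table and the lookup() helper are invented here purely for illustration.

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's module symbol entry */
struct sym_alias {
        int (*addr)(const char *);      /* address of the real function */
        const char *name;               /* name it is exported under    */
};

static int real_printk(const char *s)
{
        return printf("%s", s);
}

/* one entry aliasing real_printk under the second name "fp_printk" */
static const struct sym_alias symtab[] = {
        { real_printk, "fp_printk" },
};

/* resolve an exported name back to the underlying function */
static int (*lookup(const char *name))(const char *)
{
        size_t i;

        for (i = 0; i < sizeof(symtab) / sizeof(symtab[0]); i++)
                if (strcmp(symtab[i].name, name) == 0)
                        return symtab[i].addr;
        return NULL;
}

int main(void)
{
        int (*fn)(const char *) = lookup("fp_printk");

        return fn ? (fn("alias resolved\n"), 0) : 1;
}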
+EXPORT_SYMBOL_NOVERS(__down_interruptible_failed); +EXPORT_SYMBOL_NOVERS(__down_trylock_failed); +EXPORT_SYMBOL_NOVERS(__up_wakeup); + +EXPORT_SYMBOL(get_wchan); + +#ifdef CONFIG_PREEMPT +EXPORT_SYMBOL(kernel_flag); +#endif diff -urN linux-2.5.70-bk13/arch/arm26/kernel/asm-offsets.c linux-2.5.70-bk14/arch/arm26/kernel/asm-offsets.c --- linux-2.5.70-bk13/arch/arm26/kernel/asm-offsets.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/asm-offsets.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,64 @@ +/* + * Copyright (C) 1995-2001 Russell King + * 2001-2002 Keith Owens + * 2003-? Ian Molton + * + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include + +/* + * Make sure that the compiler and target are compatible. + */ +#if defined(__APCS_32__) && defined(CONFIG_CPU_26) +#error Sorry, your compiler targets APCS-32 but this kernel requires APCS-26 +#endif +#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 95) +#error Sorry, your compiler is known to miscompile kernels. Only use gcc 2.95.3 and later. +#endif +#if __GNUC__ == 2 && __GNUC_MINOR__ == 95 +/* shame we can't detect the .1 or .2 releases */ +#warning GCC 2.95.2 and earlier miscompiles kernels. +#endif + +/* Use marker if you need to separate the values later */ + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +int main(void) +{ + DEFINE(TSK_USED_MATH, offsetof(struct task_struct, used_math)); + DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); + DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); + BLANK(); + DEFINE(VM_EXEC, VM_EXEC); + BLANK(); + BLANK(); + DEFINE(PAGE_PRESENT, _PAGE_PRESENT); + DEFINE(PAGE_READONLY, _PAGE_READONLY); + DEFINE(PAGE_NOT_USER, _PAGE_NOT_USER); + DEFINE(PAGE_OLD, _PAGE_OLD); + DEFINE(PAGE_CLEAN, _PAGE_CLEAN); + BLANK(); + DEFINE(PAGE_SZ, PAGE_SIZE); + BLANK(); + DEFINE(SYS_ERROR0, 0x9f0000); + return 0; +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/compat.c linux-2.5.70-bk14/arch/arm26/kernel/compat.c --- linux-2.5.70-bk13/arch/arm26/kernel/compat.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/compat.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,174 @@ +/* + * linux/arch/arm/kernel/compat.c + * + * Copyright (C) 2001 Russell King + * 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * We keep the old params compatibility cruft in one place (here) + * so we don't end up with lots of mess around other places. + * + * NOTE: + * The old struct param_struct is deprecated, but it will be kept in + * the kernel for 5 years from now (2001). This will allow boot loaders + * to convert to the new struct tag way. 
+ */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +//#include + +/* + * Usage: + * - do not go blindly adding fields, add them at the end + * - when adding fields, don't rely on the address until + * a patch from me has been released + * - unused fields should be zero (for future expansion) + * - this structure is relatively short-lived - only + * guaranteed to contain useful data in setup_arch() + * + * This is the old deprecated way to pass parameters to the kernel + */ +struct param_struct { + union { + struct { + unsigned long page_size; /* 0 */ + unsigned long nr_pages; /* 4 */ + unsigned long ramdisk_size; /* 8 */ + unsigned long flags; /* 12 */ +#define FLAG_READONLY 1 +#define FLAG_RDLOAD 4 +#define FLAG_RDPROMPT 8 + unsigned long rootdev; /* 16 */ + unsigned long video_num_cols; /* 20 */ + unsigned long video_num_rows; /* 24 */ + unsigned long video_x; /* 28 */ + unsigned long video_y; /* 32 */ + unsigned long memc_control_reg; /* 36 */ + unsigned char sounddefault; /* 40 */ + unsigned char adfsdrives; /* 41 */ + unsigned char bytes_per_char_h; /* 42 */ + unsigned char bytes_per_char_v; /* 43 */ + unsigned long pages_in_bank[4]; /* 44 */ + unsigned long pages_in_vram; /* 60 */ + unsigned long initrd_start; /* 64 */ + unsigned long initrd_size; /* 68 */ + unsigned long rd_start; /* 72 */ + unsigned long system_rev; /* 76 */ + unsigned long system_serial_low; /* 80 */ + unsigned long system_serial_high; /* 84 */ + unsigned long mem_fclk_21285; /* 88 */ + } s; + char unused[256]; + } u1; + union { + char paths[8][128]; + struct { + unsigned long magic; + char n[1024 - sizeof(unsigned long)]; + } s; + } u2; + char commandline[COMMAND_LINE_SIZE]; +}; + +static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size) +{ + tag = tag_next(tag); + tag->hdr.tag = ATAG_MEM; + tag->hdr.size = tag_size(tag_mem32); + tag->u.mem.size = size; + tag->u.mem.start = start; + + return tag; +} + +static void __init build_tag_list(struct param_struct *params, void *taglist) +{ + struct tag *tag = taglist; + + if (params->u1.s.page_size != PAGE_SIZE) { + printk(KERN_WARNING "Warning: bad configuration page, " + "trying to continue\n"); + return; + } + + printk(KERN_DEBUG "Converting old-style param struct to taglist\n"); + + tag->hdr.tag = ATAG_CORE; + tag->hdr.size = tag_size(tag_core); + tag->u.core.flags = params->u1.s.flags & FLAG_READONLY; + tag->u.core.pagesize = params->u1.s.page_size; + tag->u.core.rootdev = params->u1.s.rootdev; + + tag = tag_next(tag); + tag->hdr.tag = ATAG_RAMDISK; + tag->hdr.size = tag_size(tag_ramdisk); + tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 1 : 0) | + (params->u1.s.flags & FLAG_RDPROMPT ? 
2 : 0); + tag->u.ramdisk.size = params->u1.s.ramdisk_size; + tag->u.ramdisk.start = params->u1.s.rd_start; + + tag = tag_next(tag); + tag->hdr.tag = ATAG_INITRD; + tag->hdr.size = tag_size(tag_initrd); + tag->u.initrd.start = params->u1.s.initrd_start; + tag->u.initrd.size = params->u1.s.initrd_size; + + tag = tag_next(tag); + tag->hdr.tag = ATAG_SERIAL; + tag->hdr.size = tag_size(tag_serialnr); + tag->u.serialnr.low = params->u1.s.system_serial_low; + tag->u.serialnr.high = params->u1.s.system_serial_high; + + tag = tag_next(tag); + tag->hdr.tag = ATAG_REVISION; + tag->hdr.size = tag_size(tag_revision); + tag->u.revision.rev = params->u1.s.system_rev; + + tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE); + + tag = tag_next(tag); + tag->hdr.tag = ATAG_ACORN; + tag->hdr.size = tag_size(tag_acorn); + tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg; + tag->u.acorn.vram_pages = params->u1.s.pages_in_vram; + tag->u.acorn.sounddefault = params->u1.s.sounddefault; + tag->u.acorn.adfsdrives = params->u1.s.adfsdrives; + + tag = tag_next(tag); + tag->hdr.tag = ATAG_CMDLINE; + tag->hdr.size = (strlen(params->commandline) + 3 + + sizeof(struct tag_header)) >> 2; + strcpy(tag->u.cmdline.cmdline, params->commandline); + + tag = tag_next(tag); + tag->hdr.tag = ATAG_NONE; + tag->hdr.size = 0; + + memmove(params, taglist, ((int)tag) - ((int)taglist) + + sizeof(struct tag_header)); +} + +void __init convert_to_tag_list(struct tag *tags) +{ + struct param_struct *params = (struct param_struct *)tags; + build_tag_list(params, ¶ms->u2); +} + +void __init squash_mem_tags(struct tag *tag) +{ + for (; tag->hdr.size; tag = tag_next(tag)) + if (tag->hdr.tag == ATAG_MEM) + tag->hdr.tag = ATAG_NONE; +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/dma.c linux-2.5.70-bk14/arch/arm26/kernel/dma.c --- linux-2.5.70-bk13/arch/arm26/kernel/dma.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/dma.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,302 @@ +/* + * linux/arch/arm/kernel/dma.c + * + * Copyright (C) 1995-2000 Russell King + * 2003-? Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Front-end to the DMA handling. This handles the allocation/freeing + * of DMA channels, and provides a unified interface to the machines + * DMA facilities. + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +spinlock_t dma_spin_lock = SPIN_LOCK_UNLOCKED; + +#if MAX_DMA_CHANNELS > 0 + +static dma_t dma_chan[MAX_DMA_CHANNELS]; + +/* + * Get dma list for /proc/dma + */ +int get_dma_list(char *buf) +{ + dma_t *dma; + char *p = buf; + int i; + + for (i = 0, dma = dma_chan; i < MAX_DMA_CHANNELS; i++, dma++) + if (dma->lock) + p += sprintf(p, "%2d: %14s %s\n", i, + dma->d_ops->type, dma->device_id); + + return p - buf; +} + +/* + * Request DMA channel + * + * On certain platforms, we have to allocate an interrupt as well... 
+ */ +int request_dma(dmach_t channel, const char *device_id) +{ + dma_t *dma = dma_chan + channel; + int ret; + + if (channel >= MAX_DMA_CHANNELS || !dma->d_ops) + goto bad_dma; + + if (xchg(&dma->lock, 1) != 0) + goto busy; + + dma->device_id = device_id; + dma->active = 0; + dma->invalid = 1; + + ret = 0; + if (dma->d_ops->request) + ret = dma->d_ops->request(channel, dma); + + if (ret) + xchg(&dma->lock, 0); + + return ret; + +bad_dma: + printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel); + return -EINVAL; + +busy: + return -EBUSY; +} + +/* + * Free DMA channel + * + * On certain platforms, we have to free interrupt as well... + */ +void free_dma(dmach_t channel) +{ + dma_t *dma = dma_chan + channel; + + if (channel >= MAX_DMA_CHANNELS || !dma->d_ops) + goto bad_dma; + + if (dma->active) { + printk(KERN_ERR "dma%d: freeing active DMA\n", channel); + dma->d_ops->disable(channel, dma); + dma->active = 0; + } + + if (xchg(&dma->lock, 0) != 0) { + if (dma->d_ops->free) + dma->d_ops->free(channel, dma); + return; + } + + printk(KERN_ERR "dma%d: trying to free free DMA\n", channel); + return; + +bad_dma: + printk(KERN_ERR "dma: trying to free DMA%d\n", channel); +} + +/* Set DMA Scatter-Gather list + */ +void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg) +{ + dma_t *dma = dma_chan + channel; + + if (dma->active) + printk(KERN_ERR "dma%d: altering DMA SG while " + "DMA active\n", channel); + + dma->sg = sg; + dma->sgcount = nr_sg; + dma->using_sg = 1; + dma->invalid = 1; +} + +/* Set DMA address + * + * Copy address to the structure, and set the invalid bit + */ +void set_dma_addr (dmach_t channel, unsigned long physaddr) +{ + dma_t *dma = dma_chan + channel; + + if (dma->active) + printk(KERN_ERR "dma%d: altering DMA address while " + "DMA active\n", channel); + + dma->sg = &dma->buf; + dma->sgcount = 1; + dma->buf.__address = (char *)physaddr;//FIXME - not pretty + dma->using_sg = 0; + dma->invalid = 1; +} + +/* Set DMA byte count + * + * Copy address to the structure, and set the invalid bit + */ +void set_dma_count (dmach_t channel, unsigned long count) +{ + dma_t *dma = dma_chan + channel; + + if (dma->active) + printk(KERN_ERR "dma%d: altering DMA count while " + "DMA active\n", channel); + + dma->sg = &dma->buf; + dma->sgcount = 1; + dma->buf.length = count; + dma->using_sg = 0; + dma->invalid = 1; +} + +/* Set DMA direction mode + */ +void set_dma_mode (dmach_t channel, dmamode_t mode) +{ + dma_t *dma = dma_chan + channel; + + if (dma->active) + printk(KERN_ERR "dma%d: altering DMA mode while " + "DMA active\n", channel); + + dma->dma_mode = mode; + dma->invalid = 1; +} + +/* Enable DMA channel + */ +void enable_dma (dmach_t channel) +{ + dma_t *dma = dma_chan + channel; + + if (!dma->lock) + goto free_dma; + + if (dma->active == 0) { + dma->active = 1; + dma->d_ops->enable(channel, dma); + } + return; + +free_dma: + printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel); + BUG(); +} + +/* Disable DMA channel + */ +void disable_dma (dmach_t channel) +{ + dma_t *dma = dma_chan + channel; + + if (!dma->lock) + goto free_dma; + + if (dma->active == 1) { + dma->active = 0; + dma->d_ops->disable(channel, dma); + } + return; + +free_dma: + printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel); + BUG(); +} + +/* + * Is the specified DMA channel active? 
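The functions above form the whole per-channel interface exposed by this file. A driver would normally use them in roughly the following order; this is only a sketch — the channel number, device name and buffer are made up, DMA_MODE_READ is assumed to come from the corresponding asm/dma.h, and error handling is kept minimal.

/* Illustrative only: claim channel 2, program a single buffer, run it. */
static int example_dma_transfer(unsigned long phys_buf, unsigned long len)
{
        const dmach_t channel = 2;              /* hypothetical channel        */
        int ret;

        ret = request_dma(channel, "example-dev");
        if (ret)
                return ret;                     /* -EBUSY or -EINVAL           */

        set_dma_mode(channel, DMA_MODE_READ);   /* direction (assumed macro)   */
        set_dma_addr(channel, phys_buf);        /* physical address of buffer  */
        set_dma_count(channel, len);            /* transfer length in bytes    */
        enable_dma(channel);

        /* ... wait for the device to signal completion ... */

        disable_dma(channel);
        free_dma(channel);
        return 0;
}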
+ */ +int dma_channel_active(dmach_t channel) +{ + return dma_chan[channel].active; +} + +void set_dma_page(dmach_t channel, char pagenr) +{ + printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel); +} + +void set_dma_speed(dmach_t channel, int cycle_ns) +{ + dma_t *dma = dma_chan + channel; + int ret = 0; + + if (dma->d_ops->setspeed) + ret = dma->d_ops->setspeed(channel, dma, cycle_ns); + dma->speed = ret; +} + +int get_dma_residue(dmach_t channel) +{ + dma_t *dma = dma_chan + channel; + int ret = 0; + + if (dma->d_ops->residue) + ret = dma->d_ops->residue(channel, dma); + + return ret; +} + +void __init init_dma(void) +{ + arch_dma_init(dma_chan); +} + +#else + +int request_dma(dmach_t channel, const char *device_id) +{ + return -EINVAL; +} + +int get_dma_residue(dmach_t channel) +{ + return 0; +} + +#define GLOBAL_ALIAS(_a,_b) asm (".set " #_a "," #_b "; .globl " #_a) +GLOBAL_ALIAS(disable_dma, get_dma_residue); +GLOBAL_ALIAS(enable_dma, get_dma_residue); +GLOBAL_ALIAS(free_dma, get_dma_residue); +GLOBAL_ALIAS(get_dma_list, get_dma_residue); +GLOBAL_ALIAS(set_dma_mode, get_dma_residue); +GLOBAL_ALIAS(set_dma_page, get_dma_residue); +GLOBAL_ALIAS(set_dma_count, get_dma_residue); +GLOBAL_ALIAS(set_dma_addr, get_dma_residue); +GLOBAL_ALIAS(set_dma_sg, get_dma_residue); +GLOBAL_ALIAS(set_dma_speed, get_dma_residue); +GLOBAL_ALIAS(init_dma, get_dma_residue); + +#endif + +EXPORT_SYMBOL(request_dma); +EXPORT_SYMBOL(free_dma); +EXPORT_SYMBOL(enable_dma); +EXPORT_SYMBOL(disable_dma); +EXPORT_SYMBOL(set_dma_addr); +EXPORT_SYMBOL(set_dma_count); +EXPORT_SYMBOL(set_dma_mode); +EXPORT_SYMBOL(set_dma_page); +EXPORT_SYMBOL(get_dma_residue); +EXPORT_SYMBOL(set_dma_sg); +EXPORT_SYMBOL(set_dma_speed); + +EXPORT_SYMBOL(dma_spin_lock); diff -urN linux-2.5.70-bk13/arch/arm26/kernel/ecard.c linux-2.5.70-bk14/arch/arm26/kernel/ecard.c --- linux-2.5.70-bk13/arch/arm26/kernel/ecard.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/ecard.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,899 @@ +/* + * linux/arch/arm26/kernel/ecard.c + * + * Copyright 1995-2001 Russell King + * Copyright 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Find all installed expansion cards, and handle interrupts from them. + * + * Created from information from Acorns RiscOS3 PRMs + * + * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether + * podule slot. + * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work. + * 12-Sep-1997 RMK Created new handling of interrupt enables/disables + * - cards can now register their own routine to control + * interrupts (recommended). + * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled + * on reset from Linux. (Caused cards not to respond + * under RiscOS without hard reset). + * 15-Feb-1998 RMK Added DMA support + * 12-Sep-1998 RMK Added EASI support + * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment. + * 17-Apr-1999 RMK Support for EASI Type C cycles. 
+ */ +#define ECARD_C + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum req { + req_readbytes, + req_reset +}; + +struct ecard_request { + enum req req; + ecard_t *ec; + unsigned int address; + unsigned int length; + unsigned int use_loader; + void *buffer; +}; + +struct expcard_blacklist { + unsigned short manufacturer; + unsigned short product; + const char *type; +}; + +static ecard_t *cards; +static ecard_t *slot_to_expcard[MAX_ECARDS]; +static unsigned int ectcr; + +/* List of descriptions of cards which don't have an extended + * identification, or chunk directories containing a description. + */ +static struct expcard_blacklist __initdata blacklist[] = { + { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" } +}; + +asmlinkage extern int +ecard_loader_reset(volatile unsigned char *pa, loader_t loader); +asmlinkage extern int +ecard_loader_read(int off, volatile unsigned char *pa, loader_t loader); + +static const struct ecard_id * +ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec); + +static inline unsigned short +ecard_getu16(unsigned char *v) +{ + return v[0] | v[1] << 8; +} + +static inline signed long +ecard_gets24(unsigned char *v) +{ + return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0); +} + +static inline ecard_t * +slot_to_ecard(unsigned int slot) +{ + return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL; +} + +/* ===================== Expansion card daemon ======================== */ +/* + * Since the loader programs on the expansion cards need to be run + * in a specific environment, create a separate task with this + * environment up, and pass requests to this task as and when we + * need to. + * + * This should allow 99% of loaders to be called from Linux. + * + * From a security standpoint, we trust the card vendors. This + * may be a misplaced trust. + */ +#define BUS_ADDR(x) ((((unsigned long)(x)) << 2) + IO_BASE) +#define POD_INT_ADDR(x) ((volatile unsigned char *)\ + ((BUS_ADDR((x)) - IO_BASE) + IO_START)) + +static inline void ecard_task_reset(struct ecard_request *req) +{ + struct expansion_card *ec = req->ec; + if (ec->loader) + ecard_loader_reset(POD_INT_ADDR(ec->podaddr), ec->loader); +} + +static void +ecard_task_readbytes(struct ecard_request *req) +{ + unsigned char *buf = (unsigned char *)req->buffer; + volatile unsigned char *base_addr = + (volatile unsigned char *)POD_INT_ADDR(req->ec->podaddr); + unsigned int len = req->length; + unsigned int off = req->address; + + if (req->ec->slot_no == 8) { + /* + * The card maintains an index which increments the address + * into a 4096-byte page on each access. We need to keep + * track of the counter. + */ + static unsigned int index; + unsigned int page; + + page = (off >> 12) * 4; + if (page > 256 * 4) + return; + + off &= 4095; + + /* + * If we are reading offset 0, or our current index is + * greater than the offset, reset the hardware index counter. + */ + if (off == 0 || index > off) { + *base_addr = 0; + index = 0; + } + + /* + * Increment the hardware index counter until we get to the + * required offset. The read bytes are discarded. 
+ */ + while (index < off) { + unsigned char byte; + byte = base_addr[page]; + index += 1; + } + + while (len--) { + *buf++ = base_addr[page]; + index += 1; + } + } else { + + if (!req->use_loader || !req->ec->loader) { + off *= 4; + while (len--) { + *buf++ = base_addr[off]; + off += 4; + } + } else { + while(len--) { + /* + * The following is required by some + * expansion card loader programs. + */ + *(unsigned long *)0x108 = 0; + *buf++ = ecard_loader_read(off++, base_addr, + req->ec->loader); + } + } + } + +} + +static void ecard_do_request(struct ecard_request *req) +{ + switch (req->req) { + case req_readbytes: + ecard_task_readbytes(req); + break; + + case req_reset: + ecard_task_reset(req); + break; + } +} + +/* + * On 26-bit processors, we don't need the kcardd thread to access the + * expansion card loaders. We do it directly. + */ +#define ecard_call(req) ecard_do_request(req) + +/* ======================= Mid-level card control ===================== */ + +static void +ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld) +{ + struct ecard_request req; + + req.req = req_readbytes; + req.ec = ec; + req.address = off; + req.length = len; + req.use_loader = useld; + req.buffer = addr; + + ecard_call(&req); +} + +int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num) +{ + struct ex_chunk_dir excd; + int index = 16; + int useld = 0; + + if (!ec->cid.cd) + return 0; + + while(1) { + ecard_readbytes(&excd, ec, index, 8, useld); + index += 8; + if (c_id(&excd) == 0) { + if (!useld && ec->loader) { + useld = 1; + index = 0; + continue; + } + return 0; + } + if (c_id(&excd) == 0xf0) { /* link */ + index = c_start(&excd); + continue; + } + if (c_id(&excd) == 0x80) { /* loader */ + if (!ec->loader) { + ec->loader = (loader_t)kmalloc(c_len(&excd), + GFP_KERNEL); + if (ec->loader) + ecard_readbytes(ec->loader, ec, + (int)c_start(&excd), + c_len(&excd), useld); + else + return 0; + } + continue; + } + if (c_id(&excd) == id && num-- == 0) + break; + } + + if (c_id(&excd) & 0x80) { + switch (c_id(&excd) & 0x70) { + case 0x70: + ecard_readbytes((unsigned char *)excd.d.string, ec, + (int)c_start(&excd), c_len(&excd), + useld); + break; + case 0x00: + break; + } + } + cd->start_offset = c_start(&excd); + memcpy(cd->d.string, excd.d.string, 256); + return 1; +} + +/* ======================= Interrupt control ============================ */ + +static void ecard_def_irq_enable(ecard_t *ec, int irqnr) +{ +} + +static void ecard_def_irq_disable(ecard_t *ec, int irqnr) +{ +} + +static int ecard_def_irq_pending(ecard_t *ec) +{ + return !ec->irqmask || ec->irqaddr[0] & ec->irqmask; +} + +static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr) +{ + panic("ecard_def_fiq_enable called - impossible"); +} + +static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr) +{ + panic("ecard_def_fiq_disable called - impossible"); +} + +static int ecard_def_fiq_pending(ecard_t *ec) +{ + return !ec->fiqmask || ec->fiqaddr[0] & ec->fiqmask; +} + +static expansioncard_ops_t ecard_default_ops = { + ecard_def_irq_enable, + ecard_def_irq_disable, + ecard_def_irq_pending, + ecard_def_fiq_enable, + ecard_def_fiq_disable, + ecard_def_fiq_pending +}; + +/* + * Enable and disable interrupts from expansion cards. + * (interrupts are disabled for these functions). + * + * They are not meant to be called directly, but via enable/disable_irq. 
+ */ +static void ecard_irq_unmask(unsigned int irqnr) +{ + ecard_t *ec = slot_to_ecard(irqnr - 32); + + if (ec) { + if (!ec->ops) + ec->ops = &ecard_default_ops; + + if (ec->claimed && ec->ops->irqenable) + ec->ops->irqenable(ec, irqnr); + else + printk(KERN_ERR "ecard: rejecting request to " + "enable IRQs for %d\n", irqnr); + } +} + +static void ecard_irq_mask(unsigned int irqnr) +{ + ecard_t *ec = slot_to_ecard(irqnr - 32); + + if (ec) { + if (!ec->ops) + ec->ops = &ecard_default_ops; + + if (ec->ops && ec->ops->irqdisable) + ec->ops->irqdisable(ec, irqnr); + } +} + +static struct irqchip ecard_chip = { + .ack = ecard_irq_mask, + .mask = ecard_irq_mask, + .unmask = ecard_irq_unmask, +}; + +void ecard_enablefiq(unsigned int fiqnr) +{ + ecard_t *ec = slot_to_ecard(fiqnr); + + if (ec) { + if (!ec->ops) + ec->ops = &ecard_default_ops; + + if (ec->claimed && ec->ops->fiqenable) + ec->ops->fiqenable(ec, fiqnr); + else + printk(KERN_ERR "ecard: rejecting request to " + "enable FIQs for %d\n", fiqnr); + } +} + +void ecard_disablefiq(unsigned int fiqnr) +{ + ecard_t *ec = slot_to_ecard(fiqnr); + + if (ec) { + if (!ec->ops) + ec->ops = &ecard_default_ops; + + if (ec->ops->fiqdisable) + ec->ops->fiqdisable(ec, fiqnr); + } +} + +static void +ecard_dump_irq_state(ecard_t *ec) +{ + printk(" %d: %sclaimed, ", + ec->slot_no, + ec->claimed ? "" : "not "); + + if (ec->ops && ec->ops->irqpending && + ec->ops != &ecard_default_ops) + printk("irq %spending\n", + ec->ops->irqpending(ec) ? "" : "not "); + else + printk("irqaddr %p, mask = %02X, status = %02X\n", + ec->irqaddr, ec->irqmask, *ec->irqaddr); +} + +static void ecard_check_lockup(struct irqdesc *desc) +{ + static int last, lockup; + ecard_t *ec; + + /* + * If the timer interrupt has not run since the last million + * unrecognised expansion card interrupts, then there is + * something seriously wrong. Disable the expansion card + * interrupts so at least we can continue. + * + * Maybe we ought to start a timer to re-enable them some time + * later? + */ + if (last == jiffies) { + lockup += 1; + if (lockup > 1000000) { + printk(KERN_ERR "\nInterrupt lockup detected - " + "disabling all expansion card interrupts\n"); + + desc->chip->mask(IRQ_EXPANSIONCARD); + + printk("Expansion card IRQ state:\n"); + + for (ec = cards; ec; ec = ec->next) + ecard_dump_irq_state(ec); + } + } else + lockup = 0; + + /* + * If we did not recognise the source of this interrupt, + * warn the user, but don't flood the user with these messages. 
+ */ + if (!last || time_after(jiffies, (unsigned long)(last + 5*HZ))) { + last = jiffies; + printk(KERN_WARNING "Unrecognised interrupt from backplane\n"); + } +} + +static void +ecard_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) +{ + ecard_t *ec; + int called = 0; + + desc->chip->mask(irq); + for (ec = cards; ec; ec = ec->next) { + int pending; + + if (!ec->claimed || ec->irq == NO_IRQ || ec->slot_no == 8) + continue; + + if (ec->ops && ec->ops->irqpending) + pending = ec->ops->irqpending(ec); + else + pending = ecard_default_ops.irqpending(ec); + + if (pending) { + struct irqdesc *d = irq_desc + ec->irq; + d->handle(ec->irq, d, regs); + called ++; + } + } + desc->chip->unmask(irq); + + if (called == 0) + ecard_check_lockup(desc); +} + +#define ecard_irqexp_handler NULL +#define ecard_probeirqhw() (0) + +unsigned int ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed) +{ + unsigned long address = 0; + int slot = ec->slot_no; + + if (ec->slot_no == 8) + return 0; + + ectcr &= ~(1 << slot); + + switch (type) { + case ECARD_MEMC: + if (slot < 4) + address = IO_EC_MEMC_BASE + (slot << 12); + break; + + case ECARD_IOC: + if (slot < 4) + address = IO_EC_IOC_BASE + (slot << 12); + if (address) + address += speed << 17; + break; + + default: + break; + } + + return address; +} + +static int ecard_prints(char *buffer, ecard_t *ec) +{ + char *start = buffer; + + buffer += sprintf(buffer, " %d: %s ", ec->slot_no, + ec->type == ECARD_EASI ? "EASI" : " "); + + if (ec->cid.id == 0) { + struct in_chunk_dir incd; + + buffer += sprintf(buffer, "[%04X:%04X] ", + ec->cid.manufacturer, ec->cid.product); + + if (!ec->card_desc && ec->cid.cd && + ecard_readchunk(&incd, ec, 0xf5, 0)) { + ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL); + + if (ec->card_desc) + strcpy((char *)ec->card_desc, incd.d.string); + } + + buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*"); + } else + buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id); + + return buffer - start; +} + +static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count) +{ + ecard_t *ec = cards; + off_t at = 0; + int len, cnt; + + cnt = 0; + while (ec && count > cnt) { + len = ecard_prints(buf, ec); + at += len; + if (at >= pos) { + if (!*start) { + *start = buf + (pos - (at - len)); + cnt = at - pos; + } else + cnt += len; + buf += len; + } + ec = ec->next; + } + return (count > cnt) ? 
cnt : count; +} + +static struct proc_dir_entry *proc_bus_ecard_dir = NULL; + +static void ecard_proc_init(void) +{ + proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus); + create_proc_info_entry("devices", 0, proc_bus_ecard_dir, + get_ecard_dev_info); +} + +#define ec_set_resource(ec,nr,st,sz,flg) \ + do { \ + (ec)->resource[nr].name = ec->dev.name; \ + (ec)->resource[nr].start = st; \ + (ec)->resource[nr].end = (st) + (sz) - 1; \ + (ec)->resource[nr].flags = flg; \ + } while (0) + +static void __init ecard_init_resources(struct expansion_card *ec) +{ + unsigned long base = PODSLOT_IOC0_BASE; + unsigned int slot = ec->slot_no; + int i; + + if (slot < 4) { + ec_set_resource(ec, ECARD_RES_MEMC, + PODSLOT_MEMC_BASE + (slot << 14), + PODSLOT_MEMC_SIZE, IORESOURCE_MEM); + } + + for (i = 0; i < ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++) { + ec_set_resource(ec, i + ECARD_RES_IOCSLOW, + base + (slot << 14) + (i << 19), + PODSLOT_IOC_SIZE, IORESOURCE_MEM); + } + + for (i = 0; i < ECARD_NUM_RESOURCES; i++) { + if (ec->resource[i].start && + request_resource(&iomem_resource, &ec->resource[i])) { + printk(KERN_ERR "%s: resource(s) not available\n", + ec->dev.bus_id); + ec->resource[i].end -= ec->resource[i].start; + ec->resource[i].start = 0; + } + } +} + +static ssize_t ecard_show_irq(struct device *dev, char *buf) +{ + struct expansion_card *ec = ECARD_DEV(dev); + return sprintf(buf, "%u\n", ec->irq); +} + +static DEVICE_ATTR(irq, S_IRUGO, ecard_show_irq, NULL); + +static ssize_t ecard_show_dma(struct device *dev, char *buf) +{ + struct expansion_card *ec = ECARD_DEV(dev); + return sprintf(buf, "%u\n", ec->dma); +} + +static DEVICE_ATTR(dma, S_IRUGO, ecard_show_dma, NULL); + +static ssize_t ecard_show_resources(struct device *dev, char *buf) +{ + struct expansion_card *ec = ECARD_DEV(dev); + char *str = buf; + int i; + + for (i = 0; i < ECARD_NUM_RESOURCES; i++) + str += sprintf(str, "%08lx %08lx %08lx\n", + ec->resource[i].start, + ec->resource[i].end, + ec->resource[i].flags); + + return str - buf; +} + +static DEVICE_ATTR(resource, S_IRUGO, ecard_show_resources, NULL); + +/* + * Probe for an expansion card. + * + * If bit 1 of the first byte of the card is set, then the + * card does not exist. 
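Card drivers bind to this bus through the glue further down in the file (ecard_register_driver() and the manufacturer/product ID match). A skeletal driver built on that interface might look like the sketch below; the IDs, names and callbacks are invented for illustration, and the remove() return type is assumed to be void.

/* Hypothetical driver for a podule with made-up ID 0x0011:0x0022. */
static const struct ecard_id example_ids[] = {
        { .manufacturer = 0x0011, .product = 0x0022 },
        { .manufacturer = 65535 },      /* manufacturer 65535 ends the table */
};

static int example_probe(struct expansion_card *ec, const struct ecard_id *id)
{
        printk(KERN_INFO "example: card in slot %d\n", ec->slot_no);
        return 0;       /* non-zero would make the core release the card */
}

static void example_remove(struct expansion_card *ec)
{
        printk(KERN_INFO "example: slot %d going away\n", ec->slot_no);
}

static struct ecard_driver example_driver = {
        .probe          = example_probe,
        .remove         = example_remove,
        .id_table       = example_ids,
};

static int __init example_init(void)
{
        return ecard_register_driver(&example_driver);
}
module_init(example_init);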
+ */ +static int __init +ecard_probe(int slot, card_type_t type) +{ + ecard_t **ecp; + ecard_t *ec; + struct ex_ecid cid; + int i, rc = -ENOMEM; + + ec = kmalloc(sizeof(ecard_t), GFP_KERNEL); + if (!ec) + goto nomem; + + memset(ec, 0, sizeof(ecard_t)); + + ec->slot_no = slot; + ec->type = type; + ec->irq = NO_IRQ; + ec->fiq = NO_IRQ; + ec->dma = NO_DMA; + ec->card_desc = NULL; + ec->ops = &ecard_default_ops; + + rc = -ENODEV; + if ((ec->podaddr = ecard_address(ec, type, ECARD_SYNC)) == 0) + goto nodev; + + cid.r_zero = 1; + ecard_readbytes(&cid, ec, 0, 16, 0); + if (cid.r_zero) + goto nodev; + + ec->cid.id = cid.r_id; + ec->cid.cd = cid.r_cd; + ec->cid.is = cid.r_is; + ec->cid.w = cid.r_w; + ec->cid.manufacturer = ecard_getu16(cid.r_manu); + ec->cid.product = ecard_getu16(cid.r_prod); + ec->cid.country = cid.r_country; + ec->cid.irqmask = cid.r_irqmask; + ec->cid.irqoff = ecard_gets24(cid.r_irqoff); + ec->cid.fiqmask = cid.r_fiqmask; + ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff); + ec->fiqaddr = + ec->irqaddr = (unsigned char *)ioaddr(ec->podaddr); + + if (ec->cid.is) { + ec->irqmask = ec->cid.irqmask; + ec->irqaddr += ec->cid.irqoff; + ec->fiqmask = ec->cid.fiqmask; + ec->fiqaddr += ec->cid.fiqoff; + } else { + ec->irqmask = 1; + ec->fiqmask = 4; + } + + for (i = 0; i < sizeof(blacklist) / sizeof(*blacklist); i++) + if (blacklist[i].manufacturer == ec->cid.manufacturer && + blacklist[i].product == ec->cid.product) { + ec->card_desc = blacklist[i].type; + break; + } + + snprintf(ec->dev.bus_id, sizeof(ec->dev.bus_id), "ecard%d", slot); + snprintf(ec->dev.name, sizeof(ec->dev.name), "ecard %04x:%04x", + ec->cid.manufacturer, ec->cid.product); + ec->dev.parent = NULL; + ec->dev.bus = &ecard_bus_type; + ec->dev.dma_mask = &ec->dma_mask; + ec->dma_mask = (u64)0xffffffff; + + ecard_init_resources(ec); + + /* + * hook the interrupt handlers + */ + if (slot < 8) { + ec->irq = 32 + slot; + set_irq_chip(ec->irq, &ecard_chip); + set_irq_handler(ec->irq, do_level_IRQ); + set_irq_flags(ec->irq, IRQF_VALID); + } + + for (ecp = &cards; *ecp; ecp = &(*ecp)->next); + + *ecp = ec; + slot_to_expcard[slot] = ec; + + device_register(&ec->dev); + device_create_file(&ec->dev, &dev_attr_dma); + device_create_file(&ec->dev, &dev_attr_irq); + device_create_file(&ec->dev, &dev_attr_resource); + + return 0; + +nodev: + kfree(ec); +nomem: + return rc; +} + +/* + * Initialise the expansion card system. + * Locate all hardware - interrupt management and + * actual cards. + */ +static int __init ecard_init(void) +{ + int slot, irqhw; + + printk("Probing expansion cards\n"); + + for (slot = 0; slot < 8; slot ++) { + if (ecard_probe(slot, ECARD_EASI) == -ENODEV) + ecard_probe(slot, ECARD_IOC); + } + + irqhw = ecard_probeirqhw(); + + set_irq_chained_handler(IRQ_EXPANSIONCARD, + irqhw ? 
ecard_irqexp_handler : ecard_irq_handler); + + ecard_proc_init(); + + return 0; +} + +subsys_initcall(ecard_init); + +/* + * ECARD "bus" + */ +static const struct ecard_id * +ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec) +{ + int i; + + for (i = 0; ids[i].manufacturer != 65535; i++) + if (ec->cid.manufacturer == ids[i].manufacturer && + ec->cid.product == ids[i].product) + return ids + i; + + return NULL; +} + +static int ecard_drv_probe(struct device *dev) +{ + struct expansion_card *ec = ECARD_DEV(dev); + struct ecard_driver *drv = ECARD_DRV(dev->driver); + const struct ecard_id *id; + int ret; + + id = ecard_match_device(drv->id_table, ec); + + ecard_claim(ec); + ret = drv->probe(ec, id); + if (ret) + ecard_release(ec); + return ret; +} + +static int ecard_drv_remove(struct device *dev) +{ + struct expansion_card *ec = ECARD_DEV(dev); + struct ecard_driver *drv = ECARD_DRV(dev->driver); + + drv->remove(ec); + ecard_release(ec); + + return 0; +} + +/* + * Before rebooting, we must make sure that the expansion card is in a + * sensible state, so it can be re-detected. This means that the first + * page of the ROM must be visible. We call the expansion cards reset + * handler, if any. + */ +static void ecard_drv_shutdown(struct device *dev) +{ + struct expansion_card *ec = ECARD_DEV(dev); + struct ecard_driver *drv = ECARD_DRV(dev->driver); + struct ecard_request req; + + if (drv->shutdown) + drv->shutdown(ec); + ecard_release(ec); + req.req = req_reset; + req.ec = ec; + ecard_call(&req); +} + +int ecard_register_driver(struct ecard_driver *drv) +{ + drv->drv.bus = &ecard_bus_type; + drv->drv.probe = ecard_drv_probe; + drv->drv.remove = ecard_drv_remove; + drv->drv.shutdown = ecard_drv_shutdown; + + return driver_register(&drv->drv); +} + +void ecard_remove_driver(struct ecard_driver *drv) +{ + driver_unregister(&drv->drv); +} + +static int ecard_match(struct device *_dev, struct device_driver *_drv) +{ + struct expansion_card *ec = ECARD_DEV(_dev); + struct ecard_driver *drv = ECARD_DRV(_drv); + int ret; + + if (drv->id_table) { + ret = ecard_match_device(drv->id_table, ec) != NULL; + } else { + ret = ec->cid.id == drv->id; + } + + return ret; +} + +struct bus_type ecard_bus_type = { + .name = "ecard", + .match = ecard_match, +}; + +static int ecard_bus_init(void) +{ + return bus_register(&ecard_bus_type); +} + +postcore_initcall(ecard_bus_init); + +EXPORT_SYMBOL(ecard_readchunk); +EXPORT_SYMBOL(ecard_address); +EXPORT_SYMBOL(ecard_register_driver); +EXPORT_SYMBOL(ecard_remove_driver); +EXPORT_SYMBOL(ecard_bus_type); diff -urN linux-2.5.70-bk13/arch/arm26/kernel/entry.S linux-2.5.70-bk14/arch/arm26/kernel/entry.S --- linux-2.5.70-bk13/arch/arm26/kernel/entry.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/entry.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,982 @@ +/* arch/arm26/kernel/entry.S + * + * Assembled from chunks of code in arch/arm + * + * Copyright (C) 2003 Ian Molton + * + */ + +#include /* for CONFIG_ARCH_xxxx */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + .macro zero_fp +#ifndef CONFIG_NO_FRAME_POINTER + mov fp, #0 +#endif + .endm + + .text + +@ Bad Abort numbers +@ ----------------- +@ +#define BAD_PREFETCH 0 +#define BAD_DATA 1 +#define BAD_ADDREXCPTN 2 +#define BAD_IRQ 3 +#define BAD_UNDEFINSTR 4 + +#define PT_TRACESYS 0x00000002 + +@ OS version number used in SWIs +@ RISC OS is 0 +@ RISC iX is 8 +@ +#define OS_NUMBER 9 +#define ARMSWI_OFFSET 0x000f0000 + +@ +@ 
Stack format (ensured by USER_* and SVC_*) +@ +#define S_FRAME_SIZE 72 +#define S_OLD_R0 64 +#define S_PSR 60 +#define S_PC 60 +#define S_LR 56 +#define S_SP 52 +#define S_IP 48 +#define S_FP 44 +#define S_R10 40 +#define S_R9 36 +#define S_R8 32 +#define S_R7 28 +#define S_R6 24 +#define S_R5 20 +#define S_R4 16 +#define S_R3 12 +#define S_R2 8 +#define S_R1 4 +#define S_R0 0 +#define S_OFF 8 + + .macro save_user_regs + str r0, [sp, #-4]! + str lr, [sp, #-4]! + sub sp, sp, #15*4 + stmia sp, {r0 - lr}^ + mov r0, r0 + .endm + + .macro slow_restore_user_regs + ldmia sp, {r0 - lr}^ + mov r0, r0 + ldr lr, [sp, #15*4] + add sp, sp, #15*4+8 + movs pc, lr + .endm + + .macro fast_restore_user_regs + add sp, sp, #S_OFF + ldmib sp, {r1 - lr}^ + mov r0, r0 + ldr lr, [sp, #15*4] + add sp, sp, #15*4+8 + movs pc, lr + .endm + + .macro mask_pc, rd, rm + bic \rd, \rm, #PCMASK + .endm + + .macro disable_irqs, temp + mov \temp, pc + orr \temp, \temp, #PSR_I_BIT + teqp \temp, #0 + .endm + + .macro enable_irqs, temp + mov \temp, pc + and \temp, \temp, #~PSR_I_BIT + teqp \temp, #0 + .endm + + .macro initialise_traps_extra + .endm + + .macro get_thread_info, rd + mov \rd, sp, lsr #13 + mov \rd, \rd, lsl #13 + .endm + + /* + * Like adr, but force SVC mode (if required) + */ + .macro adrsvc, cond, reg, label + adr\cond \reg, \label + orr\cond \reg, \reg, #PSR_I_BIT | MODE_SVC26 + .endm + + +/* + * These are the registers used in the syscall handler, and allow us to + * have in theory up to 7 arguments to a function - r0 to r6. + * + * r7 is reserved for the system call number for thumb mode. + * + * Note that tbl == why is intentional. + * + * We must set at least "tsk" and "why" when calling ret_with_reschedule. + */ +scno .req r7 @ syscall number +tbl .req r8 @ syscall table pointer +why .req r8 @ Linux syscall (!= 0) +tsk .req r9 @ current thread_info + +/* + * Get the system call number. + */ + .macro get_scno + mask_pc lr, lr + ldr scno, [lr, #-4] @ get SWI instruction + .endm +/* + * ----------------------------------------------------------------------- + */ + +/* + * We rely on the fact that R0 is at the bottom of the stack (due to + * slow/fast restore user regs). + */ +#if S_R0 != 0 +#error "Please fix" +#endif + +/* + * Our do_softirq out of line code. See include/asm-arm/softirq.h for + * the calling assembly. + */ +ENTRY(__do_softirq) + stmfd sp!, {r0 - r3, ip, lr} + bl do_softirq + ldmfd sp!, {r0 - r3, ip, pc} + + .align 5 + +/* + * This is the fast syscall return path. We do as little as + * possible here, and this includes saving r0 back into the SVC + * stack. + */ +ret_fast_syscall: + disable_irqs r1 @ disable interrupts + ldr r1, [tsk, #TI_FLAGS] + tst r1, #_TIF_WORK_MASK + bne fast_work_pending + fast_restore_user_regs + +/* + * Ok, we need to do extra processing, enter the slow path. + */ +fast_work_pending: + str r0, [sp, #S_R0+S_OFF]! @ returned r0 +work_pending: + tst r1, #_TIF_NEED_RESCHED + bne work_resched + tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING + beq no_work_pending + mov r0, sp @ 'regs' + mov r2, why @ 'syscall' + bl do_notify_resume + disable_irqs r1 @ disable interrupts + b no_work_pending + +work_resched: + bl schedule +/* + * "slow" syscall return path. "why" tells us if this was a real syscall. + */ +ENTRY(ret_to_user) +ret_slow_syscall: + disable_irqs r1 @ disable interrupts + ldr r1, [tsk, #TI_FLAGS] + tst r1, #_TIF_WORK_MASK + bne work_pending +no_work_pending: + slow_restore_user_regs + +/* + * This is how we return from a fork. 
+ */ +ENTRY(ret_from_fork) + bl schedule_tail + get_thread_info tsk + ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing + mov why, #1 + tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? + beq ret_slow_syscall + mov r1, sp + mov r0, #1 @ trace exit [IP = 1] + bl syscall_trace + b ret_slow_syscall + +#include + +/*============================================================================= + * SWI handler + *----------------------------------------------------------------------------- + */ + + .align 5 +ENTRY(vector_swi) + save_user_regs + zero_fp + get_scno + +#ifdef CONFIG_ALIGNMENT_TRAP + ldr ip, __cr_alignment + ldr ip, [ip] + mcr p15, 0, ip, c1, c0 @ update control register +#endif + enable_irqs ip + + str r4, [sp, #-S_OFF]! @ push fifth arg + + get_thread_info tsk + ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing + bic scno, scno, #0xff000000 @ mask off SWI op-code + eor scno, scno, #OS_NUMBER << 20 @ check OS number + adr tbl, sys_call_table @ load syscall table pointer + tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? + bne __sys_trace + + adrsvc al, lr, ret_fast_syscall @ return address + cmp scno, #NR_syscalls @ check upper syscall limit + ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine + + add r1, sp, #S_OFF +2: mov why, #0 @ no longer a real syscall + cmp scno, #ARMSWI_OFFSET + eor r0, scno, #OS_NUMBER << 20 @ put OS number back + bcs arm_syscall + b sys_ni_syscall @ not private func + + /* + * This is the really slow path. We're going to be doing + * context switches, and waiting for our parent to respond. + */ +__sys_trace: + add r1, sp, #S_OFF + mov r0, #0 @ trace entry [IP = 0] + bl syscall_trace + + adrsvc al, lr, __sys_trace_return @ return address + add r1, sp, #S_R0 + S_OFF @ pointer to regs + cmp scno, #NR_syscalls @ check upper syscall limit + ldmccia r1, {r0 - r3} @ have to reload r0 - r3 + ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine + b 2b + +__sys_trace_return: + str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 + mov r1, sp + mov r0, #1 @ trace exit [IP = 1] + bl syscall_trace + b ret_slow_syscall + + .align 5 +#ifdef CONFIG_ALIGNMENT_TRAP + .type __cr_alignment, #object +__cr_alignment: + .word cr_alignment +#endif + + .type sys_call_table, #object +ENTRY(sys_call_table) +#include + +/*============================================================================ + * Special system call wrappers + */ +@ r0 = syscall number +@ r5 = syscall table + .type sys_syscall, #function +sys_syscall: + eor scno, r0, #OS_NUMBER << 20 + cmp scno, #NR_syscalls @ check range + stmleia sp, {r5, r6} @ shuffle args + movle r0, r1 + movle r1, r2 + movle r2, r3 + movle r3, r4 + ldrle pc, [tbl, scno, lsl #2] + b sys_ni_syscall + +sys_fork_wrapper: + add r0, sp, #S_OFF + b sys_fork + +sys_vfork_wrapper: + add r0, sp, #S_OFF + b sys_vfork + +sys_execve_wrapper: + add r3, sp, #S_OFF + b sys_execve + +sys_clone_wapper: + add r2, sp, #S_OFF + b sys_clone + +sys_sigsuspend_wrapper: + add r3, sp, #S_OFF + b sys_sigsuspend + +sys_rt_sigsuspend_wrapper: + add r2, sp, #S_OFF + b sys_rt_sigsuspend + +sys_sigreturn_wrapper: + add r0, sp, #S_OFF + b sys_sigreturn + +sys_rt_sigreturn_wrapper: + add r0, sp, #S_OFF + b sys_rt_sigreturn + +sys_sigaltstack_wrapper: + ldr r2, [sp, #S_OFF + S_SP] + b do_sigaltstack + +/* + * Note: off_4k (r5) is always units of 4K. If we can't do the requested + * offset, we return EINVAL. FIXME - this lost some stuff from arm32 to + * ifdefs. check it out. 
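In C terms, the sys_mmap2 wrapper that follows rejects any offset that is not a whole number of pages and otherwise converts the 4K-unit value into PAGE_SIZE units before handing it to do_mmap2. A rough equivalent, assuming PAGE_SHIFT >= 12 and written purely for illustration:

/* Equivalent of the sys_mmap2 entry check below. */
static inline long mmap2_pgoff(unsigned long off_4k)
{
        /* offset is given in 4K units; it must land on a page boundary */
        if (off_4k & ((1 << (PAGE_SHIFT - 12)) - 1))
                return -EINVAL;

        return off_4k >> (PAGE_SHIFT - 12);     /* now in units of PAGE_SIZE */
}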
+ */ +sys_mmap2: + tst r5, #((1 << (PAGE_SHIFT - 12)) - 1) + moveq r5, r5, lsr #PAGE_SHIFT - 12 + streq r5, [sp, #4] + beq do_mmap2 + mov r0, #-EINVAL + RETINSTR(mov,pc, lr) + +/* + * Design issues: + * - We have several modes that each vector can be called from, + * each with its own set of registers. On entry to any vector, + * we *must* save the registers used in *that* mode. + * + * - This code must be as fast as possible. + * + * There are a few restrictions on the vectors: + * - the SWI vector cannot be called from *any* non-user mode + * + * - the FP emulator is *never* called from *any* non-user mode undefined + * instruction. + * + */ + + .text + + .equ ioc_base_high, IOC_BASE & 0xff000000 + .equ ioc_base_low, IOC_BASE & 0x00ff0000 + .macro disable_fiq + mov r12, #ioc_base_high + .if ioc_base_low + orr r12, r12, #ioc_base_low + .endif + strb r12, [r12, #0x38] @ Disable FIQ register + .endm + + .macro get_irqnr_and_base, irqnr, base + mov r4, #ioc_base_high @ point at IOC + .if ioc_base_low + orr r4, r4, #ioc_base_low + .endif + ldrb \irqnr, [r4, #0x24] @ get high priority first + adr \base, irq_prio_h + teq \irqnr, #0 + ldreqb \irqnr, [r4, #0x14] @ get low priority + adreq \base, irq_prio_l + .endm + +/* + * Interrupt table (incorporates priority) + */ + .macro irq_prio_table +irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 + .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 + .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3 + .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3 + .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 +irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 + .endm + +#if 1 +/* FIXME (well, ok, dont - but its easy to grep for :) */ +/* + * Uncomment these if you wish to get more debugging into about data aborts. 
+ */ +#define FAULT_CODE_LDRSTRPOST 0x80 +#define FAULT_CODE_LDRSTRPRE 0x40 +#define FAULT_CODE_LDRSTRREG 0x20 +#define FAULT_CODE_LDMSTM 0x10 +#define FAULT_CODE_LDCSTC 0x08 +#endif +#define FAULT_CODE_PREFETCH 0x04 +#define FAULT_CODE_WRITE 0x02 +#define FAULT_CODE_FORCECOW 0x01 + +#define SVC_SAVE_ALL \ + str sp, [sp, #-16]! ;\ + str lr, [sp, #8] ;\ + str lr, [sp, #4] ;\ + stmfd sp!, {r0 - r12} ;\ + mov r0, #-1 ;\ + str r0, [sp, #S_OLD_R0] ;\ + zero_fp + +#define SVC_IRQ_SAVE_ALL \ + str sp, [sp, #-16]! ;\ + str lr, [sp, #4] ;\ + ldr lr, .LCirq ;\ + ldr lr, [lr] ;\ + str lr, [sp, #8] ;\ + stmfd sp!, {r0 - r12} ;\ + mov r0, #-1 ;\ + str r0, [sp, #S_OLD_R0] ;\ + zero_fp + +#define SVC_RESTORE_ALL \ + ldmfd sp, {r0 - pc}^ + +/*============================================================================= + * Undefined FIQs + *----------------------------------------------------------------------------- + */ +_unexp_fiq: ldr sp, .LCfiq + mov r12, #IOC_BASE + strb r12, [r12, #0x38] @ Disable FIQ register + teqp pc, #PSR_I_BIT | PSR_F_BIT | MODE_SVC26 + mov r0, r0 + stmfd sp!, {r0 - r3, ip, lr} + adr r0, Lfiqmsg + bl printk + ldmfd sp!, {r0 - r3, ip, lr} + teqp pc, #PSR_I_BIT | PSR_F_BIT | MODE_FIQ26 + mov r0, r0 + movs pc, lr + +Lfiqmsg: .ascii "*** Unexpected FIQ\n\0" + .align + +.LCfiq: .word __temp_fiq +.LCirq: .word __temp_irq + +/*============================================================================= + * Undefined instruction handler + *----------------------------------------------------------------------------- + * Handles floating point instructions + */ +vector_undefinstr: + tst lr,#3 + bne __und_svc + save_user_regs + zero_fp + teqp pc, #PSR_I_BIT | MODE_SVC26 +.Lbug_undef: + ldr r4, .LC2 + ldr pc, [r4] @ Call FP module USR entry point + + .globl fpundefinstr +fpundefinstr: @ Called by FP module on undefined instr + mov r0, lr + mov r1, sp + teqp pc, #MODE_SVC26 + bl do_undefinstr + b ret_from_exception @ Normal FP exit + +__und_svc: SVC_SAVE_ALL @ Non-user mode + mask_pc r0, lr + and r2, lr, #3 + sub r0, r0, #4 + mov r1, sp + bl do_undefinstr + SVC_RESTORE_ALL + +#if defined CONFIG_FPE_NWFPE || defined CONFIG_FPE_FASTFPE + /* The FPE is always present */ + .equ fpe_not_present, 0 +#else +/* We get here if an undefined instruction happens and the floating + * point emulator is not present. If the offending instruction was + * a WFS, we just perform a normal return as if we had emulated the + * operation. This is a hack to allow some basic userland binaries + * to run so that the emulator module proper can be loaded. --philb + */ +fpe_not_present: + adr r10, wfs_mask_data + ldmia r10, {r4, r5, r6, r7, r8} + ldr r10, [sp, #S_PC] @ Load PC + sub r10, r10, #4 + mask_pc r10, r10 + ldrt r10, [r10] @ get instruction + and r5, r10, r5 + teq r5, r4 @ Is it WFS? + beq ret_from_exception + and r5, r10, r8 + teq r5, r6 @ Is it LDF/STF on sp or fp? 
+ teqne r5, r7 + bne fpundefinstr + tst r10, #0x00200000 @ Does it have WB + beq ret_from_exception + and r4, r10, #255 @ get offset + and r6, r10, #0x000f0000 + tst r10, #0x00800000 @ +/- + ldr r5, [sp, r6, lsr #14] @ Load reg + rsbeq r4, r4, #0 + add r5, r5, r4, lsl #2 + str r5, [sp, r6, lsr #14] @ Save reg + b ret_from_exception + +wfs_mask_data: .word 0x0e200110 @ WFS/RFS + .word 0x0fef0fff + .word 0x0d0d0100 @ LDF [sp]/STF [sp] + .word 0x0d0b0100 @ LDF [fp]/STF [fp] + .word 0x0f0f0f00 +#endif + +.LC2: .word fp_enter + +/*============================================================================= + * Prefetch abort handler + *----------------------------------------------------------------------------- + */ + +/* remember: lr = USR pc */ +vector_prefetch: + sub lr, lr, #4 + tst lr, #3 + bne __pabt_invalid + save_user_regs + teqp pc, #MODE_SVC26 + mask_pc r0, lr @ Address of abort + mov r1, sp @ Tasks registers + bl do_PrefetchAbort + teq r0, #0 @ If non-zero, we believe this abort.. + bne ret_from_exception +#ifdef DEBUG_UNDEF + adr r0, t + bl printk +#endif + ldr lr, [sp,#S_PC] @ program to test this on. I think its + b .Lbug_undef @ broken at the moment though!) + +__pabt_invalid: SVC_SAVE_ALL + mov r0, sp @ Prefetch aborts are definitely *not* + mov r1, #BAD_PREFETCH @ allowed in non-user modes. We cant + and r2, lr, #3 @ recover from this problem. + b bad_mode + +#ifdef DEBUG_UNDEF +t: .ascii "*** undef ***\r\n\0" + .align +#endif + +/*============================================================================= + * Address exception handler + *----------------------------------------------------------------------------- + * These aren't too critical. + * (they're not supposed to happen). + * In order to debug the reason for address exceptions in non-user modes, + * we have to obtain all the registers so that we can see what's going on. + */ + +vector_addrexcptn: + sub lr, lr, #8 + tst lr, #3 + bne Laddrexcptn_not_user + save_user_regs + teq pc, #MODE_SVC26 + mask_pc r0, lr @ Point to instruction + mov r1, sp @ Point to registers + mov r2, #0x400 + mov lr, pc + bl do_excpt + b ret_from_exception + +Laddrexcptn_not_user: + SVC_SAVE_ALL + and r2, lr, #3 + teq r2, #3 + bne Laddrexcptn_illegal_mode + teqp pc, #MODE_SVC26 + mask_pc r0, lr + mov r1, sp + orr r2, r2, #0x400 + bl do_excpt + ldmia sp, {r0 - lr} @ I cant remember the reason I changed this... + add sp, sp, #15*4 + movs pc, lr + +Laddrexcptn_illegal_mode: + mov r0, sp + str lr, [sp, #-4]! + orr r1, r2, #PSR_I_BIT | PSR_F_BIT + teqp r1, #0 @ change into mode (wont be user mode) + mov r0, r0 + mov r1, r8 @ Any register from r8 - r14 can be banked + mov r2, r9 + mov r3, r10 + mov r4, r11 + mov r5, r12 + mov r6, r13 + mov r7, r14 + teqp pc, #PSR_F_BIT | MODE_SVC26 @ back to svc + mov r0, r0 + stmfd sp!, {r1-r7} + ldmia r0, {r0-r7} + stmfd sp!, {r0-r7} + mov r0, sp + mov r1, #BAD_ADDREXCPTN + b bad_mode + +/*============================================================================= + * Interrupt (IRQ) handler + *----------------------------------------------------------------------------- + * Note: if in user mode, then *no* kernel routine is running, so do not have + * to save svc lr + * (r13 points to irq temp save area) + */ + +vector_IRQ: ldr r13, .LCirq @ I will leave this one in just in case... 
+ sub lr, lr, #4 + str lr, [r13] + tst lr, #3 + bne __irq_svc + teqp pc, #PSR_I_BIT | MODE_SVC26 + mov r0, r0 + ldr lr, .LCirq + ldr lr, [lr] + save_user_regs + +1: get_irqnr_and_base r6, r5 + teq r6, #0 + ldrneb r0, [r5, r6] @ get IRQ number + movne r1, sp + @ + @ routine called with r0 = irq number, r1 = struct pt_regs * + @ + adr lr, 1b + orr lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC + bne asm_do_IRQ + + mov why, #0 + get_thread_info r5 + b ret_to_user + + irq_prio_table + +__irq_svc: teqp pc, #PSR_I_BIT | MODE_SVC26 + mov r0, r0 + SVC_IRQ_SAVE_ALL + and r2, lr, #3 + teq r2, #3 + bne __irq_invalid +1: get_irqnr_and_base r6, r5 + teq r6, #0 + ldrneb r0, [r5, r6] @ get IRQ number + movne r1, sp + @ + @ routine called with r0 = irq number, r1 = struct pt_regs * + @ + adr lr, 1b + orr lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC + bne asm_do_IRQ @ Returns to 1b + SVC_RESTORE_ALL + +__irq_invalid: mov r0, sp + mov r1, #BAD_IRQ + b bad_mode + +/*============================================================================= + * Data abort handler code + *----------------------------------------------------------------------------- + * + * This handles both exceptions from user and SVC modes, computes the address + * range of the problem, and does any correction that is required. It then + * calls the kernel data abort routine. + * + * This is where I wish that the ARM would tell you which address aborted. + */ + +vector_data: sub lr, lr, #8 @ Correct lr + tst lr, #3 + bne Ldata_not_user + save_user_regs + teqp pc, #MODE_SVC26 + mask_pc r0, lr + bl Ldata_do + b ret_from_exception + +Ldata_not_user: + SVC_SAVE_ALL + and r2, lr, #3 + teq r2, #3 + bne Ldata_illegal_mode + tst lr, #PSR_I_BIT + teqeqp pc, #MODE_SVC26 + mask_pc r0, lr + bl Ldata_do + SVC_RESTORE_ALL + +Ldata_illegal_mode: + mov r0, sp + mov r1, #BAD_DATA + b bad_mode + +Ldata_do: mov r3, sp + ldr r4, [r0] @ Get instruction + mov r2, #0 + tst r4, #1 << 20 @ Check to see if it is a write instruction + orreq r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction + mov r1, r4, lsr #22 @ Now branch to the relevent processing routine + and r1, r1, #15 << 2 + add pc, pc, r1 + movs pc, lr + b Ldata_unknown + b Ldata_unknown + b Ldata_unknown + b Ldata_unknown + b Ldata_ldrstr_post @ ldr rd, [rn], #m + b Ldata_ldrstr_numindex @ ldr rd, [rn, #m] @ RegVal + b Ldata_ldrstr_post @ ldr rd, [rn], rm + b Ldata_ldrstr_regindex @ ldr rd, [rn, rm] + b Ldata_ldmstm @ ldm*a rn, + b Ldata_ldmstm @ ldm*b rn, + b Ldata_unknown + b Ldata_unknown + b Ldata_ldrstr_post @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m + b Ldata_ldcstc_pre @ ldc rd, [rn, #m] + b Ldata_unknown +Ldata_unknown: @ Part of jumptable + mov r0, r1 + mov r1, r4 + mov r2, r3 + b baddataabort + +Ldata_ldrstr_post: + mov r0, r4, lsr #14 @ Get Rn + and r0, r0, #15 << 2 @ Mask out reg. + teq r0, #15 << 2 + ldr r0, [r3, r0] @ Get register + biceq r0, r0, #PCMASK + mov r1, r0 +#ifdef FAULT_CODE_LDRSTRPOST + orr r2, r2, #FAULT_CODE_LDRSTRPOST +#endif + b do_DataAbort + +Ldata_ldrstr_numindex: + mov r0, r4, lsr #14 @ Get Rn + and r0, r0, #15 << 2 @ Mask out reg. + teq r0, #15 << 2 + ldr r0, [r3, r0] @ Get register + mov r1, r4, lsl #20 + biceq r0, r0, #PCMASK + tst r4, #1 << 23 + addne r0, r0, r1, lsr #20 + subeq r0, r0, r1, lsr #20 + mov r1, r0 +#ifdef FAULT_CODE_LDRSTRPRE + orr r2, r2, #FAULT_CODE_LDRSTRPRE +#endif + b do_DataAbort + +Ldata_ldrstr_regindex: + mov r0, r4, lsr #14 @ Get Rn + and r0, r0, #15 << 2 @ Mask out reg. 
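The recurring "mov rX, r4, lsr #14" / "and rX, rX, #15 << 2" pair in these abort decoders extracts the base register field Rn (bits 19:16) from the faulting instruction and scales it into a byte offset into the saved register frame. The same computation in plain C, with illustrative names, is roughly:

/* Illustrative: fetch the saved value of the base register of a faulting
 * ARM load/store instruction.  regs[] stands for the r0-r15 frame saved
 * on the abort stack. */
static unsigned long abort_base_reg(unsigned int insn, unsigned long *regs)
{
        unsigned int rn = (insn >> 16) & 0xf;   /* bits 19:16 = Rn           */

        return regs[rn];                        /* asm uses rn * 4 as offset */
}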
+ teq r0, #15 << 2 + ldr r0, [r3, r0] @ Get register + and r7, r4, #15 + biceq r0, r0, #PCMASK + teq r7, #15 @ Check for PC + ldr r7, [r3, r7, lsl #2] @ Get Rm + and r8, r4, #0x60 @ Get shift types + biceq r7, r7, #PCMASK + mov r9, r4, lsr #7 @ Get shift amount + and r9, r9, #31 + teq r8, #0 + moveq r7, r7, lsl r9 + teq r8, #0x20 @ LSR shift + moveq r7, r7, lsr r9 + teq r8, #0x40 @ ASR shift + moveq r7, r7, asr r9 + teq r8, #0x60 @ ROR shift + moveq r7, r7, ror r9 + tst r4, #1 << 23 + addne r0, r0, r7 + subeq r0, r0, r7 @ Apply correction + mov r1, r0 +#ifdef FAULT_CODE_LDRSTRREG + orr r2, r2, #FAULT_CODE_LDRSTRREG +#endif + b do_DataAbort + +Ldata_ldmstm: + mov r7, #0x11 + orr r7, r7, r7, lsl #8 + and r0, r4, r7 + and r1, r4, r7, lsl #1 + add r0, r0, r1, lsr #1 + and r1, r4, r7, lsl #2 + add r0, r0, r1, lsr #2 + and r1, r4, r7, lsl #3 + add r0, r0, r1, lsr #3 + add r0, r0, r0, lsr #8 + add r0, r0, r0, lsr #4 + and r7, r0, #15 @ r7 = no. of registers to transfer. + mov r5, r4, lsr #14 @ Get Rn + and r5, r5, #15 << 2 + ldr r0, [r3, r5] @ Get reg + eor r6, r4, r4, lsl #2 + tst r6, #1 << 23 @ Check inc/dec ^ writeback + rsbeq r7, r7, #0 + add r7, r0, r7, lsl #2 @ Do correction (signed) + subne r1, r7, #1 + subeq r1, r0, #1 + moveq r0, r7 + tst r4, #1 << 21 @ Check writeback + strne r7, [r3, r5] + eor r6, r4, r4, lsl #1 + tst r6, #1 << 24 @ Check Pre/Post ^ inc/dec + addeq r0, r0, #4 + addeq r1, r1, #4 + teq r5, #15*4 @ CHECK FOR PC + biceq r1, r1, #PCMASK + biceq r0, r0, #PCMASK +#ifdef FAULT_CODE_LDMSTM + orr r2, r2, #FAULT_CODE_LDMSTM +#endif + b do_DataAbort + +Ldata_ldcstc_pre: + mov r0, r4, lsr #14 @ Get Rn + and r0, r0, #15 << 2 @ Mask out reg. + teq r0, #15 << 2 + ldr r0, [r3, r0] @ Get register + mov r1, r4, lsl #24 @ Get offset + biceq r0, r0, #PCMASK + tst r4, #1 << 23 + addne r0, r0, r1, lsr #24 + subeq r0, r0, r1, lsr #24 + mov r1, r0 +#ifdef FAULT_CODE_LDCSTC + orr r2, r2, #FAULT_CODE_LDCSTC +#endif + b do_DataAbort + + +/* + * This is the return code to user mode for abort handlers + */ +ENTRY(ret_from_exception) + get_thread_info tsk + mov why, #0 + b ret_to_user + + .data +ENTRY(fp_enter) + .word fpe_not_present + .text +/* + * Register switch for older 26-bit only ARMs + */ +ENTRY(__switch_to) + add r0, r0, #TI_CPU_SAVE + stmia r0, {r4 - sl, fp, sp, lr} + add r1, r1, #TI_CPU_SAVE + ldmia r1, {r4 - sl, fp, sp, pc}^ + +/* + *============================================================================= + * Low-level interface code + *----------------------------------------------------------------------------- + * Trap initialisation + *----------------------------------------------------------------------------- + * + * Note - FIQ code has changed. The default is a couple of words in 0x1c, 0x20 + * that call _unexp_fiq. Nowever, we now copy the FIQ routine to 0x1c (removes + * some excess cycles). + * + * What we need to put into 0-0x1c are branches to branch to the kernel. + */ + + .section ".init.text",#alloc,#execinstr + +.Ljump_addresses: + swi SYS_ERROR0 + .word vector_undefinstr - 12 + .word vector_swi - 16 + .word vector_prefetch - 20 + .word vector_data - 24 + .word vector_addrexcptn - 28 + .word vector_IRQ - 32 + .word _unexp_fiq - 36 + b . 
+ 8 +/* + * initialise the trap system + */ +ENTRY(__trap_init) + stmfd sp!, {r4 - r7, lr} + adr r1, .Ljump_addresses + ldmia r1, {r1 - r7, ip, lr} + orr r2, lr, r2, lsr #2 + orr r3, lr, r3, lsr #2 + orr r4, lr, r4, lsr #2 + orr r5, lr, r5, lsr #2 + orr r6, lr, r6, lsr #2 + orr r7, lr, r7, lsr #2 + orr ip, lr, ip, lsr #2 + mov r0, #0 + stmia r0, {r1 - r7, ip} + ldmfd sp!, {r4 - r7, pc}^ + + .bss +__temp_irq: .space 4 @ saved lr_irq +__temp_fiq: .space 128 diff -urN linux-2.5.70-bk13/arch/arm26/kernel/fiq.c linux-2.5.70-bk14/arch/arm26/kernel/fiq.c --- linux-2.5.70-bk13/arch/arm26/kernel/fiq.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/fiq.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,202 @@ +/* + * linux/arch/arm26/kernel/fiq.c + * + * Copyright (C) 1998 Russell King + * Copyright (C) 1998, 1999 Phil Blundell + * Copyright (C) 2003 Ian Molton + * + * FIQ support written by Philip Blundell , 1998. + * + * FIQ support re-written by Russell King to be more generic + * + * We now properly support a method by which the FIQ handlers can + * be stacked onto the vector. We still do not support sharing + * the FIQ vector itself. + * + * Operation is as follows: + * 1. Owner A claims FIQ: + * - default_fiq relinquishes control. + * 2. Owner A: + * - inserts code. + * - sets any registers, + * - enables FIQ. + * 3. Owner B claims FIQ: + * - if owner A has a relinquish function. + * - disable FIQs. + * - saves any registers. + * - returns zero. + * 4. Owner B: + * - inserts code. + * - sets any registers, + * - enables FIQ. + * 5. Owner B releases FIQ: + * - Owner A is asked to reacquire FIQ: + * - inserts code. + * - restores saved registers. + * - enables FIQ. + * 6. Goto 3 + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define FIQ_VECTOR (vectors_base() + 0x1c) + +static unsigned long no_fiq_insn; + +#define unprotect_page_0() +#define protect_page_0() + +/* Default reacquire function + * - we always relinquish FIQ control + * - we always reacquire FIQ control + */ +static int fiq_def_op(void *ref, int relinquish) +{ + if (!relinquish) { + unprotect_page_0(); + *(unsigned long *)FIQ_VECTOR = no_fiq_insn; + protect_page_0(); + } + + return 0; +} + +static struct fiq_handler default_owner = { + .name = "default", + .fiq_op = fiq_def_op, +}; + +static struct fiq_handler *current_fiq = &default_owner; + +int show_fiq_list(struct seq_file *p, void *v) +{ + if (current_fiq != &default_owner) + seq_printf(p, "FIQ: %s\n", current_fiq->name); + + return 0; +} + +void set_fiq_handler(void *start, unsigned int length) +{ + unprotect_page_0(); + + memcpy((void *)FIQ_VECTOR, start, length); + + protect_page_0(); +} + +/* + * Taking an interrupt in FIQ mode is death, so both these functions + * disable irqs for the duration. + */ +void set_fiq_regs(struct pt_regs *regs) +{ + register unsigned long tmp, tmp2; + __asm__ volatile ( + "mov %0, pc + bic %1, %0, #0x3 + orr %1, %1, %3 + teqp %1, #0 @ select FIQ mode + mov r0, r0 + ldmia %2, {r8 - r14} + teqp %0, #0 @ return to SVC mode + mov r0, r0" + : "=&r" (tmp), "=&r" (tmp2) + : "r" (®s->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | MODE_FIQ26) + /* These registers aren't modified by the above code in a way + visible to the compiler, but we mark them as clobbers anyway + so that GCC won't put any of the input or output operands in + them. 
*/ + : "r8", "r9", "r10", "r11", "r12", "r13", "r14"); +} + +void get_fiq_regs(struct pt_regs *regs) +{ + register unsigned long tmp, tmp2; + __asm__ volatile ( + "mov %0, pc + bic %1, %0, #0x3 + orr %1, %1, %3 + teqp %1, #0 @ select FIQ mode + mov r0, r0 + stmia %2, {r8 - r14} + teqp %0, #0 @ return to SVC mode + mov r0, r0" + : "=&r" (tmp), "=&r" (tmp2) + : "r" (®s->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | MODE_FIQ26) + /* These registers aren't modified by the above code in a way + visible to the compiler, but we mark them as clobbers anyway + so that GCC won't put any of the input or output operands in + them. */ + : "r8", "r9", "r10", "r11", "r12", "r13", "r14"); +} + +int claim_fiq(struct fiq_handler *f) +{ + int ret = 0; + + if (current_fiq) { + ret = -EBUSY; + + if (current_fiq->fiq_op != NULL) + ret = current_fiq->fiq_op(current_fiq->dev_id, 1); + } + + if (!ret) { + f->next = current_fiq; + current_fiq = f; + } + + return ret; +} + +void release_fiq(struct fiq_handler *f) +{ + if (current_fiq != f) { + printk(KERN_ERR "%s FIQ trying to release %s FIQ\n", + f->name, current_fiq->name); +#ifdef CONFIG_DEBUG_ERRORS + __backtrace(); +#endif + return; + } + + do + current_fiq = current_fiq->next; + while (current_fiq->fiq_op(current_fiq->dev_id, 0)); +} + +void enable_fiq(int fiq) +{ + enable_irq(fiq + FIQ_START); +} + +void disable_fiq(int fiq) +{ + disable_irq(fiq + FIQ_START); +} + +EXPORT_SYMBOL(set_fiq_handler); +EXPORT_SYMBOL(set_fiq_regs); +EXPORT_SYMBOL(get_fiq_regs); +EXPORT_SYMBOL(claim_fiq); +EXPORT_SYMBOL(release_fiq); +EXPORT_SYMBOL(enable_fiq); +EXPORT_SYMBOL(disable_fiq); + +void __init init_FIQ(void) +{ + no_fiq_insn = *(unsigned long *)FIQ_VECTOR; + set_fs(get_fs()); +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/init_task.c linux-2.5.70-bk14/arch/arm26/kernel/init_task.c --- linux-2.5.70-bk13/arch/arm26/kernel/init_task.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/init_task.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,41 @@ +/* + * linux/arch/arm/kernel/init_task.c + * + * Copyright (C) 2003 Ian Molton + * + */ +#include +#include +#include +#include +#include + +#include +#include + +static struct fs_struct init_fs = INIT_FS; +static struct files_struct init_files = INIT_FILES; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); + +/* + * Initial thread structure. + * + * We need to make sure that this is 8192-byte aligned due to the + * way process stacks are handled. This is done by making sure + * the linker maps this in the .text segment right after head.S, + * and making head.S ensure the proper alignment. + * + * The things we do for performance... + */ +union thread_union init_thread_union + __attribute__((__section__(".init.task"))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. + * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); diff -urN linux-2.5.70-bk13/arch/arm26/kernel/irq.c linux-2.5.70-bk14/arch/arm26/kernel/irq.c --- linux-2.5.70-bk13/arch/arm26/kernel/irq.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/irq.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,705 @@ +/* + * linux/arch/arm/kernel/irq.c + * + * Copyright (C) 1992 Linus Torvalds + * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. 
+ * 'Borrowed' for ARM26 and (C) 2003 Ian Molton. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains the code used by various IRQ handling routines: + * asking for different IRQ's should be done through these routines + * instead of just grabbing them. Thus setups with different IRQ numbers + * shouldn't result in any weird surprises, and installing new handlers + * should be easier. + * + * IRQ's are in fact implemented a bit like signal handlers for the kernel. + * Naturally it's not a 1:1 relation, but there are similarities. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * Maximum IRQ count. Currently, this is arbitary. However, it should + * not be set too low to prevent false triggering. Conversely, if it + * is set too high, then you could miss a stuck IRQ. + * + * Maybe we ought to set a timer and re-enable the IRQ at a later time? + */ +#define MAX_IRQ_CNT 100000 + +static volatile unsigned long irq_err_count; +static spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED; + +struct irqdesc irq_desc[NR_IRQS]; +void (*init_arch_irq)(void) __initdata = NULL; + +/* + * Dummy mask/unmask handler + */ +void dummy_mask_unmask_irq(unsigned int irq) +{ +} + +void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) +{ + irq_err_count += 1; + printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq); +} + +static struct irqchip bad_chip = { + .ack = dummy_mask_unmask_irq, + .mask = dummy_mask_unmask_irq, + .unmask = dummy_mask_unmask_irq, +}; + +static struct irqdesc bad_irq_desc = { + .chip = &bad_chip, + .handle = do_bad_IRQ, + .depth = 1, +}; + +/** + * disable_irq - disable an irq and wait for completion + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. We do this lazily. + * + * This function may be called from IRQ context. + */ +void disable_irq(unsigned int irq) +{ + struct irqdesc *desc = irq_desc + irq; + unsigned long flags; + + spin_lock_irqsave(&irq_controller_lock, flags); + if (!desc->depth++) + desc->enabled = 0; + spin_unlock_irqrestore(&irq_controller_lock, flags); +} + +/** + * enable_irq - enable interrupt handling on an irq + * @irq: Interrupt to enable + * + * Re-enables the processing of interrupts on this IRQ line. + * Note that this may call the interrupt handler, so you may + * get unexpected results if you hold IRQs disabled. + * + * This function may be called from IRQ context. + */ +void enable_irq(unsigned int irq) +{ + struct irqdesc *desc = irq_desc + irq; + unsigned long flags; + int pending = 0; + + spin_lock_irqsave(&irq_controller_lock, flags); + if (unlikely(!desc->depth)) { + printk("enable_irq(%u) unbalanced from %p\n", irq, + __builtin_return_address(0)); + } else if (!--desc->depth) { + desc->probing = 0; + desc->enabled = 1; + desc->chip->unmask(irq); + pending = desc->pending; + desc->pending = 0; + /* + * If the interrupt was waiting to be processed, + * retrigger it. 
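		 * Note that disable_irq()/enable_irq() nest via desc->depth,
		 * so an illustrative sequence such as
		 *
		 *	disable_irq(n); disable_irq(n); enable_irq(n);
		 *
		 * leaves the line logically disabled until a second, matching
		 * enable_irq(n) runs, at which point any pending edge is
		 * replayed just below.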
+ */ + if (pending) + desc->chip->rerun(irq); + } + spin_unlock_irqrestore(&irq_controller_lock, flags); +} + +int show_interrupts(struct seq_file *p, void *v) +{ + int i; + struct irqaction * action; + + for (i = 0 ; i < NR_IRQS ; i++) { + action = irq_desc[i].action; + if (!action) + continue; + seq_printf(p, "%3d: %10u ", i, kstat_irqs(i)); + seq_printf(p, " %s", action->name); + for (action = action->next; action; action = action->next) { + seq_printf(p, ", %s", action->name); + } + seq_putc(p, '\n'); + } + + show_fiq_list(p, v); + seq_printf(p, "Err: %10lu\n", irq_err_count); + return 0; +} + +/* + * IRQ lock detection. + * + * Hopefully, this should get us out of a few locked situations. + * However, it may take a while for this to happen, since we need + * a large number if IRQs to appear in the same jiffie with the + * same instruction pointer (or within 2 instructions). + */ +static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs) +{ + unsigned long instr_ptr = instruction_pointer(regs); + + if (desc->lck_jif == jiffies && + desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) { + desc->lck_cnt += 1; + + if (desc->lck_cnt > MAX_IRQ_CNT) { + printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq); + return 1; + } + } else { + desc->lck_cnt = 0; + desc->lck_pc = instruction_pointer(regs); + desc->lck_jif = jiffies; + } + return 0; +} + +static void +__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs) +{ + unsigned int status; + + spin_unlock(&irq_controller_lock); + + if (!(action->flags & SA_INTERRUPT)) + local_irq_enable(); + + status = 0; + do { + status |= action->flags; + action->handler(irq, action->dev_id, regs); + action = action->next; + } while (action); + + if (status & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + + spin_lock_irq(&irq_controller_lock); +} + +/* + * This is for software-decoded IRQs. The caller is expected to + * handle the ack, clear, mask and unmask issues. + */ +void +do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) +{ + struct irqaction *action; + const int cpu = smp_processor_id(); + + desc->triggered = 1; + + kstat_cpu(cpu).irqs[irq]++; + + action = desc->action; + if (action) + __do_irq(irq, desc->action, regs); +} + +/* + * Most edge-triggered IRQ implementations seem to take a broken + * approach to this. Hence the complexity. + */ +void +do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) +{ + const int cpu = smp_processor_id(); + + desc->triggered = 1; + + /* + * If we're currently running this IRQ, or its disabled, + * we shouldn't process the IRQ. Instead, turn on the + * hardware masks. + */ + if (unlikely(desc->running || !desc->enabled)) + goto running; + + /* + * Acknowledge and clear the IRQ, but don't mask it. + */ + desc->chip->ack(irq); + + /* + * Mark the IRQ currently in progress. + */ + desc->running = 1; + + kstat_cpu(cpu).irqs[irq]++; + + do { + struct irqaction *action; + + action = desc->action; + if (!action) + break; + + if (desc->pending && desc->enabled) { + desc->pending = 0; + desc->chip->unmask(irq); + } + + __do_irq(irq, action, regs); + } while (desc->pending); + + desc->running = 0; + + /* + * If we were disabled or freed, shut down the handler. + */ + if (likely(desc->action && !check_irq_lock(desc, irq, regs))) + return; + + running: + /* + * We got another IRQ while this one was masked or + * currently running. Delay it. 
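	 * (The pending flag set just below is what enable_irq() and the
	 * service loop above test in order to replay the edge via
	 * desc->chip->rerun() or an unmask once that is safe again.)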
+ */ + desc->pending = 1; + desc->chip->mask(irq); + desc->chip->ack(irq); +} + +/* + * Level-based IRQ handler. Nice and simple. + */ +void +do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) +{ + struct irqaction *action; + const int cpu = smp_processor_id(); + + desc->triggered = 1; + + /* + * Acknowledge, clear _AND_ disable the interrupt. + */ + desc->chip->ack(irq); + + if (likely(desc->enabled)) { + kstat_cpu(cpu).irqs[irq]++; + + /* + * Return with this interrupt masked if no action + */ + action = desc->action; + if (action) { + __do_irq(irq, desc->action, regs); + + if (likely(desc->enabled && + !check_irq_lock(desc, irq, regs))) + desc->chip->unmask(irq); + } + } +} + +/* + * do_IRQ handles all hardware IRQ's. Decoded IRQs should not + * come via this function. Instead, they should provide their + * own 'handler' + */ +asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs) +{ + struct irqdesc *desc = irq_desc + irq; + + /* + * Some hardware gives randomly wrong interrupts. Rather + * than crashing, do something sensible. + */ + if (irq >= NR_IRQS) + desc = &bad_irq_desc; + + irq_enter(); + spin_lock(&irq_controller_lock); + desc->handle(irq, desc, regs); + spin_unlock(&irq_controller_lock); + irq_exit(); +} + +void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained) +{ + struct irqdesc *desc; + unsigned long flags; + + if (irq >= NR_IRQS) { + printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq); + return; + } + + if (handle == NULL) + handle = do_bad_IRQ; + + desc = irq_desc + irq; + + if (is_chained && desc->chip == &bad_chip) + printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq); + + spin_lock_irqsave(&irq_controller_lock, flags); + if (handle == do_bad_IRQ) { + desc->chip->mask(irq); + desc->chip->ack(irq); + desc->depth = 1; + desc->enabled = 0; + } + desc->handle = handle; + if (handle != do_bad_IRQ && is_chained) { + desc->valid = 0; + desc->probe_ok = 0; + desc->depth = 0; + desc->chip->unmask(irq); + } + spin_unlock_irqrestore(&irq_controller_lock, flags); +} + +void set_irq_chip(unsigned int irq, struct irqchip *chip) +{ + struct irqdesc *desc; + unsigned long flags; + + if (irq >= NR_IRQS) { + printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq); + return; + } + + if (chip == NULL) + chip = &bad_chip; + + desc = irq_desc + irq; + spin_lock_irqsave(&irq_controller_lock, flags); + desc->chip = chip; + spin_unlock_irqrestore(&irq_controller_lock, flags); +} + +int set_irq_type(unsigned int irq, unsigned int type) +{ + struct irqdesc *desc; + unsigned long flags; + int ret = -ENXIO; + + if (irq >= NR_IRQS) { + printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); + return -ENODEV; + } + + desc = irq_desc + irq; + if (desc->chip->type) { + spin_lock_irqsave(&irq_controller_lock, flags); + ret = desc->chip->type(irq, type); + spin_unlock_irqrestore(&irq_controller_lock, flags); + } + + return ret; +} + +void set_irq_flags(unsigned int irq, unsigned int iflags) +{ + struct irqdesc *desc; + unsigned long flags; + + if (irq >= NR_IRQS) { + printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); + return; + } + + desc = irq_desc + irq; + spin_lock_irqsave(&irq_controller_lock, flags); + desc->valid = (iflags & IRQF_VALID) != 0; + desc->probe_ok = (iflags & IRQF_PROBE) != 0; + desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0; + spin_unlock_irqrestore(&irq_controller_lock, flags); +} + +int setup_irq(unsigned int irq, struct irqaction *new) +{ + int shared = 0; + struct irqaction 
*old, **p; + unsigned long flags; + struct irqdesc *desc; + + /* + * Some drivers like serial.c use request_irq() heavily, + * so we have to be careful not to interfere with a + * running system. + */ + if (new->flags & SA_SAMPLE_RANDOM) { + /* + * This function might sleep, we want to call it first, + * outside of the atomic block. + * Yes, this might clear the entropy pool if the wrong + * driver is attempted to be loaded, without actually + * installing a new handler, but is this really a problem, + * only the sysadmin is able to do this. + */ + rand_initialize_irq(irq); + } + + /* + * The following block of code has to be executed atomically + */ + desc = irq_desc + irq; + spin_lock_irqsave(&irq_controller_lock, flags); + p = &desc->action; + if ((old = *p) != NULL) { + /* Can't share interrupts unless both agree to */ + if (!(old->flags & new->flags & SA_SHIRQ)) { + spin_unlock_irqrestore(&irq_controller_lock, flags); + return -EBUSY; + } + + /* add new interrupt at end of irq queue */ + do { + p = &old->next; + old = *p; + } while (old); + shared = 1; + } + + *p = new; + + if (!shared) { + desc->probing = 0; + desc->running = 0; + desc->pending = 0; + desc->depth = 1; + if (!desc->noautoenable) { + desc->depth = 0; + desc->enabled = 1; + desc->chip->unmask(irq); + } + } + + spin_unlock_irqrestore(&irq_controller_lock, flags); + return 0; +} + +/** + * request_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs + * @irqflags: Interrupt type flags + * @devname: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the + * interrupt line and IRQ handling. From the point this + * call is made your handler function may be invoked. Since + * your handler function must clear any interrupt the board + * raises, you must take care both to initialise your hardware + * and to set up the interrupt handler in the right order. + * + * Dev_id must be globally unique. Normally the address of the + * device data structure is used as the cookie. Since the handler + * receives this value it makes sense to use it. + * + * If your interrupt is shared you must pass a non NULL dev_id + * as this is required when freeing the interrupt. + * + * Flags: + * + * SA_SHIRQ Interrupt is shared + * + * SA_INTERRUPT Disable local interrupts while processing + * + * SA_SAMPLE_RANDOM The interrupt can be used for entropy + * + */ + +//FIXME - handler used to return void - whats the significance of the change? +int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irq_flags, const char * devname, void *dev_id) +{ + unsigned long retval; + struct irqaction *action; + + if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler || + (irq_flags & SA_SHIRQ && !dev_id)) + return -EINVAL; + + action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); + if (!action) + return -ENOMEM; + + action->handler = handler; + action->flags = irq_flags; + action->mask = 0; + action->name = devname; + action->next = NULL; + action->dev_id = dev_id; + + retval = setup_irq(irq, action); + + if (retval) + kfree(action); + return retval; +} + +/** + * free_irq - free an interrupt + * @irq: Interrupt line to free + * @dev_id: Device identity to free + * + * Remove an interrupt handler. The handler is removed and if the + * interrupt line is no longer in use by any driver it is disabled. 
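 * A minimal usage sketch (the handler, name and cookie are illustrative):
 *
 *	err = request_irq(irq, my_handler, SA_SHIRQ, "mydev", dev);
 *	if (err)
 *		return err;
 *	...
 *	free_irq(irq, dev);
 *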
+ * On a shared IRQ the caller must ensure the interrupt is disabled + * on the card it drives before calling this function. + * + * This function may be called from interrupt context. + */ +void free_irq(unsigned int irq, void *dev_id) +{ + struct irqaction * action, **p; + unsigned long flags; + + if (irq >= NR_IRQS || !irq_desc[irq].valid) { + printk(KERN_ERR "Trying to free IRQ%d\n",irq); +#ifdef CONFIG_DEBUG_ERRORS + __backtrace(); +#endif + return; + } + + spin_lock_irqsave(&irq_controller_lock, flags); + for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) { + if (action->dev_id != dev_id) + continue; + + /* Found it - now free it */ + *p = action->next; + kfree(action); + goto out; + } + printk(KERN_ERR "Trying to free free IRQ%d\n",irq); +#ifdef CONFIG_DEBUG_ERRORS + __backtrace(); +#endif +out: + spin_unlock_irqrestore(&irq_controller_lock, flags); +} + +/* Start the interrupt probing. Unlike other architectures, + * we don't return a mask of interrupts from probe_irq_on, + * but return the number of interrupts enabled for the probe. + * The interrupts which have been enabled for probing is + * instead recorded in the irq_desc structure. + */ +unsigned long probe_irq_on(void) +{ + unsigned int i, irqs = 0; + unsigned long delay; + + /* + * first snaffle up any unassigned but + * probe-able interrupts + */ + spin_lock_irq(&irq_controller_lock); + for (i = 0; i < NR_IRQS; i++) { + if (!irq_desc[i].probe_ok || irq_desc[i].action) + continue; + + irq_desc[i].probing = 1; + irq_desc[i].triggered = 0; + if (irq_desc[i].chip->type) + irq_desc[i].chip->type(i, IRQT_PROBE); + irq_desc[i].chip->unmask(i); + irqs += 1; + } + spin_unlock_irq(&irq_controller_lock); + + /* + * wait for spurious interrupts to mask themselves out again + */ + for (delay = jiffies + HZ/10; time_before(jiffies, delay); ) + /* min 100ms delay */; + + /* + * now filter out any obviously spurious interrupts + */ + spin_lock_irq(&irq_controller_lock); + for (i = 0; i < NR_IRQS; i++) { + if (irq_desc[i].probing && irq_desc[i].triggered) { + irq_desc[i].probing = 0; + irqs -= 1; + } + } + spin_unlock_irq(&irq_controller_lock); + + return irqs; +} + +/* + * Possible return values: + * >= 0 - interrupt number + * -1 - no interrupt/many interrupts + */ +int probe_irq_off(unsigned long irqs) +{ + unsigned int i; + int irq_found = NO_IRQ; + + /* + * look at the interrupts, and find exactly one + * that we were probing has been triggered + */ + spin_lock_irq(&irq_controller_lock); + for (i = 0; i < NR_IRQS; i++) { + if (irq_desc[i].probing && + irq_desc[i].triggered) { + if (irq_found != NO_IRQ) { + irq_found = NO_IRQ; + goto out; + } + irq_found = i; + } + } + + if (irq_found == -1) + irq_found = NO_IRQ; +out: + spin_unlock_irq(&irq_controller_lock); + + return irq_found; +} + +void __init init_irq_proc(void) +{ +} + +void __init init_IRQ(void) +{ + struct irqdesc *desc; + extern void init_dma(void); + int irq; + + for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) + *desc = bad_irq_desc; + + init_arch_irq(); + init_dma(); +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/process.c linux-2.5.70-bk14/arch/arm26/kernel/process.c --- linux-2.5.70-bk13/arch/arm26/kernel/process.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/process.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,414 @@ +/* + * linux/arch/arm26/kernel/process.c + * + * Copyright (C) 2003 Ian Molton - adapted for ARM26 + * Copyright (C) 1996-2000 Russell King - Converted to ARM. 
+ * Origional Copyright (C) 1995 Linus Torvalds + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +extern const char *processor_modes[]; +extern void setup_mm_for_reboot(char mode); + +static volatile int hlt_counter; + +void disable_hlt(void) +{ + hlt_counter++; +} + +void enable_hlt(void) +{ + hlt_counter--; +} + +static int __init nohlt_setup(char *__unused) +{ + hlt_counter = 1; + return 1; +} + +static int __init hlt_setup(char *__unused) +{ + hlt_counter = 0; + return 1; +} + +__setup("nohlt", nohlt_setup); +__setup("hlt", hlt_setup); + +/* + * The following aren't currently used. + */ +void (*pm_idle)(void); +void (*pm_power_off)(void); + +/* + * This is our default idle handler. We need to disable + * interrupts here to ensure we don't miss a wakeup call. + */ +void default_idle(void) +{ + local_irq_disable(); + if (!need_resched() && !hlt_counter) + local_irq_enable(); +} + +/* + * The idle thread. We try to conserve power, while trying to keep + * overall latency low. The architecture specific idle is passed + * a value to indicate the level of "idleness" of the system. + */ +void cpu_idle(void) +{ + /* endless idle loop with no priority at all */ + preempt_disable(); + while (1) { + void (*idle)(void) = pm_idle; + if (!idle) + idle = default_idle; + leds_event(led_idle_start); + while (!need_resched()) + idle(); + leds_event(led_idle_end); + schedule(); + } +} + +static char reboot_mode = 'h'; + +int __init reboot_setup(char *str) +{ + reboot_mode = str[0]; + return 1; +} + +__setup("reboot=", reboot_setup); + +void machine_halt(void) +{ + leds_event(led_halted); +} + +void machine_power_off(void) +{ + leds_event(led_halted); + if (pm_power_off) + pm_power_off(); +} + +void machine_restart(char * __unused) +{ + /* + * Clean and disable cache, and turn off interrupts + */ + cpu_proc_fin(); + + /* + * Tell the mm system that we are going to reboot - + * we may need it to insert some 1:1 mappings so that + * soft boot works. + */ + setup_mm_for_reboot(reboot_mode); + + /* + * copy branch instruction to reset location and call it + */ + + *(unsigned long *)0 = *(unsigned long *)0x03800000; + ((void(*)(void))0)(); + + /* + * Whoops - the architecture was unable to reboot. + * Tell the user! Should never happen... + */ + mdelay(1000); + printk("Reboot failed -- System halted\n"); + while (1); +} + +void show_regs(struct pt_regs * regs) +{ + unsigned long flags; + + flags = condition_codes(regs); + + printk("pc : [<%08lx>] lr : [<%08lx>] %s\n" + "sp : %08lx ip : %08lx fp : %08lx\n", + instruction_pointer(regs), + regs->ARM_lr, print_tainted(), regs->ARM_sp, + regs->ARM_ip, regs->ARM_fp); + printk("r10: %08lx r9 : %08lx r8 : %08lx\n", + regs->ARM_r10, regs->ARM_r9, + regs->ARM_r8); + printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", + regs->ARM_r7, regs->ARM_r6, + regs->ARM_r5, regs->ARM_r4); + printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", + regs->ARM_r3, regs->ARM_r2, + regs->ARM_r1, regs->ARM_r0); + printk("Flags: %c%c%c%c", + flags & PSR_N_BIT ? 'N' : 'n', + flags & PSR_Z_BIT ? 'Z' : 'z', + flags & PSR_C_BIT ? 'C' : 'c', + flags & PSR_V_BIT ? 
'V' : 'v'); + printk(" IRQs o%s FIQs o%s Mode %s Segment %s\n", + interrupts_enabled(regs) ? "n" : "ff", + fast_interrupts_enabled(regs) ? "n" : "ff", + processor_modes[processor_mode(regs)], + get_fs() == get_ds() ? "kernel" : "user"); +} + +void show_fpregs(struct user_fp *regs) +{ + int i; + + for (i = 0; i < 8; i++) { + unsigned long *p; + char type; + + p = (unsigned long *)(regs->fpregs + i); + + switch (regs->ftype[i]) { + case 1: type = 'f'; break; + case 2: type = 'd'; break; + case 3: type = 'e'; break; + default: type = '?'; break; + } + if (regs->init_flag) + type = '?'; + + printk(" f%d(%c): %08lx %08lx %08lx%c", + i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' '); + } + + + printk("FPSR: %08lx FPCR: %08lx\n", + (unsigned long)regs->fpsr, + (unsigned long)regs->fpcr); +} + +/* + * Task structure and kernel stack allocation. + */ +static unsigned long *thread_info_head; +static unsigned int nr_thread_info; + +extern unsigned long get_page_8k(int priority); +extern void free_page_8k(unsigned long page); + +// FIXME - is this valid? +#define EXTRA_TASK_STRUCT 0 +#define ll_alloc_task_struct() ((struct thread_info *)get_page_8k(GFP_KERNEL)) +#define ll_free_task_struct(p) free_page_8k((unsigned long)(p)) + +struct thread_info *alloc_thread_info(void) +{ + struct thread_info *thread = NULL; + + if (EXTRA_TASK_STRUCT) { + unsigned long *p = thread_info_head; + + if (p) { + thread_info_head = (unsigned long *)p[0]; + nr_thread_info -= 1; + } + thread = (struct thread_info *)p; + } + + if (!thread) + thread = ll_alloc_task_struct(); + +#ifdef CONFIG_SYSRQ + /* + * The stack must be cleared if you want SYSRQ-T to + * give sensible stack usage information + */ + if (thread) { + char *p = (char *)thread; + memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE); + } +#endif + return thread; +} + +void free_thread_info(struct thread_info *thread) +{ + if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) { + unsigned long *p = (unsigned long *)thread; + p[0] = (unsigned long)thread_info_head; + thread_info_head = p; + nr_thread_info += 1; + } else + ll_free_task_struct(thread); +} + +/* + * Free current thread data structures etc.. + */ +void exit_thread(void) +{ +} + +void flush_thread(void) +{ + struct thread_info *thread = current_thread_info(); + struct task_struct *tsk = current; + + memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); + memset(&thread->fpstate, 0, sizeof(union fp_state)); + + current->used_math = 0; +} + +void release_thread(struct task_struct *dead_task) +{ +} + +asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); + +int +copy_thread(int nr, unsigned long clone_flags, unsigned long esp, + unsigned long unused, struct task_struct *p, struct pt_regs *regs) +{ + struct thread_info *thread = p->thread_info; + struct pt_regs *childregs; + + childregs = __get_user_regs(thread); + *childregs = *regs; + childregs->ARM_r0 = 0; + childregs->ARM_sp = esp; + + memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); + thread->cpu_context.sp = (unsigned long)childregs; + thread->cpu_context.pc = (unsigned long)ret_from_fork | MODE_SVC26 | PSR_I_BIT; + + return 0; +} + +/* + * fill in the fpe structure for a core dump... + */ +int dump_fpu (struct pt_regs *regs, struct user_fp *fp) +{ + struct thread_info *thread = current_thread_info(); + int used_math = current->used_math; + + if (used_math) + memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); + + return used_math; +} + +/* + * fill in the user structure for a core dump.. 
+ */ +void dump_thread(struct pt_regs * regs, struct user * dump) +{ + struct task_struct *tsk = current; + + dump->magic = CMAGIC; + dump->start_code = tsk->mm->start_code; + dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); + + dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; + dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; + dump->u_ssize = 0; + + dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; + dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; + dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn; + dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn; + dump->u_debugreg[4] = tsk->thread.debug.nsaved; + + if (dump->start_stack < 0x04000000) + dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; + + dump->regs = *regs; + dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); +} + +/* + * This is the mechanism for creating a new kernel thread. + * + * NOTE! Only a kernel-only process(ie the swapper or direct descendants + * who haven't done an "execve()") should use this: it will work within + * a system call from a "real" process, but the process memory space will + * not be free'd until both the parent and the child have exited. + * FIXME - taken from arm32 + */ +pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) +{ + register unsigned int r0 asm("r0") = flags | CLONE_VM | CLONE_UNTRACED; + register unsigned int r1 asm("r1") = 0; + register pid_t __ret asm("r0"); + + __asm__ __volatile__( + __syscall(clone)" @ kernel_thread sys_clone \n\ + movs %0, r0 @ if we are the child \n\ + bne 1f \n\ + mov fp, #0 @ ensure that fp is zero \n\ + mov r0, %4 \n\ + mov lr, pc \n\ + mov pc, %3 \n\ + b sys_exit \n\ +1: " + : "=r" (__ret) + : "0" (r0), "r" (r1), "r" (fn), "r" (arg) + : "lr"); + return __ret; +} + +/* + * These bracket the sleeping functions.. + */ +extern void scheduling_functions_start_here(void); +extern void scheduling_functions_end_here(void); +#define first_sched ((unsigned long) scheduling_functions_start_here) +#define last_sched ((unsigned long) scheduling_functions_end_here) + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long fp, lr; + unsigned long stack_page; + int count = 0; + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + stack_page = 4096 + (unsigned long)p; + fp = thread_saved_fp(p); + do { + if (fp < stack_page || fp > 4092+stack_page) + return 0; + lr = pc_pointer (((unsigned long *)fp)[-1]); + if (lr < first_sched || lr > last_sched) + return lr; + fp = *(unsigned long *) (fp - 12); + } while (count ++ < 16); + return 0; +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/ptrace.c linux-2.5.70-bk14/arch/arm26/kernel/ptrace.c --- linux-2.5.70-bk13/arch/arm26/kernel/ptrace.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/ptrace.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,747 @@ +/* + * linux/arch/arm26/kernel/ptrace.c + * + * By Ross Biro 1/23/92 + * edited by Linus Torvalds + * ARM modifications Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +//#include + +#include "ptrace.h" + +#define REG_PC 15 +#define REG_PSR 15 +/* + * does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. 
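 *
 * (On 26-bit ARM the condition flags and mode bits share R15 with the
 * program counter, which is why REG_PC and REG_PSR above are both 15.)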
+ */ + +/* + * Breakpoint SWI instruction: SWI &9F0001 + */ +#define BREAKINST_ARM 0xef9f0001 + +/* + * Get the address of the live pt_regs for the specified task. + * These are saved onto the top kernel stack when the process + * is not running. + * + * Note: if a user thread is execve'd from kernel space, the + * kernel stack will not be empty on entry to the kernel, so + * ptracing these tasks will fail. + */ +static inline struct pt_regs * +get_user_regs(struct task_struct *task) +{ + return __get_user_regs(task->thread_info); +} + +/* + * this routine will get a word off of the processes privileged stack. + * the offset is how far from the base addr as stored in the THREAD. + * this routine assumes that all the privileged stacks are in our + * data space. + */ +static inline long get_user_reg(struct task_struct *task, int offset) +{ + return get_user_regs(task)->uregs[offset]; +} + +/* + * this routine will put a word on the processes privileged stack. + * the offset is how far from the base addr as stored in the THREAD. + * this routine assumes that all the privileged stacks are in our + * data space. + */ +static inline int +put_user_reg(struct task_struct *task, int offset, long data) +{ + struct pt_regs newregs, *regs = get_user_regs(task); + int ret = -EINVAL; + + newregs = *regs; + newregs.uregs[offset] = data; + + if (valid_user_regs(&newregs)) { + regs->uregs[offset] = data; + ret = 0; + } + + return ret; +} + +static inline int +read_u32(struct task_struct *task, unsigned long addr, u32 *res) +{ + int ret; + + ret = access_process_vm(task, addr, res, sizeof(*res), 0); + + return ret == sizeof(*res) ? 0 : -EIO; +} + +static inline int +read_instr(struct task_struct *task, unsigned long addr, u32 *res) +{ + int ret; + u32 val; + ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0); + ret = ret == sizeof(val) ? 0 : -EIO; + *res = val; + return ret; +} + +/* + * Get value of register `rn' (in the instruction) + */ +static unsigned long +ptrace_getrn(struct task_struct *child, unsigned long insn) +{ + unsigned int reg = (insn >> 16) & 15; + unsigned long val; + + val = get_user_reg(child, reg); + if (reg == 15) + val = pc_pointer(val + 8); //FIXME - correct for arm26? 
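	/*
	 * Illustrative note: an instruction that reads the PC observes its
	 * own address plus 8 (two instructions ahead, a pipeline effect),
	 * and pc_pointer() strips the PSR/mode bits that share R15 on
	 * 26-bit ARM.  So for an instruction at 0x8000 with rn == 15, the
	 * value used in the address calculation is 0x8008.
	 */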
+ + return val; +} + +/* + * Get value of operand 2 (in an ALU instruction) + */ +static unsigned long +ptrace_getaluop2(struct task_struct *child, unsigned long insn) +{ + unsigned long val; + int shift; + int type; + + if (insn & 1 << 25) { + val = insn & 255; + shift = (insn >> 8) & 15; + type = 3; + } else { + val = get_user_reg (child, insn & 15); + + if (insn & (1 << 4)) + shift = (int)get_user_reg (child, (insn >> 8) & 15); + else + shift = (insn >> 7) & 31; + + type = (insn >> 5) & 3; + } + + switch (type) { + case 0: val <<= shift; break; + case 1: val >>= shift; break; + case 2: + val = (((signed long)val) >> shift); + break; + case 3: + val = (val >> shift) | (val << (32 - shift)); + break; + } + return val; +} + +/* + * Get value of operand 2 (in a LDR instruction) + */ +static unsigned long +ptrace_getldrop2(struct task_struct *child, unsigned long insn) +{ + unsigned long val; + int shift; + int type; + + val = get_user_reg(child, insn & 15); + shift = (insn >> 7) & 31; + type = (insn >> 5) & 3; + + switch (type) { + case 0: val <<= shift; break; + case 1: val >>= shift; break; + case 2: + val = (((signed long)val) >> shift); + break; + case 3: + val = (val >> shift) | (val << (32 - shift)); + break; + } + return val; +} + +#define OP_MASK 0x01e00000 +#define OP_AND 0x00000000 +#define OP_EOR 0x00200000 +#define OP_SUB 0x00400000 +#define OP_RSB 0x00600000 +#define OP_ADD 0x00800000 +#define OP_ADC 0x00a00000 +#define OP_SBC 0x00c00000 +#define OP_RSC 0x00e00000 +#define OP_ORR 0x01800000 +#define OP_MOV 0x01a00000 +#define OP_BIC 0x01c00000 +#define OP_MVN 0x01e00000 + +static unsigned long +get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn) +{ + u32 alt = 0; + + switch (insn & 0x0e000000) { + case 0x00000000: + case 0x02000000: { + /* + * data processing + */ + long aluop1, aluop2, ccbit; + + if ((insn & 0xf000) != 0xf000) + break; + + aluop1 = ptrace_getrn(child, insn); + aluop2 = ptrace_getaluop2(child, insn); + ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 
1 : 0; + + switch (insn & OP_MASK) { + case OP_AND: alt = aluop1 & aluop2; break; + case OP_EOR: alt = aluop1 ^ aluop2; break; + case OP_SUB: alt = aluop1 - aluop2; break; + case OP_RSB: alt = aluop2 - aluop1; break; + case OP_ADD: alt = aluop1 + aluop2; break; + case OP_ADC: alt = aluop1 + aluop2 + ccbit; break; + case OP_SBC: alt = aluop1 - aluop2 + ccbit; break; + case OP_RSC: alt = aluop2 - aluop1 + ccbit; break; + case OP_ORR: alt = aluop1 | aluop2; break; + case OP_MOV: alt = aluop2; break; + case OP_BIC: alt = aluop1 & ~aluop2; break; + case OP_MVN: alt = ~aluop2; break; + } + break; + } + + case 0x04000000: + case 0x06000000: + /* + * ldr + */ + if ((insn & 0x0010f000) == 0x0010f000) { + unsigned long base; + + base = ptrace_getrn(child, insn); + if (insn & 1 << 24) { + long aluop2; + + if (insn & 0x02000000) + aluop2 = ptrace_getldrop2(child, insn); + else + aluop2 = insn & 0xfff; + + if (insn & 1 << 23) + base += aluop2; + else + base -= aluop2; + } + if (read_u32(child, base, &alt) == 0) + alt = pc_pointer(alt); + } + break; + + case 0x08000000: + /* + * ldm + */ + if ((insn & 0x00108000) == 0x00108000) { + unsigned long base; + unsigned int nr_regs; + + if (insn & (1 << 23)) { + nr_regs = hweight16(insn & 65535) << 2; + + if (!(insn & (1 << 24))) + nr_regs -= 4; + } else { + if (insn & (1 << 24)) + nr_regs = -4; + else + nr_regs = 0; + } + + base = ptrace_getrn(child, insn); + + if (read_u32(child, base + nr_regs, &alt) == 0) + alt = pc_pointer(alt); + break; + } + break; + + case 0x0a000000: { + /* + * bl or b + */ + signed long displ; + /* It's a branch/branch link: instead of trying to + * figure out whether the branch will be taken or not, + * we'll put a breakpoint at both locations. This is + * simpler, more reliable, and probably not a whole lot + * slower than the alternative approach of emulating the + * branch. + */ + displ = (insn & 0x00ffffff) << 8; + displ = (displ >> 6) + 8; + if (displ != 0 && displ != 4) + alt = pc + displ; + } + break; + } + + return alt; +} + +static int +swap_insn(struct task_struct *task, unsigned long addr, + void *old_insn, void *new_insn, int size) +{ + int ret; + + ret = access_process_vm(task, addr, old_insn, size, 0); + if (ret == size) + ret = access_process_vm(task, addr, new_insn, size, 1); + return ret; +} + +static void +add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr) +{ + int nr = dbg->nsaved; + + if (nr < 2) { + u32 new_insn = BREAKINST_ARM; + int res; + + res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4); + + if (res == 4) { + dbg->bp[nr].address = addr; + dbg->nsaved += 1; + } + } else + printk(KERN_ERR "ptrace: too many breakpoints\n"); +} + +/* + * Clear one breakpoint in the user program. We copy what the hardware + * does and use bit 0 of the address to indicate whether this is a Thumb + * breakpoint or an ARM breakpoint. 
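 * (ARM26 has no Thumb state, so only the ARM case arises here; the bit-0
 * convention is carried over from the 32-bit ARM version of this code.)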
+ */ +static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp) +{ + unsigned long addr = bp->address; + u32 old_insn; + int ret; + + ret = swap_insn(task, addr & ~3, &old_insn, + &bp->insn, 4); + + if (ret != 4 || old_insn != BREAKINST_ARM) + printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at " + "0x%08lx (0x%08x)\n", task->comm, task->pid, + addr, old_insn); +} + +void ptrace_set_bpt(struct task_struct *child) +{ + struct pt_regs *regs; + unsigned long pc; + u32 insn; + int res; + + regs = get_user_regs(child); + pc = instruction_pointer(regs); + + res = read_instr(child, pc, &insn); + if (!res) { + struct debug_info *dbg = &child->thread.debug; + unsigned long alt; + + dbg->nsaved = 0; + + alt = get_branch_address(child, pc, insn); + if (alt) + add_breakpoint(child, dbg, alt); + + /* + * Note that we ignore the result of setting the above + * breakpoint since it may fail. When it does, this is + * not so much an error, but a forewarning that we may + * be receiving a prefetch abort shortly. + * + * If we don't set this breakpoint here, then we can + * lose control of the thread during single stepping. + */ + if (!alt || predicate(insn) != PREDICATE_ALWAYS) + add_breakpoint(child, dbg, pc + 4); + } +} + +/* + * Ensure no single-step breakpoint is pending. Returns non-zero + * value if child was being single-stepped. + */ +void ptrace_cancel_bpt(struct task_struct *child) +{ + int i, nsaved = child->thread.debug.nsaved; + + child->thread.debug.nsaved = 0; + + if (nsaved > 2) { + printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); + nsaved = 2; + } + + for (i = 0; i < nsaved; i++) + clear_breakpoint(child, &child->thread.debug.bp[i]); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + child->ptrace &= ~PT_SINGLESTEP; + ptrace_cancel_bpt(child); +} + +/* + * Handle hitting a breakpoint. + */ +void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) +{ + siginfo_t info; + + /* + * The PC is always left pointing at the next instruction. Fix this. + */ + regs->ARM_pc -= 4; + + if (tsk->thread.debug.nsaved == 0) + printk(KERN_ERR "ptrace: bogus breakpoint trap\n"); + + ptrace_cancel_bpt(tsk); + + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_BRKPT; + info.si_addr = (void *)instruction_pointer(regs) - 4; + + force_sig_info(SIGTRAP, &info, tsk); +} + +/* + * Read the word at offset "off" into the "struct user". We + * actually access the pt_regs stored on the kernel stack. + */ +static int ptrace_read_user(struct task_struct *tsk, unsigned long off, + unsigned long *ret) +{ + unsigned long tmp; + + if (off & 3 || off >= sizeof(struct user)) + return -EIO; + + tmp = 0; + if (off < sizeof(struct pt_regs)) + tmp = get_user_reg(tsk, off >> 2); + + return put_user(tmp, ret); +} + +/* + * Write the word at offset "off" into "struct user". We + * actually access the pt_regs stored on the kernel stack. + */ +static int ptrace_write_user(struct task_struct *tsk, unsigned long off, + unsigned long val) +{ + if (off & 3 || off >= sizeof(struct user)) + return -EIO; + + if (off >= sizeof(struct pt_regs)) + return 0; + + return put_user_reg(tsk, off >> 2, val); +} + +/* + * Get all user integer registers. + */ +static int ptrace_getregs(struct task_struct *tsk, void *uregs) +{ + struct pt_regs *regs = get_user_regs(tsk); + + return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; +} + +/* + * Set all user integer registers. 
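 *
 * From the tracing process this is reached via the PTRACE_SETREGS request,
 * roughly (illustrative only):
 *
 *	struct pt_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, 0, &regs);
 *	regs.ARM_r0 = 42;
 *	ptrace(PTRACE_SETREGS, pid, 0, &regs);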
+ */ +static int ptrace_setregs(struct task_struct *tsk, void *uregs) +{ + struct pt_regs newregs; + int ret; + + ret = -EFAULT; + if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { + struct pt_regs *regs = get_user_regs(tsk); + + ret = -EINVAL; + if (valid_user_regs(&newregs)) { + *regs = newregs; + ret = 0; + } + } + + return ret; +} + +/* + * Get the child FPU state. + */ +static int ptrace_getfpregs(struct task_struct *tsk, void *ufp) +{ + return copy_to_user(ufp, &tsk->thread_info->fpstate, + sizeof(struct user_fp)) ? -EFAULT : 0; +} + +/* + * Set the child FPU state. + */ +static int ptrace_setfpregs(struct task_struct *tsk, void *ufp) +{ + tsk->used_math = 1; + return copy_from_user(&tsk->thread_info->fpstate, ufp, + sizeof(struct user_fp)) ? -EFAULT : 0; +} + +static int do_ptrace(int request, struct task_struct *child, long addr, long data) +{ + unsigned long tmp; + int ret; + + switch (request) { + /* + * read word at location "addr" in the child process. + */ + case PTRACE_PEEKTEXT: + case PTRACE_PEEKDATA: + ret = access_process_vm(child, addr, &tmp, + sizeof(unsigned long), 0); + if (ret == sizeof(unsigned long)) + ret = put_user(tmp, (unsigned long *) data); + else + ret = -EIO; + break; + + case PTRACE_PEEKUSR: + ret = ptrace_read_user(child, addr, (unsigned long *)data); + break; + + /* + * write the word at location addr. + */ + case PTRACE_POKETEXT: + case PTRACE_POKEDATA: + ret = access_process_vm(child, addr, &data, + sizeof(unsigned long), 1); + if (ret == sizeof(unsigned long)) + ret = 0; + else + ret = -EIO; + break; + + case PTRACE_POKEUSR: + ret = ptrace_write_user(child, addr, data); + break; + + /* + * continue/restart and stop at next (return from) syscall + */ + case PTRACE_SYSCALL: + case PTRACE_CONT: + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + child->exit_code = data; + /* make sure single-step breakpoint is gone. */ + child->ptrace &= ~PT_SINGLESTEP; + ptrace_cancel_bpt(child); + wake_up_process(child); + ret = 0; + break; + + /* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: + /* make sure single-step breakpoint is gone. */ + child->ptrace &= ~PT_SINGLESTEP; + ptrace_cancel_bpt(child); + if (child->state != TASK_ZOMBIE) { + child->exit_code = SIGKILL; + wake_up_process(child); + } + ret = 0; + break; + + /* + * execute single instruction. + */ + case PTRACE_SINGLESTEP: + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + child->ptrace |= PT_SINGLESTEP; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + child->exit_code = data; + /* give it a chance to run. 
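	   Single-stepping is emulated in software: with PT_SINGLESTEP set,
	   ptrace_set_bpt() plants up to two breakpoints (the following
	   instruction and, for branches, the computed target) when the
	   child is resumed, and ptrace_cancel_bpt() removes them once it
	   stops again.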
*/ + wake_up_process(child); + ret = 0; + break; + + case PTRACE_DETACH: + ret = ptrace_detach(child, data); + break; + + case PTRACE_GETREGS: + ret = ptrace_getregs(child, (void *)data); + break; + + case PTRACE_SETREGS: + ret = ptrace_setregs(child, (void *)data); + break; + + case PTRACE_GETFPREGS: + ret = ptrace_getfpregs(child, (void *)data); + break; + + case PTRACE_SETFPREGS: + ret = ptrace_setfpregs(child, (void *)data); + break; + + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage int sys_ptrace(long request, long pid, long addr, long data) +{ + struct task_struct *child; + int ret; + + lock_kernel(); + ret = -EPERM; + if (request == PTRACE_TRACEME) { + /* are we already being traced? */ + if (current->ptrace & PT_PTRACED) + goto out; + ret = security_ptrace(current->parent, current); + if (ret) + goto out; + /* set the ptrace bit in the process flags. */ + current->ptrace |= PT_PTRACED; + ret = 0; + goto out; + } + ret = -ESRCH; + read_lock(&tasklist_lock); + child = find_task_by_pid(pid); + if (child) + get_task_struct(child); + read_unlock(&tasklist_lock); + if (!child) + goto out; + + ret = -EPERM; + if (pid == 1) /* you may not mess with init */ + goto out_tsk; + + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + goto out_tsk; + } + ret = ptrace_check_attach(child, request == PTRACE_KILL); + if (ret == 0) + ret = do_ptrace(request, child, addr, data); + +out_tsk: + put_task_struct(child); +out: + unlock_kernel(); + return ret; +} + +asmlinkage void syscall_trace(int why, struct pt_regs *regs) +{ + unsigned long ip; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + if (!(current->ptrace & PT_PTRACED)) + return; + + /* + * Save IP. IP is used to denote syscall entry/exit: + * IP = 0 -> entry, = 1 -> exit + */ + ip = regs->ARM_ip; + regs->ARM_ip = why; + + /* the 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery */ + current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 0x80 : 0); + current->state = TASK_STOPPED; + notify_parent(current, SIGCHLD); + schedule(); + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } + regs->ARM_ip = ip; +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/ptrace.h linux-2.5.70-bk14/arch/arm26/kernel/ptrace.h --- linux-2.5.70-bk13/arch/arm26/kernel/ptrace.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/ptrace.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,13 @@ +/* + * linux/arch/arm26/kernel/ptrace.h + * + * Copyright (C) 2000-2003 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +extern void ptrace_cancel_bpt(struct task_struct *); +extern void ptrace_set_bpt(struct task_struct *); +extern void ptrace_break(struct task_struct *, struct pt_regs *); diff -urN linux-2.5.70-bk13/arch/arm26/kernel/semaphore.c linux-2.5.70-bk14/arch/arm26/kernel/semaphore.c --- linux-2.5.70-bk13/arch/arm26/kernel/semaphore.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/semaphore.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,215 @@ +/* + * ARM semaphore implementation, taken from + * + * i386 semaphore implementation. + * + * (C) Copyright 1999 Linus Torvalds + * (C) Copyright 2003 Ian Molton (ARM26 mods) + * + * Modified for ARM by Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +#include + +/* + * Semaphores are implemented using a two-way counter: + * The "count" variable is decremented for each process + * that tries to acquire the semaphore, while the "sleeping" + * variable is a count of such acquires. + * + * Notably, the inline "up()" and "down()" functions can + * efficiently test if they need to do any extra work (up + * needs to do something only if count was negative before + * the increment operation. + * + * "sleeping" and the contention routine ordering is + * protected by the semaphore spinlock. + * + * Note that these functions are only called when there is + * contention on the lock, and as such all this is the + * "non-critical" part of the whole semaphore business. The + * critical part is the inline stuff in + * where we want to avoid any extra jumps and calls. + */ + +/* + * Logic: + * - only on a boundary condition do we need to care. When we go + * from a negative count to a non-negative, we wake people up. + * - when we go from a non-negative count to a negative do we + * (a) synchronize with the "sleeper" count and (b) make sure + * that we're on the wakeup list before we synchronize so that + * we cannot lose wakeup events. + */ + +void __up(struct semaphore *sem) +{ + wake_up(&sem->wait); +} + +static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED; + +void __down(struct semaphore * sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + tsk->state = TASK_UNINTERRUPTIBLE; + add_wait_queue_exclusive(&sem->wait, &wait); + + spin_lock_irq(&semaphore_lock); + sem->sleepers++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * Add "everybody else" into it. They aren't + * playing, because we own the spinlock. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irq(&semaphore_lock); + + schedule(); + tsk->state = TASK_UNINTERRUPTIBLE; + spin_lock_irq(&semaphore_lock); + } + spin_unlock_irq(&semaphore_lock); + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + wake_up(&sem->wait); +} + +int __down_interruptible(struct semaphore * sem) +{ + int retval = 0; + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + tsk->state = TASK_INTERRUPTIBLE; + add_wait_queue_exclusive(&sem->wait, &wait); + + spin_lock_irq(&semaphore_lock); + sem->sleepers ++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * With signals pending, this turns into + * the trylock failure case - we won't be + * sleeping, and we* can't get the lock as + * it has contention. 
Just correct the count + * and exit. + */ + if (signal_pending(current)) { + retval = -EINTR; + sem->sleepers = 0; + atomic_add(sleepers, &sem->count); + break; + } + + /* + * Add "everybody else" into it. They aren't + * playing, because we own the spinlock. The + * "-1" is because we're still hoping to get + * the lock. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irq(&semaphore_lock); + + schedule(); + tsk->state = TASK_INTERRUPTIBLE; + spin_lock_irq(&semaphore_lock); + } + spin_unlock_irq(&semaphore_lock); + tsk->state = TASK_RUNNING; + remove_wait_queue(&sem->wait, &wait); + wake_up(&sem->wait); + return retval; +} + +/* + * Trylock failed - make sure we correct for + * having decremented the count. + * + * We could have done the trylock with a + * single "cmpxchg" without failure cases, + * but then it wouldn't work on a 386. + */ +int __down_trylock(struct semaphore * sem) +{ + int sleepers; + unsigned long flags; + + spin_lock_irqsave(&semaphore_lock, flags); + sleepers = sem->sleepers + 1; + sem->sleepers = 0; + + /* + * Add "everybody else" and us into it. They aren't + * playing, because we own the spinlock. + */ + if (!atomic_add_negative(sleepers, &sem->count)) + wake_up(&sem->wait); + + spin_unlock_irqrestore(&semaphore_lock, flags); + return 1; +} + +/* + * The semaphore operations have a special calling sequence that + * allow us to do a simpler in-line version of them. These routines + * need to convert that sequence back into the C sequence when + * there is contention on the semaphore. + * + * ip contains the semaphore pointer on entry. Save the C-clobbered + * registers (r0 to r3 and lr), but not ip, as we use it as a return + * value in some cases.. + */ +asm(" .align 5 \n\ + .globl __down_failed \n\ +__down_failed: \n\ + stmfd sp!, {r0 - r3, lr} \n\ + mov r0, ip \n\ + bl __down \n\ + ldmfd sp!, {r0 - r3, pc}^ \n\ + \n\ + .align 5 \n\ + .globl __down_interruptible_failed \n\ +__down_interruptible_failed: \n\ + stmfd sp!, {r0 - r3, lr} \n\ + mov r0, ip \n\ + bl __down_interruptible \n\ + mov ip, r0 \n\ + ldmfd sp!, {r0 - r3, pc}^ \n\ + \n\ + .align 5 \n\ + .globl __down_trylock_failed \n\ +__down_trylock_failed: \n\ + stmfd sp!, {r0 - r3, lr} \n\ + mov r0, ip \n\ + bl __down_trylock \n\ + mov ip, r0 \n\ + ldmfd sp!, {r0 - r3, pc}^ \n\ + \n\ + .align 5 \n\ + .globl __up_wakeup \n\ +__up_wakeup: \n\ + stmfd sp!, {r0 - r3, lr} \n\ + mov r0, ip \n\ + bl __up \n\ + ldmfd sp!, {r0 - r3, pc}^ \n\ + "); + diff -urN linux-2.5.70-bk13/arch/arm26/kernel/setup.c linux-2.5.70-bk14/arch/arm26/kernel/setup.c --- linux-2.5.70-bk13/arch/arm26/kernel/setup.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/setup.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,581 @@ +/* + * linux/arch/arm/kernel/setup.c + * + * Copyright (C) 1995-2001 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifndef MEM_SIZE +#define MEM_SIZE (16*1024*1024) +#endif + +#ifdef CONFIG_PREEMPT +spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED; +#endif + +#if defined(CONFIG_FPE_NWFPE) +char fpe_type[8]; + +static int __init fpe_setup(char *line) +{ + memcpy(fpe_type, line, 8); + return 1; +} + +__setup("fpe=", fpe_setup); +#endif + +extern void paging_init(struct meminfo *); +extern void convert_to_tag_list(struct tag *tags); +extern void squash_mem_tags(struct tag *tag); +extern void bootmem_init(struct meminfo *); +extern int root_mountflags; +extern int _stext, _text, _etext, _edata, _end; + +unsigned int processor_id; +unsigned int __machine_arch_type; +unsigned int system_rev; +unsigned int system_serial_low; +unsigned int system_serial_high; +unsigned int elf_hwcap; + +struct processor processor; + +unsigned char aux_device_present; +char elf_platform[ELF_PLATFORM_SIZE]; +char saved_command_line[COMMAND_LINE_SIZE]; + +unsigned long phys_initrd_start __initdata = 0; +unsigned long phys_initrd_size __initdata = 0; +static struct meminfo meminfo __initdata = { 0, }; +static struct proc_info_item proc_info; +static const char *machine_name; +static char command_line[COMMAND_LINE_SIZE]; + +static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; + +/* + * Standard memory resources + */ +static struct resource mem_res[] = { + { "Video RAM", 0, 0, IORESOURCE_MEM }, + { "Kernel code", 0, 0, IORESOURCE_MEM }, + { "Kernel data", 0, 0, IORESOURCE_MEM } +}; + +#define video_ram mem_res[0] +#define kernel_code mem_res[1] +#define kernel_data mem_res[2] + +static struct resource io_res[] = { + { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY }, + { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY }, + { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY } +}; + +#define lp0 io_res[0] +#define lp1 io_res[1] +#define lp2 io_res[2] + +#define dump_cpu_info() do { } while (0) + +static void __init setup_processor(void) +{ + extern struct proc_info_list __proc_info_begin, __proc_info_end; + struct proc_info_list *list; + + /* + * locate processor in the list of supported processor + * types. The linker builds this table for us from the + * entries in arch/arm/mm/proc-*.S + */ + for (list = &__proc_info_begin; list < &__proc_info_end ; list++) + if ((processor_id & list->cpu_mask) == list->cpu_val) + break; + /* + * If processor type is unrecognised, then we + * can do nothing... + */ + if (list >= &__proc_info_end) { + printk("CPU configuration botched (ID %08x), unable " + "to continue.\n", processor_id); + while (1); + } + + proc_info = *list->info; + processor = *list->proc; + + + printk("CPU: %s %s revision %d\n", + proc_info.manufacturer, proc_info.cpu_name, + (int)processor_id & 15); + + dump_cpu_info(); + + sprintf(system_utsname.machine, "%s", list->arch_name); + sprintf(elf_platform, "%s", list->elf_name); + elf_hwcap = list->elf_hwcap; + + cpu_proc_init(); +} + +static struct machine_desc * __init setup_machine(unsigned int nr) +{ + extern struct machine_desc __arch_info_begin, __arch_info_end; + struct machine_desc *list; + + /* + * locate architecture in the list of supported architectures. 
+ */ + for (list = &__arch_info_begin; list < &__arch_info_end; list++) + if (list->nr == nr) + break; + + /* + * If the architecture type is not recognised, then we + * can co nothing... + */ + if (list >= &__arch_info_end) { + printk("Architecture configuration botched (nr %d), unable " + "to continue.\n", nr); + while (1); + } + + printk("Machine: %s\n", list->name); + + return list; +} + +/* + * Initial parsing of the command line. We need to pick out the + * memory size. We look for mem=size@start, where start and size + * are "size[KkMm]" + */ +static void __init +parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from) +{ + char c = ' ', *to = command_line; + int usermem = 0, len = 0; + + for (;;) { + if (c == ' ' && !memcmp(from, "mem=", 4)) { + unsigned long size, start; + + if (to != command_line) + to -= 1; + + /* + * If the user specifies memory size, we + * blow away any automatically generated + * size. + */ + if (usermem == 0) { + usermem = 1; + mi->nr_banks = 0; + } + + start = PHYS_OFFSET; + size = memparse(from + 4, &from); + if (*from == '@') + start = memparse(from + 1, &from); + + mi->bank[mi->nr_banks].start = start; + mi->bank[mi->nr_banks].size = size; + mi->bank[mi->nr_banks].node = PHYS_TO_NID(start); + mi->nr_banks += 1; + } + c = *from++; + if (!c) + break; + if (COMMAND_LINE_SIZE <= ++len) + break; + *to++ = c; + } + *to = '\0'; + *cmdline_p = command_line; +} + +static void __init +setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) +{ +#ifdef CONFIG_BLK_DEV_RAM + extern int rd_size, rd_image_start, rd_prompt, rd_doload; + + rd_image_start = image_start; + rd_prompt = prompt; + rd_doload = doload; + + if (rd_sz) + rd_size = rd_sz; +#endif +} + +static void __init +request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc) +{ + struct resource *res; + int i; + + kernel_code.start = init_mm.start_code; + kernel_code.end = init_mm.end_code - 1; + kernel_data.start = init_mm.end_code; + kernel_data.end = init_mm.brk - 1; + + for (i = 0; i < mi->nr_banks; i++) { + unsigned long virt_start, virt_end; + + if (mi->bank[i].size == 0) + continue; + + virt_start = mi->bank[i].start; + virt_end = virt_start + mi->bank[i].size - 1; + + res = alloc_bootmem_low(sizeof(*res)); + res->name = "System RAM"; + res->start = virt_start; + res->end = virt_end; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + request_resource(&iomem_resource, res); + + if (kernel_code.start >= res->start && + kernel_code.end <= res->end) + request_resource(res, &kernel_code); + if (kernel_data.start >= res->start && + kernel_data.end <= res->end) + request_resource(res, &kernel_data); + } + + if (mdesc->video_start) { + video_ram.start = mdesc->video_start; + video_ram.end = mdesc->video_end; + request_resource(&iomem_resource, &video_ram); + } + + /* + * Some machines don't have the possibility of ever + * possessing lp0, lp1 or lp2 + */ + if (mdesc->reserve_lp0) + request_resource(&ioport_resource, &lp0); + if (mdesc->reserve_lp1) + request_resource(&ioport_resource, &lp1); + if (mdesc->reserve_lp2) + request_resource(&ioport_resource, &lp2); +} + +/* + * Tag parsing. + * + * This is the new way of passing data to the kernel at boot time. Rather + * than passing a fixed inflexible structure to the kernel, we pass a list + * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE + * tag for the list to be recognised (to distinguish the tagged list from + * a param_struct). 
The list is terminated with a zero-length tag (this tag + * is not parsed in any way). + */ +static int __init parse_tag_core(const struct tag *tag) +{ + if (tag->hdr.size > 2) { + if ((tag->u.core.flags & 1) == 0) + root_mountflags &= ~MS_RDONLY; + ROOT_DEV = tag->u.core.rootdev; + } + return 0; +} + +__tagtable(ATAG_CORE, parse_tag_core); + +static int __init parse_tag_mem32(const struct tag *tag) +{ + if (meminfo.nr_banks >= NR_BANKS) { + printk(KERN_WARNING + "Ignoring memory bank 0x%08x size %dKB\n", + tag->u.mem.start, tag->u.mem.size / 1024); + return -EINVAL; + } + meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start; + meminfo.bank[meminfo.nr_banks].size = tag->u.mem.size; + meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(tag->u.mem.start); + meminfo.nr_banks += 1; + + return 0; +} + +__tagtable(ATAG_MEM, parse_tag_mem32); + +#if defined(CONFIG_DUMMY_CONSOLE) +struct screen_info screen_info = { + orig_video_lines: 30, + orig_video_cols: 80, + orig_video_mode: 0, + orig_video_ega_bx: 0, + orig_video_isVGA: 1, + orig_video_points: 8 +}; + +static int __init parse_tag_videotext(const struct tag *tag) +{ + screen_info.orig_x = tag->u.videotext.x; + screen_info.orig_y = tag->u.videotext.y; + screen_info.orig_video_page = tag->u.videotext.video_page; + screen_info.orig_video_mode = tag->u.videotext.video_mode; + screen_info.orig_video_cols = tag->u.videotext.video_cols; + screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx; + screen_info.orig_video_lines = tag->u.videotext.video_lines; + screen_info.orig_video_isVGA = tag->u.videotext.video_isvga; + screen_info.orig_video_points = tag->u.videotext.video_points; + return 0; +} + +__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext); +#endif + +static int __init parse_tag_ramdisk(const struct tag *tag) +{ + setup_ramdisk((tag->u.ramdisk.flags & 1) == 0, + (tag->u.ramdisk.flags & 2) == 0, + tag->u.ramdisk.start, tag->u.ramdisk.size); + return 0; +} + +__tagtable(ATAG_RAMDISK, parse_tag_ramdisk); + +static int __init parse_tag_initrd(const struct tag *tag) +{ + printk(KERN_WARNING "ATAG_INITRD is deprecated; please update your bootloader. \n"); + phys_initrd_start = (unsigned long)tag->u.initrd.start; + phys_initrd_size = (unsigned long)tag->u.initrd.size; + return 0; +} + +__tagtable(ATAG_INITRD, parse_tag_initrd); + +static int __init parse_tag_initrd2(const struct tag *tag) +{ + printk(KERN_WARNING "ATAG_INITRD is deprecated; please update your bootloader. \n"); + phys_initrd_start = (unsigned long)tag->u.initrd.start; + phys_initrd_size = (unsigned long)tag->u.initrd.size; + return 0; +} + +__tagtable(ATAG_INITRD2, parse_tag_initrd2); + +static int __init parse_tag_serialnr(const struct tag *tag) +{ + system_serial_low = tag->u.serialnr.low; + system_serial_high = tag->u.serialnr.high; + return 0; +} + +__tagtable(ATAG_SERIAL, parse_tag_serialnr); + +static int __init parse_tag_revision(const struct tag *tag) +{ + system_rev = tag->u.revision.rev; + return 0; +} + +__tagtable(ATAG_REVISION, parse_tag_revision); + +static int __init parse_tag_cmdline(const struct tag *tag) +{ + strncpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); + default_command_line[COMMAND_LINE_SIZE - 1] = '\0'; + return 0; +} + +__tagtable(ATAG_CMDLINE, parse_tag_cmdline); + +/* + * Scan the tag table for this tag, and call its parse function. + * The tag table is built by the linker from all the __tagtable + * declarations. 
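The __tagtable() declarations above rely on the usual linker-section trick: each use drops a small record into a dedicated section, and the linker script provides begin/end symbols around it. A sketch of what the macro plausibly expands to (the real definition lives in the ARM setup headers; the struct name, attribute list and section name below are assumptions made for illustration):

struct tagtable_sketch {
        u32 tag;
        int (*parse)(const struct tag *);
};

/* Hypothetical expansion: one static entry per parser, collected by the
 * linker between __tagtable_begin and __tagtable_end, which is exactly
 * the range parse_tag() walks below. */
#define __tagtable_sketch(tag, fn) \
        static const struct tagtable_sketch __tag_##fn \
        __attribute__((used, section(".taglist"))) = { tag, fn }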
+ */ +static int __init parse_tag(const struct tag *tag) +{ + extern struct tagtable __tagtable_begin, __tagtable_end; + struct tagtable *t; + + for (t = &__tagtable_begin; t < &__tagtable_end; t++) + if (tag->hdr.tag == t->tag) { + t->parse(tag); + break; + } + + return t < &__tagtable_end; +} + +/* + * Parse all tags in the list, checking both the global and architecture + * specific tag tables. + */ +static void __init parse_tags(const struct tag *t) +{ + for (; t->hdr.size; t = tag_next(t)) + if (!parse_tag(t)) + printk(KERN_WARNING + "Ignoring unrecognised tag 0x%08x\n", + t->hdr.tag); +} + +/* + * This holds our defaults. + */ +static struct init_tags { + struct tag_header hdr1; + struct tag_core core; + struct tag_header hdr2; + struct tag_mem32 mem; + struct tag_header hdr3; +} init_tags __initdata = { + { tag_size(tag_core), ATAG_CORE }, + { 1, PAGE_SIZE, 0xff }, + { tag_size(tag_mem32), ATAG_MEM }, + { MEM_SIZE, PHYS_OFFSET }, + { 0, ATAG_NONE } +}; + +void __init setup_arch(char **cmdline_p) +{ + struct tag *tags = (struct tag *)&init_tags; + struct machine_desc *mdesc; + char *from = default_command_line; + + setup_processor(); + mdesc = setup_machine(machine_arch_type); + machine_name = mdesc->name; + + if (mdesc->param_offset) + tags = (struct tag *)mdesc->param_offset; //FIXME - ugly? + + /* + * If we have the old style parameters, convert them to + * a tag list. + */ + if (tags->hdr.tag != ATAG_CORE) + convert_to_tag_list(tags); + if (tags->hdr.tag != ATAG_CORE) + tags = (struct tag *)&init_tags; + if (tags->hdr.tag == ATAG_CORE) { + if (meminfo.nr_banks != 0) + squash_mem_tags(tags); + parse_tags(tags); + } + + init_mm.start_code = (unsigned long) &_text; + init_mm.end_code = (unsigned long) &_etext; + init_mm.end_data = (unsigned long) &_edata; + init_mm.brk = (unsigned long) &_end; + + memcpy(saved_command_line, from, COMMAND_LINE_SIZE); + saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; + parse_cmdline(&meminfo, cmdline_p, from); + bootmem_init(&meminfo); + paging_init(&meminfo); + request_standard_resources(&meminfo, mdesc); + + /* + * Set up various architecture-specific pointers + */ + init_arch_irq = mdesc->init_irq; + +#ifdef CONFIG_VT +#if defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif +} + +static const char *hwcap_str[] = { + "swp", + "half", + "thumb", + "26bit", + "fastmult", + "fpa", + "vfp", + "edsp", + NULL +}; + +static int c_show(struct seq_file *m, void *v) +{ + int i; + + seq_printf(m, "Processor\t: %s %s rev %d (%s)\n", + proc_info.manufacturer, proc_info.cpu_name, + (int)processor_id & 15, elf_platform); + + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); + + /* dump out the processor features */ + seq_puts(m, "Features\t: "); + + for (i = 0; hwcap_str[i]; i++) + if (elf_hwcap & (1 << i)) + seq_printf(m, "%s ", hwcap_str[i]); + + seq_puts(m, "\n"); + + seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4); + seq_printf(m, "CPU revision\t: %d\n\n", processor_id & 15); + seq_printf(m, "Hardware\t: %s\n", machine_name); + seq_printf(m, "Revision\t: %04x\n", system_rev); + seq_printf(m, "Serial\t\t: %08x%08x\n", + system_serial_high, system_serial_low); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < 1 ? 
(void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show +}; diff -urN linux-2.5.70-bk13/arch/arm26/kernel/signal.c linux-2.5.70-bk14/arch/arm26/kernel/signal.c --- linux-2.5.70-bk13/arch/arm26/kernel/signal.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/signal.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,542 @@ +/* + * linux/arch/arm26/kernel/signal.c + * + * Copyright (C) 1995-2002 Russell King + * Copyright (C) 2003 Ian Molton (ARM26) + * + * FIXME!!! This is probably very broken (13/05/2003) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ptrace.h" + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +/* + * For ARM syscalls, we encode the syscall number into the instruction. + */ +#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)) +#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)) + +static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall); + +/* + * atomically swap in the new signal mask, and wait for a signal. + */ +asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs) +{ + sigset_t saveset; + + mask &= _BLOCKABLE; + spin_lock_irq(¤t->sighand->siglock); + saveset = current->blocked; + siginitset(¤t->blocked, mask); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + regs->ARM_r0 = -EINTR; + + while (1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (do_signal(&saveset, regs, 0)) + return regs->ARM_r0; + } +} + +asmlinkage int +sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs *regs) +{ + sigset_t saveset, newset; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (copy_from_user(&newset, unewset, sizeof(newset))) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + + spin_lock_irq(¤t->sighand->siglock); + saveset = current->blocked; + current->blocked = newset; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + regs->ARM_r0 = -EINTR; + + while (1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (do_signal(&saveset, regs, 0)) + return regs->ARM_r0; + } +} + +asmlinkage int +sys_sigaction(int sig, const struct old_sigaction *act, + struct old_sigaction *oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + if (act) { + old_sigset_t mask; + if (verify_area(VERIFY_READ, act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) + return -EFAULT; + __get_user(new_ka.sa.sa_flags, &act->sa_flags); + __get_user(mask, &act->sa_mask); + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); + + if (!ret && oact) { + if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) + return -EFAULT; + __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + } + + return ret; +} + +/* + * Do a signal return; undo the signal stack. + */ +struct sigframe +{ + struct sigcontext sc; + unsigned long extramask[_NSIG_WORDS-1]; + unsigned long retcode; +}; + +struct rt_sigframe +{ + struct siginfo *pinfo; + void *puc; + struct siginfo info; + struct ucontext uc; + unsigned long retcode; +}; + +static int +restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) +{ + int err = 0; + + __get_user_error(regs->ARM_r0, &sc->arm_r0, err); + __get_user_error(regs->ARM_r1, &sc->arm_r1, err); + __get_user_error(regs->ARM_r2, &sc->arm_r2, err); + __get_user_error(regs->ARM_r3, &sc->arm_r3, err); + __get_user_error(regs->ARM_r4, &sc->arm_r4, err); + __get_user_error(regs->ARM_r5, &sc->arm_r5, err); + __get_user_error(regs->ARM_r6, &sc->arm_r6, err); + __get_user_error(regs->ARM_r7, &sc->arm_r7, err); + __get_user_error(regs->ARM_r8, &sc->arm_r8, err); + __get_user_error(regs->ARM_r9, &sc->arm_r9, err); + __get_user_error(regs->ARM_r10, &sc->arm_r10, err); + __get_user_error(regs->ARM_fp, &sc->arm_fp, err); + __get_user_error(regs->ARM_ip, &sc->arm_ip, err); + __get_user_error(regs->ARM_sp, &sc->arm_sp, err); + __get_user_error(regs->ARM_lr, &sc->arm_lr, err); + __get_user_error(regs->ARM_pc, &sc->arm_pc, err); + + err |= !valid_user_regs(regs); + + return err; +} + +asmlinkage int sys_sigreturn(struct pt_regs *regs) +{ + struct sigframe *frame; + sigset_t set; + + /* + * Since we stacked the signal on a 64-bit boundary, + * then 'sp' should be word aligned here. If it's + * not, then the user is trying to mess with us. + */ + if (regs->ARM_sp & 7) + goto badframe; + + frame = (struct sigframe *)regs->ARM_sp; + + if (verify_area(VERIFY_READ, frame, sizeof (*frame))) + goto badframe; + if (__get_user(set.sig[0], &frame->sc.oldmask) + || (_NSIG_WORDS > 1 + && __copy_from_user(&set.sig[1], &frame->extramask, + sizeof(frame->extramask)))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + if (restore_sigcontext(regs, &frame->sc)) + goto badframe; + + /* Send SIGTRAP if we're single-stepping */ + if (current->ptrace & PT_SINGLESTEP) { + ptrace_cancel_bpt(current); + send_sig(SIGTRAP, current, 1); + } + + return regs->ARM_r0; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe *frame; + sigset_t set; + + /* + * Since we stacked the signal on a 64-bit boundary, + * then 'sp' should be word aligned here. If it's + * not, then the user is trying to mess with us. 
+ */ + if (regs->ARM_sp & 7) + goto badframe; + + frame = (struct rt_sigframe *)regs->ARM_sp; + + if (verify_area(VERIFY_READ, frame, sizeof (*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) + goto badframe; + + /* Send SIGTRAP if we're single-stepping */ + if (current->ptrace & PT_SINGLESTEP) { + ptrace_cancel_bpt(current); + send_sig(SIGTRAP, current, 1); + } + + return regs->ARM_r0; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +static int +setup_sigcontext(struct sigcontext *sc, /*struct _fpstate *fpstate,*/ + struct pt_regs *regs, unsigned long mask) +{ + int err = 0; + + __put_user_error(regs->ARM_r0, &sc->arm_r0, err); + __put_user_error(regs->ARM_r1, &sc->arm_r1, err); + __put_user_error(regs->ARM_r2, &sc->arm_r2, err); + __put_user_error(regs->ARM_r3, &sc->arm_r3, err); + __put_user_error(regs->ARM_r4, &sc->arm_r4, err); + __put_user_error(regs->ARM_r5, &sc->arm_r5, err); + __put_user_error(regs->ARM_r6, &sc->arm_r6, err); + __put_user_error(regs->ARM_r7, &sc->arm_r7, err); + __put_user_error(regs->ARM_r8, &sc->arm_r8, err); + __put_user_error(regs->ARM_r9, &sc->arm_r9, err); + __put_user_error(regs->ARM_r10, &sc->arm_r10, err); + __put_user_error(regs->ARM_fp, &sc->arm_fp, err); + __put_user_error(regs->ARM_ip, &sc->arm_ip, err); + __put_user_error(regs->ARM_sp, &sc->arm_sp, err); + __put_user_error(regs->ARM_lr, &sc->arm_lr, err); + __put_user_error(regs->ARM_pc, &sc->arm_pc, err); + + __put_user_error(current->thread.trap_no, &sc->trap_no, err); + __put_user_error(current->thread.error_code, &sc->error_code, err); + __put_user_error(current->thread.address, &sc->fault_address, err); + __put_user_error(mask, &sc->oldmask, err); + + return err; +} + +static inline void * +get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) +{ + unsigned long sp = regs->ARM_sp; + + /* + * This is the X/Open sanctioned signal stack switching. 
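The "X/Open sanctioned" switch decided just below is what sigaltstack() buys a program: when the handler was installed with SA_ONSTACK, get_sigframe() builds the frame on the registered alternate stack instead of at the interrupted SP. A minimal user-space illustration using the standard POSIX interfaces (nothing here is specific to this patch):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void handler(int sig)
{
        printf("signal %d handled on the alternate stack\n", sig);
}

int main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        sigaltstack(&ss, NULL);

        sa.sa_handler = handler;
        sa.sa_flags = SA_ONSTACK;       /* ask for the frame on ss.ss_sp */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
}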
+ */ + if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) + sp = current->sas_ss_sp + current->sas_ss_size; + + /* + * ATPCS B01 mandates 8-byte alignment + */ + return (void *)((sp - framesize) & ~7); +} + +static int +setup_return(struct pt_regs *regs, struct k_sigaction *ka, + unsigned long *rc, void *frame, int usig) +{ + unsigned long handler = (unsigned long)ka->sa.sa_handler; + unsigned long retcode; + + if (ka->sa.sa_flags & SA_RESTORER) { + retcode = (unsigned long)ka->sa.sa_restorer; + } else { + + if (__put_user((ka->sa.sa_flags & SA_SIGINFO)?SWI_SYS_RT_SIGRETURN:SWI_SYS_SIGRETURN, rc)) + return 1; + + retcode = ((unsigned long)rc); + } + + regs->ARM_r0 = usig; + regs->ARM_sp = (unsigned long)frame; + regs->ARM_lr = retcode; + regs->ARM_pc = handler & ~3; + + return 0; +} + +static int +setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) +{ + struct sigframe *frame = get_sigframe(ka, regs, sizeof(*frame)); + int err = 0; + + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + return 1; + + err |= setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]); + + if (_NSIG_WORDS > 1) { + err |= __copy_to_user(frame->extramask, &set->sig[1], + sizeof(frame->extramask)); + } + + if (err == 0) + err = setup_return(regs, ka, &frame->retcode, frame, usig); + + return err; +} + +static int +setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe *frame = get_sigframe(ka, regs, sizeof(*frame)); + int err = 0; + + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + return 1; + + __put_user_error(&frame->info, &frame->pinfo, err); + __put_user_error(&frame->uc, &frame->puc, err); + err |= copy_siginfo_to_user(&frame->info, info); + + /* Clear all the bits of the ucontext we don't use. */ + err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); + + err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/ + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + if (err == 0) + err = setup_return(regs, ka, &frame->retcode, frame, usig); + + if (err == 0) { + /* + * For realtime signals we must also set the second and third + * arguments for the signal handler. + * -- Peter Maydell 2000-12-06 + */ + regs->ARM_r1 = (unsigned long)frame->pinfo; + regs->ARM_r2 = (unsigned long)frame->puc; + } + + return err; +} + +static inline void restart_syscall(struct pt_regs *regs) +{ + regs->ARM_r0 = regs->ARM_ORIG_r0; + regs->ARM_pc -= 4; +} + +/* + * OK, we're invoking a handler + */ +static void +handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, + struct pt_regs * regs, int syscall) +{ + struct thread_info *thread = current_thread_info(); + struct task_struct *tsk = current; + struct k_sigaction *ka = &tsk->sighand->action[sig-1]; + int usig = sig; + int ret; + + /* + * If we were from a system call, check for system call restarting... 
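The switch just below is where the user-visible restart semantics come from: an interrupted system call either returns -EINTR or is transparently re-issued, depending on the handler's SA_RESTART flag. A small sketch of that difference as seen from user space (standard POSIX behaviour, not ARM26-specific):

#include <signal.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

static void on_alarm(int sig)
{
        (void)sig;              /* only here to interrupt the read() */
}

int main(void)
{
        struct sigaction sa;
        char buf[64];
        ssize_t n;

        sa.sa_handler = on_alarm;
        sa.sa_flags = 0;        /* no SA_RESTART: read() fails with EINTR */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGALRM, &sa, NULL);

        alarm(1);
        n = read(STDIN_FILENO, buf, sizeof(buf));
        if (n < 0 && errno == EINTR)
                printf("read() interrupted: the -ERESTARTSYS case became -EINTR\n");

        /* With sa.sa_flags = SA_RESTART the kernel would instead rewind the
         * PC (restart_syscall() above) and read() would simply continue. */
        return 0;
}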
+ */ + if (syscall) { + switch (regs->ARM_r0) { + case -ERESTART_RESTARTBLOCK: + current_thread_info()->restart_block.fn = + do_no_restart_syscall; + case -ERESTARTNOHAND: + regs->ARM_r0 = -EINTR; + break; + case -ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->ARM_r0 = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + restart_syscall(regs); + } + } + + /* + * translate the signal + */ + if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) + usig = thread->exec_domain->signal_invmap[usig]; + + /* + * Set up the stack frame + */ + if (ka->sa.sa_flags & SA_SIGINFO) + ret = setup_rt_frame(usig, ka, info, oldset, regs); + else + ret = setup_frame(usig, ka, oldset, regs); + + /* + * Check that the resulting registers are actually sane. + */ + ret |= !valid_user_regs(regs); + + if (ret == 0) { + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + + if (!(ka->sa.sa_flags & SA_NODEFER)) { + spin_lock_irq(&tsk->sighand->siglock); + sigorsets(&tsk->blocked, &tsk->blocked, + &ka->sa.sa_mask); + sigaddset(&tsk->blocked, sig); + recalc_sigpending(); + spin_unlock_irq(&tsk->sighand->siglock); + } + return; + } + + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, tsk); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) +{ + siginfo_t info; + int signr; + + /* + * We want the common case to go fast, which + * is why we may in certain cases get here from + * kernel mode. Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return 0; + + if (current->ptrace & PT_SINGLESTEP) + ptrace_cancel_bpt(current); + + signr = get_signal_to_deliver(&info, regs, NULL); + if (signr > 0) { + handle_signal(signr, &info, oldset, regs, syscall); + if (current->ptrace & PT_SINGLESTEP) + ptrace_set_bpt(current); + return 1; + } + + /* + * No signal to deliver to the process - restart the syscall. + */ + if (syscall) { + if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { + u32 *usp; + + regs->ARM_sp -= 12; + usp = (u32 *)regs->ARM_sp; + + put_user(regs->ARM_pc, &usp[0]); + /* swi __NR_restart_syscall */ + put_user(0xef000000 | __NR_restart_syscall, &usp[1]); + /* ldr pc, [sp], #12 */ +// FIXME!!! is #12 correct there? 
+ put_user(0xe49df00c, &usp[2]); + + regs->ARM_pc = regs->ARM_sp + 4; + } + if (regs->ARM_r0 == -ERESTARTNOHAND || + regs->ARM_r0 == -ERESTARTSYS || + regs->ARM_r0 == -ERESTARTNOINTR) { + restart_syscall(regs); + } + } + if (current->ptrace & PT_SINGLESTEP) + ptrace_set_bpt(current); + return 0; +} + +asmlinkage void +do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) +{ + if (thread_flags & _TIF_SIGPENDING) + do_signal(¤t->blocked, regs, syscall); +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/sys_arm.c linux-2.5.70-bk14/arch/arm26/kernel/sys_arm.c --- linux-2.5.70-bk13/arch/arm26/kernel/sys_arm.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/sys_arm.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,283 @@ +/* + * linux/arch/arm26/kernel/sys_arm.c + * + * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c + * Copyright (C) 1995, 1996 Russell King. + * Copyright (C) 2003 Ian Molton. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/arm + * platform. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, + unsigned long new_len, unsigned long flags, + unsigned long new_addr); + +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way unix traditionally does this, though. + */ +asmlinkage int sys_pipe(unsigned long * fildes) +{ + int fd[2]; + int error; + + error = do_pipe(fd); + if (!error) { + if (copy_to_user(fildes, fd, 2*sizeof(int))) + error = -EFAULT; + } + return error; +} + +/* common code for old and new mmaps */ +inline long do_mmap2( + unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + int error = -EINVAL; + struct file * file = NULL; + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + + /* + * If we are doing a fixed mapping, and address < PAGE_SIZE, + * then deny it. + */ + if (flags & MAP_FIXED && addr < PAGE_SIZE && vectors_base() == 0) + goto out; + + error = -EBADF; + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + goto out; + } + + down_write(¤t->mm->mmap_sem); + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); + up_write(¤t->mm->mmap_sem); + + if (file) + fput(file); +out: + return error; +} + +struct mmap_arg_struct { + unsigned long addr; + unsigned long len; + unsigned long prot; + unsigned long flags; + unsigned long fd; + unsigned long offset; +}; + +asmlinkage int old_mmap(struct mmap_arg_struct *arg) +{ + int error = -EFAULT; + struct mmap_arg_struct a; + + if (copy_from_user(&a, arg, sizeof(a))) + goto out;; + + error = -EINVAL; + if (a.offset & ~PAGE_MASK) + goto out; + + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); +out: + return error; +} + +asmlinkage unsigned long +sys_arm_mremap(unsigned long addr, unsigned long old_len, + unsigned long new_len, unsigned long flags, + unsigned long new_addr) +{ + unsigned long ret = -EINVAL; + + /* + * If we are doing a fixed mapping, and address < PAGE_SIZE, + * then deny it. 
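Both do_mmap2() above and the mremap wrapper below apply this check because, with vectors_base() == 0, the processor's exception vectors occupy the bottom of the address space and user mappings must not be allowed to cover them. From user space the refusal simply shows up as EINVAL; a tiny illustration with the standard mmap API (on this port it is the guard above that produces the failure):

#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Ask for a fixed anonymous mapping over page zero. */
        void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED)
                printf("mmap at address 0 rejected: %s\n", strerror(errno));
        return 0;
}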
+ */ + if (flags & MREMAP_FIXED && new_addr < PAGE_SIZE && + vectors_base() == 0) + goto out; + + down_write(¤t->mm->mmap_sem); + ret = do_mremap(addr, old_len, new_len, flags, new_addr); + up_write(¤t->mm->mmap_sem); + +out: + return ret; +} + +/* + * Perform the select(nd, in, out, ex, tv) and mmap() system + * calls. + */ +extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); + +struct sel_arg_struct { + unsigned long n; + fd_set *inp, *outp, *exp; + struct timeval *tvp; +}; + +asmlinkage int old_select(struct sel_arg_struct *arg) +{ + struct sel_arg_struct a; + + if (copy_from_user(&a, arg, sizeof(a))) + return -EFAULT; + /* sys_select() does the appropriate kernel locking */ + return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); +} + +/* + * sys_ipc() is the de-multiplexer for the SysV IPC calls.. + * + * This is really horribly ugly. + */ +asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth) +{ + int version, ret; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + switch (call) { + case SEMOP: + return sys_semop (first, (struct sembuf *)ptr, second); + case SEMGET: + return sys_semget (first, second, third); + case SEMCTL: { + union semun fourth; + if (!ptr) + return -EINVAL; + if (get_user(fourth.__pad, (void **) ptr)) + return -EFAULT; + return sys_semctl (first, second, third, fourth); + } + + case MSGSND: + return sys_msgsnd (first, (struct msgbuf *) ptr, + second, third); + case MSGRCV: + switch (version) { + case 0: { + struct ipc_kludge tmp; + if (!ptr) + return -EINVAL; + if (copy_from_user(&tmp,(struct ipc_kludge *) ptr, + sizeof (tmp))) + return -EFAULT; + return sys_msgrcv (first, tmp.msgp, second, + tmp.msgtyp, third); + } + default: + return sys_msgrcv (first, + (struct msgbuf *) ptr, + second, fifth, third); + } + case MSGGET: + return sys_msgget ((key_t) first, second); + case MSGCTL: + return sys_msgctl (first, second, (struct msqid_ds *) ptr); + + case SHMAT: + switch (version) { + default: { + ulong raddr; + ret = sys_shmat (first, (char *) ptr, second, &raddr); + if (ret) + return ret; + return put_user (raddr, (ulong *) third); + } + case 1: /* iBCS2 emulator entry point */ + if (!segment_eq(get_fs(), get_ds())) + return -EINVAL; + return sys_shmat (first, (char *) ptr, + second, (ulong *) third); + } + case SHMDT: + return sys_shmdt ((char *)ptr); + case SHMGET: + return sys_shmget (first, second, third); + case SHMCTL: + return sys_shmctl (first, second, + (struct shmid_ds *) ptr); + default: + return -EINVAL; + } +} + +/* Fork a new task - this creates a new program thread. + * This is called indirectly via a small wrapper + */ +asmlinkage int sys_fork(struct pt_regs *regs) +{ + return do_fork(SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL); +} + +/* Clone a task - this clones the calling program thread. + * This is called indirectly via a small wrapper + */ +asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs *regs) +{ + /* + * We don't support SETTID / CLEARTID (FIXME!!! (nicked from arm32)) + */ + if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID)) + return -EINVAL; + + if (!newsp) + newsp = regs->ARM_sp; + + return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, regs, 0, NULL, NULL); +} + +asmlinkage int sys_vfork(struct pt_regs *regs) +{ + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL); +} + +/* sys_execve() executes a new program. 
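The flags in sys_vfork() above are the whole story of vfork semantics on this port: CLONE_VM shares the parent's address space and CLONE_VFORK blocks the parent until the child execs or exits. A user-space refresher of what that contract looks like (standard libc interfaces, not specific to this patch):

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdlib.h>

int main(void)
{
        pid_t pid = vfork();    /* reaches do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, ...) above */

        if (pid == 0) {
                /* Child: borrows the parent's memory, so it may only exec or _exit. */
                execlp("true", "true", (char *)NULL);
                _exit(127);     /* exec failed */
        }

        /* Parent: only resumes once the child has exec'd or exited. */
        waitpid(pid, NULL, 0);
        return 0;
}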
+ * This is called indirectly via a small wrapper + */ +asmlinkage int sys_execve(char *filenamei, char **argv, char **envp, struct pt_regs *regs) +{ + int error; + char * filename; + + filename = getname(filenamei); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = do_execve(filename, argv, envp, regs); + putname(filename); +out: + return error; +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/time-acorn.c linux-2.5.70-bk14/arch/arm26/kernel/time-acorn.c --- linux-2.5.70-bk13/arch/arm26/kernel/time-acorn.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/time-acorn.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,69 @@ +/* + * linux/arch/arm/kernel/time-acorn.c + * + * Copyright (c) 1996-2000 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 24-Sep-1996 RMK Created + * 10-Oct-1996 RMK Brought up to date with arch-sa110eval + * 04-Dec-1997 RMK Updated for new arch/arm/time.c + * 13-May-2003 IM Brought over to ARM26 + */ +#include +#include +#include + +#include +#include +#include +#include + +extern unsigned long (*gettimeoffset)(void); + +static unsigned long ioctime_gettimeoffset(void) +{ + unsigned int count1, count2, status; + long offset; + + ioc_writeb (0, IOC_T0LATCH); + barrier (); + count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8); + barrier (); + status = ioc_readb(IOC_IRQREQA); + barrier (); + ioc_writeb (0, IOC_T0LATCH); + barrier (); + count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8); + + offset = count2; + if (count2 < count1) { + /* + * We have not had an interrupt between reading count1 + * and count2. + */ + if (status & (1 << 5)) + offset -= LATCH; + } else if (count2 > count1) { + /* + * We have just had another interrupt between reading + * count1 and count2. + */ + offset -= LATCH; + } + + offset = (LATCH - offset) * (tick_nsec / 1000); + return (offset + LATCH/2) / LATCH; +} + +void __init ioctime_init(void) +{ + ioc_writeb(LATCH & 255, IOC_T0LTCHL); + ioc_writeb(LATCH >> 8, IOC_T0LTCHH); + ioc_writeb(0, IOC_T0GO); + + gettimeoffset = ioctime_gettimeoffset; +} diff -urN linux-2.5.70-bk13/arch/arm26/kernel/time.c linux-2.5.70-bk14/arch/arm26/kernel/time.c --- linux-2.5.70-bk13/arch/arm26/kernel/time.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/time.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,202 @@ +/* + * linux/arch/arm26/kernel/time.c + * + * Copyright (C) 1991, 1992, 1995 Linus Torvalds + * Modifications for ARM (C) 1994-2001 Russell King + * Mods for ARM26 (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains the ARM-specific time handling details: + * reading the RTC at bootup, etc... 
+ * + * 1994-07-02 Alan Modra + * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime + * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 + * "A Kernel Model for Precision Timekeeping" by Dave Mills + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +u64 jiffies_64 = INITIAL_JIFFIES; + +extern unsigned long wall_jiffies; + +/* this needs a better home */ +spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; + +/* change this if you have some constant time drift */ +#define USECS_PER_JIFFY (1000000/HZ) + +static int dummy_set_rtc(void) +{ + return 0; +} + +/* + * hook for setting the RTC's idea of the current time. + */ +int (*set_rtc)(void) = dummy_set_rtc; + +static unsigned long dummy_gettimeoffset(void) +{ + return 0; +} + +/* + * hook for getting the time offset. Note that it is + * always called with interrupts disabled. + */ +unsigned long (*gettimeoffset)(void) = dummy_gettimeoffset; + +/* + * Handle kernel profile stuff... + */ +static inline void do_profile(struct pt_regs *regs) +{ + if (!user_mode(regs) && + prof_buffer && + current->pid) { + unsigned long pc = instruction_pointer(regs); + extern int _stext; + + pc -= (unsigned long)&_stext; + + pc >>= prof_shift; + + if (pc >= prof_len) + pc = prof_len - 1; + + prof_buffer[pc] += 1; + } +} + +static unsigned long next_rtc_update; + +/* + * If we have an externally synchronized linux clock, then update + * CMOS clock accordingly every ~11 minutes. set_rtc() has to be + * called as close as possible to 500 ms before the new second + * starts. + */ +static inline void do_set_rtc(void) +{ + if (time_status & STA_UNSYNC || set_rtc == NULL) + return; + +//FIXME - timespec.tv_sec is a time_t not unsigned long + if (next_rtc_update && + time_before((unsigned long)xtime.tv_sec, next_rtc_update)) + return; + + if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) && + xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1)) + return; + + if (set_rtc()) + /* + * rtc update failed. Try again in 60s + */ + next_rtc_update = xtime.tv_sec + 60; + else + next_rtc_update = xtime.tv_sec + 660; +} + +#define do_leds() + +void do_gettimeofday(struct timeval *tv) +{ + unsigned long flags; + unsigned long seq; + unsigned long usec, sec, lost; + + do { + seq = read_seqbegin_irqsave(&xtime_lock, flags); + usec = gettimeoffset(); + + lost = jiffies - wall_jiffies; + if (lost) + usec += lost * USECS_PER_JIFFY; + + sec = xtime.tv_sec; + usec += xtime.tv_nsec / 1000; + } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + + /* usec may have gone up a lot: be safe */ + while (usec >= 1000000) { + usec -= 1000000; + sec++; + } + + tv->tv_sec = sec; + tv->tv_usec = usec; +} + +void do_settimeofday(struct timeval *tv) +{ + write_seqlock_irq(&xtime_lock); + /* + * This is revolting. We need to set "xtime" correctly. However, the + * value in this location is the value at the most recent update of + * wall time. Discover what correction gettimeofday() would have + * done, and then undo it! 
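Stated as plain arithmetic (a restatement of the code below, nothing new): the reader assembles the current time from three pieces, so the writer has to peel the same three pieces back off before storing the requested wall time.

/* do_gettimeofday():  now   = xtime
 *                            + (jiffies - wall_jiffies) * USECS_PER_JIFFY
 *                            + gettimeoffset()
 *
 * do_settimeofday():  xtime = requested
 *                            - (jiffies - wall_jiffies) * USECS_PER_JIFFY
 *                            - gettimeoffset()
 */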
+ */ + tv->tv_usec -= gettimeoffset(); + tv->tv_usec -= (jiffies - wall_jiffies) * USECS_PER_JIFFY; + + while (tv->tv_usec < 0) { + tv->tv_usec += 1000000; + tv->tv_sec--; + } + + xtime.tv_sec = tv->tv_sec; + xtime.tv_nsec = tv->tv_usec * 1000; + time_adjust = 0; /* stop active adjtime() */ + time_status |= STA_UNSYNC; + time_maxerror = NTP_PHASE_LIMIT; + time_esterror = NTP_PHASE_LIMIT; + write_sequnlock_irq(&xtime_lock); +} + +static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + do_timer(regs); + do_set_rtc(); //FIME - EVERY timer IRQ? + do_profile(regs); + return IRQ_HANDLED; //FIXME - is this right? +} + +static struct irqaction timer_irq = { + .name = "timer", + .flags = SA_INTERRUPT, + .handler = timer_interrupt, +}; + +extern void ioctime_init(void); + +/* + * Set up timer interrupt. + */ +void __init time_init(void) +{ + ioctime_init(); + + setup_irq(IRQ_TIMER, &timer_irq); +} + diff -urN linux-2.5.70-bk13/arch/arm26/kernel/traps.c linux-2.5.70-bk14/arch/arm26/kernel/traps.c --- linux-2.5.70-bk13/arch/arm26/kernel/traps.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/kernel/traps.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,553 @@ +/* + * linux/arch/arm/kernel/traps.c + * + * Copyright (C) 1995-2002 Russell King + * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds + * Copyright (C) 2003 Ian Molton (ARM26) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * 'traps.c' handles hardware exceptions after we have saved some state in + * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably + * kill the offending process. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ptrace.h" + +extern void c_backtrace (unsigned long fp, int pmode); +extern void show_pte(struct mm_struct *mm, unsigned long addr); + +const char *processor_modes[] = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" }; + +static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" "*bad reason*"}; + +/* + * Stack pointers should always be within the kernels view of + * physical memory. If it is not there, then we can't dump + * out any information relating to the stack. + */ +static int verify_stack(unsigned long sp) +{ + if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0)) + return -EFAULT; + + return 0; +} + +/* + * Dump out the contents of some memory nicely... + */ +static void dump_mem(const char *str, unsigned long bottom, unsigned long top) +{ + unsigned long p = bottom & ~31; + mm_segment_t fs; + int i; + + /* + * We need to switch to kernel mode so that we can use __get_user + * to safely read from kernel space. Note that we now dump the + * code first, just in case the backtrace kills us. 
+ */ + fs = get_fs(); + set_fs(KERNEL_DS); + + printk("%s", str); + printk("(0x%08lx to 0x%08lx)\n", bottom, top); + + for (p = bottom & ~31; p < top;) { + printk("%04lx: ", p & 0xffff); + + for (i = 0; i < 8; i++, p += 4) { + unsigned int val; + + if (p < bottom || p >= top) + printk(" "); + else { + __get_user(val, (unsigned long *)p); + printk("%08x ", val); + } + } + printk ("\n"); + } + + set_fs(fs); +} + +static void dump_instr(struct pt_regs *regs) +{ + unsigned long addr = instruction_pointer(regs); + const int width = 8; + mm_segment_t fs; + int i; + + /* + * We need to switch to kernel mode so that we can use __get_user + * to safely read from kernel space. Note that we now dump the + * code first, just in case the backtrace kills us. + */ + fs = get_fs(); + set_fs(KERNEL_DS); + + printk("Code: "); + for (i = -4; i < 1; i++) { + unsigned int val, bad; + + bad = __get_user(val, &((u32 *)addr)[i]); + + if (!bad) + printk(i == 0 ? "(%0*x) " : "%0*x ", width, val); + else { + printk("bad PC value."); + break; + } + } + printk("\n"); + + set_fs(fs); +} + +/*static*/ void __dump_stack(struct task_struct *tsk, unsigned long sp) +{ + dump_mem("Stack: ", sp, 8192+(unsigned long)tsk->thread_info); +} + +void dump_stack(void) +{ +#ifdef CONFIG_DEBUG_ERRORS + __backtrace(); +#endif +} + +//FIXME - was a static fn +void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +{ + unsigned int fp; + int ok = 1; + + printk("Backtrace: "); + fp = regs->ARM_fp; + if (!fp) { + printk("no frame pointer"); + ok = 0; + } else if (verify_stack(fp)) { + printk("invalid frame pointer 0x%08x", fp); + ok = 0; + } else if (fp < (unsigned long)(tsk->thread_info + 1)) + printk("frame pointer underflow"); + printk("\n"); + + if (ok) + c_backtrace(fp, processor_mode(regs)); +} + +/* + * This is called from SysRq-T (show_task) to display the current + * call trace for each process. Very useful. + */ +void show_trace_task(struct task_struct *tsk) +{ + if (tsk != current) { + unsigned int fp = thread_saved_fp(tsk); + c_backtrace(fp, 0x10); + } +} + +spinlock_t die_lock = SPIN_LOCK_UNLOCKED; + +/* + * This function is protected against re-entrancy. 
+ */ +NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) +{ + struct task_struct *tsk = current; + + console_verbose(); + spin_lock_irq(&die_lock); + + printk("Internal error: %s: %x\n", str, err); + printk("CPU: %d\n", smp_processor_id()); + show_regs(regs); + printk("Process %s (pid: %d, stack limit = 0x%p)\n", + current->comm, current->pid, tsk->thread_info + 1); + + if (!user_mode(regs) || in_interrupt()) { + __dump_stack(tsk, (unsigned long)(regs + 1)); + dump_backtrace(regs, tsk); + dump_instr(regs); + } +while(1); + spin_unlock_irq(&die_lock); + do_exit(SIGSEGV); +} + +void die_if_kernel(const char *str, struct pt_regs *regs, int err) +{ + if (user_mode(regs)) + return; + + die(str, regs, err); +} + +static DECLARE_MUTEX(undef_sem); +static int (*undef_hook)(struct pt_regs *); + +int request_undef_hook(int (*fn)(struct pt_regs *)) +{ + int ret = -EBUSY; + + down(&undef_sem); + if (undef_hook == NULL) { + undef_hook = fn; + ret = 0; + } + up(&undef_sem); + + return ret; +} + +int release_undef_hook(int (*fn)(struct pt_regs *)) +{ + int ret = -EINVAL; + + down(&undef_sem); + if (undef_hook == fn) { + undef_hook = NULL; + ret = 0; + } + up(&undef_sem); + + return ret; +} + +static int undefined_extension(struct pt_regs *regs, unsigned int op) +{ + switch (op) { + case 1: /* 0xde01 / 0x?7f001f0 */ + ptrace_break(current, regs); + return 0; + } + return 1; +} + +asmlinkage void do_undefinstr(struct pt_regs *regs) +{ + siginfo_t info; + void *pc; + + regs->ARM_pc -= 4; + + pc = (unsigned long *)instruction_pointer(regs); /* strip PSR */ + + if (user_mode(regs)) { + u32 instr; + + get_user(instr, (u32 *)pc); + + if ((instr & 0x0fff00ff) == 0x07f000f0 && + undefined_extension(regs, (instr >> 8) & 255) == 0) { + regs->ARM_pc += 4; + return; + } + } else { + if (undef_hook && undef_hook(regs) == 0) { + regs->ARM_pc += 4; + return; + } + } + +#ifdef CONFIG_DEBUG_USER + printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", + current->comm, current->pid, pc); + dump_instr(regs); +#endif + + current->thread.error_code = 0; + current->thread.trap_no = 6; + + info.si_signo = SIGILL; + info.si_errno = 0; + info.si_code = ILL_ILLOPC; + info.si_addr = pc; + + force_sig_info(SIGILL, &info, current); + + die_if_kernel("Oops - undefined instruction", regs, 0); +} + +asmlinkage void do_excpt(unsigned long address, struct pt_regs *regs, int mode) +{ + siginfo_t info; + +#ifdef CONFIG_DEBUG_USER + printk(KERN_INFO "%s (%d): address exception: pc=%08lx\n", + current->comm, current->pid, instruction_pointer(regs)); + dump_instr(regs); +#endif + + current->thread.error_code = 0; + current->thread.trap_no = 11; + + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRERR; + info.si_addr = (void *)address; + + force_sig_info(SIGBUS, &info, current); + + die_if_kernel("Oops - address exception", regs, mode); +} + +asmlinkage void do_unexp_fiq (struct pt_regs *regs) +{ +#ifndef CONFIG_IGNORE_FIQ + printk("Hmm. Unexpected FIQ received, but trying to continue\n"); + printk("You may have a hardware problem...\n"); +#endif +} + +/* + * bad_mode handles the impossible case in the vectors. If you see one of + * these, then it's extremely serious, and could mean you have buggy hardware. + * It never returns, and never tries to sync. We hope that we can at least + * dump out some state information... 
+ */ +asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode) +{ + unsigned int vectors = vectors_base(); + + console_verbose(); + + printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n", + handler[reason<5?reason:4], processor_modes[proc_mode]); + + /* + * Dump out the vectors and stub routines. Maybe a better solution + * would be to dump them out only if we detect that they are corrupted. + */ + dump_mem(KERN_CRIT "Vectors: ", vectors, vectors + 0x40); + dump_mem(KERN_CRIT "Stubs: ", vectors + 0x200, vectors + 0x4b8); + + die("Oops", regs, 0); + local_irq_disable(); + panic("bad mode"); +} + +static int bad_syscall(int n, struct pt_regs *regs) +{ + struct thread_info *thread = current_thread_info(); + siginfo_t info; + + if (current->personality != PER_LINUX && thread->exec_domain->handler) { + thread->exec_domain->handler(n, regs); + return regs->ARM_r0; + } + +#ifdef CONFIG_DEBUG_USER + printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", + current->pid, current->comm, n); + dump_instr(regs); +#endif + + info.si_signo = SIGILL; + info.si_errno = 0; + info.si_code = ILL_ILLTRP; + info.si_addr = (void *)instruction_pointer(regs) - 4; + + force_sig_info(SIGILL, &info, current); + die_if_kernel("Oops", regs, n); + return regs->ARM_r0; +} + +static inline void +do_cache_op(unsigned long start, unsigned long end, int flags) +{ + struct vm_area_struct *vma; + + if (end < start) + return; + + vma = find_vma(current->active_mm, start); + if (vma && vma->vm_start < end) { + if (start < vma->vm_start) + start = vma->vm_start; + if (end > vma->vm_end) + end = vma->vm_end; + } +} + +/* + * Handle all unrecognised system calls. + * 0x9f0000 - 0x9fffff are some more esoteric system calls + */ +#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) +asmlinkage int arm_syscall(int no, struct pt_regs *regs) +{ + siginfo_t info; + + if ((no >> 16) != 0x9f) + return bad_syscall(no, regs); + + switch (no & 0xffff) { + case 0: /* branch through 0 */ + info.si_signo = SIGSEGV; + info.si_errno = 0; + info.si_code = SEGV_MAPERR; + info.si_addr = NULL; + + force_sig_info(SIGSEGV, &info, current); + + die_if_kernel("branch through zero", regs, 0); + return 0; + + case NR(breakpoint): /* SWI BREAK_POINT */ + ptrace_break(current, regs); + return regs->ARM_r0; + + case NR(cacheflush): + return 0; + + case NR(usr26): + case NR(usr32): + break; + + default: + /* Calls 9f00xx..9f07ff are defined to return -ENOSYS + if not implemented, rather than raising SIGILL. This + way the calling program can gracefully determine whether + a feature is supported. */ + if (no <= 0x7ff) + return -ENOSYS; + break; + } +#ifdef CONFIG_DEBUG_USER + /* + * experience shows that these seem to indicate that + * something catastrophic has happened + */ + printk("[%d] %s: arm syscall %d\n", current->pid, current->comm, no); + dump_instr(regs); + if (user_mode(regs)) { + show_regs(regs); + c_backtrace(regs->ARM_fp, processor_mode(regs)); + } +#endif + info.si_signo = SIGILL; + info.si_errno = 0; + info.si_code = ILL_ILLTRP; + info.si_addr = (void *)instruction_pointer(regs) - 4; + + force_sig_info(SIGILL, &info, current); + die_if_kernel("Oops", regs, no); + return 0; +} + +void __bad_xchg(volatile void *ptr, int size) +{ + printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", + __builtin_return_address(0), ptr, size); + BUG(); +} + +/* + * A data abort trap was taken, but we did not handle the instruction. + * Try to abort the user program, or panic if it was the kernel. 
+ */ +asmlinkage void +baddataabort(int code, unsigned long instr, struct pt_regs *regs) +{ + unsigned long addr = instruction_pointer(regs); + siginfo_t info; + +#ifdef CONFIG_DEBUG_USER + printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", + current->pid, current->comm, code, instr); + dump_instr(regs); + show_pte(current->mm, addr); +#endif + + info.si_signo = SIGILL; + info.si_errno = 0; + info.si_code = ILL_ILLOPC; + info.si_addr = (void *)addr; + + force_sig_info(SIGILL, &info, current); + die_if_kernel("unknown data abort code", regs, instr); +} + +void __bug(const char *file, int line, void *data) +{ + printk(KERN_CRIT"kernel BUG at %s:%d!", file, line); + if (data) + printk(KERN_CRIT" - extra data = %p", data); + printk("\n"); + *(int *)0 = 0; +} + +void __readwrite_bug(const char *fn) +{ + printk("%s called, but not implemented", fn); + BUG(); +} + +void __pte_error(const char *file, int line, unsigned long val) +{ + printk("%s:%d: bad pte %08lx.\n", file, line, val); +} + +void __pmd_error(const char *file, int line, unsigned long val) +{ + printk("%s:%d: bad pmd %08lx.\n", file, line, val); +} + +void __pgd_error(const char *file, int line, unsigned long val) +{ + printk("%s:%d: bad pgd %08lx.\n", file, line, val); +} + +asmlinkage void __div0(void) +{ + printk("Division by zero in kernel.\n"); + dump_stack(); +} + +void abort(void) +{ + BUG(); + + /* if that doesn't kill us, halt */ + panic("Oops failed to kill thread"); +} + +void __init trap_init(void) +{ + extern void __trap_init(unsigned long); + unsigned long base = vectors_base(); + + __trap_init(base); + if (base != 0) + printk(KERN_DEBUG "Relocating machine vectors to 0x%08lx\n", + base); +} diff -urN linux-2.5.70-bk13/arch/arm26/lib/Makefile linux-2.5.70-bk14/arch/arm26/lib/Makefile --- linux-2.5.70-bk13/arch/arm26/lib/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,31 @@ +# +# linux/arch/arm/lib/Makefile +# +# Copyright (C) 1995-2000 Russell King +# + +L_TARGET := lib.a + +obj-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ + csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ + copy_page.o delay.o findbit.o memchr.o memcpy.o \ + memset.o memzero.o setbit.o \ + strchr.o strrchr.o testchangebit.o \ + testclearbit.o testsetbit.o getuser.o \ + putuser.o ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ + ucmpdi2.o udivdi3.o lib1funcs.o ecard.o io-acorn.o \ + floppydma.o io-readsb.o io-writesb.o io-writesl.o \ + uaccess-kernel.o uaccess-user.o io-readsw-armv3.o \ + io-writesw-armv3.o io-readsl-armv3.o + +obj-m := +obj-n := + +obj-$(CONFIG_VT)+= kbd.o + +obj-y += ecard.o io-acorn.o floppydma.o + + +csumpartialcopy.o: csumpartialcopygeneric.S +csumpartialcopyuser.o: csumpartialcopygeneric.S + diff -urN linux-2.5.70-bk13/arch/arm26/lib/ashldi3.c linux-2.5.70-bk14/arch/arm26/lib/ashldi3.c --- linux-2.5.70-bk13/arch/arm26/lib/ashldi3.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/ashldi3.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,61 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. 
+ +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +DItype +__ashldi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) + { + w.s.low = 0; + w.s.high = (USItype)uu.s.low << -bm; + } + else + { + USItype carries = (USItype)uu.s.low >> bm; + w.s.low = (USItype)uu.s.low << b; + w.s.high = ((USItype)uu.s.high << b) | carries; + } + + return w.ll; +} + diff -urN linux-2.5.70-bk13/arch/arm26/lib/ashrdi3.c linux-2.5.70-bk14/arch/arm26/lib/ashrdi3.c --- linux-2.5.70-bk13/arch/arm26/lib/ashrdi3.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/ashrdi3.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,61 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. 
based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +DItype +__ashrdi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) + { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1); + w.s.low = uu.s.high >> -bm; + } + else + { + USItype carries = (USItype)uu.s.high << bm; + w.s.high = uu.s.high >> b; + w.s.low = ((USItype)uu.s.low >> b) | carries; + } + + return w.ll; +} diff -urN linux-2.5.70-bk13/arch/arm26/lib/backtrace.S linux-2.5.70-bk14/arch/arm26/lib/backtrace.S --- linux-2.5.70-bk13/arch/arm26/lib/backtrace.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/backtrace.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,145 @@ +/* + * linux/arch/arm/lib/backtrace.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + .text + +@ fp is 0 or stack frame + +#define frame r4 +#define next r5 +#define save r6 +#define mask r7 +#define offset r8 + +ENTRY(__backtrace) + mov r1, #0x10 + mov r0, fp + +ENTRY(c_backtrace) + +#ifdef CONFIG_NO_FRAME_POINTER + mov pc, lr +#else + + stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location... + mov mask, #0xfc000003 + tst mask, r0 + movne r0, #0 + movs frame, r0 +1: moveq r0, #-2 + LOADREGS(eqfd, sp!, {r4 - r8, pc}) + +2: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction + ldr r0, [sp], #4 + adr r1, 2b - 4 + sub offset, r0, r1 + +3: tst frame, mask @ Check for address exceptions... 
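The c_backtrace loop that follows walks the APCS frame chain: for each frame it loads the caller's fp from [frame, #-12], the saved sp from [frame, #-8], lr from [frame, #-4] and pc from [frame, #0], strips the 26-bit PSR/mode bits with the 0xfc000003 mask, undoes the pipeline prefetch offset measured above with the "stmfd sp!, {pc}" trick, and prints a "Function entered at ... from ..." line. A rough C model of that walk is sketched below; the struct, the function name and the pc_offset parameter are illustrative only, and printk is assumed from the kernel context.

struct apcs_frame_model {
        unsigned long fp;       /* caller's frame pointer               */
        unsigned long sp;       /* saved stack pointer                  */
        unsigned long lr;       /* return address                       */
        unsigned long pc;       /* saved pc, PSR flags in the top bits  */
};

static void backtrace_model(unsigned long fp, unsigned long pc_offset)
{
        /* pc_offset is the store-ahead of pc that the assembly measures
         * at run time before entering this loop */
        while (fp != 0 && !(fp & 0xfc000003)) {
                struct apcs_frame_model *f =
                        (struct apcs_frame_model *)(fp - 12);
                unsigned long where = (f->pc & ~0xfc000003UL) - pc_offset;
                unsigned long from  = f->lr & ~0xfc000003UL;

                printk("Function entered at [<%p>] from [<%p>]\n",
                       (void *)where, (void *)from);

                if (f->fp == fp)        /* no progress, stop here */
                        break;
                fp = f->fp;
        }
}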
+ bne 1b + +1001: ldr next, [frame, #-12] @ get fp +1002: ldr r2, [frame, #-4] @ get lr +1003: ldr r3, [frame, #0] @ get pc + sub save, r3, offset @ Correct PC for prefetching + bic save, save, mask +1004: ldr r1, [save, #0] @ get instruction at function + mov r1, r1, lsr #10 + ldr r3, .Ldsi+4 + teq r1, r3 + subeq save, save, #4 + adr r0, .Lfe + mov r1, save + bic r2, r2, mask + bl printk @ print pc and link register + + ldr r0, [frame, #-8] @ get sp + sub r0, r0, #4 +1005: ldr r1, [save, #4] @ get instruction at function+4 + mov r3, r1, lsr #10 + ldr r2, .Ldsi+4 + teq r3, r2 @ Check for stmia sp!, {args} + addeq save, save, #4 @ next instruction + bleq .Ldumpstm + + sub r0, frame, #16 +1006: ldr r1, [save, #4] @ Get 'stmia sp!, {rlist, fp, ip, lr, pc}' instruction + mov r3, r1, lsr #10 + ldr r2, .Ldsi + teq r3, r2 + bleq .Ldumpstm + + teq frame, next + movne frame, next + teqne frame, #0 + bne 3b + LOADREGS(fd, sp!, {r4 - r8, pc}) + +/* + * Fixup for LDMDB + */ + .section .fixup,"ax" + .align 0 +1007: ldr r0, =.Lbad + mov r1, frame + bl printk + LOADREGS(fd, sp!, {r4 - r8, pc}) + .ltorg + .previous + + .section __ex_table,"a" + .align 3 + .long 1001b, 1007b + .long 1002b, 1007b + .long 1003b, 1007b + .long 1004b, 1007b + .long 1005b, 1007b + .long 1006b, 1007b + .previous + +#define instr r4 +#define reg r5 +#define stack r6 + +.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} + mov stack, r0 + mov instr, r1 + mov reg, #9 + mov r7, #0 +1: mov r3, #1 + tst instr, r3, lsl reg + beq 2f + add r7, r7, #1 + teq r7, #4 + moveq r7, #0 + moveq r3, #'\n' + movne r3, #' ' + ldr r2, [stack], #-4 + mov r1, reg + adr r0, .Lfp + bl printk +2: subs reg, reg, #1 + bpl 1b + teq r7, #0 + adrne r0, .Lcr + blne printk + mov r0, stack + LOADREGS(fd, sp!, {instr, reg, stack, r7, pc}) + +.Lfe: .asciz "Function entered at [<%p>] from [<%p>]\n" +.Lfp: .asciz " r%d = %08X%c" +.Lcr: .asciz "\n" +.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" + .align +.Ldsi: .word 0x00e92dd8 >> 2 + .word 0x00e92d00 >> 2 + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/lib/changebit.S linux-2.5.70-bk14/arch/arm26/lib/changebit.S --- linux-2.5.70-bk13/arch/arm26/lib/changebit.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/changebit.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,28 @@ +/* + * linux/arch/arm/lib/changebit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include + .text + +/* Purpose : Function to change a bit + * Prototype: int change_bit(int bit, void *addr) + */ +ENTRY(_change_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_change_bit_le) + and r2, r0, #7 + mov r3, #1 + mov r3, r3, lsl r2 + save_and_disable_irqs ip, r2 + ldrb r2, [r1, r0, lsr #3] + eor r2, r2, r3 + strb r2, [r1, r0, lsr #3] + restore_irqs ip + RETINSTR(mov,pc,lr) diff -urN linux-2.5.70-bk13/arch/arm26/lib/clearbit.S linux-2.5.70-bk14/arch/arm26/lib/clearbit.S --- linux-2.5.70-bk13/arch/arm26/lib/clearbit.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/clearbit.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,31 @@ +/* + * linux/arch/arm/lib/clearbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +/* + * Purpose : Function to clear a bit + * Prototype: int clear_bit(int bit, void *addr) + */ +ENTRY(_clear_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_clear_bit_le) + and r2, r0, #7 + mov r3, #1 + mov r3, r3, lsl r2 + save_and_disable_irqs ip, r2 + ldrb r2, [r1, r0, lsr #3] + bic r2, r2, r3 + strb r2, [r1, r0, lsr #3] + restore_irqs ip + RETINSTR(mov,pc,lr) + + diff -urN linux-2.5.70-bk13/arch/arm26/lib/copy_page.S linux-2.5.70-bk14/arch/arm26/lib/copy_page.S --- linux-2.5.70-bk13/arch/arm26/lib/copy_page.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/copy_page.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,62 @@ +/* + * linux/arch/arm/lib/copypage.S + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include +#include + + .text + .align 5 +/* + * ARMv3 optimised copy_user_page + * + * FIXME: rmk do we need to handle cache stuff... + * FIXME: im is this right on ARM26? + */ +ENTRY(__copy_user_page) + stmfd sp!, {r4, lr} @ 2 + mov r2, #PAGE_SZ/64 @ 1 + ldmia r1!, {r3, r4, ip, lr} @ 4+1 +1: stmia r0!, {r3, r4, ip, lr} @ 4 + ldmia r1!, {r3, r4, ip, lr} @ 4+1 + stmia r0!, {r3, r4, ip, lr} @ 4 + ldmia r1!, {r3, r4, ip, lr} @ 4+1 + stmia r0!, {r3, r4, ip, lr} @ 4 + ldmia r1!, {r3, r4, ip, lr} @ 4 + subs r2, r2, #1 @ 1 + stmia r0!, {r3, r4, ip, lr} @ 4 + ldmneia r1!, {r3, r4, ip, lr} @ 4 + bne 1b @ 1 + LOADREGS(fd, sp!, {r4, pc}) @ 3 + + .align 5 +/* + * ARMv3 optimised clear_user_page + * + * FIXME: rmk do we need to handle cache stuff... + */ +ENTRY(__clear_user_page) + str lr, [sp, #-4]! 
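The ARMv3 __copy_user_page loop above moves the page in 64-byte passes, each pass being four ldmia/stmia pairs of four registers, repeated PAGE_SZ/64 times; __clear_user_page below follows the same pattern with zeroed registers. A rough C equivalent of the copy loop, assuming PAGE_SZ from the ARM26 page headers; the function name is illustrative only.

static void copy_user_page_model(void *to, const void *from)
{
        unsigned long *dst = to;
        const unsigned long *src = from;
        int pass;

        /* PAGE_SZ / 64 passes, sixteen words (64 bytes) per pass,
         * mirroring the four ldmia/stmia pairs in the assembly */
        for (pass = PAGE_SZ / 64; pass > 0; pass--) {
                int i;

                for (i = 0; i < 16; i++)
                        *dst++ = *src++;
        }
}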
+ mov r1, #PAGE_SZ/64 @ 1 + mov r2, #0 @ 1 + mov r3, #0 @ 1 + mov ip, #0 @ 1 + mov lr, #0 @ 1 +1: stmia r0!, {r2, r3, ip, lr} @ 4 + stmia r0!, {r2, r3, ip, lr} @ 4 + stmia r0!, {r2, r3, ip, lr} @ 4 + stmia r0!, {r2, r3, ip, lr} @ 4 + subs r1, r1, #1 @ 1 + bne 1b @ 1 + ldr pc, [sp], #4 + + .section ".init.text", #alloc, #execinstr + diff -urN linux-2.5.70-bk13/arch/arm26/lib/csumipv6.S linux-2.5.70-bk14/arch/arm26/lib/csumipv6.S --- linux-2.5.70-bk13/arch/arm26/lib/csumipv6.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/csumipv6.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,32 @@ +/* + * linux/arch/arm/lib/csumipv6.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +ENTRY(__csum_ipv6_magic) + str lr, [sp, #-4]! + adds ip, r2, r3 + ldmia r1, {r1 - r3, lr} + adcs ip, ip, r1 + adcs ip, ip, r2 + adcs ip, ip, r3 + adcs ip, ip, lr + ldmia r0, {r0 - r3} + adcs r0, ip, r0 + adcs r0, r0, r1 + adcs r0, r0, r2 + ldr r2, [sp, #4] + adcs r0, r0, r3 + adcs r0, r0, r2 + adcs r0, r0, #0 + LOADREGS(fd, sp!, {pc}) + diff -urN linux-2.5.70-bk13/arch/arm26/lib/csumpartial.S linux-2.5.70-bk14/arch/arm26/lib/csumpartial.S --- linux-2.5.70-bk13/arch/arm26/lib/csumpartial.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/csumpartial.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,130 @@ +/* + * linux/arch/arm/lib/csumpartial.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +/* + * Function: __u32 csum_partial(const char *src, int len, __u32 sum) + * Params : r0 = buffer, r1 = len, r2 = checksum + * Returns : r0 = new checksum + */ + +buf .req r0 +len .req r1 +sum .req r2 +td0 .req r3 +td1 .req r4 @ save before use +td2 .req r5 @ save before use +td3 .req lr + +.zero: mov r0, sum + add sp, sp, #4 + ldr pc, [sp], #4 + + /* + * Handle 0 to 7 bytes, with any alignment of source and + * destination pointers. Note that when we get here, C = 0 + */ +.less8: teq len, #0 @ check for zero count + beq .zero + + /* we must have at least one byte. */ + tst buf, #1 @ odd address? + ldrneb td0, [buf], #1 + subne len, len, #1 + adcnes sum, sum, td0, lsl #byte(1) + +.less4: tst len, #6 + beq .less8_byte + + /* we are now half-word aligned */ + +.less8_wordlp: +#if __LINUX_ARM_ARCH__ >= 4 + ldrh td0, [buf], #2 + sub len, len, #2 +#else + ldrb td0, [buf], #1 + ldrb td3, [buf], #1 + sub len, len, #2 + orr td0, td0, td3, lsl #8 +#endif + adcs sum, sum, td0 + tst len, #6 + bne .less8_wordlp + +.less8_byte: tst len, #1 @ odd number of bytes + ldrneb td0, [buf], #1 @ include last byte + adcnes sum, sum, td0, lsl #byte(0) @ update checksum + +.done: adc r0, sum, #0 @ collect up the last carry + ldr td0, [sp], #4 + tst td0, #1 @ check buffer alignment + movne td0, r0, lsl #8 @ rotate checksum by 8 bits + orrne r0, td0, r0, lsr #24 + ldr pc, [sp], #4 @ return + +.not_aligned: tst buf, #1 @ odd address + ldrneb td0, [buf], #1 @ make even + subne len, len, #1 + adcnes sum, sum, td0, lsl #byte(1) @ update checksum + + tst buf, #2 @ 32-bit aligned? 
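csum_partial above accumulates the Internet-checksum partial sum: words are added with adcs so carries fold in as the loop runs, a leading odd byte is added shifted into the upper half, the final carry is collected with adc at .done, and the result is rotated by 8 bits when the buffer started on an odd address. A much simplified C model follows (little-endian, with the alignment fix-ups omitted; the function name and the 64-bit accumulator are illustrative and not how the assembly does it):

static unsigned int csum_partial_model(const unsigned char *buf, int len,
                                       unsigned int sum)
{
        unsigned long long acc = sum;

        /* add little-endian 16-bit words; the assembly works 32 bits
         * at a time with add-with-carry */
        while (len >= 2) {
                acc += buf[0] | (buf[1] << 8);
                buf += 2;
                len -= 2;
        }
        if (len)                        /* trailing odd byte */
                acc += buf[0];

        while (acc >> 32)               /* fold the carries back in */
                acc = (acc & 0xffffffffULL) + (acc >> 32);

        return (unsigned int)acc;
}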
+#if __LINUX_ARM_ARCH__ >= 4 + ldrneh td0, [buf], #2 @ make 32-bit aligned + subne len, len, #2 +#else + ldrneb td0, [buf], #1 + ldrneb ip, [buf], #1 + subne len, len, #2 + orrne td0, td0, ip, lsl #8 +#endif + adcnes sum, sum, td0 @ update checksum + mov pc, lr + +ENTRY(csum_partial) + stmfd sp!, {buf, lr} + cmp len, #8 @ Ensure that we have at least + blo .less8 @ 8 bytes to copy. + + adds sum, sum, #0 @ C = 0 + tst buf, #3 @ Test destination alignment + blne .not_aligned @ aligh destination, return here + +1: bics ip, len, #31 + beq 3f + + stmfd sp!, {r4 - r5} +2: ldmia buf!, {td0, td1, td2, td3} + adcs sum, sum, td0 + adcs sum, sum, td1 + adcs sum, sum, td2 + adcs sum, sum, td3 + ldmia buf!, {td0, td1, td2, td3} + adcs sum, sum, td0 + adcs sum, sum, td1 + adcs sum, sum, td2 + adcs sum, sum, td3 + sub ip, ip, #32 + teq ip, #0 + bne 2b + ldmfd sp!, {r4 - r5} + +3: tst len, #0x1c @ should not change C + beq .less4 + +4: ldr td0, [buf], #4 + sub len, len, #4 + adcs sum, sum, td0 + tst len, #0x1c + bne 4b + b .less4 diff -urN linux-2.5.70-bk13/arch/arm26/lib/csumpartialcopy.S linux-2.5.70-bk14/arch/arm26/lib/csumpartialcopy.S --- linux-2.5.70-bk13/arch/arm26/lib/csumpartialcopy.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/csumpartialcopy.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,52 @@ +/* + * linux/arch/arm/lib/csumpartialcopy.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum) + * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum + * Returns : r0 = new checksum + */ + + .macro save_regs + stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc} + .endm + + .macro load_regs,flags + LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc}) + .endm + + .macro load1b, reg1 + ldrb \reg1, [r0], #1 + .endm + + .macro load2b, reg1, reg2 + ldrb \reg1, [r0], #1 + ldrb \reg2, [r0], #1 + .endm + + .macro load1l, reg1 + ldr \reg1, [r0], #4 + .endm + + .macro load2l, reg1, reg2 + ldr \reg1, [r0], #4 + ldr \reg2, [r0], #4 + .endm + + .macro load4l, reg1, reg2, reg3, reg4 + ldmia r0!, {\reg1, \reg2, \reg3, \reg4} + .endm + +#define FN_ENTRY ENTRY(csum_partial_copy_nocheck) + +#include "csumpartialcopygeneric.S" diff -urN linux-2.5.70-bk13/arch/arm26/lib/csumpartialcopygeneric.S linux-2.5.70-bk14/arch/arm26/lib/csumpartialcopygeneric.S --- linux-2.5.70-bk13/arch/arm26/lib/csumpartialcopygeneric.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/csumpartialcopygeneric.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,352 @@ +/* + * linux/arch/arm/lib/csumpartialcopygeneric.S + * + * Copyright (C) 1995-2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * JMA 01/06/03 Commented out some shl0s; probobly irrelevant to arm26 + * + */ + +/* + * unsigned int + * csum_partial_copy_xxx(const char *src, char *dst, int len, int sum, ) + * r0 = src, r1 = dst, r2 = len, r3 = sum + * Returns : r0 = checksum + * + * Note that 'tst' and 'teq' preserve the carry flag. 
+ */ + +/* Quick hack */ + .macro save_regs + stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc} + .endm + +/* end Quick Hack */ + +src .req r0 +dst .req r1 +len .req r2 +sum .req r3 + +.zero: mov r0, sum + load_regs ea + + /* + * Align an unaligned destination pointer. We know that + * we have >= 8 bytes here, so we don't need to check + * the length. Note that the source pointer hasn't been + * aligned yet. + */ +.dst_unaligned: tst dst, #1 + beq .dst_16bit + + load1b ip + sub len, len, #1 + adcs sum, sum, ip, lsl #byte(1) @ update checksum + strb ip, [dst], #1 + tst dst, #2 + moveq pc, lr @ dst is now 32bit aligned + +.dst_16bit: load2b r8, ip + sub len, len, #2 + adcs sum, sum, r8, lsl #byte(0) + strb r8, [dst], #1 + adcs sum, sum, ip, lsl #byte(1) + strb ip, [dst], #1 + mov pc, lr @ dst is now 32bit aligned + + /* + * Handle 0 to 7 bytes, with any alignment of source and + * destination pointers. Note that when we get here, C = 0 + */ +.less8: teq len, #0 @ check for zero count + beq .zero + + /* we must have at least one byte. */ + tst dst, #1 @ dst 16-bit aligned + beq .less8_aligned + + /* Align dst */ + load1b ip + sub len, len, #1 + adcs sum, sum, ip, lsl #byte(1) @ update checksum + strb ip, [dst], #1 + tst len, #6 + beq .less8_byteonly + +1: load2b r8, ip + sub len, len, #2 + adcs sum, sum, r8, lsl #byte(0) + strb r8, [dst], #1 + adcs sum, sum, ip, lsl #byte(1) + strb ip, [dst], #1 +.less8_aligned: tst len, #6 + bne 1b +.less8_byteonly: + tst len, #1 + beq .done + load1b r8 + adcs sum, sum, r8, lsl #byte(0) @ update checksum + strb r8, [dst], #1 + b .done + +FN_ENTRY + mov ip, sp + save_regs + sub fp, ip, #4 + + cmp len, #8 @ Ensure that we have at least + blo .less8 @ 8 bytes to copy. + + adds sum, sum, #0 @ C = 0 + tst dst, #3 @ Test destination alignment + blne .dst_unaligned @ align destination, return here + + /* + * Ok, the dst pointer is now 32bit aligned, and we know + * that we must have more than 4 bytes to copy. Note + * that C contains the carry from the dst alignment above. + */ + + tst src, #3 @ Test source alignment + bne .src_not_aligned + + /* Routine for src & dst aligned */ + + bics ip, len, #15 + beq 2f + +1: load4l r4, r5, r6, r7 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + sub ip, ip, #16 + teq ip, #0 + bne 1b + +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r4, r5 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + tst ip, #4 + beq 4f + +3: load1l r4 + str r4, [dst], #4 + adcs sum, sum, r4 + +4: ands len, len, #3 + beq .done + load1l r4 + tst len, #2 +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow! +*/ + beq .exit + adcs sum, sum, r4, push #16 + strb r5, [dst], #1 + mov r5, r4, lsr #byte(1) + strb r5, [dst], #1 + mov r5, r4, lsr #byte(2) +.exit: tst len, #1 + strneb r5, [dst], #1 + andne r5, r5, #255 + adcnes sum, sum, r5, lsl #byte(0) + + /* + * If the dst pointer was not 16-bit aligned, we + * need to rotate the checksum here to get around + * the inefficient byte manipulations in the + * architecture independent code. 
+ */ +.done: adc r0, sum, #0 + ldr sum, [sp, #0] @ dst + tst sum, #1 + movne sum, r0, lsl #8 + orrne r0, sum, r0, lsr #24 + load_regs ea + +.src_not_aligned: + adc sum, sum, #0 @ include C from dst alignment + and ip, src, #3 + bic src, src, #3 + load1l r5 + cmp ip, #2 + beq .src2_aligned + bhi .src3_aligned + mov r4, r5, pull #8 @ C = 0 + bics ip, len, #15 + beq 2f +1: load4l r5, r6, r7, r8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + mov r7, r7, pull #8 + orr r7, r7, r8, push #24 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + mov r4, r8, pull #8 + sub ip, ip, #16 + teq ip, #0 + bne 1b +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r5, r6 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + mov r4, r6, pull #8 + tst ip, #4 + beq 4f +3: load1l r5 + orr r4, r4, r5, push #24 + str r4, [dst], #4 + adcs sum, sum, r4 + mov r4, r5, pull #8 +4: ands len, len, #3 + beq .done +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow +*/ + tst len, #2 + beq .exit + adcs sum, sum, r4, push #16 + strb r5, [dst], #1 + mov r5, r4, lsr #byte(1) + strb r5, [dst], #1 + mov r5, r4, lsr #byte(2) + b .exit + +.src2_aligned: mov r4, r5, pull #16 + adds sum, sum, #0 + bics ip, len, #15 + beq 2f +1: load4l r5, r6, r7, r8 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + mov r7, r7, pull #16 + orr r7, r7, r8, push #16 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + mov r4, r8, pull #16 + sub ip, ip, #16 + teq ip, #0 + bne 1b +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r5, r6 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + mov r4, r6, pull #16 + tst ip, #4 + beq 4f +3: load1l r5 + orr r4, r4, r5, push #16 + str r4, [dst], #4 + adcs sum, sum, r4 + mov r4, r5, pull #16 +4: ands len, len, #3 + beq .done +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow +*/ + tst len, #2 + beq .exit + adcs sum, sum, r4 + strb r5, [dst], #1 + mov r5, r4, lsr #byte(1) + strb r5, [dst], #1 + tst len, #1 + beq .done + load1b r5 + b .exit + +.src3_aligned: mov r4, r5, pull #24 + adds sum, sum, #0 + bics ip, len, #15 + beq 2f +1: load4l r5, r6, r7, r8 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + mov r7, r7, pull #24 + orr r7, r7, r8, push #8 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + mov r4, r8, pull #24 + sub ip, ip, #16 + teq ip, #0 + bne 1b +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r5, r6 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + mov r4, r6, pull #24 + tst ip, #4 + beq 4f +3: load1l r5 + orr r4, r4, r5, push #8 + str r4, [dst], #4 + adcs sum, sum, r4 + mov r4, r5, pull #24 +4: ands len, len, #3 + beq .done +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow +*/ + tst len, #2 + beq .exit + strb r5, [dst], #1 + adcs sum, sum, r4 + load1l r4 +/* mov r5, r4, lsr #byte(0) +FIXME? 
0 Shift anyhow +*/ + strb r5, [dst], #1 + adcs sum, sum, r4, push #24 + mov r5, r4, lsr #byte(1) + b .exit diff -urN linux-2.5.70-bk13/arch/arm26/lib/csumpartialcopyuser.S linux-2.5.70-bk14/arch/arm26/lib/csumpartialcopyuser.S --- linux-2.5.70-bk13/arch/arm26/lib/csumpartialcopyuser.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/csumpartialcopyuser.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,115 @@ +/* + * linux/arch/arm26/lib/csumpartialcopyuser.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + + .text + + .macro save_regs + stmfd sp!, {r1 - r2, r4 - r9, fp, ip, lr, pc} + mov r9, sp, lsr #13 + mov r9, r9, lsl #13 + ldr r9, [r9, #TSK_ADDR_LIMIT] + mov r9, r9, lsr #24 + .endm + + .macro load_regs,flags + ldm\flags fp, {r1, r2, r4-r9, fp, sp, pc}^ + .endm + + .macro load1b, reg1 + tst r9, #0x01 +9999: ldreqbt \reg1, [r0], #1 + ldrneb \reg1, [r0], #1 + .section __ex_table, "a" + .align 3 + .long 9999b, 6001f + .previous + .endm + + .macro load2b, reg1, reg2 + tst r9, #0x01 +9999: ldreqbt \reg1, [r0], #1 + ldrneb \reg1, [r0], #1 +9998: ldreqbt \reg2, [r0], #1 + ldrneb \reg2, [r0], #1 + .section __ex_table, "a" + .long 9999b, 6001f + .long 9998b, 6001f + .previous + .endm + + .macro load1l, reg1 + tst r9, #0x01 +9999: ldreqt \reg1, [r0], #4 + ldrne \reg1, [r0], #4 + .section __ex_table, "a" + .align 3 + .long 9999b, 6001f + .previous + .endm + + .macro load2l, reg1, reg2 + tst r9, #0x01 + ldmneia r0!, {\reg1, \reg2} +9999: ldreqt \reg1, [r0], #4 +9998: ldreqt \reg2, [r0], #4 + .section __ex_table, "a" + .long 9999b, 6001f + .long 9998b, 6001f + .previous + .endm + + .macro load4l, reg1, reg2, reg3, reg4 + tst r9, #0x01 + ldmneia r0!, {\reg1, \reg2, \reg3, \reg4} +9999: ldreqt \reg1, [r0], #4 +9998: ldreqt \reg2, [r0], #4 +9997: ldreqt \reg3, [r0], #4 +9996: ldreqt \reg4, [r0], #4 + .section __ex_table, "a" + .long 9999b, 6001f + .long 9998b, 6001f + .long 9997b, 6001f + .long 9996b, 6001f + .previous + .endm + +/* + * unsigned int + * csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr) + * r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr + * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT + */ + +#define FN_ENTRY ENTRY(csum_partial_copy_from_user) + +#include "csumpartialcopygeneric.S" + +/* + * FIXME: minor buglet here + * We don't return the checksum for the data present in the buffer. To do + * so properly, we would have to add in whatever registers were loaded before + * the fault, which, with the current asm above is not predictable. 
+ */ + .align 4 +6001: mov r4, #-EFAULT + ldr r5, [fp, #4] @ *err_ptr + str r4, [r5] + ldmia sp, {r1, r2} @ retrieve dst, len + add r2, r2, r1 + mov r0, #0 @ zero the buffer +6002: teq r2, r1 + strneb r0, [r1], #1 + bne 6002b + load_regs ea diff -urN linux-2.5.70-bk13/arch/arm26/lib/delay.S linux-2.5.70-bk14/arch/arm26/lib/delay.S --- linux-2.5.70-bk13/arch/arm26/lib/delay.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/delay.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,57 @@ +/* + * linux/arch/arm/lib/delay.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +LC0: .word loops_per_jiffy + +/* + * 0 <= r0 <= 2000 + */ +ENTRY(udelay) + mov r2, #0x6800 + orr r2, r2, #0x00db + mul r1, r0, r2 + ldr r2, LC0 + ldr r2, [r2] + mov r1, r1, lsr #11 + mov r2, r2, lsr #11 + mul r0, r1, r2 + movs r0, r0, lsr #6 + RETINSTR(moveq,pc,lr) + +/* + * loops = (r0 * 0x10c6 * 100 * loops_per_jiffy) / 2^32 + * + * Oh, if only we had a cycle counter... + */ + +@ Delay routine +ENTRY(__delay) + subs r0, r0, #1 +#if 0 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 +#endif + bhi __delay + RETINSTR(mov,pc,lr) diff -urN linux-2.5.70-bk13/arch/arm26/lib/ecard.S linux-2.5.70-bk14/arch/arm26/lib/ecard.S --- linux-2.5.70-bk13/arch/arm26/lib/ecard.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/ecard.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,41 @@ +/* + * linux/arch/arm/lib/ecard.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include /* for CONFIG_CPU_nn */ +#include +#include +#include + +#define CPSR2SPSR(rt) + +@ Purpose: call an expansion card loader to read bytes. +@ Proto : char read_loader(int offset, char *card_base, char *loader); +@ Returns: byte read + +ENTRY(ecard_loader_read) + stmfd sp!, {r4 - r12, lr} + mov r11, r1 + mov r1, r0 + CPSR2SPSR(r0) + mov lr, pc + mov pc, r2 + LOADREGS(fd, sp!, {r4 - r12, pc}) + +@ Purpose: call an expansion card loader to reset the card +@ Proto : void read_loader(int card_base, char *loader); +@ Returns: byte read + +ENTRY(ecard_loader_reset) + stmfd sp!, {r4 - r12, lr} + mov r11, r0 + CPSR2SPSR(r0) + mov lr, pc + add pc, r1, #8 + LOADREGS(fd, sp!, {r4 - r12, pc}) + diff -urN linux-2.5.70-bk13/arch/arm26/lib/findbit.S linux-2.5.70-bk14/arch/arm26/lib/findbit.S --- linux-2.5.70-bk13/arch/arm26/lib/findbit.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/findbit.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,67 @@ +/* + * linux/arch/arm/lib/findbit.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * 16th March 2001 - John Ripley + * Fixed so that "size" is an exclusive not an inclusive quantity. 
+ * All users of these functions expect exclusive sizes, and may + * also call with zero size. + * Reworked by rmk. + */ +#include +#include + .text + +/* + * Purpose : Find a 'zero' bit + * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit); + */ +ENTRY(_find_first_zero_bit_le) + teq r1, #0 + beq 3f + mov r2, #0 +1: ldrb r3, [r0, r2, lsr #3] + eors r3, r3, #0xff @ invert bits + bne .found @ any now set - found zero bit + add r2, r2, #8 @ next bit pointer +2: cmp r2, r1 @ any more? + blo 1b +3: mov r0, r1 @ no free bits + RETINSTR(mov,pc,lr) + +/* + * Purpose : Find next 'zero' bit + * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset) + */ +ENTRY(_find_next_zero_bit_le) + teq r1, #0 + beq 2b + ands ip, r2, #7 + beq 1b @ If new byte, goto old routine + ldrb r3, [r0, r2, lsr #3] + eor r3, r3, #0xff @ now looking for a 1 bit + movs r3, r3, lsr ip @ shift off unused bits + bne .found + orr r2, r2, #7 @ if zero, then no bits here + add r2, r2, #1 @ align bit pointer + b 2b @ loop for next bit + +/* + * One or more bits in the LSB of r3 are assumed to be set. + */ +.found: tst r3, #0x0f + addeq r2, r2, #4 + movne r3, r3, lsl #4 + tst r3, #0x30 + addeq r2, r2, #2 + movne r3, r3, lsl #2 + tst r3, #0x40 + addeq r2, r2, #1 + mov r0, r2 + RETINSTR(mov,pc,lr) + diff -urN linux-2.5.70-bk13/arch/arm26/lib/floppydma.S linux-2.5.70-bk14/arch/arm26/lib/floppydma.S --- linux-2.5.70-bk13/arch/arm26/lib/floppydma.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/floppydma.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,32 @@ +/* + * linux/arch/arm/lib/floppydma.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include + .text + + .global floppy_fiqin_end +ENTRY(floppy_fiqin_start) + subs r9, r9, #1 + ldrgtb r12, [r11, #-4] + ldrleb r12, [r11], #0 + strb r12, [r10], #1 + subs pc, lr, #4 +floppy_fiqin_end: + + .global floppy_fiqout_end +ENTRY(floppy_fiqout_start) + subs r9, r9, #1 + ldrgeb r12, [r10], #1 + movlt r12, #0 + strleb r12, [r11], #0 + subles pc, lr, #4 + strb r12, [r11, #-4] + subs pc, lr, #4 +floppy_fiqout_end: diff -urN linux-2.5.70-bk13/arch/arm26/lib/gcclib.h linux-2.5.70-bk14/arch/arm26/lib/gcclib.h --- linux-2.5.70-bk13/arch/arm26/lib/gcclib.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/gcclib.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,21 @@ +/* gcclib.h -- definitions for various functions 'borrowed' from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#define BITS_PER_UNIT 8 +#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT) + +typedef unsigned int UQItype __attribute__ ((mode (QI))); +typedef int SItype __attribute__ ((mode (SI))); +typedef unsigned int USItype __attribute__ ((mode (SI))); +typedef int DItype __attribute__ ((mode (DI))); +typedef int word_type __attribute__ ((mode (__word__))); +typedef unsigned int UDItype __attribute__ ((mode (DI))); + +struct DIstruct {SItype low, high;}; + +typedef union +{ + struct DIstruct s; + DItype ll; +} DIunion; + diff -urN linux-2.5.70-bk13/arch/arm26/lib/getuser.S linux-2.5.70-bk14/arch/arm26/lib/getuser.S --- linux-2.5.70-bk13/arch/arm26/lib/getuser.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/getuser.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,111 @@ +/* + * linux/arch/arm/lib/getuser.S + * + * Copyright (C) 2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Idea from x86 version, (C) Copyright 1998 Linus Torvalds + * + * These functions have a non-standard call interface to make them more + * efficient, especially as they return an error value in addition to + * the "real" return value. + * + * __get_user_X + * + * Inputs: r0 contains the address + * Outputs: r0 is the error code + * r1, r2 contains the zero-extended value + * lr corrupted + * + * No other registers must be altered. (see include/asm-arm/uaccess.h + * for specific ASM register usage). + * + * Note that ADDR_LIMIT is either 0 or 0xc0000000. + * Note also that it is intended that __get_user_bad is not global. + */ +#include +#include + + .global __get_user_1 +__get_user_1: + bic r1, sp, #0x1f00 + bic r1, r1, #0x00ff + str lr, [sp, #-4]! + ldr r1, [r1, #TI_ADDR_LIMIT] + sub r1, r1, #1 + cmp r0, r1 + bge __get_user_bad + cmp r0, #0x02000000 +1: ldrlsbt r1, [r0] + ldrgeb r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __get_user_2 +__get_user_2: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #2 + cmp r0, r2 + bge __get_user_bad + cmp r0, #0x02000000 +2: ldrlsbt r1, [r0], #1 +3: ldrlsbt r2, [r0] + ldrgeb r1, [r0], #1 + ldrgeb r2, [r0] + orr r1, r1, r2, lsl #8 + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __get_user_4 +__get_user_4: + bic r1, sp, #0x1f00 + bic r1, r1, #0x00ff + str lr, [sp, #-4]! 
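As the header comment above says, these helpers take the user address in r0 and hand back an error code in r0 with the zero-extended value in r1 (r1 and r2 for the 8-byte case); the limit check uses the addr_limit reached through thread_info, and a faulting load is converted to -EFAULT (-14) by the __ex_table fixups at the end of the file. In plain C the 4-byte path behaves roughly like the sketch below; the function name is illustrative, and current_thread_info() with its addr_limit field is assumed from the kernel headers.

static int get_user_4_model(unsigned int *val, const unsigned int *uaddr)
{
        unsigned long limit = (unsigned long)current_thread_info()->addr_limit;

        if ((unsigned long)uaddr >= limit - sizeof(*val))
                return -14;             /* -EFAULT, as in __get_user_bad */

        /* the real code uses ldrt for genuine user addresses and relies
         * on the exception table to catch a faulting access */
        *val = *uaddr;
        return 0;
}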
+ ldr r1, [r1, #TI_ADDR_LIMIT] + sub r1, r1, #4 + cmp r0, r1 + bge __get_user_bad + cmp r0, #0x02000000 +4: ldrlst r1, [r0] + ldrge r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __get_user_8 +__get_user_8: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #8 + cmp r0, r2 + bge __get_user_bad_8 + cmp r0, #0x02000000 +5: ldrlst r1, [r0], #4 +6: ldrlst r2, [r0] + ldrge r1, [r0], #4 + ldrge r2, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + +__get_user_bad_8: + mov r2, #0 +__get_user_bad: + mov r1, #0 + mov r0, #-14 + ldmfd sp!, {pc}^ + +.section __ex_table, "a" + .long 1b, __get_user_bad + .long 2b, __get_user_bad + .long 3b, __get_user_bad + .long 4b, __get_user_bad + .long 5b, __get_user_bad_8 + .long 6b, __get_user_bad_8 +.previous diff -urN linux-2.5.70-bk13/arch/arm26/lib/io-acorn.S linux-2.5.70-bk14/arch/arm26/lib/io-acorn.S --- linux-2.5.70-bk13/arch/arm26/lib/io-acorn.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/io-acorn.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,71 @@ +/* + * linux/arch/arm/lib/io-acorn.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include /* for CONFIG_CPU_nn */ +#include +#include +#include + + .text + .align + + .equ diff_pcio_base, PCIO_BASE - IO_BASE + + .macro outw2 rd + mov r8, \rd, lsl #16 + orr r8, r8, r8, lsr #16 + str r8, [r3, r0, lsl #2] + mov r8, \rd, lsr #16 + orr r8, r8, r8, lsl #16 + str r8, [r3, r0, lsl #2] + .endm + + .macro inw2 rd, mask, temp + ldr \rd, [r0] + and \rd, \rd, \mask + ldr \temp, [r0] + orr \rd, \rd, \temp, lsl #16 + .endm + + .macro addr rd + tst \rd, #0x80000000 + mov \rd, \rd, lsl #2 + add \rd, \rd, #IO_BASE + addeq \rd, \rd, #diff_pcio_base + .endm + +.iosl_warning: + .ascii "<4>insl/outsl not implemented, called from %08lX\0" + .align + +/* + * These make no sense on Acorn machines. + * Print a warning message. + */ +ENTRY(insl) +ENTRY(outsl) + adr r0, .iosl_warning + mov r1, lr + b printk + +@ Purpose: write a memc register +@ Proto : void memc_write(int register, int value); +@ Returns: nothing + +ENTRY(memc_write) + cmp r0, #7 + RETINSTR(movgt,pc,lr) + mov r0, r0, lsl #17 + mov r1, r1, lsl #15 + mov r1, r1, lsr #17 + orr r0, r0, r1, lsl #2 + add r0, r0, #0x03600000 + strb r0, [r0] + RETINSTR(mov,pc,lr) + diff -urN linux-2.5.70-bk13/arch/arm26/lib/io-readsb.S linux-2.5.70-bk14/arch/arm26/lib/io-readsb.S --- linux-2.5.70-bk13/arch/arm26/lib/io-readsb.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/io-readsb.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,116 @@ +/* + * linux/arch/arm/lib/io-readsb.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +.insb_align: rsb ip, ip, #4 + cmp ip, r2 + movgt ip, r2 + cmp ip, #2 + ldrb r3, [r0] + strb r3, [r1], #1 + ldrgeb r3, [r0] + strgeb r3, [r1], #1 + ldrgtb r3, [r0] + strgtb r3, [r1], #1 + subs r2, r2, ip + bne .insb_aligned + +ENTRY(__raw_readsb) + teq r2, #0 @ do we have to check for the zero len? 
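__raw_readsb, whose entry point starts here, reads r2 bytes from the single device register at r0 into the buffer at r1; the byte loads are merged into 32-bit words and stored with stmia purely to cut loop overhead, so the observable behaviour is a repeated byte read. A deliberately small C model (names illustrative):

static void raw_readsb_model(const volatile unsigned char *port,
                             unsigned char *buf, int count)
{
        /* the same device register is read every iteration; only the
         * destination pointer advances */
        while (count-- > 0)
                *buf++ = *port;
}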
+ moveq pc, lr + ands ip, r1, #3 + bne .insb_align + +.insb_aligned: stmfd sp!, {r4 - r6, lr} + + subs r2, r2, #16 + bmi .insb_no_16 + +.insb_16_lp: ldrb r3, [r0] + ldrb r4, [r0] + orr r3, r3, r4, lsl #8 + ldrb r4, [r0] + orr r3, r3, r4, lsl #16 + ldrb r4, [r0] + orr r3, r3, r4, lsl #24 + ldrb r4, [r0] + ldrb r5, [r0] + orr r4, r4, r5, lsl #8 + ldrb r5, [r0] + orr r4, r4, r5, lsl #16 + ldrb r5, [r0] + orr r4, r4, r5, lsl #24 + ldrb r5, [r0] + ldrb r6, [r0] + orr r5, r5, r6, lsl #8 + ldrb r6, [r0] + orr r5, r5, r6, lsl #16 + ldrb r6, [r0] + orr r5, r5, r6, lsl #24 + ldrb r6, [r0] + ldrb ip, [r0] + orr r6, r6, ip, lsl #8 + ldrb ip, [r0] + orr r6, r6, ip, lsl #16 + ldrb ip, [r0] + orr r6, r6, ip, lsl #24 + stmia r1!, {r3 - r6} + + subs r2, r2, #16 + bpl .insb_16_lp + + tst r2, #15 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + +.insb_no_16: tst r2, #8 + beq .insb_no_8 + + ldrb r3, [r0] + ldrb r4, [r0] + orr r3, r3, r4, lsl #8 + ldrb r4, [r0] + orr r3, r3, r4, lsl #16 + ldrb r4, [r0] + orr r3, r3, r4, lsl #24 + ldrb r4, [r0] + ldrb r5, [r0] + orr r4, r4, r5, lsl #8 + ldrb r5, [r0] + orr r4, r4, r5, lsl #16 + ldrb r5, [r0] + orr r4, r4, r5, lsl #24 + stmia r1!, {r3, r4} + +.insb_no_8: tst r2, #4 + beq .insb_no_4 + + ldrb r3, [r0] + ldrb r4, [r0] + orr r3, r3, r4, lsl #8 + ldrb r4, [r0] + orr r3, r3, r4, lsl #16 + ldrb r4, [r0] + orr r3, r3, r4, lsl #24 + str r3, [r1], #4 + +.insb_no_4: ands r2, r2, #3 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + + cmp r2, #2 + ldrb r3, [r0] + strb r3, [r1], #1 + ldrgeb r3, [r0] + strgeb r3, [r1], #1 + ldrgtb r3, [r0] + strgtb r3, [r1] + + LOADREGS(fd, sp!, {r4 - r6, pc}) diff -urN linux-2.5.70-bk13/arch/arm26/lib/io-readsl-armv3.S linux-2.5.70-bk14/arch/arm26/lib/io-readsl-armv3.S --- linux-2.5.70-bk13/arch/arm26/lib/io-readsl-armv3.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/io-readsl-armv3.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,78 @@ +/* + * linux/arch/arm/lib/io-readsl-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +/* + * Note that some reads can be aligned on half-word boundaries. + */ +ENTRY(__raw_readsl) + teq r2, #0 @ do we have to check for the zero len? 
+ moveq pc, lr + ands ip, r1, #3 + bne 2f + +1: ldr r3, [r0] + str r3, [r1], #4 + subs r2, r2, #1 + bne 1b + mov pc, lr + +2: cmp ip, #2 + ldr ip, [r0] + blt 4f + bgt 6f + + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 +3: subs r2, r2, #1 + ldrne r3, [r0] + orrne ip, ip, r3, lsl #16 + strne ip, [r1], #4 + movne ip, r3, lsr #16 + bne 3b + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov pc, lr + +4: strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 +5: subs r2, r2, #1 + ldrne r3, [r0] + orrne ip, ip, r3, lsl #8 + strne ip, [r1], #4 + movne ip, r3, lsr #24 + bne 5b + strb ip, [r1], #1 + mov pc, lr + +6: strb ip, [r1], #1 + mov ip, ip, lsr #8 +7: subs r2, r2, #1 + ldrne r3, [r0] + orrne ip, ip, r3, lsl #24 + strne ip, [r1], #4 + movne ip, r3, lsr #8 + bne 7b + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov pc, lr + diff -urN linux-2.5.70-bk13/arch/arm26/lib/io-readsw-armv3.S linux-2.5.70-bk14/arch/arm26/lib/io-readsw-armv3.S --- linux-2.5.70-bk13/arch/arm26/lib/io-readsw-armv3.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/io-readsw-armv3.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,107 @@ +/* + * linux/arch/arm/lib/io-readsw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +.insw_bad_alignment: + adr r0, .insw_bad_align_msg + mov r2, lr + b panic +.insw_bad_align_msg: + .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.insw_align: tst r1, #1 + bne .insw_bad_alignment + + ldr r3, [r0] + strb r3, [r1], #1 + mov r3, r3, lsr #8 + strb r3, [r1], #1 + + subs r2, r2, #1 + RETINSTR(moveq, pc, lr) + +ENTRY(__raw_readsw) + teq r2, #0 @ do we have to check for the zero len? 
+ moveq pc, lr + tst r1, #3 + bne .insw_align + +.insw_aligned: mov ip, #0xff + orr ip, ip, ip, lsl #8 + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .no_insw_8 + +.insw_8_lp: ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + ldr r5, [r0] + and r5, r5, ip + ldr r6, [r0] + orr r5, r5, r6, lsl #16 + + ldr r6, [r0] + and r6, r6, ip + ldr lr, [r0] + orr r6, r6, lr, lsl #16 + + stmia r1!, {r3 - r6} + + subs r2, r2, #8 + bpl .insw_8_lp + + tst r2, #7 + LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) + +.no_insw_8: tst r2, #4 + beq .no_insw_4 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + stmia r1!, {r3, r4} + +.no_insw_4: tst r2, #2 + beq .no_insw_2 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + str r3, [r1], #4 + +.no_insw_2: tst r2, #1 + ldrne r3, [r0] + strneb r3, [r1], #1 + movne r3, r3, lsr #8 + strneb r3, [r1] + + LOADREGS(fd, sp!, {r4, r5, r6, pc}) + + diff -urN linux-2.5.70-bk13/arch/arm26/lib/io-writesb.S linux-2.5.70-bk14/arch/arm26/lib/io-writesb.S --- linux-2.5.70-bk13/arch/arm26/lib/io-writesb.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/io-writesb.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,122 @@ +/* + * linux/arch/arm/lib/io-writesb.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +.outsb_align: rsb ip, ip, #4 + cmp ip, r2 + movgt ip, r2 + cmp ip, #2 + ldrb r3, [r1], #1 + strb r3, [r0] + ldrgeb r3, [r1], #1 + strgeb r3, [r0] + ldrgtb r3, [r1], #1 + strgtb r3, [r0] + subs r2, r2, ip + bne .outsb_aligned + +ENTRY(__raw_writesb) + teq r2, #0 @ do we have to check for the zero len? 
+ moveq pc, lr + ands ip, r1, #3 + bne .outsb_align + +.outsb_aligned: stmfd sp!, {r4 - r6, lr} + + subs r2, r2, #16 + bmi .outsb_no_16 + +.outsb_16_lp: ldmia r1!, {r3 - r6} + + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + + strb r5, [r0] + mov r5, r5, lsr #8 + strb r5, [r0] + mov r5, r5, lsr #8 + strb r5, [r0] + mov r5, r5, lsr #8 + strb r5, [r0] + + strb r6, [r0] + mov r6, r6, lsr #8 + strb r6, [r0] + mov r6, r6, lsr #8 + strb r6, [r0] + mov r6, r6, lsr #8 + strb r6, [r0] + + subs r2, r2, #16 + bpl .outsb_16_lp + + tst r2, #15 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + +.outsb_no_16: tst r2, #8 + beq .outsb_no_8 + + ldmia r1!, {r3, r4} + + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + +.outsb_no_8: tst r2, #4 + beq .outsb_no_4 + + ldr r3, [r1], #4 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + +.outsb_no_4: ands r2, r2, #3 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + + cmp r2, #2 + ldrb r3, [r1], #1 + strb r3, [r0] + ldrgeb r3, [r1], #1 + strgeb r3, [r0] + ldrgtb r3, [r1] + strgtb r3, [r0] + + LOADREGS(fd, sp!, {r4 - r6, pc}) diff -urN linux-2.5.70-bk13/arch/arm26/lib/io-writesl.S linux-2.5.70-bk14/arch/arm26/lib/io-writesl.S --- linux-2.5.70-bk13/arch/arm26/lib/io-writesl.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/io-writesl.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,56 @@ +/* + * linux/arch/arm/lib/io-writesl.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +ENTRY(__raw_writesl) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + ands ip, r1, #3 + bne 2f + +1: ldr r3, [r1], #4 + str r3, [r0] + subs r2, r2, #1 + bne 1b + mov pc, lr + +2: bic r1, r1, #3 + cmp ip, #2 + ldr r3, [r1], #4 + bgt 4f + blt 5f + +3: mov ip, r3, lsr #16 + ldr r3, [r1], #4 + orr ip, ip, r3, lsl #16 + str ip, [r0] + subs r2, r2, #1 + bne 3b + mov pc, lr + +4: mov ip, r3, lsr #24 + ldr r3, [r1], #4 + orr ip, ip, r3, lsl #8 + str ip, [r0] + subs r2, r2, #1 + bne 4b + mov pc, lr + +5: mov ip, r3, lsr #8 + ldr r3, [r1], #4 + orr ip, ip, r3, lsl #24 + str ip, [r0] + subs r2, r2, #1 + bne 5b + mov pc, lr + + diff -urN linux-2.5.70-bk13/arch/arm26/lib/io-writesw-armv3.S linux-2.5.70-bk14/arch/arm26/lib/io-writesw-armv3.S --- linux-2.5.70-bk13/arch/arm26/lib/io-writesw-armv3.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/io-writesw-armv3.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,127 @@ +/* + * linux/arch/arm/lib/io-writesw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include + +.outsw_bad_alignment: + adr r0, .outsw_bad_align_msg + mov r2, lr + b panic +.outsw_bad_align_msg: + .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.outsw_align: tst r1, #1 + bne .outsw_bad_alignment + + add r1, r1, #2 + + ldr r3, [r1, #-4] + mov r3, r3, lsr #16 + orr r3, r3, r3, lsl #16 + str r3, [r0] + subs r2, r2, #1 + RETINSTR(moveq, pc, lr) + +ENTRY(__raw_writesw) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + tst r1, #3 + bne .outsw_align + +.outsw_aligned: stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .no_outsw_8 + +.outsw_8_lp: ldmia r1!, {r3, r4, r5, r6} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r5, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r5, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r6, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r6, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + subs r2, r2, #8 + bpl .outsw_8_lp + + tst r2, #7 + LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) + +.no_outsw_8: tst r2, #4 + beq .no_outsw_4 + + ldmia r1!, {r3, r4} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.no_outsw_4: tst r2, #2 + beq .no_outsw_2 + + ldr r3, [r1], #4 + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.no_outsw_2: tst r2, #1 + + ldrne r3, [r1] + + movne ip, r3, lsl #16 + orrne ip, ip, ip, lsr #16 + strne ip, [r0] + + LOADREGS(fd, sp!, {r4, r5, r6, pc}) diff -urN linux-2.5.70-bk13/arch/arm26/lib/kbd.c linux-2.5.70-bk14/arch/arm26/lib/kbd.c --- linux-2.5.70-bk13/arch/arm26/lib/kbd.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/kbd.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,279 @@ +#include +#include +//#include +#include + +/* + * Translation of escaped scancodes to keycodes. + * This is now user-settable. + * The keycodes 1-88,96-111,119 are fairly standard, and + * should probably not be changed - changing might confuse X. + * X also interprets scancode 0x5d (KEY_Begin). + * + * For 1-88 keycode equals scancode. + */ + +#define E0_KPENTER 96 +#define E0_RCTRL 97 +#define E0_KPSLASH 98 +#define E0_PRSCR 99 +#define E0_RALT 100 +#define E0_BREAK 101 /* (control-pause) */ +#define E0_HOME 102 +#define E0_UP 103 +#define E0_PGUP 104 +#define E0_LEFT 105 +#define E0_RIGHT 106 +#define E0_END 107 +#define E0_DOWN 108 +#define E0_PGDN 109 +#define E0_INS 110 +#define E0_DEL 111 + +/* for USB 106 keyboard */ +#define E0_YEN 124 +#define E0_BACKSLASH 89 + + +#define E1_PAUSE 119 + +/* + * The keycodes below are randomly located in 89-95,112-118,120-127. + * They could be thrown away (and all occurrences below replaced by 0), + * but that would force many users to use the `setkeycodes' utility, where + * they needed not before. It does not matter that there are duplicates, as + * long as no duplication occurs for any single keyboard. + */ +#define SC_LIM 89 + +#define FOCUS_PF1 85 /* actual code! 
*/ +#define FOCUS_PF2 89 +#define FOCUS_PF3 90 +#define FOCUS_PF4 91 +#define FOCUS_PF5 92 +#define FOCUS_PF6 93 +#define FOCUS_PF7 94 +#define FOCUS_PF8 95 +#define FOCUS_PF9 120 +#define FOCUS_PF10 121 +#define FOCUS_PF11 122 +#define FOCUS_PF12 123 + +#define JAP_86 124 +/* tfj@olivia.ping.dk: + * The four keys are located over the numeric keypad, and are + * labelled A1-A4. It's an rc930 keyboard, from + * Regnecentralen/RC International, Now ICL. + * Scancodes: 59, 5a, 5b, 5c. + */ +#define RGN1 124 +#define RGN2 125 +#define RGN3 126 +#define RGN4 127 + +static unsigned char high_keys[128 - SC_LIM] = { + RGN1, RGN2, RGN3, RGN4, 0, 0, 0, /* 0x59-0x5f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x60-0x67 */ + 0, 0, 0, 0, 0, FOCUS_PF11, 0, FOCUS_PF12, /* 0x68-0x6f */ + 0, 0, 0, FOCUS_PF2, FOCUS_PF9, 0, 0, FOCUS_PF3, /* 0x70-0x77 */ + FOCUS_PF4, FOCUS_PF5, FOCUS_PF6, FOCUS_PF7, /* 0x78-0x7b */ + FOCUS_PF8, JAP_86, FOCUS_PF10, 0 /* 0x7c-0x7f */ +}; + +/* BTC */ +#define E0_MACRO 112 +/* LK450 */ +#define E0_F13 113 +#define E0_F14 114 +#define E0_HELP 115 +#define E0_DO 116 +#define E0_F17 117 +#define E0_KPMINPLUS 118 +/* + * My OmniKey generates e0 4c for the "OMNI" key and the + * right alt key does nada. [kkoller@nyx10.cs.du.edu] + */ +#define E0_OK 124 +/* + * New microsoft keyboard is rumoured to have + * e0 5b (left window button), e0 5c (right window button), + * e0 5d (menu button). [or: LBANNER, RBANNER, RMENU] + * [or: Windows_L, Windows_R, TaskMan] + */ +#define E0_MSLW 125 +#define E0_MSRW 126 +#define E0_MSTM 127 + +static unsigned char e0_keys[128] = { + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00-0x07 */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x08-0x0f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10-0x17 */ + 0, 0, 0, 0, E0_KPENTER, E0_RCTRL, 0, 0, /* 0x18-0x1f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20-0x27 */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x28-0x2f */ + 0, 0, 0, 0, 0, E0_KPSLASH, 0, E0_PRSCR, /* 0x30-0x37 */ + E0_RALT, 0, 0, 0, 0, E0_F13, E0_F14, E0_HELP, /* 0x38-0x3f */ + E0_DO, E0_F17, 0, 0, 0, 0, E0_BREAK, E0_HOME, /* 0x40-0x47 */ + E0_UP, E0_PGUP, 0, E0_LEFT, E0_OK, E0_RIGHT, E0_KPMINPLUS, E0_END, /* 0x48-0x4f */ + E0_DOWN, E0_PGDN, E0_INS, E0_DEL, 0, 0, 0, 0, /* 0x50-0x57 */ + 0, 0, 0, E0_MSLW, E0_MSRW, E0_MSTM, 0, 0, /* 0x58-0x5f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x60-0x67 */ + 0, 0, 0, 0, 0, 0, 0, E0_MACRO, /* 0x68-0x6f */ + //0, 0, 0, 0, 0, 0, 0, 0, /* 0x70-0x77 */ + 0, 0, 0, 0, 0, E0_BACKSLASH, 0, 0, /* 0x70-0x77 */ + 0, 0, 0, E0_YEN, 0, 0, 0, 0 /* 0x78-0x7f */ +}; + +static int gen_setkeycode(unsigned int scancode, unsigned int keycode) +{ + if (scancode < SC_LIM || scancode > 255 || keycode > 127) + return -EINVAL; + if (scancode < 128) + high_keys[scancode - SC_LIM] = keycode; + else + e0_keys[scancode - 128] = keycode; + return 0; +} + +static int gen_getkeycode(unsigned int scancode) +{ + return + (scancode < SC_LIM || scancode > 255) ? -EINVAL : + (scancode < + 128) ? high_keys[scancode - SC_LIM] : e0_keys[scancode - 128]; +} + +static int +gen_translate(unsigned char scancode, unsigned char *keycode, char raw_mode) +{ + static int prev_scancode = 0; + + /* special prefix scancodes.. */ + if (scancode == 0xe0 || scancode == 0xe1) { + prev_scancode = scancode; + return 0; + } + + /* 0xFF is sent by a few keyboards, ignore it. 
0x00 is error */ + if (scancode == 0x00 || scancode == 0xff) { + prev_scancode = 0; + return 0; + } + + scancode &= 0x7f; + + if (prev_scancode) { + /* + * usually it will be 0xe0, but a Pause key generates + * e1 1d 45 e1 9d c5 when pressed, and nothing when released + */ + if (prev_scancode != 0xe0) { + if (prev_scancode == 0xe1 && scancode == 0x1d) { + prev_scancode = 0x100; + return 0; + } + else if (prev_scancode == 0x100 + && scancode == 0x45) { + *keycode = E1_PAUSE; + prev_scancode = 0; + } else { +#ifdef KBD_REPORT_UNKN + if (!raw_mode) + printk(KERN_INFO + "keyboard: unknown e1 escape sequence\n"); +#endif + prev_scancode = 0; + return 0; + } + } else { + prev_scancode = 0; + /* + * The keyboard maintains its own internal caps lock and + * num lock statuses. In caps lock mode E0 AA precedes make + * code and E0 2A follows break code. In num lock mode, + * E0 2A precedes make code and E0 AA follows break code. + * We do our own book-keeping, so we will just ignore these. + */ + /* + * For my keyboard there is no caps lock mode, but there are + * both Shift-L and Shift-R modes. The former mode generates + * E0 2A / E0 AA pairs, the latter E0 B6 / E0 36 pairs. + * So, we should also ignore the latter. - aeb@cwi.nl + */ + if (scancode == 0x2a || scancode == 0x36) + return 0; + + if (e0_keys[scancode]) + *keycode = e0_keys[scancode]; + else { +#ifdef KBD_REPORT_UNKN + if (!raw_mode) + printk(KERN_INFO + "keyboard: unknown scancode e0 %02x\n", + scancode); +#endif + return 0; + } + } + } else if (scancode >= SC_LIM) { + /* This happens with the FOCUS 9000 keyboard + Its keys PF1..PF12 are reported to generate + 55 73 77 78 79 7a 7b 7c 74 7e 6d 6f + Moreover, unless repeated, they do not generate + key-down events, so we have to zero up_flag below */ + /* Also, Japanese 86/106 keyboards are reported to + generate 0x73 and 0x7d for \ - and \ | respectively. */ + /* Also, some Brazilian keyboard is reported to produce + 0x73 and 0x7e for \ ? and KP-dot, respectively. 
*/ + + *keycode = high_keys[scancode - SC_LIM]; + + if (!*keycode) { + if (!raw_mode) { +#ifdef KBD_REPORT_UNKN + printk(KERN_INFO + "keyboard: unrecognized scancode (%02x)" + " - ignored\n", scancode); +#endif + } + return 0; + } + } else + *keycode = scancode; + return 1; +} + +static char gen_unexpected_up(unsigned char keycode) +{ + /* unexpected, but this can happen: maybe this was a key release for a + FOCUS 9000 PF key; if we want to see it, we have to clear up_flag */ + if (keycode >= SC_LIM || keycode == 85) + return 0; + else + return 0200; +} + +/* + * These are the default mappings + */ +int (*k_setkeycode)(unsigned int, unsigned int) = gen_setkeycode; +int (*k_getkeycode)(unsigned int) = gen_getkeycode; +int (*k_translate)(unsigned char, unsigned char *, char) = gen_translate; +char (*k_unexpected_up)(unsigned char) = gen_unexpected_up; +void (*k_leds)(unsigned char); + +/* Simple translation table for the SysRq keys */ + +#ifdef CONFIG_MAGIC_SYSRQ +static unsigned char gen_sysrq_xlate[128] = + "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ + "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ + "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ + "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */ + "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */ + "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ + "\r\000/"; /* 0x60 - 0x6f */ + +unsigned char *k_sysrq_xlate = gen_sysrq_xlate; +int k_sysrq_key = 0x54; +#endif diff -urN linux-2.5.70-bk13/arch/arm26/lib/lib1funcs.S linux-2.5.70-bk14/arch/arm26/lib/lib1funcs.S --- linux-2.5.70-bk13/arch/arm26/lib/lib1funcs.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/lib1funcs.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,314 @@ +@ libgcc1 routines for ARM cpu. +@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk) + +/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file with other programs, and to distribute +those programs without any restriction coming from the use of this +file. (The General Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into another program.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. 
+ */ +/* This code is derived from gcc 2.95.3 */ +/* I Molton 29/07/01 */ + +#include +#include +#include +#include + +#define RET movs +#define RETc(x) mov##x##s +#define RETCOND ^ + +dividend .req r0 +divisor .req r1 +result .req r2 +overdone .req r2 +curbit .req r3 +ip .req r12 +sp .req r13 +lr .req r14 +pc .req r15 + +ENTRY(__udivsi3) + cmp divisor, #0 + beq Ldiv0 + mov curbit, #1 + mov result, #0 + cmp dividend, divisor + bcc Lgot_result_udivsi3 +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions, and note which bits + @ are done in the result. On the final pass, this may subtract + @ too much from the dividend, but the result will be ok, since the + @ "bit" will have been shifted out at the bottom. + cmp dividend, divisor + subcs dividend, dividend, divisor + orrcs result, result, curbit + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs result, result, curbit, lsr #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs result, result, curbit, lsr #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs result, result, curbit, lsr #3 + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? + movne divisor, divisor, lsr #4 + bne 3b +Lgot_result_udivsi3: + mov r0, result + RET pc, lr + +Ldiv0: + str lr, [sp, #-4]! + bl __div0 + mov r0, #0 @ about as wrong as it could be + ldmia sp!, {pc}RETCOND + +/* __umodsi3 ----------------------- */ + +ENTRY(__umodsi3) + cmp divisor, #0 + beq Ldiv0 + mov curbit, #1 + cmp dividend, divisor + RETc(cc) pc, lr +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions. On the final pass, this may + @ subtract too much from the dividend, so keep track of which + @ subtractions are done, we can fix them up afterwards... + mov overdone, #0 + cmp dividend, divisor + subcs dividend, dividend, divisor + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs overdone, overdone, curbit, ror #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs overdone, overdone, curbit, ror #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs overdone, overdone, curbit, ror #3 + mov ip, curbit + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? 
+ movne divisor, divisor, lsr #4 + bne 3b + + @ Any subtractions that we should not have done will be recorded in + @ the top three bits of "overdone". Exactly which were not needed + @ are governed by the position of the bit, stored in ip. + @ If we terminated early, because dividend became zero, + @ then none of the below will match, since the bit in ip will not be + @ in the bottom nibble. + ands overdone, overdone, #0xe0000000 + RETc(eq) pc, lr @ No fixups needed + tst overdone, ip, ror #3 + addne dividend, dividend, divisor, lsr #3 + tst overdone, ip, ror #2 + addne dividend, dividend, divisor, lsr #2 + tst overdone, ip, ror #1 + addne dividend, dividend, divisor, lsr #1 + RET pc, lr + +ENTRY(__divsi3) + eor ip, dividend, divisor @ Save the sign of the result. + mov curbit, #1 + mov result, #0 + cmp divisor, #0 + rsbmi divisor, divisor, #0 @ Loops below use unsigned. + beq Ldiv0 + cmp dividend, #0 + rsbmi dividend, dividend, #0 + cmp dividend, divisor + bcc Lgot_result_divsi3 + +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions, and note which bits + @ are done in the result. On the final pass, this may subtract + @ too much from the dividend, but the result will be ok, since the + @ "bit" will have been shifted out at the bottom. + cmp dividend, divisor + subcs dividend, dividend, divisor + orrcs result, result, curbit + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs result, result, curbit, lsr #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs result, result, curbit, lsr #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs result, result, curbit, lsr #3 + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? + movne divisor, divisor, lsr #4 + bne 3b +Lgot_result_divsi3: + mov r0, result + cmp ip, #0 + rsbmi r0, r0, #0 + RET pc, lr + +ENTRY(__modsi3) + mov curbit, #1 + cmp divisor, #0 + rsbmi divisor, divisor, #0 @ Loops below use unsigned. + beq Ldiv0 + @ Need to save the sign of the dividend, unfortunately, we need + @ ip later on; this is faster than pushing lr and using that. + str dividend, [sp, #-4]! + cmp dividend, #0 + rsbmi dividend, dividend, #0 + cmp dividend, divisor + bcc Lgot_result_modsi3 + +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions. 
On the final pass, this may + @ subtract too much from the dividend, so keep track of which + @ subtractions are done, we can fix them up afterwards... + mov overdone, #0 + cmp dividend, divisor + subcs dividend, dividend, divisor + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs overdone, overdone, curbit, ror #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs overdone, overdone, curbit, ror #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs overdone, overdone, curbit, ror #3 + mov ip, curbit + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? + movne divisor, divisor, lsr #4 + bne 3b + + @ Any subtractions that we should not have done will be recorded in + @ the top three bits of "overdone". Exactly which were not needed + @ are governed by the position of the bit, stored in ip. + @ If we terminated early, because dividend became zero, + @ then none of the below will match, since the bit in ip will not be + @ in the bottom nibble. + ands overdone, overdone, #0xe0000000 + beq Lgot_result_modsi3 + tst overdone, ip, ror #3 + addne dividend, dividend, divisor, lsr #3 + tst overdone, ip, ror #2 + addne dividend, dividend, divisor, lsr #2 + tst overdone, ip, ror #1 + addne dividend, dividend, divisor, lsr #1 +Lgot_result_modsi3: + ldr ip, [sp], #4 + cmp ip, #0 + rsbmi dividend, dividend, #0 + RET pc, lr diff -urN linux-2.5.70-bk13/arch/arm26/lib/longlong.h linux-2.5.70-bk14/arch/arm26/lib/longlong.h --- linux-2.5.70-bk13/arch/arm26/lib/longlong.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/longlong.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,184 @@ +/* longlong.h -- based on code from gcc-2.95.3 + + definitions for mixed size 32/64 bit arithmetic. + Copyright (C) 1991, 92, 94, 95, 96, 1997, 1998 Free Software Foundation, Inc. + + This definition file is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public + License as published by the Free Software Foundation; either + version 2, or (at your option) any later version. + + This definition file is distributed in the hope that it will be + useful, but WITHOUT ANY WARRANTY; without even the implied + warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* Borrowed from GCC 2.95.3, I Molton 29/07/01 */ + +#ifndef SI_TYPE_SIZE +#define SI_TYPE_SIZE 32 +#endif + +#define __BITS4 (SI_TYPE_SIZE / 4) +#define __ll_B (1L << (SI_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((USItype) (t) % __ll_B) +#define __ll_highpart(t) ((USItype) (t) / __ll_B) + +/* Define auxiliary asm macros. + + 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) + multiplies two USItype integers MULTIPLER and MULTIPLICAND, + and generates a two-part USItype product in HIGH_PROD and + LOW_PROD. + + 2) __umulsidi3(a,b) multiplies two USItype integers A and B, + and returns a UDItype product. This is just a variant of umul_ppmm. 
+ + 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator) divides a two-word unsigned integer, composed by the + integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and + places the quotient in QUOTIENT and the remainder in REMAINDER. + HIGH_NUMERATOR must be less than DENOMINATOR for correct operation. + If, in addition, the most significant bit of DENOMINATOR must be 1, + then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1. + + 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator). Like udiv_qrnnd but the numbers are signed. The + quotient is rounded towards 0. + + 5) count_leading_zeros(count, x) counts the number of zero-bits from + the msb to the first non-zero bit. This is the number of steps X + needs to be shifted left to set the msb. Undefined for X == 0. + + 6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, + high_addend_2, low_addend_2) adds two two-word unsigned integers, + composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and + LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and + LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is + lost. + + 7) sub_ddmmss(high_difference, low_difference, high_minuend, + low_minuend, high_subtrahend, low_subtrahend) subtracts two + two-word unsigned integers, composed by HIGH_MINUEND_1 and + LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and LOW_SUBTRAHEND_2 + respectively. The result is placed in HIGH_DIFFERENCE and + LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, + and is lost. + + If any of these macros are left undefined for a particular CPU, + C macros are used. */ + +#if defined (__arm__) +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("adds %1, %4, %5 \n\ + adc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%r" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "%r" ((USItype) (al)), \ + "rI" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subs %1, %4, %5 \n\ + sbc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "r" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "r" ((USItype) (al)), \ + "rI" ((USItype) (bl))) +#define umul_ppmm(xh, xl, a, b) \ +{register USItype __t0, __t1, __t2; \ + __asm__ ("%@ Inlined umul_ppmm \n\ + mov %2, %5, lsr #16 \n\ + mov %0, %6, lsr #16 \n\ + bic %3, %5, %2, lsl #16 \n\ + bic %4, %6, %0, lsl #16 \n\ + mul %1, %3, %4 \n\ + mul %4, %2, %4 \n\ + mul %3, %0, %3 \n\ + mul %0, %2, %0 \n\ + adds %3, %4, %3 \n\ + addcs %0, %0, #65536 \n\ + adds %1, %1, %3, lsl #16 \n\ + adc %0, %0, %3, lsr #16" \ + : "=&r" ((USItype) (xh)), \ + "=r" ((USItype) (xl)), \ + "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \ + : "r" ((USItype) (a)), \ + "r" ((USItype) (b)));} +#define UMUL_TIME 20 +#define UDIV_TIME 100 +#endif /* __arm__ */ + +#define __umulsidi3(u, v) \ + ({DIunion __w; \ + umul_ppmm (__w.s.high, __w.s.low, u, v); \ + __w.ll; }) + +#define __udiv_qrnnd_c(q, r, n1, n0, d) \ + do { \ + USItype __d1, __d0, __q1, __q0; \ + USItype __r1, __r0, __m; \ + __d1 = __ll_highpart (d); \ + __d0 = __ll_lowpart (d); \ + \ + __r1 = (n1) % __d1; \ + __q1 = (n1) / __d1; \ + __m = (USItype) __q1 * __d0; \ + __r1 = __r1 * __ll_B | __ll_highpart (n0); \ + if (__r1 < __m) \ + { \ + __q1--, __r1 += (d); \ + if (__r1 >= (d)) /* i.e. 
we didn't get carry when adding to __r1 */\ + if (__r1 < __m) \ + __q1--, __r1 += (d); \ + } \ + __r1 -= __m; \ + \ + __r0 = __r1 % __d1; \ + __q0 = __r1 / __d1; \ + __m = (USItype) __q0 * __d0; \ + __r0 = __r0 * __ll_B | __ll_lowpart (n0); \ + if (__r0 < __m) \ + { \ + __q0--, __r0 += (d); \ + if (__r0 >= (d)) \ + if (__r0 < __m) \ + __q0--, __r0 += (d); \ + } \ + __r0 -= __m; \ + \ + (q) = (USItype) __q1 * __ll_B | __q0; \ + (r) = __r0; \ + } while (0) + +#define UDIV_NEEDS_NORMALIZATION 1 +#define udiv_qrnnd __udiv_qrnnd_c + +extern const UQItype __clz_tab[]; +#define count_leading_zeros(count, x) \ + do { \ + USItype __xr = (x); \ + USItype __a; \ + \ + if (SI_TYPE_SIZE <= 32) \ + { \ + __a = __xr < ((USItype)1<<2*__BITS4) \ + ? (__xr < ((USItype)1<<__BITS4) ? 0 : __BITS4) \ + : (__xr < ((USItype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \ + } \ + else \ + { \ + for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \ + if (((__xr >> __a) & 0xff) != 0) \ + break; \ + } \ + \ + (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \ + } while (0) diff -urN linux-2.5.70-bk13/arch/arm26/lib/lshrdi3.c linux-2.5.70-bk14/arch/arm26/lib/lshrdi3.c --- linux-2.5.70-bk13/arch/arm26/lib/lshrdi3.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/lshrdi3.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,61 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. 
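(Aside on the lib1funcs.S routines above: __udivsi3 and __umodsi3 do plain shift-and-subtract division, with the inner loop unrolled to try four quotient bits per pass. Below is a minimal C sketch of the same scheme, one quotient bit per iteration and with a zero divisor handled the way Ldiv0 does; the name udiv32 is illustrative only and this is not part of the patch.)

unsigned int udiv32(unsigned int dividend, unsigned int divisor, unsigned int *rem)
{
	unsigned int curbit = 1, result = 0;

	if (divisor == 0)
		return 0;	/* the assembly branches to Ldiv0 / __div0 here */

	/* align the divisor below the dividend, tracking the quotient bit */
	while (divisor < 0x80000000u && divisor < dividend) {
		divisor <<= 1;
		curbit <<= 1;
	}

	/* subtract whenever possible, recording the corresponding bit */
	while (curbit) {
		if (dividend >= divisor) {
			dividend -= divisor;
			result |= curbit;
		}
		divisor >>= 1;
		curbit >>= 1;
	}

	if (rem)
		*rem = dividend;	/* whatever is left over is the remainder */
	return result;
}

(The four cmp/subcs/orrcs groups in the assembly are this same inner test applied to divisor, divisor>>1, divisor>>2 and divisor>>3 before curbit and divisor are stepped down by four bits at once.)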
based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +DItype +__lshrdi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) + { + w.s.high = 0; + w.s.low = (USItype)uu.s.high >> -bm; + } + else + { + USItype carries = (USItype)uu.s.high << bm; + w.s.high = (USItype)uu.s.high >> b; + w.s.low = ((USItype)uu.s.low >> b) | carries; + } + + return w.ll; +} + diff -urN linux-2.5.70-bk13/arch/arm26/lib/memchr.S linux-2.5.70-bk14/arch/arm26/lib/memchr.S --- linux-2.5.70-bk13/arch/arm26/lib/memchr.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/memchr.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,25 @@ +/* + * linux/arch/arm/lib/memchr.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 +ENTRY(memchr) +1: subs r2, r2, #1 + bmi 2f + ldrb r3, [r0], #1 + teq r3, r1 + bne 1b + sub r0, r0, #1 +2: movne r0, #0 + RETINSTR(mov,pc,lr) diff -urN linux-2.5.70-bk13/arch/arm26/lib/memcpy.S linux-2.5.70-bk14/arch/arm26/lib/memcpy.S --- linux-2.5.70-bk13/arch/arm26/lib/memcpy.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/memcpy.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,318 @@ +/* + * linux/arch/arm/lib/memcpy.S + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include + + .text + +#define ENTER \ + mov ip,sp ;\ + stmfd sp!,{r4-r9,fp,ip,lr,pc} ;\ + sub fp,ip,#4 + +#define EXIT \ + LOADREGS(ea, fp, {r4 - r9, fp, sp, pc}) + +#define EXITEQ \ + LOADREGS(eqea, fp, {r4 - r9, fp, sp, pc}) + +/* + * Prototype: void memcpy(void *to,const void *from,unsigned long n); + * ARM3: cant use memcopy here!!! 
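(For reference, the two-halves shift that __lshrdi3 above performs can be written with fixed-width types and checked against a native 64-bit shift. This is a throwaway sketch, not part of the patch; the function name is illustrative and it mirrors the bm <= 0 / carries branches of the original.)

#include <stdint.h>
#include <assert.h>

static uint64_t lshrdi3_sketch(uint64_t u, unsigned int b)
{
	uint32_t lo = (uint32_t)u, hi = (uint32_t)(u >> 32);
	uint32_t w_lo, w_hi;

	if (b == 0)
		return u;
	if (b >= 32) {				/* the "bm <= 0" branch */
		w_hi = 0;
		w_lo = hi >> (b - 32);
	} else {
		uint32_t carries = hi << (32 - b);	/* bits crossing the word boundary */
		w_hi = hi >> b;
		w_lo = (lo >> b) | carries;
	}
	return ((uint64_t)w_hi << 32) | w_lo;
}

int main(void)
{
	assert(lshrdi3_sketch(0x8000000000000001ull, 33) == 0x8000000000000001ull >> 33);
	assert(lshrdi3_sketch(0x0123456789abcdefull, 7) == 0x0123456789abcdefull >> 7);
	return 0;
}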
+ */ +ENTRY(memcpy) +ENTRY(memmove) + ENTER + cmp r1, r0 + bcc 19f + subs r2, r2, #4 + blt 6f + ands ip, r0, #3 + bne 7f + ands ip, r1, #3 + bne 8f + +1: subs r2, r2, #8 + blt 5f + subs r2, r2, #0x14 + blt 3f +2: ldmia r1!,{r3 - r9, ip} + stmia r0!,{r3 - r9, ip} + subs r2, r2, #32 + bge 2b + cmn r2, #16 + ldmgeia r1!, {r3 - r6} + stmgeia r0!, {r3 - r6} + subge r2, r2, #0x10 +3: adds r2, r2, #0x14 +4: ldmgeia r1!, {r3 - r5} + stmgeia r0!, {r3 - r5} + subges r2, r2, #12 + bge 4b +5: adds r2, r2, #8 + blt 6f + subs r2, r2, #4 + ldrlt r3, [r1], #4 + ldmgeia r1!, {r4, r5} + strlt r3, [r0], #4 + stmgeia r0!, {r4, r5} + subge r2, r2, #4 + +6: adds r2, r2, #4 + EXITEQ + cmp r2, #2 + ldrb r3, [r1], #1 + ldrgeb r4, [r1], #1 + ldrgtb r5, [r1], #1 + strb r3, [r0], #1 + strgeb r4, [r0], #1 + strgtb r5, [r0], #1 + EXIT + +7: rsb ip, ip, #4 + cmp ip, #2 + ldrb r3, [r1], #1 + ldrgeb r4, [r1], #1 + ldrgtb r5, [r1], #1 + strb r3, [r0], #1 + strgeb r4, [r0], #1 + strgtb r5, [r0], #1 + subs r2, r2, ip + blt 6b + ands ip, r1, #3 + beq 1b + +8: bic r1, r1, #3 + ldr r7, [r1], #4 + cmp ip, #2 + bgt 15f + beq 11f + cmp r2, #12 + blt 10f + sub r2, r2, #12 +9: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} + subs r2, r2, #16 + bge 9b + adds r2, r2, #12 + blt 100f +10: mov r3, r7, pull #8 + ldr r7, [r1], #4 + subs r2, r2, #4 + orr r3, r3, r7, push #24 + str r3, [r0], #4 + bge 10b +100: sub r1, r1, #3 + b 6b + +11: cmp r2, #12 + blt 13f /* */ + sub r2, r2, #12 +12: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} + subs r2, r2, #16 + bge 12b + adds r2, r2, #12 + blt 14f +13: mov r3, r7, pull #16 + ldr r7, [r1], #4 + subs r2, r2, #4 + orr r3, r3, r7, push #16 + str r3, [r0], #4 + bge 13b +14: sub r1, r1, #2 + b 6b + +15: cmp r2, #12 + blt 17f + sub r2, r2, #12 +16: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} + subs r2, r2, #16 + bge 16b + adds r2, r2, #12 + blt 18f +17: mov r3, r7, pull #24 + ldr r7, [r1], #4 + subs r2, r2, #4 + orr r3, r3, r7, push #8 + str r3, [r0], #4 + bge 17b +18: sub r1, r1, #1 + b 6b + + +19: add r1, r1, r2 + add r0, r0, r2 + subs r2, r2, #4 + blt 24f + ands ip, r0, #3 + bne 25f + ands ip, r1, #3 + bne 26f + +20: subs r2, r2, #8 + blt 23f + subs r2, r2, #0x14 + blt 22f +21: ldmdb r1!, {r3 - r9, ip} + stmdb r0!, {r3 - r9, ip} + subs r2, r2, #32 + bge 21b +22: cmn r2, #16 + ldmgedb r1!, {r3 - r6} + stmgedb r0!, {r3 - r6} + subge r2, r2, #16 + adds r2, r2, #20 + ldmgedb r1!, {r3 - r5} + stmgedb r0!, {r3 - r5} + subge r2, r2, #12 +23: adds r2, r2, #8 + blt 24f + subs r2, r2, #4 + ldrlt r3, [r1, #-4]! + ldmgedb r1!, {r4, r5} + strlt r3, [r0, #-4]! + stmgedb r0!, {r4, r5} + subge r2, r2, #4 + +24: adds r2, r2, #4 + EXITEQ + cmp r2, #2 + ldrb r3, [r1, #-1]! + ldrgeb r4, [r1, #-1]! + ldrgtb r5, [r1, #-1]! + strb r3, [r0, #-1]! + strgeb r4, [r0, #-1]! + strgtb r5, [r0, #-1]! + EXIT + +25: cmp ip, #2 + ldrb r3, [r1, #-1]! + ldrgeb r4, [r1, #-1]! + ldrgtb r5, [r1, #-1]! + strb r3, [r0, #-1]! + strgeb r4, [r0, #-1]! 
+ strgtb r5, [r0, #-1]! + subs r2, r2, ip + blt 24b + ands ip, r1, #3 + beq 20b + +26: bic r1, r1, #3 + ldr r3, [r1], #0 + cmp ip, #2 + blt 34f + beq 30f + cmp r2, #12 + blt 28f + sub r2, r2, #12 +27: mov r7, r3, push #8 + ldmdb r1!, {r3, r4, r5, r6} + orr r7, r7, r6, pull #24 + mov r6, r6, push #8 + orr r6, r6, r5, pull #24 + mov r5, r5, push #8 + orr r5, r5, r4, pull #24 + mov r4, r4, push #8 + orr r4, r4, r3, pull #24 + stmdb r0!, {r4, r5, r6, r7} + subs r2, r2, #16 + bge 27b + adds r2, r2, #12 + blt 29f +28: mov ip, r3, push #8 + ldr r3, [r1, #-4]! + subs r2, r2, #4 + orr ip, ip, r3, pull #24 + str ip, [r0, #-4]! + bge 28b +29: add r1, r1, #3 + b 24b + +30: cmp r2, #12 + blt 32f + sub r2, r2, #12 +31: mov r7, r3, push #16 + ldmdb r1!, {r3, r4, r5, r6} + orr r7, r7, r6, pull #16 + mov r6, r6, push #16 + orr r6, r6, r5, pull #16 + mov r5, r5, push #16 + orr r5, r5, r4, pull #16 + mov r4, r4, push #16 + orr r4, r4, r3, pull #16 + stmdb r0!, {r4, r5, r6, r7} + subs r2, r2, #16 + bge 31b + adds r2, r2, #12 + blt 33f +32: mov ip, r3, push #16 + ldr r3, [r1, #-4]! + subs r2, r2, #4 + orr ip, ip, r3, pull #16 + str ip, [r0, #-4]! + bge 32b +33: add r1, r1, #2 + b 24b + +34: cmp r2, #12 + blt 36f + sub r2, r2, #12 +35: mov r7, r3, push #24 + ldmdb r1!, {r3, r4, r5, r6} + orr r7, r7, r6, pull #8 + mov r6, r6, push #24 + orr r6, r6, r5, pull #8 + mov r5, r5, push #24 + orr r5, r5, r4, pull #8 + mov r4, r4, push #24 + orr r4, r4, r3, pull #8 + stmdb r0!, {r4, r5, r6, r7} + subs r2, r2, #16 + bge 35b + adds r2, r2, #12 + blt 37f +36: mov ip, r3, push #24 + ldr r3, [r1, #-4]! + subs r2, r2, #4 + orr ip, ip, r3, pull #8 + str ip, [r0, #-4]! + bge 36b +37: add r1, r1, #1 + b 24b + + .align diff -urN linux-2.5.70-bk13/arch/arm26/lib/memset.S linux-2.5.70-bk14/arch/arm26/lib/memset.S --- linux-2.5.70-bk13/arch/arm26/lib/memset.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/memset.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,80 @@ +/* + * linux/arch/arm/lib/memset.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 + .word 0 + +1: subs r2, r2, #4 @ 1 do we have enough + blt 5f @ 1 bytes to align with? + cmp r3, #2 @ 1 + strltb r1, [r0], #1 @ 1 + strleb r1, [r0], #1 @ 1 + strb r1, [r0], #1 @ 1 + add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) +/* + * The pointer is now aligned and the length is adjusted. Try doing the + * memzero again. + */ + +ENTRY(memset) + ands r3, r0, #3 @ 1 unaligned? + bne 1b @ 1 +/* + * we know that the pointer in r0 is aligned to a word boundary. + */ + orr r1, r1, r1, lsl #8 + orr r1, r1, r1, lsl #16 + mov r3, r1 + cmp r2, #16 + blt 4f +/* + * We need an extra register for this loop - save the return address and + * use the LR + */ + str lr, [sp, #-4]! + mov ip, r1 + mov lr, r1 + +2: subs r2, r2, #64 + stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time. + stmgeia r0!, {r1, r3, ip, lr} + stmgeia r0!, {r1, r3, ip, lr} + stmgeia r0!, {r1, r3, ip, lr} + bgt 2b + LOADREGS(eqfd, sp!, {pc}) @ Now <64 bytes to go. 
+/* + * No need to correct the count; we're only testing bits from now on + */ + tst r2, #32 + stmneia r0!, {r1, r3, ip, lr} + stmneia r0!, {r1, r3, ip, lr} + tst r2, #16 + stmneia r0!, {r1, r3, ip, lr} + ldr lr, [sp], #4 + +4: tst r2, #8 + stmneia r0!, {r1, r3} + tst r2, #4 + strne r1, [r0], #4 +/* + * When we get here, we've got less than 4 bytes to zero. We + * may have an unaligned pointer as well. + */ +5: tst r2, #2 + strneb r1, [r0], #1 + strneb r1, [r0], #1 + tst r2, #1 + strneb r1, [r0], #1 + RETINSTR(mov,pc,lr) diff -urN linux-2.5.70-bk13/arch/arm26/lib/memzero.S linux-2.5.70-bk14/arch/arm26/lib/memzero.S --- linux-2.5.70-bk13/arch/arm26/lib/memzero.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/memzero.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,80 @@ +/* + * linux/arch/arm/lib/memzero.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + .align 5 + .word 0 +/* + * Align the pointer in r0. r3 contains the number of bytes that we are + * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we + * don't bother; we use byte stores instead. + */ +1: subs r1, r1, #4 @ 1 do we have enough + blt 5f @ 1 bytes to align with? + cmp r3, #2 @ 1 + strltb r2, [r0], #1 @ 1 + strleb r2, [r0], #1 @ 1 + strb r2, [r0], #1 @ 1 + add r1, r1, r3 @ 1 (r1 = r1 - (4 - r3)) +/* + * The pointer is now aligned and the length is adjusted. Try doing the + * memzero again. + */ + +ENTRY(__memzero) + mov r2, #0 @ 1 + ands r3, r0, #3 @ 1 unaligned? + bne 1b @ 1 +/* + * r3 = 0, and we know that the pointer in r0 is aligned to a word boundary. + */ + cmp r1, #16 @ 1 we can skip this chunk if we + blt 4f @ 1 have < 16 bytes +/* + * We need an extra register for this loop - save the return address and + * use the LR + */ + str lr, [sp, #-4]! @ 1 + mov ip, r2 @ 1 + mov lr, r2 @ 1 + +3: subs r1, r1, #64 @ 1 write 32 bytes out per loop + stmgeia r0!, {r2, r3, ip, lr} @ 4 + stmgeia r0!, {r2, r3, ip, lr} @ 4 + stmgeia r0!, {r2, r3, ip, lr} @ 4 + stmgeia r0!, {r2, r3, ip, lr} @ 4 + bgt 3b @ 1 + LOADREGS(eqfd, sp!, {pc}) @ 1/2 quick exit +/* + * No need to correct the count; we're only testing bits from now on + */ + tst r1, #32 @ 1 + stmneia r0!, {r2, r3, ip, lr} @ 4 + stmneia r0!, {r2, r3, ip, lr} @ 4 + tst r1, #16 @ 1 16 bytes or more? + stmneia r0!, {r2, r3, ip, lr} @ 4 + ldr lr, [sp], #4 @ 1 + +4: tst r1, #8 @ 1 8 bytes or more? + stmneia r0!, {r2, r3} @ 2 + tst r1, #4 @ 1 4 bytes or more? + strne r2, [r0], #4 @ 1 +/* + * When we get here, we've got less than 4 bytes to zero. We + * may have an unaligned pointer as well. + */ +5: tst r1, #2 @ 1 2 bytes or more? + strneb r2, [r0], #1 @ 1 + strneb r2, [r0], #1 @ 1 + tst r1, #1 @ 1 a byte left over + strneb r2, [r0], #1 @ 1 + RETINSTR(mov,pc,lr) @ 1 diff -urN linux-2.5.70-bk13/arch/arm26/lib/muldi3.c linux-2.5.70-bk14/arch/arm26/lib/muldi3.c --- linux-2.5.70-bk13/arch/arm26/lib/muldi3.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/muldi3.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,77 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. 
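(The __muldi3 body added below relies on the identity (uh*2^32 + ul) * (vh*2^32 + vl) = ul*vl + (ul*vh + uh*vl)*2^32 modulo 2^64: the uh*vh term falls off the top, and the cross terms only matter modulo 2^32. A short C sketch, not part of the patch and with hypothetical names, that mirrors the decomposition and checks it against the compiler's native 64-bit multiply.)

#include <stdint.h>
#include <assert.h>

static uint64_t muldi3_sketch(uint64_t u, uint64_t v)
{
	uint32_t ul = (uint32_t)u, uh = (uint32_t)(u >> 32);
	uint32_t vl = (uint32_t)v, vh = (uint32_t)(v >> 32);

	uint64_t w = (uint64_t)ul * vl;			/* __umulsidi3(ul, vl) */
	w += (uint64_t)(ul * vh + uh * vl) << 32;	/* cross terms, taken mod 2^32 */
	return w;					/* the uh*vh*2^64 term wraps away */
}

int main(void)
{
	assert(muldi3_sketch(0x123456789abcdef0ull, 0x0fedcba987654321ull) ==
	       0x123456789abcdef0ull * 0x0fedcba987654321ull);
	return 0;
}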
+ +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +#define umul_ppmm(xh, xl, a, b) \ +{register USItype __t0, __t1, __t2; \ + __asm__ ("%@ Inlined umul_ppmm \n\ + mov %2, %5, lsr #16 \n\ + mov %0, %6, lsr #16 \n\ + bic %3, %5, %2, lsl #16 \n\ + bic %4, %6, %0, lsl #16 \n\ + mul %1, %3, %4 \n\ + mul %4, %2, %4 \n\ + mul %3, %0, %3 \n\ + mul %0, %2, %0 \n\ + adds %3, %4, %3 \n\ + addcs %0, %0, #65536 \n\ + adds %1, %1, %3, lsl #16 \n\ + adc %0, %0, %3, lsr #16" \ + : "=&r" ((USItype) (xh)), \ + "=r" ((USItype) (xl)), \ + "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \ + : "r" ((USItype) (a)), \ + "r" ((USItype) (b)));} + + +#define __umulsidi3(u, v) \ + ({DIunion __w; \ + umul_ppmm (__w.s.high, __w.s.low, u, v); \ + __w.ll; }) + + +DItype +__muldi3 (DItype u, DItype v) +{ + DIunion w; + DIunion uu, vv; + + uu.ll = u, + vv.ll = v; + + w.ll = __umulsidi3 (uu.s.low, vv.s.low); + w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high + + (USItype) uu.s.high * (USItype) vv.s.low); + + return w.ll; +} + diff -urN linux-2.5.70-bk13/arch/arm26/lib/putuser.S linux-2.5.70-bk14/arch/arm26/lib/putuser.S --- linux-2.5.70-bk13/arch/arm26/lib/putuser.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/putuser.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,108 @@ +/* + * linux/arch/arm/lib/putuser.S + * + * Copyright (C) 2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Idea from x86 version, (C) Copyright 1998 Linus Torvalds + * + * These functions have a non-standard call interface to make + * them more efficient, especially as they return an error + * value in addition to the "real" return value. + * + * __put_user_X + * + * Inputs: r0 contains the address + * r1, r2 contains the value + * Outputs: r0 is the error code + * lr corrupted + * + * No other registers must be altered. (see include/asm-arm/uaccess.h + * for specific ASM register usage). + * + * Note that ADDR_LIMIT is either 0 or 0xc0000000 + * Note also that it is intended that __put_user_bad is not global. + */ +#include +#include + + .global __put_user_1 +__put_user_1: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! 
+ ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #1 + cmp r0, r2 + bge __put_user_bad +1: cmp r0, #0x02000000 + strlsbt r1, [r0] + strgeb r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __put_user_2 +__put_user_2: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #2 + cmp r0, r2 + bge __put_user_bad +2: cmp r0, #0x02000000 + strlsbt r1, [r0], #1 + strgeb r1, [r0], #1 + mov r1, r1, lsr #8 +3: strlsbt r1, [r0] + strgeb r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __put_user_4 +__put_user_4: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #4 + cmp r0, r2 +4: bge __put_user_bad + cmp r0, #0x02000000 + strlst r1, [r0] + strge r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __put_user_8 +__put_user_8: + bic ip, sp, #0x1f00 + bic ip, ip, #0x00ff + str lr, [sp, #-4]! + ldr ip, [ip, #TI_ADDR_LIMIT] + sub ip, ip, #8 + cmp r0, ip + bge __put_user_bad + cmp r0, #0x02000000 +5: strlst r1, [r0], #4 +6: strlst r2, [r0] + strge r1, [r0], #4 + strge r2, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + +__put_user_bad: + mov r0, #-14 + mov pc, lr + +.section __ex_table, "a" + .long 1b, __put_user_bad + .long 2b, __put_user_bad + .long 3b, __put_user_bad + .long 4b, __put_user_bad + .long 5b, __put_user_bad + .long 6b, __put_user_bad +.previous diff -urN linux-2.5.70-bk13/arch/arm26/lib/setbit.S linux-2.5.70-bk14/arch/arm26/lib/setbit.S --- linux-2.5.70-bk13/arch/arm26/lib/setbit.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/setbit.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,29 @@ +/* + * linux/arch/arm/lib/setbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +/* + * Purpose : Function to set a bit + * Prototype: int set_bit(int bit, void *addr) + */ +ENTRY(_set_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_set_bit_le) + and r2, r0, #7 + mov r3, #1 + mov r3, r3, lsl r2 + save_and_disable_irqs ip, r2 + ldrb r2, [r1, r0, lsr #3] + orr r2, r2, r3 + strb r2, [r1, r0, lsr #3] + restore_irqs ip + RETINSTR(mov,pc,lr) diff -urN linux-2.5.70-bk13/arch/arm26/lib/strchr.S linux-2.5.70-bk14/arch/arm26/lib/strchr.S --- linux-2.5.70-bk13/arch/arm26/lib/strchr.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/strchr.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,25 @@ +/* + * linux/arch/arm/lib/strchr.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 +ENTRY(strchr) +1: ldrb r2, [r0], #1 + teq r2, r1 + teqne r2, #0 + bne 1b + teq r2, #0 + moveq r0, #0 + subne r0, r0, #1 + RETINSTR(mov,pc,lr) diff -urN linux-2.5.70-bk13/arch/arm26/lib/strrchr.S linux-2.5.70-bk14/arch/arm26/lib/strrchr.S --- linux-2.5.70-bk13/arch/arm26/lib/strrchr.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/strrchr.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,25 @@ +/* + * linux/arch/arm/lib/strrchr.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 +ENTRY(strrchr) + mov r3, #0 +1: ldrb r2, [r0], #1 + teq r2, r1 + subeq r3, r0, #1 + teq r2, #0 + bne 1b + mov r0, r3 + RETINSTR(mov,pc,lr) diff -urN linux-2.5.70-bk13/arch/arm26/lib/testchangebit.S linux-2.5.70-bk14/arch/arm26/lib/testchangebit.S --- linux-2.5.70-bk13/arch/arm26/lib/testchangebit.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/testchangebit.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,29 @@ +/* + * linux/arch/arm/lib/testchangebit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +ENTRY(_test_and_change_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_test_and_change_bit_le) + add r1, r1, r0, lsr #3 + and r3, r0, #7 + mov r0, #1 + save_and_disable_irqs ip, r2 + ldrb r2, [r1] + tst r2, r0, lsl r3 + eor r2, r2, r0, lsl r3 + strb r2, [r1] + restore_irqs ip + moveq r0, #0 + RETINSTR(mov,pc,lr) + + diff -urN linux-2.5.70-bk13/arch/arm26/lib/testclearbit.S linux-2.5.70-bk14/arch/arm26/lib/testclearbit.S --- linux-2.5.70-bk13/arch/arm26/lib/testclearbit.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/testclearbit.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,29 @@ +/* + * linux/arch/arm/lib/testclearbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +ENTRY(_test_and_clear_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_test_and_clear_bit_le) + add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #7 @ Get bit offset + mov r0, #1 + save_and_disable_irqs ip, r2 + ldrb r2, [r1] + tst r2, r0, lsl r3 + bic r2, r2, r0, lsl r3 + strb r2, [r1] + restore_irqs ip + moveq r0, #0 + RETINSTR(mov,pc,lr) + + diff -urN linux-2.5.70-bk13/arch/arm26/lib/testsetbit.S linux-2.5.70-bk14/arch/arm26/lib/testsetbit.S --- linux-2.5.70-bk13/arch/arm26/lib/testsetbit.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/testsetbit.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,29 @@ +/* + * linux/arch/arm/lib/testsetbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include + .text + +ENTRY(_test_and_set_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_test_and_set_bit_le) + add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #7 @ Get bit offset + mov r0, #1 + save_and_disable_irqs ip, r2 + ldrb r2, [r1] + tst r2, r0, lsl r3 + orr r2, r2, r0, lsl r3 + strb r2, [r1] + restore_irqs ip + moveq r0, #0 + RETINSTR(mov,pc,lr) + + diff -urN linux-2.5.70-bk13/arch/arm26/lib/uaccess-kernel.S linux-2.5.70-bk14/arch/arm26/lib/uaccess-kernel.S --- linux-2.5.70-bk13/arch/arm26/lib/uaccess-kernel.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/uaccess-kernel.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,173 @@ +/* + * linux/arch/arm26/lib/uaccess-kernel.S + * + * Copyright (C) 1998 Russell King + * + * Note! Some code fragments found in here have a special calling + * convention - they are not APCS compliant! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +//FIXME - surely this can be done in C not asm, removing the problem of keeping C and asm in sync? (this is a struct uaccess_t) + + .globl uaccess_kernel +uaccess_kernel: + .word uaccess_kernel_put_byte + .word uaccess_kernel_get_byte + .word uaccess_kernel_put_half + .word uaccess_kernel_get_half + .word uaccess_kernel_put_word + .word uaccess_kernel_get_word + .word uaccess_kernel_put_dword + .word uaccess_kernel_copy + .word uaccess_kernel_copy + .word uaccess_kernel_clear + .word uaccess_kernel_strncpy + .word uaccess_kernel_strnlen + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_byte: + stmfd sp!, {lr} + strb r0, [r1] + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_half: + stmfd sp!, {lr} + strb r0, [r1] + mov r0, r0, lsr #8 + strb r0, [r1, #1] + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_word: + stmfd sp!, {lr} + str r0, [r1] + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_dword: + stmfd sp!, {lr} + str r0, [r1], #4 + str r0, [r1], #0 + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_kernel_get_byte: + stmfd sp!, {lr} + ldrb r0, [r0] + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_kernel_get_half: + stmfd sp!, {lr} + ldr r0, [r0] + mov r0, r0, lsl #16 + mov r0, r0, lsr #16 + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_kernel_get_word: + stmfd sp!, {lr} + ldr r0, [r0] + ldmfd sp!, {pc}^ + + +/* Prototype: int uaccess_kernel_copy(void *to, const char *from, size_t n) + * Purpose : copy a block to kernel memory from kernel memory + * Params : to - kernel memory + * : from - kernel memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. 
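(The _set_bit, _test_and_set_bit, _test_and_clear_bit and _test_and_change_bit entry points above all share one addressing scheme: byte offset bit >> 3, in-byte mask 1 << (bit & 7), with the *_be variants first XOR-ing the bit number with 0x18 so that bit numbering runs big-endian within each 32-bit word. A non-atomic C sketch of the test-and-set case follows, illustrative only; the assembly additionally disables interrupts around the read-modify-write.)

static int test_and_set_bit_sketch(int bit, unsigned char *addr, int big_endian)
{
	unsigned char mask, old;

	if (big_endian)
		bit ^= 0x18;			/* swap byte order within a 32-bit word */
	mask = 1u << (bit & 7);			/* position inside the byte */
	old = addr[bit >> 3];			/* byte that holds the bit */
	addr[bit >> 3] = old | mask;		/* set it (the orr in the assembly) */
	return (old & mask) != 0;		/* 1 if the bit was already set */
}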
+ */ +uaccess_kernel_copy: + stmfd sp!, {lr} + bl memcpy + mov r0, #0 + ldmfd sp!, {pc}^ + +/* Prototype: int uaccess_kernel_clear(void *addr, size_t sz) + * Purpose : clear some kernel memory + * Params : addr - kernel memory address to clear + * : sz - number of bytes to clear + * Returns : number of bytes NOT cleared + */ +uaccess_kernel_clear: + stmfd sp!, {lr} + mov r2, #0 + cmp r1, #4 + blt 2f + ands ip, r0, #3 + beq 1f + cmp ip, #1 + strb r2, [r0], #1 + strleb r2, [r0], #1 + strltb r2, [r0], #1 + rsb ip, ip, #4 + sub r1, r1, ip @ 7 6 5 4 3 2 1 +1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7 + bmi 2f + str r2, [r0], #4 + str r2, [r0], #4 + b 1b +2: adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3 + strpl r2, [r0], #4 + tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x + strneb r2, [r0], #1 + strneb r2, [r0], #1 + tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1 + strneb r2, [r0], #1 + mov r0, #0 + ldmfd sp!, {pc}^ + +/* Prototype: size_t uaccess_kernel_strncpy(char *dst, char *src, size_t len) + * Purpose : copy a string from kernel memory to kernel memory + * Params : dst - kernel memory destination + * : src - kernel memory source + * : len - maximum length of string + * Returns : number of characters copied + */ +uaccess_kernel_strncpy: + stmfd sp!, {lr} + mov ip, r2 +1: subs r2, r2, #1 + bmi 2f + ldrb r3, [r1], #1 + strb r3, [r0], #1 + teq r3, #0 + bne 1b +2: subs r0, ip, r2 + ldmfd sp!, {pc}^ + +/* Prototype: int uaccess_kernel_strlen(char *str, long n) + * Purpose : get length of a string in kernel memory + * Params : str - address of string in kernel memory + * Returns : length of string *including terminator*, + * or zero on exception, or n + 1 if too long + */ +uaccess_kernel_strnlen: + stmfd sp!, {lr} + mov r2, r0 +1: ldrb r1, [r0], #1 + teq r1, #0 + beq 2f + subs r1, r1, #1 + bne 1b + add r0, r0, #1 +2: sub r0, r0, r2 + ldmfd sp!, {pc}^ + diff -urN linux-2.5.70-bk13/arch/arm26/lib/uaccess-user.S linux-2.5.70-bk14/arch/arm26/lib/uaccess-user.S --- linux-2.5.70-bk13/arch/arm26/lib/uaccess-user.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/uaccess-user.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,718 @@ +/* + * linux/arch/arm26/lib/uaccess-user.S + * + * Copyright (C) 1995, 1996,1997,1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Routines to block copy data to/from user memory + * These are highly optimised both for the 4k page size + * and for various alignments. + */ +#include +#include +#include +#include + + .text + +//FIXME - surely this can be done in C not asm, removing the problem of keeping C and asm in sync? 
(this is a struct uaccess_t) + .globl uaccess_user +uaccess_user: + .word uaccess_user_put_byte + .word uaccess_user_get_byte + .word uaccess_user_put_half + .word uaccess_user_get_half + .word uaccess_user_put_word + .word uaccess_user_get_word + .word uaccess_user_put_dword + .word uaccess_user_copy_from_user + .word uaccess_user_copy_to_user + .word uaccess_user_clear_user + .word uaccess_user_strncpy_from_user + .word uaccess_user_strnlen_user + + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_byte: + stmfd sp!, {lr} +USER( strbt r0, [r1]) + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_half: + stmfd sp!, {lr} +USER( strbt r0, [r1], #1) + mov r0, r0, lsr #8 +USER( strbt r0, [r1]) + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_word: + stmfd sp!, {lr} +USER( strt r0, [r1]) + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_dword: + stmfd sp!, {lr} +USER( strt r0, [r1], #4) +USER( strt r0, [r1], #0) + ldmfd sp!, {pc}^ + +9001: mov r2, #-EFAULT + ldmfd sp!, {pc}^ + + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_user_get_byte: + stmfd sp!, {lr} +USER( ldrbt r0, [r0]) + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_user_get_half: + stmfd sp!, {lr} +USER( ldrt r0, [r0]) + mov r0, r0, lsl #16 + mov r0, r0, lsr #16 + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_user_get_word: + stmfd sp!, {lr} +USER( ldrt r0, [r0]) + ldmfd sp!, {pc}^ + +9001: mov r1, #-EFAULT + ldmfd sp!, {pc}^ + +/* Prototype: int uaccess_user_copy_to_user(void *to, const char *from, size_t n) + * Purpose : copy a block to user memory from kernel memory + * Params : to - user memory + * : from - kernel memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ + +.c2u_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 + ldrb r3, [r1], #1 +USER( strbt r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( strgtbt r3, [r0], #1) @ May fault + sub r2, r2, ip + b .c2u_dest_aligned + +ENTRY(uaccess_user_copy_to_user) + stmfd sp!, {r2, r4 - r7, lr} + cmp r2, #4 + blt .c2u_not_enough + ands ip, r0, #3 + bne .c2u_dest_not_aligned +.c2u_dest_aligned: + + ands ip, r1, #3 + bne .c2u_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.c2u_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_0nowords + ldr r3, [r1], #4 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .c2u_0rem8lp + +.c2u_0cpy8lp: ldmia r1!, {r3 - r6} + stmia r0!, {r3 - r6} @ Shouldnt fault + ldmia r1!, {r3 - r6} + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #32 + bpl .c2u_0cpy8lp +.c2u_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} + stmgeia r0!, {r3 - r6} @ Shouldnt fault + tst ip, #8 + ldmneia r1!, {r3 - r4} + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + ldrne r3, [r1], #4 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_0fupi +.c2u_0nowords: teq ip, #0 + beq .c2u_finished +.c2u_nowords: cmp ip, #2 + ldrb r3, [r1], #1 +USER( strbt r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + +.c2u_not_enough: + movs ip, r2 + bne .c2u_nowords +.c2u_finished: mov r0, #0 + LOADREGS(fd,sp!,{r2, r4 - r7, pc}) + +.c2u_src_not_aligned: + bic r1, r1, #3 + ldr r7, [r1], #4 + cmp ip, #2 + bgt .c2u_3fupi + beq .c2u_2fupi +.c2u_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_1nowords + mov r3, r7, pull #8 + ldr r7, [r1], #4 + orr r3, r3, r7, push #24 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .c2u_1rem8lp + +.c2u_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #16 + bpl .c2u_1cpy8lp +.c2u_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #8 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #24 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_1fupi +.c2u_1nowords: mov r3, r7, lsr #byte(1) + teq ip, #0 + beq .c2u_finished + cmp ip, #2 +USER( strbt r3, [r0], #1) @ May fault + movge r3, r7, lsr #byte(2) +USER( strgebt r3, [r0], #1) @ May fault + movgt r3, r7, lsr #byte(3) +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + +.c2u_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_2nowords + mov r3, r7, pull #16 + ldr r7, [r1], #4 + orr r3, r3, r7, push #16 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .c2u_2rem8lp + +.c2u_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #16 + bpl .c2u_2cpy8lp +.c2u_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #16 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #16 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_2fupi +.c2u_2nowords: mov r3, r7, lsr #byte(2) + teq ip, #0 + beq .c2u_finished + cmp ip, #2 
+USER( strbt r3, [r0], #1) @ May fault + movge r3, r7, lsr #byte(3) +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + +.c2u_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_3nowords + mov r3, r7, pull #24 + ldr r7, [r1], #4 + orr r3, r3, r7, push #8 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .c2u_3rem8lp + +.c2u_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #16 + bpl .c2u_3cpy8lp +.c2u_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #24 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #8 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_3fupi +.c2u_3nowords: mov r3, r7, lsr #byte(3) + teq ip, #0 + beq .c2u_finished + cmp ip, #2 +USER( strbt r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + + .section .fixup,"ax" + .align 0 +9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) + .previous + +/* Prototype: unsigned long uaccess_user_copy_from_user(void *to,const void *from,unsigned long n); + * Purpose : copy a block from user memory to kernel memory + * Params : to - kernel memory + * : from - user memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ +.cfu_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 +USER( ldrbt r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( ldrgebt r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + sub r2, r2, ip + b .cfu_dest_aligned + +ENTRY(uaccess_user_copy_from_user) + stmfd sp!, {r0, r2, r4 - r7, lr} + cmp r2, #4 + blt .cfu_not_enough + ands ip, r0, #3 + bne .cfu_dest_not_aligned +.cfu_dest_aligned: + ands ip, r1, #3 + bne .cfu_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.cfu_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_0nowords +USER( ldrt r3, [r1], #4) + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .cfu_0rem8lp + +.cfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault + stmia r0!, {r3 - r6} + ldmia r1!, {r3 - r6} @ Shouldnt fault + stmia r0!, {r3 - r6} + subs ip, ip, #32 + bpl .cfu_0cpy8lp +.cfu_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} @ Shouldnt fault + stmgeia r0!, {r3 - r6} + tst ip, #8 + ldmneia r1!, {r3 - r4} @ Shouldnt fault + stmneia r0!, {r3 - r4} + tst ip, #4 + ldrnet r3, [r1], #4 @ Shouldnt fault + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_0fupi +.cfu_0nowords: teq ip, #0 + beq .cfu_finished +.cfu_nowords: cmp ip, #2 +USER( ldrbt r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( ldrgebt r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .cfu_finished + +.cfu_not_enough: + movs ip, r2 + bne .cfu_nowords +.cfu_finished: mov r0, #0 + add sp, sp, #8 + LOADREGS(fd,sp!,{r4 - r7, pc}) + +.cfu_src_not_aligned: + bic r1, r1, #3 +USER( ldrt r7, [r1], #4) @ May fault + cmp ip, #2 + bgt .cfu_3fupi + beq .cfu_2fupi +.cfu_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_1nowords + mov r3, r7, pull #8 +USER( ldrt r7, [r1], #4) @ May fault + orr r3, r3, r7, push #24 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .cfu_1rem8lp + +.cfu_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .cfu_1cpy8lp +.cfu_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #8 +USER( ldrnet r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #24 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_1fupi +.cfu_1nowords: mov r3, r7, lsr #byte(1) + teq ip, #0 + beq .cfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, lsr #byte(2) + strgeb r3, [r0], #1 + movgt r3, r7, lsr #byte(3) + strgtb r3, [r0], #1 + b .cfu_finished + +.cfu_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_2nowords + mov r3, r7, pull #16 +USER( ldrt r7, [r1], #4) @ May fault + orr r3, r3, r7, push #16 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .cfu_2rem8lp + +.cfu_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .cfu_2cpy8lp +.cfu_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #16 +USER( ldrnet r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #16 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_2fupi +.cfu_2nowords: mov r3, r7, lsr #byte(2) + teq ip, #0 + beq .cfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge 
r3, r7, lsr #byte(3) + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #0) @ May fault + strgtb r3, [r0], #1 + b .cfu_finished + +.cfu_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_3nowords + mov r3, r7, pull #24 +USER( ldrt r7, [r1], #4) @ May fault + orr r3, r3, r7, push #8 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .cfu_3rem8lp + +.cfu_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .cfu_3cpy8lp +.cfu_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #24 +USER( ldrnet r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #8 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_3fupi +.cfu_3nowords: mov r3, r7, lsr #byte(3) + teq ip, #0 + beq .cfu_finished + cmp ip, #2 + strb r3, [r0], #1 +USER( ldrgebt r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .cfu_finished + + .section .fixup,"ax" + .align 0 + /* + * We took an exception. r0 contains a pointer to + * the byte not copied. + */ +9001: ldr r2, [sp], #4 @ void *to + sub r2, r0, r2 @ bytes copied + ldr r1, [sp], #4 @ unsigned long count + subs r4, r1, r2 @ bytes left to copy + movne r1, r4 + blne __memzero + mov r0, r4 + LOADREGS(fd,sp!, {r4 - r7, pc}) + .previous + +/* Prototype: int uaccess_user_clear_user(void *addr, size_t sz) + * Purpose : clear some user memory + * Params : addr - user memory address to clear + * : sz - number of bytes to clear + * Returns : number of bytes NOT cleared + */ +ENTRY(uaccess_user_clear_user) + stmfd sp!, {r1, lr} + mov r2, #0 + cmp r1, #4 + blt 2f + ands ip, r0, #3 + beq 1f + cmp ip, #2 +USER( strbt r2, [r0], #1) +USER( strlebt r2, [r0], #1) +USER( strltbt r2, [r0], #1) + rsb ip, ip, #4 + sub r1, r1, ip @ 7 6 5 4 3 2 1 +1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7 +USER( strplt r2, [r0], #4) +USER( strplt r2, [r0], #4) + bpl 1b + adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3 +USER( strplt r2, [r0], #4) +2: tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x +USER( strnebt r2, [r0], #1) +USER( strnebt r2, [r0], #1) + tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1 +USER( strnebt r2, [r0], #1) + mov r0, #0 + LOADREGS(fd,sp!, {r1, pc}) + + .section .fixup,"ax" + .align 0 +9001: LOADREGS(fd,sp!, {r0, pc}) + .previous + +/* + * Copy a string from user space to kernel space. 
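+ * On a faulting user read the fixup handler (9001) NUL-terminates the
+ * destination buffer and returns -EFAULT.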
+ * r0 = dst, r1 = src, r2 = byte length + * returns the number of characters copied (strlen of copied string), + * -EFAULT on exception, or "len" if we fill the whole buffer + */ +ENTRY(uaccess_user_strncpy_from_user) + save_lr + mov ip, r1 +1: subs r2, r2, #1 +USER( ldrplbt r3, [r1], #1) + bmi 2f + strb r3, [r0], #1 + teq r3, #0 + bne 1b + sub r1, r1, #1 @ take NUL character out of count +2: sub r0, r1, ip + restore_pc + + .section .fixup,"ax" + .align 0 +9001: mov r3, #0 + strb r3, [r0, #0] @ null terminate + mov r0, #-EFAULT + restore_pc + .previous + +/* Prototype: unsigned long uaccess_user_strnlen_user(const char *str, long n) + * Purpose : get length of a string in user memory + * Params : str - address of string in user memory + * Returns : length of string *including terminator* + * or zero on exception, or n + 1 if too long + */ +ENTRY(uaccess_user_strnlen_user) + save_lr + mov r2, r0 +1: +USER( ldrbt r3, [r0], #1) + teq r3, #0 + beq 2f + subs r1, r1, #1 + bne 1b + add r0, r0, #1 +2: sub r0, r0, r2 + restore_pc + + .section .fixup,"ax" + .align 0 +9001: mov r0, #0 + restore_pc + .previous + diff -urN linux-2.5.70-bk13/arch/arm26/lib/ucmpdi2.c linux-2.5.70-bk14/arch/arm26/lib/ucmpdi2.c --- linux-2.5.70-bk13/arch/arm26/lib/ucmpdi2.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/ucmpdi2.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,51 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +word_type +__ucmpdi2 (DItype a, DItype b) +{ + DIunion au, bu; + + au.ll = a, bu.ll = b; + + if ((USItype) au.s.high < (USItype) bu.s.high) + return 0; + else if ((USItype) au.s.high > (USItype) bu.s.high) + return 2; + if ((USItype) au.s.low < (USItype) bu.s.low) + return 0; + else if ((USItype) au.s.low > (USItype) bu.s.low) + return 2; + return 1; +} + diff -urN linux-2.5.70-bk13/arch/arm26/lib/udivdi3.c linux-2.5.70-bk14/arch/arm26/lib/udivdi3.c --- linux-2.5.70-bk13/arch/arm26/lib/udivdi3.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/lib/udivdi3.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,242 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. 
*/ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" +#include "longlong.h" + +static const UQItype __clz_tab[] = +{ + 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, +}; + +UDItype +__udivmoddi4 (UDItype n, UDItype d, UDItype *rp) +{ + DIunion ww; + DIunion nn, dd; + DIunion rr; + USItype d0, d1, n0, n1, n2; + USItype q0, q1; + USItype b, bm; + + nn.ll = n; + dd.ll = d; + + d0 = dd.s.low; + d1 = dd.s.high; + n0 = nn.s.low; + n1 = nn.s.high; + + if (d1 == 0) + { + if (d0 > n1) + { + /* 0q = nn / 0D */ + + count_leading_zeros (bm, d0); + + if (bm != 0) + { + /* Normalize, i.e. make the most significant bit of the + denominator set. */ + + d0 = d0 << bm; + n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm)); + n0 = n0 << bm; + } + + udiv_qrnnd (q0, n0, n1, n0, d0); + q1 = 0; + + /* Remainder in n0 >> bm. */ + } + else + { + /* qq = NN / 0d */ + + if (d0 == 0) + d0 = 1 / d0; /* Divide intentionally by zero. */ + + count_leading_zeros (bm, d0); + + if (bm == 0) + { + /* From (n1 >= d0) /\ (the most significant bit of d0 is set), + conclude (the most significant bit of n1 is set) /\ (the + leading quotient digit q1 = 1). + + This special case is necessary, not an optimization. + (Shifts counts of SI_TYPE_SIZE are undefined.) */ + + n1 -= d0; + q1 = 1; + } + else + { + /* Normalize. */ + + b = SI_TYPE_SIZE - bm; + + d0 = d0 << bm; + n2 = n1 >> b; + n1 = (n1 << bm) | (n0 >> b); + n0 = n0 << bm; + + udiv_qrnnd (q1, n1, n2, n1, d0); + } + + /* n1 != d0... */ + + udiv_qrnnd (q0, n0, n1, n0, d0); + + /* Remainder in n0 >> bm. */ + } + + if (rp != 0) + { + rr.s.low = n0 >> bm; + rr.s.high = 0; + *rp = rr.ll; + } + } + else + { + if (d1 > n1) + { + /* 00 = nn / DD */ + + q0 = 0; + q1 = 0; + + /* Remainder in n1n0. 
*/ + if (rp != 0) + { + rr.s.low = n0; + rr.s.high = n1; + *rp = rr.ll; + } + } + else + { + /* 0q = NN / dd */ + + count_leading_zeros (bm, d1); + if (bm == 0) + { + /* From (n1 >= d1) /\ (the most significant bit of d1 is set), + conclude (the most significant bit of n1 is set) /\ (the + quotient digit q0 = 0 or 1). + + This special case is necessary, not an optimization. */ + + /* The condition on the next line takes advantage of that + n1 >= d1 (true due to program flow). */ + if (n1 > d1 || n0 >= d0) + { + q0 = 1; + sub_ddmmss (n1, n0, n1, n0, d1, d0); + } + else + q0 = 0; + + q1 = 0; + + if (rp != 0) + { + rr.s.low = n0; + rr.s.high = n1; + *rp = rr.ll; + } + } + else + { + USItype m1, m0; + /* Normalize. */ + + b = SI_TYPE_SIZE - bm; + + d1 = (d1 << bm) | (d0 >> b); + d0 = d0 << bm; + n2 = n1 >> b; + n1 = (n1 << bm) | (n0 >> b); + n0 = n0 << bm; + + udiv_qrnnd (q0, n1, n2, n1, d1); + umul_ppmm (m1, m0, q0, d0); + + if (m1 > n1 || (m1 == n1 && m0 > n0)) + { + q0--; + sub_ddmmss (m1, m0, m1, m0, d1, d0); + } + + q1 = 0; + + /* Remainder in (n1n0 - m1m0) >> bm. */ + if (rp != 0) + { + sub_ddmmss (n1, n0, n1, n0, m1, m0); + rr.s.low = (n1 << b) | (n0 >> bm); + rr.s.high = n1 >> bm; + *rp = rr.ll; + } + } + } + } + + ww.s.low = q0; + ww.s.high = q1; + return ww.ll; +} + +UDItype +__udivdi3 (UDItype n, UDItype d) +{ + return __udivmoddi4 (n, d, (UDItype *) 0); +} + +UDItype +__umoddi3 (UDItype u, UDItype v) +{ + UDItype w; + + (void) __udivmoddi4 (u ,v, &w); + + return w; +} + diff -urN linux-2.5.70-bk13/arch/arm26/machine/Makefile linux-2.5.70-bk14/arch/arm26/machine/Makefile --- linux-2.5.70-bk13/arch/arm26/machine/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/machine/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,12 @@ +# +# Makefile for the linux kernel. +# + +# Object file lists. + +obj-y := arch.o dma.o irq.o oldlatches.o \ + small_page.o + +extra-y := head.o + +AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR) diff -urN linux-2.5.70-bk13/arch/arm26/machine/arch.c linux-2.5.70-bk14/arch/arm26/machine/arch.c --- linux-2.5.70-bk13/arch/arm26/machine/arch.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/machine/arch.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,36 @@ +/* + * linux/arch/arm26/mach-arc/arch.c + * + * Copyright (C) 1998-2001 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Architecture specific fixups. 
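+ *
+ * The MACHINE_START()/MACHINE_END block below expands to a machine
+ * description record (struct machine_desc) giving the address of the
+ * boot parameter page (0x0207c000) and the IRQ initialisation hook
+ * (arc_init_irq).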
+ */ +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +extern void arc_init_irq(void); + +#ifdef CONFIG_ARCH_ARC +MACHINE_START(ARCHIMEDES, "Acorn-Archimedes") +#elif defined(CONFIG_ARCH_A5K) +MACHINE_START(A5K, "Acorn-A5000") +#endif + MAINTAINER("Ian Molton") + BOOT_PARAMS(0x0207c000) + INITIRQ(arc_init_irq) +MACHINE_END + diff -urN linux-2.5.70-bk13/arch/arm26/machine/dma.c linux-2.5.70-bk14/arch/arm26/machine/dma.c --- linux-2.5.70-bk13/arch/arm26/machine/dma.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/machine/dma.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,215 @@ +/* + * linux/arch/arm/kernel/dma-arc.c + * + * Copyright (C) 1998-1999 Dave Gilbert / Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * DMA functions specific to Archimedes and A5000 architecture + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define DPRINTK(x...) printk(KERN_DEBUG x) + +#if defined(CONFIG_BLK_DEV_FD1772) || defined(CONFIG_BLK_DEV_FD1772_MODULE) + +extern unsigned char fdc1772_dma_read, fdc1772_dma_read_end; +extern unsigned char fdc1772_dma_write, fdc1772_dma_write_end; +extern void fdc1772_setupdma(unsigned int count,unsigned int addr); + +static void arc_floppy_data_enable_dma(dmach_t channel, dma_t *dma) +{ + DPRINTK("arc_floppy_data_enable_dma\n"); + + if (dma->using_sg) + BUG(); + + switch (dma->dma_mode) { + case DMA_MODE_READ: { /* read */ + unsigned long flags; + DPRINTK("enable_dma fdc1772 data read\n"); + local_save_flags_cli(flags); + clf(); + + memcpy ((void *)0x1c, (void *)&fdc1772_dma_read, + &fdc1772_dma_read_end - &fdc1772_dma_read); + fdc1772_setupdma(dma->buf.length, dma->buf.__address); /* Sets data pointer up */ + enable_fiq(FIQ_FLOPPYDATA); + loacl_irq_restore(flags); + } + break; + + case DMA_MODE_WRITE: { /* write */ + unsigned long flags; + DPRINTK("enable_dma fdc1772 data write\n"); + local_save_flags_cli(flags); + clf(); + memcpy ((void *)0x1c, (void *)&fdc1772_dma_write, + &fdc1772_dma_write_end - &fdc1772_dma_write); + fdc1772_setupdma(dma->buf.length, dma->buf.__address); /* Sets data pointer up */ + enable_fiq(FIQ_FLOPPYDATA); + + local_irq_restore(flags); + } + break; + default: + printk ("enable_dma: dma%d not initialised\n", channel); + } +} + +static int arc_floppy_data_get_dma_residue(dmach_t channel, dma_t *dma) +{ + extern unsigned int fdc1772_bytestogo; + + /* 10/1/1999 DAG - I presume its the number of bytes left? */ + return fdc1772_bytestogo; +} + +static void arc_floppy_cmdend_enable_dma(dmach_t channel, dma_t *dma) +{ + /* Need to build a branch at the FIQ address */ + extern void fdc1772_comendhandler(void); + unsigned long flags; + + DPRINTK("arc_floppy_cmdend_enable_dma\n"); + /*printk("enable_dma fdc1772 command end FIQ\n");*/ + save_flags(flags); + clf(); + + /* B fdc1772_comendhandler */ + *((unsigned int *)0x1c)=0xea000000 | + (((unsigned int)fdc1772_comendhandler-(0x1c+8))/4); + + local_irq_restore(flags); +} + +static int arc_floppy_cmdend_get_dma_residue(dmach_t channel, dma_t *dma) +{ + /* 10/1/1999 DAG - Presume whether there is an outstanding command? */ + extern unsigned int fdc1772_fdc_int_done; + + /* Explicit! 
If the int done is 0 then 1 int to go */ + return (fdc1772_fdc_int_done==0)?1:0; +} + +static void arc_disable_dma(dmach_t channel, dma_t *dma) +{ + disable_fiq(dma->dma_irq); +} + +static struct dma_ops arc_floppy_data_dma_ops = { + .type = "FIQDMA", + .enable = arc_floppy_data_enable_dma, + .disable = arc_disable_dma, + .residue = arc_floppy_data_get_dma_residue, +}; + +static struct dma_ops arc_floppy_cmdend_dma_ops = { + .type = "FIQCMD", + .enable = arc_floppy_cmdend_enable_dma, + .disable = arc_disable_dma, + .residue = arc_floppy_cmdend_get_dma_residue, +}; +#endif + +#ifdef CONFIG_ARCH_A5K +static struct fiq_handler fh = { + .name = "floppydata" +}; + +static int a5k_floppy_get_dma_residue(dmach_t channel, dma_t *dma) +{ + struct pt_regs regs; + get_fiq_regs(®s); + return regs.ARM_r9; +} + +static void a5k_floppy_enable_dma(dmach_t channel, dma_t *dma) +{ + struct pt_regs regs; + void *fiqhandler_start; + unsigned int fiqhandler_length; + extern void floppy_fiqsetup(unsigned long len, unsigned long addr, + unsigned long port); + + if (dma->using_sg) + BUG(); + + if (dma->dma_mode == DMA_MODE_READ) { + extern unsigned char floppy_fiqin_start, floppy_fiqin_end; + fiqhandler_start = &floppy_fiqin_start; + fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start; + } else { + extern unsigned char floppy_fiqout_start, floppy_fiqout_end; + fiqhandler_start = &floppy_fiqout_start; + fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start; + } + if (claim_fiq(&fh)) { + printk("floppydma: couldn't claim FIQ.\n"); + return; + } + memcpy((void *)0x1c, fiqhandler_start, fiqhandler_length); + regs.ARM_r9 = dma->buf.length; + regs.ARM_r10 = (unsigned long)dma->buf.__address; + regs.ARM_fp = FLOPPYDMA_BASE; + set_fiq_regs(®s); + enable_fiq(dma->dma_irq); +} + +static void a5k_floppy_disable_dma(dmach_t channel, dma_t *dma) +{ + disable_fiq(dma->dma_irq); + release_fiq(&fh); +} + +static struct dma_ops a5k_floppy_dma_ops = { + .type = "FIQDMA", + .enable = a5k_floppy_enable_dma, + .disable = a5k_floppy_disable_dma, + .residue = a5k_floppy_get_dma_residue, +}; +#endif + +/* + * This is virtual DMA - we don't need anything here + */ +static void sound_enable_disable_dma(dmach_t channel, dma_t *dma) +{ +} + +static struct dma_ops sound_dma_ops = { + .type = "VIRTUAL", + .enable = sound_enable_disable_dma, + .disable = sound_enable_disable_dma, +}; + +void __init arch_dma_init(dma_t *dma) +{ +#if defined(CONFIG_BLK_DEV_FD1772) || defined(CONFIG_BLK_DEV_FD1772_MODULE) + if (machine_is_archimedes()) { + dma[DMA_VIRTUAL_FLOPPY0].dma_irq = FIQ_FLOPPYDATA; + dma[DMA_VIRTUAL_FLOPPY0].d_ops = &arc_floppy_data_dma_ops; + dma[DMA_VIRTUAL_FLOPPY1].dma_irq = 1; + dma[DMA_VIRTUAL_FLOPPY1].d_ops = &arc_floppy_cmdend_dma_ops; + } +#endif +#ifdef CONFIG_ARCH_A5K + if (machine_is_a5k()) { + dma[DMA_VIRTUAL_FLOPPY0].dma_irq = FIQ_FLOPPYDATA; + dma[DMA_VIRTUAL_FLOPPY0].d_ops = &a5k_floppy_dma_ops; + } +#endif + dma[DMA_VIRTUAL_SOUND].d_ops = &sound_dma_ops; +} diff -urN linux-2.5.70-bk13/arch/arm26/machine/head.S linux-2.5.70-bk14/arch/arm26/machine/head.S --- linux-2.5.70-bk13/arch/arm26/machine/head.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/machine/head.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,93 @@ +/* + * linux/arch/arm/kernel/head-armo.S + * + * Copyright (C) 1994-2000 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * 
published by the Free Software Foundation. + * + * 26-bit kernel startup code + */ +#include +#include +#include + + .globl swapper_pg_dir + .equ swapper_pg_dir, 0x0207d000 + +/* + * Entry point. + */ + .section ".init.text",#alloc,#execinstr +ENTRY(stext) +__entry: cmp pc, #0x02000000 + ldrlt pc, LC0 @ if 0x01800000, call at 0x02080000 + teq r0, #0 @ Check for old calling method + blne oldparams @ Move page if old + adr r0, LC0 + ldmib r0, {r2-r5, sp} @ Setup stack + mov r0, #0 +1: cmp r2, r3 @ Clear BSS + strcc r0, [r2], #4 + bcc 1b + + bl detect_proc_type + str r0, [r4] + bl detect_arch_type + str r0, [r5] + + mov fp, #0 + b start_kernel + +LC0: .word _stext + .word __bss_start @ r2 + .word _end @ r3 + .word processor_id @ r4 + .word __machine_arch_type @ r5 + .word init_thread_union+8192 @ sp +arm2_id: .long 0x41560200 +arm250_id: .long 0x41560250 + .align + +oldparams: mov r4, #0x02000000 + add r3, r4, #0x00080000 + add r4, r4, #0x0007c000 +1: ldmia r0!, {r5 - r12} + stmia r4!, {r5 - r12} + cmp r4, r3 + blt 1b + mov pc, lr + +/* + * We need some way to automatically detect the difference between + * these two machines. Unfortunately, it is not possible to detect + * the presence of the SuperIO chip, because that will hang the old + * Archimedes machines solid. + */ +/* DAG: Outdated, these have been combined !!!!!!! */ +detect_arch_type: +#if defined(CONFIG_ARCH_ARC) + mov r0, #MACH_TYPE_ARCHIMEDES +#elif defined(CONFIG_ARCH_A5K) + mov r0, #MACH_TYPE_A5K +#endif + mov pc, lr + +detect_proc_type: + mov ip, lr + mov r2, #0xea000000 @ Point undef instr to continuation + adr r0, continue - 12 + orr r0, r2, r0, lsr #2 + mov r1, #0 + str r0, [r1, #4] + ldr r0, arm2_id + swp r2, r2, [r1] @ check for swp (ARM2 cant) + ldr r0, arm250_id + mrc 15, 0, r3, c0, c0 @ check for CP#15 (ARM250 cant) + mov r0, r3 +continue: mov r2, #0xeb000000 @ Make undef vector loop + sub r2, r2, #2 + str r2, [r1, #4] + mov pc, ip diff -urN linux-2.5.70-bk13/arch/arm26/machine/irq.c linux-2.5.70-bk14/arch/arm26/machine/irq.c --- linux-2.5.70-bk13/arch/arm26/machine/irq.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/machine/irq.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,165 @@ +/* + * linux/arch/arm/mach-arc/irq.c + * + * Copyright (C) 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
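+ *
+ * IRQs 0-7 are driven through IOC mask/clear register A and IRQs 8-15
+ * through mask register B; arc_init_irq() masks everything at start-up
+ * and installs level-triggered handlers for both banks.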
+ * + * Changelog: + * 24-09-1996 RMK Created + * 10-10-1996 RMK Brought up to date with arch-sa110eval + * 22-10-1996 RMK Changed interrupt numbers & uses new inb/outb macros + * 11-01-1998 RMK Added mask_and_ack_irq + * 22-08-1998 RMK Restructured IRQ routines + * 08-09-2002 IM Brought up to date for 2.5 + * 01-06-2003 JMA Removed arc_fiq_chip + */ +#include +#include + +#include +#include +#include +#include +#include + +extern void init_FIQ(void); + +#define a_clf() clf() +#define a_stf() stf() + +static void arc_ack_irq_a(unsigned int irq) +{ + unsigned int val, mask; + + mask = 1 << irq; + a_clf(); + val = ioc_readb(IOC_IRQMASKA); + ioc_writeb(val & ~mask, IOC_IRQMASKA); + ioc_writeb(mask, IOC_IRQCLRA); + a_stf(); +} + +static void arc_mask_irq_a(unsigned int irq) +{ + unsigned int val, mask; + + mask = 1 << irq; + a_clf(); + val = ioc_readb(IOC_IRQMASKA); + ioc_writeb(val & ~mask, IOC_IRQMASKA); + a_stf(); +} + +static void arc_unmask_irq_a(unsigned int irq) +{ + unsigned int val, mask; + + mask = 1 << irq; + a_clf(); + val = ioc_readb(IOC_IRQMASKA); + ioc_writeb(val | mask, IOC_IRQMASKA); + a_stf(); +} + +static struct irqchip arc_a_chip = { + .ack = arc_ack_irq_a, + .mask = arc_mask_irq_a, + .unmask = arc_unmask_irq_a, +}; + +static void arc_mask_irq_b(unsigned int irq) +{ + unsigned int val, mask; + + mask = 1 << (irq & 7); + val = ioc_readb(IOC_IRQMASKB); + ioc_writeb(val & ~mask, IOC_IRQMASKB); +} + +static void arc_unmask_irq_b(unsigned int irq) +{ + unsigned int val, mask; + + mask = 1 << (irq & 7); + val = ioc_readb(IOC_IRQMASKB); + ioc_writeb(val | mask, IOC_IRQMASKB); +} + +static struct irqchip arc_b_chip = { + .ack = arc_mask_irq_b, + .mask = arc_mask_irq_b, + .unmask = arc_unmask_irq_b, +}; + +/* FIXME - JMA none of these functions are used in arm26 +static void arc_mask_irq_fiq(unsigned int irq) +{ + unsigned int val, mask; + + mask = 1 << (irq & 7); + val = ioc_readb(IOC_FIQMASK); + ioc_writeb(val & ~mask, IOC_FIQMASK); +} + +static void arc_unmask_irq_fiq(unsigned int irq) +{ + unsigned int val, mask; + + mask = 1 << (irq & 7); + val = ioc_readb(IOC_FIQMASK); + ioc_writeb(val | mask, IOC_FIQMASK); +} + +static struct irqchip arc_fiq_chip = { + .ack = arc_mask_irq_fiq, + .mask = arc_mask_irq_fiq, + .unmask = arc_unmask_irq_fiq, +}; +*/ + +void __init arc_init_irq(void) +{ + unsigned int irq, flags; + + ioc_writeb(0, IOC_IRQMASKA); + ioc_writeb(0, IOC_IRQMASKB); + ioc_writeb(0, IOC_FIQMASK); + + for (irq = 0; irq < NR_IRQS; irq++) { + flags = IRQF_VALID; + + if (irq <= 6 || (irq >= 9 && irq <= 15)) + flags |= IRQF_PROBE; + + if (irq == IRQ_KEYBOARDTX) + flags |= IRQF_NOAUTOEN; + + switch (irq) { + case 0 ... 7: + set_irq_chip(irq, &arc_a_chip); + set_irq_handler(irq, do_level_IRQ); + set_irq_flags(irq, flags); + break; + + case 8 ... 15: + set_irq_chip(irq, &arc_b_chip); + set_irq_handler(irq, do_level_IRQ); + set_irq_flags(irq, flags); + +/* case 64 ... 
72: + set_irq_chip(irq, &arc_fiq_chip); + set_irq_flags(irq, flags); + break; +*/ + + } + } + + irq_desc[IRQ_KEYBOARDTX].noautoenable = 1; + + init_FIQ(); +} + diff -urN linux-2.5.70-bk13/arch/arm26/machine/oldlatches.c linux-2.5.70-bk14/arch/arm26/machine/oldlatches.c --- linux-2.5.70-bk13/arch/arm26/machine/oldlatches.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/machine/oldlatches.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,72 @@ +/* + * linux/arch/arm/kernel/oldlatches.c + * + * Copyright (C) David Alan Gilbert 1995/1996,2000 + * Copyright (C) Ian Molton 2003 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support for the latches on the old Archimedes which control the floppy, + * hard disc and printer + */ +#include +#include +#include +#include + +#include +#include +#include +#include + +static unsigned char latch_a_copy; +static unsigned char latch_b_copy; + +/* newval=(oldval & ~mask)|newdata */ +void oldlatch_aupdate(unsigned char mask,unsigned char newdata) +{ + unsigned long flags; + + BUG_ON(!machine_is_archimedes()); + + local_irq_save(flags); //FIXME: was local_save_flags + latch_a_copy = (latch_a_copy & ~mask) | newdata; + __raw_writeb(latch_a_copy, LATCHA_BASE); + local_irq_restore(flags); + + printk("Latch: A = 0x%02x\n", latch_a_copy); +} + + +/* newval=(oldval & ~mask)|newdata */ +void oldlatch_bupdate(unsigned char mask,unsigned char newdata) +{ + unsigned long flags; + + BUG_ON(!machine_is_archimedes()); + + + local_irq_save(flags);//FIXME: was local_save_flags + latch_b_copy = (latch_b_copy & ~mask) | newdata; + __raw_writeb(latch_b_copy, LATCHB_BASE); + local_irq_restore(flags); + + printk("Latch: B = 0x%02x\n", latch_b_copy); +} + +static int __init oldlatch_init(void) +{ + if (machine_is_archimedes()) { + oldlatch_aupdate(0xff, 0xff); + /* Thats no FDC reset...*/ + oldlatch_bupdate(0xff, LATCHB_FDCRESET); + } + return 0; +} + +arch_initcall(oldlatch_init); + +EXPORT_SYMBOL(oldlatch_aupdate); +EXPORT_SYMBOL(oldlatch_bupdate); diff -urN linux-2.5.70-bk13/arch/arm26/machine/small_page.c linux-2.5.70-bk14/arch/arm26/machine/small_page.c --- linux-2.5.70-bk13/arch/arm26/machine/small_page.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/machine/small_page.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,191 @@ +/* + * linux/arch/arm/mm/small_page.c + * + * Copyright (C) 1996 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 26/01/1996 RMK Cleaned up various areas to make little more generic + * 07/02/1999 RMK Support added for 16K and 32K page sizes + * containing 8K blocks + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define PEDANTIC + +/* + * Requirement: + * We need to be able to allocate naturally aligned memory of finer + * granularity than the page size. This is typically used for the + * second level page tables on 32-bit ARMs. + * + * Theory: + * We "misuse" the Linux memory management system. We use alloc_page + * to allocate a page and then mark it as reserved. 
The Linux memory + * management system will then ignore the "offset", "next_hash" and + * "pprev_hash" entries in the mem_map for this page. + * + * We then use a bitstring in the "offset" field to mark which segments + * of the page are in use, and manipulate this as required during the + * allocation and freeing of these small pages. + * + * We also maintain a queue of pages being used for this purpose using + * the "next_hash" and "pprev_hash" entries of mem_map; + */ + +struct order { + struct list_head queue; + unsigned int mask; /* (1 << shift) - 1 */ + unsigned int shift; /* (1 << shift) size of page */ + unsigned int block_mask; /* nr_blocks - 1 */ + unsigned int all_used; /* (1 << nr_blocks) - 1 */ +}; + + +static struct order orders[] = { +#if PAGE_SIZE == 32768 + { LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff }, + { LIST_HEAD_INIT(orders[1].queue), 8191, 13, 3, 0x0000000f } +#else +#error unsupported page size (ARGH!) +#endif +}; + +#define USED_MAP(pg) ((pg)->index) +#define TEST_AND_CLEAR_USED(pg,off) (test_and_clear_bit(off, &USED_MAP(pg))) +#define SET_USED(pg,off) (set_bit(off, &USED_MAP(pg))) + +static spinlock_t small_page_lock = SPIN_LOCK_UNLOCKED; + +static unsigned long __get_small_page(int priority, struct order *order) +{ + unsigned long flags; + struct page *page; + int offset; + + do { + spin_lock_irqsave(&small_page_lock, flags); + + if (list_empty(&order->queue)) + goto need_new_page; + + page = list_entry(order->queue.next, struct page, list); +again: +#ifdef PEDANTIC + if (USED_MAP(page) & ~order->all_used) + PAGE_BUG(page); +#endif + offset = ffz(USED_MAP(page)); + SET_USED(page, offset); + if (USED_MAP(page) == order->all_used) + list_del_init(&page->list); + spin_unlock_irqrestore(&small_page_lock, flags); + + return (unsigned long) page_address(page) + (offset << order->shift); + +need_new_page: + spin_unlock_irqrestore(&small_page_lock, flags); + page = alloc_page(priority); + spin_lock_irqsave(&small_page_lock, flags); + + if (list_empty(&order->queue)) { + if (!page) + goto no_page; + SetPageReserved(page); + USED_MAP(page) = 0; + list_add(&page->list, &order->queue); + goto again; + } + + spin_unlock_irqrestore(&small_page_lock, flags); + __free_page(page); + } while (1); + +no_page: + spin_unlock_irqrestore(&small_page_lock, flags); + return 0; +} + +static void __free_small_page(unsigned long spage, struct order *order) +{ + unsigned long flags; + struct page *page; + + if (virt_addr_valid(spage)) { + page = virt_to_page(spage); + + /* + * The container-page must be marked Reserved + */ + if (!PageReserved(page) || spage & order->mask) + goto non_small; + +#ifdef PEDANTIC + if (USED_MAP(page) & ~order->all_used) + PAGE_BUG(page); +#endif + + spage = spage >> order->shift; + spage &= order->block_mask; + + /* + * the following must be atomic wrt get_page + */ + spin_lock_irqsave(&small_page_lock, flags); + + if (USED_MAP(page) == order->all_used) + list_add(&page->list, &order->queue); + + if (!TEST_AND_CLEAR_USED(page, spage)) + goto already_free; + + if (USED_MAP(page) == 0) + goto free_page; + + spin_unlock_irqrestore(&small_page_lock, flags); + } + return; + +free_page: + /* + * unlink the page from the small page queue and free it + */ + list_del_init(&page->list); + spin_unlock_irqrestore(&small_page_lock, flags); + ClearPageReserved(page); + __free_page(page); + return; + +non_small: + printk("Trying to free non-small page from %p\n", __builtin_return_address(0)); + return; +already_free: + printk("Trying to free free small page 
from %p\n", __builtin_return_address(0)); +} + +unsigned long get_page_8k(int priority) +{ + return __get_small_page(priority, orders+1); +} + +void free_page_8k(unsigned long spage) +{ + __free_small_page(spage, orders+1); +} diff -urN linux-2.5.70-bk13/arch/arm26/mm/Makefile linux-2.5.70-bk14/arch/arm26/mm/Makefile --- linux-2.5.70-bk13/arch/arm26/mm/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/mm/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,12 @@ +# +# Makefile for the linux arm26-specific parts of the memory manager. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definition is now in the main makefile... + +# Object file lists. + +obj-y := init.o extable.o proc-funcs.o mm-memc.o fault.o diff -urN linux-2.5.70-bk13/arch/arm26/mm/extable.c linux-2.5.70-bk14/arch/arm26/mm/extable.c --- linux-2.5.70-bk13/arch/arm26/mm/extable.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/mm/extable.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,40 @@ +/* + * linux/arch/arm/mm/extable.c + */ + +#include +#include +#include + +const struct exception_table_entry * +search_extable(const struct exception_table_entry *first, + const struct exception_table_entry *last, + unsigned long value) +{ + while (first <= last) { + const struct exception_table_entry *mid; + long diff; + + mid = (last - first) / 2 + first; + diff = mid->insn - value; + if (diff == 0) + return mid; + else if (diff < 0) + first = mid+1; + else + last = mid-1; + } + return NULL; +} + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(instruction_pointer(regs)); + if (fixup) + regs->ARM_pc = fixup->fixup | PSR_I_BIT | MODE_SVC26; + + return fixup != NULL; +} + diff -urN linux-2.5.70-bk13/arch/arm26/mm/fault.c linux-2.5.70-bk14/arch/arm26/mm/fault.c --- linux-2.5.70-bk13/arch/arm26/mm/fault.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/mm/fault.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,318 @@ +/* + * linux/arch/arm/mm/fault-common.c + * + * Copyright (C) 1995 Linus Torvalds + * Modifications for ARM processor (c) 1995-2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include //FIXME this header may be bogusly included + +#include "fault.h" + +#define FAULT_CODE_LDRSTRPOST 0x80 +#define FAULT_CODE_LDRSTRPRE 0x40 +#define FAULT_CODE_LDRSTRREG 0x20 +#define FAULT_CODE_LDMSTM 0x10 +#define FAULT_CODE_LDCSTC 0x08 +#define FAULT_CODE_PREFETCH 0x04 +#define FAULT_CODE_WRITE 0x02 +#define FAULT_CODE_FORCECOW 0x01 + +#define DO_COW(m) ((m) & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW)) +#define READ_FAULT(m) (!((m) & FAULT_CODE_WRITE)) +#define DEBUG +/* + * This is useful to dump out the page tables associated with + * 'addr' in mm 'mm'. 
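+ * Passing a NULL mm falls back to init_mm; the fault handlers below
+ * call this as show_pte(tsk->mm, addr) when reporting a bad access.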
+ */ +void show_pte(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + + if (!mm) + mm = &init_mm; + + printk(KERN_ALERT "pgd = %p\n", mm->pgd); + pgd = pgd_offset(mm, addr); + printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd)); + + do { + pmd_t *pmd; + pte_t *pte; + + pmd = pmd_offset(pgd, addr); + + if (pmd_none(*pmd)) + break; + + if (pmd_bad(*pmd)) { + printk("(bad)"); + break; + } + + /* We must not map this if we have highmem enabled */ + /* FIXME */ + pte = pte_offset_map(pmd, addr); + printk(", *pte=%08lx", pte_val(*pte)); + pte_unmap(pte); + } while(0); + + printk("\n"); +} + +/* + * Oops. The kernel tried to access some page that wasn't present. + */ +static void +__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + /* + * Are we prepared to handle this kernel fault? + */ + if (fixup_exception(regs)) + return; + + /* + * No handler, we'll have to terminate things with extreme prejudice. + */ + bust_spinlocks(1); + printk(KERN_ALERT + "Unable to handle kernel %s at virtual address %08lx\n", + (addr < PAGE_SIZE) ? "NULL pointer dereference" : + "paging request", addr); + + show_pte(mm, addr); + die("Oops", regs, fsr); + bust_spinlocks(0); + do_exit(SIGKILL); +} + +/* + * Something tried to access memory that isn't in our memory map.. + * User mode accesses just cause a SIGSEGV + */ +static void +__do_user_fault(struct task_struct *tsk, unsigned long addr, + unsigned int fsr, int code, struct pt_regs *regs) +{ + struct siginfo si; + +#ifdef CONFIG_DEBUG_USER + printk("%s: unhandled page fault at 0x%08lx, code 0x%03x\n", + tsk->comm, addr, fsr); + show_pte(tsk->mm, addr); + show_regs(regs); + //dump_backtrace(regs, tsk); // FIXME ARM32 dropped this - why? + while(1); +#endif + + tsk->thread.address = addr; + tsk->thread.error_code = fsr; + tsk->thread.trap_no = 14; + si.si_signo = SIGSEGV; + si.si_errno = 0; + si.si_code = code; + si.si_addr = (void *)addr; + force_sig_info(SIGSEGV, &si, tsk); +} + +static int +__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, + struct task_struct *tsk) +{ + struct vm_area_struct *vma; + int fault, mask; + + vma = find_vma(mm, addr); + fault = -2; /* bad map area */ + if (!vma) + goto out; + if (vma->vm_start > addr) + goto check_stack; + + /* + * Ok, we have a good vm_area for this + * memory access, so we can handle it. + */ +good_area: + if (READ_FAULT(fsr)) /* read? */ + mask = VM_READ|VM_EXEC; + else + mask = VM_WRITE; + + fault = -1; /* bad access type */ + if (!(vma->vm_flags & mask)) + goto out; + + /* + * If for any reason at all we couldn't handle + * the fault, make sure we exit gracefully rather + * than endlessly redo the fault. 
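+ *
+ * The switch below interprets handle_mm_fault(): 2 means a major
+ * fault, 1 a minor fault, 0 tells the caller to raise SIGBUS, and
+ * anything else falls through to the out-of-memory handling.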
+ */ +survive: + fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr)); + + /* + * Handle the "normal" cases first - successful and sigbus + */ + switch (fault) { + case 2: + tsk->maj_flt++; + return fault; + case 1: + tsk->min_flt++; + case 0: + return fault; + } + + fault = -3; /* out of memory */ + if (tsk->pid != 1) + goto out; + + /* + * If we are out of memory for pid1, + * sleep for a while and retry + */ + yield(); + goto survive; + +check_stack: + if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) + goto good_area; +out: + return fault; +} + +int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) +{ + struct task_struct *tsk; + struct mm_struct *mm; + int fault; + + tsk = current; + mm = tsk->mm; + + printk("do_page_fault: pid: %d\n", tsk->pid); + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (in_interrupt() || !mm) + goto no_context; + + down_read(&mm->mmap_sem); + fault = __do_page_fault(mm, addr, fsr, tsk); + up_read(&mm->mmap_sem); + + /* + * Handle the "normal" case first + */ + if (fault > 0) + return 0; + + /* + * We had some memory, but were unable to + * successfully fix up this page fault. + */ + if (fault == 0){ + goto do_sigbus; + } + + /* + * If we are in kernel mode at this point, we + * have no context to handle this fault with. + */ + if (!user_mode(regs)){ + goto no_context; + } + + if (fault == -3) { + /* + * We ran out of memory, or some other thing happened to + * us that made us unable to handle the page fault gracefully. + */ + printk("VM: killing process %s\n", tsk->comm); + do_exit(SIGKILL); + } + else{ + __do_user_fault(tsk, addr, fsr, fault == -1 ? SEGV_ACCERR : SEGV_MAPERR, regs); + } + + return 0; + + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +do_sigbus: + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + tsk->thread.address = addr; //FIXME - need other bits setting? + tsk->thread.error_code = fsr; + tsk->thread.trap_no = 14; + force_sig(SIGBUS, tsk); +#ifdef CONFIG_DEBUG_USER + printk(KERN_DEBUG "%s: sigbus at 0x%08lx, pc=0x%08lx\n", + current->comm, addr, instruction_pointer(regs)); +#endif + + /* Kernel mode? Handle exceptions or die */ + if (user_mode(regs)) + return 0; + +no_context: + __do_kernel_fault(mm, addr, fsr, regs); + return 0; +} + +/* + * Handle a data abort. Note that we have to handle a range of addresses + * on ARM2/3 for ldm. If both pages are zero-mapped, then we have to force + * a copy-on-write. However, on the second page, we always force COW. 
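+ *
+ * The (min_addr ^ max_addr) >> PAGE_SHIFT test below is non-zero only
+ * when the two addresses fall in different pages; with the 32K pages
+ * used here, an ldm spanning 0x17ff8..0x18008 xors to 0xfff0 and so
+ * the second page is faulted (with COW forced) as well.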
+ */ +asmlinkage void +do_DataAbort(unsigned long min_addr, unsigned long max_addr, int mode, struct pt_regs *regs) +{ + do_page_fault(min_addr, mode, regs); + + if ((min_addr ^ max_addr) >> PAGE_SHIFT){ + do_page_fault(max_addr, mode | FAULT_CODE_FORCECOW, regs); + } +} + +asmlinkage int +do_PrefetchAbort(unsigned long addr, struct pt_regs *regs) +{ +#if 0 + if (the memc mapping for this page exists) { + printk ("Page in, but got abort (undefined instruction?)\n"); + return 0; + } +#endif + do_page_fault(addr, FAULT_CODE_PREFETCH, regs); + return 1; +} + diff -urN linux-2.5.70-bk13/arch/arm26/mm/fault.h linux-2.5.70-bk14/arch/arm26/mm/fault.h --- linux-2.5.70-bk13/arch/arm26/mm/fault.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/mm/fault.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,5 @@ +void show_pte(struct mm_struct *mm, unsigned long addr); + +int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs); + +unsigned long search_extable(unsigned long addr); //FIXME - is it right? diff -urN linux-2.5.70-bk13/arch/arm26/mm/init.c linux-2.5.70-bk14/arch/arm26/mm/init.c --- linux-2.5.70-bk13/arch/arm26/mm/init.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/mm/init.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,401 @@ +/* + * linux/arch/arm/mm/init.c + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define TABLE_SIZE PTRS_PER_PTE * sizeof(pte_t)) + +struct mmu_gather mmu_gathers[NR_CPUS]; + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern char _stext, _text, _etext, _end, __init_begin, __init_end; +extern unsigned long phys_initrd_start; +extern unsigned long phys_initrd_size; + +/* + * The sole use of this is to pass memory configuration + * data from paging_init to mem_init. + */ +static struct meminfo meminfo __initdata = { 0, }; + +/* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. 
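+ * It is allocated from bootmem and zeroed at the end of paging_init()
+ * below.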
+ */ +struct page *empty_zero_page; + +void show_mem(void) +{ + int free = 0, total = 0, reserved = 0; + int shared = 0, cached = 0, slab = 0; + struct page *page, *end; + + printk("Mem-info:\n"); + show_free_areas(); + printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); + + + page = NODE_MEM_MAP(0); + end = page + NODE_DATA(0)->node_size; + + do { + total++; + if (PageReserved(page)) + reserved++; + else if (PageSwapCache(page)) + cached++; + else if (PageSlab(page)) + slab++; + else if (!page_count(page)) + free++; + else + shared += atomic_read(&page->count) - 1; + page++; + } while (page < end); + + printk("%d pages of RAM\n", total); + printk("%d free pages\n", free); + printk("%d reserved pages\n", reserved); + printk("%d slab pages\n", slab); + printk("%d pages shared\n", shared); + printk("%d pages swap cached\n", cached); +} + +struct node_info { + unsigned int start; + unsigned int end; + int bootmap_pages; +}; + +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT) +#define PFN_SIZE(x) ((x) >> PAGE_SHIFT) +#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \ + (((unsigned long)(s)) & PAGE_MASK)) + +/* + * FIXME: We really want to avoid allocating the bootmap bitmap + * over the top of the initrd. Hopefully, this is located towards + * the start of a bank, so if we allocate the bootmap bitmap at + * the end, we won't clash. + */ +static unsigned int __init +find_bootmap_pfn(struct meminfo *mi, unsigned int bootmap_pages) +{ + unsigned int start_pfn, bootmap_pfn; + unsigned int start, end; + + start_pfn = PFN_UP((unsigned long)&_end); + bootmap_pfn = 0; + + /* ARM26 machines only have one node */ + if (mi->bank->node != 0) + BUG(); + + start = PFN_UP(mi->bank->start); + end = PFN_DOWN(mi->bank->size + mi->bank->start); + + if (start < start_pfn) + start = start_pfn; + + if (end <= start) + BUG(); + + if (end - start >= bootmap_pages) + bootmap_pfn = start; + else + BUG(); + + return bootmap_pfn; +} + +/* + * Scan the memory info structure and pull out: + * - the end of memory + * - the number of nodes + * - the pfn range of each node + * - the number of bootmem bitmap pages + */ +static void __init +find_memend_and_nodes(struct meminfo *mi, struct node_info *np) +{ + unsigned int memend_pfn = 0; + numnodes = 1; + + np->bootmap_pages = 0; + + if (mi->bank->size == 0) { + BUG(); + } + + /* + * Get the start and end pfns for this bank + */ + np->start = PFN_UP(mi->bank->start); + np->end = PFN_DOWN(mi->bank->start + mi->bank->size); + + if (memend_pfn < np->end) + memend_pfn = np->end; + + /* + * Calculate the number of pages we require to + * store the bootmem bitmaps. + */ + np->bootmap_pages = bootmem_bootmap_pages(np->end - np->start); + + /* + * This doesn't seem to be used by the Linux memory + * manager any more. If we can get rid of it, we + * also get rid of some of the stuff above as well. + */ + max_low_pfn = memend_pfn - PFN_DOWN(PHYS_OFFSET); + max_pfn = memend_pfn - PFN_DOWN(PHYS_OFFSET); + mi->end = memend_pfn << PAGE_SHIFT; + +} + +/* + * Reserve the various regions of node 0 + */ +static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages) +{ + pg_data_t *pgdat = NODE_DATA(0); + + /* + * Register the kernel text and data with bootmem. + * Note that this can only be in node 0. + */ + reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext); + + /* + * And don't forget to reserve the allocator bitmap, + * which will be freed later. 
+ */ + reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT, + bootmap_pages << PAGE_SHIFT); + + /* + * These should likewise go elsewhere. They pre-reserve + * the screen memory region at the start of main system + * memory. + */ + reserve_bootmem_node(pgdat, 0x02000000, 0x00080000); + +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = phys_initrd_start; + initrd_end = initrd_start + phys_initrd_size; + + /* Achimedes machines only have one node, so initrd is in node 0 */ + reserve_bootmem_node(pgdat, __pa(initrd_start), + initrd_end - initrd_start); +#endif + +} + + +/* + * Initialise the bootmem allocator for all nodes. This is called + * early during the architecture specific initialisation. + */ +void __init bootmem_init(struct meminfo *mi) +{ + struct node_info node_info; + unsigned int bootmap_pfn; + + find_memend_and_nodes(mi, &node_info); + + bootmap_pfn = find_bootmap_pfn(mi, node_info.bootmap_pages); + + /* + * Note that node 0 must always have some pages. + */ + if (node_info.end == 0) + BUG(); + + /* + * Initialise the bootmem allocator. + */ + init_bootmem_node(NODE_DATA(node), bootmap_pfn, node_info.start, node_info.end); + + /* + * Register all available RAM in this node with the bootmem allocator. + */ + free_bootmem_node(NODE_DATA(node), mi->bank->start, mi->bank->size); + + /* + * Reserve ram for stuff like initrd, video, kernel, etc. + */ + + reserve_node_zero(bootmap_pfn, node_info.bootmap_pages); + +} + +/* + * paging_init() sets up the page tables, initialises the zone memory + * maps, and sets up the zero page, bad page and bad page tables. + */ +void __init paging_init(struct meminfo *mi) +{ + void *zero_page; + unsigned long zone_size[MAX_NR_ZONES]; + unsigned long zhole_size[MAX_NR_ZONES]; + struct bootmem_data *bdata; + pg_data_t *pgdat; + int i; + + memcpy(&meminfo, mi, sizeof(meminfo)); + + /* + * allocate the zero page. Note that we count on this going ok. + */ + zero_page = alloc_bootmem_low_pages(PAGE_SIZE); + + /* + * initialise the page tables. + */ + memtable_init(mi); + flush_tlb_all(); + + /* + * initialise the zones in node 0 (archimedes have only 1 node) + */ + + for (i = 0; i < MAX_NR_ZONES; i++) { + zone_size[i] = 0; + zhole_size[i] = 0; + } + + pgdat = NODE_DATA(0); + bdata = pgdat->bdata; + + zone_size[0] = bdata->node_low_pfn - + (bdata->node_boot_start >> PAGE_SHIFT); + if (!zone_size[0]) + BUG(); + + free_area_init_node(0, pgdat, 0, zone_size, + bdata->node_boot_start >> PAGE_SHIFT, 0); + + mem_map = contig_page_data.node_mem_map; + + /* + * finish off the bad pages once + * the mem_map is initialised + */ + memzero(zero_page, PAGE_SIZE); + empty_zero_page = virt_to_page(zero_page); +} + +static inline void free_area(unsigned long addr, unsigned long end, char *s) +{ + unsigned int size = (end - addr) >> 10; + + for (; addr < end; addr += PAGE_SIZE) { + struct page *page = virt_to_page(addr); + ClearPageReserved(page); + set_page_count(page, 1); + free_page(addr); + totalram_pages++; + } + + if (size && s) + printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); +} + +/* + * mem_init() marks the free areas in the mem_map and tells us how much + * memory is free. This is done after various parts of the system have + * claimed their memory after the kernel image. 
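+ * Everything still marked free in the node 0 bootmem bitmap is handed
+ * to the page allocator here via free_all_bootmem_node().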
+ */ +void __init mem_init(void) +{ + unsigned int codepages, datapages, initpages; + pg_data_t *pgdat = NODE_DATA(0); + extern int sysctl_overcommit_memory; + + datapages = &_end - &_etext; + codepages = &_etext - &_text; + initpages = &__init_end - &__init_begin; + + high_memory = (void *)__va(meminfo.end); + max_mapnr = virt_to_page(high_memory) - mem_map; + + /* this will put all unused low memory onto the freelists */ + if (pgdat->node_size != 0) + totalram_pages += free_all_bootmem_node(pgdat); + + printk(KERN_INFO "Memory:"); + + num_physpages = meminfo.bank[0].size >> PAGE_SHIFT; + + printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); + printk(KERN_NOTICE "Memory: %luKB available (%dK code, " + "%dK data, %dK init)\n", + (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + codepages >> 10, datapages >> 10, initpages >> 10); + /* + * Turn on overcommit on tiny machines + */ + if (PAGE_SIZE >= 16384 && num_physpages <= 128) { + sysctl_overcommit_memory = 1; + printk("Turning on overcommit\n"); + } +} + +void free_initmem(void) +{ + free_area((unsigned long)(&__init_begin), + (unsigned long)(&__init_end), + "init"); +} + +#ifdef CONFIG_BLK_DEV_INITRD + +static int keep_initrd; + +void free_initrd_mem(unsigned long start, unsigned long end) +{ + if (!keep_initrd) + free_area(start, end, "initrd"); +} + +static int __init keepinitrd_setup(char *__unused) +{ + keep_initrd = 1; + return 1; +} + +__setup("keepinitrd", keepinitrd_setup); +#endif diff -urN linux-2.5.70-bk13/arch/arm26/mm/mm-memc.c linux-2.5.70-bk14/arch/arm26/mm/mm-memc.c --- linux-2.5.70-bk13/arch/arm26/mm/mm-memc.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/mm/mm-memc.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,204 @@ +/* + * linux/arch/arm/mm/mm-armo.c + * + * Copyright (C) 1998-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Page table sludge for older ARM processor architectures. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#define MEMC_TABLE_SIZE (256*sizeof(unsigned long)) + +kmem_cache_t *pte_cache, *pgd_cache; +int page_nr; + +/* + * Allocate space for a page table and a MEMC table. + * Note that we place the MEMC + * table before the page directory. This means we can + * easily get to both tightly-associated data structures + * with a single pointer. + */ +static inline pgd_t *alloc_pgd_table(void) +{ + void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL); + + if (pg2k) + pg2k += MEMC_TABLE_SIZE; + + return (pgd_t *)pg2k; +} + +/* + * Free a page table. this function is the counterpart to get_pgd_slow + * below, not alloc_pgd_table above. 
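+ * The MEMC table (256 words) sits immediately before the page
+ * directory inside the same kmem_cache block:
+ *
+ *     [ MEMC table | page directory ]
+ *     ^ tbl          ^ pgd
+ *
+ * so the pointer is stepped back by MEMC_TABLE_SIZE before being
+ * handed to kmem_cache_free().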
+ */ +void free_pgd_slow(pgd_t *pgd) +{ + unsigned long tbl = (unsigned long)pgd; + + tbl -= MEMC_TABLE_SIZE; + + kmem_cache_free(pgd_cache, (void *)tbl); +} + +/* + * Allocate a new pgd and fill it in ready for use + * + * A new tasks pgd is completely empty (all pages !present) except for: + * + * o The machine vectors at virtual address 0x0 + * o The vmalloc region at the top of address space + * + */ +#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD) + +pgd_t *get_pgd_slow(struct mm_struct *mm) +{ + void *pg2k; + pgd_t *new_pgd, *init_pgd; + pmd_t *new_pmd, *init_pmd; + pte_t *new_pte, *init_pte; + struct mm_struct bob; + + new_pgd = alloc_pgd_table(); + if (!new_pgd) + goto no_pgd; + + /* + * This lock is here just to satisfy pmd_alloc and pte_lock + * FIXME: I bet we could avoid taking it pretty much altogether + */ + spin_lock(&mm->page_table_lock); + + /* + * On ARM, first page must always be allocated since it contains + * the machine vectors. + */ + new_pmd = pmd_alloc(mm, new_pgd, 0); + if (!new_pmd) + goto no_pmd; + + new_pte = pte_alloc_kernel(mm, new_pmd, 0); + if (!new_pte) + goto no_pte; + + init_pgd = pgd_offset(&init_mm, 0); + init_pmd = pmd_offset(init_pgd, 0); + init_pte = pte_offset(init_pmd, 0); + + set_pte(new_pte, *init_pte); + + /* + * the page table entries are zeroed + * when the table is created. (see the cache_ctor functions below) + * Now we need to plonk the kernel (vmalloc) area at the end of + * the address space. We copy this from the init thread, just like + * the init_pte we copied above... + */ + memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, + (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t)); + + spin_unlock(&mm->page_table_lock); + + /* update MEMC tables */ + cpu_memc_update_all(new_pgd); + return new_pgd; + +no_pte: + spin_unlock(&mm->page_table_lock); + pmd_free(new_pmd); + free_pgd_slow(new_pgd); + return NULL; + +no_pmd: + spin_unlock(&mm->page_table_lock); + free_pgd_slow(new_pgd); + return NULL; + +no_pgd: + return NULL; +} + +/* + * No special code is required here. + */ +void setup_mm_for_reboot(char mode) +{ +} + +/* + * This contains the code to setup the memory map on an ARM2/ARM250/ARM3 + * o swapper_pg_dir = 0x0207d000 + * o kernel proper starts at 0x0208000 + * o create (allocate) a pte to contain the machine vectors + * o populate the pte (points to 0x02078000) (FIXME - is it zeroed?) + * o populate the init tasks page directory (pgd) with the new pte + * o zero the rest of the init tasks pgdir (FIXME - what about vmalloc?!) 
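+ *
+ * The single pte below is filled with mk_pte_phys(PAGE_OFFSET +
+ * SCREEN_SIZE, PAGE_READONLY), so the vector page ends up mapped
+ * read-only.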
+ */ +void __init memtable_init(struct meminfo *mi) +{ + pte_t *pte; + int i; + + page_nr = max_low_pfn; + + pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t)); + pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY); + pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte); + + for (i = 1; i < PTRS_PER_PGD; i++) + pgd_val(swapper_pg_dir[i]) = 0; +} + +void __init iotable_init(struct map_desc *io_desc) +{ + /* nothing to do */ +} + +/* + * We never have holes in the memmap + */ +void __init create_memmap_holes(struct meminfo *mi) +{ +} + +static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags) +{ + memzero(pte, sizeof(pte_t) * PTRS_PER_PTE); +} + +static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags) +{ + memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t)); +} + +void __init pgtable_cache_init(void) +{ + pte_cache = kmem_cache_create("pte-cache", + sizeof(pte_t) * PTRS_PER_PTE, + 0, 0, pte_cache_ctor, NULL); + if (!pte_cache) + BUG(); + + pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE + + sizeof(pgd_t) * PTRS_PER_PGD, + 0, 0, pgd_cache_ctor, NULL); + if (!pgd_cache) + BUG(); +} diff -urN linux-2.5.70-bk13/arch/arm26/mm/proc-funcs.S linux-2.5.70-bk14/arch/arm26/mm/proc-funcs.S --- linux-2.5.70-bk13/arch/arm26/mm/proc-funcs.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/mm/proc-funcs.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,359 @@ +/* + * linux/arch/arm/mm/proc-arm2,3.S + * + * Copyright (C) 1997-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * MMU functions for ARM2,3 + * + * These are the low level assembler for performing cache + * and memory functions on ARM2, ARM250 and ARM3 processors. + */ +#include +#include +#include +#include +#include + +/* + * MEMC workhorse code. It's both a horse which things it's a pig. 
+ */ +/* + * Function: cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long addr) + * Params : pgd Page tables/MEMC mapping + * : phys_pte physical address, or PTE + * : addr virtual address + */ +ENTRY(cpu_memc_update_entry) + tst r1, #PAGE_PRESENT @ is the page present + orreq r1, r1, #PAGE_OLD | PAGE_CLEAN + moveq r2, #0x01f00000 + mov r3, r1, lsr #13 @ convert to physical page nr + and r3, r3, #0x3fc + adr ip, memc_phys_table_32 + ldr r3, [ip, r3] + tst r1, #PAGE_OLD | PAGE_NOT_USER + biceq r3, r3, #0x200 + tsteq r1, #PAGE_READONLY | PAGE_CLEAN + biceq r3, r3, #0x300 + mov r2, r2, lsr #15 @ virtual -> nr + orr r3, r3, r2, lsl #15 + and r2, r2, #0x300 + orr r3, r3, r2, lsl #2 + and r2, r3, #255 + sub r0, r0, #256 * 4 + str r3, [r0, r2, lsl #2] + strb r3, [r3] + movs pc, lr +/* + * Params : r0 = preserved + * : r1 = memc table base (preserved) + * : r2 = page table entry + * : r3 = preserved + * : r4 = unused + * : r5 = memc physical address translation table + * : ip = virtual address (preserved) + */ +update_pte: + mov r4, r2, lsr #13 + and r4, r4, #0x3fc + ldr r4, [r5, r4] @ covert to MEMC page + + tst r2, #PAGE_OLD | PAGE_NOT_USER @ check for MEMC read + biceq r4, r4, #0x200 + tsteq r2, #PAGE_READONLY | PAGE_CLEAN @ check for MEMC write + biceq r4, r4, #0x300 + + orr r4, r4, ip + and r2, ip, #0x01800000 + orr r4, r4, r2, lsr #13 + + and r2, r4, #255 + str r4, [r1, r2, lsl #2] + movs pc, lr + +/* + * Params : r0 = preserved + * : r1 = memc table base (preserved) + * : r2 = page table base + * : r3 = preserved + * : r4 = unused + * : r5 = memc physical address translation table + * : ip = virtual address (updated) + */ +update_pte_table: + stmfd sp!, {r0, lr} + bic r0, r2, #3 +1: ldr r2, [r0], #4 @ get entry + tst r2, #PAGE_PRESENT @ page present + blne update_pte @ process pte + add ip, ip, #32768 @ increment virt addr + ldr r2, [r0], #4 @ get entry + tst r2, #PAGE_PRESENT @ page present + blne update_pte @ process pte + add ip, ip, #32768 @ increment virt addr + ldr r2, [r0], #4 @ get entry + tst r2, #PAGE_PRESENT @ page present + blne update_pte @ process pte + add ip, ip, #32768 @ increment virt addr + ldr r2, [r0], #4 @ get entry + tst r2, #PAGE_PRESENT @ page present + blne update_pte @ process pte + add ip, ip, #32768 @ increment virt addr + tst ip, #32768 * 31 @ finished? + bne 1b + ldmfd sp!, {r0, pc}^ + +/* + * Function: cpu_memc_update_all(pgd_t *pgd) + * Params : pgd Page tables/MEMC mapping + * Notes : this is optimised for 32k pages + */ +ENTRY(cpu_memc_update_all) + stmfd sp!, {r4, r5, lr} + bl clear_tables + sub r1, r0, #256 * 4 @ start of MEMC tables + adr r5, memc_phys_table_32 @ Convert to logical page number + mov ip, #0 @ virtual address +1: ldmia r0!, {r2, r3} @ load two pgd entries + tst r2, #PAGE_PRESENT @ is pgd entry present? + addeq ip, ip, #1048576 @FIXME - PAGE_PRESENT is for PTEs technically... + blne update_pte_table + mov r2, r3 + tst r2, #PAGE_PRESENT @ is pgd entry present? 
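For reference, the table walk performed by update_pte_table and cpu_memc_update_all above is equivalent to the C-level loop sketched below: 32 level-one entries of 1MB each, and for every present entry 32 pages of 32KB whose MEMC mapping is rebuilt. This is an illustrative sketch only, not code from the patch; PRESENT_BIT and memc_update_entry() are hypothetical stand-ins for PAGE_PRESENT and for the MEMC-entry computation done in assembler.

/* Sketch of the MEMC table walk (illustration only, not part of the patch). */
#define PRESENT_BIT 0x01        /* stand-in for PAGE_PRESENT */

static void memc_update_entry(unsigned long *pgd, unsigned long pte,
                              unsigned long virt)
{
        /* the real work (building the MEMC mapping word) is done in asm */
}

static void memc_update_all_sketch(unsigned long *pgd)
{
        unsigned long virt = 0;
        int i, j;

        for (i = 0; i < 32; i++) {              /* 32 entries x 1MB = 32MB */
                unsigned long *pte = (unsigned long *)(pgd[i] & ~3UL);

                if (!(pgd[i] & PRESENT_BIT)) {  /* level-one entry absent */
                        virt += 1048576;
                        continue;
                }
                for (j = 0; j < 32; j++) {      /* 32 pages x 32KB = 1MB */
                        if (pte[j] & PRESENT_BIT)
                                memc_update_entry(pgd, pte[j], virt);
                        virt += 32768;
                }
        }
}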
+ addeq ip, ip, #1048576 + blne update_pte_table + teq ip, #32 * 1048576 + bne 1b + ldmfd sp!, {r4, r5, pc}^ + +/* + * Build the table to map from physical page number to memc page number + */ + .type memc_phys_table_32, #object +memc_phys_table_32: + .irp b7, 0x00, 0x80 + .irp b6, 0x00, 0x02 + .irp b5, 0x00, 0x04 + .irp b4, 0x00, 0x01 + + .irp b3, 0x00, 0x40 + .irp b2, 0x00, 0x20 + .irp b1, 0x00, 0x10 + .irp b0, 0x00, 0x08 + .long 0x03800300 + \b7 + \b6 + \b5 + \b4 + \b3 + \b2 + \b1 + \b0 + .endr + .endr + .endr + .endr + + .endr + .endr + .endr + .endr + .size memc_phys_table_32, . - memc_phys_table_32 + +/* + * helper for cpu_memc_update_all, this clears out all + * mappings, setting them close to the top of memory, + * and inaccessible (0x01f00000). + * Params : r0 = page table pointer + */ +clear_tables: ldr r1, _arm3_set_pgd - 4 + ldr r2, [r1] + sub r1, r0, #256 * 4 @ start of MEMC tables + add r2, r1, r2, lsl #2 @ end of tables + mov r3, #0x03f00000 @ Default mapping (null mapping) + orr r3, r3, #0x00000f00 + orr r4, r3, #1 + orr r5, r3, #2 + orr ip, r3, #3 +1: stmia r1!, {r3, r4, r5, ip} + add r3, r3, #4 + add r4, r4, #4 + add r5, r5, #4 + add ip, ip, #4 + stmia r1!, {r3, r4, r5, ip} + add r3, r3, #4 + add r4, r4, #4 + add r5, r5, #4 + add ip, ip, #4 + teq r1, r2 + bne 1b + mov pc, lr + +/* + * Function: *_set_pgd(pgd_t *pgd) + * Params : pgd New page tables/MEMC mapping + * Purpose : update MEMC hardware with new mapping + */ + .word page_nr @ extern - declared in mm-memc.c +_arm3_set_pgd: mcr p15, 0, r1, c1, c0, 0 @ flush cache +_arm2_set_pgd: stmfd sp!, {lr} + ldr r1, _arm3_set_pgd - 4 + ldr r2, [r1] + sub r0, r0, #256 * 4 @ start of MEMC tables + add r1, r0, r2, lsl #2 @ end of tables +1: ldmia r0!, {r2, r3, ip, lr} + strb r2, [r2] + strb r3, [r3] + strb ip, [ip] + strb lr, [lr] + ldmia r0!, {r2, r3, ip, lr} + strb r2, [r2] + strb r3, [r3] + strb ip, [ip] + strb lr, [lr] + teq r0, r1 + bne 1b + ldmfd sp!, {pc}^ + +/* + * Function: *_proc_init (void) + * Purpose : Initialise the cache control registers + */ +_arm3_proc_init: + mov r0, #0x001f0000 + orr r0, r0, #0x0000ff00 + orr r0, r0, #0x000000ff + mcr p15, 0, r0, c3, c0 @ ARM3 Cacheable + mcr p15, 0, r0, c4, c0 @ ARM3 Updateable + mov r0, #0 + mcr p15, 0, r0, c5, c0 @ ARM3 Disruptive + mcr p15, 0, r0, c1, c0 @ ARM3 Flush + mov r0, #3 + mcr p15, 0, r0, c2, c0 @ ARM3 Control +_arm2_proc_init: + movs pc, lr + +/* + * Function: *_proc_fin (void) + * Purpose : Finalise processor (disable caches) + */ +_arm3_proc_fin: mov r0, #2 + mcr p15, 0, r0, c2, c0 +_arm2_proc_fin: orrs pc, lr, #PSR_I_BIT|PSR_F_BIT + +/* + * Function: *_xchg_1 (int new, volatile void *ptr) + * Params : new New value to store at... + * : ptr pointer to byte-wide location + * Purpose : Performs an exchange operation + * Returns : Original byte data at 'ptr' + */ +_arm2_xchg_1: mov r2, pc + orr r2, r2, #PSR_I_BIT + teqp r2, #0 + ldrb r2, [r1] + strb r0, [r1] + mov r0, r2 + movs pc, lr + +_arm3_xchg_1: swpb r0, r0, [r1] + movs pc, lr + +/* + * Function: *_xchg_4 (int new, volatile void *ptr) + * Params : new New value to store at... 
+ * : ptr pointer to word-wide location + * Purpose : Performs an exchange operation + * Returns : Original word data at 'ptr' + */ +_arm2_xchg_4: mov r2, pc + orr r2, r2, #PSR_I_BIT + teqp r2, #0 + ldr r2, [r1] + str r0, [r1] + mov r0, r2 + movs pc, lr + +_arm3_xchg_4: swp r0, r0, [r1] + movs pc, lr + +_arm2_3_check_bugs: + bics pc, lr, #PSR_F_BIT @ Clear FIQ disable bit + +armvlsi_name: .asciz "ARM/VLSI" +_arm2_name: .asciz "ARM 2" +_arm250_name: .asciz "ARM 250" +_arm3_name: .asciz "ARM 3" + + .section ".init.text", #alloc, #execinstr +/* + * Purpose : Function pointers used to access above functions - all calls + * come through these + */ + .globl arm2_processor_functions +arm2_processor_functions: + .word _arm2_3_check_bugs + .word _arm2_proc_init + .word _arm2_proc_fin + .word _arm2_set_pgd + .word _arm2_xchg_1 + .word _arm2_xchg_4 + +cpu_arm2_info: + .long armvlsi_name + .long _arm2_name + + .globl arm250_processor_functions +arm250_processor_functions: + .word _arm2_3_check_bugs + .word _arm2_proc_init + .word _arm2_proc_fin + .word _arm2_set_pgd + .word _arm3_xchg_1 + .word _arm3_xchg_4 + +cpu_arm250_info: + .long armvlsi_name + .long _arm250_name + + .globl arm3_processor_functions +arm3_processor_functions: + .word _arm2_3_check_bugs + .word _arm3_proc_init + .word _arm3_proc_fin + .word _arm3_set_pgd + .word _arm3_xchg_1 + .word _arm3_xchg_4 + +cpu_arm3_info: + .long armvlsi_name + .long _arm3_name + +arm2_arch_name: .asciz "armv1" +arm3_arch_name: .asciz "armv2" +arm2_elf_name: .asciz "v1" +arm3_elf_name: .asciz "v2" + .align + + .section ".proc.info", #alloc, #execinstr + + .long 0x41560200 + .long 0xfffffff0 + .long arm2_arch_name + .long arm2_elf_name + .long 0 + .long cpu_arm2_info + .long arm2_processor_functions + + .long 0x41560250 + .long 0xfffffff0 + .long arm3_arch_name + .long arm3_elf_name + .long 0 + .long cpu_arm250_info + .long arm250_processor_functions + + .long 0x41560300 + .long 0xfffffff0 + .long arm3_arch_name + .long arm3_elf_name + .long 0 + .long cpu_arm3_info + .long arm3_processor_functions + diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/ARM-gcc.h linux-2.5.70-bk14/arch/arm26/nwfpe/ARM-gcc.h --- linux-2.5.70-bk13/arch/arm26/nwfpe/ARM-gcc.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/ARM-gcc.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,120 @@ +/* +------------------------------------------------------------------------------- +The macro `BITS64' can be defined to indicate that 64-bit integer types are +supported by the compiler. +------------------------------------------------------------------------------- +*/ +#define BITS64 + +/* +------------------------------------------------------------------------------- +Each of the following `typedef's defines the most convenient type that holds +integers of at least as many bits as specified. For example, `uint8' should +be the most convenient type that can hold unsigned integers of as many as +8 bits. The `flag' type must be able to hold either a 0 or 1. For most +implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed +to the same as `int'. 
+------------------------------------------------------------------------------- +*/ +typedef char flag; +typedef unsigned char uint8; +typedef signed char int8; +typedef int uint16; +typedef int int16; +typedef unsigned int uint32; +typedef signed int int32; +#ifdef BITS64 +typedef unsigned long long int bits64; +typedef signed long long int sbits64; +#endif + +/* +------------------------------------------------------------------------------- +Each of the following `typedef's defines a type that holds integers +of _exactly_ the number of bits specified. For instance, for most +implementation of C, `bits16' and `sbits16' should be `typedef'ed to +`unsigned short int' and `signed short int' (or `short int'), respectively. +------------------------------------------------------------------------------- +*/ +typedef unsigned char bits8; +typedef signed char sbits8; +typedef unsigned short int bits16; +typedef signed short int sbits16; +typedef unsigned int bits32; +typedef signed int sbits32; +#ifdef BITS64 +typedef unsigned long long int uint64; +typedef signed long long int int64; +#endif + +#ifdef BITS64 +/* +------------------------------------------------------------------------------- +The `LIT64' macro takes as its argument a textual integer literal and if +necessary ``marks'' the literal as having a 64-bit integer type. For +example, the Gnu C Compiler (`gcc') requires that 64-bit literals be +appended with the letters `LL' standing for `long long', which is `gcc's +name for the 64-bit integer type. Some compilers may allow `LIT64' to be +defined as the identity macro: `#define LIT64( a ) a'. +------------------------------------------------------------------------------- +*/ +#define LIT64( a ) a##LL +#endif + +/* +------------------------------------------------------------------------------- +The macro `INLINE' can be used before functions that should be inlined. If +a compiler does not support explicit inlining, this macro should be defined +to be `static'. +------------------------------------------------------------------------------- +*/ +#define INLINE extern __inline__ + + +/* For use as a GCC soft-float library we need some special function names. */ + +#ifdef __LIBFLOAT__ + +/* Some 32-bit ops can be mapped straight across by just changing the name. */ +#define float32_add __addsf3 +#define float32_sub __subsf3 +#define float32_mul __mulsf3 +#define float32_div __divsf3 +#define int32_to_float32 __floatsisf +#define float32_to_int32_round_to_zero __fixsfsi +#define float32_to_uint32_round_to_zero __fixunssfsi + +/* These ones go through the glue code. To avoid namespace pollution + we rename the internal functions too. */ +#define float32_eq ___float32_eq +#define float32_le ___float32_le +#define float32_lt ___float32_lt + +/* All the 64-bit ops have to go through the glue, so we pull the same + trick. 
*/ +#define float64_add ___float64_add +#define float64_sub ___float64_sub +#define float64_mul ___float64_mul +#define float64_div ___float64_div +#define int32_to_float64 ___int32_to_float64 +#define float64_to_int32_round_to_zero ___float64_to_int32_round_to_zero +#define float64_to_uint32_round_to_zero ___float64_to_uint32_round_to_zero +#define float64_to_float32 ___float64_to_float32 +#define float32_to_float64 ___float32_to_float64 +#define float64_eq ___float64_eq +#define float64_le ___float64_le +#define float64_lt ___float64_lt + +#if 0 +#define float64_add __adddf3 +#define float64_sub __subdf3 +#define float64_mul __muldf3 +#define float64_div __divdf3 +#define int32_to_float64 __floatsidf +#define float64_to_int32_round_to_zero __fixdfsi +#define float64_to_uint32_round_to_zero __fixunsdfsi +#define float64_to_float32 __truncdfsf2 +#define float32_to_float64 __extendsfdf2 +#endif + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/ChangeLog linux-2.5.70-bk14/arch/arm26/nwfpe/ChangeLog --- linux-2.5.70-bk13/arch/arm26/nwfpe/ChangeLog 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/ChangeLog 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,83 @@ +2002-01-19 Russell King + + * fpa11.h - Add documentation + - remove userRegisters pointer from this structure. + - add new method to obtain integer register values. + * softfloat.c - Remove float128 + * softfloat.h - Remove float128 + * softfloat-specialize - Remove float128 + + * The FPA11 structure is not a kernel-specific data structure. + It is used by users of ptrace to examine the values of the + floating point registers. Therefore, any changes to the + FPA11 structure (size or position of elements contained + within) have to be well thought out. + + * Since 128-bit float requires the FPA11 structure to change + size, it has been removed. 128-bit float is currently unused, + and needs various things to be re-worked so that we won't + overflow the available space in the task structure. + + * The changes are designed to break any patch that goes on top + of this code, so that the authors properly review their changes. + +1999-08-19 Scott Bambrough + + * fpmodule.c - Changed version number to 0.95 + * fpa11.h - modified FPA11, FPREG structures + * fpa11.c - Changes due to FPA11, FPREG structure alterations. + * fpa11_cpdo.c - Changes due to FPA11, FPREG structure alterations. + * fpa11_cpdt.c - Changes due to FPA11, FPREG structure alterations. + * fpa11_cprt.c - Changes due to FPA11, FPREG structure alterations. + * single_cpdo.c - Changes due to FPA11, FPREG structure alterations. + * double_cpdo.c - Changes due to FPA11, FPREG structure alterations. + * extended_cpdo.c - Changes due to FPA11, FPREG structure alterations. + + * I discovered several bugs. First and worst is that the kernel + passes in a pointer to the FPE's state area. This is defined + as a struct user_fp (see user.h). This pointer was cast to a + FPA11*. Unfortunately FPA11 and user_fp are of different sizes; + user_fp is smaller. This meant that the FPE scribbled on things + below its area, which is bad, as the area is in the thread_struct + embedded in the process task structure. Thus we were scribbling + over one of the most important structures in the entire OS. + + * user_fp and FPA11 have now been harmonized. Most of the changes + in the above code were dereferencing problems due to moving the + register type out of FPREG, and getting rid of the union variable + fpvalue. 
+ + * Second I noticed resetFPA11 was not always being called for a + task. This should happen on the first floating point exception + that occurs. It is controlled by init_flag in FPA11. The + comment in the code beside init_flag state the kernel guarantees + this to be zero. Not so. I found that the kernel recycles task + structures, and that recycled ones may not have init_flag zeroed. + I couldn't even find anything that guarantees it is zeroed when + when the task structure is initially allocated. In any case + I now initialize the entire FPE state in the thread structure to + zero when allocated and recycled. See alloc_task_struct() and + flush_thread() in arch/arm/process.c. The change to + alloc_task_struct() may not be necessary, but I left it in for + completeness (better safe than sorry). + +1998-11-23 Scott Bambrough + + * README.FPE - fix typo in description of lfm/sfm instructions + * NOTES - Added file to describe known bugs/problems + * fpmodule.c - Changed version number to 0.94 + +1998-11-20 Scott Bambrough + + * README.FPE - fix description of URD, NRM instructions + * TODO - remove URD, NRM instructions from TODO list + * single_cpdo.c - implement URD, NRM + * double_cpdo.c - implement URD, NRM + * extended_cpdo.c - implement URD, NRM + +1998-11-19 Scott Bambrough + + * ChangeLog - Added this file to track changes made. + * fpa11.c - added code to initialize register types to typeNone + * fpa11_cpdt.c - fixed bug in storeExtended (typeExtended changed to + typeDouble in switch statement) diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/Makefile linux-2.5.70-bk14/arch/arm26/nwfpe/Makefile --- linux-2.5.70-bk13/arch/arm26/nwfpe/Makefile 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/Makefile 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,15 @@ +# +# Copyright (C) 1998, 1999, 2001 Philip Blundell +# + +obj-y := +obj-m := +obj-n := + +obj-$(CONFIG_FPE_NWFPE) += nwfpe.o + +nwfpe-objs := fpa11.o fpa11_cpdo.o fpa11_cpdt.o fpa11_cprt.o \ + fpmodule.o fpopcode.o softfloat.o \ + single_cpdo.o double_cpdo.o extended_cpdo.o \ + entry.o + diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/double_cpdo.c linux-2.5.70-bk14/arch/arm26/nwfpe/double_cpdo.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/double_cpdo.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/double_cpdo.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,288 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" + +float64 float64_exp(float64 Fm); +float64 float64_ln(float64 Fm); +float64 float64_sin(float64 rFm); +float64 float64_cos(float64 rFm); +float64 float64_arcsin(float64 rFm); +float64 float64_arctan(float64 rFm); +float64 float64_log(float64 rFm); +float64 float64_tan(float64 rFm); +float64 float64_arccos(float64 rFm); +float64 float64_pow(float64 rFn,float64 rFm); +float64 float64_pol(float64 rFn,float64 rFm); + +unsigned int DoubleCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + float64 rFm, rFn = 0; //FIXME - should be zero? + unsigned int Fd, Fm, Fn, nRc = 1; + + //printk("DoubleCPDO(0x%08x)\n",opcode); + + Fm = getFm(opcode); + if (CONSTANT_FM(opcode)) + { + rFm = getDoubleConstant(Fm); + } + else + { + switch (fpa11->fType[Fm]) + { + case typeSingle: + rFm = float32_to_float64(fpa11->fpreg[Fm].fSingle); + break; + + case typeDouble: + rFm = fpa11->fpreg[Fm].fDouble; + break; + + case typeExtended: + // !! patb + //printk("not implemented! why not?\n"); + //!! ScottB + // should never get here, if extended involved + // then other operand should be promoted then + // ExtendedCPDO called. + break; + + default: return 0; + } + } + + if (!MONADIC_INSTRUCTION(opcode)) + { + Fn = getFn(opcode); + switch (fpa11->fType[Fn]) + { + case typeSingle: + rFn = float32_to_float64(fpa11->fpreg[Fn].fSingle); + break; + + case typeDouble: + rFn = fpa11->fpreg[Fn].fDouble; + break; + + default: return 0; + } + } + + Fd = getFd(opcode); + /* !! this switch isn't optimized; better (opcode & MASK_ARITHMETIC_OPCODE)>>24, sort of */ + switch (opcode & MASK_ARITHMETIC_OPCODE) + { + /* dyadic opcodes */ + case ADF_CODE: + fpa11->fpreg[Fd].fDouble = float64_add(rFn,rFm); + break; + + case MUF_CODE: + case FML_CODE: + fpa11->fpreg[Fd].fDouble = float64_mul(rFn,rFm); + break; + + case SUF_CODE: + fpa11->fpreg[Fd].fDouble = float64_sub(rFn,rFm); + break; + + case RSF_CODE: + fpa11->fpreg[Fd].fDouble = float64_sub(rFm,rFn); + break; + + case DVF_CODE: + case FDV_CODE: + fpa11->fpreg[Fd].fDouble = float64_div(rFn,rFm); + break; + + case RDF_CODE: + case FRD_CODE: + fpa11->fpreg[Fd].fDouble = float64_div(rFm,rFn); + break; + +#if 0 + case POW_CODE: + fpa11->fpreg[Fd].fDouble = float64_pow(rFn,rFm); + break; + + case RPW_CODE: + fpa11->fpreg[Fd].fDouble = float64_pow(rFm,rFn); + break; +#endif + + case RMF_CODE: + fpa11->fpreg[Fd].fDouble = float64_rem(rFn,rFm); + break; + +#if 0 + case POL_CODE: + fpa11->fpreg[Fd].fDouble = float64_pol(rFn,rFm); + break; +#endif + + /* monadic opcodes */ + case MVF_CODE: + fpa11->fpreg[Fd].fDouble = rFm; + break; + + case MNF_CODE: + { + unsigned int *p = (unsigned int*)&rFm; + p[1] ^= 0x80000000; + fpa11->fpreg[Fd].fDouble = rFm; + } + break; + + case ABS_CODE: + { + unsigned int *p = (unsigned int*)&rFm; + p[1] &= 0x7fffffff; + fpa11->fpreg[Fd].fDouble = rFm; + } + break; + + case RND_CODE: + case URD_CODE: + fpa11->fpreg[Fd].fDouble = float64_round_to_int(rFm); + break; + + case SQT_CODE: + fpa11->fpreg[Fd].fDouble = float64_sqrt(rFm); + break; + +#if 0 + case LOG_CODE: + fpa11->fpreg[Fd].fDouble = float64_log(rFm); + break; + + case LGN_CODE: + fpa11->fpreg[Fd].fDouble = float64_ln(rFm); + break; + + case EXP_CODE: + fpa11->fpreg[Fd].fDouble = float64_exp(rFm); + break; + + case SIN_CODE: + fpa11->fpreg[Fd].fDouble = float64_sin(rFm); + break; + + case COS_CODE: + fpa11->fpreg[Fd].fDouble = float64_cos(rFm); + break; + + case TAN_CODE: + fpa11->fpreg[Fd].fDouble = float64_tan(rFm); + 
break; + + case ASN_CODE: + fpa11->fpreg[Fd].fDouble = float64_arcsin(rFm); + break; + + case ACS_CODE: + fpa11->fpreg[Fd].fDouble = float64_arccos(rFm); + break; + + case ATN_CODE: + fpa11->fpreg[Fd].fDouble = float64_arctan(rFm); + break; +#endif + + case NRM_CODE: + break; + + default: + { + nRc = 0; + } + } + + if (0 != nRc) fpa11->fType[Fd] = typeDouble; + return nRc; +} + +#if 0 +float64 float64_exp(float64 rFm) +{ + return rFm; +//series +} + +float64 float64_ln(float64 rFm) +{ + return rFm; +//series +} + +float64 float64_sin(float64 rFm) +{ + return rFm; +//series +} + +float64 float64_cos(float64 rFm) +{ + return rFm; + //series +} + +#if 0 +float64 float64_arcsin(float64 rFm) +{ +//series +} + +float64 float64_arctan(float64 rFm) +{ + //series +} +#endif + +float64 float64_log(float64 rFm) +{ + return float64_div(float64_ln(rFm),getDoubleConstant(7)); +} + +float64 float64_tan(float64 rFm) +{ + return float64_div(float64_sin(rFm),float64_cos(rFm)); +} + +float64 float64_arccos(float64 rFm) +{ +return rFm; + //return float64_sub(halfPi,float64_arcsin(rFm)); +} + +float64 float64_pow(float64 rFn,float64 rFm) +{ + return float64_exp(float64_mul(rFm,float64_ln(rFn))); +} + +float64 float64_pol(float64 rFn,float64 rFm) +{ + return float64_arctan(float64_div(rFn,rFm)); +} +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/entry.S linux-2.5.70-bk14/arch/arm26/nwfpe/entry.S --- linux-2.5.70-bk13/arch/arm26/nwfpe/entry.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/entry.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,114 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998 + (c) Philip Blundell 1998-1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include + +/* This is the kernel's entry point into the floating point emulator. +It is called from the kernel with code similar to this: + + mov fp, #0 + teqp pc, #PSR_I_BIT | MODE_SVC + ldr r4, .LC2 + ldr pc, [r4] @ Call FP module USR entry point + +The kernel expects the emulator to return via one of two possible +points of return it passes to the emulator. The emulator, if +successful in its emulation, jumps to ret_from_exception and the +kernel takes care of returning control from the trap to the user code. +If the emulator is unable to emulate the instruction, it returns to +fpundefinstr and the kernel halts the user program with a core dump. + +This routine does four things: + +1) It saves SP into a variable called userRegisters. The kernel has +created a struct pt_regs on the stack and saved the user registers +into it. See /usr/include/asm/proc/ptrace.h for details. The +emulator code uses userRegisters as the base of an array of words from +which the contents of the registers can be extracted. + +2) It locates the FP emulator work area within the TSS structure and +points `fpa11' to it. 
+ +3) It calls EmulateAll to emulate a floating point instruction. +EmulateAll returns 1 if the emulation was successful, or 0 if not. + +4) If an instruction has been emulated successfully, it looks ahead at +the next instruction. If it is a floating point instruction, it +executes the instruction, without returning to user space. In this +way it repeatedly looks ahead and executes floating point instructions +until it encounters a non floating point instruction, at which time it +returns via _fpreturn. + +This is done to reduce the effect of the trap overhead on each +floating point instructions. GCC attempts to group floating point +instructions to allow the emulator to spread the cost of the trap over +several floating point instructions. */ + + .globl nwfpe_enter +nwfpe_enter: + mov sl, sp + bl FPA11_CheckInit @ check to see if we are initialised + + ldr r5, [sp, #60] @ get contents of PC + bic r5, r5, #0xfc000003 + ldr r0, [r5, #-4] @ get actual instruction into r0 + bl EmulateAll @ emulate the instruction +1: cmp r0, #0 @ was emulation successful + beq fpundefinstr @ no, return failure + +next: +.Lx1: ldrt r6, [r5], #4 @ get the next instruction and + @ increment PC + + and r2, r6, #0x0F000000 @ test for FP insns + teq r2, #0x0C000000 + teqne r2, #0x0D000000 + teqne r2, #0x0E000000 + bne ret_from_exception @ return ok if not a fp insn + + ldr r9, [sp, #60] @ get new condition codes + and r9, r9, #0xfc000003 + orr r7, r5, r9 + str r7, [sp, #60] @ update PC copy in regs + + mov r0, r6 @ save a copy + mov r1, r9 @ fetch the condition codes + bl checkCondition @ check the condition + cmp r0, #0 @ r0 = 0 ==> condition failed + + @ if condition code failed to match, next insn + beq next @ get the next instruction; + + mov r0, r6 @ prepare for EmulateAll() + adr lr, 1b + orr lr, lr, #3 + b EmulateAll @ if r0 != 0, goto EmulateAll + +.Lret: b ret_from_exception @ let the user eat segfaults + + @ We need to be prepared for the instruction at .Lx1 to fault. + @ Emit the appropriate exception gunk to fix things up. + .section __ex_table,"a" + .align 3 + .long .Lx1 + ldr lr, [lr, $(.Lret - .Lx1)/4] + .previous diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/extended_cpdo.c linux-2.5.70-bk14/arch/arm26/nwfpe/extended_cpdo.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/extended_cpdo.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/extended_cpdo.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,273 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" + +floatx80 floatx80_exp(floatx80 Fm); +floatx80 floatx80_ln(floatx80 Fm); +floatx80 floatx80_sin(floatx80 rFm); +floatx80 floatx80_cos(floatx80 rFm); +floatx80 floatx80_arcsin(floatx80 rFm); +floatx80 floatx80_arctan(floatx80 rFm); +floatx80 floatx80_log(floatx80 rFm); +floatx80 floatx80_tan(floatx80 rFm); +floatx80 floatx80_arccos(floatx80 rFm); +floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm); +floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm); + +unsigned int ExtendedCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + floatx80 rFm, rFn; + unsigned int Fd, Fm, Fn, nRc = 1; + + //printk("ExtendedCPDO(0x%08x)\n",opcode); + + Fm = getFm(opcode); + if (CONSTANT_FM(opcode)) + { + rFm = getExtendedConstant(Fm); + } + else + { + switch (fpa11->fType[Fm]) + { + case typeSingle: + rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle); + break; + + case typeDouble: + rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble); + break; + + case typeExtended: + rFm = fpa11->fpreg[Fm].fExtended; + break; + + default: return 0; + } + } + + if (!MONADIC_INSTRUCTION(opcode)) + { + Fn = getFn(opcode); + switch (fpa11->fType[Fn]) + { + case typeSingle: + rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle); + break; + + case typeDouble: + rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble); + break; + + case typeExtended: + rFn = fpa11->fpreg[Fn].fExtended; + break; + + default: return 0; + } + } + + Fd = getFd(opcode); + switch (opcode & MASK_ARITHMETIC_OPCODE) + { + /* dyadic opcodes */ + case ADF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_add(rFn,rFm); + break; + + case MUF_CODE: + case FML_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_mul(rFn,rFm); + break; + + case SUF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sub(rFn,rFm); + break; + + case RSF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sub(rFm,rFn); + break; + + case DVF_CODE: + case FDV_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_div(rFn,rFm); + break; + + case RDF_CODE: + case FRD_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_div(rFm,rFn); + break; + +#if 0 + case POW_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_pow(rFn,rFm); + break; + + case RPW_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_pow(rFm,rFn); + break; +#endif + + case RMF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_rem(rFn,rFm); + break; + +#if 0 + case POL_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_pol(rFn,rFm); + break; +#endif + + /* monadic opcodes */ + case MVF_CODE: + fpa11->fpreg[Fd].fExtended = rFm; + break; + + case MNF_CODE: + rFm.high ^= 0x8000; + fpa11->fpreg[Fd].fExtended = rFm; + break; + + case ABS_CODE: + rFm.high &= 0x7fff; + fpa11->fpreg[Fd].fExtended = rFm; + break; + + case RND_CODE: + case URD_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_round_to_int(rFm); + break; + + case SQT_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sqrt(rFm); + break; + +#if 0 + case LOG_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_log(rFm); + break; + + case LGN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_ln(rFm); + break; + + case EXP_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_exp(rFm); + break; + + case SIN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sin(rFm); + break; + + case COS_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_cos(rFm); + break; + + case TAN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_tan(rFm); + break; + + case ASN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_arcsin(rFm); + break; + + case ACS_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_arccos(rFm); + break; 
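The MNF and ABS cases above (both here and in DoubleCPDO) negate or take the absolute value by toggling the stored sign bit directly: bit 15 of the 16-bit `high' (sign and exponent) field for the 80-bit softfloat type, and the top bit of the upper 32-bit word (p[1] on this little-endian port) for the 64-bit type. A minimal sketch of that bit manipulation, assuming those layouts; fx80_sketch is an illustrative type, not the emulator's floatx80:

#include <stdint.h>

/* Illustration only; mirrors the layout assumed by the code above. */
struct fx80_sketch {
        uint16_t high;          /* sign (bit 15) plus 15-bit exponent */
        uint64_t low;           /* mantissa */
};

static struct fx80_sketch fx80_negate(struct fx80_sketch x)
{
        x.high ^= 0x8000;       /* what MNF_CODE does */
        return x;
}

static struct fx80_sketch fx80_abs(struct fx80_sketch x)
{
        x.high &= 0x7fff;       /* what ABS_CODE does */
        return x;
}

static uint64_t f64_negate_bits(uint64_t bits)
{
        return bits ^ (1ULL << 63);     /* same effect as p[1] ^= 0x80000000 */
}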
+ + case ATN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_arctan(rFm); + break; +#endif + + case NRM_CODE: + break; + + default: + { + nRc = 0; + } + } + + if (0 != nRc) fpa11->fType[Fd] = typeExtended; + return nRc; +} + +#if 0 +floatx80 floatx80_exp(floatx80 Fm) +{ +//series +} + +floatx80 floatx80_ln(floatx80 Fm) +{ +//series +} + +floatx80 floatx80_sin(floatx80 rFm) +{ +//series +} + +floatx80 floatx80_cos(floatx80 rFm) +{ +//series +} + +floatx80 floatx80_arcsin(floatx80 rFm) +{ +//series +} + +floatx80 floatx80_arctan(floatx80 rFm) +{ + //series +} + +floatx80 floatx80_log(floatx80 rFm) +{ + return floatx80_div(floatx80_ln(rFm),getExtendedConstant(7)); +} + +floatx80 floatx80_tan(floatx80 rFm) +{ + return floatx80_div(floatx80_sin(rFm),floatx80_cos(rFm)); +} + +floatx80 floatx80_arccos(floatx80 rFm) +{ + //return floatx80_sub(halfPi,floatx80_arcsin(rFm)); +} + +floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm) +{ + return floatx80_exp(floatx80_mul(rFm,floatx80_ln(rFn))); +} + +floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm) +{ + return floatx80_arctan(floatx80_div(rFn,rFm)); +} +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11.c linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,221 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "fpopcode.h" + +#include "fpmodule.h" +#include "fpmodule.inl" + +#include +#include + +/* forward declarations */ +unsigned int EmulateCPDO(const unsigned int); +unsigned int EmulateCPDT(const unsigned int); +unsigned int EmulateCPRT(const unsigned int); + +/* Reset the FPA11 chip. Called to initialize and reset the emulator. 
*/ +void resetFPA11(void) +{ + int i; + FPA11 *fpa11 = GET_FPA11(); + + /* initialize the register type array */ + for (i=0;i<=7;i++) + { + fpa11->fType[i] = typeNone; + } + + /* FPSR: set system id to FP_EMULATOR, set AC, clear all other bits */ + fpa11->fpsr = FP_EMULATOR | BIT_AC; + + /* FPCR: set SB, AB and DA bits, clear all others */ +#if MAINTAIN_FPCR + fpa11->fpcr = MASK_RESET; +#endif +} + +void SetRoundingMode(const unsigned int opcode) +{ +#if MAINTAIN_FPCR + FPA11 *fpa11 = GET_FPA11(); + fpa11->fpcr &= ~MASK_ROUNDING_MODE; +#endif + switch (opcode & MASK_ROUNDING_MODE) + { + default: + case ROUND_TO_NEAREST: + float_rounding_mode = float_round_nearest_even; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_NEAREST; +#endif + break; + + case ROUND_TO_PLUS_INFINITY: + float_rounding_mode = float_round_up; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_PLUS_INFINITY; +#endif + break; + + case ROUND_TO_MINUS_INFINITY: + float_rounding_mode = float_round_down; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_MINUS_INFINITY; +#endif + break; + + case ROUND_TO_ZERO: + float_rounding_mode = float_round_to_zero; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_ZERO; +#endif + break; + } +} + +void SetRoundingPrecision(const unsigned int opcode) +{ +#if MAINTAIN_FPCR + FPA11 *fpa11 = GET_FPA11(); + fpa11->fpcr &= ~MASK_ROUNDING_PRECISION; +#endif + switch (opcode & MASK_ROUNDING_PRECISION) + { + case ROUND_SINGLE: + floatx80_rounding_precision = 32; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_SINGLE; +#endif + break; + + case ROUND_DOUBLE: + floatx80_rounding_precision = 64; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_DOUBLE; +#endif + break; + + case ROUND_EXTENDED: + floatx80_rounding_precision = 80; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_EXTENDED; +#endif + break; + + default: floatx80_rounding_precision = 80; + } +} + +void FPA11_CheckInit(void) +{ + FPA11 *fpa11 = GET_FPA11(); + if (unlikely(fpa11->initflag == 0)) + { + resetFPA11(); + SetRoundingMode(ROUND_TO_NEAREST); + SetRoundingPrecision(ROUND_EXTENDED); + fpa11->initflag = 1; + } +} + +/* Emulate the instruction in the opcode. */ +unsigned int EmulateAll(unsigned int opcode) +{ + unsigned int nRc = 1, code; + + code = opcode & 0x00000f00; + if (code == 0x00000100 || code == 0x00000200) + { + /* For coprocessor 1 or 2 (FPA11) */ + code = opcode & 0x0e000000; + if (code == 0x0e000000) + { + if (opcode & 0x00000010) + { + /* Emulate conversion opcodes. */ + /* Emulate register transfer opcodes. */ + /* Emulate comparison opcodes. */ + nRc = EmulateCPRT(opcode); + } + else + { + /* Emulate monadic arithmetic opcodes. */ + /* Emulate dyadic arithmetic opcodes. */ + nRc = EmulateCPDO(opcode); + } + } + else if (code == 0x0c000000) + { + /* Emulate load/store opcodes. */ + /* Emulate load/store multiple opcodes. */ + nRc = EmulateCPDT(opcode); + } + else + { + /* Invalid instruction detected. Return FALSE. 
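As a reference for the dispatch above: bits 8-11 of the opcode select the coprocessor (1 or 2 for the FPA11), the 0x0e000000 class with bit 4 clear is a data operation (CPDO), the same class with bit 4 set is a register transfer (CPRT), and the 0x0c000000 class is a data transfer (CPDT). A sketch of that classification, for illustration only; it names which handler the bit pattern selects and does not reproduce EmulateAll's return convention:

/* Illustrative classification of the FPA opcode space (not part of the patch). */
enum fp_class { FP_NONE, FP_CPDO, FP_CPRT, FP_CPDT };

static enum fp_class classify_fp_opcode(unsigned int opcode)
{
        unsigned int cp = opcode & 0x00000f00;          /* coprocessor number */

        if (cp != 0x00000100 && cp != 0x00000200)
                return FP_NONE;                         /* not CP1/CP2 (FPA11) */

        if ((opcode & 0x0e000000) == 0x0e000000)        /* CDP/MCR/MRC class */
                return (opcode & 0x00000010) ? FP_CPRT : FP_CPDO;

        if ((opcode & 0x0e000000) == 0x0c000000)        /* LDC/STC class */
                return FP_CPDT;

        return FP_NONE;
}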
*/ + nRc = 0; + } + } + + return(nRc); +} + +#if 0 +unsigned int EmulateAll1(unsigned int opcode) +{ + switch ((opcode >> 24) & 0xf) + { + case 0xc: + case 0xd: + if ((opcode >> 20) & 0x1) + { + switch ((opcode >> 8) & 0xf) + { + case 0x1: return PerformLDF(opcode); break; + case 0x2: return PerformLFM(opcode); break; + default: return 0; + } + } + else + { + switch ((opcode >> 8) & 0xf) + { + case 0x1: return PerformSTF(opcode); break; + case 0x2: return PerformSFM(opcode); break; + default: return 0; + } + } + break; + + case 0xe: + if (opcode & 0x10) + return EmulateCPDO(opcode); + else + return EmulateCPRT(opcode); + break; + + default: return 0; + } +} +#endif + diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11.h linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11.h --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,87 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef __FPA11_H__ +#define __FPA11_H__ + +#define GET_FPA11() ((FPA11 *)(¤t_thread_info()->fpstate)) + +/* + * The processes registers are always at the very top of the 8K + * stack+task struct. Use the same method as 'current' uses to + * reach them. + */ +register unsigned int *user_registers asm("sl"); + +#define GET_USERREG() (user_registers) + +#include + +/* includes */ +#include "fpsr.h" /* FP control and status register definitions */ +#include "softfloat.h" + +#define typeNone 0x00 +#define typeSingle 0x01 +#define typeDouble 0x02 +#define typeExtended 0x03 + +/* + * This must be no more and no less than 12 bytes. + */ +typedef union tagFPREG { + floatx80 fExtended; + float64 fDouble; + float32 fSingle; +} FPREG; + +/* + * FPA11 device model. + * + * This structure is exported to user space. Do not re-order. + * Only add new stuff to the end, and do not change the size of + * any element. Elements of this structure are used by user + * space, and must match struct user_fp in include/asm-arm/user.h. + * We include the byte offsets below for documentation purposes. + * + * The size of this structure and FPREG are checked by fpmodule.c + * on initialisation. If the rules have been broken, NWFPE will + * not initialise. + */ +typedef struct tagFPA11 { +/* 0 */ FPREG fpreg[8]; /* 8 floating point registers */ +/* 96 */ FPSR fpsr; /* floating point status register */ +/* 100 */ FPCR fpcr; /* floating point control register */ +/* 104 */ unsigned char fType[8]; /* type of floating point value held in + floating point registers. One of none + single, double or extended. */ +/* 112 */ int initflag; /* this is special. 
The kernel guarantees + to set it to 0 when a thread is launched, + so we can use it to detect whether this + instance of the emulator needs to be + initialised. */ +} FPA11; + +extern void resetFPA11(void); +extern void SetRoundingMode(const unsigned int); +extern void SetRoundingPrecision(const unsigned int); + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11.inl linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11.inl --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11.inl 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11.inl 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,51 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" + +/* Read and write floating point status register */ +extern __inline__ unsigned int readFPSR(void) +{ + FPA11 *fpa11 = GET_FPA11(); + return(fpa11->fpsr); +} + +extern __inline__ void writeFPSR(FPSR reg) +{ + FPA11 *fpa11 = GET_FPA11(); + /* the sysid byte in the status register is readonly */ + fpa11->fpsr = (fpa11->fpsr & MASK_SYSID) | (reg & ~MASK_SYSID); +} + +/* Read and write floating point control register */ +extern __inline__ FPCR readFPCR(void) +{ + FPA11 *fpa11 = GET_FPA11(); + /* clear SB, AB and DA bits before returning FPCR */ + return(fpa11->fpcr & ~MASK_RFC); +} + +extern __inline__ void writeFPCR(FPCR reg) +{ + FPA11 *fpa11 = GET_FPA11(); + fpa11->fpcr &= ~MASK_WFC; /* clear SB, AB and DA bits */ + fpa11->fpcr |= (reg & MASK_WFC); /* write SB, AB and DA bits */ +} diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11_cpdo.c linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11_cpdo.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11_cpdo.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11_cpdo.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,117 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" +#include "fpopcode.h" + +unsigned int SingleCPDO(const unsigned int opcode); +unsigned int DoubleCPDO(const unsigned int opcode); +unsigned int ExtendedCPDO(const unsigned int opcode); + +unsigned int EmulateCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int Fd, nType, nDest, nRc = 1; + + //printk("EmulateCPDO(0x%08x)\n",opcode); + + /* Get the destination size. If not valid let Linux perform + an invalid instruction trap. */ + nDest = getDestinationSize(opcode); + if (typeNone == nDest) return 0; + + SetRoundingMode(opcode); + + /* Compare the size of the operands in Fn and Fm. + Choose the largest size and perform operations in that size, + in order to make use of all the precision of the operands. + If Fm is a constant, we just grab a constant of a size + matching the size of the operand in Fn. */ + if (MONADIC_INSTRUCTION(opcode)) + nType = nDest; + else + nType = fpa11->fType[getFn(opcode)]; + + if (!CONSTANT_FM(opcode)) + { + register unsigned int Fm = getFm(opcode); + if (nType < fpa11->fType[Fm]) + { + nType = fpa11->fType[Fm]; + } + } + + switch (nType) + { + case typeSingle : nRc = SingleCPDO(opcode); break; + case typeDouble : nRc = DoubleCPDO(opcode); break; + case typeExtended : nRc = ExtendedCPDO(opcode); break; + default : nRc = 0; + } + + /* If the operation succeeded, check to see if the result in the + destination register is the correct size. If not force it + to be. */ + Fd = getFd(opcode); + nType = fpa11->fType[Fd]; + if ((0 != nRc) && (nDest != nType)) + { + switch (nDest) + { + case typeSingle: + { + if (typeDouble == nType) + fpa11->fpreg[Fd].fSingle = + float64_to_float32(fpa11->fpreg[Fd].fDouble); + else + fpa11->fpreg[Fd].fSingle = + floatx80_to_float32(fpa11->fpreg[Fd].fExtended); + } + break; + + case typeDouble: + { + if (typeSingle == nType) + fpa11->fpreg[Fd].fDouble = + float32_to_float64(fpa11->fpreg[Fd].fSingle); + else + fpa11->fpreg[Fd].fDouble = + floatx80_to_float64(fpa11->fpreg[Fd].fExtended); + } + break; + + case typeExtended: + { + if (typeSingle == nType) + fpa11->fpreg[Fd].fExtended = + float32_to_floatx80(fpa11->fpreg[Fd].fSingle); + else + fpa11->fpreg[Fd].fExtended = + float64_to_floatx80(fpa11->fpreg[Fd].fDouble); + } + break; + } + + fpa11->fType[Fd] = nDest; + } + + return nRc; +} diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11_cpdt.c linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11_cpdt.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11_cpdt.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11_cpdt.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,368 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + (c) Philip Blundell, 1998 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" +#include "fpmodule.h" +#include "fpmodule.inl" + +#include + +static inline +void loadSingle(const unsigned int Fn,const unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + fpa11->fType[Fn] = typeSingle; + get_user(fpa11->fpreg[Fn].fSingle, pMem); +} + +static inline +void loadDouble(const unsigned int Fn,const unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int *p; + p = (unsigned int*)&fpa11->fpreg[Fn].fDouble; + fpa11->fType[Fn] = typeDouble; + get_user(p[0], &pMem[1]); + get_user(p[1], &pMem[0]); /* sign & exponent */ +} + +static inline +void loadExtended(const unsigned int Fn,const unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int *p; + p = (unsigned int*)&fpa11->fpreg[Fn].fExtended; + fpa11->fType[Fn] = typeExtended; + get_user(p[0], &pMem[0]); /* sign & exponent */ + get_user(p[1], &pMem[2]); /* ls bits */ + get_user(p[2], &pMem[1]); /* ms bits */ +} + +static inline +void loadMultiple(const unsigned int Fn,const unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + register unsigned int *p; + unsigned long x; + + p = (unsigned int*)&(fpa11->fpreg[Fn]); + get_user(x, &pMem[0]); + fpa11->fType[Fn] = (x >> 14) & 0x00000003; + + switch (fpa11->fType[Fn]) + { + case typeSingle: + case typeDouble: + { + get_user(p[0], &pMem[2]); /* Single */ + get_user(p[1], &pMem[1]); /* double msw */ + p[2] = 0; /* empty */ + } + break; + + case typeExtended: + { + get_user(p[1], &pMem[2]); + get_user(p[2], &pMem[1]); /* msw */ + p[0] = (x & 0x80003fff); + } + break; + } +} + +static inline +void storeSingle(const unsigned int Fn,unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + union + { + float32 f; + unsigned int i[1]; + } val; + + switch (fpa11->fType[Fn]) + { + case typeDouble: + val.f = float64_to_float32(fpa11->fpreg[Fn].fDouble); + break; + + case typeExtended: + val.f = floatx80_to_float32(fpa11->fpreg[Fn].fExtended); + break; + + default: val.f = fpa11->fpreg[Fn].fSingle; + } + + put_user(val.i[0], pMem); +} + +static inline +void storeDouble(const unsigned int Fn,unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + union + { + float64 f; + unsigned int i[2]; + } val; + + switch (fpa11->fType[Fn]) + { + case typeSingle: + val.f = float32_to_float64(fpa11->fpreg[Fn].fSingle); + break; + + case typeExtended: + val.f = floatx80_to_float64(fpa11->fpreg[Fn].fExtended); + break; + + default: val.f = fpa11->fpreg[Fn].fDouble; + } + + put_user(val.i[1], &pMem[0]); /* msw */ + put_user(val.i[0], &pMem[1]); /* lsw */ +} + +static inline +void storeExtended(const unsigned int Fn,unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + union + { + floatx80 f; + unsigned int i[3]; + } val; + + switch (fpa11->fType[Fn]) + { + case typeSingle: + val.f = float32_to_floatx80(fpa11->fpreg[Fn].fSingle); + break; + + case typeDouble: + val.f = float64_to_floatx80(fpa11->fpreg[Fn].fDouble); + break; + + default: val.f = fpa11->fpreg[Fn].fExtended; + } + + put_user(val.i[0], &pMem[0]); /* sign & exp */ + put_user(val.i[1], &pMem[2]); + put_user(val.i[2], &pMem[1]); /* msw */ +} + +static inline +void storeMultiple(const unsigned int Fn,unsigned int *pMem) +{ + FPA11 *fpa11 = GET_FPA11(); + register unsigned int nType, *p; + + p = (unsigned int*)&(fpa11->fpreg[Fn]); + nType = fpa11->fType[Fn]; + + switch (nType) + { + case typeSingle: + case typeDouble: + { + put_user(p[0], &pMem[2]); /* single */ + put_user(p[1], &pMem[1]); /* double msw */ + put_user(nType << 14, &pMem[0]); + } + break; + + case 
typeExtended: + { + put_user(p[2], &pMem[1]); /* msw */ + put_user(p[1], &pMem[2]); + put_user((p[0] & 0x80003fff) | (nType << 14), &pMem[0]); + } + break; + } +} + +unsigned int PerformLDF(const unsigned int opcode) +{ + unsigned int *pBase, *pAddress, *pFinal, nRc = 1, + write_back = WRITE_BACK(opcode); + + //printk("PerformLDF(0x%08x), Fd = 0x%08x\n",opcode,getFd(opcode)); + + pBase = (unsigned int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + switch (opcode & MASK_TRANSFER_LENGTH) + { + case TRANSFER_SINGLE : loadSingle(getFd(opcode),pAddress); break; + case TRANSFER_DOUBLE : loadDouble(getFd(opcode),pAddress); break; + case TRANSFER_EXTENDED: loadExtended(getFd(opcode),pAddress); break; + default: nRc = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return nRc; +} + +unsigned int PerformSTF(const unsigned int opcode) +{ + unsigned int *pBase, *pAddress, *pFinal, nRc = 1, + write_back = WRITE_BACK(opcode); + + //printk("PerformSTF(0x%08x), Fd = 0x%08x\n",opcode,getFd(opcode)); + SetRoundingMode(ROUND_TO_NEAREST); + + pBase = (unsigned int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + switch (opcode & MASK_TRANSFER_LENGTH) + { + case TRANSFER_SINGLE : storeSingle(getFd(opcode),pAddress); break; + case TRANSFER_DOUBLE : storeDouble(getFd(opcode),pAddress); break; + case TRANSFER_EXTENDED: storeExtended(getFd(opcode),pAddress); break; + default: nRc = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return nRc; +} + +unsigned int PerformLFM(const unsigned int opcode) +{ + unsigned int i, Fd, *pBase, *pAddress, *pFinal, + write_back = WRITE_BACK(opcode); + + pBase = (unsigned int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + Fd = getFd(opcode); + for (i=getRegisterCount(opcode);i>0;i--) + { + loadMultiple(Fd,pAddress); + pAddress += 3; Fd++; + if (Fd == 8) Fd = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return 1; +} + +unsigned int PerformSFM(const unsigned int opcode) +{ + unsigned int i, Fd, *pBase, *pAddress, *pFinal, + write_back = WRITE_BACK(opcode); + + pBase = (unsigned int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + Fd = getFd(opcode); + for (i=getRegisterCount(opcode);i>0;i--) + { + storeMultiple(Fd,pAddress); + pAddress += 3; Fd++; + if (Fd == 8) Fd = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return 1; +} + +#if 1 +unsigned int EmulateCPDT(const unsigned int opcode) +{ + unsigned int nRc = 0; + + //printk("EmulateCPDT(0x%08x)\n",opcode); + + if (LDF_OP(opcode)) + { + nRc = 
PerformLDF(opcode); + } + else if (LFM_OP(opcode)) + { + nRc = PerformLFM(opcode); + } + else if (STF_OP(opcode)) + { + nRc = PerformSTF(opcode); + } + else if (SFM_OP(opcode)) + { + nRc = PerformSFM(opcode); + } + else + { + nRc = 0; + } + + return nRc; +} +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11_cprt.c linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11_cprt.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpa11_cprt.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpa11_cprt.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,289 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + (c) Philip Blundell, 1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "milieu.h" +#include "softfloat.h" +#include "fpopcode.h" +#include "fpa11.inl" +#include "fpmodule.h" +#include "fpmodule.inl" + +extern flag floatx80_is_nan(floatx80); +extern flag float64_is_nan( float64); +extern flag float32_is_nan( float32); + +void SetRoundingMode(const unsigned int opcode); + +unsigned int PerformFLT(const unsigned int opcode); +unsigned int PerformFIX(const unsigned int opcode); + +static unsigned int +PerformComparison(const unsigned int opcode); + +unsigned int EmulateCPRT(const unsigned int opcode) +{ + unsigned int nRc = 1; + + //printk("EmulateCPRT(0x%08x)\n",opcode); + + if (opcode & 0x800000) + { + /* This is some variant of a comparison (PerformComparison will + sort out which one). Since most of the other CPRT + instructions are oddball cases of some sort or other it makes + sense to pull this out into a fast path. */ + return PerformComparison(opcode); + } + + /* Hint to GCC that we'd like a jump table rather than a load of CMPs */ + switch ((opcode & 0x700000) >> 20) + { + case FLT_CODE >> 20: nRc = PerformFLT(opcode); break; + case FIX_CODE >> 20: nRc = PerformFIX(opcode); break; + + case WFS_CODE >> 20: writeFPSR(readRegister(getRd(opcode))); break; + case RFS_CODE >> 20: writeRegister(getRd(opcode),readFPSR()); break; + +#if 0 /* We currently have no use for the FPCR, so there's no point + in emulating it. 
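The comparison fast path above relies on a property of the CPRT encodings defined later in this patch (fpopcode.h): only the four comparison opcodes CMF, CNF, CMFE and CNFE have bit 23 set, so `opcode & 0x800000` is enough to separate them from FLT/FIX/WFS/RFS/WFC/RFC. A minimal stand-alone check, with the constants copied from fpopcode.h and a main() added purely for illustration:

#include <assert.h>

#define FIX_CODE  0x00100000   /* a non-comparison CPRT opcode, for contrast */
#define CMF_CODE  0x00900000
#define CNF_CODE  0x00b00000
#define CMFE_CODE 0x00d00000
#define CNFE_CODE 0x00f00000

int main(void)
{
  /* every comparison variant has bit 23 set ... */
  assert(CMF_CODE  & 0x800000);
  assert(CNF_CODE  & 0x800000);
  assert(CMFE_CODE & 0x800000);
  assert(CNFE_CODE & 0x800000);
  /* ... while the transfer/status opcodes do not */
  assert(!(FIX_CODE & 0x800000));
  return 0;
}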
*/ + case WFC_CODE >> 20: writeFPCR(readRegister(getRd(opcode))); + case RFC_CODE >> 20: writeRegister(getRd(opcode),readFPCR()); break; +#endif + + default: nRc = 0; + } + + return nRc; +} + +unsigned int PerformFLT(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + + unsigned int nRc = 1; + SetRoundingMode(opcode); + + switch (opcode & MASK_ROUNDING_PRECISION) + { + case ROUND_SINGLE: + { + fpa11->fType[getFn(opcode)] = typeSingle; + fpa11->fpreg[getFn(opcode)].fSingle = + int32_to_float32(readRegister(getRd(opcode))); + } + break; + + case ROUND_DOUBLE: + { + fpa11->fType[getFn(opcode)] = typeDouble; + fpa11->fpreg[getFn(opcode)].fDouble = + int32_to_float64(readRegister(getRd(opcode))); + } + break; + + case ROUND_EXTENDED: + { + fpa11->fType[getFn(opcode)] = typeExtended; + fpa11->fpreg[getFn(opcode)].fExtended = + int32_to_floatx80(readRegister(getRd(opcode))); + } + break; + + default: nRc = 0; + } + + return nRc; +} + +unsigned int PerformFIX(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int nRc = 1; + unsigned int Fn = getFm(opcode); + + SetRoundingMode(opcode); + + switch (fpa11->fType[Fn]) + { + case typeSingle: + { + writeRegister(getRd(opcode), + float32_to_int32(fpa11->fpreg[Fn].fSingle)); + } + break; + + case typeDouble: + { + writeRegister(getRd(opcode), + float64_to_int32(fpa11->fpreg[Fn].fDouble)); + } + break; + + case typeExtended: + { + writeRegister(getRd(opcode), + floatx80_to_int32(fpa11->fpreg[Fn].fExtended)); + } + break; + + default: nRc = 0; + } + + return nRc; +} + + +static unsigned int __inline__ +PerformComparisonOperation(floatx80 Fn, floatx80 Fm) +{ + unsigned int flags = 0; + + /* test for less than condition */ + if (floatx80_lt(Fn,Fm)) + { + flags |= CC_NEGATIVE; + } + + /* test for equal condition */ + if (floatx80_eq(Fn,Fm)) + { + flags |= CC_ZERO; + } + + /* test for greater than or equal condition */ + if (floatx80_lt(Fm,Fn)) + { + flags |= CC_CARRY; + } + + writeConditionCodes(flags); + return 1; +} + +/* This instruction sets the flags N, Z, C, V in the FPSR. */ + +static unsigned int PerformComparison(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int Fn, Fm; + floatx80 rFn, rFm; + int e_flag = opcode & 0x400000; /* 1 if CxFE */ + int n_flag = opcode & 0x200000; /* 1 if CNxx */ + unsigned int flags = 0; + + //printk("PerformComparison(0x%08x)\n",opcode); + + Fn = getFn(opcode); + Fm = getFm(opcode); + + /* Check for unordered condition and convert all operands to 80-bit + format. + ?? Might be some mileage in avoiding this conversion if possible. + Eg, if both operands are 32-bit, detect this and do a 32-bit + comparison (cheaper than an 80-bit one). 
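As a concrete reading of that remark, a single-precision fast path might look like the sketch below. It mirrors PerformComparisonOperation() above but stays on 32-bit operands using SoftFloat's float32_lt()/float32_eq() routines; the function name is hypothetical and the code is an untested illustration, not part of the emulator. NaN operands would still have to be routed to the unordered path first.

static unsigned int PerformComparisonSingle(float32 rFn, float32 rFm)
{
   unsigned int flags = 0;

   /* same N/Z/C derivation as PerformComparisonOperation() above */
   if (float32_lt(rFn,rFm)) flags |= CC_NEGATIVE;
   if (float32_eq(rFn,rFm)) flags |= CC_ZERO;
   if (float32_lt(rFm,rFn)) flags |= CC_CARRY;

   writeConditionCodes(flags);
   return 1;
}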
*/ + switch (fpa11->fType[Fn]) + { + case typeSingle: + //printk("single.\n"); + if (float32_is_nan(fpa11->fpreg[Fn].fSingle)) + goto unordered; + rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle); + break; + + case typeDouble: + //printk("double.\n"); + if (float64_is_nan(fpa11->fpreg[Fn].fDouble)) + goto unordered; + rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble); + break; + + case typeExtended: + //printk("extended.\n"); + if (floatx80_is_nan(fpa11->fpreg[Fn].fExtended)) + goto unordered; + rFn = fpa11->fpreg[Fn].fExtended; + break; + + default: return 0; + } + + if (CONSTANT_FM(opcode)) + { + //printk("Fm is a constant: #%d.\n",Fm); + rFm = getExtendedConstant(Fm); + if (floatx80_is_nan(rFm)) + goto unordered; + } + else + { + //printk("Fm = r%d which contains a ",Fm); + switch (fpa11->fType[Fm]) + { + case typeSingle: + //printk("single.\n"); + if (float32_is_nan(fpa11->fpreg[Fm].fSingle)) + goto unordered; + rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle); + break; + + case typeDouble: + //printk("double.\n"); + if (float64_is_nan(fpa11->fpreg[Fm].fDouble)) + goto unordered; + rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble); + break; + + case typeExtended: + //printk("extended.\n"); + if (floatx80_is_nan(fpa11->fpreg[Fm].fExtended)) + goto unordered; + rFm = fpa11->fpreg[Fm].fExtended; + break; + + default: return 0; + } + } + + if (n_flag) + { + rFm.high ^= 0x8000; + } + + return PerformComparisonOperation(rFn,rFm); + + unordered: + /* ?? The FPA data sheet is pretty vague about this, in particular + about whether the non-E comparisons can ever raise exceptions. + This implementation is based on a combination of what it says in + the data sheet, observation of how the Acorn emulator actually + behaves (and how programs expect it to) and guesswork. */ + flags |= CC_OVERFLOW; + flags &= ~(CC_ZERO | CC_NEGATIVE); + + if (BIT_AC & readFPSR()) flags |= CC_CARRY; + + if (e_flag) float_raise(float_flag_invalid); + + writeConditionCodes(flags); + return 1; +} diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpmodule.c linux-2.5.70-bk14/arch/arm26/nwfpe/fpmodule.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpmodule.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpmodule.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,182 @@ + +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + (c) Philip Blundell, 1998-1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" + +#include +#include +#include + +/* XXX */ +#include +#include +#include +#include +#include +#include +/* XXX */ + +#include "softfloat.h" +#include "fpopcode.h" +#include "fpmodule.h" +#include "fpa11.inl" + +/* kernel symbols required for signal handling */ +typedef struct task_struct* PTASK; + +#ifdef MODULE +void fp_send_sig(unsigned long sig, PTASK p, int priv); +#if LINUX_VERSION_CODE > 0x20115 +MODULE_AUTHOR("Scott Bambrough "); +MODULE_DESCRIPTION("NWFPE floating point emulator"); +#endif + +#else +#define fp_send_sig send_sig +#define kern_fp_enter fp_enter + +extern char fpe_type[]; +#endif + +/* kernel function prototypes required */ +void fp_setup(void); + +/* external declarations for saved kernel symbols */ +extern void (*kern_fp_enter)(void); + +/* Original value of fp_enter from kernel before patched by fpe_init. */ +static void (*orig_fp_enter)(void); + +/* forward declarations */ +extern void nwfpe_enter(void); + +#ifdef MODULE +/* + * Return 0 if we can be unloaded. This can only happen if + * kern_fp_enter is still pointing at nwfpe_enter + */ +static int fpe_unload(void) +{ + return (kern_fp_enter == nwfpe_enter) ? 0 : 1; +} +#endif + +static int __init fpe_init(void) +{ + if (sizeof(FPA11) > sizeof(union fp_state)) { + printk(KERN_ERR "nwfpe: bad structure size\n"); + return -EINVAL; + } + + if (sizeof(FPREG) != 12) { + printk(KERN_ERR "nwfpe: bad register size\n"); + return -EINVAL; + } + +#ifdef MODULE + if (!mod_member_present(&__this_module, can_unload)) + return -EINVAL; + __this_module.can_unload = fpe_unload; +#else + if (fpe_type[0] && strcmp(fpe_type, "nwfpe")) + return 0; +#endif + + /* Display title, version and copyright information. */ + printk(KERN_WARNING "NetWinder Floating Point Emulator V0.95 " + "(c) 1998-1999 Rebel.com\n"); + + /* Save pointer to the old FP handler and then patch ourselves in */ + orig_fp_enter = kern_fp_enter; + kern_fp_enter = nwfpe_enter; + + return 0; +} + +static void __exit fpe_exit(void) +{ + /* Restore the values we saved earlier. */ + kern_fp_enter = orig_fp_enter; +} + +/* +ScottB: November 4, 1998 + +Moved this function out of softfloat-specialize into fpmodule.c. +This effectively isolates all the changes required for integrating with the +Linux kernel into fpmodule.c. Porting to NetBSD should only require modifying +fpmodule.c to integrate with the NetBSD kernel (I hope!). + +[1/1/99: Not quite true any more unfortunately. There is Linux-specific +code to access data in user space in some other source files at the +moment (grep for get_user / put_user calls). --philb] + +float_exception_flags is a global variable in SoftFloat. + +This function is called by the SoftFloat routines to raise a floating +point exception. We check the trap enable byte in the FPSR, and raise +a SIGFPE exception if necessary. If not the relevant bits in the +cumulative exceptions flag byte are set and we return. +*/ + +void float_raise(signed char flags) +{ + register unsigned int fpsr, cumulativeTraps; + +#ifdef CONFIG_DEBUG_USER + printk(KERN_DEBUG "NWFPE: %s[%d] takes exception %08x at %p from %08x\n", + current->comm, current->pid, flags, + __builtin_return_address(0), GET_USERREG()[15]); +#endif + + /* Keep SoftFloat exception flags up to date. */ + float_exception_flags |= flags; + + /* Read fpsr and initialize the cumulativeTraps. */ + fpsr = readFPSR(); + cumulativeTraps = 0; + + /* For each type of exception, the cumulative trap exception bit is only + set if the corresponding trap enable bit is not set. 
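The pairing that makes this work is that each status flag in bits 0-4 of the FPSR has its trap-enable bit exactly 16 positions higher, in bits 16-20, which is also why the final `fpsr & (flags << 16)` test below is sufficient to decide whether a SIGFPE must be delivered. A small stand-alone check, with the two bit values copied from fpsr.h and the main() wrapper added only for illustration:

#include <assert.h>

#define BIT_DZE 0x00020000   /* divide-by-zero trap enable (fpsr.h) */
#define BIT_DZC 0x00000002   /* divide-by-zero cumulative flag      */

int main(void)
{
  unsigned int fpsr  = BIT_DZE;   /* user enabled the divide-by-zero trap */
  unsigned int flags = BIT_DZC;   /* SoftFloat just raised divide-by-zero */

  /* the enable byte sits exactly 16 bits above the flag byte ...      */
  assert(BIT_DZE == (BIT_DZC << 16));
  /* ... so this is the test float_raise() uses to decide on a signal  */
  assert(fpsr & (flags << 16));
  return 0;
}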
*/ + if ((!(fpsr & BIT_IXE)) && (flags & BIT_IXC)) + cumulativeTraps |= BIT_IXC; + if ((!(fpsr & BIT_UFE)) && (flags & BIT_UFC)) + cumulativeTraps |= BIT_UFC; + if ((!(fpsr & BIT_OFE)) && (flags & BIT_OFC)) + cumulativeTraps |= BIT_OFC; + if ((!(fpsr & BIT_DZE)) && (flags & BIT_DZC)) + cumulativeTraps |= BIT_DZC; + if ((!(fpsr & BIT_IOE)) && (flags & BIT_IOC)) + cumulativeTraps |= BIT_IOC; + + /* Set the cumulative exceptions flags. */ + if (cumulativeTraps) + writeFPSR(fpsr | cumulativeTraps); + + /* Raise an exception if necessary. */ + if (fpsr & (flags << 16)) + fp_send_sig(SIGFPE, current, 1); +} + +module_init(fpe_init); +module_exit(fpe_exit); diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpmodule.h linux-2.5.70-bk14/arch/arm26/nwfpe/fpmodule.h --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpmodule.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpmodule.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,47 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef __FPMODULE_H__ +#define __FPMODULE_H__ + +#include + +#define REG_ORIG_R0 16 +#define REG_CPSR 15 +#define REG_PC 15 +#define REG_LR 14 +#define REG_SP 13 +#define REG_IP 12 +#define REG_FP 11 +#define REG_R10 10 +#define REG_R9 9 +#define REG_R9 9 +#define REG_R8 8 +#define REG_R7 7 +#define REG_R6 6 +#define REG_R5 5 +#define REG_R4 4 +#define REG_R3 3 +#define REG_R2 2 +#define REG_R1 1 +#define REG_R0 0 + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpmodule.inl linux-2.5.70-bk14/arch/arm26/nwfpe/fpmodule.inl --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpmodule.inl 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpmodule.inl 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,84 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +extern __inline__ +unsigned int readRegister(const unsigned int nReg) +{ + /* Note: The CPU thinks it has dealt with the current instruction. 
As + a result the program counter has been advanced to the next + instruction, and points 4 bytes beyond the actual instruction + that caused the invalid instruction trap to occur. We adjust + for this in this routine. LDF/STF instructions with Rn = PC + depend on the PC being correct, as they use PC+8 in their + address calculations. */ + unsigned int *userRegisters = GET_USERREG(); + unsigned int val = userRegisters[nReg]; + if (REG_PC == nReg) val -= 4; + return val; +} + +extern __inline__ +void writeRegister(const unsigned int nReg, const unsigned int val) +{ + unsigned int *userRegisters = GET_USERREG(); + userRegisters[nReg] = val; +} + +extern __inline__ +unsigned int readCPSR(void) +{ + return(readRegister(REG_CPSR)); +} + +extern __inline__ +void writeCPSR(const unsigned int val) +{ + writeRegister(REG_CPSR,val); +} + +extern __inline__ +unsigned int readConditionCodes(void) +{ +#ifdef __FPEM_TEST__ + return(0); +#else + return(readCPSR() & CC_MASK); +#endif +} + +extern __inline__ +void writeConditionCodes(const unsigned int val) +{ + unsigned int *userRegisters = GET_USERREG(); + unsigned int rval; + /* + * Operate directly on userRegisters since + * the CPSR may be the PC register itself. + */ + rval = userRegisters[REG_CPSR] & ~CC_MASK; + userRegisters[REG_CPSR] = rval | (val & CC_MASK); +} + +extern __inline__ +unsigned int readMemoryInt(unsigned int *pMem) +{ + return *pMem; +} diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpopcode.c linux-2.5.70-bk14/arch/arm26/nwfpe/fpopcode.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpopcode.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpopcode.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,148 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
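The masking in writeConditionCodes() (fpmodule.inl, just above) matters because on 26-bit ARM the NZCV flags live in the top four bits of R15, alongside the program counter, which is why REG_CPSR equals REG_PC in fpmodule.h: only the CC_MASK bits may be rewritten. A stand-alone illustration with a made-up R15 value; CC_MASK is copied from fpopcode.h:

#include <assert.h>

#define CC_MASK 0xf0000000

int main(void)
{
  unsigned int r15   = 0x2000abcd;  /* made-up PC+PSR word: C set, PC bits 0xabcd */
  unsigned int flags = 0x40000000;  /* new flags: Z set only                      */

  unsigned int out = (r15 & ~CC_MASK) | (flags & CC_MASK);

  assert(out == 0x4000abcd);        /* PC bits untouched, old flags replaced */
  return 0;
}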
+*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" +#include "fpsr.h" +#include "fpmodule.h" +#include "fpmodule.inl" + +const floatx80 floatx80Constant[] = { + { 0x0000, 0x0000000000000000ULL}, /* extended 0.0 */ + { 0x3fff, 0x8000000000000000ULL}, /* extended 1.0 */ + { 0x4000, 0x8000000000000000ULL}, /* extended 2.0 */ + { 0x4000, 0xc000000000000000ULL}, /* extended 3.0 */ + { 0x4001, 0x8000000000000000ULL}, /* extended 4.0 */ + { 0x4001, 0xa000000000000000ULL}, /* extended 5.0 */ + { 0x3ffe, 0x8000000000000000ULL}, /* extended 0.5 */ + { 0x4002, 0xa000000000000000ULL} /* extended 10.0 */ +}; + +const float64 float64Constant[] = { + 0x0000000000000000ULL, /* double 0.0 */ + 0x3ff0000000000000ULL, /* double 1.0 */ + 0x4000000000000000ULL, /* double 2.0 */ + 0x4008000000000000ULL, /* double 3.0 */ + 0x4010000000000000ULL, /* double 4.0 */ + 0x4014000000000000ULL, /* double 5.0 */ + 0x3fe0000000000000ULL, /* double 0.5 */ + 0x4024000000000000ULL /* double 10.0 */ +}; + +const float32 float32Constant[] = { + 0x00000000, /* single 0.0 */ + 0x3f800000, /* single 1.0 */ + 0x40000000, /* single 2.0 */ + 0x40400000, /* single 3.0 */ + 0x40800000, /* single 4.0 */ + 0x40a00000, /* single 5.0 */ + 0x3f000000, /* single 0.5 */ + 0x41200000 /* single 10.0 */ +}; + +unsigned int getTransferLength(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_TRANSFER_LENGTH) + { + case 0x00000000: nRc = 1; break; /* single precision */ + case 0x00008000: nRc = 2; break; /* double precision */ + case 0x00400000: nRc = 3; break; /* extended precision */ + default: nRc = 0; + } + + return(nRc); +} + +unsigned int getRegisterCount(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_REGISTER_COUNT) + { + case 0x00000000: nRc = 4; break; + case 0x00008000: nRc = 1; break; + case 0x00400000: nRc = 2; break; + case 0x00408000: nRc = 3; break; + default: nRc = 0; + } + + return(nRc); +} + +unsigned int getRoundingPrecision(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_ROUNDING_PRECISION) + { + case 0x00000000: nRc = 1; break; + case 0x00000080: nRc = 2; break; + case 0x00080000: nRc = 3; break; + default: nRc = 0; + } + + return(nRc); +} + +unsigned int getDestinationSize(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_DESTINATION_SIZE) + { + case 0x00000000: nRc = typeSingle; break; + case 0x00000080: nRc = typeDouble; break; + case 0x00080000: nRc = typeExtended; break; + default: nRc = typeNone; + } + + return(nRc); +} + +/* condition code lookup table + index into the table is test code: EQ, NE, ... 
LT, GT, AL, NV + bit position in short is condition code: NZCV */ +static const unsigned short aCC[16] = { + 0xF0F0, // EQ == Z set + 0x0F0F, // NE + 0xCCCC, // CS == C set + 0x3333, // CC + 0xFF00, // MI == N set + 0x00FF, // PL + 0xAAAA, // VS == V set + 0x5555, // VC + 0x0C0C, // HI == C set && Z clear + 0xF3F3, // LS == C clear || Z set + 0xAA55, // GE == (N==V) + 0x55AA, // LT == (N!=V) + 0x0A05, // GT == (!Z && (N==V)) + 0xF5FA, // LE == (Z || (N!=V)) + 0xFFFF, // AL always + 0 // NV +}; + +unsigned int checkCondition(const unsigned int opcode, const unsigned int ccodes) +{ + return (aCC[opcode>>28] >> (ccodes>>28)) & 1; +} diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpopcode.h linux-2.5.70-bk14/arch/arm26/nwfpe/fpopcode.h --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpopcode.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpopcode.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,390 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef __FPOPCODE_H__ +#define __FPOPCODE_H__ + +/* +ARM Floating Point Instruction Classes +| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +|c o n d|1 1 0 P|U|u|W|L| Rn |v| Fd |0|0|0|1| o f f s e t | CPDT +|c o n d|1 1 0 P|U|w|W|L| Rn |x| Fd |0|0|0|1| o f f s e t | CPDT +| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +|c o n d|1 1 1 0|a|b|c|d|e| Fn |j| Fd |0|0|0|1|f|g|h|0|i| Fm | CPDO +|c o n d|1 1 1 0|a|b|c|L|e| Fn | Rd |0|0|0|1|f|g|h|1|i| Fm | CPRT +|c o n d|1 1 1 0|a|b|c|1|e| Fn |1|1|1|1|0|0|0|1|f|g|h|1|i| Fm | comparisons +| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + +CPDT data transfer instructions + LDF, STF, LFM, SFM + +CPDO dyadic arithmetic instructions + ADF, MUF, SUF, RSF, DVF, RDF, + POW, RPW, RMF, FML, FDV, FRD, POL + +CPDO monadic arithmetic instructions + MVF, MNF, ABS, RND, SQT, LOG, LGN, EXP, + SIN, COS, TAN, ASN, ACS, ATN, URD, NRM + +CPRT joint arithmetic/data transfer instructions + FIX (arithmetic followed by load/store) + FLT (load/store followed by arithmetic) + CMF, CNF CMFE, CNFE (comparisons) + WFS, RFS (write/read floating point status register) + WFC, RFC (write/read floating point control register) + +cond condition codes +P pre/post index bit: 0 = postindex, 1 = preindex +U up/down bit: 0 = stack grows down, 1 = stack grows up +W write back bit: 1 = update base register (Rn) +L load/store bit: 0 = store, 1 = load +Rn base register +Rd destination/source register +Fd floating point destination register +Fn floating point source register +Fm floating point source register or floating point constant + +uv transfer length (TABLE 1) +wx register count (TABLE 2) +abcd arithmetic opcode (TABLES 3 & 4) +ef destination size (rounding precision) (TABLE 5) +gh rounding mode (TABLE 6) +j dyadic/monadic 
bit: 0 = dyadic, 1 = monadic +i constant bit: 1 = constant (TABLE 6) +*/ + +/* +TABLE 1 ++-------------------------+---+---+---------+---------+ +| Precision | u | v | FPSR.EP | length | ++-------------------------+---+---+---------+---------+ +| Single | 0 ü 0 | x | 1 words | +| Double | 1 ü 1 | x | 2 words | +| Extended | 1 ü 1 | x | 3 words | +| Packed decimal | 1 ü 1 | 0 | 3 words | +| Expanded packed decimal | 1 ü 1 | 1 | 4 words | ++-------------------------+---+---+---------+---------+ +Note: x = don't care +*/ + +/* +TABLE 2 ++---+---+---------------------------------+ +| w | x | Number of registers to transfer | ++---+---+---------------------------------+ +| 0 ü 1 | 1 | +| 1 ü 0 | 2 | +| 1 ü 1 | 3 | +| 0 ü 0 | 4 | ++---+---+---------------------------------+ +*/ + +/* +TABLE 3: Dyadic Floating Point Opcodes ++---+---+---+---+----------+-----------------------+-----------------------+ +| a | b | c | d | Mnemonic | Description | Operation | ++---+---+---+---+----------+-----------------------+-----------------------+ +| 0 | 0 | 0 | 0 | ADF | Add | Fd := Fn + Fm | +| 0 | 0 | 0 | 1 | MUF | Multiply | Fd := Fn * Fm | +| 0 | 0 | 1 | 0 | SUF | Subtract | Fd := Fn - Fm | +| 0 | 0 | 1 | 1 | RSF | Reverse subtract | Fd := Fm - Fn | +| 0 | 1 | 0 | 0 | DVF | Divide | Fd := Fn / Fm | +| 0 | 1 | 0 | 1 | RDF | Reverse divide | Fd := Fm / Fn | +| 0 | 1 | 1 | 0 | POW | Power | Fd := Fn ^ Fm | +| 0 | 1 | 1 | 1 | RPW | Reverse power | Fd := Fm ^ Fn | +| 1 | 0 | 0 | 0 | RMF | Remainder | Fd := IEEE rem(Fn/Fm) | +| 1 | 0 | 0 | 1 | FML | Fast Multiply | Fd := Fn * Fm | +| 1 | 0 | 1 | 0 | FDV | Fast Divide | Fd := Fn / Fm | +| 1 | 0 | 1 | 1 | FRD | Fast reverse divide | Fd := Fm / Fn | +| 1 | 1 | 0 | 0 | POL | Polar angle (ArcTan2) | Fd := arctan2(Fn,Fm) | +| 1 | 1 | 0 | 1 | | undefined instruction | trap | +| 1 | 1 | 1 | 0 | | undefined instruction | trap | +| 1 | 1 | 1 | 1 | | undefined instruction | trap | ++---+---+---+---+----------+-----------------------+-----------------------+ +Note: POW, RPW, POL are deprecated, and are available for backwards + compatibility only. +*/ + +/* +TABLE 4: Monadic Floating Point Opcodes ++---+---+---+---+----------+-----------------------+-----------------------+ +| a | b | c | d | Mnemonic | Description | Operation | ++---+---+---+---+----------+-----------------------+-----------------------+ +| 0 | 0 | 0 | 0 | MVF | Move | Fd := Fm | +| 0 | 0 | 0 | 1 | MNF | Move negated | Fd := - Fm | +| 0 | 0 | 1 | 0 | ABS | Absolute value | Fd := abs(Fm) | +| 0 | 0 | 1 | 1 | RND | Round to integer | Fd := int(Fm) | +| 0 | 1 | 0 | 0 | SQT | Square root | Fd := sqrt(Fm) | +| 0 | 1 | 0 | 1 | LOG | Log base 10 | Fd := log10(Fm) | +| 0 | 1 | 1 | 0 | LGN | Log base e | Fd := ln(Fm) | +| 0 | 1 | 1 | 1 | EXP | Exponent | Fd := e ^ Fm | +| 1 | 0 | 0 | 0 | SIN | Sine | Fd := sin(Fm) | +| 1 | 0 | 0 | 1 | COS | Cosine | Fd := cos(Fm) | +| 1 | 0 | 1 | 0 | TAN | Tangent | Fd := tan(Fm) | +| 1 | 0 | 1 | 1 | ASN | Arc Sine | Fd := arcsin(Fm) | +| 1 | 1 | 0 | 0 | ACS | Arc Cosine | Fd := arccos(Fm) | +| 1 | 1 | 0 | 1 | ATN | Arc Tangent | Fd := arctan(Fm) | +| 1 | 1 | 1 | 0 | URD | Unnormalized round | Fd := int(Fm) | +| 1 | 1 | 1 | 1 | NRM | Normalize | Fd := norm(Fm) | ++---+---+---+---+----------+-----------------------+-----------------------+ +Note: LOG, LGN, EXP, SIN, COS, TAN, ASN, ACS, ATN are deprecated, and are + available for backwards compatibility only. 
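The cond nibble shown at the top of these instruction diagrams is what checkCondition() in fpopcode.c (earlier in this patch) evaluates via the aCC lookup table: the row is selected by the condition code from the opcode, and the bit index within the row is the NZCV nibble of the saved flags. A small stand-alone illustration for the EQ row, with the values copied from fpopcode.c/fpopcode.h and a main() added only for this example:

#include <assert.h>

#define CC_ZERO 0x40000000                     /* the Z flag, as in fpopcode.h */

static const unsigned short aCC_EQ = 0xF0F0;   /* row 0 of aCC: the EQ condition */

int main(void)
{
  unsigned int z_set   = CC_ZERO;     /* NZCV = 0100 */
  unsigned int z_clear = 0;           /* NZCV = 0000 */

  /* bit (ccodes >> 28) of the row tells whether the condition passes */
  assert(((aCC_EQ >> (z_set   >> 28)) & 1) == 1);   /* EQ passes when Z is set  */
  assert(((aCC_EQ >> (z_clear >> 28)) & 1) == 0);   /* EQ fails when Z is clear */
  return 0;
}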
+*/ + +/* +TABLE 5 ++-------------------------+---+---+ +| Rounding Precision | e | f | ++-------------------------+---+---+ +| IEEE Single precision | 0 ü 0 | +| IEEE Double precision | 0 ü 1 | +| IEEE Extended precision | 1 ü 0 | +| undefined (trap) | 1 ü 1 | ++-------------------------+---+---+ +*/ + +/* +TABLE 5 ++---------------------------------+---+---+ +| Rounding Mode | g | h | ++---------------------------------+---+---+ +| Round to nearest (default) | 0 ü 0 | +| Round toward plus infinity | 0 ü 1 | +| Round toward negative infinity | 1 ü 0 | +| Round toward zero | 1 ü 1 | ++---------------------------------+---+---+ +*/ + +/* +=== +=== Definitions for load and store instructions +=== +*/ + +/* bit masks */ +#define BIT_PREINDEX 0x01000000 +#define BIT_UP 0x00800000 +#define BIT_WRITE_BACK 0x00200000 +#define BIT_LOAD 0x00100000 + +/* masks for load/store */ +#define MASK_CPDT 0x0c000000 /* data processing opcode */ +#define MASK_OFFSET 0x000000ff +#define MASK_TRANSFER_LENGTH 0x00408000 +#define MASK_REGISTER_COUNT MASK_TRANSFER_LENGTH +#define MASK_COPROCESSOR 0x00000f00 + +/* Tests for transfer length */ +#define TRANSFER_SINGLE 0x00000000 +#define TRANSFER_DOUBLE 0x00008000 +#define TRANSFER_EXTENDED 0x00400000 +#define TRANSFER_PACKED MASK_TRANSFER_LENGTH + +/* Get the coprocessor number from the opcode. */ +#define getCoprocessorNumber(opcode) ((opcode & MASK_COPROCESSOR) >> 8) + +/* Get the offset from the opcode. */ +#define getOffset(opcode) (opcode & MASK_OFFSET) + +/* Tests for specific data transfer load/store opcodes. */ +#define TEST_OPCODE(opcode,mask) (((opcode) & (mask)) == (mask)) + +#define LOAD_OP(opcode) TEST_OPCODE((opcode),MASK_CPDT | BIT_LOAD) +#define STORE_OP(opcode) ((opcode & (MASK_CPDT | BIT_LOAD)) == MASK_CPDT) + +#define LDF_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 1)) +#define LFM_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 2)) +#define STF_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 1)) +#define SFM_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 2)) + +#define PREINDEXED(opcode) ((opcode & BIT_PREINDEX) != 0) +#define POSTINDEXED(opcode) ((opcode & BIT_PREINDEX) == 0) +#define BIT_UP_SET(opcode) ((opcode & BIT_UP) != 0) +#define BIT_UP_CLEAR(opcode) ((opcode & BIT_DOWN) == 0) +#define WRITE_BACK(opcode) ((opcode & BIT_WRITE_BACK) != 0) +#define LOAD(opcode) ((opcode & BIT_LOAD) != 0) +#define STORE(opcode) ((opcode & BIT_LOAD) == 0) + +/* +=== +=== Definitions for arithmetic instructions +=== +*/ +/* bit masks */ +#define BIT_MONADIC 0x00008000 +#define BIT_CONSTANT 0x00000008 + +#define CONSTANT_FM(opcode) ((opcode & BIT_CONSTANT) != 0) +#define MONADIC_INSTRUCTION(opcode) ((opcode & BIT_MONADIC) != 0) + +/* instruction identification masks */ +#define MASK_CPDO 0x0e000000 /* arithmetic opcode */ +#define MASK_ARITHMETIC_OPCODE 0x00f08000 +#define MASK_DESTINATION_SIZE 0x00080080 + +/* dyadic arithmetic opcodes. 
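The addressing macros just defined (getOffset, BIT_UP_SET, PREINDEXED, WRITE_BACK) are the ones PerformLDF(), PerformSTF(), PerformLFM() and PerformSFM() earlier in this patch use to form the transfer address. Restated as a stand-alone helper for illustration only; cpdt_address is a hypothetical name, not a function in the emulator:

static unsigned int *cpdt_address(const unsigned int opcode,
                                  unsigned int *pBase,
                                  unsigned int **pFinal)
{
  unsigned int *p = pBase;

  if (BIT_UP_SET(opcode))
    p += getOffset(opcode);        /* U bit set: add the 8-bit word offset */
  else
    p -= getOffset(opcode);        /* U bit clear: subtract it             */

  *pFinal = p;                     /* written back to Rn when W is set     */

  /* P bit: pre-indexed transfers use the updated address,
     post-indexed transfers use the original base            */
  return PREINDEXED(opcode) ? p : pBase;
}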
*/ +#define ADF_CODE 0x00000000 +#define MUF_CODE 0x00100000 +#define SUF_CODE 0x00200000 +#define RSF_CODE 0x00300000 +#define DVF_CODE 0x00400000 +#define RDF_CODE 0x00500000 +#define POW_CODE 0x00600000 +#define RPW_CODE 0x00700000 +#define RMF_CODE 0x00800000 +#define FML_CODE 0x00900000 +#define FDV_CODE 0x00a00000 +#define FRD_CODE 0x00b00000 +#define POL_CODE 0x00c00000 +/* 0x00d00000 is an invalid dyadic arithmetic opcode */ +/* 0x00e00000 is an invalid dyadic arithmetic opcode */ +/* 0x00f00000 is an invalid dyadic arithmetic opcode */ + +/* monadic arithmetic opcodes. */ +#define MVF_CODE 0x00008000 +#define MNF_CODE 0x00108000 +#define ABS_CODE 0x00208000 +#define RND_CODE 0x00308000 +#define SQT_CODE 0x00408000 +#define LOG_CODE 0x00508000 +#define LGN_CODE 0x00608000 +#define EXP_CODE 0x00708000 +#define SIN_CODE 0x00808000 +#define COS_CODE 0x00908000 +#define TAN_CODE 0x00a08000 +#define ASN_CODE 0x00b08000 +#define ACS_CODE 0x00c08000 +#define ATN_CODE 0x00d08000 +#define URD_CODE 0x00e08000 +#define NRM_CODE 0x00f08000 + +/* +=== +=== Definitions for register transfer and comparison instructions +=== +*/ + +#define MASK_CPRT 0x0e000010 /* register transfer opcode */ +#define MASK_CPRT_CODE 0x00f00000 +#define FLT_CODE 0x00000000 +#define FIX_CODE 0x00100000 +#define WFS_CODE 0x00200000 +#define RFS_CODE 0x00300000 +#define WFC_CODE 0x00400000 +#define RFC_CODE 0x00500000 +#define CMF_CODE 0x00900000 +#define CNF_CODE 0x00b00000 +#define CMFE_CODE 0x00d00000 +#define CNFE_CODE 0x00f00000 + +/* +=== +=== Common definitions +=== +*/ + +/* register masks */ +#define MASK_Rd 0x0000f000 +#define MASK_Rn 0x000f0000 +#define MASK_Fd 0x00007000 +#define MASK_Fm 0x00000007 +#define MASK_Fn 0x00070000 + +/* condition code masks */ +#define CC_MASK 0xf0000000 +#define CC_NEGATIVE 0x80000000 +#define CC_ZERO 0x40000000 +#define CC_CARRY 0x20000000 +#define CC_OVERFLOW 0x10000000 +#define CC_EQ 0x00000000 +#define CC_NE 0x10000000 +#define CC_CS 0x20000000 +#define CC_HS CC_CS +#define CC_CC 0x30000000 +#define CC_LO CC_CC +#define CC_MI 0x40000000 +#define CC_PL 0x50000000 +#define CC_VS 0x60000000 +#define CC_VC 0x70000000 +#define CC_HI 0x80000000 +#define CC_LS 0x90000000 +#define CC_GE 0xa0000000 +#define CC_LT 0xb0000000 +#define CC_GT 0xc0000000 +#define CC_LE 0xd0000000 +#define CC_AL 0xe0000000 +#define CC_NV 0xf0000000 + +/* rounding masks/values */ +#define MASK_ROUNDING_MODE 0x00000060 +#define ROUND_TO_NEAREST 0x00000000 +#define ROUND_TO_PLUS_INFINITY 0x00000020 +#define ROUND_TO_MINUS_INFINITY 0x00000040 +#define ROUND_TO_ZERO 0x00000060 + +#define MASK_ROUNDING_PRECISION 0x00080080 +#define ROUND_SINGLE 0x00000000 +#define ROUND_DOUBLE 0x00000080 +#define ROUND_EXTENDED 0x00080000 + +/* Get the condition code from the opcode. */ +#define getCondition(opcode) (opcode >> 28) + +/* Get the source register from the opcode. */ +#define getRn(opcode) ((opcode & MASK_Rn) >> 16) + +/* Get the destination floating point register from the opcode. */ +#define getFd(opcode) ((opcode & MASK_Fd) >> 12) + +/* Get the first source floating point register from the opcode. */ +#define getFn(opcode) ((opcode & MASK_Fn) >> 16) + +/* Get the second source floating point register from the opcode. */ +#define getFm(opcode) (opcode & MASK_Fm) + +/* Get the destination register from the opcode. */ +#define getRd(opcode) ((opcode & MASK_Rd) >> 12) + +/* Get the rounding mode from the opcode. 
*/ +#define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5) + +static inline const floatx80 getExtendedConstant(const unsigned int nIndex) +{ + extern const floatx80 floatx80Constant[]; + return floatx80Constant[nIndex]; +} + +static inline const float64 getDoubleConstant(const unsigned int nIndex) +{ + extern const float64 float64Constant[]; + return float64Constant[nIndex]; +} + +static inline const float32 getSingleConstant(const unsigned int nIndex) +{ + extern const float32 float32Constant[]; + return float32Constant[nIndex]; +} + +extern unsigned int getRegisterCount(const unsigned int opcode); +extern unsigned int getDestinationSize(const unsigned int opcode); + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/fpsr.h linux-2.5.70-bk14/arch/arm26/nwfpe/fpsr.h --- linux-2.5.70-bk13/arch/arm26/nwfpe/fpsr.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/fpsr.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,108 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef __FPSR_H__ +#define __FPSR_H__ + +/* +The FPSR is a 32 bit register consisting of 4 parts, each exactly +one byte. + + SYSTEM ID + EXCEPTION TRAP ENABLE BYTE + SYSTEM CONTROL BYTE + CUMULATIVE EXCEPTION FLAGS BYTE + +The FPCR is a 32 bit register consisting of bit flags. 
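To make the four-byte split concrete, here is a small stand-alone illustration that carves an FPSR word into those fields; the masks are copied from the definitions below, and the fpsr value is an arbitrary made-up example:

#include <stdio.h>

#define MASK_SYSID           0xff000000
#define MASK_TRAP_ENABLE     0x00ff0000
#define MASK_SYSTEM_CONTROL  0x0000ff00
#define MASK_EXCEPTION_FLAGS 0x000000ff

int main(void)
{
  unsigned int fpsr = 0x01040010;   /* made-up value, purely illustrative */

  printf("system id      %02x\n", (fpsr & MASK_SYSID) >> 24);             /* 01: emulator    */
  printf("trap enables   %02x\n", (fpsr & MASK_TRAP_ENABLE) >> 16);       /* 04: OFE enabled */
  printf("system control %02x\n", (fpsr & MASK_SYSTEM_CONTROL) >> 8);     /* 00              */
  printf("cumulative     %02x\n",  fpsr & MASK_EXCEPTION_FLAGS);          /* 10: IXC set     */
  return 0;
}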
+*/ + +/* SYSTEM ID +------------ +Note: the system id byte is read only */ + +typedef unsigned int FPSR; /* type for floating point status register */ +typedef unsigned int FPCR; /* type for floating point control register */ + +#define MASK_SYSID 0xff000000 +#define BIT_HARDWARE 0x80000000 +#define FP_EMULATOR 0x01000000 /* System ID for emulator */ +#define FP_ACCELERATOR 0x81000000 /* System ID for FPA11 */ + +/* EXCEPTION TRAP ENABLE BYTE +----------------------------- */ + +#define MASK_TRAP_ENABLE 0x00ff0000 +#define MASK_TRAP_ENABLE_STRICT 0x001f0000 +#define BIT_IXE 0x00100000 /* inexact exception enable */ +#define BIT_UFE 0x00080000 /* underflow exception enable */ +#define BIT_OFE 0x00040000 /* overflow exception enable */ +#define BIT_DZE 0x00020000 /* divide by zero exception enable */ +#define BIT_IOE 0x00010000 /* invalid operation exception enable */ + +/* SYSTEM CONTROL BYTE +---------------------- */ + +#define MASK_SYSTEM_CONTROL 0x0000ff00 +#define MASK_TRAP_STRICT 0x00001f00 + +#define BIT_AC 0x00001000 /* use alternative C-flag definition + for compares */ +#define BIT_EP 0x00000800 /* use expanded packed decimal format */ +#define BIT_SO 0x00000400 /* select synchronous operation of FPA */ +#define BIT_NE 0x00000200 /* NaN exception bit */ +#define BIT_ND 0x00000100 /* no denormalized numbers bit */ + +/* CUMULATIVE EXCEPTION FLAGS BYTE +---------------------------------- */ + +#define MASK_EXCEPTION_FLAGS 0x000000ff +#define MASK_EXCEPTION_FLAGS_STRICT 0x0000001f + +#define BIT_IXC 0x00000010 /* inexact exception flag */ +#define BIT_UFC 0x00000008 /* underflow exception flag */ +#define BIT_OFC 0x00000004 /* overfloat exception flag */ +#define BIT_DZC 0x00000002 /* divide by zero exception flag */ +#define BIT_IOC 0x00000001 /* invalid operation exception flag */ + +/* Floating Point Control Register +----------------------------------*/ + +#define BIT_RU 0x80000000 /* rounded up bit */ +#define BIT_IE 0x10000000 /* inexact bit */ +#define BIT_MO 0x08000000 /* mantissa overflow bit */ +#define BIT_EO 0x04000000 /* exponent overflow bit */ +#define BIT_SB 0x00000800 /* store bounce */ +#define BIT_AB 0x00000400 /* arithmetic bounce */ +#define BIT_RE 0x00000200 /* rounding exception */ +#define BIT_DA 0x00000100 /* disable FPA */ + +#define MASK_OP 0x00f08010 /* AU operation code */ +#define MASK_PR 0x00080080 /* AU precision */ +#define MASK_S1 0x00070000 /* AU source register 1 */ +#define MASK_S2 0x00000007 /* AU source register 2 */ +#define MASK_DS 0x00007000 /* AU destination register */ +#define MASK_RM 0x00000060 /* AU rounding mode */ +#define MASK_ALU 0x9cfff2ff /* only ALU can write these bits */ +#define MASK_RESET 0x00000d00 /* bits set on reset, all others cleared */ +#define MASK_WFC MASK_RESET +#define MASK_RFC ~MASK_RESET + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/milieu.h linux-2.5.70-bk14/arch/arm26/nwfpe/milieu.h --- linux-2.5.70-bk13/arch/arm26/nwfpe/milieu.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/milieu.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,48 @@ + +/* +=============================================================================== + +This C header file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. 
Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/softfloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these three paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +/* +------------------------------------------------------------------------------- +Include common integer types and flags. +------------------------------------------------------------------------------- +*/ +#include "ARM-gcc.h" + +/* +------------------------------------------------------------------------------- +Symbolic Boolean literals. +------------------------------------------------------------------------------- +*/ +enum { + FALSE = 0, + TRUE = 1 +}; + diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/single_cpdo.c linux-2.5.70-bk14/arch/arm26/nwfpe/single_cpdo.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/single_cpdo.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/single_cpdo.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,255 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" + +float32 float32_exp(float32 Fm); +float32 float32_ln(float32 Fm); +float32 float32_sin(float32 rFm); +float32 float32_cos(float32 rFm); +float32 float32_arcsin(float32 rFm); +float32 float32_arctan(float32 rFm); +float32 float32_log(float32 rFm); +float32 float32_tan(float32 rFm); +float32 float32_arccos(float32 rFm); +float32 float32_pow(float32 rFn,float32 rFm); +float32 float32_pol(float32 rFn,float32 rFm); + +unsigned int SingleCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + float32 rFm, rFn = 0; //FIXME - should be zero? 
+ unsigned int Fd, Fm, Fn, nRc = 1; + + Fm = getFm(opcode); + if (CONSTANT_FM(opcode)) + { + rFm = getSingleConstant(Fm); + } + else + { + switch (fpa11->fType[Fm]) + { + case typeSingle: + rFm = fpa11->fpreg[Fm].fSingle; + break; + + default: return 0; + } + } + + if (!MONADIC_INSTRUCTION(opcode)) + { + Fn = getFn(opcode); + switch (fpa11->fType[Fn]) + { + case typeSingle: + rFn = fpa11->fpreg[Fn].fSingle; + break; + + default: return 0; + } + } + + Fd = getFd(opcode); + switch (opcode & MASK_ARITHMETIC_OPCODE) + { + /* dyadic opcodes */ + case ADF_CODE: + fpa11->fpreg[Fd].fSingle = float32_add(rFn,rFm); + break; + + case MUF_CODE: + case FML_CODE: + fpa11->fpreg[Fd].fSingle = float32_mul(rFn,rFm); + break; + + case SUF_CODE: + fpa11->fpreg[Fd].fSingle = float32_sub(rFn,rFm); + break; + + case RSF_CODE: + fpa11->fpreg[Fd].fSingle = float32_sub(rFm,rFn); + break; + + case DVF_CODE: + case FDV_CODE: + fpa11->fpreg[Fd].fSingle = float32_div(rFn,rFm); + break; + + case RDF_CODE: + case FRD_CODE: + fpa11->fpreg[Fd].fSingle = float32_div(rFm,rFn); + break; + +#if 0 + case POW_CODE: + fpa11->fpreg[Fd].fSingle = float32_pow(rFn,rFm); + break; + + case RPW_CODE: + fpa11->fpreg[Fd].fSingle = float32_pow(rFm,rFn); + break; +#endif + + case RMF_CODE: + fpa11->fpreg[Fd].fSingle = float32_rem(rFn,rFm); + break; + +#if 0 + case POL_CODE: + fpa11->fpreg[Fd].fSingle = float32_pol(rFn,rFm); + break; +#endif + + /* monadic opcodes */ + case MVF_CODE: + fpa11->fpreg[Fd].fSingle = rFm; + break; + + case MNF_CODE: + rFm ^= 0x80000000; + fpa11->fpreg[Fd].fSingle = rFm; + break; + + case ABS_CODE: + rFm &= 0x7fffffff; + fpa11->fpreg[Fd].fSingle = rFm; + break; + + case RND_CODE: + case URD_CODE: + fpa11->fpreg[Fd].fSingle = float32_round_to_int(rFm); + break; + + case SQT_CODE: + fpa11->fpreg[Fd].fSingle = float32_sqrt(rFm); + break; + +#if 0 + case LOG_CODE: + fpa11->fpreg[Fd].fSingle = float32_log(rFm); + break; + + case LGN_CODE: + fpa11->fpreg[Fd].fSingle = float32_ln(rFm); + break; + + case EXP_CODE: + fpa11->fpreg[Fd].fSingle = float32_exp(rFm); + break; + + case SIN_CODE: + fpa11->fpreg[Fd].fSingle = float32_sin(rFm); + break; + + case COS_CODE: + fpa11->fpreg[Fd].fSingle = float32_cos(rFm); + break; + + case TAN_CODE: + fpa11->fpreg[Fd].fSingle = float32_tan(rFm); + break; + + case ASN_CODE: + fpa11->fpreg[Fd].fSingle = float32_arcsin(rFm); + break; + + case ACS_CODE: + fpa11->fpreg[Fd].fSingle = float32_arccos(rFm); + break; + + case ATN_CODE: + fpa11->fpreg[Fd].fSingle = float32_arctan(rFm); + break; +#endif + + case NRM_CODE: + break; + + default: + { + nRc = 0; + } + } + + if (0 != nRc) fpa11->fType[Fd] = typeSingle; + return nRc; +} + +#if 0 +float32 float32_exp(float32 Fm) +{ +//series +} + +float32 float32_ln(float32 Fm) +{ +//series +} + +float32 float32_sin(float32 rFm) +{ +//series +} + +float32 float32_cos(float32 rFm) +{ +//series +} + +float32 float32_arcsin(float32 rFm) +{ +//series +} + +float32 float32_arctan(float32 rFm) +{ + //series +} + +float32 float32_arccos(float32 rFm) +{ + //return float32_sub(halfPi,float32_arcsin(rFm)); +} + +float32 float32_log(float32 rFm) +{ + return float32_div(float32_ln(rFm),getSingleConstant(7)); +} + +float32 float32_tan(float32 rFm) +{ + return float32_div(float32_sin(rFm),float32_cos(rFm)); +} + +float32 float32_pow(float32 rFn,float32 rFm) +{ + return float32_exp(float32_mul(rFm,float32_ln(rFn))); +} + +float32 float32_pol(float32 rFn,float32 rFm) +{ + return float32_arctan(float32_div(rFn,rFm)); +} +#endif diff -urN 
linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat-macros linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat-macros --- linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat-macros 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat-macros 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,740 @@ + +/* +=============================================================================== + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/softfloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these three paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +/* +------------------------------------------------------------------------------- +Shifts `a' right by the number of bits given in `count'. If any nonzero +bits are shifted off, they are ``jammed'' into the least significant bit of +the result by setting the least significant bit to 1. The value of `count' +can be arbitrarily large; in particular, if `count' is greater than 32, the +result will be either 0 or 1, depending on whether `a' is zero or nonzero. +The result is stored in the location pointed to by `zPtr'. +------------------------------------------------------------------------------- +*/ +INLINE void shift32RightJamming( bits32 a, int16 count, bits32 *zPtr ) +{ + bits32 z; + if ( count == 0 ) { + z = a; + } + else if ( count < 32 ) { + z = ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 ); + } + else { + z = ( a != 0 ); + } + *zPtr = z; +} + +/* +------------------------------------------------------------------------------- +Shifts `a' right by the number of bits given in `count'. If any nonzero +bits are shifted off, they are ``jammed'' into the least significant bit of +the result by setting the least significant bit to 1. The value of `count' +can be arbitrarily large; in particular, if `count' is greater than 64, the +result will be either 0 or 1, depending on whether `a' is zero or nonzero. +The result is stored in the location pointed to by `zPtr'. 
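A quick way to see the ``jamming'' in action is to run the 32-bit routine above on a value whose low bits would otherwise be lost. The restatement below uses plain stdint types so it compiles on its own; it is an illustration of shift32RightJamming(), not a replacement for it:

#include <stdio.h>
#include <stdint.h>

static uint32_t shift32_right_jamming(uint32_t a, int count)
{
  if (count == 0)
    return a;
  if (count < 32)
    return (a >> count) | ((a << ((-count) & 31)) != 0);
  return a != 0;
}

int main(void)
{
  /* shifting 0x80000001 right by 4 discards a set bit, so it is
     "jammed" into the least significant bit of the result        */
  printf("0x%08x\n", shift32_right_jamming(0x80000001u, 4));   /* prints 0x08000001 */
  return 0;
}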
+------------------------------------------------------------------------------- +*/ +INLINE void shift64RightJamming( bits64 a, int16 count, bits64 *zPtr ) +{ + bits64 z; + + __asm__("@shift64RightJamming -- start"); + if ( count == 0 ) { + z = a; + } + else if ( count < 64 ) { + z = ( a>>count ) | ( ( a<<( ( - count ) & 63 ) ) != 0 ); + } + else { + z = ( a != 0 ); + } + __asm__("@shift64RightJamming -- end"); + *zPtr = z; +} + +/* +------------------------------------------------------------------------------- +Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64 +_plus_ the number of bits given in `count'. The shifted result is at most +64 nonzero bits; this is stored at the location pointed to by `z0Ptr'. The +bits shifted off form a second 64-bit result as follows: The _last_ bit +shifted off is the most-significant bit of the extra result, and the other +63 bits of the extra result are all zero if and only if _all_but_the_last_ +bits shifted off were all zero. This extra result is stored in the location +pointed to by `z1Ptr'. The value of `count' can be arbitrarily large. + (This routine makes more sense if `a0' and `a1' are considered to form a +fixed-point value with binary point between `a0' and `a1'. This fixed-point +value is shifted right by the number of bits given in `count', and the +integer part of the result is returned at the location pointed to by +`z0Ptr'. The fractional part of the result may be slightly corrupted as +described above, and is returned at the location pointed to by `z1Ptr'.) +------------------------------------------------------------------------------- +*/ +INLINE void + shift64ExtraRightJamming( + bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) +{ + bits64 z0, z1; + int8 negCount = ( - count ) & 63; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 64 ) { + z1 = ( a0<>count; + } + else { + if ( count == 64 ) { + z1 = a0 | ( a1 != 0 ); + } + else { + z1 = ( ( a0 | a1 ) != 0 ); + } + z0 = 0; + } + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the +number of bits given in `count'. Any bits shifted off are lost. The value +of `count' can be arbitrarily large; in particular, if `count' is greater +than 128, the result will be 0. The result is broken into two 64-bit pieces +which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + shift128Right( + bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) +{ + bits64 z0, z1; + int8 negCount = ( - count ) & 63; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 64 ) { + z1 = ( a0<>count ); + z0 = a0>>count; + } + else { + z1 = ( count < 64 ) ? ( a0>>( count & 63 ) ) : 0; + z0 = 0; + } + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the +number of bits given in `count'. If any nonzero bits are shifted off, they +are ``jammed'' into the least significant bit of the result by setting the +least significant bit to 1. The value of `count' can be arbitrarily large; +in particular, if `count' is greater than 128, the result will be either 0 +or 1, depending on whether the concatenation of `a0' and `a1' is zero or +nonzero. 
The result is broken into two 64-bit pieces which are stored at +the locations pointed to by `z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + shift128RightJamming( + bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) +{ + bits64 z0, z1; + int8 negCount = ( - count ) & 63; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 64 ) { + z1 = ( a0<>count ) | ( ( a1<>count; + } + else { + if ( count == 64 ) { + z1 = a0 | ( a1 != 0 ); + } + else if ( count < 128 ) { + z1 = ( a0>>( count & 63 ) ) | ( ( ( a0<>count ); + z0 = a0>>count; + } + else { + if ( count == 64 ) { + z2 = a1; + z1 = a0; + } + else { + a2 |= a1; + if ( count < 128 ) { + z2 = a0<>( count & 63 ); + } + else { + z2 = ( count == 128 ) ? a0 : ( a0 != 0 ); + z1 = 0; + } + } + z0 = 0; + } + z2 |= ( a2 != 0 ); + } + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the +number of bits given in `count'. Any bits shifted off are lost. The value +of `count' must be less than 64. The result is broken into two 64-bit +pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + shortShift128Left( + bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) +{ + + *z1Ptr = a1<>( ( - count ) & 63 ) ); + +} + +/* +------------------------------------------------------------------------------- +Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' left +by the number of bits given in `count'. Any bits shifted off are lost. +The value of `count' must be less than 64. The result is broken into three +64-bit pieces which are stored at the locations pointed to by `z0Ptr', +`z1Ptr', and `z2Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + shortShift192Left( + bits64 a0, + bits64 a1, + bits64 a2, + int16 count, + bits64 *z0Ptr, + bits64 *z1Ptr, + bits64 *z2Ptr + ) +{ + bits64 z0, z1, z2; + int8 negCount; + + z2 = a2<>negCount; + z0 |= a1>>negCount; + } + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit +value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so +any carry out is lost. The result is broken into two 64-bit pieces which +are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + add128( + bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr ) +{ + bits64 z1; + + z1 = a1 + b1; + *z1Ptr = z1; + *z0Ptr = a0 + b0 + ( z1 < a1 ); + +} + +/* +------------------------------------------------------------------------------- +Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the +192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is +modulo 2^192, so any carry out is lost. The result is broken into three +64-bit pieces which are stored at the locations pointed to by `z0Ptr', +`z1Ptr', and `z2Ptr'. 
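The 128-bit jamming shift documented a little earlier is easiest to see in a small plain-C restatement. The helper below covers only the 0 < count < 64 case (the real routine also handles count == 0 and count >= 64) and illustrates the documented behaviour; it is not the kernel code itself:

#include <stdio.h>
#include <stdint.h>

/* 128-bit value a0:a1 shifted right by count, 0 < count < 64, with any
   discarded set bits OR-ed ("jammed") into bit 0 of the low result word */
static void shift128_right_jamming(uint64_t a0, uint64_t a1, int count,
                                   uint64_t *z0, uint64_t *z1)
{
  int neg = (-count) & 63;

  *z1 = (a0 << neg) | (a1 >> count) | ((a1 << neg) != 0);
  *z0 = a0 >> count;
}

int main(void)
{
  uint64_t hi, lo;

  /* 0x...0001:0x...0002 >> 2 discards the set bit 1 of the low word,
     which reappears as the jammed bit 0 of the result               */
  shift128_right_jamming(1, 2, 2, &hi, &lo);
  printf("%016llx %016llx\n", (unsigned long long)hi, (unsigned long long)lo);
  /* prints 0000000000000000 4000000000000001 */
  return 0;
}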
+------------------------------------------------------------------------------- +*/ +INLINE void + add192( + bits64 a0, + bits64 a1, + bits64 a2, + bits64 b0, + bits64 b1, + bits64 b2, + bits64 *z0Ptr, + bits64 *z1Ptr, + bits64 *z2Ptr + ) +{ + bits64 z0, z1, z2; + int8 carry0, carry1; + + z2 = a2 + b2; + carry1 = ( z2 < a2 ); + z1 = a1 + b1; + carry0 = ( z1 < a1 ); + z0 = a0 + b0; + z1 += carry1; + z0 += ( z1 < carry1 ); + z0 += carry0; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the +128-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo +2^128, so any borrow out (carry out) is lost. The result is broken into two +64-bit pieces which are stored at the locations pointed to by `z0Ptr' and +`z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + sub128( + bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr ) +{ + + *z1Ptr = a1 - b1; + *z0Ptr = a0 - b0 - ( a1 < b1 ); + +} + +/* +------------------------------------------------------------------------------- +Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2' +from the 192-bit value formed by concatenating `a0', `a1', and `a2'. +Subtraction is modulo 2^192, so any borrow out (carry out) is lost. The +result is broken into three 64-bit pieces which are stored at the locations +pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + sub192( + bits64 a0, + bits64 a1, + bits64 a2, + bits64 b0, + bits64 b1, + bits64 b2, + bits64 *z0Ptr, + bits64 *z1Ptr, + bits64 *z2Ptr + ) +{ + bits64 z0, z1, z2; + int8 borrow0, borrow1; + + z2 = a2 - b2; + borrow1 = ( a2 < b2 ); + z1 = a1 - b1; + borrow0 = ( a1 < b1 ); + z0 = a0 - b0; + z0 -= ( z1 < borrow1 ); + z1 -= borrow1; + z0 -= borrow0; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Multiplies `a' by `b' to obtain a 128-bit product. The product is broken +into two 64-bit pieces which are stored at the locations pointed to by +`z0Ptr' and `z1Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void mul64To128( bits64 a, bits64 b, bits64 *z0Ptr, bits64 *z1Ptr ) +{ + bits32 aHigh, aLow, bHigh, bLow; + bits64 z0, zMiddleA, zMiddleB, z1; + + aLow = a; + aHigh = a>>32; + bLow = b; + bHigh = b>>32; + z1 = ( (bits64) aLow ) * bLow; + zMiddleA = ( (bits64) aLow ) * bHigh; + zMiddleB = ( (bits64) aHigh ) * bLow; + z0 = ( (bits64) aHigh ) * bHigh; + zMiddleA += zMiddleB; + z0 += ( ( (bits64) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 ); + zMiddleA <<= 32; + z1 += zMiddleA; + z0 += ( z1 < zMiddleA ); + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Multiplies the 128-bit value formed by concatenating `a0' and `a1' by `b' to +obtain a 192-bit product. The product is broken into three 64-bit pieces +which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and +`z2Ptr'. 
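mul64To128() above builds the 128-bit product from four 32x32 partial products. A compact way to sanity-check that decomposition is the identity (2^64 - 1)^2 = 2^128 - 2^65 + 1, i.e. high word 0xFFFFFFFFFFFFFFFE and low word 0x0000000000000001. The stand-alone restatement below follows the same splitting and is illustrative only:

#include <assert.h>
#include <stdint.h>

static void mul64_to_128(uint64_t a, uint64_t b, uint64_t *z0, uint64_t *z1)
{
  uint32_t aLow = (uint32_t)a, aHigh = (uint32_t)(a >> 32);
  uint32_t bLow = (uint32_t)b, bHigh = (uint32_t)(b >> 32);
  uint64_t lo, midA, midB, hi;

  lo   = (uint64_t)aLow  * bLow;
  midA = (uint64_t)aLow  * bHigh;
  midB = (uint64_t)aHigh * bLow;
  hi   = (uint64_t)aHigh * bHigh;

  midA += midB;                                   /* may carry into the high word */
  hi   += ((uint64_t)(midA < midB) << 32) + (midA >> 32);
  midA <<= 32;
  lo   += midA;
  hi   += (lo < midA);

  *z0 = hi;
  *z1 = lo;
}

int main(void)
{
  uint64_t hi, lo;

  mul64_to_128(0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, &hi, &lo);
  assert(hi == 0xFFFFFFFFFFFFFFFEull && lo == 1);  /* (2^64 - 1)^2 */
  return 0;
}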
+------------------------------------------------------------------------------- +*/ +INLINE void + mul128By64To192( + bits64 a0, + bits64 a1, + bits64 b, + bits64 *z0Ptr, + bits64 *z1Ptr, + bits64 *z2Ptr + ) +{ + bits64 z0, z1, z2, more1; + + mul64To128( a1, b, &z1, &z2 ); + mul64To128( a0, b, &z0, &more1 ); + add128( z0, more1, 0, z1, &z0, &z1 ); + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Multiplies the 128-bit value formed by concatenating `a0' and `a1' to the +128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit +product. The product is broken into four 64-bit pieces which are stored at +the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'. +------------------------------------------------------------------------------- +*/ +INLINE void + mul128To256( + bits64 a0, + bits64 a1, + bits64 b0, + bits64 b1, + bits64 *z0Ptr, + bits64 *z1Ptr, + bits64 *z2Ptr, + bits64 *z3Ptr + ) +{ + bits64 z0, z1, z2, z3; + bits64 more1, more2; + + mul64To128( a1, b1, &z2, &z3 ); + mul64To128( a1, b0, &z1, &more2 ); + add128( z1, more2, 0, z2, &z1, &z2 ); + mul64To128( a0, b0, &z0, &more1 ); + add128( z0, more1, 0, z1, &z0, &z1 ); + mul64To128( a0, b1, &more1, &more2 ); + add128( more1, more2, 0, z2, &more1, &z2 ); + add128( z0, z1, 0, more1, &z0, &z1 ); + *z3Ptr = z3; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/* +------------------------------------------------------------------------------- +Returns an approximation to the 64-bit integer quotient obtained by dividing +`b' into the 128-bit value formed by concatenating `a0' and `a1'. The +divisor `b' must be at least 2^63. If q is the exact quotient truncated +toward zero, the approximation returned lies between q and q + 2 inclusive. +If the exact quotient q is larger than 64 bits, the maximum positive 64-bit +unsigned integer is returned. +------------------------------------------------------------------------------- +*/ +static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b ) +{ + bits64 b0, b1; + bits64 rem0, rem1, term0, term1; + bits64 z; + if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF ); + b0 = b>>32; + z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32; + mul64To128( b, z, &term0, &term1 ); + sub128( a0, a1, term0, term1, &rem0, &rem1 ); + while ( ( (sbits64) rem0 ) < 0 ) { + z -= LIT64( 0x100000000 ); + b1 = b<<32; + add128( rem0, rem1, b0, b1, &rem0, &rem1 ); + } + rem0 = ( rem0<<32 ) | ( rem1>>32 ); + z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns an approximation to the square root of the 32-bit significand given +by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of +`aExp' (the least significant bit) is 1, the integer returned approximates +2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp' +is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either +case, the approximation returned lies strictly within +/-2 of the exact +value. 
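For orientation, a sketch of the reference value that estimateSqrt32 approximates to within +/-2 (illustrative only; it uses libm, which the kernel code itself cannot, and uint32_t/int16_t stand in for bits32/int16):

    #include <math.h>
    #include <stdint.h>

    static uint32_t sqrt32_reference( int16_t aExp, uint32_t a )
    {
        /* odd exponent: 2^31 * sqrt( a / 2^31 );  even: 2^31 * sqrt( a / 2^30 ) */
        double scale = ( aExp & 1 ) ? 2147483648.0 : 1073741824.0;
        double r = 2147483648.0 * sqrt( (double) a / scale );
        return ( r >= 4294967295.0 ) ? 0xFFFFFFFFu : (uint32_t) r;
    }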
+------------------------------------------------------------------------------- +*/ +static bits32 estimateSqrt32( int16 aExp, bits32 a ) +{ + static const bits16 sqrtOddAdjustments[] = { + 0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0, + 0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67 + }; + static const bits16 sqrtEvenAdjustments[] = { + 0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E, + 0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002 + }; + int8 index; + bits32 z; + + index = ( a>>27 ) & 15; + if ( aExp & 1 ) { + z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ]; + z = ( ( a / z )<<14 ) + ( z<<15 ); + a >>= 1; + } + else { + z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ]; + z = a / z + z; + z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 ); + if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 ); + } + return ( (bits32) ( ( ( (bits64) a )<<31 ) / z ) ) + ( z>>1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns the number of leading 0 bits before the most-significant 1 bit +of `a'. If `a' is zero, 32 is returned. +------------------------------------------------------------------------------- +*/ +static int8 countLeadingZeros32( bits32 a ) +{ + static const int8 countLeadingZerosHigh[] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + int8 shiftCount; + + shiftCount = 0; + if ( a < 0x10000 ) { + shiftCount += 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + shiftCount += 8; + a <<= 8; + } + shiftCount += countLeadingZerosHigh[ a>>24 ]; + return shiftCount; + +} + +/* +------------------------------------------------------------------------------- +Returns the number of leading 0 bits before the most-significant 1 bit +of `a'. If `a' is zero, 64 is returned. +------------------------------------------------------------------------------- +*/ +static int8 countLeadingZeros64( bits64 a ) +{ + int8 shiftCount; + + shiftCount = 0; + if ( a < ( (bits64) 1 )<<32 ) { + shiftCount += 32; + } + else { + a >>= 32; + } + shiftCount += countLeadingZeros32( a ); + return shiftCount; + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' +is equal to the 128-bit value formed by concatenating `b0' and `b1'. +Otherwise, returns 0. 
+------------------------------------------------------------------------------- +*/ +INLINE flag eq128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) +{ + + return ( a0 == b0 ) && ( a1 == b1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less +than or equal to the 128-bit value formed by concatenating `b0' and `b1'. +Otherwise, returns 0. +------------------------------------------------------------------------------- +*/ +INLINE flag le128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) +{ + + return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 <= b1 ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less +than the 128-bit value formed by concatenating `b0' and `b1'. Otherwise, +returns 0. +------------------------------------------------------------------------------- +*/ +INLINE flag lt128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) +{ + + return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 < b1 ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is +not equal to the 128-bit value formed by concatenating `b0' and `b1'. +Otherwise, returns 0. +------------------------------------------------------------------------------- +*/ +INLINE flag ne128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) +{ + + return ( a0 != b0 ) || ( a1 != b1 ); + +} + diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat-specialize linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat-specialize --- linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat-specialize 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat-specialize 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,366 @@ + +/* +=============================================================================== + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/softfloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these three paragraphs for those parts of +this code that are retained. 
+ +=============================================================================== +*/ + +/* +------------------------------------------------------------------------------- +Underflow tininess-detection mode, statically initialized to default value. +(The declaration in `softfloat.h' must match the `int8' type here.) +------------------------------------------------------------------------------- +*/ +int8 float_detect_tininess = float_tininess_after_rounding; + +/* +------------------------------------------------------------------------------- +Raises the exceptions specified by `flags'. Floating-point traps can be +defined here if desired. It is currently not possible for such a trap to +substitute a result value. If traps are not implemented, this routine +should be simply `float_exception_flags |= flags;'. + +ScottB: November 4, 1998 +Moved this function out of softfloat-specialize into fpmodule.c. +This effectively isolates all the changes required for integrating with the +Linux kernel into fpmodule.c. Porting to NetBSD should only require modifying +fpmodule.c to integrate with the NetBSD kernel (I hope!). +------------------------------------------------------------------------------- +void float_raise( int8 flags ) +{ + float_exception_flags |= flags; +} +*/ + +/* +------------------------------------------------------------------------------- +Internal canonical NaN format. +------------------------------------------------------------------------------- +*/ +typedef struct { + flag sign; + bits64 high, low; +} commonNaNT; + +/* +------------------------------------------------------------------------------- +The pattern for a default generated single-precision NaN. +------------------------------------------------------------------------------- +*/ +#define float32_default_nan 0xFFFFFFFF + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is a NaN; +otherwise returns 0. +------------------------------------------------------------------------------- +*/ +flag float32_is_nan( float32 a ) +{ + + return ( 0xFF000000 < (bits32) ( a<<1 ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is a signaling +NaN; otherwise returns 0. +------------------------------------------------------------------------------- +*/ +flag float32_is_signaling_nan( float32 a ) +{ + + return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the single-precision floating-point NaN +`a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +exception is raised. +------------------------------------------------------------------------------- +*/ +static commonNaNT float32ToCommonNaN( float32 a ) +{ + commonNaNT z; + + if ( float32_is_signaling_nan( a ) ) float_raise( float_flag_invalid ); + z.sign = a>>31; + z.low = 0; + z.high = ( (bits64) a )<<41; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the canonical NaN `a' to the single- +precision floating-point format. 
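The predicates above boil down to the usual single-precision NaN layout: all eight exponent bits set, nonzero fraction, with fraction bit 22 acting as the quiet bit. A small illustration (example bit patterns, not values taken from the patch):

    #include <stdint.h>
    #include <assert.h>

    static void float32_nan_layout_example( void )
    {
        uint32_t quiet_nan     = 0x7FC00001;   /* exponent 0xFF, bit 22 set   */
        uint32_t signaling_nan = 0x7F800001;   /* exponent 0xFF, bit 22 clear */

        assert( ( ( quiet_nan>>22 )     & 0x1FF ) == 0x1FF );  /* quiet     */
        assert( ( ( signaling_nan>>22 ) & 0x1FF ) == 0x1FE );  /* signaling */
    }

The default NaN chosen here, 0xFFFFFFFF, simply sets every bit.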
+------------------------------------------------------------------------------- +*/ +static float32 commonNaNToFloat32( commonNaNT a ) +{ + + return ( ( (bits32) a.sign )<<31 ) | 0x7FC00000 | ( a.high>>41 ); + +} + +/* +------------------------------------------------------------------------------- +Takes two single-precision floating-point values `a' and `b', one of which +is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +signaling NaN, the invalid exception is raised. +------------------------------------------------------------------------------- +*/ +static float32 propagateFloat32NaN( float32 a, float32 b ) +{ + flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; + + aIsNaN = float32_is_nan( a ); + aIsSignalingNaN = float32_is_signaling_nan( a ); + bIsNaN = float32_is_nan( b ); + bIsSignalingNaN = float32_is_signaling_nan( b ); + a |= 0x00400000; + b |= 0x00400000; + if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid ); + if ( aIsNaN ) { + return ( aIsSignalingNaN & bIsNaN ) ? b : a; + } + else { + return b; + } + +} + +/* +------------------------------------------------------------------------------- +The pattern for a default generated double-precision NaN. +------------------------------------------------------------------------------- +*/ +#define float64_default_nan LIT64( 0xFFFFFFFFFFFFFFFF ) + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is a NaN; +otherwise returns 0. +------------------------------------------------------------------------------- +*/ +flag float64_is_nan( float64 a ) +{ + + return ( LIT64( 0xFFE0000000000000 ) < (bits64) ( a<<1 ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is a signaling +NaN; otherwise returns 0. +------------------------------------------------------------------------------- +*/ +flag float64_is_signaling_nan( float64 a ) +{ + + return + ( ( ( a>>51 ) & 0xFFF ) == 0xFFE ) + && ( a & LIT64( 0x0007FFFFFFFFFFFF ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the double-precision floating-point NaN +`a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +exception is raised. +------------------------------------------------------------------------------- +*/ +static commonNaNT float64ToCommonNaN( float64 a ) +{ + commonNaNT z; + + if ( float64_is_signaling_nan( a ) ) float_raise( float_flag_invalid ); + z.sign = a>>63; + z.low = 0; + z.high = a<<12; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the canonical NaN `a' to the double- +precision floating-point format. +------------------------------------------------------------------------------- +*/ +static float64 commonNaNToFloat64( commonNaNT a ) +{ + + return + ( ( (bits64) a.sign )<<63 ) + | LIT64( 0x7FF8000000000000 ) + | ( a.high>>12 ); + +} + +/* +------------------------------------------------------------------------------- +Takes two double-precision floating-point values `a' and `b', one of which +is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +signaling NaN, the invalid exception is raised. 
+------------------------------------------------------------------------------- +*/ +static float64 propagateFloat64NaN( float64 a, float64 b ) +{ + flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; + + aIsNaN = float64_is_nan( a ); + aIsSignalingNaN = float64_is_signaling_nan( a ); + bIsNaN = float64_is_nan( b ); + bIsSignalingNaN = float64_is_signaling_nan( b ); + a |= LIT64( 0x0008000000000000 ); + b |= LIT64( 0x0008000000000000 ); + if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid ); + if ( aIsNaN ) { + return ( aIsSignalingNaN & bIsNaN ) ? b : a; + } + else { + return b; + } + +} + +#ifdef FLOATX80 + +/* +------------------------------------------------------------------------------- +The pattern for a default generated extended double-precision NaN. The +`high' and `low' values hold the most- and least-significant bits, +respectively. +------------------------------------------------------------------------------- +*/ +#define floatx80_default_nan_high 0xFFFF +#define floatx80_default_nan_low LIT64( 0xFFFFFFFFFFFFFFFF ) + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is a +NaN; otherwise returns 0. +------------------------------------------------------------------------------- +*/ +flag floatx80_is_nan( floatx80 a ) +{ + + return ( ( a.high & 0x7FFF ) == 0x7FFF ) && (bits64) ( a.low<<1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is a +signaling NaN; otherwise returns 0. +------------------------------------------------------------------------------- +*/ +flag floatx80_is_signaling_nan( floatx80 a ) +{ + //register int lr; + bits64 aLow; + + //__asm__("mov %0, lr" : : "g" (lr)); + //fp_printk("floatx80_is_signalling_nan() called from 0x%08x\n",lr); + aLow = a.low & ~ LIT64( 0x4000000000000000 ); + return + ( ( a.high & 0x7FFF ) == 0x7FFF ) + && (bits64) ( aLow<<1 ) + && ( a.low == aLow ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the extended double-precision floating- +point NaN `a' to the canonical NaN format. If `a' is a signaling NaN, the +invalid exception is raised. +------------------------------------------------------------------------------- +*/ +static commonNaNT floatx80ToCommonNaN( floatx80 a ) +{ + commonNaNT z; + + if ( floatx80_is_signaling_nan( a ) ) float_raise( float_flag_invalid ); + z.sign = a.high>>15; + z.low = 0; + z.high = a.low<<1; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the canonical NaN `a' to the extended +double-precision floating-point format. +------------------------------------------------------------------------------- +*/ +static floatx80 commonNaNToFloatx80( commonNaNT a ) +{ + floatx80 z; + + z.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 ); + z.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF; + return z; + +} + +/* +------------------------------------------------------------------------------- +Takes two extended double-precision floating-point values `a' and `b', one +of which is a NaN, and returns the appropriate NaN result. If either `a' or +`b' is a signaling NaN, the invalid exception is raised. 
+------------------------------------------------------------------------------- +*/ +static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b ) +{ + flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; + + aIsNaN = floatx80_is_nan( a ); + aIsSignalingNaN = floatx80_is_signaling_nan( a ); + bIsNaN = floatx80_is_nan( b ); + bIsSignalingNaN = floatx80_is_signaling_nan( b ); + a.low |= LIT64( 0xC000000000000000 ); + b.low |= LIT64( 0xC000000000000000 ); + if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid ); + if ( aIsNaN ) { + return ( aIsSignalingNaN & bIsNaN ) ? b : a; + } + else { + return b; + } + +} + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat.c linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat.c --- linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat.c 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat.c 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,3439 @@ +/* +=============================================================================== + +This C source file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/softfloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these three paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +#include "fpa11.h" +#include "milieu.h" +#include "softfloat.h" + +/* +------------------------------------------------------------------------------- +Floating-point rounding mode, extended double-precision rounding precision, +and exception flags. +------------------------------------------------------------------------------- +*/ +int8 float_rounding_mode = float_round_nearest_even; +int8 floatx80_rounding_precision = 80; +int8 float_exception_flags; + +/* +------------------------------------------------------------------------------- +Primitive arithmetic functions, including multi-word arithmetic, and +division and square root approximations. (Can be specialized to target if +desired.) 
+------------------------------------------------------------------------------- +*/ +#include "softfloat-macros" + +/* +------------------------------------------------------------------------------- +Functions and definitions to determine: (1) whether tininess for underflow +is detected before or after rounding by default, (2) what (if anything) +happens when exceptions are raised, (3) how signaling NaNs are distinguished +from quiet NaNs, (4) the default generated quiet NaNs, and (5) how NaNs +are propagated from function inputs to output. These details are target- +specific. +------------------------------------------------------------------------------- +*/ +#include "softfloat-specialize" + +/* +------------------------------------------------------------------------------- +Takes a 64-bit fixed-point value `absZ' with binary point between bits 6 +and 7, and returns the properly rounded 32-bit integer corresponding to the +input. If `zSign' is nonzero, the input is negated before being converted +to an integer. Bit 63 of `absZ' must be zero. Ordinarily, the fixed-point +input is simply rounded to an integer, with the inexact exception raised if +the input cannot be represented exactly as an integer. If the fixed-point +input is too large, however, the invalid exception is raised and the largest +positive or negative integer is returned. +------------------------------------------------------------------------------- +*/ +static int32 roundAndPackInt32( flag zSign, bits64 absZ ) +{ + int8 roundingMode; + flag roundNearestEven; + int8 roundIncrement, roundBits; + int32 z; + + roundingMode = float_rounding_mode; + roundNearestEven = ( roundingMode == float_round_nearest_even ); + roundIncrement = 0x40; + if ( ! roundNearestEven ) { + if ( roundingMode == float_round_to_zero ) { + roundIncrement = 0; + } + else { + roundIncrement = 0x7F; + if ( zSign ) { + if ( roundingMode == float_round_up ) roundIncrement = 0; + } + else { + if ( roundingMode == float_round_down ) roundIncrement = 0; + } + } + } + roundBits = absZ & 0x7F; + absZ = ( absZ + roundIncrement )>>7; + absZ &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); + z = absZ; + if ( zSign ) z = - z; + if ( ( absZ>>32 ) || ( z && ( ( z < 0 ) ^ zSign ) ) ) { + float_exception_flags |= float_flag_invalid; + return zSign ? 0x80000000 : 0x7FFFFFFF; + } + if ( roundBits ) float_exception_flags |= float_flag_inexact; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns the fraction bits of the single-precision floating-point value `a'. +------------------------------------------------------------------------------- +*/ +INLINE bits32 extractFloat32Frac( float32 a ) +{ + + return a & 0x007FFFFF; + +} + +/* +------------------------------------------------------------------------------- +Returns the exponent bits of the single-precision floating-point value `a'. +------------------------------------------------------------------------------- +*/ +INLINE int16 extractFloat32Exp( float32 a ) +{ + + return ( a>>23 ) & 0xFF; + +} + +/* +------------------------------------------------------------------------------- +Returns the sign bit of the single-precision floating-point value `a'. 
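A worked example for the three single-precision field extractors (sign below, fraction and exponent above): the encoding 0xC0490FDB, roughly -3.1415927f, splits as follows.

    #include <stdint.h>
    #include <assert.h>

    static void extract_float32_example( void )
    {
        uint32_t a = 0xC0490FDB;                     /* about -3.1415927f */

        assert( ( a & 0x007FFFFF )   == 0x490FDB );  /* fraction bits     */
        assert( ( ( a>>23 ) & 0xFF ) == 0x80 );      /* biased exponent   */
        assert( ( a>>31 )            == 1 );         /* sign bit          */
    }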
+------------------------------------------------------------------------------- +*/ +INLINE flag extractFloat32Sign( float32 a ) +{ + + return a>>31; + +} + +/* +------------------------------------------------------------------------------- +Normalizes the subnormal single-precision floating-point value represented +by the denormalized significand `aSig'. The normalized exponent and +significand are stored at the locations pointed to by `zExpPtr' and +`zSigPtr', respectively. +------------------------------------------------------------------------------- +*/ +static void + normalizeFloat32Subnormal( bits32 aSig, int16 *zExpPtr, bits32 *zSigPtr ) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32( aSig ) - 8; + *zSigPtr = aSig<>7; + zSig &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); + if ( zSig == 0 ) zExp = 0; + return packFloat32( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Takes an abstract floating-point value having sign `zSign', exponent `zExp', +and significand `zSig', and returns the proper single-precision floating- +point value corresponding to the abstract input. This routine is just like +`roundAndPackFloat32' except that `zSig' does not have to be normalized in +any way. In all cases, `zExp' must be 1 less than the ``true'' floating- +point exponent. +------------------------------------------------------------------------------- +*/ +static float32 + normalizeRoundAndPackFloat32( flag zSign, int16 zExp, bits32 zSig ) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32( zSig ) - 1; + return roundAndPackFloat32( zSign, zExp - shiftCount, zSig<>52 ) & 0x7FF; + +} + +/* +------------------------------------------------------------------------------- +Returns the sign bit of the double-precision floating-point value `a'. +------------------------------------------------------------------------------- +*/ +INLINE flag extractFloat64Sign( float64 a ) +{ + + return a>>63; + +} + +/* +------------------------------------------------------------------------------- +Normalizes the subnormal double-precision floating-point value represented +by the denormalized significand `aSig'. The normalized exponent and +significand are stored at the locations pointed to by `zExpPtr' and +`zSigPtr', respectively. +------------------------------------------------------------------------------- +*/ +static void + normalizeFloat64Subnormal( bits64 aSig, int16 *zExpPtr, bits64 *zSigPtr ) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros64( aSig ) - 11; + *zSigPtr = aSig<>10; + zSig &= ~ ( ( ( roundBits ^ 0x200 ) == 0 ) & roundNearestEven ); + if ( zSig == 0 ) zExp = 0; + return packFloat64( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Takes an abstract floating-point value having sign `zSign', exponent `zExp', +and significand `zSig', and returns the proper double-precision floating- +point value corresponding to the abstract input. This routine is just like +`roundAndPackFloat64' except that `zSig' does not have to be normalized in +any way. In all cases, `zExp' must be 1 less than the ``true'' floating- +point exponent. 
+------------------------------------------------------------------------------- +*/ +static float64 + normalizeRoundAndPackFloat64( flag zSign, int16 zExp, bits64 zSig ) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros64( zSig ) - 1; + return roundAndPackFloat64( zSign, zExp - shiftCount, zSig<>15; + +} + +/* +------------------------------------------------------------------------------- +Normalizes the subnormal extended double-precision floating-point value +represented by the denormalized significand `aSig'. The normalized exponent +and significand are stored at the locations pointed to by `zExpPtr' and +`zSigPtr', respectively. +------------------------------------------------------------------------------- +*/ +static void + normalizeFloatx80Subnormal( bits64 aSig, int32 *zExpPtr, bits64 *zSigPtr ) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros64( aSig ); + *zSigPtr = aSig<>( - shiftCount ); + if ( (bits32) ( aSig<<( shiftCount & 31 ) ) ) { + float_exception_flags |= float_flag_inexact; + } + return aSign ? - z : z; + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the single-precision floating-point value +`a' to the double-precision floating-point format. The conversion is +performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic. +------------------------------------------------------------------------------- +*/ +float64 float32_to_float64( float32 a ) +{ + flag aSign; + int16 aExp; + bits32 aSig; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + if ( aExp == 0xFF ) { + if ( aSig ) return commonNaNToFloat64( float32ToCommonNaN( a ) ); + return packFloat64( aSign, 0x7FF, 0 ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat64( aSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + --aExp; + } + return packFloat64( aSign, aExp + 0x380, ( (bits64) aSig )<<29 ); + +} + +#ifdef FLOATX80 + +/* +------------------------------------------------------------------------------- +Returns the result of converting the single-precision floating-point value +`a' to the extended double-precision floating-point format. The conversion +is performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic. +------------------------------------------------------------------------------- +*/ +floatx80 float32_to_floatx80( float32 a ) +{ + flag aSign; + int16 aExp; + bits32 aSig; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + if ( aExp == 0xFF ) { + if ( aSig ) return commonNaNToFloatx80( float32ToCommonNaN( a ) ); + return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + aSig |= 0x00800000; + return packFloatx80( aSign, aExp + 0x3F80, ( (bits64) aSig )<<40 ); + +} + +#endif + +/* +------------------------------------------------------------------------------- +Rounds the single-precision floating-point value `a' to an integer, and +returns the result as a single-precision floating-point value. The +operation is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +float32 float32_round_to_int( float32 a ) +{ + flag aSign; + int16 aExp; + bits32 lastBitMask, roundBitsMask; + int8 roundingMode; + float32 z; + + aExp = extractFloat32Exp( a ); + if ( 0x96 <= aExp ) { + if ( ( aExp == 0xFF ) && extractFloat32Frac( a ) ) { + return propagateFloat32NaN( a, a ); + } + return a; + } + if ( aExp <= 0x7E ) { + if ( (bits32) ( a<<1 ) == 0 ) return a; + float_exception_flags |= float_flag_inexact; + aSign = extractFloat32Sign( a ); + switch ( float_rounding_mode ) { + case float_round_nearest_even: + if ( ( aExp == 0x7E ) && extractFloat32Frac( a ) ) { + return packFloat32( aSign, 0x7F, 0 ); + } + break; + case float_round_down: + return aSign ? 0xBF800000 : 0; + case float_round_up: + return aSign ? 0x80000000 : 0x3F800000; + } + return packFloat32( aSign, 0, 0 ); + } + lastBitMask = 1; + lastBitMask <<= 0x96 - aExp; + roundBitsMask = lastBitMask - 1; + z = a; + roundingMode = float_rounding_mode; + if ( roundingMode == float_round_nearest_even ) { + z += lastBitMask>>1; + if ( ( z & roundBitsMask ) == 0 ) z &= ~ lastBitMask; + } + else if ( roundingMode != float_round_to_zero ) { + if ( extractFloat32Sign( z ) ^ ( roundingMode == float_round_up ) ) { + z += roundBitsMask; + } + } + z &= ~ roundBitsMask; + if ( z != a ) float_exception_flags |= float_flag_inexact; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns the result of adding the absolute values of the single-precision +floating-point values `a' and `b'. If `zSign' is true, the sum is negated +before being returned. `zSign' is ignored if the result is a NaN. The +addition is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +static float32 addFloat32Sigs( float32 a, float32 b, flag zSign ) +{ + int16 aExp, bExp, zExp; + bits32 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + expDiff = aExp - bExp; + aSig <<= 6; + bSig <<= 6; + if ( 0 < expDiff ) { + if ( aExp == 0xFF ) { + if ( aSig ) return propagateFloat32NaN( a, b ); + return a; + } + if ( bExp == 0 ) { + --expDiff; + } + else { + bSig |= 0x20000000; + } + shift32RightJamming( bSig, expDiff, &bSig ); + zExp = aExp; + } + else if ( expDiff < 0 ) { + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b ); + return packFloat32( zSign, 0xFF, 0 ); + } + if ( aExp == 0 ) { + ++expDiff; + } + else { + aSig |= 0x20000000; + } + shift32RightJamming( aSig, - expDiff, &aSig ); + zExp = bExp; + } + else { + if ( aExp == 0xFF ) { + if ( aSig | bSig ) return propagateFloat32NaN( a, b ); + return a; + } + if ( aExp == 0 ) return packFloat32( zSign, 0, ( aSig + bSig )>>6 ); + zSig = 0x40000000 + aSig + bSig; + zExp = aExp; + goto roundAndPack; + } + aSig |= 0x20000000; + zSig = ( aSig + bSig )<<1; + --zExp; + if ( (sbits32) zSig < 0 ) { + zSig = aSig + bSig; + ++zExp; + } + roundAndPack: + return roundAndPackFloat32( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of subtracting the absolute values of the single- +precision floating-point values `a' and `b'. If `zSign' is true, the +difference is negated before being returned. `zSign' is ignored if the +result is a NaN. 
The subtraction is performed according to the IEC/IEEE +Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +static float32 subFloat32Sigs( float32 a, float32 b, flag zSign ) +{ + int16 aExp, bExp, zExp; + bits32 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + expDiff = aExp - bExp; + aSig <<= 7; + bSig <<= 7; + if ( 0 < expDiff ) goto aExpBigger; + if ( expDiff < 0 ) goto bExpBigger; + if ( aExp == 0xFF ) { + if ( aSig | bSig ) return propagateFloat32NaN( a, b ); + float_raise( float_flag_invalid ); + return float32_default_nan; + } + if ( aExp == 0 ) { + aExp = 1; + bExp = 1; + } + if ( bSig < aSig ) goto aBigger; + if ( aSig < bSig ) goto bBigger; + return packFloat32( float_rounding_mode == float_round_down, 0, 0 ); + bExpBigger: + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b ); + return packFloat32( zSign ^ 1, 0xFF, 0 ); + } + if ( aExp == 0 ) { + ++expDiff; + } + else { + aSig |= 0x40000000; + } + shift32RightJamming( aSig, - expDiff, &aSig ); + bSig |= 0x40000000; + bBigger: + zSig = bSig - aSig; + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if ( aExp == 0xFF ) { + if ( aSig ) return propagateFloat32NaN( a, b ); + return a; + } + if ( bExp == 0 ) { + --expDiff; + } + else { + bSig |= 0x40000000; + } + shift32RightJamming( bSig, expDiff, &bSig ); + aSig |= 0x40000000; + aBigger: + zSig = aSig - bSig; + zExp = aExp; + normalizeRoundAndPack: + --zExp; + return normalizeRoundAndPackFloat32( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of adding the single-precision floating-point values `a' +and `b'. The operation is performed according to the IEC/IEEE Standard for +Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +float32 float32_add( float32 a, float32 b ) +{ + flag aSign, bSign; + + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign == bSign ) { + return addFloat32Sigs( a, b, aSign ); + } + else { + return subFloat32Sigs( a, b, aSign ); + } + +} + +/* +------------------------------------------------------------------------------- +Returns the result of subtracting the single-precision floating-point values +`a' and `b'. The operation is performed according to the IEC/IEEE Standard +for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +float32 float32_sub( float32 a, float32 b ) +{ + flag aSign, bSign; + + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign == bSign ) { + return subFloat32Sigs( a, b, aSign ); + } + else { + return addFloat32Sigs( a, b, aSign ); + } + +} + +/* +------------------------------------------------------------------------------- +Returns the result of multiplying the single-precision floating-point values +`a' and `b'. The operation is performed according to the IEC/IEEE Standard +for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +float32 float32_mul( float32 a, float32 b ) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits32 aSig, bSig; + bits64 zSig64; + bits32 zSig; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + bSign = extractFloat32Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0xFF ) { + if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { + return propagateFloat32NaN( a, b ); + } + if ( ( bExp | bSig ) == 0 ) { + float_raise( float_flag_invalid ); + return float32_default_nan; + } + return packFloat32( zSign, 0xFF, 0 ); + } + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b ); + if ( ( aExp | aSig ) == 0 ) { + float_raise( float_flag_invalid ); + return float32_default_nan; + } + return packFloat32( zSign, 0xFF, 0 ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) return packFloat32( zSign, 0, 0 ); + normalizeFloat32Subnormal( bSig, &bExp, &bSig ); + } + zExp = aExp + bExp - 0x7F; + aSig = ( aSig | 0x00800000 )<<7; + bSig = ( bSig | 0x00800000 )<<8; + shift64RightJamming( ( (bits64) aSig ) * bSig, 32, &zSig64 ); + zSig = zSig64; + if ( 0 <= (sbits32) ( zSig<<1 ) ) { + zSig <<= 1; + --zExp; + } + return roundAndPackFloat32( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of dividing the single-precision floating-point value `a' +by the corresponding value `b'. The operation is performed according to the +IEC/IEEE Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +float32 float32_div( float32 a, float32 b ) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits32 aSig, bSig, zSig; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + bSign = extractFloat32Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0xFF ) { + if ( aSig ) return propagateFloat32NaN( a, b ); + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b ); + float_raise( float_flag_invalid ); + return float32_default_nan; + } + return packFloat32( zSign, 0xFF, 0 ); + } + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b ); + return packFloat32( zSign, 0, 0 ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + if ( ( aExp | aSig ) == 0 ) { + float_raise( float_flag_invalid ); + return float32_default_nan; + } + float_raise( float_flag_divbyzero ); + return packFloat32( zSign, 0xFF, 0 ); + } + normalizeFloat32Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + zExp = aExp - bExp + 0x7D; + aSig = ( aSig | 0x00800000 )<<7; + bSig = ( bSig | 0x00800000 )<<8; + if ( bSig <= ( aSig + aSig ) ) { + aSig >>= 1; + ++zExp; + } + zSig = ( ( (bits64) aSig )<<32 ) / bSig; + if ( ( zSig & 0x3F ) == 0 ) { + zSig |= ( ( (bits64) bSig ) * zSig != ( (bits64) aSig )<<32 ); + } + return roundAndPackFloat32( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the remainder of the single-precision floating-point value `a' +with respect to the corresponding value `b'. The operation is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
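One point worth illustrating before the implementation: the IEC/IEEE remainder rounds the quotient to the nearest integer (ties to even) rather than truncating it, so the result can be negative even for positive operands. A throwaway comparison against libm, purely for illustration:

    #include <math.h>
    #include <stdio.h>

    int main( void )
    {
        printf( "fmodf(5,3)      = %g\n", fmodf( 5.0f, 3.0f ) );      /* 2:  truncated quotient    */
        printf( "remainderf(5,3) = %g\n", remainderf( 5.0f, 3.0f ) ); /* -1: quotient rounded to 2 */
        return 0;
    }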
+------------------------------------------------------------------------------- +*/ +float32 float32_rem( float32 a, float32 b ) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, expDiff; + bits32 aSig, bSig; + bits32 q; + bits64 aSig64, bSig64, q64; + bits32 alternateASig; + sbits32 sigMean; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + bSign = extractFloat32Sign( b ); + if ( aExp == 0xFF ) { + if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { + return propagateFloat32NaN( a, b ); + } + float_raise( float_flag_invalid ); + return float32_default_nan; + } + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b ); + return a; + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + float_raise( float_flag_invalid ); + return float32_default_nan; + } + normalizeFloat32Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return a; + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + expDiff = aExp - bExp; + aSig |= 0x00800000; + bSig |= 0x00800000; + if ( expDiff < 32 ) { + aSig <<= 8; + bSig <<= 8; + if ( expDiff < 0 ) { + if ( expDiff < -1 ) return a; + aSig >>= 1; + } + q = ( bSig <= aSig ); + if ( q ) aSig -= bSig; + if ( 0 < expDiff ) { + q = ( ( (bits64) aSig )<<32 ) / bSig; + q >>= 32 - expDiff; + bSig >>= 2; + aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q; + } + else { + aSig >>= 2; + bSig >>= 2; + } + } + else { + if ( bSig <= aSig ) aSig -= bSig; + aSig64 = ( (bits64) aSig )<<40; + bSig64 = ( (bits64) bSig )<<40; + expDiff -= 64; + while ( 0 < expDiff ) { + q64 = estimateDiv128To64( aSig64, 0, bSig64 ); + q64 = ( 2 < q64 ) ? q64 - 2 : 0; + aSig64 = - ( ( bSig * q64 )<<38 ); + expDiff -= 62; + } + expDiff += 64; + q64 = estimateDiv128To64( aSig64, 0, bSig64 ); + q64 = ( 2 < q64 ) ? q64 - 2 : 0; + q = q64>>( 64 - expDiff ); + bSig <<= 6; + aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q; + } + do { + alternateASig = aSig; + ++q; + aSig -= bSig; + } while ( 0 <= (sbits32) aSig ); + sigMean = aSig + alternateASig; + if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) { + aSig = alternateASig; + } + zSign = ( (sbits32) aSig < 0 ); + if ( zSign ) aSig = - aSig; + return normalizeRoundAndPackFloat32( aSign ^ zSign, bExp, aSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the square root of the single-precision floating-point value `a'. +The operation is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +float32 float32_sqrt( float32 a ) +{ + flag aSign; + int16 aExp, zExp; + bits32 aSig, zSig; + bits64 rem, term; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + if ( aExp == 0xFF ) { + if ( aSig ) return propagateFloat32NaN( a, 0 ); + if ( ! 
aSign ) return a; + float_raise( float_flag_invalid ); + return float32_default_nan; + } + if ( aSign ) { + if ( ( aExp | aSig ) == 0 ) return a; + float_raise( float_flag_invalid ); + return float32_default_nan; + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return 0; + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + zExp = ( ( aExp - 0x7F )>>1 ) + 0x7E; + aSig = ( aSig | 0x00800000 )<<8; + zSig = estimateSqrt32( aExp, aSig ) + 2; + if ( ( zSig & 0x7F ) <= 5 ) { + if ( zSig < 2 ) { + zSig = 0xFFFFFFFF; + } + else { + aSig >>= aExp & 1; + term = ( (bits64) zSig ) * zSig; + rem = ( ( (bits64) aSig )<<32 ) - term; + while ( (sbits64) rem < 0 ) { + --zSig; + rem += ( ( (bits64) zSig )<<1 ) | 1; + } + zSig |= ( rem != 0 ); + } + } + shift32RightJamming( zSig, 1, &zSig ); + return roundAndPackFloat32( 0, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is equal to the +corresponding value `b', and 0 otherwise. The comparison is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float32_eq( float32 a, float32 b ) +{ + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + return ( a == b ) || ( (bits32) ( ( a | b )<<1 ) == 0 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is less than or +equal to the corresponding value `b', and 0 otherwise. The comparison is +performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float32_le( float32 a, float32 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign != bSign ) return aSign || ( (bits32) ( ( a | b )<<1 ) == 0 ); + return ( a == b ) || ( aSign ^ ( a < b ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is less than +the corresponding value `b', and 0 otherwise. The comparison is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
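The comparisons here lean on the fact that, for finite floats of the same sign, the raw bit patterns order the same way as the values when the sign is positive and the opposite way when it is negative, hence the `aSign ^ ( a < b )' form; `(bits32) ( ( a | b )<<1 ) == 0' is the test for +0 and -0, which must compare equal. A small illustration:

    #include <stdint.h>
    #include <assert.h>

    static void float32_ordering_example( void )
    {
        uint32_t one = 0x3F800000, two = 0x40000000;   /*  1.0f,  2.0f */
        uint32_t m1  = 0xBF800000, m2  = 0xC0000000;   /* -1.0f, -2.0f */

        assert( one < two );   /* positive: bit order matches value order    */
        assert( m1 < m2 );     /* negative: bit order is the reverse, so the */
                               /* sign bit flips the sense of the comparison */
    }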
+------------------------------------------------------------------------------- +*/ +flag float32_lt( float32 a, float32 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign != bSign ) return aSign && ( (bits32) ( ( a | b )<<1 ) != 0 ); + return ( a != b ) && ( aSign ^ ( a < b ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is equal to the +corresponding value `b', and 0 otherwise. The invalid exception is raised +if either operand is a NaN. Otherwise, the comparison is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float32_eq_signaling( float32 a, float32 b ) +{ + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + return ( a == b ) || ( (bits32) ( ( a | b )<<1 ) == 0 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is less than or +equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not +cause an exception. Otherwise, the comparison is performed according to the +IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float32_le_quiet( float32 a, float32 b ) +{ + flag aSign, bSign; + //int16 aExp, bExp; + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign != bSign ) return aSign || ( (bits32) ( ( a | b )<<1 ) == 0 ); + return ( a == b ) || ( aSign ^ ( a < b ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the single-precision floating-point value `a' is less than +the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an +exception. Otherwise, the comparison is performed according to the IEC/IEEE +Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +flag float32_lt_quiet( float32 a, float32 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign != bSign ) return aSign && ( (bits32) ( ( a | b )<<1 ) != 0 ); + return ( a != b ) && ( aSign ^ ( a < b ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the double-precision floating-point value +`a' to the 32-bit two's complement integer format. The conversion is +performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic---which means in particular that the conversion is rounded +according to the current rounding mode. If `a' is a NaN, the largest +positive integer is returned. Otherwise, if the conversion overflows, the +largest integer with the same sign as `a' is returned. +------------------------------------------------------------------------------- +*/ +int32 float64_to_int32( float64 a ) +{ + flag aSign; + int16 aExp, shiftCount; + bits64 aSig; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + if ( ( aExp == 0x7FF ) && aSig ) aSign = 0; + if ( aExp ) aSig |= LIT64( 0x0010000000000000 ); + shiftCount = 0x42C - aExp; + if ( 0 < shiftCount ) shift64RightJamming( aSig, shiftCount, &aSig ); + return roundAndPackInt32( aSign, aSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the double-precision floating-point value +`a' to the 32-bit two's complement integer format. The conversion is +performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic, except that the conversion is always rounded toward zero. If +`a' is a NaN, the largest positive integer is returned. Otherwise, if the +conversion overflows, the largest integer with the same sign as `a' is +returned. +------------------------------------------------------------------------------- +*/ +int32 float64_to_int32_round_to_zero( float64 a ) +{ + flag aSign; + int16 aExp, shiftCount; + bits64 aSig, savedASig; + int32 z; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + shiftCount = 0x433 - aExp; + if ( shiftCount < 21 ) { + if ( ( aExp == 0x7FF ) && aSig ) aSign = 0; + goto invalid; + } + else if ( 52 < shiftCount ) { + if ( aExp || aSig ) float_exception_flags |= float_flag_inexact; + return 0; + } + aSig |= LIT64( 0x0010000000000000 ); + savedASig = aSig; + aSig >>= shiftCount; + z = aSig; + if ( aSign ) z = - z; + if ( ( z < 0 ) ^ aSign ) { + invalid: + float_exception_flags |= float_flag_invalid; + return aSign ? 0x80000000 : 0x7FFFFFFF; + } + if ( ( aSig<>= shiftCount; + z = aSig; + if ( aSign ) z = - z; + if ( ( z < 0 ) ^ aSign ) { + invalid: + float_exception_flags |= float_flag_invalid; + return aSign ? 
0x80000000 : 0x7FFFFFFF; + } + if ( ( aSig<>1; + if ( ( z & roundBitsMask ) == 0 ) z &= ~ lastBitMask; + } + else if ( roundingMode != float_round_to_zero ) { + if ( extractFloat64Sign( z ) ^ ( roundingMode == float_round_up ) ) { + z += roundBitsMask; + } + } + z &= ~ roundBitsMask; + if ( z != a ) float_exception_flags |= float_flag_inexact; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns the result of adding the absolute values of the double-precision +floating-point values `a' and `b'. If `zSign' is true, the sum is negated +before being returned. `zSign' is ignored if the result is a NaN. The +addition is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +static float64 addFloat64Sigs( float64 a, float64 b, flag zSign ) +{ + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + bSig = extractFloat64Frac( b ); + bExp = extractFloat64Exp( b ); + expDiff = aExp - bExp; + aSig <<= 9; + bSig <<= 9; + if ( 0 < expDiff ) { + if ( aExp == 0x7FF ) { + if ( aSig ) return propagateFloat64NaN( a, b ); + return a; + } + if ( bExp == 0 ) { + --expDiff; + } + else { + bSig |= LIT64( 0x2000000000000000 ); + } + shift64RightJamming( bSig, expDiff, &bSig ); + zExp = aExp; + } + else if ( expDiff < 0 ) { + if ( bExp == 0x7FF ) { + if ( bSig ) return propagateFloat64NaN( a, b ); + return packFloat64( zSign, 0x7FF, 0 ); + } + if ( aExp == 0 ) { + ++expDiff; + } + else { + aSig |= LIT64( 0x2000000000000000 ); + } + shift64RightJamming( aSig, - expDiff, &aSig ); + zExp = bExp; + } + else { + if ( aExp == 0x7FF ) { + if ( aSig | bSig ) return propagateFloat64NaN( a, b ); + return a; + } + if ( aExp == 0 ) return packFloat64( zSign, 0, ( aSig + bSig )>>9 ); + zSig = LIT64( 0x4000000000000000 ) + aSig + bSig; + zExp = aExp; + goto roundAndPack; + } + aSig |= LIT64( 0x2000000000000000 ); + zSig = ( aSig + bSig )<<1; + --zExp; + if ( (sbits64) zSig < 0 ) { + zSig = aSig + bSig; + ++zExp; + } + roundAndPack: + return roundAndPackFloat64( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of subtracting the absolute values of the double- +precision floating-point values `a' and `b'. If `zSign' is true, the +difference is negated before being returned. `zSign' is ignored if the +result is a NaN. The subtraction is performed according to the IEC/IEEE +Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +static float64 subFloat64Sigs( float64 a, float64 b, flag zSign ) +{ + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + bSig = extractFloat64Frac( b ); + bExp = extractFloat64Exp( b ); + expDiff = aExp - bExp; + aSig <<= 10; + bSig <<= 10; + if ( 0 < expDiff ) goto aExpBigger; + if ( expDiff < 0 ) goto bExpBigger; + if ( aExp == 0x7FF ) { + if ( aSig | bSig ) return propagateFloat64NaN( a, b ); + float_raise( float_flag_invalid ); + return float64_default_nan; + } + if ( aExp == 0 ) { + aExp = 1; + bExp = 1; + } + if ( bSig < aSig ) goto aBigger; + if ( aSig < bSig ) goto bBigger; + return packFloat64( float_rounding_mode == float_round_down, 0, 0 ); + bExpBigger: + if ( bExp == 0x7FF ) { + if ( bSig ) return propagateFloat64NaN( a, b ); + return packFloat64( zSign ^ 1, 0x7FF, 0 ); + } + if ( aExp == 0 ) { + ++expDiff; + } + else { + aSig |= LIT64( 0x4000000000000000 ); + } + shift64RightJamming( aSig, - expDiff, &aSig ); + bSig |= LIT64( 0x4000000000000000 ); + bBigger: + zSig = bSig - aSig; + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if ( aExp == 0x7FF ) { + if ( aSig ) return propagateFloat64NaN( a, b ); + return a; + } + if ( bExp == 0 ) { + --expDiff; + } + else { + bSig |= LIT64( 0x4000000000000000 ); + } + shift64RightJamming( bSig, expDiff, &bSig ); + aSig |= LIT64( 0x4000000000000000 ); + aBigger: + zSig = aSig - bSig; + zExp = aExp; + normalizeRoundAndPack: + --zExp; + return normalizeRoundAndPackFloat64( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of adding the double-precision floating-point values `a' +and `b'. The operation is performed according to the IEC/IEEE Standard for +Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +float64 float64_add( float64 a, float64 b ) +{ + flag aSign, bSign; + + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign == bSign ) { + return addFloat64Sigs( a, b, aSign ); + } + else { + return subFloat64Sigs( a, b, aSign ); + } + +} + +/* +------------------------------------------------------------------------------- +Returns the result of subtracting the double-precision floating-point values +`a' and `b'. The operation is performed according to the IEC/IEEE Standard +for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +float64 float64_sub( float64 a, float64 b ) +{ + flag aSign, bSign; + + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign == bSign ) { + return subFloat64Sigs( a, b, aSign ); + } + else { + return addFloat64Sigs( a, b, aSign ); + } + +} + +/* +------------------------------------------------------------------------------- +Returns the result of multiplying the double-precision floating-point values +`a' and `b'. The operation is performed according to the IEC/IEEE Standard +for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +float64 float64_mul( float64 a, float64 b ) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig0, zSig1; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + bSig = extractFloat64Frac( b ); + bExp = extractFloat64Exp( b ); + bSign = extractFloat64Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0x7FF ) { + if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) { + return propagateFloat64NaN( a, b ); + } + if ( ( bExp | bSig ) == 0 ) { + float_raise( float_flag_invalid ); + return float64_default_nan; + } + return packFloat64( zSign, 0x7FF, 0 ); + } + if ( bExp == 0x7FF ) { + if ( bSig ) return propagateFloat64NaN( a, b ); + if ( ( aExp | aSig ) == 0 ) { + float_raise( float_flag_invalid ); + return float64_default_nan; + } + return packFloat64( zSign, 0x7FF, 0 ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat64( zSign, 0, 0 ); + normalizeFloat64Subnormal( aSig, &aExp, &aSig ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) return packFloat64( zSign, 0, 0 ); + normalizeFloat64Subnormal( bSig, &bExp, &bSig ); + } + zExp = aExp + bExp - 0x3FF; + aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10; + bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11; + mul64To128( aSig, bSig, &zSig0, &zSig1 ); + zSig0 |= ( zSig1 != 0 ); + if ( 0 <= (sbits64) ( zSig0<<1 ) ) { + zSig0 <<= 1; + --zExp; + } + return roundAndPackFloat64( zSign, zExp, zSig0 ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of dividing the double-precision floating-point value `a' +by the corresponding value `b'. The operation is performed according to +the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +float64 float64_div( float64 a, float64 b ) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + bits64 rem0, rem1; + bits64 term0, term1; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + bSig = extractFloat64Frac( b ); + bExp = extractFloat64Exp( b ); + bSign = extractFloat64Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0x7FF ) { + if ( aSig ) return propagateFloat64NaN( a, b ); + if ( bExp == 0x7FF ) { + if ( bSig ) return propagateFloat64NaN( a, b ); + float_raise( float_flag_invalid ); + return float64_default_nan; + } + return packFloat64( zSign, 0x7FF, 0 ); + } + if ( bExp == 0x7FF ) { + if ( bSig ) return propagateFloat64NaN( a, b ); + return packFloat64( zSign, 0, 0 ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + if ( ( aExp | aSig ) == 0 ) { + float_raise( float_flag_invalid ); + return float64_default_nan; + } + float_raise( float_flag_divbyzero ); + return packFloat64( zSign, 0x7FF, 0 ); + } + normalizeFloat64Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat64( zSign, 0, 0 ); + normalizeFloat64Subnormal( aSig, &aExp, &aSig ); + } + zExp = aExp - bExp + 0x3FD; + aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10; + bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11; + if ( bSig <= ( aSig + aSig ) ) { + aSig >>= 1; + ++zExp; + } + zSig = estimateDiv128To64( aSig, 0, bSig ); + if ( ( zSig & 0x1FF ) <= 2 ) { + mul64To128( bSig, zSig, &term0, &term1 ); + sub128( aSig, 0, term0, term1, &rem0, &rem1 ); + while ( (sbits64) rem0 < 0 ) { + --zSig; + add128( rem0, rem1, 0, bSig, &rem0, &rem1 ); + } + zSig |= ( rem1 != 0 ); + } + return roundAndPackFloat64( zSign, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the remainder of the double-precision floating-point value `a' +with respect to the corresponding value `b'. The operation is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +float64 float64_rem( float64 a, float64 b ) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, expDiff; + bits64 aSig, bSig; + bits64 q, alternateASig; + sbits64 sigMean; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + bSig = extractFloat64Frac( b ); + bExp = extractFloat64Exp( b ); + bSign = extractFloat64Sign( b ); + if ( aExp == 0x7FF ) { + if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) { + return propagateFloat64NaN( a, b ); + } + float_raise( float_flag_invalid ); + return float64_default_nan; + } + if ( bExp == 0x7FF ) { + if ( bSig ) return propagateFloat64NaN( a, b ); + return a; + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + float_raise( float_flag_invalid ); + return float64_default_nan; + } + normalizeFloat64Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return a; + normalizeFloat64Subnormal( aSig, &aExp, &aSig ); + } + expDiff = aExp - bExp; + aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<11; + bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11; + if ( expDiff < 0 ) { + if ( expDiff < -1 ) return a; + aSig >>= 1; + } + q = ( bSig <= aSig ); + if ( q ) aSig -= bSig; + expDiff -= 64; + while ( 0 < expDiff ) { + q = estimateDiv128To64( aSig, 0, bSig ); + q = ( 2 < q ) ? 
q - 2 : 0; + aSig = - ( ( bSig>>2 ) * q ); + expDiff -= 62; + } + expDiff += 64; + if ( 0 < expDiff ) { + q = estimateDiv128To64( aSig, 0, bSig ); + q = ( 2 < q ) ? q - 2 : 0; + q >>= 64 - expDiff; + bSig >>= 2; + aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q; + } + else { + aSig >>= 2; + bSig >>= 2; + } + do { + alternateASig = aSig; + ++q; + aSig -= bSig; + } while ( 0 <= (sbits64) aSig ); + sigMean = aSig + alternateASig; + if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) { + aSig = alternateASig; + } + zSign = ( (sbits64) aSig < 0 ); + if ( zSign ) aSig = - aSig; + return normalizeRoundAndPackFloat64( aSign ^ zSign, bExp, aSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the square root of the double-precision floating-point value `a'. +The operation is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +float64 float64_sqrt( float64 a ) +{ + flag aSign; + int16 aExp, zExp; + bits64 aSig, zSig; + bits64 rem0, rem1, term0, term1; //, shiftedRem; + //float64 z; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + if ( aExp == 0x7FF ) { + if ( aSig ) return propagateFloat64NaN( a, a ); + if ( ! aSign ) return a; + float_raise( float_flag_invalid ); + return float64_default_nan; + } + if ( aSign ) { + if ( ( aExp | aSig ) == 0 ) return a; + float_raise( float_flag_invalid ); + return float64_default_nan; + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return 0; + normalizeFloat64Subnormal( aSig, &aExp, &aSig ); + } + zExp = ( ( aExp - 0x3FF )>>1 ) + 0x3FE; + aSig |= LIT64( 0x0010000000000000 ); + zSig = estimateSqrt32( aExp, aSig>>21 ); + zSig <<= 31; + aSig <<= 9 - ( aExp & 1 ); + zSig = estimateDiv128To64( aSig, 0, zSig ) + zSig + 2; + if ( ( zSig & 0x3FF ) <= 5 ) { + if ( zSig < 2 ) { + zSig = LIT64( 0xFFFFFFFFFFFFFFFF ); + } + else { + aSig <<= 2; + mul64To128( zSig, zSig, &term0, &term1 ); + sub128( aSig, 0, term0, term1, &rem0, &rem1 ); + while ( (sbits64) rem0 < 0 ) { + --zSig; + shortShift128Left( 0, zSig, 1, &term0, &term1 ); + term1 |= 1; + add128( rem0, rem1, term0, term1, &rem0, &rem1 ); + } + zSig |= ( ( rem0 | rem1 ) != 0 ); + } + } + shift64RightJamming( zSig, 1, &zSig ); + return roundAndPackFloat64( 0, zExp, zSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is equal to the +corresponding value `b', and 0 otherwise. The comparison is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float64_eq( float64 a, float64 b ) +{ + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is less than or +equal to the corresponding value `b', and 0 otherwise. The comparison is +performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic. 
+------------------------------------------------------------------------------- +*/ +flag float64_le( float64 a, float64 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign != bSign ) return aSign || ( (bits64) ( ( a | b )<<1 ) == 0 ); + return ( a == b ) || ( aSign ^ ( a < b ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is less than +the corresponding value `b', and 0 otherwise. The comparison is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float64_lt( float64 a, float64 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign != bSign ) return aSign && ( (bits64) ( ( a | b )<<1 ) != 0 ); + return ( a != b ) && ( aSign ^ ( a < b ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is equal to the +corresponding value `b', and 0 otherwise. The invalid exception is raised +if either operand is a NaN. Otherwise, the comparison is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float64_eq_signaling( float64 a, float64 b ) +{ + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is less than or +equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not +cause an exception. Otherwise, the comparison is performed according to the +IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float64_le_quiet( float64 a, float64 b ) +{ + flag aSign, bSign; + //int16 aExp, bExp; + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign != bSign ) return aSign || ( (bits64) ( ( a | b )<<1 ) == 0 ); + return ( a == b ) || ( aSign ^ ( a < b ) ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the double-precision floating-point value `a' is less than +the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an +exception. 
Otherwise, the comparison is performed according to the IEC/IEEE +Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag float64_lt_quiet( float64 a, float64 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign != bSign ) return aSign && ( (bits64) ( ( a | b )<<1 ) != 0 ); + return ( a != b ) && ( aSign ^ ( a < b ) ); + +} + +#ifdef FLOATX80 + +/* +------------------------------------------------------------------------------- +Returns the result of converting the extended double-precision floating- +point value `a' to the 32-bit two's complement integer format. The +conversion is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic---which means in particular that the conversion +is rounded according to the current rounding mode. If `a' is a NaN, the +largest positive integer is returned. Otherwise, if the conversion +overflows, the largest integer with the same sign as `a' is returned. +------------------------------------------------------------------------------- +*/ +int32 floatx80_to_int32( floatx80 a ) +{ + flag aSign; + int32 aExp, shiftCount; + bits64 aSig; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + if ( ( aExp == 0x7FFF ) && (bits64) ( aSig<<1 ) ) aSign = 0; + shiftCount = 0x4037 - aExp; + if ( shiftCount <= 0 ) shiftCount = 1; + shift64RightJamming( aSig, shiftCount, &aSig ); + return roundAndPackInt32( aSign, aSig ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of converting the extended double-precision floating- +point value `a' to the 32-bit two's complement integer format. The +conversion is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic, except that the conversion is always rounded +toward zero. If `a' is a NaN, the largest positive integer is returned. +Otherwise, if the conversion overflows, the largest integer with the same +sign as `a' is returned. +------------------------------------------------------------------------------- +*/ +int32 floatx80_to_int32_round_to_zero( floatx80 a ) +{ + flag aSign; + int32 aExp, shiftCount; + bits64 aSig, savedASig; + int32 z; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + shiftCount = 0x403E - aExp; + if ( shiftCount < 32 ) { + if ( ( aExp == 0x7FFF ) && (bits64) ( aSig<<1 ) ) aSign = 0; + goto invalid; + } + else if ( 63 < shiftCount ) { + if ( aExp || aSig ) float_exception_flags |= float_flag_inexact; + return 0; + } + savedASig = aSig; + aSig >>= shiftCount; + z = aSig; + if ( aSign ) z = - z; + if ( ( z < 0 ) ^ aSign ) { + invalid: + float_exception_flags |= float_flag_invalid; + return aSign ? 
0x80000000 : 0x7FFFFFFF; + } + if ( ( aSig<>1; + if ( ( z.low & roundBitsMask ) == 0 ) z.low &= ~ lastBitMask; + } + else if ( roundingMode != float_round_to_zero ) { + if ( extractFloatx80Sign( z ) ^ ( roundingMode == float_round_up ) ) { + z.low += roundBitsMask; + } + } + z.low &= ~ roundBitsMask; + if ( z.low == 0 ) { + ++z.high; + z.low = LIT64( 0x8000000000000000 ); + } + if ( z.low != a.low ) float_exception_flags |= float_flag_inexact; + return z; + +} + +/* +------------------------------------------------------------------------------- +Returns the result of adding the absolute values of the extended double- +precision floating-point values `a' and `b'. If `zSign' is true, the sum is +negated before being returned. `zSign' is ignored if the result is a NaN. +The addition is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign ) +{ + int32 aExp, bExp, zExp; + bits64 aSig, bSig, zSig0, zSig1; + int32 expDiff; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + expDiff = aExp - bExp; + if ( 0 < expDiff ) { + if ( aExp == 0x7FFF ) { + if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b ); + return a; + } + if ( bExp == 0 ) --expDiff; + shift64ExtraRightJamming( bSig, 0, expDiff, &bSig, &zSig1 ); + zExp = aExp; + } + else if ( expDiff < 0 ) { + if ( bExp == 0x7FFF ) { + if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b ); + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) ++expDiff; + shift64ExtraRightJamming( aSig, 0, - expDiff, &aSig, &zSig1 ); + zExp = bExp; + } + else { + if ( aExp == 0x7FFF ) { + if ( (bits64) ( ( aSig | bSig )<<1 ) ) { + return propagateFloatx80NaN( a, b ); + } + return a; + } + zSig1 = 0; + zSig0 = aSig + bSig; + if ( aExp == 0 ) { + normalizeFloatx80Subnormal( zSig0, &zExp, &zSig0 ); + goto roundAndPack; + } + zExp = aExp; + goto shiftRight1; + } + + zSig0 = aSig + bSig; + + if ( (sbits64) zSig0 < 0 ) goto roundAndPack; + shiftRight1: + shift64ExtraRightJamming( zSig0, zSig1, 1, &zSig0, &zSig1 ); + zSig0 |= LIT64( 0x8000000000000000 ); + ++zExp; + roundAndPack: + return + roundAndPackFloatx80( + floatx80_rounding_precision, zSign, zExp, zSig0, zSig1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of subtracting the absolute values of the extended +double-precision floating-point values `a' and `b'. If `zSign' is true, +the difference is negated before being returned. `zSign' is ignored if the +result is a NaN. The subtraction is performed according to the IEC/IEEE +Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign ) +{ + int32 aExp, bExp, zExp; + bits64 aSig, bSig, zSig0, zSig1; + int32 expDiff; + floatx80 z; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + expDiff = aExp - bExp; + if ( 0 < expDiff ) goto aExpBigger; + if ( expDiff < 0 ) goto bExpBigger; + if ( aExp == 0x7FFF ) { + if ( (bits64) ( ( aSig | bSig )<<1 ) ) { + return propagateFloatx80NaN( a, b ); + } + float_raise( float_flag_invalid ); + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + if ( aExp == 0 ) { + aExp = 1; + bExp = 1; + } + zSig1 = 0; + if ( bSig < aSig ) goto aBigger; + if ( aSig < bSig ) goto bBigger; + return packFloatx80( float_rounding_mode == float_round_down, 0, 0 ); + bExpBigger: + if ( bExp == 0x7FFF ) { + if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b ); + return packFloatx80( zSign ^ 1, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) ++expDiff; + shift128RightJamming( aSig, 0, - expDiff, &aSig, &zSig1 ); + bBigger: + sub128( bSig, 0, aSig, zSig1, &zSig0, &zSig1 ); + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if ( aExp == 0x7FFF ) { + if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b ); + return a; + } + if ( bExp == 0 ) --expDiff; + shift128RightJamming( bSig, 0, expDiff, &bSig, &zSig1 ); + aBigger: + sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 ); + zExp = aExp; + normalizeRoundAndPack: + return + normalizeRoundAndPackFloatx80( + floatx80_rounding_precision, zSign, zExp, zSig0, zSig1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of adding the extended double-precision floating-point +values `a' and `b'. The operation is performed according to the IEC/IEEE +Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +floatx80 floatx80_add( floatx80 a, floatx80 b ) +{ + flag aSign, bSign; + + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign == bSign ) { + return addFloatx80Sigs( a, b, aSign ); + } + else { + return subFloatx80Sigs( a, b, aSign ); + } + +} + +/* +------------------------------------------------------------------------------- +Returns the result of subtracting the extended double-precision floating- +point values `a' and `b'. The operation is performed according to the +IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +floatx80 floatx80_sub( floatx80 a, floatx80 b ) +{ + flag aSign, bSign; + + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign == bSign ) { + return subFloatx80Sigs( a, b, aSign ); + } + else { + return addFloatx80Sigs( a, b, aSign ); + } + +} + +/* +------------------------------------------------------------------------------- +Returns the result of multiplying the extended double-precision floating- +point values `a' and `b'. The operation is performed according to the +IEC/IEEE Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +floatx80 floatx80_mul( floatx80 a, floatx80 b ) +{ + flag aSign, bSign, zSign; + int32 aExp, bExp, zExp; + bits64 aSig, bSig, zSig0, zSig1; + floatx80 z; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + bSign = extractFloatx80Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0x7FFF ) { + if ( (bits64) ( aSig<<1 ) + || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) { + return propagateFloatx80NaN( a, b ); + } + if ( ( bExp | bSig ) == 0 ) goto invalid; + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( bExp == 0x7FFF ) { + if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b ); + if ( ( aExp | aSig ) == 0 ) { + invalid: + float_raise( float_flag_invalid ); + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 ); + normalizeFloatx80Subnormal( aSig, &aExp, &aSig ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) return packFloatx80( zSign, 0, 0 ); + normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); + } + zExp = aExp + bExp - 0x3FFE; + mul64To128( aSig, bSig, &zSig0, &zSig1 ); + if ( 0 < (sbits64) zSig0 ) { + shortShift128Left( zSig0, zSig1, 1, &zSig0, &zSig1 ); + --zExp; + } + return + roundAndPackFloatx80( + floatx80_rounding_precision, zSign, zExp, zSig0, zSig1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns the result of dividing the extended double-precision floating-point +value `a' by the corresponding value `b'. The operation is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +floatx80 floatx80_div( floatx80 a, floatx80 b ) +{ + flag aSign, bSign, zSign; + int32 aExp, bExp, zExp; + bits64 aSig, bSig, zSig0, zSig1; + bits64 rem0, rem1, rem2, term0, term1, term2; + floatx80 z; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + bSign = extractFloatx80Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0x7FFF ) { + if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b ); + if ( bExp == 0x7FFF ) { + if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b ); + goto invalid; + } + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( bExp == 0x7FFF ) { + if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b ); + return packFloatx80( zSign, 0, 0 ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + if ( ( aExp | aSig ) == 0 ) { + invalid: + float_raise( float_flag_invalid ); + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + float_raise( float_flag_divbyzero ); + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 ); + normalizeFloatx80Subnormal( aSig, &aExp, &aSig ); + } + zExp = aExp - bExp + 0x3FFE; + rem1 = 0; + if ( bSig <= aSig ) { + shift128Right( aSig, 0, 1, &aSig, &rem1 ); + ++zExp; + } + zSig0 = estimateDiv128To64( aSig, rem1, bSig ); + mul64To128( bSig, zSig0, &term0, &term1 ); + sub128( aSig, rem1, term0, term1, &rem0, &rem1 ); + while ( (sbits64) rem0 < 0 ) { + --zSig0; + add128( rem0, rem1, 0, bSig, &rem0, &rem1 ); + } + zSig1 = estimateDiv128To64( rem1, 0, bSig ); + if ( (bits64) ( zSig1<<1 ) <= 8 ) { + mul64To128( bSig, zSig1, &term1, &term2 ); + sub128( rem1, 0, term1, term2, &rem1, &rem2 ); + while ( (sbits64) rem1 < 0 ) { + --zSig1; + add128( rem1, rem2, 0, bSig, &rem1, &rem2 ); + } + zSig1 |= ( ( rem1 | rem2 ) != 0 ); + } + return + roundAndPackFloatx80( + floatx80_rounding_precision, zSign, zExp, zSig0, zSig1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns the remainder of the extended double-precision floating-point value +`a' with respect to the corresponding value `b'. The operation is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +floatx80 floatx80_rem( floatx80 a, floatx80 b ) +{ + flag aSign, bSign, zSign; + int32 aExp, bExp, expDiff; + bits64 aSig0, aSig1, bSig; + bits64 q, term0, term1, alternateASig0, alternateASig1; + floatx80 z; + + aSig0 = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + bSign = extractFloatx80Sign( b ); + if ( aExp == 0x7FFF ) { + if ( (bits64) ( aSig0<<1 ) + || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) { + return propagateFloatx80NaN( a, b ); + } + goto invalid; + } + if ( bExp == 0x7FFF ) { + if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b ); + return a; + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + invalid: + float_raise( float_flag_invalid ); + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( (bits64) ( aSig0<<1 ) == 0 ) return a; + normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 ); + } + bSig |= LIT64( 0x8000000000000000 ); + zSign = aSign; + expDiff = aExp - bExp; + aSig1 = 0; + if ( expDiff < 0 ) { + if ( expDiff < -1 ) return a; + shift128Right( aSig0, 0, 1, &aSig0, &aSig1 ); + expDiff = 0; + } + q = ( bSig <= aSig0 ); + if ( q ) aSig0 -= bSig; + expDiff -= 64; + while ( 0 < expDiff ) { + q = estimateDiv128To64( aSig0, aSig1, bSig ); + q = ( 2 < q ) ? q - 2 : 0; + mul64To128( bSig, q, &term0, &term1 ); + sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 ); + shortShift128Left( aSig0, aSig1, 62, &aSig0, &aSig1 ); + expDiff -= 62; + } + expDiff += 64; + if ( 0 < expDiff ) { + q = estimateDiv128To64( aSig0, aSig1, bSig ); + q = ( 2 < q ) ? q - 2 : 0; + q >>= 64 - expDiff; + mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 ); + sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 ); + shortShift128Left( 0, bSig, 64 - expDiff, &term0, &term1 ); + while ( le128( term0, term1, aSig0, aSig1 ) ) { + ++q; + sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 ); + } + } + else { + term1 = 0; + term0 = bSig; + } + sub128( term0, term1, aSig0, aSig1, &alternateASig0, &alternateASig1 ); + if ( lt128( alternateASig0, alternateASig1, aSig0, aSig1 ) + || ( eq128( alternateASig0, alternateASig1, aSig0, aSig1 ) + && ( q & 1 ) ) + ) { + aSig0 = alternateASig0; + aSig1 = alternateASig1; + zSign = ! zSign; + } + return + normalizeRoundAndPackFloatx80( + 80, zSign, bExp + expDiff, aSig0, aSig1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns the square root of the extended double-precision floating-point +value `a'. The operation is performed according to the IEC/IEEE Standard +for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +floatx80 floatx80_sqrt( floatx80 a ) +{ + flag aSign; + int32 aExp, zExp; + bits64 aSig0, aSig1, zSig0, zSig1; + bits64 rem0, rem1, rem2, rem3, term0, term1, term2, term3; + bits64 shiftedRem0, shiftedRem1; + floatx80 z; + + aSig0 = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + if ( aExp == 0x7FFF ) { + if ( (bits64) ( aSig0<<1 ) ) return propagateFloatx80NaN( a, a ); + if ( ! 
aSign ) return a; + goto invalid; + } + if ( aSign ) { + if ( ( aExp | aSig0 ) == 0 ) return a; + invalid: + float_raise( float_flag_invalid ); + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + if ( aExp == 0 ) { + if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 ); + normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 ); + } + zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFF; + zSig0 = estimateSqrt32( aExp, aSig0>>32 ); + zSig0 <<= 31; + aSig1 = 0; + shift128Right( aSig0, 0, ( aExp & 1 ) + 2, &aSig0, &aSig1 ); + zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0 ) + zSig0 + 4; + if ( 0 <= (sbits64) zSig0 ) zSig0 = LIT64( 0xFFFFFFFFFFFFFFFF ); + shortShift128Left( aSig0, aSig1, 2, &aSig0, &aSig1 ); + mul64To128( zSig0, zSig0, &term0, &term1 ); + sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 ); + while ( (sbits64) rem0 < 0 ) { + --zSig0; + shortShift128Left( 0, zSig0, 1, &term0, &term1 ); + term1 |= 1; + add128( rem0, rem1, term0, term1, &rem0, &rem1 ); + } + shortShift128Left( rem0, rem1, 63, &shiftedRem0, &shiftedRem1 ); + zSig1 = estimateDiv128To64( shiftedRem0, shiftedRem1, zSig0 ); + if ( (bits64) ( zSig1<<1 ) <= 10 ) { + if ( zSig1 == 0 ) zSig1 = 1; + mul64To128( zSig0, zSig1, &term1, &term2 ); + shortShift128Left( term1, term2, 1, &term1, &term2 ); + sub128( rem1, 0, term1, term2, &rem1, &rem2 ); + mul64To128( zSig1, zSig1, &term2, &term3 ); + sub192( rem1, rem2, 0, 0, term2, term3, &rem1, &rem2, &rem3 ); + while ( (sbits64) rem1 < 0 ) { + --zSig1; + shortShift192Left( 0, zSig0, zSig1, 1, &term1, &term2, &term3 ); + term3 |= 1; + add192( + rem1, rem2, rem3, term1, term2, term3, &rem1, &rem2, &rem3 ); + } + zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 ); + } + return + roundAndPackFloatx80( + floatx80_rounding_precision, 0, zExp, zSig0, zSig1 ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is +equal to the corresponding value `b', and 0 otherwise. The comparison is +performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic. +------------------------------------------------------------------------------- +*/ +flag floatx80_eq( floatx80 a, floatx80 b ) +{ + + if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( a )<<1 ) ) + || ( ( extractFloatx80Exp( b ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( b )<<1 ) ) + ) { + if ( floatx80_is_signaling_nan( a ) + || floatx80_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + return + ( a.low == b.low ) + && ( ( a.high == b.high ) + || ( ( a.low == 0 ) + && ( (bits16) ( ( a.high | b.high )<<1 ) == 0 ) ) + ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is +less than or equal to the corresponding value `b', and 0 otherwise. The +comparison is performed according to the IEC/IEEE Standard for Binary +Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +flag floatx80_le( floatx80 a, floatx80 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( a )<<1 ) ) + || ( ( extractFloatx80Exp( b ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( b )<<1 ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign != bSign ) { + return + aSign + || ( ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + == 0 ); + } + return + aSign ? le128( b.high, b.low, a.high, a.low ) + : le128( a.high, a.low, b.high, b.low ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is +less than the corresponding value `b', and 0 otherwise. The comparison +is performed according to the IEC/IEEE Standard for Binary Floating-point +Arithmetic. +------------------------------------------------------------------------------- +*/ +flag floatx80_lt( floatx80 a, floatx80 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( a )<<1 ) ) + || ( ( extractFloatx80Exp( b ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( b )<<1 ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign != bSign ) { + return + aSign + && ( ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + != 0 ); + } + return + aSign ? lt128( b.high, b.low, a.high, a.low ) + : lt128( a.high, a.low, b.high, b.low ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is equal +to the corresponding value `b', and 0 otherwise. The invalid exception is +raised if either operand is a NaN. Otherwise, the comparison is performed +according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag floatx80_eq_signaling( floatx80 a, floatx80 b ) +{ + + if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( a )<<1 ) ) + || ( ( extractFloatx80Exp( b ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( b )<<1 ) ) + ) { + float_raise( float_flag_invalid ); + return 0; + } + return + ( a.low == b.low ) + && ( ( a.high == b.high ) + || ( ( a.low == 0 ) + && ( (bits16) ( ( a.high | b.high )<<1 ) == 0 ) ) + ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is less +than or equal to the corresponding value `b', and 0 otherwise. Quiet NaNs +do not cause an exception. Otherwise, the comparison is performed according +to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
+------------------------------------------------------------------------------- +*/ +flag floatx80_le_quiet( floatx80 a, floatx80 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( a )<<1 ) ) + || ( ( extractFloatx80Exp( b ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( b )<<1 ) ) + ) { + if ( floatx80_is_signaling_nan( a ) + || floatx80_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign != bSign ) { + return + aSign + || ( ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + == 0 ); + } + return + aSign ? le128( b.high, b.low, a.high, a.low ) + : le128( a.high, a.low, b.high, b.low ); + +} + +/* +------------------------------------------------------------------------------- +Returns 1 if the extended double-precision floating-point value `a' is less +than the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause +an exception. Otherwise, the comparison is performed according to the +IEC/IEEE Standard for Binary Floating-point Arithmetic. +------------------------------------------------------------------------------- +*/ +flag floatx80_lt_quiet( floatx80 a, floatx80 b ) +{ + flag aSign, bSign; + + if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( a )<<1 ) ) + || ( ( extractFloatx80Exp( b ) == 0x7FFF ) + && (bits64) ( extractFloatx80Frac( b )<<1 ) ) + ) { + if ( floatx80_is_signaling_nan( a ) + || floatx80_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid ); + } + return 0; + } + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign != bSign ) { + return + aSign + && ( ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + != 0 ); + } + return + aSign ? lt128( b.high, b.low, a.high, a.low ) + : lt128( a.high, a.low, b.high, b.low ); + +} + +#endif + diff -urN linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat.h linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat.h --- linux-2.5.70-bk13/arch/arm26/nwfpe/softfloat.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/nwfpe/softfloat.h 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,232 @@ + +/* +=============================================================================== + +This C header file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/softfloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. 
+ +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these three paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +#ifndef __SOFTFLOAT_H__ +#define __SOFTFLOAT_H__ + +/* +------------------------------------------------------------------------------- +The macro `FLOATX80' must be defined to enable the extended double-precision +floating-point format `floatx80'. If this macro is not defined, the +`floatx80' type will not be defined, and none of the functions that either +input or output the `floatx80' type will be defined. +------------------------------------------------------------------------------- +*/ +#define FLOATX80 + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE floating-point types. +------------------------------------------------------------------------------- +*/ +typedef unsigned long int float32; +typedef unsigned long long float64; +typedef struct { + unsigned short high; + unsigned long long low; +} floatx80; + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE floating-point underflow tininess-detection mode. +------------------------------------------------------------------------------- +*/ +extern signed char float_detect_tininess; +enum { + float_tininess_after_rounding = 0, + float_tininess_before_rounding = 1 +}; + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE floating-point rounding mode. +------------------------------------------------------------------------------- +*/ +extern signed char float_rounding_mode; +enum { + float_round_nearest_even = 0, + float_round_to_zero = 1, + float_round_down = 2, + float_round_up = 3 +}; + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE floating-point exception flags. +------------------------------------------------------------------------------- +extern signed char float_exception_flags; +enum { + float_flag_inexact = 1, + float_flag_underflow = 2, + float_flag_overflow = 4, + float_flag_divbyzero = 8, + float_flag_invalid = 16 +}; + +ScottB: November 4, 1998 +Changed the enumeration to match the bit order in the FPA11. +*/ + +extern signed char float_exception_flags; +enum { + float_flag_invalid = 1, + float_flag_divbyzero = 2, + float_flag_overflow = 4, + float_flag_underflow = 8, + float_flag_inexact = 16 +}; + +/* +------------------------------------------------------------------------------- +Routine to raise any or all of the software IEC/IEEE floating-point +exception flags. +------------------------------------------------------------------------------- +*/ +void float_raise( signed char ); + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE integer-to-floating-point conversion routines. +------------------------------------------------------------------------------- +*/ +float32 int32_to_float32( signed int ); +float64 int32_to_float64( signed int ); +#ifdef FLOATX80 +floatx80 int32_to_floatx80( signed int ); +#endif + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE single-precision conversion routines. 
+------------------------------------------------------------------------------- +*/ +signed int float32_to_int32( float32 ); +signed int float32_to_int32_round_to_zero( float32 ); +float64 float32_to_float64( float32 ); +#ifdef FLOATX80 +floatx80 float32_to_floatx80( float32 ); +#endif + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE single-precision operations. +------------------------------------------------------------------------------- +*/ +float32 float32_round_to_int( float32 ); +float32 float32_add( float32, float32 ); +float32 float32_sub( float32, float32 ); +float32 float32_mul( float32, float32 ); +float32 float32_div( float32, float32 ); +float32 float32_rem( float32, float32 ); +float32 float32_sqrt( float32 ); +char float32_eq( float32, float32 ); +char float32_le( float32, float32 ); +char float32_lt( float32, float32 ); +char float32_eq_signaling( float32, float32 ); +char float32_le_quiet( float32, float32 ); +char float32_lt_quiet( float32, float32 ); +char float32_is_signaling_nan( float32 ); + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE double-precision conversion routines. +------------------------------------------------------------------------------- +*/ +signed int float64_to_int32( float64 ); +signed int float64_to_int32_round_to_zero( float64 ); +float32 float64_to_float32( float64 ); +#ifdef FLOATX80 +floatx80 float64_to_floatx80( float64 ); +#endif + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE double-precision operations. +------------------------------------------------------------------------------- +*/ +float64 float64_round_to_int( float64 ); +float64 float64_add( float64, float64 ); +float64 float64_sub( float64, float64 ); +float64 float64_mul( float64, float64 ); +float64 float64_div( float64, float64 ); +float64 float64_rem( float64, float64 ); +float64 float64_sqrt( float64 ); +char float64_eq( float64, float64 ); +char float64_le( float64, float64 ); +char float64_lt( float64, float64 ); +char float64_eq_signaling( float64, float64 ); +char float64_le_quiet( float64, float64 ); +char float64_lt_quiet( float64, float64 ); +char float64_is_signaling_nan( float64 ); + +#ifdef FLOATX80 + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE extended double-precision conversion routines. +------------------------------------------------------------------------------- +*/ +signed int floatx80_to_int32( floatx80 ); +signed int floatx80_to_int32_round_to_zero( floatx80 ); +float32 floatx80_to_float32( floatx80 ); +float64 floatx80_to_float64( floatx80 ); + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE extended double-precision rounding precision. Valid +values are 32, 64, and 80. +------------------------------------------------------------------------------- +*/ +extern signed char floatx80_rounding_precision; + +/* +------------------------------------------------------------------------------- +Software IEC/IEEE extended double-precision operations. 
+------------------------------------------------------------------------------- +*/ +floatx80 floatx80_round_to_int( floatx80 ); +floatx80 floatx80_add( floatx80, floatx80 ); +floatx80 floatx80_sub( floatx80, floatx80 ); +floatx80 floatx80_mul( floatx80, floatx80 ); +floatx80 floatx80_div( floatx80, floatx80 ); +floatx80 floatx80_rem( floatx80, floatx80 ); +floatx80 floatx80_sqrt( floatx80 ); +char floatx80_eq( floatx80, floatx80 ); +char floatx80_le( floatx80, floatx80 ); +char floatx80_lt( floatx80, floatx80 ); +char floatx80_eq_signaling( floatx80, floatx80 ); +char floatx80_le_quiet( floatx80, floatx80 ); +char floatx80_lt_quiet( floatx80, floatx80 ); +char floatx80_is_signaling_nan( floatx80 ); + +#endif + +#endif diff -urN linux-2.5.70-bk13/arch/arm26/vmlinux-armo.lds.in linux-2.5.70-bk14/arch/arm26/vmlinux-armo.lds.in --- linux-2.5.70-bk13/arch/arm26/vmlinux-armo.lds.in 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/vmlinux-armo.lds.in 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,128 @@ +/* ld script to make ARM Linux kernel + * taken from the i386 version by Russell King + * Written by Martin Mares + * borrowed from Russels ARM port by Ian Molton + */ + +#include + +OUTPUT_ARCH(arm) +ENTRY(stext) +jiffies = jiffies_64; +SECTIONS +{ + . = TEXTADDR; + .init : { /* Init code and data */ + _stext = .; + __init_begin = .; + _sinittext = .; + *(.init.text) + _einittext = .; + __proc_info_begin = .; + *(.proc.info) + __proc_info_end = .; + __arch_info_begin = .; + *(.arch.info) + __arch_info_end = .; + __tagtable_begin = .; + *(.taglist) + __tagtable_end = .; + *(.init.data) + . = ALIGN(16); + __setup_start = .; + *(.init.setup) + __setup_end = .; + __early_begin = .; + *(__early_param) + __early_end = .; + __start___param = .; + *(__param) + __stop___param = .; + __initcall_start = .; + *(.initcall1.init) + *(.initcall2.init) + *(.initcall3.init) + *(.initcall4.init) + *(.initcall5.init) + *(.initcall6.init) + *(.initcall7.init) + __initcall_end = .; + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + . = ALIGN(32); + __initramfs_start = .; + usr/built-in.o(.init.ramfs) + __initramfs_end = .; + . = ALIGN(32768); + __init_end = .; + } + + /DISCARD/ : { /* Exit code and data */ + *(.exit.text) + *(.exit.data) + *(.exitcall.exit) + } + + .text : { /* Real text segment */ + _text = .; /* Text and read-only data */ + *(.text) + *(.fixup) + *(.gnu.warning) + *(.rodata) + *(.rodata.*) + *(.glue_7) + *(.glue_7t) + *(.got) /* Global offset table */ + + _etext = .; /* End of text section */ + } + + . = ALIGN(16); + __ex_table : { /* Exception table */ + __start___ex_table = .; + *(__ex_table) + __stop___ex_table = .; + } + + RODATA + + . = ALIGN(8192); + + .data : { + /* + * first, the init task union, aligned + * to an 8192 byte boundary. + */ + *(.init.task) + + /* + * The cacheline aligned data + */ + . = ALIGN(32); + *(.data.cacheline_aligned) + + /* + * and the usual data section + */ + *(.data) + CONSTRUCTORS + + _edata = .; + } + + .bss : { + __bss_start = .; /* BSS */ + *(.bss) + *(COMMON) + _end = . ; + } + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } +} diff -urN linux-2.5.70-bk13/arch/arm26/vmlinux.lds.S linux-2.5.70-bk14/arch/arm26/vmlinux.lds.S --- linux-2.5.70-bk13/arch/arm26/vmlinux.lds.S 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/arch/arm26/vmlinux.lds.S 2003-06-09 04:42:02.000000000 -0700 @@ -0,0 +1,12 @@ +#include + +#ifdef CONFIG_ROM_KERNEL + +#include "vmlinux-armo-rom.lds.in" + +#else + +#include "vmlinux-armo.lds.in" + +#endif + diff -urN linux-2.5.70-bk13/drivers/char/mem.c linux-2.5.70-bk14/drivers/char/mem.c --- linux-2.5.70-bk13/drivers/char/mem.c 2003-06-09 04:41:54.000000000 -0700 +++ linux-2.5.70-bk14/drivers/char/mem.c 2003-06-09 04:42:04.000000000 -0700 @@ -713,4 +713,4 @@ return 0; } -__initcall(chr_dev_init); +subsys_initcall(chr_dev_init); diff -urN linux-2.5.70-bk13/drivers/net/sis900.c linux-2.5.70-bk14/drivers/net/sis900.c --- linux-2.5.70-bk13/drivers/net/sis900.c 2003-05-26 18:00:39.000000000 -0700 +++ linux-2.5.70-bk14/drivers/net/sis900.c 2003-06-09 04:42:08.000000000 -0700 @@ -211,7 +211,7 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex); /** - * sis900_get_mac_addr: - Get MAC address for stand alone SiS900 model + * sis900_get_mac_addr - Get MAC address for stand alone SiS900 model * @pci_dev: the sis900 pci device * @net_dev: the net device to get address for * @@ -241,7 +241,7 @@ } /** - * sis630e_get_mac_addr: - Get MAC address for SiS630E model + * sis630e_get_mac_addr - Get MAC address for SiS630E model * @pci_dev: the sis900 pci device * @net_dev: the net device to get address for * @@ -274,7 +274,7 @@ /** - * sis635_get_mac_addr: - Get MAC address for SIS635 model + * sis635_get_mac_addr - Get MAC address for SIS635 model * @pci_dev: the sis900 pci device * @net_dev: the net device to get address for * @@ -310,7 +310,7 @@ } /** - * sis96x_get_mac_addr: - Get MAC address for SiS962 or SiS963 model + * sis96x_get_mac_addr - Get MAC address for SiS962 or SiS963 model * @pci_dev: the sis900 pci device * @net_dev: the net device to get address for * @@ -352,7 +352,7 @@ } /** - * sis900_probe: - Probe for sis900 device + * sis900_probe - Probe for sis900 device * @pci_dev: the sis900 pci device * @pci_id: the pci device ID * @@ -498,7 +498,7 @@ } /** - * sis900_mii_probe: - Probe MII PHY for sis900 + * sis900_mii_probe - Probe MII PHY for sis900 * @net_dev: the net device to probe for * * Search for total of 32 possible mii phy addresses. @@ -618,7 +618,7 @@ } /** - * sis900_default_phy: - Select default PHY for sis900 mac. + * sis900_default_phy - Select default PHY for sis900 mac. * @net_dev: the net device to probe for * * Select first detected PHY with link as default. @@ -671,7 +671,7 @@ /** - * sis900_set_capability: - set the media capability of network adapter. + * sis900_set_capability - set the media capability of network adapter. 
* @net_dev : the net device to probe for * @phy : default PHY * @@ -701,7 +701,7 @@ #define eeprom_delay() inl(ee_addr) /** - * read_eeprom: - Read Serial EEPROM + * read_eeprom - Read Serial EEPROM * @ioaddr: base i/o address * @location: the EEPROM location to read * @@ -776,7 +776,7 @@ } /** - * mdio_read: - read MII PHY register + * mdio_read - read MII PHY register * @net_dev: the net device to read * @phy_id: the phy address to read * @location: the phy regiester id to read @@ -818,7 +818,7 @@ } /** - * mdio_write: - write MII PHY register + * mdio_write - write MII PHY register * @net_dev: the net device to write * @phy_id: the phy address to write * @location: the phy regiester id to write @@ -872,7 +872,7 @@ /** - * sis900_reset_phy: - reset sis900 mii phy. + * sis900_reset_phy - reset sis900 mii phy. * @net_dev: the net device to write * @phy_addr: default phy address * @@ -895,7 +895,7 @@ } /** - * sis900_open: - open sis900 device + * sis900_open - open sis900 device * @net_dev: the net device to open * * Do some initialization and start net interface. @@ -952,7 +952,7 @@ } /** - * sis900_init_rxfilter: - Initialize the Rx filter + * sis900_init_rxfilter - Initialize the Rx filter * @net_dev: the net device to initialize for * * Set receive filter address to our MAC address @@ -990,7 +990,7 @@ } /** - * sis900_init_tx_ring: - Initialize the Tx descriptor ring + * sis900_init_tx_ring - Initialize the Tx descriptor ring * @net_dev: the net device to initialize for * * Initialize the Tx descriptor ring, @@ -1023,7 +1023,7 @@ } /** - * sis900_init_rx_ring: - Initialize the Rx descriptor ring + * sis900_init_rx_ring - Initialize the Rx descriptor ring * @net_dev: the net device to initialize for * * Initialize the Rx descriptor ring, @@ -1077,7 +1077,7 @@ } /** - * sis630_set_eq: - set phy equalizer value for 630 LAN + * sis630_set_eq - set phy equalizer value for 630 LAN * @net_dev: the net device to set equalizer value * @revision: 630 LAN revision number * @@ -1165,7 +1165,7 @@ } /** - * sis900_timer: - sis900 timer routine + * sis900_timer - sis900 timer routine * @data: pointer to sis900 net device * * On each timer ticks we check two things, @@ -1235,7 +1235,7 @@ } /** - * sis900_check_mode: - check the media mode for sis900 + * sis900_check_mode - check the media mode for sis900 * @net_dev: the net device to be checked * @mii_phy: the mii phy * @@ -1266,7 +1266,7 @@ } /** - * sis900_set_mode: - Set the media mode of mac register. + * sis900_set_mode - Set the media mode of mac register. * @ioaddr: the address of the device * @speed : the transmit speed to be determined * @duplex: the duplex mode to be determined @@ -1310,7 +1310,7 @@ } /** - * sis900_auto_negotiate: Set the Auto-Negotiation Enable/Reset bit. + * sis900_auto_negotiate - Set the Auto-Negotiation Enable/Reset bit. 
* @net_dev: the net device to read mode for * @phy_addr: mii phy address * @@ -1344,7 +1344,7 @@ /** - * sis900_read_mode: - read media mode for sis900 internal phy + * sis900_read_mode - read media mode for sis900 internal phy * @net_dev: the net device to read mode for * @speed : the transmit speed to be determined * @duplex : the duplex mode to be determined @@ -1401,7 +1401,7 @@ } /** - * sis900_tx_timeout: - sis900 transmit timeout routine + * sis900_tx_timeout - sis900 transmit timeout routine * @net_dev: the net device to transmit * * print transmit timeout status @@ -1456,7 +1456,7 @@ } /** - * sis900_start_xmit: - sis900 start transmit routine + * sis900_start_xmit - sis900 start transmit routine * @skb: socket buffer pointer to put the data being transmitted * @net_dev: the net device to transmit with * @@ -1526,7 +1526,7 @@ } /** - * sis900_interrupt: - sis900 interrupt handler + * sis900_interrupt - sis900 interrupt handler * @irq: the irq number * @dev_instance: the client data object * @regs: snapshot of processor context @@ -1587,7 +1587,7 @@ } /** - * sis900_rx: - sis900 receive routine + * sis900_rx - sis900 receive routine * @net_dev: the net device which receives data * * Process receive interrupt events, @@ -1726,7 +1726,7 @@ } /** - * sis900_finish_xmit: - finish up transmission of packets + * sis900_finish_xmit - finish up transmission of packets * @net_dev: the net device to be transmitted on * * Check for error condition and free socket buffer etc @@ -1796,7 +1796,7 @@ } /** - * sis900_close: - close sis900 device + * sis900_close - close sis900 device * @net_dev: the net device to be closed * * Disable interrupts, stop the Tx and Rx Status Machine @@ -1852,7 +1852,7 @@ } /** - * netdev_ethtool_ioctl: - For the basic support of ethtool + * netdev_ethtool_ioctl - For the basic support of ethtool * @net_dev: the net device to command for * @useraddr: start address of interface request * @@ -1886,7 +1886,7 @@ } /** - * mii_ioctl: - process MII i/o control command + * mii_ioctl - process MII i/o control command * @net_dev: the net device to command for * @rq: parameter for command * @cmd: the i/o command @@ -1922,7 +1922,7 @@ } /** - * sis900_get_stats: - Get sis900 read/write statistics + * sis900_get_stats - Get sis900 read/write statistics * @net_dev: the net device to get statistics for * * get tx/rx statistics for sis900 @@ -1937,7 +1937,7 @@ } /** - * sis900_set_config: - Set media type by net_device.set_config + * sis900_set_config - Set media type by net_device.set_config * @dev: the net device for media type change * @map: ifmap passed by ifconfig * @@ -2034,7 +2034,7 @@ } /** - * sis900_mcast_bitnr: - compute hashtable index + * sis900_mcast_bitnr - compute hashtable index * @addr: multicast address * @revision: revision id of chip * @@ -2057,7 +2057,7 @@ } /** - * set_rx_mode: - Set SiS900 receive mode + * set_rx_mode - Set SiS900 receive mode * @net_dev: the net device to be set * * Set SiS900 receive mode for promiscuous, multicast, or broadcast mode. 
@@ -2131,7 +2131,7 @@ } /** - * sis900_reset: - Reset sis900 MAC + * sis900_reset - Reset sis900 MAC * @net_dev: the net device to reset * * reset sis900 MAC and wait until finished @@ -2166,7 +2166,7 @@ } /** - * sis900_remove: - Remove sis900 device + * sis900_remove - Remove sis900 device * @pci_dev: the pci device to be removed * * remove and release SiS900 net device diff -urN linux-2.5.70-bk13/include/asm-alpha/ptrace.h linux-2.5.70-bk14/include/asm-alpha/ptrace.h --- linux-2.5.70-bk13/include/asm-alpha/ptrace.h 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/include/asm-alpha/ptrace.h 2003-06-09 04:42:15.000000000 -0700 @@ -71,15 +71,8 @@ #define instruction_pointer(regs) ((regs)->pc) extern void show_regs(struct pt_regs *); -/* - * TODO: if kernel-only threads do not have a dummy pt_regs structure at the - * top of the stack, this would cause kernel stack corruption. Either check - * first that we're not dealing with a kernel thread or change the kernel - * stacks to allocate a dummy pt_regs structure. - */ - -#define alpha_task_regs(task) ((struct pt_regs *) \ - ((long) task->thread_info + PAGE_SIZE) - 1) +#define alpha_task_regs(task) \ + ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1) #define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0) diff -urN linux-2.5.70-bk13/include/asm-alpha/string.h linux-2.5.70-bk14/include/asm-alpha/string.h --- linux-2.5.70-bk13/include/asm-alpha/string.h 2003-05-26 18:00:38.000000000 -0700 +++ linux-2.5.70-bk14/include/asm-alpha/string.h 2003-06-09 04:42:15.000000000 -0700 @@ -13,6 +13,7 @@ #define __HAVE_ARCH_MEMCPY extern void * memcpy(void *, const void *, size_t); #define __HAVE_ARCH_MEMMOVE +#define __HAVE_ARCH_BCOPY extern void * memmove(void *, const void *, size_t); /* For backward compatibility with modules. Unused otherwise. */ diff -urN linux-2.5.70-bk13/include/asm-alpha/uaccess.h linux-2.5.70-bk14/include/asm-alpha/uaccess.h --- linux-2.5.70-bk13/include/asm-alpha/uaccess.h 2003-05-26 18:00:46.000000000 -0700 +++ linux-2.5.70-bk14/include/asm-alpha/uaccess.h 2003-06-09 04:42:15.000000000 -0700 @@ -340,25 +340,31 @@ * Complex access routines */ +/* This little bit of silliness is to get the GP loaded for a function + that ordinarily wouldn't. Otherwise we could have it done by the macro + directly, which can be optimized the linker. */ +#ifdef MODULE +#define __module_address(sym) "r"(sym), +#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym +#else +#define __module_address(sym) +#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" +#endif + extern void __copy_user(void); extern inline long __copy_tofrom_user_nocheck(void *to, const void *from, long len) { - /* This little bit of silliness is to get the GP loaded for - a function that ordinarily wouldn't. Otherwise we could - have it done by the macro directly, which can be optimized - the linker. 
*/ - register void * pv __asm__("$27") = __copy_user; - register void * __cu_to __asm__("$6") = to; register const void * __cu_from __asm__("$7") = from; register long __cu_len __asm__("$0") = len; __asm__ __volatile__( - "jsr $28,(%3),__copy_user" - : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to), "=r"(pv) - : "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), "3"(pv) + __module_call(28, 3, __copy_user) + : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) + : __module_address(__copy_user) + "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) : "$1","$2","$3","$4","$5","$28","memory"); return __cu_len; @@ -367,20 +373,8 @@ extern inline long __copy_tofrom_user(void *to, const void *from, long len, const void *validate) { - if (__access_ok((long)validate, len, get_fs())) { - register void * pv __asm__("$27") = __copy_user; - register void * __cu_to __asm__("$6") = to; - register const void * __cu_from __asm__("$7") = from; - register long __cu_len __asm__("$0") = len; - __asm__ __volatile__( - "jsr $28,(%3),__copy_user" - : "=r"(__cu_len), "=r"(__cu_from), "=r"(__cu_to), - "=r" (pv) - : "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), - "3" (pv) - : "$1","$2","$3","$4","$5","$28","memory"); - len = __cu_len; - } + if (__access_ok((long)validate, len, get_fs())) + len = __copy_tofrom_user_nocheck(to, from, len); return len; } @@ -404,18 +398,13 @@ extern inline long __clear_user(void *to, long len) { - /* This little bit of silliness is to get the GP loaded for - a function that ordinarily wouldn't. Otherwise we could - have it done by the macro directly, which can be optimized - the linker. */ - register void * pv __asm__("$27") = __do_clear_user; - register void * __cl_to __asm__("$6") = to; register long __cl_len __asm__("$0") = len; __asm__ __volatile__( - "jsr $28,(%2),__do_clear_user" - : "=r"(__cl_len), "=r"(__cl_to), "=r"(pv) - : "0"(__cl_len), "1"(__cl_to), "2"(pv) + __module_call(28, 2, __do_clear_user) + : "=r"(__cl_len), "=r"(__cl_to) + : __module_address(__do_clear_user) + "0"(__cl_len), "1"(__cl_to) : "$1","$2","$3","$4","$5","$28","memory"); return __cl_len; } @@ -423,20 +412,14 @@ extern inline long clear_user(void *to, long len) { - if (__access_ok((long)to, len, get_fs())) { - register void * pv __asm__("$27") = __do_clear_user; - register void * __cl_to __asm__("$6") = to; - register long __cl_len __asm__("$0") = len; - __asm__ __volatile__( - "jsr $28,(%2),__do_clear_user" - : "=r"(__cl_len), "=r"(__cl_to), "=r"(pv) - : "0"(__cl_len), "1"(__cl_to), "2"(pv) - : "$1","$2","$3","$4","$5","$28","memory"); - len = __cl_len; - } + if (__access_ok((long)to, len, get_fs())) + len = __clear_user(to, len); return len; } +#undef __module_address +#undef __module_call + /* Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else strlen. */ diff -urN linux-2.5.70-bk13/include/asm-alpha/unaligned.h linux-2.5.70-bk14/include/asm-alpha/unaligned.h --- linux-2.5.70-bk13/include/asm-alpha/unaligned.h 2003-05-26 18:00:38.000000000 -0700 +++ linux-2.5.70-bk14/include/asm-alpha/unaligned.h 2003-06-09 04:42:15.000000000 -0700 @@ -14,7 +14,7 @@ * the get/put functions are indeed always optimized, * and that we use the correct sizes. */ -extern void bad_unaligned_access_length(void); +extern void bad_unaligned_access_length(void) __attribute__((noreturn)); /* * EGCS 1.1 knows about arbitrary unaligned loads. 
Define some diff -urN linux-2.5.70-bk13/include/asm-alpha/unistd.h linux-2.5.70-bk14/include/asm-alpha/unistd.h --- linux-2.5.70-bk13/include/asm-alpha/unistd.h 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/include/asm-alpha/unistd.h 2003-06-09 04:42:15.000000000 -0700 @@ -358,7 +358,8 @@ #define __NR_clock_gettime 420 #define __NR_clock_getres 421 #define __NR_clock_nanosleep 422 -#define NR_SYSCALLS 423 +#define __NR_semtimedop 423 +#define NR_SYSCALLS 424 #if defined(__GNUC__) diff -urN linux-2.5.70-bk13/include/asm-arm26/a.out.h linux-2.5.70-bk14/include/asm-arm26/a.out.h --- linux-2.5.70-bk13/include/asm-arm26/a.out.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/a.out.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,38 @@ +#ifndef __ARM_A_OUT_H__ +#define __ARM_A_OUT_H__ + +#include +#include + +struct exec +{ + __u32 a_info; /* Use macros N_MAGIC, etc for access */ + __u32 a_text; /* length of text, in bytes */ + __u32 a_data; /* length of data, in bytes */ + __u32 a_bss; /* length of uninitialized data area for file, in bytes */ + __u32 a_syms; /* length of symbol table data in file, in bytes */ + __u32 a_entry; /* start address */ + __u32 a_trsize; /* length of relocation info for text, in bytes */ + __u32 a_drsize; /* length of relocation info for data, in bytes */ +}; + +/* + * This is always the same + */ +#define N_TXTADDR(a) (0x00008000) + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#define M_ARM 103 + +#ifdef __KERNEL__ +#define STACK_TOP TASK_SIZE +#endif + +#ifndef LIBRARY_START_TEXT +#define LIBRARY_START_TEXT (0x00c00000) +#endif + +#endif /* __A_OUT_GNU_H__ */ diff -urN linux-2.5.70-bk13/include/asm-arm26/arch.h linux-2.5.70-bk14/include/asm-arm26/arch.h --- linux-2.5.70-bk13/include/asm-arm26/arch.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/arch.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,62 @@ +/* + * linux/include/asm-arm/mach/arch.h + * + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * The size of struct machine_desc + * (for assembler code) + * FIXME - I count 45... or is this padding? + */ +#define SIZEOF_MACHINE_DESC 48 + +#ifndef __ASSEMBLY__ + +struct tag; + +struct machine_desc { + int nr; /* arch no FIXME - get rid */ + const char *name; /* architecture name */ + unsigned int param_offset; /* parameter page */ + + unsigned int video_start; /* start of video RAM */ + unsigned int video_end; /* end of video RAM */ + + unsigned int reserve_lp0 :1; /* never has lp0 */ + unsigned int reserve_lp1 :1; /* never has lp1 */ + unsigned int reserve_lp2 :1; /* never has lp2 */ + unsigned int soft_reboot :1; /* soft reboot */ + void (*fixup)(struct machine_desc *, + struct tag *, char **, + struct meminfo *); + void (*map_io)(void);/* IO mapping function */ + void (*init_irq)(void); +}; + +/* + * Set of macros to define architecture features. This is built into + * a table by the linker. 
+ */ +#define MACHINE_START(_type,_name) \ +const struct machine_desc __mach_desc_##_type \ + __attribute__((__section__(".arch.info"))) = { \ + nr: MACH_TYPE_##_type, \ + name: _name, + +#define MAINTAINER(n) + +#define BOOT_PARAMS(_params) \ + param_offset: _params, + +#define INITIRQ(_func) \ + init_irq: _func, + +#define MACHINE_END \ +}; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/assembler.h linux-2.5.70-bk14/include/asm-arm26/assembler.h --- linux-2.5.70-bk13/include/asm-arm26/assembler.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/assembler.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,106 @@ +/* + * linux/asm/assembler.h + * + * This file contains arm architecture specific defines + * for the different processors. + * + * Do not include any C declarations in this file - it is included by + * assembler source. + */ +#ifndef __ASSEMBLY__ +#error "Only include this from assembly code" +#endif + +/* + * Endian independent macros for shifting bytes within registers. + */ +#define pull lsr +#define push lsl +#define byte(x) (x*8) + +#ifdef __STDC__ +#define LOADREGS(cond, base, reglist...)\ + ldm##cond base,reglist^ + +#define RETINSTR(instr, regs...)\ + instr##s regs +#else +#define LOADREGS(cond, base, reglist...)\ + ldm/**/cond base,reglist^ + +#define RETINSTR(instr, regs...)\ + instr/**/s regs +#endif + +#define MODENOP\ + mov r0, r0 + +#define MODE(savereg,tmpreg,mode) \ + mov savereg, pc; \ + bic tmpreg, savereg, $0x0c000003; \ + orr tmpreg, tmpreg, $mode; \ + teqp tmpreg, $0 + +#define RESTOREMODE(savereg) \ + teqp savereg, $0 + +#define SAVEIRQS(tmpreg) + +#define RESTOREIRQS(tmpreg) + +#define DISABLEIRQS(tmpreg)\ + teqp pc, $0x08000003 + +#define ENABLEIRQS(tmpreg)\ + teqp pc, $0x00000003 + +#define USERMODE(tmpreg)\ + teqp pc, $0x00000000;\ + mov r0, r0 + +#define SVCMODE(tmpreg)\ + teqp pc, $0x00000003;\ + mov r0, r0 + + +/* + * Save the current IRQ state and disable IRQs + * Note that this macro assumes FIQs are enabled, and + * that the processor is in SVC mode. + */ + .macro save_and_disable_irqs, oldcpsr, temp + mov \oldcpsr, pc + orr \temp, \oldcpsr, #0x08000000 + teqp \temp, #0 + .endm + +/* + * Restore interrupt state previously stored in + * a register + * ** Actually do nothing on Arc - hope that the caller uses a MOVS PC soon + * after! + */ + .macro restore_irqs, oldcpsr + @ This be restore_irqs + .endm + +/* + * These two are used to save LR/restore PC over a user-based access. + * The old 26-bit architecture requires that we save lr (R14) + */ + .macro save_lr + str lr, [sp, #-4]! + .endm + + .macro restore_pc + ldmfd sp!, {pc}^ + .endm + +#define USER(x...) \ +9999: x; \ + .section __ex_table,"a"; \ + .align 3; \ + .long 9999b,9001f; \ + .previous + + diff -urN linux-2.5.70-bk13/include/asm-arm26/atomic.h linux-2.5.70-bk14/include/asm-arm26/atomic.h --- linux-2.5.70-bk13/include/asm-arm26/atomic.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/atomic.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,115 @@ +/* + * linux/include/asm-arm26/atomic.h + * + * Copyright (c) 1996 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 27-06-1996 RMK Created + * 13-04-1997 RMK Made functions atomic! + * 07-12-1997 RMK Upgraded for v2.1. 
+ * 26-08-1998 PJB Added #ifdef __KERNEL__ + * + * FIXME - its probably worth seeing what these compile into... + */ +#ifndef __ASM_ARM_ATOMIC_H +#define __ASM_ARM_ATOMIC_H + +#include + +#ifdef CONFIG_SMP +#error SMP is NOT supported +#endif + +typedef struct { volatile int counter; } atomic_t; + +#define ATOMIC_INIT(i) { (i) } + +#ifdef __KERNEL__ +#include + +#define atomic_read(v) ((v)->counter) +#define atomic_set(v,i) (((v)->counter) = (i)) + +static inline void atomic_add(int i, volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter += i; + local_irq_restore(flags); +} + +static inline void atomic_sub(int i, volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter -= i; + local_irq_restore(flags); +} + +static inline void atomic_inc(volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter += 1; + local_irq_restore(flags); +} + +static inline void atomic_dec(volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter -= 1; + local_irq_restore(flags); +} + +static inline int atomic_dec_and_test(volatile atomic_t *v) +{ + unsigned long flags; + int val; + + local_irq_save(flags); + val = v->counter; + v->counter = val -= 1; + local_irq_restore(flags); + + return val == 0; +} + +static inline int atomic_add_negative(int i, volatile atomic_t *v) +{ + unsigned long flags; + int val; + + local_irq_save(flags); + val = v->counter; + v->counter = val += i; + local_irq_restore(flags); + + return val < 0; +} + +static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +{ + unsigned long flags; + + local_irq_save(flags); + *addr &= ~mask; + local_irq_restore(flags); +} + +/* Atomic operations are already serializing on ARM */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#endif +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/bitops.h linux-2.5.70-bk14/include/asm-arm26/bitops.h --- linux-2.5.70-bk13/include/asm-arm26/bitops.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/bitops.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,349 @@ +/* + * Copyright 1995, Russell King. + * Various bits and pieces copyrights include: + * Linus Torvalds (test_bit). + * Big endian support: Copyright 2001, Nicolas Pitre + * reworked by rmk. + * + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). + * + * Please note that the code in this file should never be included + * from user space. Many of these are not implemented in assembler + * since they would be too costly. Also, they require priviledged + * instructions (which are not available from user mode) to ensure + * that they are atomic. + */ + +#ifndef __ASM_ARM_BITOPS_H +#define __ASM_ARM_BITOPS_H + +#ifdef __KERNEL__ + +#include + +#define smp_mb__before_clear_bit() do { } while (0) +#define smp_mb__after_clear_bit() do { } while (0) + +/* + * These functions are the basis of our bit ops. + * First, the atomic bitops. + * + * The endian issue for these functions is handled by the macros below. 
+ */ +static inline void +____atomic_set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + local_irq_save(flags); + *p |= mask; + local_irq_restore(flags); +} + +static inline void +____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + local_irq_save(flags); + *p &= ~mask; + local_irq_restore(flags); +} + +static inline void +____atomic_change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + local_irq_save(flags); + *p ^= mask; + local_irq_restore(flags); +} + +static inline int +____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned int res; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + local_irq_save(flags); + res = *p; + *p = res | mask; + local_irq_restore(flags); + + return res & mask; +} + +static inline int +____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned int res; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + local_irq_save(flags); + res = *p; + *p = res & ~mask; + local_irq_restore(flags); + + return res & mask; +} + +static inline int +____atomic_test_and_change_bit_mask(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned int res; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + local_irq_save(flags); + res = *p; + *p = res ^ mask; + local_irq_restore(flags); + + return res & mask; +} + +/* + * Now the non-atomic variants. We let the compiler handle all + * optimisations for these. These are all _native_ endian. + */ +static inline void __set_bit(int nr, volatile unsigned long *p) +{ + p[nr >> 5] |= (1UL << (nr & 31)); +} + +static inline void __clear_bit(int nr, volatile unsigned long *p) +{ + p[nr >> 5] &= ~(1UL << (nr & 31)); +} + +static inline void __change_bit(int nr, volatile unsigned long *p) +{ + p[nr >> 5] ^= (1UL << (nr & 31)); +} + +static inline int __test_and_set_bit(int nr, volatile unsigned long *p) +{ + unsigned long oldval, mask = 1UL << (nr & 31); + + p += nr >> 5; + + oldval = *p; + *p = oldval | mask; + return oldval & mask; +} + +static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) +{ + unsigned long oldval, mask = 1UL << (nr & 31); + + p += nr >> 5; + + oldval = *p; + *p = oldval & ~mask; + + return oldval & mask; +} + +static inline int __test_and_change_bit(int nr, volatile unsigned long *p) +{ + unsigned long oldval, mask = 1UL << (nr & 31); + + p += nr >> 5; + + oldval = *p; + *p = oldval ^ mask; + + return oldval & mask; +} + +/* + * This routine doesn't need to be atomic. + */ +static inline int __test_bit(int nr, const unsigned long * p) +{ + return p[nr >> 5] & (1UL << (nr & 31)); +} + +/* + * A note about Endian-ness. + * ------------------------- + * + * ------------ physical data bus bits ----------- + * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0 + * byte 3 byte 2 byte 1 byte 0 + * + * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. + */ + +/* + * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. 
+ */ +extern void _set_bit_le(int nr, volatile unsigned long * p); +extern void _clear_bit_le(int nr, volatile unsigned long * p); +extern void _change_bit_le(int nr, volatile unsigned long * p); +extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); +extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); +extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); +extern int _find_first_zero_bit_le(void * p, unsigned size); +extern int _find_next_zero_bit_le(void * p, int size, int offset); + +/* + * The __* form of bitops are non-atomic and may be reordered. + */ +#define ATOMIC_BITOP_LE(name,nr,p) \ + (__builtin_constant_p(nr) ? \ + ____atomic_##name(nr, p) : \ + _##name##_le(nr,p)) + +#define ATOMIC_BITOP_BE(name,nr,p) \ + (__builtin_constant_p(nr) ? \ + ____atomic_##name(nr, p) : \ + _##name##_be(nr,p)) + +#define NONATOMIC_BITOP(name,nr,p) \ + (____nonatomic_##name(nr, p)) + +/* + * These are the little endian, atomic definitions. + */ +#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) +#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) +#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) +#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) +#define test_bit(nr,p) __test_bit(nr,p) +#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) + +#define WORD_BITOFF_TO_LE(x) ((x)) + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + */ +static inline unsigned long ffz(unsigned long word) +{ + int k; + + word = ~word; + k = 31; + if (word & 0x0000ffff) { k -= 16; word <<= 16; } + if (word & 0x00ff0000) { k -= 8; word <<= 8; } + if (word & 0x0f000000) { k -= 4; word <<= 4; } + if (word & 0x30000000) { k -= 2; word <<= 2; } + if (word & 0x40000000) { k -= 1; } + return k; +} + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + */ +static inline unsigned long __ffs(unsigned long word) +{ + int k; + + k = 31; + if (word & 0x0000ffff) { k -= 16; word <<= 16; } + if (word & 0x00ff0000) { k -= 8; word <<= 8; } + if (word & 0x0f000000) { k -= 4; word <<= 4; } + if (word & 0x30000000) { k -= 2; word <<= 2; } + if (word & 0x40000000) { k -= 1; } + return k; +} + +/* + * fls: find last bit set. + */ + +#define fls(x) generic_fls(x) + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ + +#define ffs(x) generic_ffs(x) + +/* + * Find first bit set in a 168-bit bitmap, where the first + * 128 bits are unlikely to be set. + */ +static inline int sched_find_first_bit(unsigned long *b) +{ + unsigned long v; + unsigned int off; + + for (off = 0; v = b[off], off < 4; off++) { + if (unlikely(v)) + break; + } + return __ffs(v) + off * 32; +} + +/* + * hweightN: returns the hamming weight (i.e. the number + * of bits set) of a N-bit word + */ + +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) + +/* + * Ext2 is defined to use little-endian byte ordering. + * These do not need to be atomic. 
+ */ +#define ext2_set_bit(nr,p) \ + __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p) +#define ext2_set_bit_atomic(lock,nr,p) \ + test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define ext2_clear_bit(nr,p) \ + __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p) +#define ext2_clear_bit_atomic(lock,nr,p) \ + test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define ext2_test_bit(nr,p) \ + __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p) +#define ext2_find_first_zero_bit(p,sz) \ + _find_first_zero_bit_le(p,sz) +#define ext2_find_next_zero_bit(p,sz,off) \ + _find_next_zero_bit_le(p,sz,off) + +/* + * Minix is defined to use little-endian byte ordering. + * These do not need to be atomic. + */ +#define minix_set_bit(nr,p) \ + __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p) +#define minix_test_bit(nr,p) \ + __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p) +#define minix_test_and_set_bit(nr,p) \ + __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p) +#define minix_test_and_clear_bit(nr,p) \ + __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p) +#define minix_find_first_zero_bit(p,sz) \ + _find_first_zero_bit_le(p,sz) + +#endif /* __KERNEL__ */ + +#endif /* _ARM_BITOPS_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/bug.h linux-2.5.70-bk14/include/asm-arm26/bug.h --- linux-2.5.70-bk13/include/asm-arm26/bug.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/bug.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,21 @@ +#ifndef _ASMARM_BUG_H +#define _ASMARM_BUG_H + +#include + +#ifdef CONFIG_DEBUG_BUGVERBOSE +extern void __bug(const char *file, int line, void *data); + +/* give file/line information */ +#define BUG() __bug(__FILE__, __LINE__, NULL) +#define PAGE_BUG(page) __bug(__FILE__, __LINE__, page) + +#else + +/* these just cause an oops */ +#define BUG() (*(int *)0 = 0) +#define PAGE_BUG(page) (*(int *)0 = 0) + +#endif + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/bugs.h linux-2.5.70-bk14/include/asm-arm26/bugs.h --- linux-2.5.70-bk13/include/asm-arm26/bugs.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/bugs.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,15 @@ +/* + * linux/include/asm-arm/bugs.h + * + * Copyright (C) 1995 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_BUGS_H +#define __ASM_BUGS_H + +#define check_bugs() cpu_check_bugs() + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/byteorder.h linux-2.5.70-bk14/include/asm-arm26/byteorder.h --- linux-2.5.70-bk13/include/asm-arm26/byteorder.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/byteorder.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,24 @@ +/* + * linux/include/asm-arm/byteorder.h + * + * ARM Endian-ness. 
In little endian mode, the data bus is connected such + * that byte accesses appear as: + * 0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31 + * and word accesses (data or instruction) appear as: + * d0...d31 + * + */ +#ifndef __ASM_ARM_BYTEORDER_H +#define __ASM_ARM_BYTEORDER_H + +#include + +#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +# define __SWAB_64_THRU_32__ +#endif + +#include + +#endif + diff -urN linux-2.5.70-bk13/include/asm-arm26/cache.h linux-2.5.70-bk14/include/asm-arm26/cache.h --- linux-2.5.70-bk13/include/asm-arm26/cache.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/cache.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,19 @@ +/* + * linux/include/asm-arm26/cache.h + */ +#ifndef __ASMARM_CACHE_H +#define __ASMARM_CACHE_H + +#define L1_CACHE_BYTES 32 +#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#ifdef MODULE +#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES))) +#else +#define __cacheline_aligned \ + __attribute__((__aligned__(L1_CACHE_BYTES), \ + __section__(".data.cacheline_aligned"))) +#endif + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/cacheflush.h linux-2.5.70-bk14/include/asm-arm26/cacheflush.h --- linux-2.5.70-bk13/include/asm-arm26/cacheflush.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/cacheflush.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,44 @@ +/* + * linux/include/asm-arm/cacheflush.h + * + * Copyright (C) 2000-2002 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ARM26 cache 'functions' + * + */ + +#ifndef _ASMARM_CACHEFLUSH_H +#define _ASMARM_CACHEFLUSH_H + +#if 1 //FIXME - BAD INCLUDES!!! +#include +#include +#endif + +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_range(vma,start,end) do { } while (0) +#define flush_cache_page(vma,vmaddr) do { } while (0) +#define flush_page_to_ram(page) do { } while (0) + +#define invalidate_dcache_range(start,end) do { } while (0) +#define clean_dcache_range(start,end) do { } while (0) +#define flush_dcache_range(start,end) do { } while (0) +#define flush_dcache_page(page) do { } while (0) +#define clean_dcache_entry(_s) do { } while (0) +#define clean_cache_entry(_start) do { } while (0) + +#define flush_icache_user_range(start,end, bob, fred) do { } while (0) +#define flush_icache_range(start,end) do { } while (0) +#define flush_icache_page(vma,page) do { } while (0) + +/* DAG: ARM3 will flush cache on MEMC updates anyway? so don't bother */ +/* IM : Yes, it will, but only if setup to do so (we do this). */ +#define clean_cache_area(_start,_size) do { } while (0) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/calls.h linux-2.5.70-bk14/include/asm-arm26/calls.h --- linux-2.5.70-bk13/include/asm-arm26/calls.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/calls.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,262 @@ +/* + * linux/arch/arm26/lib/calls.h + * + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * FIXME + * This file is included twice in entry-common.S which may not be necessary + */ +#ifndef NR_syscalls +#define NR_syscalls 256 +#else + +__syscall_start: +/* 0 */ .long sys_ni_syscall + .long sys_exit + .long sys_fork_wrapper + .long sys_read + .long sys_write +/* 5 */ .long sys_open + .long sys_close + .long sys_ni_syscall /* was sys_waitpid */ + .long sys_creat + .long sys_link +/* 10 */ .long sys_unlink + .long sys_execve_wrapper + .long sys_chdir + .long sys_time /* used by libc4 */ + .long sys_mknod +/* 15 */ .long sys_chmod + .long sys_lchown16 + .long sys_ni_syscall /* was sys_break */ + .long sys_ni_syscall /* was sys_stat */ + .long sys_lseek +/* 20 */ .long sys_getpid + .long sys_mount + .long sys_oldumount /* used by libc4 */ + .long sys_setuid16 + .long sys_getuid16 +/* 25 */ .long sys_stime + .long sys_ptrace + .long sys_alarm /* used by libc4 */ + .long sys_ni_syscall /* was sys_fstat */ + .long sys_pause +/* 30 */ .long sys_utime /* used by libc4 */ + .long sys_ni_syscall /* was sys_stty */ + .long sys_ni_syscall /* was sys_getty */ + .long sys_access + .long sys_nice +/* 35 */ .long sys_ni_syscall /* was sys_ftime */ + .long sys_sync + .long sys_kill + .long sys_rename + .long sys_mkdir +/* 40 */ .long sys_rmdir + .long sys_dup + .long sys_pipe + .long sys_times + .long sys_ni_syscall /* was sys_prof */ +/* 45 */ .long sys_brk + .long sys_setgid16 + .long sys_getgid16 + .long sys_ni_syscall /* was sys_signal */ + .long sys_geteuid16 +/* 50 */ .long sys_getegid16 + .long sys_acct + .long sys_umount + .long sys_ni_syscall /* was sys_lock */ + .long sys_ioctl +/* 55 */ .long sys_fcntl + .long sys_ni_syscall /* was sys_mpx */ + .long sys_setpgid + .long sys_ni_syscall /* was sys_ulimit */ + .long sys_ni_syscall /* was sys_olduname */ +/* 60 */ .long sys_umask + .long sys_chroot + .long sys_ustat + .long sys_dup2 + .long sys_getppid +/* 65 */ .long sys_getpgrp + .long sys_setsid + .long sys_sigaction + .long sys_ni_syscall /* was sys_sgetmask */ + .long sys_ni_syscall /* was sys_ssetmask */ +/* 70 */ .long sys_setreuid16 + .long sys_setregid16 + .long sys_sigsuspend_wrapper + .long sys_sigpending + .long sys_sethostname +/* 75 */ .long sys_setrlimit + .long sys_old_getrlimit /* used by libc4 */ + .long sys_getrusage + .long sys_gettimeofday + .long sys_settimeofday +/* 80 */ .long sys_getgroups16 + .long sys_setgroups16 + .long old_select /* used by libc4 */ + .long sys_symlink + .long sys_ni_syscall /* was sys_lstat */ +/* 85 */ .long sys_readlink + .long sys_uselib + .long sys_swapon + .long sys_reboot + .long old_readdir /* used by libc4 */ +/* 90 */ .long old_mmap /* used by libc4 */ + .long sys_munmap + .long sys_truncate + .long sys_ftruncate + .long sys_fchmod +/* 95 */ .long sys_fchown16 + .long sys_getpriority + .long sys_setpriority + .long sys_ni_syscall /* was sys_profil */ + .long sys_statfs +/* 100 */ .long sys_fstatfs + .long sys_ni_syscall + .long sys_socketcall + .long sys_syslog + .long sys_setitimer +/* 105 */ .long sys_getitimer + .long sys_newstat + .long sys_newlstat + .long sys_newfstat + .long sys_ni_syscall /* was sys_uname */ +/* 110 */ .long sys_ni_syscall /* was sys_iopl */ + .long sys_vhangup + .long sys_ni_syscall + .long sys_syscall /* call a syscall */ + .long sys_wait4 +/* 115 */ .long sys_swapoff + .long sys_sysinfo + .long sys_ipc + .long sys_fsync + .long sys_sigreturn_wrapper +/* 120 */ .long sys_clone_wapper + .long sys_setdomainname + .long sys_newuname + .long sys_ni_syscall + .long sys_adjtimex +/* 125 */ .long sys_mprotect 
+ .long sys_sigprocmask + .long sys_ni_syscall /* WAS: sys_create_module */ + .long sys_init_module + .long sys_delete_module +/* 130 */ .long sys_ni_syscall /* WAS: sys_get_kernel_syms */ + .long sys_quotactl + .long sys_getpgid + .long sys_fchdir + .long sys_bdflush +/* 135 */ .long sys_sysfs + .long sys_personality + .long sys_ni_syscall /* .long _sys_afs_syscall */ + .long sys_setfsuid16 + .long sys_setfsgid16 +/* 140 */ .long sys_llseek + .long sys_getdents + .long sys_select + .long sys_flock + .long sys_msync +/* 145 */ .long sys_readv + .long sys_writev + .long sys_getsid + .long sys_fdatasync + .long sys_sysctl +/* 150 */ .long sys_mlock + .long sys_munlock + .long sys_mlockall + .long sys_munlockall + .long sys_sched_setparam +/* 155 */ .long sys_sched_getparam + .long sys_sched_setscheduler + .long sys_sched_getscheduler + .long sys_sched_yield + .long sys_sched_get_priority_max +/* 160 */ .long sys_sched_get_priority_min + .long sys_sched_rr_get_interval + .long sys_nanosleep + .long sys_arm_mremap + .long sys_setresuid16 +/* 165 */ .long sys_getresuid16 + .long sys_ni_syscall + .long sys_ni_syscall /* WAS: sys_query_module */ + .long sys_poll + .long sys_nfsservctl +/* 170 */ .long sys_setresgid16 + .long sys_getresgid16 + .long sys_prctl + .long sys_rt_sigreturn_wrapper + .long sys_rt_sigaction +/* 175 */ .long sys_rt_sigprocmask + .long sys_rt_sigpending + .long sys_rt_sigtimedwait + .long sys_rt_sigqueueinfo + .long sys_rt_sigsuspend_wrapper +/* 180 */ .long sys_pread64 + .long sys_pwrite64 + .long sys_chown16 + .long sys_getcwd + .long sys_capget +/* 185 */ .long sys_capset + .long sys_sigaltstack_wrapper + .long sys_sendfile + .long sys_ni_syscall + .long sys_ni_syscall +/* 190 */ .long sys_vfork_wrapper + .long sys_getrlimit + .long sys_mmap2 + .long sys_truncate64 + .long sys_ftruncate64 +/* 195 */ .long sys_stat64 + .long sys_lstat64 + .long sys_fstat64 + .long sys_lchown + .long sys_getuid +/* 200 */ .long sys_getgid + .long sys_geteuid + .long sys_getegid + .long sys_setreuid + .long sys_setregid +/* 205 */ .long sys_getgroups + .long sys_setgroups + .long sys_fchown + .long sys_setresuid + .long sys_getresuid +/* 210 */ .long sys_setresgid + .long sys_getresgid + .long sys_chown + .long sys_setuid + .long sys_setgid +/* 215 */ .long sys_setfsuid + .long sys_setfsgid + .long sys_getdents64 + .long sys_pivot_root + .long sys_mincore +/* 220 */ .long sys_madvise + .long sys_fcntl64 + .long sys_ni_syscall /* TUX */ + .long sys_ni_syscall /* WAS: sys_security */ + .long sys_gettid +/* 225 */ .long sys_readahead + .long sys_setxattr + .long sys_lsetxattr + .long sys_fsetxattr + .long sys_getxattr +/* 230 */ .long sys_lgetxattr + .long sys_fgetxattr + .long sys_listxattr + .long sys_llistxattr + .long sys_flistxattr +/* 235 */ .long sys_removexattr + .long sys_lremovexattr + .long sys_fremovexattr + .long sys_tkill +__syscall_end: + + .rept NR_syscalls - (__syscall_end - __syscall_start) / 4 + .long sys_ni_syscall + .endr +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/checksum.h linux-2.5.70-bk14/include/asm-arm26/checksum.h --- linux-2.5.70-bk13/include/asm-arm26/checksum.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/checksum.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,158 @@ +/* + * linux/include/asm-arm/checksum.h + * + * IP checksum routines + * + * Copyright (C) Original authors of ../asm-i386/checksum.h + * Copyright (C) 1996-1999 Russell King + */ +#ifndef __ASM_ARM_CHECKSUM_H +#define __ASM_ARM_CHECKSUM_H + +/* + * 
computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums, and handles user-space pointer exceptions correctly, when needed. + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +unsigned int +csum_partial_copy_nocheck(const char *src, char *dst, int len, int sum); + +unsigned int +csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr); + +/* + * These are the old (and unsafe) way of doing checksums, a warning message will be + * printed if they are used and an exception occurs. + * + * these functions should go away after some time. + */ +#define csum_partial_copy(src,dst,len,sum) csum_partial_copy_nocheck(src,dst,len,sum) + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +static inline unsigned short +ip_fast_csum(unsigned char * iph, unsigned int ihl) +{ + unsigned int sum, tmp1; + + __asm__ __volatile__( + "ldr %0, [%1], #4 @ ip_fast_csum \n\ + ldr %3, [%1], #4 \n\ + sub %2, %2, #5 \n\ + adds %0, %0, %3 \n\ + ldr %3, [%1], #4 \n\ + adcs %0, %0, %3 \n\ + ldr %3, [%1], #4 \n\ +1: adcs %0, %0, %3 \n\ + ldr %3, [%1], #4 \n\ + tst %2, #15 @ do this carefully \n\ + subne %2, %2, #1 @ without destroying \n\ + bne 1b @ the carry flag \n\ + adcs %0, %0, %3 \n\ + adc %0, %0, #0 \n\ + adds %0, %0, %0, lsl #16 \n\ + addcs %0, %0, #0x10000 \n\ + mvn %0, %0 \n\ + mov %0, %0, lsr #16" + : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1) + : "1" (iph), "2" (ihl) + : "cc"); + return sum; +} + +/* + * Fold a partial checksum without adding pseudo headers + */ +static inline unsigned int +csum_fold(unsigned int sum) +{ + __asm__( + "adds %0, %1, %1, lsl #16 @ csum_fold \n\ + addcs %0, %0, #0x10000" + : "=r" (sum) + : "r" (sum) + : "cc"); + return (~sum) >> 16; +} + +static inline unsigned int +csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, + unsigned int proto, unsigned int sum) +{ + __asm__( + "adds %0, %1, %2 @ csum_tcpudp_nofold \n\ + adcs %0, %0, %3 \n\ + adcs %0, %0, %4 \n\ + adcs %0, %0, %5 \n\ + adc %0, %0, #0" + : "=&r"(sum) + : "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len) << 16), "Ir" (proto << 8) + : "cc"); + return sum; +} +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline unsigned short int +csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, + unsigned int proto, unsigned int sum) +{ + __asm__( + "adds %0, %1, %2 @ csum_tcpudp_magic \n\ + adcs %0, %0, %3 \n\ + adcs %0, %0, %4 \n\ + adcs %0, %0, %5 \n\ + adc %0, %0, #0 \n\ + adds %0, %0, %0, lsl #16 \n\ + addcs %0, %0, #0x10000 \n\ + mvn %0, %0" + : "=&r"(sum) + : "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (proto << 8) + : "cc"); + return sum >> 16; +} + + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +static inline unsigned short +ip_compute_csum(unsigned char * buff, int len) +{ + return csum_fold(csum_partial(buff, 
len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +extern unsigned long +__csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len, + __u32 proto, unsigned int sum); + +static inline unsigned short int +csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len, + unsigned short proto, unsigned int sum) +{ + return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len), + htonl(proto), sum)); +} +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/constants.h linux-2.5.70-bk14/include/asm-arm26/constants.h --- linux-2.5.70-bk13/include/asm-arm26/constants.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/constants.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,29 @@ +#ifndef __ASM_OFFSETS_H__ +#define __ASM_OFFSETS_H__ +/* + * DO NOT MODIFY. + * + * This file was generated by arch/arm26/Makefile + * + */ + +#define TSK_USED_MATH 788 /* offsetof(struct task_struct, used_math) */ +#define TSK_ACTIVE_MM 96 /* offsetof(struct task_struct, active_mm) */ + +#define VMA_VM_MM 0 /* offsetof(struct vm_area_struct, vm_mm) */ +#define VMA_VM_FLAGS 20 /* offsetof(struct vm_area_struct, vm_flags) */ + +#define VM_EXEC 4 /* VM_EXEC */ + + +#define PAGE_PRESENT 1 /* L_PTE_PRESENT */ +#define PAGE_READONLY 95 /* PAGE_READONLY */ +#define PAGE_NOT_USER 3 /* PAGE_NONE */ +#define PAGE_OLD 3 /* PAGE_NONE */ +#define PAGE_CLEAN 128 /* L_PTE_DIRTY */ + +#define PAGE_SZ 32768 /* PAGE_SIZE */ + +#define SYS_ERROR0 10420224 /* 0x9f0000 */ + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/current.h linux-2.5.70-bk14/include/asm-arm26/current.h --- linux-2.5.70-bk13/include/asm-arm26/current.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/current.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,15 @@ +#ifndef _ASMARM_CURRENT_H +#define _ASMARM_CURRENT_H + +#include + +static inline struct task_struct *get_current(void) __attribute__ (( __const__ )); + +static inline struct task_struct *get_current(void) +{ + return current_thread_info()->task; +} + +#define current (get_current()) + +#endif /* _ASMARM_CURRENT_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/delay.h linux-2.5.70-bk14/include/asm-arm26/delay.h --- linux-2.5.70-bk13/include/asm-arm26/delay.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/delay.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,34 @@ +#ifndef __ASM_ARM_DELAY_H +#define __ASM_ARM_DELAY_H + +/* + * Copyright (C) 1995 Russell King + * + * Delay routines, using a pre-computed "loops_per_second" value. + */ + +extern void __delay(int loops); + +/* + * division by multiplication: you don't have to worry about + * loss of precision. + * + * Use only for very small delays ( < 1 msec). Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. This is a "reasonable" implementation, though (and the + * first constant multiplications gets optimized away if the delay is + * a constant) + * + * FIXME - lets improve it then... 
+ */ +extern void udelay(unsigned long usecs); + +static inline unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c) +{ + return a * b / c; +} + + + +#endif /* defined(_ARM_DELAY_H) */ + diff -urN linux-2.5.70-bk13/include/asm-arm26/div64.h linux-2.5.70-bk14/include/asm-arm26/div64.h --- linux-2.5.70-bk13/include/asm-arm26/div64.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/div64.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,14 @@ +#ifndef __ASM_ARM_DIV64 +#define __ASM_ARM_DIV64 + +/* We're not 64-bit, but... */ +#define do_div(n,base) \ +({ \ + int __res; \ + __res = ((unsigned long)n) % (unsigned int)base; \ + n = ((unsigned long)n) / (unsigned int)base; \ + __res; \ +}) + +#endif + diff -urN linux-2.5.70-bk13/include/asm-arm26/dma.h linux-2.5.70-bk14/include/asm-arm26/dma.h --- linux-2.5.70-bk13/include/asm-arm26/dma.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/dma.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,184 @@ +#ifndef __ASM_ARM_DMA_H +#define __ASM_ARM_DMA_H + +typedef unsigned int dmach_t; + +#include +#include +#include +#include +#include + +// FIXME - do we really need this? arm26 cant do 'proper' DMA + +typedef struct dma_struct dma_t; +typedef unsigned int dmamode_t; + +struct dma_ops { + int (*request)(dmach_t, dma_t *); /* optional */ + void (*free)(dmach_t, dma_t *); /* optional */ + void (*enable)(dmach_t, dma_t *); /* mandatory */ + void (*disable)(dmach_t, dma_t *); /* mandatory */ + int (*residue)(dmach_t, dma_t *); /* optional */ + int (*setspeed)(dmach_t, dma_t *, int); /* optional */ + char *type; +}; + +struct dma_struct { + struct scatterlist buf; /* single DMA */ + int sgcount; /* number of DMA SG */ + struct scatterlist *sg; /* DMA Scatter-Gather List */ + + unsigned int active:1; /* Transfer active */ + unsigned int invalid:1; /* Address/Count changed */ + unsigned int using_sg:1; /* using scatter list? */ + dmamode_t dma_mode; /* DMA mode */ + int speed; /* DMA speed */ + + unsigned int lock; /* Device is allocated */ + const char *device_id; /* Device name */ + + unsigned int dma_base; /* Controller base address */ + int dma_irq; /* Controller IRQ */ + int state; /* Controller state */ + struct scatterlist cur_sg; /* Current controller buffer */ + + struct dma_ops *d_ops; +}; + +/* Prototype: void arch_dma_init(dma) + * Purpose : Initialise architecture specific DMA + * Params : dma - pointer to array of DMA structures + */ +extern void arch_dma_init(dma_t *dma); + +extern void isa_init_dma(dma_t *dma); + + +#define MAX_DMA_ADDRESS 0x03000000 +#define MAX_DMA_CHANNELS 3 + +/* ARC */ +#define DMA_VIRTUAL_FLOPPY0 0 +#define DMA_VIRTUAL_FLOPPY1 1 +#define DMA_VIRTUAL_SOUND 2 + +/* A5K */ +#define DMA_FLOPPY 0 + +/* + * DMA modes + */ +#define DMA_MODE_MASK 3 + +#define DMA_MODE_READ 0 +#define DMA_MODE_WRITE 1 +#define DMA_MODE_CASCADE 2 +#define DMA_AUTOINIT 4 + +extern spinlock_t dma_spin_lock; + +static inline unsigned long claim_dma_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static inline void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + */ +#define clear_dma_ff(channel) + +/* Set only the page register bits of the transfer address. 
+ * + * NOTE: This is an architecture specific function, and should + * be hidden from the drivers + */ +extern void set_dma_page(dmach_t channel, char pagenr); + +/* Request a DMA channel + * + * Some architectures may need to do allocate an interrupt + */ +extern int request_dma(dmach_t channel, const char * device_id); + +/* Free a DMA channel + * + * Some architectures may need to do free an interrupt + */ +extern void free_dma(dmach_t channel); + +/* Enable DMA for this channel + * + * On some architectures, this may have other side effects like + * enabling an interrupt and setting the DMA registers. + */ +extern void enable_dma(dmach_t channel); + +/* Disable DMA for this channel + * + * On some architectures, this may have other side effects like + * disabling an interrupt or whatever. + */ +extern void disable_dma(dmach_t channel); + +/* Test whether the specified channel has an active DMA transfer + */ +extern int dma_channel_active(dmach_t channel); + +/* Set the DMA scatter gather list for this channel + * + * This should not be called if a DMA channel is enabled, + * especially since some DMA architectures don't update the + * DMA address immediately, but defer it to the enable_dma(). + */ +extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg); + +/* Set the DMA address for this channel + * + * This should not be called if a DMA channel is enabled, + * especially since some DMA architectures don't update the + * DMA address immediately, but defer it to the enable_dma(). + */ +extern void set_dma_addr(dmach_t channel, unsigned long physaddr); + +/* Set the DMA byte count for this channel + * + * This should not be called if a DMA channel is enabled, + * especially since some DMA architectures don't update the + * DMA count immediately, but defer it to the enable_dma(). + */ +extern void set_dma_count(dmach_t channel, unsigned long count); + +/* Set the transfer direction for this channel + * + * This should not be called if a DMA channel is enabled, + * especially since some DMA architectures don't update the + * DMA transfer direction immediately, but defer it to the + * enable_dma(). + */ +extern void set_dma_mode(dmach_t channel, dmamode_t mode); + +/* Set the transfer speed for this channel + */ +extern void set_dma_speed(dmach_t channel, int cycle_ns); + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + */ +extern int get_dma_residue(dmach_t channel); + +#ifndef NO_DMA +#define NO_DMA 255 +#endif + +#endif /* _ARM_DMA_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/ecard.h linux-2.5.70-bk14/include/asm-arm26/ecard.h --- linux-2.5.70-bk13/include/asm-arm26/ecard.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ecard.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,291 @@ +/* + * linux/include/asm-arm26/ecard.h + * + * definitions for expansion cards + * + * This is a new system as from Linux 1.2.3 + * + * Changelog: + * 11-12-1996 RMK Further minor improvements + * 12-09-1997 RMK Added interrupt enable/disable for card level + * 18-05-2003 IM Adjusted for ARM26 + * + * Reference: Acorns Risc OS 3 Programmers Reference Manuals. 
+ */ + +#ifndef __ASM_ECARD_H +#define __ASM_ECARD_H + +/* + * Currently understood cards (but not necessarily + * supported): + * Manufacturer Product ID + */ +#define MANU_ACORN 0x0000 +#define PROD_ACORN_SCSI 0x0002 +#define PROD_ACORN_ETHER1 0x0003 +#define PROD_ACORN_MFM 0x000b + +#define MANU_ANT2 0x0011 +#define PROD_ANT_ETHER3 0x00a4 + +#define MANU_ATOMWIDE 0x0017 +#define PROD_ATOMWIDE_3PSERIAL 0x0090 + +#define MANU_IRLAM_INSTRUMENTS 0x001f +#define MANU_IRLAM_INSTRUMENTS_ETHERN 0x5678 + +#define MANU_OAK 0x0021 +#define PROD_OAK_SCSI 0x0058 + +#define MANU_MORLEY 0x002b +#define PROD_MORLEY_SCSI_UNCACHED 0x0067 + +#define MANU_CUMANA 0x003a +#define PROD_CUMANA_SCSI_2 0x003a +#define PROD_CUMANA_SCSI_1 0x00a0 + +#define MANU_ICS 0x003c +#define PROD_ICS_IDE 0x00ae + +#define MANU_ICS2 0x003d +#define PROD_ICS2_IDE 0x00ae + +#define MANU_SERPORT 0x003f +#define PROD_SERPORT_DSPORT 0x00b9 + +#define MANU_ARXE 0x0041 +#define PROD_ARXE_SCSI 0x00be + +#define MANU_I3 0x0046 +#define PROD_I3_ETHERLAN500 0x00d4 +#define PROD_I3_ETHERLAN600 0x00ec +#define PROD_I3_ETHERLAN600A 0x011e + +#define MANU_ANT 0x0053 +#define PROD_ANT_ETHERM 0x00d8 +#define PROD_ANT_ETHERB 0x00e4 + +#define MANU_ALSYSTEMS 0x005b +#define PROD_ALSYS_SCSIATAPI 0x0107 + +#define MANU_MCS 0x0063 +#define PROD_MCS_CONNECT32 0x0125 + +#define MANU_EESOX 0x0064 +#define PROD_EESOX_SCSI2 0x008c + +#define MANU_YELLOWSTONE 0x0096 +#define PROD_YELLOWSTONE_RAPIDE32 0x0120 + +#define MANU_SIMTEC 0x005f +#define PROD_SIMTEC_IDE8 0x0130 +#define PROD_SIMTEC_IDE16 0x0131 + + +#ifdef ECARD_C +#define CONST +#else +#define CONST const +#endif + +#define MAX_ECARDS 4 + +typedef enum { /* Cards address space */ + ECARD_IOC, + ECARD_MEMC, + ECARD_EASI +} card_type_t; + +typedef enum { /* Speed for ECARD_IOC space */ + ECARD_SLOW = 0, + ECARD_MEDIUM = 1, + ECARD_FAST = 2, + ECARD_SYNC = 3 +} card_speed_t; + +struct ecard_id { /* Card ID structure */ + unsigned short manufacturer; + unsigned short product; + void *data; +}; + +struct in_ecid { /* Packed card ID information */ + unsigned short product; /* Product code */ + unsigned short manufacturer; /* Manufacturer code */ + unsigned char id:4; /* Simple ID */ + unsigned char cd:1; /* Chunk dir present */ + unsigned char is:1; /* Interrupt status pointers */ + unsigned char w:2; /* Width */ + unsigned char country; /* Country */ + unsigned char irqmask; /* IRQ mask */ + unsigned char fiqmask; /* FIQ mask */ + unsigned long irqoff; /* IRQ offset */ + unsigned long fiqoff; /* FIQ offset */ +}; + +typedef struct expansion_card ecard_t; +typedef unsigned long *loader_t; + +typedef struct { /* Card handler routines */ + void (*irqenable)(ecard_t *ec, int irqnr); + void (*irqdisable)(ecard_t *ec, int irqnr); + int (*irqpending)(ecard_t *ec); + void (*fiqenable)(ecard_t *ec, int fiqnr); + void (*fiqdisable)(ecard_t *ec, int fiqnr); + int (*fiqpending)(ecard_t *ec); +} expansioncard_ops_t; + +#define ECARD_NUM_RESOURCES (6) + +#define ECARD_RES_IOCSLOW (0) +#define ECARD_RES_IOCMEDIUM (1) +#define ECARD_RES_IOCFAST (2) +#define ECARD_RES_IOCSYNC (3) +#define ECARD_RES_MEMC (4) +#define ECARD_RES_EASI (5) + +#define ecard_resource_start(ec,nr) ((ec)->resource[nr].start) +#define ecard_resource_end(ec,nr) ((ec)->resource[nr].end) +#define ecard_resource_len(ec,nr) ((ec)->resource[nr].end - \ + (ec)->resource[nr].start + 1) + +/* + * This contains all the info needed on an expansion card + */ +struct expansion_card { + struct expansion_card *next; + + struct device dev; + struct resource 
resource[ECARD_NUM_RESOURCES]; + + /* Public data */ + volatile unsigned char *irqaddr; /* address of IRQ register */ + volatile unsigned char *fiqaddr; /* address of FIQ register */ + unsigned char irqmask; /* IRQ mask */ + unsigned char fiqmask; /* FIQ mask */ + unsigned char claimed; /* Card claimed? */ + + void *irq_data; /* Data for use for IRQ by card */ + void *fiq_data; /* Data for use for FIQ by card */ + const expansioncard_ops_t *ops; /* Enable/Disable Ops for card */ + + CONST unsigned int slot_no; /* Slot number */ + CONST unsigned int dma; /* DMA number (for request_dma) */ + CONST unsigned int irq; /* IRQ number (for request_irq) */ + CONST unsigned int fiq; /* FIQ number (for request_irq) */ + CONST card_type_t type; /* Type of card */ + CONST struct in_ecid cid; /* Card Identification */ + + /* Private internal data */ + const char *card_desc; /* Card description */ + CONST unsigned int podaddr; /* Base Linux address for card */ + CONST loader_t loader; /* loader program */ + u64 dma_mask; +}; + +struct in_chunk_dir { + unsigned int start_offset; + union { + unsigned char string[256]; + unsigned char data[1]; + } d; +}; + +/* + * ecard_claim: claim an expansion card entry + * FIXME - are these atomic / called with interrupts off ? + */ +#define ecard_claim(ec) ((ec)->claimed = 1) + +/* + * ecard_release: release an expansion card entry + */ +#define ecard_release(ec) ((ec)->claimed = 0) + +/* + * Read a chunk from an expansion card + * cd : where to put read data + * ec : expansion card info struct + * id : id number to find + * num: (n+1)'th id to find. + */ +extern int ecard_readchunk (struct in_chunk_dir *cd, struct expansion_card *ec, int id, int num); + +/* + * Obtain the address of a card + */ +extern unsigned int ecard_address (struct expansion_card *ec, card_type_t card_type, card_speed_t speed); + +#ifdef ECARD_C +/* Definitions internal to ecard.c - for it's use only!! 
+ * + * External expansion card header as read from the card + */ +struct ex_ecid { + unsigned char r_irq:1; + unsigned char r_zero:1; + unsigned char r_fiq:1; + unsigned char r_id:4; + unsigned char r_a:1; + + unsigned char r_cd:1; + unsigned char r_is:1; + unsigned char r_w:2; + unsigned char r_r1:4; + + unsigned char r_r2:8; + + unsigned char r_prod[2]; + + unsigned char r_manu[2]; + + unsigned char r_country; + + unsigned char r_irqmask; + unsigned char r_irqoff[3]; + + unsigned char r_fiqmask; + unsigned char r_fiqoff[3]; +}; + +/* + * Chunk directory entry as read from the card + */ +struct ex_chunk_dir { + unsigned char r_id; + unsigned char r_len[3]; + unsigned long r_start; + union { + char string[256]; + char data[1]; + } d; +#define c_id(x) ((x)->r_id) +#define c_len(x) ((x)->r_len[0]|((x)->r_len[1]<<8)|((x)->r_len[2]<<16)) +#define c_start(x) ((x)->r_start) +}; + +#endif + +extern struct bus_type ecard_bus_type; + +#define ECARD_DEV(_d) container_of((_d), struct expansion_card, dev) + +struct ecard_driver { + int (*probe)(struct expansion_card *, const struct ecard_id *id); + void (*remove)(struct expansion_card *); + void (*shutdown)(struct expansion_card *); + const struct ecard_id *id_table; + unsigned int id; + struct device_driver drv; +}; + +#define ECARD_DRV(_d) container_of((_d), struct ecard_driver, drv) + +#define ecard_set_drvdata(ec,data) dev_set_drvdata(&(ec)->dev, (data)) +#define ecard_get_drvdata(ec) dev_get_drvdata(&(ec)->dev) + +int ecard_register_driver(struct ecard_driver *); +void ecard_remove_driver(struct ecard_driver *); + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/elf.h linux-2.5.70-bk14/include/asm-arm26/elf.h --- linux-2.5.70-bk13/include/asm-arm26/elf.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/elf.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,77 @@ +#ifndef __ASMARM_ELF_H +#define __ASMARM_ELF_H + +/* + * ELF register definitions.. + */ + +#include +#include + +//FIXME - is it always 32K ? + +#define ELF_EXEC_PAGESIZE 32768 +#define SET_PERSONALITY(ex,ibcs2) set_personality(PER_LINUX) + +typedef unsigned long elf_greg_t; +typedef unsigned long elf_freg_t[3]; + +#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef struct { void *null; } elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + * We can only execute 26-bit code. + */ + +#define EM_ARM 40 +#define EF_ARM_APCS26 0x08 + +//#define elf_check_arch(x) ( ((x)->e_machine == EM_ARM) && ((x)->e_flags & EF_ARM_APCS26) ) FIXME!!!!! - this looks OK, but the flags seem to be wrong. +#define elf_check_arch(x) (1) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB; +#define ELF_ARCH EM_ARM + +#define USE_ELF_CORE_DUMP + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +/* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we + have no such handler. 
*/ +#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0 + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. */ + +extern unsigned int elf_hwcap; +#define ELF_HWCAP (elf_hwcap) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. */ + +/* For now we just provide a fairly general string that describes the + processor family. This could be made more specific later if someone + implemented optimisations that require it. 26-bit CPUs give you + "v1l" for ARM2 (no SWP) and "v2l" for anything else (ARM1 isn't + supported). + */ + +#define ELF_PLATFORM_SIZE 8 +extern char elf_platform[]; +#define ELF_PLATFORM (elf_platform) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/errno.h linux-2.5.70-bk14/include/asm-arm26/errno.h --- linux-2.5.70-bk13/include/asm-arm26/errno.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/errno.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,6 @@ +#ifndef _ARM_ERRNO_H +#define _ARM_ERRNO_H + +#include + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/fcntl.h linux-2.5.70-bk14/include/asm-arm26/fcntl.h --- linux-2.5.70-bk13/include/asm-arm26/fcntl.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/fcntl.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,86 @@ +#ifndef _ARM_FCNTL_H +#define _ARM_FCNTL_H + +/* open/fcntl - O_SYNC is only implemented on blocks devices and on files + located on an ext2 file system */ +#define O_ACCMODE 0003 +#define O_RDONLY 00 +#define O_WRONLY 01 +#define O_RDWR 02 +#define O_CREAT 0100 /* not fcntl */ +#define O_EXCL 0200 /* not fcntl */ +#define O_NOCTTY 0400 /* not fcntl */ +#define O_TRUNC 01000 /* not fcntl */ +#define O_APPEND 02000 +#define O_NONBLOCK 04000 +#define O_NDELAY O_NONBLOCK +#define O_SYNC 010000 +#define FASYNC 020000 /* fcntl, for BSD compatibility */ +#define O_DIRECTORY 040000 /* must be a directory */ +#define O_NOFOLLOW 0100000 /* don't follow links */ +#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ +#define O_LARGEFILE 0400000 + +#define F_DUPFD 0 /* dup */ +#define F_GETFD 1 /* get close_on_exec */ +#define F_SETFD 2 /* set/clear close_on_exec */ +#define F_GETFL 3 /* get file->f_flags */ +#define F_SETFL 4 /* set file->f_flags */ +#define F_GETLK 5 +#define F_SETLK 6 +#define F_SETLKW 7 + +#define F_SETOWN 8 /* for sockets. */ +#define F_GETOWN 9 /* for sockets. */ +#define F_SETSIG 10 /* for sockets. */ +#define F_GETSIG 11 /* for sockets. */ + +#define F_GETLK64 12 /* using 'struct flock64' */ +#define F_SETLK64 13 +#define F_SETLKW64 14 + +/* for F_[GET|SET]FL */ +#define FD_CLOEXEC 1 /* actually anything with low bit set goes */ + +/* for posix fcntl() and lockf() */ +#define F_RDLCK 0 +#define F_WRLCK 1 +#define F_UNLCK 2 + +/* for old implementation of bsd flock () */ +#define F_EXLCK 4 /* or 3 */ +#define F_SHLCK 8 /* or 4 */ + +/* for leases */ +#define F_INPROGRESS 16 + +/* operations for bsd flock(), also used by the kernel implementation */ +#define LOCK_SH 1 /* shared lock */ +#define LOCK_EX 2 /* exclusive lock */ +#define LOCK_NB 4 /* or'd with one of the above to prevent + blocking */ +#define LOCK_UN 8 /* remove lock */ + +#define LOCK_MAND 32 /* This is a mandatory flock */ +#define LOCK_READ 64 /* ... Which allows concurrent read operations */ +#define LOCK_WRITE 128 /* ... 
Which allows concurrent write operations */ +#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */ + +struct flock { + short l_type; + short l_whence; + off_t l_start; + off_t l_len; + pid_t l_pid; +}; + +struct flock64 { + short l_type; + short l_whence; + loff_t l_start; + loff_t l_len; + pid_t l_pid; +}; + +#define F_LINUX_SPECIFIC_BASE 1024 +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/fiq.h linux-2.5.70-bk14/include/asm-arm26/fiq.h --- linux-2.5.70-bk13/include/asm-arm26/fiq.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/fiq.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,37 @@ +/* + * linux/include/asm-arm/fiq.h + * + * Support for FIQ on ARM architectures. + * Written by Philip Blundell , 1998 + * Re-written by Russell King + */ + +#ifndef __ASM_FIQ_H +#define __ASM_FIQ_H + +#include + +struct fiq_handler { + struct fiq_handler *next; + /* Name + */ + const char *name; + /* Called to ask driver to relinquish/ + * reacquire FIQ + * return zero to accept, or - + */ + int (*fiq_op)(void *, int relinquish); + /* data for the relinquish/reacquire functions + */ + void *dev_id; +}; + +extern int claim_fiq(struct fiq_handler *f); +extern void release_fiq(struct fiq_handler *f); +extern void set_fiq_handler(void *start, unsigned int length); +extern void set_fiq_regs(struct pt_regs *regs); +extern void get_fiq_regs(struct pt_regs *regs); +extern void enable_fiq(int fiq); +extern void disable_fiq(int fiq); + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/floppy.h linux-2.5.70-bk14/include/asm-arm26/floppy.h --- linux-2.5.70-bk13/include/asm-arm26/floppy.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/floppy.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,141 @@ +/* + * linux/include/asm-arm/floppy.h + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Note that we don't touch FLOPPY_DMA nor FLOPPY_IRQ here + */ +#ifndef __ASM_ARM_FLOPPY_H +#define __ASM_ARM_FLOPPY_H + +#define fd_outb(val,port) \ + do { \ + if ((port) == FD_DOR) \ + fd_setdor((val)); \ + else \ + outb((val),(port)); \ + } while(0) + +#define fd_inb(port) inb((port)) +#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\ + SA_INTERRUPT|SA_SAMPLE_RANDOM,"floppy",NULL) +#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL) +#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK) +#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK) + +#define fd_request_dma() request_dma(DMA_FLOPPY,"floppy") +#define fd_free_dma() free_dma(DMA_FLOPPY) +#define fd_disable_dma() disable_dma(DMA_FLOPPY) +#define fd_enable_dma() enable_dma(DMA_FLOPPY) +#define fd_clear_dma_ff() clear_dma_ff(DMA_FLOPPY) +#define fd_set_dma_mode(mode) set_dma_mode(DMA_FLOPPY, (mode)) +#define fd_set_dma_addr(addr) set_dma_addr(DMA_FLOPPY, virt_to_bus((addr))) +#define fd_set_dma_count(len) set_dma_count(DMA_FLOPPY, (len)) +#define fd_cacheflush(addr,sz) + +/* need to clean up dma.h */ +#define DMA_FLOPPYDISK DMA_FLOPPY + +/* Floppy_selects is the list of DOR's to select drive fd + * + * On initialisation, the floppy list is scanned, and the drives allocated + * in the order that they are found. This is done by seeking the drive + * to a non-zero track, and then restoring it to track 0. If an error occurs, + * then there is no floppy drive present. 
[to be put back in again] + */ +static unsigned char floppy_selects[2][4] = +{ + { 0x10, 0x21, 0x23, 0x33 }, + { 0x10, 0x21, 0x23, 0x33 } +}; + +#define fd_setdor(dor) \ +do { \ + int new_dor = (dor); \ + if (new_dor & 0xf0) \ + new_dor = (new_dor & 0x0c) | floppy_selects[fdc][new_dor & 3]; \ + else \ + new_dor &= 0x0c; \ + outb(new_dor, FD_DOR); \ +} while (0) + +/* + * Someday, we'll automatically detect which drives are present... + */ +static inline void fd_scandrives (void) +{ +#if 0 + int floppy, drive_count; + + fd_disable_irq(); + raw_cmd = &default_raw_cmd; + raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_SEEK; + raw_cmd->track = 0; + raw_cmd->rate = ?; + drive_count = 0; + for (floppy = 0; floppy < 4; floppy ++) { + current_drive = drive_count; + /* + * Turn on floppy motor + */ + if (start_motor(redo_fd_request)) + continue; + /* + * Set up FDC + */ + fdc_specify(); + /* + * Tell FDC to recalibrate + */ + output_byte(FD_RECALIBRATE); + LAST_OUT(UNIT(floppy)); + /* wait for command to complete */ + if (!successful) { + int i; + for (i = drive_count; i < 3; i--) + floppy_selects[fdc][i] = floppy_selects[fdc][i + 1]; + floppy_selects[fdc][3] = 0; + floppy -= 1; + } else + drive_count++; + } +#else + floppy_selects[0][0] = 0x10; + floppy_selects[0][1] = 0x21; + floppy_selects[0][2] = 0x23; + floppy_selects[0][3] = 0x33; +#endif +} + +#define FDC1 (0x3f0) + +#define FLOPPY0_TYPE 4 +#define FLOPPY1_TYPE 4 + +#define N_FDC 1 +#define N_DRIVE 4 + +#define FLOPPY_MOTOR_MASK 0xf0 + +#define CROSS_64KB(a,s) (0) + +/* + * This allows people to reverse the order of + * fd0 and fd1, in case their hardware is + * strangely connected (as some RiscPCs + * and A5000s seem to be). + */ +static void driveswap(int *ints, int dummy, int dummy2) +{ + floppy_selects[0][0] ^= floppy_selects[0][1]; + floppy_selects[0][1] ^= floppy_selects[0][0]; + floppy_selects[0][0] ^= floppy_selects[0][1]; +} + +#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 } + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/fpstate.h linux-2.5.70-bk14/include/asm-arm26/fpstate.h --- linux-2.5.70-bk13/include/asm-arm26/fpstate.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/fpstate.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,29 @@ +/* + * linux/include/asm-arm/fpstate.h + * + * Copyright (C) 1995 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARM_FPSTATE_H +#define __ASM_ARM_FPSTATE_H + +#define FP_SIZE 35 + +struct fp_hard_struct { + unsigned int save[FP_SIZE]; /* as yet undefined */ +}; + +struct fp_soft_struct { + unsigned int save[FP_SIZE]; /* undefined information */ +}; + +union fp_state { + struct fp_hard_struct hard; + struct fp_soft_struct soft; +}; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/hardirq.h linux-2.5.70-bk14/include/asm-arm26/hardirq.h --- linux-2.5.70-bk13/include/asm-arm26/hardirq.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/hardirq.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,93 @@ +#ifndef __ASM_HARDIRQ_H +#define __ASM_HARDIRQ_H + +#include +#include +#include + +/* softirq.h is sensitive to the offsets of these fields */ +typedef struct { + unsigned int __softirq_pending; + unsigned int __local_irq_count; + unsigned int __local_bh_count; + unsigned int __syscall_count; + struct task_struct * __ksoftirqd_task; /* waitqueue is too large */ +} ____cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +/* + * We put the hardirq and softirq counter into the preemption + * counter. The bitmask has the following meaning: + * + * - bits 0-7 are the preemption count (max depth: 256) + * - bits 8-15 are the softirq count (max # of softirqs: 256) + * - bits 16-23 are the hardirq count (max # of hardirqs: 256) + * - bit 26 is the PREEMPT_ACTIVE flag + */ +#define PREEMPT_BITS 8 +#define SOFTIRQ_BITS 8 +#define HARDIRQ_BITS 8 + +#define PREEMPT_SHIFT 0 +#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) +#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) + +#define __MASK(x) ((1UL << (x))-1) + +#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT) +#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) +#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) + +#define hardirq_count() (preempt_count() & HARDIRQ_MASK) +#define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#define irq_count() (preempt_count() & (HARDIRQ_MASK|SOFTIRQ_MASK)) + +#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) +#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) +#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + +/* + * The hardirq mask has to be large enough to have space + * for potentially all IRQ sources in the system nesting + * on a single CPU: + */ +#if (1 << HARDIRQ_BITS) < NR_IRQS +# error HARDIRQ_BITS is too low! +#endif + +/* + * Are we doing bottom half or hardware interrupt processing? + * Are we in a softirq context? Interrupt context? + */ +#define in_irq() (hardirq_count()) +#define in_softirq() (softirq_count()) +#define in_interrupt() (irq_count()) + +#define hardirq_trylock() (!in_interrupt()) +#define hardirq_endlock() do { } while (0) + +#define irq_enter() (preempt_count() += HARDIRQ_OFFSET) + +#ifdef CONFIG_PREEMPT +# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) +#else +# define in_atomic() (preempt_count() != 0) +# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET +#endif + +#ifndef CONFIG_SMP +#define irq_exit() \ + do { \ + preempt_count() -= HARDIRQ_OFFSET; \ + if (!in_interrupt() && softirq_pending(smp_processor_id())) \ + __asm__("bl%? 
__do_softirq": : : "lr");/* out of line */\ + preempt_enable_no_resched(); \ + } while (0) + +#define synchronize_irq(irq) barrier() +#else +#error SMP not supported +#endif + +#endif /* __ASM_HARDIRQ_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/hardware.h linux-2.5.70-bk14/include/asm-arm26/hardware.h --- linux-2.5.70-bk13/include/asm-arm26/hardware.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/hardware.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,109 @@ +/* + * linux/include/asm-arm/arch-arc/hardware.h + * + * Copyright (C) 1996-1999 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains the hardware definitions of the + * Acorn Archimedes/A5000 machines. + * + * Modifications: + * 04-04-1998 PJB/RMK Merged arc and a5k versions + */ +#ifndef __ASM_HARDWARE_H +#define __ASM_HARDWARE_H + +#include + + +/* + * What hardware must be present - these can be tested by the kernel + * source. + */ +#define HAS_IOC +#define HAS_MEMC +#define HAS_VIDC + +#define VDMA_ALIGNMENT PAGE_SIZE +#define VDMA_XFERSIZE 16 +#define VDMA_INIT 0 +#define VDMA_START 1 +#define VDMA_END 2 + +#ifndef __ASSEMBLY__ +extern void memc_write(unsigned int reg, unsigned long val); + +#define video_set_dma(start,end,offset) \ +do { \ + memc_write (VDMA_START, (start >> 2)); \ + memc_write (VDMA_END, (end - VDMA_XFERSIZE) >> 2); \ + memc_write (VDMA_INIT, (offset >> 2)); \ +} while (0) +#endif + + +/* Hardware addresses of major areas. + * *_START is the physical address + * *_SIZE is the size of the region + * *_BASE is the virtual address + */ +#define IO_START 0x03000000 +#define IO_SIZE 0x01000000 +#define IO_BASE 0x03000000 + +/* + * Screen mapping information + */ +#define SCREEN_START 0x02000000 +#define SCREEN_END 0x02078000 +#define SCREEN_SIZE 0x00078000 +#define SCREEN_BASE 0x02000000 + + +#define EXPMASK_BASE 0x03360000 +#define IOEB_BASE 0x03350000 +#define VIDC_BASE 0x03400000 +#define LATCHA_BASE 0x03250040 +#define LATCHB_BASE 0x03250018 +#define IOC_BASE 0x03200000 +#define FLOPPYDMA_BASE 0x0302a000 +#define PCIO_BASE 0x03010000 + +// FIXME - are the below correct? 
+#define PODSLOT_IOC0_BASE 0x03240000 +#define PODSLOT_IOC_SIZE (1 << 14) +#define PODSLOT_MEMC_BASE 0x03000000 +#define PODSLOT_MEMC_SIZE (1 << 14) + +#define vidc_writel(val) __raw_writel(val, VIDC_BASE) + +#ifndef __ASSEMBLY__ + +/* + * for use with inb/outb + */ +#define IOEB_VID_CTL (IOEB_BASE + 0x48) +#define IOEB_PRESENT (IOEB_BASE + 0x50) +#define IOEB_PSCLR (IOEB_BASE + 0x58) +#define IOEB_MONTYPE (IOEB_BASE + 0x70) + +#define IO_EC_IOC_BASE 0x80090000 +#define IO_EC_MEMC_BASE 0x80000000 + +#ifdef CONFIG_ARCH_ARC +/* A680 hardware */ +#define WD1973_BASE 0x03290000 +#define WD1973_LATCH 0x03350000 +#define Z8530_BASE 0x032b0008 +#define SCSI_BASE 0x03100000 +#endif + +#endif + +#define EXPMASK_STATUS (EXPMASK_BASE + 0x00) +#define EXPMASK_ENABLE (EXPMASK_BASE + 0x04) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/hdreg.h linux-2.5.70-bk14/include/asm-arm26/hdreg.h --- linux-2.5.70-bk13/include/asm-arm26/hdreg.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/hdreg.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,15 @@ +/* + * linux/include/asm-arm26/hdreg.h + * + * Used by include/linux/ide.h + * + * Copyright (C) 1994-1996 Linus Torvalds & authors + */ + +#ifndef __ASMARM_HDREG_H +#define __ASMARM_HDREG_H + +typedef unsigned int ide_ioreg_t; + +#endif /* __ASMARM_HDREG_H */ + diff -urN linux-2.5.70-bk13/include/asm-arm26/ian_char.h linux-2.5.70-bk14/include/asm-arm26/ian_char.h --- linux-2.5.70-bk13/include/asm-arm26/ian_char.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ian_char.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,79 @@ +#ifndef _ian_char_h_included +#define _ian_char_h_included +static int charset[256*8] = {0x777700, 0x7700770, 0x7700770, 0x7777770, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x0, 0x777700, 0x7700770, 0x770, 0x770, 0x770, 0x7700770, 0x777700, 0x0, 0x77770, 0x770770, 0x7700770, 0x7700770, 0x7700770, 0x770770, 0x77770, 0x0, 0x7777770, 0x770, 0x770, 0x777770, 0x770, 0x770, 0x7777770, 0x0, 0x7777770, 0x770, 0x770, 0x777770, 0x770, 0x770, 0x770, 0x0, 0x777700, 0x7700770, 0x770, 0x7770770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777770, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x7777770, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x7777770, 0x0, 0x7777700, 0x770000, 0x770000, 0x770000, 0x770000, 0x770770, 0x77700, 0x0, 0x7700770, 0x770770, 0x77770, 0x7770, 0x77770, 0x770770, 0x7700770, 0x0, 0x770, 0x770, 0x770, 0x770, 0x770, 0x770, 0x7777770, 0x0, 0x77000770, 0x77707770, 0x77777770, 0x77070770, 0x77070770, 0x77000770, 0x77000770, 0x0, 0x7700770, 0x7700770, 0x7707770, 0x7777770, 0x7770770, 0x7700770, 0x7700770, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x770, 0x770, 0x770, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x7070770, 0x770770, 0x7707700, 0x0, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x770770, 0x7700770, 0x7700770, 0x0, 0x777700, 0x7700770, 0x770, 0x777700, 0x7700000, 0x7700770, 0x777700, 0x0, 0x7777770, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x0, 0x77000770, 0x77000770, 0x77070770, 0x77070770, 0x77777770, 0x77707770, 0x77000770, 0x0, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x777700, 0x7700770, 0x7700770, 0x0, 
0x7700770, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x77000, 0x77000, 0x0, 0x7777770, 0x7700000, 0x770000, 0x77000, 0x7700, 0x770, 0x7777770, 0x0, 0x777770, 0x770, 0x770, 0x770, 0x770, 0x770, 0x777770, 0x0, 0x0, 0x770, 0x7700, 0x77000, 0x770000, 0x7700000, 0x0, 0x0, 0x7777700, 0x7700000, 0x7700000, 0x7700000, 0x7700000, 0x7700000, 0x7777700, 0x0, 0x777700, 0x7700770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77777777, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x0, 0x77000, 0x0, 0x770770, 0x770770, 0x770770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7707700, 0x7707700, 0x77777770, 0x7707700, 0x77777770, 0x7707700, 0x7707700, 0x0, 0x770000, 0x77777700, 0x70770, 0x7777700, 0x77070000, 0x7777770, 0x77000, 0x0, 0x770, 0x7700770, 0x770000, 0x77000, 0x7700, 0x7700770, 0x7700000, 0x0, 0x77700, 0x770770, 0x770770, 0x77700, 0x70770770, 0x7700770, 0x77077700, 0x0, 0x77000, 0x77000, 0x77000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x770000, 0x77000, 0x7700, 0x7700, 0x7700, 0x77000, 0x770000, 0x0, 0x7700, 0x77000, 0x770000, 0x770000, 0x770000, 0x77000, 0x7700, 0x0, 0x0, 0x77000, 0x7777770, 0x777700, 0x7777770, 0x77000, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x7777770, 0x77000, 0x77000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x7700, 0x0, 0x0, 0x0, 0x7777770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x0, 0x0, 0x7700000, 0x770000, 0x77000, 0x7700, 0x770, 0x0, 0x0, 0x777700, 0x7700770, 0x7770770, 0x7777770, 0x7707770, 0x7700770, 0x777700, 0x0, 0x77000, 0x77700, 0x77000, 0x77000, 0x77000, 0x77000, 0x7777770, 0x0, 0x777700, 0x7700770, 0x7700000, 0x770000, 0x77000, 0x7700, 0x7777770, 0x0, 0x777700, 0x7700770, 0x7700000, 0x777000, 0x7700000, 0x7700770, 0x777700, 0x0, 0x770000, 0x777000, 0x777700, 0x770770, 0x7777770, 0x770000, 0x770000, 0x0, 0x7777770, 0x770, 0x777770, 0x7700000, 0x7700000, 0x7700770, 0x777700, 0x0, 0x777000, 0x7700, 0x770, 0x777770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7777770, 0x7700000, 0x770000, 0x77000, 0x7700, 0x7700, 0x7700, 0x0, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7777700, 0x7700000, 0x770000, 0x77700, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x0, 0x77000, 0x77000, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x0, 0x77000, 0x77000, 0x7700, 0x770000, 0x77000, 0x7700, 0x770, 0x7700, 0x77000, 0x770000, 0x0, 0x0, 0x0, 0x7777770, 0x0, 0x7777770, 0x0, 0x0, 0x0, 0x7700, 0x77000, 0x770000, 0x7700000, 0x770000, 0x77000, 0x7700, 0x0, 0x777700, 0x7700770, 0x770000, 0x77000, 0x77000, 0x0, 0x77000, 0x0, 0x777700, 0x7700770, 0x7770770, 0x7070770, 0x7770770, 0x770, 0x777700, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7777770, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x0, 0x777700, 0x7700770, 0x770, 0x770, 0x770, 0x7700770, 0x777700, 0x0, 0x77770, 0x770770, 0x7700770, 0x7700770, 0x7700770, 0x770770, 0x77770, 0x0, 0x7777770, 0x770, 0x770, 0x777770, 0x770, 0x770, 0x7777770, 0x0, 0x7777770, 0x770, 0x770, 0x777770, 0x770, 0x770, 0x770, 0x0, 0x777700, 0x7700770, 0x770, 0x7770770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777770, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x7777770, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x7777770, 0x0, 0x7777700, 0x770000, 0x770000, 0x770000, 0x770000, 0x770770, 0x77700, 0x0, 0x7700770, 0x770770, 0x77770, 0x7770, 0x77770, 0x770770, 0x7700770, 0x0, 0x770, 0x770, 0x770, 0x770, 0x770, 0x770, 0x7777770, 0x0, 
0x77000770, 0x77707770, 0x77777770, 0x77070770, 0x77070770, 0x77000770, 0x77000770, 0x0, 0x7700770, 0x7700770, 0x7707770, 0x7777770, 0x7770770, 0x7700770, 0x7700770, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x770, 0x770, 0x770, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x7070770, 0x770770, 0x7707700, 0x0, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x770770, 0x7700770, 0x7700770, 0x0, 0x777700, 0x7700770, 0x770, 0x777700, 0x7700000, 0x7700770, 0x777700, 0x0, 0x7777770, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x0, 0x77000770, 0x77000770, 0x77070770, 0x77070770, 0x77777770, 0x77707770, 0x77000770, 0x0, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x777700, 0x7700770, 0x7700770, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x77000, 0x77000, 0x0, 0x7777770, 0x7700000, 0x770000, 0x77000, 0x7700, 0x770, 0x7777770, 0x0, 0x777770, 0x770, 0x770, 0x770, 0x770, 0x770, 0x777770, 0x0, 0x0, 0x770, 0x7700, 0x77000, 0x770000, 0x7700000, 0x0, 0x0, 0x7777700, 0x7700000, 0x7700000, 0x7700000, 0x7700000, 0x7700000, 0x7777700, 0x0, 0x777700, 0x7700770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77777777, 0x7700, 0x77000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x777700, 0x7700000, 0x7777700, 0x7700770, 0x7777700, 0x0, 0x770, 0x770, 0x777770, 0x7700770, 0x7700770, 0x7700770, 0x777770, 0x0, 0x0, 0x0, 0x777700, 0x7700770, 0x770, 0x7700770, 0x777700, 0x0, 0x7700000, 0x7700000, 0x7777700, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x0, 0x0, 0x0, 0x777700, 0x7700770, 0x7777770, 0x770, 0x777700, 0x0, 0x777000, 0x7700, 0x7700, 0x777770, 0x7700, 0x7700, 0x7700, 0x0, 0x0, 0x0, 0x7777700, 0x7700770, 0x7700770, 0x7777700, 0x7700000, 0x777700, 0x770, 0x770, 0x777770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x77000, 0x0, 0x77700, 0x77000, 0x77000, 0x77000, 0x777700, 0x0, 0x77000, 0x0, 0x77700, 0x77000, 0x77000, 0x77000, 0x77000, 0x7770, 0x770, 0x770, 0x7700770, 0x770770, 0x77770, 0x770770, 0x7700770, 0x0, 0x77700, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x777700, 0x0, 0x0, 0x0, 0x7707700, 0x77777770, 0x77070770, 0x77070770, 0x77000770, 0x0, 0x0, 0x0, 0x777770, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x0, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x0, 0x0, 0x777770, 0x7700770, 0x7700770, 0x777770, 0x770, 0x770, 0x0, 0x0, 0x7777700, 0x7700770, 0x7700770, 0x7777700, 0x7700000, 0x77700000, 0x0, 0x0, 0x770770, 0x7707770, 0x770, 0x770, 0x770, 0x0, 0x0, 0x0, 0x7777700, 0x770, 0x777700, 0x7700000, 0x777770, 0x0, 0x7700, 0x7700, 0x777770, 0x7700, 0x7700, 0x7700, 0x777000, 0x0, 0x0, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x0, 0x0, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x0, 0x0, 0x0, 0x77000770, 0x77070770, 0x77070770, 0x77777770, 0x7707700, 0x0, 0x0, 0x0, 0x7700770, 0x777700, 0x77000, 0x777700, 0x7700770, 0x0, 0x0, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x7700000, 0x777700, 0x0, 0x0, 0x7777770, 0x770000, 0x77000, 0x7700, 0x7777770, 0x0, 0x770000, 0x77000, 0x77000, 0x7770, 0x77000, 0x77000, 0x770000, 0x0, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x0, 0x7700, 0x77000, 0x77000, 0x7770000, 0x77000, 0x77000, 0x7700, 0x0, 0x70007700, 0x77070770, 0x7700070, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77000000, 
0x77000000, 0x7700000, 0x7700000, 0x7707770, 0x777000, 0x770000, 0x0, 0x777000, 0x77000770, 0x77070770, 0x77070770, 0x77777770, 0x77707770, 0x77000770, 0x0, 0x777000, 0x7707700, 0x0, 0x77070770, 0x77070770, 0x77777770, 0x7707700, 0x0, 0x7777777, 0x7007007, 0x7007007, 0x7007777, 0x7000007, 0x7000007, 0x7777777, 0x0, 0x7700770, 0x70077007, 0x70000007, 0x7000070, 0x70000007, 0x70077007, 0x7700770, 0x0, 0x77000, 0x7700770, 0x7000070, 0x7700770, 0x777700, 0x77000, 0x77000, 0x0, 0x77000, 0x7700770, 0x0, 0x7700770, 0x7700770, 0x7777700, 0x7700000, 0x777700, 0x77700000, 0x70000000, 0x7000000, 0x700770, 0x707007, 0x770, 0x7007, 0x770, 0x77000, 0x70700, 0x77770070, 0x70000007, 0x77770070, 0x70700, 0x77000, 0x0, 0x77000, 0x707000, 0x7007777, 0x70000007, 0x7007777, 0x707000, 0x77000, 0x0, 0x777700, 0x700700, 0x700700, 0x77700777, 0x7000070, 0x700700, 0x77000, 0x0, 0x77000, 0x700700, 0x7000070, 0x77700777, 0x700700, 0x700700, 0x777700, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77077077, 0x77077077, 0x0, 0x70007777, 0x77077070, 0x70707070, 0x70007070, 0x0, 0x0, 0x0, 0x0, 0x77, 0x770077, 0x77000, 0x7700, 0x770, 0x77077077, 0x77077000, 0x0, 0x0, 0x0, 0x777700, 0x7777770, 0x7777770, 0x777700, 0x0, 0x0, 0x770000, 0x77000, 0x77000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x770000, 0x770000, 0x77000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x770000, 0x77000, 0x7700, 0x7700, 0x77000, 0x770000, 0x0, 0x0, 0x7700, 0x77000, 0x770000, 0x770000, 0x77000, 0x7700, 0x0, 0x77077000, 0x7707700, 0x7707700, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7707700, 0x7707700, 0x770770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7707700, 0x7707700, 0x770770, 0x0, 0x0, 0x0, 0x777700, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77777777, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7777770, 0x0, 0x0, 0x0, 0x0, 0x77707770, 0x770077, 0x770077, 0x77770077, 0x770077, 0x770077, 0x77707770, 0x0, 0x0, 0x0, 0x7770770, 0x77077077, 0x77777077, 0x77077, 0x7770770, 0x0, 0x77000, 0x77000, 0x7777770, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x7777770, 0x77000, 0x7777770, 0x77000, 0x77000, 0x77000, 0x777700, 0x7700770, 0x770, 0x7707777, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x7777700, 0x7700770, 0x7700770, 0x7707777, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77000, 0x0, 0x77000, 0x77000, 0x77000, 0x77000, 0x77000, 0x0, 0x70000, 0x7777700, 0x77070770, 0x70770, 0x77070770, 0x7777700, 0x70000, 0x0, 0x777000, 0x7707700, 0x7700, 0x777770, 0x7700, 0x7700, 0x7777770, 0x0, 0x0, 0x7700770, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x7700770, 0x0, 0x7700770, 0x777700, 0x77000, 0x77000, 0x7777770, 0x77000, 0x77000, 0x0, 0x77000, 0x77000, 0x77000, 0x0, 0x77000, 0x77000, 0x77000, 0x0, 0x777700, 0x770, 0x777700, 0x7700770, 0x777700, 0x7700000, 0x777700, 0x0, 0x7700770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x777700, 0x7000070, 0x70077007, 0x70000707, 0x70000707, 0x70077007, 0x7000070, 0x777700, 0x777000, 0x7700000, 0x7777000, 0x7707700, 0x7777000, 0x0, 0x7777700, 0x0, 0x0, 0x77007700, 0x7700770, 0x770077, 0x770077, 0x7700770, 0x77007700, 0x0, 0x7777770, 0x7700000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7777770, 0x0, 0x0, 0x0, 0x0, 0x777700, 0x7000070, 0x70077707, 0x70700707, 0x70077707, 0x70700707, 0x7000070, 0x777700, 0x7777770, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x777700, 0x7700770, 0x777700, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x7777770, 0x77000, 0x77000, 0x0, 0x7777770, 0x0, 0x77700, 0x700000, 0x77000, 0x700, 0x777700, 0x0, 0x0, 0x0, 0x77700, 0x700000, 0x77000, 0x700000, 0x77700, 0x0, 0x0, 0x0, 0x770000, 0x77000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x77007700, 0x77007700, 0x77007700, 0x77007700, 0x7777700, 0x770, 0x77000000, 0x7777700, 0x7707770, 0x7707770, 0x7707700, 0x7707700, 0x7777700, 0x0, 0x0, 0x0, 0x0, 0x77000, 0x77000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77000, 0x7700, 0x7000, 0x7700, 0x7000, 0x7000, 0x77700, 0x0, 0x0, 0x0, 0x777000, 0x7707700, 0x7707700, 0x7707700, 0x777000, 0x0, 0x7777700, 0x0, 0x0, 0x770077, 0x7700770, 0x77007700, 0x77007700, 0x7700770, 0x770077, 0x0, 0x70, 0x77, 0x70, 0x70070, 0x70070, 0x7070000, 0x77770000, 0x7000000, 0x70, 0x77, 0x70, 0x77770070, 0x70000070, 0x77770000, 0x70000, 0x77770000, 0x777, 0x700, 0x777, 0x70700, 0x70777, 0x7070000, 0x77770000, 0x7000000, 0x77000, 0x0, 0x77000, 0x77000, 0x7700, 0x7700770, 0x777700, 0x0, 0x7700, 0x77000, 0x0, 0x777700, 0x7700770, 0x7777770, 0x7700770, 0x0, 0x770000, 0x77000, 0x0, 0x777700, 0x7700770, 0x7777770, 0x7700770, 0x0, 0x77000, 0x7700770, 0x0, 0x777700, 0x7700770, 0x7777770, 0x7700770, 0x0, 0x7707700, 0x770770, 0x0, 0x777700, 0x7700770, 0x7777770, 0x7700770, 0x0, 0x7700770, 0x7700770, 0x0, 0x777700, 0x7700770, 0x7777770, 0x7700770, 0x0, 0x777700, 0x7700770, 0x777700, 0x777700, 0x7700770, 0x7777770, 0x7700770, 0x0, 0x77777700, 0x7700770, 0x7700770, 0x77777770, 0x7700770, 0x7700770, 0x77700770, 0x0, 0x777700, 0x7700770, 0x770, 0x770, 0x7700770, 0x777700, 0x7700, 0x770, 0x7700, 0x77000, 0x7777770, 0x770, 0x777770, 0x770, 0x7777770, 0x0, 0x770000, 0x77000, 0x7777770, 0x770, 0x777770, 0x770, 0x7777770, 0x0, 0x777700, 0x7700770, 0x7777770, 0x770, 0x777770, 0x770, 0x7777770, 0x0, 0x7700770, 0x0, 0x7777770, 0x770, 0x777770, 0x770, 0x7777770, 0x0, 0x7700, 0x77000, 0x0, 0x7777770, 0x77000, 0x77000, 0x7777770, 0x0, 0x770000, 0x77000, 0x0, 0x7777770, 0x77000, 0x77000, 0x7777770, 0x0, 0x777700, 0x7700770, 0x0, 0x7777770, 0x77000, 0x77000, 0x7777770, 0x0, 0x7700770, 0x7700770, 0x0, 0x7777770, 0x77000, 0x77000, 0x7777770, 0x0, 0x77770, 0x770770, 0x7700770, 0x7707777, 0x7700770, 0x770770, 0x77770, 0x0, 0x7707700, 0x770770, 0x0, 0x7700770, 0x7707770, 0x7770770, 0x7700770, 0x0, 0x7700, 0x77000, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x770000, 0x77000, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x777700, 0x7700770, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7707700, 0x770770, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7700770, 0x0, 0x777700, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x0, 0x77000770, 0x7707700, 0x777000, 0x777000, 0x7707700, 0x77000770, 0x0, 0x70777700, 0x7700770, 0x7770770, 0x7777770, 0x7707770, 0x7700770, 0x777707, 0x0, 0x7700, 0x77000, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x770000, 0x77000, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x777700, 0x7700770, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7700770, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7700770, 0x777700, 0x0, 0x770000, 0x77000, 0x7700770, 0x7700770, 0x777700, 0x77000, 0x77000, 0x0, 0x7777, 0x770, 0x777770, 0x7700770, 0x777770, 0x770, 0x7777, 0x0, 0x777700, 0x7700770, 0x7700770, 0x770770, 0x7700770, 0x7700770, 0x770770, 0x77, 0x7700, 0x77000, 0x777700, 0x7700000, 0x7777700, 0x7700770, 0x7777700, 0x0, 0x770000, 0x77000, 0x777700, 0x7700000, 0x7777700, 0x7700770, 0x7777700, 0x0, 0x77000, 0x7700770, 0x777700, 0x7700000, 0x7777700, 0x7700770, 0x7777700, 0x0, 0x7707700, 0x770770, 0x777700, 0x7700000, 0x7777700, 0x7700770, 0x7777700, 0x0, 0x7700770, 0x0, 0x777700, 0x7700000, 0x7777700, 0x7700770, 0x7777700, 0x0, 0x777700, 0x7700770, 0x777700, 0x7700000, 
0x7777700, 0x7700770, 0x7777700, 0x0, 0x0, 0x0, 0x77777700, 0x70770000, 0x77777700, 0x770770, 0x77777700, 0x0, 0x0, 0x0, 0x777700, 0x7700770, 0x770, 0x7700770, 0x777700, 0x770, 0x7700, 0x77000, 0x777700, 0x7700770, 0x7777770, 0x770, 0x777700, 0x0, 0x770000, 0x77000, 0x777700, 0x7700770, 0x7777770, 0x770, 0x777700, 0x0, 0x777700, 0x7700770, 0x777700, 0x7700770, 0x7777770, 0x770, 0x777700, 0x0, 0x7700770, 0x0, 0x777700, 0x7700770, 0x7777770, 0x770, 0x777700, 0x0, 0x7700, 0x77000, 0x0, 0x77700, 0x77000, 0x77000, 0x777700, 0x0, 0x770000, 0x77000, 0x0, 0x77700, 0x77000, 0x77000, 0x777700, 0x0, 0x777700, 0x7700770, 0x0, 0x77700, 0x77000, 0x77000, 0x777700, 0x0, 0x7700770, 0x0, 0x0, 0x77700, 0x77000, 0x77000, 0x777700, 0x0, 0x77000, 0x7777700, 0x770000, 0x7700000, 0x7777700, 0x7700770, 0x7777700, 0x0, 0x7707700, 0x770770, 0x0, 0x777770, 0x7700770, 0x7700770, 0x7700770, 0x0, 0x7700, 0x77000, 0x0, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x0, 0x770000, 0x77000, 0x0, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x0, 0x777700, 0x7700770, 0x0, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7707700, 0x770770, 0x0, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x0, 0x7700770, 0x0, 0x0, 0x777700, 0x7700770, 0x7700770, 0x777700, 0x0, 0x0, 0x77000, 0x0, 0x77777777, 0x0, 0x77000, 0x0, 0x0, 0x0, 0x7000000, 0x777700, 0x7770770, 0x7707770, 0x7700770, 0x777707, 0x0, 0x7700, 0x77000, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x0, 0x770000, 0x77000, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x0, 0x777700, 0x7700770, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x0, 0x7700770, 0x0, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x0, 0x770000, 0x77000, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x7700000, 0x777700, 0x770, 0x770, 0x777770, 0x7700770, 0x777770, 0x770, 0x770, 0x0, 0x7700770, 0x0, 0x7700770, 0x7700770, 0x7700770, 0x7777700, 0x7700000, 0x777700}; + + +static void ian_scroll_screen(void){ + int i; + char *j; + char *screen_base = (char*)0x2000000;//1F88000; + for(i = 8; i < 480 ; i++) + for(j = screen_base + i*(640/2) ; j < screen_base + (i+1)*(640/2) ; j++) + *(j-(8*(640/2))) = *j; + for(i = 479 ; i >= 472 ; i--) + for(j = screen_base + i*(640/2) ; j < screen_base + (i+1)*(640/2) ; j++) + *j = 0; + +} + +static void ian_print(char *string){ + char *screen_base = (char*)0x2000000; + static int x, y, first, page; + int *temp; + int i; + + if(first == 0){ + first = 1; + for(i = 0 ; i < (640/2)*480 ; i++) screen_base[i] = 0; + } + + while(*string){ + if(*string==10){ + y++; + x = 0; + if(y > 59){ + y = 59; + ian_scroll_screen(); + } + } + else if(*string == 8){ + if(x) + x--; + } + else{ + if(x > 79){ + y++; + x = 0; + if(y > 59){ + y = 59; + ian_scroll_screen(); + } + } + + temp = (int *)(screen_base + ((640/2)*8*y) + (x * 4)); + for(i = 0 ; i < 8 ; i++){ + *temp = charset[i + (*string * 8)]; + temp += 640/8; + } + x++; + } + string++; + } +} + +static void ian_print_num(unsigned long value){ + int i; + char out[9]; + + for(i = 7; i >= 0 ; i--){ + out[i] = value & 0x0F; + out[i] = out[i]<10?out[i]+'0':out[i]+'A'-10; + value = value >> 4; + } + out[8] = 0; + + ian_print(out); + +} + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/ide.h linux-2.5.70-bk14/include/asm-arm26/ide.h --- linux-2.5.70-bk13/include/asm-arm26/ide.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ide.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,75 @@ +/* + * linux/include/asm-arm/ide.h + * + * Copyright (C) 1994-1996 Linus Torvalds & authors + */ + +/* + * This file contains the 
i386 architecture specific IDE code. + */ + +#ifndef __ASMARM_IDE_H +#define __ASMARM_IDE_H + +#ifdef __KERNEL__ + +#ifndef MAX_HWIFS +#define MAX_HWIFS 4 +#endif + +#include +#include + +/* JMA 18.05.03 these will never be needed, but the kernel needs them to compile */ +#define __ide_mm_insw(port,addr,len) readsw(port,addr,len) +#define __ide_mm_insl(port,addr,len) readsl(port,addr,len) +#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len) +#define __ide_mm_outsl(port,addr,len) writesl(port,addr,len) + +/* + * Set up a hw structure for a specified data port, control port and IRQ. + * This should follow whatever the default interface uses. + */ +static __inline__ void +ide_init_hwif_ports(hw_regs_t *hw, int data_port, int ctrl_port, int *irq) +{ + ide_ioreg_t reg = (ide_ioreg_t) data_port; + int i; + + for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { + hw->io_ports[i] = reg; + reg += 1; + } + hw->io_ports[IDE_CONTROL_OFFSET] = (ide_ioreg_t) ctrl_port; + if (irq) + *irq = 0; +} + +/* + * This registers the standard ports for this architecture with the IDE + * driver. + */ +static __inline__ void ide_init_default_hwifs(void) +{ + if (machine_is_a5k()) { + hw_regs_t hw; + + memset(&hw, 0, sizeof(hw)); + + ide_init_hwif_ports(&hw, 0x1f0, 0x3f6, NULL); + hw.irq = IRQ_HARDDISK; + ide_register_hw(&hw,NULL); + } +} + + +/* + * We always use the new IDE port registering, + * so these are fixed here. + */ +#define ide_default_io_base(i) ((ide_ioreg_t)0) +#define ide_default_irq(b) (0) + +#endif /* __KERNEL__ */ + +#endif /* __ASMARM_IDE_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/io.h linux-2.5.70-bk14/include/asm-arm26/io.h --- linux-2.5.70-bk13/include/asm-arm26/io.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/io.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,419 @@ +/* + * linux/include/asm-arm/io.h + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Modifications: + * 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both + * constant addresses and variable addresses. + * 04-Dec-1997 RMK Moved a lot of this stuff to the new architecture + * specific IO header files. + * 27-Mar-1999 PJB Second parameter of memcpy_toio is const.. + * 04-Apr-1999 PJB Added check_signature. + * 12-Dec-1999 RMK More cleanups + * 18-Jun-2000 RMK Removed virt_to_* and friends definitions + */ +#ifndef __ASM_ARM_IO_H +#define __ASM_ARM_IO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include + +/* + * Generic IO read/write. These perform native-endian accesses. Note + * that some architectures will want to re-define __raw_{read,write}w. 
+ */ +extern void __raw_writesb(unsigned int addr, const void *data, int bytelen); +extern void __raw_writesw(unsigned int addr, const void *data, int wordlen); +extern void __raw_writesl(unsigned int addr, const void *data, int longlen); + +extern void __raw_readsb(unsigned int addr, void *data, int bytelen); +extern void __raw_readsw(unsigned int addr, void *data, int wordlen); +extern void __raw_readsl(unsigned int addr, void *data, int longlen); + +#define __raw_writeb(v,a) (*(volatile unsigned char *)(a) = (v)) +#define __raw_writew(v,a) (*(volatile unsigned short *)(a) = (v)) +#define __raw_writel(v,a) (*(volatile unsigned int *)(a) = (v)) + +#define __raw_readb(a) (*(volatile unsigned char *)(a)) +#define __raw_readw(a) (*(volatile unsigned short *)(a)) +#define __raw_readl(a) (*(volatile unsigned int *)(a)) + + +/* + * Bad read/write accesses... + */ +extern void __readwrite_bug(const char *fn); + +/* + * Now, pick up the machine-defined IO definitions + */ + +#define IO_SPACE_LIMIT 0xffffffff + +/* + * GCC is totally crap at loading/storing data. We try to persuade it + * to do the right thing by using these whereever possible instead of + * the above. + */ +#define __arch_base_getb(b,o) \ + ({ \ + unsigned int v, r = (b); \ + __asm__ __volatile__( \ + "ldrb %0, [%1, %2]" \ + : "=r" (v) \ + : "r" (r), "Ir" (o)); \ + v; \ + }) + +#define __arch_base_getl(b,o) \ + ({ \ + unsigned int v, r = (b); \ + __asm__ __volatile__( \ + "ldr %0, [%1, %2]" \ + : "=r" (v) \ + : "r" (r), "Ir" (o)); \ + v; \ + }) + +#define __arch_base_putb(v,b,o) \ + ({ \ + unsigned int r = (b); \ + __asm__ __volatile__( \ + "strb %0, [%1, %2]" \ + : \ + : "r" (v), "r" (r), "Ir" (o)); \ + }) + +#define __arch_base_putl(v,b,o) \ + ({ \ + unsigned int r = (b); \ + __asm__ __volatile__( \ + "str %0, [%1, %2]" \ + : \ + : "r" (v), "r" (r), "Ir" (o)); \ + }) + +/* + * We use two different types of addressing - PC style addresses, and ARM + * addresses. PC style accesses the PC hardware with the normal PC IO + * addresses, eg 0x3f8 for serial#1. ARM addresses are 0x80000000+ + * and are translated to the start of IO. Note that all addresses are + * shifted left! 
+ */ +#define __PORT_PCIO(x) (!((x) & 0x80000000)) + +/* + * Dynamic IO functions - let the compiler + * optimize the expressions + */ +static inline void __outb (unsigned int value, unsigned int port) +{ + unsigned long temp; + __asm__ __volatile__( + "tst %2, #0x80000000\n\t" + "mov %0, %4\n\t" + "addeq %0, %0, %3\n\t" + "strb %1, [%0, %2, lsl #2] @ outb" + : "=&r" (temp) + : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE) + : "cc"); +} + +static inline void __outw (unsigned int value, unsigned int port) +{ + unsigned long temp; + __asm__ __volatile__( + "tst %2, #0x80000000\n\t" + "mov %0, %4\n\t" + "addeq %0, %0, %3\n\t" + "str %1, [%0, %2, lsl #2] @ outw" + : "=&r" (temp) + : "r" (value|value<<16), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE) + : "cc"); +} + +static inline void __outl (unsigned int value, unsigned int port) +{ + unsigned long temp; + __asm__ __volatile__( + "tst %2, #0x80000000\n\t" + "mov %0, %4\n\t" + "addeq %0, %0, %3\n\t" + "str %1, [%0, %2, lsl #2] @ outl" + : "=&r" (temp) + : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE) + : "cc"); +} + +#define DECLARE_DYN_IN(sz,fnsuffix,instr) \ +static inline unsigned sz __in##fnsuffix (unsigned int port) \ +{ \ + unsigned long temp, value; \ + __asm__ __volatile__( \ + "tst %2, #0x80000000\n\t" \ + "mov %0, %4\n\t" \ + "addeq %0, %0, %3\n\t" \ + "ldr" instr " %1, [%0, %2, lsl #2] @ in" #fnsuffix \ + : "=&r" (temp), "=r" (value) \ + : "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE) \ + : "cc"); \ + return (unsigned sz)value; \ +} + +static inline unsigned int __ioaddr (unsigned int port) \ +{ \ + if (__PORT_PCIO(port)) \ + return (unsigned int)(PCIO_BASE + (port << 2)); \ + else \ + return (unsigned int)(IO_BASE + (port << 2)); \ +} + +#define DECLARE_IO(sz,fnsuffix,instr) \ + DECLARE_DYN_IN(sz,fnsuffix,instr) + +DECLARE_IO(char,b,"b") +DECLARE_IO(short,w,"") +DECLARE_IO(int,l,"") + +#undef DECLARE_IO +#undef DECLARE_DYN_IN + +/* + * Constant address IO functions + * + * These have to be macros for the 'J' constraint to work - + * +/-4096 immediate operand. 
+ */ +#define __outbc(value,port) \ +({ \ + if (__PORT_PCIO((port))) \ + __asm__ __volatile__( \ + "strb %0, [%1, %2] @ outbc" \ + : : "r" (value), "r" (PCIO_BASE), "Jr" ((port) << 2)); \ + else \ + __asm__ __volatile__( \ + "strb %0, [%1, %2] @ outbc" \ + : : "r" (value), "r" (IO_BASE), "r" ((port) << 2)); \ +}) + +#define __inbc(port) \ +({ \ + unsigned char result; \ + if (__PORT_PCIO((port))) \ + __asm__ __volatile__( \ + "ldrb %0, [%1, %2] @ inbc" \ + : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \ + else \ + __asm__ __volatile__( \ + "ldrb %0, [%1, %2] @ inbc" \ + : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \ + result; \ +}) + +#define __outwc(value,port) \ +({ \ + unsigned long v = value; \ + if (__PORT_PCIO((port))) \ + __asm__ __volatile__( \ + "str %0, [%1, %2] @ outwc" \ + : : "r" (v|v<<16), "r" (PCIO_BASE), "Jr" ((port) << 2)); \ + else \ + __asm__ __volatile__( \ + "str %0, [%1, %2] @ outwc" \ + : : "r" (v|v<<16), "r" (IO_BASE), "r" ((port) << 2)); \ +}) + +#define __inwc(port) \ +({ \ + unsigned short result; \ + if (__PORT_PCIO((port))) \ + __asm__ __volatile__( \ + "ldr %0, [%1, %2] @ inwc" \ + : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \ + else \ + __asm__ __volatile__( \ + "ldr %0, [%1, %2] @ inwc" \ + : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \ + result & 0xffff; \ +}) + +#define __outlc(value,port) \ +({ \ + unsigned long v = value; \ + if (__PORT_PCIO((port))) \ + __asm__ __volatile__( \ + "str %0, [%1, %2] @ outlc" \ + : : "r" (v), "r" (PCIO_BASE), "Jr" ((port) << 2)); \ + else \ + __asm__ __volatile__( \ + "str %0, [%1, %2] @ outlc" \ + : : "r" (v), "r" (IO_BASE), "r" ((port) << 2)); \ +}) + +#define __inlc(port) \ +({ \ + unsigned long result; \ + if (__PORT_PCIO((port))) \ + __asm__ __volatile__( \ + "ldr %0, [%1, %2] @ inlc" \ + : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \ + else \ + __asm__ __volatile__( \ + "ldr %0, [%1, %2] @ inlc" \ + : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \ + result; \ +}) + +#define __ioaddrc(port) \ +({ \ + unsigned long addr; \ + if (__PORT_PCIO((port))) \ + addr = PCIO_BASE + ((port) << 2); \ + else \ + addr = IO_BASE + ((port) << 2); \ + addr; \ +}) + +#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p)) +#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p)) +#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p)) +#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p)) +#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p)) +#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p)) +#define __ioaddr(p) (__builtin_constant_p((p)) ? 
__ioaddr(p) : __ioaddrc(p)) + +/* JMA 18.02.03 added sb,sl from arm/io.h, changing io to ioaddr */ + +#define outsb(p,d,l) __raw_writesb(__ioaddr(p),d,l) +#define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l) +#define outsl(p,d,l) __raw_writesl(__ioaddr(p),d,l) + +#define insb(p,d,l) __raw_readsb(__ioaddr(p),d,l) +#define insw(p,d,l) __raw_readsw(__ioaddr(p),d,l) +#define insl(p,d,l) __raw_readsl(__ioaddr(p),d,l) + +#define insw(p,d,l) __raw_readsw(__ioaddr(p),d,l) +#define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l) + +#define readb(c) (__readwrite_bug("readb"),0) +#define readw(c) (__readwrite_bug("readw"),0) +#define readl(c) (__readwrite_bug("readl"),0) +#define writeb(v,c) __readwrite_bug("writeb") +#define writew(v,c) __readwrite_bug("writew") +#define writel(v,c) __readwrite_bug("writel") + +#define readsw(p,d,l) (__readwrite_bug("readsw"),0) +#define readsl(p,d,l) (__readwrite_bug("readsl"),0) +#define writesw(p,d,l) __readwrite_bug("writesw") +#define writesl(p,d,l) __readwrite_bug("writesl") + +/* the following macro is depreciated */ +#define ioaddr(port) __ioaddr((port)) + +/* + * No ioremap support here. + */ +#define __arch_ioremap(c,s,f,a) ((void *)(c)) +#define __arch_iounmap(c) do { } while (0) + + +#if defined(__arch_putb) || defined(__arch_putw) || defined(__arch_putl) || \ + defined(__arch_getb) || defined(__arch_getw) || defined(__arch_getl) +#warning machine class uses old __arch_putw or __arch_getw +#endif + +/* + * IO port access primitives + * ------------------------- + * + * The ARM doesn't have special IO access instructions; all IO is memory + * mapped. Note that these are defined to perform little endian accesses + * only. Their primary purpose is to access PCI and ISA peripherals. + * + * Note that for a big endian machine, this implies that the following + * big endian mode connectivity is in place, as described by numerious + * ARM documents: + * + * PCI: D0-D7 D8-D15 D16-D23 D24-D31 + * ARM: D24-D31 D16-D23 D8-D15 D0-D7 + * + * The machine specific io.h include defines __io to translate an "IO" + * address to a memory address. + * + * Note that we prevent GCC re-ordering or caching values in expressions + * by introducing sequence points into the in*() definitions. Note that + * __raw_* do not guarantee this behaviour. + */ +/* +#define outsb(p,d,l) __raw_writesb(__io(p),d,l) +#define outsw(p,d,l) __raw_writesw(__io(p),d,l) + +#define insb(p,d,l) __raw_readsb(__io(p),d,l) +#define insw(p,d,l) __raw_readsw(__io(p),d,l) +*/ +#define outb_p(val,port) outb((val),(port)) +#define outw_p(val,port) outw((val),(port)) +#define inb_p(port) inb((port)) +#define inw_p(port) inw((port)) +#define inl_p(port) inl((port)) + +#define outsb_p(port,from,len) outsb(port,from,len) +#define outsw_p(port,from,len) outsw(port,from,len) +#define insb_p(port,to,len) insb(port,to,len) +#define insw_p(port,to,len) insw(port,to,len) + +/* + * String version of IO memory access ops: + */ +extern void _memcpy_fromio(void *, unsigned long, size_t); +extern void _memcpy_toio(unsigned long, const void *, size_t); +extern void _memset_io(unsigned long, int, size_t); + +/* + * ioremap and friends. + * + * ioremap takes a PCI memory address, as specified in + * linux/Documentation/IO-mapping.txt. 
+ */ +extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long); +extern void __iounmap(void *addr); + +#ifndef __arch_ioremap +#define ioremap(cookie,size) __ioremap(cookie,size,0,1) +#define ioremap_nocache(cookie,size) __ioremap(cookie,size,0,1) +#define iounmap(cookie) __iounmap(cookie) +#else +#define ioremap(cookie,size) __arch_ioremap((cookie),(size),0,1) +#define ioremap_nocache(cookie,size) __arch_ioremap((cookie),(size),0,1) +#define iounmap(cookie) __arch_iounmap(cookie) +#endif + +/* + * DMA-consistent mapping functions. These allocate/free a region of + * uncached, unwrite-buffered mapped memory space for use with DMA + * devices. This is the "generic" version. The PCI specific version + * is in pci.h + */ +extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle); +extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle); +extern void consistent_sync(void *vaddr, size_t size, int rw); + +/* + * can the hardware map this into one segment or not, given no other + * constraints. + */ +#define BIOVEC_MERGEABLE(vec1, vec2) \ + ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) + +#endif /* __KERNEL__ */ +#endif /* __ASM_ARM_IO_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/ioc.h linux-2.5.70-bk14/include/asm-arm26/ioc.h --- linux-2.5.70-bk13/include/asm-arm26/ioc.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ioc.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,72 @@ +/* + * linux/include/asm-arm/hardware/ioc.h + * + * Copyright (C) Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Use these macros to read/write the IOC. All it does is perform the actual + * read/write. + */ +#ifndef __ASMARM_HARDWARE_IOC_H +#define __ASMARM_HARDWARE_IOC_H + +#ifndef __ASSEMBLY__ + +/* + * We use __raw_base variants here so that we give the compiler the + * chance to keep IOC_BASE in a register. 
+ */ +#define ioc_readb(off) __raw_readb(IOC_BASE + (off)) +#define ioc_writeb(val,off) __raw_writeb(val, IOC_BASE + (off)) + +#endif + +#define IOC_CONTROL (0x00) +#define IOC_KARTTX (0x04) +#define IOC_KARTRX (0x04) + +#define IOC_IRQSTATA (0x10) +#define IOC_IRQREQA (0x14) +#define IOC_IRQCLRA (0x14) +#define IOC_IRQMASKA (0x18) + +#define IOC_IRQSTATB (0x20) +#define IOC_IRQREQB (0x24) +#define IOC_IRQMASKB (0x28) + +#define IOC_FIQSTAT (0x30) +#define IOC_FIQREQ (0x34) +#define IOC_FIQMASK (0x38) + +#define IOC_T0CNTL (0x40) +#define IOC_T0LTCHL (0x40) +#define IOC_T0CNTH (0x44) +#define IOC_T0LTCHH (0x44) +#define IOC_T0GO (0x48) +#define IOC_T0LATCH (0x4c) + +#define IOC_T1CNTL (0x50) +#define IOC_T1LTCHL (0x50) +#define IOC_T1CNTH (0x54) +#define IOC_T1LTCHH (0x54) +#define IOC_T1GO (0x58) +#define IOC_T1LATCH (0x5c) + +#define IOC_T2CNTL (0x60) +#define IOC_T2LTCHL (0x60) +#define IOC_T2CNTH (0x64) +#define IOC_T2LTCHH (0x64) +#define IOC_T2GO (0x68) +#define IOC_T2LATCH (0x6c) + +#define IOC_T3CNTL (0x70) +#define IOC_T3LTCHL (0x70) +#define IOC_T3CNTH (0x74) +#define IOC_T3LTCHH (0x74) +#define IOC_T3GO (0x78) +#define IOC_T3LATCH (0x7c) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/ioctl.h linux-2.5.70-bk14/include/asm-arm26/ioctl.h --- linux-2.5.70-bk13/include/asm-arm26/ioctl.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ioctl.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,74 @@ +/* + * linux/ioctl.h for Linux by H.H. Bergman. + */ + +#ifndef _ASMARM_IOCTL_H +#define _ASMARM_IOCTL_H + +/* ioctl command encoding: 32 bits total, command in lower 16 bits, + * size of the parameter structure in the lower 14 bits of the + * upper 16 bits. + * Encoding the size of the parameter structure in the ioctl request + * is useful for catching programs compiled with old versions + * and to avoid overwriting user space outside the user buffer area. + * The highest 2 bits are reserved for indicating the ``access mode''. + * NOTE: This limits the max parameter size to 16kB -1 ! + */ + +/* + * The following is for compatibility across the various Linux + * platforms. The i386 ioctl numbering scheme doesn't really enforce + * a type field. De facto, however, the top 8 bits of the lower 16 + * bits are indeed used as a type field, so we might just as well make + * this explicit here. Please be sure to use the decoding macros + * below from now on. + */ +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 14 +#define _IOC_DIRBITS 2 + +#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) +#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) +#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) +#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +/* + * Direction bits. + */ +#define _IOC_NONE 0U +#define _IOC_WRITE 1U +#define _IOC_READ 2U + +#define _IOC(dir,type,nr,size) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + +/* used to create numbers */ +#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) +#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) +#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) + +/* used to decode ioctl numbers.. 
*/ +#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) +#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) +#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) +#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) + +/* ...and for the drivers/sound files... */ + +#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) +#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) +#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) +#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) +#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) + +#endif /* _ASMARM_IOCTL_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/ioctls.h linux-2.5.70-bk14/include/asm-arm26/ioctls.h --- linux-2.5.70-bk13/include/asm-arm26/ioctls.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ioctls.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,81 @@ +#ifndef __ASM_ARM_IOCTLS_H +#define __ASM_ARM_IOCTLS_H + +#include + +/* 0x54 is just a magic number to make these relatively unique ('T') */ + +#define TCGETS 0x5401 +#define TCSETS 0x5402 +#define TCSETSW 0x5403 +#define TCSETSF 0x5404 +#define TCGETA 0x5405 +#define TCSETA 0x5406 +#define TCSETAW 0x5407 +#define TCSETAF 0x5408 +#define TCSBRK 0x5409 +#define TCXONC 0x540A +#define TCFLSH 0x540B +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E +#define TIOCGPGRP 0x540F +#define TIOCSPGRP 0x5410 +#define TIOCOUTQ 0x5411 +#define TIOCSTI 0x5412 +#define TIOCGWINSZ 0x5413 +#define TIOCSWINSZ 0x5414 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define FIONREAD 0x541B +#define TIOCINQ FIONREAD +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define FIONBIO 0x5421 +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ +#define FIOCLEX 0x5451 +#define FIOASYNC 0x5452 +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define FIOQSIZE 0x545E + +/* Used for packet mode */ +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 + +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/ipc.h linux-2.5.70-bk14/include/asm-arm26/ipc.h --- linux-2.5.70-bk13/include/asm-arm26/ipc.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ipc.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,28 @@ +#ifndef __ASMARM_IPC_H +#define __ASMARM_IPC_H + +/* + * These are used to wrap system calls on ARM. + * + * See arch/arm/kernel/sys-arm.c for ugly details.. + */ +struct ipc_kludge { + struct msgbuf *msgp; + long msgtyp; +}; + +#define SEMOP 1 +#define SEMGET 2 +#define SEMCTL 3 +#define MSGSND 11 +#define MSGRCV 12 +#define MSGGET 13 +#define MSGCTL 14 +#define SHMAT 21 +#define SHMDT 22 +#define SHMGET 23 +#define SHMCTL 24 + +#define IPCCALL(version,op) ((version)<<16 | (op)) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/ipcbuf.h linux-2.5.70-bk14/include/asm-arm26/ipcbuf.h --- linux-2.5.70-bk13/include/asm-arm26/ipcbuf.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ipcbuf.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,29 @@ +#ifndef __ASMARM_IPCBUF_H +#define __ASMARM_IPCBUF_H + +/* + * The ipc64_perm structure for arm architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 32-bit mode_t and seq + * - 2 miscellaneous 32-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned short __pad1; + unsigned short seq; + unsigned short __pad2; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* __ASMARM_IPCBUF_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/irq.h linux-2.5.70-bk14/include/asm-arm26/irq.h --- linux-2.5.70-bk13/include/asm-arm26/irq.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/irq.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,50 @@ +#ifndef __ASM_ARM_IRQ_H +#define __ASM_ARM_IRQ_H + +#include + +#ifndef NR_IRQS +#define NR_IRQS 128 +#endif + + +/* JMA 18.05.02 Copied off arch/arm/irq.h */ +#ifndef irq_canonicalize +#define irq_canonicalize(i) (i) +#endif + + +/* + * Use this value to indicate lack of interrupt + * capability + */ +#ifndef NO_IRQ +#define NO_IRQ ((unsigned int)(-1)) +#endif + +struct irqaction; + +#define disable_irq_nosync(i) disable_irq(i) + +extern void disable_irq(unsigned int); +extern void enable_irq(unsigned int); + +#define __IRQT_FALEDGE (1 << 0) +#define __IRQT_RISEDGE (1 << 1) +#define __IRQT_LOWLVL (1 << 2) +#define __IRQT_HIGHLVL (1 << 3) + +#define IRQT_NOEDGE (0) +#define IRQT_RISING (__IRQT_RISEDGE) +#define IRQT_FALLING (__IRQT_FALEDGE) +#define IRQT_BOTHEDGE (__IRQT_RISEDGE|__IRQT_FALEDGE) +#define IRQT_LOW (__IRQT_LOWLVL) +#define IRQT_HIGH (__IRQT_HIGHLVL) +#define IRQT_PROBE (1 << 4) + +int set_irq_type(unsigned int irq, unsigned int type); + +int setup_irq(unsigned int, struct irqaction *); + +#endif + diff -urN linux-2.5.70-bk13/include/asm-arm26/irqchip.h linux-2.5.70-bk14/include/asm-arm26/irqchip.h --- linux-2.5.70-bk13/include/asm-arm26/irqchip.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/irqchip.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,118 @@ +/* + * linux/include/asm-arm/mach/irq.h + * + * Copyright (C) 1995-2000 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_ARM_MACH_IRQ_H +#define __ASM_ARM_MACH_IRQ_H + +struct irqdesc; +struct pt_regs; +struct seq_file; + +typedef void (*irq_handler_t)(unsigned int, struct irqdesc *, struct pt_regs *); +typedef void (*irq_control_t)(unsigned int); + +struct irqchip { + /* + * Acknowledge the IRQ. + * If this is a level-based IRQ, then it is expected to mask the IRQ + * as well. + */ + void (*ack)(unsigned int); + /* + * Mask the IRQ in hardware. + */ + void (*mask)(unsigned int); + /* + * Unmask the IRQ in hardware. + */ + void (*unmask)(unsigned int); + /* + * Re-run the IRQ + */ + void (*rerun)(unsigned int); + /* + * Set the type of the IRQ. 
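+ * (Normally reached via set_irq_type(), e.g. set_irq_type(irq, IRQT_RISING)
+ * to ask for rising-edge triggering; which types a given machine honours
+ * is up to its irqchip - this is only a sketch of the intent.)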
+ */ + int (*type)(unsigned int, unsigned int); +}; + +struct irqdesc { + irq_handler_t handle; + struct irqchip *chip; + struct irqaction *action; + + unsigned int enabled : 1; /* IRQ is currently enabled */ + unsigned int triggered: 1; /* IRQ has occurred */ + unsigned int running : 1; /* IRQ is running */ + unsigned int pending : 1; /* IRQ is pending */ + unsigned int probing : 1; /* IRQ in use for a probe */ + unsigned int probe_ok : 1; /* IRQ can be used for probe */ + unsigned int valid : 1; /* IRQ claimable */ + unsigned int noautoenable : 1; /* don't automatically enable IRQ */ + unsigned int unused :23; + unsigned int depth; /* disable depth */ + + /* + * IRQ lock detection + */ + unsigned int lck_cnt; + unsigned int lck_pc; + unsigned int lck_jif; +}; + +extern struct irqdesc irq_desc[]; + +/* + * This is internal. Do not use it. + */ +extern void (*init_arch_irq)(void); +extern void init_FIQ(void); +extern int show_fiq_list(struct seq_file *, void *); +void __set_irq_handler(unsigned int irq, irq_handler_t, int); + +/* + * External stuff. + */ +#define set_irq_handler(irq,handler) __set_irq_handler(irq,handler,0) +#define set_irq_chained_handler(irq,handler) __set_irq_handler(irq,handler,1) + +void set_irq_chip(unsigned int irq, struct irqchip *); +void set_irq_flags(unsigned int irq, unsigned int flags); + +#ifdef not_yet +/* + * This is to be used by the top-level machine IRQ decoder only. + */ +static inline void call_irq(struct pt_regs *regs, unsigned int irq) +{ + struct irqdesc *desc = irq_desc + irq; + + spin_lock(&irq_controller_lock); + desc->handle(irq, desc, regs); + spin_unlock(&irq_controller_lock); + + if (softirq_pending(smp_processor_id())) + do_softirq(); +} +#endif + +#define IRQF_VALID (1 << 0) +#define IRQF_PROBE (1 << 1) +#define IRQF_NOAUTOEN (1 << 2) + +/* + * Built-in IRQ handlers. + */ +void do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); +void do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); +void do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); +void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); +void dummy_mask_unmask_irq(unsigned int irq); + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/keyboard.h.old linux-2.5.70-bk14/include/asm-arm26/keyboard.h.old --- linux-2.5.70-bk13/include/asm-arm26/keyboard.h.old 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/keyboard.h.old 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,78 @@ +/* + * linux/include/asm-arm/keyboard.h + * + * Copyright (C) 1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Keyboard driver definitions for ARM + */ +#ifndef __ASM_ARM_KEYBOARD_H +#define __ASM_ARM_KEYBOARD_H + +#include +#include + +/* + * We provide a unified keyboard interface when in VC_MEDIUMRAW + * mode. This means that all keycodes must be common between + * all supported keyboards. This unfortunately puts us at odds + * with the PC keyboard interface chip... but we can't do anything + * about that now. 
+ */ +#ifdef __KERNEL__ + +extern int (*k_setkeycode)(unsigned int, unsigned int); +extern int (*k_getkeycode)(unsigned int); +extern int (*k_translate)(unsigned char, unsigned char *, char); +extern char (*k_unexpected_up)(unsigned char); +extern void (*k_leds)(unsigned char); + + +static inline int kbd_setkeycode(unsigned int sc, unsigned int kc) +{ + int ret = -EINVAL; + + if (k_setkeycode) + ret = k_setkeycode(sc, kc); + + return ret; +} + +static inline int kbd_getkeycode(unsigned int sc) +{ + int ret = -EINVAL; + + if (k_getkeycode) + ret = k_getkeycode(sc); + + return ret; +} + +static inline void kbd_leds(unsigned char leds) +{ + if (k_leds) + k_leds(leds); +} + +extern int k_sysrq_key; +extern unsigned char *k_sysrq_xlate; + +#define SYSRQ_KEY k_sysrq_key +#define kbd_sysrq_xlate k_sysrq_xlate +#define kbd_translate k_translate +#define kbd_unexpected_up k_unexpected_up + +#define NR_SCANCODES 128 + +extern int a5kkbd_init_hw(void); + +#define kbd_disable_irq() disable_irq(IRQ_KEYBOARDRX) +#define kbd_enable_irq() enable_irq(IRQ_KEYBOARDRX) +#define kbd_init_hw() a5kkbd_init_hw() + +#endif /* __KERNEL__ */ + +#endif /* __ASM_ARM_KEYBOARD_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/kmap_types.h linux-2.5.70-bk14/include/asm-arm26/kmap_types.h --- linux-2.5.70-bk13/include/asm-arm26/kmap_types.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/kmap_types.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,12 @@ +#ifndef __ARM_KMAP_TYPES_H +#define __ARM_KMAP_TYPES_H + +/* + * This is the "bare minimum". AIO seems to require this. + */ +enum km_type { + KM_IRQ0, + KM_USER1 +}; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/leds.h linux-2.5.70-bk14/include/asm-arm26/leds.h --- linux-2.5.70-bk13/include/asm-arm26/leds.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/leds.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,51 @@ +/* + * linux/include/asm-arm/leds.h + * + * Copyright (C) 1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Event-driven interface for LEDs on machines + * Added led_start and led_stop- Alex Holden, 28th Dec 1998. 
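+ *
+ * Rough usage sketch: drivers and the idle code report events with calls
+ * such as
+ *
+ *	leds_event(led_idle_start);
+ *	leds_event(led_green_on);
+ *
+ * When CONFIG_LEDS is not set, leds_event() below compiles away to nothing.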
+ */ +#ifndef ASM_ARM_LEDS_H +#define ASM_ARM_LEDS_H + +#include + +typedef enum { + led_idle_start, + led_idle_end, + led_timer, + led_start, + led_stop, + led_claim, /* override idle & timer leds */ + led_release, /* restore idle & timer leds */ + led_start_timer_mode, + led_stop_timer_mode, + led_green_on, + led_green_off, + led_amber_on, + led_amber_off, + led_red_on, + led_red_off, + led_blue_on, + led_blue_off, + /* + * I want this between led_timer and led_start, but + * someone has decided to export this to user space + */ + led_halted +} led_event_t; + +/* Use this routine to handle LEDs */ + +#ifdef CONFIG_LEDS +extern void (*leds_event)(led_event_t); +#else +#define leds_event(e) +#endif + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/limits.h linux-2.5.70-bk14/include/asm-arm26/limits.h --- linux-2.5.70-bk13/include/asm-arm26/limits.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/limits.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,11 @@ +#ifndef __ASM_PIPE_H +#define __ASM_PIPE_H + +#ifndef PAGE_SIZE +#include +#endif + +#define PIPE_BUF PAGE_SIZE + +#endif + diff -urN linux-2.5.70-bk13/include/asm-arm26/linkage.h linux-2.5.70-bk14/include/asm-arm26/linkage.h --- linux-2.5.70-bk13/include/asm-arm26/linkage.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/linkage.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,7 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 0 +#define __ALIGN_STR ".align 0" + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/linux_logo.h linux-2.5.70-bk14/include/asm-arm26/linux_logo.h --- linux-2.5.70-bk13/include/asm-arm26/linux_logo.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/linux_logo.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,19 @@ +/* + * linux/include/asm-arm/linux_logo.h + * + * Copyright (C) 1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Linux console driver logo definitions for ARM + */ + +#include +#include + +#define linux_logo_banner "ARM Linux version " UTS_RELEASE + +#include + diff -urN linux-2.5.70-bk13/include/asm-arm26/locks.h linux-2.5.70-bk14/include/asm-arm26/locks.h --- linux-2.5.70-bk13/include/asm-arm26/locks.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/locks.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,161 @@ +/* + * linux/include/asm-arm/proc-armo/locks.h + * + * Copyright (C) 2000 Russell King + * Fixes for 26 bit machines, (C) 2000 Dave Gilbert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Interrupt safe locking assembler. 
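+ *
+ * These macros are the building blocks for the semaphore code.  As a
+ * sketch only (the out-of-line handler name here is assumed, not defined
+ * in this file), a down() implementation would invoke
+ *
+ *	__down_op(&sem->count, __down_failed);
+ *
+ * where __down_failed is entered only if the count went negative.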
+ */ +#ifndef __ASM_PROC_LOCKS_H +#define __ASM_PROC_LOCKS_H + +/* Decrements by 1, fails if value < 0 */ +#define __down_op(ptr,fail) \ + ({ \ + __asm__ __volatile__ ( \ + "@ atomic down operation\n" \ +" mov ip, pc\n" \ +" orr lr, ip, #0x08000000\n" \ +" teqp lr, #0\n" \ +" ldr lr, [%0]\n" \ +" and ip, ip, #0x0c000003\n" \ +" subs lr, lr, #1\n" \ +" str lr, [%0]\n" \ +" orrmi ip, ip, #0x80000000 @ set N\n" \ +" teqp ip, #0\n" \ +" movmi ip, %0\n" \ +" blmi " #fail \ + : \ + : "r" (ptr) \ + : "ip", "lr", "cc"); \ + }) + +#define __down_op_ret(ptr,fail) \ + ({ \ + unsigned int result; \ + __asm__ __volatile__ ( \ +" @ down_op_ret\n" \ +" mov ip, pc\n" \ +" orr lr, ip, #0x08000000\n" \ +" teqp lr, #0\n" \ +" ldr lr, [%1]\n" \ +" and ip, ip, #0x0c000003\n" \ +" subs lr, lr, #1\n" \ +" str lr, [%1]\n" \ +" orrmi ip, ip, #0x80000000 @ set N\n" \ +" teqp ip, #0\n" \ +" movmi ip, %1\n" \ +" movpl ip, #0\n" \ +" blmi " #fail "\n" \ +" mov %0, ip" \ + : "=&r" (result) \ + : "r" (ptr) \ + : "ip", "lr", "cc"); \ + result; \ + }) + +#define __up_op(ptr,wake) \ + ({ \ + __asm__ __volatile__ ( \ + "@ up_op\n" \ +" mov ip, pc\n" \ +" orr lr, ip, #0x08000000\n" \ +" teqp lr, #0\n" \ +" ldr lr, [%0]\n" \ +" and ip, ip, #0x0c000003\n" \ +" adds lr, lr, #1\n" \ +" str lr, [%0]\n" \ +" orrle ip, ip, #0x80000000 @ set N - should this be mi ??? DAG ! \n" \ +" teqp ip, #0\n" \ +" movmi ip, %0\n" \ +" blmi " #wake \ + : \ + : "r" (ptr) \ + : "ip", "lr", "cc"); \ + }) + +/* + * The value 0x01000000 supports up to 128 processors and + * lots of processes. BIAS must be chosen such that sub'ing + * BIAS once per CPU will result in the long remaining + * negative. + */ +#define RW_LOCK_BIAS 0x01000000 +#define RW_LOCK_BIAS_STR "0x01000000" + +/* Decrements by RW_LOCK_BIAS rather than 1, fails if value != 0 */ +#define __down_op_write(ptr,fail) \ + ({ \ + __asm__ __volatile__( \ + "@ down_op_write\n" \ +" mov ip, pc\n" \ +" orr lr, ip, #0x08000000\n" \ +" teqp lr, #0\n" \ +" and ip, ip, #0x0c000003\n" \ +\ +" ldr lr, [%0]\n" \ +" subs lr, lr, %1\n" \ +" str lr, [%0]\n" \ +\ +" orreq ip, ip, #0x40000000 @ set Z \n"\ +" teqp ip, #0\n" \ +" movne ip, %0\n" \ +" blne " #fail \ + : \ + : "r" (ptr), "I" (RW_LOCK_BIAS) \ + : "ip", "lr", "cc"); \ + }) + +/* Increments by RW_LOCK_BIAS, wakes if value >= 0 */ +#define __up_op_write(ptr,wake) \ + ({ \ + __asm__ __volatile__( \ + "@ up_op_read\n" \ +" mov ip, pc\n" \ +" orr lr, ip, #0x08000000\n" \ +" teqp lr, #0\n" \ +\ +" ldr lr, [%0]\n" \ +" and ip, ip, #0x0c000003\n" \ +" adds lr, lr, %1\n" \ +" str lr, [%0]\n" \ +\ +" orrcs ip, ip, #0x20000000 @ set C\n" \ +" teqp ip, #0\n" \ +" movcs ip, %0\n" \ +" blcs " #wake \ + : \ + : "r" (ptr), "I" (RW_LOCK_BIAS) \ + : "ip", "lr", "cc"); \ + }) + +#define __down_op_read(ptr,fail) \ + __down_op(ptr, fail) + +#define __up_op_read(ptr,wake) \ + ({ \ + __asm__ __volatile__( \ + "@ up_op_read\n" \ +" mov ip, pc\n" \ +" orr lr, ip, #0x08000000\n" \ +" teqp lr, #0\n" \ +\ +" ldr lr, [%0]\n" \ +" and ip, ip, #0x0c000003\n" \ +" adds lr, lr, %1\n" \ +" str lr, [%0]\n" \ +\ +" orreq ip, ip, #0x40000000 @ Set Z \n" \ +" teqp ip, #0\n" \ +" moveq ip, %0\n" \ +" bleq " #wake \ + : \ + : "r" (ptr), "I" (1) \ + : "ip", "lr", "cc"); \ + }) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/mach-types.h linux-2.5.70-bk14/include/asm-arm26/mach-types.h --- linux-2.5.70-bk13/include/asm-arm26/mach-types.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/mach-types.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,36 @@ +/* 
+ * Unlike ARM32 this is NOT automatically generated. DONT delete it + */ + +#ifndef __ASM_ARM_MACH_TYPE_H +#define __ASM_ARM_MACH_TYPE_H + +#include + +#ifndef __ASSEMBLY__ +extern unsigned int __machine_arch_type; +#endif + +#define MACH_TYPE_ARCHIMEDES 10 +#define MACH_TYPE_A5K 11 + +#ifdef CONFIG_ARCH_ARC +# define machine_arch_type MACH_TYPE_ARCHIMEDES +# define machine_is_archimedes() (machine_arch_type == MACH_TYPE_ARCHIMEDES) +#else +# define machine_is_archimedes() (0) +#endif + +#ifdef CONFIG_ARCH_A5K +# define machine_arch_type MACH_TYPE_A5K +# define machine_is_a5k() (machine_arch_type == MACH_TYPE_A5K) +#else +# define machine_is_a5k() (0) +#endif + +#ifndef machine_arch_type +#error Unknown machine type +#define machine_arch_type __machine_arch_type +#endif + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/map.h linux-2.5.70-bk14/include/asm-arm26/map.h --- linux-2.5.70-bk13/include/asm-arm26/map.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/map.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,24 @@ +/* + * linux/include/asm-arm/map.h + * + * Copyright (C) 1999-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Page table mapping constructs and function prototypes + */ +struct map_desc { + unsigned long virtual; + unsigned long physical; + unsigned long length; + unsigned int type; +}; + +struct meminfo; + +extern void create_memmap_holes(struct meminfo *); +extern void memtable_init(struct meminfo *); +extern void iotable_init(struct map_desc *); +extern void setup_io_desc(void); diff -urN linux-2.5.70-bk13/include/asm-arm26/mc146818rtc.h linux-2.5.70-bk14/include/asm-arm26/mc146818rtc.h --- linux-2.5.70-bk13/include/asm-arm26/mc146818rtc.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/mc146818rtc.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,28 @@ +/* + * Machine dependent access functions for RTC registers. + */ +#ifndef _ASM_MC146818RTC_H +#define _ASM_MC146818RTC_H + +#include +#include + +#ifndef RTC_PORT +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ +#endif + +/* + * The yet supported machines all access the RTC index register via + * an ISA port access but the way to access the date register differs ... + */ +#define CMOS_READ(addr) ({ \ +outb_p((addr),RTC_PORT(0)); \ +inb_p(RTC_PORT(1)); \ +}) +#define CMOS_WRITE(val, addr) ({ \ +outb_p((addr),RTC_PORT(0)); \ +outb_p((val),RTC_PORT(1)); \ +}) + +#endif /* _ASM_MC146818RTC_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/memory.h linux-2.5.70-bk14/include/asm-arm26/memory.h --- linux-2.5.70-bk13/include/asm-arm26/memory.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/memory.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,101 @@ +/* + * linux/include/asm-arm26/memory.h + * + * Copyright (C) 2000-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Note: this file should not be included by non-asm/.h files + */ +#ifndef __ASM_ARM_MEMORY_H +#define __ASM_ARM_MEMORY_H + +/* + * User space: 26MB + */ +#define TASK_SIZE (0x01a00000UL) + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
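+ * With TASK_SIZE = 0x01a00000 the value below works out to 0x008aaaaa,
+ * i.e. roughly a third of the way into the 26MB user area.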
+ */ +#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) + +/* + * Page offset: 32MB + */ +#define PAGE_OFFSET (0x02000000UL) +#define PHYS_OFFSET (0x02000000UL) + +#define PHYS_TO_NID(addr) (0) + +/* + * PFNs are used to describe any physical page; this means + * PFN 0 == physical address 0. + * + * This is the PFN of the first RAM page in the kernel + * direct-mapped view. We assume this is the first page + * of RAM in the mem_map as well. + */ +#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) + +/* + * These are *only* valid on the kernel direct mapped RAM memory. + */ +static inline unsigned long virt_to_phys(void *x) +{ + return (unsigned long)x; +} + +static inline void *phys_to_virt(unsigned long x) +{ + return (void *)((unsigned long)x); +} + +#define __pa(x) (unsigned long)(x) +#define __va(x) ((void *)(unsigned long)(x)) + +/* + * Virtual <-> DMA view memory address translations + * Again, these are *only* valid on the kernel direct mapped RAM + * memory. Use of these is *depreciated*. + */ +#define virt_to_bus(x) ((unsigned long)(x)) +#define bus_to_virt(x) ((void *)((unsigned long)(x))) + +/* + * Conversion between a struct page and a physical address. + * + * Note: when converting an unknown physical address to a + * struct page, the resulting pointer must be validated + * using VALID_PAGE(). It must return an invalid struct page + * for any physical address not corresponding to a system + * RAM address. + * + * page_to_pfn(page) convert a struct page * to a PFN number + * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * + * pfn_valid(pfn) indicates whether a PFN number is valid + * + * virt_to_page(k) convert a _valid_ virtual address to struct page * + * virt_addr_valid(k) indicates whether a virtual address is valid + */ +#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET) +#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET) +#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) + +#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) +#define virt_addr_valid(kaddr) ((int)(kaddr) >= PAGE_OFFSET && (int)(kaddr) < (unsigned long)high_memory) + +/* + * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die. + */ +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +/* + * We should really eliminate virt_to_bus() here - it's depreciated. 
+ */ +#define page_to_bus(page) (page_address(page)) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/mman.h linux-2.5.70-bk14/include/asm-arm26/mman.h --- linux-2.5.70-bk13/include/asm-arm26/mman.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/mman.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,41 @@ +#ifndef __ARM_MMAN_H__ +#define __ARM_MMAN_H__ + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ + +#define MAP_SHARED 0x01 /* Share changes */ +#define MAP_PRIVATE 0x02 /* Changes are private */ +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x10 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x20 /* don't use a file */ + +#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ +#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ +#define MAP_LOCKED 0x2000 /* pages are locked */ +#define MAP_NORESERVE 0x4000 /* don't check for reservations */ +#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ +#define MAP_NONBLOCK 0x10000 /* do not block on IO */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_INVALIDATE 2 /* invalidate the caches */ +#define MS_SYNC 4 /* synchronous memory sync */ + +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#define MADV_NORMAL 0x0 /* default page-in behavior */ +#define MADV_RANDOM 0x1 /* page-in minimum required */ +#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ +#define MADV_WILLNEED 0x3 /* pre-fault pages */ +#define MADV_DONTNEED 0x4 /* discard these pages */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#endif /* __ARM_MMAN_H__ */ diff -urN linux-2.5.70-bk13/include/asm-arm26/mmu.h linux-2.5.70-bk14/include/asm-arm26/mmu.h --- linux-2.5.70-bk13/include/asm-arm26/mmu.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/mmu.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,9 @@ +#ifndef __ARM_MMU_H +#define __ARM_MMU_H + +/* + * The ARM doesn't have a mmu context + */ +typedef struct { } mm_context_t; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/mmu_context.h linux-2.5.70-bk14/include/asm-arm26/mmu_context.h --- linux-2.5.70-bk13/include/asm-arm26/mmu_context.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/mmu_context.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,51 @@ +/* + * linux/include/asm-arm/mmu_context.h + * + * Copyright (C) 1996 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 27-06-1996 RMK Created + */ +#ifndef __ASM_ARM_MMU_CONTEXT_H +#define __ASM_ARM_MMU_CONTEXT_H + +#define init_new_context(tsk,mm) 0 +#define destroy_context(mm) do { } while(0) + +/* + * This is called when "tsk" is about to enter lazy TLB mode. 
+ * + * mm: describes the currently active mm context + * tsk: task which is entering lazy tlb + * cpu: cpu number which is entering lazy tlb + * + * tsk->mm will be NULL + */ +static inline void +enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) +{ +} + +/* + * This is the actual mm switch as far as the scheduler + * is concerned. No registers are touched. + */ +static inline void +switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk, unsigned int cpu) +{ + cpu_switch_mm(next->pgd, next); +} + +#define deactivate_mm(tsk,mm) do { } while (0) + +static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + cpu_switch_mm(next->pgd, next); +} + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/module.h linux-2.5.70-bk14/include/asm-arm26/module.h --- linux-2.5.70-bk13/include/asm-arm26/module.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/module.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,12 @@ +#ifndef _ASM_ARM_MODULE_H +#define _ASM_ARM_MODULE_H +/* + * This file contains the arm architecture specific module code. + */ + +#define module_map(x) vmalloc(x) +#define module_unmap(x) vfree(x) +#define module_arch_init(x) (0) +#define arch_init_modules(x) do { } while (0) + +#endif /* _ASM_ARM_MODULE_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/msgbuf.h linux-2.5.70-bk14/include/asm-arm26/msgbuf.h --- linux-2.5.70-bk13/include/asm-arm26/msgbuf.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/msgbuf.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,31 @@ +#ifndef _ASMARM_MSGBUF_H +#define _ASMARM_MSGBUF_H + +/* + * The msqid64_ds structure for arm architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + unsigned long __unused1; + __kernel_time_t msg_rtime; /* last msgrcv time */ + unsigned long __unused2; + __kernel_time_t msg_ctime; /* last change time */ + unsigned long __unused3; + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* _ASMARM_MSGBUF_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/namei.h linux-2.5.70-bk14/include/asm-arm26/namei.h --- linux-2.5.70-bk13/include/asm-arm26/namei.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/namei.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,25 @@ +/* + * linux/include/asm-arm/namei.h + * + * Routines to handle famous /usr/gnemul + * Derived from the Sparc version of this file + * + * Included from linux/fs/namei.c + */ + +#ifndef __ASMARM_NAMEI_H +#define __ASMARM_NAMEI_H + +#define ARM_BSD_EMUL "usr/gnemul/bsd/" + +static inline char *__emul_prefix(void) +{ + switch (current->personality) { + case PER_BSD: + return ARM_BSD_EMUL; + default: + return NULL; + } +} + +#endif /* __ASMARM_NAMEI_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/oldlatches.h linux-2.5.70-bk14/include/asm-arm26/oldlatches.h --- linux-2.5.70-bk13/include/asm-arm26/oldlatches.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/oldlatches.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,37 @@ +/* + * linux/include/asm-arm/arch-arc/oldlatches.h + * + * Copyright (C) 1996 Russell King, Dave Gilbert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Modifications: + * 04-04-1998 PJB/RMK Merged arc and a5k versions + */ +#ifndef _ASM_ARCH_OLDLATCH_H +#define _ASM_ARCH_OLDLATCH_H + +#define LATCHA_FDSEL0 (1<<0) +#define LATCHA_FDSEL1 (1<<1) +#define LATCHA_FDSEL2 (1<<2) +#define LATCHA_FDSEL3 (1<<3) +#define LATCHA_FDSELALL (0xf) +#define LATCHA_SIDESEL (1<<4) +#define LATCHA_MOTOR (1<<5) +#define LATCHA_INUSE (1<<6) +#define LATCHA_CHANGERST (1<<7) + +#define LATCHB_FDCDENSITY (1<<1) +#define LATCHB_FDCRESET (1<<3) +#define LATCHB_PRINTSTROBE (1<<4) + +/* newval=(oldval & mask)|newdata */ +void oldlatch_bupdate(unsigned char mask,unsigned char newdata); + +/* newval=(oldval & mask)|newdata */ +void oldlatch_aupdate(unsigned char mask,unsigned char newdata); + +#endif + diff -urN linux-2.5.70-bk13/include/asm-arm26/page.h linux-2.5.70-bk14/include/asm-arm26/page.h --- linux-2.5.70-bk13/include/asm-arm26/page.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/page.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,115 @@ +#ifndef _ASMARM_PAGE_H +#define _ASMARM_PAGE_H + +#include + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +extern void __clear_user_page(void *p, unsigned long user); +extern void __copy_user_page(void *to, const void *from, unsigned long user); +extern void copy_page(void *to, const void *from); + +//FIXME these may be wrong on ARM26 +#define clear_user_page(addr,vaddr,pg) \ + do { \ + preempt_disable(); \ + __clear_user_page(addr, vaddr); \ + preempt_enable(); \ + } while (0) + +#define copy_user_page(to,from,vaddr,pg) \ + do { \ + preempt_disable(); \ + __copy_user_page(to, from, vaddr); \ + preempt_enable(); \ + } while (0) + +#define clear_page(page) memzero((void *)(page), PAGE_SIZE) +#define copy_page(to, from) __copy_user_page(to, from, 0); + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pgd_val(x) ((x).pgd) +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else +/* + * .. while these make it easier on the compiler + */ +typedef unsigned long pgd_t; +typedef unsigned long pte_t; +typedef unsigned long pmd_t; +typedef unsigned long pgprot_t; + +//FIXME - should these cast to unsigned long? +#define pgd_val(x) (x) +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pmd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +/* PAGE_SHIFT determines the page size. This is configurable. 
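+ *
+ * For example, the default PAGE_SHIFT of 15 gives PAGE_SIZE = 0x8000 (32K)
+ * and PAGE_MASK = 0xffff8000, while CONFIG_PAGESIZE_16 selects 16K pages
+ * (PAGE_SHIFT = 14, PAGE_MASK = 0xffffc000).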
*/ +#if defined(CONFIG_PAGESIZE_16) +#define PAGE_SHIFT 14 /* 16K */ +#else /* default */ +#define PAGE_SHIFT 15 /* 32K */ +#endif + +#define EXEC_PAGESIZE 32768 + +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +/* Pure 2^n version of get_order */ +static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +#include + +#endif /* !__ASSEMBLY__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#endif /* __KERNEL__ */ + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/param.h linux-2.5.70-bk14/include/asm-arm26/param.h --- linux-2.5.70-bk13/include/asm-arm26/param.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/param.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,37 @@ +/* + * linux/include/asm-arm/param.h + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_PARAM_H +#define __ASM_PARAM_H + +#ifndef __KERNEL_HZ +#define __KERNEL_HZ 100 +#endif + +#ifdef __KERNEL__ +# define HZ __KERNEL_HZ /* Internal kernel timer frequency */ +# define USER_HZ 100 /* User interfaces are in "ticks" */ +# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ +#else +# define HZ 100 +#endif + +#ifndef NGROUPS +#define NGROUPS 32 +#endif + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +/* max length of hostname */ +#define MAXHOSTNAMELEN 64 + +#endif + diff -urN linux-2.5.70-bk13/include/asm-arm26/parport.h linux-2.5.70-bk14/include/asm-arm26/parport.h --- linux-2.5.70-bk13/include/asm-arm26/parport.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/parport.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,18 @@ +/* + * linux/include/asm-arm/parport.h: ARM-specific parport initialisation + * + * Copyright (C) 1999, 2000 Tim Waugh + * + * This file should only be included by drivers/parport/parport_pc.c. + */ + +#ifndef __ASMARM_PARPORT_H +#define __ASMARM_PARPORT_H + +static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); +static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) +{ + return parport_pc_find_isa_ports (autoirq, autodma); +} + +#endif /* !(_ASMARM_PARPORT_H) */ diff -urN linux-2.5.70-bk13/include/asm-arm26/pci.h linux-2.5.70-bk14/include/asm-arm26/pci.h --- linux-2.5.70-bk13/include/asm-arm26/pci.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/pci.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,5 @@ +/* Should not be needed. 
IDE stupidity */ +/* JMA 18.05.03 - is kinda needed, if only to tell it we don't have a PCI bus */ + +#define PCI_DMA_BUS_IS_PHYS 0 + diff -urN linux-2.5.70-bk13/include/asm-arm26/percpu.h linux-2.5.70-bk14/include/asm-arm26/percpu.h --- linux-2.5.70-bk13/include/asm-arm26/percpu.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/percpu.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,6 @@ +#ifndef __ARM_PERCPU +#define __ARM_PERCPU + +#include + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/pgalloc.h linux-2.5.70-bk14/include/asm-arm26/pgalloc.h --- linux-2.5.70-bk13/include/asm-arm26/pgalloc.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/pgalloc.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,70 @@ +/* + * linux/include/asm-arm/pgalloc.h + * + * Copyright (C) 2000-2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASMARM_PGALLOC_H +#define _ASMARM_PGALLOC_H + +#include +#include +#include +#include + +extern kmem_cache_t *pte_cache; + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){ + return kmem_cache_alloc(pte_cache, GFP_KERNEL); +} + +static inline void pte_free_kernel(pte_t *pte){ + if (pte) + kmem_cache_free(pte_cache, pte); +} + +/* + * Populate the pmdp entry with a pointer to the pte. This pmd is part + * of the mm address space. + * + * If 'mm' is the init tasks mm, then we are doing a vmalloc, and we + * need to set stuff up correctly for it. + */ +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) +{ +//FIXME - is this doing the right thing? + set_pmd(pmdp, (unsigned long)ptep | 1/*FIXME _PMD_PRESENT*/); +} + +/* + * FIXME - We use the old 2.5.5-rmk1 hack for this. + * This is not truly correct, but should be functional. + */ +#define pte_alloc_one(mm,addr) ((struct page *)pte_alloc_one_kernel(mm,addr)) +#define pte_free(pte) pte_free_kernel((pte_t *)pte) +#define pmd_populate(mm,pmdp,ptep) pmd_populate_kernel(mm,pmdp,(pte_t *)ptep) + +/* + * Since we have only two-level page tables, these are trivial + * + * trick __pmd_alloc into optimising away. The actual value is irrelevant though as it + * is thrown away. It just cant be zero. -IM + */ + +#define pmd_alloc_one(mm,addr) ((pmd_t *)2); BUG() +#define pmd_free(pmd) do { } while (0) +#define pgd_populate(mm,pmd,pte) (0) + +extern pgd_t *get_pgd_slow(struct mm_struct *mm); +extern void free_pgd_slow(pgd_t *pgd); + +#define pgd_alloc(mm) get_pgd_slow(mm) +#define pgd_free(pgd) free_pgd_slow(pgd) + +#define check_pgt_cache() do { } while (0) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/pgtable.h linux-2.5.70-bk14/include/asm-arm26/pgtable.h --- linux-2.5.70-bk13/include/asm-arm26/pgtable.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/pgtable.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,298 @@ +/* + * linux/include/asm-arm26/pgtable.h + * + * Copyright (C) 2000-2002 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASMARM_PGTABLE_H +#define _ASMARM_PGTABLE_H + +#include +#include + +/* + * The table below defines the page protection levels that we insert into our + * Linux page table version. These get translated into the best that the + * architecture can perform. Note that on most ARM hardware: + * 1) We cannot do execute protection + * 2) If we could do execute protection, then read is implied + * 3) write implies read permissions + */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED + +/* + * PMD_SHIFT determines the size of the area a second-level page table can map + * PGDIR_SHIFT determines what a third-level page table entry can map + */ +#define PGD_SHIFT 25 +#define PMD_SHIFT 20 + +#define PGD_SIZE (1UL << PGD_SHIFT) +#define PGD_MASK (~(PGD_SIZE-1)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* The kernel likes to use these names for the above (ick) */ +#define PGDIR_SIZE PGD_SIZE +#define PGDIR_MASK PGD_MASK + +#define PTRS_PER_PGD 32 +#define PTRS_PER_PMD 1 +#define PTRS_PER_PTE 32 + +#define FIRST_USER_PGD_NR 1 +#define USER_PTRS_PER_PGD ((TASK_SIZE/PGD_SIZE) - FIRST_USER_PGD_NR) + +// FIXME - WTF? +#define LIBRARY_TEXT_START 0x0c000000 + + + +#ifndef __ASSEMBLY__ +extern void __pte_error(const char *file, int line, unsigned long val); +extern void __pmd_error(const char *file, int line, unsigned long val); +extern void __pgd_error(const char *file, int line, unsigned long val); + +#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) +#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) +#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern struct page *empty_zero_page; +#define ZERO_PAGE(vaddr) (empty_zero_page) + +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define pte_page(pte) (pfn_to_page(pte_pfn(pte))) +#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) +#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) +#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) +#define page_pte_prot(page,prot) mk_pte(page, prot) +#define page_pte(page) mk_pte(page, __pgprot(0)) + +/* + * Terminology: PGD = Page Directory, PMD = Page Middle Directory, + * PTE = Page Table Entry + * + * on arm26 we have no 2nd level page table. we simulate this by removing the + * PMD. + * + * pgd_none is 0 to prevernt pmd_alloc() calling __pmd_alloc(). This causes it + * to return pmd_offset(pgd,addr) which is a pointer to the pgd (IOW, a no-op). + * + * however, to work this way, whilst we are allocating 32 pgds, containing 32 + * PTEs, the actual work is done on the PMDs, thus: + * + * instead of mm->pgd->pmd->pte + * we have mm->pgdpmd->pte + * + * IOW, think of PGD operations and PMD ones as being the same thing, just + * that PGD stuff deals with the mm_struct side of things, wheras PMD stuff + * deals with the pte side of things. 
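+ *
+ * as a purely mechanical illustration of the macros further down: for a
+ * user address addr, pgd_offset(mm, addr) yields &mm->pgd[addr >> PGD_SHIFT],
+ * pmd_offset() of that just casts it to a pmd_t * and returns the same
+ * pointer, and pte_offset() indexes ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+ * into the page table that the entry points at.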
+ * + * additionally, we store some bits in the PGD and PTE pointers: + * PGDs: + * o The lowest (1) bit of the PGD is to determine if it is present or swap. + * o The 2nd bit of the PGD is unused and must be zero. + * o The top 6 bits of the PGD must be zero. + * PTEs: + * o The lower 5 bits of a pte are flags. bit 1 is the 'present' flag. The + * others determine the pages attributes. + * + * the pgd_val, pmd_val, and pte_val macros seem to be private to our code. + * They get the RAW value of the PGD/PMD/PTE entry, including our flags + * encoded into the pointers. + * + * The pgd_offset, pmd_offset, and pte_offset macros are used by the kernel, + * so they shouldnt have our flags attached. + * + * If you understood that, feel free to explain it to me... + * + */ + +#define _PMD_PRESENT (0x01) + +/* These definitions allow us to optimise out stuff like pmd_alloc() */ +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_present(pgd) (1) +#define pgd_clear(pgdp) do { } while (0) + +/* Whilst these handle our actual 'page directory' (the agglomeration of pgd and pmd) + */ +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) ((pmd_val(pmd) & 0xfc000002)) +#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT) +#define set_pmd(pmd_ptr, pmd) ((*(pmd_ptr)) = (pmd)) +#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0)) + +/* and these handle our pte tables */ +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) +#define set_pte(pte_ptr, pte) ((*(pte_ptr)) = (pte)) +#define pte_clear(ptep) set_pte((ptep), __pte(0)) + +/* macros to ease the getting of pointers to stuff... */ +#define pgd_offset(mm, addr) ((pgd_t *)(mm)->pgd + __pgd_index(addr)) +#define pmd_offset(pgd, addr) ((pmd_t *)(pgd)) +#define pte_offset(pmd, addr) ((pte_t *)pmd_page(*(pmd)) + __pte_index(addr)) + +/* there is no __pmd_index as we dont use pmds */ +#define __pgd_index(addr) ((addr) >> PGD_SHIFT) +#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + + +/* Keep the kernel happy */ +#define pgd_index(addr) __pgd_index(addr) +#define pgd_offset_k(addr) (pgd_offset(&init_mm, addr)) + +/* + * The vmalloc() routines leaves a hole of 4kB between each vmalloced + * area for the same reason. ;) FIXME: surely 1 page not 4k ? + */ +#define VMALLOC_START 0x01a00000 +#define VMALLOC_VMADDR(x) ((unsigned long)(x)) +#define VMALLOC_END 0x01c00000 + +/* Is pmd_page supposed to return a pointer to a page in some arches? ours seems to + * return a pointer to memory (no special alignment) + */ +#define pmd_page(pmd) ((unsigned long)(pmd_val((pmd)) & ~_PMD_PRESENT)) +#define pmd_page_kernel(pmd) ((pte_t *)(pmd_val((pmd)) & ~_PMD_PRESENT)) + +#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) + +#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) +#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + + +#define _PAGE_PRESENT 0x01 +#define _PAGE_READONLY 0x02 +#define _PAGE_NOT_USER 0x04 +#define _PAGE_OLD 0x08 +#define _PAGE_CLEAN 0x10 + +// an old page has never been read. +// a clean page has never been written. 
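+//
+// so, reading the definitions below: a fresh PAGE_READONLY mapping is
+// _PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY - present, user-accessible,
+// never written and not writable; pte_mkdirty() clears _PAGE_CLEAN and
+// pte_mkwrite() clears _PAGE_READONLY.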
+ +/* -- present -- -- !dirty -- --- !write --- ---- !user --- */ +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY | _PAGE_NOT_USER) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CLEAN ) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY ) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY ) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_NOT_USER) + +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_OLD | _PAGE_CLEAN) + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +#define pte_read(pte) (!(pte_val(pte) & _PAGE_NOT_USER)) +#define pte_write(pte) (!(pte_val(pte) & _PAGE_READONLY)) +#define pte_exec(pte) (!(pte_val(pte) & _PAGE_NOT_USER)) +#define pte_dirty(pte) (!(pte_val(pte) & _PAGE_CLEAN)) +#define pte_young(pte) (!(pte_val(pte) & _PAGE_OLD)) +//ONLY when !pte_present() I think. nicked from arm32 (FIXME!) +#define pte_file(pte) (!(pte_val(pte) & _PAGE_OLD)) + +#define PTE_BIT_FUNC(fn,op) \ +static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } + +PTE_BIT_FUNC(wrprotect, |= _PAGE_READONLY); +PTE_BIT_FUNC(mkwrite, &= ~_PAGE_READONLY); +PTE_BIT_FUNC(exprotect, |= _PAGE_NOT_USER); +PTE_BIT_FUNC(mkexec, &= ~_PAGE_NOT_USER); +PTE_BIT_FUNC(mkclean, |= _PAGE_CLEAN); +PTE_BIT_FUNC(mkdirty, &= ~_PAGE_CLEAN); +PTE_BIT_FUNC(mkold, |= _PAGE_OLD); +PTE_BIT_FUNC(mkyoung, &= ~_PAGE_OLD); + +/* + * We don't store cache state bits in the page table here. FIXME - or do we? + */ +#define pgprot_noncached(prot) (prot) +#define pgprot_writecombine(prot) (prot) //FIXME - is a no-op? + +extern void pgtable_cache_init(void); + +//FIXME - nicked from arm32 and brutally hacked. probably wrong. +#define pte_to_pgoff(x) (pte_val(x) >> 2) +#define pgoff_to_pte(x) __pte(((x) << 2) & ~_PAGE_OLD) + +//FIXME - next line borrowed from arm32. is it right? +#define PTE_FILE_MAX_BITS 30 + + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +/* Encode and decode a swap entry. + * + * We support up to 32GB of swap on 4k machines + */ +#define __swp_type(x) (((x).val >> 2) & 0x7f) +#define __swp_offset(x) ((x).val >> 9) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) + +/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ +/* FIXME: this is not correct */ +#define kern_addr_valid(addr) (1) + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
+ */ +static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) +{ + pte_t pte; + pte_val(pte) = physpage | pgprot_val(pgprot); + return pte; +} + + +#include + +/* + * remap a physical address `phys' of size `size' with page protection `prot' + * into virtual address `from' + */ +#define io_remap_page_range(vma,from,phys,size,prot) \ + remap_page_range(vma,from,phys,size,prot) + +typedef pte_t *pte_addr_t; + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASMARM_PGTABLE_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/poll.h linux-2.5.70-bk14/include/asm-arm26/poll.h --- linux-2.5.70-bk13/include/asm-arm26/poll.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/poll.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,25 @@ +#ifndef __ASMARM_POLL_H +#define __ASMARM_POLL_H + +/* These are specified by iBCS2 */ +#define POLLIN 0x0001 +#define POLLPRI 0x0002 +#define POLLOUT 0x0004 +#define POLLERR 0x0008 +#define POLLHUP 0x0010 +#define POLLNVAL 0x0020 + +/* The rest seem to be more-or-less nonstandard. Check them! */ +#define POLLRDNORM 0x0040 +#define POLLRDBAND 0x0080 +#define POLLWRNORM 0x0100 +#define POLLWRBAND 0x0200 +#define POLLMSG 0x0400 + +struct pollfd { + int fd; + short events; + short revents; +}; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/posix_types.h linux-2.5.70-bk14/include/asm-arm26/posix_types.h --- linux-2.5.70-bk13/include/asm-arm26/posix_types.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/posix_types.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,81 @@ +/* + * linux/include/asm-arm/posix_types.h + * + * Copyright (C) 1996-1998 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 27-06-1996 RMK Created + */ +#ifndef __ARCH_ARM_POSIX_TYPES_H +#define __ARCH_ARM_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. 
+ */ + +typedef unsigned short __kernel_dev_t; +typedef unsigned long __kernel_ino_t; +typedef unsigned short __kernel_mode_t; +typedef unsigned short __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef unsigned short __kernel_ipc_pid_t; +typedef unsigned short __kernel_uid_t; +typedef unsigned short __kernel_gid_t; +typedef unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { +#if defined(__KERNEL__) || defined(__USE_ALL) + int val[2]; +#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ + int __val[2]; +#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ +} __kernel_fsid_t; + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#undef __FD_SET +#define __FD_SET(fd, fdsetp) \ + (((fd_set *)fdsetp)->fds_bits[fd >> 5] |= (1<<(fd & 31))) + +#undef __FD_CLR +#define __FD_CLR(fd, fdsetp) \ + (((fd_set *)fdsetp)->fds_bits[fd >> 5] &= ~(1<<(fd & 31))) + +#undef __FD_ISSET +#define __FD_ISSET(fd, fdsetp) \ + ((((fd_set *)fdsetp)->fds_bits[fd >> 5] & (1<<(fd & 31))) != 0) + +#undef __FD_ZERO +#define __FD_ZERO(fdsetp) \ + (memset (fdsetp, 0, sizeof (*(fd_set *)fdsetp))) + +#endif + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/proc-fns.h linux-2.5.70-bk14/include/asm-arm26/proc-fns.h --- linux-2.5.70-bk13/include/asm-arm26/proc-fns.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/proc-fns.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,49 @@ +/* + * linux/include/asm-arm26/proc-fns.h + * + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASSEMBLY__ + +#include + +/* + * Don't change this structure - ASM code + * relies on it. 
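+ *
+ * The cpu_*() wrappers further down simply dispatch through this table;
+ * for example cpu_switch_mm(next->pgd, next) (as used by mmu_context.h)
+ * expands to processor._set_pgd(next->pgd).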
+ */ +extern struct processor { + /* check for any bugs */ + void (*_check_bugs)(void); + /* Set up any processor specifics */ + void (*_proc_init)(void); + /* Disable any processor specifics */ + void (*_proc_fin)(void); + /* set the MEMC hardware mappings */ + void (*_set_pgd)(pgd_t *pgd); + /* XCHG */ + unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr); + unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr); +} processor; + +extern const struct processor arm2_processor_functions; +extern const struct processor arm250_processor_functions; +extern const struct processor arm3_processor_functions; + +#define cpu_check_bugs() processor._check_bugs() +#define cpu_proc_init() processor._proc_init() +#define cpu_proc_fin() processor._proc_fin() +#define cpu_do_idle() do { } while (0) +#define cpu_switch_mm(pgd,mm) processor._set_pgd(pgd) +#define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr) +#define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr) + + +//FIXME - these shouldnt be in proc-fn.h +extern void cpu_memc_update_all(pgd_t *pgd); +extern void cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long log_addr); + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/processor.h linux-2.5.70-bk14/include/asm-arm26/processor.h --- linux-2.5.70-bk13/include/asm-arm26/processor.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/processor.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,121 @@ +/* + * linux/include/asm-arm26/processor.h + * + * Copyright (C) 1995 Russell King + * Copyright (C) 2003 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARM_PROCESSOR_H +#define __ASM_ARM_PROCESSOR_H + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l;}) + +#ifdef __KERNEL__ + +#define EISA_bus 0 +#define MCA_bus 0 +#define MCA_bus__is_a_macro + +#include +#include +#include + +#define KERNEL_STACK_SIZE 4096 + +typedef struct { + void (*put_byte)(void); /* Special calling convention */ + void (*get_byte)(void); /* Special calling convention */ + void (*put_half)(void); /* Special calling convention */ + void (*get_half)(void); /* Special calling convention */ + void (*put_word)(void); /* Special calling convention */ + void (*get_word)(void); /* Special calling convention */ + void (*put_dword)(void); /* Special calling convention */ + unsigned long (*copy_from_user)(void *to, const void *from, unsigned long sz); + unsigned long (*copy_to_user)(void *to, const void *from, unsigned long sz); + unsigned long (*clear_user)(void *addr, unsigned long sz); + unsigned long (*strncpy_from_user)(char *to, const char *from, unsigned long sz); + unsigned long (*strnlen_user)(const char *s, long n); +} uaccess_t; + +extern uaccess_t uaccess_user, uaccess_kernel; + +#define EXTRA_THREAD_STRUCT \ + uaccess_t *uaccess; /* User access functions*/ + +#define EXTRA_THREAD_STRUCT_INIT \ + uaccess: &uaccess_kernel, + +// FIXME?!! 
+ +#define start_thread(regs,pc,sp) \ +({ \ + unsigned long *stack = (unsigned long *)sp; \ + set_fs(USER_DS); \ + memzero(regs->uregs, sizeof (regs->uregs)); \ + regs->ARM_pc = pc | ~0xfc000003; /* pc */ \ + regs->ARM_sp = sp; /* sp */ \ + regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ + regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ + regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ +}) + +#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020]) +#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1018]) + +struct debug_entry { + u32 address; + u32 insn; +}; + +struct debug_info { + int nsaved; + struct debug_entry bp[2]; +}; + +struct thread_struct { + /* fault info */ + unsigned long address; + unsigned long trap_no; + unsigned long error_code; + /* debugging */ + struct debug_info debug; + EXTRA_THREAD_STRUCT +}; + +#define INIT_THREAD { \ +EXTRA_THREAD_STRUCT_INIT \ +} + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *); + +/* Copy and release all segment info associated with a VM */ +#define copy_segments(tsk, mm) do { } while (0) +#define release_segments(mm) do { } while (0) + +unsigned long get_wchan(struct task_struct *p); + +#define cpu_relax() barrier() + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +/* + * Create a new kernel thread + */ +extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +#endif + +#endif /* __ASM_ARM_PROCESSOR_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/procinfo.h linux-2.5.70-bk14/include/asm-arm26/procinfo.h --- linux-2.5.70-bk13/include/asm-arm26/procinfo.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/procinfo.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,56 @@ +/* + * linux/include/asm-arm/procinfo.h + * + * Copyright (C) 1996-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_PROCINFO_H +#define __ASM_PROCINFO_H + +#ifndef __ASSEMBLY__ + +//struct processor; +//struct cpu_user_fns; + +struct proc_info_item { + const char *manufacturer; + const char *cpu_name; +}; + +/* + * Note! struct processor is always defined if we're + * using MULTI_CPU, otherwise this entry is unused, + * but still exists. + * + * NOTE! The following structure is defined by assembly + * language, NOT C code. 
For more information, check: + * arch/arm/mm/proc-*.S and arch/arm/kernel/head-armv.S + */ +struct proc_info_list { + unsigned int cpu_val; + unsigned int cpu_mask; + const char *arch_name; + const char *elf_name; + unsigned int elf_hwcap; + struct proc_info_item *info; + struct processor *proc; +}; + +#endif /* __ASSEMBLY__ */ + +#define PROC_INFO_SZ 48 + +#define HWCAP_SWP 1 +#define HWCAP_HALF 2 +#define HWCAP_THUMB 4 +#define HWCAP_26BIT 8 /* Play it safe */ +#define HWCAP_FAST_MULT 16 +#define HWCAP_FPA 32 +#define HWCAP_VFP 64 +#define HWCAP_EDSP 128 +#define HWCAP_JAVA 256 + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/ptrace.h linux-2.5.70-bk14/include/asm-arm26/ptrace.h --- linux-2.5.70-bk13/include/asm-arm26/ptrace.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ptrace.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,103 @@ +#ifndef __ASM_ARM_PTRACE_H +#define __ASM_ARM_PTRACE_H + +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 +#define PTRACE_GETFPREGS 14 +#define PTRACE_SETFPREGS 15 +#define PTRACE_OLDSETOPTIONS 21 + +/* options set using PTRACE_SETOPTIONS */ +#define PTRACE_O_TRACESYSGOOD 0x00000001 + +#define MODE_USR26 0x00000000 +#define MODE_FIQ26 0x00000001 +#define MODE_IRQ26 0x00000002 +#define MODE_SVC26 0x00000003 +#define MODE_MASK 0x00000003 + +#define PSR_F_BIT 0x04000000 +#define PSR_I_BIT 0x08000000 +#define PSR_V_BIT 0x10000000 +#define PSR_C_BIT 0x20000000 +#define PSR_Z_BIT 0x40000000 +#define PSR_N_BIT 0x80000000 + +#define PCMASK 0xfc000003 + + +#ifndef __ASSEMBLY__ + +#define pc_pointer(v) ((v) & ~PCMASK) /* convert v to pc type address */ +#define instruction_pointer(regs) (pc_pointer((regs)->ARM_pc)) /* get pc */ + +/* this struct defines the way the registers are stored on the + stack during a system call. */ + +struct pt_regs { + long uregs[17]; +}; + +#define ARM_pc uregs[15] +#define ARM_lr uregs[14] +#define ARM_sp uregs[13] +#define ARM_ip uregs[12] +#define ARM_fp uregs[11] +#define ARM_r10 uregs[10] +#define ARM_r9 uregs[9] +#define ARM_r8 uregs[8] +#define ARM_r7 uregs[7] +#define ARM_r6 uregs[6] +#define ARM_r5 uregs[5] +#define ARM_r4 uregs[4] +#define ARM_r3 uregs[3] +#define ARM_r2 uregs[2] +#define ARM_r1 uregs[1] +#define ARM_r0 uregs[0] +#define ARM_ORIG_r0 uregs[16] + +#ifdef __KERNEL__ + +#define processor_mode(regs) \ + ((regs)->ARM_pc & MODE_MASK) + +#define user_mode(regs) \ + (processor_mode(regs) == MODE_USR26) + +#define interrupts_enabled(regs) \ + (!((regs)->ARM_pc & PSR_I_BIT)) + +#define fast_interrupts_enabled(regs) \ + (!((regs)->ARM_pc & PSR_F_BIT)) + +#define condition_codes(regs) \ + ((regs)->ARM_pc & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT)) + +/* Are the current registers suitable for user mode? 
+ * (used to maintain security in signal handlers) + */ +static inline int valid_user_regs(struct pt_regs *regs) +{ + if (user_mode(regs) && + (regs->ARM_pc & (PSR_F_BIT | PSR_I_BIT)) == 0) + return 1; + + /* + * force it to be something sensible + */ + regs->ARM_pc &= ~(MODE_MASK | PSR_F_BIT | PSR_I_BIT); + + return 0; +} + +extern void show_regs(struct pt_regs *); + +#define predicate(x) (x & 0xf0000000) +#define PREDICATE_ALWAYS 0xe0000000 + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#endif + diff -urN linux-2.5.70-bk13/include/asm-arm26/resource.h linux-2.5.70-bk14/include/asm-arm26/resource.h --- linux-2.5.70-bk13/include/asm-arm26/resource.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/resource.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,47 @@ +#ifndef _ARM_RESOURCE_H +#define _ARM_RESOURCE_H + +/* + * Resource limits + */ + +#define RLIMIT_CPU 0 /* CPU time in ms */ +#define RLIMIT_FSIZE 1 /* Maximum filesize */ +#define RLIMIT_DATA 2 /* max data size */ +#define RLIMIT_STACK 3 /* max stack size */ +#define RLIMIT_CORE 4 /* max core file size */ +#define RLIMIT_RSS 5 /* max resident set size */ +#define RLIMIT_NPROC 6 /* max number of processes */ +#define RLIMIT_NOFILE 7 /* max number of open files */ +#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ +#define RLIMIT_AS 9 /* address space limit */ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ + +#define RLIM_NLIMITS 11 + +#ifdef __KERNEL__ + +/* + * SuS says limits have to be unsigned. + * Which makes a ton more sense anyway. + */ +#define RLIM_INFINITY (~0UL) + +#define INIT_RLIMITS \ +{ \ + { RLIM_INFINITY, RLIM_INFINITY }, \ + { RLIM_INFINITY, RLIM_INFINITY }, \ + { RLIM_INFINITY, RLIM_INFINITY }, \ + { _STK_LIM, RLIM_INFINITY }, \ + { 0, RLIM_INFINITY }, \ + { RLIM_INFINITY, RLIM_INFINITY }, \ + { 0, 0 }, \ + { INR_OPEN, INR_OPEN }, \ + { RLIM_INFINITY, RLIM_INFINITY }, \ + { RLIM_INFINITY, RLIM_INFINITY }, \ + { RLIM_INFINITY, RLIM_INFINITY }, \ +} + +#endif /* __KERNEL__ */ + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/rmap.h linux-2.5.70-bk14/include/asm-arm26/rmap.h --- linux-2.5.70-bk13/include/asm-arm26/rmap.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/rmap.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,66 @@ +#ifndef _ARM_RMAP_H +#define _ARM_RMAP_H + +/* + * linux/include/asm-arm26/proc-armv/rmap.h + * + * Architecture dependant parts of the reverse mapping code, + * + * ARM is different since hardware page tables are smaller than + * the page size and Linux uses a "duplicate" one with extra info. + * For rmap this means that the first 2 kB of a page are the hardware + * page tables and the last 2 kB are the software page tables. 
+ */ + +static inline void pgtable_add_rmap(struct page *page, struct mm_struct * mm, unsigned long address) +{ + page->mapping = (void *)mm; + page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1); + inc_page_state(nr_page_table_pages); +} + +static inline void pgtable_remove_rmap(struct page *page) +{ + page->mapping = NULL; + page->index = 0; + dec_page_state(nr_page_table_pages); +} + +static inline struct mm_struct * ptep_to_mm(pte_t * ptep) +{ + struct page * page = virt_to_page(ptep); + return (struct mm_struct *)page->mapping; +} + +/* The page table takes half of the page */ +#define PTE_MASK ((PAGE_SIZE / 2) - 1) + +static inline unsigned long ptep_to_address(pte_t * ptep) +{ + struct page * page = virt_to_page(ptep); + unsigned long low_bits; + + low_bits = ((unsigned long)ptep & PTE_MASK) * PTRS_PER_PTE; + return page->index + low_bits; +} + +//FIXME!!! IS these correct? +static inline pte_addr_t ptep_to_paddr(pte_t *ptep) +{ + return (pte_addr_t)ptep; +} + +static inline pte_t *rmap_ptep_map(pte_addr_t pte_paddr) +{ + return (pte_t *)pte_paddr; +} + +static inline void rmap_ptep_unmap(pte_t *pte) +{ + return; +} + + +//#include + +#endif /* _ARM_RMAP_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/scatterlist.h linux-2.5.70-bk14/include/asm-arm26/scatterlist.h --- linux-2.5.70-bk13/include/asm-arm26/scatterlist.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/scatterlist.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,26 @@ +#ifndef _ASMARM_SCATTERLIST_H +#define _ASMARM_SCATTERLIST_H + +#include + +struct scatterlist { + struct page *page; /* buffer page */ + unsigned int offset; /* buffer offset */ + dma_addr_t dma_address; /* dma address */ + unsigned int length; /* length */ + char *__address; /* for set_dma_addr */ +}; + +/* + * These macros should be used after a pci_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. + */ +#define sg_dma_address(sg) ((sg)->dma_address) +#define sg_dma_len(sg) ((sg)->length) + +#define ISA_DMA_THRESHOLD (0xffffffff) + +#endif /* _ASMARM_SCATTERLIST_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/segment.h linux-2.5.70-bk14/include/asm-arm26/segment.h --- linux-2.5.70-bk13/include/asm-arm26/segment.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/segment.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,11 @@ +#ifndef __ASM_ARM_SEGMENT_H +#define __ASM_ARM_SEGMENT_H + +#define __KERNEL_CS 0x0 +#define __KERNEL_DS 0x0 + +#define __USER_CS 0x1 +#define __USER_DS 0x1 + +#endif /* __ASM_ARM_SEGMENT_H */ + diff -urN linux-2.5.70-bk13/include/asm-arm26/semaphore-helper.h linux-2.5.70-bk14/include/asm-arm26/semaphore-helper.h --- linux-2.5.70-bk13/include/asm-arm26/semaphore-helper.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/semaphore-helper.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,84 @@ +#ifndef ASMARM_SEMAPHORE_HELPER_H +#define ASMARM_SEMAPHORE_HELPER_H + +/* + * These two _must_ execute atomically wrt each other. 
+ */ +static inline void wake_one_more(struct semaphore * sem) +{ + unsigned long flags; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (atomic_read(&sem->count) <= 0) + sem->waking++; + spin_unlock_irqrestore(&semaphore_wake_lock, flags); +} + +static inline int waking_non_zero(struct semaphore *sem) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->waking > 0) { + sem->waking--; + ret = 1; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking non zero interruptible + * 1 got the lock + * 0 go to sleep + * -EINTR interrupted + * + * We must undo the sem->count down_interruptible() increment while we are + * protected by the spinlock in order to make this atomic_inc() with the + * atomic_read() in wake_one_more(), otherwise we can race. -arca + */ +static inline int waking_non_zero_interruptible(struct semaphore *sem, + struct task_struct *tsk) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->waking > 0) { + sem->waking--; + ret = 1; + } else if (signal_pending(tsk)) { + atomic_inc(&sem->count); + ret = -EINTR; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking_non_zero_try_lock: + * 1 failed to lock + * 0 got the lock + * + * We must undo the sem->count down_interruptible() increment while we are + * protected by the spinlock in order to make this atomic_inc() with the + * atomic_read() in wake_one_more(), otherwise we can race. -arca + */ +static inline int waking_non_zero_trylock(struct semaphore *sem) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->waking <= 0) + atomic_inc(&sem->count); + else { + sem->waking--; + ret = 0; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/semaphore.h linux-2.5.70-bk14/include/asm-arm26/semaphore.h --- linux-2.5.70-bk13/include/asm-arm26/semaphore.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/semaphore.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,128 @@ +/* + * linux/include/asm-arm/semaphore.h + */ +#ifndef __ASM_ARM_SEMAPHORE_H +#define __ASM_ARM_SEMAPHORE_H + +#include +#include +#include +#include + +#include +#include + +struct semaphore { + atomic_t count; + int sleepers; + wait_queue_head_t wait; +#if WAITQUEUE_DEBUG + long __magic; +#endif +}; + +#if WAITQUEUE_DEBUG +# define __SEM_DEBUG_INIT(name) \ + , (long)&(name).__magic +#else +# define __SEM_DEBUG_INIT(name) +#endif + +#define __SEMAPHORE_INIT(name,count) \ + { ATOMIC_INIT(count), 0, \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ + __SEM_DEBUG_INIT(name) } + +#define __MUTEX_INITIALIZER(name) \ + __SEMAPHORE_INIT(name,1) + +#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct semaphore name = __SEMAPHORE_INIT(name,count) + +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) +#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) + +static inline void sema_init(struct semaphore *sem, int val) +{ + atomic_set(&sem->count, val); + sem->sleepers = 0; + init_waitqueue_head(&sem->wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; +#endif +} + +static inline void init_MUTEX(struct semaphore *sem) +{ + sema_init(sem, 1); +} + +static inline void init_MUTEX_LOCKED(struct semaphore *sem) +{ + sema_init(sem, 0); +} + +/* + * special register calling convention + */ +asmlinkage 
void __down_failed(void); +asmlinkage int __down_interruptible_failed(void); +asmlinkage int __down_trylock_failed(void); +asmlinkage void __up_wakeup(void); + +extern void __down(struct semaphore * sem); +extern int __down_interruptible(struct semaphore * sem); +extern int __down_trylock(struct semaphore * sem); +extern void __up(struct semaphore * sem); + +/* + * This is ugly, but we want the default case to fall through. + * "__down" is the actual routine that waits... + */ +static inline void down(struct semaphore * sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + __down_op(sem, __down_failed); +} + +/* + * This is ugly, but we want the default case to fall through. + * "__down_interruptible" is the actual routine that waits... + */ +static inline int down_interruptible (struct semaphore * sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + return __down_op_ret(sem, __down_interruptible_failed); +} + +static inline int down_trylock(struct semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + return __down_op_ret(sem, __down_trylock_failed); +} + +/* + * Note! This is subtle. We jump to wake people up only if + * the semaphore was negative (== somebody was waiting on it). + * The default case (no contention) will result in NO + * jumps for both down() and up(). + */ +static inline void up(struct semaphore * sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + __up_op(sem, __up_wakeup); +} + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/sembuf.h linux-2.5.70-bk14/include/asm-arm26/sembuf.h --- linux-2.5.70-bk13/include/asm-arm26/sembuf.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/sembuf.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,25 @@ +#ifndef _ASMARM_SEMBUF_H +#define _ASMARM_SEMBUF_H + +/* + * The semid64_ds structure for arm architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + unsigned long __unused1; + __kernel_time_t sem_ctime; /* last change time */ + unsigned long __unused2; + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _ASMARM_SEMBUF_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/serial.h linux-2.5.70-bk14/include/asm-arm26/serial.h --- linux-2.5.70-bk13/include/asm-arm26/serial.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/serial.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,65 @@ +/* + * linux/include/asm-arm/serial.h + * + * Copyright (C) 1996 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 15-10-1996 RMK Created + */ + +#ifndef __ASM_SERIAL_H +#define __ASM_SERIAL_H + +#include + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + * + * It'd be nice if someone built a serial card with a 24.576 MHz + * clock, since the 16550A is capable of handling a top speed of 1.5 + * megabits/second; but this requires the faster clock. 
+ */ +#define BASE_BAUD (1843200 / 16) + +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) + +#define RS_TABLE_SIZE 16 + +#if defined(CONFIG_ARCH_A5K) + /* UART CLK PORT IRQ FLAGS */ + +#define STD_SERIAL_PORT_DEFNS \ + { 0, BASE_BAUD, 0x3F8, 10, STD_COM_FLAGS }, /* ttyS0 */ \ + { 0, BASE_BAUD, 0x2F8, 10, STD_COM_FLAGS }, /* ttyS1 */ + +#else + +#define STD_SERIAL_PORT_DEFNS \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS0 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS1 */ + +#endif + +#define EXTRA_SERIAL_PORT_DEFNS \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS2 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS3 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS4 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS5 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS6 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS7 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS8 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS9 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS10 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS11 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS12 */ \ + { 0, BASE_BAUD, 0 , 0, STD_COM_FLAGS }, /* ttyS13 */ + +#define SERIAL_PORT_DFNS \ + STD_SERIAL_PORT_DEFNS \ + EXTRA_SERIAL_PORT_DEFNS + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/setup.h linux-2.5.70-bk14/include/asm-arm26/setup.h --- linux-2.5.70-bk13/include/asm-arm26/setup.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/setup.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,205 @@ +/* + * linux/include/asm/setup.h + * + * Copyright (C) 1997-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Structure passed to kernel to tell it about the + * hardware it's running on. See linux/Documentation/arm/Setup + * for more info. + */ +#ifndef __ASMARM_SETUP_H +#define __ASMARM_SETUP_H + +#define COMMAND_LINE_SIZE 1024 + +/* The list ends with an ATAG_NONE node. */ +#define ATAG_NONE 0x00000000 + +struct tag_header { + u32 size; + u32 tag; +}; + +/* The list must start with an ATAG_CORE node */ +#define ATAG_CORE 0x54410001 + +struct tag_core { + u32 flags; /* bit 0 = read-only */ + u32 pagesize; + u32 rootdev; +}; + +/* it is allowed to have multiple ATAG_MEM nodes */ +#define ATAG_MEM 0x54410002 + +struct tag_mem32 { + u32 size; + u32 start; /* physical start address */ +}; + +/* VGA text type displays */ +#define ATAG_VIDEOTEXT 0x54410003 + +struct tag_videotext { + u8 x; + u8 y; + u16 video_page; + u8 video_mode; + u8 video_cols; + u16 video_ega_bx; + u8 video_lines; + u8 video_isvga; + u16 video_points; +}; + +/* describes how the ramdisk will be used in kernel */ +#define ATAG_RAMDISK 0x54410004 + +struct tag_ramdisk { + u32 flags; /* bit 0 = load, bit 1 = prompt */ + u32 size; /* decompressed ramdisk size in _kilo_ bytes */ + u32 start; /* starting block of floppy-based RAM disk image */ +}; + +/* describes where the compressed ramdisk image lives */ +/* + * this one accidentally used virtual addresses - as such, + * its depreciated. + */ +#define ATAG_INITRD 0x54410005 + +/* describes where the compressed ramdisk image lives */ +#define ATAG_INITRD2 0x54420005 + +struct tag_initrd { + u32 start; /* physical start address */ + u32 size; /* size of compressed ramdisk image in bytes */ +}; + +/* board serial number. 
"64 bits should be enough for everybody" */ +#define ATAG_SERIAL 0x54410006 + +struct tag_serialnr { + u32 low; + u32 high; +}; + +/* board revision */ +#define ATAG_REVISION 0x54410007 + +struct tag_revision { + u32 rev; +}; + +/* initial values for vesafb-type framebuffers. see struct screen_info + * in include/linux/tty.h + */ +#define ATAG_VIDEOLFB 0x54410008 + +struct tag_videolfb { + u16 lfb_width; + u16 lfb_height; + u16 lfb_depth; + u16 lfb_linelength; + u32 lfb_base; + u32 lfb_size; + u8 red_size; + u8 red_pos; + u8 green_size; + u8 green_pos; + u8 blue_size; + u8 blue_pos; + u8 rsvd_size; + u8 rsvd_pos; +}; + +/* command line: \0 terminated string */ +#define ATAG_CMDLINE 0x54410009 + +struct tag_cmdline { + char cmdline[1]; /* this is the minimum size */ +}; + +/* acorn RiscPC specific information */ +#define ATAG_ACORN 0x41000101 + +struct tag_acorn { + u32 memc_control_reg; + u32 vram_pages; + u8 sounddefault; + u8 adfsdrives; +}; + +/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */ +#define ATAG_MEMCLK 0x41000402 + +struct tag_memclk { + u32 fmemclk; +}; + +struct tag { + struct tag_header hdr; + union { + struct tag_core core; + struct tag_mem32 mem; + struct tag_videotext videotext; + struct tag_ramdisk ramdisk; + struct tag_initrd initrd; + struct tag_serialnr serialnr; + struct tag_revision revision; + struct tag_videolfb videolfb; + struct tag_cmdline cmdline; + + /* + * Acorn specific + */ + struct tag_acorn acorn; + + /* + * DC21285 specific + */ + struct tag_memclk memclk; + } u; +}; + +struct tagtable { + u32 tag; + int (*parse)(const struct tag *); +}; + +#define __tag __attribute__((unused, __section__(".taglist"))) +#define __tagtable(tag, fn) \ +static struct tagtable __tagtable_##fn __tag = { tag, fn } + +#define tag_member_present(tag,member) \ + ((unsigned long)(&((struct tag *)0L)->member + 1) \ + <= (tag)->hdr.size * 4) + +#define tag_next(t) ((struct tag *)((u32 *)(t) + (t)->hdr.size)) +#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2) + +#define for_each_tag(t,base) \ + for (t = base; t->hdr.size; t = tag_next(t)) + +/* + * Memory map description + */ +#define NR_BANKS 8 + +struct meminfo { + int nr_banks; + unsigned long end; + struct { + unsigned long start; + unsigned long size; + int node; + } bank[NR_BANKS]; +}; + +extern struct meminfo meminfo; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/shmbuf.h linux-2.5.70-bk14/include/asm-arm26/shmbuf.h --- linux-2.5.70-bk13/include/asm-arm26/shmbuf.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/shmbuf.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,42 @@ +#ifndef _ASMARM_SHMBUF_H +#define _ASMARM_SHMBUF_H + +/* + * The shmid64_ds structure for arm architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + unsigned long __unused1; + __kernel_time_t shm_dtime; /* last detach time */ + unsigned long __unused2; + __kernel_time_t shm_ctime; /* last change time */ + unsigned long __unused3; + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _ASMARM_SHMBUF_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/shmparam.h linux-2.5.70-bk14/include/asm-arm26/shmparam.h --- linux-2.5.70-bk13/include/asm-arm26/shmparam.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/shmparam.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,15 @@ +#ifndef _ASMARM_SHMPARAM_H +#define _ASMARM_SHMPARAM_H + +#ifndef SHMMAX +#define SHMMAX 0x003fa000 +#endif + +/* + * This should be the size of the virtually indexed cache/ways, + * or page size, whichever is greater since the cache aliases + * every size/ways bytes. + */ +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _ASMARM_SHMPARAM_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/sigcontext.h linux-2.5.70-bk14/include/asm-arm26/sigcontext.h --- linux-2.5.70-bk13/include/asm-arm26/sigcontext.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/sigcontext.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,33 @@ +#ifndef _ASMARM_SIGCONTEXT_H +#define _ASMARM_SIGCONTEXT_H + +/* + * Signal context structure - contains all info to do with the state + * before the signal handler was invoked. Note: only add new entries + * to the end of the structure. + */ +struct sigcontext { + unsigned long trap_no; + unsigned long error_code; + unsigned long oldmask; + unsigned long arm_r0; + unsigned long arm_r1; + unsigned long arm_r2; + unsigned long arm_r3; + unsigned long arm_r4; + unsigned long arm_r5; + unsigned long arm_r6; + unsigned long arm_r7; + unsigned long arm_r8; + unsigned long arm_r9; + unsigned long arm_r10; + unsigned long arm_fp; + unsigned long arm_ip; + unsigned long arm_sp; + unsigned long arm_lr; + unsigned long arm_pc; + unsigned long fault_address; +}; + + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/siginfo.h linux-2.5.70-bk14/include/asm-arm26/siginfo.h --- linux-2.5.70-bk13/include/asm-arm26/siginfo.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/siginfo.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,6 @@ +#ifndef _ASMARM_SIGINFO_H +#define _ASMARM_SIGINFO_H + +#include + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/signal.h linux-2.5.70-bk14/include/asm-arm26/signal.h --- linux-2.5.70-bk13/include/asm-arm26/signal.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/signal.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,201 @@ +#ifndef _ASMARM_SIGNAL_H +#define _ASMARM_SIGNAL_H + +#include + +/* Avoid too many header ordering problems. */ +struct siginfo; + +#ifdef __KERNEL__ +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. */ + +#define _NSIG 64 +#define _NSIG_BPW 32 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#else +/* Here we must cater to libcs that poke about in kernel headers. 
*/ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +#define SIGUNUSED 31 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX (_NSIG-1) + +#define SIGSWI 32 + +/* + * SA_FLAGS values: + * + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_SIGINFO deliver the signal with SIGINFO structs + * SA_THIRTYTWO delivers the signal in 32-bit mode, even if the task + * is running in 26-bit. + * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)). + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the + * SA_NODEFER prevents the current signal from being masked in the handler. + * SA_RESETHAND clears the handler when the signal is delivered. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. + */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ +#define SA_SIGINFO 0x00000004 +#define SA_THIRTYTWO 0x02000000 +#define SA_RESTORER 0x04000000 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND +#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ + + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#ifdef __KERNEL__ + +/* + * These values of sa_flags are used only by the kernel as part of the + * irq handling routines. + * + * SA_INTERRUPT is also used by the irq handling routines. + * SA_SHIRQ is for shared interrupt support on PCI and EISA. + */ +#define SA_PROBE 0x80000000 +#define SA_SAMPLE_RANDOM 0x10000000 +#define SA_IRQNOMASK 0x08000000 +#define SA_SHIRQ 0x04000000 +#endif + +#define SIG_BLOCK 0 /* for blocking signals */ +#define SIG_UNBLOCK 1 /* for unblocking signals */ +#define SIG_SETMASK 2 /* for setting the signal mask */ + +/* Type of a signal handler. */ +typedef void (*__sighandler_t)(int); + +#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ +#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ +#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ + +#ifdef __KERNEL__ +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + void (*sa_restorer)(void); + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +}; + +#else +/* Here we must cater to libcs that poke about in kernel headers. 
*/ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int, struct siginfo *, void *); + } _u; + sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ +#include + +#define sigmask(sig) (1UL << ((sig) - 1)) +//FIXME!!! +//#define HAVE_ARCH_GET_SIGNAL_TO_DELIVER + +#endif + + +#ifdef __KERNEL__ +#include +#define ptrace_signal_deliver(regs, cookie) do { } while (0) +#endif + + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/sizes.h linux-2.5.70-bk14/include/asm-arm26/sizes.h --- linux-2.5.70-bk13/include/asm-arm26/sizes.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/sizes.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,52 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* DO NOT EDIT!! - this file automatically generated + * from .s file by awk -f s2h.awk + */ +/* Size defintions + * Copyright (C) ARM Limited 1998. All rights reserved. 
+ */ + +#ifndef __sizes_h +#define __sizes_h 1 + +/* handy sizes */ +#define SZ_1K 0x00000400 +#define SZ_4K 0x00001000 +#define SZ_8K 0x00002000 +#define SZ_16K 0x00004000 +#define SZ_64K 0x00010000 +#define SZ_128K 0x00020000 +#define SZ_256K 0x00040000 +#define SZ_512K 0x00080000 + +#define SZ_1M 0x00100000 +#define SZ_2M 0x00200000 +#define SZ_4M 0x00400000 +#define SZ_8M 0x00800000 +#define SZ_16M 0x01000000 +#define SZ_32M 0x02000000 +#define SZ_64M 0x04000000 +#define SZ_128M 0x08000000 +#define SZ_256M 0x10000000 +#define SZ_512M 0x20000000 + +#define SZ_1G 0x40000000 +#define SZ_2G 0x80000000 + +#endif + +/* END */ diff -urN linux-2.5.70-bk13/include/asm-arm26/smp.h linux-2.5.70-bk14/include/asm-arm26/smp.h --- linux-2.5.70-bk13/include/asm-arm26/smp.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/smp.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,10 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include + +#ifdef CONFIG_SMP +#error SMP not supported +#endif + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/socket.h linux-2.5.70-bk14/include/asm-arm26/socket.h --- linux-2.5.70-bk13/include/asm-arm26/socket.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/socket.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,64 @@ +#ifndef _ASMARM_SOCKET_H +#define _ASMARM_SOCKET_H + +#include + +/* For setsockopt(2) */ +#define SOL_SOCKET 1 + +#define SO_DEBUG 1 +#define SO_REUSEADDR 2 +#define SO_TYPE 3 +#define SO_ERROR 4 +#define SO_DONTROUTE 5 +#define SO_BROADCAST 6 +#define SO_SNDBUF 7 +#define SO_RCVBUF 8 +#define SO_KEEPALIVE 9 +#define SO_OOBINLINE 10 +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_LINGER 13 +#define SO_BSDCOMPAT 14 +/* To add :#define SO_REUSEPORT 15 */ +#define SO_PASSCRED 16 +#define SO_PEERCRED 17 +#define SO_RCVLOWAT 18 +#define SO_SNDLOWAT 19 +#define SO_RCVTIMEO 20 +#define SO_SNDTIMEO 21 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 22 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 +#define SO_SECURITY_ENCRYPTION_NETWORK 24 + +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_ACCEPTCONN 30 + +/* Nast libc5 fixup - bletch */ +#if defined(__KERNEL__) +/* Socket types. */ +#define SOCK_STREAM 1 /* stream (connection) socket */ +#define SOCK_DGRAM 2 /* datagram (conn.less) socket */ +#define SOCK_RAW 3 /* raw socket */ +#define SOCK_RDM 4 /* reliably-delivered message */ +#define SOCK_SEQPACKET 5 /* sequential packet socket */ +#define SOCK_PACKET 10 /* linux specific way of */ + /* getting packets at the dev */ + /* level. For writing rarp and */ + /* other similar things on the */ + /* user level. */ +#define SOCK_MAX (SOCK_PACKET+1) +#endif + +#endif /* _ASM_SOCKET_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/sockios.h linux-2.5.70-bk14/include/asm-arm26/sockios.h --- linux-2.5.70-bk13/include/asm-arm26/sockios.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/sockios.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,12 @@ +#ifndef __ARCH_ARM_SOCKIOS_H +#define __ARCH_ARM_SOCKIOS_H + +/* Socket-level I/O control calls. 
*/ +#define FIOSETOWN 0x8901 +#define SIOCSPGRP 0x8902 +#define FIOGETOWN 0x8903 +#define SIOCGPGRP 0x8904 +#define SIOCATMARK 0x8905 +#define SIOCGSTAMP 0x8906 /* Get stamp */ + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/softirq.h linux-2.5.70-bk14/include/asm-arm26/softirq.h --- linux-2.5.70-bk13/include/asm-arm26/softirq.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/softirq.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,20 @@ +#ifndef __ASM_SOFTIRQ_H +#define __ASM_SOFTIRQ_H + +#include +#include + +#define local_bh_disable() \ + do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0) +#define __local_bh_enable() \ + do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0) + +#define local_bh_enable() \ +do { \ + __local_bh_enable(); \ + if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \ + __asm__("bl%? __do_softirq": : : "lr");/* out of line */\ + preempt_check_resched(); \ +} while (0) + +#endif /* __ASM_SOFTIRQ_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/spinlock.h linux-2.5.70-bk14/include/asm-arm26/spinlock.h --- linux-2.5.70-bk13/include/asm-arm26/spinlock.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/spinlock.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,6 @@ +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +#error ARM architecture does not support SMP spin locks + +#endif /* __ASM_SPINLOCK_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/stat.h linux-2.5.70-bk14/include/asm-arm26/stat.h --- linux-2.5.70-bk13/include/asm-arm26/stat.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/stat.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,79 @@ +#ifndef _ASMARM_STAT_H +#define _ASMARM_STAT_H + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc2.1, hence the absolutely + * insane amounts of padding around dev_t's. + */ +struct stat64 { + unsigned short st_dev; + unsigned char __pad0b[6]; + unsigned char __pad0[4]; + +#define STAT64_HAS_BROKEN_ST_INO 1 + unsigned long __st_ino; + unsigned int st_mode; + unsigned int st_nlink; + + unsigned long st_uid; + unsigned long st_gid; + + unsigned short st_rdev; + unsigned char __pad3b[6]; + unsigned char __pad3[4]; + + long long st_size; + unsigned long st_blksize; + + unsigned long st_blocks; /* Number 512-byte blocks allocated. 
*/ + unsigned long __pad4; /* Future possible st_blocks hi bits */ + + unsigned long st_atime; + unsigned long st_atime_nsec; + + unsigned long st_mtime; + unsigned long st_mtime_nsec; + + unsigned long st_ctime; + unsigned long st_ctime_nsec; + + unsigned long long st_ino; +}; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/statfs.h linux-2.5.70-bk14/include/asm-arm26/statfs.h --- linux-2.5.70-bk13/include/asm-arm26/statfs.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/statfs.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,25 @@ +#ifndef _ASMARM_STATFS_H +#define _ASMARM_STATFS_H + +#ifndef __KERNEL_STRICT_NAMES + +#include + +typedef __kernel_fsid_t fsid_t; + +#endif + +struct statfs { + long f_type; + long f_bsize; + long f_blocks; + long f_bfree; + long f_bavail; + long f_files; + long f_ffree; + __kernel_fsid_t f_fsid; + long f_namelen; + long f_spare[6]; +}; + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/string.h linux-2.5.70-bk14/include/asm-arm26/string.h --- linux-2.5.70-bk13/include/asm-arm26/string.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/string.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,43 @@ +#ifndef __ASM_ARM_STRING_H +#define __ASM_ARM_STRING_H + +/* + * We don't do inline string functions, since the + * optimised inline asm versions are not small. + */ + +#define __HAVE_ARCH_STRRCHR +extern char * strrchr(const char * s, int c); + +#define __HAVE_ARCH_STRCHR +extern char * strchr(const char * s, int c); + +#define __HAVE_ARCH_MEMCPY +extern void * memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void * memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCHR +extern void * memchr(const void *, int, __kernel_size_t); + +#define __HAVE_ARCH_MEMZERO +#define __HAVE_ARCH_MEMSET +extern void * memset(void *, int, __kernel_size_t); + +extern void __memzero(void *ptr, __kernel_size_t n); + +#define memset(p,v,n) \ + ({ \ + if ((n) != 0) { \ + if (__builtin_constant_p((v)) && (v) == 0) \ + __memzero((p),(n)); \ + else \ + memset((p),(v),(n)); \ + } \ + (p); \ + }) + +#define memzero(p,n) ({ if ((n) != 0) __memzero((p),(n)); (p); }) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/suspend.h linux-2.5.70-bk14/include/asm-arm26/suspend.h --- linux-2.5.70-bk13/include/asm-arm26/suspend.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/suspend.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,4 @@ +#ifdef _ASMARM_SUSPEND_H +#define _ASMARM_SUSPEND_H + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/sysirq.h linux-2.5.70-bk14/include/asm-arm26/sysirq.h --- linux-2.5.70-bk13/include/asm-arm26/sysirq.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/sysirq.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,61 @@ +/* + * linux/include/asm-arm/arch-arc/irqs.h + * + * Copyright (C) 1996 Russell King, Dave Gilbert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Modifications: + * 04-04-1998 PJB Merged arc and a5k versions + */ + +#include + +#if defined(CONFIG_ARCH_A5K) +#define IRQ_PRINTER 0 +#define IRQ_BATLOW 1 +#define IRQ_FLOPPYINDEX 2 +#define IRQ_FLOPPYDISK 12 +#elif defined(CONFIG_ARCH_ARC) +#define IRQ_PRINTERBUSY 0 +#define IRQ_SERIALRING 1 +#define IRQ_PRINTERACK 2 +#define IRQ_FLOPPYCHANGED 12 +#endif + +#define IRQ_VSYNCPULSE 3 +#define IRQ_POWERON 4 +#define IRQ_TIMER0 5 +#define IRQ_TIMER1 6 +#define IRQ_IMMEDIATE 7 +#define IRQ_EXPCARDFIQ 8 +#define IRQ_SOUNDCHANGE 9 +#define IRQ_SERIALPORT 10 +#define IRQ_HARDDISK 11 +#define IRQ_EXPANSIONCARD 13 +#define IRQ_KEYBOARDTX 14 +#define IRQ_KEYBOARDRX 15 + +#if defined(CONFIG_ARCH_A5K) +#define FIQ_SERIALPORT 4 +#elif defined(CONFIG_ARCH_ARC) +#define FIQ_FLOPPYIRQ 1 +#define FIQ_FD1772 FIQ_FLOPPYIRQ +#endif + +#define FIQ_FLOPPYDATA 0 +#define FIQ_ECONET 2 +#define FIQ_EXPANSIONCARD 6 +#define FIQ_FORCE 7 + +#define IRQ_TIMER IRQ_TIMER0 + +/* + * This is the offset of the FIQ "IRQ" numbers + */ +#define FIQ_START 64 + +#define irq_cannonicalize(i) (i) + diff -urN linux-2.5.70-bk13/include/asm-arm26/system.h linux-2.5.70-bk14/include/asm-arm26/system.h --- linux-2.5.70-bk13/include/asm-arm26/system.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/system.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,204 @@ +#ifndef __ASM_ARM_SYSTEM_H +#define __ASM_ARM_SYSTEM_H + +#ifdef __KERNEL__ + +#include +#include +#include + +#define vectors_base() (0) + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + extern void __bad_xchg(volatile void *, int); + + switch (size) { + case 1: return cpu_xchg_1(x, ptr); + case 4: return cpu_xchg_4(x, ptr); + default: __bad_xchg(ptr, size); + } + return 0; +} + +/* + * We need to turn the caches off before calling the reset vector - RiscOS + * messes up if we don't + */ +#define proc_hard_reset() cpu_proc_fin() + +/* + * A couple of speedups for the ARM + */ + +/* + * Enable IRQs (sti) + */ +#define local_irq_enable() \ + do { \ + unsigned long temp; \ + __asm__ __volatile__( \ +" mov %0, pc @ sti\n" \ +" bic %0, %0, #0x08000000\n" \ +" teqp %0, #0\n" \ + : "=r" (temp) \ + : \ + : "memory"); \ + } while(0) + +/* + * Disable IRQs (cli) + */ +#define local_irq_disable() \ + do { \ + unsigned long temp; \ + __asm__ __volatile__( \ +" mov %0, pc @ cli\n" \ +" orr %0, %0, #0x08000000\n" \ +" teqp %0, #0\n" \ + : "=r" (temp) \ + : \ + : "memory"); \ + } while(0) + +/* Disable FIQs (clf) */ + +#define __clf() do { \ + unsigned long temp; \ + __asm__ __volatile__( \ +" mov %0, pc @ clf\n" \ +" orr %0, %0, #0x04000000\n" \ +" teqp %0, #0\n" \ + : "=r" (temp)); \ + } while(0) + +/* Enable FIQs (stf) */ + +#define __stf() do { \ + unsigned long temp; \ + __asm__ __volatile__( \ +" mov %0, pc @ stf\n" \ +" bic %0, %0, #0x04000000\n" \ +" teqp %0, #0\n" \ + : "=r" (temp)); \ + } while(0) + +/* + * save current IRQ & FIQ state + */ +#define local_save_flags(x) \ + do { \ + __asm__ __volatile__( \ +" mov %0, pc @ save_flags\n" \ +" and %0, %0, #0x0c000000\n" \ + : "=r" (x)); \ + } while (0) + +/* + * Save the current interrupt enable state & disable IRQs + */ +#define local_irq_save(x) \ + do { \ + unsigned long temp; \ + __asm__ __volatile__( \ +" mov %0, pc @ save_flags_cli\n" \ +" orr %1, %0, #0x08000000\n" \ +" and %0, %0, #0x0c000000\n" \ +" teqp %1, #0\n" \ + : "=r" (x), "=r" (temp) \ + : \ + : "memory"); \ + } while (0) + +/* + * restore saved IRQ & FIQ state + */ +#define local_irq_restore(x) 
\ + do { \ + unsigned long temp; \ + __asm__ __volatile__( \ +" mov %0, pc @ restore_flags\n" \ +" bic %0, %0, #0x0c000000\n" \ +" orr %0, %0, %1\n" \ +" teqp %0, #0\n" \ + : "=&r" (temp) \ + : "r" (x) \ + : "memory"); \ + } while (0) + + +struct thread_info; + +/* information about the system we're running on */ +extern unsigned int system_rev; +extern unsigned int system_serial_low; +extern unsigned int system_serial_high; + +struct pt_regs; + +void die(const char *msg, struct pt_regs *regs, int err) + __attribute__((noreturn)); + +void die_if_kernel(const char *str, struct pt_regs *regs, int err); + +void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, const char *name); + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +#define tas(ptr) (xchg((ptr),1)) + +extern asmlinkage void __backtrace(void); + +/* + * Include processor dependent parts + */ + +#define mb() __asm__ __volatile__ ("" : : : "memory") +#define rmb() mb() +#define wmb() mb() +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#define prepare_to_switch() do { } while(0) + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. + * The `mb' is to tell GCC not to cache `current' across this call. + */ +struct thread_info; + +extern struct task_struct *__switch_to(struct thread_info *, struct thread_info *); + +#define switch_to(prev,next,last) \ + do { \ + __switch_to(prev->thread_info,next->thread_info); \ + mb(); \ + } while (0) + + +#ifdef CONFIG_SMP +#error SMP not supported +#endif /* CONFIG_SMP */ + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + local_save_flags(flags); \ + flags & PSR_I_BIT; \ +}) + +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) + +#define clf() __clf() +#define stf() __stf() + +#endif /* __KERNEL__ */ + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/termbits.h linux-2.5.70-bk14/include/asm-arm26/termbits.h --- linux-2.5.70-bk13/include/asm-arm26/termbits.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/termbits.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,170 @@ +#ifndef __ASM_ARM_TERMBITS_H +#define __ASM_ARM_TERMBITS_H + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ +}; + +/* c_cc characters */ +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VTIME 5 +#define VMIN 6 +#define VSWTC 7 +#define VSTART 8 +#define VSTOP 9 +#define VSUSP 10 +#define VEOL 11 +#define VREPRINT 12 +#define VDISCARD 13 +#define VWERASE 14 +#define VLNEXT 15 +#define VEOL2 16 + +/* c_iflag bits */ +#define IGNBRK 0000001 +#define BRKINT 0000002 +#define IGNPAR 0000004 +#define PARMRK 0000010 +#define INPCK 0000020 +#define ISTRIP 0000040 +#define INLCR 0000100 +#define IGNCR 0000200 +#define ICRNL 0000400 +#define IUCLC 0001000 +#define IXON 0002000 +#define IXANY 0004000 +#define IXOFF 0010000 +#define IMAXBEL 0020000 + +/* c_oflag bits */ +#define OPOST 0000001 +#define OLCUC 
0000002 +#define ONLCR 0000004 +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 +#define OFILL 0000100 +#define OFDEL 0000200 +#define NLDLY 0000400 +#define NL0 0000000 +#define NL1 0000400 +#define CRDLY 0003000 +#define CR0 0000000 +#define CR1 0001000 +#define CR2 0002000 +#define CR3 0003000 +#define TABDLY 0014000 +#define TAB0 0000000 +#define TAB1 0004000 +#define TAB2 0010000 +#define TAB3 0014000 +#define XTABS 0014000 +#define BSDLY 0020000 +#define BS0 0000000 +#define BS1 0020000 +#define VTDLY 0040000 +#define VT0 0000000 +#define VT1 0040000 +#define FFDLY 0100000 +#define FF0 0000000 +#define FF1 0100000 + +/* c_cflag bit meaning */ +#define CBAUD 0010017 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CSIZE 0000060 +#define CS5 0000000 +#define CS6 0000020 +#define CS7 0000040 +#define CS8 0000060 +#define CSTOPB 0000100 +#define CREAD 0000200 +#define PARENB 0000400 +#define PARODD 0001000 +#define HUPCL 0002000 +#define CLOCAL 0004000 +#define CBAUDEX 0010000 +#define B57600 0010001 +#define B115200 0010002 +#define B230400 0010003 +#define B460800 0010004 +#define B500000 0010005 +#define B576000 0010006 +#define B921600 0010007 +#define B1000000 0010010 +#define B1152000 0010011 +#define B1500000 0010012 +#define B2000000 0010013 +#define B2500000 0010014 +#define B3000000 0010015 +#define B3500000 0010016 +#define B4000000 0010017 +#define CIBAUD 002003600000 /* input baud rate (not used) */ +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define ISIG 0000001 +#define ICANON 0000002 +#define XCASE 0000004 +#define ECHO 0000010 +#define ECHOE 0000020 +#define ECHOK 0000040 +#define ECHONL 0000100 +#define NOFLSH 0000200 +#define TOSTOP 0000400 +#define ECHOCTL 0001000 +#define ECHOPRT 0002000 +#define ECHOKE 0004000 +#define FLUSHO 0010000 +#define PENDIN 0040000 +#define IEXTEN 0100000 + +/* tcflow() and TCXONC use these */ +#define TCOOFF 0 +#define TCOON 1 +#define TCIOFF 2 +#define TCION 3 + +/* tcflush() and TCFLSH use these */ +#define TCIFLUSH 0 +#define TCOFLUSH 1 +#define TCIOFLUSH 2 + +/* tcsetattr uses these */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* __ASM_ARM_TERMBITS_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/termios.h linux-2.5.70-bk14/include/asm-arm26/termios.h --- linux-2.5.70-bk13/include/asm-arm26/termios.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/termios.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,108 @@ +#ifndef __ASM_ARM_TERMIOS_H +#define __ASM_ARM_TERMIOS_H + +#include +#include + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +#ifdef __KERNEL__ +/* intr=^C quit=^| erase=del 
kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" +#endif + +/* modem lines */ +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + +/* line disciplines */ +#define N_TTY 0 +#define N_SLIP 1 +#define N_MOUSE 2 +#define N_PPP 3 +#define N_STRIP 4 +#define N_AX25 5 +#define N_X25 6 /* X.25 async */ +#define N_6PACK 7 +#define N_MASC 8 /* Reserved for Mobitex module */ +#define N_R3964 9 /* Reserved for Simatic R3964 module */ +#define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ +#define N_IRDA 11 /* Linux IrDa - http://irda.sourceforge.net/ */ +#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ +#define N_HDLC 13 /* synchronous HDLC */ +#define N_SYNC_PPP 14 +#define N_HCI 15 /* Bluetooth HCI UART */ + +#ifdef __KERNEL__ + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + *(unsigned short *) &(termios)->x = __tmp; \ +} + +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __KERNEL__ */ + +#endif /* __ASM_ARM_TERMIOS_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/thread_info.h linux-2.5.70-bk14/include/asm-arm26/thread_info.h --- linux-2.5.70-bk13/include/asm-arm26/thread_info.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/thread_info.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,144 @@ +/* + * linux/include/asm-arm26/thread_info.h + * + * Copyright (C) 2002 Russell King. + * Copyright (C) 2003 Ian Molton. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __ASM_ARM_THREAD_INFO_H +#define __ASM_ARM_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +struct task_struct; +struct exec_domain; + +#include +#include +#include + +typedef unsigned long mm_segment_t; + +struct cpu_context_save { + __u32 r4; + __u32 r5; + __u32 r6; + __u32 r7; + __u32 r8; + __u32 r9; + __u32 sl; + __u32 fp; + __u32 sp; + __u32 pc; +}; + +/* + * low level task data that entry.S needs immediate access to. + * We assume cpu_context follows immedately after cpu_domain. + */ +struct thread_info { + unsigned long flags; /* low level flags */ + __s32 preempt_count; /* 0 => preemptable, <0 => bug */ + mm_segment_t addr_limit; /* address limit */ + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + __u32 cpu; /* cpu */ + struct cpu_context_save cpu_context; /* cpu context */ + struct restart_block restart_block; + union fp_state fpstate; +}; + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task &tsk, \ + .exec_domain &default_exec_domain, \ + .flags 0, \ + .preempt_count 0, \ + .addr_limit KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* + * how to get the thread information struct from C + */ +static inline struct thread_info *current_thread_info(void) __attribute__ (( __const__ )); + +static inline struct thread_info *current_thread_info(void) +{ + register unsigned long sp asm ("sp"); + return (struct thread_info *)(sp & ~0x1fff); +} + +/* FIXME - PAGE_SIZE < 32K */ +#define THREAD_SIZE (8192) +/*FIXME INIT_THREAD_SIZE - how big? */ +//#define INIT_THREAD_SIZE (65536) +#define __get_user_regs(x) (((struct pt_regs *)((unsigned long)(x) + THREAD_SIZE - 8)) - 1) + +extern struct thread_info *alloc_thread_info(void); +extern void free_thread_info(struct thread_info *); + +#define get_thread_info(ti) get_task_struct((ti)->task) +#define put_thread_info(ti) put_task_struct((ti)->task) + +#define thread_saved_pc(tsk) \ + ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc))) +#define thread_saved_fp(tsk) \ + ((unsigned long)((tsk)->thread_info->cpu_context.fp)) + +#else /* !__ASSEMBLY__ */ + +#define TI_FLAGS 0 +#define TI_PREEMPT 4 +#define TI_ADDR_LIMIT 8 +#define TI_TASK 12 +#define TI_EXEC_DOMAIN 16 +#define TI_CPU 20 +#define TI_CPU_SAVE 24 +#define TI_RESTART_BLOCK 28 +#define TI_FPSTATE 68 + +#endif + +#define PREEMPT_ACTIVE 0x04000000 + +/* + * thread information flags: + * TIF_SYSCALL_TRACE - syscall trace active + * TIF_NOTIFY_RESUME - resumption notification requested + * TIF_SIGPENDING - signal pending + * TIF_NEED_RESCHED - rescheduling necessary + * TIF_USEDFPU - FPU was used by this task this quantum (SMP) + * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED + */ +#define TIF_NOTIFY_RESUME 0 +#define TIF_SIGPENDING 1 +#define TIF_NEED_RESCHED 2 +#define TIF_SYSCALL_TRACE 8 +#define TIF_USED_FPU 16 +#define TIF_POLLING_NRFLAG 17 + +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_USED_FPU (1 << TIF_USED_FPU) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) + +/* + * Change these and you break ASM code in entry-common.S + */ +#define _TIF_WORK_MASK 0x000000ff + +#endif /* __KERNEL__ */ +#endif /* __ASM_ARM_THREAD_INFO_H */ diff -urN 
linux-2.5.70-bk13/include/asm-arm26/timex.h linux-2.5.70-bk14/include/asm-arm26/timex.h --- linux-2.5.70-bk13/include/asm-arm26/timex.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/timex.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,31 @@ +/* + * linux/include/asm-arm/timex.h + * + * Copyright (C) 1997,1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Architecture Specific TIME specifications + */ +#ifndef _ASMARM_TIMEX_H +#define _ASMARM_TIMEX_H + +/* + * On the RiscPC, the clock ticks at 2MHz. + */ +#define CLOCK_TICK_RATE 2000000 + +/* IS THAT RIGHT ON A5000? FIXME */ + +typedef unsigned long cycles_t; + +extern cycles_t cacheflush_time; + +static inline cycles_t get_cycles (void) +{ + return 0; +} + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/tlb.h linux-2.5.70-bk14/include/asm-arm26/tlb.h --- linux-2.5.70-bk13/include/asm-arm26/tlb.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/tlb.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,62 @@ +#ifndef __ASMARM_TLB_H +#define __ASMARM_TLB_H + +#include + +/* + * TLB handling. This allows us to remove pages from the page + * tables, and efficiently handle the TLB issues. + */ +struct mmu_gather { + struct mm_struct *mm; + unsigned int freed; + + unsigned int flushes; + unsigned int avoided_flushes; +}; + +extern struct mmu_gather mmu_gathers[NR_CPUS]; + +static inline struct mmu_gather * +tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) +{ + int cpu = smp_processor_id(); + struct mmu_gather *tlb = &mmu_gathers[cpu]; + + tlb->mm = mm; + tlb->freed = 0; + + return tlb; +} + +static inline void +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +{ + struct mm_struct *mm = tlb->mm; + unsigned long freed = tlb->freed; + int rss = mm->rss; + + if (rss < freed) + freed = rss; + mm->rss = rss - freed; + + if (freed) { + flush_tlb_mm(mm); + tlb->flushes++; + } else { + tlb->avoided_flushes++; + } + + /* keep the page table cache within bounds */ + check_pgt_cache(); +} + +#define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0) +#define tlb_start_vma(tlb,vma) do { } while (0) +#define tlb_end_vma(tlb,vma) do { } while (0) + +#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) +#define pte_free_tlb(tlb,ptep) pte_free(ptep) +#define pmd_free_tlb(tlb,pmdp) pmd_free(pmdp) + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/tlbflush.h linux-2.5.70-bk14/include/asm-arm26/tlbflush.h --- linux-2.5.70-bk13/include/asm-arm26/tlbflush.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/tlbflush.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,70 @@ +#ifndef __ASMARM_TLBFLUSH_H +#define __ASMARM_TLBFLUSH_H + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + */ + +#define flush_tlb_all() memc_update_all() +#define flush_tlb_mm(mm) memc_update_mm(mm) +#define flush_tlb_page(vma, vmaddr) do { printk("flush_tlb_page\n");} while (0) // IS THIS RIGHT? 
+#define flush_tlb_range(vma,start,end) \ + do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0) +#define flush_tlb_pgtables(mm,start,end) do { printk("flush_tlb_pgtables\n");} while (0) +#define flush_tlb_kernel_range(s,e) do { printk("flush_tlb_range\n");} while (0) + +/* + * The following handle the weird MEMC chip + */ +static inline void memc_update_all(void) +{ + struct task_struct *p; + cpu_memc_update_all(init_mm.pgd); + for_each_process(p) { + if (!p->mm) + continue; + cpu_memc_update_all(p->mm->pgd); + } + processor._set_pgd(current->active_mm->pgd); +} + +static inline void memc_update_mm(struct mm_struct *mm) +{ + cpu_memc_update_all(mm->pgd); + + if (mm == current->active_mm) + processor._set_pgd(mm->pgd); +} + +static inline void +memc_clear(struct mm_struct *mm, struct page *page) +{ + cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0); + + if (mm == current->active_mm) + processor._set_pgd(mm->pgd); +} + +static inline void +memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr) +{ + cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr); + + if (mm == current->active_mm) + processor._set_pgd(mm->pgd); +} + +static inline void +update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) +{ + struct mm_struct *mm = vma->vm_mm; +printk("update_mmu_cache\n"); + memc_update_addr(mm, pte, addr); +} + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/topology.h linux-2.5.70-bk14/include/asm-arm26/topology.h --- linux-2.5.70-bk13/include/asm-arm26/topology.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/topology.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,6 @@ +#ifndef _ASM_ARM_TOPOLOGY_H +#define _ASM_ARM_TOPOLOGY_H + +#include + +#endif /* _ASM_ARM_TOPOLOGY_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/types.h linux-2.5.70-bk14/include/asm-arm26/types.h --- linux-2.5.70-bk13/include/asm-arm26/types.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/types.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,59 @@ +#ifndef __ASM_ARM_TYPES_H +#define __ASM_ARM_TYPES_H + +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __signed__ long long __s64; +typedef unsigned long long __u64; +#endif + +#endif /* __ASSEMBLY__ */ + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#define BITS_PER_LONG 32 + +#ifndef __ASSEMBLY__ + +typedef signed char s8; +typedef unsigned char u8; + +typedef signed short s16; +typedef unsigned short u16; + +typedef signed int s32; +typedef unsigned int u32; + +typedef signed long long s64; +typedef unsigned long long u64; + +/* Dma addresses are 32-bits wide. 
*/ + +typedef u32 dma_addr_t; +typedef u32 dma64_addr_t; + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/uaccess-asm.h linux-2.5.70-bk14/include/asm-arm26/uaccess-asm.h --- linux-2.5.70-bk13/include/asm-arm26/uaccess-asm.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/uaccess-asm.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,151 @@ +/* + * linux/include/asm-arm/proc-armo/uaccess.h + * + * Copyright (C) 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * The fs functions are implemented on the ARM2 and ARM3 architectures + * manually. + * Use *_user functions to access user memory with faulting behaving + * as though the user is accessing the memory. + * Use set_fs(get_ds()) and then the *_user functions to allow them to + * access kernel memory. + */ + +/* + * These are the values used to represent the user `fs' and the kernel `ds' + */ +#define KERNEL_DS 0x03000000 +#define USER_DS 0x02000000 + +extern uaccess_t uaccess_user, uaccess_kernel; + +static inline void set_fs (mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; + current->thread.uaccess = fs == USER_DS ? &uaccess_user : &uaccess_kernel; +} + +#define __range_ok(addr,size) ({ \ + unsigned long flag, sum; \ + __asm__ __volatile__("subs %1, %0, %3; cmpcs %1, %2; movcs %0, #0" \ + : "=&r" (flag), "=&r" (sum) \ + : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \ + : "cc"); \ + flag; }) + +#define __addr_ok(addr) ({ \ + unsigned long flag; \ + __asm__ __volatile__("cmp %2, %0; movlo %0, #0" \ + : "=&r" (flag) \ + : "0" (current_thread_info()->addr_limit), "r" (addr) \ + : "cc"); \ + (flag == 0); }) + +#define __put_user_asm_byte(x,addr,err) \ + __asm__ __volatile__( \ + " mov r0, %1\n" \ + " mov r1, %2\n" \ + " mov r2, %0\n" \ + " mov lr, pc\n" \ + " mov pc, %3\n" \ + " mov %0, r2\n" \ + : "=r" (err) \ + : "r" (x), "r" (addr), "r" (current->thread.uaccess->put_byte), \ + "0" (err) \ + : "r0", "r1", "r2", "lr") + +#define __put_user_asm_half(x,addr,err) \ + __asm__ __volatile__( \ + " mov r0, %1\n" \ + " mov r1, %2\n" \ + " mov r2, %0\n" \ + " mov lr, pc\n" \ + " mov pc, %3\n" \ + " mov %0, r2\n" \ + : "=r" (err) \ + : "r" (x), "r" (addr), "r" (current->thread.uaccess->put_half), \ + "0" (err) \ + : "r0", "r1", "r2", "lr") + +#define __put_user_asm_word(x,addr,err) \ + __asm__ __volatile__( \ + " mov r0, %1\n" \ + " mov r1, %2\n" \ + " mov r2, %0\n" \ + " mov lr, pc\n" \ + " mov pc, %3\n" \ + " mov %0, r2\n" \ + : "=r" (err) \ + : "r" (x), "r" (addr), "r" (current->thread.uaccess->put_word), \ + "0" (err) \ + : "r0", "r1", "r2", "lr") + +#define __put_user_asm_dword(x,addr,err) \ + __asm__ __volatile__( \ + " mov r0, %1\n" \ + " mov r1, %2\n" \ + " mov r2, %0\n" \ + " mov lr, pc\n" \ + " mov pc, %3\n" \ + " mov %0, r2\n" \ + : "=r" (err) \ + : "r" (x), "r" (addr), "r" (current->thread.uaccess->put_dword), \ + "0" (err) \ + : "r0", "r1", "r2", "lr") + +#define __get_user_asm_byte(x,addr,err) \ + __asm__ __volatile__( \ + " mov r0, %2\n" \ + " mov r1, %0\n" \ + " mov lr, pc\n" \ + " mov pc, %3\n" \ + " mov %0, r1\n" \ + " mov %1, r0\n" \ + : "=r" (err), "=r" (x) \ + : "r" (addr), "r" (current->thread.uaccess->get_byte), "0" (err) \ + : "r0", "r1", "r2", "lr") + +#define __get_user_asm_half(x,addr,err) \ + __asm__ __volatile__( \ + " mov r0, 
%2\n" \ + " mov r1, %0\n" \ + " mov lr, pc\n" \ + " mov pc, %3\n" \ + " mov %0, r1\n" \ + " mov %1, r0\n" \ + : "=r" (err), "=r" (x) \ + : "r" (addr), "r" (current->thread.uaccess->get_half), "0" (err) \ + : "r0", "r1", "r2", "lr") + +#define __get_user_asm_word(x,addr,err) \ + __asm__ __volatile__( \ + " mov r0, %2\n" \ + " mov r1, %0\n" \ + " mov lr, pc\n" \ + " mov pc, %3\n" \ + " mov %0, r1\n" \ + " mov %1, r0\n" \ + : "=r" (err), "=r" (x) \ + : "r" (addr), "r" (current->thread.uaccess->get_word), "0" (err) \ + : "r0", "r1", "r2", "lr") + +#define __do_copy_from_user(to,from,n) \ + (n) = current->thread.uaccess->copy_from_user((to),(from),(n)) + +#define __do_copy_to_user(to,from,n) \ + (n) = current->thread.uaccess->copy_to_user((to),(from),(n)) + +#define __do_clear_user(addr,sz) \ + (sz) = current->thread.uaccess->clear_user((addr),(sz)) + +#define __do_strncpy_from_user(dst,src,count,res) \ + (res) = current->thread.uaccess->strncpy_from_user(dst,src,count) + +#define __do_strnlen_user(s,n,res) \ + (res) = current->thread.uaccess->strnlen_user(s,n) diff -urN linux-2.5.70-bk13/include/asm-arm26/uaccess.h linux-2.5.70-bk14/include/asm-arm26/uaccess.h --- linux-2.5.70-bk13/include/asm-arm26/uaccess.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/uaccess.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,260 @@ +#ifndef _ASMARM_UACCESS_H +#define _ASMARM_UACCESS_H + +/* + * User space memory access functions + */ +#include +#include + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +/* Returns 0 if exception not found and fixup otherwise. */ +extern unsigned long search_exception_table(unsigned long); +extern int fixup_exception(struct pt_regs *regs); + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define segment_eq(a,b) ((a) == (b)) + +#include + +#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) + +static inline int verify_area(int type, const void * addr, unsigned long size) +{ + return access_ok(type, addr, size) ? 0 : -EFAULT; +} + +/* + * Single-value transfer routines. They automatically use the right + * size if we just have the right pointer type. Note that the functions + * which read from user space (*get_*) need to take care not to leak + * kernel data even if the calling code is buggy and fails to check + * the return value. This means zeroing out the destination variable + * or buffer on error. Normally this is done out of line by the + * fixup code, but there are a few places where it intrudes on the + * main code path. When we only write to user space, there is no + * problem. + * + * The "__xxx" versions of the user access functions do not verify the + * address space - it must have been done previously with a separate + * "access_ok()" call. 
+ * + * The "xxx_error" versions set the third argument to EFAULT if an + * error occurs, and leave it unchanged on success. Note that these + * versions are void (ie, don't return a value as such). + */ + +extern int __get_user_1(void *); +extern int __get_user_2(void *); +extern int __get_user_4(void *); +extern int __get_user_8(void *); +extern int __get_user_bad(void); + +#define __get_user_x(__r1,__p,__e,__s,__i...) \ + __asm__ __volatile__ ("bl __get_user_" #__s \ + : "=&r" (__e), "=r" (__r1) \ + : "0" (__p) \ + : __i) + +#define get_user(x,p) \ + ({ \ + const register typeof(*(p)) *__p asm("r0") = (p); \ + register typeof(*(p)) __r1 asm("r1"); \ + register int __e asm("r0"); \ + switch (sizeof(*(p))) { \ + case 1: \ + __get_user_x(__r1, __p, __e, 1, "lr"); \ + break; \ + case 2: \ + __get_user_x(__r1, __p, __e, 2, "r2", "lr"); \ + break; \ + case 4: \ + __get_user_x(__r1, __p, __e, 4, "lr"); \ + break; \ + case 8: \ + __get_user_x(__r1, __p, __e, 8, "lr"); \ + break; \ + default: __e = __get_user_bad(); break; \ + } \ + x = __r1; \ + __e; \ + }) + + +#define __get_user(x,ptr) \ +({ \ + long __gu_err = 0; \ + __get_user_err((x),(ptr),__gu_err); \ + __gu_err; \ +}) + +#define __get_user_error(x,ptr,err) \ +({ \ + __get_user_err((x),(ptr),err); \ + (void) 0; \ +}) + +#define __get_user_err(x,ptr,err) \ +do { \ + unsigned long __gu_addr = (unsigned long)(ptr); \ + unsigned long __gu_val; \ + switch (sizeof(*(ptr))) { \ + case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ + case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ + case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ + default: (__gu_val) = __get_user_bad(); \ + } \ + (x) = (__typeof__(*(ptr)))__gu_val; \ +} while (0) + +extern int __put_user_1(void *, unsigned int); +extern int __put_user_2(void *, unsigned int); +extern int __put_user_4(void *, unsigned int); +extern int __put_user_8(void *, unsigned long long); +extern int __put_user_bad(void); + +#define __put_user_x(__r1,__p,__e,__s,__i...) 
\ + __asm__ __volatile__ ("bl __put_user_" #__s \ + : "=&r" (__e) \ + : "0" (__p), "r" (__r1) \ + : __i) + +#define put_user(x,p) \ + ({ \ + const register typeof(*(p)) __r1 asm("r1") = (x); \ + const register typeof(*(p)) *__p asm("r0") = (p); \ + register int __e asm("r0"); \ + switch (sizeof(*(p))) { \ + case 1: \ + __put_user_x(__r1, __p, __e, 1, "r2", "lr"); \ + break; \ + case 2: \ + __put_user_x(__r1, __p, __e, 2, "r2", "lr"); \ + break; \ + case 4: \ + __put_user_x(__r1, __p, __e, 4, "r2", "lr"); \ + break; \ + case 8: \ + __put_user_x(__r1, __p, __e, 8, "ip", "lr"); \ + break; \ + default: __e = __put_user_bad(); break; \ + } \ + __e; \ + }) + +#define __put_user(x,ptr) \ +({ \ + long __pu_err = 0; \ + __put_user_err((x),(ptr),__pu_err); \ + __pu_err; \ +}) + +#define __put_user_error(x,ptr,err) \ +({ \ + __put_user_err((x),(ptr),err); \ + (void) 0; \ +}) + +#define __put_user_err(x,ptr,err) \ +do { \ + unsigned long __pu_addr = (unsigned long)(ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + switch (sizeof(*(ptr))) { \ + case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ + case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ + case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ + case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ + default: __put_user_bad(); \ + } \ +} while (0) + +static __inline__ unsigned long copy_from_user(void *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + __do_copy_from_user(to, from, n); + else /* security hole - plug it */ + memzero(to, n); + return n; +} + +static __inline__ unsigned long __copy_from_user(void *to, const void *from, unsigned long n) +{ + __do_copy_from_user(to, from, n); + return n; +} + +static __inline__ unsigned long copy_to_user(void *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + __do_copy_to_user(to, from, n); + return n; +} + +static __inline__ unsigned long __copy_to_user(void *to, const void *from, unsigned long n) +{ + __do_copy_to_user(to, from, n); + return n; +} + +static __inline__ unsigned long clear_user (void *to, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + __do_clear_user(to, n); + return n; +} + +static __inline__ unsigned long __clear_user (void *to, unsigned long n) +{ + __do_clear_user(to, n); + return n; +} + +static __inline__ long strncpy_from_user (char *dst, const char *src, long count) +{ + long res = -EFAULT; + if (access_ok(VERIFY_READ, src, 1)) + __do_strncpy_from_user(dst, src, count, res); + return res; +} + +static __inline__ long __strncpy_from_user (char *dst, const char *src, long count) +{ + long res; + __do_strncpy_from_user(dst, src, count, res); + return res; +} + +#define strlen_user(s) strnlen_user(s, ~0UL >> 1) + +static inline long strnlen_user(const char *s, long n) +{ + unsigned long res = 0; + + if (__addr_ok(s)) + __do_strnlen_user(s, n, res); + + return res; +} + +#endif /* _ASMARM_UACCESS_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/ucontext.h linux-2.5.70-bk14/include/asm-arm26/ucontext.h --- linux-2.5.70-bk13/include/asm-arm26/ucontext.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/ucontext.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,12 @@ +#ifndef _ASMARM_UCONTEXT_H +#define _ASMARM_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* !_ASMARM_UCONTEXT_H */ 
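Two conventions carry most of the weight in the asm-arm26 uaccess code above. First, get_user()/put_user() select an accessor by sizeof(*(ptr)), so a single macro serves 1-, 2- and 4-byte objects (with 8-byte puts handled separately). Second, the bulk helpers return the number of bytes that could NOT be transferred, and copy_from_user() zeroes the untransferred part of the destination so a short copy never leaks stale kernel data. The following is a minimal user-space sketch of just those two conventions, not kernel code: demo_copy_from_user() and demo_get_user() are invented names, and a "fault" is simulated by refusing to read past the end of a fixed buffer.

/*
 * Illustration only -- user-space analogue of the uaccess conventions.
 */
#include <stdio.h>
#include <string.h>

static const char src_buf[8] = "ABCDEFG";	/* pretend user memory */

/* Return how many bytes were NOT copied; zero the destination tail. */
static size_t demo_copy_from_user(void *to, size_t off, size_t n)
{
	size_t avail = off < sizeof(src_buf) ? sizeof(src_buf) - off : 0;
	size_t ok = n < avail ? n : avail;

	memcpy(to, src_buf + off, ok);
	memset((char *)to + ok, 0, n - ok);	/* plug the info leak */
	return n - ok;
}

/* Pick the access width from the pointed-to object, like get_user(). */
#define demo_get_user(x, off) ({					\
	long __e;							\
	switch (sizeof(x)) {						\
	case 1: { unsigned char  __v;					\
		  __e = demo_copy_from_user(&__v, (off), 1); (x) = __v; break; } \
	case 2: { unsigned short __v;					\
		  __e = demo_copy_from_user(&__v, (off), 2); (x) = __v; break; } \
	case 4: { unsigned int   __v;					\
		  __e = demo_copy_from_user(&__v, (off), 4); (x) = __v; break; } \
	default: __e = -1; break;					\
	}								\
	__e; })

int main(void)
{
	unsigned int word = 0;
	char out[16];

	long err = demo_get_user(word, 0);
	printf("get_user:       err=%ld val=0x%08x\n", err, word);

	size_t left = demo_copy_from_user(out, 4, sizeof(out));
	printf("copy_from_user: not copied=%zu buf=\"%s\"\n", left, out);
	return 0;
}

Built with gcc (the ({ ... }) statement expression is the same GNU extension the macros in this patch rely on), the second printf reports 12 uncopied bytes and a zero-terminated "EFG", which mirrors the behaviour the comment block in uaccess.h asks for.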
diff -urN linux-2.5.70-bk13/include/asm-arm26/unaligned.h linux-2.5.70-bk14/include/asm-arm26/unaligned.h --- linux-2.5.70-bk13/include/asm-arm26/unaligned.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/unaligned.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,118 @@ +#ifndef __ASM_ARM_UNALIGNED_H +#define __ASM_ARM_UNALIGNED_H + +#include + +extern int __bug_unaligned_x(void *ptr); + +/* + * What is the most efficient way of loading/storing an unaligned value? + * + * That is the subject of this file. Efficiency here is defined as + * minimum code size with minimum register usage for the common cases. + * It is currently not believed that long longs are common, so we + * trade efficiency for the chars, shorts and longs against the long + * longs. + * + * Current stats with gcc 2.7.2.2 for these functions: + * + * ptrsize get: code regs put: code regs + * 1 1 1 1 2 + * 2 3 2 3 2 + * 4 7 3 7 3 + * 8 20 6 16 6 + * + * gcc 2.95.1 seems to code differently: + * + * ptrsize get: code regs put: code regs + * 1 1 1 1 2 + * 2 3 2 3 2 + * 4 7 4 7 4 + * 8 19 8 15 6 + * + * which may or may not be more efficient (depending upon whether + * you can afford the extra registers). Hopefully the gcc 2.95 + * is inteligent enough to decide if it is better to use the + * extra register, but evidence so far seems to suggest otherwise. + * + * Unfortunately, gcc is not able to optimise the high word + * out of long long >> 32, or the low word from long long << 32 + */ + +#define __get_unaligned_2_le(__p) \ + (__p[0] | __p[1] << 8) + +#define __get_unaligned_4_le(__p) \ + (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) + +#define __get_unaligned_le(ptr) \ + ({ \ + __typeof__(*(ptr)) __v; \ + __u8 *__p = (__u8 *)(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: __v = *(ptr); break; \ + case 2: __v = __get_unaligned_2_le(__p); break; \ + case 4: __v = __get_unaligned_4_le(__p); break; \ + case 8: { \ + unsigned int __v1, __v2; \ + __v2 = __get_unaligned_4_le((__p+4)); \ + __v1 = __get_unaligned_4_le(__p); \ + __v = ((unsigned long long)__v2 << 32 | __v1); \ + } \ + break; \ + default: __v = __bug_unaligned_x(__p); break; \ + } \ + __v; \ + }) + +static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p) +{ + *__p++ = __v; + *__p++ = __v >> 8; +} + +static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p) +{ + __put_unaligned_2_le(__v >> 16, __p + 2); + __put_unaligned_2_le(__v, __p); +} + +static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p) +{ + /* + * tradeoff: 8 bytes of stack for all unaligned puts (2 + * instructions), or an extra register in the long long + * case - go for the extra register. + */ + __put_unaligned_4_le(__v >> 32, __p+4); + __put_unaligned_4_le(__v, __p); +} + +/* + * Try to store an unaligned value as efficiently as possible. 
+ */ +#define __put_unaligned_le(val,ptr) \ + ({ \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(ptr) = (val); \ + break; \ + case 2: __put_unaligned_2_le((val),(__u8 *)(ptr)); \ + break; \ + case 4: __put_unaligned_4_le((val),(__u8 *)(ptr)); \ + break; \ + case 8: __put_unaligned_8_le((val),(__u8 *)(ptr)); \ + break; \ + default: __bug_unaligned_x(ptr); \ + break; \ + } \ + (void) 0; \ + }) + +/* + * Select endianness + */ +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le + +#endif diff -urN linux-2.5.70-bk13/include/asm-arm26/uncompress.h linux-2.5.70-bk14/include/asm-arm26/uncompress.h --- linux-2.5.70-bk13/include/asm-arm26/uncompress.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/uncompress.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,111 @@ +/* + * linux/include/asm-arm/arch-arc/uncompress.h + * + * Copyright (C) 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#define VIDMEM ((char *)0x02000000) + +int video_num_columns, video_num_lines, video_size_row; +int white, bytes_per_char_h; +extern unsigned long con_charconvtable[256]; + +struct param_struct { + unsigned long page_size; + unsigned long nr_pages; + unsigned long ramdisk_size; + unsigned long mountrootrdonly; + unsigned long rootdev; + unsigned long video_num_cols; + unsigned long video_num_rows; + unsigned long video_x; + unsigned long video_y; + unsigned long memc_control_reg; + unsigned char sounddefault; + unsigned char adfsdrives; + unsigned char bytes_per_char_h; + unsigned char bytes_per_char_v; + unsigned long unused[256/4-11]; +}; + +static struct param_struct *params = (struct param_struct *)0x0207c000; + +/* + * This does not append a newline + */ +static void puts(const char *s) +{ + extern void ll_write_char(char *, unsigned long); + int x,y; + unsigned char c; + char *ptr; + + x = params->video_x; + y = params->video_y; + + while ( ( c = *(unsigned char *)s++ ) != '\0' ) { + if ( c == '\n' ) { + x = 0; + if ( ++y >= video_num_lines ) { + y--; + } + } else { + ptr = VIDMEM + ((y*video_num_columns*params->bytes_per_char_v+x)*bytes_per_char_h); + ll_write_char(ptr, c|(white<<16)); + if ( ++x >= video_num_columns ) { + x = 0; + if ( ++y >= video_num_lines ) { + y--; + } + } + } + } + + params->video_x = x; + params->video_y = y; +} + +static void error(char *x); + +/* + * Setup for decompression + */ +static void arch_decomp_setup(void) +{ + int i; + + video_num_lines = params->video_num_rows; + video_num_columns = params->video_num_cols; + bytes_per_char_h = params->bytes_per_char_h; + video_size_row = video_num_columns * bytes_per_char_h; + if (bytes_per_char_h == 4) + for (i = 0; i < 256; i++) + con_charconvtable[i] = + (i & 128 ? 1 << 0 : 0) | + (i & 64 ? 1 << 4 : 0) | + (i & 32 ? 1 << 8 : 0) | + (i & 16 ? 1 << 12 : 0) | + (i & 8 ? 1 << 16 : 0) | + (i & 4 ? 1 << 20 : 0) | + (i & 2 ? 1 << 24 : 0) | + (i & 1 ? 1 << 28 : 0); + else + for (i = 0; i < 16; i++) + con_charconvtable[i] = + (i & 8 ? 1 << 0 : 0) | + (i & 4 ? 1 << 8 : 0) | + (i & 2 ? 1 << 16 : 0) | + (i & 1 ? 1 << 24 : 0); + + white = bytes_per_char_h == 8 ? 
0xfc : 7; + + if (params->nr_pages * params->page_size < 4096*1024) error("<4M of mem\n"); +} + +/* + * nothing to do + */ +#define arch_decomp_wdog() diff -urN linux-2.5.70-bk13/include/asm-arm26/unistd.h linux-2.5.70-bk14/include/asm-arm26/unistd.h --- linux-2.5.70-bk13/include/asm-arm26/unistd.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/unistd.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,481 @@ +/* + * linux/include/asm-arm/unistd.h + * + * Copyright (C) 2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Please forward _all_ changes to this file to rmk@arm.linux.org.uk, + * no matter what the change is. Thanks! + */ +#ifndef __ASM_ARM_UNISTD_H +#define __ASM_ARM_UNISTD_H + +#include + +#define __NR_SYSCALL_BASE 0x900000 + +/* + * This file contains the system call numbers. + */ + +#define __NR_restart_syscall (__NR_SYSCALL_BASE+ 0) +#define __NR_exit (__NR_SYSCALL_BASE+ 1) +#define __NR_fork (__NR_SYSCALL_BASE+ 2) +#define __NR_read (__NR_SYSCALL_BASE+ 3) +#define __NR_write (__NR_SYSCALL_BASE+ 4) +#define __NR_open (__NR_SYSCALL_BASE+ 5) +#define __NR_close (__NR_SYSCALL_BASE+ 6) +#define __NR_waitpid (__NR_SYSCALL_BASE+ 7) +#define __NR_creat (__NR_SYSCALL_BASE+ 8) +#define __NR_link (__NR_SYSCALL_BASE+ 9) +#define __NR_unlink (__NR_SYSCALL_BASE+ 10) +#define __NR_execve (__NR_SYSCALL_BASE+ 11) +#define __NR_chdir (__NR_SYSCALL_BASE+ 12) +#define __NR_time (__NR_SYSCALL_BASE+ 13) +#define __NR_mknod (__NR_SYSCALL_BASE+ 14) +#define __NR_chmod (__NR_SYSCALL_BASE+ 15) +#define __NR_lchown (__NR_SYSCALL_BASE+ 16) +#define __NR_break (__NR_SYSCALL_BASE+ 17) + +#define __NR_lseek (__NR_SYSCALL_BASE+ 19) +#define __NR_getpid (__NR_SYSCALL_BASE+ 20) +#define __NR_mount (__NR_SYSCALL_BASE+ 21) +#define __NR_umount (__NR_SYSCALL_BASE+ 22) +#define __NR_setuid (__NR_SYSCALL_BASE+ 23) +#define __NR_getuid (__NR_SYSCALL_BASE+ 24) +#define __NR_stime (__NR_SYSCALL_BASE+ 25) +#define __NR_ptrace (__NR_SYSCALL_BASE+ 26) +#define __NR_alarm (__NR_SYSCALL_BASE+ 27) + +#define __NR_pause (__NR_SYSCALL_BASE+ 29) +#define __NR_utime (__NR_SYSCALL_BASE+ 30) +#define __NR_stty (__NR_SYSCALL_BASE+ 31) +#define __NR_gtty (__NR_SYSCALL_BASE+ 32) +#define __NR_access (__NR_SYSCALL_BASE+ 33) +#define __NR_nice (__NR_SYSCALL_BASE+ 34) +#define __NR_ftime (__NR_SYSCALL_BASE+ 35) +#define __NR_sync (__NR_SYSCALL_BASE+ 36) +#define __NR_kill (__NR_SYSCALL_BASE+ 37) +#define __NR_rename (__NR_SYSCALL_BASE+ 38) +#define __NR_mkdir (__NR_SYSCALL_BASE+ 39) +#define __NR_rmdir (__NR_SYSCALL_BASE+ 40) +#define __NR_dup (__NR_SYSCALL_BASE+ 41) +#define __NR_pipe (__NR_SYSCALL_BASE+ 42) +#define __NR_times (__NR_SYSCALL_BASE+ 43) +#define __NR_prof (__NR_SYSCALL_BASE+ 44) +#define __NR_brk (__NR_SYSCALL_BASE+ 45) +#define __NR_setgid (__NR_SYSCALL_BASE+ 46) +#define __NR_getgid (__NR_SYSCALL_BASE+ 47) +#define __NR_signal (__NR_SYSCALL_BASE+ 48) +#define __NR_geteuid (__NR_SYSCALL_BASE+ 49) +#define __NR_getegid (__NR_SYSCALL_BASE+ 50) +#define __NR_acct (__NR_SYSCALL_BASE+ 51) +#define __NR_umount2 (__NR_SYSCALL_BASE+ 52) +#define __NR_lock (__NR_SYSCALL_BASE+ 53) +#define __NR_ioctl (__NR_SYSCALL_BASE+ 54) +#define __NR_fcntl (__NR_SYSCALL_BASE+ 55) +#define __NR_mpx (__NR_SYSCALL_BASE+ 56) +#define __NR_setpgid (__NR_SYSCALL_BASE+ 57) +#define __NR_ulimit (__NR_SYSCALL_BASE+ 58) + +#define __NR_umask 
(__NR_SYSCALL_BASE+ 60) +#define __NR_chroot (__NR_SYSCALL_BASE+ 61) +#define __NR_ustat (__NR_SYSCALL_BASE+ 62) +#define __NR_dup2 (__NR_SYSCALL_BASE+ 63) +#define __NR_getppid (__NR_SYSCALL_BASE+ 64) +#define __NR_getpgrp (__NR_SYSCALL_BASE+ 65) +#define __NR_setsid (__NR_SYSCALL_BASE+ 66) +#define __NR_sigaction (__NR_SYSCALL_BASE+ 67) +#define __NR_sgetmask (__NR_SYSCALL_BASE+ 68) +#define __NR_ssetmask (__NR_SYSCALL_BASE+ 69) +#define __NR_setreuid (__NR_SYSCALL_BASE+ 70) +#define __NR_setregid (__NR_SYSCALL_BASE+ 71) +#define __NR_sigsuspend (__NR_SYSCALL_BASE+ 72) +#define __NR_sigpending (__NR_SYSCALL_BASE+ 73) +#define __NR_sethostname (__NR_SYSCALL_BASE+ 74) +#define __NR_setrlimit (__NR_SYSCALL_BASE+ 75) +#define __NR_getrlimit (__NR_SYSCALL_BASE+ 76) /* Back compat 2GB limited rlimit */ +#define __NR_getrusage (__NR_SYSCALL_BASE+ 77) +#define __NR_gettimeofday (__NR_SYSCALL_BASE+ 78) +#define __NR_settimeofday (__NR_SYSCALL_BASE+ 79) +#define __NR_getgroups (__NR_SYSCALL_BASE+ 80) +#define __NR_setgroups (__NR_SYSCALL_BASE+ 81) +#define __NR_select (__NR_SYSCALL_BASE+ 82) +#define __NR_symlink (__NR_SYSCALL_BASE+ 83) + +#define __NR_readlink (__NR_SYSCALL_BASE+ 85) +#define __NR_uselib (__NR_SYSCALL_BASE+ 86) +#define __NR_swapon (__NR_SYSCALL_BASE+ 87) +#define __NR_reboot (__NR_SYSCALL_BASE+ 88) +#define __NR_readdir (__NR_SYSCALL_BASE+ 89) +#define __NR_mmap (__NR_SYSCALL_BASE+ 90) +#define __NR_munmap (__NR_SYSCALL_BASE+ 91) +#define __NR_truncate (__NR_SYSCALL_BASE+ 92) +#define __NR_ftruncate (__NR_SYSCALL_BASE+ 93) +#define __NR_fchmod (__NR_SYSCALL_BASE+ 94) +#define __NR_fchown (__NR_SYSCALL_BASE+ 95) +#define __NR_getpriority (__NR_SYSCALL_BASE+ 96) +#define __NR_setpriority (__NR_SYSCALL_BASE+ 97) +#define __NR_profil (__NR_SYSCALL_BASE+ 98) +#define __NR_statfs (__NR_SYSCALL_BASE+ 99) +#define __NR_fstatfs (__NR_SYSCALL_BASE+100) +#define __NR_ioperm (__NR_SYSCALL_BASE+101) +#define __NR_socketcall (__NR_SYSCALL_BASE+102) +#define __NR_syslog (__NR_SYSCALL_BASE+103) +#define __NR_setitimer (__NR_SYSCALL_BASE+104) +#define __NR_getitimer (__NR_SYSCALL_BASE+105) +#define __NR_stat (__NR_SYSCALL_BASE+106) +#define __NR_lstat (__NR_SYSCALL_BASE+107) +#define __NR_fstat (__NR_SYSCALL_BASE+108) + + +#define __NR_vhangup (__NR_SYSCALL_BASE+111) +#define __NR_idle (__NR_SYSCALL_BASE+112) +#define __NR_syscall (__NR_SYSCALL_BASE+113) /* syscall to call a syscall! 
*/ +#define __NR_wait4 (__NR_SYSCALL_BASE+114) +#define __NR_swapoff (__NR_SYSCALL_BASE+115) +#define __NR_sysinfo (__NR_SYSCALL_BASE+116) +#define __NR_ipc (__NR_SYSCALL_BASE+117) +#define __NR_fsync (__NR_SYSCALL_BASE+118) +#define __NR_sigreturn (__NR_SYSCALL_BASE+119) +#define __NR_clone (__NR_SYSCALL_BASE+120) +#define __NR_setdomainname (__NR_SYSCALL_BASE+121) +#define __NR_uname (__NR_SYSCALL_BASE+122) +#define __NR_modify_ldt (__NR_SYSCALL_BASE+123) +#define __NR_adjtimex (__NR_SYSCALL_BASE+124) +#define __NR_mprotect (__NR_SYSCALL_BASE+125) +#define __NR_sigprocmask (__NR_SYSCALL_BASE+126) +#define __NR_create_module (__NR_SYSCALL_BASE+127) +#define __NR_init_module (__NR_SYSCALL_BASE+128) +#define __NR_delete_module (__NR_SYSCALL_BASE+129) +#define __NR_get_kernel_syms (__NR_SYSCALL_BASE+130) +#define __NR_quotactl (__NR_SYSCALL_BASE+131) +#define __NR_getpgid (__NR_SYSCALL_BASE+132) +#define __NR_fchdir (__NR_SYSCALL_BASE+133) +#define __NR_bdflush (__NR_SYSCALL_BASE+134) +#define __NR_sysfs (__NR_SYSCALL_BASE+135) +#define __NR_personality (__NR_SYSCALL_BASE+136) +#define __NR_afs_syscall (__NR_SYSCALL_BASE+137) /* Syscall for Andrew File System */ +#define __NR_setfsuid (__NR_SYSCALL_BASE+138) +#define __NR_setfsgid (__NR_SYSCALL_BASE+139) +#define __NR__llseek (__NR_SYSCALL_BASE+140) +#define __NR_getdents (__NR_SYSCALL_BASE+141) +#define __NR__newselect (__NR_SYSCALL_BASE+142) +#define __NR_flock (__NR_SYSCALL_BASE+143) +#define __NR_msync (__NR_SYSCALL_BASE+144) +#define __NR_readv (__NR_SYSCALL_BASE+145) +#define __NR_writev (__NR_SYSCALL_BASE+146) +#define __NR_getsid (__NR_SYSCALL_BASE+147) +#define __NR_fdatasync (__NR_SYSCALL_BASE+148) +#define __NR__sysctl (__NR_SYSCALL_BASE+149) +#define __NR_mlock (__NR_SYSCALL_BASE+150) +#define __NR_munlock (__NR_SYSCALL_BASE+151) +#define __NR_mlockall (__NR_SYSCALL_BASE+152) +#define __NR_munlockall (__NR_SYSCALL_BASE+153) +#define __NR_sched_setparam (__NR_SYSCALL_BASE+154) +#define __NR_sched_getparam (__NR_SYSCALL_BASE+155) +#define __NR_sched_setscheduler (__NR_SYSCALL_BASE+156) +#define __NR_sched_getscheduler (__NR_SYSCALL_BASE+157) +#define __NR_sched_yield (__NR_SYSCALL_BASE+158) +#define __NR_sched_get_priority_max (__NR_SYSCALL_BASE+159) +#define __NR_sched_get_priority_min (__NR_SYSCALL_BASE+160) +#define __NR_sched_rr_get_interval (__NR_SYSCALL_BASE+161) +#define __NR_nanosleep (__NR_SYSCALL_BASE+162) +#define __NR_mremap (__NR_SYSCALL_BASE+163) +#define __NR_setresuid (__NR_SYSCALL_BASE+164) +#define __NR_getresuid (__NR_SYSCALL_BASE+165) +#define __NR_vm86 (__NR_SYSCALL_BASE+166) +#define __NR_query_module (__NR_SYSCALL_BASE+167) +#define __NR_poll (__NR_SYSCALL_BASE+168) +#define __NR_nfsservctl (__NR_SYSCALL_BASE+169) +#define __NR_setresgid (__NR_SYSCALL_BASE+170) +#define __NR_getresgid (__NR_SYSCALL_BASE+171) +#define __NR_prctl (__NR_SYSCALL_BASE+172) +#define __NR_rt_sigreturn (__NR_SYSCALL_BASE+173) +#define __NR_rt_sigaction (__NR_SYSCALL_BASE+174) +#define __NR_rt_sigprocmask (__NR_SYSCALL_BASE+175) +#define __NR_rt_sigpending (__NR_SYSCALL_BASE+176) +#define __NR_rt_sigtimedwait (__NR_SYSCALL_BASE+177) +#define __NR_rt_sigqueueinfo (__NR_SYSCALL_BASE+178) +#define __NR_rt_sigsuspend (__NR_SYSCALL_BASE+179) +#define __NR_pread64 (__NR_SYSCALL_BASE+180) +#define __NR_pwrite64 (__NR_SYSCALL_BASE+181) +#define __NR_chown (__NR_SYSCALL_BASE+182) +#define __NR_getcwd (__NR_SYSCALL_BASE+183) +#define __NR_capget (__NR_SYSCALL_BASE+184) +#define __NR_capset (__NR_SYSCALL_BASE+185) +#define __NR_sigaltstack 
(__NR_SYSCALL_BASE+186) +#define __NR_sendfile (__NR_SYSCALL_BASE+187) + /* 188 reserved */ + /* 189 reserved */ +#define __NR_vfork (__NR_SYSCALL_BASE+190) +#define __NR_ugetrlimit (__NR_SYSCALL_BASE+191) /* SuS compliant getrlimit */ +#define __NR_mmap2 (__NR_SYSCALL_BASE+192) +#define __NR_truncate64 (__NR_SYSCALL_BASE+193) +#define __NR_ftruncate64 (__NR_SYSCALL_BASE+194) +#define __NR_stat64 (__NR_SYSCALL_BASE+195) +#define __NR_lstat64 (__NR_SYSCALL_BASE+196) +#define __NR_fstat64 (__NR_SYSCALL_BASE+197) +#define __NR_lchown32 (__NR_SYSCALL_BASE+198) +#define __NR_getuid32 (__NR_SYSCALL_BASE+199) +#define __NR_getgid32 (__NR_SYSCALL_BASE+200) +#define __NR_geteuid32 (__NR_SYSCALL_BASE+201) +#define __NR_getegid32 (__NR_SYSCALL_BASE+202) +#define __NR_setreuid32 (__NR_SYSCALL_BASE+203) +#define __NR_setregid32 (__NR_SYSCALL_BASE+204) +#define __NR_getgroups32 (__NR_SYSCALL_BASE+205) +#define __NR_setgroups32 (__NR_SYSCALL_BASE+206) +#define __NR_fchown32 (__NR_SYSCALL_BASE+207) +#define __NR_setresuid32 (__NR_SYSCALL_BASE+208) +#define __NR_getresuid32 (__NR_SYSCALL_BASE+209) +#define __NR_setresgid32 (__NR_SYSCALL_BASE+210) +#define __NR_getresgid32 (__NR_SYSCALL_BASE+211) +#define __NR_chown32 (__NR_SYSCALL_BASE+212) +#define __NR_setuid32 (__NR_SYSCALL_BASE+213) +#define __NR_setgid32 (__NR_SYSCALL_BASE+214) +#define __NR_setfsuid32 (__NR_SYSCALL_BASE+215) +#define __NR_setfsgid32 (__NR_SYSCALL_BASE+216) +#define __NR_getdents64 (__NR_SYSCALL_BASE+217) +#define __NR_pivot_root (__NR_SYSCALL_BASE+218) +#define __NR_mincore (__NR_SYSCALL_BASE+219) +#define __NR_madvise (__NR_SYSCALL_BASE+220) +#define __NR_fcntl64 (__NR_SYSCALL_BASE+221) + /* 222 for tux */ +#define __NR_security (__NR_SYSCALL_BASE+223) +#define __NR_gettid (__NR_SYSCALL_BASE+224) +#define __NR_readahead (__NR_SYSCALL_BASE+225) +#define __NR_setxattr (__NR_SYSCALL_BASE+226) +#define __NR_lsetxattr (__NR_SYSCALL_BASE+227) +#define __NR_fsetxattr (__NR_SYSCALL_BASE+228) +#define __NR_getxattr (__NR_SYSCALL_BASE+229) +#define __NR_lgetxattr (__NR_SYSCALL_BASE+230) +#define __NR_fgetxattr (__NR_SYSCALL_BASE+231) +#define __NR_listxattr (__NR_SYSCALL_BASE+232) +#define __NR_llistxattr (__NR_SYSCALL_BASE+233) +#define __NR_flistxattr (__NR_SYSCALL_BASE+234) +#define __NR_removexattr (__NR_SYSCALL_BASE+235) +#define __NR_lremovexattr (__NR_SYSCALL_BASE+236) +#define __NR_fremovexattr (__NR_SYSCALL_BASE+237) +#define __NR_tkill (__NR_SYSCALL_BASE+238) + +/* + * The following SWIs are ARM private. 
+ */ +#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000) +#define __ARM_NR_breakpoint (__ARM_NR_BASE+1) +#define __ARM_NR_cacheflush (__ARM_NR_BASE+2) +#define __ARM_NR_usr26 (__ARM_NR_BASE+3) +#define __ARM_NR_usr32 (__ARM_NR_BASE+4) + +#define __sys2(x) #x +#define __sys1(x) __sys2(x) + +#ifndef __syscall +#define __syscall(name) "swi\t" __sys1(__NR_##name) "\n\t" +#endif + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-125)) { \ + errno = -(res); \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _syscall0(type,name) \ +type name(void) { \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + "mov %0,r0" \ + :"=r" (__res) : : "r0","lr"); \ + __syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) { \ + long __res; \ + __asm__ __volatile__ ( \ + "mov\tr0,%1\n\t" \ + __syscall(name) \ + "mov %0,r0" \ + : "=r" (__res) \ + : "r" ((long)(arg1)) \ + : "r0","lr"); \ + __syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) { \ + long __res; \ + __asm__ __volatile__ ( \ + "mov\tr0,%1\n\t" \ + "mov\tr1,%2\n\t" \ + __syscall(name) \ + "mov\t%0,r0" \ + : "=r" (__res) \ + : "r" ((long)(arg1)),"r" ((long)(arg2)) \ + : "r0","r1","lr"); \ + __syscall_return(type,__res); \ +} + + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) { \ + long __res; \ + __asm__ __volatile__ ( \ + "mov\tr0,%1\n\t" \ + "mov\tr1,%2\n\t" \ + "mov\tr2,%3\n\t" \ + __syscall(name) \ + "mov\t%0,r0" \ + : "=r" (__res) \ + : "r" ((long)(arg1)),"r" ((long)(arg2)),"r" ((long)(arg3)) \ + : "r0","r1","r2","lr"); \ + __syscall_return(type,__res); \ +} + + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ + long __res; \ + __asm__ __volatile__ ( \ + "mov\tr0,%1\n\t" \ + "mov\tr1,%2\n\t" \ + "mov\tr2,%3\n\t" \ + "mov\tr3,%4\n\t" \ + __syscall(name) \ + "mov\t%0,r0" \ + : "=r" (__res) \ + : "r" ((long)(arg1)),"r" ((long)(arg2)),"r" ((long)(arg3)),"r" ((long)(arg4)) \ + : "r0","r1","r2","r3","lr"); \ + __syscall_return(type,__res); \ +} + + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \ + long __res; \ + __asm__ __volatile__ ( \ + "mov\tr0,%1\n\t" \ + "mov\tr1,%2\n\t" \ + "mov\tr2,%3\n\t" \ + "mov\tr3,%4\n\t" \ + "mov\tr4,%5\n\t" \ + __syscall(name) \ + "mov\t%0,r0" \ + : "=r" (__res) \ + : "r" ((long)(arg1)),"r" ((long)(arg2)),"r" ((long)(arg3)),"r" ((long)(arg4)), \ + "r" ((long)(arg5)) \ + : "r0","r1","r2","r3","r4","lr"); \ + __syscall_return(type,__res); \ +} + +#ifdef __KERNEL_SYSCALLS__ + +struct rusage; +asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); + +static inline long idle(void) +{ + extern long sys_idle(void); + return sys_idle(); +} + +static inline long pause(void) +{ + extern long sys_pause(void); + return sys_pause(); +} + +static inline long sync(void) +{ + extern long sys_sync(void); + return sys_sync(); +} + +static inline pid_t setsid(void) +{ + extern long sys_setsid(void); + return sys_setsid(); +} + +static inline long write(int fd, const char *buf, off_t count) +{ + extern long sys_write(int, const char *, int); + return sys_write(fd, buf, count); +} + +static inline long read(int fd, char *buf, off_t count) +{ + 
extern long sys_read(int, char *, int); + return sys_read(fd, buf, count); +} + +static inline off_t lseek(int fd, off_t offset, int count) +{ + extern off_t sys_lseek(int, off_t, int); + return sys_lseek(fd, offset, count); +} + +static inline long dup(int fd) +{ + extern long sys_dup(int); + return sys_dup(fd); +} + +static inline long open(const char *file, int flag, int mode) +{ + extern long sys_open(const char *, int, int); + return sys_open(file, flag, mode); +} + +static inline long close(int fd) +{ + extern long sys_close(unsigned int); + return sys_close(fd); +} + +static inline long _exit(int exitcode) +{ + extern long sys_exit(int) __attribute__((noreturn)); + return sys_exit(exitcode); +} + +static inline pid_t waitpid(pid_t pid, int *wait_stat, int options) +{ + return sys_wait4((int)pid, wait_stat, options, NULL); +} + +static inline long delete_module(const char *name) +{ + extern long sys_delete_module(const char *name); + return sys_delete_module(name); +} + +static inline pid_t wait(int * wait_stat) +{ + return sys_wait4(-1, wait_stat, 0, NULL); +} + +/* + * The following two can't be eliminated yet - they rely on + * specific conditions. + */ +static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp); + +#endif + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); + +#endif /* __ASM_ARM_UNISTD_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/user.h linux-2.5.70-bk14/include/asm-arm26/user.h --- linux-2.5.70-bk13/include/asm-arm26/user.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/user.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,84 @@ +#ifndef _ARM_USER_H +#define _ARM_USER_H + +#include +#include +/* Core file format: The core file is written in such a way that gdb + can understand it and provide useful information to the user (under + linux we use the 'trad-core' bfd). There are quite a number of + obstacles to being able to view the contents of the floating point + registers, and until these are solved you will not be able to view the + contents of them. Actually, you can read in the core file and look at + the contents of the user struct to find out what the floating point + registers contain. + The actual file contents are as follows: + UPAGE: 1 page consisting of a user struct that tells gdb what is present + in the file. Directly after this is a copy of the task_struct, which + is currently not used by gdb, but it may come in useful at some point. + All of the registers are stored as part of the upage. The upage should + always be only one page. + DATA: The data area is stored. We use current->end_text to + current->brk to pick up all of the user variables, plus any memory + that may have been malloced. No attempt is made to determine if a page + is demand-zero or if a page is totally unused, we just cover the entire + range. All of the addresses are rounded in such a way that an integral + number of pages is written. + STACK: We need the stack information in order to get a meaningful + backtrace. We need to write the data from (esp) to + current->start_stack, so we round each of these off in order to be able + to write an integer number of pages. + The minimum core file size is 3 pages, or 12288 bytes. 
+*/ + +struct user_fp { + struct fp_reg { + unsigned int sign1:1; + unsigned int unused:15; + unsigned int sign2:1; + unsigned int exponent:14; + unsigned int j:1; + unsigned int mantissa1:31; + unsigned int mantissa0:32; + } fpregs[8]; + unsigned int fpsr:32; + unsigned int fpcr:32; + unsigned char ftype[8]; + unsigned int init_flag; +}; + +/* When the kernel dumps core, it starts by dumping the user struct - + this will be used by gdb to figure out where the data and stack segments + are within the file, and what virtual addresses to use. */ +struct user{ +/* We start with the registers, to mimic the way that "memory" is returned + from the ptrace(3,...) function. */ + struct pt_regs regs; /* Where the registers are actually stored */ +/* ptrace does not yet supply these. Someday.... */ + int u_fpvalid; /* True if math co-processor being used. */ + /* for this mess. Not yet used. */ +/* The rest of this junk is to help gdb figure out what goes where */ + unsigned long int u_tsize; /* Text segment size (pages). */ + unsigned long int u_dsize; /* Data segment size (pages). */ + unsigned long int u_ssize; /* Stack segment size (pages). */ + unsigned long start_code; /* Starting virtual address of text. */ + unsigned long start_stack; /* Starting virtual address of stack area. + This is actually the bottom of the stack, + the top of the stack is always found in the + esp register. */ + long int signal; /* Signal that caused the core dump. */ + int reserved; /* No longer used */ + struct pt_regs * u_ar0; /* Used by gdb to help find the values for */ + /* the registers. */ + unsigned long magic; /* To uniquely identify a core file */ + char u_comm[32]; /* User command that was responsible */ + int u_debugreg[8]; + struct user_fp u_fp; /* FP state */ + struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */ + /* the FP registers. */ +}; +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif /* _ARM_USER_H */ diff -urN linux-2.5.70-bk13/include/asm-arm26/xor.h linux-2.5.70-bk14/include/asm-arm26/xor.h --- linux-2.5.70-bk13/include/asm-arm26/xor.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.5.70-bk14/include/asm-arm26/xor.h 2003-06-09 04:42:15.000000000 -0700 @@ -0,0 +1,141 @@ +/* + * linux/include/asm-arm/xor.h + * + * Copyright (C) 2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include + +#define __XOR(a1, a2) a1 ^= a2 + +#define GET_BLOCK_2(dst) \ + __asm__("ldmia %0, {%1, %2}" \ + : "=r" (dst), "=r" (a1), "=r" (a2) \ + : "0" (dst)) + +#define GET_BLOCK_4(dst) \ + __asm__("ldmia %0, {%1, %2, %3, %4}" \ + : "=r" (dst), "=r" (a1), "=r" (a2), "=r" (a3), "=r" (a4) \ + : "0" (dst)) + +#define XOR_BLOCK_2(src) \ + __asm__("ldmia %0!, {%1, %2}" \ + : "=r" (src), "=r" (b1), "=r" (b2) \ + : "0" (src)); \ + __XOR(a1, b1); __XOR(a2, b2); + +#define XOR_BLOCK_4(src) \ + __asm__("ldmia %0!, {%1, %2, %3, %4}" \ + : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \ + : "0" (src)); \ + __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4) + +#define PUT_BLOCK_2(dst) \ + __asm__ __volatile__("stmia %0!, {%2, %3}" \ + : "=r" (dst) \ + : "0" (dst), "r" (a1), "r" (a2)) + +#define PUT_BLOCK_4(dst) \ + __asm__ __volatile__("stmia %0!, {%2, %3, %4, %5}" \ + : "=r" (dst) \ + : "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4)) + +static void +xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + unsigned int lines = bytes / sizeof(unsigned long) / 4; + register unsigned int a1 __asm__("r4"); + register unsigned int a2 __asm__("r5"); + register unsigned int a3 __asm__("r6"); + register unsigned int a4 __asm__("r7"); + register unsigned int b1 __asm__("r8"); + register unsigned int b2 __asm__("r9"); + register unsigned int b3 __asm__("ip"); + register unsigned int b4 __asm__("lr"); + + do { + GET_BLOCK_4(p1); + XOR_BLOCK_4(p2); + PUT_BLOCK_4(p1); + } while (--lines); +} + +static void +xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + unsigned int lines = bytes / sizeof(unsigned long) / 4; + register unsigned int a1 __asm__("r4"); + register unsigned int a2 __asm__("r5"); + register unsigned int a3 __asm__("r6"); + register unsigned int a4 __asm__("r7"); + register unsigned int b1 __asm__("r8"); + register unsigned int b2 __asm__("r9"); + register unsigned int b3 __asm__("ip"); + register unsigned int b4 __asm__("lr"); + + do { + GET_BLOCK_4(p1); + XOR_BLOCK_4(p2); + XOR_BLOCK_4(p3); + PUT_BLOCK_4(p1); + } while (--lines); +} + +static void +xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + unsigned int lines = bytes / sizeof(unsigned long) / 2; + register unsigned int a1 __asm__("r8"); + register unsigned int a2 __asm__("r9"); + register unsigned int b1 __asm__("ip"); + register unsigned int b2 __asm__("lr"); + + do { + GET_BLOCK_2(p1); + XOR_BLOCK_2(p2); + XOR_BLOCK_2(p3); + XOR_BLOCK_2(p4); + PUT_BLOCK_2(p1); + } while (--lines); +} + +static void +xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + unsigned int lines = bytes / sizeof(unsigned long) / 2; + register unsigned int a1 __asm__("r8"); + register unsigned int a2 __asm__("r9"); + register unsigned int b1 __asm__("ip"); + register unsigned int b2 __asm__("lr"); + + do { + GET_BLOCK_2(p1); + XOR_BLOCK_2(p2); + XOR_BLOCK_2(p3); + XOR_BLOCK_2(p4); + XOR_BLOCK_2(p5); + PUT_BLOCK_2(p1); + } while (--lines); +} + +static struct xor_block_template xor_block_arm4regs = { + name: "arm4regs", + do_2: xor_arm4regs_2, + do_3: xor_arm4regs_3, + do_4: xor_arm4regs_4, + do_5: xor_arm4regs_5, +}; + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_arm4regs); \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + } while (0) diff -urN 
linux-2.5.70-bk13/include/linux/zconf.h linux-2.5.70-bk14/include/linux/zconf.h --- linux-2.5.70-bk13/include/linux/zconf.h 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/include/linux/zconf.h 2003-06-09 04:42:17.000000000 -0700 @@ -23,7 +23,7 @@ /* Maximum value for memLevel in deflateInit2 */ #ifndef MAX_MEM_LEVEL -# define MAX_MEM_LEVEL 9 +# define MAX_MEM_LEVEL 8 #endif /* Maximum value for windowBits in deflateInit2 and inflateInit2. diff -urN linux-2.5.70-bk13/include/linux/zlib.h linux-2.5.70-bk14/include/linux/zlib.h --- linux-2.5.70-bk13/include/linux/zlib.h 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/include/linux/zlib.h 2003-06-09 04:42:17.000000000 -0700 @@ -33,10 +33,6 @@ #include -#ifdef __cplusplus -extern "C" { -#endif - #define ZLIB_VERSION "1.1.3" /* @@ -95,7 +91,7 @@ memory management. The compression library attaches no meaning to the opaque value. - zalloc must return Z_NULL if there is not enough memory for the object. + zalloc must return NULL if there is not enough memory for the object. If zlib is used in a multi-threaded application, zalloc and zfree must be thread safe. @@ -157,8 +153,6 @@ #define Z_DEFLATED 8 /* The deflate compression method (the only one supported in this version) */ -#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ - /* basic functions */ extern const char * zlib_zlibVersion (void); @@ -180,7 +174,7 @@ Initializes the internal stream state for compression. The fields zalloc, zfree and opaque must be initialized before by the caller. - If zalloc and zfree are set to Z_NULL, deflateInit updates them to + If zalloc and zfree are set to NULL, deflateInit updates them to use default allocation functions. The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: @@ -302,11 +296,11 @@ Initializes the internal stream state for decompression. The fields next_in, avail_in, and workspace must be initialized before by - the caller. If next_in is not Z_NULL and avail_in is large enough (the exact + the caller. If next_in is not NULL and avail_in is large enough (the exact value depends on the compression method), inflateInit determines the compression method from the zlib header and allocates all data structures accordingly; otherwise the allocation will be deferred to the first call of - inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to + inflate. If zalloc and zfree are set to NULL, inflateInit updates them to use default allocation functions. inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough @@ -640,8 +634,4 @@ extern int zlib_inflateSyncPoint (z_streamp z); extern const uLong * zlib_get_crc_table (void); -#ifdef __cplusplus -} -#endif - #endif /* _ZLIB_H */ diff -urN linux-2.5.70-bk13/include/linux/zutil.h linux-2.5.70-bk14/include/linux/zutil.h --- linux-2.5.70-bk13/include/linux/zutil.h 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/include/linux/zutil.h 2003-06-09 04:42:17.000000000 -0700 @@ -18,11 +18,6 @@ #include #include -#ifndef local -# define local static -#endif -/* compile with -Dlocal if your debugger can't find static symbols */ - typedef unsigned char uch; typedef unsigned short ush; typedef unsigned long ulg; @@ -86,7 +81,7 @@ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed much faster. 
Usage example: - uLong adler = adler32(0L, Z_NULL, 0); + uLong adler = adler32(0L, NULL, 0); while (read_buffer(buffer, length) != EOF) { adler = adler32(adler, buffer, length); @@ -101,7 +96,7 @@ unsigned long s2 = (adler >> 16) & 0xffff; int k; - if (buf == Z_NULL) return 1L; + if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? len : NMAX; diff -urN linux-2.5.70-bk13/kernel/kmod.c linux-2.5.70-bk14/kernel/kmod.c --- linux-2.5.70-bk13/kernel/kmod.c 2003-05-26 18:00:27.000000000 -0700 +++ linux-2.5.70-bk14/kernel/kmod.c 2003-06-09 04:42:18.000000000 -0700 @@ -58,9 +58,9 @@ * If module auto-loading support is disabled then this function * becomes a no-operation. */ -#define MODULENAME_SIZE 32 int request_module(const char *fmt, ...) { +#define MODULENAME_SIZE 32 va_list args; char module_name[MODULENAME_SIZE]; unsigned int max_modprobes; diff -urN linux-2.5.70-bk13/lib/zlib_deflate/deflate.c linux-2.5.70-bk14/lib/zlib_deflate/deflate.c --- linux-2.5.70-bk13/lib/zlib_deflate/deflate.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_deflate/deflate.c 2003-06-09 04:42:18.000000000 -0700 @@ -66,18 +66,18 @@ typedef block_state (*compress_func) (deflate_state *s, int flush); /* Compression function. Returns the block state after the call. */ -local void fill_window (deflate_state *s); -local block_state deflate_stored (deflate_state *s, int flush); -local block_state deflate_fast (deflate_state *s, int flush); -local block_state deflate_slow (deflate_state *s, int flush); -local void lm_init (deflate_state *s); -local void putShortMSB (deflate_state *s, uInt b); -local void flush_pending (z_streamp strm); -local int read_buf (z_streamp strm, Byte *buf, unsigned size); -local uInt longest_match (deflate_state *s, IPos cur_match); +static void fill_window (deflate_state *s); +static block_state deflate_stored (deflate_state *s, int flush); +static block_state deflate_fast (deflate_state *s, int flush); +static block_state deflate_slow (deflate_state *s, int flush); +static void lm_init (deflate_state *s); +static void putShortMSB (deflate_state *s, uInt b); +static void flush_pending (z_streamp strm); +static int read_buf (z_streamp strm, Byte *buf, unsigned size); +static uInt longest_match (deflate_state *s, IPos cur_match); #ifdef DEBUG_ZLIB -local void check_match (deflate_state *s, IPos start, IPos match, +static void check_match (deflate_state *s, IPos start, IPos match, int length); #endif @@ -111,7 +111,7 @@ compress_func func; } config; -local const config configuration_table[10] = { +static const config configuration_table[10] = { /* good lazy nice chain */ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ /* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */ @@ -199,13 +199,13 @@ * output size for (length,distance) codes is <= 24 bits. 
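A few hunks below, the lower bound on windowBits is raised from 8 to 9, matching the smaller MAX_MEM_LEVEL in zconf.h above. A minimal standalone sketch of the resulting argument check, using the standard zlib constants; the helper name is illustrative, not part of the patch:

	#include <linux/zlib.h>	/* Z_DEFLATED, Z_HUFFMAN_ONLY, MAX_MEM_LEVEL */

	/* Illustrative only: mirrors the tightened parameter check in the
	 * zlib_deflateInit2_() hunk below; deflate_params_ok() is a
	 * hypothetical name. */
	static int deflate_params_ok(int level, int method, int windowBits,
				     int memLevel, int strategy)
	{
		if (memLevel < 1 || memLevel > MAX_MEM_LEVEL)
			return 0;
		if (method != Z_DEFLATED)
			return 0;
		if (windowBits < 9 || windowBits > 15)	/* was: windowBits < 8 */
			return 0;
		if (level < 0 || level > 9)
			return 0;
		if (strategy < 0 || strategy > Z_HUFFMAN_ONLY)
			return 0;
		return 1;
	}
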
*/ - if (version == Z_NULL || version[0] != my_version[0] || + if (version == NULL || version[0] != my_version[0] || stream_size != sizeof(z_stream)) { return Z_VERSION_ERROR; } - if (strm == Z_NULL) return Z_STREAM_ERROR; + if (strm == NULL) return Z_STREAM_ERROR; - strm->msg = Z_NULL; + strm->msg = NULL; if (level == Z_DEFAULT_COMPRESSION) level = 6; @@ -216,7 +216,7 @@ windowBits = -windowBits; } if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || - windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + windowBits < 9 || windowBits > 15 || level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) { return Z_STREAM_ERROR; } @@ -266,7 +266,7 @@ uInt n; IPos hash_head = 0; - if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL) + if (strm == NULL || strm->state == NULL || dictionary == NULL) return Z_STREAM_ERROR; s = (deflate_state *) strm->state; @@ -305,11 +305,11 @@ { deflate_state *s; - if (strm == Z_NULL || strm->state == Z_NULL) + if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; strm->total_in = strm->total_out = 0; - strm->msg = Z_NULL; + strm->msg = NULL; strm->data_type = Z_UNKNOWN; s = (deflate_state *)strm->state; @@ -340,7 +340,7 @@ compress_func func; int err = Z_OK; - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; s = (deflate_state *) strm->state; if (level == Z_DEFAULT_COMPRESSION) { @@ -371,7 +371,7 @@ * IN assertion: the stream state is correct and there is enough room in * pending_buf. */ -local void putShortMSB( +static void putShortMSB( deflate_state *s, uInt b ) @@ -386,7 +386,7 @@ * to avoid allocating a large strm->next_out buffer and copying into it. * (See also read_buf()). */ -local void flush_pending( +static void flush_pending( z_streamp strm ) { @@ -396,7 +396,7 @@ if (len > strm->avail_out) len = strm->avail_out; if (len == 0) return; - if (strm->next_out != Z_NULL) { + if (strm->next_out != NULL) { memcpy(strm->next_out, s->pending_out, len); strm->next_out += len; } @@ -418,13 +418,13 @@ int old_flush; /* value of flush param for previous deflate call */ deflate_state *s; - if (strm == Z_NULL || strm->state == Z_NULL || + if (strm == NULL || strm->state == NULL || flush > Z_FINISH || flush < 0) { return Z_STREAM_ERROR; } s = (deflate_state *) strm->state; - if ((strm->next_in == Z_NULL && strm->avail_in != 0) || + if ((strm->next_in == NULL && strm->avail_in != 0) || (s->status == FINISH_STATE && flush != Z_FINISH)) { return Z_STREAM_ERROR; } @@ -555,7 +555,7 @@ int status; deflate_state *s; - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; s = (deflate_state *) strm->state; status = s->status; @@ -564,7 +564,7 @@ return Z_STREAM_ERROR; } - strm->state = Z_NULL; + strm->state = NULL; return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; } @@ -586,7 +586,7 @@ deflate_workspace *mem; - if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { + if (source == NULL || dest == NULL || source->state == NULL) { return Z_STREAM_ERROR; } @@ -632,7 +632,7 @@ * allocating a large strm->next_in buffer and copying from it. * (See also flush_pending()). 
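Both flush_pending() and read_buf() reduce to the same bounded-copy step; a minimal sketch of that pattern, with hypothetical names (the real functions operate directly on z_streamp fields):

	#include <linux/string.h>	/* memcpy() */

	/* Illustrative only: copy at most *avail bytes, then advance the
	 * source cursor and the running total.  All names are hypothetical. */
	static unsigned bounded_copy(unsigned char *dst, const unsigned char **src,
				     unsigned *avail, unsigned long *total,
				     unsigned want)
	{
		unsigned len = want;

		if (len > *avail)
			len = *avail;
		if (len == 0)
			return 0;

		memcpy(dst, *src, len);
		*src   += len;
		*avail -= len;
		*total += len;
		return len;
	}
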
*/ -local int read_buf( +static int read_buf( z_streamp strm, Byte *buf, unsigned size @@ -658,7 +658,7 @@ /* =========================================================================== * Initialize the "longest match" routines for a new zlib stream */ -local void lm_init( +static void lm_init( deflate_state *s ) { @@ -693,7 +693,7 @@ /* For 80x86 and 680x0, an optimized version will be provided in match.asm or * match.S. The code will be functionally equivalent. */ -local uInt longest_match( +static uInt longest_match( deflate_state *s, IPos cur_match /* current match */ ) @@ -836,7 +836,7 @@ /* =========================================================================== * Check that the match at match_start is indeed a match. */ -local void check_match( +static void check_match( deflate_state *s, IPos start, IPos match, @@ -872,7 +872,7 @@ * performed for at least two bytes (required for the zip translate_eol * option -- not supported here). */ -local void fill_window(s) +static void fill_window(s) deflate_state *s; { register unsigned n, m; @@ -968,7 +968,7 @@ #define FLUSH_BLOCK_ONLY(s, eof) { \ zlib_tr_flush_block(s, (s->block_start >= 0L ? \ (char *)&s->window[(unsigned)s->block_start] : \ - (char *)Z_NULL), \ + NULL), \ (ulg)((long)s->strstart - s->block_start), \ (eof)); \ s->block_start = s->strstart; \ @@ -991,7 +991,7 @@ * NOTE: this function should be optimized to avoid extra copying from * window to pending_buf. */ -local block_state deflate_stored( +static block_state deflate_stored( deflate_state *s, int flush ) @@ -1050,7 +1050,7 @@ * new strings in the dictionary only for unmatched strings or for short * matches. It is used only for the fast compression options. */ -local block_state deflate_fast( +static block_state deflate_fast( deflate_state *s, int flush ) @@ -1144,7 +1144,7 @@ * evaluation for matches: a match is finally adopted only if there is * no better match at the next window position. */ -local block_state deflate_slow( +static block_state deflate_slow( deflate_state *s, int flush ) diff -urN linux-2.5.70-bk13/lib/zlib_deflate/deftree.c linux-2.5.70-bk14/lib/zlib_deflate/deftree.c --- linux-2.5.70-bk13/lib/zlib_deflate/deftree.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_deflate/deftree.c 2003-06-09 04:42:18.000000000 -0700 @@ -60,16 +60,16 @@ #define REPZ_11_138 18 /* repeat a zero length 11-138 times (7 bits of repeat count) */ -local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ +static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; -local const int extra_dbits[D_CODES] /* extra bits for each distance code */ +static const int extra_dbits[D_CODES] /* extra bits for each distance code */ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; -local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ +static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; -local const uch bl_order[BL_CODES] +static const uch bl_order[BL_CODES] = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; /* The lengths of the bit length codes are sent in order of decreasing * probability, to avoid transmitting the lengths for unused bit length codes. @@ -84,31 +84,31 @@ * Local data. These are initialized only once. */ -local ct_data static_ltree[L_CODES+2]; +static ct_data static_ltree[L_CODES+2]; /* The static literal tree. 
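(The fixed code lengths behind static_ltree[] come from the DEFLATE spec, RFC 1951; a short illustrative sketch of those lengths follows, with a hypothetical helper name.)

	/* Illustrative only: fixed-Huffman literal/length code lengths from
	 * RFC 1951.  tr_static_init() derives static_ltree[] from exactly
	 * these lengths; fixed_ltree_lengths() is a hypothetical name. */
	static void fixed_ltree_lengths(unsigned char len[288])
	{
		int n;

		for (n = 0;   n <= 143; n++) len[n] = 8;	/* literals   0..143 */
		for (n = 144; n <= 255; n++) len[n] = 9;	/* literals 144..255 */
		for (n = 256; n <= 279; n++) len[n] = 7;	/* EOB + length codes */
		for (n = 280; n <= 287; n++) len[n] = 8;	/* lengths, incl. 286/287 */
	}
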
Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init * below). */ -local ct_data static_dtree[D_CODES]; +static ct_data static_dtree[D_CODES]; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) */ -local uch dist_code[512]; +static uch dist_code[512]; /* distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. */ -local uch length_code[MAX_MATCH-MIN_MATCH+1]; +static uch length_code[MAX_MATCH-MIN_MATCH+1]; /* length code for each normalized match length (0 == MIN_MATCH) */ -local int base_length[LENGTH_CODES]; +static int base_length[LENGTH_CODES]; /* First normalized length for each code (0 = MIN_MATCH) */ -local int base_dist[D_CODES]; +static int base_dist[D_CODES]; /* First normalized distance for each code (0 = distance of 1) */ struct static_tree_desc_s { @@ -119,37 +119,37 @@ int max_length; /* max bit length for the codes */ }; -local static_tree_desc static_l_desc = +static static_tree_desc static_l_desc = {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; -local static_tree_desc static_d_desc = +static static_tree_desc static_d_desc = {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; -local static_tree_desc static_bl_desc = +static static_tree_desc static_bl_desc = {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; /* =========================================================================== * Local (static) routines in this file. */ -local void tr_static_init (void); -local void init_block (deflate_state *s); -local void pqdownheap (deflate_state *s, ct_data *tree, int k); -local void gen_bitlen (deflate_state *s, tree_desc *desc); -local void gen_codes (ct_data *tree, int max_code, ush *bl_count); -local void build_tree (deflate_state *s, tree_desc *desc); -local void scan_tree (deflate_state *s, ct_data *tree, int max_code); -local void send_tree (deflate_state *s, ct_data *tree, int max_code); -local int build_bl_tree (deflate_state *s); -local void send_all_trees (deflate_state *s, int lcodes, int dcodes, +static void tr_static_init (void); +static void init_block (deflate_state *s); +static void pqdownheap (deflate_state *s, ct_data *tree, int k); +static void gen_bitlen (deflate_state *s, tree_desc *desc); +static void gen_codes (ct_data *tree, int max_code, ush *bl_count); +static void build_tree (deflate_state *s, tree_desc *desc); +static void scan_tree (deflate_state *s, ct_data *tree, int max_code); +static void send_tree (deflate_state *s, ct_data *tree, int max_code); +static int build_bl_tree (deflate_state *s); +static void send_all_trees (deflate_state *s, int lcodes, int dcodes, int blcodes); -local void compress_block (deflate_state *s, ct_data *ltree, +static void compress_block (deflate_state *s, ct_data *ltree, ct_data *dtree); -local void set_data_type (deflate_state *s); -local unsigned bi_reverse (unsigned value, int length); -local void bi_windup (deflate_state *s); -local void bi_flush (deflate_state *s); -local void copy_block (deflate_state *s, char *buf, unsigned len, +static void set_data_type (deflate_state *s); +static unsigned bi_reverse (unsigned value, int length); +static void bi_windup (deflate_state *s); +static void bi_flush (deflate_state *s); +static void copy_block (deflate_state *s, char *buf, unsigned len, int header); #ifndef DEBUG_ZLIB @@ 
-174,9 +174,9 @@ * IN assertion: length <= 16 and value fits in length bits. */ #ifdef DEBUG_ZLIB -local void send_bits (deflate_state *s, int value, int length); +static void send_bits (deflate_state *s, int value, int length); -local void send_bits( +static void send_bits( deflate_state *s, int value, /* value to send */ int length /* number of bits */ @@ -226,7 +226,7 @@ * this function may be called by two threads concurrently, but this is * harmless since both invocations do exactly the same thing. */ -local void tr_static_init(void) +static void tr_static_init(void) { static int static_init_done = 0; int n; /* iterates over tree elements */ @@ -327,7 +327,7 @@ /* =========================================================================== * Initialize a new block. */ -local void init_block( +static void init_block( deflate_state *s ) { @@ -372,7 +372,7 @@ * when the heap property is re-established (each father smaller than its * two sons). */ -local void pqdownheap( +static void pqdownheap( deflate_state *s, ct_data *tree, /* the tree to restore */ int k /* node to move down */ @@ -408,7 +408,7 @@ * The length opt_len is updated; static_len is also updated if stree is * not null. */ -local void gen_bitlen( +static void gen_bitlen( deflate_state *s, tree_desc *desc /* the tree descriptor */ ) @@ -496,7 +496,7 @@ * OUT assertion: the field code is set for all tree elements of non * zero code length. */ -local void gen_codes( +static void gen_codes( ct_data *tree, /* the tree to decorate */ int max_code, /* largest code with non zero frequency */ ush *bl_count /* number of codes at each bit length */ @@ -539,7 +539,7 @@ * and corresponding code. The length opt_len is updated; static_len is * also updated if stree is not null. The field max_code is set. */ -local void build_tree( +static void build_tree( deflate_state *s, tree_desc *desc /* the tree descriptor */ ) @@ -627,7 +627,7 @@ * Scan a literal or distance tree to determine the frequencies of the codes * in the bit length tree. */ -local void scan_tree( +static void scan_tree( deflate_state *s, ct_data *tree, /* the tree to be scanned */ int max_code /* and its largest code of non zero frequency */ @@ -673,7 +673,7 @@ * Send a literal or distance tree in compressed form, using the codes in * bl_tree. */ -local void send_tree( +static void send_tree( deflate_state *s, ct_data *tree, /* the tree to be scanned */ int max_code /* and its largest code of non zero frequency */ @@ -725,7 +725,7 @@ * Construct the Huffman tree for the bit lengths and return the index in * bl_order of the last bit length code to send. */ -local int build_bl_tree( +static int build_bl_tree( deflate_state *s ) { @@ -761,7 +761,7 @@ * lengths of the bit length codes, the literal tree and the distance tree. * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. */ -local void send_all_trees( +static void send_all_trees( deflate_state *s, int lcodes, /* number of codes for each tree */ int dcodes, /* number of codes for each tree */ @@ -1019,7 +1019,7 @@ /* =========================================================================== * Send the block data compressed using the given Huffman trees */ -local void compress_block( +static void compress_block( deflate_state *s, ct_data *ltree, /* literal tree */ ct_data *dtree /* distance tree */ @@ -1073,7 +1073,7 @@ * IN assertion: the fields freq of dyn_ltree are set and the total of all * frequencies does not exceed 64K (to fit in an int on 16 bit machines). 
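set_data_type(), changed just below, classifies a block as binary or text from those literal frequencies; a rough standalone sketch of that style of heuristic (the array name and threshold are illustrative, not the exact zlib code):

	/* Illustrative only: crude binary/text classification over literal
	 * byte frequencies, in the spirit of set_data_type().  freq[] and
	 * the 20% threshold are stand-ins. */
	static int looks_binary(const unsigned int freq[256])
	{
		unsigned long total = 0, suspicious = 0;
		int n;

		for (n = 0; n < 256; n++) {
			total += freq[n];
			if (n <= 6 || n >= 128)		/* control / high-bit bytes */
				suspicious += freq[n];
		}
		return total != 0 && suspicious * 5 > total;	/* > 20% */
	}
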
*/ -local void set_data_type( +static void set_data_type( deflate_state *s ) { @@ -1090,7 +1090,7 @@ * Copy a stored block, storing first the length and its * one's complement if requested. */ -local void copy_block( +static void copy_block( deflate_state *s, char *buf, /* the input data */ unsigned len, /* its length */ diff -urN linux-2.5.70-bk13/lib/zlib_inflate/infblock.c linux-2.5.70-bk14/lib/zlib_inflate/infblock.c --- linux-2.5.70-bk13/lib/zlib_inflate/infblock.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_inflate/infblock.c 2003-06-09 04:42:18.000000000 -0700 @@ -16,7 +16,7 @@ #define bits word.what.Bits /* Table for deflate from PKZIP's appnote.txt. */ -local const uInt border[] = { /* Order of the bit length code lengths */ +static const uInt border[] = { /* Order of the bit length code lengths */ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; /* @@ -71,7 +71,7 @@ uLong *c ) { - if (c != Z_NULL) + if (c != NULL) *c = s->check; if (s->mode == CODES) zlib_inflate_codes_free(s->sub.decode.codes, z); @@ -79,8 +79,8 @@ s->bitk = 0; s->bitb = 0; s->read = s->write = s->window; - if (s->checkfn != Z_NULL) - z->adler = s->check = (*s->checkfn)(0L, (const Byte *)Z_NULL, 0); + if (s->checkfn != NULL) + z->adler = s->check = (*s->checkfn)(0L, NULL, 0); } inflate_blocks_statef *zlib_inflate_blocks_new( @@ -97,7 +97,7 @@ s->end = s->window + w; s->checkfn = c; s->mode = TYPE; - zlib_inflate_blocks_reset(s, z, Z_NULL); + zlib_inflate_blocks_reset(s, z, NULL); return s; } @@ -141,7 +141,7 @@ zlib_inflate_trees_fixed(&bl, &bd, &tl, &td, z); s->sub.decode.codes = zlib_inflate_codes_new(bl, bd, tl, td, z); - if (s->sub.decode.codes == Z_NULL) + if (s->sub.decode.codes == NULL) { r = Z_MEM_ERROR; LEAVE @@ -270,7 +270,7 @@ s->sub.trees.index = i; } } - s->sub.trees.tb = Z_NULL; + s->sub.trees.tb = NULL; { uInt bl, bd; inflate_huft *tl, *td; @@ -289,7 +289,7 @@ r = t; LEAVE } - if ((c = zlib_inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL) + if ((c = zlib_inflate_codes_new(bl, bd, tl, td, z)) == NULL) { r = Z_MEM_ERROR; LEAVE @@ -333,7 +333,7 @@ z_streamp z ) { - zlib_inflate_blocks_reset(s, z, Z_NULL); + zlib_inflate_blocks_reset(s, z, NULL); return Z_OK; } @@ -351,7 +351,7 @@ /* Returns true if inflate is currently at the end of a block generated * by Z_SYNC_FLUSH or Z_FULL_FLUSH. - * IN assertion: s != Z_NULL + * IN assertion: s != NULL */ int zlib_inflate_blocks_sync_point( inflate_blocks_statef *s diff -urN linux-2.5.70-bk13/lib/zlib_inflate/infcodes.c linux-2.5.70-bk14/lib/zlib_inflate/infcodes.c --- linux-2.5.70-bk13/lib/zlib_inflate/infcodes.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_inflate/infcodes.c 2003-06-09 04:42:18.000000000 -0700 @@ -149,15 +149,9 @@ DUMPBITS(j) c->mode = COPY; case COPY: /* o: copying bytes in window, waiting for space */ -#ifndef __TURBOC__ /* Turbo C bug for following expression */ - f = (uInt)(q - s->window) < c->sub.copy.dist ? 
- s->end - (c->sub.copy.dist - (q - s->window)) : - q - c->sub.copy.dist; -#else f = q - c->sub.copy.dist; - if ((uInt)(q - s->window) < c->sub.copy.dist) - f = s->end - (c->sub.copy.dist - (uInt)(q - s->window)); -#endif + while (f < s->window) /* modulo window size-"while" instead */ + f += s->end - s->window; /* of "if" handles invalid distances */ while (c->len) { NEEDOUT diff -urN linux-2.5.70-bk13/lib/zlib_inflate/inffast.c linux-2.5.70-bk14/lib/zlib_inflate/inffast.c --- linux-2.5.70-bk13/lib/zlib_inflate/inffast.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_inflate/inffast.c 2003-06-09 04:42:18.000000000 -0700 @@ -90,28 +90,41 @@ /* do the copy */ m -= c; - if ((uInt)(q - s->window) >= d) /* offset before dest */ - { /* just copy */ - r = q - d; - *q++ = *r++; c--; /* minimum count is three, */ - *q++ = *r++; c--; /* so unroll loop a little */ - } - else /* else offset after destination */ + r = q - d; + if (r < s->window) /* wrap if needed */ { - e = d - (uInt)(q - s->window); /* bytes from offset to end */ - r = s->end - e; /* pointer to offset */ - if (c > e) /* if source crosses, */ + do { + r += s->end - s->window; /* force pointer in window */ + } while (r < s->window); /* covers invalid distances */ + e = s->end - r; + if (c > e) { - c -= e; /* copy to end of window */ + c -= e; /* wrapped copy */ do { - *q++ = *r++; + *q++ = *r++; } while (--e); - r = s->window; /* copy rest from start of window */ + r = s->window; + do { + *q++ = *r++; + } while (--c); } + else /* normal copy */ + { + *q++ = *r++; c--; + *q++ = *r++; c--; + do { + *q++ = *r++; + } while (--c); + } + } + else /* normal copy */ + { + *q++ = *r++; c--; + *q++ = *r++; c--; + do { + *q++ = *r++; + } while (--c); } - do { /* copy all or what's left */ - *q++ = *r++; - } while (--c); break; } else if ((e & 64) == 0) diff -urN linux-2.5.70-bk13/lib/zlib_inflate/inffixed.h linux-2.5.70-bk14/lib/zlib_inflate/inffixed.h --- linux-2.5.70-bk13/lib/zlib_inflate/inffixed.h 2003-05-26 18:00:25.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_inflate/inffixed.h 2003-06-09 04:42:18.000000000 -0700 @@ -7,9 +7,9 @@ subject to change. Applications should only use zlib.h. */ -local uInt fixed_bl = 9; -local uInt fixed_bd = 5; -local inflate_huft fixed_tl[] = { +static uInt fixed_bl = 9; +static uInt fixed_bd = 5; +static inflate_huft fixed_tl[] = { {{{96,7}},256}, {{{0,8}},80}, {{{0,8}},16}, {{{84,8}},115}, {{{82,7}},31}, {{{0,8}},112}, {{{0,8}},48}, {{{0,9}},192}, {{{80,7}},10}, {{{0,8}},96}, {{{0,8}},32}, {{{0,9}},160}, @@ -139,7 +139,7 @@ {{{82,7}},27}, {{{0,8}},111}, {{{0,8}},47}, {{{0,9}},191}, {{{0,8}},15}, {{{0,8}},143}, {{{0,8}},79}, {{{0,9}},255} }; -local inflate_huft fixed_td[] = { +static inflate_huft fixed_td[] = { {{{80,5}},1}, {{{87,5}},257}, {{{83,5}},17}, {{{91,5}},4097}, {{{81,5}},5}, {{{89,5}},1025}, {{{85,5}},65}, {{{93,5}},16385}, {{{80,5}},3}, {{{88,5}},513}, {{{84,5}},33}, {{{92,5}},8193}, diff -urN linux-2.5.70-bk13/lib/zlib_inflate/inflate.c linux-2.5.70-bk14/lib/zlib_inflate/inflate.c --- linux-2.5.70-bk13/lib/zlib_inflate/inflate.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_inflate/inflate.c 2003-06-09 04:42:18.000000000 -0700 @@ -18,12 +18,12 @@ z_streamp z ) { - if (z == Z_NULL || z->state == Z_NULL || z->workspace == Z_NULL) + if (z == NULL || z->state == NULL || z->workspace == NULL) return Z_STREAM_ERROR; z->total_in = z->total_out = 0; - z->msg = Z_NULL; + z->msg = NULL; z->state->mode = z->state->nowrap ? 
BLOCKS : METHOD; - zlib_inflate_blocks_reset(z->state->blocks, z, Z_NULL); + zlib_inflate_blocks_reset(z->state->blocks, z, NULL); return Z_OK; } @@ -32,11 +32,11 @@ z_streamp z ) { - if (z == Z_NULL || z->state == Z_NULL || z->workspace == Z_NULL) + if (z == NULL || z->state == NULL || z->workspace == NULL) return Z_STREAM_ERROR; - if (z->state->blocks != Z_NULL) + if (z->state->blocks != NULL) zlib_inflate_blocks_free(z->state->blocks, z); - z->state = Z_NULL; + z->state = NULL; return Z_OK; } @@ -48,16 +48,16 @@ int stream_size ) { - if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || - stream_size != sizeof(z_stream) || z->workspace == Z_NULL) + if (version == NULL || version[0] != ZLIB_VERSION[0] || + stream_size != sizeof(z_stream) || z->workspace == NULL) return Z_VERSION_ERROR; /* initialize state */ - if (z == Z_NULL) + if (z == NULL) return Z_STREAM_ERROR; - z->msg = Z_NULL; + z->msg = NULL; z->state = &WS(z)->internal_state; - z->state->blocks = Z_NULL; + z->state->blocks = NULL; /* handle undocumented nowrap option (no zlib header or check) */ z->state->nowrap = 0; @@ -77,8 +77,8 @@ /* create inflate_blocks state */ if ((z->state->blocks = - zlib_inflate_blocks_new(z, z->state->nowrap ? Z_NULL : zlib_adler32, (uInt)1 << w)) - == Z_NULL) + zlib_inflate_blocks_new(z, z->state->nowrap ? NULL : zlib_adler32, (uInt)1 << w)) + == NULL) { zlib_inflateEnd(z); return Z_MEM_ERROR; @@ -125,7 +125,7 @@ int r, trv; uInt b; - if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL) + if (z == NULL || z->state == NULL || z->next_in == NULL) return Z_STREAM_ERROR; trv = f == Z_FINISH ? Z_BUF_ERROR : Z_OK; r = Z_BUF_ERROR; @@ -260,7 +260,7 @@ uLong r, w; /* temporaries to save total_in and total_out */ /* set up */ - if (z == Z_NULL || z->state == Z_NULL) + if (z == NULL || z->state == NULL) return Z_STREAM_ERROR; if (z->state->mode != I_BAD) { @@ -313,7 +313,7 @@ z_streamp z ) { - if (z == Z_NULL || z->state == Z_NULL || z->state->blocks == Z_NULL) + if (z == NULL || z->state == NULL || z->state->blocks == NULL) return Z_STREAM_ERROR; return zlib_inflate_blocks_sync_point(z->state->blocks); } @@ -352,7 +352,7 @@ /* is there room until end of buffer? */ if (t > m) t = m; /* update check information */ - if (s->checkfn != Z_NULL) + if (s->checkfn != NULL) s->check = (*s->checkfn)(s->check, q, t); memcpy(q, p, t); q += t; diff -urN linux-2.5.70-bk13/lib/zlib_inflate/inftrees.c linux-2.5.70-bk14/lib/zlib_inflate/inftrees.c --- linux-2.5.70-bk13/lib/zlib_inflate/inftrees.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_inflate/inftrees.c 2003-06-09 04:42:18.000000000 -0700 @@ -22,7 +22,7 @@ #define bits word.what.Bits -local int huft_build ( +static int huft_build ( uInt *, /* code lengths in bits */ uInt, /* number of codes */ uInt, /* number of "simple" codes */ @@ -35,18 +35,18 @@ uInt * ); /* space for values */ /* Tables for deflate from PKZIP's appnote.txt. 
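Each entry in the tables below pairs a base value with an extra-bit count; a decoded length code resolves as sketched here (get_bits() is a hypothetical bit reader, not part of inftrees.c):

	/* Illustrative only: turning a decoded length code (257..285) into a
	 * match length using base/extra-bit tables like cplens[]/cplext[]. */
	extern unsigned int get_bits(unsigned int nbits);	/* hypothetical */

	static unsigned int decode_match_length(unsigned int code,
						const unsigned int *lens,
						const unsigned int *lext)
	{
		unsigned int idx = code - 257;			/* table index       */

		return lens[idx] + get_bits(lext[idx]);		/* base + extra bits */
	}
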
*/ -local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */ +static const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; /* see note #13 above about 258 */ -local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */ +static const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */ -local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */ +static const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; -local const uInt cpdext[30] = { /* Extra bits for distance codes */ +static const uInt cpdext[30] = { /* Extra bits for distance codes */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; @@ -87,7 +87,7 @@ /* If BMAX needs to be larger than 16, then h and x[] should be uLong. */ #define BMAX 15 /* maximum bit length of any code */ -local int huft_build( +static int huft_build( uInt *b, /* code lengths in bits (all assumed <= BMAX) */ uInt n, /* number of codes (assumed <= 288) */ uInt s, /* number of simple-valued codes (0..s-1) */ @@ -139,7 +139,7 @@ } while (--i); if (c[0] == n) /* null input--all zero length codes */ { - *t = (inflate_huft *)Z_NULL; + *t = NULL; *m = 0; return Z_OK; } @@ -193,8 +193,8 @@ p = v; /* grab values in bit order */ h = -1; /* no tables yet--level -1 */ w = -l; /* bits decoded == (l * h) */ - u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */ - q = (inflate_huft *)Z_NULL; /* ditto */ + u[0] = NULL; /* just to keep compilers happy */ + q = NULL; /* ditto */ z = 0; /* ditto */ /* go through the bit lengths (k already is bits in shortest code) */ @@ -229,7 +229,7 @@ /* allocate new table */ if (*hn + z > MANY) /* (note: doesn't matter for fixed) */ - return Z_MEM_ERROR; /* not enough memory */ + return Z_DATA_ERROR; /* overflow of MANY */ u[h] = q = hp + *hn; *hn += z; @@ -302,8 +302,7 @@ uInt *v; /* work area for huft_build */ v = WS(z)->tree_work_area_1; - r = huft_build(c, 19, 19, (uInt*)Z_NULL, (uInt*)Z_NULL, - tb, bb, hp, &hn, v); + r = huft_build(c, 19, 19, NULL, NULL, tb, bb, hp, &hn, v); if (r == Z_DATA_ERROR) z->msg = (char*)"oversubscribed dynamic bit lengths tree"; else if (r == Z_BUF_ERROR || *bb == 0) diff -urN linux-2.5.70-bk13/lib/zlib_inflate/infutil.c linux-2.5.70-bk14/lib/zlib_inflate/infutil.c --- linux-2.5.70-bk13/lib/zlib_inflate/infutil.c 2003-06-09 04:41:57.000000000 -0700 +++ linux-2.5.70-bk14/lib/zlib_inflate/infutil.c 2003-06-09 04:42:18.000000000 -0700 @@ -44,7 +44,7 @@ z->total_out += n; /* update check information */ - if (s->checkfn != Z_NULL) + if (s->checkfn != NULL) z->adler = s->check = (*s->checkfn)(s->check, q, n); /* copy as far as end of window */ @@ -70,7 +70,7 @@ z->total_out += n; /* update check information */ - if (s->checkfn != Z_NULL) + if (s->checkfn != NULL) z->adler = s->check = (*s->checkfn)(s->check, q, n); /* copy */ diff -urN linux-2.5.70-bk13/scripts/docproc.c linux-2.5.70-bk14/scripts/docproc.c --- linux-2.5.70-bk13/scripts/docproc.c 2003-05-26 18:00:24.000000000 -0700 +++ linux-2.5.70-bk14/scripts/docproc.c 2003-06-09 04:42:26.000000000 -0700 @@ -93,7 +93,7 @@ waitpid(pid, 
&ret ,0); } if (WIFEXITED(ret)) - exitstatus = WEXITSTATUS(ret); + exitstatus |= WEXITSTATUS(ret); else exitstatus = 0xff; } diff -urN linux-2.5.70-bk13/scripts/kernel-doc linux-2.5.70-bk14/scripts/kernel-doc --- linux-2.5.70-bk13/scripts/kernel-doc 2003-05-26 18:00:39.000000000 -0700 +++ linux-2.5.70-bk14/scripts/kernel-doc 2003-06-09 04:42:27.000000000 -0700 @@ -154,6 +154,7 @@ # '%CONST' - name of a constant. my $errors = 0; +my $warnings = 0; # match expressions used to find embedded type information my $type_constant = '\%([-_\w]+)'; @@ -1352,7 +1353,9 @@ "or member '$param' not " . "described in '$declaration_name'\n"; } - ++$errors; + print STDERR "Warning(${file}:$.):". + " No description found for parameter '$param'\n"; + ++$warnings; } push @parameterlist, $param; @@ -1456,6 +1459,12 @@ chomp; process_file($_); } +if ($verbose && $errors) { + print STDERR "$errors errors\n"; +} +if ($verbose && $warnings) { + print STDERR "$warnings warnings\n"; +} exit($errors); @@ -1580,7 +1589,7 @@ } else { print STDERR "Warning(${file}:$.): Cannot understand $_ on line $.", " - I thought it was a doc line\n"; - ++$errors; + ++$warnings; $state = 0; } } elsif ($state == 2) { # look for head: lines, and include content @@ -1633,7 +1642,7 @@ } else { # i dont know - bad line? ignore. print STDERR "Warning(${file}:$.): bad line: $_"; - ++$errors; + ++$warnings; } } elsif ($state == 3) { # scanning for function { (end of prototype) if ($decl_type eq 'function') {
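For reference, the scripts/docproc.c hunk above switches from overwriting to OR-ing child exit statuses, so one failing kernel-doc run is no longer masked by a later successful one. A minimal standalone sketch of that accumulation pattern (run_child() is a hypothetical fork/exec helper, not docproc's real code):

	#include <sys/types.h>
	#include <sys/wait.h>

	extern pid_t run_child(const char *cmd);	/* hypothetical fork/exec */

	/* Illustrative only: accumulate child exit codes with |=. */
	static int run_all(const char * const *cmds, int n)
	{
		int exitstatus = 0;
		int i, ret;

		for (i = 0; i < n; i++) {
			pid_t pid = run_child(cmds[i]);

			if (pid < 0 || waitpid(pid, &ret, 0) < 0)
				return 0xff;
			if (WIFEXITED(ret))
				exitstatus |= WEXITSTATUS(ret);	/* keep any failure */
			else
				exitstatus = 0xff;		/* abnormal termination */
		}
		return exitstatus;
	}
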