# This is a BitKeeper generated patch for the following project: # Project Name: Linux kernel tree # This patch format is intended for GNU patch command version 2.5 or higher. # This patch includes the following deltas: # ChangeSet 1.1347 -> 1.1360 # arch/i386/kernel/process.c 1.57 -> 1.58 # drivers/video/fbmem.c 1.73.1.6 -> 1.99 # fs/nfsd/nfs3xdr.c 1.39 -> 1.40 # drivers/video/tcx.c 1.6 -> 1.8 # drivers/video/sis/vgatypes.h 1.3 -> 1.8 # drivers/net/3c59x.c 1.43 -> 1.44 # fs/cifs/file.c 1.33 -> 1.40 # drivers/video/sis/oem310.h 1.4 -> 1.9 # fs/nfsd/stats.c 1.5 -> 1.6 # drivers/video/sis/osdef.h 1.3 -> 1.4 # include/asm-ia64/sn/pci/pcibr_private.h 1.10 -> 1.11 # drivers/media/dvb/ttpci/av7110_firm.h 1.3 -> 1.4 # arch/ia64/sn/io/sn2/ml_iograph.c 1.5 -> 1.6 # drivers/media/dvb/frontends/tda1004x.c 1.3 -> 1.4 # drivers/base/firmware_class.c 1.5 -> 1.6 # include/asm-i386/system.h 1.29 -> 1.30 # arch/ia64/sn/io/sn2/xtalk.c 1.2 -> 1.3 # fs/libfs.c 1.26 -> 1.27 # include/asm-ia64/sn/sgi.h 1.7 -> 1.8 # include/linux/ext3_fs_i.h 1.8 -> 1.9 # drivers/pci/quirks.c 1.34 -> 1.35 # drivers/input/serio/serio.c 1.20 -> 1.23 # drivers/char/Kconfig 1.24 -> 1.25 # drivers/video/Kconfig 1.22.1.8 -> 1.34 # include/linux/buffer_head.h 1.44 -> 1.45 # fs/cifs/AUTHORS 1.4 -> 1.5 # arch/ia64/sn/io/sn2/klconflib.c 1.3 -> 1.4 # fs/xfs/pagebuf/page_buf.c 1.68 -> 1.69 # arch/ia64/kernel/efi.c 1.24 -> 1.25 # include/asm-i386/processor.h 1.57 -> 1.58 # include/linux/mm.h 1.133 -> 1.135 # drivers/video/sis/init301.c 1.4.1.1 -> 1.11 # include/asm-i386/pgalloc.h 1.20 -> 1.21 # init/initramfs.c 1.11 -> 1.12 # drivers/ide/pci/piix.c 1.19 -> 1.20 # drivers/video/epson1355fb.c 1.13 -> 1.14 # mm/page_alloc.c 1.174 -> 1.175 # mm/readahead.c 1.38 -> 1.39 # init/do_mounts_initrd.c 1.5 -> 1.6 # drivers/video/vesafb.c 1.31.1.2 -> 1.35 # arch/i386/kernel/smp.c 1.33 -> 1.34 # fs/ext3/Makefile 1.11 -> 1.12 # drivers/video/pvr2fb.c 1.14.1.1 -> 1.16 # include/linux/init_task.h 1.27 -> 1.29 # 
include/linux/resource.h 1.2 -> 1.3 # include/linux/sched.h 1.173 -> 1.175 # kernel/fork.c 1.145 -> 1.146 # drivers/video/g364fb.c 1.20.1.1 -> 1.23 # drivers/block/ll_rw_blk.c 1.219 -> 1.220 # kernel/sysctl.c 1.55 -> 1.56 # include/linux/writeback.h 1.23 -> 1.24 # drivers/video/macfb.c 1.26.1.1 -> 1.29 # drivers/video/Makefile 1.84.1.4 -> 1.95 # include/net/tcp.h 1.51 -> 1.52 # drivers/net/tlan.c 1.29 -> 1.30 # arch/ia64/kernel/setup.c 1.62 -> 1.63 # mm/vmscan.c 1.173 -> 1.174 # drivers/net/smc-ultra.c 1.17 -> 1.18 # arch/i386/Kconfig 1.91 -> 1.92 # arch/h8300/Makefile 1.5 -> 1.6 # include/linux/elf.h 1.26 -> 1.27 # include/asm-i386/page.h 1.24 -> 1.25 # drivers/media/dvb/dvb-core/dvb_demux.c 1.9 -> 1.10 # drivers/video/sis/300vtbl.h 1.4 -> 1.8 # arch/i386/kernel/traps.c 1.63 -> 1.65 # init/main.c 1.109 -> 1.111 # drivers/video/skeletonfb.c 1.23 -> 1.24 # include/linux/ioport.h 1.12 -> 1.13 # fs/binfmt_aout.c 1.18 -> 1.19 # fs/cifs/cifsglob.h 1.14 -> 1.15 # drivers/video/controlfb.c 1.26.1.1 -> 1.30 # drivers/video/cyber2000fb.c 1.31 -> 1.33 # include/asm-ia64/sn/arc/hinv.h 1.4 -> (deleted) # fs/cifs/cifssmb.c 1.28 -> 1.30 # include/linux/blkdev.h 1.127 -> 1.129 # drivers/input/mouse/psmouse-base.c 1.34 -> 1.35 # include/asm-i386/spinlock.h 1.10 -> 1.11 # include/asm-ia64/perfmon_default_smpl.h 1.2 -> 1.3 # include/asm-i386/mach-default/irq_vectors.h 1.6 -> 1.8 # drivers/video/sa1100fb.c 1.29.1.1 -> 1.31 # include/linux/fs.h 1.274 -> 1.276 # drivers/video/sis/sis_main.h 1.10 -> 1.17 # arch/i386/kernel/entry.S 1.69 -> 1.72 # drivers/video/tdfxfb.c 1.42.1.2 -> 1.48 # include/linux/linux_logo.h 1.5 -> 1.6 # include/linux/quotaops.h 1.14 -> 1.15 # net/ipv4/tcp.c 1.48 -> 1.49 # arch/ia64/sn/io/machvec/pci.c 1.11 -> 1.12 # drivers/video/cg6.c 1.5 -> 1.7 # drivers/video/sis/init.c 1.4 -> 1.9 # arch/ia64/mm/numa.c 1.3 -> 1.5 # drivers/video/sis/oem300.h 1.3 -> 1.6 # arch/ia64/sn/io/machvec/pci_dma.c 1.16 -> 1.17 # fs/cifs/transport.c 1.19 -> 1.21 # fs/jbd/transaction.c 
1.76 -> 1.77 # include/linux/fb.h 1.51.1.2 -> 1.61 # mm/bootmem.c 1.22 -> 1.23 # include/asm-ia64/sn/klconfig.h 1.6 -> 1.7 # drivers/video/aty/Makefile 1.10 -> 1.11 # drivers/video/i810/i810_main.c 1.12.1.2 -> 1.17 # drivers/video/i810/Makefile 1.2 -> 1.3 # arch/i386/kernel/io_apic.c 1.81 -> 1.82 # fs/ramfs/inode.c 1.37 -> 1.38 # include/video/tdfx.h 1.3 -> 1.4 # mm/filemap.c 1.210 -> 1.211 # drivers/input/mouse/synaptics.c 1.9 -> 1.12 # drivers/ide/pci/siimage.c 1.17 -> 1.18 # arch/arm/Makefile 1.53 -> 1.54 # drivers/net/e1000/e1000_main.c 1.92 -> 1.93 # include/video/neomagic.h 1.3 -> 1.5 # include/linux/font.h 1.8 -> 1.9 # include/linux/ide.h 1.76 -> 1.77 # fs/exec.c 1.99 -> 1.100 # fs/binfmt_som.c 1.3 -> 1.4 # drivers/video/platinumfb.c 1.20.1.2 -> 1.24 # kernel/signal.c 1.98 -> 1.99 # arch/i386/boot/Makefile 1.28 -> 1.29 # include/video/sisfb.h 1.1.1.1 -> 1.7 # mm/mprotect.c 1.25 -> 1.26 # init/Makefile 1.26 -> 1.27 # drivers/ide/ide-disk.c 1.62 -> 1.63 # include/linux/pci_ids.h 1.107.1.16 -> 1.119 # arch/ia64/mm/init.c 1.51 -> 1.52 # mm/shmem.c 1.135 -> 1.141 # drivers/video/sis/sis_accel.h 1.3 -> 1.5 # include/video/aty128.h 1.3 -> 1.4 # drivers/video/riva/nv_type.h 1.1 -> 1.2 # drivers/video/logo/Kconfig 1.3 -> 1.5 # include/linux/input.h 1.39 -> 1.40 # drivers/video/vgastate.c 1.3.1.1 -> 1.6 # arch/ia64/sn/io/io.c 1.8 -> 1.9 # fs/binfmt_elf.c 1.59 -> 1.60 # mm/memory.c 1.139 -> 1.140 # drivers/net/irda/Makefile 1.19 -> 1.20 # drivers/video/chipsfb.c 1.19.1.1 -> 1.22 # arch/ia64/kernel/ia64_ksyms.c 1.33 -> 1.34 # drivers/video/console/Makefile 1.15 -> 1.17 # drivers/video/modedb.c 1.10 -> 1.11 # drivers/net/e100/e100_main.c 1.86 -> 1.87 # drivers/input/mouse/synaptics.h 1.3 -> 1.4 # arch/ia64/kernel/process.c 1.46 -> 1.47 # arch/i386/kernel/setup.c 1.100 -> 1.102 # arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c 1.9 -> 1.10 # fs/cifs/cifs_debug.c 1.12 -> 1.14 # Documentation/DocBook/Makefile 1.42 -> 1.43 # include/linux/tcp.h 1.12 -> 1.13 # 
drivers/video/aty/aty128fb.c 1.39.1.1 -> 1.42 # include/linux/sunrpc/stats.h 1.4 -> 1.5 # init/version.c 1.5 -> 1.6 # fs/cifs/smbencrypt.c 1.6 -> 1.7 # drivers/acpi/utilities/utdelete.c 1.19 -> 1.20 # drivers/video/sgivwfb.c 1.29 -> 1.31 # drivers/video/imsttfb.c 1.27.1.2 -> 1.33 # arch/ia64/sn/io/platform_init/irix_io_init.c 1.2 -> 1.3 # drivers/video/sis/init301.h 1.4 -> 1.9 # arch/ia64/sn/io/sgi_if.c 1.6 -> 1.7 # drivers/input/joydev.c 1.19 -> 1.20 # drivers/media/video/tea6420.c 1.3 -> 1.4 # fs/cifs/smbdes.c 1.2 -> 1.3 # include/linux/spinlock.h 1.27 -> 1.28 # fs/ext3/ialloc.c 1.31 -> 1.32 # drivers/video/tridentfb.c 1.11.1.1 -> 1.14 # net/core/dev.c 1.119 -> 1.120 # arch/ia64/sn/io/sn2/shub_intr.c 1.4 -> 1.5 # drivers/ide/Makefile 1.13 -> 1.14 # drivers/video/softcursor.c 1.44 -> 1.49 # drivers/acpi/power.c 1.16 -> 1.17 # include/linux/binfmts.h 1.9 -> 1.10 # include/asm-ia64/sn/ioerror_handling.h 1.5 -> 1.6 # arch/ia64/sn/io/sn2/shub.c 1.7 -> 1.8 # arch/ia64/sn/io/hwgfs/hcl.c 1.7 -> 1.8 # drivers/md/raid1.c 1.72 -> 1.73 # drivers/video/cfbimgblt.c 1.27.1.1 -> 1.29 # arch/i386/kernel/nmi.c 1.23 -> 1.24 # drivers/input/input.c 1.40 -> 1.41 # drivers/md/raid0.c 1.38 -> 1.39 # drivers/acpi/ec.c 1.25 -> 1.26 # fs/cifs/cifsfs.c 1.32 -> 1.35 # mm/mremap.c 1.33 -> 1.34 # include/linux/sysctl.h 1.52 -> 1.54 # arch/ia64/ia32/ia32_entry.S 1.28 -> 1.29 # drivers/video/igafb.c 1.18 -> 1.19 # fs/Kconfig 1.39 -> 1.40 # fs/buffer.c 1.215 -> 1.217 # drivers/block/loop.c 1.110 -> 1.111 # fs/Makefile 1.59 -> 1.60 # drivers/video/logo/logo.c 1.5.1.2 -> 1.13 # include/linux/serio.h 1.18 -> 1.20 # include/linux/miscdevice.h 1.10 -> 1.11 # include/asm-i386/smp.h 1.29 -> 1.30 # include/linux/ext3_fs.h 1.30 -> 1.31 # drivers/acpi/events/evgpe.c 1.14 -> 1.15 # drivers/net/tulip/tulip_core.c 1.49 -> 1.50 # drivers/video/bw2.c 1.6 -> 1.8 # fs/proc/array.c 1.55 -> 1.56 # MAINTAINERS 1.139.1.35 -> 1.172 # arch/ia64/sn/io/hwgfs/labelcl.c 1.1 -> 1.2 # drivers/video/sis/310vtbl.h 1.4 -> 1.9 
# drivers/acpi/bus.c 1.34 -> 1.35 # arch/ia64/lib/csum_partial_copy.c 1.5 -> 1.6 # drivers/video/sis/initdef.h 1.4 -> 1.9 # include/asm-i386/mmu.h 1.5 -> 1.6 # drivers/video/neofb.c 1.29.1.1 -> 1.34 # arch/ia64/sn/io/sn2/geo_op.c 1.1 -> 1.2 # mm/mmap.c 1.92 -> 1.93 # fs/proc/base.c 1.59 -> 1.60 # arch/ia64/sn/io/sn2/klgraph.c 1.3 -> 1.4 # fs/cifs/cifsencrypt.c 1.6 -> 1.7 # drivers/video/sstfb.c 1.27.1.1 -> 1.31 # arch/s390/boot/install.sh 1.1 -> 1.2 # fs/cifs/dir.c 1.17 -> 1.19 # drivers/input/mouse/logips2pp.c 1.2 -> 1.3 # drivers/video/cg3.c 1.6 -> 1.8 # include/asm-i386/unistd.h 1.30 -> 1.33 # arch/ia64/sn/io/drivers/ioconfig_bus.c 1.6 -> 1.7 # drivers/net/eepro100.c 1.68 -> 1.69 # drivers/video/aty/atyfb.h 1.14 -> 1.15 # drivers/media/dvb/ttpci/av7110.c 1.12 -> 1.13 # kernel/resource.c 1.16 -> 1.17 # Makefile 1.436 -> 1.437 # fs/ext3/inode.c 1.88 -> 1.89 # drivers/video/sis/vstruct.h 1.4 -> 1.9 # include/asm-i386/elf.h 1.11 -> 1.12 # net/ipv4/tcp_output.c 1.32 -> 1.33 # drivers/video/sis/sis_main.c 1.21.1.2 -> 1.34 # arch/ia64/sn/io/sn2/bte_error.c 1.3 -> 1.4 # drivers/video/matrox/matroxfb_base.c 1.42 -> 1.43 # net/wanrouter/wanmain.c 1.20 -> 1.21 # drivers/video/console/fbcon.c 1.100.1.2 -> 1.116 # drivers/video/aty/atyfb_base.c 1.58 -> 1.59 # include/linux/page-flags.h 1.42 -> 1.43 # drivers/media/video/tda9840.c 1.3 -> 1.4 # drivers/char/vt_ioctl.c 1.22.1.9 -> 1.33 # drivers/video/riva/fbdev.c 1.45.1.5 -> 1.59 # init/do_mounts_rd.c 1.7 -> 1.8 # drivers/video/tgafb.c 1.25 -> 1.28 # drivers/video/p9100.c 1.6 -> 1.8 # drivers/video/console/fbcon.h 1.30.1.1 -> 1.34 # include/asm-i386/kmap_types.h 1.14 -> 1.15 # net/ipv4/sysctl_net_ipv4.c 1.13 -> 1.14 # drivers/video/logo/Makefile 1.2.1.1 -> 1.7 # drivers/media/video/tea6415c.c 1.3 -> 1.4 # include/asm-arm/arch-shark/hardware.h 1.4 -> 1.5 # drivers/net/irda/Kconfig 1.12 -> 1.13 # net/ipv4/tcp_input.c 1.46 -> 1.47 # arch/i386/mm/init.c 1.54 -> 1.55 # net/ipv4/tcp_minisocks.c 1.42 -> 1.43 # drivers/acpi/battery.c 
1.16 -> 1.17 # drivers/media/video/bttv-driver.c 1.35 -> 1.36 # net/ipv4/Kconfig 1.11 -> 1.12 # arch/ia64/sn/io/machvec/pci_bus_cvlink.c 1.14 -> 1.15 # arch/h8300/Kconfig 1.10 -> 1.11 # kernel/sched.c 1.221 -> 1.222 # drivers/Makefile 1.40 -> 1.41 # drivers/scsi/Makefile 1.48 -> 1.49 # init/Kconfig 1.28 -> 1.29 # include/linux/jbd.h 1.38 -> 1.39 # drivers/ide/ide-io.c 1.20 -> 1.21 # drivers/video/matrox/matroxfb_crtc2.c 1.27 -> 1.29 # drivers/video/sis/init.h 1.4 -> 1.9 # arch/ia64/kernel/perfmon_default_smpl.c 1.2 -> 1.3 # drivers/video/valkyriefb.c 1.19.1.1 -> 1.22 # arch/arm/mm/proc-xscale.S 1.24 -> 1.25 # arch/ia64/lib/checksum.c 1.4 -> 1.5 # net/sunrpc/sunrpc_syms.c 1.23 -> 1.24 # arch/i386/kernel/acpi/boot.c 1.32 -> 1.33 # include/asm-ia64/sn/arc/types.h 1.5 -> (deleted) # drivers/video/leo.c 1.5 -> 1.7 # arch/ia64/kernel/irq.c 1.28 -> 1.29 # fs/cifs/CHANGES 1.36 -> 1.42 # drivers/video/cirrusfb.c 1.24 -> 1.25 # drivers/input/mouse/psmouse.h 1.3 -> 1.4 # fs/jbd/commit.c 1.40 -> 1.41 # drivers/video/fbmon.c 1.9 -> 1.10 # fs/hugetlbfs/inode.c 1.34 -> 1.35 # fs/ext3/super.c 1.79 -> 1.80 # drivers/scsi/Kconfig 1.41 -> 1.42 # fs/cifs/connect.c 1.30 -> 1.38 # include/linux/ext3_fs_sb.h 1.7 -> 1.8 # arch/ia64/kernel/perfmon.c 1.63 -> 1.64 # include/linux/netdevice.h 1.63 -> 1.64 # fs/cifs/README 1.5 -> 1.6 # drivers/video/aty/mach64_cursor.c 1.10 -> 1.11 # kernel/panic.c 1.14 -> 1.15 # mm/truncate.c 1.11 -> 1.12 # drivers/char/Makefile 1.60 -> 1.61 # drivers/video/cg14.c 1.8 -> 1.10 # drivers/video/acornfb.c 1.23 -> 1.24 # fs/cifs/misc.c 1.13 -> 1.14 # drivers/media/common/saa7146_i2c.c 1.7 -> 1.8 # drivers/video/radeonfb.c 1.30 -> 1.33 # fs/inode.c 1.108 -> 1.109 # include/asm-i386/hw_irq.h 1.24 -> 1.25 # arch/i386/kernel/i386_ksyms.c 1.56 -> 1.57 # net/sunrpc/stats.c 1.8 -> 1.9 # drivers/video/pm2fb.c 1.22 -> 1.23 # fs/fs-writeback.c 1.41 -> 1.42 # drivers/video/sis/sis_accel.c 1.5 -> 1.8 # drivers/video/stifb.c 1.17.1.2 -> 1.20 # scripts/mkcompile_h 1.17 -> 1.18 
# drivers/video/ffb.c 1.7 -> 1.9 # (new) -> 1.1 fs/reiser4/crab_lock.h # (new) -> 1.1 fs/reiser4/plugin/item/extent.h # (new) -> 1.1 fs/reiser4/parser/parser.h # (new) -> 1.1 fs/reiser4/yet_unneeded_abstractions/oid/oid.h # (new) -> 1.1 drivers/dump/dump_memdev.c # (new) -> 1.1 fs/reiser4/plugin/plugin_header.h # (new) -> 1.1 fs/reiser4/scint.h # (new) -> 1.1 fs/reiser4/block_alloc.h # (new) -> 1.1 drivers/dump/dump_i386.c # (new) -> 1.1 fs/reiser4/parser/tmp.c # (new) -> 1.1 fs/reiser4/plugin/item/tail.h # (new) -> 1.1 fs/reiser4/plugin/node/node.c # (new) -> 1.1 drivers/scsi/sata_sil.c # (new) -> 1.1 drivers/scsi/libata-core.c # (new) -> 1.1 fs/reiser4/as_ops.c # (new) -> 1.1 include/asm-i386/dump.h # (new) -> 1.1 fs/reiser4/plugin/item/tail.c # (new) -> 1.1 fs/reiser4/doc/reiser4.writeback.overview # (new) -> 1.1 fs/reiser4/yet_unneeded_abstractions/oid/oid40.h # (new) -> 1.1 drivers/dump/dump_filters.c # (new) -> 1.1 drivers/dump/dump_gzip.c # (new) -> 1.1 fs/reiser4/dformat.h # (new) -> 1.1 fs/reiser4/ktxnmgrd.h # (new) -> 1.1 fs/reiser4/plugin/dir/dir.c # (new) -> 1.1 fs/reiser4/plugin/item/extent.c # (new) -> 1.1 drivers/video/console/elpp.c # (new) -> 1.1 drivers/net/irda/stir4200.c # (new) -> 1.1 fs/reiser4/parser/pars.yacc.h # (new) -> 1.1 fs/reiser4/spin_macros.h # (new) -> 1.1 include/video/epson1355.h # (new) -> 1.1 fs/reiser4/ioctl.h # (new) -> 1.1 fs/reiser4/znode.h # (new) -> 1.1 include/linux/libata.h # (new) -> 1.1 fs/ext3/extents.c # (new) -> 1.1 fs/reiser4/plugin/hash.c # (new) -> 1.1 fs/reiser4/tree_walk.c # (new) -> 1.1 include/asm-i386/kexec.h # (new) -> 1.1 drivers/video/asiliantfb.c # (new) -> 1.1 fs/reiser4/lnode.h # (new) -> 1.1 include/linux/cpuset.h # (new) -> 1.1 fs/reiser4/plugin/item/ctail.c # (new) -> 1.1 fs/reiser4/plugin/item/sde.c # (new) -> 1.1 fs/reiser4/doc/lock-ordering # (new) -> 1.1 fs/reiser4/vfs_ops.h # (new) -> 1.1 fs/reiser4/coord.c # (new) -> 1.1 fs/reiser4/plugin/node/node.h # (new) -> 1.1 
fs/reiser4/yet_unneeded_abstractions/oid/oid40.c # (new) -> 1.1 fs/reiser4/plugin/item/static_stat.c # (new) -> 1.1 fs/reiser4/prof.c # (new) -> 1.1 fs/reiser4/dscale.h # (new) -> 1.1 fs/reiser4/doc/directory-service # (new) -> 1.1 fs/reiser4/plugin/item/sde.h # (new) -> 1.1 fs/reiser4/tree.c # (new) -> 1.1 fs/reiser4/flush.c # (new) -> 1.1 fs/reiser4/plugin/space/bitmap.c # (new) -> 1.1 fs/reiser4/kattr.h # (new) -> 1.1 fs/reiser4/plugin/name/invterp.c # (new) -> 1.1 arch/i386/kernel/relocate_kernel.S # (new) -> 1.1 fs/reiser4/spinprof.c # (new) -> 1.1 fs/reiser4/dscale.c # (new) -> 1.1 fs/reiser4/plugin/object.c # (new) -> 1.1 drivers/scsi/sata_via.c # (new) -> 1.1 include/linux/kexec.h # (new) -> 1.1 drivers/scsi/libata.h # (new) -> 1.1 fs/reiser4/scint.c # (new) -> 1.1 fs/reiser4/compress.c # (new) -> 1.1 Documentation/fb/elpp.txt # (new) -> 1.1 fs/reiser4/seal.c # (new) -> 1.1 fs/reiser4/page_cache.c # (new) -> 1.1 fs/reiser4/doc/lock-ordering.dot # (new) -> 1.1 fs/reiser4/status_flags.h # (new) -> 1.1 fs/reiser4/plugin/dir/dir.h # (new) -> 1.1 include/linux/dump_netdev.h # (new) -> 1.1 fs/reiser4/debug.c # (new) -> 1.1 fs/reiser4/forward.h # (new) -> 1.1 fs/reiser4/plugin/flush/flush.alg # (new) -> 1.1 fs/reiser4/plugin/item/ctail.h # (new) -> 1.1 include/linux/dumpdev.h # (new) -> 1.1 fs/reiser4/plugin/file/file.h # (new) -> 1.1 fs/reiser4/oid.c # (new) -> 1.1 fs/reiser4/plugin/security/perm.c # (new) -> 1.1 fs/reiser4/plugin/file/pseudo.h # (new) -> 1.1 fs/reiser4/kcond.c # (new) -> 1.1 fs/reiser4/plugin/disk_format/disk_format.h # (new) -> 1.1 fs/reiser4/plugin/item/extent_repack_ops.c # (new) -> 1.1 drivers/dump/dump_scheme.c # (new) -> 1.1 fs/reiser4/trace.c # (new) -> 1.1 fs/reiser4/doc/page-cache-for-formatted-nodes # (new) -> 1.1 fs/reiser4/doc/readdir-problems-and-implementations # (new) -> 1.1 fs/reiser4/readahead.h # (new) -> 1.1 drivers/dump/dump_fmt.c # (new) -> 1.1 fs/reiser4/plugin/item/cde.c # (new) -> 1.1 fs/reiser4/tree_mod.c # (new) -> 1.1 
fs/reiser4/diskmap.c # (new) -> 1.1 fs/reiser4/jnode.c # (new) -> 1.1 fs/reiser4/parser/pars.cls.h # (new) -> 1.1 fs/reiser4/tree_mod.h # (new) -> 1.1 fs/reiser4/txnmgr.c # (new) -> 1.1 fs/reiser4/entd.c # (new) -> 1.1 fs/reiser4/plugin/space/bitmap.h # (new) -> 1.1 fs/reiser4/reiser4.h # (new) -> 1.1 init/kerntypes.c # (new) -> 1.1 fs/reiser4/plugin/disk_format/disk_format40.c # (new) -> 1.1 fs/reiser4/parser/parser.doc # (new) -> 1.1 drivers/scsi/libata-scsi.c # (new) -> 1.1 fs/reiser4/plugin/item/item.c # (new) -> 1.1 kernel/kexec.c # (new) -> 1.1 fs/reiser4/plugin/dir/hashed_dir.c # (new) -> 1.1 fs/reiser4/tree_walk.h # (new) -> 1.1 include/linux/cpuset_types.h # (new) -> 1.1 Documentation/fb/neofb.txt # (new) -> 1.1 Documentation/DocBook/libata.tmpl # (new) -> 1.1 fs/reiser4/parser/parser.y # (new) -> 1.1 fs/reiser4/super.h # (new) -> 1.1 fs/reiser4/crypt.c # (new) -> 1.1 fs/reiser4/emergency_flush.h # (new) -> 1.1 drivers/scsi/ata_piix.c # (new) -> 1.1 fs/reiser4/plugin/dir/hashed_dir.h # (new) -> 1.1 fs/reiser4/plugin/item/extent_item_ops.c # (new) -> 1.1 fs/reiser4/inode.h # (new) -> 1.1 fs/reiser4/regression.sh # (new) -> 1.1 drivers/dump/dump_setup.c # (new) -> 1.1 fs/reiser4/plugin/space/space_allocator.h # (new) -> 1.1 drivers/dump/dump_overlay.c # (new) -> 1.1 fs/reiser4/eottl.c # (new) -> 1.1 fs/reiser4/lnode.c # (new) -> 1.1 fs/reiser4/linux-5_reiser4_syscall.patch # (new) -> 1.1 fs/reiser4/plugin/cryptcompress.h # (new) -> 1.1 include/linux/dump.h # (new) -> 1.1 fs/reiser4/emergency_flush.c # (new) -> 1.1 fs/reiser4/plugin/plugin_set.h # (new) -> 1.1 fs/reiser4/diskmap.h # (new) -> 1.1 fs/reiser4/interpolate.c # (new) -> 1.1 fs/reiser4/plugin/node/node40.c # (new) -> 1.1 fs/reiser4/plugin/symlink.h # (new) -> 1.1 fs/reiser4/tap.c # (new) -> 1.1 fs/reiser4/type_safe_list.h # (new) -> 1.1 fs/reiser4/doc/oid-locid # (new) -> 1.1 fs/reiser4/trace.h # (new) -> 1.1 fs/reiser4/wander.c # (new) -> 1.1 fs/reiser4/parser/lib.c # (new) -> 1.1 
fs/reiser4/txnmgr.h # (new) -> 1.1 fs/reiser4/inode.c # (new) -> 1.1 fs/reiser4/plugin/file/funcs.h # (new) -> 1.1 fs/reiser4/pool.c # (new) -> 1.1 fs/reiser4/page_cache.h # (new) -> 1.1 fs/reiser4/znode.c # (new) -> 1.1 drivers/video/console/symbols_16x16.c # (new) -> 1.1 kernel/cpuset.c # (new) -> 1.1 fs/reiser4/plugin/plugin.h # (new) -> 1.1 drivers/scsi/sata_svw.c # (new) -> 1.1 include/linux/ata.h # (new) -> 1.1 fs/reiser4/plugin/item/internal.h # (new) -> 1.1 fs/reiser4/search.c # (new) -> 1.1 drivers/video/logo/logo_elpp.c # (new) -> 1.1 fs/reiser4/bufmgr/wander.txt # (new) -> 1.1 fs/reiser4/init_super.h # (new) -> 1.1 fs/reiser4/key.c # (new) -> 1.1 fs/reiser4/wander.h # (new) -> 1.1 fs/reiser4/entd.h # (new) -> 1.1 fs/reiser4/kassign.h # (new) -> 1.1 fs/reiser4/kattr.c # (new) -> 1.1 fs/reiser4/parser/r4.dif # (new) -> 1.1 fs/reiser4/tap.h # (new) -> 1.1 fs/reiser4/plugin/file/pseudo.c # (new) -> 1.1 fs/reiser4/pool.h # (new) -> 1.1 fs/reiser4/repacker.h # (new) -> 1.1 fs/reiser4/parser/lex.l # (new) -> 1.1 fs/reiser4/statcnt.h # (new) -> 1.1 fs/reiser4/seal.h # (new) -> 1.1 fs/reiser4/blocknrset.c # (new) -> 1.1 fs/reiser4/ktxnmgrd.c # (new) -> 1.1 drivers/dump/Makefile # (new) -> 1.1 fs/reiser4/plugin/file/tail_conversion.c # (new) -> 1.1 fs/reiser4/super.c # (new) -> 1.1 drivers/scsi/sata_promise.c # (new) -> 1.1 drivers/dump/dump_rle.c # (new) -> 1.1 drivers/video/console/elpp.h # (new) -> 1.1 fs/reiser4/parser/lib.h # (new) -> 1.1 fs/reiser4/latch.c # (new) -> 1.1 fs/reiser4/plugin/file/file.c # (new) -> 1.1 fs/reiser4/repacker.c # (new) -> 1.1 fs/reiser4/stats.h # (new) -> 1.1 fs/reiser4/type_safe_hash.h # (new) -> 1.1 fs/reiser4/parser/Makefile # (new) -> 1.1 fs/reiser4/plugin/plugin_set.c # (new) -> 1.1 fs/reiser4/latch.h # (new) -> 1.1 fs/reiser4/spinprof.h # (new) -> 1.1 include/video/cvisionppc.h # (new) -> 1.1 fs/reiser4/plugin/dir/pseudo_dir.h # (new) -> 1.1 fs/reiser4/readahead.c # (new) -> 1.1 fs/reiser4/plugin/dir/pseudo_dir.c # (new) -> 
1.1 fs/reiser4/plugin/file/invert.c # (new) -> 1.1 fs/reiser4/plugin/item/static_stat.h # (new) -> 1.1 drivers/net/irda/stir4200.h # (new) -> 1.1 fs/reiser4/README # (new) -> 1.1 fs/reiser4/plugin/security/perm.h # (new) -> 1.1 drivers/char/sn_serial.c # (new) -> 1.1 fs/reiser4/kcond.h # (new) -> 1.1 fs/reiser4/plugin/item/extent_file_ops.c # (new) -> 1.1 fs/reiser4/plugin/node/node40.h # (new) -> 1.1 arch/i386/kernel/machine_kexec.c # (new) -> 1.1 fs/reiser4/estimate.c # (new) -> 1.1 fs/reiser4/plugin/pseudo/pseudo.c # (new) -> 1.1 fs/reiser4/yet_unneeded_abstractions/oid/oid.c # (new) -> 1.1 fs/reiser4/plugin/plugin.c # (new) -> 1.1 fs/reiser4/plugin/plugin_hash.h # (new) -> 1.1 fs/reiser4/doc/metadata-in-pagecache # (new) -> 1.1 fs/reiser4/file_ops.c # (new) -> 1.1 fs/reiser4/lock.h # (new) -> 1.1 fs/reiser4/status_flags.c # (new) -> 1.1 fs/reiser4/plugin/item/cde.h # (new) -> 1.1 fs/reiser4/plugin/symlink.c # (new) -> 1.1 fs/reiser4/carry_ops.h # (new) -> 1.1 fs/reiser4/doc/sys-reiser4-implemenation-overview # (new) -> 1.1 drivers/dump/dump_arm.c # (new) -> 1.1 fs/reiser4/lib.h # (new) -> 1.1 fs/reiser4/plugin/security/acl.c # (new) -> 1.1 fs/reiser4/Makefile.user-level # (new) -> 1.1 fs/reiser4/prof.h # (new) -> 1.1 fs/reiser4/plugin/item/extent_flush_ops.c # (new) -> 1.1 fs/reiser4/plugin/plugin_hash.c # (new) -> 1.1 fs/reiser4/kassign.c # (new) -> 1.1 fs/reiser4/carry_ops.c # (new) -> 1.1 fs/reiser4/debug.h # (new) -> 1.1 fs/reiser4/context.c # (new) -> 1.1 fs/reiser4/key.h # (new) -> 1.1 fs/reiser4/plugin/cryptcompress.c # (new) -> 1.1 fs/reiser4/crab_lock.c # (new) -> 1.1 fs/reiser4/stats.c # (new) -> 1.1 fs/reiser4/cluster.c # (new) -> 1.1 fs/reiser4/sys_reiser4.c # (new) -> 1.1 fs/reiser4/Makefile # (new) -> 1.1 fs/reiser4/plugin/pseudo/pseudo.h # (new) -> 1.1 fs/reiser4/plugin/object.h # (new) -> 1.1 fs/reiser4/inode_ops.c # (new) -> 1.1 fs/reiser4/lock.c # (new) -> 1.1 fs/reiser4/plugin/item/internal.c # (new) -> 1.1 fs/reiser4/coord.h # (new) -> 1.1 
fs/reiser4/plugin/item/item.h # (new) -> 1.1 drivers/dump/dump_methods.h # (new) -> 1.1 fs/reiser4/vfs_ops.c # (new) -> 1.1 drivers/dump/dump_execute.c # (new) -> 1.1 fs/reiser4/plugin/digest.c # (new) -> 1.1 fs/reiser4/syntax.alg # (new) -> 1.1 fs/reiser4/plugin/file/symfile.c # (new) -> 1.1 fs/reiser4/tree.h # (new) -> 1.1 fs/reiser4/flush_queue.c # (new) -> 1.1 fs/reiser4/plugin/tail_policy.c # (new) -> 1.1 fs/reiser4/jnode.h # (new) -> 1.1 drivers/dump/dump_netdev.c # (new) -> 1.1 fs/reiser4/carry.c # (new) -> 1.1 fs/reiser4/carry.h # (new) -> 1.1 fs/reiser4/init_super.c # (new) -> 1.1 fs/reiser4/plugin/disk_format/disk_format.c # (new) -> 1.1 fs/reiser4/plugin/disk_format/disk_format40.h # (new) -> 1.1 fs/reiser4/context.h # (new) -> 1.1 fs/reiser4/flush.h # (new) -> 1.1 fs/reiser4/block_alloc.c # # The following is the BitKeeper ChangeSet Log # -------------------------------------------- # 03/10/17 jsimmons@infradead.org 1.1344.1.2 # [FBDEV RADEON} The return of fb_setup. # # [FBDEV SGIVW] Match info to code. # -------------------------------------------- # 03/10/17 davidm@tiger.hpl.hp.com 1.1296.88.29 # ia64: Add missing exports to modules build again. # -------------------------------------------- # 03/10/17 davidm@tiger.hpl.hp.com 1.1296.88.30 # ia64: Fix printk format error. # -------------------------------------------- # 03/10/17 davidm@tiger.hpl.hp.com 1.1296.88.31 # ia64: Don't mix code and declarations (not C90-compliant). 
# -------------------------------------------- # 03/10/19 cifs.adm@hostme.bitkeeper.com 1.1348 # Merge bk://linux.bkbits.net/linux-2.5 # into hostme.bitkeeper.com:/repos/c/cifs/linux-2.5cifs # -------------------------------------------- # 03/10/19 len.brown@intel.com 1.1347.1.1 # Merge intel.com:/home/lenb/bk/linux-2.6.0 # into intel.com:/home/lenb/bk/linux-acpi-test-2.6.0 # -------------------------------------------- # 03/10/20 bjorn.helgaas@hp.com 1.1296.88.32 # [PATCH] ia64: fix EFI memory map trimming # # This fixes a problem in EFI memory map trimming. For example, # here's part of the memory map on my i2000: # # mem00: type=4, attr=0x9, range=[0x0000000000000000-0x0000000000001000) (0MB) # mem01: type=7, attr=0x9, range=[0x0000000000001000-0x0000000000088000) (0MB) # mem02: type=4, attr=0x9, range=[0x0000000000088000-0x00000000000a0000) (0MB) # mem03: type=5, attr=0x8000000000000009, range=[0x00000000000c0000-0x0000000000100000) (0MB) # mem04: type=7, attr=0x9, range=[0x0000000000100000-0x0000000004000000) (63MB) # mem05: type=2, attr=0x9, range=[0x0000000004000000-0x00000000049ba000) (9MB) # mem06: type=7, attr=0x9, range=[0x00000000049ba000-0x000000007ec0b000) (1954MB) # ... # # There's a hole at 0xa0000-0xc0000, so we should ignore all the WB memory # in that granule. With 16MB granules, the existing code trims like this # (note the 4K page at 0x0 should have been ignored, but wasn't). # -------------------------------------------- # 03/10/20 davidm@tiger.hpl.hp.com 1.1347.2.1 # Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5 # into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5 # -------------------------------------------- # 03/10/20 jbarnes@sgi.com 1.1347.2.2 # [PATCH] ia64: zero out topology related sysfs nodes # # Tony pointed out (thanks Tony) that in -test8 kobject_set_name() will # try to free a kobject's k_name field if it's non-NULL, so we need to # zero it out in case kmalloc() gave us recycled memory or we'll try to # kfree a bogus area. 
# -------------------------------------------- # 03/10/20 davidm@tiger.hpl.hp.com 1.1347.2.3 # ia64: Sync with i386 irq.c (deadlock avoidance for certain disable_irq()/ # enable_irq() sequences). # -------------------------------------------- # 03/10/20 arun.sharma@intel.com 1.1347.2.4 # [PATCH] ia64: make strace of ia32 processes work again # # Newer versions of strace manipulate the syscall arguments and to make this # work for ia32 processes, we need to reload the syscall args after # doing the syscall-trace callback. # -------------------------------------------- # 03/10/20 eranian@hpl.hp.co 1.1347.2.5 # [PATCH] ia64: two perfmon fixes # # - converts hdr_cur_pos into hdr_cur_offs to avoid leaking kernel # addresses to userland. As a consequence hdr_last_pos is gone. # # - correct buffer saturation mode bug by which the hdr_count would # be reset, leading to no visible samples collected. # -------------------------------------------- # 03/10/20 B.Zolnierkiewicz@elka.pw.edu.pl 1.1347.3.1 # [PATCH] fix drivers/ide/pci/siimage.c for PROC_FS=n # # From: "Noah J. Misch" # # The Silicon Image driver is not building properly when CONFIG_PROC_FS is unset. # This patch corrects that problem. It appears as though several utility # functions at the top of drivers/ide/pci/siimage.c that the driver always needs # accidentally fell within an #ifdef CONFIG_PROC_FS. I also removed an excess # include while I noticed it. # -------------------------------------------- # 03/10/20 kraxel@bytesex.org 1.1347.3.2 # [PATCH] Fix bttv BUG() at video-buf.c:378 # # As found by Herbert Xu: the last v4l update broke bttv. videobuf_iolock # was passed a vb that has just been filled with zeros. # # Fixed like this. # -------------------------------------------- # 03/10/20 neilb@cse.unsw.edu.au 1.1347.3.3 # [PATCH] md - Use sector rather than block numbers when splitting raid0 requests. 
# # When raid0 needs to split a request, it uses 'block' (1K) addresses # rather than sector (512b) addresses, which causes problems if the sector # address is odd. This patch fixes the problem. # # Thanks to Andy Polyakov # -------------------------------------------- # 03/10/20 neilb@cse.unsw.edu.au 1.1347.3.4 # [PATCH] kNFSd - In READDIRPLUS reply, don't return a file handle for a mounted directory. # # ... as if "nohide" is in used, it will be the wrong filehandle, and # returning a filehandle is optional anyway. # -------------------------------------------- # 03/10/20 torvalds@home.osdl.org 1.1349 # Merge bk://cifs.bkbits.net/linux-2.5cifs # into home.osdl.org:/home/torvalds/v2.5/linux # -------------------------------------------- # 03/10/20 len.brown@intel.com 1.1347.1.2 # [ACPI] fix !CONFIG_PCI build # use X86 ACPI specific version of eisa_set_level_irq() # http://bugzilla.kernel.org/show_bug.cgi?id=1390 # -------------------------------------------- # 03/10/20 len.brown@intel.com 1.1347.1.3 # [ACPI] Broken fan detection prevents booting (Shaohua David Li) # http://bugme.osdl.org/show_bug.cgi?id=1185 # -------------------------------------------- # 03/10/20 torvalds@home.osdl.org 1.1350 # Merge http://lia64.bkbits.net/to-linus-2.5 # into home.osdl.org:/home/torvalds/v2.5/linux # -------------------------------------------- # 03/10/20 viro@parcelfarce.linux.theplanet.co.uk 1.1351 # [PATCH] Fix initrd with devfs enabled # # This fixes initrd with devfs. With that combination the late-boot code # does temporary mount of devfs over rootfs /dev, which made /dev/initrd # inaccessible. For setups without devfs that didn't happen. # # The fix is trivial - put the file in question outside of /dev; IOW, # we simply replace "/dev/initrd" with "/initrd.image" in init/*. 
# # Confirmed to fix the problem by Valdis Kletnieks # -------------------------------------------- # 03/10/21 hunold@linuxtv.org 1.1352 # [PATCH] Fix bugs in various DVB drivers # # - DVB networking uses big endian crc32, so change all occurences of # crc32_le to crc32_be # - fix usage of firmware location Kconfig option in tda1004x frontend # driver # - add missing VBI line decoding initialization to saa7113 code for # av7110 driver # - make av7110 firmware static and *not* __initdata, so recover_arm() # can work in case the driver is compiled in statically # -------------------------------------------- # 03/10/21 hunold@linuxtv.org 1.1353 # [PATCH] Fix bug in saa7146 analog tv i2c-handling # # - remove cruft, add I2C_ADAP_CLASS_TV_ANALOG identifier for analog tv # i2c handler # -------------------------------------------- # 03/10/21 hunold@linuxtv.org 1.1354 # [PATCH] Fix bugs in analog tv i2c-helper chipset drivers # # - remove cruft, memset() i2c-client structures in tda9840, tea6420, # tea6415c driver, otherwise i2c_register()/kobject() segfaults later on # -------------------------------------------- # 03/10/21 B.Zolnierkiewicz@elka.pw.edu.pl 1.1355 # [PATCH] fix drivers/ide/pci/cmd640.c for CONFIG_PCI=n # # CMD640 driver also supports VLB version of the chipset, therefore fix # drivers/ide/Makefile to include pci/ subdir even if CONFIG_BLK_DEV_IDEPCI=n. # -------------------------------------------- # 03/10/21 dave.jiang@com.rmk.(none) 1.1351.1.1 # [ARM PATCH] 1691/1: Fix IOP321 platform booting in 2.6 # # Patch from Dave Jiang # # The fix allows IOP321 based platforms to boot all the way instead of # blank screen after "starting kernel...". Inherited from Deepak's # earlier patch. # -------------------------------------------- # 03/10/21 alex@de.rmk.(none) 1.1351.1.2 # [ARM PATCH] 1692/1: Shark: PCIMEM_BASE # # Patch from Alexander Schulz # # This patch adds the define for PCIMEM_BASE so that the shark kernel # compiles again. 
# -------------------------------------------- # 03/10/21 torvalds@home.osdl.org 1.1356 # Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk # into home.osdl.org:/home/torvalds/v2.5/linux # -------------------------------------------- # 03/10/21 jgarzik@redhat.com 1.1351.2.1 # [libata] Merge Serial ATA core, and drivers for: # Intel ICH5 (production) # ServerWorks / Apple K2 (beta) # VIA (beta) # Silicon Image 3112 (broken!) # Various Promise (alpha/beta) # -------------------------------------------- # 03/10/21 jgarzik@redhat.com 1.1351.2.2 # [libata] Integrate Serial ATA driver into kernel tree. # -------------------------------------------- # 03/10/21 jgarzik@redhat.com 1.1357 # Merge bk://kernel.bkbits.net/jgarzik/libata-2.5 # into redhat.com:/spare/repo/libata-2.5-merge # -------------------------------------------- # 03/10/21 jamesclv@us.ibm.com 1.1358 # [PATCH] Allow more APIC irq sources # # The "irq_vector[]" array is indexed by the sum of all RTEs in all I/O # APICs, and is not necessarily limited by the x86 CPU irq vector inputs. # # In fact, the irq vector index would overflow on big machines with lots # of IO APIC's, causing the boot to fail. # # So grow the array for the big SMP boxes, keeping the default the same as # before (and shrink the vector entry size down to a 8-bit value, since # that's the size of the actual CPU vector entry). 
# -------------------------------------------- # 03/10/21 torvalds@home.osdl.org 1.1359 # Merge http://linux-acpi.bkbits.net/linux-acpi-release-2.6.0 # into home.osdl.org:/home/torvalds/v2.5/linux # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.1 # Many files: # Import patch core.diff # # Reiser4 2003.10.17 # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.2 # reiser4.only.diff # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.3 # tmpfs-01-ENAMETOOLONG.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.4 # tmpfs-02-gid-fix.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.5 # tmpfs-03-swapoff-truncate-race.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.6 # tmpfs-04-getpage-truncate-race.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.7 # tmpfs-05-writepage-truncate-race.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.8 # tmpfs-06-i_size_write.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.9 # tmpfs-07-write-mark_page_accessed.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.10 # serio-01-renaming.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.11 # serio-02-race-fix.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.12 # serio-03-blacklist.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.13 # serio-04-synaptics-cleanup.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.14 # serio-05-reconnect-facility.patch # -------------------------------------------- # 03/10/22 
mjc@smp.uni.325i.org 1.1356.1.15 # serio-06-synaptics-use-reconnect.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.16 # Merge http://fbdev.bkbits.net:8080/fbdev-2.5 # into smp.uni.325i.org:/test/mjc-sources # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.17 # Merge http://linux-acpi.bkbits.net/linux-acpi-release-2.6.0 # into smp.uni.325i.org:/test/mjc-sources # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.18 # tcp-vegas.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.19 # stir4200.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.20 # wan-dev-get.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.21 # nfs-seq-file.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.22 # kexec # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.23 # kexec merge. # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.24 # lkcd-lite.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.25 # lkcd-driver.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.26 # dump_netdev.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.27 # 1-ext3-extents.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.28 # 2-exec-shield.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.29 # cpuset merge. 
# -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.30 # patch-test8-am # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.31 # Kconfig, Makefile: # Import patch # # -Os compilation for a few architectures. expect more to follow # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.32 # cpuset stuff, grr. # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.33 # altixcleanup1.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.34 # idebarrier3.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.35 # altixconsole.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.36 # request_firmware_async-workqueue-removal.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.37 # MODULE_ALIAS_BLOCK # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.38 # elpp-2.6.0-0.1.patch # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1356.1.39 # cpuset for the millionth time # -------------------------------------------- # 03/10/22 mjc@smp.uni.325i.org 1.1360 # Merge bk://linux.bkbits.net/linux-2.5 # into smp.uni.325i.org:/test/mjc-sources # -------------------------------------------- # diff -Nru a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile --- a/Documentation/DocBook/Makefile Wed Oct 22 10:40:05 2003 +++ b/Documentation/DocBook/Makefile Wed Oct 22 10:40:05 2003 @@ -12,7 +12,7 @@ deviceiobook.sgml procfs-guide.sgml tulip-user.sgml \ writing_usb_driver.sgml scsidrivers.sgml sis900.sgml \ kernel-api.sgml journal-api.sgml lsm.sgml usb.sgml \ - gadget.sgml + gadget.sgml libata.sgml ### # The build process is as follows (targets): diff -Nru a/Documentation/DocBook/libata.tmpl 
b/Documentation/DocBook/libata.tmpl --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/Documentation/DocBook/libata.tmpl Wed Oct 22 10:40:10 2003 @@ -0,0 +1,91 @@ + + + + + libATA Developer's Guide + + + + Jeff + Garzik + + + + + 2003 + Jeff Garzik + + + + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + + + + + + + + Thanks + + The bulk of the ATA knowledge comes thanks to long conversations with + Andre Hedrick (www.linux-ide.org). + + + Thanks to Alan Cox for pointing out similarities + between SATA and SCSI, and in general for motivation to hack on + libata. + + + libata's device detection + method, ata_pio_devchk, and in general all the early probing was + based on extensive study of Hale Landis's probe/reset code in his + ATADRVR driver (www.ata-atapi.com). 
+ + + + + libata Library +!Edrivers/scsi/libata-core.c +!Edrivers/scsi/libata-scsi.c + + + + libata Internals +!Idrivers/scsi/libata-core.c +!Idrivers/scsi/libata-scsi.c + + + + ata_piix Internals +!Idrivers/scsi/ata_piix.c + + + + ata_sil Internals +!Idrivers/scsi/sata_sil.c + + + + ata_via Internals +!Idrivers/scsi/sata_via.c + + + diff -Nru a/Documentation/fb/elpp.txt b/Documentation/fb/elpp.txt --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/Documentation/fb/elpp.txt Wed Oct 22 10:40:10 2003 @@ -0,0 +1,125 @@ + + ELPP, The Enhanced Linux Progress Patch + --------------------------------------- +What is this? + Hides the bootup messages and shows a fancy logo at boot. It +can however show a configurable number of messages while at the same +time showing the logo and a progress bar. The success, failure and +the warning messages of the initscripts can also be shown here. + +Who needs it? + A few rare people who are not interested in the messages shown when +booting. (like the friend of Cajus Pollmeier, The author of the original +Linux Progress Patch). A few of my friends use it but I dont. + +Requirements: + Requires a vesa capable machine. Linux 2.6 or later are supported. +If you are looking for patch for 2.4, you can have a look at the Linux +Progress Patch at "http://freshmeat.net/projects/lpp/". You +might also be interested in the bootsplash + +How to use it? + -> Download a fresh 2.6.0-Test5 kernel from http://www.kernel.org + (should work for other 2.6.0's too) + -> Download the patch from http://students.iiit.net/~prasad_s/lpp/ + -> Apply the patch... 
+ cd linux-2.6.0 + bzip2 -cd /path/to/patch/elpp2.6.0-0.1.bz2 | patch -p1 + -> Configure and compile the Kernel + + make xconfig; + + and then enable the following + Graphics Support -> VGA text console + -> Video mode selection support + + Graphics Support -> Framebuffer Console support + + Graphics Support -> Logo configuration + -> The Enhanced Linux Progress Patch + + + After finishing with the configuration, you need to do a + make bzImage, make modules, make modules_install and a make install + to install the new kernel. + + -> Configure your boot loader. + + Based on your hardware you need to select a vesa mode. Check + linux-2.6.0/Documentation/fb/vesafb.txt for more information. + passing vga=0x301 would in most cases be sufficient. However + higher resolutions are also supported. + + To display the display of messages, you need to pass the + arguments "console=/dev/tty2 CONSOLE=/dev/tty2" to the kernel. + + In lilo its done as.. + image=xxxxxx + label=xxxxxx + vga=0x301 <===== + append="console=/dev/tty2 CONSOLE=/dev/tty2" + + In grub its just + kernel=xxxxxxx vga=0x301 console=/dev/tty2 CONSOLE=/dev/tty2 + + -> Change the initscripts. + Changing the init scripts would do the job of passing messages + and progress to the kernel. Take a look at the distributed + files etc/rc and etc/init.d/functions for the modifications. + + Hint: Search for the word progress and the matches would + reveal the changes done. + + In case of RedHat9 The distributed initscripts can directly be used. + + Here's what the modifications mean... + 1. If you echo a string "10 Hello world" into /proc/progress + the initial number is taken as progress and the other + part of the string forms the message to be displayed. + + 2. If the message starts with a '=' it indicates the + success/failure of the previous message. + =s is for success. + =f is for failure. + =w is for warning. + + 3. If the message starts with a '!' its a command. + !c to clear the previous messages. 
+ !s to enable/disable the progress bar. + +Uninstalling: + For uninstalling ELPP you can install your backup kernel and +for the initscripts you may need to reinstall the package that provides +them. + +Creating Themes: + Themes for ELPP will be in the form of an image for boot-screen +and some compile time macros for colors and positioning of messages and +progress bar. Have a look at linux-2.6.0/drivers/video/console/elpp.h +for the various macros. + Boot Image can be made from any ASCII-PNM image with less than +224 colors. For this you can use the GNU image manipulation program +(GIMP). The steps involved are as below: + +-> Open your favorite image in GIMP. +-> In the context menu... go to Image->Mode->Indexed +-> In the dialog that pops up, se the colors to 223. +-> Save the image as PNM (ASCII) + + Once you get the ASCII-PNM you can now use the script +linux-2.6.0/scripts/pnmtologo to convert your PNM to a 'c' file. + +-> ./pnmtologo -n linux_progress_logo -o logo_elpp.c +-> cp logo_elpp.c linux-2.6.0/drivers/video/logo/ + + and these commands set your favorite image as the boot image. + + After changing the image and the macros you just need to +recompile the kernel and install the new kernel. A proper theming +scheme is being formulated. + + + ----------------------------------------- + Comments, queries and changes welcome at: + Prasad + ----------------------------------------- diff -Nru a/Documentation/fb/neofb.txt b/Documentation/fb/neofb.txt --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/Documentation/fb/neofb.txt Wed Oct 22 10:40:10 2003 @@ -0,0 +1,27 @@ +the neofb framebuffer driver supports the following Neomagic chipsets: + +NM2070 MagicGraph 128 +NM2090 MagicGraph 128V +NM2093 MagicGraph 128ZV +NM2097 MagicGraph 128ZV+ +NM2160 MagicGraph 128XD +NM2200 MagicGraph 256AV +NM2230 MagicGraph 256AV+ +NM2360 MagicGraph 256ZX +NM2380 MagicGraph 256XL+ + +with the following options: + +disabled Disable this driver's initialization. 
+internal Enable output on internal LCD Display. +external Enable output on external CRT. +nostretch Disable stretching of modes smaller than LCD. +nopciburst Disable PCI burst mode. +libretto Force Libretto 100/110 800x480 LCD. +picturebook Force Picturebook 1024x480 LCD. + +at the boot prompt: + video=neofb:picturebook + +as a module: + modprobe neofb picturebook=1 diff -Nru a/MAINTAINERS b/MAINTAINERS --- a/MAINTAINERS Wed Oct 22 10:40:07 2003 +++ b/MAINTAINERS Wed Oct 22 10:40:07 2003 @@ -722,6 +722,11 @@ W: http://sourceforge.net/projects/emu10k1/ S: Maintained +EPSON 1355 FRAMEBUFFER DRIVER +P: Christopher Hoover +M: ch@murgatroid.com, ch@hpl.hp.com +S: Maintained + ETHEREXPRESS-16 NETWORK DRIVER P: Philip Blundell M: Philip.Blundell@pobox.com @@ -1751,6 +1756,12 @@ L: linux-kernel@vger.kernel.org (kernel issues) L: selinux@tycho.nsa.gov (general discussion) W: http://www.nsa.gov/selinux +S: Supported + +SGI SN-IA64 (Altix) SERIAL CONSOLE DRIVER +P: Pat Gefre +M: pfg@sgi.com +L: linux-ia64@vger.kernel.org S: Supported SGI VISUAL WORKSTATION 320 AND 540 diff -Nru a/Makefile b/Makefile --- a/Makefile Wed Oct 22 10:40:08 2003 +++ b/Makefile Wed Oct 22 10:40:08 2003 @@ -275,7 +275,7 @@ CPPFLAGS := -D__KERNEL__ -Iinclude \ $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) -CFLAGS := -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \ +CFLAGS := -Wall -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common AFLAGS := -D__ASSEMBLY__ @@ -430,6 +430,12 @@ # Here goes the main Makefile # --------------------------------------------------------------------------- + +ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +CFLAGS += -Os +else +CFLAGS += -O2 +endif ifndef CONFIG_FRAME_POINTER CFLAGS += -fomit-frame-pointer diff -Nru a/arch/arm/Makefile b/arch/arm/Makefile --- a/arch/arm/Makefile Wed Oct 22 10:40:04 2003 +++ b/arch/arm/Makefile Wed Oct 22 10:40:04 2003 @@ -14,8 +14,6 @@ GZFLAGS :=-9 #CFLAGS +=-pipe -CFLAGS :=$(CFLAGS:-O2=-Os) - ifeq ($(CONFIG_FRAME_POINTER),y) CFLAGS 
+=-fno-omit-frame-pointer -mapcs -mno-sched-prolog endif diff -Nru a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S --- a/arch/arm/mm/proc-xscale.S Wed Oct 22 10:40:09 2003 +++ b/arch/arm/mm/proc-xscale.S Wed Oct 22 10:40:09 2003 @@ -670,7 +670,7 @@ .type __80321_proc_info,#object __80321_proc_info: .long 0x69052420 - .long 0xfffffff0 + .long 0xfffff7e0 .long 0x00000c0e b __xscale_setup .long cpu_arch_name diff -Nru a/arch/h8300/Kconfig b/arch/h8300/Kconfig --- a/arch/h8300/Kconfig Wed Oct 22 10:40:09 2003 +++ b/arch/h8300/Kconfig Wed Oct 22 10:40:09 2003 @@ -5,6 +5,10 @@ mainmenu "uClinux/h8300 (w/o MMU) Kernel Configuration" +config H8300 + bool + default y + config MMU bool default n diff -Nru a/arch/h8300/Makefile b/arch/h8300/Makefile --- a/arch/h8300/Makefile Wed Oct 22 10:40:03 2003 +++ b/arch/h8300/Makefile Wed Oct 22 10:40:03 2003 @@ -34,7 +34,7 @@ ldflags-$(CONFIG_CPU_H8S) := -mh8300self CFLAGS += $(cflags-y) -CFLAGS += -mint32 -fno-builtin -Os +CFLAGS += -mint32 -fno-builtin CFLAGS += -g CFLAGS += -D__linux__ CFLAGS += -DUTS_SYSNAME=\"uClinux\" diff -Nru a/arch/i386/Kconfig b/arch/i386/Kconfig --- a/arch/i386/Kconfig Wed Oct 22 10:40:02 2003 +++ b/arch/i386/Kconfig Wed Oct 22 10:40:02 2003 @@ -1127,6 +1127,52 @@ menu "Kernel hacking" +config CRASH_DUMP + tristate "Crash dump support (EXPERIMENTAL)" + depends on EXPERIMENTAL + default n + ---help--- + Say Y here to enable saving an image of system memory when a panic + or other error occurs. Dumps can also be forced with the SysRq+d + key if MAGIC_SYSRQ is enabled. + +config CRASH_DUMP_NETDEV + tristate "Crash dump network device driver" + depends on CRASH_DUMP + help + Say Y to allow saving crash dumps over a network device. + +config CRASH_DUMP_MEMDEV + bool "Crash dump staged memory driver" + depends on CRASH_DUMP + help + Say Y to allow intermediate saving crash dumps in spare + memory pages which would then be written out to disk + later. 
+ +config CRASH_DUMP_SOFTBOOT + bool "Save crash dump across a soft reboot" + depends on CRASH_DUMP_MEMDEV + help + Say Y to allow a crash dump to be preserved in memory + pages across a soft reboot and written out to disk + thereafter. For this to work, CRASH_DUMP must be + configured as part of the kernel (not as a module). + +config CRASH_DUMP_COMPRESS_RLE + tristate "Crash dump RLE compression" + depends on CRASH_DUMP + help + Say Y to allow saving dumps with Run Length Encoding compression. + +config CRASH_DUMP_COMPRESS_GZIP + tristate "Crash dump GZIP compression" + select ZLIB_INFLATE + select ZLIB_DEFLATE + depends on CRASH_DUMP + help + Say Y to allow saving dumps with Gnu Zip compression. + config DEBUG_KERNEL bool "Kernel debugging" help diff -Nru a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile --- a/arch/i386/boot/Makefile Wed Oct 22 10:40:04 2003 +++ b/arch/i386/boot/Makefile Wed Oct 22 10:40:04 2003 @@ -100,3 +100,4 @@ install: $(BOOTIMAGE) sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)" + if [ -f init/kerntypes.o ]; then cp init/kerntypes.o $(INSTALL_PATH)/Kerntypes; fi diff -Nru a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c --- a/arch/i386/kernel/acpi/boot.c Wed Oct 22 10:40:09 2003 +++ b/arch/i386/kernel/acpi/boot.c Wed Oct 22 10:40:09 2003 @@ -247,6 +247,34 @@ #endif /*CONFIG_X86_IO_APIC*/ +#ifdef CONFIG_ACPI_BUS +/* + * Set specified PIC IRQ to level triggered mode. + * + * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers + * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. + * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) + * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) + * + * As the BIOS should have done this for us, + * print a warning if the IRQ wasn't already set to level. 
+ */ + +void acpi_pic_set_level_irq(unsigned int irq) +{ + unsigned char mask = 1 << (irq & 7); + unsigned int port = 0x4d0 + (irq >> 3); + unsigned char val = inb(port); + + if (!(val & mask)) { + printk(KERN_WARNING PREFIX "IRQ %d was Edge Triggered, " + "setting to Level Triggerd\n", irq); + outb(val | mask, port); + } +} +#endif /* CONFIG_ACPI_BUS */ + + static unsigned long __init acpi_scan_rsdp ( diff -Nru a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S --- a/arch/i386/kernel/entry.S Wed Oct 22 10:40:04 2003 +++ b/arch/i386/kernel/entry.S Wed Oct 22 10:40:04 2003 @@ -880,5 +880,20 @@ .long sys_utimes .long sys_fadvise64_64 .long sys_ni_syscall /* sys_vserver */ +#ifdef CONFIG_REISER4_FS + .long sys_reiser4 +#else + .long sys_ni_syscall /* 275 */ +#endif + .long sys_kexec_load + .long sys_kexec_load + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_cpuset_create /* 280 + .long sys_cpuset_destroy + .long sys_cpuset_alloc + .long sys_cpuset_attach + .long sys_cpuset_getfreecpus + nr_syscalls=(.-sys_call_table)/4 diff -Nru a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c --- a/arch/i386/kernel/i386_ksyms.c Wed Oct 22 10:40:09 2003 +++ b/arch/i386/kernel/i386_ksyms.c Wed Oct 22 10:40:09 2003 @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -34,6 +35,7 @@ #include #include #include +#include extern void dump_thread(struct pt_regs *, struct user *); extern spinlock_t rtc_lock; @@ -210,6 +212,23 @@ #if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) EXPORT_SYMBOL(ist_info); +#endif + +#ifdef CONFIG_CRASH_DUMP_MODULE +#ifdef CONFIG_SMP +extern irq_desc_t irq_desc[NR_IRQS]; +extern unsigned long irq_affinity[NR_IRQS]; +extern void stop_this_cpu(void *); +EXPORT_SYMBOL(irq_desc); +EXPORT_SYMBOL(irq_affinity); +EXPORT_SYMBOL(stop_this_cpu); +EXPORT_SYMBOL(dump_send_ipi); +#endif +extern int pfn_is_ram(unsigned long); +EXPORT_SYMBOL(pfn_is_ram); +#ifdef ARCH_HAS_NMI_WATCHDOG 
+EXPORT_SYMBOL(touch_nmi_watchdog); +#endif #endif EXPORT_SYMBOL(csum_partial); diff -Nru a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c --- a/arch/i386/kernel/io_apic.c Wed Oct 22 10:40:04 2003 +++ b/arch/i386/kernel/io_apic.c Wed Oct 22 10:40:04 2003 @@ -1138,12 +1138,13 @@ return 0; } -int irq_vector[NR_IRQS] = { FIRST_DEVICE_VECTOR , 0 }; +/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ +u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 }; static int __init assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; - BUG_ON(irq >= NR_IRQS); + BUG_ON(irq >= NR_IRQ_VECTORS); if (IO_APIC_VECTOR(irq) > 0) return IO_APIC_VECTOR(irq); next: diff -Nru a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/arch/i386/kernel/machine_kexec.c Wed Oct 22 10:40:11 2003 @@ -0,0 +1,116 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * machine_kexec + * ======================= + */ + + +static void set_idt(void *newidt, __u16 limit) +{ + unsigned char curidt[6]; + + /* ia32 supports unaliged loads & stores */ + (*(__u16 *)(curidt)) = limit; + (*(__u32 *)(curidt +2)) = (unsigned long)(newidt); + + __asm__ __volatile__ ( + "lidt %0\n" + : "=m" (curidt) + ); +}; + + +static void set_gdt(void *newgdt, __u16 limit) +{ + unsigned char curgdt[6]; + + /* ia32 supports unaligned loads & stores */ + (*(__u16 *)(curgdt)) = limit; + (*(__u32 *)(curgdt +2)) = (unsigned long)(newgdt); + + __asm__ __volatile__ ( + "lgdt %0\n" + : "=m" (curgdt) + ); +}; + +static void load_segments(void) +{ +#define __STR(X) #X +#define STR(X) __STR(X) + + __asm__ __volatile__ ( + "\tljmp $"STR(__KERNEL_CS)",$1f\n" + "\t1:\n" + "\tmovl $"STR(__KERNEL_DS)",%eax\n" + "\tmovl %eax,%ds\n" + "\tmovl %eax,%es\n" + "\tmovl %eax,%fs\n" + "\tmovl %eax,%gs\n" + "\tmovl %eax,%ss\n" + ); +#undef STR +#undef __STR +} + +typedef 
void (*relocate_new_kernel_t)( + unsigned long indirection_page, unsigned long reboot_code_buffer, + unsigned long start_address, unsigned int has_pae); + +const extern unsigned char relocate_new_kernel[]; +extern void relocate_new_kernel_end(void); +const extern unsigned int relocate_new_kernel_size; +extern void use_mm(struct mm_struct *mm); + +void machine_kexec(struct kimage *image) +{ + unsigned long indirection_page; + unsigned long reboot_code_buffer; + relocate_new_kernel_t rnk; + + /* switch to an mm where the reboot_code_buffer is identity mapped */ + use_mm(&init_mm); + stop_apics(); + + /* Interrupts aren't acceptable while we reboot */ + local_irq_disable(); + reboot_code_buffer = page_to_pfn(image->reboot_code_pages) << PAGE_SHIFT; + indirection_page = image->head & PAGE_MASK; + + /* copy it out */ + memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); + + /* The segment registers are funny things, they are + * automatically loaded from a table, in memory wherever you + * set them to a specific selector, but this table is never + * accessed again you set the segment to a different selector. + * + * The more common model is are caches where the behide + * the scenes work is done, but is also dropped at arbitrary + * times. + * + * I take advantage of this here by force loading the + * segments, before I zap the gdt with an invalid value. + */ + load_segments(); + /* The gdt & idt are now invalid. + * If you want to load them you must set up your own idt & gdt. 
+ */ + set_gdt(phys_to_virt(0),0); + set_idt(phys_to_virt(0),0); + + /* now call it */ + rnk = (relocate_new_kernel_t) reboot_code_buffer; + (*rnk)(indirection_page, reboot_code_buffer, image->start, cpu_has_pae); +} diff -Nru a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c --- a/arch/i386/kernel/nmi.c Wed Oct 22 10:40:07 2003 +++ b/arch/i386/kernel/nmi.c Wed Oct 22 10:40:07 2003 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -436,6 +437,7 @@ bust_spinlocks(1); printk("NMI Watchdog detected LOCKUP on CPU%d, eip %08lx, registers:\n", cpu, regs->eip); show_registers(regs); + dump("NMI Watchdog detected LOCKUP", regs); printk("console shuts up ...\n"); console_silent(); spin_unlock(&nmi_print_lock); diff -Nru a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c --- a/arch/i386/kernel/process.c Wed Oct 22 10:40:01 2003 +++ b/arch/i386/kernel/process.c Wed Oct 22 10:40:01 2003 @@ -37,9 +37,12 @@ #include #include #include +#include +#include #include #include +#include #include #include #include @@ -503,6 +506,8 @@ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ __unlazy_fpu(prev_p); + if (next_p->mm) + load_user_cs_desc(cpu, next_p->mm); /* * Reload esp0, LDT and the page table pointer: @@ -772,5 +777,307 @@ if (copy_to_user(u_info, &info, sizeof(info))) return -EFAULT; return 0; +} + +/* + * In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions + * by the processes running on the same package. One thing we can do + * is to shuffle the initial stack for them. + * + * (Plus, wiggling the stack a bit also makes it a bit harder for + * attackers to guess the stack pointer.) 
+ */ + +static inline unsigned int get_random_int(void) +{ + unsigned int jitter, tsc = 0; + + if (!exec_shield_randomize) + return 0; + /* + * This is a pretty fast call, so no performance worries: + */ + get_random_bytes(&jitter, sizeof(jitter)); +#ifdef CONFIG_X86_HAS_TSC + rdtscl(tsc); +#endif + jitter += current->pid + (int)&tsc + jiffies + tsc; + + return jitter; +} + +unsigned long arch_align_stack(unsigned long sp) +{ + if (current->flags & PF_RELOCEXEC) + sp -= ((get_random_int() % 65536) << 4); + return sp & ~0xf; +} + +#if SHLIB_BASE >= 0x01000000 +# error SHLIB_BASE must be under 16MB! +#endif + +static unsigned long +arch_get_unmapped_nonexecutable_area(struct mm_struct *mm, unsigned long addr, unsigned long len) +{ + struct vm_area_struct *vma, *prev_vma; + unsigned long stack_limit; + int first_time = 1; + + if (!mm->mmap_top) { + printk("hm, %s:%d, !mmap_top.\n", current->comm, current->pid); + mm->mmap_top = mmap_top(); + } + stack_limit = mm->mmap_top; + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) + return -ENOMEM; + + /* dont allow allocations above current stack limit */ + if (mm->non_executable_cache > stack_limit) + mm->non_executable_cache = stack_limit; + + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + /* make sure it can fit in the remaining address space */ + if (mm->non_executable_cache < len) + return -ENOMEM; + + /* either no address requested or cant fit in requested address hole */ +try_again: + addr = (mm->non_executable_cache - len)&PAGE_MASK; + do { + if (!(vma = find_vma_prev(mm, addr, &prev_vma))) + return -ENOMEM; + + /* new region fits between prev_vma->vm_end and vma->vm_start, use it */ + if (addr+len <= vma->vm_start && (!prev_vma || (addr >= prev_vma->vm_end))) { + /* remember the address as a hint for next time */ + 
mm->non_executable_cache = addr; + return addr; + + /* pull non_executable_cache down to the first hole */ + } else if (mm->non_executable_cache == vma->vm_end) + mm->non_executable_cache = vma->vm_start; + + /* try just below the current vma->vm_start */ + addr = vma->vm_start-len; + } while (len <= vma->vm_start); + /* if hint left us with no space for the requested mapping try again */ + if (first_time) { + first_time = 0; + mm->non_executable_cache = stack_limit; + goto try_again; + } + return -ENOMEM; +} + +static unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len) +{ + unsigned long range = end - len - start; + if (end <= start + len) + return 0; + return PAGE_ALIGN(get_random_int() % range + start); +} + +static inline unsigned long +stock_arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long start_addr; + + if (len > TASK_SIZE) + return -ENOMEM; + + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + start_addr = addr = mm->free_area_cache; + +full_search: + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr) { + /* + * Start a new search - just in case we missed + * some holes. 
+ */ + if (start_addr != TASK_UNMAPPED_BASE) { + start_addr = addr = TASK_UNMAPPED_BASE; + goto full_search; + } + return -ENOMEM; + } + if (!vma || addr + len <= vma->vm_start) { + /* + * Remember the place where we stopped the search: + */ + mm->free_area_cache = addr + len; + return addr; + } + addr = vma->vm_end; + } +} + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, + unsigned long len0, unsigned long pgoff, unsigned long flags, + unsigned long prot) +{ + unsigned long addr = addr0, len = len0; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int ascii_shield = 0; + unsigned long tmp; + + /* + * Fall back to the old layout: + */ + if (!(current->flags & PF_RELOCEXEC)) + return stock_arch_get_unmapped_area(filp, addr0, len0, pgoff, flags); + if (len > TASK_SIZE) + return -ENOMEM; + + if (!addr && (prot & PROT_EXEC) && !(flags & MAP_FIXED)) + addr = randomize_range(SHLIB_BASE, 0x01000000, len); + + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) { + return addr; + } + } + + if (prot & PROT_EXEC) { + ascii_shield = 1; + addr = SHLIB_BASE; + } else { + /* this can fail if the stack was unlimited */ + if ((tmp = arch_get_unmapped_nonexecutable_area(mm, addr, len)) != -ENOMEM) + return tmp; +search_upper: + addr = PAGE_ALIGN(arch_align_stack(TASK_UNMAPPED_BASE)); + } + + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) { + return -ENOMEM; + } + if (!vma || addr + len <= vma->vm_start) { + /* + * Must not let a PROT_EXEC mapping get into the + * brk area: + */ + if (ascii_shield && (addr + len > mm->brk)) { + ascii_shield = 0; + goto search_upper; + } + /* + * Up until the brk area we randomize addresses + * as much as possible: + */ + if (ascii_shield && (addr >= 0x01000000)) { + tmp = randomize_range(0x01000000, mm->brk, len); + vma = find_vma(mm, tmp); + if (TASK_SIZE - len >= tmp && + (!vma || tmp + len <= vma->vm_start)) + return tmp; + } + /* + * Ok, randomization didnt work out - return + * the result of the linear search: + */ + return addr; + } + addr = vma->vm_end; + } +} + +void arch_add_exec_range(struct mm_struct *mm, unsigned long limit) +{ + if (limit > mm->context.exec_limit) { + mm->context.exec_limit = limit; + set_user_cs(&mm->context.user_cs, limit); + if (mm == current->mm) + load_user_cs_desc(smp_processor_id(), mm); + } +} + +void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end) +{ + struct vm_area_struct *vma; + unsigned long limit = 0; + + if (old_end == mm->context.exec_limit) { + for (vma = mm->mmap; vma; vma = vma->vm_next) + if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit)) + limit = vma->vm_end; + + mm->context.exec_limit = limit; + set_user_cs(&mm->context.user_cs, limit); + if (mm == current->mm) + load_user_cs_desc(smp_processor_id(), mm); + } +} + +void arch_flush_exec_range(struct mm_struct *mm) +{ + mm->context.exec_limit = 0; + set_user_cs(&mm->context.user_cs, 0); +} + +/* + * Generate random brk address between 128MB and 196MB. (if the layout + * allows it.) 
+ */ +void randomize_brk(unsigned long old_brk) +{ + unsigned long new_brk, range_start, range_end; + + range_start = 0x08000000; + if (current->mm->brk >= range_start) + range_start = current->mm->brk; + range_end = range_start + 0x02000000; + new_brk = randomize_range(range_start, range_end, 0); + if (new_brk) + current->mm->brk = new_brk; +} + +/* + * Top of mmap area (just below the process stack). + * leave an at least ~128 MB hole. Randomize it. + */ +#define MIN_GAP (128*1024*1024) +#define MAX_GAP (TASK_SIZE/6*5) + +unsigned long mmap_top(void) +{ + unsigned long gap = 0; + + gap = current->rlim[RLIMIT_STACK].rlim_cur; + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + + gap = arch_align_stack(gap) & PAGE_MASK; + + return TASK_SIZE - gap; } diff -Nru a/arch/i386/kernel/relocate_kernel.S b/arch/i386/kernel/relocate_kernel.S --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/arch/i386/kernel/relocate_kernel.S Wed Oct 22 10:40:10 2003 @@ -0,0 +1,110 @@ +#include + + /* + * Must be relocatable PIC code callable as a C function, that once + * it starts can not use the previous processes stack. + */ + .globl relocate_new_kernel +relocate_new_kernel: + /* read the arguments and say goodbye to the stack */ + movl 4(%esp), %ebx /* indirection_page */ + movl 8(%esp), %ebp /* reboot_code_buffer */ + movl 12(%esp), %edx /* start address */ + movl 16(%esp), %ecx /* cpu_has_pae */ + + /* zero out flags, and disable interrupts */ + pushl $0 + popfl + + /* set a new stack at the bottom of our page... */ + lea 4096(%ebp), %esp + + /* store the parameters back on the stack */ + pushl %edx /* store the start address */ + + /* Set cr0 to a known state: + * 31 0 == Paging disabled + * 18 0 == Alignment check disabled + * 16 0 == Write protect disabled + * 3 0 == No task switch + * 2 0 == Don't do FP software emulation. 
+ * 0 1 == Protected mode enabled + */ + movl %cr0, %eax + andl $~((1<<31)|(1<<18)|(1<<16)|(1<<3)|(1<<2)), %eax + orl $(1<<0), %eax + movl %eax, %cr0 + + /* clear cr4 if applicable */ + testl %ecx, %ecx + jz 1f + /* Set cr4 to a known state: + * Setting everything to zero seems safe. + */ + movl %cr4, %eax + andl $0, %eax + movl %eax, %cr4 + + jmp 1f +1: + + /* Flush the TLB (needed?) */ + xorl %eax, %eax + movl %eax, %cr3 + + /* Do the copies */ + cld +0: /* top, read another word for the indirection page */ + movl %ebx, %ecx + movl (%ebx), %ecx + addl $4, %ebx + testl $0x1, %ecx /* is it a destination page */ + jz 1f + movl %ecx, %edi + andl $0xfffff000, %edi + jmp 0b +1: + testl $0x2, %ecx /* is it an indirection page */ + jz 1f + movl %ecx, %ebx + andl $0xfffff000, %ebx + jmp 0b +1: + testl $0x4, %ecx /* is it the done indicator */ + jz 1f + jmp 2f +1: + testl $0x8, %ecx /* is it the source indicator */ + jz 0b /* Ignore it otherwise */ + movl %ecx, %esi /* For every source page do a copy */ + andl $0xfffff000, %esi + + movl $1024, %ecx + rep ; movsl + jmp 0b + +2: + + /* To be certain of avoiding problems with self-modifying code + * I need to execute a serializing instruction here. + * So I flush the TLB, it's handy, and not processor dependent. 
+ */ + xorl %eax, %eax + movl %eax, %cr3 + + /* set all of the registers to known values */ + /* leave %esp alone */ + + xorl %eax, %eax + xorl %ebx, %ebx + xorl %ecx, %ecx + xorl %edx, %edx + xorl %esi, %esi + xorl %edi, %edi + xorl %ebp, %ebp + ret +relocate_new_kernel_end: + + .globl relocate_new_kernel_size +relocate_new_kernel_size: + .long relocate_new_kernel_end - relocate_new_kernel diff -Nru a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c --- a/arch/i386/kernel/setup.c Wed Oct 22 10:40:05 2003 +++ b/arch/i386/kernel/setup.c Wed Oct 22 10:40:05 2003 @@ -450,6 +450,7 @@ print_memory_map(who); } /* setup_memory_region */ +unsigned long crashdump_addr = 0xdeadbeef; static void __init parse_cmdline_early (char ** cmdline_p) { @@ -567,6 +568,9 @@ if (c == ' ' && !memcmp(from, "highmem=", 8)) highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT; + if (c == ' ' && !memcmp(from, "crashdump=", 10)) + crashdump_addr = memparse(from+10, &from); + c = *(from++); if (!c) break; @@ -794,6 +798,14 @@ extern unsigned long setup_memory(void); #endif /* !CONFIG_DISCONTIGMEM */ +static int __init setup_exec_shield(char *str) +{ + get_option (&str, &exec_shield); + + return 1; +} +__setup("exec-shield=", setup_exec_shield); + /* * Request address space for all standard RAM and ROM resources * and also for regions reported as reserved by the e820. 
@@ -949,6 +961,8 @@ __setup("noreplacement", noreplacement_setup); +extern void crashdump_reserve(void); + void __init setup_arch(char **cmdline_p) { unsigned long max_low_pfn; @@ -1007,6 +1021,10 @@ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/ #endif paging_init(); + +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT + crashdump_reserve(); /* Preserve crash dump state from prev boot */ +#endif dmi_scan_machine(); diff -Nru a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c --- a/arch/i386/kernel/smp.c Wed Oct 22 10:40:01 2003 +++ b/arch/i386/kernel/smp.c Wed Oct 22 10:40:01 2003 @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -144,6 +145,13 @@ */ cfg = __prepare_ICR(shortcut, vector); + if (vector == DUMP_VECTOR) { + /* + * Setup DUMP IPI to be delivered as an NMI + */ + cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI; + } + /* * Send the IPI. The write to APIC_ICR fires this off. */ @@ -467,6 +475,11 @@ on_each_cpu(do_flush_tlb_all, 0, 1, 1); } +void dump_send_ipi(void) +{ + send_IPI_allbutself(DUMP_VECTOR); +} + /* * this function sends a 'reschedule' IPI to another CPU. 
* it goes straight through and wastes no time serializing @@ -545,7 +558,7 @@ return 0; } -static void stop_this_cpu (void * dummy) +void stop_this_cpu (void * dummy) { /* * Remove this CPU: @@ -606,4 +619,3 @@ atomic_inc(&call_data->finished); } } - diff -Nru a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c --- a/arch/i386/kernel/traps.c Wed Oct 22 10:40:03 2003 +++ b/arch/i386/kernel/traps.c Wed Oct 22 10:40:03 2003 @@ -25,6 +25,7 @@ #include #include #include +#include #ifdef CONFIG_EISA #include @@ -262,6 +263,7 @@ handle_BUG(regs); printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); show_registers(regs); + dump((char *)str, regs); bust_spinlocks(0); spin_unlock_irq(&die_lock); if (in_interrupt()) @@ -372,6 +374,10 @@ DO_ERROR(12, SIGBUS, "stack segment", stack_segment) DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, get_cr2()) +/* + * the original non-exec stack patch was written by + * Solar Designer . Thanks! + */ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code) { if (regs->eflags & X86_EFLAGS_IF) @@ -382,6 +388,31 @@ if (!(regs->xcs & 3)) goto gp_in_kernel; + + /* + * lazy-check for CS validity on exec-shield binaries: + */ + if (current->mm) { + int cpu = smp_processor_id(); + struct desc_struct *desc1, *desc2; + + desc1 = ¤t->mm->context.user_cs; + desc2 = cpu_gdt_table[cpu] + GDT_ENTRY_DEFAULT_USER_CS; + + /* + * The CS was not in sync - reload it and retry the + * instruction. If the instruction still faults then + * we wont hit this branch next time around. 
+ */ + if (desc1->a != desc2->a || desc1->b != desc2->b) { + load_user_cs_desc(cpu, current->mm); + return; + } + } + if (print_fatal_signals) { + printk("#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id()); + printk(" exec_limit: %08lx, user_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, current->mm->context.user_cs.a, current->mm->context.user_cs.b); + } current->thread.error_code = error_code; current->thread.trap_no = 13; diff -Nru a/arch/i386/mm/init.c b/arch/i386/mm/init.c --- a/arch/i386/mm/init.c Wed Oct 22 10:40:08 2003 +++ b/arch/i386/mm/init.c Wed Oct 22 10:40:08 2003 @@ -187,6 +187,13 @@ return 0; } +/* To enable modules to check if a page is in RAM */ +int pfn_is_ram(unsigned long pfn) +{ + return (page_is_ram(pfn)); +} + + #ifdef CONFIG_HIGHMEM pte_t *kmap_pte; pgprot_t kmap_prot; diff -Nru a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S --- a/arch/ia64/ia32/ia32_entry.S Wed Oct 22 10:40:07 2003 +++ b/arch/ia64/ia32/ia32_entry.S Wed Oct 22 10:40:07 2003 @@ -142,6 +142,19 @@ ;; st8 [r2]=r3 // initialize return code to -ENOSYS br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args + // Need to reload arguments (they may be changed by the tracing process) + adds r2=IA64_PT_REGS_R9_OFFSET+16,sp // r2 = &pt_regs.r9 + adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13 + ;; + ld4 r33=[r2],8 // r9 == ecx + ld4 r37=[r3],16 // r13 == ebp + ;; + ld4 r34=[r2],8 // r10 == edx + ld4 r36=[r3],8 // r15 == edi + ;; + ld4 r32=[r2],8 // r11 == ebx + ld4 r35=[r3],8 // r14 == esi + ;; .ret2: br.call.sptk.few rp=b6 // do the syscall .ia32_strace_check_retval: cmp.lt p6,p0=r8,r0 // syscall failed? 
diff -Nru a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c --- a/arch/ia64/kernel/efi.c Wed Oct 22 10:40:01 2003 +++ b/arch/ia64/kernel/efi.c Wed Oct 22 10:40:01 2003 @@ -297,9 +297,9 @@ u64 start; u64 end; } prev, curr; - void *efi_map_start, *efi_map_end, *p, *q, *r; + void *efi_map_start, *efi_map_end, *p, *q; efi_memory_desc_t *md, *check_md; - u64 efi_desc_size, start, end, granule_addr, first_non_wb_addr = 0; + u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; @@ -312,40 +312,33 @@ if (!(md->attribute & EFI_MEMORY_WB)) continue; - if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > first_non_wb_addr) { - /* - * Search for the next run of contiguous WB memory. Start search - * at first granule boundary covered by md. - */ - granule_addr = ((md->phys_addr + IA64_GRANULE_SIZE - 1) - & -IA64_GRANULE_SIZE); - first_non_wb_addr = granule_addr; - for (q = p; q < efi_map_end; q += efi_desc_size) { - check_md = q; - - if (check_md->attribute & EFI_MEMORY_WB) - trim_bottom(check_md, granule_addr); - - if (check_md->phys_addr < granule_addr) - continue; - - if (!(check_md->attribute & EFI_MEMORY_WB)) - break; /* hit a non-WB region; stop search */ + /* + * granule_addr is the base of md's first granule. + * [granule_addr - first_non_wb_addr) is guaranteed to + * be contiguous WB memory. 
+ */ + granule_addr = md->phys_addr & ~(IA64_GRANULE_SIZE - 1); + first_non_wb_addr = max(first_non_wb_addr, granule_addr); + + if (first_non_wb_addr < md->phys_addr) { + trim_bottom(md, granule_addr + IA64_GRANULE_SIZE); + granule_addr = md->phys_addr & ~(IA64_GRANULE_SIZE - 1); + first_non_wb_addr = max(first_non_wb_addr, granule_addr); + } - if (check_md->phys_addr != first_non_wb_addr) - break; /* hit a memory hole; stop search */ + for (q = p; q < efi_map_end; q += efi_desc_size) { + check_md = q; + if ((check_md->attribute & EFI_MEMORY_WB) && + (check_md->phys_addr == first_non_wb_addr)) first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT; - } - /* round it down to the previous granule-boundary: */ - first_non_wb_addr &= -IA64_GRANULE_SIZE; - - if (!(first_non_wb_addr > granule_addr)) - continue; /* couldn't find enough contiguous memory */ - - for (r = p; r < q; r += efi_desc_size) - trim_top(r, first_non_wb_addr); + else + break; /* non-WB or hole */ } + + last_granule_addr = first_non_wb_addr & ~(IA64_GRANULE_SIZE - 1); + if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) + trim_top(md, last_granule_addr); if (is_available_memory(md)) { if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) { diff -Nru a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c --- a/arch/ia64/kernel/ia64_ksyms.c Wed Oct 22 10:40:05 2003 +++ b/arch/ia64/kernel/ia64_ksyms.c Wed Oct 22 10:40:05 2003 @@ -34,13 +34,8 @@ #include EXPORT_SYMBOL(probe_irq_mask); -#include #include -/* not coded yet?? 
EXPORT_SYMBOL(csum_ipv6_magic); */ -EXPORT_SYMBOL(csum_partial_copy_nocheck); -EXPORT_SYMBOL(csum_tcpudp_magic); -EXPORT_SYMBOL(ip_compute_csum); -EXPORT_SYMBOL(ip_fast_csum); +EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ #include EXPORT_SYMBOL(__ia64_memcpy_fromio); @@ -58,9 +53,11 @@ EXPORT_SYMBOL(clear_page); #ifdef CONFIG_VIRTUAL_MEM_MAP +#include #include EXPORT_SYMBOL(vmalloc_end); EXPORT_SYMBOL(ia64_pfn_valid); +EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ #endif #include diff -Nru a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c --- a/arch/ia64/kernel/irq.c Wed Oct 22 10:40:09 2003 +++ b/arch/ia64/kernel/irq.c Wed Oct 22 10:40:09 2003 @@ -379,8 +379,11 @@ void disable_irq(unsigned int irq) { + irq_desc_t *desc = irq_descp(irq); + disable_irq_nosync(irq); - synchronize_irq(irq); + if (desc->action) + synchronize_irq(irq); } /** @@ -402,7 +405,7 @@ spin_lock_irqsave(&desc->lock, flags); switch (desc->depth) { case 1: { - unsigned int status = desc->status & ~IRQ_DISABLED; + unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS); desc->status = status; if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { desc->status = status | IRQ_REPLAY; diff -Nru a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c --- a/arch/ia64/kernel/perfmon.c Wed Oct 22 10:40:09 2003 +++ b/arch/ia64/kernel/perfmon.c Wed Oct 22 10:40:09 2003 @@ -4225,7 +4225,7 @@ ret = -EBUSY; } else { pfm_sessions.pfs_sys_use_dbregs++; - DPRINT(("load [%d] increased sys_use_dbreg=%lu\n", task->pid, pfm_sessions.pfs_sys_use_dbregs)); + DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs)); set_dbregs = 1; } } diff -Nru a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c --- a/arch/ia64/kernel/perfmon_default_smpl.c Wed Oct 22 10:40:09 2003 +++ b/arch/ia64/kernel/perfmon_default_smpl.c Wed Oct 22 10:40:09 2003 @@ -93,17 +93,17 @@ hdr->hdr_version = 
PFM_DEFAULT_SMPL_VERSION; hdr->hdr_buf_size = arg->buf_size; - hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr); - hdr->hdr_last_pos = (void *)((unsigned long)buf)+arg->buf_size; + hdr->hdr_cur_offs = sizeof(*hdr); hdr->hdr_overflows = 0UL; hdr->hdr_count = 0UL; - DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u\n", + DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n", task->pid, buf, hdr->hdr_buf_size, sizeof(*hdr), - hdr->hdr_version)); + hdr->hdr_version, + hdr->hdr_cur_offs)); return 0; } @@ -125,8 +125,8 @@ } hdr = (pfm_default_smpl_hdr_t *)buf; - cur = hdr->hdr_cur_pos; - last = hdr->hdr_last_pos; + cur = buf+hdr->hdr_cur_offs; + last = buf+hdr->hdr_buf_size; ovfl_pmd = arg->ovfl_pmd; ovfl_notify = arg->ovfl_notify; @@ -191,7 +191,7 @@ /* * update position for next entry */ - hdr->hdr_cur_pos = cur + sizeof(*ent) + (npmds << 3); + hdr->hdr_cur_offs += sizeof(*ent) + (npmds << 3); /* * keep same ovfl_pmds, ovfl_notify @@ -212,10 +212,9 @@ hdr->hdr_overflows++; /* - * if no notification is needed, then we saturate the buffer + * if no notification requested, then we saturate the buffer */ if (ovfl_notify == 0) { - hdr->hdr_count = 0UL; arg->ovfl_ctrl.bits.notify_user = 0; arg->ovfl_ctrl.bits.block_task = 0; arg->ovfl_ctrl.bits.mask_monitoring = 1; @@ -236,8 +235,8 @@ hdr = (pfm_default_smpl_hdr_t *)buf; - hdr->hdr_count = 0UL; - hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr); + hdr->hdr_count = 0UL; + hdr->hdr_cur_offs = sizeof(*hdr); ctrl->bits.mask_monitoring = 0; ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */ diff -Nru a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c --- a/arch/ia64/kernel/process.c Wed Oct 22 10:40:05 2003 +++ b/arch/ia64/kernel/process.c Wed Oct 22 10:40:05 2003 @@ -685,12 +685,16 @@ (*efi.reset_system)(EFI_RESET_WARM, 0, 0, 0); } +EXPORT_SYMBOL(machine_restart); + void machine_halt (void) { cpu_halt(); } +EXPORT_SYMBOL(machine_halt); + void 
machine_power_off (void) { @@ -698,3 +702,5 @@ pm_power_off(); machine_halt(); } + +EXPORT_SYMBOL(machine_power_off); diff -Nru a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c --- a/arch/ia64/kernel/setup.c Wed Oct 22 10:40:02 2003 +++ b/arch/ia64/kernel/setup.c Wed Oct 22 10:40:02 2003 @@ -327,9 +327,11 @@ * because we don't *really* know whether there's anything there, but we hope that * all new boxes will implement HCDP. */ - extern unsigned char acpi_legacy_devices; - if (!efi.hcdp && acpi_legacy_devices) - setup_serial_legacy(); + { + extern unsigned char acpi_legacy_devices; + if (!efi.hcdp && acpi_legacy_devices) + setup_serial_legacy(); + } #endif #ifdef CONFIG_VT diff -Nru a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c --- a/arch/ia64/lib/checksum.c Wed Oct 22 10:40:09 2003 +++ b/arch/ia64/lib/checksum.c Wed Oct 22 10:40:09 2003 @@ -1,8 +1,8 @@ /* * Network checksum routines * - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 Stephane Eranian + * Copyright (C) 1999, 2003 Hewlett-Packard Co + * Stephane Eranian * * Most of the code coming from arch/alpha/lib/checksum.c * @@ -10,6 +10,7 @@ * in an architecture-specific manner due to speed.. 
*/ +#include #include #include @@ -40,6 +41,8 @@ ((unsigned long) proto << 8)); } +EXPORT_SYMBOL(csum_tcpudp_magic); + unsigned int csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, unsigned short len, unsigned short proto, unsigned int sum) @@ -84,6 +87,7 @@ return result; } +EXPORT_SYMBOL(csum_partial); /* * this routine is used for miscellaneous IP-like checksums, mainly @@ -94,3 +98,5 @@ { return ~do_csum(buff,len); } + +EXPORT_SYMBOL(ip_compute_csum); diff -Nru a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c --- a/arch/ia64/lib/csum_partial_copy.c Wed Oct 22 10:40:08 2003 +++ b/arch/ia64/lib/csum_partial_copy.c Wed Oct 22 10:40:08 2003 @@ -1,12 +1,13 @@ /* * Network Checksum & Copy routine * - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 Stephane Eranian + * Copyright (C) 1999, 2003 Hewlett-Packard Co + * Stephane Eranian * * Most of the code has been imported from Linux/Alpha */ +#include #include #include @@ -146,3 +147,4 @@ return do_csum_partial_copy_from_user(src, dst, len, sum, NULL); } +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff -Nru a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c --- a/arch/ia64/mm/init.c Wed Oct 22 10:40:04 2003 +++ b/arch/ia64/mm/init.c Wed Oct 22 10:40:04 2003 @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -43,6 +44,8 @@ #ifdef CONFIG_VIRTUAL_MEM_MAP unsigned long vmalloc_end = VMALLOC_END_INIT; struct page *vmem_map; + + EXPORT_SYMBOL(vmem_map); #endif static int pgt_cache_water[2] = { 25, 50 }; diff -Nru a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c --- a/arch/ia64/mm/numa.c Wed Oct 22 10:40:04 2003 +++ b/arch/ia64/mm/numa.c Wed Oct 22 10:40:04 2003 @@ -11,12 +11,19 @@ */ #include +#include #include +#include #include +#include #include #include #include +static struct memblk *sysfs_memblks; +static struct node *sysfs_nodes; +static struct cpu *sysfs_cpus; + /* * The following structures are usually initialized by ACPI or * similar mechanisms 
and describe the NUMA characteristics of the machine. @@ -43,3 +50,52 @@ return (i < num_memblks) ? node_memblk[i].nid : (num_memblks ? -1 : 0); } + +static int __init topology_init(void) +{ + int i, err = 0; + + sysfs_nodes = kmalloc(sizeof(struct node) * numnodes, GFP_KERNEL); + if (!sysfs_nodes) { + err = -ENOMEM; + goto out; + } + memset(sysfs_nodes, 0, sizeof(struct node) * numnodes); + + sysfs_memblks = kmalloc(sizeof(struct memblk) * num_memblks, + GFP_KERNEL); + if (!sysfs_memblks) { + kfree(sysfs_nodes); + err = -ENOMEM; + goto out; + } + memset(sysfs_memblks, 0, sizeof(struct memblk) * num_memblks); + + sysfs_cpus = kmalloc(sizeof(struct cpu) * NR_CPUS, GFP_KERNEL); + if (!sysfs_cpus) { + kfree(sysfs_memblks); + kfree(sysfs_nodes); + err = -ENOMEM; + goto out; + } + memset(sysfs_cpus, 0, sizeof(struct cpu) * NR_CPUS); + + for (i = 0; i < numnodes; i++) + if ((err = register_node(&sysfs_nodes[i], i, 0))) + goto out; + + for (i = 0; i < num_memblks; i++) + if ((err = register_memblk(&sysfs_memblks[i], i, + &sysfs_nodes[memblk_to_node(i)]))) + goto out; + + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i)) + if((err = register_cpu(&sysfs_cpus[i], i, + &sysfs_nodes[cpu_to_node(i)]))) + goto out; + out: + return err; +} + +__initcall(topology_init); diff -Nru a/arch/ia64/sn/io/drivers/ioconfig_bus.c b/arch/ia64/sn/io/drivers/ioconfig_bus.c --- a/arch/ia64/sn/io/drivers/ioconfig_bus.c Wed Oct 22 10:40:08 2003 +++ b/arch/ia64/sn/io/drivers/ioconfig_bus.c Wed Oct 22 10:40:08 2003 @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff -Nru a/arch/ia64/sn/io/hwgfs/hcl.c b/arch/ia64/sn/io/hwgfs/hcl.c --- a/arch/ia64/sn/io/hwgfs/hcl.c Wed Oct 22 10:40:07 2003 +++ b/arch/ia64/sn/io/hwgfs/hcl.c Wed Oct 22 10:40:07 2003 @@ -28,11 +28,6 @@ #include #include -#define HCL_NAME "SGI-HWGRAPH COMPATIBILITY DRIVER" -#define HCL_TEMP_NAME "HCL_TEMP_NAME_USED_FOR_HWGRAPH_VERTEX_CREATE" -#define HCL_TEMP_NAME_LEN 44 -#define HCL_VERSION "1.0" - 
#define vertex_hdl_t hwgfs_handle_t vertex_hdl_t hwgraph_root; vertex_hdl_t linux_busnum; @@ -40,26 +35,6 @@ extern void pci_bus_cvlink_init(void); /* - * Debug flag definition. - */ -#define OPTION_NONE 0x00 -#define HCL_DEBUG_NONE 0x00000 -#define HCL_DEBUG_ALL 0x0ffff -#if defined(CONFIG_HCL_DEBUG) -static unsigned int hcl_debug_init __initdata = HCL_DEBUG_NONE; -#endif -static unsigned int hcl_debug = HCL_DEBUG_NONE; -#if defined(CONFIG_HCL_DEBUG) && !defined(MODULE) -static unsigned int boot_options = OPTION_NONE; -#endif - -invplace_t invplace_none = { - GRAPH_VERTEX_NONE, - GRAPH_VERTEX_PLACE_NONE, - NULL -}; - -/* * init_hcl() - Boot time initialization. * */ @@ -403,39 +378,6 @@ return(0); } -#if 0 -/* - * hwgraph_edge_add - This routines has changed from the original conext. - * All it does now is to create a symbolic link from "from" to "to". - */ -/* ARGSUSED */ -int -hwgraph_edge_add(vertex_hdl_t from, vertex_hdl_t to, char *name) -{ - - char *path, *link; - vertex_hdl_t handle = NULL; - int rv, i; - - handle = hwgfs_find_handle(from, name, 0, 0, 0, 1); - if (handle) { - return(0); - } - - path = kmalloc(1024, GFP_KERNEL); - memset(path, 0x0, 1024); - link = kmalloc(1024, GFP_KERNEL); - memset(path, 0x0, 1024); - i = hwgfs_generate_path (to, link, 1024); - rv = hwgfs_mk_symlink (from, (const char *)name, - DEVFS_FL_DEFAULT, link, - &handle, NULL); - return(0); - - -} -#endif - int hwgraph_edge_add(vertex_hdl_t from, vertex_hdl_t to, char *name) { @@ -718,24 +660,6 @@ 0, /* minor */ 0, /* char | block */ 1)); /* traverse symlinks */ -} - -/* - * hwgraph_inventory_remove - Removes an inventory entry. - * - * Remove an inventory item associated with a vertex. It is the caller's - * responsibility to make sure that there are no races between removing - * inventory from a vertex and simultaneously removing that vertex. 
-*/ -int -hwgraph_inventory_remove( vertex_hdl_t de, - int class, - int type, - major_t controller, - minor_t unit, - int state) -{ - return(0); /* Just a Stub for IRIX code. */ } /* diff -Nru a/arch/ia64/sn/io/hwgfs/labelcl.c b/arch/ia64/sn/io/hwgfs/labelcl.c --- a/arch/ia64/sn/io/hwgfs/labelcl.c Wed Oct 22 10:40:07 2003 +++ b/arch/ia64/sn/io/hwgfs/labelcl.c Wed Oct 22 10:40:07 2003 @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff -Nru a/arch/ia64/sn/io/io.c b/arch/ia64/sn/io/io.c --- a/arch/ia64/sn/io/io.c Wed Oct 22 10:40:04 2003 +++ b/arch/ia64/sn/io/io.c Wed Oct 22 10:40:04 2003 @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -29,7 +28,6 @@ #include extern xtalk_provider_t hub_provider; -extern void hub_intr_init(vertex_hdl_t hubv); static int force_fire_and_forget = 1; static int ignore_conveyor_override; @@ -601,7 +599,6 @@ hub_provider_startup(vertex_hdl_t hubv) { hub_pio_init(hubv); - hub_intr_init(hubv); } /* diff -Nru a/arch/ia64/sn/io/machvec/pci.c b/arch/ia64/sn/io/machvec/pci.c --- a/arch/ia64/sn/io/machvec/pci.c Wed Oct 22 10:40:04 2003 +++ b/arch/ia64/sn/io/machvec/pci.c Wed Oct 22 10:40:04 2003 @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff -Nru a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c --- a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c Wed Oct 22 10:40:09 2003 +++ b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c Wed Oct 22 10:40:09 2003 @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -437,11 +436,11 @@ #ifdef CONFIG_PROC_FS extern void register_sn_procfs(void); #endif - extern void irix_io_init(void); + extern void sn_io_init(void); extern void sn_init_cpei_timer(void); init_hcl(); - irix_io_init(); + sn_io_init(); for (cnode = 0; cnode < numnodes; cnode++) { extern void intr_init_vecblk(cnodeid_t); diff -Nru a/arch/ia64/sn/io/machvec/pci_dma.c 
b/arch/ia64/sn/io/machvec/pci_dma.c --- a/arch/ia64/sn/io/machvec/pci_dma.c Wed Oct 22 10:40:04 2003 +++ b/arch/ia64/sn/io/machvec/pci_dma.c Wed Oct 22 10:40:04 2003 @@ -29,7 +29,6 @@ #include #include #include -#include /* * For ATE allocations diff -Nru a/arch/ia64/sn/io/platform_init/irix_io_init.c b/arch/ia64/sn/io/platform_init/irix_io_init.c --- a/arch/ia64/sn/io/platform_init/irix_io_init.c Wed Oct 22 10:40:06 2003 +++ b/arch/ia64/sn/io/platform_init/irix_io_init.c Wed Oct 22 10:40:06 2003 @@ -27,13 +27,6 @@ extern int pci_bus_to_hcl_cvlink(void); extern void mlreset(void); -/* #define DEBUG_IO_INIT 1 */ -#ifdef DEBUG_IO_INIT -#define DBG(x...) printk(x) -#else -#define DBG(x...) -#endif /* DEBUG_IO_INIT */ - /* * This routine is responsible for the setup of all the IRIX hwgraph style * stuff that's been pulled into linux. It's called by sn_pci_find_bios which @@ -45,12 +38,12 @@ */ void -irix_io_init(void) +sn_io_init(void) { cnodeid_t cnode; /* - * This is the Master CPU. Emulate mlsetup and main.c in Irix. + * This is the Master CPU. */ mlreset(); diff -Nru a/arch/ia64/sn/io/sgi_if.c b/arch/ia64/sn/io/sgi_if.c --- a/arch/ia64/sn/io/sgi_if.c Wed Oct 22 10:40:06 2003 +++ b/arch/ia64/sn/io/sgi_if.c Wed Oct 22 10:40:06 2003 @@ -12,16 +12,14 @@ #include #include #include -#include #include #include -#include #include #include #include void * -snia_kmem_zalloc(size_t size, int flag) +snia_kmem_zalloc(size_t size) { void *ptr = kmalloc(size, GFP_KERNEL); if ( ptr ) @@ -34,26 +32,6 @@ { kfree(ptr); } - -/* - * the alloc/free_node routines do a simple kmalloc for now .. 
- */ -void * -snia_kmem_alloc_node(register size_t size, register int flags, cnodeid_t node) -{ - /* someday will Allocate on node 'node' */ - return(kmalloc(size, GFP_KERNEL)); -} - -void * -snia_kmem_zalloc_node(register size_t size, register int flags, cnodeid_t node) -{ - void *ptr = kmalloc(size, GFP_KERNEL); - if ( ptr ) - BZERO(ptr, size); - return(ptr); -} - /* * print_register() allows formatted printing of bit fields. individual diff -Nru a/arch/ia64/sn/io/sn2/bte_error.c b/arch/ia64/sn/io/sn2/bte_error.c --- a/arch/ia64/sn/io/sn2/bte_error.c Wed Oct 22 10:40:08 2003 +++ b/arch/ia64/sn/io/sn2/bte_error.c Wed Oct 22 10:40:08 2003 @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include diff -Nru a/arch/ia64/sn/io/sn2/geo_op.c b/arch/ia64/sn/io/sn2/geo_op.c --- a/arch/ia64/sn/io/sn2/geo_op.c Wed Oct 22 10:40:08 2003 +++ b/arch/ia64/sn/io/sn2/geo_op.c Wed Oct 22 10:40:08 2003 @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include diff -Nru a/arch/ia64/sn/io/sn2/klconflib.c b/arch/ia64/sn/io/sn2/klconflib.c --- a/arch/ia64/sn/io/sn2/klconflib.c Wed Oct 22 10:40:01 2003 +++ b/arch/ia64/sn/io/sn2/klconflib.c Wed Oct 22 10:40:01 2003 @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -25,9 +24,6 @@ #include -#define LDEBUG 0 -#define NIC_UNKNOWN ((nic_t) -1) - #undef DEBUG_KLGRAPH #ifdef DEBUG_KLGRAPH #define DBG(x...) printk(x) @@ -186,33 +182,6 @@ } -/* - * get_actual_nasid - * - * Completely disabled brds have their klconfig on - * some other nasid as they have no memory. But their - * actual nasid is hidden in the klconfig. Use this - * routine to get it. Works for normal boards too. - */ -nasid_t -get_actual_nasid(lboard_t *brd) -{ - klhub_t *hub ; - - if (!brd) - return INVALID_NASID ; - - /* find out if we are a completely disabled brd. 
*/ - - hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB); - if (!hub) - return INVALID_NASID ; - if (!(hub->hub_info.flags & KLINFO_ENABLE)) /* disabled node brd */ - return hub->hub_info.physid ; - else - return brd->brd_nasid ; -} - int xbow_port_io_enabled(nasid_t nasid, int link) { @@ -292,25 +261,6 @@ format_module_id(buffer, modnum, MODULE_FORMAT_BRIEF); sprintf(path, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d/%s", buffer, geo_slab(brd->brd_geoid), board_name); } - -/* - * Get the module number for a NASID. - */ -moduleid_t -get_module_id(nasid_t nasid) -{ - lboard_t *brd; - - brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA); - - if (!brd) - return INVALID_MODULE; - else - return geo_module(brd->brd_geoid); -} - - -#define MHZ 1000000 /* Get the canonical hardware graph name for the given pci component diff -Nru a/arch/ia64/sn/io/sn2/klgraph.c b/arch/ia64/sn/io/sn2/klgraph.c --- a/arch/ia64/sn/io/sn2/klgraph.c Wed Oct 22 10:40:08 2003 +++ b/arch/ia64/sn/io/sn2/klgraph.c Wed Oct 22 10:40:08 2003 @@ -28,13 +28,11 @@ #include #include -// #define KLGRAPH_DEBUG 1 +/* #define KLGRAPH_DEBUG 1 */ #ifdef KLGRAPH_DEBUG #define GRPRINTF(x) printk x -#define CE_GRPANIC CE_PANIC #else #define GRPRINTF(x) -#define CE_GRPANIC CE_PANIC #endif #include diff -Nru a/arch/ia64/sn/io/sn2/ml_iograph.c b/arch/ia64/sn/io/sn2/ml_iograph.c --- a/arch/ia64/sn/io/sn2/ml_iograph.c Wed Oct 22 10:40:01 2003 +++ b/arch/ia64/sn/io/sn2/ml_iograph.c Wed Oct 22 10:40:01 2003 @@ -15,12 +15,10 @@ #include #include #include -#include #include #include #include #include -#include #include #include #include @@ -59,9 +57,9 @@ { xswitch_vol_t xvolinfo; int rc; - extern void * snia_kmem_zalloc(size_t size, int flag); + extern void * snia_kmem_zalloc(size_t size); - xvolinfo = snia_kmem_zalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL); + xvolinfo = snia_kmem_zalloc(sizeof(struct xswitch_vol_s)); mutex_init(&xvolinfo->xswitch_volunteer_mutex); rc = hwgraph_info_add_LBL(xswitch, 
INFO_LBL_XSWITCH_VOL, @@ -79,12 +77,11 @@ { xswitch_vol_t xvolinfo; int rc; - extern void snia_kmem_free(void *ptr, size_t size); rc = hwgraph_info_remove_LBL(xswitch, INFO_LBL_XSWITCH_VOL, (arbitrary_info_t *)&xvolinfo); - snia_kmem_free(xvolinfo, sizeof(struct xswitch_vol_s)); + kfree(xvolinfo); } /* * A Crosstalk master volunteers to manage xwidgets on the specified xswitch. @@ -508,18 +505,11 @@ ASSERT_ALWAYS(to); rc = hwgraph_edge_add(from, to, EDGE_LBL_INTERCONNECT); - if (rc == -EEXIST) - goto link_done; - if (rc != GRAPH_SUCCESS) { + if ((rc != -EEXIST) && (rc != GRAPH_SUCCESS)) { printk("%s: Unable to establish link" " for xbmon.", pathname); } -link_done: } - -#ifdef SN0_USE_BTE - bte_bpush_war(cnode, (void *)board); -#endif } } @@ -617,7 +607,6 @@ nodepda_t *npdap; struct semaphore *peer_sema = 0; uint32_t widget_partnum; - cpu_cookie_t c = 0; npdap = NODEPDA(cnodeid); @@ -839,34 +828,6 @@ static struct io_brick_map_s io_brick_tab[] = { - -/* Ibrick widget number to PCI bus number map */ - { MODULE_IBRICK, /* Ibrick type */ - /* PCI Bus # Widget # */ - { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */ - 0, /* 0x8 */ - 0, /* 0x9 */ - 0, 0, /* 0xa - 0xb */ - 0, /* 0xc */ - 0, /* 0xd */ - 2, /* 0xe */ - 1 /* 0xf */ - } - }, - -/* Pbrick widget number to PCI bus number map */ - { MODULE_PBRICK, /* Pbrick type */ - /* PCI Bus # Widget # */ - { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */ - 2, /* 0x8 */ - 1, /* 0x9 */ - 0, 0, /* 0xa - 0xb */ - 4, /* 0xc */ - 6, /* 0xd */ - 3, /* 0xe */ - 5 /* 0xf */ - } - }, /* PXbrick widget number to PCI bus number map */ { MODULE_PXBRICK, /* PXbrick type */ diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c --- a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c Wed Oct 22 10:40:05 2003 +++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c Wed Oct 22 10:40:05 2003 @@ -319,9 +319,8 @@ reg_p b_respp; pcibr_slot_info_resp_t slotp; pcibr_slot_func_info_resp_t funcp; - extern void snia_kmem_free(void *, int); - slotp = 
snia_kmem_zalloc(sizeof(*slotp), 0); + slotp = snia_kmem_zalloc(sizeof(*slotp)); if (slotp == NULL) { return(ENOMEM); } @@ -395,7 +394,7 @@ return(EFAULT); } - snia_kmem_free(slotp, sizeof(*slotp)); + kfree(slotp); return(0); } diff -Nru a/arch/ia64/sn/io/sn2/shub.c b/arch/ia64/sn/io/sn2/shub.c --- a/arch/ia64/sn/io/sn2/shub.c Wed Oct 22 10:40:06 2003 +++ b/arch/ia64/sn/io/sn2/shub.c Wed Oct 22 10:40:06 2003 @@ -203,7 +203,7 @@ uint64_t longarg; int nasid; - cnode = (cnodeid_t)file->f_dentry->d_fsdata; + cnode = (cnodeid_t)(long)file->f_dentry->d_fsdata; switch (cmd) { case SNDRV_SHUB_CONFIGURE: diff -Nru a/arch/ia64/sn/io/sn2/shub_intr.c b/arch/ia64/sn/io/sn2/shub_intr.c --- a/arch/ia64/sn/io/sn2/shub_intr.c Wed Oct 22 10:40:06 2003 +++ b/arch/ia64/sn/io/sn2/shub_intr.c Wed Oct 22 10:40:06 2003 @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -27,12 +26,6 @@ #include #include -/* ARGSUSED */ -void -hub_intr_init(vertex_hdl_t hubv) -{ -} - xwidgetnum_t hub_widget_id(nasid_t nasid) { @@ -77,7 +70,7 @@ xtalk_addr = SH_II_INT0 | ((unsigned long)nasid << 36) | (1UL << 47); } - intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, cnode); + intr_hdl = kmalloc(sizeof(struct hub_intr_s), GFP_KERNEL); ASSERT_ALWAYS(intr_hdl); xtalk_info = &intr_hdl->i_xtalk_info; diff -Nru a/arch/ia64/sn/io/sn2/xtalk.c b/arch/ia64/sn/io/sn2/xtalk.c --- a/arch/ia64/sn/io/sn2/xtalk.c Wed Oct 22 10:40:01 2003 +++ b/arch/ia64/sn/io/sn2/xtalk.c Wed Oct 22 10:40:01 2003 @@ -912,11 +912,6 @@ if (!(widget_info = xwidget_info_get(widget))) return(1); - /* Remove the inventory information associated - * with the widget. 
- */ - hwgraph_inventory_remove(widget, -1, -1, -1, -1, -1); - hwid = &(widget_info->w_hwid); /* Clean out the xwidget information */ diff -Nru a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh --- a/arch/s390/boot/install.sh Wed Oct 22 10:40:08 2003 +++ b/arch/s390/boot/install.sh Wed Oct 22 10:40:08 2003 @@ -16,7 +16,8 @@ # $1 - kernel version # $2 - kernel image file # $3 - kernel map file -# $4 - default install path (blank if root directory) +# $4 - kernel type file +# $5 - default install path (blank if root directory) # # User may have a custom install script @@ -26,13 +27,22 @@ # Default install - same as make zlilo -if [ -f $4/vmlinuz ]; then - mv $4/vmlinuz $4/vmlinuz.old +if [ -f $5/vmlinuz ]; then + mv $5/vmlinuz $5/vmlinuz.old fi -if [ -f $4/System.map ]; then - mv $4/System.map $4/System.old +if [ -f $5/System.map ]; then + mv $5/System.map $5/System.old fi -cat $2 > $4/vmlinuz -cp $3 $4/System.map +if [ -f $5/Kerntypes ]; then + mv $5/Kerntypes $5/Kerntypes.old +fi + +cat $2 > $5/vmlinuz +cp $3 $5/System.map + +# copy the kernel type file if it exists +if [ -f $4 ]; then + cp $4 $5/Kerntypes +fi diff -Nru a/drivers/Makefile b/drivers/Makefile --- a/drivers/Makefile Wed Oct 22 10:40:09 2003 +++ b/drivers/Makefile Wed Oct 22 10:40:09 2003 @@ -49,3 +49,4 @@ obj-$(CONFIG_MCA) += mca/ obj-$(CONFIG_EISA) += eisa/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ +obj-$(CONFIG_CRASH_DUMP) += dump/ diff -Nru a/drivers/acpi/battery.c b/drivers/acpi/battery.c --- a/drivers/acpi/battery.c Wed Oct 22 10:40:08 2003 +++ b/drivers/acpi/battery.c Wed Oct 22 10:40:08 2003 @@ -360,7 +360,7 @@ ACPI_FUNCTION_TRACE("acpi_battery_read_info"); - if (!battery) + if (!battery || (off != 0)) goto end; if (battery->flags.present) @@ -459,7 +459,7 @@ ACPI_FUNCTION_TRACE("acpi_battery_read_state"); - if (!battery) + if (!battery || (off != 0)) goto end; if (battery->flags.present) @@ -543,7 +543,7 @@ ACPI_FUNCTION_TRACE("acpi_battery_read_alarm"); - if (!battery) + if (!battery || (off 
!= 0)) goto end; if (!battery->flags.present) { diff -Nru a/drivers/acpi/bus.c b/drivers/acpi/bus.c --- a/drivers/acpi/bus.c Wed Oct 22 10:40:08 2003 +++ b/drivers/acpi/bus.c Wed Oct 22 10:40:08 2003 @@ -39,7 +39,7 @@ #define _COMPONENT ACPI_BUS_COMPONENT ACPI_MODULE_NAME ("acpi_bus") -extern void eisa_set_level_irq(unsigned int irq); +extern void acpi_pic_set_level_irq(unsigned int irq); FADT_DESCRIPTOR acpi_fadt; struct acpi_device *acpi_root; @@ -615,7 +615,7 @@ if (acpi_ioapic) mp_config_ioapic_for_sci(acpi_fadt.sci_int); else - eisa_set_level_irq(acpi_fadt.sci_int); + acpi_pic_set_level_irq(acpi_fadt.sci_int); #endif status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION); diff -Nru a/drivers/acpi/ec.c b/drivers/acpi/ec.c --- a/drivers/acpi/ec.c Wed Oct 22 10:40:07 2003 +++ b/drivers/acpi/ec.c Wed Oct 22 10:40:07 2003 @@ -94,6 +94,13 @@ /* External interfaces use first EC only, so remember */ static struct acpi_device *first_ec; +/* + * We use kernel thread to handle ec's gpe query, so the query may defer. + * The query need a context, which can be freed when we replace ec_ecdt + * with EC device. So defered query may have a wrong context. 
+ * We use an indication to avoid it + */ +static int ec_device_init = 0; /* -------------------------------------------------------------------------- Transaction Management -------------------------------------------------------------------------- */ @@ -393,8 +400,11 @@ acpi_disable_gpe(NULL, ec->gpe_bit, ACPI_ISR); - status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, - acpi_ec_gpe_query, ec); + if (!ec_device_init) + acpi_ec_gpe_query(ec); /* directly query when device didn't init */ + else + status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, + acpi_ec_gpe_query, ec); } /* -------------------------------------------------------------------------- @@ -589,6 +599,8 @@ we now have the *real* EC info, so kill the makeshift one.*/ acpi_evaluate_integer(ec->handle, "_UID", NULL, &uid); if (ec_ecdt && ec_ecdt->uid == uid) { + acpi_disable_gpe(NULL, ec_ecdt->gpe_bit, ACPI_NOT_ISR); + ec_device_init = 1; acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); diff -Nru a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c --- a/drivers/acpi/events/evgpe.c Wed Oct 22 10:40:07 2003 +++ b/drivers/acpi/events/evgpe.c Wed Oct 22 10:40:07 2003 @@ -217,8 +217,8 @@ gpe_number = (i * ACPI_GPE_REGISTER_WIDTH) + j; int_status |= acpi_ev_gpe_dispatch ( - &gpe_block->event_info[gpe_number], - gpe_number + gpe_block->register_info[gpe_number].base_gpe_number); + &gpe_block->event_info[gpe_number], + j + gpe_register_info->base_gpe_number); } } } diff -Nru a/drivers/acpi/power.c b/drivers/acpi/power.c --- a/drivers/acpi/power.c Wed Oct 22 10:40:06 2003 +++ b/drivers/acpi/power.c Wed Oct 22 10:40:06 2003 @@ -337,6 +337,9 @@ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) return_VALUE(-EINVAL); + if ((device->power.state < ACPI_STATE_D0) || (device->power.state > ACPI_STATE_D3)) + return_VALUE(-ENODEV); + cl = &device->power.states[device->power.state].resources; tl = &device->power.states[state].resources; @@ 
-359,8 +362,6 @@ goto end; } - device->power.state = state; - /* * Then we dereference all power resources used in the current list. */ @@ -370,6 +371,8 @@ goto end; } + /* We shouldn't change the state till all above operations succeed */ + device->power.state = state; end: if (result) ACPI_DEBUG_PRINT((ACPI_DB_WARN, diff -Nru a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c --- a/drivers/acpi/utilities/utdelete.c Wed Oct 22 10:40:05 2003 +++ b/drivers/acpi/utilities/utdelete.c Wed Oct 22 10:40:05 2003 @@ -416,7 +416,7 @@ u32 i; union acpi_generic_state *state_list = NULL; union acpi_generic_state *state; - + union acpi_operand_object *tmp; ACPI_FUNCTION_TRACE_PTR ("ut_update_object_reference", object); @@ -448,8 +448,16 @@ switch (ACPI_GET_OBJECT_TYPE (object)) { case ACPI_TYPE_DEVICE: - acpi_ut_update_ref_count (object->device.system_notify, action); - acpi_ut_update_ref_count (object->device.device_notify, action); + tmp = object->device.system_notify; + if (tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->device.system_notify = NULL; + acpi_ut_update_ref_count (tmp, action); + + tmp = object->device.device_notify; + if (tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->device.device_notify = NULL; + acpi_ut_update_ref_count (tmp, action); + break; @@ -470,6 +478,10 @@ if (ACPI_FAILURE (status)) { goto error_exit; } + + tmp = object->package.elements[i]; + if (tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->package.elements[i] = NULL; } break; @@ -481,6 +493,10 @@ if (ACPI_FAILURE (status)) { goto error_exit; } + + tmp = object->buffer_field.buffer_obj; + if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->buffer_field.buffer_obj = NULL; break; @@ -491,6 +507,10 @@ if (ACPI_FAILURE (status)) { goto error_exit; } + + tmp = object->field.region_obj; + if ( tmp && (tmp->common.reference_count <= 1) && action == 
REF_DECREMENT) + object->field.region_obj = NULL; break; @@ -502,11 +522,19 @@ goto error_exit; } + tmp = object->bank_field.bank_obj; + if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->bank_field.bank_obj = NULL; + status = acpi_ut_create_update_state_and_push ( object->bank_field.region_obj, action, &state_list); if (ACPI_FAILURE (status)) { goto error_exit; } + + tmp = object->bank_field.region_obj; + if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->bank_field.region_obj = NULL; break; @@ -518,11 +546,19 @@ goto error_exit; } + tmp = object->index_field.index_obj; + if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->index_field.index_obj = NULL; + status = acpi_ut_create_update_state_and_push ( object->index_field.data_obj, action, &state_list); if (ACPI_FAILURE (status)) { goto error_exit; } + + tmp = object->index_field.data_obj; + if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT) + object->index_field.data_obj = NULL; break; diff -Nru a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c --- a/drivers/base/firmware_class.c Wed Oct 22 10:40:01 2003 +++ b/drivers/base/firmware_class.c Wed Oct 22 10:40:01 2003 @@ -415,18 +415,22 @@ void (*cont)(const struct firmware *fw, void *context); }; -static void +static int request_firmware_work_func(void *arg) { struct firmware_work *fw_work = arg; const struct firmware *fw; - if (!arg) - return; + if (!arg) { + WARN_ON(1); + return 0; + } + daemonize("firmware/%s", fw_work->name); request_firmware(&fw, fw_work->name, fw_work->device); fw_work->cont(fw, fw_work->context); release_firmware(fw); module_put(fw_work->module); kfree(fw_work); + return 0; } /** @@ -451,6 +455,8 @@ { struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work), GFP_ATOMIC); + int ret; + if (!fw_work) return -ENOMEM; if (!try_module_get(module)) { @@ -465,9 +471,14 @@ .context = context, .cont = 
cont, }; - INIT_WORK(&fw_work->work, request_firmware_work_func, fw_work); - schedule_work(&fw_work->work); + ret = kernel_thread(request_firmware_work_func, fw_work, + CLONE_FS | CLONE_FILES); + + if (ret < 0) { + fw_work->cont(NULL, fw_work->context); + return ret; + } return 0; } diff -Nru a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c --- a/drivers/block/ll_rw_blk.c Wed Oct 22 10:40:01 2003 +++ b/drivers/block/ll_rw_blk.c Wed Oct 22 10:40:01 2003 @@ -240,11 +240,40 @@ INIT_LIST_HEAD(&q->plug_list); blk_queue_activity_fn(q, NULL, NULL); + + q->ordered = QUEUE_ORDERED_NONE; } EXPORT_SYMBOL(blk_queue_make_request); /** + * blk_queue_ordered - does this queue support ordered writes + * @q: the request queue + * @flag: see below + * + * Description: + * For journalled file systems, doing ordered writes on a commit + * block instead of explicitly doing wait_on_buffer (which is bad + * for performance) can be a big win. Block drivers supporting this + * feature should call this function and indicate so. 
+ * + * SCSI drivers usually need to support ordered tags, while others + * may have to do a complete drive cache flush if they are using write + * back caching (or not and lying about it) + * + * With this in mind, the values are + * QUEUE_ORDERED_NONE: the default, doesn't support barrier + * QUEUE_ORDERED_TAG: supports ordered tags + * QUEUE_ORDERED_FLUSH: supports barrier through cache flush + **/ +void blk_queue_ordered(request_queue_t *q, int flag) +{ + q->ordered = flag; +} + +EXPORT_SYMBOL(blk_queue_ordered); + +/** * blk_queue_bounce_limit - set bounce buffer limit for queue * @q: the request queue for the device * @dma_addr: bus address limit @@ -1820,6 +1849,8 @@ if (unlikely(!q)) return; + + WARN_ON(!req->ref_count); if (unlikely(--req->ref_count)) return; @@ -1986,7 +2017,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) { struct request *req, *freereq = NULL; - int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, ra; + int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, ra, err; sector_t sector; sector = bio->bi_sector; @@ -2004,7 +2035,11 @@ spin_lock_prefetch(q->queue_lock); - barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw); + barrier = bio_barrier(bio); + if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) { + err = -EOPNOTSUPP; + goto end_io; + } ra = bio->bi_rw & (1 << BIO_RW_AHEAD); @@ -2086,6 +2121,7 @@ /* * READA bit set */ + err = -EWOULDBLOCK; if (ra) goto end_io; @@ -2141,7 +2177,7 @@ return 0; end_io: - bio_endio(bio, nr_sectors << 9, -EWOULDBLOCK); + bio_endio(bio, nr_sectors << 9, err); return 0; } diff -Nru a/drivers/block/loop.c b/drivers/block/loop.c --- a/drivers/block/loop.c Wed Oct 22 10:40:07 2003 +++ b/drivers/block/loop.c Wed Oct 22 10:40:07 2003 @@ -55,6 +55,7 @@ #include #include #include +#include #include #include #include @@ -1124,6 +1125,7 @@ MODULE_PARM(max_loop, "i"); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); 
int loop_register_transfer(struct loop_func_table *funcs) { diff -Nru a/drivers/char/Kconfig b/drivers/char/Kconfig --- a/drivers/char/Kconfig Wed Oct 22 10:40:01 2003 +++ b/drivers/char/Kconfig Wed Oct 22 10:40:01 2003 @@ -373,6 +373,22 @@ If you have an Alchemy AU1000 processor (MIPS based) and you want to use serial ports, say Y. Otherwise, say N. +config SGI_L1_SERIAL + bool "SGI Altix L1 serial support" + depends on SERIAL_NONSTANDARD && IA64 + help + If you have an SGI Altix and you want to use the serial port + connected to the system controller (you want this!), say Y. + Otherwise, say N. + +config SGI_L1_SERIAL_CONSOLE + bool "SGI Altix L1 serial console support" + depends on SGI_L1_SERIAL + help + If you have an SGI Altix and you would like to use the system + controller serial port as your console (you want this!), + say Y. Otherwise, say N. + config AU1000_SERIAL_CONSOLE bool "Enable Au1000 serial console" depends on AU1000_UART diff -Nru a/drivers/char/Makefile b/drivers/char/Makefile --- a/drivers/char/Makefile Wed Oct 22 10:40:09 2003 +++ b/drivers/char/Makefile Wed Oct 22 10:40:09 2003 @@ -42,6 +42,7 @@ obj-$(CONFIG_SH_SCI) += sh-sci.o generic_serial.o obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o obj-$(CONFIG_RAW_DRIVER) += raw.o +obj-$(CONFIG_SGI_L1_SERIAL) += sn_serial.o obj-$(CONFIG_PRINTER) += lp.o obj-$(CONFIG_TIPAR) += tipar.o diff -Nru a/drivers/char/sn_serial.c b/drivers/char/sn_serial.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/sn_serial.c Wed Oct 22 10:40:11 2003 @@ -0,0 +1,1000 @@ +/* + * C-Brick Serial Port (and console) driver for SGI Altix machines. + * + * This driver is NOT suitable for talking to the l1-controller for + * anything other than 'console activities' --- please use the l1 + * driver for that. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* this is needed for get_console_nasid */ +#include +#include + +#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +static char sysrq_serial_str[] = "\eSYS"; +static char *sysrq_serial_ptr = sysrq_serial_str; +static unsigned long sysrq_requested; +#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */ + +/* minor device number */ +#define SN_SAL_MINOR 64 + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 128 + +/* number of characters we can transmit to the SAL console at a time */ +#define SN_SAL_MAX_CHARS 120 + +#define SN_SAL_EVENT_WRITE_WAKEUP 0 + +/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to + * avoid losing chars, (always has to be a power of 2) */ +#define SN_SAL_BUFFER_SIZE (64 * (1 << 10)) + +#define SN_SAL_UART_FIFO_DEPTH 16 +#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10 + +/* we don't kmalloc/get_free_page these as we want them available + * before either of those are initialized */ +static char sn_xmit_buff_mem[SN_SAL_BUFFER_SIZE]; + +struct volatile_circ_buf { + char *cb_buf; + int cb_head; + int cb_tail; +}; + +static struct volatile_circ_buf xmit = { .cb_buf = sn_xmit_buff_mem }; +static char sn_tmp_buffer[SN_SAL_BUFFER_SIZE]; + +static struct tty_struct *sn_sal_tty; + +static struct timer_list sn_sal_timer; +static int sn_sal_event; /* event type for task queue */ + +static int sn_sal_is_asynch; +static int sn_sal_irq; +static spinlock_t sn_sal_lock = SPIN_LOCK_UNLOCKED; +static int sn_total_tx_count; +static int sn_total_rx_count; + +static void sn_sal_tasklet_action(unsigned long data); +static DECLARE_TASKLET(sn_sal_tasklet, sn_sal_tasklet_action, 0); + +static unsigned long sn_interrupt_timeout; + +extern u64 master_node_bedrock_address; + +static int 
sn_debug_printf(const char *fmt, ...); + +#undef DEBUG +#ifdef DEBUG +#define DPRINTF(x...) sn_debug_printf(x) +#else +#define DPRINTF(x...) do { } while (0) +#endif + +struct sn_sal_ops { + int (*sal_puts)(const char *s, int len); + int (*sal_getc)(void); + int (*sal_input_pending)(void); + void (*sal_wakeup_transmit)(void); +}; + +/* This is the pointer used. It is assigned to point to one of + * the tables below. + */ +static struct sn_sal_ops *sn_func; + +/* Prototypes */ +static void __init sn_sal_serial_console_init(void); +static int snt_hw_puts(const char *, int); +static int snt_poll_getc(void); +static int snt_poll_input_pending(void); +static int snt_sim_puts(const char *, int); +static int snt_sim_getc(void); +static int snt_sim_input_pending(void); +static int snt_intr_getc(void); +static int snt_intr_input_pending(void); +static void sn_intr_transmit_chars(void); + +/* A table for polling */ +static struct sn_sal_ops poll_ops = { + .sal_puts = snt_hw_puts, + .sal_getc = snt_poll_getc, + .sal_input_pending = snt_poll_input_pending +}; + +/* A table for the simulator */ +static struct sn_sal_ops sim_ops = { + .sal_puts = snt_sim_puts, + .sal_getc = snt_sim_getc, + .sal_input_pending = snt_sim_input_pending +}; + +/* A table for interrupts enabled */ +static struct sn_sal_ops intr_ops = { + .sal_puts = snt_hw_puts, + .sal_getc = snt_intr_getc, + .sal_input_pending = snt_intr_input_pending, + .sal_wakeup_transmit = sn_intr_transmit_chars +}; + + +/* the console does output in two distinctly different ways: + * synchronous and asynchronous (buffered). initally, early_printk + * does synchronous output. any data written goes directly to the SAL + * to be output (incidentally, it is internally buffered by the SAL) + * after interrupts and timers are initialized and available for use, + * the console init code switches to asynchronous output. this is + * also the earliest opportunity to begin polling for console input. 
+ * after console initialization, console output and tty (serial port) + * output is buffered and sent to the SAL asynchronously (either by + * timer callback or by UART interrupt) */ + + +/* routines for running the console in polling mode */ + +static int +snt_hw_puts(const char *s, int len) +{ + /* looking at the PROM source code, putb calls the flush + * routine, so if we send characters in FIFO sized chunks, it + * should go out by the next time the timer gets called */ + return ia64_sn_console_putb(s, len); +} + +static int +snt_poll_getc(void) +{ + int ch; + ia64_sn_console_getc(&ch); + return ch; +} + +static int +snt_poll_input_pending(void) +{ + int status, input; + + status = ia64_sn_console_check(&input); + return !status && input; +} + + +/* routines for running the console on the simulator */ + +static int +snt_sim_puts(const char *str, int count) +{ + int counter = count; + +#ifdef FLAG_DIRECT_CONSOLE_WRITES + /* This is an easy way to pre-pend the output to know whether the output + * was done via sal or directly */ + writeb('[', master_node_bedrock_address + (UART_TX << 3)); + writeb('+', master_node_bedrock_address + (UART_TX << 3)); + writeb(']', master_node_bedrock_address + (UART_TX << 3)); + writeb(' ', master_node_bedrock_address + (UART_TX << 3)); +#endif /* FLAG_DIRECT_CONSOLE_WRITES */ + while (counter > 0) { + writeb(*str, master_node_bedrock_address + (UART_TX << 3)); + counter--; + str++; + } + + return count; +} + +static int +snt_sim_getc(void) +{ + return readb(master_node_bedrock_address + (UART_RX << 3)); +} + +static int +snt_sim_input_pending(void) +{ + return readb(master_node_bedrock_address + (UART_LSR << 3)) & UART_LSR_DR; +} + + +/* routines for an interrupt driven console (normal) */ + +static int +snt_intr_getc(void) +{ + return ia64_sn_console_readc(); +} + +static int +snt_intr_input_pending(void) +{ + return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV; +} + +/* The early printk (possible setup) and function 
call */ + +void +early_printk_sn_sal(const char *s, unsigned count) +{ + extern void early_sn_setup(void); + + if (!sn_func) { + if (IS_RUNNING_ON_SIMULATOR()) + sn_func = &sim_ops; + else + sn_func = &poll_ops; + + early_sn_setup(); + } + sn_func->sal_puts(s, count); +} + +/* this is as "close to the metal" as we can get, used when the driver + * itself may be broken */ +static int +sn_debug_printf(const char *fmt, ...) +{ + static char printk_buf[1024]; + int printed_len; + va_list args; + + va_start(args, fmt); + printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args); + early_printk_sn_sal(printk_buf, printed_len); + va_end(args); + return printed_len; +} + +/* + * Interrupt handling routines. + */ + +static void +sn_sal_sched_event(int event) +{ + sn_sal_event |= (1 << event); + tasklet_schedule(&sn_sal_tasklet); +} + +/* sn_receive_chars can be called before sn_sal_tty is initialized. in + * that case, its only use is to trigger sysrq and kdb */ +static void +sn_receive_chars(struct pt_regs *regs, unsigned long *flags) +{ + int ch; + + while (sn_func->sal_input_pending()) { + ch = sn_func->sal_getc(); + if (ch < 0) { + printk(KERN_ERR "sn_serial: An error occured while " + "obtaining data from the console (0x%0x)\n", ch); + break; + } +#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) + if (sysrq_requested) { + unsigned long sysrq_timeout = sysrq_requested + HZ*5; + + sysrq_requested = 0; + if (ch && time_before(jiffies, sysrq_timeout)) { + spin_unlock_irqrestore(&sn_sal_lock, *flags); + handle_sysrq(ch, regs, NULL); + spin_lock_irqsave(&sn_sal_lock, *flags); + /* don't record this char */ + continue; + } + } + if (ch == *sysrq_serial_ptr) { + if (!(*++sysrq_serial_ptr)) { + sysrq_requested = jiffies; + sysrq_serial_ptr = sysrq_serial_str; + } + } + else + sysrq_serial_ptr = sysrq_serial_str; +#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */ + + /* record the character to pass up to the tty layer */ + if 
(sn_sal_tty) { + *sn_sal_tty->flip.char_buf_ptr = ch; + sn_sal_tty->flip.char_buf_ptr++; + sn_sal_tty->flip.count++; + if (sn_sal_tty->flip.count == TTY_FLIPBUF_SIZE) + break; + } + sn_total_rx_count++; + } + + if (sn_sal_tty) + tty_flip_buffer_push((struct tty_struct *)sn_sal_tty); +} + + +/* synch_flush_xmit must be called with sn_sal_lock */ +static void +synch_flush_xmit(void) +{ + int xmit_count, tail, head, loops, ii; + int result; + char *start; + + if (xmit.cb_head == xmit.cb_tail) + return; /* Nothing to do. */ + + head = xmit.cb_head; + tail = xmit.cb_tail; + start = &xmit.cb_buf[tail]; + + /* twice around gets the tail to the end of the buffer and + * then to the head, if needed */ + loops = (head < tail) ? 2 : 1; + + for (ii = 0; ii < loops; ii++) { + xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail); + + if (xmit_count > 0) { + result = sn_func->sal_puts((char *)start, xmit_count); + if (!result) + sn_debug_printf("\n*** synch_flush_xmit failed to flush\n"); + if (result > 0) { + xmit_count -= result; + sn_total_tx_count += result; + tail += result; + tail &= SN_SAL_BUFFER_SIZE - 1; + xmit.cb_tail = tail; + start = (char *)&xmit.cb_buf[tail]; + } + } + } +} + +/* must be called with a lock protecting the circular buffer and + * sn_sal_tty */ +static void +sn_poll_transmit_chars(void) +{ + int xmit_count, tail, head; + int result; + char *start; + + BUG_ON(!sn_sal_is_asynch); + + if (xmit.cb_head == xmit.cb_tail || + (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) { + /* Nothing to do. */ + return; + } + + head = xmit.cb_head; + tail = xmit.cb_tail; + start = &xmit.cb_buf[tail]; + + xmit_count = (head < tail) ? 
(SN_SAL_BUFFER_SIZE - tail) : (head - tail); + + if (xmit_count == 0) + sn_debug_printf("\n*** empty xmit_count\n"); + + /* use the ops, as we could be on the simulator */ + result = sn_func->sal_puts((char *)start, xmit_count); + if (!result) + sn_debug_printf("\n*** error in synchronous sal_puts\n"); + /* XXX chadt clean this up */ + if (result > 0) { + xmit_count -= result; + sn_total_tx_count += result; + tail += result; + tail &= SN_SAL_BUFFER_SIZE - 1; + xmit.cb_tail = tail; + start = &xmit.cb_buf[tail]; + } + + /* if there's few enough characters left in the xmit buffer + * that we could stand for the upper layer to send us some + * more, ask for it. */ + if (sn_sal_tty) + if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS) + sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP); +} + + +/* must be called with a lock protecting the circular buffer and + * sn_sal_tty */ +static void +sn_intr_transmit_chars(void) +{ + int xmit_count, tail, head, loops, ii; + int result; + char *start; + + BUG_ON(!sn_sal_is_asynch); + + if (xmit.cb_head == xmit.cb_tail || + (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) { + /* Nothing to do. */ + return; + } + + head = xmit.cb_head; + tail = xmit.cb_tail; + start = &xmit.cb_buf[tail]; + + /* twice around gets the tail to the end of the buffer and + * then to the head, if needed */ + loops = (head < tail) ? 2 : 1; + + for (ii = 0; ii < loops; ii++) { + xmit_count = (head < tail) ? 
+ (SN_SAL_BUFFER_SIZE - tail) : (head - tail); + + if (xmit_count > 0) { + result = ia64_sn_console_xmit_chars((char *)start, xmit_count); +#ifdef DEBUG + if (!result) + sn_debug_printf("`"); +#endif + if (result > 0) { + xmit_count -= result; + sn_total_tx_count += result; + tail += result; + tail &= SN_SAL_BUFFER_SIZE - 1; + xmit.cb_tail = tail; + start = &xmit.cb_buf[tail]; + } + } + } + + /* if there's few enough characters left in the xmit buffer + * that we could stand for the upper layer to send us some + * more, ask for it. */ + if (sn_sal_tty) + if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS) + sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP); +} + + +static irqreturn_t +sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + /* this call is necessary to pass the interrupt back to the + * SAL, since it doesn't intercept the UART interrupts + * itself */ + int status = ia64_sn_console_intr_status(); + unsigned long flags; + + spin_lock_irqsave(&sn_sal_lock, flags); + if (status & SAL_CONSOLE_INTR_RECV) + sn_receive_chars(regs, &flags); + if (status & SAL_CONSOLE_INTR_XMIT) + sn_intr_transmit_chars(); + spin_unlock_irqrestore(&sn_sal_lock, flags); + return IRQ_HANDLED; +} + + +/* returns the console irq if interrupt is successfully registered, + * else 0 */ +static int +sn_sal_connect_interrupt(void) +{ + cpuid_t intr_cpuid; + unsigned int intr_cpuloc; + nasid_t console_nasid; + unsigned int console_irq; + int result; + + console_nasid = ia64_sn_get_console_nasid(); + intr_cpuid = NODEPDA(NASID_TO_COMPACT_NODEID(console_nasid))->node_first_cpu; + intr_cpuloc = cpu_physical_id(intr_cpuid); + console_irq = CPU_VECTOR_TO_IRQ(intr_cpuloc, SGI_UART_VECTOR); + + result = intr_connect_level(intr_cpuid, SGI_UART_VECTOR); + BUG_ON(result != SGI_UART_VECTOR); + + result = request_irq(console_irq, sn_sal_interrupt, SA_INTERRUPT, "SAL console driver", &sn_sal_tty); + if (result >= 0) + return console_irq; + + printk(KERN_INFO 
"sn_serial: console proceeding in polled mode\n"); + return 0; +} + +static void +sn_sal_tasklet_action(unsigned long data) +{ + unsigned long flags; + + if (sn_sal_tty) { + spin_lock_irqsave(&sn_sal_lock, flags); + if (sn_sal_tty) { + if (test_and_clear_bit(SN_SAL_EVENT_WRITE_WAKEUP, &sn_sal_event)) { + if ((sn_sal_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && sn_sal_tty->ldisc.write_wakeup) + (sn_sal_tty->ldisc.write_wakeup)((struct tty_struct *)sn_sal_tty); + wake_up_interruptible((wait_queue_head_t *)&sn_sal_tty->write_wait); + } + } + spin_unlock_irqrestore(&sn_sal_lock, flags); + } +} + + +/* + * This function handles polled mode. + */ +static void +sn_sal_timer_poll(unsigned long dummy) +{ + unsigned long flags; + + if (!sn_sal_irq) { + spin_lock_irqsave(&sn_sal_lock, flags); + sn_receive_chars(NULL, &flags); + sn_poll_transmit_chars(); + spin_unlock_irqrestore(&sn_sal_lock, flags); + mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout); + } +} + + +/* + * User-level console routines + */ + +static int +sn_sal_open(struct tty_struct *tty, struct file *filp) +{ + unsigned long flags; + + DPRINTF("sn_sal_open: sn_sal_tty = %p, tty = %p, filp = %p\n", + sn_sal_tty, tty, filp); + + spin_lock_irqsave(&sn_sal_lock, flags); + if (!sn_sal_tty) + sn_sal_tty = tty; + spin_unlock_irqrestore(&sn_sal_lock, flags); + + return 0; +} + + +/* We're keeping all our resources. We're keeping interrupts turned + * on. Maybe just let the tty layer finish its stuff...? 
GMSH + */ +static void +sn_sal_close(struct tty_struct *tty, struct file * filp) +{ + if (tty->count == 1) { + unsigned long flags; + tty->closing = 1; + if (tty->driver->flush_buffer) + tty->driver->flush_buffer(tty); + if (tty->ldisc.flush_buffer) + tty->ldisc.flush_buffer(tty); + tty->closing = 0; + spin_lock_irqsave(&sn_sal_lock, flags); + sn_sal_tty = NULL; + spin_unlock_irqrestore(&sn_sal_lock, flags); + } +} + + +static int +sn_sal_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count) +{ + int c, ret = 0; + unsigned long flags; + + if (from_user) { + while (1) { + int c1; + c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, + SN_SAL_BUFFER_SIZE); + + if (count < c) + c = count; + if (c <= 0) + break; + + c -= copy_from_user(sn_tmp_buffer, buf, c); + if (!c) { + if (!ret) + ret = -EFAULT; + break; + } + + /* Turn off interrupts and see if the xmit buffer has + * moved since the last time we looked. + */ + spin_lock_irqsave(&sn_sal_lock, flags); + c1 = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE); + + if (c1 < c) + c = c1; + + memcpy(xmit.cb_buf + xmit.cb_head, sn_tmp_buffer, c); + xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1)); + spin_unlock_irqrestore(&sn_sal_lock, flags); + + buf += c; + count -= c; + ret += c; + } + } + else { + /* The buffer passed in isn't coming from userland, + * so cut out the middleman (sn_tmp_buffer). 
+ */ + spin_lock_irqsave(&sn_sal_lock, flags); + while (1) { + c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE); + + if (count < c) + c = count; + if (c <= 0) { + break; + } + memcpy(xmit.cb_buf + xmit.cb_head, buf, c); + xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1)); + buf += c; + count -= c; + ret += c; + } + spin_unlock_irqrestore(&sn_sal_lock, flags); + } + + spin_lock_irqsave(&sn_sal_lock, flags); + if (xmit.cb_head != xmit.cb_tail && !(tty && (tty->stopped || tty->hw_stopped))) + if (sn_func->sal_wakeup_transmit) + sn_func->sal_wakeup_transmit(); + spin_unlock_irqrestore(&sn_sal_lock, flags); + + return ret; +} + + +static void +sn_sal_put_char(struct tty_struct *tty, unsigned char ch) +{ + unsigned long flags; + + spin_lock_irqsave(&sn_sal_lock, flags); + if (CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) != 0) { + xmit.cb_buf[xmit.cb_head] = ch; + xmit.cb_head = (xmit.cb_head + 1) & (SN_SAL_BUFFER_SIZE-1); + if ( sn_func->sal_wakeup_transmit ) + sn_func->sal_wakeup_transmit(); + } + spin_unlock_irqrestore(&sn_sal_lock, flags); +} + + +static void +sn_sal_flush_chars(struct tty_struct *tty) +{ + unsigned long flags; + + spin_lock_irqsave(&sn_sal_lock, flags); + if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE)) + if (sn_func->sal_wakeup_transmit) + sn_func->sal_wakeup_transmit(); + spin_unlock_irqrestore(&sn_sal_lock, flags); +} + + +static int +sn_sal_write_room(struct tty_struct *tty) +{ + unsigned long flags; + int space; + + spin_lock_irqsave(&sn_sal_lock, flags); + space = CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE); + spin_unlock_irqrestore(&sn_sal_lock, flags); + return space; +} + + +static int +sn_sal_chars_in_buffer(struct tty_struct *tty) +{ + unsigned long flags; + int space; + + spin_lock_irqsave(&sn_sal_lock, flags); + space = CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE); + DPRINTF("<%d>", space); + spin_unlock_irqrestore(&sn_sal_lock, flags); + return 
space; +} + + +static void +sn_sal_flush_buffer(struct tty_struct *tty) +{ + unsigned long flags; + + /* drop everything */ + spin_lock_irqsave(&sn_sal_lock, flags); + xmit.cb_head = xmit.cb_tail = 0; + spin_unlock_irqrestore(&sn_sal_lock, flags); + + /* wake up tty level */ + wake_up_interruptible(&tty->write_wait); + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); +} + + +static void +sn_sal_hangup(struct tty_struct *tty) +{ + sn_sal_flush_buffer(tty); +} + + +static void +sn_sal_wait_until_sent(struct tty_struct *tty, int timeout) +{ + /* this is SAL's problem */ + DPRINTF(""); +} + + +/* + * sn_sal_read_proc + * + * Console /proc interface + */ + +static int +sn_sal_read_proc(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int len = 0; + off_t begin = 0; + + len += sprintf(page, "sn_serial: nasid:%d irq:%d tx:%d rx:%d\n", + get_console_nasid(), sn_sal_irq, + sn_total_tx_count, sn_total_rx_count); + *eof = 1; + + if (off >= len+begin) + return 0; + *start = page + (off-begin); + + return count < begin+len-off ? 
count : begin+len-off; +} + + +static struct tty_operations sn_sal_driver_ops = { + .open = sn_sal_open, + .close = sn_sal_close, + .write = sn_sal_write, + .put_char = sn_sal_put_char, + .flush_chars = sn_sal_flush_chars, + .write_room = sn_sal_write_room, + .chars_in_buffer = sn_sal_chars_in_buffer, + .hangup = sn_sal_hangup, + .wait_until_sent = sn_sal_wait_until_sent, + .read_proc = sn_sal_read_proc, +}; +static struct tty_driver *sn_sal_driver; + +/* sn_sal_init wishlist: + * - allocate sn_tmp_buffer + * - fix up the tty_driver struct + * - turn on receive interrupts + * - do any termios twiddling once and for all + */ + +/* + * Boot-time initialization code + */ + +static void __init +sn_sal_switch_to_asynch(void) +{ + unsigned long flags; + + sn_debug_printf("sn_serial: about to switch to asynchronous console\n"); + + /* without early_printk, we may be invoked late enough to race + * with other cpus doing console IO at this point, however + * console interrupts will never be enabled */ + spin_lock_irqsave(&sn_sal_lock, flags); + + /* early_printk invocation may have done this for us */ + if (!sn_func) { + if (IS_RUNNING_ON_SIMULATOR()) + sn_func = &sim_ops; + else + sn_func = &poll_ops; + } + + /* we can't turn on the console interrupt (as request_irq + * calls kmalloc, which isn't set up yet), so we rely on a + * timer to poll for input and push data from the console + * buffer. 
+ */ + init_timer(&sn_sal_timer); + sn_sal_timer.function = sn_sal_timer_poll; + + if (IS_RUNNING_ON_SIMULATOR()) + sn_interrupt_timeout = 6; + else { + /* 960cps / 16 char FIFO = 60HZ + * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */ + sn_interrupt_timeout = HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS; + } + mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout); + + sn_sal_is_asynch = 1; + spin_unlock_irqrestore(&sn_sal_lock, flags); +} + +static void __init +sn_sal_switch_to_interrupts(void) +{ + int irq; + + sn_debug_printf("sn_serial: switching to interrupt driven console\n"); + + irq = sn_sal_connect_interrupt(); + if (irq) { + unsigned long flags; + spin_lock_irqsave(&sn_sal_lock, flags); + + /* sn_sal_irq is a global variable. When it's set to + * a non-zero value, we stop polling for input (since + * interrupts should now be enabled). */ + sn_sal_irq = irq; + sn_func = &intr_ops; + + /* turn on receive interrupts */ + ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV); + spin_unlock_irqrestore(&sn_sal_lock, flags); + } +} + +static int __init +sn_sal_module_init(void) +{ + int retval; + + printk("sn_serial: sn_sal_module_init\n"); + + if (!ia64_platform_is("sn2")) + return -ENODEV; + + sn_sal_driver = alloc_tty_driver(1); + if ( !sn_sal_driver ) + return -ENOMEM; + + sn_sal_driver->owner = THIS_MODULE; + sn_sal_driver->driver_name = "sn_serial"; + sn_sal_driver->name = "ttyS"; + sn_sal_driver->major = TTY_MAJOR; + sn_sal_driver->minor_start = SN_SAL_MINOR; + sn_sal_driver->type = TTY_DRIVER_TYPE_SERIAL; + sn_sal_driver->subtype = SERIAL_TYPE_NORMAL; + sn_sal_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS; + + tty_set_operations(sn_sal_driver, &sn_sal_driver_ops); + + /* when this driver is compiled in, the console initialization + * will have already switched us into asynchronous operation + * before we get here through the module initcalls */ + if (!sn_sal_is_asynch) + sn_sal_switch_to_asynch(); + + /* at this point 
(module_init) we can try to turn on interrupts */ + if (!IS_RUNNING_ON_SIMULATOR()) + sn_sal_switch_to_interrupts(); + + sn_sal_driver->init_termios = tty_std_termios; + sn_sal_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; + + if ((retval = tty_register_driver(sn_sal_driver))) { + printk(KERN_ERR "sn_serial: Unable to register tty driver\n"); + return retval; + } +#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE + sn_sal_serial_console_init(); +#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE */ + return 0; +} + + +static void __exit +sn_sal_module_exit(void) +{ + del_timer_sync(&sn_sal_timer); + tty_unregister_driver(sn_sal_driver); + put_tty_driver(sn_sal_driver); +} + +module_init(sn_sal_module_init); +module_exit(sn_sal_module_exit); + +/* + * Kernel console definitions + */ + +#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE +/* + * Print a string to the SAL console. The console_lock must be held + * when we get here. + */ +static void +sn_sal_console_write(struct console *co, const char *s, unsigned count) +{ + unsigned long flags; + + BUG_ON(!sn_sal_is_asynch); + + /* somebody really wants this output, might be an + * oops, kdb, panic, etc. make sure they get it. 
*/ + if (spin_is_locked(&sn_sal_lock)) { + synch_flush_xmit(); + sn_func->sal_puts(s, count); + } + else if (in_interrupt()) { + spin_lock_irqsave(&sn_sal_lock, flags); + synch_flush_xmit(); + spin_unlock_irqrestore(&sn_sal_lock, flags); + sn_func->sal_puts(s, count); + } + else + sn_sal_write(NULL, 0, s, count); +} + +static struct tty_driver * +sn_sal_console_device(struct console *c, int *index) +{ + *index = c->index; + return sn_sal_driver; +} + +static int __init +sn_sal_console_setup(struct console *co, char *options) +{ + return 0; +} + + +static struct console sal_console = { + .name = "ttyS", + .write = sn_sal_console_write, + .device = sn_sal_console_device, + .setup = sn_sal_console_setup, + .index = -1 +}; + +static void __init +sn_sal_serial_console_init(void) +{ + if (ia64_platform_is("sn2")) { + sn_sal_switch_to_asynch(); + sn_debug_printf("sn_sal_serial_console_init : register console\n"); + register_console(&sal_console); + } +} + +#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE */ diff -Nru a/drivers/dump/Makefile b/drivers/dump/Makefile --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/Makefile Wed Oct 22 10:40:10 2003 @@ -0,0 +1,15 @@ +# +# Makefile for the dump device drivers. +# + +dump-y := dump_setup.o dump_fmt.o dump_filters.o dump_scheme.o dump_execute.o +dump-$(CONFIG_X86) += dump_i386.o +dump-$(CONFIG_ARM) += dump_arm.o +dump-$(CONFIG_CRASH_DUMP_MEMDEV) += dump_memdev.o dump_overlay.o +dump-objs += $(dump-y) + +obj-$(CONFIG_CRASH_DUMP) += dump.o +obj-$(CONFIG_CRASH_DUMP_BLOCKDEV) += dump_blockdev.o +obj-$(CONFIG_CRASH_DUMP_NETDEV) += dump_netdev.o +obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE) += dump_rle.o +obj-$(CONFIG_CRASH_DUMP_COMPRESS_GZIP) += dump_gzip.o diff -Nru a/drivers/dump/dump_arm.c b/drivers/dump/dump_arm.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_arm.c Wed Oct 22 10:40:11 2003 @@ -0,0 +1,234 @@ +/* + * Architecture specific (ARM/XScale) functions for Linux crash dumps. 
+ * + * Created by: Fleming Feng (fleming.feng@intel.com) + * + * Copyright(C) 2003 Intel Corp. All rights reserved. + * + * This code is released under version 2 of the GNU GPL. + */ + +/* + * The hooks for dumping the kernel virtual memory to disk are in this + * file. Any time a modification is made to the virtual memory mechanism, + * these routines must be changed to use the new mechanisms. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static __s32 saved_irq_count; /* saved preempt_count() flags */ + +static int alloc_dha_stack(void) +{ + int i; + void *ptr; + + if (dump_header_asm.dha_stack[0]) + return 0; + + ptr = vmalloc(THREAD_SIZE * num_online_cpus()); + if (!ptr) { + printk("vmalloc for dha_stacks failed\n"); + return -ENOMEM; + } + + for( i = 0; i < num_online_cpus(); i++){ + dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr + + (i * THREAD_SIZE)); + } + + return 0; +} + +static int free_dha_stack(void) +{ + if (dump_header_asm.dha_stack[0]){ + vfree((void*)dump_header_asm.dha_stack[0]); + dump_header_asm.dha_stack[0] = 0; + } + return 0; +} + +void __dump_save_regs(struct pt_regs* dest_regs, const struct pt_regs* regs) +{ + + /* Here, because the arm version uses _dump_regs_t, + * instead of pt_regs in dump_header_asm, while the + * the function is defined inside architecture independent + * header file include/linux/dump.h, the size of block of + * memory copied is not equal to pt_regs. + */ + + memcpy(dest_regs, regs, sizeof(_dump_regs_t)); + +} + +#ifdef CONFIG_SMP +/* FIXME: This is reserved for possible future usage for SMP system + * based on ARM/XScale. Currently, there is no information for an + * SMP system based on ARM/XScale, they are not used! + */ +/* save registers on other processor */ +void +__dump_save_other_cpus(void) +{ + + /* Dummy now! 
*/ + + return; + +} +#else /* !CONFIG_SMP */ +#define save_other_cpu_state() do { } while (0) +#endif /* !CONFIG_SMP */ + +/* + * Kludge - dump from interrupt context is unreliable (Fixme) + * + * We do this so that softirqs initiated for dump i/o + * get processed and we don't hang while waiting for i/o + * to complete or in any irq synchronization attempt. + * + * This is not quite legal of course, as it has the side + * effect of making all interrupts & softirqs triggered + * while dump is in progress complete before currently + * pending softirqs and the currently executing interrupt + * code. + */ +static inline void +irq_bh_save(void) +{ + saved_irq_count = irq_count(); + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK); +} + +static inline void +irq_bh_restore(void) +{ + preempt_count() |= saved_irq_count; +} + +/* + * Name: __dump_irq_enable + * Func: Reset system so interrupts are enabled. + * This is used for dump methods that requires interrupts + * Eventually, all methods will have interrupts disabled + * and this code can be removed. + * + * Re-enable interrupts + */ +void +__dump_irq_enable(void) +{ + irq_bh_save(); + local_irq_enable(); +} + +/* Name: __dump_irq_restore + * Func: Resume the system state in an architecture-specific way. + */ +void +__dump_irq_restore(void) +{ + local_irq_disable(); + irq_bh_restore(); +} + + +/* + * Name: __dump_configure_header() + * Func: Meant to fill in arch specific header fields except per-cpu state + * already captured in dump_lcrash_configure_header. 
+ */ +int +__dump_configure_header(const struct pt_regs *regs) +{ + return (0); +} + +/* + * Name: dump_die_event + * Func: Called from notify_die + */ +static int dump_die_event(struct notifier_block* this, + unsigned long event, + void* arg) +{ + const struct die_args* args = (const struct die_args*)arg; + + switch(event){ + case DIE_PANIC: + case DIE_OOPS: + case DIE_WATCHDOG: + dump_execute(args->str, args->regs); + break; + } + return NOTIFY_DONE; + +} + +static struct notifier_block dump_die_block = { + .notifier_call = dump_die_event, +}; + +/* Name: __dump_init() + * Func: Initialize the dumping routine process. + */ +void +__dump_init(uint64_t local_memory_start) +{ + /* hook into PANIC and OOPS */ + register_die_notifier(&dump_die_block); +} + +/* + * Name: __dump_open() + * Func: Open the dump device (architecture specific). This is in + * case it's necessary in the future. + */ +void +__dump_open(void) +{ + + alloc_dha_stack(); + + return; +} + +/* + * Name: __dump_cleanup() + * Func: Free any architecture specific data structures. This is called + * when the dump module is being removed. + */ +void +__dump_cleanup(void) +{ + free_dha_stack(); + unregister_die_notifier(&dump_die_block); + + /* return */ + return; +} + +/* + * Name: __dump_page_valid() + * Func: Check if page is valid to dump. + */ +int +__dump_page_valid(unsigned long index) +{ + if(!pfn_valid(index)) + return 0; + else + return 1; +} + diff -Nru a/drivers/dump/dump_execute.c b/drivers/dump/dump_execute.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_execute.c Wed Oct 22 10:40:11 2003 @@ -0,0 +1,126 @@ +/* + * The file has the common/generic dump execution code + * + * Started: Oct 2002 - Suparna Bhattacharya + * Split and rewrote high level dump execute code to make use + * of dump method interfaces. + * + * Derived from original code in dump_base.c created by + * Matt Robinson ) + * + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. 
+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved. + * Copyright (C) 2002 International Business Machines Corp. + * + * Assumes dumper and dump config settings are in place + * (invokes corresponding dumper specific routines as applicable) + * + * This code is released under version 2 of the GNU GPL. + */ +#include +#include +#include +#include "dump_methods.h" + +struct notifier_block *dump_notifier_list; /* dump started/ended callback */ + +/* Dump progress indicator */ +void +dump_speedo(int i) +{ + static const char twiddle[4] = { '|', '\\', '-', '/' }; + printk("%c\b", twiddle[i&3]); +} + +/* Make the device ready and write out the header */ +int dump_begin(void) +{ + int err = 0; + + /* dump_dev = dump_config.dumper->dev; */ + dumper_reset(); + if ((err = dump_dev_silence())) { + /* quiesce failed, can't risk continuing */ + /* Todo/Future: switch to alternate dump scheme if possible */ + printk("dump silence dev failed ! error %d\n", err); + return err; + } + + pr_debug("Writing dump header\n"); + if ((err = dump_update_header())) { + printk("dump update header failed ! error %d\n", err); + dump_dev_resume(); + return err; + } + + dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE; + + return 0; +} + +/* + * Write the dump terminator, a final header update and let go of + * exclusive use of the device for dump. + */ +int dump_complete(void) +{ + int ret = 0; + + if (dump_config.level != DUMP_LEVEL_HEADER) { + if ((ret = dump_update_end_marker())) { + printk("dump update end marker error %d\n", ret); + } + if ((ret = dump_update_header())) { + printk("dump update header error %d\n", ret); + } + } + ret = dump_dev_resume(); + + return ret; +} + +/* Saves all dump data */ +int dump_execute_savedump(void) +{ + int ret = 0, err = 0; + + if ((ret = dump_begin())) { + return ret; + } + + if (dump_config.level != DUMP_LEVEL_HEADER) { + ret = dump_sequencer(); + } + if ((err = dump_complete())) { + printk("Dump complete failed. 
Error %d\n", err); + } + + return ret; +} + +/* Does all the real work: Capture and save state */ +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs) +{ + int ret = 0; + + if ((ret = dump_configure_header(panic_str, regs))) { + printk("dump config header failed ! error %d\n", ret); + return ret; + } + + /* tell interested parties that a dump is about to start */ + notifier_call_chain(&dump_notifier_list, DUMP_BEGIN, + &dump_config.dump_device); + + if (dump_config.level != DUMP_LEVEL_NONE) + ret = dump_execute_savedump(); + + pr_debug("dumped %ld blocks of %d bytes each\n", + dump_config.dumper->count, DUMP_BUFFER_SIZE); + + /* tell interested parties that a dump has completed */ + notifier_call_chain(&dump_notifier_list, DUMP_END, + &dump_config.dump_device); + + return ret; +} diff -Nru a/drivers/dump/dump_filters.c b/drivers/dump/dump_filters.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_filters.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,143 @@ +/* + * Default filters to select data to dump for various passes. + * + * Started: Oct 2002 - Suparna Bhattacharya + * Split and rewrote default dump selection logic to generic dump + * method interfaces + * Derived from a portion of dump_base.c created by + * Matt Robinson ) + * + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved. + * Copyright (C) 2002 International Business Machines Corp. + * + * Used during single-stage dumping and during stage 1 of the 2-stage scheme + * (Stage 2 of the 2-stage scheme uses the fully transparent filters + * i.e. passthru filters in dump_overlay.c) + * + * Future: Custom selective dump may involve a different set of filters. + * + * This code is released under version 2 of the GNU GPL. 
+ */ + +#include +#include +#include +#include +#include +#include "dump_methods.h" + + +/* Copied from mm/bootmem.c - FIXME */ +/* return the number of _pages_ that will be allocated for the boot bitmap */ +unsigned long dump_calc_bootmap_pages (void) +{ + unsigned long mapsize; + unsigned long pages = num_physpages; + + mapsize = (pages+7)/8; + mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK; + mapsize >>= PAGE_SHIFT; + + return mapsize; +} + + +#define DUMP_PFN_SAFETY_MARGIN 1024 /* 4 MB */ +/* temporary */ +extern unsigned long min_low_pfn; + + +int dump_low_page(struct page *p) +{ + return page_to_pfn(p) < min_low_pfn + dump_calc_bootmap_pages() + + 1 + DUMP_PFN_SAFETY_MARGIN; +} + +static inline int kernel_page(struct page *p) +{ + /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */ + return PageReserved(p) || (!PageLRU(p) && PageInuse(p)); +} + +static inline int user_page(struct page *p) +{ + return PageInuse(p) && (!PageReserved(p) && PageLRU(p)); +} + +static inline int unreferenced_page(struct page *p) +{ + return !PageInuse(p) && !PageReserved(p); +} + + +/* loc marks the beginning of a range of pages */ +int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz) +{ + struct page *page = (struct page *)loc; + /* if any of the pages is a kernel page, select this set */ + while (sz) { + if (dump_low_page(page) || kernel_page(page)) + return 1; + sz -= PAGE_SIZE; + page++; + } + return 0; +} + + +/* loc marks the beginning of a range of pages */ +int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz) +{ + struct page *page = (struct page *)loc; + int ret = 0; + /* select if the set has any user page, and no kernel pages */ + while (sz) { + if (user_page(page) && !dump_low_page(page)) { + ret = 1; + } else if (kernel_page(page) || dump_low_page(page)) { + return 0; + } + page++; + sz -= PAGE_SIZE; + } + return ret; +} + + + +/* loc marks the beginning of a range of pages */ +int dump_filter_unusedpages(int pass, 
unsigned long loc, unsigned long sz) +{ + struct page *page = (struct page *)loc; + + /* select if the set does not have any used pages */ + while (sz) { + if (!unreferenced_page(page) || dump_low_page(page)) { + return 0; + } + page++; + sz -= PAGE_SIZE; + } + return 1; +} + +/* dummy: last (non-existent) pass */ +int dump_filter_none(int pass, unsigned long loc, unsigned long sz) +{ + return 0; +} + +/* TBD: resolve level bitmask ? */ +struct dump_data_filter dump_filter_table[] = { + { .name = "kern", .selector = dump_filter_kernpages, + .level_mask = DUMP_MASK_KERN}, + { .name = "user", .selector = dump_filter_userpages, + .level_mask = DUMP_MASK_USED}, + { .name = "unused", .selector = dump_filter_unusedpages, + .level_mask = DUMP_MASK_UNUSED}, + { .name = "none", .selector = dump_filter_none, + .level_mask = DUMP_MASK_REST}, + { .name = "", .selector = NULL, .level_mask = 0} +}; + diff -Nru a/drivers/dump/dump_fmt.c b/drivers/dump/dump_fmt.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_fmt.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,404 @@ +/* + * Implements the routines which handle the format specific + * aspects of dump for the default dump format. + * + * Used in single stage dumping and stage 1 of soft-boot based dumping + * Saves data in LKCD (lcrash) format + * + * Previously a part of dump_base.c + * + * Started: Oct 2002 - Suparna Bhattacharya + * Split off and reshuffled LKCD dump format code around generic + * dump method interfaces. + * + * Derived from original code created by + * Matt Robinson ) + * + * Contributions from SGI, IBM, HP, MCL, and others. + * + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved. + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved. + * Copyright (C) 2002 International Business Machines Corp. + * + * This code is released under version 2 of the GNU GPL. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "dump_methods.h" + +/* + * SYSTEM DUMP LAYOUT + * + * System dumps are currently the combination of a dump header and a set + * of data pages which contain the system memory. The layout of the dump + * (for full dumps) is as follows: + * + * +-----------------------------+ + * | generic dump header | + * +-----------------------------+ + * | architecture dump header | + * +-----------------------------+ + * | page header | + * +-----------------------------+ + * | page data | + * +-----------------------------+ + * | page header | + * +-----------------------------+ + * | page data | + * +-----------------------------+ + * | | | + * | | | + * | | | + * | | | + * | V | + * +-----------------------------+ + * | PAGE_END header | + * +-----------------------------+ + * + * There are two dump headers, the first which is architecture + * independent, and the other which is architecture dependent. This + * allows different architectures to dump different data structures + * which are specific to their chipset, CPU, etc. + * + * After the dump headers come a succession of dump page headers along + * with dump pages. The page header contains information about the page + * size, any flags associated with the page (whether it's compressed or + * not), and the address of the page. After the page header is the page + * data, which is either compressed (or not). Each page of data is + * dumped in succession, until the final dump header (PAGE_END) is + * placed at the end of the dump, assuming the dump device isn't out + * of space. + * + * This mechanism allows for multiple compression types, different + * types of data structures, different page ordering, etc., etc., etc. + * It's a very straightforward mechanism for dumping system memory. 
+ */ + +struct __dump_header dump_header; /* the primary dump header */ +struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */ + +/* + * Set up common header fields (mainly the arch indep section) + * Per-cpu state is handled by lcrash_save_context + * Returns the size of the header in bytes. + */ +static int lcrash_init_dump_header(const char *panic_str) +{ + struct timeval dh_time; + u32 temp_dha_stack[DUMP_MAX_NUM_CPUS]; + + /* make sure the dump header isn't TOO big */ + if ((sizeof(struct __dump_header) + + sizeof(struct __dump_header_asm)) > DUMP_BUFFER_SIZE) { + printk("lcrash_init_header(): combined " + "headers larger than DUMP_BUFFER_SIZE!\n"); + return -E2BIG; + } + + /* initialize the dump headers to zero */ + /* save dha_stack pointer because it may contains pointer for stack! */ + memcpy(&(temp_dha_stack[0]), &(dump_header_asm.dha_stack[0]), + DUMP_MAX_NUM_CPUS * sizeof(u32)); + memset(&dump_header, 0, sizeof(dump_header)); + memset(&dump_header_asm, 0, sizeof(dump_header_asm)); + memcpy(&(dump_header_asm.dha_stack[0]), &(temp_dha_stack[0]), + DUMP_MAX_NUM_CPUS * sizeof(u32)); + + /* configure dump header values */ + dump_header.dh_magic_number = DUMP_MAGIC_NUMBER; + dump_header.dh_version = DUMP_VERSION_NUMBER; + dump_header.dh_memory_start = PAGE_OFFSET; + dump_header.dh_memory_end = DUMP_MAGIC_NUMBER; + dump_header.dh_header_size = sizeof(struct __dump_header); + dump_header.dh_page_size = PAGE_SIZE; + dump_header.dh_dump_level = dump_config.level; + dump_header.dh_current_task = (unsigned long) current; + dump_header.dh_dump_compress = dump_config.dumper->compress-> + compress_type; + dump_header.dh_dump_flags = dump_config.flags; + dump_header.dh_dump_device = dump_config.dumper->dev->device_id; + +#if DUMP_DEBUG >= 6 + dump_header.dh_num_bytes = 0; +#endif + dump_header.dh_num_dump_pages = 0; + do_gettimeofday(&dh_time); + dump_header.dh_time.tv_sec = dh_time.tv_sec; + dump_header.dh_time.tv_usec = dh_time.tv_usec; + + 
memcpy((void *)&(dump_header.dh_utsname_sysname), + (const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1); + memcpy((void *)&(dump_header.dh_utsname_nodename), + (const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1); + memcpy((void *)&(dump_header.dh_utsname_release), + (const void *)&(system_utsname.release), __NEW_UTS_LEN + 1); + memcpy((void *)&(dump_header.dh_utsname_version), + (const void *)&(system_utsname.version), __NEW_UTS_LEN + 1); + memcpy((void *)&(dump_header.dh_utsname_machine), + (const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1); + memcpy((void *)&(dump_header.dh_utsname_domainname), + (const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1); + + if (panic_str) { + memcpy((void *)&(dump_header.dh_panic_string), + (const void *)panic_str, DUMP_PANIC_LEN); + } + + dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER; + dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER; + dump_header_asm.dha_header_size = sizeof(dump_header_asm); +#ifdef CONFIG_ARM + dump_header_asm.dha_physaddr_start = PHYS_OFFSET; +#endif + + dump_header_asm.dha_smp_num_cpus = num_online_cpus(); + pr_debug("smp_num_cpus in header %d\n", + dump_header_asm.dha_smp_num_cpus); + + dump_header_asm.dha_dumping_cpu = smp_processor_id(); + + return sizeof(dump_header) + sizeof(dump_header_asm); +} + + +int dump_lcrash_configure_header(const char *panic_str, + const struct pt_regs *regs) +{ + int retval = 0; + + dump_config.dumper->header_len = lcrash_init_dump_header(panic_str); + + /* capture register states for all processors */ + dump_save_this_cpu(regs); + __dump_save_other_cpus(); /* side effect:silence cpus */ + + /* configure architecture-specific dump header values */ + if ((retval = __dump_configure_header(regs))) + return retval; + + dump_config.dumper->header_dirty++; + return 0; +} + +/* save register and task context */ +void dump_lcrash_save_context(int cpu, const struct pt_regs *regs, + struct task_struct *tsk) +{ + 
dump_header_asm.dha_smp_current_task[cpu] = (uint32_t) tsk; + + __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs); + + /* take a snapshot of the stack */ + /* doing this enables us to tolerate slight drifts on this cpu */ + if (dump_header_asm.dha_stack[cpu]) { + memcpy((void *)dump_header_asm.dha_stack[cpu], + tsk->thread_info, THREAD_SIZE); + } + dump_header_asm.dha_stack_ptr[cpu] = (uint32_t)(tsk->thread_info); +} + +/* write out the header */ +int dump_write_header(void) +{ + int retval = 0, size; + void *buf = dump_config.dumper->dump_buf; + + /* accounts for DUMP_HEADER_OFFSET if applicable */ + if ((retval = dump_dev_seek(0))) { + printk("Unable to seek to dump header offset: %d\n", + retval); + return retval; + } + + memcpy(buf, (void *)&dump_header, sizeof(dump_header)); + size = sizeof(dump_header); + memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm)); + size += sizeof(dump_header_asm); + size = PAGE_ALIGN(size); + retval = dump_ll_write(buf , size); + + if (retval < size) + return (retval >= 0) ? 
ENOSPC : retval; + return 0; +} + +int dump_generic_update_header(void) +{ + int err = 0; + + if (dump_config.dumper->header_dirty) { + if ((err = dump_write_header())) { + printk("dump write header failed !err %d\n", err); + } else { + dump_config.dumper->header_dirty = 0; + } + } + + return err; +} + +static inline int is_curr_stack_page(struct page *page, unsigned long size) +{ + unsigned long thread_addr = (unsigned long)current_thread_info(); + unsigned long addr = (unsigned long)page_address(page); + + return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE) + && (addr + size > thread_addr); +} + +static inline int is_dump_page(struct page *page, unsigned long size) +{ + unsigned long addr = (unsigned long)page_address(page); + unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf; + + return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE) + && (addr + size > dump_buf); +} + +int dump_allow_compress(struct page *page, unsigned long size) +{ + /* + * Don't compress the page if any part of it overlaps + * with the current stack or dump buffer (since the contents + * in these could be changing while compression is going on) + */ + return !is_curr_stack_page(page, size) && !is_dump_page(page, size); +} + +void lcrash_init_pageheader(struct __dump_page *dp, struct page *page, + unsigned long sz) +{ + memset(dp, sizeof(struct __dump_page), 0); + dp->dp_flags = 0; + dp->dp_size = 0; + if (sz > 0) + dp->dp_address = page_to_pfn(page) << PAGE_SHIFT; + +#if DUMP_DEBUG > 6 + dp->dp_page_index = dump_header.dh_num_dump_pages; + dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE + + DUMP_HEADER_OFFSET; /* ?? 
*/ +#endif /* DUMP_DEBUG */ +} + +int dump_lcrash_add_data(unsigned long loc, unsigned long len) +{ + struct page *page = (struct page *)loc; + void *addr, *buf = dump_config.dumper->curr_buf; + struct __dump_page *dp = (struct __dump_page *)buf; + int bytes, size; + + if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE) + return -ENOMEM; + + lcrash_init_pageheader(dp, page, len); + buf += sizeof(struct __dump_page); + + while (len) { + addr = kmap_atomic(page, KM_DUMP); + size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len; + /* check for compression */ + if (dump_allow_compress(page, bytes)) { + size = dump_compress_data((char *)addr, bytes, (char *)buf); + } + /* set the compressed flag if the page did compress */ + if (size && (size < bytes)) { + dp->dp_flags |= DUMP_DH_COMPRESSED; + } else { + /* compression failed -- default to raw mode */ + dp->dp_flags |= DUMP_DH_RAW; + memcpy(buf, addr, bytes); + size = bytes; + } + /* memset(buf, 'A', size); temporary: testing only !! */ + kunmap_atomic(addr, KM_DUMP); + dp->dp_size += size; + buf += size; + len -= bytes; + page++; + } + + /* now update the header */ +#if DUMP_DEBUG > 6 + dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp); +#endif + dump_header.dh_num_dump_pages++; + dump_config.dumper->header_dirty++; + + dump_config.dumper->curr_buf = buf; + + return len; +} + +int dump_lcrash_update_end_marker(void) +{ + struct __dump_page *dp = + (struct __dump_page *)dump_config.dumper->curr_buf; + unsigned long left; + int ret = 0; + + lcrash_init_pageheader(dp, NULL, 0); + dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? 
*/ + + /* now update the header */ +#if DUMP_DEBUG > 6 + dump_header.dh_num_bytes += sizeof(*dp); +#endif + dump_config.dumper->curr_buf += sizeof(*dp); + left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf; + + printk("\n"); + + while (left) { + if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) { + printk("Seek failed at offset 0x%llx\n", + dump_config.dumper->curr_offset); + return ret; + } + + if (DUMP_BUFFER_SIZE > left) + memset(dump_config.dumper->curr_buf, 'm', + DUMP_BUFFER_SIZE - left); + + if ((ret = dump_ll_write(dump_config.dumper->dump_buf, + DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) { + return (ret < 0) ? ret : -ENOSPC; + } + + dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE; + + if (left > DUMP_BUFFER_SIZE) { + left -= DUMP_BUFFER_SIZE; + memcpy(dump_config.dumper->dump_buf, + dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left); + dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE; + } else { + left = 0; + } + } + return 0; +} + + +/* Default Formatter (lcrash) */ +struct dump_fmt_ops dump_fmt_lcrash_ops = { + .configure_header = dump_lcrash_configure_header, + .update_header = dump_generic_update_header, + .save_context = dump_lcrash_save_context, + .add_data = dump_lcrash_add_data, + .update_end_marker = dump_lcrash_update_end_marker +}; + +struct dump_fmt dump_fmt_lcrash = { + .name = "lcrash", + .ops = &dump_fmt_lcrash_ops +}; + diff -Nru a/drivers/dump/dump_gzip.c b/drivers/dump/dump_gzip.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_gzip.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,118 @@ +/* + * GZIP Compression functions for kernel crash dumps. + * + * Created by: Matt Robinson (yakker@sourceforge.net) + * Copyright 2001 Matt D. Robinson. All rights reserved. + * + * This code is released under version 2 of the GNU GPL. 
+ */ + +/* header files */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void *deflate_workspace; + +/* + * Name: dump_compress_gzip() + * Func: Compress a DUMP_PAGE_SIZE page using gzip-style algorithms (the. + * deflate functions similar to what's used in PPP). + */ +static u16 +dump_compress_gzip(const u8 *old, u16 oldsize, u8 *new, u16 newsize) +{ + /* error code and dump stream */ + int err; + z_stream dump_stream; + + dump_stream.workspace = deflate_workspace; + + if ((err = zlib_deflateInit(&dump_stream, Z_BEST_COMPRESSION)) != Z_OK) { + /* fall back to RLE compression */ + printk("dump_compress_gzip(): zlib_deflateInit() " + "failed (%d)!\n", err); + return 0; + } + + /* use old (page of memory) and size (DUMP_PAGE_SIZE) as in-streams */ + dump_stream.next_in = (u8 *) old; + dump_stream.avail_in = oldsize; + + /* out streams are new (dpcpage) and new size (DUMP_DPC_PAGE_SIZE) */ + dump_stream.next_out = new; + dump_stream.avail_out = newsize; + + /* deflate the page -- check for error */ + err = zlib_deflate(&dump_stream, Z_FINISH); + if (err != Z_STREAM_END) { + /* zero is return code here */ + (void)zlib_deflateEnd(&dump_stream); + printk("dump_compress_gzip(): zlib_deflate() failed (%d)!\n", + err); + return 0; + } + + /* let's end the deflated compression stream */ + if ((err = zlib_deflateEnd(&dump_stream)) != Z_OK) { + printk("dump_compress_gzip(): zlib_deflateEnd() " + "failed (%d)!\n", err); + } + + /* return the compressed byte total (if it's smaller) */ + if (dump_stream.total_out >= oldsize) { + return oldsize; + } + return dump_stream.total_out; +} + +/* setup the gzip compression functionality */ +static struct __dump_compress dump_gzip_compression = { + .compress_type = DUMP_COMPRESS_GZIP, + .compress_func = dump_compress_gzip, + .compress_name = "GZIP", +}; + +/* + * Name: dump_compress_gzip_init() + * Func: Initialize gzip as a compression mechanism. 
+ */ +static int __init +dump_compress_gzip_init(void) +{ + deflate_workspace = vmalloc(zlib_deflate_workspacesize()); + if (!deflate_workspace) { + printk("dump_compress_gzip_init(): Failed to " + "alloc %d bytes for deflate workspace\n", + zlib_deflate_workspacesize()); + return -ENOMEM; + } + dump_register_compression(&dump_gzip_compression); + return 0; +} + +/* + * Name: dump_compress_gzip_cleanup() + * Func: Remove gzip as a compression mechanism. + */ +static void __exit +dump_compress_gzip_cleanup(void) +{ + vfree(deflate_workspace); + dump_unregister_compression(DUMP_COMPRESS_GZIP); +} + +/* module initialization */ +module_init(dump_compress_gzip_init); +module_exit(dump_compress_gzip_cleanup); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("LKCD Development Team "); +MODULE_DESCRIPTION("Gzip compression module for crash dump driver"); diff -Nru a/drivers/dump/dump_i386.c b/drivers/dump/dump_i386.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_i386.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,309 @@ +/* + * Architecture specific (i386) functions for Linux crash dumps. + * + * Created by: Matt Robinson (yakker@sgi.com) + * + * Copyright 1999 Silicon Graphics, Inc. All rights reserved. + * + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com) + * Copyright 2000 TurboLinux, Inc. All rights reserved. + * + * This code is released under version 2 of the GNU GPL. + */ + +/* + * The hooks for dumping the kernel virtual memory to disk are in this + * file. Any time a modification is made to the virtual memory mechanism, + * these routines must be changed to use the new mechanisms. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "dump_methods.h" +#include + +#include +#include +#include +#include + +static __s32 saved_irq_count; /* saved preempt_count() flags */ + +static int +alloc_dha_stack(void) +{ + int i; + void *ptr; + + if (dump_header_asm.dha_stack[0]) + return 0; + + ptr = vmalloc(THREAD_SIZE * num_online_cpus()); + if (!ptr) { + printk("vmalloc for dha_stacks failed\n"); + return -ENOMEM; + } + + for (i = 0; i < num_online_cpus(); i++) { + dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr + + (i * THREAD_SIZE)); + } + return 0; +} + +static int +free_dha_stack(void) +{ + if (dump_header_asm.dha_stack[0]) { + vfree((void *)dump_header_asm.dha_stack[0]); + dump_header_asm.dha_stack[0] = 0; + } + return 0; +} + + +void +__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs) +{ + *dest_regs = *regs; + + /* In case of panic dumps, we collects regs on entry to panic. + * so, we shouldn't 'fix' ssesp here again. But it is hard to + * tell just looking at regs whether ssesp need fixing. We make + * this decision by looking at xss in regs. If we have better + * means to determine that ssesp are valid (by some flag which + * tells that we are here due to panic dump), then we can use + * that instead of this kludge. 
+ */ + if (!user_mode(regs)) { + if ((0xffff & regs->xss) == __KERNEL_DS) + /* already fixed up */ + return; + dest_regs->esp = (unsigned long)&(regs->esp); + __asm__ __volatile__ ("movw %%ss, %%ax;" + :"=a"(dest_regs->xss)); + } +} + + +#ifdef CONFIG_SMP +extern unsigned long irq_affinity[]; +extern irq_desc_t irq_desc[]; +extern void dump_send_ipi(void); + +static int dump_expect_ipi[NR_CPUS]; +static atomic_t waiting_for_dump_ipi; +static unsigned long saved_affinity[NR_IRQS]; + +extern void stop_this_cpu(void *); /* exported by i386 kernel */ + +static int +dump_nmi_callback(struct pt_regs *regs, int cpu) +{ + if (!dump_expect_ipi[cpu]) + return 0; + + dump_expect_ipi[cpu] = 0; + + dump_save_this_cpu(regs); + atomic_dec(&waiting_for_dump_ipi); + + /* Execute halt */ + stop_this_cpu(NULL); + return 1; +} + +/* save registers on other processors */ +void +__dump_save_other_cpus(void) +{ + int i, cpu = smp_processor_id(); + int other_cpus = num_online_cpus()-1; + + if (other_cpus > 0) { + atomic_set(&waiting_for_dump_ipi, other_cpus); + + for (i = 0; i < NR_CPUS; i++) { + dump_expect_ipi[i] = (i != cpu && cpu_online(i)); + } + + /* short circuit normal NMI handling temporarily */ + set_nmi_callback(dump_nmi_callback); + wmb(); + + dump_send_ipi(); + /* may be we dont need to wait for NMI to be processed. + just write out the header at the end of dumping, if + this IPI is not processed until then, there probably + is a problem and we just fail to capture state of + other cpus. */ + while(atomic_read(&waiting_for_dump_ipi) > 0) { + cpu_relax(); + } + + unset_nmi_callback(); + } +} + +/* + * Routine to save the old irq affinities and change affinities of all irqs to + * the dumping cpu. 
+ */ +static void +set_irq_affinity(void) +{ + int i; + int cpu = smp_processor_id(); + + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long)); + for (i = 0; i < NR_IRQS; i++) { + if (irq_desc[i].handler == NULL) + continue; + irq_affinity[i] = 1UL << cpu; + if (irq_desc[i].handler->set_affinity != NULL) + irq_desc[i].handler->set_affinity(i, irq_affinity[i]); + } +} + +/* + * Restore old irq affinities. + */ +static void +reset_irq_affinity(void) +{ + int i; + + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long)); + for (i = 0; i < NR_IRQS; i++) { + if (irq_desc[i].handler == NULL) + continue; + if (irq_desc[i].handler->set_affinity != NULL) + irq_desc[i].handler->set_affinity(i, saved_affinity[i]); + } +} + +#else /* !CONFIG_SMP */ +#define set_irq_affinity() do { } while (0) +#define reset_irq_affinity() do { } while (0) +#define save_other_cpu_states() do { } while (0) +#endif /* !CONFIG_SMP */ + +/* + * Kludge - dump from interrupt context is unreliable (Fixme) + * + * We do this so that softirqs initiated for dump i/o + * get processed and we don't hang while waiting for i/o + * to complete or in any irq synchronization attempt. + * + * This is not quite legal of course, as it has the side + * effect of making all interrupts & softirqs triggered + * while dump is in progress complete before currently + * pending softirqs and the currently executing interrupt + * code. + */ +static inline void +irq_bh_save(void) +{ + saved_irq_count = irq_count(); + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK); +} + +static inline void +irq_bh_restore(void) +{ + preempt_count() |= saved_irq_count; +} + +/* + * Name: __dump_irq_enable + * Func: Reset system so interrupts are enabled. + * This is used for dump methods that require interrupts + * Eventually, all methods will have interrupts disabled + * and this code can be removed. 
+ * + * Change irq affinities + * Re-enable interrupts + */ +void +__dump_irq_enable(void) +{ + set_irq_affinity(); + irq_bh_save(); + local_irq_enable(); +} + +/* + * Name: __dump_irq_restore + * Func: Resume the system state in an architecture-specific way. + + */ +void +__dump_irq_restore(void) +{ + local_irq_disable(); + reset_irq_affinity(); + irq_bh_restore(); +} + +/* + * Name: __dump_configure_header() + * Func: Meant to fill in arch specific header fields except per-cpu state + * already captured via __dump_save_context for all CPUs. + */ +int +__dump_configure_header(const struct pt_regs *regs) +{ + return (0); +} + +/* + * Name: __dump_init() + * Func: Initialize the dumping routine process. + */ +void +__dump_init(uint64_t local_memory_start) +{ + return; +} + +/* + * Name: __dump_open() + * Func: Open the dump device (architecture specific). + */ +void +__dump_open(void) +{ + alloc_dha_stack(); +} + +/* + * Name: __dump_cleanup() + * Func: Free any architecture specific data structures. This is called + * when the dump module is being removed. + */ +void +__dump_cleanup(void) +{ + free_dha_stack(); +} + +extern int pfn_is_ram(unsigned long); + +/* + * Name: __dump_page_valid() + * Func: Check if page is valid to dump. + */ +int +__dump_page_valid(unsigned long index) +{ + if (!pfn_valid(index)) + return 0; + + return pfn_is_ram(index); +} + diff -Nru a/drivers/dump/dump_memdev.c b/drivers/dump/dump_memdev.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_memdev.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,640 @@ +/* + * Implements the dump driver interface for saving a dump in available + * memory areas. The saved pages may be written out to persistent storage + * after a soft reboot. + * + * Started: Oct 2002 - Suparna Bhattacharya + * + * Copyright (C) 2002 International Business Machines Corp. + * + * This code is released under version 2 of the GNU GPL. 
+ * + * The approach of tracking pages containing saved dump using map pages + * allocated as needed has been derived from the Mission Critical Linux + * mcore dump implementation. + * + * Credits and a big thanks for letting the lkcd project make use of + * the excellent piece of work and also helping with clarifications + * and tips along the way are due to: + * Dave Winchell (primary author of mcore) + * Jeff Moyer + * Josh Huber + * + * For those familiar with the mcore code, the main differences worth + * noting here (besides the dump device abstraction) result from enabling + * "high" memory pages (pages not permanently mapped in the kernel + * address space) to be used for saving dump data (because of which a + * simple virtual address based linked list cannot be used anymore for + * managing free pages), an added level of indirection for faster + * lookups during the post-boot stage, and the idea of pages being + * made available as they get freed up while dump to memory progresses + * rather than one time before starting the dump. The last point enables + * a full memory snapshot to be saved starting with an initial set of + * bootstrap pages given a good compression ratio. (See dump_overlay.c) + * + */ + +/* + * -----------------MEMORY LAYOUT ------------------ + * The memory space consists of a set of discontiguous pages, and + * discontiguous map pages as well, rooted in a chain of indirect + * map pages (also discontiguous). Except for the indirect maps + * (which must be preallocated in advance), the rest of the pages + * could be in high memory. + * + * root + * | --------- -------- -------- + * --> | . . +|--->| . +|------->| . . | indirect + * --|--|--- ---|---- --|-|--- maps + * | | | | | + * ------ ------ ------- ------ ------- + * | . | | . | | . . | | . | | . . | maps + * --|--- --|--- --|--|-- --|--- ---|-|-- + * page page page page page page page data + * pages + * + * Writes to the dump device happen sequentially in append mode. 
+ * The main reason for the existence of the indirect map is + * to enable a quick way to lookup a specific logical offset in + * the saved data post-soft-boot, e.g. to writeout pages + * with more critical data first, even though such pages + * would have been compressed and copied last, being the lowest + * ranked candidates for reuse due to their criticality. + * (See dump_overlay.c) + */ +#include +#include +#include +#include +#include "dump_methods.h" + +#define DUMP_MAP_SZ (PAGE_SIZE / sizeof(unsigned long)) /* direct map size */ +#define DUMP_IND_MAP_SZ DUMP_MAP_SZ - 1 /* indirect map size */ +#define DUMP_NR_BOOTSTRAP 64 /* no of bootstrap pages */ + +extern int dump_low_page(struct page *); + +/* check if the next entry crosses a page boundary */ +static inline int is_last_map_entry(unsigned long *map) +{ + unsigned long addr = (unsigned long)(map + 1); + + return (!(addr & (PAGE_SIZE - 1))); +} + +/* Todo: should have some validation checks */ +/* The last entry in the indirect map points to the next indirect map */ +/* Indirect maps are referred to directly by virtual address */ +static inline unsigned long *next_indirect_map(unsigned long *map) +{ + return (unsigned long *)map[DUMP_IND_MAP_SZ]; +} + +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT +/* Called during early bootup - fixme: make this __init */ +void dump_early_reserve_map(struct dump_memdev *dev) +{ + unsigned long *map1, *map2; + loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT; + int i, j; + + printk("Reserve bootmap space holding previous dump of %lld pages\n", + last); + map1= (unsigned long *)dev->indirect_map_root; + + while (map1 && (off < last)) { + reserve_bootmem(virt_to_phys((void *)map1), PAGE_SIZE); + for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last); + i++, off += DUMP_MAP_SZ) { + pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]); + if (map1[i] >= max_low_pfn) + continue; + reserve_bootmem(map1[i] << PAGE_SHIFT, PAGE_SIZE); + map2 = pfn_to_kaddr(map1[i]); + for (j = 0 ; 
(j < DUMP_MAP_SZ) && map2[j] && + (off + j < last); j++) { + pr_debug("\t map[%d][%d] = 0x%lx\n", i, j, + map2[j]); + if (map2[j] < max_low_pfn) { + reserve_bootmem(map2[j] << PAGE_SHIFT, + PAGE_SIZE); + } + } + } + map1 = next_indirect_map(map1); + } + dev->nr_free = 0; /* these pages don't belong to this boot */ +} +#endif + +/* mark dump pages so that they aren't used by this kernel */ +void dump_mark_map(struct dump_memdev *dev) +{ + unsigned long *map1, *map2; + loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT; + struct page *page; + int i, j; + + printk("Dump: marking pages in use by previous dump\n"); + map1= (unsigned long *)dev->indirect_map_root; + + while (map1 && (off < last)) { + page = virt_to_page(map1); + set_page_count(page, 1); + for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last); + i++, off += DUMP_MAP_SZ) { + pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]); + page = pfn_to_page(map1[i]); + set_page_count(page, 1); + map2 = kmap_atomic(page, KM_DUMP); + for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] && + (off + j < last); j++) { + pr_debug("\t map[%d][%d] = 0x%lx\n", i, j, + map2[j]); + page = pfn_to_page(map2[j]); + set_page_count(page, 1); + } + } + map1 = next_indirect_map(map1); + } +} + + +/* + * Given a logical offset into the mem device lookup the + * corresponding page + * loc is specified in units of pages + * Note: affects curr_map (even in the case where lookup fails) + */ +struct page *dump_mem_lookup(struct dump_memdev *dump_mdev, unsigned long loc) +{ + unsigned long *map; + unsigned long i, index = loc / DUMP_MAP_SZ; + struct page *page = NULL; + unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL; + + map = (unsigned long *)dump_mdev->indirect_map_root; + if (!map) + return NULL; + + if (loc > dump_mdev->last_offset >> PAGE_SHIFT) + return NULL; + + /* + * first locate the right indirect map + * in the chain of indirect maps + */ + for (i = 0; i + DUMP_IND_MAP_SZ < index ; i += DUMP_IND_MAP_SZ) { + if (!(map = 
next_indirect_map(map))) + return NULL; + } + /* then the right direct map */ + /* map entries are referred to by page index */ + if ((curr_map = map[index - i])) { + page = pfn_to_page(curr_map); + /* update the current traversal index */ + /* dump_mdev->curr_map = &map[index - i];*/ + curr_map_ptr = &map[index - i]; + } + + if (page) + map = kmap_atomic(page, KM_DUMP); + else + return NULL; + + /* and finally the right entry therein */ + /* data pages are referred to by page index */ + i = index * DUMP_MAP_SZ; + if ((curr_pfn = map[loc - i])) { + page = pfn_to_page(curr_pfn); + dump_mdev->curr_map = curr_map_ptr; + dump_mdev->curr_map_offset = loc - i; + dump_mdev->ddev.curr_offset = loc << PAGE_SHIFT; + } else { + page = NULL; + } + kunmap_atomic(map, KM_DUMP); + + return page; +} + +/* + * Retrieves a pointer to the next page in the dump device + * Used during the lookup pass post-soft-reboot + */ +struct page *dump_mem_next_page(struct dump_memdev *dev) +{ + unsigned long i; + unsigned long *map; + struct page *page = NULL; + + if (dev->ddev.curr_offset + PAGE_SIZE >= dev->last_offset) { + return NULL; + } + + if ((i = (unsigned long)(++dev->curr_map_offset)) >= DUMP_MAP_SZ) { + /* move to next map */ + if (is_last_map_entry(++dev->curr_map)) { + /* move to the next indirect map page */ + printk("dump_mem_next_page: go to next indirect map\n"); + dev->curr_map = (unsigned long *)*dev->curr_map; + if (!dev->curr_map) + return NULL; + } + i = dev->curr_map_offset = 0; + pr_debug("dump_mem_next_page: next map 0x%lx, entry 0x%lx\n", + dev->curr_map, *dev->curr_map); + + }; + + if (*dev->curr_map) { + map = kmap_atomic(pfn_to_page(*dev->curr_map), KM_DUMP); + if (map[i]) + page = pfn_to_page(map[i]); + kunmap_atomic(map, KM_DUMP); + dev->ddev.curr_offset += PAGE_SIZE; + }; + + return page; +} + +/* Copied from dump_filters.c */ +static inline int kernel_page(struct page *p) +{ + /* FIXME: Need to exclude hugetlb pages. 
Clue: reserved but inuse */ + return PageReserved(p) || (!PageLRU(p) && PageInuse(p)); +} + +static inline int user_page(struct page *p) +{ + return PageInuse(p) && (!PageReserved(p) && PageLRU(p)); +} + +int dump_reused_by_boot(struct page *page) +{ + /* Todo + * Checks: + * if PageReserved + * if < __end + bootmem_bootmap_pages for this boot + allowance + * if overwritten by initrd (how to check ?) + * Also, add more checks in early boot code + * e.g. bootmem bootmap alloc verify not overwriting dump, and if + * so then realloc or move the dump pages out accordingly. + */ + + /* Temporary proof of concept hack, avoid overwriting kern pages */ + + return (kernel_page(page) || dump_low_page(page) || user_page(page)); +} + + +/* Uses the free page passed in to expand available space */ +int dump_mem_add_space(struct dump_memdev *dev, struct page *page) +{ + struct page *map_page; + unsigned long *map; + unsigned long i; + + if (!dev->curr_map) + return -ENOMEM; /* must've exhausted indirect map */ + + if (!*dev->curr_map || dev->curr_map_offset >= DUMP_MAP_SZ) { + /* add map space */ + *dev->curr_map = page_to_pfn(page); + dev->curr_map_offset = 0; + return 0; + } + + /* add data space */ + i = dev->curr_map_offset; + map_page = pfn_to_page(*dev->curr_map); + map = (unsigned long *)kmap_atomic(map_page, KM_DUMP); + map[i] = page_to_pfn(page); + kunmap_atomic(map, KM_DUMP); + dev->curr_map_offset = ++i; + dev->last_offset += PAGE_SIZE; + if (i >= DUMP_MAP_SZ) { + /* move to next map */ + if (is_last_map_entry(++dev->curr_map)) { + /* move to the next indirect map page */ + pr_debug("dump_mem_add_space: using next" + "indirect map\n"); + dev->curr_map = (unsigned long *)*dev->curr_map; + } + } + return 0; +} + + +/* Caution: making a dest page invalidates existing contents of the page */ +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page) +{ + int err = 0; + + /* + * the page can be used as a destination only if we are sure + * it won't get 
overwritten by the soft-boot, and is not + * critical for us right now. + */ + if (dump_reused_by_boot(page)) + return 0; + + if ((err = dump_mem_add_space(dev, page))) { + printk("Warning: Unable to extend memdev space. Err %d\n", + err); + return 0; + } + + dev->nr_free++; + return 1; +} + + +/* Set up the initial maps and bootstrap space */ +/* Must be called only after any previous dump is written out */ +int dump_mem_open(struct dump_dev *dev, unsigned long devid) +{ + struct dump_memdev *dump_mdev = DUMP_MDEV(dev); + unsigned long nr_maps, *map, *prev_map = &dump_mdev->indirect_map_root; + void *addr; + struct page *page; + unsigned long i = 0; + int err = 0; + + /* Todo: sanity check for unwritten previous dump */ + + /* allocate pages for indirect map (non highmem area) */ + nr_maps = num_physpages / DUMP_MAP_SZ; /* maps to cover entire mem */ + for (i = 0; i < nr_maps; i += DUMP_IND_MAP_SZ) { + if (!(map = (unsigned long *)dump_alloc_mem(PAGE_SIZE))) { + printk("Unable to alloc indirect map %ld\n", + i / DUMP_IND_MAP_SZ); + return -ENOMEM; + } + clear_page(map); + *prev_map = (unsigned long)map; + prev_map = &map[DUMP_IND_MAP_SZ]; + }; + + dump_mdev->curr_map = (unsigned long *)dump_mdev->indirect_map_root; + dump_mdev->curr_map_offset = 0; + + /* + * allocate a few bootstrap pages: at least 1 map and 1 data page + * plus enough to save the dump header + */ + i = 0; + do { + if (!(addr = dump_alloc_mem(PAGE_SIZE))) { + printk("Unable to alloc bootstrap page %ld\n", i); + return -ENOMEM; + } + + page = virt_to_page(addr); + if (dump_low_page(page)) { + dump_free_mem(addr); + continue; + } + + if (dump_mem_add_space(dump_mdev, page)) { + printk("Warning: Unable to extend memdev " + "space. 
Err %d\n", err); + dump_free_mem(addr); + continue; + } + i++; + } while (i < DUMP_NR_BOOTSTRAP); + + printk("dump memdev init: %ld maps, %ld bootstrap pgs, %ld free pgs\n", + nr_maps, i, dump_mdev->last_offset >> PAGE_SHIFT); + + dump_mdev->last_bs_offset = dump_mdev->last_offset; + + return 0; +} + +/* Releases all pre-alloc'd pages */ +int dump_mem_release(struct dump_dev *dev) +{ + struct dump_memdev *dump_mdev = DUMP_MDEV(dev); + struct page *page, *map_page; + unsigned long *map, *prev_map; + void *addr; + int i; + + if (!dump_mdev->nr_free) + return 0; + + pr_debug("dump_mem_release\n"); + page = dump_mem_lookup(dump_mdev, 0); + for (i = 0; page && (i < DUMP_NR_BOOTSTRAP - 1); i++) { + if (PageHighMem(page)) + break; + addr = page_address(page); + if (!addr) { + printk("page_address(%p) = NULL\n", page); + break; + } + pr_debug("Freeing page at 0x%lx\n", addr); + dump_free_mem(addr); + if (dump_mdev->curr_map_offset >= DUMP_MAP_SZ - 1) { + map_page = pfn_to_page(*dump_mdev->curr_map); + if (PageHighMem(map_page)) + break; + page = dump_mem_next_page(dump_mdev); + addr = page_address(map_page); + if (!addr) { + printk("page_address(%p) = NULL\n", + map_page); + break; + } + pr_debug("Freeing map page at 0x%lx\n", addr); + dump_free_mem(addr); + i++; + } else { + page = dump_mem_next_page(dump_mdev); + } + } + + /* now for the last used bootstrap page used as a map page */ + if ((i < DUMP_NR_BOOTSTRAP) && (*dump_mdev->curr_map)) { + map_page = pfn_to_page(*dump_mdev->curr_map); + if ((map_page) && !PageHighMem(map_page)) { + addr = page_address(map_page); + if (!addr) { + printk("page_address(%p) = NULL\n", map_page); + } else { + pr_debug("Freeing map page at 0x%lx\n", addr); + dump_free_mem(addr); + i++; + } + } + } + + printk("Freed %d bootstrap pages\n", i); + + /* free the indirect maps */ + map = (unsigned long *)dump_mdev->indirect_map_root; + + i = 0; + while (map) { + prev_map = map; + map = next_indirect_map(map); + dump_free_mem(prev_map); + i++; + 
} + + printk("Freed %d indirect map(s)\n", i); + + /* Reset the indirect map */ + dump_mdev->indirect_map_root = 0; + dump_mdev->curr_map = 0; + + /* Reset the free list */ + dump_mdev->nr_free = 0; + + dump_mdev->last_offset = dump_mdev->ddev.curr_offset = 0; + dump_mdev->last_used_offset = 0; + dump_mdev->curr_map = NULL; + dump_mdev->curr_map_offset = 0; + return 0; +} + +/* + * Long term: + * It is critical for this to be very strict. Cannot afford + * to have anything running and accessing memory while we overwrite + * memory (potential risk of data corruption). + * If in doubt (e.g if a cpu is hung and not responding) just give + * up and refuse to proceed with this scheme. + * + * Note: I/O will only happen after soft-boot/switchover, so we can + * safely disable interrupts and force stop other CPUs if this is + * going to be a disruptive dump, no matter what they + * are in the middle of. + */ +/* + * ATM Most of this is already taken care of in the nmi handler + * We may halt the cpus rightaway if we know this is going to be disruptive + * For now, since we've limited ourselves to overwriting free pages we + * aren't doing much here. Eventually, we'd have to wait to make sure other + * cpus aren't using memory we could be overwriting + */ +int dump_mem_silence(struct dump_dev *dev) +{ + struct dump_memdev *dump_mdev = DUMP_MDEV(dev); + + if (dump_mdev->last_offset > dump_mdev->last_bs_offset) { + /* prefer to run lkcd config & start with a clean slate */ + return -EEXIST; + } + return 0; +} + +extern int dump_overlay_resume(void); + +/* Trigger the next stage of dumping */ +int dump_mem_resume(struct dump_dev *dev) +{ + dump_overlay_resume(); + return 0; +} + +/* + * Allocate mem dev pages as required and copy buffer contents into it. 
+ * Fails if the no free pages are available + * Keeping it simple and limited for starters (can modify this over time) + * Does not handle holes or a sparse layout + * Data must be in multiples of PAGE_SIZE + */ +int dump_mem_write(struct dump_dev *dev, void *buf, unsigned long len) +{ + struct dump_memdev *dump_mdev = DUMP_MDEV(dev); + struct page *page; + unsigned long n = 0; + void *addr; + unsigned long *saved_curr_map, saved_map_offset; + int ret = 0; + + pr_debug("dump_mem_write: offset 0x%llx, size %ld\n", + dev->curr_offset, len); + + if (dev->curr_offset + len > dump_mdev->last_offset) { + printk("Out of space to write\n"); + return -ENOSPC; + } + + if ((len & (PAGE_SIZE - 1)) || (dev->curr_offset & (PAGE_SIZE - 1))) + return -EINVAL; /* not aligned in units of page size */ + + saved_curr_map = dump_mdev->curr_map; + saved_map_offset = dump_mdev->curr_map_offset; + page = dump_mem_lookup(dump_mdev, dev->curr_offset >> PAGE_SHIFT); + + for (n = len; (n > 0) && page; n -= PAGE_SIZE, buf += PAGE_SIZE ) { + addr = kmap_atomic(page, KM_DUMP); + /* memset(addr, 'x', PAGE_SIZE); */ + memcpy(addr, buf, PAGE_SIZE); + kunmap_atomic(addr, KM_DUMP); + /* dev->curr_offset += PAGE_SIZE; */ + page = dump_mem_next_page(dump_mdev); + } + + dump_mdev->curr_map = saved_curr_map; + dump_mdev->curr_map_offset = saved_map_offset; + + if (dump_mdev->last_used_offset < dev->curr_offset) + dump_mdev->last_used_offset = dev->curr_offset; + + return (len - n) ? 
(len - n) : ret ; +} + +/* dummy - always ready */ +int dump_mem_ready(struct dump_dev *dev, void *buf) +{ + return 0; +} + +/* + * Should check for availability of space to write upto the offset + * affects only the curr_offset; last_offset untouched + * Keep it simple: Only allow multiples of PAGE_SIZE for now + */ +int dump_mem_seek(struct dump_dev *dev, loff_t offset) +{ + struct dump_memdev *dump_mdev = DUMP_MDEV(dev); + + if (offset & (PAGE_SIZE - 1)) + return -EINVAL; /* allow page size units only for now */ + + /* Are we exceeding available space ? */ + if (offset > dump_mdev->last_offset) { + printk("dump_mem_seek failed for offset 0x%llx\n", + offset); + return -ENOSPC; + } + + dump_mdev->ddev.curr_offset = offset; + return 0; +} + +struct dump_dev_ops dump_memdev_ops = { + .open = dump_mem_open, + .release = dump_mem_release, + .silence = dump_mem_silence, + .resume = dump_mem_resume, + .seek = dump_mem_seek, + .write = dump_mem_write, + .read = NULL, /* not implemented at the moment */ + .ready = dump_mem_ready +}; + +static struct dump_memdev default_dump_memdev = { + .ddev = {.type_name = "memdev", .ops = &dump_memdev_ops, + .device_id = 0x14} + /* assume the rest of the fields are zeroed by default */ +}; + +/* may be overwritten if a previous dump exists */ +struct dump_memdev *dump_memdev = &default_dump_memdev; + diff -Nru a/drivers/dump/dump_methods.h b/drivers/dump/dump_methods.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_methods.h Wed Oct 22 10:40:11 2003 @@ -0,0 +1,341 @@ +/* + * Generic interfaces for flexible system dump + * + * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com) + * + * Copyright (C) 2002 International Business Machines Corp. + * + * This code is released under version 2 of the GNU GPL. 
+ */ + +#ifndef _LINUX_DUMP_METHODS_H +#define _LINUX_DUMP_METHODS_H + +/* + * Inspired by Matt Robinson's suggestion of introducing dump + * methods as a way to enable different crash dump facilities to + * coexist where each employs its own scheme or dumping policy. + * + * The code here creates a framework for flexible dump by defining + * a set of methods and providing associated helpers that differentiate + * between the underlying mechanism (how to dump), overall scheme + * (sequencing of stages and data dumped and associated quiescing), + * output format (what the dump output looks like), target type + * (where to save the dump; see dumpdev.h), and selection policy + * (state/data to dump). + * + * These sets of interfaces can be mixed and matched to build a + * dumper suitable for a given situation, allowing for + * flexibility as well appropriate degree of code reuse. + * For example all features and options of lkcd (including + * granular selective dumping in the near future) should be + * available even when say, the 2 stage soft-boot based mechanism + * is used for taking disruptive dumps. + * + * Todo: Additionally modules or drivers may supply their own + * custom dumpers which extend dump with module specific + * information or hardware state, and can even tweak the + * mechanism when it comes to saving state relevant to + * them. 
+ */ + +#include +#include +#include +#include + +#define MAX_PASSES 6 +#define MAX_DEVS 4 + + +/* To customise selection of pages to be dumped in a given pass/group */ +struct dump_data_filter{ + char name[32]; + int (*selector)(int, unsigned long, unsigned long); + ulong level_mask; /* dump level(s) for which this filter applies */ + loff_t start, end; /* location range applicable */ +}; + + +/* + * Determined by the kind of dump mechanism and appropriate + * overall scheme + */ +struct dump_scheme_ops { + /* sets aside memory, inits data structures etc */ + int (*configure)(unsigned long devid); + /* releases resources */ + int (*unconfigure)(void); + + /* ordering of passes, invoking iterator */ + int (*sequencer)(void); + /* iterates over system data, selects and acts on data to dump */ + int (*iterator)(int, int (*)(unsigned long, unsigned long), + struct dump_data_filter *); + /* action when data is selected for dump */ + int (*save_data)(unsigned long, unsigned long); + /* action when data is to be excluded from dump */ + int (*skip_data)(unsigned long, unsigned long); + /* policies for space, multiple dump devices etc */ + int (*write_buffer)(void *, unsigned long); +}; + +struct dump_scheme { + /* the name serves as an anchor to locate the scheme after reboot */ + char name[32]; + struct dump_scheme_ops *ops; + struct list_head list; +}; + +/* determined by the dump (file) format */ +struct dump_fmt_ops { + /* build header */ + int (*configure_header)(const char *, const struct pt_regs *); + int (*update_header)(void); /* update header and write it out */ + /* save curr context */ + void (*save_context)(int, const struct pt_regs *, + struct task_struct *); + /* typically called by the save_data action */ + /* add formatted data to the dump buffer */ + int (*add_data)(unsigned long, unsigned long); + int (*update_end_marker)(void); +}; + +struct dump_fmt { + unsigned long magic; + char name[32]; /* lcrash, crash, elf-core etc */ + struct dump_fmt_ops *ops; 
+ struct list_head list; +}; + +/* + * Modules will be able add their own data capture schemes by + * registering their own dumpers. Typically they would use the + * primary dumper as a template and tune it with their routines. + * Still Todo. + */ + +/* The combined dumper profile (mechanism, scheme, dev, fmt) */ +struct dumper { + char name[32]; /* singlestage, overlay (stg1), passthru(stg2), pull */ + struct dump_scheme *scheme; + struct dump_fmt *fmt; + struct __dump_compress *compress; + struct dump_data_filter *filter; + struct dump_dev *dev; + /* state valid only for active dumper(s) - per instance */ + /* run time state/context */ + int curr_pass; + unsigned long count; + loff_t curr_offset; /* current logical offset into dump device */ + loff_t curr_loc; /* current memory location */ + void *curr_buf; /* current position in the dump buffer */ + void *dump_buf; /* starting addr of dump buffer */ + int header_dirty; /* whether the header needs to be written out */ + int header_len; + struct list_head dumper_list; /* links to other dumpers */ +}; + +/* Starting point to get to the current configured state */ +struct dump_config { + ulong level; + ulong flags; + struct dumper *dumper; + unsigned long dump_device; + unsigned long dump_addr; /* relevant only for in-memory dumps */ + struct list_head dump_dev_list; +}; + +extern struct dump_config dump_config; + +/* Used to save the dump config across a reboot for 2-stage dumps: + * + * Note: The scheme, format, compression and device type should be + * registered at bootup, for this config to be sharable across soft-boot. + * The function addresses could have changed and become invalid, and + * need to be set up again. 
+ */ +struct dump_config_block { + u64 magic; /* for a quick sanity check after reboot */ + struct dump_memdev memdev; /* handle to dump stored in memory */ + struct dump_config config; + struct dumper dumper; + struct dump_scheme scheme; + struct dump_fmt fmt; + struct __dump_compress compress; + struct dump_data_filter filter_table[MAX_PASSES]; + struct dump_anydev dev[MAX_DEVS]; /* target dump device */ +}; + + +/* Wrappers that invoke the methods for the current (active) dumper */ + +/* Scheme operations */ + +static inline int dump_sequencer(void) +{ + return dump_config.dumper->scheme->ops->sequencer(); +} + +static inline int dump_iterator(int pass, int (*action)(unsigned long, + unsigned long), struct dump_data_filter *filter) +{ + return dump_config.dumper->scheme->ops->iterator(pass, action, filter); +} + +#define dump_save_data dump_config.dumper->scheme->ops->save_data +#define dump_skip_data dump_config.dumper->scheme->ops->skip_data + +static inline int dump_write_buffer(void *buf, unsigned long len) +{ + return dump_config.dumper->scheme->ops->write_buffer(buf, len); +} + +static inline int dump_configure(unsigned long devid) +{ + return dump_config.dumper->scheme->ops->configure(devid); +} + +static inline int dump_unconfigure(void) +{ + return dump_config.dumper->scheme->ops->unconfigure(); +} + +/* Format operations */ + +static inline int dump_configure_header(const char *panic_str, + const struct pt_regs *regs) +{ + return dump_config.dumper->fmt->ops->configure_header(panic_str, regs); +} + +static inline void dump_save_context(int cpu, const struct pt_regs *regs, + struct task_struct *tsk) +{ + dump_config.dumper->fmt->ops->save_context(cpu, regs, tsk); +} + +static inline int dump_save_this_cpu(const struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + + dump_save_context(cpu, regs, current); + return 1; +} + +static inline int dump_update_header(void) +{ + return dump_config.dumper->fmt->ops->update_header(); +} + +static inline int 
dump_update_end_marker(void) +{ + return dump_config.dumper->fmt->ops->update_end_marker(); +} + +static inline int dump_add_data(unsigned long loc, unsigned long sz) +{ + return dump_config.dumper->fmt->ops->add_data(loc, sz); +} + +/* Compression operation */ +static inline int dump_compress_data(char *src, int slen, char *dst) +{ + return dump_config.dumper->compress->compress_func(src, slen, + dst, DUMP_DPC_PAGE_SIZE); +} + + +/* Prototypes of some default implementations of dump methods */ + +extern struct __dump_compress dump_none_compression; + +/* Default scheme methods (dump_scheme.c) */ + +extern int dump_generic_sequencer(void); +extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned + long), struct dump_data_filter *filter); +extern int dump_generic_save_data(unsigned long loc, unsigned long sz); +extern int dump_generic_skip_data(unsigned long loc, unsigned long sz); +extern int dump_generic_write_buffer(void *buf, unsigned long len); +extern int dump_generic_configure(unsigned long); +extern int dump_generic_unconfigure(void); + +/* Default scheme template */ +extern struct dump_scheme dump_scheme_singlestage; + +/* Default dump format methods */ + +extern int dump_lcrash_configure_header(const char *panic_str, + const struct pt_regs *regs); +extern void dump_lcrash_save_context(int cpu, const struct pt_regs *regs, + struct task_struct *tsk); +extern int dump_generic_update_header(void); +extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz); +extern int dump_lcrash_update_end_marker(void); + +/* Default format (lcrash) template */ +extern struct dump_fmt dump_fmt_lcrash; + +/* Default dump selection filter table */ + +/* + * Entries listed in order of importance and correspond to passes + * The last entry (with a level_mask of zero) typically reflects data that + * won't be dumped -- this may for example be used to identify data + * that will be skipped for certain so the corresponding memory areas can be + * 
utilized as scratch space. + */ +extern struct dump_data_filter dump_filter_table[]; + +/* Some pre-defined dumpers */ +extern struct dumper dumper_singlestage; +extern struct dumper dumper_stage1; +extern struct dumper dumper_stage2; + +/* These are temporary */ +#define DUMP_MASK_HEADER DUMP_LEVEL_HEADER +#define DUMP_MASK_KERN DUMP_LEVEL_KERN +#define DUMP_MASK_USED DUMP_LEVEL_USED +#define DUMP_MASK_UNUSED DUMP_LEVEL_ALL_RAM +#define DUMP_MASK_REST 0 /* dummy for now */ + +/* Helpers - move these to dump.h later ? */ + +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs); +extern int dump_ll_write(void *buf, unsigned long len); +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page); + +static inline void dumper_reset(void) +{ + dump_config.dumper->curr_buf = dump_config.dumper->dump_buf; + dump_config.dumper->curr_loc = 0; + dump_config.dumper->curr_offset = 0; + dump_config.dumper->count = 0; + dump_config.dumper->curr_pass = 0; +} + +/* + * May later be moulded to perform boot-time allocations so we can dump + * earlier during bootup + */ +static inline void *dump_alloc_mem(unsigned long size) +{ + return kmalloc(size, GFP_KERNEL); +} + +static inline void dump_free_mem(void *buf) +{ + struct page *page; + + /* ignore reserved pages (e.g. post soft boot stage) */ + if (buf && (page = virt_to_page(buf))) { + if (PageReserved(page)) + return; + } + + kfree(buf); +} + + +#endif /* _LINUX_DUMP_METHODS_H */ diff -Nru a/drivers/dump/dump_netdev.c b/drivers/dump/dump_netdev.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_netdev.c Wed Oct 22 10:40:11 2003 @@ -0,0 +1,865 @@ +/* + * Implements the dump driver interface for saving a dump via network + * interface. + * + * Some of this code has been taken/adapted from Ingo Molnar's netconsole + * code. LKCD team expresses its thanks to Ingo. + * + * Started: June 2002 - Mohamed Abbas + * Adapted netconsole code to implement LKCD dump over the network. 
+ * + * Nov 2002 - Bharata B. Rao + * Innumerable code cleanups, simplification and some fixes. + * Netdump configuration done by ioctl instead of using module parameters. + * + * Copyright (C) 2001 Ingo Molnar + * Copyright (C) 2002 International Business Machines Corp. + * + * This code is released under version 2 of the GNU GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static int startup_handshake; +static int page_counter; +static struct net_device *dump_ndev; +static struct in_device *dump_in_dev; +static u16 source_port, target_port; +static u32 source_ip, target_ip; +static unsigned char daddr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} ; +static spinlock_t dump_skb_lock = SPIN_LOCK_UNLOCKED; +static int dump_nr_skbs; +static struct sk_buff *dump_skb; +static unsigned long flags_global; +static int netdump_in_progress; +static char device_name[IFNAMSIZ]; + +/* + * security depends on the trusted path between the netconsole + * server and netconsole client, since none of the packets are + * encrypted. The random magic number protects the protocol + * against spoofing. + */ +static u64 dump_magic; + +#define MAX_UDP_CHUNK 1460 +#define MAX_PRINT_CHUNK (MAX_UDP_CHUNK-HEADER_LEN) + +/* + * We maintain a small pool of fully-sized skbs, + * to make sure the message gets out even in + * extreme OOM situations. 
+ */ +#define DUMP_MAX_SKBS 32 + +#define MAX_SKB_SIZE \ + (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ + sizeof(struct iphdr) + sizeof(struct ethhdr)) + +static void +dump_refill_skbs(void) +{ + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&dump_skb_lock, flags); + while (dump_nr_skbs < DUMP_MAX_SKBS) { + skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); + if (!skb) + break; + if (dump_skb) + skb->next = dump_skb; + else + skb->next = NULL; + dump_skb = skb; + dump_nr_skbs++; + } + spin_unlock_irqrestore(&dump_skb_lock, flags); +} + +static struct +sk_buff * dump_get_skb(void) +{ + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&dump_skb_lock, flags); + skb = dump_skb; + if (skb) { + dump_skb = skb->next; + skb->next = NULL; + dump_nr_skbs--; + } + spin_unlock_irqrestore(&dump_skb_lock, flags); + + return skb; +} + +/* + * Zap completed output skbs. + */ +static void +zap_completion_queue(void) +{ + int count; + unsigned long flags; + struct softnet_data *sd; + + count=0; + sd = &__get_cpu_var(softnet_data); + if (sd->completion_queue) { + struct sk_buff *clist; + + local_irq_save(flags); + clist = sd->completion_queue; + sd->completion_queue = NULL; + local_irq_restore(flags); + + while (clist != NULL) { + struct sk_buff *skb = clist; + clist = clist->next; + __kfree_skb(skb); + count++; + if (count > 10000) + printk("Error in sk list\n"); + } + } +} + +static void +dump_send_skb(struct net_device *dev, const char *msg, unsigned int msg_len, + reply_t *reply) +{ + int once = 1; + int total_len, eth_len, ip_len, udp_len, count = 0; + struct sk_buff *skb; + struct udphdr *udph; + struct iphdr *iph; + struct ethhdr *eth; + + udp_len = msg_len + HEADER_LEN + sizeof(*udph); + ip_len = eth_len = udp_len + sizeof(*iph); + total_len = eth_len + ETH_HLEN; + +repeat_loop: + zap_completion_queue(); + if (dump_nr_skbs < DUMP_MAX_SKBS) + dump_refill_skbs(); + + skb = alloc_skb(total_len, GFP_ATOMIC); + if (!skb) { + skb = dump_get_skb(); + if 
(!skb) { + count++; + if (once && (count == 1000000)) { + printk("possibly FATAL: out of netconsole " + "skbs!!! will keep retrying.\n"); + once = 0; + } + dev->poll_controller(dev); + goto repeat_loop; + } + } + + atomic_set(&skb->users, 1); + skb_reserve(skb, total_len - msg_len - HEADER_LEN); + skb->data[0] = NETCONSOLE_VERSION; + + put_unaligned(htonl(reply->nr), (u32 *) (skb->data + 1)); + put_unaligned(htonl(reply->code), (u32 *) (skb->data + 5)); + put_unaligned(htonl(reply->info), (u32 *) (skb->data + 9)); + + memcpy(skb->data + HEADER_LEN, msg, msg_len); + skb->len += msg_len + HEADER_LEN; + + udph = (struct udphdr *) skb_push(skb, sizeof(*udph)); + udph->source = source_port; + udph->dest = target_port; + udph->len = htons(udp_len); + udph->check = 0; + + iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); + + iph->version = 4; + iph->ihl = 5; + iph->tos = 0; + iph->tot_len = htons(ip_len); + iph->id = 0; + iph->frag_off = 0; + iph->ttl = 64; + iph->protocol = IPPROTO_UDP; + iph->check = 0; + iph->saddr = source_ip; + iph->daddr = target_ip; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + + eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); + + eth->h_proto = htons(ETH_P_IP); + memcpy(eth->h_source, dev->dev_addr, dev->addr_len); + memcpy(eth->h_dest, daddr, dev->addr_len); + + count=0; +repeat_poll: + spin_lock(&dev->xmit_lock); + dev->xmit_lock_owner = smp_processor_id(); + + count++; + + + if (netif_queue_stopped(dev)) { + dev->xmit_lock_owner = -1; + spin_unlock(&dev->xmit_lock); + + dev->poll_controller(dev); + zap_completion_queue(); + + + goto repeat_poll; + } + + dev->hard_start_xmit(skb, dev); + + dev->xmit_lock_owner = -1; + spin_unlock(&dev->xmit_lock); +} + +static unsigned short +udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr, + unsigned long base) +{ + return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base); +} + +static int +udp_checksum_init(struct sk_buff *skb, struct udphdr *uh, + 
unsigned short ulen, u32 saddr, u32 daddr) +{ + if (uh->check == 0) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (skb->ip_summed == CHECKSUM_HW) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (!udp_check(uh, ulen, saddr, daddr, skb->csum)) + return 0; + skb->ip_summed = CHECKSUM_NONE; + } + if (skb->ip_summed != CHECKSUM_UNNECESSARY) + skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, + IPPROTO_UDP, 0); + /* Probably, we should checksum udp header (it should be in cache + * in any case) and data in tiny packets (< rx copybreak). + */ + return 0; +} + +static __inline__ int +__udp_checksum_complete(struct sk_buff *skb) +{ + return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, + skb->csum)); +} + +static __inline__ +int udp_checksum_complete(struct sk_buff *skb) +{ + return skb->ip_summed != CHECKSUM_UNNECESSARY && + __udp_checksum_complete(skb); +} + +int new_req = 0; +static req_t req; + +static int +dump_rx_hook(struct sk_buff *skb) +{ + int proto; + struct iphdr *iph; + struct udphdr *uh; + __u32 len, saddr, daddr, ulen; + req_t *__req; + + /* + * First check if were are dumping or doing startup handshake, if + * not quickly return. 
+ */ + if (!netdump_in_progress) + return NET_RX_SUCCESS; + + if (skb->dev->type != ARPHRD_ETHER) + goto out; + + proto = ntohs(skb->mac.ethernet->h_proto); + if (proto != ETH_P_IP) + goto out; + + if (skb->pkt_type == PACKET_OTHERHOST) + goto out; + + if (skb_shared(skb)) + goto out; + + /* IP header correctness testing: */ + iph = (struct iphdr *)skb->data; + if (!pskb_may_pull(skb, sizeof(struct iphdr))) + goto out; + + if (iph->ihl < 5 || iph->version != 4) + goto out; + + if (!pskb_may_pull(skb, iph->ihl*4)) + goto out; + + if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) + goto out; + + len = ntohs(iph->tot_len); + if (skb->len < len || len < iph->ihl*4) + goto out; + + saddr = iph->saddr; + daddr = iph->daddr; + if (iph->protocol != IPPROTO_UDP) + goto out; + + if (source_ip != daddr) + goto out; + + if (target_ip != saddr) + goto out; + + len -= iph->ihl*4; + uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); + ulen = ntohs(uh->len); + + if (ulen != len || ulen < (sizeof(*uh) + sizeof(*__req))) + goto out; + + if (udp_checksum_init(skb, uh, ulen, saddr, daddr) < 0) + goto out; + + if (udp_checksum_complete(skb)) + goto out; + + if (source_port != uh->dest) + goto out; + + if (target_port != uh->source) + goto out; + + __req = (req_t *)(uh + 1); + if ((ntohl(__req->command) != COMM_GET_MAGIC) && + (ntohl(__req->command) != COMM_HELLO) && + (ntohl(__req->command) != COMM_START_WRITE_NETDUMP_ACK) && + (ntohl(__req->command) != COMM_START_NETDUMP_ACK) && + (memcmp(&__req->magic, &dump_magic, sizeof(dump_magic)) != 0)) + goto out; + + req.magic = ntohl(__req->magic); + req.command = ntohl(__req->command); + req.from = ntohl(__req->from); + req.to = ntohl(__req->to); + req.nr = ntohl(__req->nr); + new_req = 1; +out: + return NET_RX_DROP; +} + +static void +dump_send_mem(struct net_device *dev, req_t *req, const char* buff, size_t len) +{ + int i; + + int nr_chunks = len/1024; + reply_t reply; + + reply.nr = req->nr; + reply.info = 0; + + if ( nr_chunks <= 0) + 
nr_chunks = 1; + for (i = 0; i < nr_chunks; i++) { + unsigned int offset = i*1024; + reply.code = REPLY_MEM; + reply.info = offset; + dump_send_skb(dev, buff + offset, 1024, &reply); + } +} + +/* + * This function waits for the client to acknowledge the receipt + * of the netdump startup reply, with the possibility of packets + * getting lost. We resend the startup packet if no ACK is received, + * after a 1 second delay. + * + * (The client can test the success of the handshake via the HELLO + * command, and send ACKs until we enter netdump mode.) + */ +static int +dump_handshake(struct dump_dev *net_dev) +{ + char tmp[200]; + reply_t reply; + int i, j; + + if (startup_handshake) { + sprintf(tmp, "NETDUMP start, waiting for start-ACK.\n"); + reply.code = REPLY_START_NETDUMP; + reply.nr = 0; + reply.info = 0; + } else { + sprintf(tmp, "NETDUMP start, waiting for start-ACK.\n"); + reply.code = REPLY_START_WRITE_NETDUMP; + reply.nr = net_dev->curr_offset; + reply.info = net_dev->curr_offset; + } + + /* send 300 handshake packets before declaring failure */ + for (i = 0; i < 300; i++) { + dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply); + + /* wait 1 sec */ + for (j = 0; j < 10000; j++) { + udelay(100); + dump_ndev->poll_controller(dump_ndev); + zap_completion_queue(); + if (new_req) + break; + } + + /* + * if there is no new request, try sending the handshaking + * packet again + */ + if (!new_req) + continue; + + /* + * check if the new request is of the expected type, + * if so, return, else try sending the handshaking + * packet again + */ + if (startup_handshake) { + if (req.command == COMM_HELLO || req.command == + COMM_START_NETDUMP_ACK) { + return 0; + } else { + new_req = 0; + continue; + } + } else { + if (req.command == COMM_SEND_MEM) { + return 0; + } else { + new_req = 0; + continue; + } + } + } + return -1; +} + +static ssize_t +do_netdump(struct dump_dev *net_dev, const char* buff, size_t len) +{ + reply_t reply; + char tmp[200]; + ssize_t ret = 0; + 
int repeatCounter, counter, total_loop; + + netdump_in_progress = 1; + + if (dump_handshake(net_dev) < 0) { + printk("network dump failed due to handshake failure\n"); + goto out; + } + + /* + * Ideally startup handshake should be done during dump configuration, + * i.e., in dump_net_open(). This will be done when I figure out + * the dependency between startup handshake, subsequent write and + * various commands wrt to net-server. + */ + if (startup_handshake) + startup_handshake = 0; + + counter = 0; + repeatCounter = 0; + total_loop = 0; + while (1) { + if (!new_req) { + dump_ndev->poll_controller(dump_ndev); + zap_completion_queue(); + } + if (!new_req) { + repeatCounter++; + + if (repeatCounter > 5) { + counter++; + if (counter > 10000) { + if (total_loop >= 100000) { + printk("Time OUT LEAVE NOW\n"); + goto out; + } else { + total_loop++; + printk("Try number %d out of " + "10 before Time Out\n", + total_loop); + } + } + mdelay(1); + repeatCounter = 0; + } + continue; + } + repeatCounter = 0; + counter = 0; + total_loop = 0; + new_req = 0; + switch (req.command) { + case COMM_NONE: + break; + + case COMM_SEND_MEM: + dump_send_mem(dump_ndev, &req, buff, len); + break; + + case COMM_EXIT: + case COMM_START_WRITE_NETDUMP_ACK: + ret = len; + goto out; + + case COMM_HELLO: + sprintf(tmp, "Hello, this is netdump version " + "0.%02d\n", NETCONSOLE_VERSION); + reply.code = REPLY_HELLO; + reply.nr = req.nr; + reply.info = net_dev->curr_offset; + dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply); + break; + + case COMM_GET_PAGE_SIZE: + sprintf(tmp, "PAGE_SIZE: %ld\n", PAGE_SIZE); + reply.code = REPLY_PAGE_SIZE; + reply.nr = req.nr; + reply.info = PAGE_SIZE; + dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply); + break; + + case COMM_GET_NR_PAGES: + reply.code = REPLY_NR_PAGES; + reply.nr = req.nr; + reply.info = num_physpages; + reply.info = page_counter; + sprintf(tmp, "Number of pages: %ld\n", num_physpages); + dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply); + 
break; + + case COMM_GET_MAGIC: + reply.code = REPLY_MAGIC; + reply.nr = req.nr; + reply.info = NETCONSOLE_VERSION; + dump_send_skb(dump_ndev, (char *)&dump_magic, + sizeof(dump_magic), &reply); + break; + + default: + reply.code = REPLY_ERROR; + reply.nr = req.nr; + reply.info = req.command; + sprintf(tmp, "Got unknown command code %d!\n", + req.command); + dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply); + break; + } + } +out: + netdump_in_progress = 0; + return ret; +} + +static int +dump_validate_config(void) +{ + source_ip = dump_in_dev->ifa_list->ifa_local; + if (!source_ip) { + printk("network device %s has no local address, " + "aborting.\n", device_name); + return -1; + } + +#define IP(x) ((unsigned char *)&source_ip)[x] + printk("Source %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3)); +#undef IP + + if (!source_port) { + printk("source_port parameter not specified, aborting.\n"); + return -1; + } + printk(":%i\n", source_port); + source_port = htons(source_port); + + if (!target_ip) { + printk("target_ip parameter not specified, aborting.\n"); + return -1; + } + +#define IP(x) ((unsigned char *)&target_ip)[x] + printk("Target %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3)); +#undef IP + + if (!target_port) { + printk("target_port parameter not specified, aborting.\n"); + return -1; + } + printk(":%i\n", target_port); + target_port = htons(target_port); + + printk("Target Ethernet Address %02x:%02x:%02x:%02x:%02x:%02x", + daddr[0], daddr[1], daddr[2], daddr[3], daddr[4], daddr[5]); + + if ((daddr[0] & daddr[1] & daddr[2] & daddr[3] & daddr[4] & + daddr[5]) == 255) + printk("(Broadcast)"); + printk("\n"); + return 0; +} + +/* + * Prepares the dump device so we can take a dump later. + * Validates the netdump configuration parameters. + * + * TODO: Network connectivity check should be done here. 
+ */ +static int +dump_net_open(struct dump_dev *net_dev, unsigned long arg) +{ + int retval = 0; + + /* get the interface name */ + if (copy_from_user(device_name, (void *)arg, IFNAMSIZ)) + return -EFAULT; + + if (!(dump_ndev = dev_get_by_name(device_name))) { + printk("network device %s does not exist, aborting.\n", + device_name); + return -ENODEV; + } + + if (!dump_ndev->poll_controller) { + printk("network device %s does not implement polling yet, " + "aborting.\n", device_name); + retval = -1; /* return proper error */ + goto err1; + } + + if (!(dump_in_dev = in_dev_get(dump_ndev))) { + printk("network device %s is not an IP protocol device, " + "aborting.\n", device_name); + retval = -EINVAL; + goto err1; + } + + if ((retval = dump_validate_config()) < 0) + goto err2; + + net_dev->curr_offset = 0; + printk("Network device %s successfully configured for dumping\n", + device_name); + return retval; +err2: + in_dev_put(dump_in_dev); +err1: + dev_put(dump_ndev); + return retval; +} + +/* + * Close the dump device and release associated resources + * Invoked when unconfiguring the dump device. + */ +static int +dump_net_release(struct dump_dev *net_dev) +{ + if (dump_in_dev) + in_dev_put(dump_in_dev); + if (dump_ndev) + dev_put(dump_ndev); + return 0; +} + +/* + * Prepare the dump device for use (silence any ongoing activity + * and quiesce state) when the system crashes. + */ +static int +dump_net_silence(struct dump_dev *net_dev) +{ + local_irq_save(flags_global); + dump_ndev->rx_hook = dump_rx_hook; + startup_handshake = 1; + net_dev->curr_offset = 0; + printk("Dumping to network device %s on CPU %d ...\n", device_name, + smp_processor_id()); + return 0; +} + +/* + * Invoked when dumping is done. This is the time to put things back + * (i.e. undo the effects of dump_block_silence) so the device is + * available for normal use. 
+ */ +static int +dump_net_resume(struct dump_dev *net_dev) +{ + int indx; + reply_t reply; + char tmp[200]; + + if (!dump_ndev) + return (0); + + sprintf(tmp, "NETDUMP end.\n"); + for( indx = 0; indx < 6; indx++) { + reply.code = REPLY_END_NETDUMP; + reply.nr = 0; + reply.info = 0; + dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply); + } + printk("NETDUMP END!\n"); + local_irq_restore(flags_global); + dump_ndev->rx_hook = NULL; + startup_handshake = 0; + return 0; +} + +/* + * Seek to the specified offset in the dump device. + * Makes sure this is a valid offset, otherwise returns an error. + */ +static int +dump_net_seek(struct dump_dev *net_dev, loff_t off) +{ + /* + * For now using DUMP_HEADER_OFFSET as hard coded value, + * See dump_block_seekin dump_blockdev.c to know how to + * do this properly. + */ + net_dev->curr_offset = off + DUMP_HEADER_OFFSET; + return 0; +} + +/* + * + */ +static int +dump_net_write(struct dump_dev *net_dev, void *buf, unsigned long len) +{ + int cnt, i, off; + ssize_t ret; + + cnt = len/ PAGE_SIZE; + + for (i = 0; i < cnt; i++) { + off = i* PAGE_SIZE; + ret = do_netdump(net_dev, buf+off, PAGE_SIZE); + if (ret <= 0) + return -1; + net_dev->curr_offset = net_dev->curr_offset + PAGE_SIZE; + } + return len; +} + +/* + * check if the last dump i/o is over and ready for next request + */ +static int +dump_net_ready(struct dump_dev *net_dev, void *buf) +{ + return 0; +} + +/* + * ioctl function used for configuring network dump + */ +static int +dump_net_ioctl(struct dump_dev *net_dev, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case DIOSTARGETIP: + target_ip = arg; + break; + case DIOSTARGETPORT: + target_port = (u16)arg; + break; + case DIOSSOURCEPORT: + source_port = (u16)arg; + break; + case DIOSETHADDR: + return copy_from_user(daddr, (void *)arg, 6); + break; + case DIOGTARGETIP: + case DIOGTARGETPORT: + case DIOGSOURCEPORT: + case DIOGETHADDR: + break; + default: + return -EINVAL; + } + return 0; +} + +struct 
dump_dev_ops dump_netdev_ops = { + .open = dump_net_open, + .release = dump_net_release, + .silence = dump_net_silence, + .resume = dump_net_resume, + .seek = dump_net_seek, + .write = dump_net_write, + /* .read not implemented */ + .ready = dump_net_ready, + .ioctl = dump_net_ioctl +}; + +static struct dump_dev default_dump_netdev = { + .type_name = "networkdev", + .ops = &dump_netdev_ops, + .curr_offset = 0 +}; + +static int __init +dump_netdev_init(void) +{ + default_dump_netdev.curr_offset = 0; + + if (dump_register_device(&default_dump_netdev) < 0) { + printk("network dump device driver registration failed\n"); + return -1; + } + printk("network device driver for LKCD registered\n"); + + get_random_bytes(&dump_magic, sizeof(dump_magic)); + return 0; +} + +static void __exit +dump_netdev_cleanup(void) +{ + dump_unregister_device(&default_dump_netdev); +} + +MODULE_AUTHOR("LKCD Development Team "); +MODULE_DESCRIPTION("Network Dump Driver for Linux Kernel Crash Dump (LKCD)"); +MODULE_LICENSE("GPL"); + +module_init(dump_netdev_init); +module_exit(dump_netdev_cleanup); diff -Nru a/drivers/dump/dump_overlay.c b/drivers/dump/dump_overlay.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_overlay.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,832 @@ +/* + * Two-stage soft-boot based dump scheme methods (memory overlay + * with post soft-boot writeout) + * + * Started: Oct 2002 - Suparna Bhattacharya + * + * This approach of saving the dump in memory and writing it + * out after a softboot without clearing memory is derived from the + * Mission Critical Linux dump implementation. 
Credits and a big + * thanks for letting the lkcd project make use of the excellent + * piece of work and also for helping with clarifications and + * tips along the way are due to: + * Dave Winchell (primary author of mcore) + * and also to + * Jeff Moyer + * Josh Huber + * + * For those familiar with the mcore implementation, the key + * differences/extensions here are in allowing entire memory to be + * saved (in compressed form) through a careful ordering scheme + * on both the way down as well on the way up after boot, the latter + * for supporting the LKCD notion of passes in which most critical + * data is the first to be saved to the dump device. Also the post + * boot writeout happens from within the kernel rather than driven + * from userspace. + * + * The sequence is orchestrated through the abstraction of "dumpers", + * one for the first stage which then sets up the dumper for the next + * stage, providing for a smooth and flexible reuse of the singlestage + * dump scheme methods and a handle to pass dump device configuration + * information across the soft boot. + * + * Copyright (C) 2002 International Business Machines Corp. + * + * This code is released under version 2 of the GNU GPL. + */ + +/* + * Disruptive dumping using the second kernel soft-boot option + * for issuing dump i/o operates in 2 stages: + * + * (1) - Saves the (compressed & formatted) dump in memory using a + * carefully ordered overlay scheme designed to capture the + * entire physical memory or selective portions depending on + * dump config settings, + * - Registers the stage 2 dumper and + * - Issues a soft reboot w/o clearing memory. + * + * The overlay scheme starts with a small bootstrap free area + * and follows a reverse ordering of passes wherein it + * compresses and saves data starting with the least critical + * areas first, thus freeing up the corresponding pages to + * serve as destination for subsequent data to be saved, and + * so on. 
With a good compression ratio, this makes it feasible + * to capture an entire physical memory dump without significantly + * reducing memory available during regular operation. + * + * (2) Post soft-reboot, runs through the saved memory dump and + * writes it out to disk, this time around, taking care to + * save the more critical data first (i.e. pages which figure + * in early passes for a regular dump). Finally issues a + * clean reboot. + * + * Since the data was saved in memory after selection/filtering + * and formatted as per the chosen output dump format, at this + * stage the filter and format actions are just dummy (or + * passthrough) actions, except for influence on ordering of + * passes. + */ + +#include +#include +#include +#include +#include +#include "dump_methods.h" + +extern struct list_head dumper_list_head; +extern struct dump_memdev *dump_memdev; +extern struct dumper dumper_stage2; +static struct dump_config_block *dump_saved_config = NULL; +extern struct dump_blockdev *dump_blockdev; +static struct dump_memdev *saved_dump_memdev = NULL; +static struct dumper *saved_dumper = NULL; + +unsigned long dump_oncpu; + +static struct dumper *dumper_by_name(const char *name) +{ +#ifdef LATER + struct dumper *dumper; + list_for_each_entry(dumper, &dumper_list_head, dumper_list) + if (!strncmp(dumper->name, name, 32)) + return dumper; + + /* not found */ + return NULL; +#endif + /* Temporary proof of concept */ + if (!strncmp(dumper_stage2.name, name, 32)) + return &dumper_stage2; + else + return NULL; +} + +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT +extern void dump_early_reserve_map(struct dump_memdev *); + +void crashdump_reserve(void) +{ + extern unsigned long crashdump_addr; + + if (crashdump_addr == 0xdeadbeef) + return; + + /* reserve dump config and saved dump pages */ + dump_saved_config = (struct dump_config_block *)crashdump_addr; + /* magic verification */ + if (dump_saved_config->magic != DUMP_MAGIC_LIVE) { + printk("Invalid dump magic. 
Ignoring dump\n"); + dump_saved_config = NULL; + return; + } + + printk("Dump may be available from previous boot\n"); + + reserve_bootmem(virt_to_phys((void *)crashdump_addr), + PAGE_ALIGN(sizeof(struct dump_config_block))); + dump_early_reserve_map(&dump_saved_config->memdev); + +} +#endif + +/* + * Loads the dump configuration from a memory block saved across soft-boot + * The ops vectors need fixing up as the corresp. routines may have + * relocated in the new soft-booted kernel. + */ +int dump_load_config(struct dump_config_block *config) +{ + struct dumper *dumper; + struct dump_data_filter *filter_table, *filter; + struct dump_dev *dev; + int i; + + if (config->magic != DUMP_MAGIC_LIVE) + return -ENOENT; /* not a valid config */ + + /* initialize generic config data */ + memcpy(&dump_config, &config->config, sizeof(dump_config)); + + /* initialize dumper state */ + if (!(dumper = dumper_by_name(config->dumper.name))) { + printk("dumper name mismatch\n"); + return -ENOENT; /* dumper mismatch */ + } + + /* verify and fixup schema */ + if (strncmp(dumper->scheme->name, config->scheme.name, 32)) { + printk("dumper scheme mismatch\n"); + return -ENOENT; /* mismatch */ + } + config->scheme.ops = dumper->scheme->ops; + config->dumper.scheme = &config->scheme; + + /* verify and fixup filter operations */ + filter_table = dumper->filter; + for (i = 0, filter = config->filter_table; + ((i < MAX_PASSES) && filter_table[i].selector); + i++, filter++) { + if (strncmp(filter_table[i].name, filter->name, 32)) { + printk("dump filter mismatch\n"); + return -ENOENT; /* filter name mismatch */ + } + filter->selector = filter_table[i].selector; + } + config->dumper.filter = config->filter_table; + + /* fixup format */ + if (strncmp(dumper->fmt->name, config->fmt.name, 32)) { + printk("dump format mismatch\n"); + return -ENOENT; /* mismatch */ + } + config->fmt.ops = dumper->fmt->ops; + config->dumper.fmt = &config->fmt; + + /* fixup target device */ + dev = (struct dump_dev 
*)(&config->dev[0]); + if (dumper->dev == NULL) { + pr_debug("Vanilla dumper - assume default\n"); + if (dump_dev == NULL) + return -ENODEV; + dumper->dev = dump_dev; + } + + if (strncmp(dumper->dev->type_name, dev->type_name, 32)) { + printk("dump dev type mismatch %s instead of %s\n", + dev->type_name, dumper->dev->type_name); + return -ENOENT; /* mismatch */ + } + dev->ops = dumper->dev->ops; + config->dumper.dev = dev; + + /* fixup memory device containing saved dump pages */ + /* assume statically init'ed dump_memdev */ + config->memdev.ddev.ops = dump_memdev->ddev.ops; + /* switch to memdev from prev boot */ + saved_dump_memdev = dump_memdev; /* remember current */ + dump_memdev = &config->memdev; + + /* Make this the current primary dumper */ + dump_config.dumper = &config->dumper; + + return 0; +} + +/* Saves the dump configuration in a memory block for use across a soft-boot */ +int dump_save_config(struct dump_config_block *config) +{ + printk("saving dump config settings\n"); + + /* dump config settings */ + memcpy(&config->config, &dump_config, sizeof(dump_config)); + + /* dumper state */ + memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper)); + memcpy(&config->scheme, dump_config.dumper->scheme, + sizeof(struct dump_scheme)); + memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt)); + memcpy(&config->dev[0], dump_config.dumper->dev, + sizeof(struct dump_anydev)); + memcpy(&config->filter_table, dump_config.dumper->filter, + sizeof(struct dump_data_filter)*MAX_PASSES); + + /* handle to saved mem pages */ + memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev)); + + config->magic = DUMP_MAGIC_LIVE; + + return 0; +} + +int dump_init_stage2(struct dump_config_block *saved_config) +{ + int err = 0; + + pr_debug("dump_init_stage2\n"); + /* Check if dump from previous boot exists */ + if (saved_config) { + printk("loading dumper from previous boot \n"); + /* load and configure dumper from previous boot */ + if ((err = 
dump_load_config(saved_config))) + return err; + + if (!dump_oncpu) { + if ((err = dump_configure(dump_config.dump_device))) { + printk("Stage 2 dump configure failed\n"); + return err; + } + } + + dumper_reset(); + dump_dev = dump_config.dumper->dev; + /* write out the dump */ + err = dump_generic_execute(NULL, NULL); + + dump_saved_config = NULL; + + if (!dump_oncpu) { + dump_unconfigure(); + } + + return err; + + } else { + /* no dump to write out */ + printk("no dumper from previous boot \n"); + return 0; + } +} + +extern void dump_mem_markpages(struct dump_memdev *); + +int dump_switchover_stage(void) +{ + int ret = 0; + + /* trigger stage 2 rightaway - in real life would be after soft-boot */ + /* dump_saved_config would be a boot param */ + saved_dump_memdev = dump_memdev; + saved_dumper = dump_config.dumper; + ret = dump_init_stage2(dump_saved_config); + dump_memdev = saved_dump_memdev; + dump_config.dumper = saved_dumper; + return ret; +} + +int dump_activate_softboot(void) +{ + int err = 0; + + /* temporary - switchover to writeout previously saved dump */ + err = dump_switchover_stage(); /* non-disruptive case */ + if (dump_oncpu) + dump_config.dumper = &dumper_stage1; /* set things back */ + + return err; +} + +/* --- DUMP SCHEME ROUTINES --- */ + +static inline int dump_buf_pending(struct dumper *dumper) +{ + return (dumper->curr_buf - dumper->dump_buf); +} + +/* Invoked during stage 1 of soft-reboot based dumping */ +int dump_overlay_sequencer(void) +{ + struct dump_data_filter *filter = dump_config.dumper->filter; + struct dump_data_filter *filter2 = dumper_stage2.filter; + int pass = 0, err = 0, save = 0; + int (*action)(unsigned long, unsigned long); + + /* Make sure gzip compression is being used */ + if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) { + printk(" Please set GZIP compression \n"); + return -EINVAL; + } + + /* start filling in dump data right after the header */ + dump_config.dumper->curr_offset = + 
PAGE_ALIGN(dump_config.dumper->header_len); + + /* Locate the last pass */ + for (;filter->selector; filter++, pass++); + + /* + * Start from the end backwards: overlay involves a reverse + * ordering of passes, since less critical pages are more + * likely to be reusable as scratch space once we are through + * with them. + */ + for (--pass, --filter; pass >= 0; pass--, filter--) + { + /* Assumes passes are exclusive (even across dumpers) */ + /* Requires care when coding the selection functions */ + if ((save = filter->level_mask & dump_config.level)) + action = dump_save_data; + else + action = dump_skip_data; + + /* Remember the offset where this pass started */ + /* The second stage dumper would use this */ + if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) { + pr_debug("Starting pass %d with pending data\n", pass); + pr_debug("filling dummy data to page-align it\n"); + dump_config.dumper->curr_buf = (void *)PAGE_ALIGN( + (unsigned long)dump_config.dumper->curr_buf); + } + + filter2[pass].start = dump_config.dumper->curr_offset + + dump_buf_pending(dump_config.dumper); + + err = dump_iterator(pass, action, filter); + + filter2[pass].end = dump_config.dumper->curr_offset + + dump_buf_pending(dump_config.dumper); + + if (err < 0) { + printk("dump_overlay_seq: failure %d in pass %d\n", + err, pass); + break; + } + printk("\n %d overlay pages %s of %d each in pass %d\n", + err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass); + } + + return err; +} + +/* from dump_memdev.c */ +extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc); +extern struct page *dump_mem_next_page(struct dump_memdev *dev); + +static inline struct page *dump_get_saved_page(loff_t loc) +{ + return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT)); +} + +static inline struct page *dump_next_saved_page(void) +{ + return (dump_mem_next_page(dump_memdev)); +} + +/* + * Iterates over list of saved dump pages. 
Invoked during second stage of + * soft boot dumping + * + * Observation: If additional selection is desired at this stage then + * a different iterator could be written which would advance + * to the next page header everytime instead of blindly picking up + * the data. In such a case loc would be interpreted differently. + * At this moment however a blind pass seems sufficient, cleaner and + * faster. + */ +int dump_saved_data_iterator(int pass, int (*action)(unsigned long, + unsigned long), struct dump_data_filter *filter) +{ + loff_t loc = filter->start; + struct page *page; + unsigned long count = 0; + int err = 0; + unsigned long sz; + + printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass, + filter->start, filter->end); + + /* loc will get treated as logical offset into stage 1 */ + page = dump_get_saved_page(loc); + + for (; loc < filter->end; loc += PAGE_SIZE) { + dump_config.dumper->curr_loc = loc; + if (!page) { + printk("no more saved data for pass %d\n", pass); + break; + } + sz = (loc + PAGE_SIZE > filter->end) ? filter->end - loc : + PAGE_SIZE; + + if (page && filter->selector(pass, (unsigned long)page, + PAGE_SIZE)) { + pr_debug("mem offset 0x%llx\n", loc); + if ((err = action((unsigned long)page, sz))) + break; + else + count++; + /* clear the contents of page */ + /* fixme: consider using KM_DUMP instead */ + clear_highpage(page); + + } + page = dump_next_saved_page(); + } + + return err ? 
err : count; +} + +static inline int dump_overlay_pages_done(struct page *page, int nr) +{ + int ret=0; + + for (; nr ; page++, nr--) { + if (dump_check_and_free_page(dump_memdev, page)) + ret++; + } + return ret; +} + +int dump_overlay_save_data(unsigned long loc, unsigned long len) +{ + int err = 0; + struct page *page = (struct page *)loc; + static unsigned long cnt = 0; + + if ((err = dump_generic_save_data(loc, len))) + return err; + + if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) { + cnt++; + if (!(cnt & 0x7f)) + pr_debug("released page 0x%lx\n", page_to_pfn(page)); + } + + return err; +} + + +int dump_overlay_skip_data(unsigned long loc, unsigned long len) +{ + struct page *page = (struct page *)loc; + + dump_overlay_pages_done(page, len >> PAGE_SHIFT); + return 0; +} + +int dump_overlay_resume(void) +{ + int err = 0; + + /* + * switch to stage 2 dumper, save dump_config_block + * and then trigger a soft-boot + */ + dumper_stage2.header_len = dump_config.dumper->header_len; + dump_config.dumper = &dumper_stage2; + if ((err = dump_save_config(dump_saved_config))) + return err; + + dump_dev = dump_config.dumper->dev; + + return err; + err = dump_switchover_stage(); /* plugs into soft boot mechanism */ + dump_config.dumper = &dumper_stage1; /* set things back */ + return err; +} + +int dump_overlay_configure(unsigned long devid) +{ + struct dump_dev *dev; + struct dump_config_block *saved_config = dump_saved_config; + int err = 0; + + /* If there is a previously saved dump, write it out first */ + if (saved_config) { + printk("Processing old dump pending writeout\n"); + err = dump_switchover_stage(); + if (err) { + printk("failed to writeout saved dump\n"); + return err; + } + dump_free_mem(saved_config); /* testing only: not after boot */ + } + + dev = dumper_stage2.dev = dump_config.dumper->dev; + /* From here on the intermediate dump target is memory-only */ + dump_dev = dump_config.dumper->dev = &dump_memdev->ddev; + if ((err = 
dump_generic_configure(0))) { + printk("dump generic configure failed: err %d\n", err); + return err; + } + /* temporary */ + dumper_stage2.dump_buf = dump_config.dumper->dump_buf; + + /* Sanity check on the actual target dump device */ + if (!dev || (err = dev->ops->open(dev, devid))) { + return err; + } + /* TBD: should we release the target if this is soft-boot only ? */ + + /* alloc a dump config block area to save across reboot */ + if (!(dump_saved_config = dump_alloc_mem(sizeof(struct + dump_config_block)))) { + printk("dump config block alloc failed\n"); + /* undo configure */ + dump_generic_unconfigure(); + return -ENOMEM; + } + dump_config.dump_addr = (unsigned long)dump_saved_config; + printk("Dump config block of size %d set up at 0x%lx\n", + sizeof(*dump_saved_config), (unsigned long)dump_saved_config); + return 0; +} + +int dump_overlay_unconfigure(void) +{ + struct dump_dev *dev = dumper_stage2.dev; + int err = 0; + + pr_debug("dump_overlay_unconfigure\n"); + /* Close the secondary device */ + dev->ops->release(dev); + pr_debug("released secondary device\n"); + + err = dump_generic_unconfigure(); + pr_debug("Unconfigured generic portions\n"); + dump_free_mem(dump_saved_config); + dump_saved_config = NULL; + pr_debug("Freed saved config block\n"); + dump_dev = dump_config.dumper->dev = dumper_stage2.dev; + + printk("Unconfigured overlay dumper\n"); + return err; +} + +int dump_staged_unconfigure(void) +{ + int err = 0; + struct dump_config_block *saved_config = dump_saved_config; + struct dump_dev *dev; + + pr_debug("dump_staged_unconfigure\n"); + err = dump_generic_unconfigure(); + + /* now check if there is a saved dump waiting to be written out */ + if (saved_config) { + printk("Processing saved dump pending writeout\n"); + if ((err = dump_switchover_stage())) { + printk("Error in commiting saved dump at 0x%lx\n", + (unsigned long)saved_config); + printk("Old dump may hog memory\n"); + } else { + dump_free_mem(saved_config); + pr_debug("Freed saved 
config block\n"); + } + dump_saved_config = NULL; + } else { + dev = &dump_memdev->ddev; + dev->ops->release(dev); + } + printk("Unconfigured second stage dumper\n"); + + return 0; +} + +/* ----- PASSTHRU FILTER ROUTINE --------- */ + +/* transparent - passes everything through */ +int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz) +{ + return 1; +} + +/* ----- PASSTRU FORMAT ROUTINES ---- */ + + +int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs) +{ + dump_config.dumper->header_dirty++; + return 0; +} + +/* Copies bytes of data from page(s) to the specified buffer */ +int dump_copy_pages(void *buf, struct page *page, unsigned long sz) +{ + unsigned long len = 0, bytes; + void *addr; + + while (len < sz) { + addr = kmap_atomic(page, KM_DUMP); + bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len; + memcpy(buf, addr, bytes); + kunmap_atomic(addr, KM_DUMP); + buf += bytes; + len += bytes; + page++; + } + /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */ + + return sz - len; +} + +int dump_passthru_update_header(void) +{ + long len = dump_config.dumper->header_len; + struct page *page; + void *buf = dump_config.dumper->dump_buf; + int err = 0; + + if (!dump_config.dumper->header_dirty) + return 0; + + pr_debug("Copying header of size %ld bytes from memory\n", len); + if (len > DUMP_BUFFER_SIZE) + return -E2BIG; + + page = dump_mem_lookup(dump_memdev, 0); + for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) { + if ((err = dump_copy_pages(buf, page, PAGE_SIZE))) + return err; + page = dump_mem_next_page(dump_memdev); + } + if (len > 0) { + printk("Incomplete header saved in mem\n"); + return -ENOENT; + } + + if ((err = dump_dev_seek(0))) { + printk("Unable to seek to dump header offset\n"); + return err; + } + err = dump_ll_write(dump_config.dumper->dump_buf, + buf - dump_config.dumper->dump_buf); + if (err < dump_config.dumper->header_len) + return (err < 0) ? 
err : -ENOSPC; + + dump_config.dumper->header_dirty = 0; + return 0; +} + +static loff_t next_dph_offset = 0; + +static int dph_valid(struct __dump_page *dph) +{ + if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags + > DUMP_DH_COMPRESSED) || (!dph->dp_flags) || + (dph->dp_size > PAGE_SIZE)) { + printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n", + dph->dp_address, dph->dp_size, dph->dp_flags); + return 0; + } + return 1; +} + +int dump_verify_lcrash_data(void *buf, unsigned long sz) +{ + struct __dump_page *dph; + + /* sanity check for page headers */ + while (next_dph_offset + sizeof(*dph) < sz) { + dph = (struct __dump_page *)(buf + next_dph_offset); + if (!dph_valid(dph)) { + printk("Invalid page hdr at offset 0x%llx\n", + next_dph_offset); + return -EINVAL; + } + next_dph_offset += dph->dp_size + sizeof(*dph); + } + + next_dph_offset -= sz; + return 0; +} + +/* + * TBD/Later: Consider avoiding the copy by using a scatter/gather + * vector representation for the dump buffer + */ +int dump_passthru_add_data(unsigned long loc, unsigned long sz) +{ + struct page *page = (struct page *)loc; + void *buf = dump_config.dumper->curr_buf; + int err = 0; + + if ((err = dump_copy_pages(buf, page, sz))) { + printk("dump_copy_pages failed"); + return err; + } + + if ((err = dump_verify_lcrash_data(buf, sz))) { + printk("dump_verify_lcrash_data failed\n"); + printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page)); + printk("Page flags 0x%lx\n", page->flags); + printk("Page count 0x%x\n", atomic_read(&page->count)); + return err; + } + + dump_config.dumper->curr_buf = buf + sz; + + return 0; +} + + +/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */ + +/* Scheme to overlay saved data in memory for writeout after a soft-boot */ +struct dump_scheme_ops dump_scheme_overlay_ops = { + .configure = dump_overlay_configure, + .unconfigure = dump_overlay_unconfigure, + .sequencer = dump_overlay_sequencer, + .iterator = 
dump_page_iterator, + .save_data = dump_overlay_save_data, + .skip_data = dump_overlay_skip_data, + .write_buffer = dump_generic_write_buffer +}; + +struct dump_scheme dump_scheme_overlay = { + .name = "overlay", + .ops = &dump_scheme_overlay_ops +}; + + +/* Stage 1 must use a good compression scheme - default to gzip */ +extern struct __dump_compress dump_gzip_compression; + +struct dumper dumper_stage1 = { + .name = "stage1", + .scheme = &dump_scheme_overlay, + .fmt = &dump_fmt_lcrash, + .compress = &dump_none_compression, /* needs to be gzip */ + .filter = dump_filter_table, + .dev = NULL, +}; + +/* Stage 2 dumper: Activated after softboot to write out saved dump to device */ + +/* Formatter that transfers data as is (transparent) w/o further conversion */ +struct dump_fmt_ops dump_fmt_passthru_ops = { + .configure_header = dump_passthru_configure_header, + .update_header = dump_passthru_update_header, + .save_context = NULL, /* unused */ + .add_data = dump_passthru_add_data, + .update_end_marker = dump_lcrash_update_end_marker +}; + +struct dump_fmt dump_fmt_passthru = { + .name = "passthru", + .ops = &dump_fmt_passthru_ops +}; + +/* Filter that simply passes along any data within the range (transparent)*/ +/* Note: The start and end ranges in the table are filled in at run-time */ + +extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz); + +struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = { +{.name = "passkern", .selector = dump_passthru_filter, + .level_mask = DUMP_MASK_KERN }, +{.name = "passuser", .selector = dump_passthru_filter, + .level_mask = DUMP_MASK_USED }, +{.name = "passunused", .selector = dump_passthru_filter, + .level_mask = DUMP_MASK_UNUSED }, +{.name = "none", .selector = dump_filter_none, + .level_mask = DUMP_MASK_REST } +}; + + +/* Scheme to handle data staged / preserved across a soft-boot */ +struct dump_scheme_ops dump_scheme_staged_ops = { + .configure = dump_generic_configure, + .unconfigure = 
dump_staged_unconfigure, + .sequencer = dump_generic_sequencer, + .iterator = dump_saved_data_iterator, + .save_data = dump_generic_save_data, + .skip_data = dump_generic_skip_data, + .write_buffer = dump_generic_write_buffer +}; + +struct dump_scheme dump_scheme_staged = { + .name = "staged", + .ops = &dump_scheme_staged_ops +}; + +/* The stage 2 dumper comprising all these */ +struct dumper dumper_stage2 = { + .name = "stage2", + .scheme = &dump_scheme_staged, + .fmt = &dump_fmt_passthru, + .compress = &dump_none_compression, + .filter = dump_passthru_filtertable, + .dev = NULL, +}; + diff -Nru a/drivers/dump/dump_rle.c b/drivers/dump/dump_rle.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_rle.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,175 @@ +/* + * RLE Compression functions for kernel crash dumps. + * + * Created by: Matt Robinson (yakker@sourceforge.net) + * Copyright 2001 Matt D. Robinson. All rights reserved. + * + * This code is released under version 2 of the GNU GPL. + */ + +/* header files */ +#include +#include +#include +#include +#include +#include +#include + +/* + * Name: dump_compress_rle() + * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more + * reasonable, if possible. This is the same routine we use in IRIX. + */ +static u16 +dump_compress_rle(const u8 *old, u16 oldsize, u8 *new, u16 newsize) +{ + u16 ri, wi, count = 0; + u_char value = 0, cur_byte; + + /* + * If the block should happen to "compress" to larger than the + * buffer size, allocate a larger one and change cur_buf_size. 
+ */ + + wi = ri = 0; + + while (ri < oldsize) { + if (!ri) { + cur_byte = value = old[ri]; + count = 0; + } else { + if (count == 255) { + if (wi + 3 > oldsize) { + return oldsize; + } + new[wi++] = 0; + new[wi++] = count; + new[wi++] = value; + value = cur_byte = old[ri]; + count = 0; + } else { + if ((cur_byte = old[ri]) == value) { + count++; + } else { + if (count > 1) { + if (wi + 3 > oldsize) { + return oldsize; + } + new[wi++] = 0; + new[wi++] = count; + new[wi++] = value; + } else if (count == 1) { + if (value == 0) { + if (wi + 3 > oldsize) { + return oldsize; + } + new[wi++] = 0; + new[wi++] = 1; + new[wi++] = 0; + } else { + if (wi + 2 > oldsize) { + return oldsize; + } + new[wi++] = value; + new[wi++] = value; + } + } else { /* count == 0 */ + if (value == 0) { + if (wi + 2 > oldsize) { + return oldsize; + } + new[wi++] = value; + new[wi++] = value; + } else { + if (wi + 1 > oldsize) { + return oldsize; + } + new[wi++] = value; + } + } /* if count > 1 */ + + value = cur_byte; + count = 0; + + } /* if byte == value */ + + } /* if count == 255 */ + + } /* if ri == 0 */ + ri++; + + } + if (count > 1) { + if (wi + 3 > oldsize) { + return oldsize; + } + new[wi++] = 0; + new[wi++] = count; + new[wi++] = value; + } else if (count == 1) { + if (value == 0) { + if (wi + 3 > oldsize) + return oldsize; + new[wi++] = 0; + new[wi++] = 1; + new[wi++] = 0; + } else { + if (wi + 2 > oldsize) + return oldsize; + new[wi++] = value; + new[wi++] = value; + } + } else { /* count == 0 */ + if (value == 0) { + if (wi + 2 > oldsize) + return oldsize; + new[wi++] = value; + new[wi++] = value; + } else { + if (wi + 1 > oldsize) + return oldsize; + new[wi++] = value; + } + } /* if count > 1 */ + + value = cur_byte; + count = 0; + return wi; +} + +/* setup the rle compression functionality */ +static struct __dump_compress dump_rle_compression = { + .compress_type = DUMP_COMPRESS_RLE, + .compress_func = dump_compress_rle, + .compress_name = "RLE", +}; + +/* + * Name: 
dump_compress_rle_init() + * Func: Initialize rle compression for dumping. + */ +static int __init +dump_compress_rle_init(void) +{ + dump_register_compression(&dump_rle_compression); + return 0; +} + +/* + * Name: dump_compress_rle_cleanup() + * Func: Remove rle compression for dumping. + */ +static void __exit +dump_compress_rle_cleanup(void) +{ + dump_unregister_compression(DUMP_COMPRESS_RLE); +} + +/* module initialization */ +module_init(dump_compress_rle_init); +module_exit(dump_compress_rle_cleanup); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("LKCD Development Team "); +MODULE_DESCRIPTION("RLE compression module for crash dump driver"); diff -Nru a/drivers/dump/dump_scheme.c b/drivers/dump/dump_scheme.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_scheme.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,370 @@ +/* + * Default single stage dump scheme methods + * + * Previously a part of dump_base.c + * + * Started: Oct 2002 - Suparna Bhattacharya + * Split and rewrote LKCD dump scheme to generic dump method + * interfaces + * Derived from original code created by + * Matt Robinson ) + * + * Contributions from SGI, IBM, HP, MCL, and others. + * + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved. + * Copyright (C) 2002 International Business Machines Corp. + * + * This code is released under version 2 of the GNU GPL. + */ + +/* + * Implements the default dump scheme, i.e. single-stage gathering and + * saving of dump data directly to the target device, which operates in + * a push mode, where the dumping system decides what data it saves + * taking into account pre-specified dump config options. 
+ * + * Aside: The 2-stage dump scheme, where there is a soft-reset between + * the gathering and saving phases, also reuses some of these + * default routines (see dump_overlay.c) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "dump_methods.h" + +extern int panic_timeout; /* time before reboot */ + +extern void dump_speedo(int); + +/* Default sequencer used during single stage dumping */ +/* Also invoked during stage 2 of soft-boot based dumping */ +int dump_generic_sequencer(void) +{ + struct dump_data_filter *filter = dump_config.dumper->filter; + int pass = 0, err = 0, save = 0; + int (*action)(unsigned long, unsigned long); + + /* + * We want to save the more critical data areas first in + * case we run out of space, encounter i/o failures, or get + * interrupted otherwise and have to give up midway + * So, run through the passes in increasing order + */ + for (;filter->selector; filter++, pass++) + { + /* Assumes passes are exclusive (even across dumpers) */ + /* Requires care when coding the selection functions */ + if ((save = filter->level_mask & dump_config.level)) + action = dump_save_data; + else + action = dump_skip_data; + + if ((err = dump_iterator(pass, action, filter)) < 0) + break; + + printk("\n %d dump pages %s of %d each in pass %d\n", + err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass); + + } + + return (err < 0) ? err : 0; +} + +static inline struct page *dump_get_page(loff_t loc) +{ + + unsigned long page_index = loc >> PAGE_SHIFT; + + /* todo: complete this to account for ia64/discontig mem */ + /* todo: and to check for validity, ram page, no i/o mem etc */ + /* need to use pfn/physaddr equiv of kern_addr_valid */ + + /* Important: + * On ARM/XScale system, the physical address starts from + * PHYS_OFFSET, and it maybe the situation that PHYS_OFFSET != 0. + * For example on Intel's PXA250, PHYS_OFFSET = 0xa0000000. And the + * page index starts from PHYS_PFN_OFFSET. 
When configuring + * filter, filter->start is assigned to 0 in dump_generic_configure. + * Here we want to adjust it by adding PHYS_PFN_OFFSET to it! + */ +#ifdef CONFIG_ARM + page_index += PHYS_PFN_OFFSET; +#endif + if (__dump_page_valid(page_index)) + return pfn_to_page(page_index); + else + return NULL; + +} + +/* Default iterator: for singlestage and stage 1 of soft-boot dumping */ +/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */ +int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long), + struct dump_data_filter *filter) +{ + /* Todo : fix unit, type */ + loff_t loc; + int count = 0, err = 0; + struct page *page; + + /* Todo: Add membanks code */ + /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */ + + for (loc = filter->start; loc < filter->end; loc += DUMP_PAGE_SIZE) { + dump_config.dumper->curr_loc = loc; + page = dump_get_page(loc); + if (page && filter->selector(pass, (unsigned long) page, + DUMP_PAGE_SIZE)) { + if ((err = action((unsigned long)page, DUMP_PAGE_SIZE))) + { + printk("dump_page_iterator: err %d for loc " + "0x%llx, in pass %d\n", err, loc, pass); + break; + } else + count++; + } + } + + return err ? err : count; +} + +/* + * Base function that saves the selected block of data in the dump + * Action taken when iterator decides that data needs to be saved + */ +int dump_generic_save_data(unsigned long loc, unsigned long sz) +{ + void *buf; + void *dump_buf = dump_config.dumper->dump_buf; + int left, bytes, ret; + + if ((ret = dump_add_data(loc, sz))) { + return ret; + } + buf = dump_config.dumper->curr_buf; + + /* If we've filled up the buffer write it out */ + if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) { + bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE); + if (bytes < DUMP_BUFFER_SIZE) { + printk("dump_write_buffer failed %d\n", bytes); + return bytes ? 
-ENOSPC : bytes; + } + + left -= bytes; + + /* -- A few chores to do from time to time -- */ + dump_config.dumper->count++; + + if (!(dump_config.dumper->count & 0x3f)) { + /* Update the header every one in a while */ + memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE); + if ((ret = dump_update_header()) < 0) { + /* issue warning */ + return ret; + } + printk("."); + + touch_nmi_watchdog(); + } else if (!(dump_config.dumper->count & 0x7)) { + /* Show progress so the user knows we aren't hung */ + dump_speedo(dump_config.dumper->count >> 3); + } + /* Todo: Touch/Refresh watchdog */ + + /* --- Done with periodic chores -- */ + + /* + * extra bit of copying to simplify verification + * in the second kernel boot based scheme + */ + memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf + + DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE); + + /* now adjust the leftover bits back to the top of the page */ + /* this case would not arise during stage 2 (passthru) */ + memset(dump_buf, 'z', DUMP_BUFFER_SIZE); + if (left) { + memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left); + } + buf -= DUMP_BUFFER_SIZE; + dump_config.dumper->curr_buf = buf; + } + + return 0; +} + +int dump_generic_skip_data(unsigned long loc, unsigned long sz) +{ + /* dummy by default */ + return 0; +} + +/* + * Common low level routine to write a buffer to current dump device + * Expects checks for space etc to have been taken care of by the caller + * Operates serially at the moment for simplicity. 
+ * TBD/Todo: Consider batching for improved throughput + */ +int dump_ll_write(void *buf, unsigned long len) +{ + long transferred = 0, last_transfer = 0; + int ret = 0; + + /* make sure device is ready */ + while ((ret = dump_dev_ready(NULL)) == -EAGAIN); + if (ret < 0) { + printk("dump_dev_ready failed !err %d\n", ret); + return ret; + } + + while (len) { + if ((last_transfer = dump_dev_write(buf, len)) <= 0) { + ret = last_transfer; + printk("dump_dev_write failed !err %d\n", + ret); + break; + } + /* wait till complete */ + while ((ret = dump_dev_ready(buf)) == -EAGAIN) + cpu_relax(); + + if (ret < 0) { + printk("i/o failed !err %d\n", ret); + break; + } + + len -= last_transfer; + buf += last_transfer; + transferred += last_transfer; + } + return (ret < 0) ? ret : transferred; +} + +/* default writeout routine for single dump device */ +/* writes out the dump data ensuring enough space is left for the end marker */ +int dump_generic_write_buffer(void *buf, unsigned long len) +{ + long written = 0; + int err = 0; + + /* check for space */ + if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len + + 2*DUMP_BUFFER_SIZE)) < 0) { + printk("dump_write_buffer: insuff space after offset 0x%llx\n", + dump_config.dumper->curr_offset); + return err; + } + /* alignment check would happen as a side effect of this */ + if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0) + return err; + + written = dump_ll_write(buf, len); + + /* all or none */ + + if (written < len) + written = written ? 
-ENOSPC : written; + else + dump_config.dumper->curr_offset += len; + + return written; +} + +int dump_generic_configure(unsigned long devid) +{ + struct dump_dev *dev = dump_config.dumper->dev; + struct dump_data_filter *filter; + void *buf; + int ret = 0; + + /* Allocate the dump buffer and initialize dumper state */ + /* Assume that we get aligned addresses */ + if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE))) + return -ENOMEM; + + if ((unsigned long)buf & (PAGE_SIZE - 1)) { + /* sanity check for page aligned address */ + dump_free_mem(buf); + return -ENOMEM; /* fixme: better error code */ + } + + /* Initialize the rest of the fields */ + dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE; + dumper_reset(); + + /* Open the dump device */ + if (!dev) + return -ENODEV; + + if ((ret = dev->ops->open(dev, devid))) { + return ret; + } + + /* Initialise the memory ranges in the dump filter */ + for (filter = dump_config.dumper->filter ;filter->selector; filter++) { + if (!filter->start && !filter->end) { + filter->start = 0; + filter->end = num_physpages << PAGE_SHIFT; + } + } + + return 0; +} + +int dump_generic_unconfigure(void) +{ + struct dump_dev *dev = dump_config.dumper->dev; + void *buf = dump_config.dumper->dump_buf; + int ret = 0; + + pr_debug("Generic unconfigure\n"); + /* Close the dump device */ + if (dev && (ret = dev->ops->release(dev))) + return ret; + + printk("Closed dump device\n"); + + if (buf) + dump_free_mem((buf - DUMP_PAGE_SIZE)); + + dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL; + pr_debug("Released dump buffer\n"); + + return 0; +} + + +/* Set up the default dump scheme */ + +struct dump_scheme_ops dump_scheme_singlestage_ops = { + .configure = dump_generic_configure, + .unconfigure = dump_generic_unconfigure, + .sequencer = dump_generic_sequencer, + .iterator = dump_page_iterator, + .save_data = dump_generic_save_data, + .skip_data = dump_generic_skip_data, + .write_buffer = 
dump_generic_write_buffer, +}; + +struct dump_scheme dump_scheme_singlestage = { + .name = "single-stage", + .ops = &dump_scheme_singlestage_ops +}; + +/* The single stage dumper comprising all these */ +struct dumper dumper_singlestage = { + .name = "single-stage", + .scheme = &dump_scheme_singlestage, + .fmt = &dump_fmt_lcrash, + .compress = &dump_none_compression, + .filter = dump_filter_table, + .dev = NULL, +}; + diff -Nru a/drivers/dump/dump_setup.c b/drivers/dump/dump_setup.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/dump/dump_setup.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,822 @@ +/* + * Standard kernel function entry points for Linux crash dumps. + * + * Created by: Matt Robinson (yakker@sourceforge.net) + * Contributions from SGI, IBM, HP, MCL, and others. + * + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved. + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved. + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved. + * + * This code is released under version 2 of the GNU GPL. + */ + +/* + * ----------------------------------------------------------------------- + * + * DUMP HISTORY + * + * This dump code goes back to SGI's first attempts at dumping system + * memory on SGI systems running IRIX. A few developers at SGI needed + * a way to take this system dump and analyze it, and created 'icrash', + * or IRIX Crash. The mechanism (the dumps and 'icrash') were used + * by support people to generate crash reports when a system failure + * occurred. This was vital for large system configurations that + * couldn't apply patch after patch after fix just to hope that the + * problems would go away. So the system memory, along with the crash + * dump analyzer, allowed support people to quickly figure out what the + * problem was on the system with the crash dump. + * + * In comes Linux. 
SGI started moving towards the open source community, + * and upon doing so, SGI wanted to take its support utilities into Linux + * with the hopes that they would end up the in kernel and user space to + * be used by SGI's customers buying SGI Linux systems. One of the first + * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash + * Dumps. LKCD comprises of a patch to the kernel to enable system + * dumping, along with 'lcrash', or Linux Crash, to analyze the system + * memory dump. A few additional system scripts and kernel modifications + * are also included to make the dump mechanism and dump data easier to + * process and use. + * + * As soon as LKCD was released into the open source community, a number + * of larger companies started to take advantage of it. Today, there are + * many community members that contribute to LKCD, and it continues to + * flourish and grow as an open source project. + */ + +/* + * DUMP TUNABLES + * + * This is the list of system tunables (via /proc) that are available + * for Linux systems. All the read, write, etc., functions are listed + * here. Currently, there are a few different tunables for dumps: + * + * dump_device (used to be dumpdev): + * The device for dumping the memory pages out to. This + * may be set to the primary swap partition for disruptive dumps, + * and must be an unused partition for non-disruptive dumps. + * Todo: In the case of network dumps, this may be interpreted + * as the IP address of the netdump server to connect to. + * + * dump_compress (used to be dump_compress_pages): + * This is the flag which indicates which compression mechanism + * to use. This is a BITMASK, not an index (0,1,2,4,8,16,etc.). + * This is the current set of values: + * + * 0: DUMP_COMPRESS_NONE -- Don't compress any pages. + * 1: DUMP_COMPRESS_RLE -- This uses RLE compression. + * 2: DUMP_COMPRESS_GZIP -- This uses GZIP compression. 
+ * + * dump_level: + * The amount of effort the dump module should make to save + * information for post crash analysis. This value is now + * a BITMASK value, not an index: + * + * 0: Do nothing, no dumping. (DUMP_LEVEL_NONE) + * + * 1: Print out the dump information to the dump header, and + * write it out to the dump_device. (DUMP_LEVEL_HEADER) + * + * 2: Write out the dump header and all kernel memory pages. + * (DUMP_LEVEL_KERN) + * + * 4: Write out the dump header and all kernel and user + * memory pages. (DUMP_LEVEL_USED) + * + * 8: Write out the dump header and all conventional/cached + * memory (RAM) pages in the system (kernel, user, free). + * (DUMP_LEVEL_ALL_RAM) + * + * 16: Write out everything, including non-conventional memory + * like firmware, proms, I/O registers, uncached memory. + * (DUMP_LEVEL_ALL) + * + * The dump_level will default to 1. + * + * dump_flags: + * These are the flags to use when talking about dumps. There + * are lots of possibilities. This is a BITMASK value, not an index. 
+ * + * ----------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include +#include "dump_methods.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * ----------------------------------------------------------------------- + * V A R I A B L E S + * ----------------------------------------------------------------------- + */ + +/* Dump tunables */ +struct dump_config dump_config = { + .level = 0, + .flags = 0, + .dump_device = 0, + .dump_addr = 0, + .dumper = NULL +}; +#ifdef CONFIG_ARM +static _dump_regs_t all_regs; +#endif + +/* Global variables used in dump.h */ + +/* Other global fields */ +extern struct __dump_header dump_header; +struct dump_dev *dump_dev = NULL; /* Active dump device */ +static int dump_compress = 0; + +static u16 dump_compress_none(const u8 *old, u16 oldsize, u8 *new, u16 newsize); +struct __dump_compress dump_none_compression = { + .compress_type = DUMP_COMPRESS_NONE, + .compress_func = dump_compress_none, + .compress_name = "none", +}; + +/* our device operations and functions */ +static int dump_ioctl(struct inode *i, struct file *f, + unsigned int cmd, unsigned long arg); +static int dump_open(struct inode *, struct file *); + +static struct file_operations dump_fops = { + .owner = THIS_MODULE, + .ioctl = dump_ioctl, + .open = dump_open, +}; + +static struct miscdevice dump_miscdev = { + .minor = DUMP_MINOR, + .name = "dump", + .fops = &dump_fops, +}; + +/* static variables */ +static int dump_okay = 0; /* can we dump out to disk? 
*/ +static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED; + +/* used for dump compressors */ +static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list); + +/* list of registered dump targets */ +static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list); + +/* lkcd info structure -- this is used by lcrash for basic system data */ +struct __lkcdinfo lkcdinfo = { + .ptrsz = (sizeof(void *) * 8), +#if defined(__LITTLE_ENDIAN) + .byte_order = __LITTLE_ENDIAN, +#else + .byte_order = __BIG_ENDIAN, +#endif + .page_shift = PAGE_SHIFT, + .page_size = PAGE_SIZE, + .page_mask = PAGE_MASK, + .page_offset = PAGE_OFFSET, +}; + +/* + * ----------------------------------------------------------------------- + * / P R O C T U N A B L E F U N C T I O N S + * ----------------------------------------------------------------------- + */ + +static int proc_dump_device(ctl_table *ctl, int write, struct file *f, + void *buffer, size_t *lenp); + +static int proc_doulonghex(ctl_table *ctl, int write, struct file *f, + void *buffer, size_t *lenp); +/* + * sysctl-tuning infrastructure. 
+ */ +static ctl_table dump_table[] = { + { .ctl_name = CTL_DUMP_LEVEL, + .procname = DUMP_LEVEL_NAME, + .data = &dump_config.level, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_doulonghex, }, + + { .ctl_name = CTL_DUMP_FLAGS, + .procname = DUMP_FLAGS_NAME, + .data = &dump_config.flags, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_doulonghex, }, + + { .ctl_name = CTL_DUMP_COMPRESS, + .procname = DUMP_COMPRESS_NAME, + .data = &dump_compress, /* FIXME */ + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, }, + + { .ctl_name = CTL_DUMP_DEVICE, + .procname = DUMP_DEVICE_NAME, + .mode = 0644, + .data = &dump_config.dump_device, /* FIXME */ + .maxlen = sizeof(int), + .proc_handler = proc_dump_device }, + +#ifdef CONFIG_CRASH_DUMP_MEMDEV + { .ctl_name = CTL_DUMP_ADDR, + .procname = DUMP_ADDR_NAME, + .mode = 0444, + .data = &dump_config.dump_addr, + .maxlen = sizeof(unsigned long), + .proc_handler = proc_doulonghex }, +#endif + + { 0, } +}; + +static ctl_table dump_root[] = { + { .ctl_name = KERN_DUMP, + .procname = "dump", + .mode = 0555, + .child = dump_table }, + { 0, } +}; + +static ctl_table kernel_root[] = { + { .ctl_name = CTL_KERN, + .procname = "kernel", + .mode = 0555, + .child = dump_root, }, + { 0, } +}; + +static struct ctl_table_header *sysctl_header; + +/* + * ----------------------------------------------------------------------- + * C O M P R E S S I O N F U N C T I O N S + * ----------------------------------------------------------------------- + */ + +/* + * Name: dump_compress_none() + * Func: Don't do any compression, period. + */ +static u16 +dump_compress_none(const u8 *old, u16 oldsize, u8 *new, u16 newsize) +{ + /* just return the old size */ + return oldsize; +} + + +/* + * Name: dump_execute() + * Func: Execute the dumping process. This makes sure all the appropriate + * fields are updated correctly, and calls dump_execute_memdump(), + * which does the real work. 
+ */ +void +dump_execute(const char *panic_str, const struct pt_regs *regs) +{ + int state = -1; + unsigned long flags; + + /* make sure we can dump */ + if (!dump_okay) { + pr_info("LKCD not yet configured, can't take dump now\n"); + return; + } + + /* Exclude multiple dumps at the same time, + * and disable interrupts, some drivers may re-enable + * interrupts in with silence() + * + * Try and acquire spin lock. If successful, leave preempt + * and interrupts disabled. See spin_lock_irqsave in spinlock.h + */ + local_irq_save(flags); + if (!spin_trylock(&dump_lock)) { + local_irq_restore(flags); + pr_info("LKCD dump already in progress\n"); + return; + } + + /* Bring system into the strictest level of quiescing for min drift + * dump drivers can soften this as required in dev->ops->silence() + */ + dump_oncpu = smp_processor_id() + 1; + + state = dump_generic_execute(panic_str, regs); + + dump_oncpu = 0; + spin_unlock_irqrestore(&dump_lock, flags); + + if (state < 0) { + printk("Dump Incomplete or failed!\n"); + } else { + printk("Dump Complete; %d dump pages saved.\n", + dump_header.dh_num_dump_pages); + } +} + +/* + * Name: dump_register_compression() + * Func: Register a dump compression mechanism. + */ +void +dump_register_compression(struct __dump_compress *item) +{ + if (item) + list_add(&(item->list), &dump_compress_list); +} + +/* + * Name: dump_unregister_compression() + * Func: Remove a dump compression mechanism, and re-assign the dump + * compression pointer if necessary. 
+ */ +void +dump_unregister_compression(int compression_type) +{ + struct list_head *tmp; + struct __dump_compress *dc; + + /* let's make sure our list is valid */ + if (compression_type != DUMP_COMPRESS_NONE) { + list_for_each(tmp, &dump_compress_list) { + dc = list_entry(tmp, struct __dump_compress, list); + if (dc->compress_type == compression_type) { + list_del(&(dc->list)); + break; + } + } + } +} + +/* + * Name: dump_compress_init() + * Func: Initialize (or re-initialize) compression scheme. + */ +static int +dump_compress_init(int compression_type) +{ + struct list_head *tmp; + struct __dump_compress *dc; + + /* try to remove the compression item */ + list_for_each(tmp, &dump_compress_list) { + dc = list_entry(tmp, struct __dump_compress, list); + if (dc->compress_type == compression_type) { + dump_config.dumper->compress = dc; + dump_compress = compression_type; + pr_debug("Dump Compress %s\n", dc->compress_name); + return 0; + } + } + + /* + * nothing on the list -- return ENODATA to indicate an error + * + * NB: + * EAGAIN: reports "Resource temporarily unavailable" which + * isn't very enlightening. + */ + printk("compression_type:%d not found\n", compression_type); + + return -ENODATA; +} + +static int +dumper_setup(unsigned long flags, unsigned long devid) +{ + int ret = 0; + + /* unconfigure old dumper if it exists */ + dump_okay = 0; + if (dump_config.dumper) { + pr_debug("Unconfiguring current dumper\n"); + dump_unconfigure(); + } + /* set up new dumper */ + if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) { + printk("Configuring softboot based dump \n"); +#ifdef CONFIG_CRASH_DUMP_MEMDEV + dump_config.dumper = &dumper_stage1; +#else + printk("Requires CONFIG_CRASHDUMP_MEMDEV. 
Can't proceed.\n"); + return -1; +#endif + } else { + dump_config.dumper = &dumper_singlestage; + } + dump_config.dumper->dev = dump_dev; + + ret = dump_configure(devid); + if (!ret) { + dump_okay = 1; + pr_debug("%s dumper set up for dev 0x%lx\n", + dump_config.dumper->name, devid); + dump_config.dump_device = devid; + } else { + printk("%s dumper set up failed for dev 0x%lx\n", + dump_config.dumper->name, devid); + dump_config.dumper = NULL; + } + return ret; +} + +static int +dump_target_init(int target) +{ + char type[20]; + struct list_head *tmp; + struct dump_dev *dev; + + switch (target) { + case DUMP_FLAGS_DISKDUMP: + strcpy(type, "blockdev"); break; + case DUMP_FLAGS_NETDUMP: + strcpy(type, "networkdev"); break; + default: + return -1; + } + + /* + * This is a bit stupid, generating strings from flag + * and doing strcmp. This is done because 'struct dump_dev' + * has string 'type_name' and not interger 'type'. + */ + list_for_each(tmp, &dump_target_list) { + dev = list_entry(tmp, struct dump_dev, list); + if (strcmp(type, dev->type_name) == 0) { + dump_dev = dev; + return 0; + } + } + return -1; +} + +static int dump_open(struct inode *i, struct file *f) +{ + /* check capabilities */ + return capable(CAP_SYS_ADMIN) ? 0 :-EPERM; +} + +/* + * Name: dump_ioctl() + * Func: Allow all dump tunables through a standard ioctl() mechanism. + * This is far better than before, where we'd go through /proc, + * because now this will work for multiple OS and architectures. + */ +static int +dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg) +{ + if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS) + /* dump device must be configured first */ + return -ENODEV; + + /* + * This is the main mechanism for controlling get/set data + * for various dump device parameters. The real trick here + * is setting the dump device (DIOSDUMPDEV). That's what + * triggers everything else. 
+ */ + switch (cmd) { + case DIOSDUMPDEV: /* set dump_device */ + pr_debug("Configuring dump device\n"); + if (!(f->f_flags & O_RDWR)) + return -EPERM; + + __dump_open(); + return dumper_setup(dump_config.flags, arg); + + + case DIOGDUMPDEV: /* get dump_device */ + return put_user((long)dump_config.dump_device, (long *)arg); + + case DIOSDUMPLEVEL: /* set dump_level */ + if (!(f->f_flags & O_RDWR)) + return -EPERM; + + /* make sure we have a positive value */ + if (arg < 0) + return -EINVAL; + + /* Fixme: clean this up */ + dump_config.level = 0; + switch ((int)arg) { + case DUMP_LEVEL_ALL: + case DUMP_LEVEL_ALL_RAM: + dump_config.level |= DUMP_MASK_UNUSED; + case DUMP_LEVEL_USED: + dump_config.level |= DUMP_MASK_USED; + case DUMP_LEVEL_KERN: + dump_config.level |= DUMP_MASK_KERN; + case DUMP_LEVEL_HEADER: + dump_config.level |= DUMP_MASK_HEADER; + case DUMP_LEVEL_NONE: + break; + default: + return (-EINVAL); + } + pr_debug("Dump Level 0x%lx\n", dump_config.level); + break; + + case DIOGDUMPLEVEL: /* get dump_level */ + /* fixme: handle conversion */ + return put_user((long)dump_config.level, (long *)arg); + + + case DIOSDUMPFLAGS: /* set dump_flags */ + /* check flags */ + if (!(f->f_flags & O_RDWR)) + return -EPERM; + + /* make sure we have a positive value */ + if (arg < 0) + return -EINVAL; + + if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0) + return -EINVAL; /* return proper error */ + + dump_config.flags = arg; + + pr_debug("Dump Flags 0x%lx\n", dump_config.flags); + break; + + case DIOGDUMPFLAGS: /* get dump_flags */ + return put_user((long)dump_config.flags, (long *)arg); + + case DIOSDUMPCOMPRESS: /* set the dump_compress status */ + if (!(f->f_flags & O_RDWR)) + return -EPERM; + + return dump_compress_init((int)arg); + + case DIOGDUMPCOMPRESS: /* get the dump_compress status */ + return put_user((long)(dump_config.dumper ? 
+ dump_config.dumper->compress->compress_type : 0), + (long *)arg); + + default: + /* + * these are network dump specific ioctls, let the + * module handle them. + */ + return dump_dev_ioctl(cmd, arg); + } + return 0; +} + +/* + * Handle special cases for dump_device + * changing dump device requires doing an opening the device + */ +static int +proc_dump_device(ctl_table *ctl, int write, struct file *f, + void *buffer, size_t *lenp) +{ + int *valp = ctl->data; + int oval = *valp; + int ret = -EPERM; + + /* same permission checks as ioctl */ + if (capable(CAP_SYS_ADMIN)) { + ret = proc_doulonghex(ctl, write, f, buffer, lenp); + if (ret == 0 && write && *valp != oval) { + /* need to restore old value to close properly */ + dump_config.dump_device = (dev_t) oval; + __dump_open(); + ret = dumper_setup(dump_config.flags, (dev_t) *valp); + } + } + + return ret; +} + +/* All for the want of a proc_do_xxx routine which prints values in hex */ +static int +proc_doulonghex(ctl_table *ctl, int write, struct file *f, + void *buffer, size_t *lenp) +{ +#define TMPBUFLEN 20 + unsigned long *i; + size_t len, left; + char buf[TMPBUFLEN]; + + if (!ctl->data || !ctl->maxlen || !*lenp || (f->f_pos)) { + *lenp = 0; + return 0; + } + + i = (unsigned long *) ctl->data; + left = *lenp; + + sprintf(buf, "0x%lx\n", (*i)); + len = strlen(buf); + if (len > left) + len = left; + if(copy_to_user(buffer, buf, len)) + return -EFAULT; + + left -= len; + *lenp -= left; + f->f_pos += *lenp; + return 0; +} + +/* + * ----------------------------------------------------------------------- + * I N I T F U N C T I O N S + * ----------------------------------------------------------------------- + */ + +/* + * These register and unregister routines are exported for modules + * to register their dump drivers (like block, net etc) + */ +int +dump_register_device(struct dump_dev *ddev) +{ + struct list_head *tmp; + struct dump_dev *dev; + + list_for_each(tmp, &dump_target_list) { + dev = list_entry(tmp, 
struct dump_dev, list); + if (strcmp(ddev->type_name, dev->type_name) == 0) { + printk("Target type %s already registered\n", + dev->type_name); + return -1; /* return proper error */ + } + } + list_add(&(ddev->list), &dump_target_list); + + return 0; +} + +void +dump_unregister_device(struct dump_dev *ddev) +{ + list_del(&(ddev->list)); + if (ddev != dump_dev) + return; + + dump_okay = 0; + + if (dump_config.dumper) + dump_unconfigure(); + + dump_config.flags &= ~DUMP_FLAGS_TARGETMASK; + dump_okay = 0; + dump_dev = NULL; + dump_config.dumper = NULL; +} + +static int panic_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ +#ifdef CONFIG_ARM + get_current_general_regs(&all_regs); + get_current_cp14_regs(&all_regs); + get_current_cp15_regs(&all_regs); + dump_execute((const char *)ptr, &all_regs); +#else + struct pt_regs regs; + + get_current_regs(®s); + dump_execute((const char *)ptr, ®s); +#endif + return 0; +} + +extern struct notifier_block *panic_notifier_list; +static int panic_event(struct notifier_block *, unsigned long, void *); +static struct notifier_block panic_block = { + .notifier_call = panic_event, +}; + +#ifdef CONFIG_MAGIC_SYSRQ +/* Sysrq handler */ +static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs, + struct tty_struct *tty) { + dump_execute("sysrq", pt_regs); +} + +static struct sysrq_key_op sysrq_crashdump_op = { + .handler = sysrq_handle_crashdump, + .help_msg = "Dump", + .action_msg = "Starting crash dump", +}; +#endif + +static inline void +dump_sysrq_register(void) +{ +#ifdef CONFIG_MAGIC_SYSRQ + __sysrq_lock_table(); + __sysrq_put_key_op(DUMP_SYSRQ_KEY, &sysrq_crashdump_op); + __sysrq_unlock_table(); +#endif +} + +static inline void +dump_sysrq_unregister(void) +{ +#ifdef CONFIG_MAGIC_SYSRQ + __sysrq_lock_table(); + if (__sysrq_get_key_op(DUMP_SYSRQ_KEY) == &sysrq_crashdump_op) + __sysrq_put_key_op(DUMP_SYSRQ_KEY, NULL); + __sysrq_unlock_table(); +#endif +} + +/* + * Name: dump_init() + * Func: 
Initialize the dump process. This will set up any architecture + * dependent code. The big key is we need the memory offsets before + * the page table is initialized, because the base memory offset + * is changed after paging_init() is called. + */ +static int __init +dump_init(void) +{ + struct sysinfo info; + int err; + + /* try to create our dump device */ + err = misc_register(&dump_miscdev); + if (err) { + printk("cannot register dump character device!\n"); + return err; + } + + __dump_init((u64)PAGE_OFFSET); + + /* set the dump_compression_list structure up */ + dump_register_compression(&dump_none_compression); + + /* grab the total memory size now (not if/when we crash) */ + si_meminfo(&info); + + /* set the memory size */ + dump_header.dh_memory_size = (u64)info.totalram; + + sysctl_header = register_sysctl_table(kernel_root, 0); + dump_sysrq_register(); + + notifier_chain_register(&panic_notifier_list, &panic_block); + dump_function_ptr = dump_execute; + + pr_info("Crash dump driver initialized.\n"); + return 0; +} + +static void __exit +dump_cleanup(void) +{ + dump_okay = 0; + + if (dump_config.dumper) + dump_unconfigure(); + + /* arch-specific cleanup routine */ + __dump_cleanup(); + + /* ignore errors while unregistering -- since can't do anything */ + unregister_sysctl_table(sysctl_header); + misc_deregister(&dump_miscdev); + dump_sysrq_unregister(); + notifier_chain_unregister(&panic_notifier_list, &panic_block); + dump_function_ptr = NULL; +} + +EXPORT_SYMBOL(dump_register_compression); +EXPORT_SYMBOL(dump_unregister_compression); +EXPORT_SYMBOL(dump_register_device); +EXPORT_SYMBOL(dump_unregister_device); +EXPORT_SYMBOL(dump_config); + +EXPORT_SYMBOL(__dump_irq_enable); +EXPORT_SYMBOL(__dump_irq_restore); + +MODULE_AUTHOR("Matt D. 
Robinson "); +MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver"); +MODULE_LICENSE("GPL"); + +module_init(dump_init); +module_exit(dump_cleanup); diff -Nru a/drivers/ide/Makefile b/drivers/ide/Makefile --- a/drivers/ide/Makefile Wed Oct 22 10:40:06 2003 +++ b/drivers/ide/Makefile Wed Oct 22 10:40:06 2003 @@ -8,7 +8,7 @@ # In the future, some of these should be built conditionally. # # First come modules that register themselves with the core -obj-$(CONFIG_BLK_DEV_IDEPCI) += pci/ +obj-$(CONFIG_BLK_DEV_IDE) += pci/ # Core IDE code - must come before legacy diff -Nru a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c --- a/drivers/ide/ide-disk.c Wed Oct 22 10:40:04 2003 +++ b/drivers/ide/ide-disk.c Wed Oct 22 10:40:04 2003 @@ -1371,6 +1371,7 @@ static int write_cache (ide_drive_t *drive, int arg) { ide_task_t args; + int err; if (!(drive->id->cfs_enable_2 & 0x3000)) return 1; @@ -1380,7 +1381,10 @@ SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; args.command_type = ide_cmd_type_parser(&args); - (void) ide_raw_taskfile(drive, &args, NULL); + + err = ide_raw_taskfile(drive, &args, NULL); + if (err) + return err; drive->wcache = arg; return 0; @@ -1688,6 +1692,12 @@ if (drive->id->cfs_enable_2 & 0x3000) write_cache(drive, (id->cfs_enable_2 & 0x3000)); + drive->wcache = drive->id->cfs_enable_1 & 0x20; + if (drive->wcache) { + printk("%s: write cache enabled\n", drive->name); + blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH); + } + #ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT if (drive->using_dma) HWIF(drive)->ide_dma_queued_on(drive); @@ -1755,7 +1765,7 @@ drive->wcache = 0; /* Cache enabled ? */ if (drive->id->csfo & 1) - drive->wcache = 1; + drive->wcache = 1; /* Cache command set available ? 
*/ if (drive->id->cfs_enable_1 & (1<<5)) drive->wcache = 1; diff -Nru a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c --- a/drivers/ide/ide-io.c Wed Oct 22 10:40:09 2003 +++ b/drivers/ide/ide-io.c Wed Oct 22 10:40:09 2003 @@ -85,30 +85,63 @@ #endif /* DISK_RECOVERY_TIME */ } -/** - * ide_end_request - complete an IDE I/O - * @drive: IDE device for the I/O - * @uptodate: - * @nr_sectors: number of sectors completed - * - * This is our end_request wrapper function. We complete the I/O - * update random number input and dequeue the request, which if - * it was tagged may be out of order. +/* + * preempt pending requests, and store this cache flush for immediate + * execution */ - -int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) +static struct request *ide_queue_flush_cmd(ide_drive_t *drive, + struct request *rq, int post) { - struct request *rq; - unsigned long flags; - int ret = 1; + struct request *flush_rq = &HWGROUP(drive)->wrq; - spin_lock_irqsave(&ide_lock, flags); - rq = HWGROUP(drive)->rq; + /* + * write cache disabled, just return barrier write immediately + */ + if (!drive->wcache) + return rq; - BUG_ON(!(rq->flags & REQ_STARTED)); + /* + * if last rq issued was the post-flush, we can skip the pre-flush + */ + if (drive->last_rq_flush) { + rq->flags |= REQ_BAR_PREFLUSH; + return rq; + } - if (!nr_sectors) - nr_sectors = rq->hard_cur_sectors; + blkdev_dequeue_request(rq); + + memset(drive->special_buf, 0, sizeof(drive->special_buf)); + + ide_init_drive_cmd(flush_rq); + + flush_rq->flags = REQ_DRIVE_TASK; + flush_rq->buffer = drive->special_buf; + flush_rq->special = rq; + flush_rq->buffer[0] = WIN_FLUSH_CACHE; + flush_rq->nr_sectors = rq->nr_sectors; + + if (drive->id->cfs_enable_2 & 0x2400) + flush_rq->buffer[0] = WIN_FLUSH_CACHE_EXT; + + if (!post) { + drive->doing_barrier = 1; + flush_rq->flags |= REQ_BAR_PREFLUSH; + } else + flush_rq->flags |= REQ_BAR_POSTFLUSH; + + flush_rq->flags |= REQ_STARTED; + flush_rq->timeout = jiffies; + 
list_add(&flush_rq->queuelist, &drive->queue->queue_head); + HWGROUP(drive)->rq = NULL; + return flush_rq; +} + +static int __ide_end_request(ide_drive_t *drive, struct request *rq, + int uptodate, int nr_sectors) +{ + int ret = 1; + + BUG_ON(!(rq->flags & REQ_STARTED)); /* * if failfast is set on a request, override number of sectors and @@ -128,14 +161,55 @@ if (!end_that_request_first(rq, uptodate, nr_sectors)) { add_disk_randomness(rq->rq_disk); - if (!blk_rq_tagged(rq)) - blkdev_dequeue_request(rq); - else + + if (blk_rq_tagged(rq)) blk_queue_end_tag(drive->queue, rq); - HWGROUP(drive)->rq = NULL; + else if (!blk_barrier_rq(rq)) + blkdev_dequeue_request(rq); + end_that_request_last(rq); + HWGROUP(drive)->rq = NULL; ret = 0; } + + return ret; +} + +/** + * ide_end_request - complete an IDE I/O + * @drive: IDE device for the I/O + * @uptodate: + * @nr_sectors: number of sectors completed + * + * This is our end_request wrapper function. We complete the I/O + * update random number input and dequeue the request, which if + * it was tagged may be out of order. 
+ */ + +int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) +{ + struct request *rq; + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&ide_lock, flags); + rq = HWGROUP(drive)->rq; + + if (!nr_sectors) + nr_sectors = rq->hard_cur_sectors; + + if (!blk_barrier_rq(rq)) + ret = __ide_end_request(drive, rq, uptodate, nr_sectors); + else { + struct request *flush_rq = &HWGROUP(drive)->wrq; + + flush_rq->nr_sectors -= nr_sectors; + if (!flush_rq->nr_sectors) { + ide_queue_flush_cmd(drive, rq, 1); + ret = 0; + } + } + spin_unlock_irqrestore(&ide_lock, flags); return ret; } @@ -171,6 +245,97 @@ spin_unlock_irqrestore(&ide_lock, flags); } +/* + * FIXME: probably move this somewhere else, name is bad too :) + */ +static sector_t ide_get_error_location(ide_drive_t *drive, char *args) +{ + u32 high, low; + u8 hcyl, lcyl, sect; + sector_t sector; + + high = 0; + hcyl = args[5]; + lcyl = args[4]; + sect = args[3]; + + if (drive->id->cfs_enable_2 & 0x2400) { + low = (hcyl << 16) | (lcyl << 8) | sect; + HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); + high = ide_read_24(drive); + } else { + u8 cur = HWIF(drive)->INB(IDE_SELECT_REG); + if (cur & 0x40) + low = (hcyl << 16) | (lcyl << 8) | sect; + else { + low = hcyl * drive->head * drive->sect; + low += lcyl * drive->sect; + low += sect - 1; + } + } + + sector = ((sector_t) high << 24) | low; + return sector; +} + +static void ide_complete_barrier(ide_drive_t *drive, struct request *rq, + int error) +{ + struct request *real_rq = rq->special; + int good_sectors, bad_sectors; + sector_t sector; + + if (!error) { + if (blk_barrier_postflush(rq)) { + /* + * this completes the barrier write + */ + __ide_end_request(drive, real_rq, 1, real_rq->hard_nr_sectors); + drive->doing_barrier = 0; + drive->last_rq_flush = 1; + } else { + /* + * just indicate that we did the pre flush + */ + real_rq->flags |= REQ_BAR_PREFLUSH; + __elv_add_request(drive->queue, real_rq, ELEVATOR_INSERT_FRONT, 0); + } + +#ifdef 
IDE_DUMP_FLUSH_TIMINGS + printk("%s: %sflush took %lu jiffies\n", drive->name, blk_barrier_postflush(rq) ? "post" : "pre", jiffies - rq->timeout); +#endif + + /* + * all is fine, return + */ + return; + } + + /* + * bummer, flush failed. if it was the pre-flush, fail the barrier. + * if it was the post-flush, complete the succesful part of the request + * and fail the rest + */ + good_sectors = 0; + if (blk_barrier_postflush(rq)) { + sector = ide_get_error_location(drive, rq->buffer); + + if ((sector >= real_rq->hard_sector) && + (sector < real_rq->hard_sector + real_rq->hard_nr_sectors)) + good_sectors = sector - real_rq->hard_sector; + } else + sector = real_rq->hard_sector; + + bad_sectors = real_rq->hard_nr_sectors - good_sectors; + if (good_sectors) + __ide_end_request(drive, real_rq, 1, good_sectors); + if (bad_sectors) + __ide_end_request(drive, real_rq, 0, bad_sectors); + + printk(KERN_ERR "%s: failed barrier write: sector=%Lx(good=%d/bad=%d)\n", drive->name, sector, good_sectors, bad_sectors); + blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE); +} + /** * ide_end_drive_cmd - end an explicit drive command * @drive: command @@ -260,6 +425,10 @@ spin_lock_irqsave(&ide_lock, flags); blkdev_dequeue_request(rq); + + if (blk_barrier_preflush(rq) || blk_barrier_postflush(rq)) + ide_complete_barrier(drive, rq, err); + HWGROUP(drive)->rq = NULL; end_that_request_last(rq); spin_unlock_irqrestore(&ide_lock, flags); @@ -752,6 +921,15 @@ repeat: best = NULL; drive = hwgroup->drive; + + /* + * drive is doing pre-flush, ordered write, post-flush sequence. even + * though that is 3 requests, it must be seen as a single transaction. 
+ * we must not preempt this drive until that is complete + */ + if (drive->doing_barrier) + return drive; + do { if ((!drive->sleep || time_after_eq(jiffies, drive->sleep)) && !elv_queue_empty(drive->queue)) { @@ -919,6 +1097,15 @@ } /* + * if rq is a barrier write, issue pre cache flush if not + * already done + */ + if (blk_barrier_rq(rq) && !blk_barrier_preflush(rq)) + rq = ide_queue_flush_cmd(drive, rq, 0); + + drive->last_rq_flush = 0; + + /* * Sanity: don't accept a request that isn't a PM request * if we are currently power managed. This is very important as * blk_stop_queue() doesn't prevent the elv_next_request() @@ -937,6 +1124,10 @@ break; } + /* + * we can only queue read-write requests, so let the drive + * queue drain before continuing with this command. + */ if (!rq->bio && ata_pending_commands(drive)) break; @@ -1344,6 +1535,7 @@ { memset(rq, 0, sizeof(*rq)); rq->flags = REQ_DRIVE_CMD; + rq->ref_count = 1; } EXPORT_SYMBOL(ide_init_drive_cmd); diff -Nru a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c --- a/drivers/ide/pci/piix.c Wed Oct 22 10:40:01 2003 +++ b/drivers/ide/pci/piix.c Wed Oct 22 10:40:01 2003 @@ -797,7 +797,9 @@ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_11,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_10,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17}, +#ifndef CONFIG_SCSI_SATA { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18}, +#endif /* !CONFIG_SCSI_SATA */ { 0, }, }; diff -Nru a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c --- a/drivers/ide/pci/siimage.c Wed Oct 22 10:40:04 2003 +++ b/drivers/ide/pci/siimage.c Wed Oct 22 10:40:04 2003 @@ -35,13 +35,13 @@ #include "siimage.h" #if defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS) -#include #include static u8 siimage_proc = 0; #define SIIMAGE_MAX_DEVS 16 static struct pci_dev 
*siimage_devs[SIIMAGE_MAX_DEVS]; static int n_siimage_devs; +#endif /* defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS) */ /** * pdev_is_sata - check if device is SATA @@ -120,7 +120,8 @@ base |= drive->select.b.unit << drive->select.b.unit; return base; } - + +#if defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS) /** * print_siimage_get_info - print minimal proc information * @buf: buffer to write into (kernel space) diff -Nru a/drivers/input/input.c b/drivers/input/input.c --- a/drivers/input/input.c Wed Oct 22 10:40:07 2003 +++ b/drivers/input/input.c Wed Oct 22 10:40:07 2003 @@ -447,9 +447,10 @@ list_add_tail(&dev->node, &input_dev_list); list_for_each_entry(handler, &input_handler_list, node) - if ((id = input_match_device(handler->id_table, dev))) - if ((handle = handler->connect(handler, dev, id))) - input_link_handle(handle); + if (!handler->blacklist || !input_match_device(handler->blacklist, dev)) + if ((id = input_match_device(handler->id_table, dev))) + if ((handle = handler->connect(handler, dev, id))) + input_link_handle(handle); #ifdef CONFIG_HOTPLUG input_call_hotplug("add", dev); @@ -507,9 +508,10 @@ list_add_tail(&handler->node, &input_handler_list); list_for_each_entry(dev, &input_dev_list, node) - if ((id = input_match_device(handler->id_table, dev))) - if ((handle = handler->connect(handler, dev, id))) - input_link_handle(handle); + if (!handler->blacklist || !input_match_device(handler->blacklist, dev)) + if ((id = input_match_device(handler->id_table, dev))) + if ((handle = handler->connect(handler, dev, id))) + input_link_handle(handle); #ifdef CONFIG_PROC_FS input_devices_state++; diff -Nru a/drivers/input/joydev.c b/drivers/input/joydev.c --- a/drivers/input/joydev.c Wed Oct 22 10:40:06 2003 +++ b/drivers/input/joydev.c Wed Oct 22 10:40:06 2003 @@ -380,10 +380,6 @@ struct joydev *joydev; int i, j, t, minor; - /* Avoid tablets */ - if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit)) - return NULL; - 
for (minor = 0; minor < JOYDEV_MINORS && joydev_table[minor]; minor++); if (minor == JOYDEV_MINORS) { printk(KERN_ERR "joydev: no more free joydev devices\n"); @@ -464,6 +460,15 @@ joydev_free(joydev); } +static struct input_device_id joydev_blacklist[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT(EV_KEY) }, + .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) }, + }, /* Avoid itouchpads, touchscreens and tablets */ + { }, /* Terminating entry */ +}; + static struct input_device_id joydev_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, @@ -493,6 +498,7 @@ .minor = JOYDEV_MINOR_BASE, .name = "joydev", .id_table = joydev_ids, + .blacklist = joydev_blacklist, }; static int __init joydev_init(void) diff -Nru a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c --- a/drivers/input/mouse/logips2pp.c Wed Oct 22 10:40:08 2003 +++ b/drivers/input/mouse/logips2pp.c Wed Oct 22 10:40:08 2003 @@ -10,6 +10,7 @@ */ #include +#include #include "psmouse.h" #include "logips2pp.h" diff -Nru a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c --- a/drivers/input/mouse/psmouse-base.c Wed Oct 22 10:40:03 2003 +++ b/drivers/input/mouse/psmouse-base.c Wed Oct 22 10:40:03 2003 @@ -141,7 +141,8 @@ goto out; } - if (psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) { + if (psmouse->state == PSMOUSE_ACTIVATED && + psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) { printk(KERN_WARNING "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n", psmouse->name, psmouse->phys, psmouse->pktcnt); psmouse->pktcnt = 0; @@ -276,24 +277,18 @@ return PSMOUSE_PS2; /* - * Try Synaptics TouchPad magic ID + * Try Synaptics TouchPad */ - - param[0] = 0; - psmouse_command(psmouse, param, PSMOUSE_CMD_SETRES); - psmouse_command(psmouse, param, PSMOUSE_CMD_SETRES); - psmouse_command(psmouse, param, PSMOUSE_CMD_SETRES); - psmouse_command(psmouse, param, 
PSMOUSE_CMD_SETRES); - psmouse_command(psmouse, param, PSMOUSE_CMD_GETINFO); - - if (param[1] == 0x47) { + if (synaptics_detect(psmouse) == 0) { psmouse->vendor = "Synaptics"; psmouse->name = "TouchPad"; - if (!synaptics_init(psmouse)) + +#if CONFIG_MOUSE_PS2_SYNAPTICS + if (synaptics_init(psmouse) == 0) return PSMOUSE_SYNAPTICS; - else - return PSMOUSE_PS2; - } +#endif + return PSMOUSE_PS2; + } /* * Try Genius NetMouse magic init. @@ -519,7 +514,18 @@ struct psmouse *psmouse = serio->private; psmouse->state = PSMOUSE_IGNORE; - synaptics_disconnect(psmouse); + + if (psmouse->ptport) { + if (psmouse->ptport->deactivate) + psmouse->ptport->deactivate(psmouse); + __serio_unregister_port(&psmouse->ptport->serio); /* we have serio_sem */ + kfree(psmouse->ptport); + psmouse->ptport = NULL; + } + + if (psmouse->disconnect) + psmouse->disconnect(psmouse); + input_unregister_device(&psmouse->dev); serio_close(serio); kfree(psmouse); @@ -532,20 +538,10 @@ static int psmouse_pm_callback(struct pm_dev *dev, pm_request_t request, void *data) { struct psmouse *psmouse = dev->data; - struct serio_dev *ser_dev = psmouse->serio->dev; - - synaptics_disconnect(psmouse); - - /* We need to reopen the serio port to reinitialize the i8042 controller */ - serio_close(psmouse->serio); - serio_open(psmouse->serio, ser_dev); - - /* Probe and re-initialize the mouse */ - psmouse_probe(psmouse); - psmouse_initialize(psmouse); - synaptics_pt_init(psmouse); - psmouse_activate(psmouse); + psmouse->state = PSMOUSE_IGNORE; + serio_reconnect(psmouse->serio); + return 0; } @@ -553,7 +549,6 @@ * psmouse_connect() is a callback from the serio module when * an unhandled serio port is found. 
*/ - static void psmouse_connect(struct serio *serio, struct serio_dev *dev) { struct psmouse *psmouse; @@ -578,7 +573,6 @@ psmouse->dev.private = psmouse; serio->private = psmouse; - if (serio_open(serio, dev)) { kfree(psmouse); return; @@ -590,10 +584,12 @@ return; } - pmdev = pm_register(PM_SYS_DEV, PM_SYS_UNKNOWN, psmouse_pm_callback); - if (pmdev) { - psmouse->dev.pm_dev = pmdev; - pmdev->data = psmouse; + if (serio->type != SERIO_PS_PSTHRU) { + pmdev = pm_register(PM_SYS_DEV, PM_SYS_UNKNOWN, psmouse_pm_callback); + if (pmdev) { + psmouse->dev.pm_dev = pmdev; + pmdev->data = psmouse; + } } sprintf(psmouse->devname, "%s %s %s", @@ -614,14 +610,70 @@ psmouse_initialize(psmouse); - synaptics_pt_init(psmouse); + if (psmouse->ptport) { + printk(KERN_INFO "serio: %s port at %s\n", psmouse->ptport->serio.name, psmouse->phys); + __serio_register_port(&psmouse->ptport->serio); /* we have serio_sem */ + if (psmouse->ptport->activate) + psmouse->ptport->activate(psmouse); + } + + psmouse_activate(psmouse); +} + + +static int psmouse_reconnect(struct serio *serio) +{ + struct psmouse *psmouse = serio->private; + struct serio_dev *dev = serio->dev; + int old_type = psmouse->type; + + if (!dev) { + printk(KERN_DEBUG "psmouse: reconnect request, but serio is disconnected, ignoring...\n"); + return -1; + } + + /* We need to reopen the serio port to reinitialize the i8042 controller */ + serio_close(serio); + if (serio_open(serio, dev)) { + /* do a disconnect here as serio_open leaves dev as NULL so disconnect + * will not be called automatically later + */ + psmouse_disconnect(serio); + return -1; + } + + psmouse->state = PSMOUSE_NEW_DEVICE; + psmouse->type = psmouse->acking = psmouse->cmdcnt = psmouse->pktcnt = 0; + if (psmouse->reconnect) { + if (psmouse->reconnect(psmouse)) + return -1; + } else if (psmouse_probe(psmouse) != old_type) + return -1; + + /* ok, the device type (and capabilities) match the old one, + * we can continue using it, complete intialization + */ + 
psmouse->type = old_type; + psmouse_initialize(psmouse); + if (psmouse->ptport) { + if (psmouse_reconnect(&psmouse->ptport->serio)) { + __serio_unregister_port(&psmouse->ptport->serio); + __serio_register_port(&psmouse->ptport->serio); + if (psmouse->ptport->activate) + psmouse->ptport->activate(psmouse); + } + } + psmouse_activate(psmouse); + return 0; } + static struct serio_dev psmouse_dev = { .interrupt = psmouse_interrupt, .connect = psmouse_connect, + .reconnect = psmouse_reconnect, .disconnect = psmouse_disconnect, .cleanup = psmouse_cleanup, }; diff -Nru a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h --- a/drivers/input/mouse/psmouse.h Wed Oct 22 10:40:09 2003 +++ b/drivers/input/mouse/psmouse.h Wed Oct 22 10:40:09 2003 @@ -22,10 +22,20 @@ #define PSMOUSE_ACTIVATED 1 #define PSMOUSE_IGNORE 2 +struct psmouse; + +struct psmouse_ptport { + struct serio serio; + + void (*activate)(struct psmouse *parent); + void (*deactivate)(struct psmouse *parent); +}; + struct psmouse { void *private; struct input_dev dev; struct serio *serio; + struct psmouse_ptport *ptport; char *vendor; char *name; unsigned char cmdbuf[8]; @@ -41,6 +51,9 @@ char error; char devname[64]; char phys[32]; + + int (*reconnect)(struct psmouse *psmouse); + void (*disconnect)(struct psmouse *psmouse); }; #define PSMOUSE_PS2 1 diff -Nru a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c --- a/drivers/input/mouse/synaptics.c Wed Oct 22 10:40:04 2003 +++ b/drivers/input/mouse/synaptics.c Wed Oct 22 10:40:04 2003 @@ -2,7 +2,8 @@ * Synaptics TouchPad PS/2 mouse driver * * 2003 Dmitry Torokhov - * Added support for pass-through port + * Added support for pass-through port. Special thanks to Peter Berg Larsen + * for explaining various Synaptics quirks. * * 2003 Peter Osterlund * Ported to 2.5 input device infrastructure. 
@@ -194,9 +195,7 @@ static int synaptics_query_hardware(struct psmouse *psmouse) { - struct synaptics_data *priv = psmouse->private; int retries = 0; - int mode; while ((retries++ < 3) && synaptics_reset(psmouse)) printk(KERN_ERR "synaptics reset failed\n"); @@ -207,8 +206,15 @@ return -1; if (synaptics_capability(psmouse)) return -1; + + return 0; +} - mode = SYN_BIT_ABSOLUTE_MODE | SYN_BIT_HIGH_RATE; +static int synaptics_set_mode(struct psmouse *psmouse, int mode) +{ + struct synaptics_data *priv = psmouse->private; + + mode |= SYN_BIT_ABSOLUTE_MODE | SYN_BIT_HIGH_RATE; if (SYN_ID_MAJOR(priv->identity) >= 4) mode |= SYN_BIT_DISABLE_GESTURE; if (SYN_CAP_EXTENDED(priv->capabilities)) @@ -265,49 +271,38 @@ } } -int synaptics_pt_init(struct psmouse *psmouse) +static void synaptics_pt_activate(struct psmouse *psmouse) { - struct synaptics_data *priv = psmouse->private; - struct serio *port; - struct psmouse *child; + struct psmouse *child = psmouse->ptport->serio.private; + + /* adjust the touchpad to child's choice of protocol */ + if (child && child->type >= PSMOUSE_GENPS) { + if (synaptics_set_mode(psmouse, SYN_BIT_FOUR_BYTE_CLIENT)) + printk(KERN_INFO "synaptics: failed to enable 4-byte guest protocol\n"); + } +} - if (psmouse->type != PSMOUSE_SYNAPTICS) - return -1; - if (!SYN_CAP_EXTENDED(priv->capabilities)) - return -1; - if (!SYN_CAP_PASS_THROUGH(priv->capabilities)) - return -1; +static void synaptics_pt_create(struct psmouse *psmouse) +{ + struct psmouse_ptport *port; - priv->ptport = port = kmalloc(sizeof(struct serio), GFP_KERNEL); + psmouse->ptport = port = kmalloc(sizeof(struct psmouse_ptport), GFP_KERNEL); if (!port) { - printk(KERN_ERR "synaptics: not enough memory to allocate serio port\n"); - return -1; + printk(KERN_ERR "synaptics: not enough memory to allocate pass-through port\n"); + return; } - memset(port, 0, sizeof(struct serio)); - port->type = SERIO_PS_PSTHRU; - port->name = "Synaptics pass-through"; - port->phys = "synaptics-pt/serio0"; - 
port->write = synaptics_pt_write; - port->open = synaptics_pt_open; - port->close = synaptics_pt_close; - port->driver = psmouse; + memset(port, 0, sizeof(struct psmouse_ptport)); - printk(KERN_INFO "serio: %s port at %s\n", port->name, psmouse->phys); - serio_register_slave_port(port); + port->serio.type = SERIO_PS_PSTHRU; + port->serio.name = "Synaptics pass-through"; + port->serio.phys = "synaptics-pt/serio0"; + port->serio.write = synaptics_pt_write; + port->serio.open = synaptics_pt_open; + port->serio.close = synaptics_pt_close; + port->serio.driver = psmouse; - /* adjust the touchpad to child's choice of protocol */ - child = port->private; - if (child && child->type >= PSMOUSE_GENPS) { - if (synaptics_mode_cmd(psmouse, (SYN_BIT_ABSOLUTE_MODE | - SYN_BIT_HIGH_RATE | - SYN_BIT_DISABLE_GESTURE | - SYN_BIT_FOUR_BYTE_CLIENT | - SYN_BIT_W_MODE))) - printk(KERN_INFO "synaptics: failed to enable 4-byte guest protocol\n"); - } - - return 0; + port->activate = synaptics_pt_activate; } /***************************************************************************** @@ -371,27 +366,82 @@ clear_bit(REL_Y, dev->relbit); } +static void synaptics_disconnect(struct psmouse *psmouse) +{ + synaptics_mode_cmd(psmouse, 0); + kfree(psmouse->private); +} + +static int synaptics_reconnect(struct psmouse *psmouse) +{ + struct synaptics_data *priv = psmouse->private; + struct synaptics_data old_priv = *priv; + + if (synaptics_detect(psmouse)) + return -1; + + if (synaptics_query_hardware(psmouse)) { + printk(KERN_ERR "Unable to query Synaptics hardware.\n"); + return -1; + } + + if (old_priv.identity != priv->identity || + old_priv.model_id != priv->model_id || + old_priv.capabilities != priv->capabilities || + old_priv.ext_cap != priv->ext_cap) + return -1; + + if (synaptics_set_mode(psmouse, 0)) { + printk(KERN_ERR "Unable to initialize Synaptics hardware.\n"); + return -1; + } + + return 0; +} + +int synaptics_detect(struct psmouse *psmouse) +{ + unsigned char param[4]; + + 
param[0] = 0; + + psmouse_command(psmouse, param, PSMOUSE_CMD_SETRES); + psmouse_command(psmouse, param, PSMOUSE_CMD_SETRES); + psmouse_command(psmouse, param, PSMOUSE_CMD_SETRES); + psmouse_command(psmouse, param, PSMOUSE_CMD_SETRES); + psmouse_command(psmouse, param, PSMOUSE_CMD_GETINFO); + + return param[1] == 0x47 ? 0 : -1; +} + int synaptics_init(struct psmouse *psmouse) { struct synaptics_data *priv; -#ifndef CONFIG_MOUSE_PS2_SYNAPTICS - return -1; -#endif - psmouse->private = priv = kmalloc(sizeof(struct synaptics_data), GFP_KERNEL); if (!priv) return -1; memset(priv, 0, sizeof(struct synaptics_data)); if (synaptics_query_hardware(psmouse)) { - printk(KERN_ERR "Unable to query/initialize Synaptics hardware.\n"); + printk(KERN_ERR "Unable to query Synaptics hardware.\n"); + goto init_fail; + } + + if (synaptics_set_mode(psmouse, 0)) { + printk(KERN_ERR "Unable to initialize Synaptics hardware.\n"); goto init_fail; } + if (SYN_CAP_EXTENDED(priv->capabilities) && SYN_CAP_PASS_THROUGH(priv->capabilities)) + synaptics_pt_create(psmouse); + print_ident(priv); set_input_params(&psmouse->dev, priv); + psmouse->disconnect = synaptics_disconnect; + psmouse->reconnect = synaptics_reconnect; + return 0; init_fail: @@ -399,36 +449,13 @@ return -1; } -void synaptics_disconnect(struct psmouse *psmouse) -{ - struct synaptics_data *priv = psmouse->private; - - if (psmouse->type == PSMOUSE_SYNAPTICS && priv) { - synaptics_mode_cmd(psmouse, 0); - if (priv->ptport) { - serio_unregister_slave_port(priv->ptport); - kfree(priv->ptport); - } - kfree(priv); - } -} - /***************************************************************************** * Functions to interpret the absolute mode packets ****************************************************************************/ static void synaptics_parse_hw_state(unsigned char buf[], struct synaptics_data *priv, struct synaptics_hw_state *hw) { - hw->up = 0; - hw->down = 0; - hw->b0 = 0; - hw->b1 = 0; - hw->b2 = 0; - hw->b3 = 0; - hw->b4 = 
0; - hw->b5 = 0; - hw->b6 = 0; - hw->b7 = 0; + memset(hw, 0, sizeof(struct synaptics_hw_state)); if (SYN_MODEL_NEWABS(priv->model_id)) { hw->x = (((buf[3] & 0x10) << 8) | @@ -570,64 +597,47 @@ input_sync(dev); } +static int synaptics_validate_byte(struct psmouse *psmouse) +{ + static unsigned char newabs_mask[] = { 0xC8, 0x00, 0x00, 0xC8, 0x00 }; + static unsigned char newabs_rslt[] = { 0x80, 0x00, 0x00, 0xC0, 0x00 }; + static unsigned char oldabs_mask[] = { 0xC0, 0x60, 0x00, 0xC0, 0x60 }; + static unsigned char oldabs_rslt[] = { 0xC0, 0x00, 0x00, 0x80, 0x00 }; + struct synaptics_data *priv = psmouse->private; + int idx = psmouse->pktcnt - 1; + + if (SYN_MODEL_NEWABS(priv->model_id)) + return (psmouse->packet[idx] & newabs_mask[idx]) == newabs_rslt[idx]; + else + return (psmouse->packet[idx] & oldabs_mask[idx]) == oldabs_rslt[idx]; +} + void synaptics_process_byte(struct psmouse *psmouse, struct pt_regs *regs) { struct input_dev *dev = &psmouse->dev; struct synaptics_data *priv = psmouse->private; - unsigned char data = psmouse->packet[psmouse->pktcnt - 1]; - int newabs = SYN_MODEL_NEWABS(priv->model_id); input_regs(dev, regs); - switch (psmouse->pktcnt) { - case 1: - if (newabs ? ((data & 0xC8) != 0x80) : ((data & 0xC0) != 0xC0)) { - printk(KERN_WARNING "Synaptics driver lost sync at 1st byte\n"); - goto bad_sync; - } - break; - case 2: - if (!newabs && ((data & 0x60) != 0x00)) { - printk(KERN_WARNING "Synaptics driver lost sync at 2nd byte\n"); - goto bad_sync; - } - break; - case 4: - if (newabs ? 
((data & 0xC8) != 0xC0) : ((data & 0xC0) != 0x80)) { - printk(KERN_WARNING "Synaptics driver lost sync at 4th byte\n"); - goto bad_sync; - } - break; - case 5: - if (!newabs && ((data & 0x60) != 0x00)) { - printk(KERN_WARNING "Synaptics driver lost sync at 5th byte\n"); - goto bad_sync; - } - break; - default: - if (psmouse->pktcnt < 6) - break; /* Wait for full packet */ - + if (psmouse->pktcnt >= 6) { /* Full packet received */ if (priv->out_of_sync) { priv->out_of_sync = 0; printk(KERN_NOTICE "Synaptics driver resynced.\n"); } - if (priv->ptport && synaptics_is_pt_packet(psmouse->packet)) - synaptics_pass_pt_packet(priv->ptport, psmouse->packet); + if (psmouse->ptport && psmouse->ptport->serio.dev && synaptics_is_pt_packet(psmouse->packet)) + synaptics_pass_pt_packet(&psmouse->ptport->serio, psmouse->packet); else synaptics_process_packet(psmouse); - psmouse->pktcnt = 0; - break; - } - return; - bad_sync: - priv->out_of_sync++; - psmouse->pktcnt = 0; - if (psmouse_resetafter > 0 && priv->out_of_sync == psmouse_resetafter) { - psmouse->state = PSMOUSE_IGNORE; - serio_rescan(psmouse->serio); + } else if (psmouse->pktcnt && !synaptics_validate_byte(psmouse)) { + printk(KERN_WARNING "Synaptics driver lost sync at byte %d\n", psmouse->pktcnt); + psmouse->pktcnt = 0; + if (++priv->out_of_sync == psmouse_resetafter) { + psmouse->state = PSMOUSE_IGNORE; + printk(KERN_NOTICE "synaptics: issuing reconnect request\n"); + serio_reconnect(psmouse->serio); + } } } diff -Nru a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h --- a/drivers/input/mouse/synaptics.h Wed Oct 22 10:40:05 2003 +++ b/drivers/input/mouse/synaptics.h Wed Oct 22 10:40:05 2003 @@ -9,11 +9,9 @@ #ifndef _SYNAPTICS_H #define _SYNAPTICS_H - extern void synaptics_process_byte(struct psmouse *psmouse, struct pt_regs *regs); +extern int synaptics_detect(struct psmouse *psmouse); extern int synaptics_init(struct psmouse *psmouse); -extern int synaptics_pt_init(struct psmouse *psmouse); -extern 
void synaptics_disconnect(struct psmouse *psmouse); /* synaptics queries */ #define SYN_QUE_IDENTIFY 0x00 @@ -105,8 +103,6 @@ /* Data for normal processing */ unsigned int out_of_sync; /* # of packets out of sync */ int old_w; /* Previous w value */ - - struct serio *ptport; /* pass-through port */ }; #endif /* _SYNAPTICS_H */ diff -Nru a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c --- a/drivers/input/serio/serio.c Wed Oct 22 10:40:01 2003 +++ b/drivers/input/serio/serio.c Wed Oct 22 10:40:01 2003 @@ -49,14 +49,15 @@ EXPORT_SYMBOL(serio_interrupt); EXPORT_SYMBOL(serio_register_port); -EXPORT_SYMBOL(serio_register_slave_port); +EXPORT_SYMBOL(__serio_register_port); EXPORT_SYMBOL(serio_unregister_port); -EXPORT_SYMBOL(serio_unregister_slave_port); +EXPORT_SYMBOL(__serio_unregister_port); EXPORT_SYMBOL(serio_register_device); EXPORT_SYMBOL(serio_unregister_device); EXPORT_SYMBOL(serio_open); EXPORT_SYMBOL(serio_close); EXPORT_SYMBOL(serio_rescan); +EXPORT_SYMBOL(serio_reconnect); struct serio_event { int type; @@ -83,10 +84,20 @@ } #define SERIO_RESCAN 1 +#define SERIO_RECONNECT 2 static DECLARE_WAIT_QUEUE_HEAD(serio_wait); static DECLARE_COMPLETION(serio_exited); +static void serio_invalidate_pending_events(struct serio *serio) +{ + struct serio_event *event; + + list_for_each_entry(event, &serio_event_list, node) + if (event->serio == serio) + event->serio = NULL; +} + void serio_handle_events(void) { struct list_head *node, *next; @@ -95,17 +106,27 @@ list_for_each_safe(node, next, &serio_event_list) { event = container_of(node, struct serio_event, node); + down(&serio_sem); + if (event->serio == NULL) + goto event_done; + switch (event->type) { + case SERIO_RECONNECT : + if (event->serio->dev && event->serio->dev->reconnect) + if (event->serio->dev->reconnect(event->serio) == 0) + break; + /* reconnect failed - fall through to rescan */ + case SERIO_RESCAN : - down(&serio_sem); if (event->serio->dev && event->serio->dev->disconnect) 
event->serio->dev->disconnect(event->serio); serio_find_dev(event->serio); - up(&serio_sem); break; default: break; } +event_done: + up(&serio_sem); list_del_init(node); kfree(event); } @@ -130,18 +151,27 @@ complete_and_exit(&serio_exited, 0); } -void serio_rescan(struct serio *serio) +static void serio_queue_event(struct serio *serio, int event_type) { struct serio_event *event; - if (!(event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC))) - return; + if ((event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC))) { + event->type = event_type; + event->serio = serio; + + list_add_tail(&event->node, &serio_event_list); + wake_up(&serio_wait); + } +} - event->type = SERIO_RESCAN; - event->serio = serio; +void serio_rescan(struct serio *serio) +{ + serio_queue_event(serio, SERIO_RESCAN); +} - list_add_tail(&event->node, &serio_event_list); - wake_up(&serio_wait); +void serio_reconnect(struct serio *serio) +{ + serio_queue_event(serio, SERIO_RECONNECT); } irqreturn_t serio_interrupt(struct serio *serio, @@ -163,17 +193,16 @@ void serio_register_port(struct serio *serio) { down(&serio_sem); - list_add_tail(&serio->node, &serio_list); - serio_find_dev(serio); + __serio_register_port(serio); up(&serio_sem); } /* - * Same as serio_register_port but does not try to acquire serio_sem. - * Should be used when registering a serio from other input device's + * Should only be called directly if serio_sem has already been taken, + * for example when unregistering a serio from other input device's * connect() function. 
*/ -void serio_register_slave_port(struct serio *serio) +void __serio_register_port(struct serio *serio) { list_add_tail(&serio->node, &serio_list); serio_find_dev(serio); @@ -182,19 +211,18 @@ void serio_unregister_port(struct serio *serio) { down(&serio_sem); - list_del_init(&serio->node); - if (serio->dev && serio->dev->disconnect) - serio->dev->disconnect(serio); + __serio_unregister_port(serio); up(&serio_sem); } /* - * Same as serio_unregister_port but does not try to acquire serio_sem. - * Should be used when unregistering a serio from other input device's + * Should only be called directly if serio_sem has already been taken, + * for example when unregistering a serio from other input device's * disconnect() function. */ -void serio_unregister_slave_port(struct serio *serio) +void __serio_unregister_port(struct serio *serio) { + serio_invalidate_pending_events(serio); list_del_init(&serio->node); if (serio->dev && serio->dev->disconnect) serio->dev->disconnect(serio); diff -Nru a/drivers/md/raid0.c b/drivers/md/raid0.c --- a/drivers/md/raid0.c Wed Oct 22 10:40:07 2003 +++ b/drivers/md/raid0.c Wed Oct 22 10:40:07 2003 @@ -332,7 +332,7 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio) { mddev_t *mddev = q->queuedata; - unsigned int sect_in_chunk, chunksize_bits, chunk_size; + unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects; raid0_conf_t *conf = mddev_to_conf(mddev); struct strip_zone *zone; mdk_rdev_t *tmp_dev; @@ -340,11 +340,12 @@ sector_t block, rsect; chunk_size = mddev->chunk_size >> 10; + chunk_sects = mddev->chunk_size >> 9; chunksize_bits = ffz(~chunk_size); block = bio->bi_sector >> 1; - if (unlikely(chunk_size < (block & (chunk_size - 1)) + (bio->bi_size >> 10))) { + if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { struct bio_pair *bp; /* Sanity check -- queue functions should prevent this happening */ if (bio->bi_vcnt != 1 || @@ -353,7 +354,7 @@ /* This is a one page 
bio that upper layers * refuse to split for us, so we need to split it. */ - bp = bio_split(bio, bio_split_pool, (chunk_size - (block & (chunk_size - 1)))<<1 ); + bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); if (raid0_make_request(q, &bp->bio1)) generic_make_request(&bp->bio1); if (raid0_make_request(q, &bp->bio2)) diff -Nru a/drivers/md/raid1.c b/drivers/md/raid1.c --- a/drivers/md/raid1.c Wed Oct 22 10:40:07 2003 +++ b/drivers/md/raid1.c Wed Oct 22 10:40:07 2003 @@ -879,7 +879,7 @@ conf = mddev_to_conf(mddev); bio = r1_bio->master_bio; switch(r1_bio->cmd) { - case SPECIAL: + case WRITESYNC: sync_request_write(mddev, r1_bio); break; case READ: @@ -989,7 +989,7 @@ r1_bio->mddev = mddev; r1_bio->sector = sector_nr; - r1_bio->cmd = SPECIAL; + r1_bio->cmd = WRITESYNC; r1_bio->read_disk = disk; bio = r1_bio->master_bio; diff -Nru a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c --- a/drivers/media/common/saa7146_i2c.c Wed Oct 22 10:40:09 2003 +++ b/drivers/media/common/saa7146_i2c.c Wed Oct 22 10:40:09 2003 @@ -409,11 +409,8 @@ if( NULL != i2c_adapter ) { memset(i2c_adapter,0,sizeof(struct i2c_adapter)); strcpy(i2c_adapter->name, dev->name); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) - i2c_adapter->data = dev; -#else i2c_set_adapdata(i2c_adapter,dev); -#endif + i2c_adapter->class = I2C_ADAP_CLASS_TV_ANALOG; i2c_adapter->algo = &saa7146_algo; i2c_adapter->algo_data = NULL; i2c_adapter->id = I2C_ALGO_SAA7146; diff -Nru a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c --- a/drivers/media/dvb/dvb-core/dvb_demux.c Wed Oct 22 10:40:03 2003 +++ b/drivers/media/dvb/dvb-core/dvb_demux.c Wed Oct 22 10:40:03 2003 @@ -105,7 +105,7 @@ { u32 crc; - crc = crc32_le(~0, data, length); + crc = crc32_be(~0, data, length); data[length] = (crc >> 24) & 0xff; data[length+1] = (crc >> 16) & 0xff; @@ -116,7 +116,7 @@ static u32 dvb_dmx_crc32 (struct dvb_demux_feed *f, const u8 *src, 
size_t len) { - return (f->feed.sec.crc_val = crc32_le (f->feed.sec.crc_val, src, len)); + return (f->feed.sec.crc_val = crc32_be (f->feed.sec.crc_val, src, len)); } diff -Nru a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c --- a/drivers/media/dvb/frontends/tda1004x.c Wed Oct 22 10:40:01 2003 +++ b/drivers/media/dvb/frontends/tda1004x.c Wed Oct 22 10:40:01 2003 @@ -44,12 +44,12 @@ #include "dvb_frontend.h" #include "dvb_functions.h" -#ifndef CONFIG_TDA1004X_MC_LOCATION -#define CONFIG_TDA1004X_MC_LOCATION "/etc/dvb/tda1004x.mc" +#ifndef DVB_TDA1004X_FIRMWARE_FILE +#define DVB_TDA1004X_FIRMWARE_FILE "/etc/dvb/tda1004x.mc" #endif static int tda1004x_debug = 0; -static char *tda1004x_firmware = CONFIG_TDA1004X_MC_LOCATION; +static char *tda1004x_firmware = DVB_TDA1004X_FIRMWARE_FILE; #define TDA10045H_ADDRESS 0x08 diff -Nru a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c --- a/drivers/media/dvb/ttpci/av7110.c Wed Oct 22 10:40:08 2003 +++ b/drivers/media/dvb/ttpci/av7110.c Wed Oct 22 10:40:08 2003 @@ -4498,6 +4498,30 @@ 0x1d, 0x00, 0x1e, 0x00, + 0x41, 0x77, + 0x42, 0x77, + 0x43, 0x77, + 0x44, 0x77, + 0x45, 0x77, + 0x46, 0x77, + 0x47, 0x77, + 0x48, 0x77, + 0x49, 0x77, + 0x4a, 0x77, + 0x4b, 0x77, + 0x4c, 0x77, + 0x4d, 0x77, + 0x4e, 0x77, + 0x4f, 0x77, + 0x50, 0x77, + 0x51, 0x77, + 0x52, 0x77, + 0x53, 0x77, + 0x54, 0x77, + 0x55, 0x77, + 0x56, 0x77, + 0x57, 0xff, + 0xff }; diff -Nru a/drivers/media/dvb/ttpci/av7110_firm.h b/drivers/media/dvb/ttpci/av7110_firm.h --- a/drivers/media/dvb/ttpci/av7110_firm.h Wed Oct 22 10:40:01 2003 +++ b/drivers/media/dvb/ttpci/av7110_firm.h Wed Oct 22 10:40:01 2003 @@ -1,7 +1,7 @@ #include -u8 Dpram [] __initdata = { +static u8 Dpram [] = { 0xe5, 0x9f, 0xf0, 0x1c, 0xe1, 0xb0, 0xf0, 0x0e, 0xe5, 0x9f, 0xf0, 0x18, 0xe2, 0x5e, 0xf0, 0x04, 0xe2, 0x5e, 0xf0, 0x08, 0xe1, 0xa0, 0x00, 0x00, @@ -41,7 +41,7 @@ }; -u8 Root [] __initdata = { +static u8 Root [] = { 0xb4, 0x90, 0x49, 0x18, 0x1c, 0x0b, 
0x4a, 0x18, 0x1a, 0x50, 0x4f, 0x18, 0x1a, 0x79, 0x10, 0x8f, 0x21, 0x00, 0x2f, 0x00, 0xdd, 0x04, 0xcb, 0x10, diff -Nru a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c --- a/drivers/media/video/bttv-driver.c Wed Oct 22 10:40:08 2003 +++ b/drivers/media/video/bttv-driver.c Wed Oct 22 10:40:09 2003 @@ -2818,6 +2818,7 @@ up(&fh->cap.lock); return POLLERR; } + fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR; field = videobuf_next_field(&fh->cap); if (0 != fh->cap.ops->buf_prepare(file,fh->cap.read_buf,field)) { up(&fh->cap.lock); diff -Nru a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c --- a/drivers/media/video/tda9840.c Wed Oct 22 10:40:08 2003 +++ b/drivers/media/video/tda9840.c Wed Oct 22 10:40:08 2003 @@ -196,6 +196,7 @@ printk("tda9840.o: not enough kernel memory.\n"); return -ENOMEM; } + memset(client, 0, sizeof(struct i2c_client)); /* fill client structure */ sprintf(client->name,"tda9840 (0x%02x)", address); @@ -258,9 +259,7 @@ } static struct i2c_driver driver = { -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,54) .owner = THIS_MODULE, -#endif .name = "tda9840 driver", .id = I2C_DRIVERID_TDA9840, .flags = I2C_DF_NOTIFY, diff -Nru a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c --- a/drivers/media/video/tea6415c.c Wed Oct 22 10:40:08 2003 +++ b/drivers/media/video/tea6415c.c Wed Oct 22 10:40:08 2003 @@ -70,6 +70,7 @@ if (0 == client) { return -ENOMEM; } + memset(client, 0, sizeof(struct i2c_client)); /* fill client structure */ sprintf(client->name,"tea6415c (0x%02x)", address); @@ -207,9 +208,7 @@ } static struct i2c_driver driver = { -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,54) .owner = THIS_MODULE, -#endif .name = "tea6415c driver", .id = I2C_DRIVERID_TEA6415C, .flags = I2C_DF_NOTIFY, diff -Nru a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c --- a/drivers/media/video/tea6420.c Wed Oct 22 10:40:06 2003 +++ b/drivers/media/video/tea6420.c Wed Oct 22 10:40:06 2003 @@ -110,7 +110,8 @@ if 
(0 == client) { return -ENOMEM; } - + memset(client, 0x0, sizeof(struct i2c_client)); + /* fill client structure */ sprintf(client->name,"tea6420 (0x%02x)", address); client->id = tea6420_id++; @@ -187,9 +188,7 @@ } static struct i2c_driver driver = { -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,54) .owner = THIS_MODULE, -#endif .name = "tea6420 driver", .id = I2C_DRIVERID_TEA6420, .flags = I2C_DF_NOTIFY, diff -Nru a/drivers/net/3c59x.c b/drivers/net/3c59x.c --- a/drivers/net/3c59x.c Wed Oct 22 10:40:01 2003 +++ b/drivers/net/3c59x.c Wed Oct 22 10:40:01 2003 @@ -900,6 +900,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void vortex_tx_timeout(struct net_device *dev); static void acpi_set_WOL(struct net_device *dev); +static void vorboom_poll(struct net_device *dev); static struct ethtool_ops vortex_ethtool_ops; /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ @@ -1450,6 +1451,9 @@ dev->set_multicast_list = set_rx_mode; dev->tx_timeout = vortex_tx_timeout; dev->watchdog_timeo = (watchdog * HZ) / 1000; +#ifdef HAVE_POLL_CONTROLLER + dev->poll_controller = &vorboom_poll; +#endif if (pdev) { vp->pm_state_valid = 1; pci_save_state(VORTEX_PCI(vp), vp->power_state); @@ -2439,6 +2443,29 @@ spin_unlock(&vp->lock); return IRQ_HANDLED; } + +#ifdef HAVE_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ + +static void vorboom_poll (struct net_device *dev) +{ + struct vortex_private *vp = (struct vortex_private *)dev->priv; + + disable_irq(dev->irq); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev, 0); + else + vortex_interrupt(dev->irq, dev, 0); + enable_irq(dev->irq); +} + +#endif + static int vortex_rx(struct net_device *dev) { diff -Nru a/drivers/net/e100/e100_main.c b/drivers/net/e100/e100_main.c --- a/drivers/net/e100/e100_main.c Wed Oct 22 10:40:05 2003 +++ b/drivers/net/e100/e100_main.c Wed Oct 22 10:40:05 2003 @@ -539,6 +539,22 @@ readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */ } +#ifdef HAVE_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void +e100_poll(struct net_device *dev) +{ + disable_irq(dev->irq); + e100intr(dev->irq, dev, NULL); + enable_irq(dev->irq); +} +#endif + static int __devinit e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent) { @@ -557,6 +573,9 @@ SET_MODULE_OWNER(dev); +#ifdef HAVE_POLL_CONTROLLER + dev->poll_controller = &e100_poll; +#endif if (first_time) { first_time = false; printk(KERN_NOTICE "%s - version %s\n", diff -Nru a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c --- a/drivers/net/e1000/e1000_main.c Wed Oct 22 10:40:04 2003 +++ b/drivers/net/e1000/e1000_main.c Wed Oct 22 10:40:04 2003 @@ -165,6 +165,7 @@ static inline void e1000_rx_checksum(struct e1000_adapter *adapter, struct e1000_rx_desc *rx_desc, struct sk_buff *skb); +static void e1000_Poll(struct net_device *dev); static void e1000_tx_timeout(struct net_device *dev); static void e1000_tx_timeout_task(struct net_device *dev); static void e1000_smartspeed(struct e1000_adapter *adapter); @@ -442,6 +443,9 @@ adapter->bd_number = cards_found; +#ifdef HAVE_POLL_CONTROLLER + netdev->poll_controller = &e1000_Poll; +#endif /* setup the 
private structure */ if((err = e1000_sw_init(adapter))) @@ -2105,6 +2109,15 @@ adapter->hw.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies); } + +#ifdef HAVE_POLL_CONTROLLER +static void e1000_Poll(struct net_device *dev) +{ + disable_irq(dev->irq); + e1000_intr(dev->irq, dev, NULL); + enable_irq(dev->irq); +} +#endif #ifdef CONFIG_E1000_NAPI if(netif_rx_schedule_prep(netdev)) { diff -Nru a/drivers/net/eepro100.c b/drivers/net/eepro100.c --- a/drivers/net/eepro100.c Wed Oct 22 10:40:08 2003 +++ b/drivers/net/eepro100.c Wed Oct 22 10:40:08 2003 @@ -543,6 +543,7 @@ static int speedo_rx(struct net_device *dev); static void speedo_tx_buffer_gc(struct net_device *dev); static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs); +static void poll_speedo (struct net_device *dev); static int speedo_close(struct net_device *dev); static struct net_device_stats *speedo_get_stats(struct net_device *dev); static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); @@ -885,6 +886,9 @@ dev->get_stats = &speedo_get_stats; dev->set_multicast_list = &set_rx_mode; dev->do_ioctl = &speedo_ioctl; +#ifdef HAVE_POLL_CONTROLLER + dev->poll_controller = &poll_speedo; +#endif if (register_netdevice(dev)) goto err_free_unlock; @@ -1674,6 +1678,23 @@ clear_bit(0, (void*)&sp->in_interrupt); return IRQ_RETVAL(handled); } + +#ifdef HAVE_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ + +static void poll_speedo (struct net_device *dev) +{ + disable_irq(dev->irq); + speedo_interrupt (dev->irq, dev, NULL); + enable_irq(dev->irq); +} + +#endif static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry) { diff -Nru a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig --- a/drivers/net/irda/Kconfig Wed Oct 22 10:40:08 2003 +++ b/drivers/net/irda/Kconfig Wed Oct 22 10:40:08 2003 @@ -223,6 +223,25 @@ Please note that the driver is still experimental. And of course, you will need both USB and IrDA support in your kernel... +config SIGMATEL_FIR + tristate "SigmaTel USB dongles (EXPERIMENTAL)" + depends on IRDA && USB && EXPERIMENTAL + ---help--- + Say Y here if you want to build support for the SigmaTel STIr4200 + USB IrDA FIR Dongle device driver. + + USB dongles based on the SigmaTel STIr4200 don't conform to the + IrDA-USB device class specification, and therefore need their + own specific driver. Those dongles support SIR and FIR (4Mbps) + speeds. On the other hand, those dongles tend to be less efficient + than a FIR chipset. + + To compile it as a module, choose M here: the module will be called + stir4200. + + Please note that the driver is still experimental. And of course, + you will need both USB and IrDA support in your kernel... 
+ config NSC_FIR tristate "NSC PC87108/PC87338" depends on IRDA && ISA diff -Nru a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile --- a/drivers/net/irda/Makefile Wed Oct 22 10:40:05 2003 +++ b/drivers/net/irda/Makefile Wed Oct 22 10:40:05 2003 @@ -9,6 +9,7 @@ obj-$(CONFIG_IRPORT_SIR) += irport.o # FIR drivers obj-$(CONFIG_USB_IRDA) += irda-usb.o +obj-$(CONFIG_SIGMATEL_FIR) += stir4200.o obj-$(CONFIG_NSC_FIR) += nsc-ircc.o obj-$(CONFIG_WINBOND_FIR) += w83977af_ir.o obj-$(CONFIG_SA1100_FIR) += sa1100_ir.o diff -Nru a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/net/irda/stir4200.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,1959 @@ +/***************************************************************************** +* +* Filename: stir4200.c +* Version: 0.2 +* Description: IrDA-USB Driver +* Status: Experimental +* Author: Paul Stewart +* +* Copyright (C) 2000, Roman Weissgaerber +* Copyright (C) 2001, Dag Brattli +* Copyright (C) 2001, Jean Tourrilhes +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +* +*****************************************************************************/ + +/* +* This driver is based on usb-irda.c. 
The STIr4200 has bulk in and out +* endpoints just like usr-irda devices, but the data it sends and receives +* is raw; like irtty, it needs to call the wrap and unwrap functions to add +* and remove SOF/BOF and escape characters to/from the frame. It doesn't +* have an interrupt endpoint like the IrDA-USB devices. +*/ + +/*------------------------------------------------------------------*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stir4200.h" + +/*------------------------------------------------------------------*/ + +static int qos_mtt_bits = 0; +static int rx_sensitivity = 0; +static int tx_power = 0; + +/* These are the currently known IrDA USB dongles. Add new dongles here */ +static struct usb_device_id dongles[] = { + /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */ + { USB_DEVICE(0x066f, 0x4200), .driver_info = 0 }, + { }, /* The end */ +}; + +MODULE_DEVICE_TABLE(usb, dongles); + +/*------------------------------------------------------------------*/ + +static void stir_disconnect(struct usb_interface *intf); +static void stir_change_speed(struct stir_cb *self, int speed); +static int stir_hard_xmit(struct sk_buff *skb, struct net_device *dev); +static int stir_open(struct stir_cb *self); +static int stir_close(struct stir_cb *self); +static void stir_setup_receive_timer(struct stir_cb *self); +static void stir_write_bulk_callback(struct urb *urb, struct pt_regs *regs); +static void stir_change_speed_callback(struct urb *urb, struct pt_regs *regs); +static void stir_receive(struct urb *urb, struct pt_regs *regs); +static int stir_net_open(struct net_device *dev); +static int stir_net_close(struct net_device *dev); +static int stir_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void stir_net_timeout(struct net_device *dev); +static struct net_device_stats *stir_net_get_stats(struct net_device *dev); + + 
+/************************ REGISTER OPERATIONS ************************/ + +static int stir_write_reg(struct stir_cb *self, unsigned short reg, + unsigned char value) +{ + struct usb_device *dev = self->usbdev; + int ret; + + if (reg >= 0x10) { + ERROR("%s(), Ignoring bogus register read request %d\n", + __FUNCTION__, reg); + return 0; + } + + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + STIR_REQ_WRITEREG_SINGLE, STIR_REQ_WRITEREG, + value, reg, self->ctrl_buf, 1, + MSECS_TO_JIFFIES(STIR_CTRL_TIMEOUT)); + + if (ret != 1) { + ERROR("%s(), cannot write register %d = 0x%x (%d)\n", + __FUNCTION__, reg, value, ret); + return 0; + } + + return ret; +} + +/************************ BUFFER ROUTINES ************************/ +static int stir_irda_init_iobuf(iobuff_t *io, int size) +{ + io->head = kmalloc(size, GFP_KERNEL); + if (io->head != NULL) { + io->truesize = size; + io->in_frame = FALSE; + io->state = OUTSIDE_FRAME; + io->data = io->head; + } + return io->head ? 0 : -ENOMEM; +} + +/*********************** FIR WRAPPER ROUTINES ***********************/ +/* + * The STIr4220 use a proprietary wrapping of data when communicating + * with the driver at FIR. It's very similar to the SIR wrapping, + * the only difference is the start/stop bytes and CRC. + * Those minor differences force us to use our own wrappers. + * Jean II + */ + +/*------------------------------------------------------------------*/ +/* + * Prepare a SIR IrDA frame for transmission to the USB dongle. We + * use the standard async_wrap_skb() code used with most of the + * serial-based IrDA modules, and prepend the header as required by + * the SigmaTel datasheet: a two byte 0x55 0xAA sequence and two + * little-endian length bytes. 
+ */ +static inline int stir_wrap_sir_skb(struct sk_buff *skb, iobuff_t *buf) +{ + __u8 *ptr; + __u16 wraplen; + + ptr = buf->data = buf->head; + *ptr++ = 0x55; + *ptr++ = 0xAA; + + wraplen = async_wrap_skb(skb, buf->data + STIR_IRDA_HEADER, + buf->truesize - STIR_IRDA_HEADER); + + *ptr++ = wraplen & 0xff; + *ptr++ = (wraplen >> 8) & 0xff; + + buf->len = wraplen + STIR_IRDA_HEADER; + + return buf->len; +} + +/*------------------------------------------------------------------*/ +/* + * Prepare a FIR IrDA frame for transmission to the USB dongle. The + * FIR transmit frame is documented in the datasheet. It consists of + * a two byte 0x55 0xAA sequence, two little-endian length bytes, a + * sequence of exactly 16 XBOF bytes of 0x7E, two BOF bytes of 0x7E, + * then the data escaped as follows: + * + * 0x7D -> 0x7D 0x5D + * 0x7E -> 0x7D 0x5E + * 0x7F -> 0x7D 0x5F + * + * Then, 4 bytes of little endian (stuffed) FCS follow, then two + * trailing EOF bytes of 0x7E. + */ +static inline int stir_stuff_fir_byte(__u8 *buf, __u8 c) +{ + if (c == 0x7d || c == 0x7e || c == 0x7f) { + *buf++ = 0x7d; + *buf = c ^ 0x20; + return 2; + } + *buf = c; + return 1; +} + +static int stir_wrap_fir_skb(struct sk_buff *skb, iobuff_t *buf) +{ + __u8 *ptr; + __u8 *hdr; + __u32 fcs = ~(crc32_le(~0, skb->data, skb->len)); + __u16 wraplen; + int i; + + /* Size of header (2) + size (2) + preamble (16) + bofs (2) + + * stuffed fcs bytes (8) + eofs (2) + */ + if (buf->truesize < 32) + return 0; + + /* Header */ + ptr = buf->data = buf->head; + *ptr++ = 0x55; + *ptr++ = 0xAA; + + hdr = ptr; + ptr += 2; + + /* Preamble */ + for (i = 0; i < 16; i++) + *ptr++ = 0x7f; + + /* BOFs */ + *ptr++ = 0x7e; + *ptr++ = 0x7e; + + /* Address / Control / Information */ + for (i = 0; i < skb->len; i++) { + /* Must be able to fit possibly stuffed byte, 4 + * possibly stuffed FCS bytes, and 2 trailing EOF + * bytes. 
+ */ + if (ptr - buf->head >= buf->truesize - 12) return 0; + ptr += stir_stuff_fir_byte(ptr, skb->data[i]); + } + + /* FCS */ + ptr += stir_stuff_fir_byte(ptr, fcs & 0xff); + ptr += stir_stuff_fir_byte(ptr, (fcs >> 8) & 0xff); + ptr += stir_stuff_fir_byte(ptr, (fcs >> 16) & 0xff); + ptr += stir_stuff_fir_byte(ptr, (fcs >> 24) & 0xff); + + /* EOF */ + *ptr++ = 0x7e; + *ptr++ = 0x7e; + + /* Total lenght, minus the header */ + wraplen = ptr - buf->head - STIR_IRDA_HEADER; + + /* Fill in header length */ + *hdr++ = wraplen & 0xff; + *hdr++ = (wraplen >> 8) & 0xff; + + buf->len = wraplen + STIR_IRDA_HEADER; + + return buf->len; +} + +/* + * Function async_bump (buf, len, stats) + * + * Got a frame, make a copy of it, and pass it up the stack! We can try + * to inline it since it's only called from state_inside_frame + */ +static inline void stir_fir_bump(struct net_device *dev, + struct net_device_stats *stats, + iobuff_t *rx_buff) +{ + struct sk_buff *newskb; + struct sk_buff *dataskb; + int docopy; + + /* Check if we need to copy the data to a new skb or not. + * If the driver doesn't use ZeroCopy Rx, we have to do it. + * With ZeroCopy Rx, the rx_buff already point to a valid + * skb. But, if the frame is small, it is more efficient to + * copy it to save memory (copy will be fast anyway - that's + * called Rx-copy-break). Jean II */ + docopy = ((rx_buff->skb == NULL) || + (rx_buff->len < IRDA_RX_COPY_THRESHOLD)); + + /* Allocate a new skb */ + newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize); + if (!newskb) { + stats->rx_dropped++; + /* We could deliver the current skb if doing ZeroCopy Rx, + * but this would stall the Rx path. Better drop the + * packet... Jean II */ + return; + } + + /* Align IP header to 20 bytes (i.e. 
increase skb->data) + * Note this is only useful with IrLAN, as PPP has a variable + * header size (2 or 1 bytes) - Jean II */ + skb_reserve(newskb, 1); + + if(docopy) { + /* Copy data without CRC (lenght already checked) */ + memcpy(newskb->data, rx_buff->data, rx_buff->len - 4); + /* Deliver this skb */ + dataskb = newskb; + } else { + /* We are using ZeroCopy. Deliver old skb */ + dataskb = rx_buff->skb; + /* And hook the new skb to the rx_buff */ + rx_buff->skb = newskb; + rx_buff->head = newskb->data; /* NOT newskb->head */ + } + + /* Set proper length on skb (without CRC) */ + skb_put(dataskb, rx_buff->len - 4); + + /* Feed it to IrLAP layer */ + dataskb->dev = dev; + dataskb->mac.raw = dataskb->data; + dataskb->protocol = htons(ETH_P_IRDA); + + netif_rx(dataskb); + + stats->rx_packets++; + stats->rx_bytes += rx_buff->len - 4; + + /* Clean up rx_buff (redundant with async_unwrap_bof() ???) */ + rx_buff->data = rx_buff->head; + rx_buff->len = 0; +} + +/* + * Function async_unwrap_bof(dev, byte) + * + * Handle Beggining Of Frame character received within a frame + * + */ +static inline void +stir_unwrap_fir_bof(struct stir_cb *self, + iobuff_t *rx_buff, __u8 byte) +{ + /* Not supposed to happen... 
- Jean II */ + IRDA_DEBUG(0, "%s(), Received STIR-XBOF !\n", __FUNCTION__); +} + +/* + * Function async_unwrap_eof(dev, byte) + * + * Handle End Of Frame character received within a frame + * + */ +static inline void stir_unwrap_fir_eof(struct stir_cb *self, + iobuff_t *rx_buff, __u8 byte) +{ + __u32 fcs_frame; + __u32 fcs_calc; + + switch(rx_buff->state) { + + case BEGIN_FRAME: + case LINK_ESCAPE: + case INSIDE_FRAME: + default: + /* We receive multiple BOF/EOF */ + if(rx_buff->len == 0) + break; + + /* Note : in the case of BEGIN_FRAME and LINK_ESCAPE, + * the fcs will most likely not match and generate an + * error, as expected - Jean II */ + rx_buff->state = OUTSIDE_FRAME; + rx_buff->in_frame = FALSE; + + /* We can't inline the CRC calculation, as we have + * nowhere to store it in rx_buff... Jean II */ + if(rx_buff->len > 4) { + fcs_calc = ~(crc32_le(~0, rx_buff->data, + rx_buff->len - 4)); + fcs_frame = (rx_buff->data[rx_buff->len - 4] | + (rx_buff->data[rx_buff->len - 3] << 8) | + (rx_buff->data[rx_buff->len - 2] << 16) | + (rx_buff->data[rx_buff->len - 1] << 24)); + IRDA_DEBUG(0, "%s(), crc = 0x%X, crc = 0x%X, len = %d\n", + __FUNCTION__, fcs_calc, fcs_frame, rx_buff->len); + } else { + fcs_calc = 0; + fcs_frame = 1; + } + + /* You may see an abnormal number of CRC failures around + * there... This is due to a nice hardware bug in + * the STIr4200. Quite often, the hardware will + * pass us two or more packets in a URB without any + * BOF/EOF in between. The CRC is the one of the last + * packet, but we will treat those packets as a single + * packet, so it won't match. + * Of course, because there is no separators, there is + * no way we can correct this bug. + * Jean II */ + + /* Test FCS and signal success if the frame is good */ + if (fcs_calc == fcs_frame) { + /* Deliver frame */ + stir_fir_bump(self->netdev, &self->stats, rx_buff); + } else { + /* Wrong CRC, discard frame! 
*/ + irda_device_set_media_busy(self->netdev, TRUE); + + IRDA_DEBUG(0, "%s(), crc error\n", + __FUNCTION__); + self->stats.rx_errors++; + self->stats.rx_crc_errors++; + } + + /* Fall through : We may receive only a single BOF/EOF */ + case OUTSIDE_FRAME: + /* BOF == EOF, so beware... */ + + /* Now receiving frame */ + rx_buff->state = BEGIN_FRAME; + rx_buff->in_frame = TRUE; + + /* Time to initialize receive buffer */ + rx_buff->data = rx_buff->head; + rx_buff->len = 0; + rx_buff->fcs = INIT_FCS; + break; + } +} + +/* + * Function async_unwrap_ce(dev, byte) + * + * Handle Character Escape character received within a frame + * + */ +static inline void stir_unwrap_fir_ce(struct stir_cb *self, + iobuff_t *rx_buff, __u8 byte) +{ + switch(rx_buff->state) { + case OUTSIDE_FRAME: + /* Activate carrier sense */ + irda_device_set_media_busy(self->netdev, TRUE); + break; + + case LINK_ESCAPE: + WARNING("%s: state not defined\n", __FUNCTION__); + break; + + case BEGIN_FRAME: + case INSIDE_FRAME: + default: + /* Stuffed byte comming */ + rx_buff->state = LINK_ESCAPE; + break; + } +} + +/* + * Function async_unwrap_other(dev, byte) + * + * Handle other characters received within a frame + * + */ +static inline void stir_unwrap_fir_other(struct stir_cb *self, + iobuff_t *rx_buff, __u8 byte) +{ + switch(rx_buff->state) { + /* This is on the critical path, case are ordered by + * probability (most frequent first) - Jean II */ + case INSIDE_FRAME: + /* Must be the next byte of the frame */ + if (rx_buff->len < rx_buff->truesize) { + rx_buff->data[rx_buff->len++] = byte; + } else { + IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", + __FUNCTION__); + rx_buff->state = OUTSIDE_FRAME; + } + break; + + case LINK_ESCAPE: + /* + * Stuffed char, complement bit 5 of byte + * following CE, IrLAP p.114 + */ + byte ^= IRDA_TRANS; + if (rx_buff->len < rx_buff->truesize) { + rx_buff->data[rx_buff->len++] = byte; + rx_buff->state = INSIDE_FRAME; + } else { + IRDA_DEBUG(1, "%s(), Rx buffer 
overflow, aborting\n", + __FUNCTION__); + rx_buff->state = OUTSIDE_FRAME; + } + break; + + case OUTSIDE_FRAME: + /* Activate carrier sense */ + if(byte != XBOF) + irda_device_set_media_busy(self->netdev, TRUE); + break; + + case BEGIN_FRAME: + default: + rx_buff->data[rx_buff->len++] = byte; + rx_buff->state = INSIDE_FRAME; + break; + } +} + +/* + * Function stir_async_unwrap_fir_chars (dev, rx_buff, byte) + * + * Parse and de-stuff frame received from the IrDA-port + * + */ +void stir_async_fir_chars(struct stir_cb *self, + iobuff_t *rx_buff, + __u8 *bytes, int len) +{ + __u8 byte; + int i; + + /* Having the loop here is more efficient - Jean II */ + for (i = 0; i < len; i++) { + byte = bytes[i]; + switch(byte) { + case CE: + stir_unwrap_fir_ce(self, rx_buff, byte); + break; + case STIR_XBOF: + stir_unwrap_fir_bof(self, rx_buff, byte); + break; + case STIR_EOF: + stir_unwrap_fir_eof(self, rx_buff, byte); + break; + default: + stir_unwrap_fir_other(self, rx_buff, byte); + break; + } + } +} + +/************************ TRANSMIT ROUTINES ************************/ +/* + * Receive packets from the IrDA stack and send them on the USB pipe. + * Handle speed change, timeout and lot's of uglyness... + */ + + +/*------------------------------------------------------------------*/ +/* + * This function returns the bytes that should be programmed into the + * MODE and PDCLK registers, respectively, in order to get a desired + * transmit and receive bitrate. 
+ */ +static void stir_get_speed_bytes(int speed, __u8 *bytes) +{ + switch (speed) { + case 2400: + bytes[0] = STIR_MODE_SIR | STIR_MODE_NRESET | STIR_MODE_2400; + bytes[1] = STIR_PDCLK_2400; + break; + default: + case 9600: + bytes[0] = STIR_MODE_SIR | STIR_MODE_NRESET; + bytes[1] = STIR_PDCLK_9600; + break; + case 19200: + bytes[0] = STIR_MODE_SIR | STIR_MODE_NRESET; + bytes[1] = STIR_PDCLK_19200; + break; + case 38400: + bytes[0] = STIR_MODE_SIR | STIR_MODE_NRESET; + bytes[1] = STIR_PDCLK_38400; + break; + case 57600: + bytes[0] = STIR_MODE_SIR | STIR_MODE_NRESET; + bytes[1] = STIR_PDCLK_57600; + break; + case 115200: + bytes[0] = STIR_MODE_SIR | STIR_MODE_NRESET; + bytes[1] = STIR_PDCLK_115200; + break; + case 4000000: + bytes[0] = STIR_MODE_FIR | STIR_MODE_NRESET | STIR_MODE_FFRSTEN; + bytes[1] = STIR_PDCLK_4000000; + break; + } +} + +/*------------------------------------------------------------------*/ +/* + * Send a command to change the speed of the dongle + * Need to be called with spinlock on. + */ +static void stir_change_speed(struct stir_cb *self, int speed) +{ + __u8 speed_bytes[2]; + IRDA_DEBUG(2, "%s(), speed=%d\n", __FUNCTION__, speed); + + if (speed == -1) return; + + IRDA_DEBUG(2, "%s(), changing speed to %d\n", __FUNCTION__, + speed); + self->speed = speed; + + stir_get_speed_bytes(speed, speed_bytes); + + stir_write_reg(self, STIR_REG_MODE, speed_bytes[0]); + stir_write_reg(self, STIR_REG_PDCLK, speed_bytes[1]); +} + +/*------------------------------------------------------------------*/ +/* + * Send a frame using the bulk endpoint of the STIr4200. We wrap the + * skb in the framing required by the skb and speed we're + * transmitting. 
+ */ +static int stir_tx_submit(struct stir_cb *self, struct sk_buff *skb) +{ + struct urb *purb = self->tx_urb; + int res, mtt, txlen; + + if (purb->status != 0) { + WARNING("%s(), URB still in use!\n", __FUNCTION__); + return 1; + } + + if (self->speed == 4000000) { + txlen = stir_wrap_fir_skb(skb, &self->tx_buff); + } else { + txlen = stir_wrap_sir_skb(skb, &self->tx_buff); + } + + dev_kfree_skb(skb); + + usb_fill_bulk_urb(purb, self->usbdev, + usb_sndbulkpipe(self->usbdev, self->bulk_out_ep), + self->tx_buff.data, txlen, + stir_write_bulk_callback, self); + + purb->transfer_buffer_length = txlen; + /* Note : unlink *must* be Asynchronous because of the code in + * stir_net_timeout() -> call in irq - Jean II */ + purb->transfer_flags = URB_ASYNC_UNLINK; + + /* Timeout need to be shorter than NET watchdog timer */ + purb->timeout = MSECS_TO_JIFFIES(STIR_BULK_TIMEOUT); + purb->context = self; + + /* Generate min turn time. FIXME: can we do better than this? */ + /* Trying to a turnaround time at this level is trying to measure + * processor clock cycle with a wrist-watch, approximate at best... + * + * What we know is the last time we received a frame over USB. + * Due to latency over USB that depend on the USB load, we don't + * know when this frame was received over IrDA (a few ms before ?) + * Then, same story for our outgoing frame... 
+ * + * Jean II */ + + mtt = irda_get_mtt(skb); + if (mtt) { + int diff; + int sdiff = 0; + struct timeval now; + + do_gettimeofday(&now); + diff = now.tv_usec - self->stamp.tv_usec; + /* Factor in USB delays -> Get rid of udelay() that + * would be lost in the noise - Jean II */ + diff += STIR_MIN_RTT; + if (diff < 0) { + diff += 1000000; + sdiff = -1; + } + + /* Check if the mtt is larger than the time we have + * already used by all the protocol processing + */ + if (now.tv_sec + sdiff == self->stamp.tv_usec && + mtt > diff) { + mtt -= diff; + if (mtt > 1000) + mdelay(mtt/1000); + else + udelay(mtt); + } + } + + /* Ask USB to send the packet */ + if ((res = usb_submit_urb(purb, GFP_ATOMIC))) { + WARNING("%s(), failed Tx URB\n", __FUNCTION__); + self->stats.tx_errors++; + /* Let USB recover : We will catch that in the watchdog */ + /*netif_start_queue(netdev);*/ + } else { + /* Increment packet stats */ + self->stats.tx_packets++; + self->stats.tx_bytes += skb->len; + } + + return 0; +} + +/*------------------------------------------------------------------*/ +/* + * This function changes the transmission/receive speed + * asynchronously. The STIr4200 documentation mentions a "Write + * Multiple Registers" call which would work quite nicely in this + * situation for updating both the MODE and PDCLK registers which + * happen to be adjacent to each other. Unfortunately, I haven't been + * able to get this call to work for me. Instead, I do a two URB + * requests in a row, setting each register. The second URB request + * is started when the first completes in + * stir_change_speed_callback(). 
+ */ +static int stir_change_speed_async(struct stir_cb *self, int speed) +{ + struct urb *purb; + struct usb_ctrlrequest *dr; + __u8 *sbuf; + int status; + + if (speed == -1) return 0; + + sbuf = (__u8 *) self->ctrl_buf; + stir_get_speed_bytes(speed, sbuf); + + /* Fill first URB */ + purb = self->speed_urb[0]; + + if (purb->status != 0) { + WARNING("%s(), URB still in use!\n", __FUNCTION__); + return -EINVAL; + } + + dr = (void *) (sbuf + 2); + dr->bRequestType = STIR_REQ_WRITEREG; + dr->bRequest = STIR_REQ_WRITEREG_SINGLE; + dr->wValue = cpu_to_le16(sbuf[0]); + dr->wIndex = cpu_to_le16(STIR_REG_MODE); + dr->wLength = cpu_to_le16(0); + + usb_fill_control_urb(purb, self->usbdev, + usb_sndctrlpipe(self->usbdev, 0), + (unsigned char *) dr, sbuf, 0, + stir_change_speed_callback, self); + purb->transfer_buffer_length = 0; + + /* Fill second URB */ + purb = self->speed_urb[1]; + + if (purb->status != 0) { + WARNING("%s(), URB still in use!\n", __FUNCTION__); + return -EINVAL; + } + + dr = (dr + 1); + dr->bRequestType = STIR_REQ_WRITEREG; + dr->bRequest = STIR_REQ_WRITEREG_SINGLE; + dr->wValue = cpu_to_le16(sbuf[1]); + dr->wIndex = cpu_to_le16(STIR_REG_PDCLK); + dr->wLength = cpu_to_le16(0); + + usb_fill_control_urb(purb, self->usbdev, + usb_sndctrlpipe(self->usbdev, 0), + (unsigned char *) dr, sbuf, 0, + stir_change_speed_callback, self); + purb->transfer_buffer_length = 0; + + /* Submit first URB */ + status = usb_submit_urb(self->speed_urb[0], GFP_ATOMIC); + self->speed = speed; + + return status; +} + +/*------------------------------------------------------------------*/ +/* + * This callback is fired when a speed-change URB is complete. 
+ */ +static void stir_change_speed_callback(struct urb *purb, struct pt_regs *regs) +{ + struct stir_cb *self = purb->context; + + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + /* We should always have a context */ + if ((!self->netopen) || (!self->present)) { + IRDA_DEBUG(0, "%s(), Network is gone...\n", __FUNCTION__); + return; + } + + IRDA_DEBUG(2, "%s(): Change-speed to %d: phase %d urb status %d\n", + __FUNCTION__, self->speed, purb == self->speed_urb[1], + purb->status); + + if (purb->status != 0) { + /* Wait for net timeout to sort this out */ + return; + } + + if (purb == self->speed_urb[0]) { + /* Phase one complete. Now perform phase 2 */ + usb_submit_urb(self->speed_urb[1], GFP_ATOMIC); + return; + } + + /* We're ready for more packets */ + netif_wake_queue(self->netdev); +} + +/*------------------------------------------------------------------*/ +/* + * This callback is fired when we expect a frame to have completed + * trasmission from the FIFO. We are now able to perform a + * speed-change without affecting untransmitted data. The callbacks + * from stir_change_speed_async() will take care of calling + * netif_wake_queue() when this process has completed. + */ +static void stir_update_speed_callback(unsigned long data) +{ + struct stir_cb *self = (struct stir_cb *) data; + + /* Find ourselves */ + ASSERT(self != NULL, return;); + + /* If the network is closed or the device gone, stop everything */ + if ((!self->netopen) || (!self->present)) { + IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__); + /* Don't re-submit the URB : will stall the Rx path */ + return; + } + + + IRDA_DEBUG(2, "%s(): Changing to speed %d now\n", __FUNCTION__, + self->new_speed); + + if (stir_change_speed_async(self, self->new_speed) == 0) + self->new_speed = -1; +} + +/*------------------------------------------------------------------*/ +/* + * Send an IrDA frame to the USB dongle (for transmission). If a + * speed change is required, handle this first. 
+ */ +static int stir_hard_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct stir_cb *self = netdev->priv; + unsigned long flags; + s32 speed; + int err = 1; /* Failed */ + + netif_stop_queue(netdev); + + /* Protect us from USB callbacks, net watchdog and else. */ + spin_lock_irqsave(&self->lock, flags); + + /* Check if the device is still there. + * We need to check self->present under the spinlock because + * of stir_disconnect() is synchronous - Jean II */ + if (!self->present) { + IRDA_DEBUG(0, "%s(), Device is gone...\n", __FUNCTION__); + goto drop; + } + + IRDA_DEBUG(2, "%s(): Xmit: len: %d, speed: %d (%d)\n", + __FUNCTION__, skb->len, irda_get_next_speed(skb), + self->speed); + + /* Check if we need to change the speed */ + speed = irda_get_next_speed(skb); + if ((speed != self->speed) && (speed != -1)) { + if (skb->len == 0) { + /* Set the desired speed */ + if (stir_change_speed_async(self, speed)) { + ERROR("%s(), change_speed() returned error\n", + __FUNCTION__); + goto drop; + } + + /* We let the change_speed callback drive the + * rest of the sending, and eventual call of + * netif_wake_queue */ + netdev->trans_start = jiffies; + err = 0; /* No error */ + goto drop; + } else { + /* Wait until after the frame is transmitted to + * change speeds */ + self->new_speed = speed; + } + } + + if (stir_tx_submit(self, skb) == 0) { + netdev->trans_start = jiffies; + spin_unlock_irqrestore(&self->lock, flags); + return 0; + } + +drop: + /* Drop silently the skb and exit */ + dev_kfree_skb(skb); + spin_unlock_irqrestore(&self->lock, flags); + return err; +} + +/*------------------------------------------------------------------*/ +/* + * Note : this function will be called only for tx_urb... 
+ */ +static void stir_write_bulk_callback(struct urb *purb, struct pt_regs *regs) +{ + unsigned long flags; + struct stir_cb *self = purb->context; + + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + /* We should always have a context */ + ASSERT(self != NULL, return;); + + /* Check for timeout and other USB nasties */ + if(purb->status != 0) { + /* I get a lot of -ECONNABORTED = -103 here - Jean II */ + IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, purb->status, purb->transfer_flags); + + /* Don't do anything here, that might confuse the USB layer, + * and we could go in recursion and blow the kernel stack... + * Instead, we will wait for stir_net_timeout(), the + * network layer watchdog, to fix the situation. + * Jean II */ + /* A reset of the dongle might be welcomed here - Jean II */ + return; + } + + /* urb is now available */ + //purb->status = 0; -> tested above + + /* Make sure we read self->present properly */ + spin_lock_irqsave(&self->lock, flags); + + /* If the network is closed, stop everything */ + if ((!self->netopen) || (!self->present)) { + IRDA_DEBUG(0, "%s(), Network is gone...\n", __FUNCTION__); + spin_unlock_irqrestore(&self->lock, flags); + return; + } + + if (self->new_speed != -1) { + /* We'll assume in this case that the FIFO was empty + * at the time of transmission, and the current URB + * has just now begun transmission. We need to wait + * until it has completed sending before we change + * speeds. I use STIR_MIN_SPEED_DELAY as a fudge + * factor to make sure we've delayed long enough for a + * UA response to an SNRM to get across in my setup. + * It is possible that this value may need to be more + * conservative (larger). 
+ */ + struct timer_list *st = &self->speed_timer; + int wait_msecs + = purb->actual_length * 8 * 1000 / self->speed; + + if (wait_msecs < STIR_MIN_SPEED_DELAY) + wait_msecs = STIR_MIN_SPEED_DELAY; + + mod_timer(st, jiffies + MSECS_TO_JIFFIES(wait_msecs)); + } else { + /* Guess what, there is another bug ! If we send multiple + * packets per window, the first packet get corrupted. + * Probably we need an ugly timeout around here, or + * maybe check the hardware status or something. + * I just can't believe this ! + * Jean II */ + + /* Allow the stack to send more packets */ + netif_wake_queue(self->netdev); + } + + spin_unlock_irqrestore(&self->lock, flags); +} + +/*------------------------------------------------------------------*/ +/* + * Helper function for watchdog timer. Check out the status of an + * URB, and respond accordingly, updating device statistics. + */ +static inline int stir_check_urb(struct stir_cb *self, struct urb *purb, + const char *urb_name) +{ + if (purb->status != 0) { + IRDA_DEBUG(0, "%s: %s timed out, urb->status=%d, " + "urb->transfer_flags=0x%04X\n", + self->netdev->name, urb_name, purb->status, + purb->transfer_flags); + + /* Increase error count */ + self->stats.tx_errors++; + + switch (purb->status) { + case -EINPROGRESS: /* -115 */ + usb_unlink_urb(purb); + /* Note : above will *NOT* call netif_wake_queue() + * in completion handler, because purb->status will + * be -ENOENT. We will fix that at the next watchdog, + * leaving more time to USB to recover... + * Also, we are in interrupt, so we need to have + * USB_ASYNC_UNLINK to work properly... + * Jean II */ + break; + case -ECONNABORTED: /* -103 */ + case -ECONNRESET: /* -104 */ + case -ETIMEDOUT: /* -110 */ + case -ENOENT: /* -2 (urb unlinked by us) */ + default: /* ??? 
- Play safe */ + purb->status = 0; + netif_wake_queue(self->netdev); + break; + } + return 1; + } + return 0; +} + +/*------------------------------------------------------------------*/ +/* + * Watchdog timer from the network layer. + * After a predetermined timeout, if we don't give confirmation that + * the packet has been sent (i.e. no call to netif_wake_queue()), + * the network layer will call this function. + * Note that URB that we submit have also a timeout. When the URB timeout + * expire, the normal URB callback is called (write_bulk_callback()). + */ +static void stir_net_timeout(struct net_device *netdev) +{ + unsigned long flags; + struct stir_cb *self = netdev->priv; + int done = 0; /* If we have made any progress */ + + IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __FUNCTION__); + + /* Protect us from USB callbacks, net Tx and else. */ + spin_lock_irqsave(&self->lock, flags); + + if (!self->present) { + WARNING("%s(), device not present!\n", __FUNCTION__); + netif_stop_queue(netdev); + spin_unlock_irqrestore(&self->lock, flags); + return; + } + + + done += stir_check_urb(self, self->tx_urb, "Transmit"); + done += stir_check_urb(self, self->speed_urb[0], "Speed 0"); + done += stir_check_urb(self, self->speed_urb[1], "Speed 1"); + + spin_unlock_irqrestore(&self->lock, flags); + + /* XXX Deal with speed urb in a similar manner */ + + /* Maybe we need a reset */ + /* Note : Some drivers seem to use a usb_set_interface() when they + * need to reset the hardware. Hum... + */ + + /* if(done == 0) */ +} + +/************************* RECEIVE ROUTINES *************************/ +/* + * Receive packets from the USB layer stack and pass them to the IrDA stack. + * Try to work around USB failures... + */ + +/* + * Note : + * Some of you may have noticed that most dongle have an interrupt in pipe + * that we don't use. Here is the little secret... + * When we hang a Rx URB on the bulk in pipe, it generates some USB traffic + * in every USB frame. 
This is unnecessary overhead. + * The interrupt in pipe will generate an event every time a packet is + * received. Reading an interrupt pipe adds minimal overhead, but has some + * latency (~1ms). + * If we are connected (speed != 9600), we want to minimise latency, so + * we just always hang the Rx URB and ignore the interrupt. + * If we are not connected (speed == 9600), there is usually no Rx traffic, + * and we want to minimise the USB overhead. In this case we should wait + * on the interrupt pipe and hang the Rx URB only when an interrupt is + * received. + * Jean II + */ + +/*------------------------------------------------------------------*/ +/* + * Submit a Rx URB to the USB layer to handle reception of a frame + * Mostly called by the completion callback of the previous URB. + * + * Jean II + */ +static void stir_rx_submit(struct stir_cb *self, struct urb *purb) +{ + int ret; + + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + /* Check that we have an urb */ + if (!purb) { + WARNING("%s(), Bug : purb == NULL\n", __FUNCTION__); + return; + } + + /* Reinitialize URB */ + usb_fill_bulk_urb(purb, self->usbdev, + usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep), + self->rxdata, STIR_IRDA_RXFRAME_LEN, + stir_receive, self); + purb->transfer_buffer_length = STIR_IRDA_RXFRAME_LEN; + purb->status = 0; + + ret = usb_submit_urb(purb, GFP_ATOMIC); + if (ret) { + /* If this ever happen, we are in deep s***. + * Basically, the Rx path will stop... 
*/ + WARNING("%s(), Failed to submit Rx URB %d\n", __FUNCTION__, ret); + } +} + + +/*------------------------------------------------------------------*/ +/* + * Function stir_delayed_rx_submit(unsigned long data) + * + * Called by the kernel timer subsystem to restart receiving after + * a delay (because the RX FIFO was empty) + * + */ +static void stir_delayed_rx_submit(unsigned long data) +{ + struct stir_cb *self = (struct stir_cb *) data; + int i; + + /* Find ourselves */ + ASSERT(self != NULL, return;); + + /* If the network is closed or the device gone, stop everything */ + if ((!self->netopen) || (!self->present)) { + IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__); + /* Don't re-submit the URB : will stall the Rx path */ + return; + } + + self->idle_rx_urb = self->rx_urb[STIR_MAX_ACTIVE_RX_URBS]; + self->idle_rx_urb->context = NULL; + + for (i = 0; i < STIR_MAX_ACTIVE_RX_URBS; i++) + stir_rx_submit(self, self->rx_urb[i]); + +} + +/*------------------------------------------------------------------*/ +/* + * Setup the receive timer. This function is called every time a + * zero-length frame is received from the bulk endpoint. With the + * SigmaTel, if the FIFO is empty a zero-length buffer is received + * immediately. If we immediately resubmitted the URB, we'd spend a + * lot of system time setting up and clearing URBs. Instead, this + * function sets up a period of moderate attentiveness, followed by a + * fairly inattentive state where URBs are submitted only often enough + * so a FIFO overrun is unlikely. The do_reset parameter allows the + * transmit code to reset the timer so that a response to a + * transmitted packet can be received quickly. 
+ */ +static void stir_setup_receive_timer(struct stir_cb *self) +{ + struct timer_list *st = &self->submit_rx_timer; + unsigned long expires; + + if (self->idle_periods < STIR_IDLE_PERIODS) { + self->idle_periods++; + expires = jiffies + MSECS_TO_JIFFIES(STIR_IDLE_TIMEOUT); + } else { + if (self->speed <= 0 || + self->speed > STIR_FIFO_SIZE * 8) { + expires = jiffies + 2 * HZ; + } else { + expires = jiffies + + STIR_FIFO_SIZE * 8 * HZ / self->speed; + } + } + + mod_timer(st, jiffies); +} + +/*------------------------------------------------------------------*/ +/* + * Function stir_receive(purb) + * + * Called by the USB subsystem when a frame has been received + * + */ +static void stir_receive(struct urb *purb, struct pt_regs *regs) +{ + struct stir_cb *self = (struct stir_cb *) purb->context; + int i; + + IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, purb->actual_length); + + /* Find ourselves */ + ASSERT(self != NULL, return;); + + /* If the network is closed or the device gone, stop everything */ + if ((!self->netopen) || (!self->present)) { + IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__); + /* Don't re-submit the URB : will stall the Rx path */ + return; + } + + /* Check the status */ + if(purb->status != 0) { + switch (purb->status) { + case -ECONNRESET: /* -104 */ + IRDA_DEBUG(0, "%s(), Connection Reset (-104), " + "transfer_flags 0x%04X \n", + __FUNCTION__, purb->transfer_flags); + /* uhci_cleanup_unlink() is going to kill the Rx + * URB just after we return. No problem, at this + * point the URB will be idle ;-) - Jean II */ + break; + default: + IRDA_DEBUG(0, "%s(), RX status %d, " + "transfer_flags 0x%04X \n", __FUNCTION__, + purb->status, purb->transfer_flags); + break; + } + purb->actual_length = 0; /* Treat it like an empty frame */ + } + + /* If we received data in this last urb, submit another right + * away. Otherwise, we perform the submit on a timer, so as + * not to flood the device with idle requests. 
We should + * scale this delay by the speed at which we expec the FIFO to + * have filled up. + */ + if (purb->actual_length <= 0) { +#if 1 + stir_setup_receive_timer(self); +#else + /* The code above may not work properly for two reason : + * First, the minimum timer granularity we can have is 10ms, + * which is 3 or 4 turnarounds or small packets, so it will + * show up in term of latency performance. + * Second, the hardware FIR Rx FIFO is busted, and will + * aggregate Rx packets without properly adding a separator + * (BOF/EOF) in between, and our Rx code won't be able + * to properly decapsulate that. + * The hardware interface is c**p, and there is only so + * much we can do to workaround that... + * Jean II */ + /* Actually, my test at FIR show that this made absolutely + * no difference. This is going to be very ugly. Jean II */ + + /* Submit the idle URB to replace the URB we've just received */ + stir_rx_submit(self, self->idle_rx_urb); + + /* Recycle Rx URB : Now, the idle URB is the present one */ + purb->context = NULL; + self->idle_rx_urb = purb; +#endif + return; + } + + self->idle_periods = 0; + + /* + * Remember the time we received this frame, so we can + * reduce the min turn time a bit since we will know + * how much time we have used for protocol processing + */ + do_gettimeofday(&self->stamp); + + if (self->speed == 4000000) { + stir_async_fir_chars(self, &self->rx_buff, + self->rxdata, purb->actual_length); + } else { + for (i = 0; i < purb->actual_length; i++) { + async_unwrap_char(self->netdev, &self->stats, + &self->rx_buff, self->rxdata[i]); + } + } + + /* Note : at this point, the URB we've just received (purb) + * is still referenced by the USB layer. For example, if we + * have received a -ECONNRESET, uhci_cleanup_unlink() will + * continue to process it (in fact, cleaning it up). + * If we were to submit this URB, disaster would ensue. + * Therefore, we submit our idle URB, and put this URB in our + * idle slot.... 
+ * Jean II */ + /* Note : with this scheme, we could submit the idle URB before + * processing the Rx URB. Another time... Jean II */ + + /* Submit the idle URB to replace the URB we've just received */ + stir_rx_submit(self, self->idle_rx_urb); + + /* Recycle Rx URB : Now, the idle URB is the present one */ + purb->context = NULL; + self->idle_rx_urb = purb; +} + + +/********************** IRDA DEVICE CALLBACKS **********************/ +/* + * Main calls from the IrDA/Network subsystem. + * Mostly registering a new irda-usb device and removing it.... + * We only deal with the IrDA side of the business, the USB side will + * be dealt with below... + */ + +/*------------------------------------------------------------------*/ +/* + * Function stir_net_open (dev) + * + * Network device is taken up. Usually this is done by "ifconfig irda0 up" + * + * Note : don't mess with self->netopen - Jean II + */ +static int stir_net_open(struct net_device *netdev) +{ + struct stir_cb *self = (struct stir_cb *) netdev->priv; + char hwname[16]; + int i; + + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); + + /* Initialize internal values */ + self->new_speed = -1; + + /* To do *before* submitting Rx urbs and starting net Tx queue + * Jean II */ + self->netopen = 1; + + /* + * Now that everything should be initialized properly, + * Open new IrLAP layer instance to take care of us... + * Note : will send immediately a speed change... + */ + sprintf(hwname, "usb#%d", self->usbdev->devnum); + self->irlap = irlap_open(netdev, &self->qos, hwname); + if (!self->irlap) + return -ENOMEM; + + /* Allow IrLAP to send data to us */ + netif_start_queue(netdev); + + /* We submit all the Rx URB except for one that we keep idle. 
+ * Need to be initialised before submitting other USBs, because + * in some cases as soon as we submit the URBs the USB layer + * will trigger a dummy receive - Jean II */ + self->idle_rx_urb = self->rx_urb[STIR_MAX_ACTIVE_RX_URBS]; + self->idle_rx_urb->context = NULL; + + /* Now that we can pass data to IrLAP, allow the USB layer + * to send us some data... */ + for (i = 0; i < STIR_MAX_ACTIVE_RX_URBS; i++) + stir_rx_submit(self, self->rx_urb[i]); + + /* Ready to play !!! */ + return 0; +} + +/*------------------------------------------------------------------*/ +/* + * Function stir_net_close (self) + * + * Network device is taken down. Usually this is done by + * "ifconfig irda0 down" + */ +static int stir_net_close(struct net_device *netdev) +{ + struct stir_cb *self; + int i; + + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); + + self = (struct stir_cb *) netdev->priv; + + /* Clear this flag *before* unlinking the urbs and *before* + * stopping the network Tx queue - Jean II */ + self->netopen = 0; + + /* Stop network Tx queue */ + netif_stop_queue(netdev); + + /* Deallocate all the Rx path buffers (URBs and skb) */ + for (i = 0; i < STIR_MAX_RX_URBS; i++) { + struct urb *purb = self->rx_urb[i]; + /* Cancel the receive command */ + usb_unlink_urb(purb); + } + /* Cancel Tx and speed URB - need to be synchronous to avoid races */ + self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK; + usb_unlink_urb(self->tx_urb); + self->speed_urb[0]->transfer_flags &= ~URB_ASYNC_UNLINK; + usb_unlink_urb(self->speed_urb[0]); + self->speed_urb[1]->transfer_flags &= ~URB_ASYNC_UNLINK; + usb_unlink_urb(self->speed_urb[1]); + + /* Stop and remove instance of IrLAP */ + if (self->irlap) + irlap_close(self->irlap); + self->irlap = NULL; + + return 0; +} + +/*------------------------------------------------------------------*/ +/* + * IOCTLs : Extra out-of-band network commands... 
+ */ +static int stir_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + unsigned long flags; + struct if_irda_req *irq = (struct if_irda_req *) rq; + struct stir_cb *self; + int ret = 0; + + self = dev->priv; + + IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); + + switch (cmd) { + case SIOCSBANDWIDTH: /* Set bandwidth */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + /* Protect us from USB callbacks, net watchdog and else. */ + spin_lock_irqsave(&self->lock, flags); + /* Check if the device is still there */ + if(self->present) { + /* Set the desired speed */ + stir_change_speed(self, irq->ifr_baudrate); + } + spin_unlock_irqrestore(&self->lock, flags); + break; + case SIOCSMEDIABUSY: /* Set media busy */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + /* Check if the IrDA stack is still there */ + if(self->netopen) + irda_device_set_media_busy(self->netdev, TRUE); + break; + case SIOCGRECEIVING: /* Check if we are receiving right now */ + + /* Note : because of the way UHCI works, it's almost impossible + * to get this info. The Controller DMA directly to memory and + * signal only when the whole frame is finished. To know if the + * first TD of the URB has been filled or not seems hard work... + * + * The other solution would be to use the "receiving" command + * on the default decriptor with a usb_control_msg(), but that + * would add USB traffic and would return result only in the + * next USB frame (~1ms). + * + * I've been told that current dongles send status info on + * their interrupt endpoint, and that's what the Windows + * driver uses to know this info. + * Unfortunately, this is not yet in the spec... 
+ * + * Jean II + */ + + irq->ifr_receiving = 0; + break; + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +/*------------------------------------------------------------------*/ +/* + * Get device stats (for /proc/net/dev and ifconfig) + */ +static struct net_device_stats *stir_net_get_stats(struct net_device *dev) +{ + struct stir_cb *self = dev->priv; + return &self->stats; +} + +/********************* IRDA CONFIG SUBROUTINES *********************/ +/* + * Various subroutines dealing with IrDA and network stuff we use to + * configure and initialise each irda-usb instance. + * These functions are used below in the main calls of the driver... + */ + +/*------------------------------------------------------------------*/ +/* + * Set proper values in the IrDA QOS structure + */ +static void stir_init_qos(struct stir_cb *self) +{ + IRDA_DEBUG(3, "%s()\n", __FUNCTION__); + + /* Initialize QoS for this device */ + irda_init_max_qos_capabilies(&self->qos); + + /* That's the Rx capability. */ + self->qos.baud_rate.bits &= IR_2400 | IR_9600 | IR_19200 | + IR_38400 | IR_57600 | IR_115200 | + (IR_4000000 << 8); + self->qos.min_turn_time.bits &= 0x07; /* >= 1ms turnaround */ + self->qos.additional_bofs.bits &= 0xff; /* Any additional BOFs */ + self->qos.window_size.bits &= 0x7f; /* Up to 7 unacked frames */ + //self->qos.data_size.bits &= 0x07; /* Conservative: 256 bytes */ + self->qos.data_size.bits &= 0x3f; /* This seems to work OK */ + /* Module parameter can override the rx window size */ + if (qos_mtt_bits) + self->qos.min_turn_time.bits = qos_mtt_bits; + /* + * Note : most of those values apply only for the receive path, + * the transmit path will be set differently - Jean II + */ + irda_qos_bits_to_value(&self->qos); + + /* We would need to fix the Tx window to 1 - Jean II */ +} + +/* Setup timers for future use. 
*/ +static void stir_init_timer(struct stir_cb *self) +{ + self->submit_rx_timer.function = stir_delayed_rx_submit; + self->submit_rx_timer.data = (unsigned long) self; + init_timer(&self->submit_rx_timer); + + self->speed_timer.function = stir_update_speed_callback; + self->speed_timer.data = (unsigned long) self; + init_timer(&self->speed_timer); +} + +/*------------------------------------------------------------------*/ +/* + * Initialise the network side of the irda-usb instance + * Called when a new USB instance is registered in stir_probe() + */ +static int stir_open(struct stir_cb *self) +{ + struct net_device *netdev = self->netdev; + + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); + + stir_init_qos(self); + + stir_init_timer(self); + + /* Initialize the device -- bring it out of reset, set to 9600 bps */ + stir_change_speed(self, 9600); + + /* Write out sensitivity and power values */ + stir_write_reg(self, STIR_REG_CTRL1, (tx_power & 0x3) << 1); + stir_write_reg(self, STIR_REG_CTRL2, (rx_sensitivity & 0x7) << 5); + + self->idle_periods = 0; + + /* Override the network functions we need to use */ + netdev->hard_start_xmit = stir_hard_xmit; + netdev->tx_timeout = stir_net_timeout; + netdev->watchdog_timeo = 250*HZ/1000; /* 250 ms > USB timeout */ + netdev->open = stir_net_open; + netdev->stop = stir_net_close; + netdev->get_stats = stir_net_get_stats; + netdev->do_ioctl = stir_net_ioctl; + + return register_netdev(netdev); +} + +/*------------------------------------------------------------------*/ +/* + * Cleanup the network side of the irda-usb instance + * Called when a USB instance is removed in stir_disconnect() + */ +static int stir_close(struct stir_cb *self) +{ + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); + + /* Remove netdevice */ + unregister_netdev(self->netdev); + + del_timer_sync(&self->submit_rx_timer); + del_timer_sync(&self->speed_timer); + + /* Free any allocated data buffers */ + if(self->rx_buff.head != NULL) { + kfree_skb(self->rx_buff.skb); + 
self->rx_buff.skb = NULL; + self->rx_buff.head = NULL; + } + if(self->tx_buff.head != NULL) { + kfree(self->tx_buff.head); + self->tx_buff.head = NULL; + } + if(self->rxdata != NULL) { + kfree(self->rxdata); + self->rxdata = NULL; + } + if (self->ctrl_buf != NULL) { + free_page((unsigned long) self->ctrl_buf); + self->ctrl_buf = NULL; + } + + return 0; +} + +/********************** USB CONFIG SUBROUTINES **********************/ +/* + * Various subroutines dealing with USB stuff we use to configure and + * initialise each irda-usb instance. + * These functions are used below in the main calls of the driver... + */ + +/*------------------------------------------------------------------*/ +/* + * Function stir_parse_endpoints(dev, ifnum) + * + * Parse the various endpoints and find the one we need. + * + * The endpoint are the pipes used to communicate with the USB device. + * The spec defines 2 endpoints of type bulk transfer, one in, and one out. + * These are used to pass frames back and forth with the dongle. + * Most dongle have also an interrupt endpoint, that will be probably + * documented in the next spec... + */ +static int stir_parse_endpoints(struct stir_cb *self, + struct usb_host_endpoint *endpoint, int ennum) +{ + int i; /* Endpoint index in table */ + + /* Init : no endpoints */ + self->bulk_in_ep = 0; + self->bulk_out_ep = 0; + + /* Let's look at all those endpoints */ + for(i = 0; i < ennum; i++) { + /* All those variables will get optimised by the compiler, + * so let's aim for clarity... 
- Jean II */ + __u8 ep; /* Endpoint address */ + __u8 dir; /* Endpoint direction */ + __u8 attr; /* Endpoint attribute */ + __u16 psize; /* Endpoint max packet size in bytes */ + + /* Get endpoint address, direction and attribute */ + ep = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; + dir = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK; + attr = endpoint[i].desc.bmAttributes; + psize = endpoint[i].desc.wMaxPacketSize; + + /* Is it a bulk endpoint ??? */ + if(attr == USB_ENDPOINT_XFER_BULK) { + /* We need to find an IN and an OUT */ + if(dir == USB_DIR_IN) { + /* This is our Rx endpoint */ + self->bulk_in_ep = ep; + } else { + /* This is our Tx endpoint */ + self->bulk_out_ep = ep; + self->bulk_out_mtu = psize; + } + } else { + ERROR("%s(), Unrecognized endpoint %02X.\n", + __FUNCTION__, ep); + } + } + + IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d)\n", + __FUNCTION__, self->bulk_in_ep, self->bulk_out_ep, + self->bulk_out_mtu); + /* Should be 8, 16, 32 or 64 bytes */ + ASSERT(self->bulk_out_mtu == 64, ;); + + return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0)); +} + +/*********************** USB DEVICE CALLBACKS ***********************/ +/* + * Main calls from the USB subsystem. + * Mostly registering a new irda-usb device and removing it.... + */ + +/*------------------------------------------------------------------*/ +/* + * This routine is called by the USB subsystem for each new device + * in the system. We need to check if the device is ours, and in + * this case start handling it. + * Note : it might be worth protecting this function by a global + * spinlock... Or not, because maybe USB already deal with that... 
+ */ +static int stir_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *dev = interface_to_usbdev(intf); + struct stir_cb *self = NULL; + struct net_device *net; + struct usb_host_interface *interface; + int ret = -ENOMEM; + int i; + + /* Note : the probe make sure to call us only for devices that + * matches the list of dongle (top of the file). So, we + * don't need to check if the dongle is really ours. + * Jean II */ + + MESSAGE("SigmaTel STIr4200 IRDA/USB found at address %d, " + "Vendor: %x, Product: %x\n", + dev->devnum, dev->descriptor.idVendor, + dev->descriptor.idProduct); + + /* + * Allocate network device container. + */ + net = alloc_irdadev(sizeof(*self)); + if(!net) + goto err_out; + + SET_MODULE_OWNER(net); + self = net->priv; + self->netdev = net; + spin_lock_init(&self->lock); + + /* Find our endpoints */ + interface = &intf->altsetting[0]; + if(!stir_parse_endpoints(self, interface->endpoint, + interface->desc.bNumEndpoints)) { + ERROR("%s(), Bogus endpoints...\n", __FUNCTION__); + ret = -EIO; + goto err_out1; + } + + /* Create all of the needed urbs */ + for (i = 0; i < STIR_MAX_RX_URBS; i++) { + self->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL); + if (!self->rx_urb[i]) + goto err_out2; + } + + self->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!self->tx_urb) + goto err_out2; + + self->speed_urb[0] = usb_alloc_urb(0, GFP_KERNEL); + if (!self->speed_urb[0]) + goto err_out3; + + self->speed_urb[1] = usb_alloc_urb(0, GFP_KERNEL); + if (!self->speed_urb[1]) + goto err_out4; + + self->present = 1; + self->netopen = 0; + self->usbdev = dev; + self->usbintf = intf; + + /* Bootstrap ZeroCopy Rx */ + if (self->rx_buff.head == NULL) { + self->rx_buff.truesize = IRDA_SKB_MAX_MTU; + self->rx_buff.skb = __dev_alloc_skb(self->rx_buff.truesize, GFP_KERNEL); + if (self->rx_buff.skb == NULL) { + ERROR("%s(), dev_alloc_skb() failed for rxbuf!\n", + __FUNCTION__); + goto err_out5; + } + skb_reserve(self->rx_buff.skb, 1); + 
self->rx_buff.head = self->rx_buff.skb->data; + } + + /* Create all other necessary buffers */ + if (stir_irda_init_iobuf(&self->tx_buff, 4000)) { + ERROR("%s(), init_iobuf() failed for txbuf!\n", + __FUNCTION__); + goto err_out6; + } + + self->rxdata = kmalloc(4096, GFP_KERNEL); + if (self->rxdata == NULL) { + ERROR("%s(), Can't allocate rxdata buf\n", + __FUNCTION__); + goto err_out7; + } + + if (self->ctrl_buf == NULL) { + self->ctrl_buf = (unsigned char *) __get_free_page(GFP_KERNEL); + if (self->ctrl_buf == NULL) { + ERROR("%s(), Can't allocate ctrl buf\n", + __FUNCTION__); + goto err_out8; + } + } + + ret = stir_open(self); + if (ret) + goto err_out9; + + MESSAGE("IrDA: Registered SigmaTel device %s\n", net->name); + + usb_set_intfdata(intf, self); + + return 0; +err_out9: + free_page((unsigned long) self->ctrl_buf); +err_out8: + kfree(self->rxdata); +err_out7: + kfree(self->tx_buff.head); +err_out6: + kfree_skb(self->rx_buff.skb); +err_out5: + usb_free_urb(self->speed_urb[1]); +err_out4: + usb_free_urb(self->speed_urb[0]); +err_out3: + usb_free_urb(self->tx_urb); +err_out2: + for (i = 0; i < STIR_MAX_RX_URBS; i++) { + if (self->rx_urb[i]) + usb_free_urb(self->rx_urb[i]); + } +err_out1: + free_netdev(net); +err_out: + return ret; +} + +/*------------------------------------------------------------------*/ +/* + * The current irda-usb device is removed, the USB layer tell us + * to shut it down... + * One of the constraints is that when we exit this function, + * we cannot use the usb_device no more. Gone. Destroyed. kfree(). + * Most other subsystem allow you to destroy the instance at a time + * when it's convenient to you, to postpone it to a later date, but + * not the USB subsystem. + * So, we must make bloody sure that everything gets deactivated. 
+ * Jean II + */ +static void stir_disconnect(struct usb_interface *intf) +{ + unsigned long flags; + struct stir_cb *self = usb_get_intfdata(intf); + int i; + + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); + + usb_set_intfdata(intf, NULL); + if (!self) + return; + + /* Make sure that the Tx path is not executing. - Jean II */ + spin_lock_irqsave(&self->lock, flags); + + /* Oups ! We are not there any more. + * This will stop/desactivate the Tx path. - Jean II */ + self->present = 0; + + /* We need to have irq enabled to unlink the URBs. That's OK, + * at this point the Tx path is gone - Jean II */ + spin_unlock_irqrestore(&self->lock, flags); + + /* Hum... Check if networking is still active (avoid races) */ + if((self->netopen) || (self->irlap)) { + /* Accept no more transmissions */ + /*netif_device_detach(self->netdev);*/ + netif_stop_queue(self->netdev); + /* Stop all the receive URBs */ + for (i = 0; i < STIR_MAX_RX_URBS; i++) + usb_unlink_urb(self->rx_urb[i]); + /* Cancel Tx URB. + * Toggle flags to make sure it's synchronous. 
*/ + self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK; + usb_unlink_urb(self->tx_urb); + self->speed_urb[0]->transfer_flags &= ~URB_ASYNC_UNLINK; + usb_unlink_urb(self->speed_urb[0]); + self->speed_urb[1]->transfer_flags &= ~URB_ASYNC_UNLINK; + usb_unlink_urb(self->speed_urb[0]); + } + + /* Cleanup the device stuff */ + stir_close(self); + /* No longer attached to USB bus */ + self->usbdev = NULL; + self->usbintf = NULL; + + /* Clean up our urbs */ + for (i = 0; i < STIR_MAX_RX_URBS; i++) + usb_free_urb(self->rx_urb[i]); + /* Clean up Tx and speed URB */ + usb_free_urb(self->tx_urb); + usb_free_urb(self->speed_urb[0]); + usb_free_urb(self->speed_urb[1]); + + free_netdev(self->netdev); + + IRDA_DEBUG(0, "%s(), SigmaTel Disconnected\n", __FUNCTION__); +} + +/*------------------------------------------------------------------*/ +/* + * USB device callbacks + */ +static struct usb_driver irda_driver = { + .name = "stir4200", + .probe = stir_probe, + .disconnect = stir_disconnect, + .id_table = dongles, + .owner = THIS_MODULE, +}; + +/************************* MODULE CALLBACKS *************************/ +/* + * Deal with module insertion/removal + * Mostly tell USB about our existence + */ + +/*------------------------------------------------------------------*/ +/* + * Module insertion + */ +int __init stir_init(void) +{ + if (usb_register(&irda_driver) < 0) + return -1; + + MESSAGE("SigmaTel support registered\n"); + return 0; +} +module_init(stir_init); + +/*------------------------------------------------------------------*/ +/* + * Module removal + */ +void __exit stir_cleanup(void) +{ + /* Deregister the driver and remove all pending instances */ + usb_deregister(&irda_driver); +} +module_exit(stir_cleanup); + +/*------------------------------------------------------------------*/ +/* + * Module parameters + */ +MODULE_PARM(qos_mtt_bits, "i"); +MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time"); +MODULE_PARM(rx_sensitivity, "i"); 
+MODULE_PARM_DESC(rx_sensitivity, "Set Receiver sensitivity (0-7, 0 is most sensitive)"); +MODULE_PARM(tx_power, "i"); +MODULE_PARM_DESC(tx_power, "Set Transmitter power (0-3, 0 is highest power)"); + +MODULE_AUTHOR("Paul Stewart , Roman Weissgaerber , Dag Brattli and Jean Tourrilhes "); +MODULE_DESCRIPTION("IrDA-USB Dongle Driver for SigmaTel STIr4200"); +MODULE_LICENSE("GPL"); diff -Nru a/drivers/net/irda/stir4200.h b/drivers/net/irda/stir4200.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/net/irda/stir4200.h Wed Oct 22 10:40:11 2003 @@ -0,0 +1,154 @@ +/***************************************************************************** +* +* Filename: stir4200.h +* Version: 0.2 +* Description: IrDA-USB Driver +* Status: Experimental +* Author: Paul Stewart +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+* +*****************************************************************************/ + +/* These are definitions out of the SigmaTel STIr4200 datasheet, plus a + * sprinkling of additional constants and data structs inspired by irda-usb.h + */ + +#define STIR_IRDA_HEADER 4 +#define STIR_MIN_RTT 500 +#define STIR_CTRL_TIMEOUT 100 /* milliseconds */ +#define STIR_BULK_TIMEOUT 200 /* milliseconds */ +#define STIR_IRDA_RXFRAME_LEN 64 +#define STIR_FIFO_SIZE 4096 +#define STIR_IDLE_TIMEOUT 10 /* milliseconds */ +#define STIR_IDLE_PERIODS 700 /* roughly 7 second idle window */ +#define STIR_MIN_SPEED_DELAY 75 /* milliseconds */ + +#define STIR_MAX_ACTIVE_RX_URBS 1 /* Don't touch !!! */ +#define STIR_MAX_RX_URBS (STIR_MAX_ACTIVE_RX_URBS + 1) + +#define STIR_BOF 0x7E /* Beginning of frame */ +#define STIR_XBOF 0x7F +#define STIR_EOF 0x7E /* End of frame */ + +#define STIR_REQ_WRITEREG 0x40 +#define STIR_REQ_WRITEREG_MULTI 0x00 +#define STIR_REQ_WRITEREG_SINGLE 0x03 +#define STIR_REQ_READ 0xC0 +#define STIR_REQ_READ_REG 0x01 +#define STIR_REQ_READ_ROM 0x02 + +#define STIR_REG_MODE 1 +#define STIR_MODE_FIR 0x80 +#define STIR_MODE_SIR 0x20 +#define STIR_MODE_ASK 0x10 +#define STIR_MODE_FASTRX 0x08 +#define STIR_MODE_FFRSTEN 0x04 +#define STIR_MODE_NRESET 0x02 +#define STIR_MODE_2400 0x01 + +#define STIR_REG_PDCLK 2 +#define STIR_PDCLK_4000000 0x02 +#define STIR_PDCLK_115200 0x09 +#define STIR_PDCLK_57600 0x13 +#define STIR_PDCLK_38400 0x1D +#define STIR_PDCLK_19200 0x3B +#define STIR_PDCLK_9600 0x77 +#define STIR_PDCLK_2400 0xDF /* also set bit 0 of STIR_REG_MODE */ + +#define STIR_REG_CTRL1 3 +#define STIR_CTRL1_SDMODE 0x80 +#define STIR_CTRL1_RXSLOW 0x40 +#define STIR_CTRL1_TXPWD 0x10 +#define STIR_CTRL1_RXPWD 0x08 +#define STIR_CTRL1_TXPWR0 0x00 /* 0 = highest power */ +#define STIR_CTRL1_TXPWR1 0x02 +#define STIR_CTRL1_TXPWR2 0x04 +#define STIR_CTRL1_TXPWR3 0x06 /* 3 = lowest power */ +#define STIR_CTRL1_SRESET 0x01 + +#define STIR_REG_CTRL2 4 +#define STIR_CTRL2_FIR_1 0x00 
+#define STIR_CTRL2_FIR_2 0x20 +#define STIR_CTRL2_FIR_3 0x40 +#define STIR_CTRL2_FIR_4 0x60 +#define STIR_CTRL2_FIR_5 0x80 +#define STIR_CTRL2_SIR_4 0x00 +#define STIR_CTRL2_SIR_8 0x20 +#define STIR_CTRL2_SIR_12 0x40 +#define STIR_CTRL2_SIR_16 0x60 +#define STIR_CTRL2_SIR_20 0x80 +#define STIR_CTRL2_SIR_24 0xA0 +#define STIR_CTRL2_SIR_28 0xC0 +#define STIR_CTRL2_SPWITDH 0x08 +#define STIR_CTRL2_REVID 0x03 + +#define STIR_REG_FIFOCTL 5 +#define STIR_FIFOCTL_DIR 0x10 +#define STIR_FIFOCTL_CLR 0x08 +#define STIR_FIFOCTL_EMPTY 0x04 + +#define STIR_REG_FIFOCNT1 6 +#define STIR_REG_FIFOCNT2 7 + +#define STIR_REG_IRDIG 9 +#define STIR_IRDIG_RXHIGH 0x80 +#define STIR_IRDIG_RXLOW 0x40 + +#define STIR_REG_TEST 15 +#define STIR_TEST_PLLDOWN 0x80 +#define STIR_TEST_LOOPIR 0x40 +#define STIR_TEST_LOOPUSB 0x20 +#define STIR_TEST_TSTENA 0x10 +#define STIR_TEST_TSTOSC 0x0F + +struct stir_cb { + struct usb_device *usbdev; /* init: probe_irda */ + struct usb_interface *usbintf; /* init: probe_irda */ + int netopen; /* Device is active for network */ + int present; /* Device is present on the bus */ + __u8 bulk_in_ep; /* Rx Endpoint assignments */ + __u8 bulk_out_ep; /* Tx Endpoint assignments */ + __u16 bulk_out_mtu; /* Max Tx packet size in bytes */ + + struct urb *rx_urb[STIR_MAX_RX_URBS];/* used to receive data frames */ + struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */ + struct urb *tx_urb; /* URB used to send data frames */ + + struct net_device *netdev; /* Yes! we are some kind of netdev. 
*/ + struct net_device_stats stats; + struct irlap_cb *irlap; /* The link layer we are binded to */ + + struct qos_info qos; + + struct timeval stamp; /* For mtt calculation */ + + spinlock_t lock; /* For serializing operations */ + + __u32 speed; /* Current speed */ + __u32 new_speed; + + iobuff_t tx_buff; + iobuff_t rx_buff; + unsigned char *rxdata; + + unsigned int idle_periods; + struct timer_list submit_rx_timer; + + struct urb *speed_urb[2]; + unsigned char *ctrl_buf; + struct timer_list speed_timer; +}; + diff -Nru a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c --- a/drivers/net/smc-ultra.c Wed Oct 22 10:40:02 2003 +++ b/drivers/net/smc-ultra.c Wed Oct 22 10:40:02 2003 @@ -122,6 +122,14 @@ #define ULTRA_IO_EXTENT 32 #define EN0_ERWCNT 0x08 /* Early receive warning count. */ + +static void ultra_poll(struct net_device *dev) +{ + disable_irq(dev->irq); + ei_interrupt(dev->irq, dev, NULL); + enable_irq(dev->irq); +} + /* Probe for the Ultra. This looks like a 8013 with the station address PROM at I/O ports +8 to +13, with a checksum following. @@ -134,6 +142,9 @@ SET_MODULE_OWNER(dev); +#ifdef HAVE_POLL_CONTROLLER + dev->poll_controller = &ultra_poll; +#endif if (base_addr > 0x1ff) /* Check a single specified location. */ return ultra_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. 
*/ diff -Nru a/drivers/net/tlan.c b/drivers/net/tlan.c --- a/drivers/net/tlan.c Wed Oct 22 10:40:02 2003 +++ b/drivers/net/tlan.c Wed Oct 22 10:40:02 2003 @@ -346,6 +346,8 @@ static void TLan_EeReceiveByte( u16, u8 *, int ); static int TLan_EeReadByte( struct net_device *, u8, u8 * ); +static void TLan_Poll(struct net_device *); + static void TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) @@ -893,6 +895,9 @@ dev->get_stats = &TLan_GetStats; dev->set_multicast_list = &TLan_SetMulticastList; dev->do_ioctl = &TLan_ioctl; +#ifdef HAVE_POLL_CONTROLLER + dev->poll_controller = &TLan_Poll; +#endif dev->tx_timeout = &TLan_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1176,7 +1181,14 @@ return IRQ_HANDLED; } /* TLan_HandleInterrupts */ - +#ifdef HAVE_POLL_CONTROLLER +static void TLan_Poll(struct net_device *dev) +{ + disable_irq(dev->irq); + TLan_HandleInterrupt(dev->irq, dev, NULL); + enable_irq(dev->irq); +} +#endif /*************************************************************** diff -Nru a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c --- a/drivers/net/tulip/tulip_core.c Wed Oct 22 10:40:07 2003 +++ b/drivers/net/tulip/tulip_core.c Wed Oct 22 10:40:07 2003 @@ -247,6 +247,7 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev); static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void set_rx_mode(struct net_device *dev); +static void poll_tulip(struct net_device *dev); @@ -1632,6 +1633,9 @@ dev->get_stats = tulip_get_stats; dev->do_ioctl = private_ioctl; dev->set_multicast_list = set_rx_mode; +#ifdef HAVE_POLL_CONTROLLER + dev->poll_controller = &poll_tulip; +#endif if (register_netdev(dev)) goto err_out_free_ring; @@ -1787,6 +1791,24 @@ /* pci_power_off (pdev, -1); */ } + + +#ifdef HAVE_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ + +static void poll_tulip (struct net_device *dev) +{ + disable_irq(dev->irq); + tulip_interrupt (dev->irq, dev, NULL); + enable_irq(dev->irq); +} + +#endif static struct pci_driver tulip_driver = { diff -Nru a/drivers/pci/quirks.c b/drivers/pci/quirks.c --- a/drivers/pci/quirks.c Wed Oct 22 10:40:01 2003 +++ b/drivers/pci/quirks.c Wed Oct 22 10:40:01 2003 @@ -748,6 +748,60 @@ sis_96x_compatible = 1; } +#ifdef CONFIG_SCSI_SATA +static void __init quirk_intel_ide_combined(struct pci_dev *pdev) +{ + u8 prog, comb, tmp; + + /* + * Narrow down to Intel SATA PCI devices. + */ + switch (pdev->device) { + /* PCI ids taken from drivers/scsi/ata_piix.c */ + case 0x24d1: + case 0x24df: + case 0x25a3: + case 0x25b0: + break; + default: + /* we do not handle this PCI device */ + return; + } + + /* + * Read combined mode register. + */ + pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */ + tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */ + if (tmp == 0x4) /* bits 10x */ + comb = (1 << 0); /* SATA port 0, PATA port 1 */ + else if (tmp == 0x6) /* bits 11x */ + comb = (1 << 2); /* PATA port 0, SATA port 1 */ + else + return; /* not in combined mode */ + + /* + * Read programming interface register. + * (Tells us if it's legacy or native mode) + */ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); + + /* if SATA port is in native mode, we're ok. */ + if (prog & comb) + return; + + /* SATA port is in legacy mode. Reserve port so that + * IDE driver does not attempt to use it. If request_region + * fails, it will be obvious at boot time, so we don't bother + * checking return values. + */ + if (comb == (1 << 0)) + request_region(0x1f0, 8, "libata"); /* port 0 */ + else + request_region(0x170, 8, "libata"); /* port 1 */ +} +#endif /* CONFIG_SCSI_SATA */ + /* * The main table of quirks. 
* @@ -850,6 +904,14 @@ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc }, + +#ifdef CONFIG_SCSI_SATA + /* Fixup BIOSes that configure Parallel ATA (PATA / IDE) and + * Serial ATA (SATA) into the same PCI ID. + */ + { PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_ANY_ID, + quirk_intel_ide_combined }, +#endif /* CONFIG_SCSI_SATA */ { 0 } }; diff -Nru a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig --- a/drivers/scsi/Kconfig Wed Oct 22 10:40:09 2003 +++ b/drivers/scsi/Kconfig Wed Oct 22 10:40:09 2003 @@ -403,6 +403,58 @@ To compile this driver as a module, choose M here: the module will be called megaraid. +config SCSI_SATA + bool "Serial ATA (SATA) support" + depends on SCSI && EXPERIMENTAL + help + This driver family supports Serial ATA host controllers + and devices. + + If unsure, say N. + +config SCSI_SATA_SVW + tristate "ServerWorks Frodo / Apple K2 SATA support (EXPERIMENTAL)" + depends on SCSI_SATA && PCI && EXPERIMENTAL + help + This option enables support for Broadcom/Serverworks/Apple K2 + SATA support. + + If unsure, say N. + +config SCSI_ATA_PIIX + tristate "Intel PIIX/ICH SATA support" + depends on SCSI_SATA && PCI + help + This option enables support for ICH5 Serial ATA. + If PATA support was enabled previously, this enables + support for select Intel PIIX/ICH PATA host controllers. + + If unsure, say N. + +config SCSI_SATA_PROMISE + tristate "Promise SATA support" + depends on SCSI_SATA && PCI && EXPERIMENTAL + help + This option enables support for Promise Serial ATA. + + If unsure, say N. + +config SCSI_SATA_SIL + tristate "Silicon Image SATA support" + depends on SCSI_SATA && PCI && BROKEN + help + This option enables support for Silicon Image Serial ATA. + + If unsure, say N. 
+ +config SCSI_SATA_VIA + tristate "VIA SATA support" + depends on SCSI_SATA && PCI && EXPERIMENTAL + help + This option enables support for VIA Serial ATA. + + If unsure, say N. + config SCSI_BUSLOGIC tristate "BusLogic SCSI support" depends on (PCI || ISA) && SCSI diff -Nru a/drivers/scsi/Makefile b/drivers/scsi/Makefile --- a/drivers/scsi/Makefile Wed Oct 22 10:40:09 2003 +++ b/drivers/scsi/Makefile Wed Oct 22 10:40:09 2003 @@ -112,6 +112,11 @@ obj-$(CONFIG_SCSI_CPQFCTS) += cpqfc.o obj-$(CONFIG_SCSI_LASI700) += lasi700.o 53c700.o obj-$(CONFIG_SCSI_NSP32) += nsp32.o +obj-$(CONFIG_SCSI_SATA_SVW) += libata.o sata_svw.o +obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o +obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o +obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o +obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o obj-$(CONFIG_ARM) += arm/ @@ -146,6 +151,7 @@ NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o cpqfc-objs := cpqfcTSinit.o cpqfcTScontrol.o cpqfcTSi2c.o \ cpqfcTSworker.o cpqfcTStrigger.o +libata-objs := libata-core.o libata-scsi.o # Files generated that shall be removed upon make clean clean-files := 53c7xx_d.h 53c700_d.h \ diff -Nru a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/ata_piix.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,590 @@ +/* + + ata_piix.c - Intel PATA/SATA controllers + + + Copyright 2003 Red Hat Inc + Copyright 2003 Jeff Garzik + + + Copyright header from piix.c: + + Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer + Copyright (C) 1998-2000 Andre Hedrick + Copyright (C) 2003 Red Hat Inc + + May be copied or modified under the terms of the GNU General Public License + + */ +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "ata_piix" +#define DRV_VERSION "0.95" + +enum { + PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ + ICH5_PCS = 0x92, /* port 
control and status */ + + PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */ + + PIIX_COMB_PRI = (1 << 0), /* combined mode, PATA primary */ + PIIX_COMB_SEC = (1 << 1), /* combined mode, PATA secondary */ + + PIIX_80C_PRI = (1 << 5) | (1 << 4), + PIIX_80C_SEC = (1 << 7) | (1 << 6), + + ich5_pata = 0, + ich5_sata = 1, + piix4_pata = 2, +}; + +static int piix_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent); + +static void piix_pata_phy_reset(struct ata_port *ap); +static void piix_sata_phy_reset(struct ata_port *ap); +static void piix_sata_port_disable(struct ata_port *ap); +static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio); +static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma); + +static unsigned int in_module_init = 1; + +static struct pci_device_id piix_pci_tbl[] = { +#ifdef ATA_ENABLE_PATA + { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata }, + { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, + { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, +#endif + + { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + + { } /* terminate list */ +}; + +static struct pci_driver piix_pci_driver = { + .name = DRV_NAME, + .id_table = piix_pci_tbl, + .probe = piix_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template piix_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + 
.dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, +}; + +static struct ata_port_operations piix_pata_ops = { + .port_disable = ata_port_disable, + .set_piomode = piix_set_piomode, + .set_udmamode = piix_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .check_status = ata_check_status_pio, + .exec_command = ata_exec_command_pio, + + .phy_reset = piix_pata_phy_reset, + .phy_config = pata_phy_config, + + .bmdma_start = ata_bmdma_start_pio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + + .irq_handler = ata_interrupt, +}; + +static struct ata_port_operations piix_sata_ops = { + .port_disable = piix_sata_port_disable, + .set_piomode = piix_set_piomode, + .set_udmamode = piix_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .check_status = ata_check_status_pio, + .exec_command = ata_exec_command_pio, + + .phy_reset = piix_sata_phy_reset, + .phy_config = pata_phy_config, /* not a typo */ + + .bmdma_start = ata_bmdma_start_pio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + + .irq_handler = ata_interrupt, +}; + +static struct ata_port_info piix_port_info[] = { + /* ich5_pata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */ + .port_ops = &piix_pata_ops, + }, + + /* ich5_sata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | + ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &piix_sata_ops, + }, + + /* piix4_pata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */ + .port_ops = &piix_pata_ops, + }, +}; + +static struct pci_bits piix_enable_bits[] = { + { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ + { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ +}; + 
+MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik"); +MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, piix_pci_tbl); + +/** + * piix_pata_cbl_detect - Probe host controller cable detect info + * @ap: Port for which cable detect info is desired + * + * Read 80c cable indicator from SATA PCI device's PCI config + * register. This register is normally set by firmware (BIOS). + * + * LOCKING: + * None (inherited from caller). + */ +static void piix_pata_cbl_detect(struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + u8 tmp, mask; + + /* no 80c support in host controller? */ + if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0) + goto cbl40; + + /* check BIOS cable detect results */ + mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; + pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); + if ((tmp & mask) == 0) + goto cbl40; + + ap->cbl = ATA_CBL_PATA80; + return; + +cbl40: + ap->cbl = ATA_CBL_PATA40; + ap->udma_mask &= ATA_UDMA_MASK_40C; +} + +/** + * piix_pata_phy_reset - Probe specified port on PATA host controller + * @ap: Port to probe + * + * Probe PATA phy. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_pata_phy_reset(struct ata_port *ap) +{ + if (!pci_test_config_bits(ap->host_set->pdev, + &piix_enable_bits[ap->port_no])) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); + return; + } + + piix_pata_cbl_detect(ap); + + ata_port_probe(ap); + + ata_bus_reset(ap); +} + +/** + * piix_pcs_probe - Probe SATA port configuration and status register + * @ap: Port to probe + * @have_port: (output) Non-zero if SATA port is enabled + * @have_device: (output) Non-zero if SATA phy indicates device present + * + * Reads SATA PCI device's PCI config register Port Configuration + * and Status (PCS) to determine port and device availability. + * + * LOCKING: + * None (inherited from caller). 
+ */ +static void piix_pcs_probe (struct ata_port *ap, unsigned int *have_port, + unsigned int *have_device) +{ + struct pci_dev *pdev = ap->host_set->pdev; + u16 pcs; + + pci_read_config_word(pdev, ICH5_PCS, &pcs); + + /* is SATA port enabled? */ + if (pcs & (1 << ap->port_no)) { + *have_port = 1; + + if (pcs & (1 << (ap->port_no + 4))) + *have_device = 1; + } +} + +/** + * piix_pcs_disable - Disable SATA port + * @ap: Port to disable + * + * Disable SATA phy for specified port. + * + * LOCKING: + * None (inherited from caller). + */ +static void piix_pcs_disable (struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + u16 pcs; + + pci_read_config_word(pdev, ICH5_PCS, &pcs); + + if (pcs & (1 << ap->port_no)) { + pcs &= ~(1 << ap->port_no); + pci_write_config_word(pdev, ICH5_PCS, pcs); + } +} + +/** + * piix_sata_phy_reset - Probe specified port on SATA host controller + * @ap: Port to probe + * + * Probe SATA phy. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_sata_phy_reset(struct ata_port *ap) +{ + unsigned int have_port = 0, have_dev = 0; + + if (!pci_test_config_bits(ap->host_set->pdev, + &piix_enable_bits[ap->port_no])) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); + return; + } + + piix_pcs_probe(ap, &have_port, &have_dev); + + /* if port not enabled, exit */ + if (!have_port) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: SATA port disabled. ignoring.\n", + ap->id); + return; + } + + /* if port enabled but no device, disable port and exit */ + if (!have_dev) { + piix_sata_port_disable(ap); + printk(KERN_INFO "ata%u: SATA port has no device. disabling.\n", + ap->id); + return; + } + + ap->cbl = ATA_CBL_SATA; + + ata_port_probe(ap); + + ata_bus_reset(ap); +} + +/** + * piix_sata_port_disable - Disable SATA port + * @ap: Port to disable. + * + * Disable SATA port. + * + * LOCKING: + * None (inherited from caller). 
+ */ + +static void piix_sata_port_disable(struct ata_port *ap) +{ + ata_port_disable(ap); + piix_pcs_disable(ap); +} + +/** + * piix_set_piomode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: um + * @pio: PIO mode, 0 - 4 + * + * Set PIO mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + struct pci_dev *dev = ap->host_set->pdev; + unsigned int is_slave = (adev->flags & ATA_DFLAG_MASTER) ? 0 : 1; + unsigned int master_port= ap->port_no ? 0x42 : 0x40; + unsigned int slave_port = 0x44; + u16 master_data; + u8 slave_data; + + static const /* ISP RTC */ + u8 timings[][2] = { { 0, 0 }, + { 0, 0 }, + { 1, 0 }, + { 2, 1 }, + { 2, 3 }, }; + + pci_read_config_word(dev, master_port, &master_data); + if (is_slave) { + master_data |= 0x4000; + /* enable PPE, IE and TIME */ + master_data |= 0x0070; + pci_read_config_byte(dev, slave_port, &slave_data); + slave_data &= (ap->port_no ? 0x0f : 0xf0); + slave_data |= + (timings[pio][0] << 2) | + (timings[pio][1] << (ap->port_no ? 4 : 0)); + } else { + master_data &= 0xccf8; + /* enable PPE, IE and TIME */ + master_data |= 0x0007; + master_data |= + (timings[pio][0] << 12) | + (timings[pio][1] << 8); + } + pci_write_config_word(dev, master_port, master_data); + if (is_slave) + pci_write_config_byte(dev, slave_port, slave_data); +} + +/** + * piix_set_udmamode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: um + * @udma: udma mode, 0 - 6 + * + * Set UDMA mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + struct pci_dev *dev = ap->host_set->pdev; + u8 maslave = ap->port_no ? 
0x42 : 0x40; + u8 speed = udma; + unsigned int drive_dn = (ap->port_no ? 2 : 0) + adev->devno; + int a_speed = 3 << (drive_dn * 4); + int u_flag = 1 << drive_dn; + int v_flag = 0x01 << drive_dn; + int w_flag = 0x10 << drive_dn; + int u_speed = 0; + int sitre; + u16 reg4042, reg44, reg48, reg4a, reg54; + u8 reg55; + + pci_read_config_word(dev, maslave, ®4042); + DPRINTK("reg4042 = 0x%04x\n", reg4042); + sitre = (reg4042 & 0x4000) ? 1 : 0; + pci_read_config_word(dev, 0x44, ®44); + pci_read_config_word(dev, 0x48, ®48); + pci_read_config_word(dev, 0x4a, ®4a); + pci_read_config_word(dev, 0x54, ®54); + pci_read_config_byte(dev, 0x55, ®55); + + switch(speed) { + case XFER_UDMA_4: + case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break; + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_3: + case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break; + case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break; + default: + BUG(); + return; + } + + if (!(reg48 & u_flag)) + pci_write_config_word(dev, 0x48, reg48|u_flag); + if (speed == XFER_UDMA_5) { + pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); + } else { + pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); + } + if (!(reg4a & u_speed)) { + pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); + pci_write_config_word(dev, 0x4a, reg4a|u_speed); + } + if (speed > XFER_UDMA_2) { + if (!(reg54 & v_flag)) { + pci_write_config_word(dev, 0x54, reg54|v_flag); + } + } else { + pci_write_config_word(dev, 0x54, reg54 & ~v_flag); + } +} + +/** + * piix_probe_combined - Determine if PATA and SATA are combined + * @pdev: PCI device to examine + * @mask: (output) zero, %PIIX_COMB_PRI or %PIIX_COMB_SEC + * + * Determine if BIOS has secretly stuffed a PATA port into our + * otherwise-beautiful SATA PCI device. + * + * LOCKING: + * Inherited from PCI layer (may sleep). 
+ */ +static void piix_probe_combined (struct pci_dev *pdev, unsigned int *mask) +{ + u8 tmp; + + pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */ + tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */ + + /* backwards from what one might expect */ + if (tmp == 0x4) /* bits 10x */ + *mask |= PIIX_COMB_SEC; + if (tmp == 0x6) /* bits 11x */ + *mask |= PIIX_COMB_PRI; +} + +/** + * piix_init_one - Register PIIX ATA PCI device with kernel services + * @pdev: PCI device to register + * @ent: Entry in piix_pci_tbl matching with @pdev + * + * Called from kernel PCI layer. We probe for combined mode (sigh), + * and then hand over control to libata, for it to do the rest. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, or -ERRNO value. + */ + +static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_port_info *port_info[2]; + unsigned int combined = 0, n_ports = 1; + unsigned int pata_comb = 0, sata_comb = 0; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* no hotplugging support (FIXME) */ + if (!in_module_init) + return -ENODEV; + + port_info[0] = &piix_port_info[ent->driver_data]; + port_info[1] = NULL; + if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) + piix_probe_combined(pdev, &combined); + + if (combined & PIIX_COMB_PRI) + sata_comb = 1; + else if (combined & PIIX_COMB_SEC) + pata_comb = 1; + + if (pata_comb || sata_comb) { + port_info[sata_comb] = &piix_port_info[ent->driver_data]; + port_info[sata_comb]->host_flags |= ATA_FLAG_SLAVE_POSS; /* sigh */ + port_info[pata_comb] = &piix_port_info[ich5_pata]; /*ich5-specific*/ + n_ports++; + + printk(KERN_WARNING DRV_NAME ": combined mode detected\n"); + } + + return ata_pci_init_one(pdev, port_info, n_ports); +} + +/** + * piix_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init piix_init(void) +{ + int rc; + + 
DPRINTK("pci_module_init\n"); + rc = pci_module_init(&piix_pci_driver); + if (rc) + return rc; + + in_module_init = 0; + + DPRINTK("done\n"); + return 0; +} + +/** + * piix_exit - + * + * LOCKING: + * + */ + +static void __exit piix_exit(void) +{ + pci_unregister_driver(&piix_pci_driver); +} + +module_init(piix_init); +module_exit(piix_exit); + diff -Nru a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/libata-core.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,3292 @@ +/* + libata-core.c - helper library for ATA + + Copyright 2003 Red Hat, Inc. All rights reserved. + Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. 
+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include +#include +#include + +#include "libata.h" + +static void atapi_cdb_send(struct ata_port *ap); +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout); +static void __ata_dev_select (struct ata_port *ap, unsigned int device); +static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append); +static void ata_dma_complete(struct ata_port *ap, u8 host_stat, + unsigned int done_late); +static void ata_host_set_pio(struct ata_port *ap); +static void ata_host_set_udma(struct ata_port *ap); +static void ata_dev_set_pio(struct ata_port *ap, unsigned int device); +static void ata_dev_set_udma(struct ata_port *ap, unsigned int device); + +static unsigned int ata_unique_id = 1; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Library module for ATA devices"); +MODULE_LICENSE("GPL"); + +static const char * thr_state_name[] = { + "THR_UNKNOWN", + "THR_PORT_RESET", + "THR_AWAIT_DEATH", + "THR_PROBE_FAILED", + "THR_IDLE", + "THR_PROBE_SUCCESS", + "THR_PROBE_START", + "THR_PIO_POLL", + "THR_PIO_TMOUT", + "THR_PIO", + "THR_PIO_LAST", + "THR_PIO_LAST_POLL", + "THR_PIO_ERR", + "THR_PACKET", +}; + +/** + * ata_thr_state_name - convert thread state enum to string + * @thr_state: thread state to be converted to string + * + * Converts the specified thread state id to a constant C string. + * + * LOCKING: + * None. + * + * RETURNS: + * The THR_xxx-prefixed string naming the specified thread + * state id, or the string "". 
+ */ + +static const char *ata_thr_state_name(unsigned int thr_state) +{ + if (thr_state < ARRAY_SIZE(thr_state_name)) + return thr_state_name[thr_state]; + return ""; +} + +/** + * msleep - sleep for a number of milliseconds + * @msecs: number of milliseconds to sleep + * + * Issues schedule_timeout call for the specified number + * of milliseconds. + * + * LOCKING: + * None. + */ + +static void msleep(unsigned long msecs) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(msecs)); +} + +/** + * ata_tf_load_pio - send taskfile registers to host controller + * @ioaddr: set of IO ports to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller using PIO. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + outb(tf->ctl, ioaddr->ctl_addr); + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + outb(tf->hob_feature, ioaddr->error_addr); + outb(tf->hob_nsect, ioaddr->nsect_addr); + outb(tf->hob_lbal, ioaddr->lbal_addr); + outb(tf->hob_lbam, ioaddr->lbam_addr); + outb(tf->hob_lbah, ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + outb(tf->feature, ioaddr->error_addr); + outb(tf->nsect, ioaddr->nsect_addr); + outb(tf->lbal, ioaddr->lbal_addr); + outb(tf->lbam, ioaddr->lbam_addr); + outb(tf->lbah, ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + outb(tf->device, ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * ata_tf_load_mmio - send taskfile registers to host controller + * @ioaddr: set 
of IO ports to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller using MMIO. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + writeb(tf->ctl, ap->ioaddr.ctl_addr); + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writeb(tf->hob_feature, (void *) ioaddr->error_addr); + writeb(tf->hob_nsect, (void *) ioaddr->nsect_addr); + writeb(tf->hob_lbal, (void *) ioaddr->lbal_addr); + writeb(tf->hob_lbam, (void *) ioaddr->lbam_addr); + writeb(tf->hob_lbah, (void *) ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + writeb(tf->feature, (void *) ioaddr->error_addr); + writeb(tf->nsect, (void *) ioaddr->nsect_addr); + writeb(tf->lbal, (void *) ioaddr->lbal_addr); + writeb(tf->lbam, (void *) ioaddr->lbam_addr); + writeb(tf->lbah, (void *) ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + writeb(tf->device, (void *) ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * ata_exec_command_pio - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + + outb(tf->command, ap->ioaddr.cmdstat_addr); + ata_pause(ap); +} + + +/** + * ata_exec_command_mmio - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues MMIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + + writeb(tf->command, (void *) ap->ioaddr.cmdstat_addr); + ata_pause(ap); +} + +/** + * ata_exec - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. + * + * LOCKING: + * Obtains host_set lock. + */ + +static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) +{ + unsigned long flags; + + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->ops->exec_command(ap, tf); + spin_unlock_irqrestore(&ap->host_set->lock, flags); +} + +/** + * ata_tf_to_host - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * via PIO, with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * Obtains host_set lock. 
+ */ + +static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) +{ + init_MUTEX_LOCKED(&ap->sem); + + ap->ops->tf_load(ap, tf); + + ata_exec(ap, tf); +} + +/** + * ata_tf_to_host_nolock - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * via PIO, with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf) +{ + init_MUTEX_LOCKED(&ap->sem); + + ap->ops->tf_load(ap, tf); + ap->ops->exec_command(ap, tf); +} + +/** + * ata_tf_read_pio - input device's ATA taskfile shadow registers + * @ioaddr: set of IO ports from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf via PIO. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->nsect = inb(ioaddr->nsect_addr); + tf->lbal = inb(ioaddr->lbal_addr); + tf->lbam = inb(ioaddr->lbam_addr); + tf->lbah = inb(ioaddr->lbah_addr); + tf->device = inb(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); + tf->hob_feature = inb(ioaddr->error_addr); + tf->hob_nsect = inb(ioaddr->nsect_addr); + tf->hob_lbal = inb(ioaddr->lbal_addr); + tf->hob_lbam = inb(ioaddr->lbam_addr); + tf->hob_lbah = inb(ioaddr->lbah_addr); + } +} + +/** + * ata_tf_read_mmio - input device's ATA taskfile shadow registers + * @ioaddr: set of IO ports from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf via MMIO. + * + * LOCKING: + * Inherited from caller. 
+ */ + +void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->nsect = readb((void *)ioaddr->nsect_addr); + tf->lbal = readb((void *)ioaddr->lbal_addr); + tf->lbam = readb((void *)ioaddr->lbam_addr); + tf->lbah = readb((void *)ioaddr->lbah_addr); + tf->device = readb((void *)ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + writeb(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr); + tf->hob_feature = readb((void *)ioaddr->error_addr); + tf->hob_nsect = readb((void *)ioaddr->nsect_addr); + tf->hob_lbal = readb((void *)ioaddr->lbal_addr); + tf->hob_lbam = readb((void *)ioaddr->lbam_addr); + tf->hob_lbah = readb((void *)ioaddr->lbah_addr); + } +} + +/** + * ata_check_status_pio - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Reads ATA taskfile status register for currently-selected device + * via PIO and return it's value. This also clears pending interrupts + * from this device + * + * LOCKING: + * Inherited from caller. + */ +u8 ata_check_status_pio(struct ata_port *ap) +{ + return inb(ap->ioaddr.cmdstat_addr); +} + +/** + * ata_check_status_mmio - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Reads ATA taskfile status register for currently-selected device + * via MMIO and return it's value. This also clears pending interrupts + * from this device + * + * LOCKING: + * Inherited from caller. + */ +u8 ata_check_status_mmio(struct ata_port *ap) +{ + return readb((void *) ap->ioaddr.cmdstat_addr); +} + +static const char * udma_str[] = { + "UDMA/16", + "UDMA/25", + "UDMA/33", + "UDMA/44", + "UDMA/66", + "UDMA/100", + "UDMA/133", + "UDMA7", +}; + +/** + * ata_udma_string - convert UDMA bit offset to string + * @udma_mask: mask of bits supported; only highest bit counts. + * + * Determine string which represents the highest speed + * (highest bit in @udma_mask). + * + * LOCKING: + * None. 
+ * + * RETURNS: + * Constant C string representing highest speed listed in + * @udma_mask, or the constant C string "". + */ + +static const char *ata_udma_string(unsigned int udma_mask) +{ + int i; + + for (i = 7; i >= 0; i--) { + if (udma_mask & (1 << i)) + return udma_str[i]; + } + + return ""; +} + +/** + * ata_pio_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_pio_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + __ata_dev_select(ap, device); + + outb(0x55, ioaddr->nsect_addr); + outb(0xaa, ioaddr->lbal_addr); + + outb(0xaa, ioaddr->nsect_addr); + outb(0x55, ioaddr->lbal_addr); + + outb(0x55, ioaddr->nsect_addr); + outb(0xaa, ioaddr->lbal_addr); + + nsect = inb(ioaddr->nsect_addr); + lbal = inb(ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_mmio_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_mmio_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + __ata_dev_select(ap, device); + + writeb(0x55, (void *) ioaddr->nsect_addr); + writeb(0xaa, (void *) ioaddr->lbal_addr); + + writeb(0xaa, (void *) ioaddr->nsect_addr); + writeb(0x55, (void *) ioaddr->lbal_addr); + + writeb(0x55, (void *) ioaddr->nsect_addr); + writeb(0xaa, (void *) ioaddr->lbal_addr); + + nsect = readb((void *) ioaddr->nsect_addr); + lbal = readb((void *) ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_dev_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_dev_devchk(struct ata_port *ap, + unsigned int device) +{ + if (ap->flags & ATA_FLAG_MMIO) + return ata_mmio_devchk(ap, device); + return ata_pio_devchk(ap, device); +} + +/** + * ata_dev_classify - 
determine device type based on ATA-spec signature + * @tf: ATA taskfile register set for device to be identified + * + * Determine from taskfile register contents whether a device is + * ATA or ATAPI, as per "Signature and persistence" section + * of ATA/PI spec (volume 1, sect 5.14). + * + * LOCKING: + * None. + * + * RETURNS: + * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN + * the event of failure. + */ + +static unsigned int ata_dev_classify(struct ata_taskfile *tf) +{ + /* Apple's open source Darwin code hints that some devices only + * put a proper signature into the LBA mid/high registers, + * So, we only check those. It's sufficient for uniqueness. + */ + + if (((tf->lbam == 0) && (tf->lbah == 0)) || + ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { + DPRINTK("found ATA device by sig\n"); + return ATA_DEV_ATA; + } + + if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || + ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { + DPRINTK("found ATAPI device by sig\n"); + return ATA_DEV_ATAPI; + } + + DPRINTK("unknown device\n"); + return ATA_DEV_UNKNOWN; +} + +/** + * ata_dev_try_classify - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device, + unsigned int maybe_have_dev) +{ + struct ata_device *dev = &ap->device[device]; + struct ata_taskfile tf; + unsigned int class; + u8 err; + + __ata_dev_select(ap, device); + + memset(&tf, 0, sizeof(tf)); + + err = ata_chk_err(ap); + ap->ops->tf_read(ap, &tf); + + dev->class = ATA_DEV_NONE; + + /* see if device passed diags */ + if (err == 1) + /* do nothing */ ; + else if ((device == 0) && (err == 0x81)) + /* do nothing */ ; + else + return err; + + /* determine if device if ATA or ATAPI */ + class = ata_dev_classify(&tf); + if (class == ATA_DEV_UNKNOWN) + return err; + if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) + return err; + + dev->class = class; + + return err; +} + +/** + * ata_dev_id_string - + * @dev: + * @s: + * @ofs: + * 
@len: + * + * LOCKING: + * + * RETURNS: + * + */ + +unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s, + unsigned int ofs, unsigned int len) +{ + unsigned int c, ret = 0; + + while (len > 0) { + c = dev->id[ofs] >> 8; + *s = c; + s++; + + ret = c = dev->id[ofs] & 0xff; + *s = c; + s++; + + ofs++; + len -= 2; + } + + return ret; +} + +/** + * ata_dev_parse_strings - + * @dev: + * + * LOCKING: + */ + +static void ata_dev_parse_strings(struct ata_device *dev) +{ + assert (dev->class == ATA_DEV_ATA); + memcpy(dev->vendor, "ATA ", 8); + + ata_dev_id_string(dev, dev->product, ATA_ID_PROD_OFS, + sizeof(dev->product)); +} + +/** + * __ata_dev_select - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static void __ata_dev_select (struct ata_port *ap, unsigned int device) +{ + u8 tmp; + + if (device == 0) + tmp = ATA_DEVICE_OBS; + else + tmp = ATA_DEVICE_OBS | ATA_DEV1; + + if (ap->flags & ATA_FLAG_MMIO) { + writeb(tmp, (void *) ap->ioaddr.device_addr); + } else { + outb(tmp, ap->ioaddr.device_addr); + } + ata_pause(ap); /* needed; also flushes, for mmio */ +} + +/** + * ata_dev_select - + * @ap: + * @device: + * @wait: + * @can_sleep: + * + * LOCKING: + * + * RETURNS: + * + */ + +void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep) +{ + VPRINTK("ENTER, ata%u: device %u, wait %u\n", + ap->id, device, wait); + + if (wait) + ata_wait_idle(ap); + + __ata_dev_select(ap, device); + + if (wait) { + if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) + msleep(150); + ata_wait_idle(ap); + } +} + +/** + * ata_dump_id - + * @dev: + * + * LOCKING: + */ + +static inline void ata_dump_id(struct ata_device *dev) +{ + DPRINTK("49==0x%04x " + "53==0x%04x " + "63==0x%04x " + "64==0x%04x " + "75==0x%04x \n", + dev->id[49], + dev->id[53], + dev->id[63], + dev->id[64], + dev->id[75]); + DPRINTK("80==0x%04x " + "81==0x%04x " + "82==0x%04x " + "83==0x%04x " + "84==0x%04x \n", + dev->id[80], + dev->id[81], + 
dev->id[82], + dev->id[83], + dev->id[84]); + DPRINTK("88==0x%04x " + "93==0x%04x\n", + dev->id[88], + dev->id[93]); +} + +/** + * ata_dev_identify - obtain IDENTIFY x DEVICE page + * @ap: port on which device we wish to probe resides + * @device: device bus address, starting at zero + * + * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE + * command, and read back the 512-byte device information page. + * The device information page is fed to us via the standard + * PIO-IN protocol, but we hand-code it here. (TODO: investigate + * using standard PIO-IN paths) + * + * After reading the device information page, we use several + * bits of information from it to initialize data structures + * that will be used during the lifetime of the ata_device. + * Other data from the info page is used to disqualify certain + * older ATA devices we do not wish to support. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + */ + +static void ata_dev_identify(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + unsigned int i; + u16 tmp, udma_modes; + u8 status; + struct ata_taskfile tf; + unsigned int using_edd; + + if (!ata_dev_present(dev)) { + DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", + ap->id, device); + return; + } + + if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) + using_edd = 0; + else + using_edd = 1; + + DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); + + assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI || + dev->class == ATA_DEV_NONE); + + ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ + +retry: + ata_tf_init(ap, &tf, device); + tf.ctl |= ATA_NIEN; + tf.protocol = ATA_PROT_PIO_READ; + + if (dev->class == ATA_DEV_ATA) { + tf.command = ATA_CMD_ID_ATA; + DPRINTK("do ATA identify\n"); + } else { + tf.command = ATA_CMD_ID_ATAPI; + DPRINTK("do ATAPI identify\n"); + } + + ata_tf_to_host(ap, &tf); + + /* crazy ATAPI 
devices... */ + if (dev->class == ATA_DEV_ATAPI) + msleep(150); + + if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) + goto err_out; + + status = ata_chk_status(ap); + if (status & ATA_ERR) { + /* + * arg! EDD works for all test cases, but seems to return + * the ATA signature for some ATAPI devices. Until the + * reason for this is found and fixed, we fix up the mess + * here. If IDENTIFY DEVICE returns command aborted + * (as ATAPI devices do), then we issue an + * IDENTIFY PACKET DEVICE. + * + * ATA software reset (SRST, the default) does not appear + * to have this problem. + */ + if ((using_edd) && (tf.command == ATA_CMD_ID_ATA)) { + u8 err = ata_chk_err(ap); + if (err & ATA_ABORTED) { + dev->class = ATA_DEV_ATAPI; + goto retry; + } + } + goto err_out; + } + + /* make sure we have BSY=0, DRQ=1 */ + if ((status & ATA_DRQ) == 0) { + printk(KERN_WARNING "ata%u: dev %u (ATA%s?) not returning id page (0x%x)\n", + ap->id, device, + dev->class == ATA_DEV_ATA ? "" : "PI", + status); + goto err_out; + } + + /* read IDENTIFY [X] DEVICE page */ + if (ap->flags & ATA_FLAG_MMIO) { + for (i = 0; i < ATA_ID_WORDS; i++) + dev->id[i] = readw((void *)ap->ioaddr.data_addr); + } else + for (i = 0; i < ATA_ID_WORDS; i++) + dev->id[i] = inw(ap->ioaddr.data_addr); + + /* wait for host_idle */ + status = ata_wait_idle(ap); + if (status & (ATA_BUSY | ATA_DRQ)) { + printk(KERN_WARNING "ata%u: dev %u (ATA%s?) error after id page (0x%x)\n", + ap->id, device, + dev->class == ATA_DEV_ATA ? 
"" : "PI", + status); + goto err_out; + } + + ata_irq_on(ap); /* re-enable interrupts */ + + /* print device capabilities */ + printk(KERN_DEBUG "ata%u: dev %u cfg " + "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", + ap->id, device, dev->id[49], + dev->id[82], dev->id[83], dev->id[84], + dev->id[85], dev->id[86], dev->id[87], + dev->id[88]); + + /* + * common ATA, ATAPI feature tests + */ + + /* we require LBA and DMA support (bits 8 & 9 of word 49) */ + if (!ata_id_has_dma(dev) || !ata_id_has_lba(dev)) { + printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); + goto err_out_nosup; + } + + /* we require UDMA support */ + udma_modes = + tmp = dev->id[ATA_ID_UDMA_MODES]; + if ((tmp & 0xff) == 0) { + printk(KERN_DEBUG "ata%u: no udma\n", ap->id); + goto err_out_nosup; + } + + ata_dump_id(dev); + + ata_dev_parse_strings(dev); + + /* ATA-specific feature tests */ + if (dev->class == ATA_DEV_ATA) { + if (!ata_id_is_ata(dev)) /* sanity check */ + goto err_out_nosup; + + tmp = dev->id[ATA_ID_MAJOR_VER]; + for (i = 14; i >= 1; i--) + if (tmp & (1 << i)) + break; + + /* we require at least ATA-3 */ + if (i < 3) { + printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); + goto err_out_nosup; + } + + if (ata_id_has_lba48(dev)) { + dev->flags |= ATA_DFLAG_LBA48; + dev->n_sectors = ata_id_u64(dev, 100); + } else { + dev->n_sectors = ata_id_u32(dev, 60); + } + + ap->host->max_cmd_len = 16; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors%s\n", + ap->id, device, + ata_udma_string(udma_modes), + dev->n_sectors, + dev->flags & ATA_DFLAG_LBA48 ? 
" (lba48)" : ""); + } + + /* ATAPI-specific feature tests */ + else { + if (ata_id_is_ata(dev)) /* sanity check */ + goto err_out_nosup; + + /* see if 16-byte commands supported */ + tmp = dev->id[0] & 0x3; + if (tmp == 1) + ap->host->max_cmd_len = 16; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", + ap->id, device, + ata_udma_string(udma_modes)); + } + + DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); + return; + +err_out_nosup: + printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", + ap->id, device); +err_out: + ata_irq_on(ap); /* re-enable interrupts */ + dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */ + DPRINTK("EXIT, err\n"); +} + +/** + * ata_port_reset - + * @ap: + * + * LOCKING: + */ + +static void ata_port_reset(struct ata_port *ap) +{ + unsigned int i, found = 0; + + ap->ops->phy_reset(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + goto err_out; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + ata_dev_identify(ap, i); + if (ata_dev_present(&ap->device[i])) { + found = 1; + if (ap->ops->dev_config) + ap->ops->dev_config(ap, &ap->device[i]); + } + } + + if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + goto err_out_disable; + + ap->ops->phy_config(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + goto err_out_disable; + + ap->thr_state = THR_PROBE_SUCCESS; + + return; + +err_out_disable: + ap->ops->port_disable(ap); +err_out: + ap->thr_state = THR_PROBE_FAILED; +} + +/** + * ata_port_probe - + * @ap: + * + * LOCKING: + */ + +void ata_port_probe(struct ata_port *ap) +{ + ap->flags &= ~ATA_FLAG_PORT_DISABLED; +} + +/** + * sata_phy_reset - + * @ap: + * + * LOCKING: + * + */ +void sata_phy_reset(struct ata_port *ap) +{ + u32 sstatus; + unsigned long timeout = jiffies + (HZ * 5); + + scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */ + scr_read(ap, SCR_CONTROL); /* dummy read; flush */ + udelay(400); /* FIXME: a guess */ + scr_write(ap, SCR_CONTROL, 0x300); /* issue phy 
wake/reset */ + + /* wait for phy to become ready, if necessary */ + do { + msleep(200); + sstatus = scr_read(ap, SCR_STATUS); + if ((sstatus & 0xf) != 1) + break; + } while (time_before(jiffies, timeout)); + + /* TODO: phy layer with polling, timeouts, etc. */ + if (sata_dev_present(ap)) + ata_port_probe(ap); + else { + sstatus = scr_read(ap, SCR_STATUS); + printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", + ap->id, sstatus); + ata_port_disable(ap); + } + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + ata_bus_reset(ap); +} + +/** + * ata_port_disable - + * @ap: + * + * LOCKING: + */ + +void ata_port_disable(struct ata_port *ap) +{ + ap->device[0].class = ATA_DEV_NONE; + ap->device[1].class = ATA_DEV_NONE; + ap->flags |= ATA_FLAG_PORT_DISABLED; +} + +/** + * pata_phy_config - + * @ap: + * + * LOCKING: + * + */ +void pata_phy_config(struct ata_port *ap) +{ + unsigned int force_pio; + + ata_host_set_pio(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + ata_host_set_udma(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + +#ifdef ATA_FORCE_PIO + force_pio = 1; +#else + force_pio = 0; +#endif + + if (force_pio) { + ata_dev_set_pio(ap, 0); + ata_dev_set_pio(ap, 1); + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + } else { + ata_dev_set_udma(ap, 0); + ata_dev_set_udma(ap, 1); + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + } + +} + +/** + * ata_busy_sleep - sleep until BSY clears, or timeout + * @ap: port containing status register to be polled + * @tmout_pat: impatience timeout + * @tmout: overall timeout + * + * LOCKING: + * + */ + +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout) +{ + unsigned long timer_start, timeout; + u8 status; + + status = ata_busy_wait(ap, ATA_BUSY, 300); + timer_start = jiffies; + timeout = timer_start + tmout_pat; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_busy_wait(ap, ATA_BUSY, 
3); + } + + if (status & ATA_BUSY) + printk(KERN_WARNING "ata%u is slow to respond, " + "please be patient\n", ap->id); + + timeout = timer_start + tmout; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_chk_status(ap); + } + + if (status & ATA_BUSY) { + printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", + ap->id, tmout / HZ); + return 1; + } + + return 0; +} + +static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + unsigned long timeout; + + /* if device 0 was found in ata_dev_devchk, wait for its + * BSY bit to clear + */ + if (dev0) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* if device 1 was found in ata_dev_devchk, wait for + * register access, then wait for BSY to clear + */ + timeout = jiffies + ATA_TMOUT_BOOT; + while (dev1) { + u8 nsect, lbal; + + __ata_dev_select(ap, 1); + if (ap->flags & ATA_FLAG_MMIO) { + nsect = readb((void *) ioaddr->nsect_addr); + lbal = readb((void *) ioaddr->lbal_addr); + } else { + nsect = inb(ioaddr->nsect_addr); + lbal = inb(ioaddr->lbal_addr); + } + if ((nsect == 1) && (lbal == 1)) + break; + if (time_after(jiffies, timeout)) { + dev1 = 0; + break; + } + msleep(50); /* give drive a breather */ + } + if (dev1) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* is all this really necessary? 
*/ + __ata_dev_select(ap, 0); + if (dev1) + __ata_dev_select(ap, 1); + if (dev0) + __ata_dev_select(ap, 0); +} + +/** + * ata_bus_edd - + * @ap: + * + * LOCKING: + * + */ + +static unsigned int ata_bus_edd(struct ata_port *ap) +{ + struct ata_taskfile tf; + + /* set up execute-device-diag (bus reset) taskfile */ + /* also, take interrupts to a known state (disabled) */ + DPRINTK("execute-device-diag\n"); + ata_tf_init(ap, &tf, 0); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_EDD; + tf.protocol = ATA_PROT_NODATA; + + /* do bus reset */ + ata_tf_to_host(ap, &tf); + + /* spec says at least 2ms. but who knows with those + * crazy ATAPI devices... + */ + msleep(150); + + return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); +} + +static unsigned int ata_bus_softreset(struct ata_port *ap, + unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + DPRINTK("ata%u: bus reset via SRST\n", ap->id); + + /* software reset. causes dev0 to be selected */ + if (ap->flags & ATA_FLAG_MMIO) { + writeb(ap->ctl, ioaddr->ctl_addr); + udelay(10); /* FIXME: flush */ + writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(10); /* FIXME: flush */ + writeb(ap->ctl, ioaddr->ctl_addr); + } else { + outb(ap->ctl, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl, ioaddr->ctl_addr); + } + + /* spec mandates ">= 2ms" before checking status. + * We wait 150ms, because that was the magic delay used for + * ATAPI devices in Hale Landis's ATADRVR, for the period of time + * between when the ATA command register is written, and then + * status is checked. Because waiting for "a while" before + * checking status is fine, post SRST, we perform this magic + * delay here as well. 
+ */ + msleep(150); + + ata_bus_post_reset(ap, devmask); + + return 0; +} + +/** + * ata_bus_reset - reset host port and associated ATA channel + * @ap: port to reset + * + * This is typically the first time we actually start issuing + * commands to the ATA channel. We wait for BSY to clear, then + * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its + * result. Determine what devices, if any, are on the channel + * by looking at the device 0/1 error register. Look at the signature + * stored in each device's taskfile registers, to determine if + * the device is ATA or ATAPI. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + * + * SIDE EFFECTS: + * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. + */ + +void ata_bus_reset(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + u8 err; + unsigned int dev0, dev1 = 0, rc = 0, devmask = 0; + + DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); + + /* set up device control */ + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + + /* determine if device 0/1 are present */ + dev0 = ata_dev_devchk(ap, 0); + if (slave_possible) + dev1 = ata_dev_devchk(ap, 1); + + if (dev0) + devmask |= (1 << 0); + if (dev1) + devmask |= (1 << 1); + + /* select device 0 again */ + __ata_dev_select(ap, 0); + + /* issue bus reset */ + if (ap->flags & ATA_FLAG_SRST) + rc = ata_bus_softreset(ap, devmask); + else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) + rc = ata_bus_edd(ap); + + if (rc) + goto err_out; + + /* + * determine by signature whether we have ATA or ATAPI devices + */ + err = ata_dev_try_classify(ap, 0, dev0); + if ((slave_possible) && (err != 0x81)) + ata_dev_try_classify(ap, 1, dev1); + + /* re-enable interrupts */ + ata_irq_on(ap); + + /* is double-select really necessary? 
*/ + if (ap->device[1].class != ATA_DEV_NONE) + __ata_dev_select(ap, 1); + if (ap->device[0].class != ATA_DEV_NONE) + __ata_dev_select(ap, 0); + + /* if no devices were detected, disable this port */ + if ((ap->device[0].class == ATA_DEV_NONE) && + (ap->device[1].class == ATA_DEV_NONE)) + goto err_out; + + DPRINTK("EXIT\n"); + return; + +err_out: + printk(KERN_ERR "ata%u: disabling port\n", ap->id); + ap->ops->port_disable(ap); + + DPRINTK("EXIT\n"); +} + +/** + * ata_host_set_pio - + * @ap: + * + * LOCKING: + */ + +static void ata_host_set_pio(struct ata_port *ap) +{ + struct ata_device *master, *slave; + unsigned int pio, i; + u16 mask; + + master = &ap->device[0]; + slave = &ap->device[1]; + + assert (ata_dev_present(master) || ata_dev_present(slave)); + + mask = ap->pio_mask; + if (ata_dev_present(master)) + mask &= (master->id[ATA_ID_PIO_MODES] & 0x03); + if (ata_dev_present(slave)) + mask &= (slave->id[ATA_ID_PIO_MODES] & 0x03); + + /* require pio mode 3 or 4 support for host and all devices */ + if (mask == 0) { + printk(KERN_WARNING "ata%u: no PIO3/4 support, ignoring\n", + ap->id); + goto err_out; + } + + pio = (mask & ATA_ID_PIO4) ? 4 : 3; + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (ata_dev_present(&ap->device[i])) { + ap->device[i].pio_mode = (pio == 3) ? + XFER_PIO_3 : XFER_PIO_4; + ap->ops->set_piomode(ap, &ap->device[i], pio); + } + + return; + +err_out: + ap->ops->port_disable(ap); +} + +/** + * ata_host_set_udma - + * @ap: + * + * LOCKING: + */ + +static void ata_host_set_udma(struct ata_port *ap) +{ + struct ata_device *master, *slave; + u16 mask; + unsigned int i, j; + int udma_mode = -1; + + master = &ap->device[0]; + slave = &ap->device[1]; + + assert (ata_dev_present(master) || ata_dev_present(slave)); + assert ((ap->flags & ATA_FLAG_PORT_DISABLED) == 0); + + DPRINTK("udma masks: host 0x%X, master 0x%X, slave 0x%X\n", + ap->udma_mask, + (!ata_dev_present(master)) ? 0xff : + (master->id[ATA_ID_UDMA_MODES] & 0xff), + (!ata_dev_present(slave)) ? 
0xff : + (slave->id[ATA_ID_UDMA_MODES] & 0xff)); + + mask = ap->udma_mask; + if (ata_dev_present(master)) + mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); + if (ata_dev_present(slave)) + mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); + + i = XFER_UDMA_7; + while (i >= XFER_UDMA_0) { + j = i - XFER_UDMA_0; + DPRINTK("mask 0x%X i 0x%X j %u\n", mask, i, j); + if (mask & (1 << j)) { + udma_mode = i; + break; + } + + i--; + } + + /* require udma for host and all attached devices */ + if (udma_mode < 0) { + printk(KERN_WARNING "ata%u: no UltraDMA support, ignoring\n", + ap->id); + goto err_out; + } + + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (ata_dev_present(&ap->device[i])) { + ap->device[i].udma_mode = udma_mode; + ap->ops->set_udmamode(ap, &ap->device[i], udma_mode); + } + + return; + +err_out: + ap->ops->port_disable(ap); +} + +/** + * ata_dev_set_xfermode - + * @ap: + * @dev: + * + * LOCKING: + */ + +static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) +{ + struct ata_taskfile tf; + + /* set up set-features taskfile */ + DPRINTK("set features - xfer mode\n"); + ata_tf_init(ap, &tf, dev->devno); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_SET_FEATURES; + tf.feature = SETFEATURES_XFER; + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + if (dev->flags & ATA_DFLAG_PIO) + tf.nsect = dev->pio_mode; + else + tf.nsect = dev->udma_mode; + + /* do bus reset */ + ata_tf_to_host(ap, &tf); + + /* crazy ATAPI devices... 
*/ + if (dev->class == ATA_DEV_ATAPI) + msleep(150); + + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + ata_irq_on(ap); /* re-enable interrupts */ + + ata_wait_idle(ap); + + DPRINTK("EXIT\n"); +} + +/** + * ata_dev_set_udma - + * @ap: + * @device: + * + * LOCKING: + */ + +static void ata_dev_set_udma(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + + if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + return; + + ata_dev_set_xfermode(ap, dev); + + assert((dev->udma_mode >= XFER_UDMA_0) && + (dev->udma_mode <= XFER_UDMA_7)); + printk(KERN_INFO "ata%u: dev %u configured for %s\n", + ap->id, device, + udma_str[dev->udma_mode - XFER_UDMA_0]); +} + +/** + * ata_dev_set_pio - + * @ap: + * @device: + * + * LOCKING: + */ + +static void ata_dev_set_pio(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + + if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + return; + + /* force PIO mode */ + dev->flags |= ATA_DFLAG_PIO; + + ata_dev_set_xfermode(ap, dev); + + assert((dev->pio_mode >= XFER_PIO_3) && + (dev->pio_mode <= XFER_PIO_4)); + printk(KERN_INFO "ata%u: dev %u configured for PIO%c\n", + ap->id, device, + dev->pio_mode == 3 ? 
'3' : '4'); +} + +/** + * ata_sg_clean - + * @qc: + * + * LOCKING: + */ + +static void ata_sg_clean(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + struct scatterlist *sg = qc->sg; + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + + assert(dir == SCSI_DATA_READ || dir == SCSI_DATA_WRITE); + assert(qc->flags & ATA_QCFLAG_SG); + assert(sg != NULL); + + if (!cmd->use_sg) + assert(qc->n_elem == 1); + + DPRINTK("unmapping %u sg elements\n", qc->n_elem); + + if (cmd->use_sg) + pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir); + else + pci_unmap_single(ap->host_set->pdev, sg[0].dma_address, + sg[0].length, dir); + + qc->flags &= ~ATA_QCFLAG_SG; + qc->sg = NULL; +} + +/** + * ata_fill_sg - + * @qc: + * + * LOCKING: + * + */ +void ata_fill_sg(struct ata_queued_cmd *qc) +{ + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + unsigned int i; + + assert(sg != NULL); + assert(qc->n_elem > 0); + + for (i = 0; i < qc->n_elem; i++) { + ap->prd[i].addr = cpu_to_le32(sg[i].dma_address); + ap->prd[i].flags_len = cpu_to_le32(sg[i].length); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", + i, le32_to_cpu(ap->prd[i].addr), le32_to_cpu(ap->prd[i].flags_len)); + } + ap->prd[qc->n_elem - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +/** + * ata_sg_setup_one - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup_one(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + struct scatterlist *sg = qc->sg; + unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG); + + assert(sg == &qc->sgent); + assert(qc->n_elem == 1); + + sg->page = virt_to_page(cmd->request_buffer); + sg->offset = (unsigned long) cmd->request_buffer & ~PAGE_MASK; + sg->length = cmd->request_bufflen; + + if (!have_sg) + return 0; + + sg->dma_address = pci_map_single(ap->host_set->pdev, + 
cmd->request_buffer, + cmd->request_bufflen, dir); + + DPRINTK("mapped buffer of %d bytes for %s\n", cmd->request_bufflen, + qc->flags & ATA_QCFLAG_WRITE ? "write" : "read"); + + return 0; +} + +/** + * ata_sg_setup - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + struct scatterlist *sg; + int n_elem; + unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG); + + VPRINTK("ENTER, ata%u, use_sg %d\n", ap->id, cmd->use_sg); + assert(cmd->use_sg > 0); + + sg = (struct scatterlist *)cmd->request_buffer; + if (have_sg) { + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + n_elem = pci_map_sg(ap->host_set->pdev, sg, cmd->use_sg, dir); + if (n_elem < 1) + return -1; + DPRINTK("%d sg elements mapped\n", n_elem); + } else { + n_elem = cmd->use_sg; + } + qc->n_elem = n_elem; + + return 0; +} + +/** + * ata_pio_poll - + * @ap: + * + * LOCKING: + * + * RETURNS: + * + */ + +static unsigned long ata_pio_poll(struct ata_port *ap) +{ + u8 status; + unsigned int poll_state = THR_UNKNOWN; + unsigned int reg_state = THR_UNKNOWN; + const unsigned int tmout_state = THR_PIO_TMOUT; + + switch (ap->thr_state) { + case THR_PIO: + case THR_PIO_POLL: + poll_state = THR_PIO_POLL; + reg_state = THR_PIO; + break; + case THR_PIO_LAST: + case THR_PIO_LAST_POLL: + poll_state = THR_PIO_LAST_POLL; + reg_state = THR_PIO_LAST; + break; + default: + BUG(); + break; + } + + status = ata_chk_status(ap); + if (status & ATA_BUSY) { + if (time_after(jiffies, ap->thr_timeout)) { + ap->thr_state = tmout_state; + return 0; + } + ap->thr_state = poll_state; + return ATA_SHORT_PAUSE; + } + + ap->thr_state = reg_state; + return 0; +} + +/** + * ata_pio_start - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void ata_pio_start (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + assert((qc->tf.protocol == 
ATA_PROT_PIO_READ) || + (qc->tf.protocol == ATA_PROT_PIO_WRITE)); + + qc->flags |= ATA_QCFLAG_POLL; + qc->tf.ctl |= ATA_NIEN; /* disable interrupts */ + ata_tf_to_host_nolock(ap, &qc->tf); + ata_thread_wake(ap, THR_PIO); +} + +/** + * ata_pio_complete - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_complete (struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + unsigned long flags; + u8 drv_stat; + + /* + * This is purely hueristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * THR_PIO_POLL state. + */ + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + msleep(2); + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + ap->thr_state = THR_PIO_LAST_POLL; + ap->thr_timeout = jiffies + ATA_TMOUT_PIO; + return; + } + } + + drv_stat = ata_wait_idle(ap); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + ap->thr_state = THR_PIO_ERR; + return; + } + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->thr_state = THR_IDLE; + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + ata_irq_on(ap); + + ata_qc_complete(qc, drv_stat, 0); +} + +/** + * ata_pio_sector - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_sector(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + struct scatterlist *sg; + Scsi_Cmnd *cmd; + unsigned char *buf; + u8 status; + + /* + * This is purely hueristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * THR_PIO_POLL state. 
+ */ + status = ata_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { + msleep(2); + status = ata_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ap->thr_state = THR_PIO_POLL; + ap->thr_timeout = jiffies + ATA_TMOUT_PIO; + return; + } + } + + /* handle BSY=0, DRQ=0 as error */ + if ((status & ATA_DRQ) == 0) { + ap->thr_state = THR_PIO_ERR; + return; + } + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + + cmd = qc->scsicmd; + sg = qc->sg; + + if (qc->cursect == (qc->nsect - 1)) + ap->thr_state = THR_PIO_LAST; + + buf = kmap(sg[qc->cursg].page) + + sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE); + + qc->cursect++; + qc->cursg_ofs++; + + if (cmd->use_sg) + if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) { + qc->cursg++; + qc->cursg_ofs = 0; + } + + DPRINTK("data %s, drv_stat 0x%X\n", + qc->flags & ATA_QCFLAG_WRITE ? "write" : "read", + status); + + /* do the actual data transfer */ + /* FIXME: mmio-ize */ + if (qc->flags & ATA_QCFLAG_WRITE) + outsl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS); + else + insl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS); + + kunmap(sg[qc->cursg].page); +} + +/** + * ata_eng_schedule - run an iteration of the pio/dma/whatever engine + * @ap: port on which activity will occur + * @eng: instance of engine + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ +static void ata_eng_schedule (struct ata_port *ap, struct ata_engine *eng) +{ + /* FIXME */ +} + +/** + * ata_eng_timeout - Handle timeout of queued command + * @ap: Port on which timed-out command is active + * + * Some part of the kernel (currently, only the SCSI layer) + * has noticed that the active command on port @ap has not + * completed after a specified length of time. Handle this + * condition by disabling DMA (if necessary) and completing + * transactions, with error if necessary. 
+ * + * This also handles the case of the "lost interrupt", where + * for some reason (possibly hardware bug, possibly driver bug) + * an interrupt was not delivered to the driver, even though the + * transaction completed successfully. + * + * LOCKING: + * Inherited from SCSI layer (none, can sleep) + */ + +void ata_eng_timeout(struct ata_port *ap) +{ + u8 host_stat, drv_stat; + struct ata_queued_cmd *qc; + + DPRINTK("ENTER\n"); + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (!qc) { + printk(KERN_ERR "ata%u: BUG: timeout without command\n", + ap->id); + goto out; + } + + switch (qc->tf.protocol) { + case ATA_PROT_DMA_READ: + case ATA_PROT_DMA_WRITE: + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + } else + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + printk(KERN_ERR "ata%u: DMA timeout, stat 0x%x\n", + ap->id, host_stat); + + ata_dma_complete(ap, host_stat, 1); + break; + + case ATA_PROT_NODATA: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + + default: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + } + +out: + DPRINTK("EXIT\n"); +} + +/** + * ata_qc_new - + * @ap: + * @dev: + * + * LOCKING: + */ + +static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) +{ + struct ata_queued_cmd *qc = NULL; + unsigned int i; + + for (i = 0; i < ATA_MAX_QUEUE; i++) + if (!test_and_set_bit(i, &ap->qactive)) { + qc = ata_qc_from_tag(ap, i); + break; + } + + if (qc) + qc->tag = i; + + return qc; +} + +/** + * ata_qc_new_init - + * @ap: + * @dev: + * + * LOCKING: + */ + +struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, + struct ata_device *dev) +{ + 
struct ata_queued_cmd *qc; + + qc = ata_qc_new(ap); + if (qc) { + qc->sg = NULL; + qc->flags = 0; + qc->scsicmd = NULL; + qc->ap = ap; + qc->dev = dev; + INIT_LIST_HEAD(&qc->node); + init_MUTEX_LOCKED(&qc->sem); + + ata_tf_init(ap, &qc->tf, dev->devno); + + if (likely((dev->flags & ATA_DFLAG_PIO) == 0)) + qc->flags |= ATA_QCFLAG_DMA; + if (dev->flags & ATA_DFLAG_LBA48) + qc->tf.flags |= ATA_TFLAG_LBA48; + } + + return qc; +} + +/** + * ata_qc_complete - + * @qc: + * @drv_stat: + * @done_late: + * + * LOCKING: + * + */ + +void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, unsigned int done_late) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + unsigned int tag, do_clear = 0; + + assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + if (likely(qc->flags & ATA_QCFLAG_SG)) + ata_sg_clean(qc); + + if (cmd) { + if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { + if (qc->flags & ATA_QCFLAG_ATAPI) + cmd->result = SAM_STAT_CHECK_CONDITION; + else + ata_to_sense_error(qc); + } else { + cmd->result = SAM_STAT_GOOD; + } + + qc->scsidone(cmd); + } + + qc->flags &= ~ATA_QCFLAG_ACTIVE; + tag = qc->tag; + if (likely(ata_tag_valid(tag))) { + if (tag == ap->active_tag) + ap->active_tag = ATA_TAG_POISON; + qc->tag = ATA_TAG_POISON; + do_clear = 1; + } + + up(&qc->sem); + + if (likely(do_clear)) + clear_bit(tag, &ap->qactive); +} + +/** + * ata_qc_push - + * @qc: + * @append: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ +static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append) +{ + struct ata_port *ap = qc->ap; + struct ata_engine *eng = &ap->eng; + + if (likely(append)) + list_add_tail(&qc->node, &eng->q); + else + list_add(&qc->node, &eng->q); + + if (!test_and_set_bit(ATA_EFLG_ACTIVE, &eng->flags)) + ata_eng_schedule(ap, eng); +} + +/** + * ata_qc_issue - + * @qc: + * + * LOCKING: + * + * RETURNS: + * + */ +int ata_qc_issue(struct ata_queued_cmd *qc) +{ + struct 
ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + unsigned int dma = qc->flags & ATA_QCFLAG_DMA; + + ata_dev_select(ap, qc->dev->devno, 1, 0); + + /* set up SG table */ + if (cmd->use_sg) { + if (ata_sg_setup(qc)) + goto err_out; + } else { + if (ata_sg_setup_one(qc)) + goto err_out; + } + + ap->ops->fill_sg(qc); + + qc->ap->active_tag = qc->tag; + qc->flags |= ATA_QCFLAG_ACTIVE; + + if (likely(dma)) { + ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_start(qc); /* initiate bmdma */ + } else + /* load tf registers, initiate polling pio */ + ata_pio_start(qc); + + return 0; + +err_out: + return -1; +} + +/** + * ata_bmdma_start_mmio - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE); + u8 host_stat, dmactl; + void *mmio = (void *) ap->ioaddr.bmdma_addr; + + /* load PRD table addr. */ + mb(); /* make sure PRD table writes are visible to controller */ + writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); + + /* specify data direction */ + /* FIXME: redundant to later start-dma command? */ + writeb(rw ? 0 : ATA_DMA_WR, mmio + ATA_DMA_CMD); + + /* clear interrupt, error bits */ + host_stat = readb(mmio + ATA_DMA_STATUS); + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, mmio + ATA_DMA_STATUS); + + /* issue r/w command */ + ap->ops->exec_command(ap, &qc->tf); + + /* start host DMA transaction */ + dmactl = readb(mmio + ATA_DMA_CMD); + writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); + + /* Strictly, one may wish to issue a readb() here, to + * flush the mmio write. However, control also passes + * to the hardware at this point, and it will interrupt + * us when we are to resume control. So, in effect, + * we don't care when the mmio write flushes. 
+ * Further, a read of the DMA status register _immediately_ + * following the write may not be what certain flaky hardware + * is expected, so I think it is best to not add a readb() + * without first all the MMIO ATA cards/mobos. + * Or maybe I'm just being paranoid. + */ +} + +/** + * ata_bmdma_start_pio - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_bmdma_start_pio (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE); + u8 host_stat, dmactl; + + /* load PRD table addr. */ + outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); + + /* specify data direction */ + /* FIXME: redundant to later start-dma command? */ + outb(rw ? 0 : ATA_DMA_WR, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* clear interrupt, error bits */ + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + /* issue r/w command */ + ap->ops->exec_command(ap, &qc->tf); + + /* start host DMA transaction */ + dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + outb(dmactl | ATA_DMA_START, + ap->ioaddr.bmdma_addr + ATA_DMA_CMD); +} + +/** + * ata_dma_complete - + * @ap: + * @host_stat: + * @done_late: + * + * LOCKING: + */ + +static void ata_dma_complete(struct ata_port *ap, u8 host_stat, + unsigned int done_late) +{ + VPRINTK("ENTER\n"); + + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + + /* clear start/stop bit */ + writeb(0, mmio + ATA_DMA_CMD); + + /* ack intr, err bits */ + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + mmio + ATA_DMA_STATUS); + } else { + /* clear start/stop bit */ + outb(0, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* ack intr, err bits */ + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + } + + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_altstatus(ap); /* dummy read */ + 
+ DPRINTK("host %u, host_stat==0x%X, drv_stat==0x%X\n", + ap->id, (u32) host_stat, (u32) ata_chk_status(ap)); + + /* get drive status; clear intr; complete txn */ + ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag), + ata_wait_idle(ap), done_late); +} + +/** + * ata_host_intr - Handle host interrupt for given (port, task) + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle host interrupt for given queued command. Currently, + * only DMA interrupts are handled. All other commands are + * handled via polling with interrupts disabled (nIEN bit). + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). + */ + +static inline unsigned int ata_host_intr (struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + u8 status, host_stat; + unsigned int handled = 0; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA_READ: + case ATA_PROT_DMA_WRITE: + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + } else + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat); + + if (!(host_stat & ATA_DMA_INTR)) { + ap->stats.idle_irq++; + break; + } + + ata_dma_complete(ap, host_stat, 0); + handled = 1; + break; + + case ATA_PROT_NODATA: /* command completion, but no data xfer */ + status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); + ata_qc_complete(qc, status, 0); + handled = 1; + break; + + default: + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + handled = 1; + ata_irq_ack(ap, 0); /* debug trap */ + printk(KERN_WARNING "ata%d: irq trap\n", ap->id); + } +#endif + break; + } + + return handled; +} + +/** + * ata_interrupt - + * @irq: + * @dev_instance: + * @regs: + * + * LOCKING: + * + * RETURNS: + * + */ + +irqreturn_t ata_interrupt 
(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + + /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ + spin_lock_irqsave(&host_set->lock, flags); + + for (i = 0; i < host_set->n_ports; i++) { + struct ata_port *ap; + + ap = host_set->ports[i]; + if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0)) + handled += ata_host_intr(ap, qc); + } + } + + spin_unlock_irqrestore(&host_set->lock, flags); + + return IRQ_RETVAL(handled); +} + +/** + * ata_thread_wake - + * @ap: + * @thr_state: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_thread_wake(struct ata_port *ap, unsigned int thr_state) +{ + assert(ap->thr_state == THR_IDLE); + ap->thr_state = thr_state; + up(&ap->thr_sem); +} + +/** + * ata_thread_timer - + * @opaque: + * + * LOCKING: + */ + +static void ata_thread_timer(unsigned long opaque) +{ + struct ata_port *ap = (struct ata_port *) opaque; + + up(&ap->thr_sem); +} + +/** + * ata_thread_iter - + * @ap: + * + * LOCKING: + * + * RETURNS: + * + */ + +static unsigned long ata_thread_iter(struct ata_port *ap) +{ + long timeout = 0; + + DPRINTK("ata%u: thr_state %s\n", + ap->id, ata_thr_state_name(ap->thr_state)); + + switch (ap->thr_state) { + case THR_UNKNOWN: + ap->thr_state = THR_PORT_RESET; + break; + + case THR_PROBE_START: + down(&ap->sem); + ap->thr_state = THR_PORT_RESET; + break; + + case THR_PORT_RESET: + ata_port_reset(ap); + break; + + case THR_PROBE_SUCCESS: + up(&ap->probe_sem); + ap->thr_state = THR_IDLE; + break; + + case THR_PROBE_FAILED: + up(&ap->probe_sem); + ap->thr_state = THR_AWAIT_DEATH; + break; + + case THR_AWAIT_DEATH: + timeout = -1; + break; + + case THR_IDLE: + timeout = 30 * HZ; + break; + + case THR_PIO: + ata_pio_sector(ap); + break; + + case 
THR_PIO_LAST: + ata_pio_complete(ap); + break; + + case THR_PIO_POLL: + case THR_PIO_LAST_POLL: + timeout = ata_pio_poll(ap); + break; + + case THR_PIO_TMOUT: + printk(KERN_ERR "ata%d: FIXME: THR_PIO_TMOUT\n", /* FIXME */ + ap->id); + timeout = 11 * HZ; + break; + + case THR_PIO_ERR: + printk(KERN_ERR "ata%d: FIXME: THR_PIO_ERR\n", /* FIXME */ + ap->id); + timeout = 11 * HZ; + break; + + case THR_PACKET: + atapi_cdb_send(ap); + break; + + default: + printk(KERN_DEBUG "ata%u: unknown thr state %s\n", + ap->id, ata_thr_state_name(ap->thr_state)); + break; + } + + DPRINTK("ata%u: new thr_state %s, returning %ld\n", + ap->id, ata_thr_state_name(ap->thr_state), timeout); + return timeout; +} + +/** + * ata_thread - + * @data: + * + * LOCKING: + * + * RETURNS: + * + */ + +static int ata_thread (void *data) +{ + struct ata_port *ap = data; + long timeout; + + daemonize ("katad-%u", ap->id); + allow_signal(SIGTERM); + + while (1) { + cond_resched(); + + timeout = ata_thread_iter(ap); + + if (signal_pending (current)) + flush_signals(current); + + if ((timeout < 0) || (ap->time_to_die)) + break; + + /* note sleeping for full timeout not guaranteed (that's ok) */ + if (timeout) { + mod_timer(&ap->thr_timer, jiffies + timeout); + down_interruptible(&ap->thr_sem); + + if (signal_pending (current)) + flush_signals(current); + + if (ap->time_to_die) + break; + } + } + + printk(KERN_DEBUG "ata%u: thread exiting\n", ap->id); + ap->thr_pid = -1; + complete_and_exit (&ap->thr_exited, 0); +} + +/** + * ata_thread_kill - kill per-port kernel thread + * @ap: port those thread is to be killed + * + * LOCKING: + * + */ + +static int ata_thread_kill(struct ata_port *ap) +{ + int ret = 0; + + if (ap->thr_pid >= 0) { + ap->time_to_die = 1; + wmb(); + ret = kill_proc(ap->thr_pid, SIGTERM, 1); + if (ret) + printk(KERN_ERR "ata%d: unable to kill kernel thread\n", + ap->id); + else + wait_for_completion(&ap->thr_exited); + } + + return ret; +} + +/** + * atapi_cdb_send - Write CDB bytes to 
hardware + * @ap: Port to which ATAPI device is attached. + * + * When device has indicated its readiness to accept + * a CDB, this function is called. Send the CDB. + * If DMA is to be performed, exit immediately. + * Otherwise, we are in polling mode, so poll + * status under operation succeeds or fails. + * + * LOCKING: + * Kernel thread context (may sleep) + */ + +static void atapi_cdb_send(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 status; + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) + goto err_out; + + /* make sure DRQ is set */ + status = ata_chk_status(ap); + if ((status & ATA_DRQ) == 0) + goto err_out; + + /* send SCSI cdb */ + /* FIXME: mmio-ize */ + DPRINTK("send cdb\n"); + outsl(ap->ioaddr.data_addr, + qc->scsicmd->cmnd, ap->host->max_cmd_len / 4); + + /* if we are DMA'ing, irq handler takes over from here */ + if (qc->tf.feature == ATAPI_PKT_DMA) + goto out; + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait 2\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) + goto err_out; + + /* wait for BSY,DRQ to clear */ + status = ata_wait_idle(ap); + if (status & (ATA_BUSY | ATA_DRQ)) + goto err_out; + + /* transaction completed, indicate such to scsi stack */ + ata_qc_complete(qc, status, 0); + ata_irq_on(ap); + +out: + ap->thr_state = THR_IDLE; + return; + +err_out: + ata_qc_complete(qc, ATA_ERR, 0); + goto out; +} + +/** + * ata_host_remove - + * @ap: + * @do_unregister: + * + * LOCKING: + */ + +static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) +{ + struct Scsi_Host *sh = ap->host; + + DPRINTK("ENTER\n"); + + if (do_unregister) + scsi_remove_host(sh); /* FIXME: check return val */ + + ata_thread_kill(ap); /* FIXME: check return val */ + + pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, 
ap->prd_dma); +} + +/** + * ata_host_init - + * @host: + * @ent: + * @port_no: + * + * LOCKING: + * + */ + +static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, + struct ata_host_set *host_set, + struct ata_probe_ent *ent, unsigned int port_no) +{ + unsigned int i; + + host->max_id = 16; + host->max_lun = 1; + host->max_channel = 1; + host->unique_id = ata_unique_id++; + host->max_cmd_len = 12; + scsi_set_device(host, &ent->pdev->dev); + + ap->flags = ATA_FLAG_PORT_DISABLED; + ap->id = host->unique_id; + ap->host = host; + ap->ctl = ATA_DEVCTL_OBS; + ap->host_set = host_set; + ap->port_no = port_no; + ap->pio_mask = ent->pio_mask; + ap->udma_mask = ent->udma_mask; + ap->flags |= ent->host_flags; + ap->ops = ent->port_ops; + ap->thr_state = THR_PROBE_START; + ap->cbl = ATA_CBL_NONE; + ap->device[0].flags = ATA_DFLAG_MASTER; + ap->active_tag = ATA_TAG_POISON; + + /* ata_engine init */ + ap->eng.flags = 0; + INIT_LIST_HEAD(&ap->eng.q); + + for (i = 0; i < ATA_MAX_DEVICES; i++) + ap->device[i].devno = i; + + init_completion(&ap->thr_exited); + init_MUTEX_LOCKED(&ap->probe_sem); + init_MUTEX_LOCKED(&ap->sem); + init_MUTEX_LOCKED(&ap->thr_sem); + + init_timer(&ap->thr_timer); + ap->thr_timer.function = ata_thread_timer; + ap->thr_timer.data = (unsigned long) ap; + +#ifdef ATA_IRQ_TRAP + ap->stats.unhandled_irq = 1; + ap->stats.idle_irq = 1; +#endif + + memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports)); +} + +/** + * ata_host_add - + * @ent: + * @host_set: + * @port_no: + * + * LOCKING: + * + * RETURNS: + * + */ + +static struct ata_port * ata_host_add(struct ata_probe_ent *ent, + struct ata_host_set *host_set, + unsigned int port_no) +{ + struct pci_dev *pdev = ent->pdev; + struct Scsi_Host *host; + struct ata_port *ap; + + DPRINTK("ENTER\n"); + host = scsi_host_alloc(ent->sht, sizeof(struct ata_port)); + if (!host) + return NULL; + + ap = (struct ata_port *) &host->hostdata[0]; + + ata_host_init(ap, host, host_set, ent, port_no); + + 
ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma); + if (!ap->prd) + goto err_out; + DPRINTK("prd alloc, virt %p, dma %x\n", ap->prd, ap->prd_dma); + + ap->thr_pid = kernel_thread(ata_thread, ap, CLONE_FS | CLONE_FILES); + if (ap->thr_pid < 0) { + printk(KERN_ERR "ata%d: unable to start kernel thread\n", + ap->id); + goto err_out_free; + } + + return ap; + +err_out_free: + pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); + +err_out: + scsi_host_put(host); + return NULL; +} + +/** + * ata_device_add - + * @ent: + * + * LOCKING: + * + * RETURNS: + * + */ + +int ata_device_add(struct ata_probe_ent *ent) +{ + unsigned int count = 0, i; + struct pci_dev *pdev = ent->pdev; + struct ata_host_set *host_set; + + DPRINTK("ENTER\n"); + /* alloc a container for our list of ATA ports (buses) */ + host_set = kmalloc(sizeof(struct ata_host_set) + + (ent->n_ports * sizeof(void *)), GFP_KERNEL); + if (!host_set) + return 0; + memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *))); + spin_lock_init(&host_set->lock); + + host_set->pdev = pdev; + host_set->n_ports = ent->n_ports; + host_set->irq = ent->irq; + host_set->mmio_base = ent->mmio_base; + + /* register each port bound to this device */ + for (i = 0; i < ent->n_ports; i++) { + struct ata_port *ap; + + ap = ata_host_add(ent, host_set, i); + if (!ap) + goto err_out; + + host_set->ports[i] = ap; + + /* print per-port info to dmesg */ + printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " + "bmdma 0x%lX irq %lu\n", + ap->id, + ap->flags & ATA_FLAG_SATA ? 
'S' : 'P', + ata_udma_string(ent->udma_mask), + ap->ioaddr.cmd_addr, + ap->ioaddr.ctl_addr, + ap->ioaddr.bmdma_addr, + ent->irq); + + count++; + } + + if (!count) { + kfree(host_set); + return 0; + } + + /* obtain irq, that is shared between channels */ + if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, + DRV_NAME, host_set)) + goto err_out; + + /* perform each probe synchronously */ + DPRINTK("probe begin\n"); + for (i = 0; i < count; i++) { + struct ata_port *ap; + int rc; + + ap = host_set->ports[i]; + + DPRINTK("ata%u: probe begin\n", ap->id); + up(&ap->sem); /* start probe */ + + DPRINTK("ata%u: probe-wait begin\n", ap->id); + down(&ap->probe_sem); /* wait for end */ + + DPRINTK("ata%u: probe-wait end\n", ap->id); + + rc = scsi_add_host(ap->host, &pdev->dev); + if (rc) { + printk(KERN_ERR "ata%u: scsi_add_host failed\n", + ap->id); + /* FIXME: do something useful here */ + /* FIXME: handle unconditional calls to + * scsi_scan_host and ata_host_remove, below, + * at the very least + */ + } + } + + /* probes are done, now scan each port's disk(s) */ + DPRINTK("probe begin\n"); + for (i = 0; i < count; i++) { + struct ata_port *ap = host_set->ports[i]; + + scsi_scan_host(ap->host); + } + + pci_set_drvdata(pdev, host_set); + + VPRINTK("EXIT, returning %u\n", ent->n_ports); + return ent->n_ports; /* success */ + +err_out: + for (i = 0; i < count; i++) { + ata_host_remove(host_set->ports[i], 1); + scsi_host_put(host_set->ports[i]->host); + } + kfree(host_set); + VPRINTK("EXIT, returning 0\n"); + return 0; +} + +/** + * ata_scsi_release - SCSI layer callback hook for host unload + * @host: libata host to be unloaded + * + * Performs all duties necessary to shut down a libata port: + * Kill port kthread, disable port, and release resources. + * + * LOCKING: + * Inherited from SCSI layer. + * + * RETURNS: + * One. 
+ */ + +int ata_scsi_release(struct Scsi_Host *host) +{ + struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; + + DPRINTK("ENTER\n"); + + ap->ops->port_disable(ap); + ata_host_remove(ap, 0); + + DPRINTK("EXIT\n"); + return 1; +} + +/** + * ata_std_ports - initialize ioaddr with standard port offsets. + * @ioaddr: + */ +void ata_std_ports(struct ata_ioports *ioaddr) +{ + ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; + ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; + ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; + ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; + ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; + ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; + ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; + ioaddr->cmdstat_addr = ioaddr->cmd_addr + ATA_REG_CMD; +} + +/** + * ata_pci_init_one - + * @pdev: + * @port_info: + * @n_ports: + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * + */ + +int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, + unsigned int n_ports) +{ + struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; + struct ata_port_info *port0, *port1; + u8 tmp8, mask; + unsigned int legacy_mode = 0; + int rc; + + DPRINTK("ENTER\n"); + + port0 = port_info[0]; + if (n_ports > 1) + port1 = port_info[1]; + else + port1 = port0; + + if ((port0->host_flags & ATA_FLAG_NO_LEGACY) == 0) { + /* TODO: support transitioning to native mode? */ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = (1 << 3); + } + + /* FIXME... 
*/ + if ((!legacy_mode) && (n_ports > 1)) { + printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); + return -EINVAL; + } + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + if (legacy_mode) { + if (!request_region(0x1f0, 8, "libata")) { + struct resource *conflict, res; + res.start = 0x1f0; + res.end = 0x1f0 + 8 - 1; + conflict = ____request_resource(&ioport_resource, &res); + if (!strcmp(conflict->name, "libata")) + legacy_mode |= (1 << 0); + else + printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n"); + } else + legacy_mode |= (1 << 0); + + if (!request_region(0x170, 8, "libata")) { + struct resource *conflict, res; + res.start = 0x170; + res.end = 0x170 + 8 - 1; + conflict = ____request_resource(&ioport_resource, &res); + if (!strcmp(conflict->name, "libata")) + legacy_mode |= (1 << 1); + else + printk(KERN_WARNING "ata: 0x170 IDE port busy\n"); + } else + legacy_mode |= (1 << 1); + } + + /* we have legacy mode, but all ports are unavailable */ + if (legacy_mode == (1 << 3)) { + rc = -EBUSY; + goto err_out_regions; + } + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + if (legacy_mode) { + probe_ent2 = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent2) { + rc = -ENOMEM; + goto err_out_free_ent; + } + + memset(probe_ent2, 0, sizeof(*probe_ent)); + probe_ent2->pdev = pdev; + INIT_LIST_HEAD(&probe_ent2->node); + } + + probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); + probe_ent->sht = port0->sht; + probe_ent->host_flags = port0->host_flags; + probe_ent->pio_mask = port0->pio_mask; + probe_ent->udma_mask = port0->udma_mask; + probe_ent->port_ops = port0->port_ops; + + if (legacy_mode) { + 
probe_ent->port[0].cmd_addr = 0x1f0; + probe_ent->port[0].ctl_addr = 0x3f6; + probe_ent->n_ports = 1; + probe_ent->irq = 14; + ata_std_ports(&probe_ent->port[0]); + + probe_ent2->port[0].cmd_addr = 0x170; + probe_ent2->port[0].ctl_addr = 0x376; + probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; + probe_ent2->n_ports = 1; + probe_ent2->irq = 15; + ata_std_ports(&probe_ent2->port[0]); + + probe_ent2->sht = port1->sht; + probe_ent2->host_flags = port1->host_flags; + probe_ent2->pio_mask = port1->pio_mask; + probe_ent2->udma_mask = port1->udma_mask; + probe_ent2->port_ops = port1->port_ops; + } else { + probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); + ata_std_ports(&probe_ent->port[0]); + probe_ent->port[0].ctl_addr = + pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; + + probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); + ata_std_ports(&probe_ent->port[1]); + probe_ent->port[1].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; + + probe_ent->n_ports = 2; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + } + + pci_set_master(pdev); + + /* FIXME: check ata_device_add return */ + if (legacy_mode) { + if (legacy_mode & (1 << 0)) + ata_device_add(probe_ent); + if (legacy_mode & (1 << 1)) + ata_device_add(probe_ent2); + kfree(probe_ent2); + } else { + ata_device_add(probe_ent); + assert(probe_ent2 == NULL); + } + kfree(probe_ent); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + if (legacy_mode & (1 << 0)) + release_region(0x1f0, 8); + if (legacy_mode & (1 << 1)) + release_region(0x170, 8); + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +/** + * ata_pci_remove_one - PCI layer callback for device removal + * @pdev: PCI device that was removed + * + * PCI layer indicates to libata via this hook that + * hot-unplug or module unload event has occured. 
+ * Handle this by unregistering all objects associated + * with this PCI device. Free those objects. Then finally + * release PCI resources and disable device. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + */ + +void ata_pci_remove_one (struct pci_dev *pdev) +{ + struct ata_host_set *host_set = pci_get_drvdata(pdev); + struct ata_port *ap; + unsigned int i; + + for (i = 0; i < host_set->n_ports; i++) { + ap = host_set->ports[i]; + + /* FIXME: check return val */ + scsi_remove_host(ap->host); + } + + free_irq(host_set->irq, host_set); + if (host_set->mmio_base) + iounmap(host_set->mmio_base); + + for (i = 0; i < host_set->n_ports; i++) { + Scsi_Host_Template *sht; + + ap = host_set->ports[i]; + sht = ap->host->hostt; + + ata_scsi_release(ap->host); + scsi_host_put(ap->host); /* FIXME: check return val */ + } + + kfree(host_set); + + pci_release_regions(pdev); + + for (i = 0; i < host_set->n_ports; i++) { + struct ata_ioports *ioaddr; + + ap = host_set->ports[i]; + ioaddr = &ap->ioaddr; + + if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { + if (ioaddr->cmd_addr == 0x1f0) + release_region(0x1f0, 8); + else if (ioaddr->cmd_addr == 0x170) + release_region(0x170, 8); + } + } + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +/* move to PCI subsystem */ +int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits) +{ + unsigned long tmp = 0; + + switch (bits->width) { + case 1: { + u8 tmp8 = 0; + pci_read_config_byte(pdev, bits->reg, &tmp8); + tmp = tmp8; + break; + } + case 2: { + u16 tmp16 = 0; + pci_read_config_word(pdev, bits->reg, &tmp16); + tmp = tmp16; + break; + } + case 4: { + u32 tmp32 = 0; + pci_read_config_dword(pdev, bits->reg, &tmp32); + tmp = tmp32; + break; + } + + default: + return -EINVAL; + } + + tmp &= bits->mask; + + return (tmp == bits->val) ? 
1 : 0; +} + +/** + * ata_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init ata_init(void) +{ + printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); + return 0; +} + +module_init(ata_init); + +/* + * libata is essentially a library of internal helper functions for + * low-level ATA host controller drivers. As such, the API/ABI is + * likely to change as new drivers are added and updated. + * Do not depend on ABI/API stability. + */ + +EXPORT_SYMBOL_GPL(pci_test_config_bits); +EXPORT_SYMBOL_GPL(ata_std_ports); +EXPORT_SYMBOL_GPL(ata_device_add); +EXPORT_SYMBOL_GPL(ata_qc_complete); +EXPORT_SYMBOL_GPL(ata_eng_timeout); +EXPORT_SYMBOL_GPL(ata_tf_load_pio); +EXPORT_SYMBOL_GPL(ata_tf_load_mmio); +EXPORT_SYMBOL_GPL(ata_tf_read_pio); +EXPORT_SYMBOL_GPL(ata_tf_read_mmio); +EXPORT_SYMBOL_GPL(ata_check_status_pio); +EXPORT_SYMBOL_GPL(ata_check_status_mmio); +EXPORT_SYMBOL_GPL(ata_exec_command_pio); +EXPORT_SYMBOL_GPL(ata_exec_command_mmio); +EXPORT_SYMBOL_GPL(ata_interrupt); +EXPORT_SYMBOL_GPL(ata_fill_sg); +EXPORT_SYMBOL_GPL(ata_bmdma_start_pio); +EXPORT_SYMBOL_GPL(ata_bmdma_start_mmio); +EXPORT_SYMBOL_GPL(ata_port_probe); +EXPORT_SYMBOL_GPL(sata_phy_reset); +EXPORT_SYMBOL_GPL(pata_phy_config); +EXPORT_SYMBOL_GPL(ata_bus_reset); +EXPORT_SYMBOL_GPL(ata_port_disable); +EXPORT_SYMBOL_GPL(ata_pci_init_one); +EXPORT_SYMBOL_GPL(ata_pci_remove_one); +EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); +EXPORT_SYMBOL_GPL(ata_scsi_error); +EXPORT_SYMBOL_GPL(ata_scsi_slave_config); +EXPORT_SYMBOL_GPL(ata_scsi_release); + diff -Nru a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/libata-scsi.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,1115 @@ +/* + libata-scsi.c - helper library for ATA + + Copyright 2003 Red Hat, Inc. All rights reserved. 
+ Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#include "libata.h" + +struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, + struct ata_device *dev, + Scsi_Cmnd *cmd, + void (*done)(Scsi_Cmnd *)) +{ + struct ata_queued_cmd *qc; + + qc = ata_qc_new_init(ap, dev); + if (qc) { + qc->scsicmd = cmd; + qc->scsidone = done; + + if (cmd->use_sg) { + qc->sg = (struct scatterlist *) cmd->request_buffer; + qc->n_elem = cmd->use_sg; + } else { + qc->sg = &qc->sgent; + qc->n_elem = 1; + } + } else { + cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); + done(cmd); + } + + return qc; +} + +/** + * ata_to_sense_error - + * @qc: + * @cmd: + * + * LOCKING: + */ + +void ata_to_sense_error(struct ata_queued_cmd *qc) +{ + Scsi_Cmnd *cmd = qc->scsicmd; + + cmd->result = SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = MEDIUM_ERROR; + cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? 
*/ + + /* additional-sense-code[-qualifier] */ + if ((qc->flags & ATA_QCFLAG_WRITE) == 0) { + cmd->sense_buffer[12] = 0x11; /* "unrecovered read error" */ + cmd->sense_buffer[13] = 0x04; + } else { + cmd->sense_buffer[12] = 0x0C; /* "write error - */ + cmd->sense_buffer[13] = 0x02; /* auto-reallocation failed" */ + } +} + +/** + * ata_scsi_slave_config - + * @sdev: + * + * LOCKING: + * + */ + +int ata_scsi_slave_config(struct scsi_device *sdev) +{ + sdev->use_10_for_rw = 1; + sdev->use_10_for_ms = 1; + + return 0; /* scsi layer doesn't check return value, sigh */ +} + +/** + * ata_scsi_error - SCSI layer error handler callback + * @host: SCSI host on which error occurred + * + * Handles SCSI-layer-thrown error events. + * + * LOCKING: + * Inherited from SCSI layer (none, can sleep) + * + * RETURNS: + * Zero. + */ + +int ata_scsi_error(struct Scsi_Host *host) +{ + struct ata_port *ap; + + DPRINTK("ENTER\n"); + + ap = (struct ata_port *) &host->hostdata[0]; + ap->ops->eng_timeout(ap); + + DPRINTK("EXIT\n"); + return 0; +} + +/** + * ata_scsi_rw_xlat - + * @qc: + * @scsicmd: + * @cmd_size: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd, + unsigned int cmd_size) +{ + struct ata_taskfile *tf = &qc->tf; + unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; + unsigned int dma = qc->flags & ATA_QCFLAG_DMA; + + qc->cursect = qc->cursg = qc->cursg_ofs = 0; + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->hob_nsect = 0; + tf->hob_lbal = 0; + tf->hob_lbam = 0; + tf->hob_lbah = 0; + + if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || + scsicmd[0] == READ_16) { + if (likely(dma)) { + if (lba48) + tf->command = ATA_CMD_READ_EXT; + else + tf->command = ATA_CMD_READ; + tf->protocol = ATA_PROT_DMA_READ; + } else { + if (lba48) + tf->command = ATA_CMD_PIO_READ_EXT; + else + tf->command = ATA_CMD_PIO_READ; + tf->protocol = ATA_PROT_PIO_READ; + } + qc->flags &= 
~ATA_QCFLAG_WRITE; + VPRINTK("reading\n"); + } else { + if (likely(dma)) { + if (lba48) + tf->command = ATA_CMD_WRITE_EXT; + else + tf->command = ATA_CMD_WRITE; + tf->protocol = ATA_PROT_DMA_WRITE; + } else { + if (lba48) + tf->command = ATA_CMD_PIO_WRITE_EXT; + else + tf->command = ATA_CMD_PIO_WRITE; + tf->protocol = ATA_PROT_PIO_WRITE; + } + qc->flags |= ATA_QCFLAG_WRITE; + VPRINTK("writing\n"); + } + + if (cmd_size == 10) { + if (lba48) { + tf->hob_nsect = scsicmd[7]; + tf->hob_lbal = scsicmd[2]; + + qc->nsect = ((unsigned int)scsicmd[7] << 8) | + scsicmd[8]; + } else { + /* if we don't support LBA48 addressing, the request + * -may- be too large. */ + if ((scsicmd[2] & 0xf0) || scsicmd[7]) + return 1; + + /* stores LBA27:24 in lower 4 bits of device reg */ + tf->device |= scsicmd[2]; + + qc->nsect = scsicmd[8]; + } + tf->device |= ATA_LBA; + + tf->nsect = scsicmd[8]; + tf->lbal = scsicmd[5]; + tf->lbam = scsicmd[4]; + tf->lbah = scsicmd[3]; + + VPRINTK("ten-byte command\n"); + return 0; + } + + if (cmd_size == 6) { + qc->nsect = tf->nsect = scsicmd[4]; + tf->lbal = scsicmd[3]; + tf->lbam = scsicmd[2]; + tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ + + VPRINTK("six-byte command\n"); + return 0; + } + + if (cmd_size == 16) { + /* rule out impossible LBAs and sector counts */ + if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11]) + return 1; + + if (lba48) { + tf->hob_nsect = scsicmd[12]; + tf->hob_lbal = scsicmd[6]; + tf->hob_lbam = scsicmd[5]; + tf->hob_lbah = scsicmd[4]; + + qc->nsect = ((unsigned int)scsicmd[12] << 8) | + scsicmd[13]; + } else { + /* once again, filter out impossible non-zero values */ + if (scsicmd[4] || scsicmd[5] || scsicmd[12] || + (scsicmd[6] & 0xf0)) + return 1; + + /* stores LBA27:24 in lower 4 bits of device reg */ + tf->device |= scsicmd[2]; + + qc->nsect = scsicmd[13]; + } + tf->device |= ATA_LBA; + + tf->nsect = scsicmd[13]; + tf->lbal = scsicmd[9]; + tf->lbam = scsicmd[8]; + tf->lbah = scsicmd[7]; + + 
VPRINTK("sixteen-byte command\n"); + return 0; + } + + DPRINTK("no-byte command\n"); + return 1; +} + +/** + * ata_scsi_rw_queue - + * @ap: + * @dev: + * @cmd: + * @done: + * @cmd_size: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev, + Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), + unsigned int cmd_size) +{ + struct ata_queued_cmd *qc; + u8 *scsicmd = cmd->cmnd; + + VPRINTK("ENTER\n"); + + if (unlikely(cmd->request_bufflen < 1)) { + printk(KERN_WARNING "ata%u(%u): empty request buffer\n", + ap->id, dev->devno); + goto err_out; + } + + qc = ata_scsi_qc_new(ap, dev, cmd, done); + if (!qc) + return; + + qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */ + + if (ata_scsi_rw_xlat(qc, scsicmd, cmd_size)) + goto err_out; + + /* select device, send command to hardware */ + if (ata_qc_issue(qc)) + goto err_out; + + VPRINTK("EXIT\n"); + return; + +err_out: + ata_bad_cdb(cmd, done); + DPRINTK("EXIT - badcmd\n"); +} + +/** + * ata_scsi_rbuf_get - Map response buffer. + * @cmd: SCSI command containing buffer to be mapped. + * @buf_out: Pointer to mapped area. + * + * Maps buffer contained within SCSI command @cmd. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * FIXME: kmap inside spin_lock_irqsave ok? + * + * RETURNS: + * Length of response buffer. + */ + +static unsigned int ata_scsi_rbuf_get(Scsi_Cmnd *cmd, u8 **buf_out) +{ + u8 *buf; + unsigned int buflen; + + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + buf = kmap(sg->page) + sg->offset; + buflen = sg->length; + } else { + buf = cmd->request_buffer; + buflen = cmd->request_bufflen; + } + + memset(buf, 0, buflen); + *buf_out = buf; + return buflen; +} + +/** + * ata_scsi_rbuf_put - Unmap response buffer. + * @cmd: SCSI command containing buffer to be unmapped. + * + * Unmaps response buffer contained within @cmd. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static inline void ata_scsi_rbuf_put(Scsi_Cmnd *cmd) +{ + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + kunmap(sg->page); + } +} + +/** + * ata_scsi_rbuf_fill - wrapper for SCSI command simulators + * @args: Port / device / SCSI command of interest. + * @actor: Callback hook for desired SCSI command simulator + * + * Takes care of the hard work of simulating a SCSI command... + * Mapping the response buffer, calling the command's handler, + * and handling the handler's return value. This return value + * indicates whether the handler wishes the SCSI command to be + * completed successfully, or not. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor) (struct ata_scsi_args *args, + u8 *rbuf, unsigned int buflen)) +{ + u8 *rbuf; + unsigned int buflen, rc; + Scsi_Cmnd *cmd = args->cmd; + + buflen = ata_scsi_rbuf_get(cmd, &rbuf); + rc = actor(args, rbuf, buflen); + ata_scsi_rbuf_put(cmd); + + if (rc) + ata_bad_cdb(cmd, args->done); + else { + cmd->result = SAM_STAT_GOOD; + args->done(cmd); + } +} + +/** + * ata_scsiop_inq_std - Simulate INQUIRY command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns standard device identification data associated + * with non-EVPD INQUIRY command output. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 hdr[] = { + TYPE_DISK, + 0, + 0x5, /* claim SPC-3 version compatibility */ + 2, + 96 - 4 + }; + + VPRINTK("ENTER\n"); + + memcpy(rbuf, hdr, sizeof(hdr)); + + if (buflen > 36) { + memcpy(&rbuf[8], args->dev->vendor, 8); + memcpy(&rbuf[16], args->dev->product, 16); + memcpy(&rbuf[32], DRV_VERSION, 4); + } + + if (buflen > 63) { + const u8 versions[] = { + 0x60, /* SAM-3 (no version claimed) */ + + 0x03, + 0x20, /* SBC-2 (no version claimed) */ + + 0x02, + 0x60 /* SPC-3 (no version claimed) */ + }; + + memcpy(rbuf + 59, versions, sizeof(versions)); + } + + return 0; +} + +/** + * ata_scsiop_inq_00 - Simulate INQUIRY EVPD page 0, list of pages + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns list of inquiry EVPD pages available. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 pages[] = { + 0x00, /* page 0x00, this page */ + 0x80, /* page 0x80, unit serial no page */ + 0x83 /* page 0x83, device ident page */ + }; + rbuf[3] = sizeof(pages); /* number of supported EVPD pages */ + + if (buflen > 6) + memcpy(rbuf + 4, pages, sizeof(pages)); + + return 0; +} + +/** + * ata_scsiop_inq_80 - Simulate INQUIRY EVPD page 80, device serial number + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns ATA device serial number. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 hdr[] = { + 0, + 0x80, /* this page code */ + 0, + ATA_SERNO_LEN, /* page len */ + }; + memcpy(rbuf, hdr, sizeof(hdr)); + + if (buflen > (ATA_SERNO_LEN + 4)) + ata_dev_id_string(args->dev, (unsigned char *) &rbuf[4], + ATA_ID_SERNO_OFS, ATA_SERNO_LEN); + + return 0; +} + +static const char *inq_83_str = "Linux ATA-SCSI simulator"; + +/** + * ata_scsiop_inq_83 - Simulate INQUIRY EVPD page 83, device identity + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns device identification. Currently hardcoded to + * return "Linux ATA-SCSI simulator". + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + rbuf[1] = 0x83; /* this page code */ + rbuf[3] = 4 + strlen(inq_83_str); /* page len */ + + /* our one and only identification descriptor (vendor-specific) */ + if (buflen > (strlen(inq_83_str) + 4 + 4)) { + rbuf[4 + 0] = 2; /* code set: ASCII */ + rbuf[4 + 3] = strlen(inq_83_str); + memcpy(rbuf + 4 + 4, inq_83_str, strlen(inq_83_str)); + } + + return 0; +} + +/** + * ata_scsiop_noop - + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * No operation. Simply returns success to caller, to indicate + * that the caller should successfully complete this SCSI command. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + return 0; +} + +/** + * ata_scsiop_sync_cache - Simulate SYNCHRONIZE CACHE command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Initiates flush of device's cache. + * + * TODO: + * Actually do this :) + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + + /* FIXME */ + return 1; +} + +/** + * ata_msense_push - Push data onto MODE SENSE data output buffer + * @ptr_io: (input/output) Location to store more output data + * @last: End of output data buffer + * @buf: Pointer to BLOB being added to output buffer + * @buflen: Length of BLOB + * + * Store MODE SENSE data on an output buffer. + * + * LOCKING: + * None. + */ + +static void ata_msense_push(u8 **ptr_io, const u8 *last, + const u8 *buf, unsigned int buflen) +{ + u8 *ptr = *ptr_io; + + if ((ptr + buflen - 1) > last) + return; + + memcpy(ptr, buf, buflen); + + ptr += buflen; + + *ptr_io = ptr; +} + +/** + * ata_msense_caching - Simulate MODE SENSE caching info page + * @dev: + * @ptr_io: + * @last: + * + * Generate a caching info page, which conditionally indicates + * write caching to the SCSI layer, depending on device + * capabilities. + * + * LOCKING: + * None. 
+ */ + +static unsigned int ata_msense_caching(struct ata_device *dev, u8 **ptr_io, + const u8 *last) +{ + u8 page[7] = { 0xf, 0, 0x10, 0, 0x8, 0xa, 0 }; + if (dev->flags & ATA_DFLAG_WCACHE) + page[6] = 0x4; + + ata_msense_push(ptr_io, last, page, sizeof(page)); + return sizeof(page); +} + +/** + * ata_msense_ctl_mode - Simulate MODE SENSE control mode page + * @dev: + * @ptr_io: + * @last: + * + * Generate a generic MODE SENSE control mode page. + * + * LOCKING: + * None. + */ + +static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) +{ + const u8 page[] = {0xa, 0xa, 2, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 30}; + + ata_msense_push(ptr_io, last, page, sizeof(page)); + return sizeof(page); +} + +/** + * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Simulate MODE SENSE commands. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + u8 *scsicmd = args->cmd->cmnd, *p, *last; + struct ata_device *dev = args->dev; + unsigned int page_control, six_byte, output_len; + + VPRINTK("ENTER\n"); + + six_byte = (scsicmd[0] == MODE_SENSE); + + /* we only support saved and current values (which we treat + * in the same manner) + */ + page_control = scsicmd[2] >> 6; + if ((page_control != 0) && (page_control != 3)) + return 1; + + if (six_byte) + output_len = 4; + else + output_len = 8; + + p = rbuf + output_len; + last = rbuf + buflen - 1; + + switch(scsicmd[2] & 0x3f) { + case 0x08: /* caching */ + output_len += ata_msense_caching(dev, &p, last); + break; + + case 0x0a: { /* control mode */ + output_len += ata_msense_ctl_mode(&p, last); + break; + } + + case 0x3f: /* all pages */ + output_len += ata_msense_caching(dev, &p, last); + output_len += ata_msense_ctl_mode(&p, last); + break; + 
+ default: /* invalid page code */ + return 1; + } + + if (six_byte) { + output_len--; + rbuf[0] = output_len; + } else { + output_len -= 2; + rbuf[0] = output_len >> 8; + rbuf[1] = output_len; + } + + return 0; +} + +/** + * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Simulate READ CAPACITY commands. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + u64 n_sectors = args->dev->n_sectors; + u32 tmp; + + VPRINTK("ENTER\n"); + + n_sectors--; /* one off */ + + tmp = n_sectors; /* note: truncates, if lba48 */ + if (args->cmd->cmnd[0] == READ_CAPACITY) { + rbuf[0] = tmp >> (8 * 3); + rbuf[1] = tmp >> (8 * 2); + rbuf[2] = tmp >> (8 * 1); + rbuf[3] = tmp; + + tmp = ATA_SECT_SIZE; + rbuf[6] = tmp >> 8; + rbuf[7] = tmp; + + } else { + rbuf[2] = n_sectors >> (8 * 7); + rbuf[3] = n_sectors >> (8 * 6); + rbuf[4] = n_sectors >> (8 * 5); + rbuf[5] = n_sectors >> (8 * 4); + rbuf[6] = tmp >> (8 * 3); + rbuf[7] = tmp >> (8 * 2); + rbuf[8] = tmp >> (8 * 1); + rbuf[9] = tmp; + + tmp = ATA_SECT_SIZE; + rbuf[12] = tmp >> 8; + rbuf[13] = tmp; + } + + return 0; +} + +/** + * ata_scsiop_report_luns - Simulate REPORT LUNS command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Simulate REPORT LUNS command. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ + + return 0; +} + +/** + * ata_scsi_badcmd - + * @cmd: + * @done: + * @asc: + * @ascq: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_badcmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), u8 asc, u8 ascq) +{ + DPRINTK("ENTER\n"); + cmd->result = SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = ILLEGAL_REQUEST; + cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */ + cmd->sense_buffer[12] = asc; + cmd->sense_buffer[13] = ascq; + + done(cmd); +} + +/** + * atapi_scsi_queuecmd - Send CDB to ATAPI device + * @ap: Port to which ATAPI device is attached. + * @dev: Target device for CDB. + * @cmd: SCSI command being sent to device. + * @done: SCSI command completion function. + * + * Sends CDB to ATAPI device. If the Linux SCSI layer sends a + * non-data command, then this function handles the command + * directly, via polling. Otherwise, the bmdma engine is started. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void atapi_scsi_queuecmd(struct ata_port *ap, struct ata_device *dev, + Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + struct ata_queued_cmd *qc; + u8 *scsicmd = cmd->cmnd, status; + unsigned int doing_dma = 0; + + VPRINTK("ENTER, drv_stat = 0x%x\n", ata_chk_status(ap)); + + if (cmd->sc_data_direction == SCSI_DATA_UNKNOWN) { + DPRINTK("unknown data, scsicmd 0x%x\n", scsicmd[0]); + ata_bad_cdb(cmd, done); + return; + } + + switch(scsicmd[0]) { + case READ_6: + case WRITE_6: + case MODE_SELECT: + case MODE_SENSE: + DPRINTK("read6/write6/modesel/modesense trap\n"); + ata_bad_scsiop(cmd, done); + return; + + default: + /* do nothing */ + break; + } + + qc = ata_scsi_qc_new(ap, dev, cmd, done); + if (!qc) { + printk(KERN_ERR "ata%u: command queue empty\n", ap->id); + return; + } + + qc->flags |= ATA_QCFLAG_ATAPI; + + qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + if (cmd->sc_data_direction == SCSI_DATA_WRITE) { + qc->flags |= ATA_QCFLAG_WRITE; + DPRINTK("direction: write\n"); + } + + qc->tf.command = ATA_CMD_PACKET; + + /* set up SG table */ + if (cmd->sc_data_direction == SCSI_DATA_NONE) { + ap->active_tag = qc->tag; + qc->flags |= ATA_QCFLAG_ACTIVE | ATA_QCFLAG_POLL; + qc->tf.protocol = ATA_PROT_ATAPI; + + ata_dev_select(ap, dev->devno, 1, 0); + + DPRINTK("direction: none\n"); + qc->tf.ctl |= ATA_NIEN; /* disable interrupts */ + ata_tf_to_host_nolock(ap, &qc->tf); + } else { + qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */ + qc->tf.feature = ATAPI_PKT_DMA; + qc->tf.protocol = ATA_PROT_ATAPI_DMA; + + doing_dma = 1; + + /* select device, send command to hardware */ + if (ata_qc_issue(qc)) + goto err_out; + } + + status = ata_busy_wait(ap, ATA_BUSY, 1000); + if (status & ATA_BUSY) { + ata_thread_wake(ap, THR_PACKET); + return; + } + if ((status & ATA_DRQ) == 0) + goto err_out; + + /* FIXME: mmio-ize */ + DPRINTK("writing cdb\n"); + outsl(ap->ioaddr.data_addr, scsicmd, 
ap->host->max_cmd_len / 4); + + if (!doing_dma) + ata_thread_wake(ap, THR_PACKET); + + VPRINTK("EXIT\n"); + return; + +err_out: + if (!doing_dma) + ata_irq_on(ap); /* re-enable interrupts */ + ata_bad_cdb(cmd, done); + DPRINTK("EXIT - badcmd\n"); +} + +/** + * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device + * @cmd: SCSI command to be sent + * @done: Completion function, called when command is complete + * + * In some cases, this function translates SCSI commands into + * ATA taskfiles, and queues the taskfiles to be sent to + * hardware. In other cases, this function simulates a + * SCSI device by evaluating and responding to certain + * SCSI commands. This creates the overall effect of + * ATA and ATAPI devices appearing as SCSI devices. + * + * LOCKING: + * Releases scsi-layer-held lock, and obtains host_set lock. + * + * RETURNS: + * Zero. + */ + +int ata_scsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + u8 *scsicmd = cmd->cmnd; + struct ata_port *ap; + struct ata_device *dev; + struct ata_scsi_args args; + const unsigned int atapi_support = +#ifdef ATA_ENABLE_ATAPI + 1; +#else + 0; +#endif + + /* Note: spin_lock_irqsave is held by caller... */ + spin_unlock(cmd->device->host->host_lock); + + ap = (struct ata_port *) &cmd->device->host->hostdata[0]; + + DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + ap->id, + cmd->device->channel, cmd->device->id, cmd->device->lun, + scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], + scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], + scsicmd[8]); + + /* skip commands not addressed to targets we care about */ + if ((cmd->device->channel != 0) || (cmd->device->lun != 0) || + (cmd->device->id >= ATA_MAX_DEVICES)) { + cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? 
*/ + done(cmd); + goto out; + } + + spin_lock(&ap->host_set->lock); + + dev = &ap->device[cmd->device->id]; + + if (!ata_dev_present(dev)) { + DPRINTK("no device\n"); + cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */ + done(cmd); + goto out_unlock; + } + + if (dev->class == ATA_DEV_ATAPI) { + if (atapi_support) + atapi_scsi_queuecmd(ap, dev, cmd, done); + else { + cmd->result = (DID_BAD_TARGET << 16); /* correct? */ + done(cmd); + } + goto out_unlock; + } + + /* fast path */ + switch(scsicmd[0]) { + case READ_6: + case WRITE_6: + ata_scsi_rw_queue(ap, dev, cmd, done, 6); + goto out_unlock; + + case READ_10: + case WRITE_10: + ata_scsi_rw_queue(ap, dev, cmd, done, 10); + goto out_unlock; + + case READ_16: + case WRITE_16: + ata_scsi_rw_queue(ap, dev, cmd, done, 16); + goto out_unlock; + + default: + /* do nothing */ + break; + } + + /* + * slow path + */ + + args.ap = ap; + args.dev = dev; + args.cmd = cmd; + args.done = done; + + switch(scsicmd[0]) { + case TEST_UNIT_READY: /* FIXME: correct? */ + case FORMAT_UNIT: /* FIXME: correct? */ + case SEND_DIAGNOSTIC: /* FIXME: correct? */ + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); + break; + + case INQUIRY: + if (scsicmd[1] & 2) /* is CmdDt set? */ + ata_bad_cdb(cmd, done); + else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); + else if (scsicmd[2] == 0x00) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); + else if (scsicmd[2] == 0x80) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); + else if (scsicmd[2] == 0x83) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); + else + ata_bad_cdb(cmd, done); + break; + + case MODE_SENSE: + case MODE_SENSE_10: + ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); + break; + + case MODE_SELECT: /* unconditionally return */ + case MODE_SELECT_10: /* bad-field-in-cdb */ + ata_bad_cdb(cmd, done); + break; + + case SYNCHRONIZE_CACHE: + if ((dev->flags & ATA_DFLAG_WCACHE) == 0) + ata_bad_scsiop(cmd, done); + else + ata_scsi_rbuf_fill(&args, ata_scsiop_sync_cache); + break; + + case READ_CAPACITY: + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); + break; + + case SERVICE_ACTION_IN: + if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); + else + ata_bad_cdb(cmd, done); + break; + + case REPORT_LUNS: + ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); + break; + + /* mandantory commands we haven't implemented yet */ + case REQUEST_SENSE: + + /* all other commands */ + default: + ata_bad_scsiop(cmd, done); + break; + } + +out_unlock: + spin_unlock(&ap->host_set->lock); +out: + spin_lock(cmd->device->host->host_lock); + return 0; +} + diff -Nru a/drivers/scsi/libata.h b/drivers/scsi/libata.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/libata.h Wed Oct 22 10:40:10 2003 @@ -0,0 +1,94 @@ +/* + libata.h - helper library for ATA + + Copyright 2003 Red Hat, Inc. All rights reserved. + Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. 
+ + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#ifndef __LIBATA_H__ +#define __LIBATA_H__ + +#define DRV_NAME "libata" +#define DRV_VERSION "0.75" /* must be exactly four chars */ + +struct ata_scsi_args { + struct ata_port *ap; + struct ata_device *dev; + Scsi_Cmnd *cmd; + void (*done)(Scsi_Cmnd *); +}; + + +/* libata-core.c */ +extern unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s, + unsigned int ofs, unsigned int len); +extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, + struct ata_device *dev); +extern int ata_qc_issue(struct ata_queued_cmd *qc); +extern void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep); +extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_thread_wake(struct ata_port *ap, unsigned int thr_state); + + +/* libata-scsi.c */ +extern void ata_to_sense_error(struct ata_queued_cmd *qc); +extern void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev, + Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), + unsigned int cmd_size); +extern int ata_scsi_error(struct Scsi_Host *host); +extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); + +extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, + unsigned 
int buflen); + +extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern void ata_scsi_badcmd(Scsi_Cmnd *cmd, + void (*done)(Scsi_Cmnd *), + u8 asc, u8 ascq); +extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor) (struct ata_scsi_args *args, + u8 *rbuf, unsigned int buflen)); + +static inline void ata_bad_scsiop(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + ata_scsi_badcmd(cmd, done, 0x20, 0x00); +} + +static inline void ata_bad_cdb(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + ata_scsi_badcmd(cmd, done, 0x24, 0x00); +} + +#endif /* __LIBATA_H__ */ diff -Nru a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/sata_promise.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,766 @@ +/* + * sata_promise.c - Promise SATA + * + * Copyright 2003 Red Hat, Inc. + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. 
If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "sata_promise" +#define DRV_VERSION "0.83" + + +enum { + PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */ + + PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ + PDC_TBG_MODE = 0x41, /* TBG mode */ + PDC_FLASH_CTL = 0x44, /* Flash control register */ + PDC_CTLSTAT = 0x60, /* IDE control and status register */ + PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ + PDC_SLEW_CTL = 0x470, /* slew rate control reg */ + PDC_20621_SEQCTL = 0x400, + PDC_20621_SEQMASK = 0x480, + + PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */ + + board_2037x = 0, /* FastTrak S150 TX2plus */ + board_20319 = 1, /* FastTrak S150 TX4 */ + board_20621 = 2, /* FastTrak S150 SX4 */ + + PDC_FLAG_20621 = (1 << 30), /* we have a 20621 */ +}; + + +static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static void pdc_sata_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio); +static void pdc_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma); +static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void pdc_dma_start(struct ata_queued_cmd *qc); +static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs); +static void pdc_eng_timeout(struct ata_port *ap); +static 
void pdc_20621_phy_reset (struct ata_port *ap); + + +static Scsi_Host_Template pdc_sata_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, +}; + +static struct ata_port_operations pdc_sata_ops = { + .port_disable = ata_port_disable, + .set_piomode = pdc_sata_set_piomode, + .set_udmamode = pdc_sata_set_udmamode, + .tf_load = ata_tf_load_mmio, + .tf_read = ata_tf_read_mmio, + .check_status = ata_check_status_mmio, + .exec_command = ata_exec_command_mmio, + .phy_reset = sata_phy_reset, + .phy_config = pata_phy_config, /* not a typo */ + .bmdma_start = pdc_dma_start, + .fill_sg = ata_fill_sg, + .eng_timeout = pdc_eng_timeout, + .irq_handler = pdc_interrupt, + .scr_read = pdc_sata_scr_read, + .scr_write = pdc_sata_scr_write, +}; + +static struct ata_port_operations pdc_20621_ops = { + .port_disable = ata_port_disable, + .set_piomode = pdc_sata_set_piomode, + .set_udmamode = pdc_sata_set_udmamode, + .tf_load = ata_tf_load_mmio, + .tf_read = ata_tf_read_mmio, + .check_status = ata_check_status_mmio, + .exec_command = ata_exec_command_mmio, + .phy_reset = pdc_20621_phy_reset, + .phy_config = pata_phy_config, /* not a typo */ + .bmdma_start = pdc_dma_start, + .fill_sg = ata_fill_sg, + .eng_timeout = pdc_eng_timeout, + .irq_handler = pdc_interrupt, +}; + +static struct ata_port_info pdc_port_info[] = { + /* board_2037x */ + { + .sht = &pdc_sata_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &pdc_sata_ops, + }, + + /* 
board_20319 */ + { + .sht = &pdc_sata_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &pdc_sata_ops, + }, + + /* board_20621 */ + { + .sht = &pdc_sata_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO | + PDC_FLAG_20621, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &pdc_20621_ops, + }, + +}; + +static struct pci_device_id pdc_sata_pci_tbl[] = { + { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_2037x }, + { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_2037x }, + { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_20319 }, + { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_20319 }, +#if 0 /* broken currently */ + { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_20621 }, +#endif + { } /* terminate list */ +}; + + +static struct pci_driver pdc_sata_pci_driver = { + .name = DRV_NAME, + .id_table = pdc_sata_pci_tbl, + .probe = pdc_sata_init_one, + .remove = ata_pci_remove_one, +}; + + +static void pdc_20621_phy_reset (struct ata_port *ap) +{ + VPRINTK("ENTER\n"); + ap->cbl = ATA_CBL_SATA; + ata_port_probe(ap); + ata_bus_reset(ap); +} + +static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, + u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +static void pdc_sata_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + /* dummy */ +} + + +static void pdc_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + /* 
dummy */ +} + +enum pdc_packet_bits { + PDC_PKT_READ = (1 << 2), + PDC_PKT_NODATA = (1 << 3), + + PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5), + PDC_PKT_CLEAR_BSY = (1 << 4), + PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4), + PDC_LAST_REG = (1 << 3), + + PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1), +}; + +static inline void pdc_pkt_header(struct ata_taskfile *tf, dma_addr_t sg_table, + unsigned int devno, u8 *buf) +{ + u8 dev_reg; + u32 *buf32 = (u32 *) buf; + + /* set control bits (byte 0), zero delay seq id (byte 3), + * and seq id (byte 2) + */ + switch (tf->protocol) { + case ATA_PROT_DMA_READ: + buf32[0] = cpu_to_le32(PDC_PKT_READ); + break; + + case ATA_PROT_DMA_WRITE: + buf32[0] = 0; + break; + + case ATA_PROT_NODATA: + buf32[0] = cpu_to_le32(PDC_PKT_NODATA); + break; + + default: + BUG(); + break; + } + + buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */ + buf32[2] = 0; /* no next-packet */ + + if (devno == 0) + dev_reg = ATA_DEVICE_OBS; + else + dev_reg = ATA_DEVICE_OBS | ATA_DEV1; + + /* select device */ + buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; + buf[13] = dev_reg; + + /* device control register */ + buf[14] = (1 << 5) | PDC_REG_DEVCTL; + buf[15] = tf->ctl; +} + +static inline void pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf, + unsigned int i) +{ + if (tf->flags & ATA_TFLAG_DEVICE) { + buf[i++] = (1 << 5) | ATA_REG_DEVICE; + buf[i++] = tf->device; + } + + /* and finally the command itself; also includes end-of-pkt marker */ + buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD; + buf[i++] = tf->command; +} + +static void pdc_prep_lba28(struct ata_taskfile *tf, dma_addr_t sg_table, + unsigned int devno, u8 *buf) +{ + unsigned int i; + + pdc_pkt_header(tf, sg_table, devno, buf); + + /* the "(1 << 5)" should be read "(count << 5)" */ + + i = 16; + + /* ATA command block registers */ + buf[i++] = (1 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->feature; + + buf[i++] = (1 << 5) | ATA_REG_NSECT; + buf[i++] = tf->nsect; + + buf[i++] = (1 
<< 5) | ATA_REG_LBAL; + buf[i++] = tf->lbal; + + buf[i++] = (1 << 5) | ATA_REG_LBAM; + buf[i++] = tf->lbam; + + buf[i++] = (1 << 5) | ATA_REG_LBAH; + buf[i++] = tf->lbah; + + pdc_pkt_footer(tf, buf, i); +} + +static void pdc_prep_lba48(struct ata_taskfile *tf, dma_addr_t sg_table, + unsigned int devno, u8 *buf) +{ + unsigned int i; + + pdc_pkt_header(tf, sg_table, devno, buf); + + /* the "(2 << 5)" should be read "(count << 5)" */ + + i = 16; + + /* ATA command block registers */ + buf[i++] = (2 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->hob_feature; + buf[i++] = tf->feature; + + buf[i++] = (2 << 5) | ATA_REG_NSECT; + buf[i++] = tf->hob_nsect; + buf[i++] = tf->nsect; + + buf[i++] = (2 << 5) | ATA_REG_LBAL; + buf[i++] = tf->hob_lbal; + buf[i++] = tf->lbal; + + buf[i++] = (2 << 5) | ATA_REG_LBAM; + buf[i++] = tf->hob_lbam; + buf[i++] = tf->lbam; + + buf[i++] = (2 << 5) | ATA_REG_LBAH; + buf[i++] = tf->hob_lbah; + buf[i++] = tf->lbah; + + pdc_pkt_footer(tf, buf, i); +} + +static inline void __pdc_dma_complete (struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + void *dmactl = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; + u32 val; + + /* clear DMA start/stop bit (bit 7) */ + val = readl(dmactl); + writel(val & ~(1 << 7), dmactl); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_altstatus(ap); /* dummy read */ +} + +static inline void pdc_dma_complete (struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + __pdc_dma_complete(ap, qc); + + /* get drive status; clear intr; complete txn */ + ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag), + ata_wait_idle(ap), 0); +} + +static void pdc_eng_timeout(struct ata_port *ap) +{ + u8 drv_stat; + struct ata_queued_cmd *qc; + + DPRINTK("ENTER\n"); + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (!qc) { + printk(KERN_ERR "ata%u: BUG: timeout without command\n", + ap->id); + goto out; + } + + switch (qc->tf.protocol) { + case ATA_PROT_DMA_READ: + case ATA_PROT_DMA_WRITE: + printk(KERN_ERR "ata%u: 
DMA timeout\n", ap->id); + __pdc_dma_complete(ap, qc); + ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag), + ata_wait_idle(ap) | ATA_ERR, 0); + break; + + case ATA_PROT_NODATA: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + + default: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + } + +out: + DPRINTK("EXIT\n"); +} + +static inline unsigned int pdc_host_intr( struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + u8 status; + unsigned int handled = 0; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA_READ: + case ATA_PROT_DMA_WRITE: + pdc_dma_complete(ap, qc); + handled = 1; + break; + + case ATA_PROT_NODATA: /* command completion, but no data xfer */ + status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); + ata_qc_complete(qc, status, 0); + handled = 1; + break; + + default: + ap->stats.idle_irq++; + break; + } + + return handled; +} + +static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + struct ata_port *ap; + u32 mask = 0; + unsigned int i, tmp; + unsigned int handled = 0, have_20621 = 0; + void *mmio_base; + + VPRINTK("ENTER\n"); + + if (!host_set || !host_set->mmio_base) { + VPRINTK("QUICK EXIT\n"); + return IRQ_NONE; + } + + mmio_base = host_set->mmio_base; + + for (i = 0; i < host_set->n_ports; i++) { + ap = host_set->ports[i]; + if (ap && (ap->flags & PDC_FLAG_20621)) { + have_20621 = 1; + break; + } + } + + /* reading should also clear interrupts */ + if (have_20621) { + mmio_base += PDC_CHIP0_OFS; + mask = readl(mmio_base + PDC_20621_SEQMASK); + } else { + mask = readl(mmio_base + PDC_INT_SEQMASK); 
+ } + + if (mask == 0xffffffff) { + VPRINTK("QUICK EXIT 2\n"); + return IRQ_NONE; + } + mask &= 0xf; /* only 16 tags possible */ + if (!mask) { + VPRINTK("QUICK EXIT 3\n"); + return IRQ_NONE; + } + + spin_lock_irq(&host_set->lock); + + for (i = 0; i < host_set->n_ports; i++) { + VPRINTK("port %u\n", i); + ap = host_set->ports[i]; + tmp = mask & (1 << (i + 1)); + if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0)) + handled += pdc_host_intr(ap, qc); + } + } + + spin_unlock_irq(&host_set->lock); + + VPRINTK("EXIT\n"); + + return IRQ_RETVAL(handled); +} + +static void pdc_dma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_host_set *host_set = ap->host_set; + unsigned int port_no = ap->port_no; + void *mmio = host_set->mmio_base; + void *dmactl = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; + unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE); + u32 val; + u8 seq = (u8) (port_no + 1); + + wmb(); /* flush writes made to PRD table in DMA memory */ + + if (ap->flags & PDC_FLAG_20621) + mmio += PDC_CHIP0_OFS; + + VPRINTK("ENTER, ap %p, mmio %p\n", ap, mmio); + + /* indicate where our S/G table is to chip */ + writel(ap->prd_dma, (void *) ap->ioaddr.cmd_addr + PDC_PRD_TBL); + + /* clear dma start bit (paranoia), clear intr seq id (paranoia), + * set DMA direction (bit 6 == from chip -> drive) + */ + val = readl(dmactl); + VPRINTK("val == %x\n", val); + val &= ~(1 << 7); /* clear dma start/stop bit */ + if (rw) /* set/clear dma direction bit */ + val |= (1 << 6); + else + val &= ~(1 << 6); + if (qc->tf.ctl & ATA_NIEN) /* set/clear irq-mask bit */ + val |= (1 << 10); + else + val &= ~(1 << 10); + writel(val, dmactl); + val = readl(dmactl); + VPRINTK("val == %x\n", val); + + /* FIXME: clear any intr status bits here? 
*/ + + ata_exec_command_mmio(ap, &qc->tf); + + VPRINTK("FIVE\n"); + if (ap->flags & PDC_FLAG_20621) + writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); + else + writel(0x00000001, mmio + (seq * 4)); + + /* start host DMA transaction */ + writel(val | seq | (1 << 7), dmactl); +} + +static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = base; + port->data_addr = base; + port->error_addr = base + 0x4; + port->nsect_addr = base + 0x8; + port->lbal_addr = base + 0xc; + port->lbam_addr = base + 0x10; + port->lbah_addr = base + 0x14; + port->device_addr = base + 0x18; + port->cmdstat_addr = base + 0x1c; + port->ctl_addr = base + 0x38; +} + +static void pdc_20621_init(struct ata_probe_ent *pe) +{ +} + +static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) +{ + void *mmio = pe->mmio_base; + u32 tmp; + + if (chip_id == board_20621) + return; + + /* change FIFO_SHD to 8 dwords. Promise driver does this... + * dunno why. + */ + tmp = readl(mmio + PDC_FLASH_CTL); + if ((tmp & (1 << 16)) == 0) + writel(tmp | (1 << 16), mmio + PDC_FLASH_CTL); + + /* clear plug/unplug flags for all ports */ + tmp = readl(mmio + PDC_SATA_PLUG_CSR); + writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); + + /* mask plug/unplug ints */ + tmp = readl(mmio + PDC_SATA_PLUG_CSR); + writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); + + /* reduce TBG clock to 133 Mhz. FIXME: why? */ + tmp = readl(mmio + PDC_TBG_MODE); + tmp &= ~0x30000; /* clear bit 17, 16*/ + tmp |= 0x10000; /* set bit 17:16 = 0:1 */ + writel(tmp, mmio + PDC_TBG_MODE); + + /* adjust slew rate control register. FIXME: why? 
*/ + tmp = readl(mmio + PDC_SLEW_CTL); + tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */ + tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */ + writel(tmp, mmio + PDC_SLEW_CTL); +} + +static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + unsigned long base; + void *mmio_base; + unsigned int board_idx = (unsigned int) ent->driver_data; + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* + * If this driver happens to only be useful on Apple's K2, then + * we should check that here as it has a normal Serverworks ID + */ + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + mmio_base = ioremap(pci_resource_start(pdev, 3), + pci_resource_len(pdev, 3)); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_free_ent; + } + base = (unsigned long) mmio_base; + + probe_ent->sht = pdc_port_info[board_idx].sht; + probe_ent->host_flags = pdc_port_info[board_idx].host_flags; + probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; + probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask; + probe_ent->port_ops = pdc_port_info[board_idx].port_ops; + + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->mmio_base = mmio_base; + + if (board_idx == board_20621) + base += PDC_CHIP0_OFS; + + pdc_sata_setup_port(&probe_ent->port[0], base + 0x200); + probe_ent->port[0].scr_addr = base + 0x400; + + pdc_sata_setup_port(&probe_ent->port[1], base + 0x280); + probe_ent->port[1].scr_addr = base + 0x500; + + /* 
notice 4-port boards */ + switch (board_idx) { + case board_20319: + case board_20621: + probe_ent->n_ports = 4; + + pdc_sata_setup_port(&probe_ent->port[2], base + 0x300); + probe_ent->port[2].scr_addr = base + 0x600; + + pdc_sata_setup_port(&probe_ent->port[3], base + 0x380); + probe_ent->port[3].scr_addr = base + 0x700; + break; + case board_2037x: + probe_ent->n_ports = 2; + break; + default: + BUG(); + break; + } + + pci_set_master(pdev); + + /* initialize adapter */ + switch (board_idx) { + case board_20621: + pdc_20621_init(probe_ent); + break; + + default: + pdc_host_init(board_idx, probe_ent); + break; + } + + /* FIXME: check ata_device_add return value */ + ata_device_add(probe_ent); + kfree(probe_ent); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + + + +static int __init pdc_sata_init(void) +{ + int rc; + + rc = pci_module_init(&pdc_sata_pci_driver); + if (rc) + return rc; + + return 0; +} + + +static void __exit pdc_sata_exit(void) +{ + pci_unregister_driver(&pdc_sata_pci_driver); +} + + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Promise SATA low-level driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl); + +module_init(pdc_sata_init); +module_exit(pdc_sata_exit); diff -Nru a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/sata_sil.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,331 @@ +/* + * ata_sil.c - Silicon Image SATA + * + * Copyright 2003 Red Hat, Inc. + * Copyright 2003 Benjamin Herrenschmidt + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. 
+ * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "ata_sil" +#define DRV_VERSION "0.51" + +enum { + sil_3112 = 0, + + SIL_IDE0_TF = 0x80, + SIL_IDE0_CTL = 0x8A, + SIL_IDE0_BMDMA = 0x00, + SIL_IDE0_SCR = 0x100, + + SIL_IDE1_TF = 0xC0, + SIL_IDE1_CTL = 0xCA, + SIL_IDE1_BMDMA = 0x08, + SIL_IDE1_SCR = 0x180, +}; + +static void sil_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio); +static void sil_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma); +static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); +static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); + +static struct pci_device_id sil_pci_tbl[] = { + { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, + { } /* terminate list */ +}; + +static struct pci_driver sil_pci_driver = { + .name = DRV_NAME, + .id_table = sil_pci_tbl, + .probe = sil_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template sil_sht = { + .module = THIS_MODULE, + 
.name = DRV_NAME, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, +}; + +static struct ata_port_operations sil_ops = { + .port_disable = ata_port_disable, + .dev_config = sil_dev_config, + .set_piomode = sil_set_piomode, + .set_udmamode = sil_set_udmamode, + .tf_load = ata_tf_load_mmio, + .tf_read = ata_tf_read_mmio, + .check_status = ata_check_status_mmio, + .exec_command = ata_exec_command_mmio, + .phy_reset = sata_phy_reset, + .phy_config = pata_phy_config, /* not a typo */ + .bmdma_start = ata_bmdma_start_mmio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + .irq_handler = ata_interrupt, + .scr_read = sil_scr_read, + .scr_write = sil_scr_write, +}; + +static struct ata_port_info sil_port_info[] = { + /* sil_3112 */ + { + .sht = &sil_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6; FIXME */ + .port_ops = &sil_ops, + }, +}; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sil_pci_tbl); + +static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg) +{ + unsigned long offset = ap->ioaddr.scr_addr; + + switch (sc_reg) { + case SCR_STATUS: + return offset + 4; + case SCR_ERROR: + return offset + 8; + case SCR_CONTROL: + return offset; + default: + /* do nothing */ + break; + } + + return 0; +} + +static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + void *mmio = (void *) sil_scr_addr(ap, sc_reg); + if (mmio) + return readl(mmio); + 
return 0xffffffffU; +} + +static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +{ + void *mmio = (void *) sil_scr_addr(ap, sc_reg); + if (mmio) + writel(val, mmio); +} + +/** + * sil_dev_config - Apply device/host-specific errata fixups + * @ap: Port containing device to be examined + * @dev: Device to be examined + * + * After the IDENTIFY [PACKET] DEVICE step is complete, and a + * device is known to be present, this function is called. + * We apply two errata fixups which are specific to Silicon Image, + * a Seagate and a Maxtor fixup. + * + * For certain Seagate devices, we must limit the maximum sectors + * to under 8K. + * + * For certain Maxtor devices, we must not program the drive + * beyond udma5. + * + * Both fixups are unfairly pessimistic. As soon as I get more + * information on these errata, I will create a more exhaustive + * list, and apply the fixups to only the specific + * devices/hosts/firmwares that need it. + */ +static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) +{ + const char *s = &dev->product[0]; + unsigned int len = strnlen(s, sizeof(dev->product)); + + /* ATAPI specifies that empty space is blank-filled; remove blanks */ + while ((len > 0) && (s[len - 1] == ' ')) + len--; + + /* limit to udma5 */ + if (!memcmp(s, "Maxtor ", 7)) { + printk(KERN_INFO "ata%u(%u): applying pessimistic Maxtor errata fix\n", + ap->id, dev->devno); + ap->udma_mask &= ATA_UDMA5; + return; + } + + /* limit requests to 15 sectors */ + if ((len > 4) && (!memcmp(s, "ST", 2))) { + if ((!memcmp(s + len - 2, "AS", 2)) || + (!memcmp(s + len - 3, "ASL", 3))) { + printk(KERN_INFO "ata%u(%u): applying pessimistic Seagate errata fix\n", + ap->id, dev->devno); + ap->host->max_sectors = 15; + ap->host->hostt->max_sectors = 15; + return; + } + } +} + +static void sil_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + /* We need empty implementation, the core doesn't test for NULL + * function 
pointer + */ +} + +static void sil_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + /* We need empty implementation, the core doesn't test for NULL + * function pointer + */ +} + +static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + unsigned long base; + void *mmio_base; + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* + * If this driver happens to only be useful on Apple's K2, then + * we should check that here as it has a normal Serverworks ID + */ + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + INIT_LIST_HEAD(&probe_ent->node); + probe_ent->pdev = pdev; + probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; + probe_ent->sht = sil_port_info[ent->driver_data].sht; + probe_ent->n_ports = 2; + probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask; + probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; + + mmio_base = ioremap(pci_resource_start(pdev, 5), + pci_resource_len(pdev, 5)); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_free_ent; + } + + probe_ent->mmio_base = mmio_base; + + base = (unsigned long) mmio_base; + probe_ent->port[0].cmd_addr = base + SIL_IDE0_TF; + probe_ent->port[0].ctl_addr = base + SIL_IDE0_CTL; + probe_ent->port[0].bmdma_addr = base + SIL_IDE0_BMDMA; + probe_ent->port[0].scr_addr = base + SIL_IDE0_SCR; + ata_std_ports(&probe_ent->port[0]); + 
+ probe_ent->port[1].cmd_addr = base + SIL_IDE1_TF; + probe_ent->port[1].ctl_addr = base + SIL_IDE1_CTL; + probe_ent->port[1].bmdma_addr = base + SIL_IDE1_BMDMA; + probe_ent->port[1].scr_addr = base + SIL_IDE1_SCR; + ata_std_ports(&probe_ent->port[1]); + + pci_set_master(pdev); + + /* FIXME: check ata_device_add return value */ + ata_device_add(probe_ent); + kfree(probe_ent); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +static int __init sil_init(void) +{ + int rc; + + rc = pci_module_init(&sil_pci_driver); + if (rc) + return rc; + + return 0; +} + +static void __exit sil_exit(void) +{ + pci_unregister_driver(&sil_pci_driver); +} + + +module_init(sil_init); +module_exit(sil_exit); diff -Nru a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/sata_svw.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,389 @@ +/* + * ata_k2.c - Broadcom (Apple K2) SATA + * + * Copyright 2003 Benjamin Herrenschmidt + * + * Bits from Jeff Garzik, Copyright RedHat, Inc. + * + * This driver probably works with non-Apple versions of the + * Broadcom chipset... + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. 
+ * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#ifdef CONFIG_ALL_PPC +#include +#include +#endif /* CONFIG_ALL_PPC */ + +#define DRV_NAME "ata_k2" +#define DRV_VERSION "1.02" + + +static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, + u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + writeb(tf->ctl, ioaddr->ctl_addr); + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->error_addr); + writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr); + writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr); + writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr); + writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr); + } else if (is_addr) { + writew(tf->feature, ioaddr->error_addr); + writew(tf->nsect, ioaddr->nsect_addr); + writew(tf->lbal, ioaddr->lbal_addr); + writew(tf->lbam, ioaddr->lbam_addr); + writew(tf->lbah, ioaddr->lbah_addr); + } + + if (tf->flags & ATA_TFLAG_DEVICE) + writeb(tf->device, ioaddr->device_addr); + + ata_wait_idle(ap); +} + + +static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u16 nsect, lbal, lbam, lbah; + + nsect = tf->nsect = readw(ioaddr->nsect_addr); + lbal = tf->lbal = readw(ioaddr->lbal_addr); 
+ lbam = tf->lbam = readw(ioaddr->lbam_addr); + lbah = tf->lbah = readw(ioaddr->lbah_addr); + tf->device = readw(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + tf->hob_feature = readw(ioaddr->error_addr) >> 8; + tf->hob_nsect = nsect >> 8; + tf->hob_lbal = lbal >> 8; + tf->hob_lbam = lbam >> 8; + tf->hob_lbah = lbah >> 8; + } +} + + +static u8 k2_stat_check_status(struct ata_port *ap) +{ + return readl((void *) ap->ioaddr.cmdstat_addr); +} + +static void k2_sata_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + /* We need empty implementation, the core doesn't test for NULL + * function pointer + */ +} + + +static void k2_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + /* We need empty implementation, the core doesn't test for NULL + * function pointer + */ +} + + +#ifdef CONFIG_ALL_PPC +/* + * k2_sata_proc_info + * inout : decides on the direction of the dataflow and the meaning of the + * variables + * buffer: If inout==FALSE data is being written to it else read from it + * *start: If inout==FALSE start of the valid data in the buffer + * offset: If inout==FALSE offset from the beginning of the imaginary file + * from which we start writing into the buffer + * length: If inout==FALSE max number of bytes to be written into the buffer + * else number of bytes in the buffer + */ +static int k2_sata_proc_info(char *page, char **start, off_t offset, int count, + int hostno, int inout) +{ + struct Scsi_Host *hpnt; + struct ata_port *ap; + struct device_node *np; + int len, index; + + /* Find ourself. That's locking-broken, shitty etc... but thanks to + * /proc/scsi interface and lack of state kept around in this driver, + * its best I want to do for now... 
+ */ + hpnt = scsi_hostlist; + while (hpnt) { + if (hostno == hpnt->host_no) + break; + hpnt = hpnt->next; + } + if (!hpnt) + return 0; + + /* Find the ata_port */ + ap = (struct ata_port *) &hpnt->hostdata[0]; + if (ap == NULL) + return 0; + + /* Find the OF node for the PCI device proper */ + np = pci_device_to_OF_node(ap->host_set->pdev); + if (np == NULL) + return 0; + + /* Match it to a port node */ + index = (ap == ap->host_set->ports[0]) ? 0 : 1; + for (np = np->child; np != NULL; np = np->sibling) { + u32 *reg = (u32 *)get_property(np, "reg", NULL); + if (!reg) + continue; + if (index == *reg) + break; + } + if (np == NULL) + return 0; + + len = sprintf(page, "devspec: %s\n", np->full_name); + + return len; +} +#endif /* CONFIG_ALL_PPC */ + + +static Scsi_Host_Template k2_sata_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, +#ifdef CONFIG_ALL_PPC + .proc_info = k2_sata_proc_info +#endif +}; + + +static struct ata_port_operations k2_sata_ops = { + .port_disable = ata_port_disable, + .set_piomode = k2_sata_set_piomode, + .set_udmamode = k2_sata_set_udmamode, + .tf_load = k2_sata_tf_load, + .tf_read = k2_sata_tf_read, + .check_status = k2_stat_check_status, + .exec_command = ata_exec_command_mmio, + .phy_reset = sata_phy_reset, + .phy_config = pata_phy_config, /* not a typo */ + .bmdma_start = ata_bmdma_start_mmio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + .irq_handler = ata_interrupt, + .scr_read = k2_sata_scr_read, + .scr_write = k2_sata_scr_write, +}; + + +static void k2_sata_setup_port(struct ata_ioports *port, 
unsigned long base) +{ + port->cmd_addr = base; + port->data_addr = base; + port->error_addr = base + 0x4; + port->nsect_addr = base + 0x8; + port->lbal_addr = base + 0xc; + port->lbam_addr = base + 0x10; + port->lbah_addr = base + 0x14; + port->device_addr = base + 0x18; + port->cmdstat_addr = base + 0x1c; + port->ctl_addr = base + 0x20; + port->bmdma_addr = base + 0x30; + port->scr_addr = base + 0x40; +} + + +static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + unsigned long base; + void *mmio_base; + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* + * If this driver happens to only be useful on Apple's K2, then + * we should check that here as it has a normal Serverworks ID + */ + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + mmio_base = ioremap(pci_resource_start(pdev, 5), + pci_resource_len(pdev, 5)); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_free_ent; + } + base = (unsigned long) mmio_base; + + /* + * Check for the "disabled" second function to avoid registering + * useless interfaces on K2 + */ + if (readl(mmio_base + 0x40) == 0xffffffffUL && + readl(mmio_base + 0x140) == 0xffffffffUL) { + rc = -ENODEV; + goto err_out_unmap; + } + probe_ent->sht = &k2_sata_sht; + probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO; + probe_ent->port_ops = &k2_sata_ops; + probe_ent->n_ports = 2; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + 
probe_ent->mmio_base = mmio_base; + + /* + * We don't care much about the PIO/UDMA masks, but the core won't like us + * if we don't fill these + */ + probe_ent->pio_mask = 0x1f; + probe_ent->udma_mask = 0x7f; + + k2_sata_setup_port(&probe_ent->port[0], base); + k2_sata_setup_port(&probe_ent->port[1], base + 0x100); + + pci_set_master(pdev); + + /* FIXME: check ata_device_add return value */ + ata_device_add(probe_ent); + kfree(probe_ent); + + return 0; + +err_out_unmap: + iounmap((void *)base); +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + + +static struct pci_device_id k2_sata_pci_tbl[] = { + { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } +}; + + +static struct pci_driver k2_sata_pci_driver = { + .name = DRV_NAME, + .id_table = k2_sata_pci_tbl, + .probe = k2_sata_init_one, + .remove = ata_pci_remove_one, +}; + + +static int __init k2_sata_init(void) +{ + int rc; + + rc = pci_module_init(&k2_sata_pci_driver); + if (rc) + return rc; + + return 0; +} + + +static void __exit k2_sata_exit(void) +{ + pci_unregister_driver(&k2_sata_pci_driver); +} + + +MODULE_AUTHOR("Benjamin Herrenschmidt"); +MODULE_DESCRIPTION("low-level driver for K2 SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl); + +module_init(k2_sata_init); +module_exit(k2_sata_exit); diff -Nru a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/scsi/sata_via.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,264 @@ +/* + sata_via.c - VIA Serial ATA controllers + + Copyright 2003 Red Hat, Inc. All rights reserved. + Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. 
+ + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "sata_via" +#define DRV_VERSION "0.11" + +enum { + via_sata = 0, +}; + +static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void svia_sata_phy_reset(struct ata_port *ap); +static void svia_port_disable(struct ata_port *ap); +static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio); +static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma); + +static unsigned int in_module_init = 1; + +static struct pci_device_id svia_pci_tbl[] = { + { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, via_sata }, + + { } /* terminate list */ +}; + +static struct pci_driver svia_pci_driver = { + .name = DRV_NAME, + .id_table = svia_pci_tbl, + .probe = svia_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template svia_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = 
ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, +}; + +static struct ata_port_operations svia_sata_ops = { + .port_disable = svia_port_disable, + .set_piomode = svia_set_piomode, + .set_udmamode = svia_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .check_status = ata_check_status_pio, + .exec_command = ata_exec_command_pio, + + .phy_reset = svia_sata_phy_reset, + .phy_config = pata_phy_config, /* not a typo */ + + .bmdma_start = ata_bmdma_start_pio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + + .irq_handler = ata_interrupt, +}; + +static struct ata_port_info svia_port_info[] = { + /* via_sata */ + { + .sht = &svia_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY + | ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &svia_sata_ops, + }, +}; + +static struct pci_bits svia_enable_bits[] = { + { 0x40U, 1U, 0x02UL, 0x02UL }, /* port 0 */ + { 0x40U, 1U, 0x01UL, 0x01UL }, /* port 1 */ +}; + + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, svia_pci_tbl); + +/** + * svia_sata_phy_reset - + * @ap: + * + * LOCKING: + * + */ + +static void svia_sata_phy_reset(struct ata_port *ap) +{ + if (!pci_test_config_bits(ap->host_set->pdev, + &svia_enable_bits[ap->port_no])) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: port disabled. 
ignoring.\n", ap->id); + return; + } + + ata_port_probe(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + ata_bus_reset(ap); +} + +/** + * svia_port_disable - + * @ap: + * + * LOCKING: + * + */ + +static void svia_port_disable(struct ata_port *ap) +{ + ata_port_disable(ap); + + /* FIXME */ +} + +/** + * svia_set_piomode - + * @ap: + * @adev: + * @pio: + * + * LOCKING: + * + */ + +static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + /* FIXME: needed? */ +} + +/** + * svia_set_udmamode - + * @ap: + * @adev: + * @udma: + * + * LOCKING: + * + */ + +static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + /* FIXME: needed? */ +} + +/** + * svia_init_one - + * @pdev: + * @ent: + * + * LOCKING: + * + * RETURNS: + * + */ + +static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_port_info *port_info[1]; + unsigned int n_ports = 1; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* no hotplugging support (FIXME) */ + if (!in_module_init) + return -ENODEV; + + port_info[0] = &svia_port_info[ent->driver_data]; + + return ata_pci_init_one(pdev, port_info, n_ports); +} + +/** + * svia_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init svia_init(void) +{ + int rc; + + DPRINTK("pci_module_init\n"); + rc = pci_module_init(&svia_pci_driver); + if (rc) + return rc; + + in_module_init = 0; + + DPRINTK("done\n"); + return 0; +} + +/** + * svia_exit - + * + * LOCKING: + * + */ + +static void __exit svia_exit(void) +{ + pci_unregister_driver(&svia_pci_driver); +} + +module_init(svia_init); +module_exit(svia_exit); + diff -Nru a/drivers/video/Kconfig b/drivers/video/Kconfig --- a/drivers/video/Kconfig Wed Oct 22 10:40:01 2003 +++ b/drivers/video/Kconfig Wed Oct 22 10:40:01 2003 @@ -55,7 +55,7 @@ config FB_PM2 tristate "Permedia2 support" - depends on 
FB && (AMIGA || PCI) && BROKEN + depends on FB && (AMIGA || PCI) help This is the frame buffer device driver for the Permedia2 AGP frame buffer card from ASK, aka `Graphic Blaster Exxtreme'. There is a @@ -68,16 +68,9 @@ help Support the Permedia2 FIFOI disconnect feature (see CONFIG_FB_PM2). -config FB_PM2_PCI - bool "generic Permedia2 PCI board support" - depends on FB_PM2 && PCI - help - Say Y to enable support for Permedia2 AGP frame buffer card from - 3Dlabs (aka `Graphic Blaster Exxtreme') on the PCI bus. - config FB_PM2_CVPPC bool "Phase5 CVisionPPC/BVisionPPC support" - depends on FB_PM2 && AMIGA + depends on FB_PM2 && AMIGA && BROKEN help Say Y to enable support for the Amiga Phase 5 CVisionPPC BVisionPPC framebuffer cards. Phase 5 is no longer with us, alas. @@ -266,6 +259,10 @@ This is the frame buffer device driver for the Chips & Technologies 65550 graphics chip in PowerBooks. +config FB_ASILIANT + bool "Chips 69000 display support" + depends on FB && PCI + config FB_IMSTT bool "IMS Twin Turbo display support" depends on FB && PCI @@ -414,35 +411,15 @@ messages. Most people will want to say N here. If unsure, you will also want to say N. -config FB_E1355 +config FB_EPSON1355 bool "Epson 1355 framebuffer support" - depends on FB && SUPERH + depends on FB && (SUPERH || ARCH_CEIVA) help Build in support for the SED1355 Epson Research Embedded RAMDAC LCD/CRT Controller (since redesignated as the S1D13505) as a framebuffer. Product specs at . -config E1355_REG_BASE - hex "Register Base Address" - depends on FB_E1355 - default "a8000000" - help - Epson SED1355/S1D13505 LCD/CRT controller register base address. - See the manuals at - for - discussion. - -config E1355_FB_BASE - hex "Framebuffer Base Address" - depends on FB_E1355 - default "a8200000" - help - Epson SED1355/S1D13505 LCD/CRT controller memory base address. See - the manuals at - for - discussion. 
- config FB_RIVA tristate "nVidia Riva support" depends on FB && PCI @@ -685,6 +662,19 @@ framebuffer device. The ATI product support page for these boards is at . +config FB_ATY_GENERIC_LCD + bool "Mach64 generic LCD support (EXPERIMENTAL)" + depends on FB_ATY_CT + help + Say Y if you have a laptop with an ATI Rage LT PRO, Rage Mobility, + Rage XC, or Rage XL chipset. + +config FB_ATY_XL_INIT + bool "Rage XL No-BIOS Init support" + depends on FB_ATY_CT + help + Say Y here to support booting a Rage XL without BIOS support. + config FB_ATY_GX bool "Mach64 GX support" if PCI depends on FB_ATY @@ -696,33 +686,41 @@ is at . -config FB_ATY_XL_INIT - bool " Rage XL No-BIOS Init support" if FB_ATY_CT - depends on FB_ATY - help - Say Y here to support booting a Rage XL without BIOS support. - config FB_SIS - tristate "SIS acceleration" + tristate "SiS acceleration" depends on FB && PCI help - This is the frame buffer device driver for the SiS 630 and 640 Super - Socket 7 UMA cards. Specs available at . + This is the frame buffer device driver for the SiS 300, 315 and Xabre + series VGA controller. + + Specs available at . + + See for + documentation and updates. + + The driver is also available as a module ( = code which can be + inserted and removed from the running kernel whenever you want). The + module will be called sisfb. If you want to compile it as a + module, say M here and read Documentation/modules.txt. config FB_SIS_300 - bool "SIS 630/540/730 support" + bool "SIS 300 series support" depends on FB_SIS help - This is the frame buffer device driver for the SiS 630 and related - Super Socket 7 UMA cards. Specs available at - . + This is the frame buffer device driver for the SiS 300 series VGA + controllers. This includes the 300, 540, 630, 730. 
+ Documentation and updates available at + http://www.winischhofer.net/linuxsisvga.shtml config FB_SIS_315 - bool "SIS 315H/315 support" + bool "SIS 315/Xabre support" depends on FB_SIS help - This is the frame buffer device driver for the SiS 315 graphics - card. Specs available at . + This is the frame buffer device driver for the SiS 315 and Xabre + series VGA controllers. This includes the 315, 315H, 315PRO, 650, + 651, M650, 652, M652, 740, 330 (Xabre), 660, M660, 760, M760. + Documentation and updates available at + http://www.winischhofer.net/linuxsisvga.shtml config FB_NEOMAGIC tristate "NeoMagic display support" diff -Nru a/drivers/video/Makefile b/drivers/video/Makefile --- a/drivers/video/Makefile Wed Oct 22 10:40:02 2003 +++ b/drivers/video/Makefile Wed Oct 22 10:40:02 2003 @@ -15,14 +15,14 @@ obj-$(CONFIG_FB_ACORN) += acornfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_AMIGA) += amifb.o c2p.o -obj-$(CONFIG_FB_PM2) += pm2fb.o +obj-$(CONFIG_FB_PM2) += pm2fb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_PM3) += pm3fb.o obj-$(CONFIG_FB_APOLLO) += dnfb.o cfbfillrect.o cfbimgblt.o obj-$(CONFIG_FB_Q40) += q40fb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_ATARI) += atafb.o obj-$(CONFIG_FB_68328) += 68328fb.o obj-$(CONFIG_FB_RADEON) += radeonfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o -obj-$(CONFIG_FB_NEOMAGIC) += neofb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o +obj-$(CONFIG_FB_NEOMAGIC) += neofb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o vgastate.o obj-$(CONFIG_FB_IGA) += igafb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_CONTROL) += controlfb.o macmodes.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_PLATINUM) += platinumfb.o macmodes.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o @@ -33,7 +33,7 @@ obj-$(CONFIG_FB_CYBER) += cyberfb.o obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_SGIVW) += sgivwfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o 
-obj-$(CONFIG_FB_3DFX) += tdfxfb.o cfbimgblt.o +obj-$(CONFIG_FB_3DFX) += tdfxfb.o obj-$(CONFIG_FB_MAC) += macfb.o macmodes.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_HP300) += hpfb.o cfbfillrect.o cfbimgblt.o obj-$(CONFIG_FB_OF) += offb.o cfbfillrect.o cfbimgblt.o cfbcopyarea.o @@ -68,9 +68,10 @@ obj-$(CONFIG_FB_SA1100) += sa1100fb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_VIRTUAL) += vfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_HIT) += hitfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o -obj-$(CONFIG_FB_E1355) += epson1355fb.o -obj-$(CONFIG_FB_PVR2) += pvr2fb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o +obj-$(CONFIG_FB_EPSON1355) += epson1355fb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o +obj-$(CONFIG_FB_PVR2) += pvr2fb.o cfbcillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_VOODOO1) += sstfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o +obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o cfbimgblt.o cfbcopyarea.o obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o cfbimgblt.o cfbcopyarea.o diff -Nru a/drivers/video/acornfb.c b/drivers/video/acornfb.c --- a/drivers/video/acornfb.c Wed Oct 22 10:40:09 2003 +++ b/drivers/video/acornfb.c Wed Oct 22 10:40:09 2003 @@ -1287,7 +1287,6 @@ } } - fb_info.currcon = -1; fb_info.screen_base = (char *)SCREEN_BASE; fb_info.fix.smem_start = SCREEN_START; current_par.using_vram = 0; diff -Nru a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/video/asiliantfb.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,619 @@ +/* + * drivers/video/asiliantfb.c + * frame buffer driver for Asiliant 69000 chip + * Copyright (C) 2001-2003 Saito.K & Jeanne + * + * from driver/video/chipsfb.c and, + * + * drivers/video/asiliantfb.c -- frame buffer device for + * Asiliant 69030 chip (formerly Intel, formerly Chips & Technologies) + * Author: apc@agelectronics.co.uk + * Copyright (C) 2000 AG Electronics + * 
Note: the data sheets don't seem to be available from Asiliant. + * They are available by searching developer.intel.com, but are not otherwise + * linked to. + * + * This driver should be portable with minimal effort to the 69000 display + * chip, and to the twin-display mode of the 69030. + * Contains code from Thomas Hhenleitner (thanks) + * + * Derived from the CT65550 driver chipsfb.c: + * Copyright (C) 1998 Paul Mackerras + * ...which was derived from the Powermac "chips" driver: + * Copyright (C) 1997 Fabio Riccardi. + * And from the frame buffer device for Open Firmware-initialized devices: + * Copyright (C) 1997 Geert Uytterhoeven. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct fb_info asiliantfb_info; + +/* Built in clock of the 69030 */ +const unsigned Fref = 14318180; + +static u32 pseudo_palette[17]; + +#define mmio_base (p->screen_base + 0x400000) + +#define mm_write_ind(num, val, ap, dp) do { \ + writeb((num), mmio_base + (ap)); writeb((val), mmio_base + (dp)); \ +} while (0) + +static void mm_write_xr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7ac, 0x7ad); +} +#define write_xr(num, val) mm_write_xr(p, num, val) + +static void mm_write_fr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7a0, 0x7a1); +} +#define write_fr(num, val) mm_write_fr(p, num, val) + +static void mm_write_cr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7a8, 0x7a9); +} +#define write_cr(num, val) mm_write_cr(p, num, val) + +static void mm_write_gr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x79c, 0x79d); +} +#define write_gr(num, val) mm_write_gr(p, num, val) + +static void mm_write_sr(struct 
fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x788, 0x789); +} +#define write_sr(num, val) mm_write_sr(p, num, val) + +static void mm_write_ar(struct fb_info *p, u8 reg, u8 data) +{ + readb(mmio_base + 0x7b4); + mm_write_ind(reg, data, 0x780, 0x780); +} +#define write_ar(num, val) mm_write_ar(p, num, val) + +/* + * Exported functions + */ +int asiliantfb_init(void); + +static int asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *); +static int asiliantfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); +static int asiliantfb_set_par(struct fb_info *info); +static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info); + +static struct fb_ops asiliantfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = asiliantfb_check_var, + .fb_set_par = asiliantfb_set_par, + .fb_setcolreg = asiliantfb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_cursor = soft_cursor, +}; + +/* Calculate the ratios for the dot clocks without using a single long long + * value */ +static void asiliant_calc_dclk2(u32 *ppixclock, u8 *dclk2_m, u8 *dclk2_n, u8 *dclk2_div) +{ + unsigned pixclock = *ppixclock; + unsigned Ftarget = 1000000 * (1000000 / pixclock); + unsigned n; + unsigned best_error = 0xffffffff; + unsigned best_m = 0xffffffff, + best_n = 0xffffffff; + unsigned ratio; + unsigned remainder; + unsigned char divisor = 0; + + /* Calculate the frequency required. This is hard enough. */ + ratio = 1000000 / pixclock; + remainder = 1000000 % pixclock; + Ftarget = 1000000 * ratio + (1000000 * remainder) / pixclock; + + while (Ftarget < 100000000) { + divisor += 0x10; + Ftarget <<= 1; + } + + ratio = Ftarget / Fref; + remainder = Ftarget % Fref; + + /* This expresses the constraint that 150kHz <= Fref/n <= 5Mhz, + * together with 3 <= n <= 257. 
*/ + for (n = 3; n <= 257; n++) { + unsigned m = n * ratio + (n * remainder) / Fref; + + /* 3 <= m <= 257 */ + if (m >= 3 && m <= 257) { + unsigned new_error = ((Ftarget * n) - (Fref * m)) >= 0 ? + ((Ftarget * n) - (Fref * m)) : ((Fref * m) - (Ftarget * n)); + if (new_error < best_error) { + best_n = n; + best_m = m; + best_error = new_error; + } + } + /* But if VLD = 4, then 4m <= 1028 */ + else if (m <= 1028) { + /* remember there are still only 8-bits of precision in m, so + * avoid over-optimistic error calculations */ + unsigned new_error = ((Ftarget * n) - (Fref * (m & ~3))) >= 0 ? + ((Ftarget * n) - (Fref * (m & ~3))) : ((Fref * (m & ~3)) - (Ftarget * n)); + if (new_error < best_error) { + best_n = n; + best_m = m; + best_error = new_error; + } + } + } + if (best_m > 257) + best_m >>= 2; /* divide m by 4, and leave VCO loop divide at 4 */ + else + divisor |= 4; /* or set VCO loop divide to 1 */ + *dclk2_m = best_m - 2; + *dclk2_n = best_n - 2; + *dclk2_div = divisor; + *ppixclock = pixclock; + return; +} + +static void asiliant_set_timing(struct fb_info *p) +{ + unsigned hd = p->var.xres / 8; + unsigned hs = (p->var.xres + p->var.right_margin) / 8; + unsigned he = (p->var.xres + p->var.right_margin + p->var.hsync_len) / 8; + unsigned ht = (p->var.left_margin + p->var.xres + p->var.right_margin + p->var.hsync_len) / 8; + unsigned vd = p->var.yres; + unsigned vs = p->var.yres + p->var.lower_margin; + unsigned ve = p->var.yres + p->var.lower_margin + p->var.vsync_len; + unsigned vt = p->var.upper_margin + p->var.yres + p->var.lower_margin + p->var.vsync_len; + unsigned wd = (p->var.xres_virtual * ((p->var.bits_per_pixel+7)/8)) / 8; + + if ((p->var.xres == 640) && (p->var.yres == 480) && (p->var.pixclock == 39722)) { + write_fr(0x01, 0x02); /* LCD */ + } else { + write_fr(0x01, 0x01); /* CRT */ + } + + write_cr(0x11, (ve - 1) & 0x0f); + write_cr(0x00, (ht - 5) & 0xff); + write_cr(0x01, hd - 1); + write_cr(0x02, hd); + write_cr(0x03, ((ht - 1) & 0x1f) | 0x80); + 
write_cr(0x04, hs); + write_cr(0x05, (((ht - 1) & 0x20) <<2) | (he & 0x1f)); + write_cr(0x3c, (ht - 1) & 0xc0); + write_cr(0x06, (vt - 2) & 0xff); + write_cr(0x30, (vt - 2) >> 8); + write_cr(0x07, 0x00); + write_cr(0x08, 0x00); + write_cr(0x09, 0x00); + write_cr(0x10, (vs - 1) & 0xff); + write_cr(0x32, ((vs - 1) >> 8) & 0xf); + write_cr(0x11, ((ve - 1) & 0x0f) | 0x80); + write_cr(0x12, (vd - 1) & 0xff); + write_cr(0x31, ((vd - 1) & 0xf00) >> 8); + write_cr(0x13, wd & 0xff); + write_cr(0x41, (wd & 0xf00) >> 8); + write_cr(0x15, (vs - 1) & 0xff); + write_cr(0x33, ((vs - 1) >> 8) & 0xf); + write_cr(0x38, ((ht - 5) & 0x100) >> 8); + write_cr(0x16, (vt - 1) & 0xff); + write_cr(0x18, 0x00); + + if (p->var.xres == 640) { + writeb(0xc7, mmio_base + 0x784); /* set misc output reg */ + } else { + writeb(0x07, mmio_base + 0x784); /* set misc output reg */ + } +} + +static int asiliantfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *p) +{ + unsigned long Ftarget, ratio, remainder; + + ratio = 1000000 / var->pixclock; + remainder = 1000000 % var->pixclock; + Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock; + + /* First check the constraint that the maximum post-VCO divisor is 32, + * and the maximum Fvco is 220MHz */ + if (Ftarget > 220000000 || Ftarget < 3125000) { + printk(KERN_ERR "asiliantfb dotclock must be between 3.125 and 220MHz\n"); + return -ENXIO; + } + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + + if (var->bits_per_pixel == 24) { + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = var->blue.length = var->green.length = 8; + } else if (var->bits_per_pixel == 16) { + switch (var->red.offset) { + case 11: + var->green.length = 6; + break; + case 10: + var->green.length = 5; + break; + default: + return -EINVAL; + } + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = var->blue.length = 5; + } else if (var->bits_per_pixel == 8) { + var->red.offset = 
var->green.offset = var->blue.offset = 0; + var->red.length = var->green.length = var->blue.length = 8; + } + return 0; +} + +static int asiliantfb_set_par(struct fb_info *p) +{ + u8 dclk2_m; /* Holds m-2 value for register */ + u8 dclk2_n; /* Holds n-2 value for register */ + u8 dclk2_div; /* Holds divisor bitmask */ + + /* Set pixclock */ + asiliant_calc_dclk2(&p->var.pixclock, &dclk2_m, &dclk2_n, &dclk2_div); + + /* Set color depth */ + if (p->var.bits_per_pixel == 24) { + write_xr(0x81, 0x16); /* 24 bit packed color mode */ + write_xr(0x82, 0x00); /* Disable palettes */ + write_xr(0x20, 0x20); /* 24 bit blitter mode */ + } else if (p->var.bits_per_pixel == 16) { + if (p->var.red.offset == 11) + write_xr(0x81, 0x15); /* 16 bit color mode */ + else + write_xr(0x81, 0x14); /* 15 bit color mode */ + write_xr(0x82, 0x00); /* Disable palettes */ + write_xr(0x20, 0x10); /* 16 bit blitter mode */ + } else if (p->var.bits_per_pixel == 8) { + write_xr(0x0a, 0x02); /* Linear */ + write_xr(0x81, 0x12); /* 8 bit color mode */ + write_xr(0x82, 0x00); /* Graphics gamma enable */ + write_xr(0x20, 0x00); /* 8 bit blitter mode */ + } + p->fix.line_length = p->var.xres * (p->var.bits_per_pixel >> 3); + p->fix.visual = (p->var.bits_per_pixel == 8) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; + write_xr(0xc4, dclk2_m); + write_xr(0xc5, dclk2_n); + write_xr(0xc7, dclk2_div); + /* Set up the CR registers */ + asiliant_set_timing(p); + return 0; +} + +static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *p) +{ + if (regno > 255) + return 1; + red >>= 8; + green >>= 8; + blue >>= 8; + + /* Set hardware palete */ + writeb(regno, mmio_base + 0x790); + udelay(1); + writeb(red, mmio_base + 0x791); + writeb(green, mmio_base + 0x791); + writeb(blue, mmio_base + 0x791); + + switch(p->var.bits_per_pixel) { + case 15: + if (regno < 16) { + ((u32 *)(p->pseudo_palette))[regno] = + ((red & 0xf8) << 7) | + ((green & 0xf8) << 2) | + ((blue & 0xf8) >> 3); + } + break; + case 16: + if (regno < 16) { + ((u32 *)(p->pseudo_palette))[regno] = + ((red & 0xf8) << 8) | + ((green & 0xfc) << 3) | + ((blue & 0xf8) >> 3); + } + break; + case 24: + if (regno < 24) { + ((u32 *)(p->pseudo_palette))[regno] = + (red << 16) | + (green << 8) | + (blue); + } + break; + } + return 0; +} + +struct chips_init_reg { + unsigned char addr; + unsigned char data; +}; + +#define N_ELTS(x) (sizeof(x) / sizeof(x[0])) + +static struct chips_init_reg chips_init_sr[] = +{ + {0x00, 0x03}, /* Reset register */ + {0x01, 0x01}, /* Clocking mode */ + {0x02, 0x0f}, /* Plane mask */ + {0x04, 0x0e} /* Memory mode */ +}; + +static struct chips_init_reg chips_init_gr[] = +{ + {0x03, 0x00}, /* Data rotate */ + {0x05, 0x00}, /* Graphics mode */ + {0x06, 0x01}, /* Miscellaneous */ + {0x08, 0x00} /* Bit mask */ +}; + +static struct chips_init_reg chips_init_ar[] = +{ + {0x10, 0x01}, /* Mode control */ + {0x11, 0x00}, /* Overscan */ + {0x12, 0x0f}, /* Memory plane enable */ + {0x13, 0x00} /* Horizontal pixel panning */ +}; + +static struct chips_init_reg chips_init_cr[] = +{ + {0x0c, 0x00}, /* Start address high */ + {0x0d, 0x00}, /* Start address low */ + {0x40, 0x00}, /* Extended Start Address */ + {0x41, 0x00}, /* 
Extended Start Address */ + {0x14, 0x00}, /* Underline location */ + {0x17, 0xe3}, /* CRT mode control */ + {0x70, 0x00} /* Interlace control */ +}; + + +static struct chips_init_reg chips_init_fr[] = +{ + {0x01, 0x02}, + {0x03, 0x08}, + {0x08, 0xcc}, + {0x0a, 0x08}, + {0x18, 0x00}, + {0x1e, 0x80}, + {0x40, 0x83}, + {0x41, 0x00}, + {0x48, 0x13}, + {0x4d, 0x60}, + {0x4e, 0x0f}, + + {0x0b, 0x01}, + + {0x21, 0x51}, + {0x22, 0x1d}, + {0x23, 0x5f}, + {0x20, 0x4f}, + {0x34, 0x00}, + {0x24, 0x51}, + {0x25, 0x00}, + {0x27, 0x0b}, + {0x26, 0x00}, + {0x37, 0x80}, + {0x33, 0x0b}, + {0x35, 0x11}, + {0x36, 0x02}, + {0x31, 0xea}, + {0x32, 0x0c}, + {0x30, 0xdf}, + {0x10, 0x0c}, + {0x11, 0xe0}, + {0x12, 0x50}, + {0x13, 0x00}, + {0x16, 0x03}, + {0x17, 0xbd}, + {0x1a, 0x00}, +}; + + +static struct chips_init_reg chips_init_xr[] = +{ + {0xce, 0x00}, /* set default memory clock */ + {0xcc, 200 }, /* MCLK ratio M */ + {0xcd, 18 }, /* MCLK ratio N */ + {0xce, 0x90}, /* MCLK divisor = 2 */ + + {0xc4, 209 }, + {0xc5, 118 }, + {0xc7, 32 }, + {0xcf, 0x06}, + {0x09, 0x01}, /* IO Control - CRT controller extensions */ + {0x0a, 0x02}, /* Frame buffer mapping */ + {0x0b, 0x01}, /* PCI burst write */ + {0x40, 0x03}, /* Memory access control */ + {0x80, 0x82}, /* Pixel pipeline configuration 0 */ + {0x81, 0x12}, /* Pixel pipeline configuration 1 */ + {0x82, 0x08}, /* Pixel pipeline configuration 2 */ + + {0xd0, 0x0f}, + {0xd1, 0x01}, +}; + +static void __init chips_hw_init(struct fb_info *p) +{ + int i; + + for (i = 0; i < N_ELTS(chips_init_xr); ++i) + write_xr(chips_init_xr[i].addr, chips_init_xr[i].data); + write_xr(0x81, 0x12); + write_xr(0x82, 0x08); + write_xr(0x20, 0x00); + for (i = 0; i < N_ELTS(chips_init_sr); ++i) + write_sr(chips_init_sr[i].addr, chips_init_sr[i].data); + for (i = 0; i < N_ELTS(chips_init_gr); ++i) + write_gr(chips_init_gr[i].addr, chips_init_gr[i].data); + for (i = 0; i < N_ELTS(chips_init_ar); ++i) + write_ar(chips_init_ar[i].addr, chips_init_ar[i].data); + /* Enable 
video output in attribute index register */ + writeb(0x20, mmio_base + 0x780); + for (i = 0; i < N_ELTS(chips_init_cr); ++i) + write_cr(chips_init_cr[i].addr, chips_init_cr[i].data); + for (i = 0; i < N_ELTS(chips_init_fr); ++i) + write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); +} + +static struct fb_fix_screeninfo asiliantfb_fix __initdata = { + .id = "Asiliant 69000", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_PSEUDOCOLOR, + .accel = FB_ACCEL_NONE, + .line_length = 640, + .smem_len = 0x200000, /* 2MB */ +}; + +static struct fb_var_screeninfo asiliantfb_var __initdata = { + .xres = 640, + .yres = 480, + .xres_virtual = 640, + .yres_virtual = 480, + .bits_per_pixel = 8, + .red = { .length = 8 }, + .green = { .length = 8 }, + .blue = { .length = 8 }, + .height = -1, + .width = -1, + .vmode = FB_VMODE_NONINTERLACED, + .pixclock = 39722, + .left_margin = 48, + .right_margin = 16, + .upper_margin = 33, + .lower_margin = 10, + .hsync_len = 96, + .vsync_len = 2, +}; + +static void __init init_asiliant(struct fb_info *p, unsigned long addr) +{ + p->fix = asiliantfb_fix; + p->fix.smem_start = addr; + p->var = asiliantfb_var; + p->fbops = &asiliantfb_ops; + p->pseudo_palette = pseudo_palette; + p->flags = FBINFO_FLAG_DEFAULT; + + fb_alloc_cmap(&p->cmap, 256, 0); + + if (register_framebuffer(p) < 0) { + printk(KERN_ERR "C&T 69000 framebuffer failed to register\n"); + return; + } + + printk(KERN_INFO "fb%d: Asiliant 69000 frame buffer (%dK RAM detected)\n", + p->node, p->fix.smem_len / 1024); + + writeb(0xff, mmio_base + 0x78c); + chips_hw_init(p); +} + +static int __devinit +asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) +{ + struct fb_info *p = &asiliantfb_info; + unsigned long addr, size; + + if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) + return -ENODEV; + addr = pci_resource_start(dp, 0); + size = pci_resource_len(dp, 0); + if (addr == 0) + return -ENODEV; + if (p->screen_base != 0) + return -EBUSY; + if 
(!request_mem_region(addr, size, "asiliantfb")) + return -EBUSY; + + p->screen_base = ioremap(addr, 0x800000); + if (p->screen_base == NULL) { + release_mem_region(addr, size); + return -ENOMEM; + } + + pci_write_config_dword(dp, 4, 0x02800083); + writeb(3, addr + 0x400784); + + init_asiliant(p, addr); + + /* Clear the entire framebuffer */ + memset(p->screen_base, 0, 0x200000); + + pci_set_drvdata(dp, p); + return 0; +} + +static void __devexit asiliantfb_remove(struct pci_dev *dp) +{ + struct fb_info *p = pci_get_drvdata(dp); + + if (p != &asiliantfb_info || p->screen_base == NULL) + return; + unregister_framebuffer(p); + iounmap(p->screen_base); + p->screen_base = NULL; + release_mem_region(pci_resource_start(dp, 0), pci_resource_len(dp, 0)); +} + +static struct pci_device_id asiliantfb_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, asiliantfb_pci_tbl); + +static struct pci_driver asiliantfb_driver = { + .name = "asiliantfb", + .id_table = asiliantfb_pci_tbl, + .probe = asiliantfb_pci_init, + .remove = __devexit_p(asiliantfb_remove), +}; + +int __init asiliantfb_init(void) +{ + return pci_module_init(&asiliantfb_driver); +} + +static void __exit asiliantfb_exit(void) +{ + pci_unregister_driver(&asiliantfb_driver); +} + +MODULE_LICENSE("GPL"); diff -Nru a/drivers/video/aty/Makefile b/drivers/video/aty/Makefile --- a/drivers/video/aty/Makefile Wed Oct 22 10:40:04 2003 +++ b/drivers/video/aty/Makefile Wed Oct 22 10:40:04 2003 @@ -4,4 +4,3 @@ atyfb-y := atyfb_base.o mach64_accel.o atyfb-$(CONFIG_FB_ATY_GX) += mach64_gx.o atyfb-$(CONFIG_FB_ATY_CT) += mach64_ct.o mach64_cursor.o -atyfb-objs := $(atyfb-y) diff -Nru a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c --- a/drivers/video/aty/aty128fb.c Wed Oct 22 10:40:05 2003 +++ b/drivers/video/aty/aty128fb.c Wed Oct 22 10:40:05 2003 @@ -24,6 +24,10 @@ * Paul Mundt * - PCI hotplug * + * Jon Smirl + * - PCI ID update + 
* - replace ROM BIOS search + * * Based off of Geert's atyfb.c and vfb.c. * * TODO: @@ -43,6 +47,7 @@ #include #include +#include #include #include #include @@ -136,8 +141,25 @@ /* Chip generations */ enum { rage_128, + rage_128_pci, rage_128_pro, - rage_M3 + rage_128_pro_pci, + rage_M3, + rage_M3_pci, + rage_M4, + rage_128_ultra, +}; + +/* Must match above enum */ +static const char *r128_family[] __devinitdata = { + "AGP", + "PCI", + "PRO AGP", + "PRO PCI", + "M3 AGP", + "M3 PCI", + "M4 AGP", + "Ultra AGP", }; /* @@ -149,32 +171,100 @@ /* supported Rage128 chipsets */ static struct pci_device_id aty128_pci_tbl[] = { - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RE, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RF, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RI, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RK, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RL, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_Rage128_PD, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3_pci }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LF, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_MF, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M4 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_ML, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M4 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PA, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PC, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PD, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, + { PCI_VENDOR_ID_ATI, 
PCI_DEVICE_ID_ATI_RAGE128_PE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PR, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PG, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PH, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PJ, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PK, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PN, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PP, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PQ, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PS, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_U3, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_U1, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PU, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3 }, - { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LF, - 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PV, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PW, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PX, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RE, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RF, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RG, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RK, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SE, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SF, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SG, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SH, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SK, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SN, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TF, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TS, + PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, rage_128_ultra }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TT, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, + { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TU, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { 0, } }; @@ -250,14 +340,7 @@ .accel = FB_ACCEL_ATI_RAGE128, }; -#ifdef MODULE static char *mode __initdata = NULL; -#ifdef CONFIG_MTRR -static int nomtrr __initdata = 0; -#endif /* CONFIG_MTRR */ -#endif /* MODULE */ - -static char *mode_option __initdata = NULL; #ifdef CONFIG_PPC_PMAC static int default_vmode __initdata = VMODE_1024_768_60; @@ -349,7 +432,6 @@ * Interface used by the world */ int aty128fb_init(void); -int aty128fb_setup(char *options); static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); @@ -374,7 +456,7 @@ #if !defined(CONFIG_PPC) && !defined(__sparc__) static void __init aty128_get_pllinfo(struct aty128fb_par *par, void *bios); -static void __init *aty128_map_ROM(struct pci_dev *pdev); +static void __init *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); static void __init aty128_unmap_ROM(struct pci_dev *dev, void * rom); #endif static void aty128_timings(struct aty128fb_par *par); @@ -1410,61 +1492,6 @@ return 0; } -int __init -aty128fb_setup(char *options) -{ - char *this_opt; - - if (!options || !*options) - return 0; - - while ((this_opt = strsep(&options, ",")) != NULL) { -#ifdef CONFIG_PMAC_PBOOK - if (!strncmp(this_opt, "lcd:", 4)) { - default_lcd_on = simple_strtoul(this_opt+4, NULL, 0); - continue; - } else if (!strncmp(this_opt, "crt:", 4)) { - default_crt_on = simple_strtoul(this_opt+4, NULL, 0); - continue; - } -#endif -#ifdef CONFIG_MTRR - if(!strncmp(this_opt, "nomtrr", 6)) { - mtrr = 0; - continue; - } -#endif -#ifdef CONFIG_PPC_PMAC - /* vmode and cmode deprecated */ - if (!strncmp(this_opt, "vmode:", 6)) { - unsigned int vmode = simple_strtoul(this_opt+6, NULL, 0); - if (vmode > 0 && vmode <= VMODE_MAX) - default_vmode = vmode; - 
continue; - } else if (!strncmp(this_opt, "cmode:", 6)) { - unsigned int cmode = simple_strtoul(this_opt+6, NULL, 0); - switch (cmode) { - case 0: - case 8: - default_cmode = CMODE_8; - break; - case 15: - case 16: - default_cmode = CMODE_16; - break; - case 24: - case 32: - default_cmode = CMODE_32; - break; - } - continue; - } -#endif /* CONFIG_PPC_PMAC */ - mode_option = this_opt; - } - return 0; -} - /* * Initialisation @@ -1476,7 +1503,7 @@ struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; struct fb_var_screeninfo var; - char video_card[25]; + char video_card[DEVICE_NAME_SIZE]; u8 chip_rev; u32 dac; @@ -1486,43 +1513,13 @@ /* Get the chip revision */ chip_rev = (aty_ld_le32(CONFIG_CNTL) >> 16) & 0x1F; - switch (pdev->device) { - case PCI_DEVICE_ID_ATI_RAGE128_RE: - strcpy(video_card, "Rage128 RE (PCI)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_RF: - strcpy(video_card, "Rage128 RF (AGP)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_RK: - strcpy(video_card, "Rage128 RK (PCI)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_RL: - strcpy(video_card, "Rage128 RL (AGP)"); - break; - case PCI_DEVICE_ID_ATI_Rage128_PD: - strcpy(video_card, "Rage128 Pro PD (PCI)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_PF: - strcpy(video_card, "Rage128 Pro PF (AGP)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_PR: - strcpy(video_card, "Rage128 Pro PR (PCI)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_U3: - strcpy(video_card, "Rage128 Pro TR (AGP)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_U1: - strcpy(video_card, "Rage128 Pro TF (AGP)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_LE: - strcpy(video_card, "Rage Mobility M3 (PCI)"); - break; - case PCI_DEVICE_ID_ATI_RAGE128_LF: - strcpy(video_card, "Rage Mobility M3 (AGP)"); - break; - default: - return -ENODEV; - } + strcpy(video_card, "Rage128 XX "); + video_card[8] = ent->device >> 8; + video_card[9] = ent->device & 0xFF; + + /* range check to make sure */ + if (ent->driver_data < 
(sizeof(r128_family)/sizeof(char *))) + strncat(video_card, r128_family[ent->driver_data], sizeof(video_card)); printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev); @@ -1536,17 +1533,17 @@ /* fill in info */ info->fbops = &aty128fb_ops; info->flags = FBINFO_FLAG_DEFAULT; - + #ifdef CONFIG_PMAC_PBOOK par->lcd_on = default_lcd_on; par->crt_on = default_crt_on; #endif - + var = default_var; #ifdef CONFIG_PPC_PMAC if (_machine == _MACH_Pmac) { - if (mode_option) { - if (!mac_find_mode(&var, info, mode_option, 8)) + if (mode) { + if (!mac_find_mode(&var, info, mode, 8)) var = default_var; } else { if (default_vmode <= 0 || default_vmode > VMODE_MAX) @@ -1575,8 +1572,12 @@ if (machine_is_compatible("PowerBook3,2")) default_vmode = VMODE_1152_768_60; - if (default_cmode < CMODE_8 || default_cmode > CMODE_32) - default_cmode = CMODE_8; + if (default_cmode > 16) + default_cmode = CMODE_32; + else if (default_cmode > 8) + default_cmode = CMODE_16; + else + default_cmode = CMODE_8; if (mac_vmode_to_var(default_vmode, default_cmode, &var)) var = default_var; @@ -1584,9 +1585,10 @@ } else #endif /* CONFIG_PPC_PMAC */ { - if (fb_find_mode(&var, info, mode_option, NULL, 0, - &defaultmode, 8) == 0) - var = default_var; + if (mode) + if (fb_find_mode(&var, info, mode, NULL, + 0, &defaultmode, 8) == 0) + var = default_var; } var.accel_flags &= ~FB_ACCELF_TEXT; @@ -1614,7 +1616,7 @@ var.activate = FB_ACTIVATE_NOW; aty128_init_engine(par); - + if (register_framebuffer(info) < 0) return 0; @@ -1647,7 +1649,7 @@ unsigned long fb_addr, reg_addr; struct aty128fb_par *par; struct fb_info *info; - int err, size; + int err; #if !defined(CONFIG_PPC) && !defined(__sparc__) void *bios = NULL; #endif @@ -1675,17 +1677,13 @@ } /* We have the resources. 
Now virtualize them */ - size = sizeof(struct fb_info) + sizeof(struct aty128fb_par); - if (!(info = kmalloc(size, GFP_ATOMIC))) { + if (!(info = framebuffer_alloc(sizeof(struct aty128fb_par), &pdev->dev))) { printk(KERN_ERR "aty128fb: can't alloc fb_info_aty128\n"); goto err_free_mmio; } - memset(info, 0, size); + par = info->par; - par = (struct aty128fb_par *)(info + 1); info->pseudo_palette = par->pseudo_palette; - - info->par = par; info->fix = aty128fb_fix; /* Virtualize mmio region */ @@ -1716,7 +1714,7 @@ } #if !defined(CONFIG_PPC) && !defined(__sparc__) - if (!(bios = aty128_map_ROM(pdev))) + if (!(bios = aty128_map_ROM(pdev, par))) printk(KERN_INFO "aty128fb: BIOS not located, guessing timings.\n"); else { printk(KERN_INFO "aty128fb: Rage128 BIOS located at %lx\n", @@ -1776,47 +1774,61 @@ release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); - release_mem_region(pci_resource_start(pdev, 1), - pci_resource_len(pdev, 1)); release_mem_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); #ifdef CONFIG_PMAC_PBOOK if (info == aty128_fb) aty128_fb = NULL; #endif - kfree(info); + framebuffer_release(info); } #endif /* CONFIG_PCI */ /* PPC and Sparc cannot read video ROM */ #if !defined(CONFIG_PPC) && !defined(__sparc__) -static void * __init aty128_map_ROM(struct pci_dev *dev) +static void * __init aty128_map_ROM(struct pci_dev *dev, const struct aty128fb_par *par) { + struct resource *r; + void *rom; + // If this is a primary card, there is a shadow copy of the // ROM somewhere in the first meg. We will just ignore the copy // and use the ROM directly. + /* Fix from ATI for problem with Rage128 hardware not leaving ROM enabled */ + unsigned int temp; + temp = aty_ld_le32(RAGE128_MPP_TB_CONFIG); + temp &= 0x00ffffffu; + temp |= 0x04 << 24; + aty_st_le32(RAGE128_MPP_TB_CONFIG, temp); + temp = aty_ld_le32(RAGE128_MPP_TB_CONFIG); + // no need to search for the ROM, just ask the card where it is. 
- struct resource *r = &dev->resource[PCI_ROM_RESOURCE]; - unsigned char *addr; - + r = &dev->resource[PCI_ROM_RESOURCE]; + // assign the ROM an address if it doesn't have one - if (r->start == 0) + if (r->parent == NULL) pci_assign_resource(dev, PCI_ROM_RESOURCE); // enable if needed - if (!(r->flags & PCI_ROM_ADDRESS_ENABLE)) + if (!(r->flags & PCI_ROM_ADDRESS_ENABLE)) { pci_write_config_dword(dev, dev->rom_base_reg, r->start | PCI_ROM_ADDRESS_ENABLE); + r->flags |= PCI_ROM_ADDRESS_ENABLE; + } - addr = ioremap(r->start, r->end - r->start + 1); + rom = ioremap(r->start, r->end - r->start + 1); + if (!rom) { + printk(KERN_ERR "aty128fb: ROM failed to map\n"); + return NULL; + } // Very simple test to make sure it appeared - if (addr && (*addr != 0x55)) { - printk("aty128fb: Invalid ROM signature %x\n", *addr); - iounmap(addr); + if (readb(rom) != 0x55) { + printk(KERN_ERR "aty128fb: Invalid ROM signature %x should be 0x55\n", readb(rom)); + aty128_unmap_ROM(dev, rom); return NULL; } - return (void *)addr; + return rom; } static void __init aty128_unmap_ROM(struct pci_dev *dev, void * rom) @@ -1826,10 +1838,12 @@ iounmap(rom); - r->flags &= !PCI_ROM_ADDRESS_ENABLE; + r->flags &= ~PCI_ROM_ADDRESS_ENABLE; r->end -= r->start; r->start = 0; + /* This will disable and set address to unassigned */ pci_write_config_dword(dev, dev->rom_base_reg, 0); + release_resource(r); } static void __init @@ -2331,6 +2345,7 @@ return pci_module_init(&aty128fb_driver); } +#ifdef MODULE static void __exit aty128fb_exit(void) { #ifdef CONFIG_PMAC_PBOOK @@ -2338,14 +2353,32 @@ #endif pci_unregister_driver(&aty128fb_driver); } +#endif + +#ifdef MODULE +module_init(aty128fb_init); +module_exit(aty128fb_exit); +#endif MODULE_AUTHOR("(c)1999-2003 Brad Douglas "); MODULE_DESCRIPTION("FBDev driver for ATI Rage128 / Pro cards"); MODULE_LICENSE("GPL"); -MODULE_PARM(mode, "s"); +module_param(mode, charp, 0); MODULE_PARM_DESC(mode, "Specify resolution as \"x[-][@]\" "); #ifdef CONFIG_MTRR 
-MODULE_PARM(nomtrr, "i"); -MODULE_PARM_DESC(nomtrr, "Disable MTRR support (0 or 1=disabled) (default=0)"); +module_param_named(nomtrr, mtrr, invbool, 0); +MODULE_PARM_DESC(mtrr, "bool: Disable MTRR support (0 or 1=disabled) (default=0)"); +#endif +#ifdef CONFIG_PPC_PMAC +module_param_named(vmode, default_vmode, int, 0); +MODULE_PARM_DESC(default_vmode, "Deprecated: video mode int"); +module_param_named(cmode, default_cmode, int, 0); +MODULE_PARM_DESC(default_cmode, "Deprecated: color mode int"); +#endif +#ifdef CONFIG_PMAC_PBOOK +module_param_named(lcd, default_lcd_on, bool, 0); +MODULE_PARM_DESC(default_lcd_on, "bool: Default LCD on"); +module_param_named(crt, default_crt_on, bool, 0); +MODULE_PARM_DESC(default_crt_on, "bool: Default CRT on"); #endif diff -Nru a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h --- a/drivers/video/aty/atyfb.h Wed Oct 22 10:40:08 2003 +++ b/drivers/video/aty/atyfb.h Wed Oct 22 10:40:08 2003 @@ -55,18 +55,10 @@ /* * The hardware parameters for each card */ - -struct aty_cursor { - u8 bits[8][64]; - u8 mask[8][64]; - u8 *ram; -}; - struct atyfb_par { struct aty_cmap_regs *aty_cmap_regs; const struct aty_dac_ops *dac_ops; const struct aty_pll_ops *pll_ops; - struct aty_cursor *cursor; unsigned long ati_regbase; unsigned long clk_wr_offset; struct crtc crtc; @@ -237,10 +229,10 @@ * Hardware cursor support */ -extern struct aty_cursor *aty_init_cursor(struct fb_info *info); +extern int aty_init_cursor(struct fb_info *info); extern int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor); extern void aty_set_cursor_color(struct fb_info *info); -extern void aty_set_cursor_shape(struct fb_info *info); +extern void aty_set_cursor_shape(struct fb_info *info, u8 *src, u8 *dst, unsigned int width); /* * Hardware acceleration diff -Nru a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c --- a/drivers/video/aty/atyfb_base.c Wed Oct 22 10:40:08 2003 +++ b/drivers/video/aty/atyfb_base.c Wed Oct 22 10:40:08 2003 @@ -202,7 
+202,7 @@ .fb_fillrect = atyfb_fillrect, .fb_copyarea = atyfb_copyarea, .fb_imageblit = atyfb_imageblit, - .fb_cursor = soft_cursor, + .fb_cursor = atyfb_cursor, #ifdef __sparc__ .fb_mmap = atyfb_mmap, #endif @@ -1889,7 +1889,7 @@ #ifdef CONFIG_FB_ATY_CT if (curblink && M64_HAS(INTEGRATED)) - par->cursor = aty_init_cursor(info); + aty_init_cursor(info); #endif /* CONFIG_FB_ATY_CT */ info->var = var; @@ -2470,22 +2470,6 @@ } #endif /* CONFIG_ATARI */ -/* -#ifdef CONFIG_FB_ATY_CT - * Erase HW Cursor * - if (par->cursor && (info->currcon >= 0)) - atyfb_cursor(&fb_display[par->currcon], CM_ERASE, - par->cursor->pos.x, par->cursor->pos.y); -#endif * CONFIG_FB_ATY_CT * - -#ifdef CONFIG_FB_ATY_CT - * Install hw cursor * - if (par->cursor) { - aty_set_cursor_color(info); - aty_set_cursor_shape(info); - } -#endif * CONFIG_FB_ATY_CT */ - /* * Blank the display. */ @@ -2595,13 +2579,9 @@ iounmap((void *) par->ati_regbase); if (info->screen_base) iounmap((void *) info->screen_base); -#ifdef __BIG_ENDIAN - if (par->cursor && par->cursor->ram) - iounmap(par->cursor->ram); -#endif #endif - if (par->cursor) - kfree(par->cursor); + if (info->sprite.addr) + iounmap(info->sprite.addr); #ifdef __sparc__ if (par->mmap_map) kfree(par->mmap_map); diff -Nru a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c --- a/drivers/video/aty/mach64_cursor.c Wed Oct 22 10:40:09 2003 +++ b/drivers/video/aty/mach64_cursor.c Wed Oct 22 10:40:09 2003 @@ -1,12 +1,11 @@ - /* * ATI Mach64 CT/VT/GT/LT Cursor Support */ #include -#include #include #include +#include #include #include @@ -22,104 +21,70 @@ /* * Hardware Cursor support. 
*/ - -static const u8 cursor_pixel_map[2] = { 0, 15 }; -static const u8 cursor_color_map[2] = { 0, 0xff }; - static const u8 cursor_bits_lookup[16] = { - 0x00, 0x40, 0x10, 0x50, 0x04, 0x44, 0x14, 0x54, - 0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55 + 0x55, 0x15, 0x45, 0x05, 0x51, 0x11, 0x41, 0x01, + 0x54, 0x14, 0x44, 0x04, 0x50, 0x10, 0x40, 0x00 }; static const u8 cursor_mask_lookup[16] = { 0xaa, 0x2a, 0x8a, 0x0a, 0xa2, 0x22, 0x82, 0x02, 0xa8, 0x28, 0x88, 0x08, 0xa0, 0x20, 0x80, 0x00 -}; - -void aty_set_cursor_color(struct fb_info *info) -{ - struct atyfb_par *par = (struct atyfb_par *) info->par; - struct aty_cursor *c = par->cursor; - const u8 *pixel = cursor_pixel_map; /* ++Geert: Why?? */ - const u8 *red = cursor_color_map; - const u8 *green = cursor_color_map; - const u8 *blue = cursor_color_map; - u32 fg_color, bg_color; - - if (!c) - return; - -#ifdef __sparc__ - if (par->mmaped) - return; -#endif - fg_color = (u32) red[0] << 24; - fg_color |= (u32) green[0] << 16; - fg_color |= (u32) blue[0] << 8; - fg_color |= (u32) pixel[0]; - - bg_color = (u32) red[1] << 24; - bg_color |= (u32) green[1] << 16; - bg_color |= (u32) blue[1] << 8; - bg_color |= (u32) pixel[1]; - - wait_for_fifo(2, par); - aty_st_le32(CUR_CLR0, fg_color, par); - aty_st_le32(CUR_CLR1, bg_color, par); -} +}; -void aty_set_cursor_shape(struct fb_info *info) +void aty_set_cursor_shape(struct fb_info *info, u8 *src, u8 *dst, unsigned int width) { - struct atyfb_par *par = (struct atyfb_par *) info->par; - struct fb_cursor *cursor = &info->cursor; - struct aty_cursor *c = par->cursor; - u8 *ram, m, b; - int x, y; + int i, j, offset = info->sprite.scan_align - width; + u8 *mask = info->cursor.mask, m, b; - if (!c) - return; -#ifdef __sparc__ - if (par->mmaped) - return; -#endif - - ram = c->ram; - for (y = 0; y < cursor->image.height; y++) { - for (x = 0; x < cursor->image.width >> 2; x++) { - m = c->mask[x][y]; - b = c->bits[x][y]; - fb_writeb(cursor_mask_lookup[m >> 4] | - cursor_bits_lookup[(b 
& m) >> 4], ram++); - fb_writeb(cursor_mask_lookup[m & 0x0f] | - cursor_bits_lookup[(b & m) & 0x0f], - ram++); - } - for (; x < 8; x++) { - fb_writeb(0xaa, ram++); - fb_writeb(0xaa, ram++); + // Clear cursor image with 1010101010... + fb_memset(dst, 0xaa, 1024); + + for (i = 0; i < info->cursor.image.height; i++) { + for (j = 0; j < width; j++) { + b = *src++; + m = *mask++; + // Upper 4 bits of mask data + fb_writeb(cursor_mask_lookup[m >> 4 ] | + cursor_bits_lookup[(b & m) >> 4], dst++); + // Lower 4 bits of mask + fb_writeb(cursor_mask_lookup[m & 0x0f ] | + cursor_bits_lookup[(b & m) & 0x0f], dst++); } + dst += offset*2; } - fb_memset(ram, 0xaa, (64 - cursor->image.height) * 16); } -static void aty_set_cursor(struct fb_info *info) +int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct atyfb_par *par = (struct atyfb_par *) info->par; - struct fb_cursor *cursor = &info->cursor; - struct aty_cursor *c = par->cursor; u16 xoff, yoff; int x, y; - - if (!c) - return; - + #ifdef __sparc__ if (par->mmaped) - return; + return -EPERM; #endif - if (cursor->enable) { - x = cursor->image.dx - cursor->hot.x - info->var.xoffset; + /* Hide cursor */ + wait_for_fifo(1, par); + aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) & ~HWCURSOR_ENABLE, par); + + /* Set size */ + if (cursor->set & FB_CUR_SETSIZE) { + info->cursor.image.height = cursor->image.height; + info->cursor.image.width = cursor->image.width; + } + + /* Set hot spot */ + if (cursor->set & FB_CUR_SETHOT) + info->cursor.hot = cursor->hot; + + /* set position */ + if (cursor->set & FB_CUR_SETPOS) { + info->cursor.image.dx = cursor->image.dx; + info->cursor.image.dy = cursor->image.dy; + + x = info->cursor.image.dx - info->cursor.hot.x - info->var.xoffset; if (x < 0) { xoff = -x; x = 0; @@ -127,7 +92,7 @@ xoff = 0; } - y = cursor->image.dy - cursor->hot.y - info->var.yoffset; + y = info->cursor.image.dy - info->cursor.hot.y - info->var.yoffset; if (y < 0) { yoff = -y; y = 0; @@ -135,107 
+100,78 @@ yoff = 0; } + /* + * In doublescan mode, the cursor location also needs to be + * doubled. + */ + if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN) + y<<=1; wait_for_fifo(4, par); - aty_st_le32(CUR_OFFSET, (info->fix.smem_len >> 3) + (yoff << 1), - par); + aty_st_le32(CUR_OFFSET, (info->fix.smem_len >> 3) + (yoff << 1), par); aty_st_le32(CUR_HORZ_VERT_OFF, - ((u32) (64 - cursor->image.height + yoff) << 16) | xoff, + ((u32) (64 - info->cursor.image.height + yoff) << 16) | xoff, par); aty_st_le32(CUR_HORZ_VERT_POSN, ((u32) y << 16) | x, par); + } + + /* Set color map */ + if (cursor->set & FB_CUR_SETCMAP) { + u32 fg_idx, bg_idx, fg, bg; + + info->cursor.image.fg_color = cursor->image.fg_color; + info->cursor.image.bg_color = cursor->image.bg_color; + fg_idx = info->cursor.image.fg_color; + bg_idx = info->cursor.image.bg_color; + + fg = (info->cmap.red[fg_idx] << 24) | + (info->cmap.green[fg_idx] << 16) | + (info->cmap.blue[fg_idx] << 8) | 15; + + bg = (info->cmap.red[bg_idx] << 24) | + (info->cmap.green[bg_idx] << 16) | + (info->cmap.blue[bg_idx] << 8) | 15; + + wait_for_fifo(2, par); + aty_st_le32(CUR_CLR0, fg, par); + aty_st_le32(CUR_CLR1, bg, par); + } + + if (cursor->set & FB_CUR_SETSHAPE) + load_cursor_image(info); + + if (info->cursor.enable) { + wait_for_fifo(1, par); aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) | HWCURSOR_ENABLE, par); - } else { - wait_for_fifo(1, par); - aty_st_le32(GEN_TEST_CNTL, - aty_ld_le32(GEN_TEST_CNTL, - par) & ~HWCURSOR_ENABLE, par); - } - if (par->blitter_may_be_busy) - wait_for_idle(par); -} - -int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor) -{ - struct atyfb_par *par = (struct atyfb_par *) info->par; - struct aty_cursor *c = par->cursor; - - if (!c) - return -1; - -#ifdef __sparc__ - if (par->mmaped) - return 0; -#endif - - aty_set_cursor(info); - cursor->image.dx = info->cursor.image.dx; - cursor->image.dy = info->cursor.image.dy; - - aty_set_cursor(info); + } return 0; } -struct aty_cursor 
*__init aty_init_cursor(struct fb_info *info) +int __init aty_init_cursor(struct fb_info *info) { - struct aty_cursor *cursor; unsigned long addr; - cursor = kmalloc(sizeof(struct aty_cursor), GFP_ATOMIC); - if (!cursor) - return 0; - memset(cursor, 0, sizeof(*cursor)); - info->fix.smem_len -= PAGE_SIZE; #ifdef __sparc__ addr = (unsigned long) info->screen_base - 0x800000 + info->fix.smem_len; - cursor->ram = (u8 *) addr; + info->sprite.addr = (u8 *) addr; #else #ifdef __BIG_ENDIAN addr = info->fix.smem_start - 0x800000 + info->fix.smem_len; - cursor->ram = (u8 *) ioremap(addr, 1024); + info->sprite.addr = (u8 *) ioremap(addr, 1024); #else addr = (unsigned long) info->screen_base + info->fix.smem_len; - cursor->ram = (u8 *) addr; + info->sprite.addr = (u8 *) addr; #endif #endif - if (!cursor->ram) { - kfree(cursor); - return NULL; - } - return cursor; + if (!info->sprite.addr) + return -ENXIO; + info->sprite.size = PAGE_SIZE; + info->sprite.scan_align = 8; // Scratch pad 8 bytes wide + info->sprite.buf_align = 8; // *64; // and 64 lines tall. + info->sprite.flags = FB_PIXMAP_IO; + info->sprite.outbuf = aty_set_cursor_shape; + return 0; } -int atyfb_set_font(struct fb_info *info, int width, int height) -{ - struct atyfb_par *par = (struct atyfb_par *) info->par; - struct fb_cursor *cursor = &info->cursor; - struct aty_cursor *c = par->cursor; - int i, j; - - if (c) { - if (!width || !height) { - width = 8; - height = 16; - } - - cursor->hot.x = 0; - cursor->hot.y = 0; - cursor->image.width = width; - cursor->image.height = height; - - memset(c->bits, 0xff, sizeof(c->bits)); - memset(c->mask, 0, sizeof(c->mask)); - - for (i = 0, j = width; j >= 0; j -= 8, i++) { - c->mask[i][height - 2] = - (j >= 8) ? 0xff : (0xff << (8 - j)); - c->mask[i][height - 1] = - (j >= 8) ? 
0xff : (0xff << (8 - j)); - } - - aty_set_cursor_color(info); - aty_set_cursor_shape(info); - } - return 1; -} diff -Nru a/drivers/video/bw2.c b/drivers/video/bw2.c --- a/drivers/video/bw2.c Wed Oct 22 10:40:07 2003 +++ b/drivers/video/bw2.c Wed Oct 22 10:40:07 2003 @@ -119,6 +119,7 @@ unsigned long fbsize; struct sbus_dev *sdev; + struct fb_info *info; struct list_head list; }; @@ -284,95 +285,90 @@ } } -struct all_info { - struct fb_info info; - struct bw2_par par; - struct list_head list; -}; static LIST_HEAD(bw2_list); static void bw2_init_one(struct sbus_dev *sdev) { - struct all_info *all; + struct bw2_par *par; + struct fb_info *info; struct resource *resp; #ifdef CONFIG_SUN4 struct resource res; #endif int linebytes; - all = kmalloc(sizeof(*all), GFP_KERNEL); - if (!all) { + info = framebuffer_alloc(sizeof(struct bw2_par), NULL); + if (!info) { printk(KERN_ERR "bw2: Cannot allocate memory.\n"); return; } - memset(all, 0, sizeof(*all)); + par = info->par; + par->info = info; - INIT_LIST_HEAD(&all->list); + INIT_LIST_HEAD(&par->list); - spin_lock_init(&all->par.lock); - all->par.sdev = sdev; + spin_lock_init(&par->lock); + par->sdev = sdev; #ifdef CONFIG_SUN4 if (!sdev) { - all->par.physbase = sun4_bwtwo_physaddr; + par->physbase = sun4_bwtwo_physaddr; res.start = sun4_bwtwo_physaddr; res.end = res.start + BWTWO_REGISTER_OFFSET + sizeof(struct bw2_regs) - 1; res.flags = IORESOURCE_IO; resp = &res; - all->info.var.xres = all->info.var.xres_virtual = 1152; - all->info.var.yres = all->info.var.yres_virtual = 900; - all->info.bits_per_pixel = 1; + info->var.xres = info->var.xres_virtual = 1152; + info->info.var.yres = info->var.yres_virtual = 900; + info->info.bits_per_pixel = 1; linebytes = 1152 / 8; } else #else { if (!sdev) BUG(); - all->par.physbase = sdev->reg_addrs[0].phys_addr; + par->physbase = sdev->reg_addrs[0].phys_addr; resp = &sdev->resource[0]; - sbusfb_fill_var(&all->info.var, (sdev ? sdev->prom_node : 0), 1); + sbusfb_fill_var(&info->var, (sdev ? 
sdev->prom_node : 0), 1); linebytes = prom_getintdefault(sdev->prom_node, "linebytes", - all->info.var.xres); + info->var.xres); } #endif - all->par.regs = (struct bw2_regs *) + par->regs = (struct bw2_regs *) sbus_ioremap(resp, BWTWO_REGISTER_OFFSET, sizeof(struct bw2_regs), "bw2 regs"); if (sdev && !prom_getbool(sdev->prom_node, "width")) - bw2_do_default_mode(&all->par, &all->info, &linebytes); + bw2_do_default_mode(par, info, &linebytes); - all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); + par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); - all->info.flags = FBINFO_FLAG_DEFAULT; - all->info.fbops = &bw2_ops; + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &bw2_ops; #if defined(CONFIG_SPARC32) if (sdev) - all->info.screen_base = (char *) + info->screen_base = (char *) prom_getintdefault(sdev->prom_node, "address", 0); #endif - if (!all->info.screen_base) - all->info.screen_base = (char *) - sbus_ioremap(resp, 0, all->par.fbsize, "bw2 ram"); - all->info.currcon = -1; - all->info.par = &all->par; + if (!info->screen_base) + info->screen_base = (char *) + sbus_ioremap(resp, 0, par->fbsize, "bw2 ram"); - bw2_blank(0, &all->info); + bw2_blank(0, info); - bw2_init_fix(&all->info, linebytes); + bw2_init_fix(info, linebytes); - if (register_framebuffer(&all->info) < 0) { + if (register_framebuffer(info) < 0) { printk(KERN_ERR "bw2: Could not register framebuffer.\n"); - kfree(all); + kfree(info); return; } - list_add(&all->list, &bw2_list); + list_add(&par->list, &bw2_list); printk("bw2: bwtwo at %lx:%lx\n", (long) (sdev ? 
sdev->reg_addrs[0].which_io : 0), - (long) all->par.physbase); + (long) par->physbase); } int __init bw2_init(void) @@ -396,10 +392,10 @@ struct list_head *pos, *tmp; list_for_each_safe(pos, tmp, &bw2_list) { - struct all_info *all = list_entry(pos, typeof(*all), list); + struct bw2_par *par = list_entry(pos, typeof(*par), list); - unregister_framebuffer(&all->info); - kfree(all); + unregister_framebuffer(par->info); + framebuffer_release(par->info); } } diff -Nru a/drivers/video/cg14.c b/drivers/video/cg14.c --- a/drivers/video/cg14.c Wed Oct 22 10:40:09 2003 +++ b/drivers/video/cg14.c Wed Oct 22 10:40:09 2003 @@ -204,6 +204,7 @@ int mode; int ramsize; struct sbus_dev *sdev; + struct fb_info *info; struct list_head list; }; @@ -386,16 +387,12 @@ { 0, 0, 0 } }; -struct all_info { - struct fb_info info; - struct cg14_par par; - struct list_head list; -}; static LIST_HEAD(cg14_list); static void cg14_init_one(struct sbus_dev *sdev, int node, int parent_node) { - struct all_info *all; + struct fb_info *info; + struct cg14_par *par; unsigned long phys, rphys; u32 bases[6]; int is_8mb, linebytes, i; @@ -413,70 +410,71 @@ } } - all = kmalloc(sizeof(*all), GFP_KERNEL); - if (!all) { + info = framebuffer_alloc(sizeof(struct cg14_par), NULL); + if (!info) { printk(KERN_ERR "cg14: Cannot allocate memory.\n"); return; } - memset(all, 0, sizeof(*all)); + par = info->par; + par->info = info; - INIT_LIST_HEAD(&all->list); + INIT_LIST_HEAD(&par->list); - spin_lock_init(&all->par.lock); + spin_lock_init(&par->lock); - sbusfb_fill_var(&all->info.var, node, 8); + sbusfb_fill_var(&info->var, node, 8); linebytes = prom_getintdefault(sdev->prom_node, "linebytes", - all->info.var.xres); - all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); + info->var.xres); + par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); - all->par.sdev = sdev; + par->sdev = sdev; if (sdev) { rphys = sdev->reg_addrs[0].phys_addr; - all->par.physbase = phys = sdev->reg_addrs[1].phys_addr; - 
all->par.iospace = sdev->reg_addrs[0].which_io; + par->physbase = phys = sdev->reg_addrs[1].phys_addr; + par->iospace = sdev->reg_addrs[0].which_io; - all->par.regs = (struct cg14_regs *) + par->regs = (struct cg14_regs *) sbus_ioremap(&sdev->resource[0], 0, sizeof(struct cg14_regs), "cg14 regs"); - all->par.clut = (struct cg14_clut *) + par->clut = (struct cg14_clut *) sbus_ioremap(&sdev->resource[0], CG14_CLUT1, sizeof(struct cg14_clut), "cg14 clut"); - all->par.cursor = (struct cg14_cursor *) + par->cursor = (struct cg14_cursor *) sbus_ioremap(&sdev->resource[0], CG14_CURSORREGS, sizeof(struct cg14_cursor), "cg14 cursor"); - all->info.screen_base = (char *) + info->screen_base = (char *) sbus_ioremap(&sdev->resource[1], 0, - all->par.fbsize, "cg14 ram"); + par->fbsize, "cg14 ram"); } else { rphys = __get_phys(bases[0]); - all->par.physbase = phys = __get_phys(bases[1]); - all->par.iospace = __get_iospace(bases[0]); - all->par.regs = (struct cg14_regs *)(unsigned long)bases[0]; - all->par.clut = (struct cg14_clut *)((unsigned long)bases[0] + + par->physbase = phys = __get_phys(bases[1]); + par->iospace = __get_iospace(bases[0]); + par->regs = (struct cg14_regs *)(unsigned long)bases[0]; + par->clut = (struct cg14_clut *)((unsigned long)bases[0] + CG14_CLUT1); - all->par.cursor = + par->cursor = (struct cg14_cursor *)((unsigned long)bases[0] + CG14_CURSORREGS); - all->info.screen_base = (char *)(unsigned long)bases[1]; + info->screen_base = (char *)(unsigned long)bases[1]; } prom_getproperty(node, "reg", (char *) &bases[0], sizeof(bases)); is_8mb = (bases[5] == 0x800000); - if (sizeof(all->par.mmap_map) != sizeof(__cg14_mmap_map)) { + if (sizeof(par->mmap_map) != sizeof(__cg14_mmap_map)) { extern void __cg14_mmap_sized_wrongly(void); __cg14_mmap_sized_wrongly(); } - memcpy(&all->par.mmap_map, &__cg14_mmap_map, sizeof(all->par.mmap_map)); + memcpy(&par->mmap_map, &__cg14_mmap_map, sizeof(par->mmap_map)); for (i = 0; i < CG14_MMAP_ENTRIES; i++) { - struct 
sbus_mmap_map *map = &all->par.mmap_map[i]; + struct sbus_mmap_map *map = &par->mmap_map[i]; if (!map->size) break; @@ -488,35 +486,33 @@ map->size *= 2; } - all->par.mode = MDI_8_PIX; - all->par.ramsize = (is_8mb ? 0x800000 : 0x400000); + par->mode = MDI_8_PIX; + par->ramsize = (is_8mb ? 0x800000 : 0x400000); - all->info.flags = FBINFO_FLAG_DEFAULT; - all->info.fbops = &cg14_ops; - all->info.currcon = -1; - all->info.par = &all->par; + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &cg14_ops; - __cg14_reset(&all->par); + __cg14_reset(par); - if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { + if (fb_alloc_cmap(&info->cmap, 256, 0)) { printk(KERN_ERR "cg14: Could not allocate color map.\n"); - kfree(all); + kfree(info); return; } - cg14_init_fix(&all->info, linebytes); + cg14_init_fix(info, linebytes); - if (register_framebuffer(&all->info) < 0) { + if (register_framebuffer(info) < 0) { printk(KERN_ERR "cg14: Could not register framebuffer.\n"); - fb_dealloc_cmap(&all->info.cmap); - kfree(all); + fb_dealloc_cmap(&info->cmap); + kfree(info); return; } - list_add(&all->list, &cg14_list); + list_add(&par->list, &cg14_list); printk("cg14: cgfourteen at %lx:%lx\n", - all->par.physbase, all->par.iospace); + par->physbase, par->iospace); } @@ -552,11 +548,13 @@ struct list_head *pos, *tmp; list_for_each_safe(pos, tmp, &cg14_list) { - struct all_info *all = list_entry(pos, typeof(*all), list); + struct cg14_par *par = list_entry(pos, typeof(*par), list); + + unregister_framebuffer(par->info); + + fb_dealloc_cmap(&par->info->cmap); - unregister_framebuffer(&all->info); - fb_dealloc_cmap(&all->info.cmap); - kfree(all); + framebuffer_release(par->info); } } diff -Nru a/drivers/video/cg3.c b/drivers/video/cg3.c --- a/drivers/video/cg3.c Wed Oct 22 10:40:08 2003 +++ b/drivers/video/cg3.c Wed Oct 22 10:40:08 2003 @@ -122,6 +122,7 @@ unsigned long fbsize; struct sbus_dev *sdev; + struct fb_info *info; struct list_head list; }; @@ -354,80 +355,75 @@ } } -struct all_info { - struct 
fb_info info; - struct cg3_par par; - struct list_head list; -}; static LIST_HEAD(cg3_list); static void cg3_init_one(struct sbus_dev *sdev) { - struct all_info *all; + struct fb_info *info; + struct cg3_par *par; int linebytes; - all = kmalloc(sizeof(*all), GFP_KERNEL); - if (!all) { + info = framebuffer_alloc(sizeof(struct cg3_par), NULL); + if (!info) { printk(KERN_ERR "cg3: Cannot allocate memory.\n"); return; } - memset(all, 0, sizeof(*all)); + par = info->par; + par->info = info; - INIT_LIST_HEAD(&all->list); + INIT_LIST_HEAD(&par->list); - spin_lock_init(&all->par.lock); - all->par.sdev = sdev; + spin_lock_init(&par->lock); + par->sdev = sdev; - all->par.physbase = sdev->reg_addrs[0].phys_addr; + par->physbase = sdev->reg_addrs[0].phys_addr; - sbusfb_fill_var(&all->info.var, sdev->prom_node, 8); + sbusfb_fill_var(&info->var, sdev->prom_node, 8); if (!strcmp(sdev->prom_name, "cgRDI")) - all->par.flags |= CG3_FLAG_RDI; - if (all->par.flags & CG3_FLAG_RDI) - cg3_rdi_maybe_fixup_var(&all->info.var, sdev); + par->flags |= CG3_FLAG_RDI; + if (par->flags & CG3_FLAG_RDI) + cg3_rdi_maybe_fixup_var(&info->var, sdev); linebytes = prom_getintdefault(sdev->prom_node, "linebytes", - all->info.var.xres); - all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); + info->var.xres); + par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); - all->par.regs = (struct cg3_regs *) + par->regs = (struct cg3_regs *) sbus_ioremap(&sdev->resource[0], CG3_REGS_OFFSET, sizeof(struct cg3_regs), "cg3 regs"); - all->info.flags = FBINFO_FLAG_DEFAULT; - all->info.fbops = &cg3_ops; + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &cg3_ops; #ifdef CONFIG_SPARC32 - all->info.screen_base = (char *) + info->screen_base = (char *) prom_getintdefault(sdev->prom_node, "address", 0); #endif - if (!all->info.screen_base) - all->info.screen_base = (char *) + if (!info->screen_base) + info->screen_base = (char *) sbus_ioremap(&sdev->resource[0], CG3_RAM_OFFSET, - all->par.fbsize, "cg3 ram"); - 
all->info.currcon = -1; - all->info.par = &all->par; + par->fbsize, "cg3 ram"); - cg3_blank(0, &all->info); + cg3_blank(0, info); if (!prom_getbool(sdev->prom_node, "width")) - cg3_do_default_mode(&all->par); + cg3_do_default_mode(par); - if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { + if (fb_alloc_cmap(&info->cmap, 256, 0)) { printk(KERN_ERR "cg3: Could not allocate color map.\n"); - kfree(all); + kfree(info); return; } - cg3_init_fix(&all->info, linebytes); + cg3_init_fix(info, linebytes); - if (register_framebuffer(&all->info) < 0) { + if (register_framebuffer(info) < 0) { printk(KERN_ERR "cg3: Could not register framebuffer.\n"); - fb_dealloc_cmap(&all->info.cmap); - kfree(all); + fb_dealloc_cmap(&info->cmap); + kfree(info); return; } - list_add(&all->list, &cg3_list); + list_add(&par->list, &cg3_list); printk("cg3: %s at %lx:%lx\n", sdev->prom_name, @@ -454,11 +450,12 @@ struct list_head *pos, *tmp; list_for_each_safe(pos, tmp, &cg3_list) { - struct all_info *all = list_entry(pos, typeof(*all), list); + struct cg3_par *par = list_entry(pos, typeof(*par), list); + + unregister_framebuffer(par->info); + fb_dealloc_cmap(&par->info->cmap); - unregister_framebuffer(&all->info); - fb_dealloc_cmap(&all->info.cmap); - kfree(all); + framebuffer_release(par->info); } } diff -Nru a/drivers/video/cg6.c b/drivers/video/cg6.c --- a/drivers/video/cg6.c Wed Oct 22 10:40:04 2003 +++ b/drivers/video/cg6.c Wed Oct 22 10:40:04 2003 @@ -263,6 +263,7 @@ unsigned long fbsize; struct sbus_dev *sdev; + struct fb_info *info; struct list_head list; }; @@ -630,94 +631,89 @@ sbus_writel(tmp, &par->bt->control); } -struct all_info { - struct fb_info info; - struct cg6_par par; - struct list_head list; -}; static LIST_HEAD(cg6_list); static void cg6_init_one(struct sbus_dev *sdev) { - struct all_info *all; + struct fb_info *info; + struct cg6_par *par; int linebytes; - all = kmalloc(sizeof(*all), GFP_KERNEL); - if (!all) { + info = framebuffer_alloc(sizeof(struct cg6_par), NULL); + if 
(!info) { printk(KERN_ERR "cg6: Cannot allocate memory.\n"); return; } - memset(all, 0, sizeof(*all)); + par = info->par; + par->info = info; - INIT_LIST_HEAD(&all->list); + INIT_LIST_HEAD(&par->list); - spin_lock_init(&all->par.lock); - all->par.sdev = sdev; + spin_lock_init(&par->lock); + par->sdev = sdev; - all->par.physbase = sdev->reg_addrs[0].phys_addr; + par->physbase = sdev->reg_addrs[0].phys_addr; - sbusfb_fill_var(&all->info.var, sdev->prom_node, 8); + sbusfb_fill_var(&info->var, sdev->prom_node, 8); linebytes = prom_getintdefault(sdev->prom_node, "linebytes", - all->info.var.xres); - all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); + info->var.xres); + par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); if (prom_getbool(sdev->prom_node, "dblbuf")) - all->par.fbsize *= 4; + par->fbsize *= 4; - all->par.fbc = (struct cg6_fbc *) + par->fbc = (struct cg6_fbc *) sbus_ioremap(&sdev->resource[0], CG6_FBC_OFFSET, 4096, "cgsix fbc"); - all->par.tec = (struct cg6_tec *) + par->tec = (struct cg6_tec *) sbus_ioremap(&sdev->resource[0], CG6_TEC_OFFSET, sizeof(struct cg6_tec), "cgsix tec"); - all->par.thc = (struct cg6_thc *) + par->thc = (struct cg6_thc *) sbus_ioremap(&sdev->resource[0], CG6_THC_OFFSET, sizeof(struct cg6_thc), "cgsix thc"); - all->par.bt = (struct bt_regs *) + par->bt = (struct bt_regs *) sbus_ioremap(&sdev->resource[0], CG6_BROOKTREE_OFFSET, sizeof(struct bt_regs), "cgsix dac"); - all->par.fhc = (u32 *) + par->fhc = (u32 *) sbus_ioremap(&sdev->resource[0], CG6_FHC_OFFSET, sizeof(u32), "cgsix fhc"); - all->info.flags = FBINFO_FLAG_DEFAULT; - all->info.fbops = &cg6_ops; + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &cg6_ops; #ifdef CONFIG_SPARC32 - all->info.screen_base = (char *) + info->screen_base = (char *) prom_getintdefault(sdev->prom_node, "address", 0); #endif - if (!all->info.screen_base) - all->info.screen_base = (char *) + if (!info->screen_base) + info->screen_base = (char *) sbus_ioremap(&sdev->resource[0], 
CG6_RAM_OFFSET, - all->par.fbsize, "cgsix ram"); - all->info.currcon = -1; - all->info.par = &all->par; + par->fbsize, "cgsix ram"); - all->info.var.accel_flags = FB_ACCELF_TEXT; + info->var.accel_flags = FB_ACCELF_TEXT; - cg6_bt_init(&all->par); - cg6_chip_init(&all->info); - cg6_blank(0, &all->info); + cg6_bt_init(par); + cg6_chip_init(info); + cg6_blank(0, info); - if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { + if (fb_alloc_cmap(&info->cmap, 256, 0)) { printk(KERN_ERR "cg6: Could not allocate color map.\n"); - kfree(all); + kfree(info); return; } - cg6_init_fix(&all->info, linebytes); + cg6_init_fix(info, linebytes); - if (register_framebuffer(&all->info) < 0) { + if (register_framebuffer(info) < 0) { printk(KERN_ERR "cg6: Could not register framebuffer.\n"); - fb_dealloc_cmap(&all->info.cmap); - kfree(all); + fb_dealloc_cmap(&info->cmap); + kfree(info); return; } - list_add(&all->list, &cg6_list); + list_add(&par->list, &cg6_list); printk("cg6: CGsix [%s] at %lx:%lx\n", - all->info.fix.id, + info->fix.id, (long) sdev->reg_addrs[0].which_io, (long) sdev->reg_addrs[0].phys_addr); } @@ -741,11 +737,12 @@ struct list_head *pos, *tmp; list_for_each_safe(pos, tmp, &cg6_list) { - struct all_info *all = list_entry(pos, typeof(*all), list); + struct cg6_par *par = list_entry(pos, typeof(*par), list); + + unregister_framebuffer(par->info); + fb_dealloc_cmap(&par->info->cmap); - unregister_framebuffer(&all->info); - fb_dealloc_cmap(&all->info.cmap); - kfree(all); + framebuffer_release(par->info); } } diff -Nru a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c --- a/drivers/video/chipsfb.c Wed Oct 22 10:40:05 2003 +++ b/drivers/video/chipsfb.c Wed Oct 22 10:40:05 2003 @@ -85,7 +85,7 @@ /* * Exported functions */ -int chips_init(void); +int chipsfb_init(void); static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *); static int chipsfb_check_var(struct fb_var_screeninfo *var, @@ -460,7 +460,7 @@ .remove = __devexit_p(chipsfb_remove), }; -int __init 
chips_init(void) +int __init chipsfb_init(void) { return pci_module_init(&chipsfb_driver); } diff -Nru a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c --- a/drivers/video/cirrusfb.c Wed Oct 22 10:40:09 2003 +++ b/drivers/video/cirrusfb.c Wed Oct 22 10:40:09 2003 @@ -2787,6 +2787,7 @@ fb_info->gen.info.switch_con = &fbgen_switch; fb_info->gen.info.updatevar = &fbgen_update_var; fb_info->gen.info.flags = FBINFO_FLAG_DEFAULT; + fb_info->gen.info.dev = fb_info->pdev; for (j = 0; j < 256; j++) { if (j < 16) { diff -Nru a/drivers/video/console/Makefile b/drivers/video/console/Makefile --- a/drivers/video/console/Makefile Wed Oct 22 10:40:05 2003 +++ b/drivers/video/console/Makefile Wed Oct 22 10:40:05 2003 @@ -3,18 +3,16 @@ # Rewritten to use lists instead of if-statements. # Font handling -font-objs := fonts.o +font-y := fonts.o -font-objs-$(CONFIG_FONT_SUN8x16) += font_sun8x16.o -font-objs-$(CONFIG_FONT_SUN12x22) += font_sun12x22.o -font-objs-$(CONFIG_FONT_8x8) += font_8x8.o -font-objs-$(CONFIG_FONT_8x16) += font_8x16.o -font-objs-$(CONFIG_FONT_6x11) += font_6x11.o -font-objs-$(CONFIG_FONT_PEARL_8x8) += font_pearl_8x8.o -font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o -font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o - -font-objs += $(font-objs-y) +font-$(CONFIG_FONT_SUN8x16) += font_sun8x16.o +font-$(CONFIG_FONT_SUN12x22) += font_sun12x22.o +font-$(CONFIG_FONT_8x8) += font_8x8.o +font-$(CONFIG_FONT_8x16) += font_8x16.o +font-$(CONFIG_FONT_6x11) += font_6x11.o +font-$(CONFIG_FONT_PEARL_8x8) += font_pearl_8x8.o +font-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o +font-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o # Each configuration option enables a list of files. 
@@ -28,11 +26,16 @@ obj-$(CONFIG_FB_STI) += sticore.o +obj-$(CONFIG_LOGO_ELPP) += elpp.o symbols_16x16.o + # Files generated that shall be removed upon make clean clean-files := promcon_tbl.c - -$(obj)/promcon_tbl.c: $(src)/prom.uni - $(objtree)/scripts/conmakehash $< | \ + +quiet_cmd_promtbl = GEN $@ + cmd_promtbl = scripts/conmakehash $< | \ sed -e '/#include <[^>]*>/p' -e 's/types/init/' \ -e 's/dfont\(_uni.*\]\)/promfont\1 __initdata/' > $@ + +$(obj)/promcon_tbl.c: $(src)/prom.uni + $(call cmd,promtbl) diff -Nru a/drivers/video/console/elpp.c b/drivers/video/console/elpp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/video/console/elpp.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,470 @@ + +/* + * The Enhanced Linux Progress Patch + * by Prasad , IIIT - Hyderabad, INDIA + * + * drivers/video/console/elpp.c ELPP implementation file + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "fbcon.h" +#include "elpp.h" + +extern const struct linux_logo linux_progress_logo; + +#ifndef BAR_BG_COLOR +#define BAR_BG_COLOR {0x0000,0x0000,0xffff} /* Blue */ +#endif + +#ifndef BAR_FG_COLOR +#define BAR_FG_COLOR {0xffff,0x0000,0x0000} /* Red */ +#endif + +#ifndef TEXT_BG_COLOR +#define TEXT_BG_COLOR {0x0000,0x4444,0x0000} /* Dark Green */ +#endif + +#ifndef TEXT_FG_COLOR +#define TEXT_FG_COLOR {0xffff,0xffff,0xffff} /* White */ +#endif + +#ifndef ERROR_BG_COLOR +#define ERROR_BG_COLOR {0x4444,0x0000,0x0000} /* Dark Red */ +#endif + +#ifndef WARN_BG_COLOR +#define WARN_BG_COLOR {0x0000,0x0000,0x4444} /* Dark Blue */ +#endif + +#ifndef ELPP_BAR_LENGTH +#define ELPP_BAR_LENGTH (linux_progress_logo.width) /* Width of logo */ +#endif + +#ifndef ELPP_BAR_WIDTH +#define ELPP_BAR_WIDTH 10 +#endif + +#ifndef ELPP_BAR_X +#define ELPP_BAR_X -1 /* val<0 => align below logo */ +#endif + +#ifndef ELPP_BAR_Y +#define ELPP_BAR_Y -1 /* val<0 => bottom of logo */ +#endif + +#ifndef ELPP_MSG_X +#define ELPP_MSG_X -1 /* val<0 => align below logo */ +#endif + +#ifndef ELPP_MSG_Y +#define ELPP_MSG_Y -1 /* val<0 => bottom of logo */ +#endif + +#ifndef ELPP_MSG_LENGTH +#define ELPP_MSG_LENGTH 75 +#endif + +#ifndef ELPP_NUM_MSGS +#define ELPP_NUM_MSGS 5 /* Number of messages */ +#endif + +#define MSG_PREV \ + (messages[(msg_position - 1 + ELPP_NUM_MSGS) % ELPP_NUM_MSGS]) + +#define MSG_NEXT \ + (messages[(msg_position + 1) % ELPP_NUM_MSGS]) + +#define MSG_CUR \ + (messages[(msg_position + ELPP_NUM_MSGS) % ELPP_NUM_MSGS]) + +/* We store the previous messages in this structure */ +struct elpp_message { + char text[ELPP_MSG_LENGTH]; + short success; + short flag; +}; + +/* Setting up the proc interface */ 
+static ssize_t elpp_write(struct file *, const char *, size_t , loff_t *); +int elpp_permissions(struct inode *, int, struct nameidata *); + +static struct proc_dir_entry *elpp_proc_entry = NULL; +static struct elpp_message messages[ELPP_NUM_MSGS] = {{"\0",0,0}}; +static short msg_position = 0; +static short elpp_booting = 1; +static int barX=0, barY=0, textX=0, textY=0; + +static const u16 colors[][3] = { {0x0000,0x0000,0x0000}, + BAR_FG_COLOR, + BAR_BG_COLOR, + TEXT_FG_COLOR, + TEXT_BG_COLOR, + ERROR_BG_COLOR, + WARN_BG_COLOR, + {0xffff,0xffff,0xffff}, + {0x0000,0xffff,0x0000}, + {0xffff,0x0000,0x0000}, + {0xffff,0xffff,0x0000}}; + +static struct file_operations elpp_file_operations = { + .owner = THIS_MODULE, + .write = elpp_write +}; + +static struct inode_operations elpp_inode_operations = { + .lookup = &elpp_file_operations, + .permission = elpp_permissions +}; + +/* Setup the progress bar */ +void __init elpp_setup(void) +{ + struct fb_info *info = registered_fb[(int) con2fb_map[fg_console]]; + struct font_desc *font = get_default_font(info->var.xres, info->var.yres); + + /* Calculate the bar location */ + if( ELPP_BAR_Y < 0 ) + barY = (info->var.yres + linux_progress_logo.height) / 2 - + (font->height * ELPP_NUM_MSGS) - ELPP_BAR_WIDTH; + else + barY = ELPP_BAR_Y + (info->var.yres - linux_progress_logo.height)/2; + + if( ELPP_BAR_X < 0 ) + barX = (info->var.xres - linux_progress_logo.width)/2; + else + barX = ELPP_BAR_X + (info->var.xres - linux_progress_logo.width)/2; + + /* Calculate the text location */ + if( ELPP_MSG_X < 0 ) + textX = (info->var.xres - linux_progress_logo.width)/2; + else + textX = ELPP_MSG_X + (info->var.xres - linux_progress_logo.width)/2; + + if( ELPP_MSG_Y < 0 ) + textY = (info->var.yres + linux_progress_logo.height) / 2 - + (font->height * ELPP_NUM_MSGS); + else + textY = ELPP_MSG_Y + (info->var.yres - linux_progress_logo.height)/2; + +} + +/* Registers an entry in the /proc directory */ +void __init elpp_register(void) +{ + 
elpp_proc_entry = create_proc_entry("progress", + S_IFREG | S_IRUGO | S_IWUSR, &proc_root ); + if( elpp_proc_entry == NULL ) + { + printk(" Unable to create proc entry.\n"); + return; + } + + elpp_proc_entry->owner = THIS_MODULE; + elpp_proc_entry->proc_iops = &elpp_inode_operations; + elpp_proc_entry->write_proc = elpp_write; + + elpp_setup(); +} + +/* ASCII to Integer... needed for progress */ +static int elpp_atoi(const char *name) +{ + int val = 0; + + for (;; name++) { + switch (*name) { + case '0'...'9': + val = 10*val+(*name-'0'); + break; + default: + return val; + } + } +} + +/* Parse the message passed from userspace */ +static int elpp_parse(const char *str) +{ + int progress = -1, success = 0; + char *msg_text; + + if( str[0] == '!' ) { + + /* Its a command... */ + if( str[1] == 'c' ) { + + int i; + for(i=0; i 100)? 100: progress; + return progress; +} + +/* Based on acccel_putc in fbcon.c */ +static void elpp_render_char(struct fb_info *info, struct font_desc *font, + int ch, int xx, int yy, int fgcol, int bgcol) +{ + struct fb_image image; + unsigned short charmask = 0xff; + unsigned int scan_align = info->pixmap.scan_align - 1; + unsigned int buf_align = info->pixmap.buf_align - 1; + unsigned int size, pitch; + unsigned int width = (font->width + 7) >> 3; + u8 *src, *dst; + + image.dx = xx; + image.dy = yy; + image.width = font->width; + image.height = font->height; + image.fg_color = fgcol; + image.bg_color = bgcol; + image.depth = 1; + + pitch = width + scan_align; + pitch &= ~scan_align; + size = pitch * font->height; + size += buf_align; + size &= ~buf_align; + dst = info->pixmap.addr + fb_get_buffer_offset(info, size); + image.data = dst; + src = font->data + (ch & charmask) * font->height * width; + + move_buf_aligned(info, dst, src, pitch, width, image.height); + info->fbops->fb_imageblit(info, &image); +} + +/* Show the symbol */ +static void elpp_showsymbol(int ch, struct fb_info *info, + int x, int y, int bgcol) +{ + struct font_desc *font 
= &symbols_16x16; + + if( ch < 1 || ch > 3 ) return; + + elpp_render_char( info, font, (ch - 1) * 2, x, y, 7+ch, bgcol); + elpp_render_char( info, font, (ch - 1) * 2 + 1, x+8, y, 7+ch, bgcol); +} + +/* Show the messages */ +static void elpp_show_messages(struct fb_info *info) +{ + struct font_desc *font = get_default_font(info->var.xres, info->var.yres); + int i,counter=0; + struct fb_fillrect rect; + + rect.dy = textY; + rect.dx = textX; + rect.width = (ELPP_MSG_X < 0)? + ELPP_BAR_LENGTH : + ELPP_MSG_LENGTH * font->width; + rect.height = font->height * ELPP_NUM_MSGS; + rect.rop = 0; + rect.color = 4; + info->fbops->fb_fillrect(info, &rect); + + for( i = msg_position?msg_position-1:ELPP_NUM_MSGS-1, counter = 0 ; + counter < ELPP_NUM_MSGS; + i = i?i-1:ELPP_NUM_MSGS-1, counter++ ) { + int j; + int bgcol; + + rect.dy = textY + font->height * (ELPP_NUM_MSGS-counter-1); + rect.height = font->height; + + /* Set the background for text */ + switch(messages[i].success) { + default: + case 1: + rect.color = 4; + bgcol = 4; + break; + case 2: + rect.color = 5; + bgcol = 5; + break; + case 3: + rect.color = 6; + bgcol = 6; + break; + } + info->fbops->fb_fillrect(info, &rect); + + if(messages[i].flag == 0) + break; + + /* Show the text now */ + for( j = 0; j < strlen(messages[i].text); j++ ) { + elpp_render_char( info, font, + messages[i].text[j], textX + font->width * j, + textY + font->height * (ELPP_NUM_MSGS-counter-1), + 3, bgcol ); + } + + /* Show the bitmap for success/failure/warning */ + elpp_showsymbol(messages[i].success, info, + textX + (font->width * (ELPP_MSG_LENGTH - 3)), + textY + font->height * (ELPP_NUM_MSGS - counter - 1), bgcol ); + } +} + +/* Update the contents on the screen */ +static void elpp_updatescreen(int progress) +{ + struct fb_info *info = registered_fb[(int) con2fb_map[fg_console]]; + struct fb_fillrect rect; + + /* Create the colors. 
TODO: Find an efficient way */ + info->fbops->fb_setcolreg(1, colors[1][0], colors[1][1], + colors[1][2], 0xffff, info); + info->fbops->fb_setcolreg(2, colors[2][0], colors[2][1], + colors[2][2], 0xffff, info); + info->fbops->fb_setcolreg(3, colors[3][0], colors[3][1], + colors[3][2], 0xffff, info); + info->fbops->fb_setcolreg(4, colors[4][0], colors[4][1], + colors[4][2], 0xffff, info); + info->fbops->fb_setcolreg(5, colors[5][0], colors[5][1], + colors[5][2], 0xffff, info); + info->fbops->fb_setcolreg(6, colors[6][0], colors[6][1], + colors[6][2], 0xffff, info); + info->fbops->fb_setcolreg(7, colors[7][0], colors[7][1], + colors[7][2], 0xffff, info); + info->fbops->fb_setcolreg(8, colors[8][0], colors[8][1], + colors[8][2], 0xffff, info); + info->fbops->fb_setcolreg(9, colors[9][0], colors[9][1], + colors[9][2], 0xffff, info); + info->fbops->fb_setcolreg(10, colors[10][0], colors[10][1], + colors[10][2], 0xffff, info); + + + /* Draw the progress */ + rect.dx = barX; + rect.dy = barY; + rect.width = (progress * ELPP_BAR_LENGTH)/100; + rect.height = ELPP_BAR_WIDTH; + rect.color = 1; + rect.rop = 0; + info->fbops->fb_fillrect(info, &rect); + + /* Draw the remaining part of the progress Bar */ + rect.dx = rect.width + rect.dx; + rect.width = ELPP_BAR_LENGTH - rect.width; + rect.color = 2; + info->fbops->fb_fillrect(info, &rect); + + elpp_show_messages(info); +} + +void elpp_progress(char *str) +{ + int progress; + + progress = elpp_parse(str); + if( progress > 0 ) + elpp_updatescreen(progress); +} + +/* Something written into /proc/progress */ +static ssize_t elpp_write(struct file *file, const char *buf, + size_t length, loff_t *offset) +{ + int progress; + char tmp_string[ELPP_MSG_LENGTH]; + int len = length 0 ) + elpp_updatescreen(progress); + + /* We have read the entire string */ + return length; +} + +/* /proc/progress access permissions */ +int elpp_permissions(struct inode *inode, int op, struct nameidata *dummy) +{ + /* W for root */ + if ( op == 2 && 
current->euid == 0 ) + return 0; + return -EACCES; +} + diff -Nru a/drivers/video/console/elpp.h b/drivers/video/console/elpp.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/video/console/elpp.h Wed Oct 22 10:40:10 2003 @@ -0,0 +1,45 @@ + +/* + * The Enhanced Linux Progress Patch + * by Prasad , IIIT - Hyderabad, INDIA + * + * drivers/video/console/elpp.h Setup file for ELPP + * + * THE LOGO + * linux_progress_logo is a global structure that represents + * the image being shown at the startup. So the variables + * linux_progress_logo.width and linux_progress_logo.height + * represent the width and height of the logo respectively. + * The logo is displayed both horizontally and vertically + * centered in the screen. + * + * COLORS + * The colors are represented in the form of 4-digit hex. + * Some sample colors: + * Red : {0xffff, 0x0000, 0x0000} + * Blue : {0x0000, 0x0000, 0xffff} + * Green : {0x0000, 0xffff, 0x0000} + * Black : {0x0000, 0x0000, 0x0000} + * White : {0xffff, 0xffff, 0xffff} + * DarkBlue: {0x0000, 0x0000, 0x2222} + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. 
+ */ + +#define BAR_BG_COLOR {0x0000,0x0000,0xffff} /* Blue */ +#define BAR_FG_COLOR {0xffff,0x0000,0x0000} /* Red */ +#define TEXT_BG_COLOR {0x0000,0x2222,0x0000} /* Dark Green */ +#define TEXT_FG_COLOR {0xffff,0xffff,0xffff} /* White */ +#define ERROR_BG_COLOR {0x2222,0x0000,0x0000} /* Dark Red */ +#define WARN_BG_COLOR {0x0000,0x0000,0x2222} /* Dark Blue */ +#define ELPP_BAR_LENGTH 365 /* Width of logo */ +#define ELPP_BAR_WIDTH 10 +#define ELPP_BAR_X 275 /* val<0 => align below logo */ +#define ELPP_BAR_Y 300 /* val<0 => bottom of logo */ +#define ELPP_MSG_X 275 /* val<0 => align below logo */ +#define ELPP_MSG_Y 185 /* val<0 => bottom of logo */ +#define ELPP_MSG_LENGTH 45 +#define ELPP_NUM_MSGS 7 /* Number of messages */ + diff -Nru a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c --- a/drivers/video/console/fbcon.c Wed Oct 22 10:40:08 2003 +++ b/drivers/video/console/fbcon.c Wed Oct 22 10:40:08 2003 @@ -195,8 +195,7 @@ { struct fb_info *info = (struct fb_info *) private; - /* Test to see if the cursor is erased but still on */ - if (!info || (info->cursor.rop == ROP_COPY)) + if (!info) return; info->cursor.enable ^= 1; info->fbops->fb_cursor(info, &info->cursor); @@ -226,8 +225,7 @@ struct fb_info *info = (struct fb_info *) dev_addr; schedule_work(&info->queue); - cursor_timer.expires = jiffies + HZ / 5; - add_timer(&cursor_timer); + mod_timer(&cursor_timer, jiffies + HZ/5); } int __init fb_console_setup(char *this_opt) @@ -308,97 +306,6 @@ } /* - * drawing helpers - */ -static void putcs_unaligned(struct vc_data *vc, struct fb_info *info, - struct fb_image *image, int count, - const unsigned short *s) -{ - unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; - unsigned int width = (vc->vc_font.width + 7) >> 3; - unsigned int cellsize = vc->vc_font.height * width; - unsigned int maxcnt = info->pixmap.size/cellsize; - unsigned int shift_low = 0, mod = vc->vc_font.width % 8; - unsigned int shift_high = 8, size, pitch, cnt, k; - unsigned int buf_align = info->pixmap.buf_align - 1; - unsigned int scan_align = info->pixmap.scan_align - 1; - unsigned int idx = vc->vc_font.width >> 3; - u8 mask, *src, *dst, *dst0; - - while (count) { - if (count > maxcnt) - cnt = k = maxcnt; - else - cnt = k = count; - - image->width = vc->vc_font.width * cnt; - pitch = ((image->width + 7) >> 3) + scan_align; - pitch &= ~scan_align; - size = pitch * vc->vc_font.height + buf_align; - size &= ~buf_align; - dst0 = info->pixmap.addr + fb_get_buffer_offset(info, size); - image->data = dst0; - while (k--) { - src = vc->vc_font.data + (scr_readw(s++) & charmask)* - cellsize; - dst = dst0; - mask = (u8) (0xfff << shift_high); - move_buf_unaligned(info, dst, src, pitch, image->height, - mask, shift_high, shift_low, mod, idx); - shift_low += mod; - dst0 += (shift_low >= 8) ? width : width - 1; - shift_low &= 7; - shift_high = 8 - shift_low; - } - info->fbops->fb_imageblit(info, image); - image->dx += cnt * vc->vc_font.width; - count -= cnt; - atomic_dec(&info->pixmap.count); - smp_mb__after_atomic_dec(); - } -} - -static void putcs_aligned(struct vc_data *vc, struct fb_info *info, - struct fb_image *image, int count, - const unsigned short *s) -{ - unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; - unsigned int width = vc->vc_font.width >> 3; - unsigned int cellsize = vc->vc_font.height * width; - unsigned int maxcnt = info->pixmap.size/cellsize; - unsigned int scan_align = info->pixmap.scan_align - 1; - unsigned int buf_align = info->pixmap.buf_align - 1; - unsigned int pitch, cnt, size, k; - u8 *src, *dst, *dst0; - - while (count) { - if (count > maxcnt) - cnt = k = maxcnt; - else - cnt = k = count; - - pitch = width * cnt + scan_align; - pitch &= ~scan_align; - size = pitch * vc->vc_font.height + buf_align; - size &= ~buf_align; - image->width = vc->vc_font.width * cnt; - dst0 = info->pixmap.addr + fb_get_buffer_offset(info, size); - image->data = dst0; - while (k--) { - src = vc->vc_font.data + (scr_readw(s++)&charmask)*cellsize; - dst = dst0; - move_buf_aligned(info, dst, src, pitch, width, image->height); - dst0 += width; - } - info->fbops->fb_imageblit(info, image); - image->dx += cnt * vc->vc_font.width; - count -= cnt; - atomic_dec(&info->pixmap.count); - smp_mb__after_atomic_dec(); - } -} - -/* * Accelerated handlers. */ void accel_bmove(struct vc_data *vc, struct fb_info *info, int sy, @@ -432,51 +339,24 @@ info->fbops->fb_fillrect(info, ®ion); } -static void accel_putc(struct vc_data *vc, struct fb_info *info, - int c, int ypos, int xpos) +void accel_putcs(struct vc_data *vc, struct fb_info *info, + const unsigned short *s, int count, int yy, int xx) { unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; unsigned int width = (vc->vc_font.width + 7) >> 3; + unsigned int cellsize = vc->vc_font.height * width; + unsigned int maxcnt = info->pixmap.size/cellsize; unsigned int scan_align = info->pixmap.scan_align - 1; unsigned int buf_align = info->pixmap.buf_align - 1; + unsigned int shift_low = 0, mod = vc->vc_font.width % 8; + unsigned int shift_high = 8, pitch, cnt, size, k; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; int fgshift = (vc->vc_hi_font_mask) ? 
9 : 8; - unsigned int size, pitch; - struct fb_image image; - u8 *src, *dst; - - image.dx = xpos * vc->vc_font.width; - image.dy = ypos * vc->vc_font.height; - image.width = vc->vc_font.width; - image.height = vc->vc_font.height; - image.fg_color = attr_fgcol(fgshift, c); - image.bg_color = attr_bgcol(bgshift, c); - image.depth = 1; - - pitch = width + scan_align; - pitch &= ~scan_align; - size = pitch * vc->vc_font.height; - size += buf_align; - size &= ~buf_align; - dst = info->pixmap.addr + fb_get_buffer_offset(info, size); - image.data = dst; - src = vc->vc_font.data + (c & charmask) * vc->vc_font.height * width; - - move_buf_aligned(info, dst, src, pitch, width, image.height); - - info->fbops->fb_imageblit(info, &image); - atomic_dec(&info->pixmap.count); - smp_mb__after_atomic_dec(); -} - -void accel_putcs(struct vc_data *vc, struct fb_info *info, - const unsigned short *s, int count, int yy, int xx) -{ - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - int fgshift = (vc->vc_hi_font_mask) ? 
9 : 8; + unsigned int idx = vc->vc_font.width >> 3; struct fb_image image; u16 c = scr_readw(s); - + u8 *src, *dst, *dst0; + image.fg_color = attr_fgcol(fgshift, c); image.bg_color = attr_bgcol(bgshift, c); image.dx = xx * vc->vc_font.width; @@ -484,10 +364,43 @@ image.height = vc->vc_font.height; image.depth = 1; - if (!(vc->vc_font.width & 7)) - putcs_aligned(vc, info, &image, count, s); - else - putcs_unaligned(vc, info, &image, count, s); + while (count) { + if (count > maxcnt) + cnt = k = maxcnt; + else + cnt = k = count; + + image.width = vc->vc_font.width * cnt; + pitch = ((image.width + 7) >> 3) + scan_align; + pitch &= ~scan_align; + size = pitch * image.height + buf_align; + size &= ~buf_align; + dst0 = fb_get_buffer_offset(info, &info->pixmap, size); + image.data = dst0; + while (k--) { + src = vc->vc_font.data + (scr_readw(s++) & charmask)*cellsize; + dst = dst0; + + if (mod) { + move_buf_unaligned(info, &info->pixmap, dst, pitch, + src, idx, image.height, shift_high, + shift_low, mod); + shift_low += mod; + dst0 += (shift_low >= 8) ? width : width - 1; + shift_low &= 7; + shift_high = 8 - shift_low; + } else { + move_buf_aligned(info, &info->pixmap, dst, pitch, + src, idx, image.height); + dst0 += width; + } + } + info->fbops->fb_imageblit(info, &image); + image.dx += cnt * vc->vc_font.width; + count -= cnt; + atomic_dec(&info->pixmap.count); + smp_mb__after_atomic_dec(); + } } void accel_clear_margins(struct vc_data *vc, struct fb_info *info, @@ -672,11 +585,11 @@ #endif /* Initialize the work queue. If the driver provides its * own work queue this means it will use something besides - * default timer to flash the cursor. */ + * default timer to flash the cursor. 
*/ if (!info->queue.func) { INIT_WORK(&info->queue, fb_flashcursor, info); - cursor_timer.expires = jiffies + HZ / 50; + cursor_timer.expires = jiffies + HZ / 5; cursor_timer.data = (unsigned long ) info; add_timer(&cursor_timer); } @@ -728,15 +641,13 @@ static void fbcon_set_display(struct vc_data *vc, int init, int logo) { struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]]; + int nr_rows, nr_cols, old_rows, old_cols, i, charcnt = 256; struct display *p = &fb_display[vc->vc_num]; - int nr_rows, nr_cols; - int old_rows, old_cols; unsigned short *save = NULL, *r, *q; - int i, charcnt = 256; struct font_desc *font; if (vc->vc_num != fg_console || (info->flags & FBINFO_FLAG_MODULE) || - info->fix.type == FB_TYPE_TEXT) + (info->fix.type == FB_TYPE_TEXT)) logo = 0; info->var.xoffset = info->var.yoffset = p->yscroll = 0; /* reset wrap/pan */ @@ -960,19 +871,50 @@ accel_clear(vc, info, real_y(p, sy), sx, height, width); } - static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos) { struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]]; + unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; + unsigned int scan_align = info->pixmap.scan_align - 1; + unsigned int buf_align = info->pixmap.buf_align - 1; + unsigned int width = (vc->vc_font.width + 7) >> 3; + int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + int fgshift = (vc->vc_hi_font_mask) ? 
9 : 8; struct display *p = &fb_display[vc->vc_num]; - + unsigned int size, pitch; + struct fb_image image; + u8 *src, *dst; + if (!info->fbops->fb_blank && console_blanked) return; if (vt_cons[vc->vc_num]->vc_mode != KD_TEXT) return; - accel_putc(vc, info, c, real_y(p, ypos), xpos); + image.dx = xpos * vc->vc_font.width; + image.dy = real_y(p, ypos) * vc->vc_font.height; + image.width = vc->vc_font.width; + image.height = vc->vc_font.height; + image.fg_color = attr_fgcol(fgshift, c); + image.bg_color = attr_bgcol(bgshift, c); + image.depth = 1; + + src = vc->vc_font.data + (c & charmask) * vc->vc_font.height * width; + + pitch = width + scan_align; + pitch &= ~scan_align; + size = pitch * vc->vc_font.height; + size += buf_align; + size &= ~buf_align; + + dst = fb_get_buffer_offset(info, &info->pixmap, size); + image.data = dst; + + move_buf_aligned(info, &info->pixmap, dst, pitch, src, width, image.height); + + info->fbops->fb_imageblit(info, &image); + atomic_dec(&info->pixmap.count); + smp_mb__after_atomic_dec(); } static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, @@ -993,13 +935,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode) { struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]]; - unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - int fgshift = (vc->vc_hi_font_mask) ? 
9 : 8; struct display *p = &fb_display[vc->vc_num]; - int w = (vc->vc_font.width + 7) >> 3, c; int y = real_y(p, vc->vc_y); - struct fb_cursor cursor; if (mode & CM_SOFTBACK) { mode &= ~CM_SOFTBACK; @@ -1012,28 +949,32 @@ } else if (softback_lines) fbcon_set_origin(vc); - c = scr_readw((u16 *) vc->vc_pos); + del_timer(&cursor_timer); + if (info->cursor.enable) { + info->cursor.enable = 0; + info->fbops->fb_cursor(info, &info->cursor); + } - cursor.image.data = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); - cursor.set = FB_CUR_SETCUR; - cursor.image.depth = 1; - - switch (mode) { - case CM_ERASE: - if (info->cursor.rop == ROP_XOR) { - info->cursor.enable = 0; - info->cursor.rop = ROP_COPY; - info->fbops->fb_cursor(info, &cursor); - } - break; - case CM_MOVE: - case CM_DRAW: - info->cursor.enable = 1; + if (mode != CM_ERASE) { + unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; + int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + int fgshift = (vc->vc_hi_font_mask) ? 
9 : 8; + int s_pitch = (vc->vc_font.width + 7) >> 3; + int size = s_pitch * vc->vc_font.height; + struct fb_cursor cursor; + int cur_height, c; + u8 *dst; - if (info->cursor.image.fg_color != attr_fgcol(fgshift, c) || - info->cursor.image.bg_color != attr_bgcol(bgshift, c)) { - cursor.image.fg_color = attr_fgcol(fgshift, c); - cursor.image.bg_color = attr_bgcol(bgshift, c); + memset(&cursor, 0, sizeof(struct fb_cursor)); + cursor.enable = 1; + + c = scr_readw((u16 *) vc->vc_pos); + + if (info->cursor.image.bg_color != attr_fgcol(fgshift, c) || + info->cursor.image.fg_color != attr_bgcol(bgshift, c)) { + cursor.image.bg_color = attr_fgcol(fgshift, c); + cursor.image.fg_color = attr_bgcol(bgshift, c); + cursor.image.depth = 1; cursor.set |= FB_CUR_SETCMAP; } @@ -1055,22 +996,20 @@ cursor.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } + + if ((cursor.set & FB_CUR_SETSIZE)) { + dst = kmalloc(size, GFP_ATOMIC); - if ((cursor.set & FB_CUR_SETSIZE) || ((vc->vc_cursor_type & 0x0f) != p->cursor_shape)) { - char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC); - int cur_height, size, i = 0; + if (!dst) + return; + memset(dst, 0, size); - if (!mask) return; - if (info->cursor.mask) kfree(info->cursor.mask); - info->cursor.mask = mask; - - p->cursor_shape = vc->vc_cursor_type & 0x0f; - cursor.set |= FB_CUR_SETSHAPE; - + info->cursor.mask = dst; + switch (vc->vc_cursor_type & 0x0f) { - case CUR_NONE: + case CUR_NONE: cur_height = 0; break; case CUR_UNDERLINE: @@ -1085,22 +1024,23 @@ case CUR_TWO_THIRDS: cur_height = (vc->vc_font.height << 1)/3; break; - case CUR_BLOCK: + case CUR_BLOCK: default: cur_height = vc->vc_font.height; break; } - size = (vc->vc_font.height - cur_height) * w; - while (size--) - mask[i++] = 0; - size = cur_height * w; - while (size--) - mask[i++] = 0xff; + dst += (vc->vc_font.height - cur_height) * s_pitch; + memset(dst, 0xff, cur_height * s_pitch); } - info->cursor.rop = ROP_XOR; + + info->cursor.image.data = vc->vc_font.data + ((c & 
charmask) * size); + cursor.set |= FB_CUR_SETSHAPE; info->fbops->fb_cursor(info, &cursor); + info->cursor.enable = 1; + atomic_dec(&info->sprite.count); + smp_mb__after_atomic_dec(); + mod_timer(&cursor_timer, jiffies + HZ/5); vbl_cursor_cnt = CURSOR_DRAW_DELAY; - break; } } @@ -1826,9 +1766,11 @@ vc->vc_font.height = h; if (vc->vc_hi_font_mask && cnt == 256) { vc->vc_hi_font_mask = 0; - if (vc->vc_can_do_color) + if (vc->vc_can_do_color) { vc->vc_complement_mask >>= 1; - + vc->vc_s_complement_mask >>= 1; + } + /* ++Edmund: reorder the attribute bits */ if (vc->vc_can_do_color) { unsigned short *cp = @@ -1847,9 +1789,11 @@ } } else if (!vc->vc_hi_font_mask && cnt == 512) { vc->vc_hi_font_mask = 0x100; - if (vc->vc_can_do_color) + if (vc->vc_can_do_color) { vc->vc_complement_mask <<= 1; - + vc->vc_s_complement_mask <<= 1; + } + /* ++Edmund: reorder the attribute bits */ { unsigned short *cp = diff -Nru a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h --- a/drivers/video/console/fbcon.h Wed Oct 22 10:40:08 2003 +++ b/drivers/video/console/fbcon.h Wed Oct 22 10:40:08 2003 @@ -33,10 +33,12 @@ u_short scrollmode; /* Scroll Method */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ - int cursor_shape; }; /* drivers/video/console/fbcon.c */ +#ifdef CONFIG_LOGO_ELPP +extern struct display fb_display[MAX_NR_CONSOLES]; +#endif extern char con2fb_map[MAX_NR_CONSOLES]; extern int set_con2fb_map(int unit, int newidx); diff -Nru a/drivers/video/console/symbols_16x16.c b/drivers/video/console/symbols_16x16.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/video/console/symbols_16x16.c Wed Oct 22 10:40:10 2003 @@ -0,0 +1,123 @@ + +#include + +#define FONTDATAMAX 96 + +static unsigned char symbols_data_8x16[FONTDATAMAX] = { + + /* Success, tick mark */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000001111 */ + 0x00, /* 0000000000011110 */ + 0x00, /* 0000000000111100 */ + 0x00, /* 0000000001111000 
*/ + 0x00, /* 0000000011110000 */ + 0x01, /* 0000000111100000 */ + 0xc3, /* 1100001111000000 */ + 0xe7, /* 1110011110000000 */ + 0x7f, /* 0111111100000000 */ + 0x3e, /* 0011111000000000 */ + 0x1c, /* 0001110000000000 */ + 0x08, /* 0000100000000000 */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + 0x0f, /* 0000000000001111 */ + 0x1e, /* 0000000000011110 */ + 0x3c, /* 0000000000111100 */ + 0x78, /* 0000000001111000 */ + 0xf0, /* 0000000011110000 */ + 0xe0, /* 0000000111100000 */ + 0xc0, /* 1100001111000000 */ + 0x80, /* 1110011110000000 */ + 0x00, /* 0111111100000000 */ + 0x00, /* 0011111000000000 */ + 0x00, /* 0001110000000000 */ + 0x00, /* 0000100000000000 */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + + /* Failure, cross mark */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + 0xf0, /* 1111000000001111 */ + 0x78, /* 0111100000011110 */ + 0x3c, /* 0011110000111100 */ + 0x1e, /* 0001111001111000 */ + 0x0f, /* 0000111111110000 */ + 0x07, /* 0000011111100000 */ + 0x07, /* 0000011111100000 */ + 0x0f, /* 0000111111110000 */ + 0x1e, /* 0001111001111000 */ + 0x3c, /* 0011110000111100 */ + 0x78, /* 0111100000011110 */ + 0xf0, /* 1111000000001111 */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + 0x0f, /* 1111000000001111 */ + 0x1e, /* 0111100000011110 */ + 0x3c, /* 0011110000111100 */ + 0x78, /* 0001111001111000 */ + 0xf0, /* 0000111111110000 */ + 0xe0, /* 0000011111100000 */ + 0xe0, /* 0000011111100000 */ + 0xf0, /* 0000111111110000 */ + 0x78, /* 0001111001111000 */ + 0x3c, /* 0011110000111100 */ + 0x1e, /* 0111100000011110 */ + 0x0f, /* 1111000000001111 */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + + /* Warning, exclamation mark */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + 0x03, /* 0000001111000000 */ + 0x07, /* 0000011111100000 */ + 
0x07, /* 0000011111100000 */ + 0x07, /* 0000011111100000 */ + 0x03, /* 0000001111000000 */ + 0x03, /* 0000001111000000 */ + 0x01, /* 0000000110000000 */ + 0x01, /* 0000000110000000 */ + 0x00, /* 0000000000000000 */ + 0x01, /* 0000000110000000 */ + 0x03, /* 0000001111000000 */ + 0x01, /* 0000000110000000 */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + 0xc0, /* 0000001111000000 */ + 0xe0, /* 0000011111100000 */ + 0xe0, /* 0000011111100000 */ + 0xe0, /* 0000011111100000 */ + 0xc0, /* 0000001111000000 */ + 0xc0, /* 0000001111000000 */ + 0x80, /* 0000000110000000 */ + 0x80, /* 0000000110000000 */ + 0x00, /* 0000000000000000 */ + 0x80, /* 0000000110000000 */ + 0xc0, /* 0000001111000000 */ + 0x80, /* 0000000110000000 */ + 0x00, /* 0000000000000000 */ + 0x00, /* 0000000000000000 */ + +}; + +struct font_desc symbols_16x16 = { + 8, + "SYMBOLS16", + 8, + 16, + symbols_data_8x16, + 0 +}; + diff -Nru a/drivers/video/controlfb.c b/drivers/video/controlfb.c --- a/drivers/video/controlfb.c Wed Oct 22 10:40:03 2003 +++ b/drivers/video/controlfb.c Wed Oct 22 10:40:03 2003 @@ -136,8 +136,8 @@ /* * inititialization */ -int control_init(void); -void control_setup(char *); +int controlfb_init(void); +void controlfb_setup(char *); /******************** Prototypes for internal functions **********************/ @@ -553,7 +553,7 @@ /* * Called from fbmem.c for probing & initializing */ -int __init control_init(void) +int __init controlfb_init(void) { struct device_node *dp; @@ -1057,7 +1057,7 @@ /* * Parse user speficied options (`video=controlfb:') */ -void __init control_setup(char *options) +void __init controlfb_setup(char *options) { char *this_opt; diff -Nru a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c --- a/drivers/video/cyber2000fb.c Wed Oct 22 10:40:03 2003 +++ b/drivers/video/cyber2000fb.c Wed Oct 22 10:40:03 2003 @@ -62,7 +62,7 @@ #include "cyber2000fb.h" struct cfb_info { - struct 
fb_info fb; + struct fb_info *fb; struct display_switch *dispsw; struct display *display; struct pci_dev *dev; @@ -148,10 +148,10 @@ static void cyber2000fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { - struct cfb_info *cfb = (struct cfb_info *)info; + struct cfb_info *cfb = info->par; unsigned long dst, col; - if (!(cfb->fb.var.accel_flags & FB_ACCELF_TEXT)) { + if (!(info->var.accel_flags & FB_ACCELF_TEXT)) { cfb_fillrect(info, rect); return; } @@ -161,12 +161,12 @@ cyber2000fb_writew(rect->height - 1, CO_REG_PIXHEIGHT, cfb); col = rect->color; - if (cfb->fb.var.bits_per_pixel > 8) - col = ((u32 *)cfb->fb.pseudo_palette)[col]; + if (info->var.bits_per_pixel > 8) + col = ((u32 *)info->pseudo_palette)[col]; cyber2000fb_writel(col, CO_REG_FGCOLOUR, cfb); - dst = rect->dx + rect->dy * cfb->fb.var.xres_virtual; - if (cfb->fb.var.bits_per_pixel == 24) { + dst = rect->dx + rect->dy * info->var.xres_virtual; + if (info->var.bits_per_pixel == 24) { cyber2000fb_writeb(dst, CO_REG_X_PHASE, cfb); dst *= 3; } @@ -180,11 +180,11 @@ static void cyber2000fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) { - struct cfb_info *cfb = (struct cfb_info *)info; + struct cfb_info *cfb = info->par; unsigned int cmd = CO_CMD_L_PATTERN_FGCOL; unsigned long src, dst; - if (!(cfb->fb.var.accel_flags & FB_ACCELF_TEXT)) { + if (!(info->var.accel_flags & FB_ACCELF_TEXT)) { cfb_copyarea(info, region); return; } @@ -193,8 +193,8 @@ cyber2000fb_writew(region->width - 1, CO_REG_PIXWIDTH, cfb); cyber2000fb_writew(region->height - 1, CO_REG_PIXHEIGHT, cfb); - src = region->sx + region->sy * cfb->fb.var.xres_virtual; - dst = region->dx + region->dy * cfb->fb.var.xres_virtual; + src = region->sx + region->sy * info->var.xres_virtual; + dst = region->dx + region->dy * info->var.xres_virtual; if (region->sx < region->dx) { src += region->width - 1; @@ -203,12 +203,12 @@ } if (region->sy < region->dy) { - src += (region->height - 1) * cfb->fb.var.xres_virtual; - dst 
+= (region->height - 1) * cfb->fb.var.xres_virtual; + src += (region->height - 1) * info->var.xres_virtual; + dst += (region->height - 1) * info->var.xres_virtual; cmd |= CO_CMD_L_INC_UP; } - if (cfb->fb.var.bits_per_pixel == 24) { + if (info->var.bits_per_pixel == 24) { cyber2000fb_writeb(dst, CO_REG_X_PHASE, cfb); src *= 3; dst *= 3; @@ -234,10 +234,10 @@ static int cyber2000fb_sync(struct fb_info *info) { - struct cfb_info *cfb = (struct cfb_info *)info; + struct cfb_info *cfb = info->par; int count = 100000; - if (!(cfb->fb.var.accel_flags & FB_ACCELF_TEXT)) + if (!(info->var.accel_flags & FB_ACCELF_TEXT)) return 0; while (cyber2000fb_readb(CO_REG_CONTROL, cfb) & CO_CTRL_BUSY) { @@ -269,12 +269,12 @@ cyber2000fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { - struct cfb_info *cfb = (struct cfb_info *)info; - struct fb_var_screeninfo *var = &cfb->fb.var; + struct cfb_info *cfb = info->par; + struct fb_var_screeninfo *var = &info->var; u32 pseudo_val; int ret = 1; - switch (cfb->fb.fix.visual) { + switch (info->fix.visual) { default: return 1; @@ -400,7 +400,7 @@ * Now set our pseudo palette for the CFB16/24/32 drivers. 
*/ if (regno < 16) - ((u32 *)cfb->fb.pseudo_palette)[regno] = pseudo_val; + ((u32 *)info->pseudo_palette)[regno] = pseudo_val; return ret; } @@ -744,7 +744,7 @@ static int cyber2000fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { - struct cfb_info *cfb = (struct cfb_info *)info; + struct cfb_info *cfb = info->par; struct par_info hw; unsigned int mem; int err; @@ -831,8 +831,8 @@ } mem = var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8); - if (mem > cfb->fb.fix.smem_len) - var->yres_virtual = cfb->fb.fix.smem_len * 8 / + if (mem > info->fix.smem_len) + var->yres_virtual = info->fix.smem_len * 8 / (var->bits_per_pixel * var->xres_virtual); if (var->yres > var->yres_virtual) @@ -853,8 +853,8 @@ static int cyber2000fb_set_par(struct fb_info *info) { - struct cfb_info *cfb = (struct cfb_info *)info; - struct fb_var_screeninfo *var = &cfb->fb.var; + struct cfb_info *cfb = info->par; + struct fb_var_screeninfo *var = &info->var; struct par_info hw; unsigned int mem; @@ -924,7 +924,7 @@ hw.fetch <<= 1; hw.fetch += 1; - cfb->fb.fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; + info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; /* * Same here - if the size of the video mode exceeds the @@ -933,8 +933,8 @@ * In theory, since NetWinders contain just one VGA card, * we should never end up hitting this problem. */ - mem = cfb->fb.fix.line_length * var->yres_virtual; - BUG_ON(mem > cfb->fb.fix.smem_len); + mem = info->fix.line_length * var->yres_virtual; + BUG_ON(mem > info->fix.smem_len); /* * 8bpp displays are always pseudo colour. 16bpp and above @@ -943,11 +943,11 @@ * palettes, true colour does not.) 
*/ if (var->bits_per_pixel == 8) - cfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; else if (hw.ramdac & RAMDAC_BYPASS) - cfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; + info->fix.visual = FB_VISUAL_TRUECOLOR; else - cfb->fb.fix.visual = FB_VISUAL_DIRECTCOLOR; + info->fix.visual = FB_VISUAL_DIRECTCOLOR; cyber2000fb_set_timing(cfb, &hw); cyber2000fb_update_start(cfb, var); @@ -962,18 +962,18 @@ static int cyber2000fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { - struct cfb_info *cfb = (struct cfb_info *)info; + struct cfb_info *cfb = info->par; if (cyber2000fb_update_start(cfb, var)) return -EINVAL; - cfb->fb.var.xoffset = var->xoffset; - cfb->fb.var.yoffset = var->yoffset; + info->var.xoffset = var->xoffset; + info->var.yoffset = var->yoffset; if (var->vmode & FB_VMODE_YWRAP) { - cfb->fb.var.vmode |= FB_VMODE_YWRAP; + info->var.vmode |= FB_VMODE_YWRAP; } else { - cfb->fb.var.vmode &= ~FB_VMODE_YWRAP; + info->var.vmode &= ~FB_VMODE_YWRAP; } return 0; @@ -998,7 +998,7 @@ */ static int cyber2000fb_blank(int blank, struct fb_info *info) { - struct cfb_info *cfb = (struct cfb_info *)info; + struct cfb_info *cfb = info->par; unsigned int sync = 0; int i; @@ -1111,7 +1111,7 @@ void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var) { - memcpy(var, &cfb->fb.var, sizeof(struct fb_var_screeninfo)); + memcpy(var, &cfb->fb->var, sizeof(struct fb_var_screeninfo)); } /* @@ -1122,13 +1122,13 @@ if (int_cfb_info != NULL) { info->dev = int_cfb_info->dev; info->regs = int_cfb_info->regs; - info->fb = int_cfb_info->fb.screen_base; - info->fb_size = int_cfb_info->fb.fix.smem_len; + info->fb = int_cfb_info->fb->screen_base; + info->fb_size = int_cfb_info->fb->fix.smem_len; info->enable_extregs = cyber2000fb_enable_extregs; info->disable_extregs = cyber2000fb_disable_extregs; info->info = int_cfb_info; - strlcpy(info->dev_name, int_cfb_info->fb.fix.id, sizeof(info->dev_name)); + strlcpy(info->dev_name, 
int_cfb_info->fb->fix.id, sizeof(info->dev_name)); } return int_cfb_info != NULL; @@ -1220,18 +1220,18 @@ } static struct cfb_info * __devinit -cyberpro_alloc_fb_info(unsigned int id, char *name) +cyberpro_alloc_fb_info(unsigned int id, char *name, struct device *dev) { struct cfb_info *cfb; + struct fb_info *fb_info; - cfb = kmalloc(sizeof(struct cfb_info) + - sizeof(u32) * 16, GFP_KERNEL); + fb_info = framebuffer_alloc(sizeof(struct cfb_info) + 32 * 16, dev); - if (!cfb) + if (!fb_info) return NULL; - memset(cfb, 0, sizeof(struct cfb_info)); - + cfb = fb_info->par; + cfb->fb = fb_info; cfb->id = id; if (id == ID_CYBERPRO_5000) @@ -1248,43 +1248,43 @@ else cfb->divisors[3] = 6; - strcpy(cfb->fb.fix.id, name); + strcpy(fb_info->fix.id, name); - cfb->fb.fix.type = FB_TYPE_PACKED_PIXELS; - cfb->fb.fix.type_aux = 0; - cfb->fb.fix.xpanstep = 0; - cfb->fb.fix.ypanstep = 1; - cfb->fb.fix.ywrapstep = 0; + fb_info->fix.type = FB_TYPE_PACKED_PIXELS; + fb_info->fix.type_aux = 0; + fb_info->fix.xpanstep = 0; + fb_info->fix.ypanstep = 1; + fb_info->fix.ywrapstep = 0; switch (id) { case ID_IGA_1682: - cfb->fb.fix.accel = 0; + fb_info->fix.accel = 0; break; case ID_CYBERPRO_2000: - cfb->fb.fix.accel = FB_ACCEL_IGS_CYBER2000; + fb_info->fix.accel = FB_ACCEL_IGS_CYBER2000; break; case ID_CYBERPRO_2010: - cfb->fb.fix.accel = FB_ACCEL_IGS_CYBER2010; + fb_info->fix.accel = FB_ACCEL_IGS_CYBER2010; break; case ID_CYBERPRO_5000: - cfb->fb.fix.accel = FB_ACCEL_IGS_CYBER5000; + fb_info->fix.accel = FB_ACCEL_IGS_CYBER5000; break; } - cfb->fb.var.nonstd = 0; - cfb->fb.var.activate = FB_ACTIVATE_NOW; - cfb->fb.var.height = -1; - cfb->fb.var.width = -1; - cfb->fb.var.accel_flags = FB_ACCELF_TEXT; - - cfb->fb.fbops = &cyber2000fb_ops; - cfb->fb.flags = FBINFO_FLAG_DEFAULT; - cfb->fb.pseudo_palette = (void *)(cfb + 1); + fb_info->var.nonstd = 0; + fb_info->var.activate = FB_ACTIVATE_NOW; + fb_info->var.height = -1; + fb_info->var.width = -1; + fb_info->var.accel_flags = FB_ACCELF_TEXT; + + 
fb_info->fbops = &cyber2000fb_ops; + fb_info->flags = FBINFO_FLAG_DEFAULT; + fb_info->pseudo_palette = (void *)(cfb + 1); - fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0); + fb_alloc_cmap(&fb_info->cmap, NR_PALETTE, 0); return cfb; } @@ -1296,9 +1296,9 @@ /* * Free the colourmap */ - fb_alloc_cmap(&cfb->fb.cmap, 0, 0); + fb_alloc_cmap(&cfb->fb->cmap, 0, 0); - kfree(cfb); + kfree(cfb->fb); } } @@ -1339,6 +1339,7 @@ */ static int __devinit cyberpro_common_probe(struct cfb_info *cfb) { + struct fb_info *fb_info = cfb->fb; u_long smem_size; u_int h_sync, v_sync; int err; @@ -1363,22 +1364,22 @@ default: smem_size = 0x00100000; break; } - cfb->fb.fix.smem_len = smem_size; - cfb->fb.fix.mmio_len = MMIO_SIZE; - cfb->fb.screen_base = cfb->region; + fb_info->fix.smem_len = smem_size; + fb_info->fix.mmio_len = MMIO_SIZE; + fb_info->screen_base = cfb->region; err = -EINVAL; - if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0, + if (!fb_find_mode(&fb_info->var, fb_info, NULL, NULL, 0, &cyber2000fb_default_mode, 8)) { - printk("%s: no valid mode found\n", cfb->fb.fix.id); + printk("%s: no valid mode found\n", fb_info->fix.id); goto failed; } - cfb->fb.var.yres_virtual = cfb->fb.fix.smem_len * 8 / - (cfb->fb.var.bits_per_pixel * cfb->fb.var.xres_virtual); + fb_info->var.yres_virtual = fb_info->fix.smem_len * 8 / + (fb_info->var.bits_per_pixel * fb_info->var.xres_virtual); - if (cfb->fb.var.yres_virtual < cfb->fb.var.yres) - cfb->fb.var.yres_virtual = cfb->fb.var.yres; + if (fb_info->var.yres_virtual < fb_info->var.yres) + fb_info->var.yres_virtual = fb_info->var.yres; // fb_set_var(&cfb->fb.var, -1, &cfb->fb); @@ -1388,18 +1389,18 @@ * the precision and fit the results into 32-bit registers. 
* (1953125000 * 512 = 1e12) */ - h_sync = 1953125000 / cfb->fb.var.pixclock; - h_sync = h_sync * 512 / (cfb->fb.var.xres + cfb->fb.var.left_margin + - cfb->fb.var.right_margin + cfb->fb.var.hsync_len); - v_sync = h_sync / (cfb->fb.var.yres + cfb->fb.var.upper_margin + - cfb->fb.var.lower_margin + cfb->fb.var.vsync_len); + h_sync = 1953125000 / fb_info->var.pixclock; + h_sync = h_sync * 512 / (fb_info->var.xres + fb_info->var.left_margin + + fb_info->var.right_margin + fb_info->var.hsync_len); + v_sync = h_sync / (cfb->fb->var.yres + fb_info->var.upper_margin + + fb_info->var.lower_margin + fb_info->var.vsync_len); printk(KERN_INFO "%s: %dKiB VRAM, using %dx%d, %d.%03dkHz, %dHz\n", - cfb->fb.fix.id, cfb->fb.fix.smem_len >> 10, - cfb->fb.var.xres, cfb->fb.var.yres, + fb_info->fix.id, fb_info->fix.smem_len >> 10, + fb_info->var.xres, fb_info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); - err = register_framebuffer(&cfb->fb); + err = register_framebuffer(fb_info); failed: return err; @@ -1419,7 +1420,7 @@ * Restore the old video mode and the palette. * We also need to tell fbcon to redraw the console. */ - cyber2000fb_set_par(&cfb->fb); + cyber2000fb_set_par(cfb->fb); } #ifdef CONFIG_ARCH_SHARK @@ -1434,7 +1435,7 @@ if (!request_mem_region(FB_START,FB_SIZE,"CyberPro2010")) return err; - cfb = cyberpro_alloc_fb_info(ID_CYBERPRO_2010, "CyberPro2010"); + cfb = cyberpro_alloc_fb_info(ID_CYBERPRO_2010, "CyberPro2010", NULL); if (!cfb) goto failed_release; @@ -1444,8 +1445,8 @@ goto failed_ioremap; cfb->regs = cfb->region + MMIO_OFFSET; - cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET; - cfb->fb.fix.smem_start = FB_START; + cfb->fb->fix.mmio_start = FB_START + MMIO_OFFSET; + cfb->fb->fix.smem_start = FB_START; /* * Bring up the hardware. 
This is expected to enable access @@ -1540,7 +1541,7 @@ */ val = cyber2000_grphr(EXT_BUS_CTL, cfb); if (!(val & EXT_BUS_CTL_PCIBURST_WRITE)) { - printk(KERN_INFO "%s: enabling PCI bursts\n", cfb->fb.fix.id); + printk(KERN_INFO "%s: enabling PCI bursts\n", cfb->fb->fix.id); val |= EXT_BUS_CTL_PCIBURST_WRITE; @@ -1571,7 +1572,7 @@ return err; err = -ENOMEM; - cfb = cyberpro_alloc_fb_info(id->driver_data, name); + cfb = cyberpro_alloc_fb_info(id->driver_data, name, &dev->dev); if (!cfb) goto failed_release; @@ -1582,8 +1583,8 @@ goto failed_ioremap; cfb->regs = cfb->region + MMIO_OFFSET; - cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET; - cfb->fb.fix.smem_start = pci_resource_start(dev, 0); + cfb->fb->fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET; + cfb->fb->fix.smem_start = pci_resource_start(dev, 0); /* * Bring up the hardware. This is expected to enable access @@ -1644,12 +1645,11 @@ * we will be leaving hooks that could cause * oopsen laying around. */ - if (unregister_framebuffer(&cfb->fb)) + if (unregister_framebuffer(cfb->fb)) printk(KERN_WARNING "%s: danger Will Robinson, " "danger danger! Oopsen imminent!\n", - cfb->fb.fix.id); + cfb->fb->fix.id); iounmap(cfb->region); - cyberpro_free_fb_info(cfb); /* * Ensure that the driver data is no longer @@ -1660,6 +1660,8 @@ int_cfb_info = NULL; pci_release_regions(dev); + + framebuffer_release(cfb->fb); } } diff -Nru a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c --- a/drivers/video/epson1355fb.c Wed Oct 22 10:40:01 2003 +++ b/drivers/video/epson1355fb.c Wed Oct 22 10:40:01 2003 @@ -1,541 +1,714 @@ /* - * linux/drivers/video/epson1355fb.c - * -- Support for the Epson SED1355 LCD/CRT controller + * linux/drivers/video/epson1355fb.c -- Epson S1D13505 frame buffer for 2.5. * - * Copyright (C) 2000 Philipp Rumpf + * Epson Research S1D13505 Embedded RAMDAC LCD/CRT Controller + * (previously known as SED1355) * - * based on linux/drivers/video/skeletonfb.c, which was + * Cf. 
http://www.erd.epson.com/vdc/html/S1D13505.html + * + * + * Copyright (C) Hewlett-Packard Company. All rights reserved. + * + * Written by Christopher Hoover + * + * Adapted from: + * + * linux/drivers/video/skeletonfb.c + * Modified to new api Jan 2001 by James Simmons (jsimmons@transvirtual.com) * Created 28 Dec 1997 by Geert Uytterhoeven * + * linux/drivers/video/epson1355fb.c (2.4 driver) + * Copyright (C) 2000 Philipp Rumpf + * * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. - */ -/* TODO (roughly in order of priority): - * 16 bpp support - * crt support - * hw cursor support - * SwivelView + * License. See the file COPYING in the main directory of this archive for + * more details. + * + * + * Noteworthy Issues + * ----------------- + * + * This driver is complicated by the fact that this is a 16-bit chip + * and, on at least one platform (ceiva), we can only do 16-bit reads + * and writes to the framebuffer. We hide this from user space + * except in the case of mmap(). + * + * + * To Do + * ----- + * + * - Test 8-bit pseudocolor mode + * - Allow setting bpp, virtual resolution + * - Implement horizontal panning + * - (maybe) Implement hardware cursor */ -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include +#include #include +#include #include -#include