## Automatically generated incremental diff
## From: linux-2.5.70-bk11
## To: linux-2.5.70-bk12
## Robot: $Id: make-incremental-diff,v 1.11 2002/02/20 02:59:33 hpa Exp $
diff -urN linux-2.5.70-bk11/Documentation/kernel-parameters.txt linux-2.5.70-bk12/Documentation/kernel-parameters.txt
--- linux-2.5.70-bk11/Documentation/kernel-parameters.txt 2003-05-26 18:01:03.000000000 -0700
+++ linux-2.5.70-bk12/Documentation/kernel-parameters.txt 2003-06-07 04:47:43.000000000 -0700
@@ -540,8 +540,6 @@
[KNL,ACPI] Mark specific memory as reserved.
Region of memory to be used, from ss to ss+nn.
- memfrac= [KNL]
-
meye= [HW] Set MotionEye Camera parameters
See Documentation/video4linux/meye.txt.
@@ -617,6 +615,9 @@
noht [SMP,IA-32] Disables P4 Xeon(tm) HyperThreading.
+ noirqdebug [IA-32] Disables the code which attempts to detect and
+ disable unhandled interrupt sources.
+
noisapnp [ISAPNP] Disables ISA PnP code.
noinitrd [RAM] Tells the kernel not to load any configured
diff -urN linux-2.5.70-bk11/Documentation/sound/alsa/ALSA-Configuration.txt linux-2.5.70-bk12/Documentation/sound/alsa/ALSA-Configuration.txt
--- linux-2.5.70-bk11/Documentation/sound/alsa/ALSA-Configuration.txt 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/Documentation/sound/alsa/ALSA-Configuration.txt 2003-06-07 04:47:43.000000000 -0700
@@ -187,6 +187,13 @@
Module supports up to 8 cards, PnP and autoprobe.
+ Module snd-azt3328
+ ------------------
+
+ Module for soundcards based on Aztech AZF3328 PCI chip.
+
+ Module supports up to 8 cards.
+
Module snd-cmi8330
------------------
diff -urN linux-2.5.70-bk11/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl linux-2.5.70-bk12/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
--- linux-2.5.70-bk11/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl 2003-06-07 04:47:43.000000000 -0700
@@ -2906,7 +2906,8 @@
- This callback may be called multiple times, too.
+ This function is always called before the close callback is called.
+ Also, the callback may be called multiple times, too.
Keep track whether the resource was already released.
@@ -3007,14 +3008,16 @@
- When the pcm supports the suspend/resume operation,
+ When the pcm supports the suspend/resume operation
+ (i.e. SNDRV_PCM_INFO_RESUME flag is set),
SUSPEND and RESUME
- commands must be handled, too. Obviously it does suspend and
- resume of the pcm substream. Usually, the
- SUSPEND is identical with
- STOP command and the
- RESUME is identical with
- START command.
+ commands must be handled, too.
+ These commands are issued when the power-management status is
+ changed. Obviously, the SUSPEND and
+ RESUME
+ do suspend and resume of the pcm substream, and usually, they
+ are identical with STOP and
+ START commands, respectively.
@@ -3331,9 +3334,96 @@
- There are many different constraints. You can even define your
- own constraint rules. I won't explain the details here, rather I
- would like to say, Luke, use the source.
+ There are many different constraints.
+ Look in sound/asound.h for a complete list.
+ You can even define your own constraint rules.
+ For example, let's suppose my_chip can manage a substream of 1 channel
+ if and only if the format is S16_LE, otherwise it supports any format
+ specified in the snd_pcm_hardware_t stucture (or in any
+ other constraint_list). You can build a rule like this:
+
+
+ Example of Hardware Constraints for Channels
+
+min < 2) {
+ fmt.bits[0] &= SNDRV_PCM_FMTBIT_S16_LE;
+ return snd_mask_refine(f, &fmt);
+ }
+ return 0;
+ }
+]]>
+
+
+
+
+
+ Then you need to call this function to add your rule:
+
+
+
+runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_channels_by_format, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+ -1);
+]]>
+
+
+
+
+
+ The rule function is called when an application sets the number of
+ channels. But an application can set the format before the number of
+ channels. Thus you also need to define the inverse rule:
+
+
+ Example of Hardware Constraints for Channels
+
+bits[0] == SNDRV_PCM_FMTBIT_S16_LE) {
+ ch.min = ch.max = 1;
+ ch.integer = 1;
+ return snd_interval_refine(c, &ch);
+ }
+ return 0;
+ }
+]]>
+
+
+
+
+
+ ...and in the open callback:
+
+
+runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_format_by_channels, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ -1);
+]]>
+
+
+
+
+
+ I won't explain more details here, rather I
+ would like to say, Luke, use the source.
@@ -5756,6 +5846,10 @@
Kevin Conder reformatted the original plain-text to the
DocBook format.
+
+ Giuliano Pochini corrected typos and contributed the example codes
+ in the hardware constraints section.
+
diff -urN linux-2.5.70-bk11/Documentation/sysctl/vm.txt linux-2.5.70-bk12/Documentation/sysctl/vm.txt
--- linux-2.5.70-bk11/Documentation/sysctl/vm.txt 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/Documentation/sysctl/vm.txt 2003-06-07 04:47:43.000000000 -0700
@@ -22,6 +22,7 @@
- dirty_background_ratio
- dirty_expire_centisecs
- dirty_writeback_centisecs
+- min_free_kbytes
==============================================================
@@ -74,3 +75,11 @@
2 ^ page-cluster. Values above 2 ^ 5 don't make much sense
for swap because we only cluster swap data in 32-page groups.
+==============================================================
+
+min_free_kbytes:
+
+This is used to force the Linux VM to keep a minimum number
+of kilobytes free. The VM uses this number to compute a pages_min
+value for each lowmem zone in the system. Each lowmem zone gets
+a number of reserved free pages based proportionally on its size.
diff -urN linux-2.5.70-bk11/MAINTAINERS linux-2.5.70-bk12/MAINTAINERS
--- linux-2.5.70-bk11/MAINTAINERS 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/MAINTAINERS 2003-06-07 04:47:43.000000000 -0700
@@ -1118,8 +1118,10 @@
S: Maintained
LINUX FOR 64BIT POWERPC
-P: David Engebretsen
+P: David Engebretsen (stable kernel)
M: engebret@us.ibm.com
+P: Anton Blanchard (development kernel)
+M: anton@au.ibm.com
W: http://linuxppc64.org
L: linuxppc64-dev@lists.linuxppc.org
S: Supported
diff -urN linux-2.5.70-bk11/Makefile linux-2.5.70-bk12/Makefile
--- linux-2.5.70-bk11/Makefile 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/Makefile 2003-06-07 04:47:43.000000000 -0700
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 70
-EXTRAVERSION = -bk11
+EXTRAVERSION = -bk12
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff -urN linux-2.5.70-bk11/arch/alpha/kernel/irq.c linux-2.5.70-bk12/arch/alpha/kernel/irq.c
--- linux-2.5.70-bk11/arch/alpha/kernel/irq.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/arch/alpha/kernel/irq.c 2003-06-07 04:47:43.000000000 -0700
@@ -34,7 +34,10 @@
* Controller mappings for all interrupt sources:
*/
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
- [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
};
static void register_irq_proc(unsigned int irq);
diff -urN linux-2.5.70-bk11/arch/i386/kernel/irq.c linux-2.5.70-bk12/arch/i386/kernel/irq.c
--- linux-2.5.70-bk11/arch/i386/kernel/irq.c 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/arch/i386/kernel/irq.c 2003-06-07 04:47:43.000000000 -0700
@@ -66,8 +66,12 @@
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
static void register_irq_proc (unsigned int irq);
@@ -209,7 +213,6 @@
{
int status = 1; /* Force the "do bottom halves" bit */
int retval = 0;
- struct irqaction *first_action = action;
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
@@ -222,30 +225,88 @@
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();
- if (retval != 1) {
- static int count = 100;
- if (count) {
- count--;
- if (retval) {
- printk("irq event %d: bogus retval mask %x\n",
- irq, retval);
- } else {
- printk("irq %d: nobody cared!\n", irq);
- }
- dump_stack();
- printk("handlers:\n");
- action = first_action;
- do {
- printk("[<%p>]", action->handler);
- print_symbol(" (%s)",
- (unsigned long)action->handler);
- printk("\n");
- action = action->next;
- } while (action);
- }
+ return retval;
+}
+
+static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ struct irqaction *action;
+
+ if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+ printk(KERN_ERR "irq event %d: bogus return value %x\n",
+ irq, action_ret);
+ } else {
+ printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+ }
+ dump_stack();
+ printk(KERN_ERR "handlers:\n");
+ action = desc->action;
+ do {
+ printk(KERN_ERR "[<%p>]", action->handler);
+ print_symbol(" (%s)",
+ (unsigned long)action->handler);
+ printk("\n");
+ action = action->next;
+ } while (action);
+}
+
+static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ static int count = 100;
+
+ if (count) {
+ count--;
+ __report_bad_irq(irq, desc, action_ret);
}
+}
+
+static int noirqdebug;
- return status;
+static int __init noirqdebug_setup(char *str)
+{
+ noirqdebug = 1;
+ printk("IRQ lockup detection disabled\n");
+ return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+/*
+ * If 99,900 of the previous 100,000 interrupts have not been handled then
+ * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
+ * turn the IRQ off.
+ *
+ * (The other 100-of-100,000 interrupts may have been a correctly-functioning
+ * device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ if (action_ret != IRQ_HANDLED) {
+ desc->irqs_unhandled++;
+ if (action_ret != IRQ_NONE)
+ report_bad_irq(irq, desc, action_ret);
+ }
+
+ desc->irq_count++;
+ if (desc->irq_count < 100000)
+ return;
+
+ desc->irq_count = 0;
+ if (desc->irqs_unhandled > 99900) {
+ /*
+ * The interrupt is stuck
+ */
+ __report_bad_irq(irq, desc, action_ret);
+ /*
+ * Now kill the IRQ
+ */
+ printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ desc->irqs_unhandled = 0;
}
/*
@@ -418,10 +479,13 @@
* SMP environment.
*/
for (;;) {
+ irqreturn_t action_ret;
+
spin_unlock(&desc->lock);
- handle_IRQ_event(irq, ®s, action);
+ action_ret = handle_IRQ_event(irq, ®s, action);
spin_lock(&desc->lock);
-
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
diff -urN linux-2.5.70-bk11/arch/ia64/kernel/irq.c linux-2.5.70-bk12/arch/ia64/kernel/irq.c
--- linux-2.5.70-bk11/arch/ia64/kernel/irq.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/arch/ia64/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -65,8 +65,13 @@
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { IRQ_DISABLED, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .status = IRQ_DISABLED,
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
#ifdef CONFIG_IA64_GENERIC
struct irq_desc * __ia64_irq_desc (unsigned int irq)
diff -urN linux-2.5.70-bk11/arch/ia64/kernel/module.c linux-2.5.70-bk12/arch/ia64/kernel/module.c
--- linux-2.5.70-bk11/arch/ia64/kernel/module.c 2003-05-26 18:00:39.000000000 -0700
+++ linux-2.5.70-bk12/arch/ia64/kernel/module.c 2003-06-07 04:47:44.000000000 -0700
@@ -887,3 +887,13 @@
if (mod->arch.unwind)
unw_remove_unwind_table(mod->arch.unw_table);
}
+
+#ifdef CONFIG_SMP
+void percpu_modcopy(void *pcpudst, const void *src, unsigned long size)
+{
+ unsigned int i;
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_possible(i))
+ memcpy(pcpudst + __per_cpu_offset[i], src, size);
+}
+#endif /* CONFIG_SMP */
diff -urN linux-2.5.70-bk11/arch/mips/au1000/common/serial.c linux-2.5.70-bk12/arch/mips/au1000/common/serial.c
--- linux-2.5.70-bk11/arch/mips/au1000/common/serial.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/mips/au1000/common/serial.c 2003-06-07 04:47:44.000000000 -0700
@@ -2549,11 +2549,8 @@
memset(&serial_driver, 0, sizeof(struct tty_driver));
serial_driver.magic = TTY_DRIVER_MAGIC;
serial_driver.driver_name = "serial";
-#if (LINUX_VERSION_CODE > 0x2032D && defined(CONFIG_DEVFS_FS))
- serial_driver.name = "tts/";
-#else
+ serial_driver.devfs_name = "tts/";
serial_driver.name = "ttyS";
-#endif
serial_driver.major = TTY_MAJOR;
serial_driver.minor_start = 64 + SERIAL_DEV_OFFSET;
serial_driver.num = NR_PORTS;
diff -urN linux-2.5.70-bk11/arch/mips/kernel/irq.c linux-2.5.70-bk12/arch/mips/kernel/irq.c
--- linux-2.5.70-bk11/arch/mips/kernel/irq.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/arch/mips/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -24,8 +24,12 @@
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
/*
* Special irq handlers.
diff -urN linux-2.5.70-bk11/arch/ppc/8260_io/enet.c linux-2.5.70-bk12/arch/ppc/8260_io/enet.c
--- linux-2.5.70-bk11/arch/ppc/8260_io/enet.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc/8260_io/enet.c 2003-06-07 04:47:44.000000000 -0700
@@ -608,7 +608,7 @@
/* Initialize the CPM Ethernet on SCC.
*/
-int __init scc_enet_init(void)
+static int __init scc_enet_init(void)
{
struct net_device *dev;
struct scc_enet_private *cep;
@@ -630,15 +630,19 @@
bd = (bd_t *)__res;
- /* Create an Ethernet device instance.
+ /* Allocate some private information.
*/
- dev = alloc_etherdev(sizeof(*cep));
- if (!dev)
+ cep = (struct scc_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
+ if (cep == NULL)
return -ENOMEM;
- cep = dev->priv;
+ __clear_user(cep,sizeof(*cep));
spin_lock_init(&cep->lock);
+ /* Create an Ethernet device instance.
+ */
+ dev = init_etherdev(0, 0);
+
/* Get pointer to SCC area in parameter RAM.
*/
ep = (scc_enet_t *)(&immap->im_dprambase[PROFF_ENET]);
@@ -767,7 +771,6 @@
/* Allocate a page.
*/
mem_addr = __get_free_page(GFP_KERNEL);
- /* BUG: no check for failure */
/* Initialize the BD for every fragment in the page.
*/
@@ -805,7 +808,6 @@
/* Install our interrupt handler.
*/
request_irq(SIU_INT_ENET, scc_enet_interrupt, 0, "enet", dev);
- /* BUG: no check for failure */
/* Set GSMR_H to enable all normal operating modes.
* Set GSMR_L to enable Ethernet to MC68160.
@@ -835,6 +837,7 @@
io->iop_pdatc |= PC_EST8260_ENET_NOTFD;
dev->base_addr = (unsigned long)ep;
+ dev->priv = cep;
/* The CPM Ethernet specific entries in the device structure. */
dev->open = scc_enet_open;
@@ -849,12 +852,6 @@
*/
sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
- err = register_netdev(dev);
- if (err) {
- kfree(dev);
- return err;
- }
-
printk("%s: SCC ENET Version 0.1, ", dev->name);
for (i=0; i<5; i++)
printk("%02x:", dev->dev_addr[i]);
@@ -863,3 +860,4 @@
return 0;
}
+module_init(scc_enet_init);
diff -urN linux-2.5.70-bk11/arch/ppc/8260_io/fcc_enet.c linux-2.5.70-bk12/arch/ppc/8260_io/fcc_enet.c
--- linux-2.5.70-bk11/arch/ppc/8260_io/fcc_enet.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc/8260_io/fcc_enet.c 2003-06-07 04:47:44.000000000 -0700
@@ -1323,12 +1323,12 @@
/* Initialize the CPM Ethernet on FCC.
*/
-int __init fec_enet_init(void)
+static int __init fec_enet_init(void)
{
struct net_device *dev;
struct fcc_enet_private *cep;
fcc_info_t *fip;
- int i, np, err;
+ int i, np;
volatile immap_t *immap;
volatile iop8260_t *io;
@@ -1339,16 +1339,23 @@
fip = fcc_ports;
while (np-- > 0) {
- /* Create an Ethernet device instance.
+
+ /* Allocate some private information.
*/
- dev = alloc_etherdev(sizeof(*cep));
- if (!dev)
+ cep = (struct fcc_enet_private *)
+ kmalloc(sizeof(*cep), GFP_KERNEL);
+ if (cep == NULL)
return -ENOMEM;
- cep = dev->priv;
+ __clear_user(cep,sizeof(*cep));
spin_lock_init(&cep->lock);
cep->fip = fip;
+ /* Create an Ethernet device instance.
+ */
+ dev = init_etherdev(0, 0);
+ dev->priv = cep;
+
init_fcc_shutdown(fip, cep, immap);
init_fcc_ioports(fip, io, immap);
init_fcc_param(fip, dev, immap);
@@ -1369,12 +1376,6 @@
init_fcc_startup(fip, dev);
- err = register_netdev(dev);
- if (err) {
- kfree(dev);
- return err;
- }
-
printk("%s: FCC ENET Version 0.3, ", dev->name);
for (i=0; i<5; i++)
printk("%02x:", dev->dev_addr[i]);
@@ -1393,6 +1394,7 @@
return 0;
}
+module_init(fec_enet_init);
/* Make sure the device is shut down during initialization.
*/
diff -urN linux-2.5.70-bk11/arch/ppc/8260_io/uart.c linux-2.5.70-bk12/arch/ppc/8260_io/uart.c
--- linux-2.5.70-bk11/arch/ppc/8260_io/uart.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc/8260_io/uart.c 2003-06-07 04:47:44.000000000 -0700
@@ -2467,11 +2467,8 @@
__clear_user(&serial_driver,sizeof(struct tty_driver));
serial_driver.magic = TTY_DRIVER_MAGIC;
serial_driver.driver_name = "serial";
-#ifdef CONFIG_DEVFS_FS
- serial_driver.name = "tts/";
-#else
+ serial_driver.devfs_name = "tts/";
serial_driver.name = "ttyS";
-#endif
serial_driver.major = TTY_MAJOR;
serial_driver.minor_start = 64;
serial_driver.num = NR_PORTS;
diff -urN linux-2.5.70-bk11/arch/ppc/8xx_io/enet.c linux-2.5.70-bk12/arch/ppc/8xx_io/enet.c
--- linux-2.5.70-bk11/arch/ppc/8xx_io/enet.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc/8xx_io/enet.c 2003-06-07 04:47:44.000000000 -0700
@@ -639,11 +639,11 @@
* transmit and receive to make sure we don't catch the CPM with some
* inconsistent control information.
*/
-int __init scc_enet_init(void)
+static int __init scc_enet_init(void)
{
struct net_device *dev;
struct scc_enet_private *cep;
- int i, j, k, err;
+ int i, j, k;
unsigned char *eap, *ba;
dma_addr_t mem_addr;
bd_t *bd;
@@ -659,13 +659,19 @@
bd = (bd_t *)__res;
- dev = alloc_etherdev(sizeof(*cep));
- if (!dev)
+ /* Allocate some private information.
+ */
+ cep = (struct scc_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
+ if (cep == NULL)
return -ENOMEM;
- cep = dev->priv;
+ __clear_user(cep,sizeof(*cep));
spin_lock_init(&cep->lock);
+ /* Create an Ethernet device instance.
+ */
+ dev = init_etherdev(0, 0);
+
/* Get pointer to SCC area in parameter RAM.
*/
ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);
@@ -835,7 +841,6 @@
/* Allocate a page.
*/
ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &mem_addr);
- /* BUG: no check for failure */
/* Initialize the BD for every fragment in the page.
*/
@@ -934,6 +939,7 @@
#endif
dev->base_addr = (unsigned long)ep;
+ dev->priv = cep;
#if 0
dev->name = "CPM_ENET";
#endif
@@ -947,12 +953,6 @@
dev->get_stats = scc_enet_get_stats;
dev->set_multicast_list = set_multicast_list;
- err = register_netdev(dev);
- if (err) {
- kfree(dev);
- return err;
- }
-
/* And last, enable the transmit and receive processing.
*/
sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -964,3 +964,5 @@
return 0;
}
+
+module_init(scc_enet_init);
diff -urN linux-2.5.70-bk11/arch/ppc/8xx_io/fec.c linux-2.5.70-bk12/arch/ppc/8xx_io/fec.c
--- linux-2.5.70-bk11/arch/ppc/8xx_io/fec.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc/8xx_io/fec.c 2003-06-07 04:47:44.000000000 -0700
@@ -1566,11 +1566,11 @@
/* Initialize the FEC Ethernet on 860T.
*/
-int __init fec_enet_init(void)
+static int __init fec_enet_init(void)
{
struct net_device *dev;
struct fec_enet_private *fep;
- int i, j, k, err;
+ int i, j, k;
unsigned char *eap, *iap, *ba;
unsigned long mem_addr;
volatile cbd_t *bdp;
@@ -1586,11 +1586,17 @@
bd = (bd_t *)__res;
- dev = alloc_etherdev(sizeof(*fep));
- if (!dev)
+ /* Allocate some private information.
+ */
+ fep = (struct fec_enet_private *)kmalloc(sizeof(*fep), GFP_KERNEL);
+ if (fep == NULL)
return -ENOMEM;
- fep = dev->priv;
+ __clear_user(fep,sizeof(*fep));
+
+ /* Create an Ethernet device instance.
+ */
+ dev = init_etherdev(0, 0);
fecp = &(immap->im_cpm.cp_fec);
@@ -1655,7 +1661,6 @@
/* Allocate a page.
*/
ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &mem_addr);
- /* BUG: no check for failure */
/* Initialize the BD for every fragment in the page.
*/
@@ -1710,6 +1715,7 @@
#endif
dev->base_addr = (unsigned long)fecp;
+ dev->priv = fep;
/* The FEC Ethernet specific entries in the device structure. */
dev->open = fec_enet_open;
@@ -1746,12 +1752,6 @@
fecp->fec_mii_speed = 0; /* turn off MDIO */
#endif /* CONFIG_USE_MDIO */
- err = register_netdev(dev);
- if (err) {
- kfree(dev);
- return err;
- }
-
printk ("%s: FEC ENET Version 0.2, FEC irq %d"
#ifdef PHY_INTERRUPT
", MII irq %d"
@@ -1782,6 +1782,7 @@
return 0;
}
+module_init(fec_enet_init);
/* This function is called to start or restart the FEC during a link
* change. This only happens when switching between half and full
diff -urN linux-2.5.70-bk11/arch/ppc/8xx_io/uart.c linux-2.5.70-bk12/arch/ppc/8xx_io/uart.c
--- linux-2.5.70-bk11/arch/ppc/8xx_io/uart.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc/8xx_io/uart.c 2003-06-07 04:47:44.000000000 -0700
@@ -2520,11 +2520,8 @@
__clear_user(&serial_driver,sizeof(struct tty_driver));
serial_driver.magic = TTY_DRIVER_MAGIC;
serial_driver.driver_name = "serial";
-#ifdef CONFIG_DEVFS_FS
- serial_driver.name = "tts/";
-#else
+ serial_driver.devfs_name = "tts/";
serial_driver.name = "ttyS";
-#endif
serial_driver.major = TTY_MAJOR;
serial_driver.minor_start = 64;
serial_driver.num = NR_PORTS;
diff -urN linux-2.5.70-bk11/arch/ppc/kernel/irq.c linux-2.5.70-bk12/arch/ppc/kernel/irq.c
--- linux-2.5.70-bk11/arch/ppc/kernel/irq.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -66,8 +66,12 @@
#define MAXCOUNT 10000000
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
diff -urN linux-2.5.70-bk11/arch/ppc64/kernel/irq.c linux-2.5.70-bk12/arch/ppc64/kernel/irq.c
--- linux-2.5.70-bk11/arch/ppc64/kernel/irq.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc64/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -63,8 +63,11 @@
volatile unsigned char *chrp_int_ack_special;
static void register_irq_proc (unsigned int irq);
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
int ppc_spurious_interrupts = 0;
unsigned long lpEvent_count = 0;
diff -urN linux-2.5.70-bk11/arch/ppc64/mm/numa.c linux-2.5.70-bk12/arch/ppc64/mm/numa.c
--- linux-2.5.70-bk11/arch/ppc64/mm/numa.c 2003-05-26 18:00:43.000000000 -0700
+++ linux-2.5.70-bk12/arch/ppc64/mm/numa.c 2003-06-07 04:47:44.000000000 -0700
@@ -25,6 +25,7 @@
int numa_memory_lookup_table[MAX_MEMORY >> MEMORY_INCREMENT_SHIFT] =
{ [ 0 ... ((MAX_MEMORY >> MEMORY_INCREMENT_SHIFT) - 1)] = -1};
unsigned long numa_cpumask_lookup_table[MAX_NUMNODES];
+int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES -1)] = 0};
struct pglist_data node_data[MAX_NUMNODES];
bootmem_data_t plat_node_bdata[MAX_NUMNODES];
@@ -33,7 +34,10 @@
{
dbg("cpu %d maps to domain %d\n", cpu, node);
numa_cpu_lookup_table[cpu] = node;
- numa_cpumask_lookup_table[node] |= 1UL << cpu;
+ if (!(numa_cpumask_lookup_table[node] & 1UL << cpu)) {
+ numa_cpumask_lookup_table[node] |= 1UL << cpu;
+ nr_cpus_in_node[node]++;
+ }
}
static int __init parse_numa_properties(void)
diff -urN linux-2.5.70-bk11/arch/sh/kernel/irq.c linux-2.5.70-bk12/arch/sh/kernel/irq.c
--- linux-2.5.70-bk11/arch/sh/kernel/irq.c 2003-05-26 18:00:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/sh/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -41,8 +41,11 @@
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ }
+};
/*
* Special irq handlers.
diff -urN linux-2.5.70-bk11/arch/sparc64/kernel/ioctl32.c linux-2.5.70-bk12/arch/sparc64/kernel/ioctl32.c
--- linux-2.5.70-bk11/arch/sparc64/kernel/ioctl32.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/arch/sparc64/kernel/ioctl32.c 2003-06-07 04:47:44.000000000 -0700
@@ -789,7 +789,7 @@
struct socket *mysock = sockfd_lookup(fd, &ret);
- if (mysock && mysock->sk && mysock->sk->family == AF_INET6) { /* ipv6 */
+ if (mysock && mysock->sk && mysock->sk->sk_family == AF_INET6) { /* ipv6 */
ret = copy_from_user (&r6.rtmsg_dst, &(((struct in6_rtmsg32 *)arg)->rtmsg_dst),
3 * sizeof(struct in6_addr));
ret |= __get_user (r6.rtmsg_type, &(((struct in6_rtmsg32 *)arg)->rtmsg_type));
diff -urN linux-2.5.70-bk11/arch/sparc64/solaris/timod.c linux-2.5.70-bk12/arch/sparc64/solaris/timod.c
--- linux-2.5.70-bk11/arch/sparc64/solaris/timod.c 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/arch/sparc64/solaris/timod.c 2003-06-07 04:47:44.000000000 -0700
@@ -149,10 +149,10 @@
SOLD("wakeing socket");
sock = SOCKET_I(current->files->fd[fd]->f_dentry->d_inode);
wake_up_interruptible(&sock->wait);
- read_lock(&sock->sk->callback_lock);
+ read_lock(&sock->sk->sk_callback_lock);
if (sock->fasync_list && !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
__kill_fasync(sock->fasync_list, SIGIO, POLL_IN);
- read_unlock(&sock->sk->callback_lock);
+ read_unlock(&sock->sk->sk_callback_lock);
SOLD("done");
}
diff -urN linux-2.5.70-bk11/arch/um/kernel/irq.c linux-2.5.70-bk12/arch/um/kernel/irq.c
--- linux-2.5.70-bk11/arch/um/kernel/irq.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/arch/um/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -31,8 +31,12 @@
static void register_irq_proc (unsigned int irq);
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
/*
* Generic no controller code
diff -urN linux-2.5.70-bk11/arch/v850/kernel/irq.c linux-2.5.70-bk12/arch/v850/kernel/irq.c
--- linux-2.5.70-bk11/arch/v850/kernel/irq.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/arch/v850/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -28,8 +28,12 @@
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
/*
* Special irq handlers.
diff -urN linux-2.5.70-bk11/arch/x86_64/kernel/irq.c linux-2.5.70-bk12/arch/x86_64/kernel/irq.c
--- linux-2.5.70-bk11/arch/x86_64/kernel/irq.c 2003-05-26 18:00:39.000000000 -0700
+++ linux-2.5.70-bk12/arch/x86_64/kernel/irq.c 2003-06-07 04:47:44.000000000 -0700
@@ -65,8 +65,12 @@
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
static void register_irq_proc (unsigned int irq);
diff -urN linux-2.5.70-bk11/drivers/atm/he.c linux-2.5.70-bk12/drivers/atm/he.c
--- linux-2.5.70-bk11/drivers/atm/he.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/drivers/atm/he.c 2003-06-07 04:47:44.000000000 -0700
@@ -2656,7 +2656,7 @@
* TBRQ, the host issues the close command to the adapter.
*/
- while (((tx_inuse = atomic_read(&vcc->sk->wmem_alloc)) > 0) &&
+ while (((tx_inuse = atomic_read(&vcc->sk->sk_wmem_alloc)) > 0) &&
(retry < MAX_RETRY)) {
set_current_state(TASK_UNINTERRUPTIBLE);
(void) schedule_timeout(sleep);
diff -urN linux-2.5.70-bk11/drivers/atm/idt77252.c linux-2.5.70-bk12/drivers/atm/idt77252.c
--- linux-2.5.70-bk11/drivers/atm/idt77252.c 2003-05-26 18:00:39.000000000 -0700
+++ linux-2.5.70-bk12/drivers/atm/idt77252.c 2003-06-07 04:47:44.000000000 -0700
@@ -728,7 +728,8 @@
struct atm_vcc *vcc = vc->tx_vcc;
vc->estimator->cells += (skb->len + 47) / 48;
- if (atomic_read(&vcc->sk->wmem_alloc) > (vcc->sk->sndbuf >> 1)) {
+ if (atomic_read(&vcc->sk->sk_wmem_alloc) >
+ (vcc->sk->sk_sndbuf >> 1)) {
u32 cps = vc->estimator->maxcps;
vc->estimator->cps = cps;
@@ -2023,7 +2024,7 @@
atomic_inc(&vcc->stats->tx_err);
return -ENOMEM;
}
- atomic_add(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
memcpy(skb_put(skb, 52), cell, 52);
diff -urN linux-2.5.70-bk11/drivers/atm/iphase.c linux-2.5.70-bk12/drivers/atm/iphase.c
--- linux-2.5.70-bk11/drivers/atm/iphase.c 2003-05-26 18:00:59.000000000 -0700
+++ linux-2.5.70-bk12/drivers/atm/iphase.c 2003-06-07 04:47:44.000000000 -0700
@@ -1782,14 +1782,14 @@
if (ia_vcc->pcr < iadev->rate_limit) {
if (vcc->qos.txtp.max_sdu != 0) {
if (ia_vcc->pcr > 60000)
- vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 5;
+ vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
else if (ia_vcc->pcr > 2000)
- vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 4;
+ vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
else
- vcc->sk->sndbuf = 3*vcc->qos.txtp.max_sdu;
+ vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
}
else
- vcc->sk->sndbuf = 24576;
+ vcc->sk->sk_sndbuf = 24576;
}
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
diff -urN linux-2.5.70-bk11/drivers/base/cpu.c linux-2.5.70-bk12/drivers/base/cpu.c
--- linux-2.5.70-bk11/drivers/base/cpu.c 2003-06-07 04:47:37.000000000 -0700
+++ linux-2.5.70-bk12/drivers/base/cpu.c 2003-06-07 04:47:44.000000000 -0700
@@ -6,8 +6,7 @@
#include
#include
#include
-
-#include
+#include
struct class cpu_class = {
diff -urN linux-2.5.70-bk11/drivers/base/memblk.c linux-2.5.70-bk12/drivers/base/memblk.c
--- linux-2.5.70-bk11/drivers/base/memblk.c 2003-05-26 18:01:01.000000000 -0700
+++ linux-2.5.70-bk12/drivers/base/memblk.c 2003-06-07 04:47:44.000000000 -0700
@@ -7,8 +7,7 @@
#include
#include
#include
-
-#include
+#include
static struct class memblk_class = {
diff -urN linux-2.5.70-bk11/drivers/base/node.c linux-2.5.70-bk12/drivers/base/node.c
--- linux-2.5.70-bk11/drivers/base/node.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/drivers/base/node.c 2003-06-07 04:47:44.000000000 -0700
@@ -7,8 +7,7 @@
#include
#include
#include
-
-#include
+#include
static struct class node_class = {
diff -urN linux-2.5.70-bk11/drivers/block/DAC960.c linux-2.5.70-bk12/drivers/block/DAC960.c
--- linux-2.5.70-bk11/drivers/block/DAC960.c 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/drivers/block/DAC960.c 2003-06-07 04:47:44.000000000 -0700
@@ -2309,8 +2309,7 @@
(PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
? "Wide " :""),
(PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers
- * (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
- ? 2 : 1)));
+ * PhysicalDeviceInfo->NegotiatedDataWidthBits/8));
if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber);
if (PhysicalDeviceInfo->PhysicalDeviceState ==
diff -urN linux-2.5.70-bk11/drivers/block/loop.c linux-2.5.70-bk12/drivers/block/loop.c
--- linux-2.5.70-bk11/drivers/block/loop.c 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/drivers/block/loop.c 2003-06-07 04:47:44.000000000 -0700
@@ -236,7 +236,6 @@
up(&mapping->host->i_sem);
out:
kunmap(bvec->bv_page);
- balance_dirty_pages_ratelimited(mapping);
return ret;
unlock:
diff -urN linux-2.5.70-bk11/drivers/block/nbd.c linux-2.5.70-bk12/drivers/block/nbd.c
--- linux-2.5.70-bk11/drivers/block/nbd.c 2003-05-26 18:00:26.000000000 -0700
+++ linux-2.5.70-bk12/drivers/block/nbd.c 2003-06-07 04:47:44.000000000 -0700
@@ -120,7 +120,7 @@
do {
- sock->sk->allocation = GFP_NOIO;
+ sock->sk->sk_allocation = GFP_NOIO;
iov.iov_base = buf;
iov.iov_len = size;
msg.msg_name = NULL;
diff -urN linux-2.5.70-bk11/drivers/char/dz.c linux-2.5.70-bk12/drivers/char/dz.c
--- linux-2.5.70-bk11/drivers/char/dz.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/dz.c 2003-06-07 04:47:44.000000000 -0700
@@ -1295,11 +1295,8 @@
memset(&serial_driver, 0, sizeof(struct tty_driver));
serial_driver.magic = TTY_DRIVER_MAGIC;
serial_driver.owner = THIS_MODULE;
-#if (LINUX_VERSION_CODE > 0x2032D && defined(CONFIG_DEVFS_FS))
+ serial_driver.devfs_name = "tts/";
serial_driver.name = "ttyS";
-#else
- serial_driver.name = "tts/";
-#endif
serial_driver.major = TTY_MAJOR;
serial_driver.minor_start = 64;
serial_driver.num = DZ_NB_PORT;
diff -urN linux-2.5.70-bk11/drivers/char/ip2main.c linux-2.5.70-bk12/drivers/char/ip2main.c
--- linux-2.5.70-bk11/drivers/char/ip2main.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/ip2main.c 2003-06-07 04:47:44.000000000 -0700
@@ -94,9 +94,7 @@
#include
#include
#include
-#ifdef CONFIG_DEVFS_FS
#include
-#endif
#include
#include
#include
@@ -229,11 +227,6 @@
/* String constants for port names */
static char *pcDriver_name = "ip2";
-#ifdef CONFIG_DEVFS_FS
-static char *pcTty = "tts/F%d";
-#else
-static char *pcTty = "ttyF";
-#endif
static char *pcIpl = "ip2ipl";
/* Serial subtype definitions */
@@ -564,10 +557,7 @@
int
ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize)
{
-#ifdef CONFIG_DEVFS_FS
- int j, box;
-#endif
- int i;
+ int i, j, box;
int err;
int status = 0;
static int loaded;
@@ -786,7 +776,8 @@
/* Initialise the relevant fields. */
ip2_tty_driver.magic = TTY_DRIVER_MAGIC;
ip2_tty_driver.owner = THIS_MODULE;
- ip2_tty_driver.name = pcTty;
+ ip2_tty_driver.name = "ttyF";
+ ip2_tty_driver.devfs_name = "tts/F";
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0)
ip2_tty_driver.driver_name = pcDriver_name;
ip2_tty_driver.read_proc = ip2_read_proc;
@@ -798,11 +789,7 @@
ip2_tty_driver.subtype = SERIAL_TYPE_NORMAL;
ip2_tty_driver.init_termios = tty_std_termios;
ip2_tty_driver.init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
-#ifdef CONFIG_DEVFS_FS
ip2_tty_driver.flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
-#else
- ip2_tty_driver.flags = TTY_DRIVER_REAL_RAW;
-#endif
ip2_tty_driver.refcount = &ref_count;
ip2_tty_driver.table = TtyTable;
ip2_tty_driver.termios = Termios;
@@ -851,7 +838,6 @@
continue;
}
-#ifdef CONFIG_DEVFS_FS
if ( NULL != ( pB = i2BoardPtrTable[i] ) ) {
devfs_mk_cdev(MKDEV(IP2_IPL_MAJOR, 4 * i),
S_IRUSR | S_IWUSR | S_IRGRP | S_IFCHR,
@@ -874,7 +860,6 @@
}
}
}
-#endif
if (poll_only) {
// Poll only forces driver to only use polling and
diff -urN linux-2.5.70-bk11/drivers/char/mem.c linux-2.5.70-bk12/drivers/char/mem.c
--- linux-2.5.70-bk11/drivers/char/mem.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/mem.c 2003-06-07 04:47:44.000000000 -0700
@@ -23,6 +23,7 @@
#include
#include
#include
+#include
#include
#include
@@ -524,20 +525,22 @@
{
loff_t ret;
- lock_kernel();
+ down(&file->f_dentry->d_inode->i_sem);
switch (orig) {
case 0:
file->f_pos = offset;
ret = file->f_pos;
+ force_successful_syscall_return();
break;
case 1:
file->f_pos += offset;
ret = file->f_pos;
+ force_successful_syscall_return();
break;
default:
ret = -EINVAL;
}
- unlock_kernel();
+ up(&file->f_dentry->d_inode->i_sem);
return ret;
}
diff -urN linux-2.5.70-bk11/drivers/char/pty.c linux-2.5.70-bk12/drivers/char/pty.c
--- linux-2.5.70-bk11/drivers/char/pty.c 2003-05-26 18:00:28.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/pty.c 2003-06-07 04:47:44.000000000 -0700
@@ -347,11 +347,8 @@
pty_driver.magic = TTY_DRIVER_MAGIC;
pty_driver.owner = THIS_MODULE;
pty_driver.driver_name = "pty_master";
-#ifdef CONFIG_DEVFS_FS
- pty_driver.name = "pty/m";
-#else
pty_driver.name = "pty";
-#endif
+ pty_driver.devfs_name = "pty/m";
pty_driver.major = PTY_MASTER_MAJOR;
pty_driver.minor_start = 0;
pty_driver.num = NR_PTYS;
@@ -382,11 +379,8 @@
pty_slave_driver = pty_driver;
pty_slave_driver.driver_name = "pty_slave";
pty_slave_driver.proc_entry = 0;
-#ifdef CONFIG_DEVFS_FS
- pty_slave_driver.name = "pty/s";
-#else
pty_slave_driver.name = "ttyp";
-#endif
+ pty_slave_driver.devfs_name = "pty/s";
pty_slave_driver.subtype = PTY_TYPE_SLAVE;
pty_slave_driver.major = PTY_SLAVE_MAJOR;
pty_slave_driver.minor_start = 0;
diff -urN linux-2.5.70-bk11/drivers/char/rocket.c linux-2.5.70-bk12/drivers/char/rocket.c
--- linux-2.5.70-bk11/drivers/char/rocket.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/rocket.c 2003-06-07 04:47:44.000000000 -0700
@@ -2650,6 +2650,7 @@
memset(&rocket_driver, 0, sizeof (struct tty_driver));
rocket_driver.magic = TTY_DRIVER_MAGIC;
rocket_driver.flags = TTY_DRIVER_NO_DEVFS;
+ rocket_driver.devfs_name = "tts/R";
rocket_driver.name = "ttyR";
rocket_driver.driver_name = "Comtrol RocketPort";
rocket_driver.major = TTY_ROCKET_MAJOR;
diff -urN linux-2.5.70-bk11/drivers/char/selection.c linux-2.5.70-bk12/drivers/char/selection.c
--- linux-2.5.70-bk11/drivers/char/selection.c 2003-05-26 18:00:59.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/selection.c 2003-06-07 04:47:44.000000000 -0700
@@ -118,7 +118,6 @@
int i, ps, pe;
unsigned int currcons = fg_console;
- unblank_screen();
poke_blanked_console();
{ unsigned short *args, xs, ys, xe, ye;
diff -urN linux-2.5.70-bk11/drivers/char/serial167.c linux-2.5.70-bk12/drivers/char/serial167.c
--- linux-2.5.70-bk11/drivers/char/serial167.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/serial167.c 2003-06-07 04:47:44.000000000 -0700
@@ -2359,11 +2359,8 @@
memset(&cy_serial_driver, 0, sizeof(struct tty_driver));
cy_serial_driver.magic = TTY_DRIVER_MAGIC;
cy_serial_driver.owner = THIS_MODULE;
-#ifdef CONFIG_DEVFS_FS
- cy_serial_driver.name = "tts/";
-#else
+ cy_serial_driver.devfs_name = "tts/";
cy_serial_driver.name = "ttyS";
-#endif
cy_serial_driver.major = TTY_MAJOR;
cy_serial_driver.minor_start = 64;
cy_serial_driver.num = NR_PORTS;
diff -urN linux-2.5.70-bk11/drivers/char/sh-sci.c linux-2.5.70-bk12/drivers/char/sh-sci.c
--- linux-2.5.70-bk11/drivers/char/sh-sci.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/sh-sci.c 2003-06-07 04:47:44.000000000 -0700
@@ -994,11 +994,8 @@
sci_driver.magic = TTY_DRIVER_MAGIC;
sci_driver.owner = THIS_MODULE;
sci_driver.driver_name = "sci";
-#ifdef CONFIG_DEVFS_FS
- sci_driver.name = "ttsc/";
-#else
sci_driver.name = "ttySC";
-#endif
+ sci_driver.devfs_name = "ttsc/";
sci_driver.major = SCI_MAJOR;
sci_driver.minor_start = SCI_MINOR_START;
sci_driver.num = SCI_NPORTS;
diff -urN linux-2.5.70-bk11/drivers/char/stallion.c linux-2.5.70-bk12/drivers/char/stallion.c
--- linux-2.5.70-bk11/drivers/char/stallion.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/stallion.c 2003-06-07 04:47:44.000000000 -0700
@@ -136,11 +136,6 @@
static char *stl_drvtitle = "Stallion Multiport Serial Driver";
static char *stl_drvname = "stallion";
static char *stl_drvversion = "5.6.0";
-#ifdef CONFIG_DEVFS_FS
-static char *stl_serialname = "tts/E%d";
-#else
-static char *stl_serialname = "ttyE";
-#endif
static struct tty_driver stl_serial;
static struct tty_struct *stl_ttys[STL_MAXDEVS];
@@ -3178,7 +3173,8 @@
stl_serial.magic = TTY_DRIVER_MAGIC;
stl_serial.owner = THIS_MODULE;
stl_serial.driver_name = stl_drvname;
- stl_serial.name = stl_serialname;
+ stl_serial.name = "ttyE";
+ stl_serial.devfs_name = "tts/E";
stl_serial.major = STL_SERIALMAJOR;
stl_serial.minor_start = 0;
stl_serial.num = STL_MAXBRDS * STL_MAXPORTS;
diff -urN linux-2.5.70-bk11/drivers/char/tty_io.c linux-2.5.70-bk12/drivers/char/tty_io.c
--- linux-2.5.70-bk11/drivers/char/tty_io.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/tty_io.c 2003-06-07 04:47:44.000000000 -0700
@@ -2082,17 +2082,6 @@
tty->driver->write(tty, 0, &ch, 1);
}
-#ifdef CONFIG_DEVFS_FS
-static void tty_unregister_devfs(struct tty_driver *driver, int index)
-{
- char path[64];
- tty_line_name(driver, index, path);
- devfs_remove(path);
-}
-#else
-# define tty_unregister_devfs(driver, index) do { } while (0)
-#endif /* CONFIG_DEVFS_FS */
-
struct tty_dev {
struct list_head node;
dev_t dev;
@@ -2124,7 +2113,6 @@
static void tty_add_class_device(char *name, dev_t dev, struct device *device)
{
struct tty_dev *tty_dev = NULL;
- char *temp;
int retval;
tty_dev = kmalloc(sizeof(*tty_dev), GFP_KERNEL);
@@ -2132,16 +2120,9 @@
return;
memset(tty_dev, 0x00, sizeof(*tty_dev));
- /* stupid '/' in tty name strings... */
- temp = strrchr(name, '/');
- if (temp && (temp[1] != 0x00))
- ++temp;
- else
- temp = name;
-
tty_dev->class_dev.dev = device;
tty_dev->class_dev.class = &tty_class;
- snprintf(tty_dev->class_dev.class_id, BUS_ID_SIZE, "%s", temp);
+ snprintf(tty_dev->class_dev.class_id, BUS_ID_SIZE, "%s", name);
retval = class_device_register(&tty_dev->class_dev);
if (retval)
goto error;
@@ -2195,7 +2176,6 @@
struct device *device)
{
dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
- char name[64];
if (index >= driver->num) {
printk(KERN_ERR "Attempt to register invalid tty line number "
@@ -2203,16 +2183,16 @@
return;
}
- tty_line_name(driver, index, name);
- devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR, name);
-
- /* stupid console driver devfs names... change vc/X into ttyX */
- if (driver->type == TTY_DRIVER_TYPE_CONSOLE)
- sprintf(name, "tty%d", MINOR(dev));
+ devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
+ "%s%d", driver->devfs_name, index + driver->name_base);
/* we don't care about the ptys */
- if (driver->type != TTY_DRIVER_TYPE_PTY)
- tty_add_class_device (name, dev, device);
+ /* how nice to hide this behind some crappy interface.. */
+ if (driver->type != TTY_DRIVER_TYPE_PTY) {
+ char name[64];
+ tty_line_name(driver, index, name);
+ tty_add_class_device(name, dev, device);
+ }
}
/**
@@ -2225,7 +2205,7 @@
*/
void tty_unregister_device(struct tty_driver *driver, unsigned index)
{
- tty_unregister_devfs(driver, index);
+ devfs_remove("%s%d", driver->devfs_name, index + driver->name_base);
tty_remove_class_device(MKDEV(driver->major, driver->minor_start) + index);
}
diff -urN linux-2.5.70-bk11/drivers/char/vme_scc.c linux-2.5.70-bk12/drivers/char/vme_scc.c
--- linux-2.5.70-bk11/drivers/char/vme_scc.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/vme_scc.c 2003-06-07 04:47:44.000000000 -0700
@@ -131,11 +131,8 @@
scc_driver.magic = TTY_DRIVER_MAGIC;
scc_driver.owner = THIS_MODULE;
scc_driver.driver_name = "scc";
-#ifdef CONFIG_DEVFS_FS
- scc_driver.name = "tts/";
-#else
scc_driver.name = "ttyS";
-#endif
+ scc_driver.devfs_name = "tts/";
scc_driver.major = TTY_MAJOR;
scc_driver.minor_start = SCC_MINOR_BASE;
scc_driver.num = 2;
diff -urN linux-2.5.70-bk11/drivers/char/vt.c linux-2.5.70-bk12/drivers/char/vt.c
--- linux-2.5.70-bk11/drivers/char/vt.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/drivers/char/vt.c 2003-06-07 04:47:44.000000000 -0700
@@ -163,6 +163,12 @@
static int printable; /* Is console ready for printing? */
+/*
+ * ignore_poke: don't unblank the screen when things are typed. This is
+ * mainly for the privacy of braille terminal users.
+ */
+static int ignore_poke;
+
int do_poke_blanked_console;
int console_blanked;
@@ -1314,7 +1320,7 @@
case 14: /* set vesa powerdown interval */
vesa_off_interval = ((par[1] < 60) ? par[1] : 60) * 60 * HZ;
break;
- case 15: /* Activate the previous console */
+ case 15: /* activate the previous console */
set_console(last_console);
break;
}
@@ -2282,6 +2288,13 @@
ret = 0;
}
break;
+ case 14: /* blank screen until explicitly unblanked, not only poked */
+ ignore_poke = 1;
+ do_blank_screen(0);
+ break;
+ case 15: /* which console is blanked ? */
+ ret = console_blanked;
+ break;
default:
ret = -EINVAL;
break;
@@ -2518,7 +2531,8 @@
memset(&console_driver, 0, sizeof(struct tty_driver));
console_driver.magic = TTY_DRIVER_MAGIC;
console_driver.owner = THIS_MODULE;
- console_driver.name = "vc/";
+ console_driver.devfs_name = "vc/";
+ console_driver.name = "tty";
console_driver.name_base = 1;
console_driver.major = TTY_MAJOR;
console_driver.minor_start = 1;
@@ -2712,14 +2726,7 @@
hide_cursor(currcons);
if (!from_timer_handler)
del_timer_sync(&console_timer);
- if (vesa_off_interval) {
- console_timer.function = vesa_powerdown_screen;
- mod_timer(&console_timer, jiffies + vesa_off_interval);
- } else {
- if (!from_timer_handler)
- del_timer_sync(&console_timer);
- console_timer.function = unblank_screen_t;
- }
+ console_timer.function = unblank_screen_t;
save_screen(currcons);
/* In case we need to reset origin, blanking hook returns 1 */
@@ -2730,6 +2737,12 @@
if (console_blank_hook && console_blank_hook(1))
return;
+
+ if (vesa_off_interval) {
+ console_timer.function = vesa_powerdown_screen;
+ mod_timer(&console_timer, jiffies + vesa_off_interval);
+ }
+
if (vesa_blank_mode)
sw->con_blank(vc_cons[currcons].d, vesa_blank_mode + 1);
}
@@ -2754,6 +2767,7 @@
{
int currcons;
+ ignore_poke = 0;
if (!console_blanked)
return;
if (!vc_cons_allocated(fg_console)) {
@@ -2771,12 +2785,12 @@
}
console_blanked = 0;
- if (console_blank_hook)
- console_blank_hook(0);
- set_palette(currcons);
if (sw->con_blank(vc_cons[currcons].d, 0))
/* Low-level driver cannot restore -> do it ourselves */
update_screen(fg_console);
+ if (console_blank_hook)
+ console_blank_hook(0);
+ set_palette(currcons);
set_cursor(fg_console);
}
@@ -2791,7 +2805,7 @@
void poke_blanked_console(void)
{
del_timer(&console_timer);
- if (!vt_cons[fg_console] || vt_cons[fg_console]->vc_mode == KD_GRAPHICS)
+ if (ignore_poke || !vt_cons[fg_console] || vt_cons[fg_console]->vc_mode == KD_GRAPHICS)
return;
if (console_blanked) {
console_timer.function = unblank_screen_t;
diff -urN linux-2.5.70-bk11/drivers/isdn/i4l/isdn_tty.c linux-2.5.70-bk12/drivers/isdn/i4l/isdn_tty.c
--- linux-2.5.70-bk11/drivers/isdn/i4l/isdn_tty.c 2003-06-07 04:47:38.000000000 -0700
+++ linux-2.5.70-bk12/drivers/isdn/i4l/isdn_tty.c 2003-06-07 04:47:45.000000000 -0700
@@ -62,12 +62,6 @@
#define MODEM_PARANOIA_CHECK
#define MODEM_DO_RESTART
-#ifdef CONFIG_DEVFS_FS
-static char *isdn_ttyname_ttyI = "isdn/ttyI%d";
-#else
-static char *isdn_ttyname_ttyI = "ttyI";
-#endif
-
struct isdn_modem isdn_mdm;
static int bit2si[8] =
@@ -2013,7 +2007,8 @@
m = &isdn_mdm;
memset(&m->tty_modem, 0, sizeof(struct tty_driver));
m->tty_modem.magic = TTY_DRIVER_MAGIC;
- m->tty_modem.name = isdn_ttyname_ttyI;
+ m->tty_modem.name = "ttyI";
+ m->tty_modem.devfs_name = "isdn/ttyI";
m->tty_modem.major = ISDN_TTY_MAJOR;
m->tty_modem.minor_start = 0;
m->tty_modem.num = ISDN_MAX_CHANNELS;
diff -urN linux-2.5.70-bk11/drivers/macintosh/macserial.c linux-2.5.70-bk12/drivers/macintosh/macserial.c
--- linux-2.5.70-bk11/drivers/macintosh/macserial.c 2003-06-07 04:47:39.000000000 -0700
+++ linux-2.5.70-bk12/drivers/macintosh/macserial.c 2003-06-07 04:47:45.000000000 -0700
@@ -2568,11 +2568,8 @@
serial_driver.magic = TTY_DRIVER_MAGIC;
serial_driver.owner = THIS_MODULE;
serial_driver.driver_name = "macserial";
-#ifdef CONFIG_DEVFS_FS
- serial_driver.name = "tts/";
-#else
+ serial_driver.devfs_name = "tts/";
serial_driver.name = "ttyS";
-#endif /* CONFIG_DEVFS_FS */
serial_driver.major = TTY_MAJOR;
serial_driver.minor_start = 64;
serial_driver.num = zs_channels_found;
diff -urN linux-2.5.70-bk11/drivers/net/Makefile linux-2.5.70-bk12/drivers/net/Makefile
--- linux-2.5.70-bk11/drivers/net/Makefile 2003-06-07 04:47:39.000000000 -0700
+++ linux-2.5.70-bk12/drivers/net/Makefile 2003-06-07 04:47:45.000000000 -0700
@@ -65,7 +65,7 @@
obj-$(CONFIG_WINBOND_840) += mii.o
obj-$(CONFIG_SUNDANCE) += sundance.o mii.o
obj-$(CONFIG_HAMACHI) += hamachi.o mii.o
-obj-$(CONFIG_NET) += Space.o setup.o net_init.o loopback.o
+obj-$(CONFIG_NET) += Space.o net_init.o loopback.o
obj-$(CONFIG_SEEQ8005) += seeq8005.o
obj-$(CONFIG_ETHERTAP) += ethertap.o
obj-$(CONFIG_NET_SB1000) += sb1000.o
diff -urN linux-2.5.70-bk11/drivers/net/pppoe.c linux-2.5.70-bk12/drivers/net/pppoe.c
--- linux-2.5.70-bk11/drivers/net/pppoe.c 2003-06-07 04:47:39.000000000 -0700
+++ linux-2.5.70-bk12/drivers/net/pppoe.c 2003-06-07 04:47:45.000000000 -0700
@@ -277,11 +277,12 @@
lock_sock(sk);
- if (sk->state & (PPPOX_CONNECTED|PPPOX_BOUND)){
+ if (sk->sk_state &
+ (PPPOX_CONNECTED | PPPOX_BOUND)) {
pppox_unbind_sock(sk);
dev_put(dev);
- sk->state = PPPOX_ZOMBIE;
- sk->state_change(sk);
+ sk->sk_state = PPPOX_ZOMBIE;
+ sk->sk_state_change(sk);
}
release_sock(sk);
@@ -347,16 +348,16 @@
struct pppox_opt *po = pppox_sk(sk);
struct pppox_opt *relay_po = NULL;
- if (sk->state & PPPOX_BOUND) {
+ if (sk->sk_state & PPPOX_BOUND) {
skb_pull(skb, sizeof(struct pppoe_hdr));
ppp_input(&po->chan, skb);
- } else if (sk->state & PPPOX_RELAY) {
+ } else if (sk->sk_state & PPPOX_RELAY) {
relay_po = get_item_by_addr(&po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
- if ((relay_po->sk->state & PPPOX_CONNECTED) == 0)
+ if ((relay_po->sk->sk_state & PPPOX_CONNECTED) == 0)
goto abort_put;
skb_pull(skb, sizeof(struct pppoe_hdr));
@@ -447,7 +448,7 @@
/* We're no longer connect at the PPPOE layer,
* and must wait for ppp channel to disconnect us.
*/
- sk->state = PPPOX_ZOMBIE;
+ sk->sk_state = PPPOX_ZOMBIE;
}
bh_unlock_sock(sk);
@@ -503,12 +504,12 @@
sock->state = SS_UNCONNECTED;
sock->ops = &pppoe_ops;
- sk->backlog_rcv = pppoe_rcv_core;
- sk->state = PPPOX_NONE;
- sk->type = SOCK_STREAM;
- sk->family = PF_PPPOX;
- sk->protocol = PX_PROTO_OE;
- sk->destruct = pppoe_sk_free;
+ sk->sk_backlog_rcv = pppoe_rcv_core;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_protocol = PX_PROTO_OE;
+ sk->sk_destruct = pppoe_sk_free;
po = pppox_sk(sk) = kmalloc(sizeof(*po), GFP_KERNEL);
if (!po)
@@ -536,7 +537,7 @@
pppox_unbind_sock(sk);
/* Signal the death of the socket. */
- sk->state = PPPOX_DEAD;
+ sk->sk_state = PPPOX_DEAD;
po = pppox_sk(sk);
if (po->pppoe_pa.sid) {
@@ -551,7 +552,7 @@
sock_orphan(sk);
sock->sk = NULL;
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
sock_put(sk);
return error;
@@ -575,12 +576,12 @@
/* Check for already bound sockets */
error = -EBUSY;
- if ((sk->state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid)
+ if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid)
goto end;
/* Check for already disconnected sockets, on attempts to disconnect */
error = -EALREADY;
- if((sk->state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid )
+ if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid )
goto end;
error = 0;
@@ -596,7 +597,7 @@
memset(po, 0, sizeof(struct pppox_opt));
po->sk = sk;
- sk->state = PPPOX_NONE;
+ sk->sk_state = PPPOX_NONE;
}
/* Don't re-bind if sid==0 */
@@ -630,7 +631,7 @@
if (error)
goto err_put;
- sk->state = PPPOX_CONNECTED;
+ sk->sk_state = PPPOX_CONNECTED;
}
po->num = sp->sa_addr.pppoe.sid;
@@ -678,7 +679,7 @@
case PPPIOCGMRU:
err = -ENXIO;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
@@ -692,7 +693,7 @@
case PPPIOCSMRU:
err = -ENXIO;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
@@ -719,11 +720,11 @@
struct pppox_opt *relay_po;
err = -EBUSY;
- if (sk->state & (PPPOX_BOUND|PPPOX_ZOMBIE|PPPOX_DEAD))
+ if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD))
break;
err = -ENOTCONN;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
/* PPPoE address from the user specifies an outbound
@@ -747,17 +748,17 @@
break;
sock_put(relay_po->sk);
- sk->state |= PPPOX_RELAY;
+ sk->sk_state |= PPPOX_RELAY;
err = 0;
break;
}
case PPPOEIOCDFWD:
err = -EALREADY;
- if (!(sk->state & PPPOX_RELAY))
+ if (!(sk->sk_state & PPPOX_RELAY))
break;
- sk->state &= ~PPPOX_RELAY;
+ sk->sk_state &= ~PPPOX_RELAY;
err = 0;
break;
@@ -780,7 +781,7 @@
struct net_device *dev;
char *start;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->state & PPPOX_CONNECTED)) {
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
error = -ENOTCONN;
goto end;
}
@@ -812,7 +813,7 @@
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->protocol = __constant_htons(ETH_P_PPP_SES);
ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr));
@@ -856,7 +857,7 @@
int data_len = skb->len;
struct sk_buff *skb2;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->state & PPPOX_CONNECTED))
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
hdr.ver = 1;
@@ -938,7 +939,7 @@
int len;
struct pppoe_hdr *ph = NULL;
- if (sk->state & PPPOX_BOUND) {
+ if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
diff -urN linux-2.5.70-bk11/drivers/net/pppox.c linux-2.5.70-bk12/drivers/net/pppox.c
--- linux-2.5.70-bk11/drivers/net/pppox.c 2003-05-26 18:00:57.000000000 -0700
+++ linux-2.5.70-bk12/drivers/net/pppox.c 2003-06-07 04:47:45.000000000 -0700
@@ -58,9 +58,9 @@
{
/* Clear connection to ppp device, if attached. */
- if (sk->state & (PPPOX_BOUND|PPPOX_ZOMBIE)) {
+ if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) {
ppp_unregister_channel(&pppox_sk(sk)->chan);
- sk->state = PPPOX_DEAD;
+ sk->sk_state = PPPOX_DEAD;
}
}
@@ -81,7 +81,7 @@
case PPPIOCGCHAN: {
int index;
rc = -ENOTCONN;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
rc = -EINVAL;
@@ -90,12 +90,13 @@
break;
rc = 0;
- sk->state |= PPPOX_BOUND;
+ sk->sk_state |= PPPOX_BOUND;
break;
}
default:
- if (pppox_protos[sk->protocol]->ioctl)
- rc = pppox_protos[sk->protocol]->ioctl(sock, cmd, arg);
+ if (pppox_protos[sk->sk_protocol]->ioctl)
+ rc = pppox_protos[sk->sk_protocol]->ioctl(sock, cmd,
+ arg);
break;
};
diff -urN linux-2.5.70-bk11/drivers/net/setup.c linux-2.5.70-bk12/drivers/net/setup.c
--- linux-2.5.70-bk11/drivers/net/setup.c 2003-06-07 04:47:39.000000000 -0700
+++ linux-2.5.70-bk12/drivers/net/setup.c 1969-12-31 16:00:00.000000000 -0800
@@ -1,54 +0,0 @@
-
-/*
- * New style setup code for the network devices
- */
-
-#include
-#include
-#include
-#include
-#include
-
-extern int scc_enet_init(void);
-extern int fec_enet_init(void);
-
-/*
- * Devices in this list must do new style probing. That is they must
- * allocate their own device objects and do their own bus scans.
- */
-
-struct net_probe
-{
- int (*probe)(void);
- int status; /* non-zero if autoprobe has failed */
-};
-
-static struct net_probe pci_probes[] __initdata = {
- /*
- * Early setup devices
- */
-#if defined(CONFIG_SCC_ENET)
- {scc_enet_init, 0},
-#endif
-#if defined(CONFIG_FEC_ENET)
- {fec_enet_init, 0},
-#endif
- {NULL, 0},
-};
-
-
-/*
- * Run the updated device probes. These do not need a device passed
- * into them.
- */
-
-void __init net_device_init(void)
-{
- struct net_probe *p = pci_probes;
-
- while (p->probe != NULL)
- {
- p->status = p->probe();
- p++;
- }
-}
diff -urN linux-2.5.70-bk11/drivers/s390/net/ctctty.c linux-2.5.70-bk12/drivers/s390/net/ctctty.c
--- linux-2.5.70-bk11/drivers/s390/net/ctctty.c 2003-06-07 04:47:39.000000000 -0700
+++ linux-2.5.70-bk12/drivers/s390/net/ctctty.c 2003-06-07 04:47:45.000000000 -0700
@@ -28,9 +28,7 @@
#include
#include
#include
-#ifdef CONFIG_DEVFS_FS
-# include
-#endif
+#include
#include "ctctty.h"
#define CTC_TTY_MAJOR 43
@@ -89,12 +87,6 @@
#define CTC_TTY_NAME "ctctty"
-#ifdef CONFIG_DEVFS_FS
-static char *ctc_ttyname = "ctc/" CTC_TTY_NAME "%d";
-#else
-static char *ctc_ttyname = CTC_TTY_NAME;
-#endif
-
static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC;
static int ctc_tty_shuttingdown = 0;
@@ -1170,7 +1162,8 @@
device = &driver->ctc_tty_device;
device->magic = TTY_DRIVER_MAGIC;
- device->name = ctc_ttyname;
+ device->devfs_name = "ctc/" CTC_TTY_NAME;
+ device->name = CTC_TTY_NAME;
device->major = CTC_TTY_MAJOR;
device->minor_start = 0;
device->num = CTC_TTY_MAX_DEVICES;
diff -urN linux-2.5.70-bk11/drivers/serial/8250.c linux-2.5.70-bk12/drivers/serial/8250.c
--- linux-2.5.70-bk11/drivers/serial/8250.c 2003-05-26 18:00:21.000000000 -0700
+++ linux-2.5.70-bk12/drivers/serial/8250.c 2003-06-07 04:47:46.000000000 -0700
@@ -1999,11 +1999,8 @@
static struct uart_driver serial8250_reg = {
.owner = THIS_MODULE,
.driver_name = "serial",
-#ifdef CONFIG_DEVFS_FS
- .dev_name = "tts/",
-#else
+ .devfs_name = "tts/",
.dev_name = "ttyS",
-#endif
.major = TTY_MAJOR,
.minor = 64,
.nr = UART_NR,
diff -urN linux-2.5.70-bk11/drivers/serial/core.c linux-2.5.70-bk12/drivers/serial/core.c
--- linux-2.5.70-bk11/drivers/serial/core.c 2003-05-26 18:00:26.000000000 -0700
+++ linux-2.5.70-bk12/drivers/serial/core.c 2003-06-07 04:47:46.000000000 -0700
@@ -2115,6 +2115,7 @@
normal->magic = TTY_DRIVER_MAGIC;
normal->owner = drv->owner;
normal->driver_name = drv->driver_name;
+ normal->devfs_name = drv->devfs_name;
normal->name = drv->dev_name;
normal->major = drv->major;
normal->minor_start = drv->minor;
diff -urN linux-2.5.70-bk11/drivers/serial/nb85e_uart.c linux-2.5.70-bk12/drivers/serial/nb85e_uart.c
--- linux-2.5.70-bk11/drivers/serial/nb85e_uart.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/drivers/serial/nb85e_uart.c 2003-06-07 04:47:46.000000000 -0700
@@ -524,11 +524,8 @@
static struct uart_driver nb85e_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "nb85e_uart",
-#ifdef CONFIG_DEVFS_FS
- .dev_name = "tts/",
-#else
+ .devfs_name = "tts/",
.dev_name = "ttyS",
-#endif
.major = TTY_MAJOR,
.minor = NB85E_UART_MINOR_BASE,
.nr = NB85E_UART_NUM_CHANNELS,
diff -urN linux-2.5.70-bk11/drivers/serial/sunsab.c linux-2.5.70-bk12/drivers/serial/sunsab.c
--- linux-2.5.70-bk11/drivers/serial/sunsab.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/serial/sunsab.c 2003-06-07 04:47:46.000000000 -0700
@@ -830,11 +830,8 @@
static struct uart_driver sunsab_reg = {
.owner = THIS_MODULE,
.driver_name = "serial",
-#ifdef CONFIG_DEVFS_FS
- .dev_name = "tts/",
-#else
+ .devfs_name = "tts/",
.dev_name = "ttyS",
-#endif
.major = TTY_MAJOR,
};
diff -urN linux-2.5.70-bk11/drivers/serial/sunsu.c linux-2.5.70-bk12/drivers/serial/sunsu.c
--- linux-2.5.70-bk11/drivers/serial/sunsu.c 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/drivers/serial/sunsu.c 2003-06-07 04:47:46.000000000 -0700
@@ -1285,11 +1285,8 @@
static struct uart_driver sunsu_reg = {
.owner = THIS_MODULE,
.driver_name = "serial",
-#ifdef CONFIG_DEVFS_FS
- .dev_name = "tts/",
-#else
+ .devfs_name = "tts/",
.dev_name = "ttyS",
-#endif
.major = TTY_MAJOR,
};
diff -urN linux-2.5.70-bk11/drivers/serial/sunzilog.c linux-2.5.70-bk12/drivers/serial/sunzilog.c
--- linux-2.5.70-bk11/drivers/serial/sunzilog.c 2003-05-26 18:01:00.000000000 -0700
+++ linux-2.5.70-bk12/drivers/serial/sunzilog.c 2003-06-07 04:47:46.000000000 -0700
@@ -1030,11 +1030,8 @@
static struct uart_driver sunzilog_reg = {
.owner = THIS_MODULE,
.driver_name = "ttyS",
-#ifdef CONFIG_DEVFS_FS
- .dev_name = "tts/",
-#else
+ .devfs_name = "tts/",
.dev_name = "ttyS",
-#endif
.major = TTY_MAJOR,
};
diff -urN linux-2.5.70-bk11/drivers/tc/zs.c linux-2.5.70-bk12/drivers/tc/zs.c
--- linux-2.5.70-bk11/drivers/tc/zs.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/tc/zs.c 2003-06-07 04:47:46.000000000 -0700
@@ -1840,11 +1840,8 @@
memset(&serial_driver, 0, sizeof(struct tty_driver));
serial_driver.magic = TTY_DRIVER_MAGIC;
serial_driver.owner = THIS_MODULE;
-#if (LINUX_VERSION_CODE > 0x2032D && defined(CONFIG_DEVFS_FS))
- serial_driver.name = "tts/";
-#else
+ serial_driver.devfs_name = "tts/";
serial_driver.name = "ttyS";
-#endif
serial_driver.major = TTY_MAJOR;
serial_driver.minor_start = 64;
serial_driver.num = zs_channels_found;
diff -urN linux-2.5.70-bk11/drivers/usb/net/catc.c linux-2.5.70-bk12/drivers/usb/net/catc.c
--- linux-2.5.70-bk11/drivers/usb/net/catc.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/net/catc.c 2003-06-07 04:47:47.000000000 -0700
@@ -795,7 +795,7 @@
memset(catc, 0, sizeof(struct catc));
- netdev = init_etherdev(0, 0);
+ netdev = alloc_etherdev(0);
if (!netdev) {
kfree(catc);
return -EIO;
@@ -933,6 +933,17 @@
for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
printk("%2.2x.\n", netdev->dev_addr[i]);
usb_set_intfdata(intf, catc);
+
+ if (register_netdev(netdev) != 0) {
+ usb_set_intfdata(intf, NULL);
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ kfree(netdev);
+ kfree(catc);
+ return -EIO;
+ }
return 0;
}
diff -urN linux-2.5.70-bk11/drivers/usb/net/cdc-ether.c linux-2.5.70-bk12/drivers/usb/net/cdc-ether.c
--- linux-2.5.70-bk11/drivers/usb/net/cdc-ether.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/net/cdc-ether.c 2003-06-07 04:47:47.000000000 -0700
@@ -1216,7 +1216,7 @@
}
// Now we need to get a kernel Ethernet interface.
- net = init_etherdev( NULL, 0 );
+ net = alloc_etherdev(0);
if ( !net ) {
// Hmm... The kernel is not sharing today...
// Fine, we didn't want it anyway...
@@ -1263,6 +1263,11 @@
// TODO - last minute HACK
ether_dev->comm_ep_in = 5;
+ if (register_netdev(net) != 0) {
+ usb_put_dev(usb);
+ goto out;
+ }
+
/* FIXME!!! This driver needs to be fixed to work with the new USB interface logic
* this is not the correct thing to be doing here, we need to set the interface
* driver specific data field.
@@ -1270,6 +1275,13 @@
// Okay, we are finally done...
return 0;
+
+out:
+ usb_driver_release_interface( &CDCEther_driver,
+ &(usb->config[ether_dev->configuration_num].interface[ether_dev->comm_interface]) );
+
+ usb_driver_release_interface( &CDCEther_driver,
+ &(usb->config[ether_dev->configuration_num].interface[ether_dev->data_interface]) );
// bailing out with our tail between our knees
error_all:
usb_free_urb(ether_dev->tx_urb);
diff -urN linux-2.5.70-bk11/drivers/usb/net/kaweth.c linux-2.5.70-bk12/drivers/usb/net/kaweth.c
--- linux-2.5.70-bk11/drivers/usb/net/kaweth.c 2003-05-26 18:00:44.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/net/kaweth.c 2003-06-07 04:47:47.000000000 -0700
@@ -1016,7 +1016,7 @@
kaweth_dbg("Initializing net device.");
- if(!(netdev = kmalloc(sizeof(struct net_device), GFP_KERNEL))) {
+ if (!(netdev = alloc_etherdev(0))) {
kfree(kaweth);
return -ENOMEM;
}
@@ -1054,18 +1054,21 @@
SET_MODULE_OWNER(netdev);
- if (!init_etherdev(netdev, 0)) {
+ usb_set_intfdata(intf, kaweth);
+
+ if (register_netdev(netdev) != 0) {
kaweth_err("Error calling init_etherdev.");
- goto err_tx_and_rx;
+ goto err_intfdata;
}
kaweth_info("kaweth interface created at %s", kaweth->net->name);
kaweth_dbg("Kaweth probe returning.");
- usb_set_intfdata(intf, kaweth);
return 0;
+err_intfdata:
+ usb_set_intfdata(intf, NULL);
err_tx_and_rx:
usb_free_urb(kaweth->rx_urb);
err_only_tx:
@@ -1113,6 +1116,7 @@
kaweth_dbg("Unregistering net device");
unregister_netdev(kaweth->net);
+ kfree(kaweth->net);
}
usb_free_urb(kaweth->rx_urb);
diff -urN linux-2.5.70-bk11/drivers/usb/net/pegasus.c linux-2.5.70-bk12/drivers/usb/net/pegasus.c
--- linux-2.5.70-bk11/drivers/usb/net/pegasus.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/net/pegasus.c 2003-06-07 04:47:47.000000000 -0700
@@ -865,8 +865,6 @@
if (!pegasus->rx_skb)
return -ENOMEM;
- down(&pegasus->sem);
-
set_registers(pegasus, EthID, 6, net->dev_addr);
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
@@ -894,8 +892,6 @@
set_carrier(net);
res = 0;
exit:
- up(&pegasus->sem);
-
return res;
}
@@ -903,13 +899,11 @@
{
pegasus_t *pegasus = net->priv;
- down(&pegasus->sem);
pegasus->flags &= ~PEGASUS_RUNNING;
netif_stop_queue(net);
if (!(pegasus->flags & PEGASUS_UNPLUG))
disable_net_traffic(pegasus);
unlink_all_urbs(pegasus);
- up(&pegasus->sem);
return 0;
}
@@ -1068,7 +1062,6 @@
pegasus_t *pegasus = net->priv;
int res;
- down(&pegasus->sem);
switch (cmd) {
case SIOCETHTOOL:
res = pegasus_ethtool_ioctl(net, rq->ifr_data);
@@ -1080,17 +1073,14 @@
res = 0;
break;
case SIOCDEVPRIVATE + 2:
- if (!capable(CAP_NET_ADMIN)) {
- up(&pegasus->sem);
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- }
write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, data[2]);
res = 0;
break;
default:
res = -EOPNOTSUPP;
}
- up(&pegasus->sem);
return res;
}
@@ -1170,33 +1160,27 @@
struct net_device *net;
pegasus_t *pegasus;
int dev_index = id - pegasus_ids;
+ int res = -ENOMEM;
+ usb_get_dev(dev);
if (!(pegasus = kmalloc(sizeof (struct pegasus), GFP_KERNEL))) {
err("out of memory allocating device structure");
- return -ENOMEM;
+ goto out;
}
- usb_get_dev(dev);
memset(pegasus, 0, sizeof (struct pegasus));
pegasus->dev_index = dev_index;
init_waitqueue_head(&pegasus->ctrl_wait);
- if (!alloc_urbs(pegasus)) {
- kfree(pegasus);
- return -ENOMEM;
- }
+ if (!alloc_urbs(pegasus))
+ goto out1;
- net = init_etherdev(NULL, 0);
- if (!net) {
- free_all_urbs(pegasus);
- kfree(pegasus);
- return -ENODEV;
- }
+ net = alloc_etherdev(0);
+ if (!net)
+ goto out2;
- init_MUTEX(&pegasus->sem);
tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
- down(&pegasus->sem);
pegasus->usb = dev;
pegasus->net = net;
SET_MODULE_OWNER(net);
@@ -1221,12 +1205,8 @@
get_interrupt_interval(pegasus);
if (reset_mac(pegasus)) {
err("can't reset MAC");
- unregister_netdev(pegasus->net);
- free_all_urbs(pegasus);
- kfree(pegasus->net);
- kfree(pegasus);
- pegasus = NULL;
- goto exit;
+ res = -EIO;
+ goto out3;
}
set_ethernet_addr(pegasus);
fill_skb_pool(pegasus);
@@ -1240,13 +1220,24 @@
warn("can't locate MII phy, using default");
pegasus->phy = 1;
}
-exit:
- up(&pegasus->sem);
- if (pegasus) {
- usb_set_intfdata(intf, pegasus);
- return 0;
- }
- return -EIO;
+ usb_set_intfdata(intf, pegasus);
+ res = register_netdev(net);
+ if (res)
+ goto out4;
+ return 0;
+
+out4:
+ usb_set_intfdata(intf, NULL);
+ free_skb_pool(pegasus);
+out3:
+ kfree(net);
+out2:
+ free_all_urbs(pegasus);
+out1:
+ kfree(pegasus);
+out:
+ usb_put_dev(dev);
+ return res;
}
static void pegasus_disconnect(struct usb_interface *intf)
@@ -1269,7 +1260,6 @@
dev_kfree_skb(pegasus->rx_skb);
kfree(pegasus->net);
kfree(pegasus);
- pegasus = NULL;
}
static struct usb_driver pegasus_driver = {
diff -urN linux-2.5.70-bk11/drivers/usb/net/pegasus.h linux-2.5.70-bk12/drivers/usb/net/pegasus.h
--- linux-2.5.70-bk11/drivers/usb/net/pegasus.h 2003-05-26 18:01:00.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/net/pegasus.h 2003-06-07 04:47:47.000000000 -0700
@@ -98,7 +98,6 @@
struct sk_buff *rx_skb;
struct usb_ctrlrequest dr;
wait_queue_head_t ctrl_wait;
- struct semaphore sem;
spinlock_t rx_pool_lock;
int chip;
unsigned char intr_buff[8];
diff -urN linux-2.5.70-bk11/drivers/usb/net/rtl8150.c linux-2.5.70-bk12/drivers/usb/net/rtl8150.c
--- linux-2.5.70-bk11/drivers/usb/net/rtl8150.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/net/rtl8150.c 2003-06-07 04:47:47.000000000 -0700
@@ -88,7 +88,6 @@
struct rtl8150 {
unsigned long flags;
struct usb_device *udev;
- struct semaphore sem;
struct tasklet_struct tl;
struct net_device_stats stats;
struct net_device *netdev;
@@ -638,8 +637,6 @@
if (!dev->rx_skb)
return -ENOMEM;
- down(&dev->sem);
-
set_registers(dev, IDR, 6, netdev->dev_addr);
usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
@@ -653,7 +650,6 @@
warn("%s: intr_urb submit failed: %d", __FUNCTION__, res);
netif_start_queue(netdev);
enable_net_traffic(dev);
- up(&dev->sem);
return res;
}
@@ -667,12 +663,10 @@
if (!dev)
return -ENODEV;
- down(&dev->sem);
netif_stop_queue(netdev);
if (!test_bit(RTL8150_UNPLUG, &dev->flags))
disable_net_traffic(dev);
unlink_all_urbs(dev);
- up(&dev->sem);
return res;
}
@@ -760,7 +754,6 @@
data = (u16 *) & rq->ifr_data;
res = 0;
- down(&dev->sem);
switch (cmd) {
case SIOCETHTOOL:
res = rtl8150_ethtool_ioctl(netdev, rq->ifr_data);
@@ -771,16 +764,13 @@
read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]);
break;
case SIOCDEVPRIVATE + 2:
- if (!capable(CAP_NET_ADMIN)) {
- up(&dev->sem);
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- }
write_mii_word(dev, dev->phy, (data[1] & 0x1f), data[2]);
break;
default:
res = -EOPNOTSUPP;
}
- up(&dev->sem);
return res;
}
@@ -798,18 +788,16 @@
} else
memset(dev, 0, sizeof(rtl8150_t));
- netdev = init_etherdev(NULL, 0);
+ netdev = alloc_etherdev(0);
if (!netdev) {
kfree(dev);
err("Oh boy, out of memory again?!?");
return -ENOMEM;
}
- init_MUTEX(&dev->sem);
tasklet_init(&dev->tl, rx_fixup, (unsigned long)dev);
spin_lock_init(&dev->rx_pool_lock);
- down(&dev->sem);
dev->udev = udev;
dev->netdev = netdev;
SET_MODULE_OWNER(netdev);
@@ -828,23 +816,30 @@
if (!alloc_all_urbs(dev)) {
err("out of memory");
- goto err;
+ goto out;
}
if (!rtl8150_reset(dev)) {
err("couldn't reset the device");
- free_all_urbs(dev);
- goto err;
+ goto out1;
}
fill_skb_pool(dev);
set_ethernet_addr(dev);
info("%s: rtl8150 is detected", netdev->name);
- up(&dev->sem);
usb_set_intfdata(intf, dev);
+
+ if (register_netdev(netdev) != 0) {
+ err("couldn't register the device");
+ goto out2;
+ }
return 0;
-err:
- unregister_netdev(dev->netdev);
- up(&dev->sem);
+
+out2:
+ usb_set_intfdata(intf, NULL);
+ free_skb_pool(dev);
+out1:
+ free_all_urbs(dev);
+out:
kfree(netdev);
kfree(dev);
return -EIO;
@@ -865,8 +860,6 @@
dev_kfree_skb(dev->rx_skb);
kfree(dev->netdev);
kfree(dev);
- dev->netdev = NULL;
- dev = NULL;
}
}
diff -urN linux-2.5.70-bk11/drivers/usb/net/usbnet.c linux-2.5.70-bk12/drivers/usb/net/usbnet.c
--- linux-2.5.70-bk11/drivers/usb/net/usbnet.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/net/usbnet.c 2003-06-07 04:47:47.000000000 -0700
@@ -205,7 +205,6 @@
// housekeeping
struct usb_device *udev;
struct driver_info *driver_info;
- struct semaphore mutex;
wait_queue_head_t *wait;
// i/o info: pipes etc
@@ -214,7 +213,7 @@
struct timer_list delay;
// protocol/interface state
- struct net_device net;
+ struct net_device *net;
struct net_device_stats stats;
int msg_level;
unsigned long data [5];
@@ -295,27 +294,24 @@
MODULE_PARM_DESC (msg_level, "Initial message level (default = 1)");
-#define mutex_lock(x) down(x)
-#define mutex_unlock(x) up(x)
-
#define RUN_CONTEXT (in_irq () ? "in_irq" \
: (in_interrupt () ? "in_interrupt" : "can sleep"))
#ifdef DEBUG
#define devdbg(usbnet, fmt, arg...) \
- printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net.name , ## arg)
+ printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net->name , ## arg)
#else
#define devdbg(usbnet, fmt, arg...) do {} while(0)
#endif
#define deverr(usbnet, fmt, arg...) \
- printk(KERN_ERR "%s: " fmt "\n" , (usbnet)->net.name , ## arg)
+ printk(KERN_ERR "%s: " fmt "\n" , (usbnet)->net->name , ## arg)
#define devwarn(usbnet, fmt, arg...) \
- printk(KERN_WARNING "%s: " fmt "\n" , (usbnet)->net.name , ## arg)
+ printk(KERN_WARNING "%s: " fmt "\n" , (usbnet)->net->name , ## arg)
#define devinfo(usbnet, fmt, arg...) \
do { if ((usbnet)->msg_level >= 1) \
- printk(KERN_INFO "%s: " fmt "\n" , (usbnet)->net.name , ## arg); \
+ printk(KERN_INFO "%s: " fmt "\n" , (usbnet)->net->name , ## arg); \
} while (0)
/*-------------------------------------------------------------------------*/
@@ -505,7 +501,7 @@
else if (tmp != 12)
return -EINVAL;
for (i = tmp = 0; i < 6; i++, tmp += 2)
- dev->net.dev_addr [i] =
+ dev->net->dev_addr [i] =
(nibble (buf [tmp]) << 4) + nibble (buf [tmp + 1]);
return 0;
}
@@ -605,7 +601,7 @@
* in routine cases. info->ether describes the multicast support.
*/
- dev->net.mtu = cpu_to_le16p (&info->ether->wMaxSegmentSize)
+ dev->net->mtu = cpu_to_le16p (&info->ether->wMaxSegmentSize)
- ETH_HLEN;
return 0;
@@ -806,18 +802,18 @@
// detect whether another side is connected
if ((retval = gl_control_write (dev, GENELINK_CONNECT_WRITE, 0)) != 0) {
dbg ("%s: genelink_check_connect write fail - %X",
- dev->net.name, retval);
+ dev->net->name, retval);
return retval;
}
// usb interrupt read to ack another side
if ((retval = gl_interrupt_read (dev)) != 0) {
dbg ("%s: genelink_check_connect read fail - %X",
- dev->net.name, retval);
+ dev->net->name, retval);
return retval;
}
- dbg ("%s: genelink_check_connect read success", dev->net.name);
+ dbg ("%s: genelink_check_connect read success", dev->net->name);
return 0;
}
@@ -829,14 +825,14 @@
// allocate the private data structure
if ((priv = kmalloc (sizeof *priv, GFP_KERNEL)) == 0) {
dbg ("%s: cannot allocate private data per device",
- dev->net.name);
+ dev->net->name);
return -ENOMEM;
}
// allocate irq urb
if ((priv->irq_urb = usb_alloc_urb (0, GFP_KERNEL)) == 0) {
dbg ("%s: cannot allocate private irq urb per device",
- dev->net.name);
+ dev->net->name);
kfree (priv);
return -ENOMEM;
}
@@ -920,14 +916,11 @@
if (gl_skb) {
// copy the packet data to the new skb
- memcpy (gl_skb->data, packet->packet_data, size);
-
- // set skb data size
- gl_skb->len = size;
- gl_skb->dev = &dev->net;
+ memcpy(skb_put(gl_skb, size), packet->packet_data, size);
+ gl_skb->dev = dev->net;
// determine the packet's protocol ID
- gl_skb->protocol = eth_type_trans (gl_skb, &dev->net);
+ gl_skb->protocol = eth_type_trans (gl_skb, dev->net);
// update the status
dev->stats.rx_packets++;
@@ -1143,7 +1136,7 @@
return;
}
- dbg ("%s registers:", dev->net.name);
+ dbg ("%s registers:", dev->net->name);
for (reg = 0; reg < 0x20; reg++) {
int retval;
@@ -1156,10 +1149,10 @@
retval = nc_register_read (dev, reg, vp);
if (retval < 0)
dbg ("%s reg [0x%x] ==> error %d",
- dev->net.name, reg, retval);
+ dev->net->name, reg, retval);
else
dbg ("%s reg [0x%x] = 0x%x",
- dev->net.name, reg, *vp);
+ dev->net->name, reg, *vp);
}
kfree (vp);
}
@@ -1321,7 +1314,7 @@
nc_register_write (dev, REG_TTL,
MK_TTL (NC_READ_TTL_MS, TTL_OTHER (ttl)) );
- dbg ("%s: assigned TTL, %d ms", dev->net.name, NC_READ_TTL_MS);
+ dbg ("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
if (dev->msg_level >= 2)
devinfo (dev, "port %c, peer %sconnected",
@@ -1347,7 +1340,7 @@
status = *vp;
kfree (vp);
if (retval != 0) {
- dbg ("%s net1080_check_conn read - %d", dev->net.name, retval);
+ dbg ("%s net1080_check_conn read - %d", dev->net->name, retval);
return retval;
}
if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER)
@@ -1416,11 +1409,11 @@
if (!(skb->len & 0x01)
|| MIN_FRAMED > skb->len
- || skb->len > FRAMED_SIZE (dev->net.mtu)) {
+ || skb->len > FRAMED_SIZE (dev->net->mtu)) {
dev->stats.rx_frame_errors++;
dbg ("rx framesize %d range %d..%d mtu %d", skb->len,
- (int)MIN_FRAMED, (int)FRAMED_SIZE (dev->net.mtu),
- dev->net.mtu);
+ (int)MIN_FRAMED, (int)FRAMED_SIZE (dev->net->mtu),
+ dev->net->mtu);
nc_ensure_sync (dev);
return 0;
}
@@ -1795,7 +1788,7 @@
#ifdef CONFIG_USB_NET1080
if (dev->driver_info->flags & FLAG_FRAMING_NC)
- size = FRAMED_SIZE (dev->net.mtu);
+ size = FRAMED_SIZE (dev->net->mtu);
else
#endif
#ifdef CONFIG_USB_GENESYS
@@ -1805,10 +1798,10 @@
#endif
#ifdef CONFIG_USB_ZAURUS
if (dev->driver_info->flags & FLAG_FRAMING_Z)
- size = 6 + (sizeof (struct ethhdr) + dev->net.mtu);
+ size = 6 + (sizeof (struct ethhdr) + dev->net->mtu);
else
#endif
- size = (sizeof (struct ethhdr) + dev->net.mtu);
+ size = (sizeof (struct ethhdr) + dev->net->mtu);
if ((skb = alloc_skb (size, flags)) == 0) {
devdbg (dev, "no rx skb");
@@ -1829,8 +1822,8 @@
spin_lock_irqsave (&dev->rxq.lock, lockflags);
- if (netif_running (&dev->net)
- && netif_device_present (&dev->net)
+ if (netif_running (dev->net)
+ && netif_device_present (dev->net)
&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
case -EPIPE:
@@ -1841,7 +1834,7 @@
break;
case -ENODEV:
devdbg (dev, "device gone");
- netif_device_detach (&dev->net);
+ netif_device_detach (dev->net);
break;
default:
devdbg (dev, "rx submit, %d", retval);
@@ -1874,8 +1867,8 @@
if (skb->len) {
int status;
- skb->dev = &dev->net;
- skb->protocol = eth_type_trans (skb, &dev->net);
+ skb->dev = dev->net;
+ skb->protocol = eth_type_trans (skb, dev->net);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
@@ -1968,7 +1961,7 @@
defer_bh (dev, skb);
if (urb) {
- if (netif_running (&dev->net)
+ if (netif_running (dev->net)
&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
rx_submit (dev, urb, GFP_ATOMIC);
return;
@@ -2024,7 +2017,6 @@
DECLARE_WAIT_QUEUE_HEAD (unlink_wakeup);
DECLARE_WAITQUEUE (wait, current);
- mutex_lock (&dev->mutex);
netif_stop_queue (net);
if (dev->msg_level >= 2)
@@ -2054,7 +2046,6 @@
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
- mutex_unlock (&dev->mutex);
return 0;
}
@@ -2070,8 +2061,6 @@
int retval = 0;
struct driver_info *info = dev->driver_info;
- mutex_lock (&dev->mutex);
-
// put into "known safe" state
if (info->reset && (retval = info->reset (dev)) < 0) {
devinfo (dev, "open reset fail (%d) usbnet usb-%s-%s, %s",
@@ -2091,7 +2080,7 @@
if (dev->msg_level >= 2)
devinfo (dev, "open: enable queueing "
"(rx %d, tx %d) mtu %d %s framing",
- RX_QLEN (dev), TX_QLEN (dev), dev->net.mtu,
+ RX_QLEN (dev), TX_QLEN (dev), dev->net->mtu,
(info->flags & (FLAG_FRAMING_NC | FLAG_FRAMING_GL))
? ((info->flags & FLAG_FRAMING_NC)
? "NetChip"
@@ -2102,7 +2091,6 @@
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
done:
- mutex_unlock (&dev->mutex);
return retval;
}
@@ -2201,7 +2189,7 @@
status);
else {
clear_bit (EVENT_TX_HALT, &dev->flags);
- netif_wake_queue (&dev->net);
+ netif_wake_queue (dev->net);
}
}
if (test_bit (EVENT_RX_HALT, &dev->flags)) {
@@ -2220,7 +2208,7 @@
if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = 0;
- if (netif_running (&dev->net))
+ if (netif_running (dev->net))
urb = usb_alloc_urb (0, GFP_KERNEL);
else
clear_bit (EVENT_RX_MEMORY, &dev->flags);
@@ -2265,7 +2253,7 @@
jiffies + THROTTLE_JIFFIES);
devdbg (dev, "tx throttle %d", urb->status);
}
- netif_stop_queue (&dev->net);
+ netif_stop_queue (dev->net);
break;
default:
devdbg (dev, "tx err %d", entry->urb->status);
@@ -2438,8 +2426,8 @@
}
// or are we maybe short a few urbs?
- } else if (netif_running (&dev->net)
- && netif_device_present (&dev->net)
+ } else if (netif_running (dev->net)
+ && netif_device_present (dev->net)
&& !timer_pending (&dev->delay)
&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
@@ -2461,7 +2449,7 @@
tasklet_schedule (&dev->bh);
}
if (dev->txq.qlen < TX_QLEN (dev))
- netif_wake_queue (&dev->net);
+ netif_wake_queue (dev->net);
}
}
@@ -2491,11 +2479,12 @@
xdev->bus->bus_name, xdev->devpath,
dev->driver_info->description);
- unregister_netdev (&dev->net);
+ unregister_netdev (dev->net);
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
+ kfree(dev->net);
kfree (dev);
usb_put_dev (xdev);
}
@@ -2523,15 +2512,17 @@
xdev = interface_to_usbdev (udev);
interface = &udev->altsetting [udev->act_altsetting];
+ usb_get_dev (xdev);
+
+ status = -ENOMEM;
+
// set up our own records
if (!(dev = kmalloc (sizeof *dev, GFP_KERNEL))) {
dbg ("can't kmalloc dev");
- return -ENOMEM;
+ goto out;
}
memset (dev, 0, sizeof *dev);
- init_MUTEX_LOCKED (&dev->mutex);
- usb_get_dev (xdev);
dev->udev = xdev;
dev->driver_info = info;
dev->msg_level = msg_level;
@@ -2546,16 +2537,16 @@
init_timer (&dev->delay);
// set up network interface records
- net = &dev->net;
+ net = alloc_etherdev(0);
+ if (!net)
+ goto out1;
+
SET_MODULE_OWNER (net);
+ dev->net = net;
net->priv = dev;
strcpy (net->name, "usb%d");
memcpy (net->dev_addr, node_id, sizeof node_id);
- // point-to-point link ... we always use Ethernet headers
- // supports win32 interop (some devices) and the bridge driver.
- ether_setup (net);
-
// possible with some EHCI controllers
if (dma_supported (&udev->dev, 0xffffffffffffffffULL))
net->features |= NETIF_F_HIGHDMA;
@@ -2592,26 +2583,37 @@
status = 0;
}
- if (status < 0) {
- kfree (dev);
- return status;
- }
+ if (status < 0)
+ goto out2;
+
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
- SET_NETDEV_DEV(&dev->net, &dev->udev->dev);
- register_netdev (&dev->net);
+ SET_NETDEV_DEV(dev->net, &dev->udev->dev);
+ status = register_netdev (dev->net);
+ if (status)
+ goto out3;
devinfo (dev, "register usbnet at usb-%s-%s, %s",
xdev->bus->bus_name, xdev->devpath,
dev->driver_info->description);
// ok, it's ready to go.
usb_set_intfdata (udev, dev);
- mutex_unlock (&dev->mutex);
// start as if the link is up
- netif_device_attach (&dev->net);
+ netif_device_attach (dev->net);
return 0;
+
+out3:
+ if (info->unbind)
+ info->unbind (dev, udev);
+out2:
+ kfree(net);
+out1:
+ kfree(dev);
+out:
+ usb_put_dev(xdev);
+ return status;
}
diff -urN linux-2.5.70-bk11/drivers/usb/serial/usb-serial.c linux-2.5.70-bk12/drivers/usb/serial/usb-serial.c
--- linux-2.5.70-bk11/drivers/usb/serial/usb-serial.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/drivers/usb/serial/usb-serial.c 2003-06-07 04:47:47.000000000 -0700
@@ -1304,11 +1304,8 @@
.magic = TTY_DRIVER_MAGIC,
.owner = THIS_MODULE,
.driver_name = "usbserial",
-#ifndef CONFIG_DEVFS_FS
+ .devfs_name = "usb/tts/",
.name = "ttyUSB",
-#else
- .name = "usb/tts/",
-#endif
.major = SERIAL_TTY_MAJOR,
.minor_start = 0,
.num = SERIAL_TTY_MINORS,
diff -urN linux-2.5.70-bk11/fs/buffer.c linux-2.5.70-bk12/fs/buffer.c
--- linux-2.5.70-bk11/fs/buffer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/fs/buffer.c 2003-06-07 04:47:47.000000000 -0700
@@ -36,6 +36,7 @@
#include
#include
#include
+#include
#include
static void invalidate_bh_lrus(void);
diff -urN linux-2.5.70-bk11/fs/exec.c linux-2.5.70-bk12/fs/exec.c
--- linux-2.5.70-bk11/fs/exec.c 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/fs/exec.c 2003-06-07 04:47:47.000000000 -0700
@@ -614,8 +614,6 @@
spin_unlock_irq(lock);
schedule();
spin_lock_irq(lock);
- if (oldsig->group_exit_task)
- BUG();
}
spin_unlock_irq(lock);
diff -urN linux-2.5.70-bk11/fs/fat/misc.c linux-2.5.70-bk12/fs/fat/misc.c
--- linux-2.5.70-bk11/fs/fat/misc.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/fs/fat/misc.c 2003-06-07 04:47:47.000000000 -0700
@@ -311,7 +311,7 @@
*bh = sb_bread(sb, phys);
if (*bh == NULL) {
printk(KERN_ERR "FAT: Directory bread(block %llu) failed\n",
- phys);
+ (unsigned long long)phys);
/* skip this block */
*pos = (iblock + 1) << sb->s_blocksize_bits;
goto next;
diff -urN linux-2.5.70-bk11/fs/fcntl.c linux-2.5.70-bk12/fs/fcntl.c
--- linux-2.5.70-bk11/fs/fcntl.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/fs/fcntl.c 2003-06-07 04:47:47.000000000 -0700
@@ -12,6 +12,7 @@
#include
#include
#include
+#include
#include
#include
@@ -318,6 +319,7 @@
* to fix this will be in libc.
*/
err = filp->f_owner.pid;
+ force_successful_syscall_return();
break;
case F_SETOWN:
err = f_setown(filp, arg, 1);
diff -urN linux-2.5.70-bk11/fs/ncpfs/inode.c linux-2.5.70-bk12/fs/ncpfs/inode.c
--- linux-2.5.70-bk11/fs/ncpfs/inode.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/fs/ncpfs/inode.c 2003-06-07 04:47:47.000000000 -0700
@@ -295,9 +295,9 @@
static void ncp_stop_tasks(struct ncp_server *server) {
struct sock* sk = server->ncp_sock->sk;
- sk->error_report = server->error_report;
- sk->data_ready = server->data_ready;
- sk->write_space = server->write_space;
+ sk->sk_error_report = server->error_report;
+ sk->sk_data_ready = server->data_ready;
+ sk->sk_write_space = server->write_space;
del_timer_sync(&server->timeout_tm);
flush_scheduled_work();
}
@@ -550,12 +550,12 @@
INIT_LIST_HEAD(&server->tx.requests);
init_MUTEX(&server->rcv.creq_sem);
- server->tx.creq = NULL;
- server->rcv.creq = NULL;
- server->data_ready = sock->sk->data_ready;
- server->write_space = sock->sk->write_space;
- server->error_report = sock->sk->error_report;
- sock->sk->user_data = server;
+ server->tx.creq = NULL;
+ server->rcv.creq = NULL;
+ server->data_ready = sock->sk->sk_data_ready;
+ server->write_space = sock->sk->sk_write_space;
+ server->error_report = sock->sk->sk_error_report;
+ sock->sk->sk_user_data = server;
init_timer(&server->timeout_tm);
#undef NCP_PACKET_SIZE
@@ -566,15 +566,15 @@
if (server->packet == NULL)
goto out_nls;
- sock->sk->data_ready = ncp_tcp_data_ready;
- sock->sk->error_report = ncp_tcp_error_report;
+ sock->sk->sk_data_ready = ncp_tcp_data_ready;
+ sock->sk->sk_error_report = ncp_tcp_error_report;
if (sock->type == SOCK_STREAM) {
server->rcv.ptr = (unsigned char*)&server->rcv.buf;
server->rcv.len = 10;
server->rcv.state = 0;
INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
- sock->sk->write_space = ncp_tcp_write_space;
+ sock->sk->sk_write_space = ncp_tcp_write_space;
} else {
INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
diff -urN linux-2.5.70-bk11/fs/ncpfs/sock.c linux-2.5.70-bk12/fs/ncpfs/sock.c
--- linux-2.5.70-bk11/fs/ncpfs/sock.c 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/fs/ncpfs/sock.c 2003-06-07 04:47:47.000000000 -0700
@@ -83,21 +83,21 @@
};
void ncp_tcp_data_ready(struct sock *sk, int len) {
- struct ncp_server *server = sk->user_data;
+ struct ncp_server *server = sk->sk_user_data;
server->data_ready(sk, len);
schedule_work(&server->rcv.tq);
}
void ncp_tcp_error_report(struct sock *sk) {
- struct ncp_server *server = sk->user_data;
+ struct ncp_server *server = sk->sk_user_data;
server->error_report(sk);
schedule_work(&server->rcv.tq);
}
void ncp_tcp_write_space(struct sock *sk) {
- struct ncp_server *server = sk->user_data;
+ struct ncp_server *server = sk->sk_user_data;
/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
not vice versa... */
@@ -427,7 +427,7 @@
unsigned int hdrl;
result -= 8;
- hdrl = sock->sk->family == AF_INET ? 8 : 6;
+ hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
if (sign_verify_reply(server, ((char*)req->reply_buf) + hdrl, result - hdrl, cpu_to_le32(result), ((char*)req->reply_buf) + result)) {
printk(KERN_INFO "ncpfs: Signature violation\n");
result = -EIO;
diff -urN linux-2.5.70-bk11/fs/proc/base.c linux-2.5.70-bk12/fs/proc/base.c
--- linux-2.5.70-bk11/fs/proc/base.c 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/fs/proc/base.c 2003-06-07 04:47:47.000000000 -0700
@@ -557,7 +557,24 @@
}
#endif
+static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
+{
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ break;
+ case 1:
+ file->f_pos += offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+ force_successful_syscall_return();
+ return file->f_pos;
+}
+
static struct file_operations proc_mem_operations = {
+ .llseek = mem_lseek,
.read = mem_read,
.write = mem_write,
.open = mem_open,
diff -urN linux-2.5.70-bk11/fs/proc/proc_misc.c linux-2.5.70-bk12/fs/proc/proc_misc.c
--- linux-2.5.70-bk11/fs/proc/proc_misc.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/fs/proc/proc_misc.c 2003-06-07 04:47:47.000000000 -0700
@@ -378,8 +378,23 @@
{
int i, len;
extern unsigned long total_forks;
- u64 jif = get_jiffies_64() - INITIAL_JIFFIES;
+ u64 jif;
unsigned int sum = 0, user = 0, nice = 0, system = 0, idle = 0, iowait = 0;
+ struct timeval now;
+ unsigned long seq;
+
+ /* Atomically read jiffies and time of day */
+ do {
+ seq = read_seqbegin(&xtime_lock);
+
+ jif = get_jiffies_64();
+ do_gettimeofday(&now);
+ } while (read_seqretry(&xtime_lock, seq));
+
+ /* calc # of seconds since boot time */
+ jif -= INITIAL_JIFFIES;
+ jif = ((u64)now.tv_sec * HZ) + (now.tv_usec/(1000000/HZ)) - jif;
+ do_div(jif, HZ);
for (i = 0 ; i < NR_CPUS; i++) {
int j;
@@ -419,7 +434,6 @@
len += sprintf(page + len, " %u", kstat_irqs(i));
#endif
- do_div(jif, HZ);
len += sprintf(page + len,
"\nctxt %lu\n"
"btime %lu\n"
@@ -427,7 +441,7 @@
"procs_running %lu\n"
"procs_blocked %lu\n",
nr_context_switches(),
- xtime.tv_sec - (unsigned long) jif,
+ (unsigned long)jif,
total_forks,
nr_running(),
nr_iowait());
diff -urN linux-2.5.70-bk11/include/asm-alpha/ptrace.h linux-2.5.70-bk12/include/asm-alpha/ptrace.h
--- linux-2.5.70-bk11/include/asm-alpha/ptrace.h 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-alpha/ptrace.h 2003-06-07 04:47:47.000000000 -0700
@@ -70,6 +70,19 @@
#define user_mode(regs) (((regs)->ps & 8) != 0)
#define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
+
+/*
+ * TODO: if kernel-only threads do not have a dummy pt_regs structure at the
+ * top of the stack, this would cause kernel stack corruption. Either check
+ * first that we're not dealing with a kernel thread or change the kernel
+ * stacks to allocate a dummy pt_regs structure.
+ */
+
+#define alpha_task_regs(task) ((struct pt_regs *) \
+ ((long) task->thread_info + PAGE_SIZE) - 1)
+
+#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
+
#endif
#endif
diff -urN linux-2.5.70-bk11/include/asm-generic/percpu.h linux-2.5.70-bk12/include/asm-generic/percpu.h
--- linux-2.5.70-bk11/include/asm-generic/percpu.h 2003-05-26 18:00:26.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-generic/percpu.h 2003-06-07 04:47:47.000000000 -0700
@@ -8,22 +8,25 @@
extern unsigned long __per_cpu_offset[NR_CPUS];
/* Separate out the type, so (int[3], foo) works. */
-#ifndef MODULE
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) name##__per_cpu
-#endif
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*RELOC_HIDE(&var##__per_cpu, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
+static inline void percpu_modcopy(void *pcpudst, const void *src,
+ unsigned long size)
+{
+ unsigned int i;
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_possible(i))
+ memcpy(pcpudst + __per_cpu_offset[i], src, size);
+}
#else /* ! SMP */
-/* Can't define per-cpu variables in modules. Sorry --RR */
-#ifndef MODULE
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) name##__per_cpu
-#endif
#define per_cpu(var, cpu) ((void)cpu, var##__per_cpu)
#define __get_cpu_var(var) var##__per_cpu
diff -urN linux-2.5.70-bk11/include/asm-i386/cpu.h linux-2.5.70-bk12/include/asm-i386/cpu.h
--- linux-2.5.70-bk11/include/asm-i386/cpu.h 2003-05-26 18:00:43.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-i386/cpu.h 2003-06-07 04:47:47.000000000 -0700
@@ -3,8 +3,8 @@
#include
#include
+#include
-#include
#include
struct i386_cpu {
diff -urN linux-2.5.70-bk11/include/asm-i386/memblk.h linux-2.5.70-bk12/include/asm-i386/memblk.h
--- linux-2.5.70-bk11/include/asm-i386/memblk.h 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-i386/memblk.h 2003-06-07 04:47:47.000000000 -0700
@@ -4,8 +4,8 @@
#include
#include
#include
+#include
-#include
#include
struct i386_memblk {
diff -urN linux-2.5.70-bk11/include/asm-i386/node.h linux-2.5.70-bk12/include/asm-i386/node.h
--- linux-2.5.70-bk11/include/asm-i386/node.h 2003-05-26 18:00:27.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-i386/node.h 2003-06-07 04:47:47.000000000 -0700
@@ -4,8 +4,7 @@
#include
#include
#include
-
-#include
+#include
struct i386_node {
struct node node;
diff -urN linux-2.5.70-bk11/include/asm-ia64/percpu.h linux-2.5.70-bk12/include/asm-ia64/percpu.h
--- linux-2.5.70-bk11/include/asm-ia64/percpu.h 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-ia64/percpu.h 2003-06-07 04:47:47.000000000 -0700
@@ -8,6 +8,7 @@
* Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang
*/
+#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
#ifdef __ASSEMBLY__
@@ -19,15 +20,15 @@
extern unsigned long __per_cpu_offset[NR_CPUS];
-#ifndef MODULE
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) name##__per_cpu
-#endif
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) name##__per_cpu
#define __get_cpu_var(var) (var##__per_cpu)
#ifdef CONFIG_SMP
# define per_cpu(var, cpu) (*RELOC_HIDE(&var##__per_cpu, __per_cpu_offset[cpu]))
+
+extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
#else
# define per_cpu(var, cpu) ((void)cpu, __get_cpu_var(var))
#endif
diff -urN linux-2.5.70-bk11/include/asm-ia64/ptrace.h linux-2.5.70-bk12/include/asm-ia64/ptrace.h
--- linux-2.5.70-bk11/include/asm-ia64/ptrace.h 2003-05-26 18:00:42.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-ia64/ptrace.h 2003-06-07 04:47:47.000000000 -0700
@@ -250,11 +250,10 @@
extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);
-static inline void
-force_successful_syscall_return (void)
-{
- ia64_task_regs(current)->r8 = 0;
-}
+#define force_successful_syscall_return() \
+ do { \
+ ia64_task_regs(current)->r8 = 0; \
+ } while (0)
#endif /* !__KERNEL__ */
diff -urN linux-2.5.70-bk11/include/asm-ppc64/mmzone.h linux-2.5.70-bk12/include/asm-ppc64/mmzone.h
--- linux-2.5.70-bk11/include/asm-ppc64/mmzone.h 2003-05-26 18:00:59.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-ppc64/mmzone.h 2003-06-07 04:47:48.000000000 -0700
@@ -21,6 +21,7 @@
extern int numa_cpu_lookup_table[];
extern int numa_memory_lookup_table[];
extern unsigned long numa_cpumask_lookup_table[];
+extern int nr_cpus_in_node[];
#define MAX_MEMORY (1UL << 41)
/* 256MB regions */
diff -urN linux-2.5.70-bk11/include/asm-ppc64/topology.h linux-2.5.70-bk12/include/asm-ppc64/topology.h
--- linux-2.5.70-bk11/include/asm-ppc64/topology.h 2003-05-26 18:00:59.000000000 -0700
+++ linux-2.5.70-bk12/include/asm-ppc64/topology.h 2003-06-07 04:47:48.000000000 -0700
@@ -38,6 +38,8 @@
#define pcibus_to_cpumask(bus) (cpu_online_map)
+#define nr_cpus_node(node) (nr_cpus_in_node[node])
+
/* Cross-node load balancing interval. */
#define NODE_BALANCE_RATE 10
diff -urN linux-2.5.70-bk11/include/linux/atalk.h linux-2.5.70-bk12/include/linux/atalk.h
--- linux-2.5.70-bk11/include/linux/atalk.h 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/atalk.h 2003-06-07 04:47:48.000000000 -0700
@@ -196,7 +196,7 @@
extern void aarp_cleanup_module(void);
#endif /* MODULE */
-#define at_sk(__sk) ((struct atalk_sock *)(__sk)->protinfo)
+#define at_sk(__sk) ((struct atalk_sock *)(__sk)->sk_protinfo)
extern struct sock *atalk_sockets;
extern rwlock_t atalk_sockets_lock;
diff -urN linux-2.5.70-bk11/include/linux/atmdev.h linux-2.5.70-bk12/include/linux/atmdev.h
--- linux-2.5.70-bk11/include/linux/atmdev.h 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/atmdev.h 2003-06-07 04:47:48.000000000 -0700
@@ -30,7 +30,7 @@
#define ATM_DS3_PCR (8000*12)
/* DS3: 12 cells in a 125 usec time slot */
-#define atm_sk(__sk) ((struct atm_vcc *)(__sk)->protinfo)
+#define atm_sk(__sk) ((struct atm_vcc *)(__sk)->sk_protinfo)
#define ATM_SD(s) (atm_sk((s)->sk))
@@ -413,19 +413,20 @@
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
- atomic_add(truesize, &vcc->sk->rmem_alloc);
+ atomic_add(truesize, &vcc->sk->sk_rmem_alloc);
}
static inline void atm_return(struct atm_vcc *vcc,int truesize)
{
- atomic_sub(truesize, &vcc->sk->rmem_alloc);
+ atomic_sub(truesize, &vcc->sk->sk_rmem_alloc);
}
static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
{
- return (size + atomic_read(&vcc->sk->wmem_alloc)) < vcc->sk->sndbuf;
+ return (size + atomic_read(&vcc->sk->sk_wmem_alloc)) <
+ vcc->sk->sk_sndbuf;
}
diff -urN linux-2.5.70-bk11/include/linux/bitops.h linux-2.5.70-bk12/include/linux/bitops.h
--- linux-2.5.70-bk11/include/linux/bitops.h 2003-05-26 18:00:59.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/bitops.h 2003-06-07 04:47:48.000000000 -0700
@@ -1,5 +1,6 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
+#include
#include
/*
@@ -107,7 +108,25 @@
return (res & 0x0F) + ((res >> 4) & 0x0F);
}
-#include
+static inline unsigned long generic_hweight64(u64 w)
+{
+#if BITS_PER_LONG < 64
+ return generic_hweight32((unsigned int)(w >> 32)) +
+ generic_hweight32((unsigned int)w);
+#else
+ u64 res;
+ res = (w & 0x5555555555555555) + ((w >> 1) & 0x5555555555555555);
+ res = (res & 0x3333333333333333) + ((res >> 2) & 0x3333333333333333);
+ res = (res & 0x0F0F0F0F0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F0F0F0F0F);
+ res = (res & 0x00FF00FF00FF00FF) + ((res >> 8) & 0x00FF00FF00FF00FF);
+ res = (res & 0x0000FFFF0000FFFF) + ((res >> 16) & 0x0000FFFF0000FFFF);
+ return (res & 0x00000000FFFFFFFF) + ((res >> 32) & 0x00000000FFFFFFFF);
+#endif
+}
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
+}
#endif
diff -urN linux-2.5.70-bk11/include/linux/cpu.h linux-2.5.70-bk12/include/linux/cpu.h
--- linux-2.5.70-bk11/include/linux/cpu.h 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/cpu.h 2003-06-07 04:47:48.000000000 -0700
@@ -31,6 +31,24 @@
extern int register_cpu(struct cpu *, int, struct node *);
extern struct class cpu_class;
+struct notifier_block;
+
+#ifdef CONFIG_SMP
+/* Need to know about CPUs going up/down? */
+extern int register_cpu_notifier(struct notifier_block *nb);
+extern void unregister_cpu_notifier(struct notifier_block *nb);
+
+int cpu_up(unsigned int cpu);
+#else
+static inline int register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+#endif /* CONFIG_SMP */
+
/* Stop CPUs going up and down. */
extern struct semaphore cpucontrol;
#endif /* _LINUX_CPU_H_ */
diff -urN linux-2.5.70-bk11/include/linux/genhd.h linux-2.5.70-bk12/include/linux/genhd.h
--- linux-2.5.70-bk11/include/linux/genhd.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/genhd.h 2003-06-07 04:47:48.000000000 -0700
@@ -158,16 +158,15 @@
#ifdef CONFIG_SMP
static inline int init_disk_stats(struct gendisk *disk)
{
- disk->dkstats = kmalloc_percpu(sizeof (struct disk_stats), GFP_KERNEL);
+ disk->dkstats = alloc_percpu(struct disk_stats);
if (!disk->dkstats)
return 0;
- disk_stat_set_all(disk, 0);
return 1;
}
static inline void free_disk_stats(struct gendisk *disk)
{
- kfree_percpu(disk->dkstats);
+ free_percpu(disk->dkstats);
}
#else /* CONFIG_SMP */
static inline int init_disk_stats(struct gendisk *disk)
diff -urN linux-2.5.70-bk11/include/linux/if_ec.h linux-2.5.70-bk12/include/linux/if_ec.h
--- linux-2.5.70-bk11/include/linux/if_ec.h 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/if_ec.h 2003-06-07 04:47:48.000000000 -0700
@@ -56,7 +56,7 @@
unsigned short num;
};
-#define ec_sk(__sk) ((struct econet_opt *)(__sk)->protinfo)
+#define ec_sk(__sk) ((struct econet_opt *)(__sk)->sk_protinfo)
struct ec_device
{
diff -urN linux-2.5.70-bk11/include/linux/if_pppox.h linux-2.5.70-bk12/include/linux/if_pppox.h
--- linux-2.5.70-bk11/include/linux/if_pppox.h 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/if_pppox.h 2003-06-07 04:47:48.000000000 -0700
@@ -132,7 +132,7 @@
#define pppoe_pa proto.pppoe.pa
#define pppoe_relay proto.pppoe.relay
-#define pppox_sk(__sk) ((struct pppox_opt *)(__sk)->protinfo)
+#define pppox_sk(__sk) ((struct pppox_opt *)(__sk)->sk_protinfo)
struct module;
diff -urN linux-2.5.70-bk11/include/linux/if_wanpipe.h linux-2.5.70-bk12/include/linux/if_wanpipe.h
--- linux-2.5.70-bk11/include/linux/if_wanpipe.h 2003-05-26 18:01:00.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/if_wanpipe.h 2003-06-07 04:47:48.000000000 -0700
@@ -34,7 +34,7 @@
typedef struct
{
unsigned char free;
- unsigned char sk_state;
+ unsigned char state_sk;
int rcvbuf;
int sndbuf;
int rmem;
@@ -117,7 +117,7 @@
unsigned short num;
};
-#define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->protinfo)
+#define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->sk_protinfo)
#endif
diff -urN linux-2.5.70-bk11/include/linux/ipv6.h linux-2.5.70-bk12/include/linux/ipv6.h
--- linux-2.5.70-bk11/include/linux/ipv6.h 2003-05-26 18:00:44.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/ipv6.h 2003-06-07 04:47:48.000000000 -0700
@@ -229,7 +229,7 @@
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
-#define ipv6_only_sock(sk) ((sk)->family == PF_INET6 && __ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
#else
#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
diff -urN linux-2.5.70-bk11/include/linux/irq.h linux-2.5.70-bk12/include/linux/irq.h
--- linux-2.5.70-bk11/include/linux/irq.h 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/irq.h 2003-06-07 04:47:48.000000000 -0700
@@ -61,6 +61,8 @@
hw_irq_controller *handler;
struct irqaction *action; /* IRQ action list */
unsigned int depth; /* nested irq disables */
+ unsigned int irq_count; /* For detecting broken interrupts */
+ unsigned int irqs_unhandled;
spinlock_t lock;
} ____cacheline_aligned irq_desc_t;
diff -urN linux-2.5.70-bk11/include/linux/mmzone.h linux-2.5.70-bk12/include/linux/mmzone.h
--- linux-2.5.70-bk11/include/linux/mmzone.h 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/mmzone.h 2003-06-07 04:47:48.000000000 -0700
@@ -249,13 +249,32 @@
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
+/**
+ * is_highmem - helper function to quickly check if a struct zone is a
+ * highmem zone or not. This is an attempt to keep references
+ * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
+ * @zone - pointer to struct zone variable
+ */
+static inline int is_highmem(struct zone *zone)
+{
+ return (zone - zone->zone_pgdat->node_zones == ZONE_HIGHMEM);
+}
+
+/* These two functions are used to setup the per zone pages min values */
+struct ctl_table;
+struct file;
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
+ void *, size_t *);
+extern void setup_per_zone_pages_min(void);
+
+
#ifdef CONFIG_NUMA
#define MAX_NR_MEMBLKS BITS_PER_LONG /* Max number of Memory Blocks */
#else /* !CONFIG_NUMA */
#define MAX_NR_MEMBLKS 1
#endif /* CONFIG_NUMA */
-#include <asm/topology.h>
+#include <linux/topology.h>
/* Returns the number of the current Node. */
#define numa_node_id() (cpu_to_node(smp_processor_id()))
diff -urN linux-2.5.70-bk11/include/linux/module.h linux-2.5.70-bk12/include/linux/module.h
--- linux-2.5.70-bk11/include/linux/module.h 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/module.h 2003-06-07 04:47:48.000000000 -0700
@@ -247,6 +247,9 @@
char *strtab;
#endif
+ /* Per-cpu data. */
+ void *percpu;
+
/* The command line arguments (may be mangled). People like
keeping pointers to this stuff */
char *args;
@@ -263,6 +266,13 @@
/* Is this address in a module? */
struct module *module_text_address(unsigned long addr);
+/* Returns module and fills in value, defined and namebuf, or NULL if
+ symnum out of range. */
+struct module *module_get_kallsym(unsigned int symnum,
+ unsigned long *value,
+ char *type,
+ char namebuf[128]);
+int is_exported(const char *name, const struct module *mod);
#ifdef CONFIG_MODULE_UNLOAD
unsigned int module_refcount(struct module *mod);
@@ -408,6 +418,19 @@
return NULL;
}
+static inline struct module *module_get_kallsym(unsigned int symnum,
+ unsigned long *value,
+ char *type,
+ char namebuf[128])
+{
+ return NULL;
+}
+
+static inline int is_exported(const char *name, const struct module *mod)
+{
+ return 0;
+}
+
static inline int register_module_notifier(struct notifier_block * nb)
{
/* no events will happen anyway, so this can always succeed */
diff -urN linux-2.5.70-bk11/include/linux/netdevice.h linux-2.5.70-bk12/include/linux/netdevice.h
--- linux-2.5.70-bk11/include/linux/netdevice.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/netdevice.h 2003-06-07 04:47:48.000000000 -0700
@@ -333,6 +333,7 @@
void *dn_ptr; /* DECnet specific data */
void *ip6_ptr; /* IPv6 specific data */
void *ec_ptr; /* Econet specific data */
+ void *ax25_ptr; /* AX.25 specific data */
struct list_head poll_list; /* Link to poll list */
int quota;
diff -urN linux-2.5.70-bk11/include/linux/percpu.h linux-2.5.70-bk12/include/linux/percpu.h
--- linux-2.5.70-bk11/include/linux/percpu.h 2003-05-26 18:00:57.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/percpu.h 2003-06-07 04:47:48.000000000 -0700
@@ -1,9 +1,16 @@
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H
 #include <linux/preempt.h> /* For preempt_disable() */
-#include <linux/slab.h> /* For kmalloc_percpu() */
+#include <linux/slab.h> /* For kmalloc() */
+#include <linux/smp.h>
+#include <linux/string.h> /* For memset() */
 #include <asm/percpu.h>
+/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
+#ifndef PERCPU_ENOUGH_ROOM
+#define PERCPU_ENOUGH_ROOM 32768
+#endif
+
/* Must be an lvalue. */
#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
@@ -17,7 +24,7 @@
/*
* Use this to get to a cpu's version of the per-cpu object allocated using
- * kmalloc_percpu. If you want to get "this cpu's version", maybe you want
+ * alloc_percpu. If you want to get "this cpu's version", maybe you want
* to use get_cpu_ptr...
*/
#define per_cpu_ptr(ptr, cpu) \
@@ -26,19 +33,22 @@
(__typeof__(ptr))__p->ptrs[(cpu)]; \
})
-extern void *kmalloc_percpu(size_t size, int flags);
-extern void kfree_percpu(const void *);
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(const void *);
extern void kmalloc_percpu_init(void);
#else /* CONFIG_SMP */
#define per_cpu_ptr(ptr, cpu) (ptr)
-static inline void *kmalloc_percpu(size_t size, int flags)
+static inline void *__alloc_percpu(size_t size, size_t align)
{
- return(kmalloc(size, flags));
+ void *ret = kmalloc(size, GFP_KERNEL);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
}
-static inline void kfree_percpu(const void *ptr)
+static inline void free_percpu(const void *ptr)
{
kfree(ptr);
}
@@ -46,9 +56,13 @@
#endif /* CONFIG_SMP */
+/* Simple wrapper for the common case: zeros memory. */
+#define alloc_percpu(type) \
+ ((type *)(__alloc_percpu(sizeof(type), __alignof__(type))))
+
/*
- * Use these with kmalloc_percpu. If
- * 1. You want to operate on memory allocated by kmalloc_percpu (dereference
+ * Use these with alloc_percpu. If
+ * 1. You want to operate on memory allocated by alloc_percpu (dereference
* and read/modify/write) AND
* 2. You want "this cpu's version" of the object AND
* 3. You want to do this safely since:
diff -urN linux-2.5.70-bk11/include/linux/ptrace.h linux-2.5.70-bk12/include/linux/ptrace.h
--- linux-2.5.70-bk11/include/linux/ptrace.h 2003-05-26 18:00:23.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/ptrace.h 2003-06-07 04:47:48.000000000 -0700
@@ -92,6 +92,23 @@
if (unlikely(child->ptrace))
__ptrace_unlink(child);
}
+
+
+#ifndef force_successful_syscall_return
+/*
+ * System call handlers that, upon successful completion, need to return a
+ * negative value should call force_successful_syscall_return() right before
+ * returning. On architectures where the syscall convention provides for a
+ * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
+ * others), this macro can be used to ensure that the error flag will not get
+ * set. On architectures which do not support a separate error flag, the macro
+ * is a no-op and the spurious error condition needs to be filtered out by some
+ * other means (e.g., in user-level, by passing an extra argument to the
+ * syscall handler, or something along those lines).
+ */
+#define force_successful_syscall_return() do { } while (0)
+#endif
+
#endif
#endif
diff -urN linux-2.5.70-bk11/include/linux/rtnetlink.h linux-2.5.70-bk12/include/linux/rtnetlink.h
--- linux-2.5.70-bk11/include/linux/rtnetlink.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/rtnetlink.h 2003-06-07 04:47:48.000000000 -0700
@@ -603,8 +603,8 @@
#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
#define rtnl_shunlock() do { up(&rtnl_sem); \
- if (rtnl && rtnl->receive_queue.qlen) \
- rtnl->data_ready(rtnl, 0); \
+ if (rtnl && rtnl->sk_receive_queue.qlen) \
+ rtnl->sk_data_ready(rtnl, 0); \
} while(0)
extern void rtnl_lock(void);
diff -urN linux-2.5.70-bk11/include/linux/sched.h linux-2.5.70-bk12/include/linux/sched.h
--- linux-2.5.70-bk11/include/linux/sched.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/sched.h 2003-06-07 04:47:48.000000000 -0700
@@ -288,11 +288,6 @@
uid_t uid;
};
-#define get_current_user() ({ \
- struct user_struct *__user = current->user; \
- atomic_inc(&__user->__count); \
- __user; })
-
extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
diff -urN linux-2.5.70-bk11/include/linux/serial_core.h linux-2.5.70-bk12/include/linux/serial_core.h
--- linux-2.5.70-bk11/include/linux/serial_core.h 2003-05-26 18:00:23.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/serial_core.h 2003-06-07 04:47:48.000000000 -0700
@@ -266,6 +266,7 @@
struct module *owner;
const char *driver_name;
const char *dev_name;
+ const char *devfs_name;
int major;
int minor;
int nr;
diff -urN linux-2.5.70-bk11/include/linux/smp.h linux-2.5.70-bk12/include/linux/smp.h
--- linux-2.5.70-bk11/include/linux/smp.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/smp.h 2003-06-07 04:47:48.000000000 -0700
@@ -84,14 +84,6 @@
#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/
#define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */
-struct notifier_block;
-
-/* Need to know about CPUs going up/down? */
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern void unregister_cpu_notifier(struct notifier_block *nb);
-
-int cpu_up(unsigned int cpu);
-
/*
* Mark the boot cpu "online" so that it can call console drivers in
* printk() and can access its per-cpu storage.
@@ -117,16 +109,6 @@
#define cpu_possible(cpu) ({ BUG_ON((cpu) != 0); 1; })
#define smp_prepare_boot_cpu() do {} while (0)
-struct notifier_block;
-
-/* Need to know about CPUs going up/down? */
-static inline int register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-static inline void unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
#endif /* !SMP */
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
diff -urN linux-2.5.70-bk11/include/linux/sysctl.h linux-2.5.70-bk12/include/linux/sysctl.h
--- linux-2.5.70-bk11/include/linux/sysctl.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/sysctl.h 2003-06-07 04:47:48.000000000 -0700
@@ -156,6 +156,7 @@
VM_HUGETLB_PAGES=18, /* int: Number of available Huge Pages */
VM_SWAPPINESS=19, /* Tendency to steal mapped memory */
VM_LOWER_ZONE_PROTECTION=20,/* Amount of protection of lower zones */
+ VM_MIN_FREE_KBYTES=21, /* Minimum free kilobytes to maintain */
};
diff -urN linux-2.5.70-bk11/include/linux/tcp.h linux-2.5.70-bk12/include/linux/tcp.h
--- linux-2.5.70-bk11/include/linux/tcp.h 2003-05-26 18:00:39.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/tcp.h 2003-06-07 04:47:48.000000000 -0700
@@ -17,11 +17,8 @@
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H
-#include
-#include
+#include <linux/types.h>
#include
-#include
-#include
struct tcphdr {
__u16 source;
@@ -188,6 +185,13 @@
__u32 tcpi_reordering;
};
+#ifdef __KERNEL__
+
+#include
+#include
+#include
+#include
+
/* This defines a selective acknowledgement block. */
struct tcp_sack_block {
__u32 start_seq;
@@ -384,4 +388,6 @@
#define tcp_sk(__sk) (&((struct tcp_sock *)__sk)->tcp)
+#endif
+
#endif /* _LINUX_TCP_H */
diff -urN linux-2.5.70-bk11/include/linux/topology.h linux-2.5.70-bk12/include/linux/topology.h
--- linux-2.5.70-bk11/include/linux/topology.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.5.70-bk12/include/linux/topology.h 2003-06-07 04:47:48.000000000 -0700
@@ -0,0 +1,51 @@
+/*
+ * include/linux/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _LINUX_TOPOLOGY_H
+#define _LINUX_TOPOLOGY_H
+
+#include <linux/bitops.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+
+#include <asm/topology.h>
+
+#ifndef nr_cpus_node
+#define nr_cpus_node(node) (hweight_long(node_to_cpumask(node)))
+#endif
+
+static inline int __next_node_with_cpus(int node)
+{
+ do
+ ++node;
+ while (node < numnodes && !nr_cpus_node(node));
+ return node;
+}
+
+#define for_each_node_with_cpus(node) \
+ for (node = 0; node < numnodes; node = __next_node_with_cpus(node))
+
+#endif /* _LINUX_TOPOLOGY_H */
diff -urN linux-2.5.70-bk11/include/linux/tty_driver.h linux-2.5.70-bk12/include/linux/tty_driver.h
--- linux-2.5.70-bk11/include/linux/tty_driver.h 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/tty_driver.h 2003-06-07 04:47:48.000000000 -0700
@@ -124,6 +124,7 @@
struct cdev cdev;
struct module *owner;
const char *driver_name;
+ const char *devfs_name;
const char *name;
int name_base; /* offset of printed name */
short major; /* major device number */
diff -urN linux-2.5.70-bk11/include/linux/udp.h linux-2.5.70-bk12/include/linux/udp.h
--- linux-2.5.70-bk11/include/linux/udp.h 2003-05-26 18:00:26.000000000 -0700
+++ linux-2.5.70-bk12/include/linux/udp.h 2003-06-07 04:47:48.000000000 -0700
@@ -17,10 +17,7 @@
#ifndef _LINUX_UDP_H
#define _LINUX_UDP_H
-#include
-#include
-#include
-#include
+#include <linux/types.h>
struct udphdr {
__u16 source;
@@ -36,6 +33,12 @@
/* UDP encapsulation types */
#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */
+#ifdef __KERNEL__
+
+#include
+#include
+#include
+
struct udp_opt {
int pending; /* Any pending frames ? */
unsigned int corkflag; /* Cork is required */
@@ -63,4 +66,6 @@
#define udp_sk(__sk) (&((struct udp_sock *)__sk)->udp)
+#endif
+
#endif /* _LINUX_UDP_H */
diff -urN linux-2.5.70-bk11/include/net/af_unix.h linux-2.5.70-bk12/include/net/af_unix.h
--- linux-2.5.70-bk11/include/net/af_unix.h 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/include/net/af_unix.h 2003-06-07 04:47:48.000000000 -0700
@@ -24,8 +24,8 @@
static inline unix_socket *next_unix_socket(int *i, unix_socket *s)
{
/* More in this chain? */
- if (s->next)
- return s->next;
+ if (s->sk_next)
+ return s->sk_next;
/* Look for next non-empty chain. */
for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
if (unix_socket_table[*i])
diff -urN linux-2.5.70-bk11/include/net/ax25.h linux-2.5.70-bk12/include/net/ax25.h
--- linux-2.5.70-bk11/include/net/ax25.h 2003-05-26 18:00:42.000000000 -0700
+++ linux-2.5.70-bk12/include/net/ax25.h 2003-06-07 04:47:48.000000000 -0700
@@ -201,7 +201,7 @@
struct sock *sk; /* Backlink to socket */
} ax25_cb;
-#define ax25_sk(__sk) ((ax25_cb *)(__sk)->protinfo)
+#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
/* af_ax25.c */
extern ax25_cb *ax25_list;
@@ -233,7 +233,12 @@
/* ax25_dev.c */
extern ax25_dev *ax25_dev_list;
extern spinlock_t ax25_dev_lock;
-extern ax25_dev *ax25_dev_ax25dev(struct net_device *);
+
+static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
+{
+ return dev->ax25_ptr;
+}
+
extern ax25_dev *ax25_addr_ax25dev(ax25_address *);
extern void ax25_dev_device_up(struct net_device *);
extern void ax25_dev_device_down(struct net_device *);
diff -urN linux-2.5.70-bk11/include/net/bluetooth/hci_core.h linux-2.5.70-bk12/include/net/bluetooth/hci_core.h
--- linux-2.5.70-bk11/include/net/bluetooth/hci_core.h 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/include/net/bluetooth/hci_core.h 2003-06-07 04:47:48.000000000 -0700
@@ -485,7 +485,7 @@
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
/* HCI info for socket */
-#define hci_pi(sk) ((struct hci_pinfo *) sk->protinfo)
+#define hci_pi(sk) ((struct hci_pinfo *)sk->sk_protinfo)
struct hci_pinfo {
struct hci_dev *hdev;
struct hci_filter filter;
diff -urN linux-2.5.70-bk11/include/net/bluetooth/l2cap.h linux-2.5.70-bk12/include/net/bluetooth/l2cap.h
--- linux-2.5.70-bk11/include/net/bluetooth/l2cap.h 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/include/net/bluetooth/l2cap.h 2003-06-07 04:47:48.000000000 -0700
@@ -206,7 +206,7 @@
};
/* ----- L2CAP channel and socket info ----- */
-#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk->protinfo)
+#define l2cap_pi(sk) ((struct l2cap_pinfo *)sk->sk_protinfo)
struct l2cap_pinfo {
__u16 psm;
diff -urN linux-2.5.70-bk11/include/net/bluetooth/rfcomm.h linux-2.5.70-bk12/include/net/bluetooth/rfcomm.h
--- linux-2.5.70-bk11/include/net/bluetooth/rfcomm.h 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/include/net/bluetooth/rfcomm.h 2003-06-07 04:47:48.000000000 -0700
@@ -302,7 +302,7 @@
u8 rc_channel;
};
-#define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk->protinfo)
+#define rfcomm_pi(sk) ((struct rfcomm_pinfo *)sk->sk_protinfo)
struct rfcomm_pinfo {
struct rfcomm_dlc *dlc;
diff -urN linux-2.5.70-bk11/include/net/bluetooth/sco.h linux-2.5.70-bk12/include/net/bluetooth/sco.h
--- linux-2.5.70-bk11/include/net/bluetooth/sco.h 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/include/net/bluetooth/sco.h 2003-06-07 04:47:48.000000000 -0700
@@ -71,7 +71,7 @@
#define sco_conn_unlock(c) spin_unlock(&c->lock);
/* ----- SCO socket info ----- */
-#define sco_pi(sk) ((struct sco_pinfo *) sk->protinfo)
+#define sco_pi(sk) ((struct sco_pinfo *)sk->sk_protinfo)
struct sco_pinfo {
__u32 flags;
diff -urN linux-2.5.70-bk11/include/net/dn.h linux-2.5.70-bk12/include/net/dn.h
--- linux-2.5.70-bk11/include/net/dn.h 2003-05-26 18:00:42.000000000 -0700
+++ linux-2.5.70-bk12/include/net/dn.h 2003-06-07 04:47:48.000000000 -0700
@@ -133,7 +133,7 @@
};
-#define DN_SK(__sk) ((struct dn_scp *)(__sk)->protinfo)
+#define DN_SK(__sk) ((struct dn_scp *)(__sk)->sk_protinfo)
/*
* src,dst : Source and Destination DECnet addresses
diff -urN linux-2.5.70-bk11/include/net/dn_nsp.h linux-2.5.70-bk12/include/net/dn_nsp.h
--- linux-2.5.70-bk11/include/net/dn_nsp.h 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/include/net/dn_nsp.h 2003-06-07 04:47:48.000000000 -0700
@@ -201,7 +201,7 @@
*/
static __inline__ int dn_congested(struct sock *sk)
{
- return atomic_read(&sk->rmem_alloc) > (sk->rcvbuf >> 1);
+ return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
}
#define DN_MAX_NSP_DATA_HEADER (11)
diff -urN linux-2.5.70-bk11/include/net/ip.h linux-2.5.70-bk12/include/net/ip.h
--- linux-2.5.70-bk11/include/net/ip.h 2003-05-26 18:00:43.000000000 -0700
+++ linux-2.5.70-bk12/include/net/ip.h 2003-06-07 04:47:48.000000000 -0700
@@ -238,7 +238,7 @@
{
inet_sk(sk)->rcv_saddr = inet_sk(sk)->saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- if (sk->family == PF_INET6) {
+ if (sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
memset(&np->saddr, 0, sizeof(np->saddr));
diff -urN linux-2.5.70-bk11/include/net/ip6_route.h linux-2.5.70-bk12/include/net/ip6_route.h
--- linux-2.5.70-bk11/include/net/ip6_route.h 2003-05-26 18:00:26.000000000 -0700
+++ linux-2.5.70-bk12/include/net/ip6_route.h 2003-06-07 04:47:48.000000000 -0700
@@ -107,11 +107,11 @@
struct ipv6_pinfo *np = inet6_sk(sk);
struct rt6_info *rt = (struct rt6_info *) dst;
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
np->daddr_cache = daddr;
np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
}
#endif
diff -urN linux-2.5.70-bk11/include/net/ipv6.h linux-2.5.70-bk12/include/net/ipv6.h
--- linux-2.5.70-bk11/include/net/ipv6.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/net/ipv6.h 2003-06-07 04:47:48.000000000 -0700
@@ -145,7 +145,7 @@
int snmp6_register_dev(struct inet6_dev *idev);
int snmp6_unregister_dev(struct inet6_dev *idev);
-int snmp6_mib_init(void *ptr[2], size_t mibsize);
+int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign);
void snmp6_mib_free(void *ptr[2]);
struct ip6_ra_chain
diff -urN linux-2.5.70-bk11/include/net/ipx.h linux-2.5.70-bk12/include/net/ipx.h
--- linux-2.5.70-bk11/include/net/ipx.h 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/include/net/ipx.h 2003-06-07 04:47:48.000000000 -0700
@@ -105,7 +105,7 @@
unsigned short ipx_ncp_conn;
};
-#define ipx_sk(__sk) ((struct ipx_opt *)(__sk)->protinfo)
+#define ipx_sk(__sk) ((struct ipx_opt *)(__sk)->sk_protinfo)
#define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0]))
#endif
#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
diff -urN linux-2.5.70-bk11/include/net/irda/af_irda.h linux-2.5.70-bk12/include/net/irda/af_irda.h
--- linux-2.5.70-bk11/include/net/irda/af_irda.h 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/include/net/irda/af_irda.h 2003-06-07 04:47:48.000000000 -0700
@@ -77,6 +77,6 @@
LOCAL_FLOW rx_flow;
};
-#define irda_sk(__sk) ((struct irda_sock *)(__sk)->protinfo)
+#define irda_sk(__sk) ((struct irda_sock *)(__sk)->sk_protinfo)
#endif /* AF_IRDA_H */
diff -urN linux-2.5.70-bk11/include/net/llc_c_ev.h linux-2.5.70-bk12/include/net/llc_c_ev.h
--- linux-2.5.70-bk11/include/net/llc_c_ev.h 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/include/net/llc_c_ev.h 2003-06-07 04:47:48.000000000 -0700
@@ -275,7 +275,7 @@
static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
{
- return atomic_read(&sk->rmem_alloc) + skb->truesize <
- (unsigned)sk->rcvbuf;
+ return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
+ (unsigned)sk->sk_rcvbuf;
}
#endif /* LLC_C_EV_H */
diff -urN linux-2.5.70-bk11/include/net/llc_conn.h linux-2.5.70-bk12/include/net/llc_conn.h
--- linux-2.5.70-bk11/include/net/llc_conn.h 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/include/net/llc_conn.h 2003-06-07 04:47:48.000000000 -0700
@@ -67,7 +67,7 @@
Used for resending FRMR */
};
-#define llc_sk(__sk) ((struct llc_opt *)(__sk)->protinfo)
+#define llc_sk(__sk) ((struct llc_opt *)(__sk)->sk_protinfo)
extern struct sock *llc_sk_alloc(int family, int priority);
extern void llc_sk_free(struct sock *sk);
diff -urN linux-2.5.70-bk11/include/net/netrom.h linux-2.5.70-bk12/include/net/netrom.h
--- linux-2.5.70-bk11/include/net/netrom.h 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/include/net/netrom.h 2003-06-07 04:47:48.000000000 -0700
@@ -74,7 +74,7 @@
struct sock *sk; /* Backlink to socket */
} nr_cb;
-#define nr_sk(__sk) ((nr_cb *)(__sk)->protinfo)
+#define nr_sk(__sk) ((nr_cb *)(__sk)->sk_protinfo)
struct nr_neigh {
struct nr_neigh *next;
diff -urN linux-2.5.70-bk11/include/net/rose.h linux-2.5.70-bk12/include/net/rose.h
--- linux-2.5.70-bk11/include/net/rose.h 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/include/net/rose.h 2003-06-07 04:47:48.000000000 -0700
@@ -138,7 +138,7 @@
struct sock *sk; /* Backlink to socket */
} rose_cb;
-#define rose_sk(__sk) ((rose_cb *)(__sk)->protinfo)
+#define rose_sk(__sk) ((rose_cb *)(__sk)->sk_protinfo)
/* af_rose.c */
extern ax25_address rose_callsign;
diff -urN linux-2.5.70-bk11/include/net/route.h linux-2.5.70-bk12/include/net/route.h
--- linux-2.5.70-bk11/include/net/route.h 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/include/net/route.h 2003-06-07 04:47:48.000000000 -0700
@@ -44,7 +44,7 @@
/* RTO_CONN is not used (being alias for 0), but preserved not to break
* some modules referring to it. */
-#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sk->localroute)
+#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sk->sk_localroute)
struct inet_peer;
struct rtable
diff -urN linux-2.5.70-bk11/include/net/sctp/sctp.h linux-2.5.70-bk12/include/net/sctp/sctp.h
--- linux-2.5.70-bk11/include/net/sctp/sctp.h 2003-05-26 18:00:27.000000000 -0700
+++ linux-2.5.70-bk12/include/net/sctp/sctp.h 2003-06-07 04:47:48.000000000 -0700
@@ -597,7 +597,7 @@
#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
int static inline __sctp_sstate(const struct sock *sk, sctp_sock_state_t state)
{
- return sk->state == state;
+ return sk->sk_state == state;
}
#endif /* __net_sctp_h__ */
diff -urN linux-2.5.70-bk11/include/net/sock.h linux-2.5.70-bk12/include/net/sock.h
--- linux-2.5.70-bk11/include/net/sock.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/net/sock.h 2003-06-07 04:47:48.000000000 -0700
@@ -59,10 +59,11 @@
* the other protocols.
*/
-/* Define this to get the sk->debug debugging facility. */
+/* Define this to get the sk->sk_debug debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
-#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
+#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->sk_debug)) \
+ printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
@@ -79,123 +80,147 @@
} socket_lock_t;
#define sock_lock_init(__sk) \
-do { spin_lock_init(&((__sk)->lock.slock)); \
- (__sk)->lock.owner = NULL; \
- init_waitqueue_head(&((__sk)->lock.wq)); \
+do { spin_lock_init(&((__sk)->sk_lock.slock)); \
+ (__sk)->sk_lock.owner = NULL; \
+ init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)
+struct sock;
+
+/**
+ * struct sock_common - minimal network layer representation of sockets
+ * @skc_family - network address family
+ * @skc_state - Connection state
+ * @skc_reuse - %SO_REUSEADDR setting
+ * @skc_bound_dev_if - bound device index if != 0
+ * @skc_next - main hash linkage for various protocol lookup tables
+ * @skc_pprev - main hash linkage for various protocol lookup tables
+ * @skc_bind_next - main hash linkage for various protocol lookup tables
+ * @skc_bind_pprev - main hash linkage for various protocol lookup tables
+ * @skc_refcnt - reference count
+ *
+ * This is the minimal network layer representation of sockets, the header
+ * for struct sock and struct tcp_tw_bucket.
+ */
+struct sock_common {
+ unsigned short skc_family;
+ volatile unsigned char skc_state;
+ unsigned char skc_reuse;
+ int skc_bound_dev_if;
+ struct sock *skc_next;
+ struct sock **skc_pprev;
+ struct sock *skc_bind_next;
+ struct sock **skc_bind_pprev;
+ atomic_t skc_refcnt;
+};
+
/**
* struct sock - network layer representation of sockets
- * @state - Connection state
- * @zapped - ax25 & ipx means !linked
- * @reuse - %SO_REUSEADDR setting
- * @shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
- * @bound_dev_if - bound device index if != 0
- * @next - main hash linkage for various protocol lookup tables
- * @pprev - main hash linkage for various protocol lookup tables
- * @bind_next - main hash linkage for various protocol lookup tables
- * @bind_pprev - main hash linkage for various protocol lookup tables
- * @refcnt - reference count
- * @family - network address family
- * @use_write_queue - wheter to call sk->write_space(sk) in sock_wfree
- * @userlocks - %SO_SNDBUF and %SO_RCVBUF settings
- * @lock - synchronizer
- * @rcvbuf - size of receive buffer in bytes
- * @sleep - sock wait queue
- * @dst_cache - destination cache
- * @dst_lock - destination cache lock
- * @policy - flow policy
- * @rmem_alloc - receive queue bytes committed
- * @receive_queue - incoming packets
- * @wmem_alloc - transmit queue bytes committed
- * @write_queue - Packet sending queue
- * @omem_alloc - "o" is "option" or "other"
- * @wmem_queued - persistent queue size
- * @forward_alloc - space allocated forward
- * @allocation - allocation mode
- * @sndbuf - size of send buffer in bytes
- * @prev - pointer to previous sock in the list this sock is in
- * @flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
- * @no_check - %SO_NO_CHECK setting, wether or not checkup packets
- * @debug - %SO_DEBUG setting
- * @rcvtstamp - %SO_TIMESTAMP setting
- * @no_largesend - whether to sent large segments or not
- * @route_caps - route capabilities (e.g. %NETIF_F_TSO)
- * @lingertime - %SO_LINGER l_linger setting
- * @hashent - hash entry in several tables (e.g. tcp_ehash)
- * @pair - socket pair (e.g. AF_UNIX/unix_peer)
- * @backlog - always used with the per-socket spinlock held
- * @callback_lock - used with the callbacks in the end of this struct
- * @error_queue - rarely used
- * @prot - protocol handlers inside a network family
- * @err - last error
- * @err_soft - errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
- * @ack_backlog - current listen backlog
- * @max_ack_backlog - listen backlog set in listen()
- * @priority - %SO_PRIORITY setting
- * @type - socket type (%SOCK_STREAM, etc)
- * @localroute - route locally only, %SO_DONTROUTE setting
- * @protocol - which protocol this socket belongs in this network family
- * @peercred - %SO_PEERCRED setting
- * @rcvlowat - %SO_RCVLOWAT setting
- * @rcvtimeo - %SO_RCVTIMEO setting
- * @sndtimeo - %SO_SNDTIMEO setting
- * @filter - socket filtering instructions
- * @protinfo - private area, net family specific, when not using slab
- * @slab - the slabcache this instance was allocated from
- * @timer - sock cleanup timer
- * @stamp - time stamp of last packet received
- * @socket - Identd and reporting IO signals
- * @user_data - RPC layer private data
- * @owner - module that owns this socket
- * @state_change - callback to indicate change in the state of the sock
- * @data_ready - callback to indicate there is data to be processed
- * @write_space - callback to indicate there is bf sending space available
- * @error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
- * @backlog_rcv - callback to process the backlog
- * @destruct - called at sock freeing time, i.e. when all refcnt == 0
+ * @__sk_common - shared layout with tcp_tw_bucket
+ * @sk_zapped - ax25 & ipx means !linked
+ * @sk_shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
+ * @sk_use_write_queue - wheter to call sk->sk_write_space in sock_wfree
+ * @sk_userlocks - %SO_SNDBUF and %SO_RCVBUF settings
+ * @sk_lock - synchronizer
+ * @sk_rcvbuf - size of receive buffer in bytes
+ * @sk_sleep - sock wait queue
+ * @sk_dst_cache - destination cache
+ * @sk_dst_lock - destination cache lock
+ * @sk_policy - flow policy
+ * @sk_rmem_alloc - receive queue bytes committed
+ * @sk_receive_queue - incoming packets
+ * @sk_wmem_alloc - transmit queue bytes committed
+ * @sk_write_queue - Packet sending queue
+ * @sk_omem_alloc - "o" is "option" or "other"
+ * @sk_wmem_queued - persistent queue size
+ * @sk_forward_alloc - space allocated forward
+ * @sk_allocation - allocation mode
+ * @sk_sndbuf - size of send buffer in bytes
+ * @sk_prev - pointer to previous sock in the list this sock is in
+ * @sk_flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
+ * @sk_no_check - %SO_NO_CHECK setting, wether or not checkup packets
+ * @sk_debug - %SO_DEBUG setting
+ * @sk_rcvtstamp - %SO_TIMESTAMP setting
+ * @sk_no_largesend - whether to sent large segments or not
+ * @sk_route_caps - route capabilities (e.g. %NETIF_F_TSO)
+ * @sk_lingertime - %SO_LINGER l_linger setting
+ * @sk_hashent - hash entry in several tables (e.g. tcp_ehash)
+ * @sk_pair - socket pair (e.g. AF_UNIX/unix_peer)
+ * @sk_backlog - always used with the per-socket spinlock held
+ * @sk_callback_lock - used with the callbacks in the end of this struct
+ * @sk_error_queue - rarely used
+ * @sk_prot - protocol handlers inside a network family
+ * @sk_err - last error
+ * @sk_err_soft - errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
+ * @sk_ack_backlog - current listen backlog
+ * @sk_max_ack_backlog - listen backlog set in listen()
+ * @sk_priority - %SO_PRIORITY setting
+ * @sk_type - socket type (%SOCK_STREAM, etc)
+ * @sk_localroute - route locally only, %SO_DONTROUTE setting
+ * @sk_protocol - which protocol this socket belongs in this network family
+ * @sk_peercred - %SO_PEERCRED setting
+ * @sk_rcvlowat - %SO_RCVLOWAT setting
+ * @sk_rcvtimeo - %SO_RCVTIMEO setting
+ * @sk_sndtimeo - %SO_SNDTIMEO setting
+ * @sk_filter - socket filtering instructions
+ * @sk_protinfo - private area, net family specific, when not using slab
+ * @sk_slab - the slabcache this instance was allocated from
+ * @sk_timer - sock cleanup timer
+ * @sk_stamp - time stamp of last packet received
+ * @sk_socket - Identd and reporting IO signals
+ * @sk_user_data - RPC layer private data
+ * @sk_owner - module that owns this socket
+ * @sk_state_change - callback to indicate change in the state of the sock
+ * @sk_data_ready - callback to indicate there is data to be processed
+ * @sk_write_space - callback to indicate there is bf sending space available
+ * @sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
+ * @sk_backlog_rcv - callback to process the backlog
+ * @sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
*/
struct sock {
- /* Begin of struct sock/struct tcp_tw_bucket shared layout */
- unsigned short family;
- volatile unsigned char state;
- unsigned char reuse;
- int bound_dev_if;
- struct sock *next;
- struct sock **pprev;
- struct sock *bind_next;
- struct sock **bind_pprev;
- atomic_t refcnt;
- /* End of struct sock/struct tcp_tw_bucket shared layout */
- volatile unsigned char zapped;
- unsigned char shutdown;
- unsigned char use_write_queue;
- unsigned char userlocks;
- socket_lock_t lock;
- int rcvbuf;
- wait_queue_head_t *sleep;
- struct dst_entry *dst_cache;
- rwlock_t dst_lock;
- struct xfrm_policy *policy[2];
- atomic_t rmem_alloc;
- struct sk_buff_head receive_queue;
- atomic_t wmem_alloc;
- struct sk_buff_head write_queue;
- atomic_t omem_alloc;
- int wmem_queued;
- int forward_alloc;
- unsigned int allocation;
- int sndbuf;
- struct sock *prev;
- unsigned long flags;
- char no_check;
- unsigned char debug;
- unsigned char rcvtstamp;
- unsigned char no_largesend;
- int route_caps;
- unsigned long lingertime;
- int hashent;
- struct sock *pair;
+ /*
+ * Now struct tcp_tw_bucket also uses sock_common, so please just
+ * don't add nothing before this first member (__sk_common) --acme
+ */
+ struct sock_common __sk_common;
+#define sk_family __sk_common.skc_family
+#define sk_state __sk_common.skc_state
+#define sk_reuse __sk_common.skc_reuse
+#define sk_bound_dev_if __sk_common.skc_bound_dev_if
+#define sk_next __sk_common.skc_next
+#define sk_pprev __sk_common.skc_pprev
+#define sk_bind_next __sk_common.skc_bind_next
+#define sk_bind_pprev __sk_common.skc_bind_pprev
+#define sk_refcnt __sk_common.skc_refcnt
+ volatile unsigned char sk_zapped;
+ unsigned char sk_shutdown;
+ unsigned char sk_use_write_queue;
+ unsigned char sk_userlocks;
+ socket_lock_t sk_lock;
+ int sk_rcvbuf;
+ wait_queue_head_t *sk_sleep;
+ struct dst_entry *sk_dst_cache;
+ rwlock_t sk_dst_lock;
+ struct xfrm_policy *sk_policy[2];
+ atomic_t sk_rmem_alloc;
+ struct sk_buff_head sk_receive_queue;
+ atomic_t sk_wmem_alloc;
+ struct sk_buff_head sk_write_queue;
+ atomic_t sk_omem_alloc;
+ int sk_wmem_queued;
+ int sk_forward_alloc;
+ unsigned int sk_allocation;
+ int sk_sndbuf;
+ struct sock *sk_prev;
+ unsigned long sk_flags;
+ char sk_no_check;
+ unsigned char sk_debug;
+ unsigned char sk_rcvtstamp;
+ unsigned char sk_no_largesend;
+ int sk_route_caps;
+ unsigned long sk_lingertime;
+ int sk_hashent;
+ struct sock *sk_pair;
/*
* The backlog queue is special, it is always used with
* the per-socket spinlock held and requires low latency
@@ -204,37 +229,37 @@
struct {
struct sk_buff *head;
struct sk_buff *tail;
- } backlog;
- rwlock_t callback_lock;
- struct sk_buff_head error_queue;
- struct proto *prot;
- int err,
- err_soft;
- unsigned short ack_backlog;
- unsigned short max_ack_backlog;
- __u32 priority;
- unsigned short type;
- unsigned char localroute;
- unsigned char protocol;
- struct ucred peercred;
- int rcvlowat;
- long rcvtimeo;
- long sndtimeo;
- struct sk_filter *filter;
- void *protinfo;
- kmem_cache_t *slab;
- struct timer_list timer;
- struct timeval stamp;
- struct socket *socket;
- void *user_data;
- struct module *owner;
- void (*state_change)(struct sock *sk);
- void (*data_ready)(struct sock *sk, int bytes);
- void (*write_space)(struct sock *sk);
- void (*error_report)(struct sock *sk);
- int (*backlog_rcv) (struct sock *sk,
- struct sk_buff *skb);
- void (*destruct)(struct sock *sk);
+ } sk_backlog;
+ rwlock_t sk_callback_lock;
+ struct sk_buff_head sk_error_queue;
+ struct proto *sk_prot;
+ int sk_err,
+ sk_err_soft;
+ unsigned short sk_ack_backlog;
+ unsigned short sk_max_ack_backlog;
+ __u32 sk_priority;
+ unsigned short sk_type;
+ unsigned char sk_localroute;
+ unsigned char sk_protocol;
+ struct ucred sk_peercred;
+ int sk_rcvlowat;
+ long sk_rcvtimeo;
+ long sk_sndtimeo;
+ struct sk_filter *sk_filter;
+ void *sk_protinfo;
+ kmem_cache_t *sk_slab;
+ struct timer_list sk_timer;
+ struct timeval sk_stamp;
+ struct socket *sk_socket;
+ void *sk_user_data;
+ struct module *sk_owner;
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_write_space)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+ int (*sk_backlog_rcv)(struct sock *sk,
+ struct sk_buff *skb);
+ void (*sk_destruct)(struct sock *sk);
};
/* Sock flags */
@@ -250,29 +275,29 @@
static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
- __set_bit(flag, &sk->flags);
+ __set_bit(flag, &sk->sk_flags);
}
static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
- __clear_bit(flag, &sk->flags);
+ __clear_bit(flag, &sk->sk_flags);
}
static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
- return test_bit(flag, &sk->flags);
+ return test_bit(flag, &sk->sk_flags);
}
/* The per-socket spinlock must be held here. */
-#define sk_add_backlog(__sk, __skb) \
-do { if((__sk)->backlog.tail == NULL) { \
- (__sk)->backlog.head = \
- (__sk)->backlog.tail = (__skb); \
- } else { \
- ((__sk)->backlog.tail)->next = (__skb); \
- (__sk)->backlog.tail = (__skb); \
- } \
- (__skb)->next = NULL; \
+#define sk_add_backlog(__sk, __skb) \
+do { if (!(__sk)->sk_backlog.tail) { \
+ (__sk)->sk_backlog.head = \
+ (__sk)->sk_backlog.tail = (__skb); \
+ } else { \
+ ((__sk)->sk_backlog.tail)->next = (__skb); \
+ (__sk)->sk_backlog.tail = (__skb); \
+ } \
+ (__skb)->next = NULL; \
} while(0)
/* IP protocol blocks we attach to sockets.
@@ -337,9 +362,9 @@
* change the ownership of this struct sock, with one not needed
* transient sk_set_owner call.
*/
- if (unlikely(sk->owner != NULL))
+ if (unlikely(sk->sk_owner != NULL))
BUG();
- sk->owner = owner;
+ sk->sk_owner = owner;
__module_get(owner);
}
@@ -423,28 +448,29 @@
*/
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);
-#define sock_owned_by_user(sk) (NULL != (sk)->lock.owner)
+#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
#define lock_sock(__sk) \
do { might_sleep(); \
- spin_lock_bh(&((__sk)->lock.slock)); \
- if ((__sk)->lock.owner != NULL) \
+ spin_lock_bh(&((__sk)->sk_lock.slock)); \
+ if ((__sk)->sk_lock.owner) \
__lock_sock(__sk); \
- (__sk)->lock.owner = (void *)1; \
- spin_unlock_bh(&((__sk)->lock.slock)); \
+ (__sk)->sk_lock.owner = (void *)1; \
+ spin_unlock_bh(&((__sk)->sk_lock.slock)); \
} while(0)
#define release_sock(__sk) \
-do { spin_lock_bh(&((__sk)->lock.slock)); \
- if ((__sk)->backlog.tail != NULL) \
+do { spin_lock_bh(&((__sk)->sk_lock.slock)); \
+ if ((__sk)->sk_backlog.tail) \
__release_sock(__sk); \
- (__sk)->lock.owner = NULL; \
- if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
- spin_unlock_bh(&((__sk)->lock.slock)); \
+ (__sk)->sk_lock.owner = NULL; \
+ if (waitqueue_active(&((__sk)->sk_lock.wq))) \
+ wake_up(&((__sk)->sk_lock.wq)); \
+ spin_unlock_bh(&((__sk)->sk_lock.slock)); \
} while(0)
/* BH context may only use the following locking interface. */
-#define bh_lock_sock(__sk) spin_lock(&((__sk)->lock.slock))
-#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->lock.slock))
+#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
+#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
extern struct sock * sk_alloc(int family, int priority, int zero_it,
kmem_cache_t *slab);
@@ -547,13 +573,13 @@
if (err)
return err;
- if (sk->filter) {
+ if (sk->sk_filter) {
struct sk_filter *filter;
if (needlock)
bh_lock_sock(sk);
- filter = sk->filter;
+ filter = sk->sk_filter;
if (filter) {
int pkt_len = sk_run_filter(skb, filter->insns,
filter->len);
@@ -581,7 +607,7 @@
{
unsigned int size = sk_filter_len(fp);
- atomic_sub(size, &sk->omem_alloc);
+ atomic_sub(size, &sk->sk_omem_alloc);
if (atomic_dec_and_test(&fp->refcnt))
kfree(fp);
@@ -590,7 +616,7 @@
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
atomic_inc(&fp->refcnt);
- atomic_add(sk_filter_len(fp), &sk->omem_alloc);
+ atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
/*
@@ -626,7 +652,7 @@
static inline void sock_hold(struct sock *sk)
{
- atomic_inc(&sk->refcnt);
+ atomic_inc(&sk->sk_refcnt);
}
/* Ungrab socket in the context, which assumes that socket refcnt
@@ -634,13 +660,13 @@
*/
static inline void __sock_put(struct sock *sk)
{
- atomic_dec(&sk->refcnt);
+ atomic_dec(&sk->sk_refcnt);
}
/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
- if (atomic_dec_and_test(&sk->refcnt))
+ if (atomic_dec_and_test(&sk->sk_refcnt))
sk_free(sk);
}
@@ -653,29 +679,29 @@
*/
static inline void sock_orphan(struct sock *sk)
{
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
sock_set_flag(sk, SOCK_DEAD);
- sk->socket = NULL;
- sk->sleep = NULL;
- write_unlock_bh(&sk->callback_lock);
+ sk->sk_socket = NULL;
+ sk->sk_sleep = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
}
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
- write_lock_bh(&sk->callback_lock);
- sk->sleep = &parent->wait;
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_sleep = &parent->wait;
parent->sk = sk;
- sk->socket = parent;
- write_unlock_bh(&sk->callback_lock);
+ sk->sk_socket = parent;
+ write_unlock_bh(&sk->sk_callback_lock);
}
static inline int sock_i_uid(struct sock *sk)
{
int uid;
- read_lock(&sk->callback_lock);
- uid = sk->socket ? SOCK_INODE(sk->socket)->i_uid : 0;
- read_unlock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
+ uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+ read_unlock(&sk->sk_callback_lock);
return uid;
}
@@ -683,16 +709,16 @@
{
unsigned long ino;
- read_lock(&sk->callback_lock);
- ino = sk->socket ? SOCK_INODE(sk->socket)->i_ino : 0;
- read_unlock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
+ ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+ read_unlock(&sk->sk_callback_lock);
return ino;
}
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
- return sk->dst_cache;
+ return sk->sk_dst_cache;
}
static inline struct dst_entry *
@@ -700,11 +726,11 @@
{
struct dst_entry *dst;
- read_lock(&sk->dst_lock);
- dst = sk->dst_cache;
+ read_lock(&sk->sk_dst_lock);
+ dst = sk->sk_dst_cache;
if (dst)
dst_hold(dst);
- read_unlock(&sk->dst_lock);
+ read_unlock(&sk->sk_dst_lock);
return dst;
}
@@ -713,17 +739,17 @@
{
struct dst_entry *old_dst;
- old_dst = sk->dst_cache;
- sk->dst_cache = dst;
+ old_dst = sk->sk_dst_cache;
+ sk->sk_dst_cache = dst;
dst_release(old_dst);
}
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
}
static inline void
@@ -731,26 +757,26 @@
{
struct dst_entry *old_dst;
- old_dst = sk->dst_cache;
- sk->dst_cache = NULL;
+ old_dst = sk->sk_dst_cache;
+ sk->sk_dst_cache = NULL;
dst_release(old_dst);
}
static inline void
sk_dst_reset(struct sock *sk)
{
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
__sk_dst_reset(sk);
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
}
static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
- struct dst_entry *dst = sk->dst_cache;
+ struct dst_entry *dst = sk->sk_dst_cache;
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
- sk->dst_cache = NULL;
+ sk->sk_dst_cache = NULL;
return NULL;
}
@@ -785,14 +811,14 @@
sock_hold(sk);
skb->sk = sk;
skb->destructor = sock_wfree;
- atomic_add(skb->truesize, &sk->wmem_alloc);
+ atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
skb->sk = sk;
skb->destructor = sock_rfree;
- atomic_add(skb->truesize, &sk->rmem_alloc);
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -802,7 +828,8 @@
/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
err = -ENOMEM;
goto out;
}
@@ -817,9 +844,9 @@
skb->dev = NULL;
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk,skb->len);
+ sk->sk_data_ready(sk, skb->len);
out:
return err;
}
@@ -829,12 +856,13 @@
/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
return -ENOMEM;
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->error_queue,skb);
+ skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk,skb->len);
+ sk->sk_data_ready(sk, skb->len);
return 0;
}
@@ -844,7 +872,7 @@
static inline int sock_error(struct sock *sk)
{
- int err=xchg(&sk->err,0);
+ int err = xchg(&sk->sk_err, 0);
return -err;
}
@@ -852,8 +880,8 @@
{
int amt = 0;
- if (!(sk->shutdown & SEND_SHUTDOWN)) {
- amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+ amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amt < 0)
amt = 0;
}
@@ -862,8 +890,8 @@
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
- if (sk->socket && sk->socket->fasync_list)
- sock_wake_async(sk->socket, how, band);
+ if (sk->sk_socket && sk->sk_socket->fasync_list)
+ sock_wake_async(sk->sk_socket, how, band);
}
#define SOCK_MIN_SNDBUF 2048
@@ -874,7 +902,7 @@
*/
static inline int sock_writeable(struct sock *sk)
{
- return atomic_read(&sk->wmem_alloc) < (sk->sndbuf / 2);
+ return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}
static inline int gfp_any(void)
@@ -884,17 +912,17 @@
static inline long sock_rcvtimeo(struct sock *sk, int noblock)
{
- return noblock ? 0 : sk->rcvtimeo;
+ return noblock ? 0 : sk->sk_rcvtimeo;
}
static inline long sock_sndtimeo(struct sock *sk, int noblock)
{
- return noblock ? 0 : sk->sndtimeo;
+ return noblock ? 0 : sk->sk_sndtimeo;
}
static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
- return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
+ return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}
/* Alas, with timeout socket operations are not restartable.
@@ -908,10 +936,10 @@
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- if (sk->rcvtstamp)
+ if (sk->sk_rcvtstamp)
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp), &skb->stamp);
else
- sk->stamp = skb->stamp;
+ sk->sk_stamp = skb->stamp;
}
/*
@@ -940,11 +968,11 @@
#define SOCK_SLEEP_PRE(sk) { struct task_struct *tsk = current; \
DECLARE_WAITQUEUE(wait, tsk); \
tsk->state = TASK_INTERRUPTIBLE; \
- add_wait_queue((sk)->sleep, &wait); \
+ add_wait_queue((sk)->sk_sleep, &wait); \
release_sock(sk);
#define SOCK_SLEEP_POST(sk) tsk->state = TASK_RUNNING; \
- remove_wait_queue((sk)->sleep, &wait); \
+ remove_wait_queue((sk)->sk_sleep, &wait); \
lock_sock(sk); \
}
diff -urN linux-2.5.70-bk11/include/net/tcp.h linux-2.5.70-bk12/include/net/tcp.h
--- linux-2.5.70-bk11/include/net/tcp.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/net/tcp.h 2003-06-07 04:47:48.000000000 -0700
@@ -54,7 +54,7 @@
*
* 1) Sockets bound to different interfaces may share a local port.
* Failing that, goto test 2.
- * 2) If all sockets have sk->reuse set, and none of them are in
+ * 2) If all sockets have sk->sk_reuse set, and none of them are in
* TCP_LISTEN state, the port may be shared.
* Failing that, goto test 3.
* 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
@@ -65,12 +65,12 @@
* The interesting point, is test #2. This is what an FTP server does
* all day. To optimize this case we use a specific flag bit defined
* below. As we add sockets to a bind bucket list, we perform a
- * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
+ * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
* As long as all sockets added to a bind bucket pass this test,
* the flag bit will be set.
* The resulting situation is that tcp_v[46]_verify_bind() can just check
* for this flag bit, if it is set and the socket trying to bind has
- * sk->reuse set, we don't even have to walk the owners list at all,
+ * sk->sk_reuse set, we don't even have to walk the owners list at all,
* we return that it is ok to bind this socket to the requested local port.
*
* Sounds like a lot of work, but it is worth it. In a more naive
@@ -97,7 +97,7 @@
/* This is for sockets with full identity only. Sockets here will
* always be without wildcards and will have the following invariant:
*
- * TCP_ESTABLISHED <= sk->state < TCP_CLOSE
+ * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
*
* First half of the table is for sockets not in TIME_WAIT, second half
* is for TIME_WAIT sockets only.
@@ -165,46 +165,45 @@
* without violating the protocol specification.
*/
struct tcp_tw_bucket {
- /* These _must_ match the beginning of struct sock precisely.
- * XXX Yes I know this is gross, but I'd have to edit every single
- * XXX networking file if I created a "struct sock_header". -DaveM
+ /*
+ * Now struct sock also uses sock_common, so please just
+ * don't add nothing before this first member (__tw_common) --acme
*/
- unsigned short family;
- volatile unsigned char state; /* Connection state */
- unsigned char reuse; /* SO_REUSEADDR setting */
- int bound_dev_if;
- /* Main hash linkage for various protocol lookup tables. */
- struct sock *next;
- struct sock **pprev;
- struct sock *bind_next;
- struct sock **bind_pprev;
- atomic_t refcnt;
- /* End of struct sock/struct tcp_tw_bucket shared layout */
- volatile unsigned char substate;
- unsigned char rcv_wscale;
- __u16 sport;
+ struct sock_common __tw_common;
+#define tw_family __tw_common.skc_family
+#define tw_state __tw_common.skc_state
+#define tw_reuse __tw_common.skc_reuse
+#define tw_bound_dev_if __tw_common.skc_bound_dev_if
+#define tw_next __tw_common.skc_next
+#define tw_pprev __tw_common.skc_pprev
+#define tw_bind_next __tw_common.skc_bind_next
+#define tw_bind_pprev __tw_common.skc_bind_pprev
+#define tw_refcnt __tw_common.skc_refcnt
+ volatile unsigned char tw_substate;
+ unsigned char tw_rcv_wscale;
+ __u16 tw_sport;
/* Socket demultiplex comparisons on incoming packets. */
/* these five are in inet_opt */
- __u32 daddr;
- __u32 rcv_saddr;
- __u16 dport;
- __u16 num;
+ __u32 tw_daddr;
+ __u32 tw_rcv_saddr;
+ __u16 tw_dport;
+ __u16 tw_num;
/* And these are ours. */
- int hashent;
- int timeout;
- __u32 rcv_nxt;
- __u32 snd_nxt;
- __u32 rcv_wnd;
- __u32 ts_recent;
- long ts_recent_stamp;
- unsigned long ttd;
- struct tcp_bind_bucket *tb;
- struct tcp_tw_bucket *next_death;
- struct tcp_tw_bucket **pprev_death;
+ int tw_hashent;
+ int tw_timeout;
+ __u32 tw_rcv_nxt;
+ __u32 tw_snd_nxt;
+ __u32 tw_rcv_wnd;
+ __u32 tw_ts_recent;
+ long tw_ts_recent_stamp;
+ unsigned long tw_ttd;
+ struct tcp_bind_bucket *tw_tb;
+ struct tcp_tw_bucket *tw_next_death;
+ struct tcp_tw_bucket **tw_pprev_death;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct in6_addr v6_daddr;
- struct in6_addr v6_rcv_saddr;
+ struct in6_addr tw_v6_daddr;
+ struct in6_addr tw_v6_rcv_saddr;
#endif
};
@@ -214,7 +213,7 @@
static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
- if (atomic_dec_and_test(&tw->refcnt)) {
+ if (atomic_dec_and_test(&tw->tw_refcnt)) {
#ifdef INET_REFCNT_DEBUG
printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
@@ -249,31 +248,31 @@
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- (((*((__u64 *)&(tcptw_sk(__sk)->daddr)))== (__cookie)) && \
- ((*((__u32 *)&(tcptw_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
+ ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
((inet_sk(__sk)->daddr == (__saddr)) && \
(inet_sk(__sk)->rcv_saddr == (__daddr)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- ((tcptw_sk(__sk)->daddr == (__saddr)) && \
- (tcptw_sk(__sk)->rcv_saddr == (__daddr)) && \
- ((*((__u32 *)&(tcptw_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
+ (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
+ ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
(((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- ((__sk)->family == AF_INET6) && \
+ ((__sk)->sk_family == AF_INET6) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr)) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
/* These can have wildcards, don't try too hard. */
static __inline__ int tcp_lhashfn(unsigned short num)
@@ -450,6 +449,11 @@
#define TCP_TIME_PROBE0 3 /* Zero window probe timer */
#define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
+/* Flags in tp->nonagle */
+#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
+#define TCP_NAGLE_CORK 2 /* Socket is corked */
+#define TCP_NAGLE_PUSH 4 /* Cork is overriden for already queued data */
+
/* sysctl variables for tcp */
extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
@@ -932,7 +936,8 @@
{
struct tcp_opt *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
- int mss_now = large && (sk->route_caps&NETIF_F_TSO) && !tp->urg_mode ?
+ int mss_now = large && (sk->sk_route_caps & NETIF_F_TSO) &&
+ !tp->urg_mode ?
tp->mss_cache : tp->mss_cache_std;
if (dst) {
@@ -983,7 +988,7 @@
{
if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
tp->rcv_wnd &&
- atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
tcp_fast_path_on(tp);
}
@@ -1066,9 +1071,9 @@
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
#define for_retrans_queue(skb, sk, tp) \
- for (skb = (sk)->write_queue.next; \
+ for (skb = (sk)->sk_write_queue.next; \
(skb != (tp)->send_head) && \
- (skb != (struct sk_buff *)&(sk)->write_queue); \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
skb=skb->next)
@@ -1080,12 +1085,12 @@
*/
static inline int tcp_min_write_space(struct sock *sk)
{
- return sk->wmem_queued/2;
+ return sk->sk_wmem_queued / 2;
}
static inline int tcp_wspace(struct sock *sk)
{
- return sk->sndbuf - sk->wmem_queued;
+ return sk->sk_sndbuf - sk->sk_wmem_queued;
}
@@ -1216,7 +1221,7 @@
{
return (skb->len < mss_now &&
!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
- (nonagle == 2 ||
+ ((nonagle&TCP_NAGLE_CORK) ||
(!nonagle &&
tp->packets_out &&
tcp_minshall_check(tp))));
@@ -1252,7 +1257,7 @@
/* Don't be strict about the congestion window for the
* final FIN frame. -DaveM
*/
- return ((nonagle==1 || tp->urg_mode
+ return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
|| !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
@@ -1267,7 +1272,7 @@
static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
{
- return (skb->next == (struct sk_buff*)&sk->write_queue);
+ return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}
/* Push out any pending frames which were held back due to
@@ -1283,7 +1288,7 @@
if (skb) {
if (!tcp_skb_is_last(sk, skb))
- nonagle = 1;
+ nonagle = TCP_NAGLE_PUSH;
if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
tcp_write_xmit(sk, nonagle))
tcp_check_probe_timer(sk, tp);
@@ -1303,7 +1308,7 @@
return (skb &&
tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
- tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
+ tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
}
static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
@@ -1365,19 +1370,19 @@
if (!sysctl_tcp_low_latency && tp->ucopy.task) {
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
- if (tp->ucopy.memory > sk->rcvbuf) {
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
struct sk_buff *skb1;
if (sock_owned_by_user(sk)) BUG();
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
- sk->backlog_rcv(sk, skb1);
+ sk->sk_backlog_rcv(sk, skb1);
NET_INC_STATS_BH(TCPPrequeueDropped);
}
tp->ucopy.memory = 0;
} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
- wake_up_interruptible(sk->sleep);
+ wake_up_interruptible(sk->sk_sleep);
if (!tcp_ack_scheduled(tp))
tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
}
@@ -1399,7 +1404,7 @@
static __inline__ void tcp_set_state(struct sock *sk, int state)
{
- int oldstate = sk->state;
+ int oldstate = sk->sk_state;
switch (state) {
case TCP_ESTABLISHED:
@@ -1411,8 +1416,8 @@
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
TCP_INC_STATS(TcpEstabResets);
- sk->prot->unhash(sk);
- if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
+ sk->sk_prot->unhash(sk);
+ if (sk->sk_prev && !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
tcp_put_port(sk);
/* fall through */
default:
@@ -1423,7 +1428,7 @@
/* Change state AFTER socket is unhashed to avoid closed
* socket sitting in hash tables.
*/
- sk->state = state;
+ sk->sk_state = state;
#ifdef STATE_TRACE
SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
@@ -1435,10 +1440,10 @@
tcp_set_state(sk, TCP_CLOSE);
tcp_clear_xmit_timers(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
else
tcp_destroy_sock(sk);
}
@@ -1588,27 +1593,28 @@
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(struct sock *sk)
{
- return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
+ return tcp_win_from_space(sk->sk_rcvbuf -
+ atomic_read(&sk->sk_rmem_alloc));
}
static inline int tcp_full_space( struct sock *sk)
{
- return tcp_win_from_space(sk->rcvbuf);
+ return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_acceptq_removed(struct sock *sk)
{
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
}
static inline void tcp_acceptq_added(struct sock *sk)
{
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
}
static inline int tcp_acceptq_is_full(struct sock *sk)
{
- return sk->ack_backlog > sk->max_ack_backlog;
+ return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
@@ -1711,15 +1717,15 @@
static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
{
tcp_sk(sk)->queue_shrunk = 1;
- sk->wmem_queued -= skb->truesize;
- sk->forward_alloc += skb->truesize;
+ sk->sk_wmem_queued -= skb->truesize;
+ sk->sk_forward_alloc += skb->truesize;
__kfree_skb(skb);
}
static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
- sk->wmem_queued += skb->truesize;
- sk->forward_alloc -= skb->truesize;
+ sk->sk_wmem_queued += skb->truesize;
+ sk->sk_forward_alloc -= skb->truesize;
}
extern void __tcp_mem_reclaim(struct sock *sk);
@@ -1727,7 +1733,7 @@
static inline void tcp_mem_reclaim(struct sock *sk)
{
- if (sk->forward_alloc >= TCP_MEM_QUANTUM)
+ if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM)
__tcp_mem_reclaim(sk);
}
@@ -1741,9 +1747,9 @@
static inline void tcp_moderate_sndbuf(struct sock *sk)
{
- if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
- sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
- sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+ sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+ sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
}
}
@@ -1753,7 +1759,7 @@
if (skb) {
skb->truesize += mem;
- if (sk->forward_alloc >= (int)skb->truesize ||
+ if (sk->sk_forward_alloc >= (int)skb->truesize ||
tcp_mem_schedule(sk, skb->truesize, 0)) {
skb_reserve(skb, MAX_TCP_HEADER);
return skb;
@@ -1773,9 +1779,9 @@
static inline struct page * tcp_alloc_page(struct sock *sk)
{
- if (sk->forward_alloc >= (int)PAGE_SIZE ||
+ if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
- struct page *page = alloc_pages(sk->allocation, 0);
+ struct page *page = alloc_pages(sk->sk_allocation, 0);
if (page)
return page;
}
@@ -1788,7 +1794,7 @@
{
struct sk_buff *skb;
- while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
+ while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
tcp_free_skb(sk, skb);
tcp_mem_reclaim(sk);
}
@@ -1799,8 +1805,8 @@
{
skb->sk = sk;
skb->destructor = tcp_rfree;
- atomic_add(skb->truesize, &sk->rmem_alloc);
- sk->forward_alloc -= skb->truesize;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk->sk_forward_alloc -= skb->truesize;
}
extern void tcp_listen_wlock(void);
@@ -1870,10 +1876,10 @@
static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
{
- sk->route_caps = dst->dev->features;
- if (sk->route_caps & NETIF_F_TSO) {
- if (sk->no_largesend || dst->header_len)
- sk->route_caps &= ~NETIF_F_TSO;
+ sk->sk_route_caps = dst->dev->features;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ if (sk->sk_no_largesend || dst->header_len)
+ sk->sk_route_caps &= ~NETIF_F_TSO;
}
}
diff -urN linux-2.5.70-bk11/include/net/tcp_ecn.h linux-2.5.70-bk12/include/net/tcp_ecn.h
--- linux-2.5.70-bk11/include/net/tcp_ecn.h 2003-05-26 18:00:44.000000000 -0700
+++ linux-2.5.70-bk12/include/net/tcp_ecn.h 2003-06-07 04:47:48.000000000 -0700
@@ -31,10 +31,10 @@
TCP_ECN_send_syn(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
tp->ecn_flags = 0;
- if (sysctl_tcp_ecn && !(sk->route_caps&NETIF_F_TSO)) {
+ if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) {
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
tp->ecn_flags = TCP_ECN_OK;
- sk->no_largesend = 1;
+ sk->sk_no_largesend = 1;
}
}
diff -urN linux-2.5.70-bk11/include/net/udp.h linux-2.5.70-bk12/include/net/udp.h
--- linux-2.5.70-bk11/include/net/udp.h 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/include/net/udp.h 2003-06-07 04:47:48.000000000 -0700
@@ -43,10 +43,9 @@
{
struct sock *sk = udp_hash[num & (UDP_HTABLE_SIZE - 1)];
- for(; sk != NULL; sk = sk->next) {
+ for (; sk; sk = sk->sk_next)
if (inet_sk(sk)->num == num)
return 1;
- }
return 0;
}
diff -urN linux-2.5.70-bk11/include/net/x25.h linux-2.5.70-bk12/include/net/x25.h
--- linux-2.5.70-bk11/include/net/x25.h 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/include/net/x25.h 2003-06-07 04:47:48.000000000 -0700
@@ -149,7 +149,7 @@
unsigned long vc_facil_mask; /* inc_call facilities mask */
};
-#define x25_sk(__sk) ((struct x25_opt *)(__sk)->protinfo)
+#define x25_sk(__sk) ((struct x25_opt *)(__sk)->sk_protinfo)
/* af_x25.c */
extern int sysctl_x25_restart_request_timeout;
diff -urN linux-2.5.70-bk11/include/net/xfrm.h linux-2.5.70-bk12/include/net/xfrm.h
--- linux-2.5.70-bk11/include/net/xfrm.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/net/xfrm.h 2003-06-07 04:47:48.000000000 -0700
@@ -79,7 +79,7 @@
We add genid to each dst plus pointer to genid of raw IP route,
pmtu disc will update pmtu on raw IP route and increase its genid.
dst_check() will see this for top level and trigger resyncing
- metrics. Plus, it will be made via sk->dst_cache. Solved.
+ metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
*/
/* Full description of state of transformer. */
@@ -587,7 +587,7 @@
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
- if (sk && sk->policy[XFRM_POLICY_IN])
+ if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, dir, skb, family);
return !xfrm_policy_list[dir] ||
@@ -629,7 +629,7 @@
static inline int xfrm_sk_clone_policy(struct sock *sk)
{
- if (unlikely(sk->policy[0] || sk->policy[1]))
+ if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
return __xfrm_sk_clone_policy(sk);
return 0;
}
@@ -638,13 +638,13 @@
static inline void xfrm_sk_free_policy(struct sock *sk)
{
- if (unlikely(sk->policy[0] != NULL)) {
- __xfrm_sk_free_policy(sk->policy[0], 0);
- sk->policy[0] = NULL;
- }
- if (unlikely(sk->policy[1] != NULL)) {
- __xfrm_sk_free_policy(sk->policy[1], 1);
- sk->policy[1] = NULL;
+ if (unlikely(sk->sk_policy[0] != NULL)) {
+ __xfrm_sk_free_policy(sk->sk_policy[0], 0);
+ sk->sk_policy[0] = NULL;
+ }
+ if (unlikely(sk->sk_policy[1] != NULL)) {
+ __xfrm_sk_free_policy(sk->sk_policy[1], 1);
+ sk->sk_policy[1] = NULL;
}
}
diff -urN linux-2.5.70-bk11/include/sound/ak4xxx-adda.h linux-2.5.70-bk12/include/sound/ak4xxx-adda.h
--- linux-2.5.70-bk11/include/sound/ak4xxx-adda.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/sound/ak4xxx-adda.h 2003-06-07 04:47:48.000000000 -0700
@@ -37,11 +37,13 @@
void (*set_rate_val)(akm4xxx_t *ak, unsigned int rate);
};
+#define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
+
struct snd_akm4xxx {
snd_card_t *card;
unsigned int num_adcs; /* AK4524 or AK4528 ADCs */
unsigned int num_dacs; /* AK4524 or AK4528 DACs */
- unsigned char images[AK4XXX_MAX_CHIPS][16]; /* saved register image */
+ unsigned char images[AK4XXX_IMAGE_SIZE]; /* saved register image */
unsigned char ipga_gain[AK4XXX_MAX_CHIPS][2]; /* saved register image for IPGA (AK4528) */
unsigned long private_value[AK4XXX_MAX_CHIPS]; /* helper for driver */
void *private_data[AK4XXX_MAX_CHIPS]; /* helper for driver */
@@ -58,4 +60,9 @@
void snd_akm4xxx_init(akm4xxx_t *ak);
int snd_akm4xxx_build_controls(akm4xxx_t *ak);
+#define snd_akm4xxx_get(ak,chip,reg) (ak)->images[(chip) * 16 + (reg)]
+#define snd_akm4xxx_set(ak,chip,reg,val) ((ak)->images[(chip) * 16 + (reg)] = (val))
+#define snd_akm4xxx_get_ipga(ak,chip,reg) (ak)->ipga_gain[chip][(reg)-4]
+#define snd_akm4xxx_set_ipga(ak,chip,reg,val) ((ak)->ipga_gain[chip][(reg)-4] = (val))
+
#endif /* __SOUND_AK4XXX_ADDA_H */
diff -urN linux-2.5.70-bk11/include/sound/core.h linux-2.5.70-bk12/include/sound/core.h
--- linux-2.5.70-bk11/include/sound/core.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/sound/core.h 2003-06-07 04:47:48.000000000 -0700
@@ -25,6 +25,7 @@
#include /* wake_up() */
#include /* struct semaphore */
#include /* struct rw_semaphore */
+#include /* struct workqueue_struct */
/* Typedef's */
typedef struct timespec snd_timestamp_t;
@@ -158,6 +159,7 @@
spinlock_t files_lock; /* lock the files for this card */
int shutdown; /* this card is going down */
wait_queue_head_t shutdown_sleep;
+ struct work_struct free_workq; /* for free in workqueue */
#ifdef CONFIG_PM
int (*set_power_state) (snd_card_t *card, unsigned int state);
diff -urN linux-2.5.70-bk11/include/sound/version.h linux-2.5.70-bk12/include/sound/version.h
--- linux-2.5.70-bk11/include/sound/version.h 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/include/sound/version.h 2003-06-07 04:47:48.000000000 -0700
@@ -1,3 +1,3 @@
/* include/version.h. Generated by configure. */
#define CONFIG_SND_VERSION "0.9.4"
-#define CONFIG_SND_DATE " (Sat May 31 13:37:06 2003 UTC)"
+#define CONFIG_SND_DATE " (Fri Jun 06 09:23:03 2003 UTC)"
diff -urN linux-2.5.70-bk11/init/main.c linux-2.5.70-bk12/init/main.c
--- linux-2.5.70-bk11/init/main.c 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/init/main.c 2003-06-07 04:47:48.000000000 -0700
@@ -37,6 +37,7 @@
#include
#include
#include
+#include
#include
#include
@@ -318,14 +319,16 @@
/* Copy section for each CPU (we discard the original) */
size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
- if (!size)
- return;
+#ifdef CONFIG_MODULES
+ if (size < PERCPU_ENOUGH_ROOM)
+ size = PERCPU_ENOUGH_ROOM;
+#endif
ptr = alloc_bootmem(size * NR_CPUS);
for (i = 0; i < NR_CPUS; i++, ptr += size) {
__per_cpu_offset[i] = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, size);
+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
}
#endif /* !__GENERIC_PER_CPU */
@@ -388,6 +391,7 @@
lock_kernel();
printk(linux_banner);
setup_arch(&command_line);
+ setup_per_zone_pages_min();
setup_per_cpu_areas();
/*
diff -urN linux-2.5.70-bk11/kernel/cpu.c linux-2.5.70-bk12/kernel/cpu.c
--- linux-2.5.70-bk11/kernel/cpu.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/kernel/cpu.c 2003-06-07 04:47:48.000000000 -0700
@@ -8,6 +8,7 @@
#include
#include
#include
+#include
#include
/* This protects CPUs going up and down... */
diff -urN linux-2.5.70-bk11/kernel/kallsyms.c linux-2.5.70-bk12/kernel/kallsyms.c
--- linux-2.5.70-bk11/kernel/kallsyms.c 2003-05-26 18:01:00.000000000 -0700
+++ linux-2.5.70-bk12/kernel/kallsyms.c 2003-06-07 04:47:48.000000000 -0700
@@ -8,6 +8,11 @@
*/
#include
#include
+#include
+#include
+#include
+#include
+#include
/* These will be re-linked against their real values during the second link stage */
extern unsigned long kallsyms_addresses[] __attribute__((weak));
@@ -117,5 +122,170 @@
}
}
+/* To avoid O(n^2) iteration, we carry prefix along. */
+struct kallsym_iter
+{
+ loff_t pos;
+ struct module *owner;
+ unsigned long value;
+ unsigned int nameoff; /* If iterating in core kernel symbols */
+ char type;
+ char name[128];
+};
+
+/* Only label it "global" if it is exported. */
+static void upcase_if_global(struct kallsym_iter *iter)
+{
+ if (is_exported(iter->name, iter->owner))
+ iter->type += 'A' - 'a';
+}
+
+static int get_ksymbol_mod(struct kallsym_iter *iter)
+{
+ iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms,
+ &iter->value,
+ &iter->type, iter->name);
+ if (iter->owner == NULL)
+ return 0;
+
+ upcase_if_global(iter);
+ return 1;
+}
+
+static void get_ksymbol_core(struct kallsym_iter *iter)
+{
+ unsigned stemlen;
+
+ /* First char of each symbol name indicates prefix length
+ shared with previous name (stem compresion). */
+ stemlen = kallsyms_names[iter->nameoff++];
+
+ strlcpy(iter->name+stemlen, kallsyms_names+iter->nameoff, 128-stemlen);
+ iter->nameoff += strlen(kallsyms_names + iter->nameoff) + 1;
+ iter->owner = NULL;
+ iter->value = kallsyms_addresses[iter->pos];
+ iter->type = 't';
+
+ upcase_if_global(iter);
+}
+
+static void reset_iter(struct kallsym_iter *iter)
+{
+ iter->name[0] = '\0';
+ iter->nameoff = 0;
+ iter->pos = 0;
+}
+
+/* Returns false if pos at or past end of file. */
+static int update_iter(struct kallsym_iter *iter, loff_t pos)
+{
+ /* Module symbols can be accessed randomly. */
+ if (pos >= kallsyms_num_syms) {
+ iter->pos = pos;
+ return get_ksymbol_mod(iter);
+ }
+
+ /* If we're past the desired position, reset to start. */
+ if (pos < iter->pos)
+ reset_iter(iter);
+
+ /* We need to iterate through the previous symbols. */
+ for (; iter->pos <= pos; iter->pos++)
+ get_ksymbol_core(iter);
+ return 1;
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ (*pos)++;
+
+ if (!update_iter(m->private, *pos))
+ return NULL;
+ return p;
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ if (!update_iter(m->private, *pos))
+ return NULL;
+ return m->private;
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ struct kallsym_iter *iter = m->private;
+
+ /* Some debugging symbols have no name. Ignore them. */
+ if (!iter->name[0])
+ return 0;
+
+ if (iter->owner)
+ seq_printf(m, "%0*lx %c %s\t[%s]\n",
+ (int)(2*sizeof(void*)),
+ iter->value, iter->type, iter->name,
+ module_name(iter->owner));
+ else
+ seq_printf(m, "%0*lx %c %s\n",
+ (int)(2*sizeof(void*)),
+ iter->value, iter->type, iter->name);
+ return 0;
+}
+
+struct seq_operations kallsyms_op = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show
+};
+
+static int kallsyms_open(struct inode *inode, struct file *file)
+{
+ /* We keep iterator in m->private, since normal case is to
+ * s_start from where we left off, so we avoid O(N^2). */
+ struct kallsym_iter *iter;
+ int ret;
+
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+
+ ret = seq_open(file, &kallsyms_op);
+ if (ret == 0)
+ ((struct seq_file *)file->private_data)->private = iter;
+ else
+ kfree(iter);
+ return ret;
+}
+
+static int kallsyms_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = (struct seq_file *)file->private_data;
+ kfree(m->private);
+ return seq_release(inode, file);
+}
+
+static struct file_operations kallsyms_operations = {
+ .open = kallsyms_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = kallsyms_release,
+};
+
+int __init kallsyms_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ /* root-only: could chew up lots of cpu by read, seek back, read... */
+ entry = create_proc_entry("kallsyms", 0400, NULL);
+ if (entry)
+ entry->proc_fops = &kallsyms_operations;
+ return 0;
+}
+__initcall(kallsyms_init);
+
EXPORT_SYMBOL(kallsyms_lookup);
EXPORT_SYMBOL(__print_symbol);
diff -urN linux-2.5.70-bk11/kernel/ksyms.c linux-2.5.70-bk12/kernel/ksyms.c
--- linux-2.5.70-bk11/kernel/ksyms.c 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/kernel/ksyms.c 2003-06-07 04:47:48.000000000 -0700
@@ -98,8 +98,8 @@
EXPORT_SYMBOL(kmalloc);
EXPORT_SYMBOL(kfree);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(kmalloc_percpu);
-EXPORT_SYMBOL(kfree_percpu);
+EXPORT_SYMBOL(__alloc_percpu);
+EXPORT_SYMBOL(free_percpu);
EXPORT_SYMBOL(percpu_counter_mod);
#endif
EXPORT_SYMBOL(vfree);
diff -urN linux-2.5.70-bk11/kernel/module.c linux-2.5.70-bk12/kernel/module.c
--- linux-2.5.70-bk11/kernel/module.c 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/kernel/module.c 2003-06-07 04:47:48.000000000 -0700
@@ -205,6 +205,167 @@
return NULL;
}
+#ifdef CONFIG_SMP
+/* Number of blocks used and allocated. */
+static unsigned int pcpu_num_used, pcpu_num_allocated;
+/* Size of each block. -ve means used. */
+static int *pcpu_size;
+
+static int split_block(unsigned int i, unsigned short size)
+{
+ /* Reallocation required? */
+ if (pcpu_num_used + 1 > pcpu_num_allocated) {
+ int *new = kmalloc(sizeof(new[0]) * pcpu_num_allocated*2,
+ GFP_KERNEL);
+ if (!new)
+ return 0;
+
+ memcpy(new, pcpu_size, sizeof(new[0])*pcpu_num_allocated);
+ pcpu_num_allocated *= 2;
+ kfree(pcpu_size);
+ pcpu_size = new;
+ }
+
+ /* Insert a new subblock */
+ memmove(&pcpu_size[i+1], &pcpu_size[i],
+ sizeof(pcpu_size[0]) * (pcpu_num_used - i));
+ pcpu_num_used++;
+
+ pcpu_size[i+1] -= size;
+ pcpu_size[i] = size;
+ return 1;
+}
+
+static inline unsigned int block_size(int val)
+{
+ if (val < 0)
+ return -val;
+ return val;
+}
+
+/* Created by linker magic */
+extern char __per_cpu_start[], __per_cpu_end[];
+
+static void *percpu_modalloc(unsigned long size, unsigned long align)
+{
+ unsigned long extra;
+ unsigned int i;
+ void *ptr;
+
+ BUG_ON(align > SMP_CACHE_BYTES);
+
+ ptr = __per_cpu_start;
+ for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
+ /* Extra for alignment requirement. */
+ extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
+ BUG_ON(i == 0 && extra != 0);
+
+ if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
+ continue;
+
+ /* Transfer extra to previous block. */
+ if (pcpu_size[i-1] < 0)
+ pcpu_size[i-1] -= extra;
+ else
+ pcpu_size[i-1] += extra;
+ pcpu_size[i] -= extra;
+ ptr += extra;
+
+ /* Split block if warranted */
+ if (pcpu_size[i] - size > sizeof(unsigned long))
+ if (!split_block(i, size))
+ return NULL;
+
+ /* Mark allocated */
+ pcpu_size[i] = -pcpu_size[i];
+ return ptr;
+ }
+
+ printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
+ size);
+ return NULL;
+}
+
+static void percpu_modfree(void *freeme)
+{
+ unsigned int i;
+ void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
+
+ /* First entry is core kernel percpu data. */
+ for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
+ if (ptr == freeme) {
+ pcpu_size[i] = -pcpu_size[i];
+ goto free;
+ }
+ }
+ BUG();
+
+ free:
+ /* Merge with previous? */
+ if (pcpu_size[i-1] >= 0) {
+ pcpu_size[i-1] += pcpu_size[i];
+ pcpu_num_used--;
+ memmove(&pcpu_size[i], &pcpu_size[i+1],
+ (pcpu_num_used - i) * sizeof(pcpu_size[0]));
+ i--;
+ }
+ /* Merge with next? */
+ if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
+ pcpu_size[i] += pcpu_size[i+1];
+ pcpu_num_used--;
+ memmove(&pcpu_size[i+1], &pcpu_size[i+2],
+ (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
+ }
+}
+
+static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ const char *secstrings)
+{
+ return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
+}
+
+static int percpu_modinit(void)
+{
+ pcpu_num_used = 2;
+ pcpu_num_allocated = 2;
+ pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
+ GFP_KERNEL);
+ /* Static in-kernel percpu data (used). */
+ pcpu_size[0] = -ALIGN(__per_cpu_end-__per_cpu_start, SMP_CACHE_BYTES);
+ /* Free room. */
+ pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
+ if (pcpu_size[1] < 0) {
+ printk(KERN_ERR "No per-cpu room for modules.\n");
+ pcpu_num_used = 1;
+ }
+
+ return 0;
+}
+__initcall(percpu_modinit);
+#else /* ... !CONFIG_SMP */
+static inline void *percpu_modalloc(unsigned long size, unsigned long align)
+{
+ return NULL;
+}
+static inline void percpu_modfree(void *pcpuptr)
+{
+ BUG();
+}
+static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ const char *secstrings)
+{
+ return 0;
+}
+static inline void percpu_modcopy(void *pcpudst, const void *src,
+ unsigned long size)
+{
+ /* pcpusec should be 0, and size of that section should be 0. */
+ BUG_ON(size != 0);
+}
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_MODULE_UNLOAD
/* Init the unload section of the module. */
static void module_unload_init(struct module *mod)
@@ -913,6 +1074,8 @@
/* This may be NULL, but that's OK */
module_free(mod, mod->module_init);
kfree(mod->args);
+ if (mod->percpu)
+ percpu_modfree(mod->percpu);
/* Finally, free the core (containing the module structure) */
module_free(mod, mod->module_core);
@@ -939,10 +1102,11 @@
unsigned int symindex,
const char *strtab,
unsigned int versindex,
+ unsigned int pcpuindex,
struct module *mod)
{
Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
-
+ unsigned long secbase;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
int ret = 0;
@@ -979,10 +1143,12 @@
break;
default:
- sym[i].st_value
- = (unsigned long)
- (sechdrs[sym[i].st_shndx].sh_addr
- + sym[i].st_value);
+ /* Divert to percpu allocation if a percpu var. */
+ if (sym[i].st_shndx == pcpuindex)
+ secbase = (unsigned long)mod->percpu;
+ else
+ secbase = sechdrs[sym[i].st_shndx].sh_addr;
+ sym[i].st_value += secbase;
break;
}
}
@@ -1108,6 +1274,83 @@
return NULL;
}
+#ifdef CONFIG_KALLSYMS
+int is_exported(const char *name, const struct module *mod)
+{
+ unsigned int i;
+
+ if (!mod) {
+ for (i = 0; __start___ksymtab+i < __stop___ksymtab; i++)
+ if (strcmp(__start___ksymtab[i].name, name) == 0)
+ return 1;
+ return 0;
+ }
+ for (i = 0; i < mod->num_syms; i++)
+ if (strcmp(mod->syms[i].name, name) == 0)
+ return 1;
+ return 0;
+}
+
+/* As per nm */
+static char elf_type(const Elf_Sym *sym,
+ Elf_Shdr *sechdrs,
+ const char *secstrings,
+ struct module *mod)
+{
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
+ if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
+ return 'v';
+ else
+ return 'w';
+ }
+ if (sym->st_shndx == SHN_UNDEF)
+ return 'U';
+ if (sym->st_shndx == SHN_ABS)
+ return 'a';
+ if (sym->st_shndx >= SHN_LORESERVE)
+ return '?';
+ if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
+ return 't';
+ if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
+ && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
+ if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
+ return 'r';
+ else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
+ return 'g';
+ else
+ return 'd';
+ }
+ if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
+ if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
+ return 's';
+ else
+ return 'b';
+ }
+ if (strncmp(secstrings + sechdrs[sym->st_shndx].sh_name,
+ ".debug", strlen(".debug")) == 0)
+ return 'n';
+ return '?';
+}
+
+static void add_kallsyms(struct module *mod,
+ Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ unsigned int strindex,
+ const char *secstrings)
+{
+ unsigned int i;
+
+ mod->symtab = (void *)sechdrs[symindex].sh_addr;
+ mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+ mod->strtab = (void *)sechdrs[strindex].sh_addr;
+
+ /* Set types up while we still have access to sections. */
+ for (i = 0; i < mod->num_symtab; i++)
+ mod->symtab[i].st_info
+ = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
+}
+#endif
+
/* Allocate and load the module: note that size of section 0 is always
zero, and we rely on this for optional sections. */
static struct module *load_module(void __user *umod,
@@ -1119,7 +1362,7 @@
char *secstrings, *args, *modmagic, *strtab = NULL;
unsigned int i, symindex = 0, strindex = 0, setupindex, exindex,
exportindex, modindex, obsparmindex, infoindex, gplindex,
- crcindex, gplcrcindex, versindex;
+ crcindex, gplcrcindex, versindex, pcpuindex;
long arglen;
struct module *mod;
long err = 0;
@@ -1194,6 +1437,7 @@
obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
+ pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);
/* Don't keep modinfo section */
sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
@@ -1250,6 +1494,17 @@
if (err < 0)
goto free_mod;
+ if (pcpuindex) {
+ /* We have a special allocation for this section. */
+ mod->percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
+ sechdrs[pcpuindex].sh_addralign);
+ if (!mod->percpu) {
+ err = -ENOMEM;
+ goto free_mod;
+ }
+ sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
+ }
+
/* Determine total sizes, and put offsets in sh_entsize. For now
this is done generically; there doesn't appear to be any
special cases for the architectures. */
@@ -1259,7 +1514,7 @@
ptr = module_alloc(mod->core_size);
if (!ptr) {
err = -ENOMEM;
- goto free_mod;
+ goto free_percpu;
}
memset(ptr, 0, mod->core_size);
mod->module_core = ptr;
@@ -1303,7 +1558,8 @@
set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
/* Fix up syms, so that st_value is a pointer to location. */
- err = simplify_symbols(sechdrs, symindex, strtab, versindex, mod);
+ err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
+ mod);
if (err < 0)
goto cleanup;
@@ -1342,15 +1598,16 @@
goto cleanup;
}
-#ifdef CONFIG_KALLSYMS
- mod->symtab = (void *)sechdrs[symindex].sh_addr;
- mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
- mod->strtab = (void *)sechdrs[strindex].sh_addr;
-#endif
+ /* Finally, copy percpu area over. */
+ percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
+ sechdrs[pcpuindex].sh_size);
+
err = module_finalize(hdr, sechdrs, mod);
if (err < 0)
goto cleanup;
+ add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
+
mod->args = args;
if (obsparmindex) {
err = obsolete_params(mod->name, mod->args,
@@ -1383,6 +1640,9 @@
module_free(mod, mod->module_init);
free_core:
module_free(mod, mod->module_core);
+ free_percpu:
+ if (mod->percpu)
+ percpu_modfree(mod->percpu);
free_mod:
kfree(args);
free_hdr:
@@ -1529,6 +1789,30 @@
}
return NULL;
}
+
+struct module *module_get_kallsym(unsigned int symnum,
+ unsigned long *value,
+ char *type,
+ char namebuf[128])
+{
+ struct module *mod;
+
+ down(&module_mutex);
+ list_for_each_entry(mod, &modules, list) {
+ if (symnum < mod->num_symtab) {
+ *value = mod->symtab[symnum].st_value;
+ *type = mod->symtab[symnum].st_info;
+ strncpy(namebuf,
+ mod->strtab + mod->symtab[symnum].st_name,
+ 127);
+ up(&module_mutex);
+ return mod;
+ }
+ symnum -= mod->num_symtab;
+ }
+ up(&module_mutex);
+ return NULL;
+}
#endif /* CONFIG_KALLSYMS */
/* Called by the /proc file system to return a list of modules. */
diff -urN linux-2.5.70-bk11/kernel/rcupdate.c linux-2.5.70-bk12/kernel/rcupdate.c
--- linux-2.5.70-bk11/kernel/rcupdate.c 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/kernel/rcupdate.c 2003-06-07 04:47:48.000000000 -0700
@@ -43,6 +43,7 @@
#include
#include
#include
+#include
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
diff -urN linux-2.5.70-bk11/kernel/sched.c linux-2.5.70-bk12/kernel/sched.c
--- linux-2.5.70-bk11/kernel/sched.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/kernel/sched.c 2003-06-07 04:47:48.000000000 -0700
@@ -32,6 +32,7 @@
#include
#include
#include
+#include
#ifdef CONFIG_NUMA
#define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
@@ -779,7 +780,7 @@
return best_cpu;
minload = 10000000;
- for (i = 0; i < numnodes; i++) {
+ for_each_node_with_cpus(i) {
load = atomic_read(&node_nr_running[i]);
if (load < minload) {
minload = load;
diff -urN linux-2.5.70-bk11/kernel/softirq.c linux-2.5.70-bk12/kernel/softirq.c
--- linux-2.5.70-bk11/kernel/softirq.c 2003-05-26 18:00:26.000000000 -0700
+++ linux-2.5.70-bk12/kernel/softirq.c 2003-06-07 04:47:48.000000000 -0700
@@ -14,6 +14,7 @@
#include
#include
#include
+#include
/*
- No shared variables, all the data are CPU local.
diff -urN linux-2.5.70-bk11/kernel/sysctl.c linux-2.5.70-bk12/kernel/sysctl.c
--- linux-2.5.70-bk11/kernel/sysctl.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/kernel/sysctl.c 2003-06-07 04:47:48.000000000 -0700
@@ -57,6 +57,7 @@
extern int cad_pid;
extern int pid_max;
extern int sysctl_lower_zone_protection;
+extern int min_free_kbytes;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
@@ -661,6 +662,16 @@
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
+ {
+ .ctl_name = VM_MIN_FREE_KBYTES,
+ .procname = "min_free_kbytes",
+ .data = &min_free_kbytes,
+ .maxlen = sizeof(min_free_kbytes),
+ .mode = 0644,
+ .proc_handler = &min_free_kbytes_sysctl_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
{ .ctl_name = 0 }
};
diff -urN linux-2.5.70-bk11/kernel/timer.c linux-2.5.70-bk12/kernel/timer.c
--- linux-2.5.70-bk11/kernel/timer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/kernel/timer.c 2003-06-07 04:47:48.000000000 -0700
@@ -29,6 +29,7 @@
#include
#include
#include
+#include
#include
#include
@@ -1232,8 +1233,8 @@
#endif
struct time_interpolator *time_interpolator;
-struct time_interpolator *time_interpolator_list;
-spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
+static struct time_interpolator *time_interpolator_list;
+static spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
static inline int
is_better_time_interpolator(struct time_interpolator *new)
diff -urN linux-2.5.70-bk11/mm/page-writeback.c linux-2.5.70-bk12/mm/page-writeback.c
--- linux-2.5.70-bk11/mm/page-writeback.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/mm/page-writeback.c 2003-06-07 04:47:48.000000000 -0700
@@ -27,6 +27,7 @@
#include
#include
#include
+#include
/*
* The maximum number of pages to writeout in a single bdflush/kupdate
@@ -220,7 +221,6 @@
}
put_cpu();
}
-EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited);
/*
* writeback at least _min_pages, and keep writing until the amount of dirty
diff -urN linux-2.5.70-bk11/mm/page_alloc.c linux-2.5.70-bk12/mm/page_alloc.c
--- linux-2.5.70-bk11/mm/page_alloc.c 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/mm/page_alloc.c 2003-06-07 04:47:48.000000000 -0700
@@ -28,8 +28,9 @@
#include
#include
#include
-
-#include
+#include
+#include
+#include
DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
DECLARE_BITMAP(memblk_online_map, MAX_NR_MEMBLKS);
@@ -48,9 +49,7 @@
EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
-static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
-static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
-static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
+int min_free_kbytes = 1024;
/*
* Temporary debugging check for pages not lying within a given zone.
@@ -1206,7 +1205,6 @@
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
- unsigned long mask;
unsigned long size, realsize;
unsigned long batch;
@@ -1280,15 +1278,6 @@
pgdat->nr_zones = j+1;
- mask = (realsize / zone_balance_ratio[j]);
- if (mask < zone_balance_min[j])
- mask = zone_balance_min[j];
- else if (mask > zone_balance_max[j])
- mask = zone_balance_max[j];
- zone->pages_min = mask;
- zone->pages_low = mask*2;
- zone->pages_high = mask*3;
-
zone->zone_mem_map = lmem_map;
zone->zone_start_pfn = zone_start_pfn;
@@ -1373,19 +1362,6 @@
}
#endif
-static int __init setup_mem_frac(char *str)
-{
- int j = 0;
-
- while (get_option(&str, &zone_balance_ratio[j++]) == 2);
- printk("setup_mem_frac: ");
- for (j = 0; j < MAX_NR_ZONES; j++) printk("%d ", zone_balance_ratio[j]);
- printk("\n");
- return 1;
-}
-
-__setup("memfrac=", setup_mem_frac);
-
#ifdef CONFIG_PROC_FS
#include
@@ -1562,3 +1538,64 @@
init_page_alloc_cpu(smp_processor_id());
register_cpu_notifier(&page_alloc_nb);
}
+
+/*
+ * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
+ * that the pages_{min,low,high} values for each zone are set correctly
+ * with respect to min_free_kbytes.
+ */
+void setup_per_zone_pages_min(void)
+{
+ unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+ unsigned long lowmem_pages = 0;
+ struct zone *zone;
+ unsigned long flags;
+
+ /* Calculate total number of !ZONE_HIGHMEM pages */
+ for_each_zone(zone)
+ if (!is_highmem(zone))
+ lowmem_pages += zone->present_pages;
+
+ for_each_zone(zone) {
+ spin_lock_irqsave(&zone->lru_lock, flags);
+ if (is_highmem(zone)) {
+ /*
+ * Often, highmem doesn't need to reserve any pages.
+ * But the pages_min/low/high values are also used for
+ * batching up page reclaim activity so we need a
+ * decent value here.
+ */
+ int min_pages;
+
+ min_pages = zone->present_pages / 1024;
+ if (min_pages < SWAP_CLUSTER_MAX)
+ min_pages = SWAP_CLUSTER_MAX;
+ if (min_pages > 128)
+ min_pages = 128;
+ zone->pages_min = min_pages;
+ } else {
+ /* if it's a lowmem zone, reserve a number of pages
+ * proportionate to the zone's size.
+ */
+ zone->pages_min = (pages_min * zone->present_pages) /
+ lowmem_pages;
+ }
+
+ zone->pages_low = zone->pages_min * 2;
+ zone->pages_high = zone->pages_min * 3;
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ }
+}
+
+/*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+ * that we can call setup_per_zone_pages_min() whenever min_free_kbytes
+ * changes.
+ */
+int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
+ struct file *file, void *buffer, size_t *length)
+{
+ proc_dointvec(table, write, file, buffer, length);
+ setup_per_zone_pages_min();
+ return 0;
+}
diff -urN linux-2.5.70-bk11/mm/slab.c linux-2.5.70-bk12/mm/slab.c
--- linux-2.5.70-bk11/mm/slab.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/mm/slab.c 2003-06-07 04:47:48.000000000 -0700
@@ -25,6 +25,10 @@
* page long) and always contiguous), and each slab contains multiple
* initialized objects.
*
+ * This means, that your constructor is used only for newly allocated
+ * slabs and you must pass objects with the same intializations to
+ * kmem_cache_free.
+ *
* Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
* normal). If you need a special memory type, then must create a new
* cache for that memory type.
@@ -84,6 +88,7 @@
#include
#include
#include
+#include
#include
/*
@@ -782,7 +787,7 @@
*(unsigned char *)(addr+size-1) = POISON_END;
}
-static void *fprob(unsigned char* addr, unsigned int size)
+static void *scan_poisoned_obj(unsigned char* addr, unsigned int size)
{
unsigned char *end;
@@ -808,7 +813,7 @@
if (cachep->flags & SLAB_STORE_USER) {
size -= BYTES_PER_WORD;
}
- end = fprob(addr, size);
+ end = scan_poisoned_obj(addr, size);
if (end) {
int s;
printk(KERN_ERR "Slab corruption: start=%p, expend=%p, "
@@ -1984,26 +1989,18 @@
#ifdef CONFIG_SMP
/**
- * kmalloc_percpu - allocate one copy of the object for every present
- * cpu in the system.
+ * __alloc_percpu - allocate one copy of the object for every present
+ * cpu in the system, zeroing them.
* Objects should be dereferenced using per_cpu_ptr/get_cpu_ptr
* macros only.
*
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
+ * @align: the alignment, which can't be greater than SMP_CACHE_BYTES.
*/
-void *
-kmalloc_percpu(size_t size, int flags)
+void *__alloc_percpu(size_t size, size_t align)
{
int i;
- struct percpu_data *pdata = kmalloc(sizeof (*pdata), flags);
+ struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -2011,9 +2008,10 @@
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- pdata->ptrs[i] = kmalloc(size, flags);
+ pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
if (!pdata->ptrs[i])
goto unwind_oom;
+ memset(pdata->ptrs[i], 0, size);
}
/* Catch derefs w/o wrappers */
@@ -2070,14 +2068,14 @@
#ifdef CONFIG_SMP
/**
- * kfree_percpu - free previously allocated percpu memory
- * @objp: pointer returned by kmalloc_percpu.
+ * free_percpu - free previously allocated percpu memory
+ * @objp: pointer returned by alloc_percpu.
*
- * Don't free memory not originally allocated by kmalloc_percpu()
+ * Don't free memory not originally allocated by alloc_percpu()
* The complemented objp is to check for that.
*/
void
-kfree_percpu(const void *objp)
+free_percpu(const void *objp)
{
int i;
struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
diff -urN linux-2.5.70-bk11/mm/vmscan.c linux-2.5.70-bk12/mm/vmscan.c
--- linux-2.5.70-bk11/mm/vmscan.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/mm/vmscan.c 2003-06-07 04:47:48.000000000 -0700
@@ -28,10 +28,10 @@
#include
#include
#include
+#include
#include
#include
-#include
#include
#include
diff -urN linux-2.5.70-bk11/net/appletalk/aarp.c linux-2.5.70-bk12/net/appletalk/aarp.c
--- linux-2.5.70-bk11/net/appletalk/aarp.c 2003-05-26 18:00:59.000000000 -0700
+++ linux-2.5.70-bk12/net/appletalk/aarp.c 2003-06-07 04:47:48.000000000 -0700
@@ -630,7 +630,7 @@
sendit:
if (skb->sk)
- skb->priority = skb->sk->priority;
+ skb->priority = skb->sk->sk_priority;
dev_queue_xmit(skb);
sent:
return 1;
diff -urN linux-2.5.70-bk11/net/appletalk/atalk_proc.c linux-2.5.70-bk12/net/appletalk/atalk_proc.c
--- linux-2.5.70-bk11/net/appletalk/atalk_proc.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/net/appletalk/atalk_proc.c 2003-06-07 04:47:48.000000000 -0700
@@ -144,7 +144,7 @@
{
struct sock *s;
- for (s = atalk_sockets; pos && s; s = s->next)
+ for (s = atalk_sockets; pos && s; s = s->sk_next)
--pos;
return s;
@@ -170,7 +170,7 @@
goto out;
}
i = v;
- i = i->next;
+ i = i->sk_next;
out:
return i;
}
@@ -196,10 +196,11 @@
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
"%02X %d\n",
- s->type, ntohs(at->src_net), at->src_node, at->src_port,
+ s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
ntohs(at->dest_net), at->dest_node, at->dest_port,
- atomic_read(&s->wmem_alloc), atomic_read(&s->rmem_alloc),
- s->state, SOCK_INODE(s->socket)->i_uid);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
out:
return 0;
}
diff -urN linux-2.5.70-bk11/net/appletalk/ddp.c linux-2.5.70-bk12/net/appletalk/ddp.c
--- linux-2.5.70-bk11/net/appletalk/ddp.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/appletalk/ddp.c 2003-06-07 04:47:48.000000000 -0700
@@ -92,11 +92,11 @@
static inline void atalk_insert_socket(struct sock *sk)
{
write_lock_bh(&atalk_sockets_lock);
- sk->next = atalk_sockets;
- if (sk->next)
- atalk_sockets->pprev = &sk->next;
+ sk->sk_next = atalk_sockets;
+ if (sk->sk_next)
+ atalk_sockets->sk_pprev = &sk->sk_next;
atalk_sockets = sk;
- sk->pprev = &atalk_sockets;
+ sk->sk_pprev = &atalk_sockets;
write_unlock_bh(&atalk_sockets_lock);
}
#endif
@@ -104,11 +104,11 @@
static inline void atalk_remove_socket(struct sock *sk)
{
write_lock_bh(&atalk_sockets_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
}
write_unlock_bh(&atalk_sockets_lock);
}
@@ -119,7 +119,7 @@
struct sock *s;
read_lock_bh(&atalk_sockets_lock);
- for (s = atalk_sockets; s; s = s->next) {
+ for (s = atalk_sockets; s; s = s->sk_next) {
struct atalk_sock *at = at_sk(s);
if (to->sat_port != at->src_port)
@@ -165,7 +165,7 @@
struct sock *s;
write_lock_bh(&atalk_sockets_lock);
- for (s = atalk_sockets; s; s = s->next) {
+ for (s = atalk_sockets; s; s = s->sk_next) {
struct atalk_sock *at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
@@ -176,11 +176,11 @@
if (!s) {
/* Wheee, it's free, assign and insert. */
- sk->next = atalk_sockets;
- if (sk->next)
- atalk_sockets->pprev = &sk->next;
+ sk->sk_next = atalk_sockets;
+ if (sk->sk_next)
+ atalk_sockets->sk_pprev = &sk->sk_next;
atalk_sockets = sk;
- sk->pprev = &atalk_sockets;
+ sk->sk_pprev = &atalk_sockets;
}
write_unlock_bh(&atalk_sockets_lock);
@@ -191,29 +191,29 @@
{
struct sock *sk = (struct sock *)data;
- if (!atomic_read(&sk->wmem_alloc) &&
- !atomic_read(&sk->rmem_alloc) && sock_flag(sk, SOCK_DEAD))
+ if (!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc) && sock_flag(sk, SOCK_DEAD))
sock_put(sk);
else {
- sk->timer.expires = jiffies + SOCK_DESTROY_TIME;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
+ add_timer(&sk->sk_timer);
}
}
static inline void atalk_destroy_socket(struct sock *sk)
{
atalk_remove_socket(sk);
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
- if (!atomic_read(&sk->wmem_alloc) &&
- !atomic_read(&sk->rmem_alloc) && sock_flag(sk, SOCK_DEAD))
+ if (!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc) && sock_flag(sk, SOCK_DEAD))
sock_put(sk);
else {
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + SOCK_DESTROY_TIME;
- sk->timer.function = atalk_destroy_timer;
- sk->timer.data = (unsigned long) sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
+ sk->sk_timer.function = atalk_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
}
}
@@ -992,7 +992,7 @@
sock->ops = &atalk_dgram_ops;
sock_init_data(sock, sk);
/* Checksums on by default */
- sk->zapped = 1;
+ sk->sk_zapped = 1;
out:
return rc;
outsk:
@@ -1007,7 +1007,7 @@
if (sk) {
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
sock->sk = NULL;
@@ -1036,7 +1036,7 @@
for (sat->sat_port = ATPORT_RESERVED;
sat->sat_port < ATPORT_LAST;
sat->sat_port++) {
- for (s = atalk_sockets; s; s = s->next) {
+ for (s = atalk_sockets; s; s = s->sk_next) {
struct atalk_sock *at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
@@ -1046,11 +1046,11 @@
}
/* Wheee, it's free, assign and insert. */
- sk->next = atalk_sockets;
- if (sk->next)
- atalk_sockets->pprev = &sk->next;
+ sk->sk_next = atalk_sockets;
+ if (sk->sk_next)
+ atalk_sockets->sk_pprev = &sk->sk_next;
atalk_sockets = sk;
- sk->pprev = &atalk_sockets;
+ sk->sk_pprev = &atalk_sockets;
at_sk(sk)->src_port = sat->sat_port;
retval = 0;
goto out;
@@ -1079,7 +1079,7 @@
n = atalk_pick_and_bind_port(sk, &sat);
if (!n)
- sk->zapped = 0;
+ sk->sk_zapped = 0;
out:
return n;
}
@@ -1091,7 +1091,7 @@
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
- if (!sk->zapped || addr_len != sizeof(struct sockaddr_at))
+ if (!sk->sk_zapped || addr_len != sizeof(struct sockaddr_at))
return -EINVAL;
if (addr->sat_family != AF_APPLETALK)
@@ -1126,7 +1126,7 @@
return -EADDRINUSE;
}
- sk->zapped = 0;
+ sk->sk_zapped = 0;
return 0;
}
@@ -1138,7 +1138,7 @@
struct atalk_sock *at = at_sk(sk);
struct sockaddr_at *addr;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(*addr))
@@ -1161,7 +1161,7 @@
#endif
}
- if (sk->zapped)
+ if (sk->sk_zapped)
if (atalk_autobind(sk) < 0)
return -EBUSY;
@@ -1172,8 +1172,8 @@
at->dest_net = addr->sat_addr.s_net;
at->dest_node = addr->sat_addr.s_node;
- sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sock->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
return 0;
}
@@ -1188,14 +1188,14 @@
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
- if (sk->zapped)
+ if (sk->sk_zapped)
if (atalk_autobind(sk) < 0)
return -ENOBUFS;
*uaddr_len = sizeof(struct sockaddr_at);
if (peer) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sat.sat_addr.s_net = at->dest_net;
@@ -1506,7 +1506,7 @@
return -EMSGSIZE;
if (usat) {
- if (sk->zapped)
+ if (sk->sk_zapped)
if (atalk_autobind(sk) < 0)
return -EBUSY;
@@ -1524,7 +1524,7 @@
#endif
}
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
usat = &local_satalk;
usat->sat_family = AF_APPLETALK;
@@ -1599,7 +1599,7 @@
return -EFAULT;
}
- if (sk->no_check == 1)
+ if (sk->sk_no_check == 1)
ddp->deh_sum = 0;
else
ddp->deh_sum = atalk_checksum(ddp, len + sizeof(*ddp));
@@ -1661,7 +1661,7 @@
ddp = ddp_hdr(skb);
*((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
- if (sk->type == SOCK_RAW) {
+ if (sk->sk_type == SOCK_RAW) {
copied = ddphv.deh_len;
if (copied > size) {
copied = size;
@@ -1705,7 +1705,8 @@
switch (cmd) {
/* Protocol layer */
case TIOCOUTQ: {
- long amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ long amount = sk->sk_sndbuf -
+ atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
@@ -1717,7 +1718,7 @@
* These two are safe on a single CPU system as only
* user tasks fiddle here
*/
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
long amount = 0;
if (skb)
@@ -1729,9 +1730,9 @@
if (!sk)
break;
rc = -ENOENT;
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
break;
- rc = copy_to_user((void *)arg, &sk->stamp,
+ rc = copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)) ? -EFAULT : 0;
break;
/* Routing */
diff -urN linux-2.5.70-bk11/net/atm/atm_misc.c linux-2.5.70-bk12/net/atm/atm_misc.c
--- linux-2.5.70-bk11/net/atm/atm_misc.c 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/atm_misc.c 2003-06-07 04:47:48.000000000 -0700
@@ -16,7 +16,8 @@
int atm_charge(struct atm_vcc *vcc,int truesize)
{
atm_force_charge(vcc,truesize);
- if (atomic_read(&vcc->sk->rmem_alloc) <= vcc->sk->rcvbuf) return 1;
+ if (atomic_read(&vcc->sk->sk_rmem_alloc) <= vcc->sk->sk_rcvbuf)
+ return 1;
atm_return(vcc,truesize);
atomic_inc(&vcc->stats->rx_drop);
return 0;
@@ -29,11 +30,12 @@
int guess = atm_guess_pdu2truesize(pdu_size);
atm_force_charge(vcc,guess);
- if (atomic_read(&vcc->sk->rmem_alloc) <= vcc->sk->rcvbuf) {
+ if (atomic_read(&vcc->sk->sk_rmem_alloc) <= vcc->sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(pdu_size,gfp_flags);
if (skb) {
- atomic_add(skb->truesize-guess,&vcc->sk->rmem_alloc);
+ atomic_add(skb->truesize-guess,
+ &vcc->sk->sk_rmem_alloc);
return skb;
}
}
diff -urN linux-2.5.70-bk11/net/atm/br2684.c linux-2.5.70-bk12/net/atm/br2684.c
--- linux-2.5.70-bk11/net/atm/br2684.c 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/br2684.c 2003-06-07 04:47:48.000000000 -0700
@@ -188,7 +188,7 @@
dev_kfree_skb(skb);
return 0;
}
- atomic_add(skb->truesize, &atmvcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &atmvcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
brdev->stats.tx_packets++;
brdev->stats.tx_bytes += skb->len;
@@ -551,7 +551,7 @@
barrier();
atmvcc->push = br2684_push;
skb_queue_head_init(©);
- skb_migrate(&atmvcc->sk->receive_queue, ©);
+ skb_migrate(&atmvcc->sk->sk_receive_queue, ©);
while ((skb = skb_dequeue(©))) {
BRPRIV(skb->dev)->stats.rx_bytes -= skb->len;
BRPRIV(skb->dev)->stats.rx_packets--;
diff -urN linux-2.5.70-bk11/net/atm/clip.c linux-2.5.70-bk12/net/atm/clip.c
--- linux-2.5.70-bk11/net/atm/clip.c 2003-05-26 18:00:59.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/clip.c 2003-06-07 04:47:48.000000000 -0700
@@ -66,7 +66,7 @@
ctrl->itf_num = itf;
ctrl->ip = ip;
atm_force_charge(atmarpd,skb->truesize);
- skb_queue_tail(&atmarpd->sk->receive_queue,skb);
+ skb_queue_tail(&atmarpd->sk->sk_receive_queue, skb);
wake_up(&atmarpd->sleep);
return 0;
}
@@ -435,7 +435,7 @@
memcpy(here,llc_oui,sizeof(llc_oui));
((u16 *) here)[3] = skb->protocol;
}
- atomic_add(skb->truesize,&vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = vcc->atm_options;
entry->vccs->last_use = jiffies;
DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n",skb,vcc,vcc->dev);
@@ -493,7 +493,7 @@
vcc->push = clip_push;
vcc->pop = clip_pop;
skb_queue_head_init(©);
- skb_migrate(&vcc->sk->receive_queue,©);
+ skb_migrate(&vcc->sk->sk_receive_queue, ©);
/* re-process everything received between connection setup and MKIP */
while ((skb = skb_dequeue(©)))
if (!clip_devs) {
@@ -699,10 +699,10 @@
barrier();
unregister_inetaddr_notifier(&clip_inet_notifier);
unregister_netdevice_notifier(&clip_dev_notifier);
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
printk(KERN_ERR "atmarpd_close: closing with requests "
"pending\n");
- skb_queue_purge(&vcc->sk->receive_queue);
+ skb_queue_purge(&vcc->sk->sk_receive_queue);
DPRINTK("(done)\n");
module_put(THIS_MODULE);
}
diff -urN linux-2.5.70-bk11/net/atm/common.c linux-2.5.70-bk12/net/atm/common.c
--- linux-2.5.70-bk11/net/atm/common.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/common.c 2003-06-07 04:47:48.000000000 -0700
@@ -161,14 +161,16 @@
{
struct sk_buff *skb;
- if (atomic_read(&vcc->sk->wmem_alloc) && !atm_may_send(vcc,size)) {
+ if (atomic_read(&vcc->sk->sk_wmem_alloc) && !atm_may_send(vcc, size)) {
DPRINTK("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
- atomic_read(&vcc->sk->wmem_alloc),size,vcc->sk->sndbuf);
+ atomic_read(&vcc->sk->sk_wmem_alloc), size,
+ vcc->sk->sk_sndbuf);
return NULL;
}
while (!(skb = alloc_skb(size,GFP_KERNEL))) schedule();
- DPRINTK("AlTx %d += %d\n",atomic_read(&vcc->sk->wmem_alloc),skb->truesize);
- atomic_add(skb->truesize, &vcc->sk->wmem_alloc);
+ DPRINTK("AlTx %d += %d\n", atomic_read(&vcc->sk->sk_wmem_alloc),
+ skb->truesize);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
return skb;
}
@@ -188,15 +190,15 @@
memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc));
memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
- atomic_set(&vcc->sk->wmem_alloc,0);
- atomic_set(&vcc->sk->rmem_alloc,0);
+ atomic_set(&vcc->sk->sk_wmem_alloc, 0);
+ atomic_set(&vcc->sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
vcc->push_oam = NULL;
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
init_waitqueue_head(&vcc->sleep);
- sk->sleep = &vcc->sleep;
+ sk->sk_sleep = &vcc->sleep;
sock->sk = sk;
return 0;
}
@@ -211,17 +213,17 @@
if (vcc->dev) {
if (vcc->dev->ops->close) vcc->dev->ops->close(vcc);
if (vcc->push) vcc->push(vcc,NULL); /* atmarpd has no push */
- while ((skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
atm_return(vcc,skb->truesize);
kfree_skb(skb);
}
module_put(vcc->dev->ops->owner);
atm_dev_release(vcc->dev);
- if (atomic_read(&vcc->sk->rmem_alloc))
+ if (atomic_read(&vcc->sk->sk_rmem_alloc))
printk(KERN_WARNING "atm_release_vcc: strange ... "
"rmem_alloc == %d after closing\n",
- atomic_read(&vcc->sk->rmem_alloc));
+ atomic_read(&vcc->sk->sk_rmem_alloc));
bind_vcc(vcc,NULL);
}
@@ -431,7 +433,7 @@
add_wait_queue(&vcc->sleep,&wait);
set_current_state(TASK_INTERRUPTIBLE);
error = 1; /* <= 0 is error */
- while (!(skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while (!(skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
test_bit(ATM_VF_CLOSE,&vcc->flags)) {
error = vcc->reply;
@@ -462,7 +464,8 @@
if (vcc->dev->ops->feedback)
vcc->dev->ops->feedback(vcc,skb,(unsigned long) skb->data,
(unsigned long) buff,eff_len);
- DPRINTK("RcvM %d -= %d\n",atomic_read(&vcc->sk->rmem_alloc),skb->truesize);
+ DPRINTK("RcvM %d -= %d\n", atomic_read(&vcc->sk->sk_rmem_alloc),
+ skb->truesize);
atm_return(vcc,skb->truesize);
error = copy_to_user(buff,skb->data,eff_len) ? -EFAULT : 0;
kfree_skb(skb);
@@ -541,14 +544,15 @@
vcc = ATM_SD(sock);
poll_wait(file,&vcc->sleep,wait);
mask = 0;
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
test_bit(ATM_VF_CLOSE,&vcc->flags))
mask |= POLLHUP;
if (sock->state != SS_CONNECTING) {
if (vcc->qos.txtp.traffic_class != ATM_NONE &&
- vcc->qos.txtp.max_sdu+atomic_read(&vcc->sk->wmem_alloc) <= vcc->sk->sndbuf)
+ vcc->qos.txtp.max_sdu +
+ atomic_read(&vcc->sk->sk_wmem_alloc) <= vcc->sk->sk_sndbuf)
mask |= POLLOUT | POLLWRNORM;
}
else if (vcc->reply != WAITING) {
@@ -613,8 +617,8 @@
ret_val = -EINVAL;
goto done;
}
- ret_val = put_user(vcc->sk->sndbuf-
- atomic_read(&vcc->sk->wmem_alloc),
+ ret_val = put_user(vcc->sk->sk_sndbuf -
+ atomic_read(&vcc->sk->sk_wmem_alloc),
(int *) arg) ? -EFAULT : 0;
goto done;
case SIOCINQ:
@@ -625,7 +629,7 @@
ret_val = -EINVAL;
goto done;
}
- skb = skb_peek(&vcc->sk->receive_queue);
+ skb = skb_peek(&vcc->sk->sk_receive_queue);
ret_val = put_user(skb ? skb->len : 0,(int *) arg)
? -EFAULT : 0;
goto done;
@@ -668,11 +672,11 @@
kfree(tmp_buf);
goto done;
case SIOCGSTAMP: /* borrowed from IP */
- if (!vcc->sk->stamp.tv_sec) {
+ if (!vcc->sk->sk_stamp.tv_sec) {
ret_val = -ENOENT;
goto done;
}
- ret_val = copy_to_user((void *) arg, &vcc->sk->stamp,
+ ret_val = copy_to_user((void *)arg, &vcc->sk->sk_stamp,
sizeof(struct timeval)) ? -EFAULT : 0;
goto done;
case ATM_SETSC:
@@ -1078,7 +1082,7 @@
if (!error) error = adjust_tp(&qos->rxtp,qos->aal);
if (error) return error;
if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP;
- if (vcc->sk->family == AF_ATMPVC)
+ if (vcc->sk->sk_family == AF_ATMPVC)
return vcc->dev->ops->change_qos(vcc,qos,ATM_MF_SET);
return svc_change_qos(vcc,qos);
}
diff -urN linux-2.5.70-bk11/net/atm/lec.c linux-2.5.70-bk12/net/atm/lec.c
--- linux-2.5.70-bk11/net/atm/lec.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/lec.c 2003-06-07 04:47:48.000000000 -0700
@@ -133,7 +133,7 @@
priv = (struct lec_priv *)dev->priv;
atm_force_charge(priv->lecd, skb2->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, skb2);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, skb2);
wake_up(&priv->lecd->sleep);
}
@@ -210,7 +210,7 @@
lec_send(struct atm_vcc *vcc, struct sk_buff *skb, struct lec_priv *priv)
{
if (atm_may_send(vcc, skb->len)) {
- atomic_add(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->vcc = vcc;
ATM_SKB(skb)->atm_options = vcc->atm_options;
priv->stats.tx_packets++;
@@ -406,7 +406,7 @@
int i;
char *tmp; /* FIXME */
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
mesg = (struct atmlec_msg *)skb->data;
tmp = skb->data;
tmp += sizeof(struct atmlec_msg);
@@ -512,7 +512,7 @@
skb2->len = sizeof(struct atmlec_msg);
memcpy(skb2->data, mesg, sizeof(struct atmlec_msg));
atm_force_charge(priv->lecd, skb2->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, skb2);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, skb2);
wake_up(&priv->lecd->sleep);
}
if (f != NULL) br_fdb_put_hook(f);
@@ -541,10 +541,10 @@
netif_stop_queue(dev);
lec_arp_destroy(priv);
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
printk("%s lec_atm_close: closing with messages pending\n",
dev->name);
- while ((skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
atm_return(vcc, skb->truesize);
dev_kfree_skb(skb);
}
@@ -597,13 +597,13 @@
memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN);
atm_force_charge(priv->lecd, skb->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, skb);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, skb);
wake_up(&priv->lecd->sleep);
if (data != NULL) {
DPRINTK("lec: about to send %d bytes of data\n", data->len);
atm_force_charge(priv->lecd, data->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, data);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, data);
wake_up(&priv->lecd->sleep);
}
@@ -685,7 +685,7 @@
#endif /* DUMP_PACKETS > 0 */
if (memcmp(skb->data, lec_ctrl_magic, 4) ==0) { /* Control frame, to daemon*/
DPRINTK("%s: To daemon\n",dev->name);
- skb_queue_tail(&vcc->sk->receive_queue, skb);
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
} else { /* Data frame, queue to protocol handlers */
unsigned char *dst;
diff -urN linux-2.5.70-bk11/net/atm/mpc.c linux-2.5.70-bk12/net/atm/mpc.c
--- linux-2.5.70-bk11/net/atm/mpc.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/mpc.c 2003-06-07 04:47:48.000000000 -0700
@@ -523,7 +523,7 @@
memcpy(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr));
}
- atomic_add(skb->truesize, &entry->shortcut->sk->wmem_alloc);
+ atomic_add(skb->truesize, &entry->shortcut->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
entry->shortcut->send(entry->shortcut, skb);
entry->packets_fwded++;
@@ -667,7 +667,8 @@
skb->dev = dev;
if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) {
dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name);
- skb_queue_tail(&vcc->sk->receive_queue, skb); /* Pass control packets to daemon */
+ /* Pass control packets to daemon */
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
return;
}
@@ -847,7 +848,7 @@
mpc->in_ops->destroy_cache(mpc);
mpc->eg_ops->destroy_cache(mpc);
- while ( (skb = skb_dequeue(&vcc->sk->receive_queue)) ){
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
@@ -867,7 +868,7 @@
struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
struct k_message *mesg = (struct k_message*)skb->data;
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
if (mpc == NULL) {
printk("mpoa: msg_from_mpoad: no mpc found\n");
@@ -944,7 +945,7 @@
skb_put(skb, sizeof(struct k_message));
memcpy(skb->data, mesg, sizeof(struct k_message));
atm_force_charge(mpc->mpoad_vcc, skb->truesize);
- skb_queue_tail(&mpc->mpoad_vcc->sk->receive_queue, skb);
+ skb_queue_tail(&mpc->mpoad_vcc->sk->sk_receive_queue, skb);
wake_up(&mpc->mpoad_vcc->sleep);
return 0;
@@ -1223,7 +1224,7 @@
purge_msg->content.eg_info = entry->ctrl_info;
atm_force_charge(vcc, skb->truesize);
- skb_queue_tail(&vcc->sk->receive_queue, skb);
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
dprintk("mpoa: purge_egress_shortcut: exiting:\n");
diff -urN linux-2.5.70-bk11/net/atm/pppoatm.c linux-2.5.70-bk12/net/atm/pppoatm.c
--- linux-2.5.70-bk11/net/atm/pppoatm.c 2003-05-26 18:00:42.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/pppoatm.c 2003-06-07 04:47:49.000000000 -0700
@@ -231,7 +231,7 @@
kfree_skb(skb);
return 1;
}
- atomic_add(skb->truesize, &ATM_SKB(skb)->vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &ATM_SKB(skb)->vcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
DPRINTK("(unit %d): atm_skb(%p)->vcc(%p)->dev(%p)\n",
pvcc->chan.unit, skb, ATM_SKB(skb)->vcc,
diff -urN linux-2.5.70-bk11/net/atm/proc.c linux-2.5.70-bk12/net/atm/proc.c
--- linux-2.5.70-bk11/net/atm/proc.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/proc.c 2003-06-07 04:47:49.000000000 -0700
@@ -136,7 +136,7 @@
unsigned char *ip;
int svc,off,ip_len;
- svc = !clip_vcc || clip_vcc->vcc->sk->family == AF_ATMSVC;
+ svc = !clip_vcc || clip_vcc->vcc->sk->sk_family == AF_ATMSVC;
off = sprintf(buf,"%-6s%-4s%-4s%5ld ",dev->name,svc ? "SVC" : "PVC",
!clip_vcc || clip_vcc->encap ? "LLC" : "NULL",
(jiffies-(clip_vcc ? clip_vcc->last_use : entry->neigh->used))/
@@ -213,7 +213,7 @@
if (!vcc->dev) here += sprintf(here,"Unassigned ");
else here += sprintf(here,"%3d %3d %5d ",vcc->dev->number,vcc->vpi,
vcc->vci);
- switch (vcc->sk->family) {
+ switch (vcc->sk->sk_family) {
case AF_ATMPVC:
here += sprintf(here,"PVC");
break;
@@ -221,12 +221,12 @@
here += sprintf(here,"SVC");
break;
default:
- here += sprintf(here,"%3d",vcc->sk->family);
+ here += sprintf(here, "%3d", vcc->sk->sk_family);
}
here += sprintf(here," %04lx %5d %7d/%7d %7d/%7d\n",vcc->flags,
vcc->reply,
- atomic_read(&vcc->sk->wmem_alloc),vcc->sk->sndbuf,
- atomic_read(&vcc->sk->rmem_alloc),vcc->sk->rcvbuf);
+ atomic_read(&vcc->sk->sk_wmem_alloc), vcc->sk->sk_sndbuf,
+ atomic_read(&vcc->sk->sk_rmem_alloc), vcc->sk->sk_rcvbuf);
}
@@ -354,7 +354,8 @@
dev = list_entry(p, struct atm_dev, dev_list);
spin_lock_irqsave(&dev->lock, flags);
for (vcc = dev->vccs; vcc; vcc = vcc->next)
- if (vcc->sk->family == PF_ATMPVC && vcc->dev && !left--) {
+ if (vcc->sk->sk_family == PF_ATMPVC &&
+ vcc->dev && !left--) {
pvc_info(vcc,buf,clip_info);
spin_unlock_irqrestore(&dev->lock, flags);
spin_unlock(&atm_dev_lock);
@@ -423,7 +424,7 @@
dev = list_entry(p, struct atm_dev, dev_list);
spin_lock_irqsave(&dev->lock, flags);
for (vcc = dev->vccs; vcc; vcc = vcc->next)
- if (vcc->sk->family == PF_ATMSVC && !left--) {
+ if (vcc->sk->sk_family == PF_ATMSVC && !left--) {
svc_info(vcc,buf);
spin_unlock_irqrestore(&dev->lock, flags);
spin_unlock(&atm_dev_lock);
diff -urN linux-2.5.70-bk11/net/atm/raw.c linux-2.5.70-bk12/net/atm/raw.c
--- linux-2.5.70-bk11/net/atm/raw.c 2003-05-26 18:00:58.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/raw.c 2003-06-07 04:47:49.000000000 -0700
@@ -28,7 +28,7 @@
void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
{
if (skb) {
- skb_queue_tail(&vcc->sk->receive_queue,skb);
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
}
}
@@ -36,8 +36,9 @@
static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
{
- DPRINTK("APopR (%d) %d -= %d\n",vcc->vci,vcc->sk->wmem_alloc,skb->truesize);
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ DPRINTK("APopR (%d) %d -= %d\n", vcc->vci, vcc->sk->sk_wmem_alloc,
+ skb->truesize);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
dev_kfree_skb_any(skb);
wake_up(&vcc->sleep);
}
diff -urN linux-2.5.70-bk11/net/atm/signaling.c linux-2.5.70-bk12/net/atm/signaling.c
--- linux-2.5.70-bk11/net/atm/signaling.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/signaling.c 2003-06-07 04:47:49.000000000 -0700
@@ -60,7 +60,7 @@
}
#endif
atm_force_charge(sigd,skb->truesize);
- skb_queue_tail(&sigd->sk->receive_queue,skb);
+ skb_queue_tail(&sigd->sk->sk_receive_queue,skb);
wake_up(&sigd->sleep);
}
@@ -97,7 +97,7 @@
struct atm_vcc *session_vcc;
msg = (struct atmsvc_msg *) skb->data;
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
DPRINTK("sigd_send %d (0x%lx)\n",(int) msg->type,
(unsigned long) msg->vcc);
vcc = *(struct atm_vcc **) &msg->vcc;
@@ -128,12 +128,13 @@
case as_indicate:
vcc = *(struct atm_vcc **) &msg->listen_vcc;
DPRINTK("as_indicate!!!\n");
- if (vcc->sk->ack_backlog == vcc->sk->max_ack_backlog) {
+ if (vcc->sk->sk_ack_backlog ==
+ vcc->sk->sk_max_ack_backlog) {
sigd_enq(0,as_reject,vcc,NULL,NULL);
return 0;
}
- vcc->sk->ack_backlog++;
- skb_queue_tail(&vcc->sk->receive_queue,skb);
+ vcc->sk->sk_ack_backlog++;
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
if (vcc->callback) {
DPRINTK("waking vcc->sleep 0x%p\n",
&vcc->sleep);
@@ -197,7 +198,7 @@
static void purge_vccs(struct atm_vcc *vcc)
{
while (vcc) {
- if (vcc->sk->family == PF_ATMSVC &&
+ if (vcc->sk->sk_family == PF_ATMSVC &&
!test_bit(ATM_VF_META,&vcc->flags)) {
set_bit(ATM_VF_RELEASED,&vcc->flags);
vcc->reply = -EUNATCH;
@@ -216,9 +217,9 @@
DPRINTK("sigd_close\n");
sigd = NULL;
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
printk(KERN_ERR "sigd_close: closing with requests pending\n");
- skb_queue_purge(&vcc->sk->receive_queue);
+ skb_queue_purge(&vcc->sk->sk_receive_queue);
spin_lock(&atm_dev_lock);
list_for_each(p, &atm_devs) {
diff -urN linux-2.5.70-bk11/net/atm/svc.c linux-2.5.70-bk12/net/atm/svc.c
--- linux-2.5.70-bk11/net/atm/svc.c 2003-05-26 18:00:42.000000000 -0700
+++ linux-2.5.70-bk12/net/atm/svc.c 2003-06-07 04:47:49.000000000 -0700
@@ -74,7 +74,7 @@
}
/* beware - socket is still in use by atmsigd until the last
as_indicate has been answered */
- while ((skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
DPRINTK("LISTEN REL\n");
sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0);
dev_kfree_skb(skb);
@@ -253,7 +253,8 @@
remove_wait_queue(&vcc->sleep,&wait);
if (!sigd) return -EUNATCH;
set_bit(ATM_VF_LISTEN,&vcc->flags);
- vcc->sk->max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
+ vcc->sk->sk_max_ack_backlog = backlog > 0 ? backlog :
+ ATM_BACKLOG_DEFAULT;
return vcc->reply;
}
@@ -277,7 +278,8 @@
DECLARE_WAITQUEUE(wait,current);
add_wait_queue(&old_vcc->sleep,&wait);
- while (!(skb = skb_dequeue(&old_vcc->sk->receive_queue)) && sigd) {
+ while (!(skb = skb_dequeue(&old_vcc->sk->sk_receive_queue)) &&
+ sigd) {
if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
error = old_vcc->reply;
@@ -306,7 +308,7 @@
error = atm_connect(newsock,msg->pvc.sap_addr.itf,
msg->pvc.sap_addr.vpi,msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
- old_vcc->sk->ack_backlog--;
+ old_vcc->sk->sk_ack_backlog--;
if (error) {
sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
&old_vcc->qos,error);
diff -urN linux-2.5.70-bk11/net/ax25/af_ax25.c linux-2.5.70-bk12/net/ax25/af_ax25.c
--- linux-2.5.70-bk11/net/ax25/af_ax25.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/af_ax25.c 2003-06-07 04:47:49.000000000 -0700
@@ -174,7 +174,8 @@
for (s = ax25_list; s != NULL; s = s->next) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
- if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && s->sk->type == type && s->sk->state == TCP_LISTEN) {
+ if (s->sk && !ax25cmp(&s->source_addr, addr) &&
+ s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
spin_unlock_bh(&ax25_list_lock);
@@ -199,7 +200,9 @@
spin_lock_bh(&ax25_list_lock);
for (s = ax25_list; s != NULL; s = s->next) {
- if (s->sk != NULL && ax25cmp(&s->source_addr, my_addr) == 0 && ax25cmp(&s->dest_addr, dest_addr) == 0 && s->sk->type == type) {
+ if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
+ !ax25cmp(&s->dest_addr, dest_addr) &&
+ s->sk->sk_type == type) {
sk = s->sk;
/* XXX Sleeps with spinlock held, use refcounts instead. XXX */
lock_sock(sk);
@@ -223,7 +226,7 @@
spin_lock_bh(&ax25_list_lock);
for (s = ax25_list; s != NULL; s = s->next) {
- if (s->sk != NULL && s->sk->type != SOCK_SEQPACKET)
+ if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
continue;
if (s->ax25_dev == NULL)
continue;
@@ -258,7 +261,7 @@
spin_lock_bh(&ax25_list_lock);
for (s = ax25_list; s != NULL; s = s->next) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
- s->sk->type == SOCK_RAW) {
+ s->sk->sk_type == SOCK_RAW) {
sk = s->sk;
lock_sock(sk);
break;
@@ -274,9 +277,9 @@
struct sk_buff *copy;
while (sk != NULL) {
- if (sk->type == SOCK_RAW &&
- sk->protocol == proto &&
- atomic_read(&sk->rmem_alloc) <= sk->rcvbuf) {
+ if (sk->sk_type == SOCK_RAW &&
+ sk->sk_protocol == proto &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
return;
@@ -284,7 +287,7 @@
kfree_skb(copy);
}
- sk = sk->next;
+ sk = sk->sk_next;
}
}
@@ -322,7 +325,7 @@
ax25_clear_queues(ax25); /* Flush the queues */
if (ax25->sk != NULL) {
- while ((skb = skb_dequeue(&ax25->sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
if (skb->sk != ax25->sk) {
/* A pending connection */
ax25_cb *sax25 = ax25_sk(skb->sk);
@@ -339,8 +342,8 @@
}
if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->wmem_alloc) != 0 ||
- atomic_read(&ax25->sk->rmem_alloc) != 0) {
+ if (atomic_read(&ax25->sk->sk_wmem_alloc) ||
+ atomic_read(&ax25->sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
init_timer(&ax25->timer);
ax25->timer.expires = jiffies + 10 * HZ;
@@ -650,8 +653,9 @@
break;
}
- if (sk->type == SOCK_SEQPACKET &&
- (sock->state != SS_UNCONNECTED || sk->state == TCP_LISTEN)) {
+ if (sk->sk_type == SOCK_SEQPACKET &&
+ (sock->state != SS_UNCONNECTED ||
+ sk->sk_state == TCP_LISTEN)) {
res = -EADDRNOTAVAIL;
break;
}
@@ -771,9 +775,9 @@
int res = 0;
lock_sock(sk);
- if (sk->type == SOCK_SEQPACKET && sk->state != TCP_LISTEN) {
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) {
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
goto out;
}
res = -EOPNOTSUPP;
@@ -846,9 +850,9 @@
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = ax25_free_sock;
+ sk->sk_destruct = ax25_free_sock;
sock->ops = &ax25_proto_ops;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
ax25->sk = sk;
@@ -868,7 +872,7 @@
return NULL;
}
- switch (osk->type) {
+ switch (osk->sk_type) {
case SOCK_DGRAM:
break;
case SOCK_SEQPACKET:
@@ -882,17 +886,17 @@
sock_init_data(NULL, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = ax25_free_sock;
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
+ sk->sk_destruct = ax25_free_sock;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
oax25 = ax25_sk(osk);
@@ -938,7 +942,7 @@
lock_sock(sk);
ax25 = ax25_sk(sk);
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
switch (ax25->state) {
case AX25_STATE_0:
ax25_disconnect(ax25, 0);
@@ -978,9 +982,9 @@
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25->state = AX25_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
@@ -989,15 +993,15 @@
break;
}
} else {
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
ax25_destroy_socket(ax25);
}
sock->sk = NULL;
- sk->socket = NULL; /* Not used, but we should do this */
+ sk->sk_socket = NULL; /* Not used, but we should do this */
release_sock(sk);
return 0;
@@ -1041,7 +1045,7 @@
lock_sock(sk);
ax25 = ax25_sk(sk);
- if (sk->zapped == 0) {
+ if (!sk->sk_zapped) {
err = -EINVAL;
goto out;
}
@@ -1075,7 +1079,7 @@
done:
ax25_insert_socket(ax25);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
out:
release_sock(sk);
@@ -1122,7 +1126,7 @@
/* deal with restarts */
if (sock->state == SS_CONNECTING) {
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_SYN_SENT: /* still trying */
err = -EINPROGRESS;
goto out;
@@ -1138,12 +1142,12 @@
}
}
- if (sk->state == TCP_ESTABLISHED && sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out;
}
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (ax25->digipeat != NULL) {
@@ -1188,7 +1192,7 @@
* the socket is already bound, check to see if the device has
* been filled in, error if it hasn't.
*/
- if (sk->zapped) {
+ if (sk->sk_zapped) {
/* check if we can remove this feature. It is broken. */
printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n",
current->comm);
@@ -1204,7 +1208,7 @@
}
}
- if (sk->type == SOCK_SEQPACKET &&
+ if (sk->sk_type == SOCK_SEQPACKET &&
ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
ax25->ax25_dev->dev)) {
if (digi != NULL)
@@ -1217,15 +1221,15 @@
ax25->digipeat = digi;
/* First the easy one */
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
goto out;
}
/* Move to connecting socket, ax.25 lapb WAIT_UA.. */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
@@ -1250,18 +1254,18 @@
ax25_start_heartbeat(ax25);
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out;
}
- if (sk->state == TCP_SYN_SENT) {
+ if (sk->sk_state == TCP_SYN_SENT) {
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- if (sk->state != TCP_SYN_SENT)
+ if (sk->sk_state != TCP_SYN_SENT)
break;
set_current_state(TASK_INTERRUPTIBLE);
release_sock(sk);
@@ -1273,10 +1277,10 @@
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
}
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
/* Not in ABM, not in WAIT_UA -> failed */
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
@@ -1308,12 +1312,12 @@
return -EINVAL;
lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
@@ -1322,9 +1326,9 @@
* The read queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -1340,16 +1344,16 @@
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
- newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk = skb->sk;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
newsock->state = SS_CONNECTED;
@@ -1372,7 +1376,7 @@
ax25 = ax25_sk(sk);
if (peer != 0) {
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
@@ -1426,12 +1430,12 @@
lock_sock(sk);
ax25 = ax25_sk(sk);
- if (sk->zapped) {
+ if (sk->sk_zapped) {
err = -EADDRNOTAVAIL;
goto out;
}
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
@@ -1486,7 +1490,8 @@
}
sax = *usax;
- if (sk->type == SOCK_SEQPACKET && ax25cmp(&ax25->dest_addr, &sax.sax25_call) != 0) {
+ if (sk->sk_type == SOCK_SEQPACKET &&
+ ax25cmp(&ax25->dest_addr, &sax.sax25_call)) {
err = -EISCONN;
goto out;
}
@@ -1500,7 +1505,7 @@
* it has become closed (not started closed) and is VC
* we ought to SIGPIPE, EPIPE
*/
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
@@ -1532,14 +1537,14 @@
/* Add the PID if one is not supplied by the user in the skb */
if (!ax25->pidincl) {
asmptr = skb_push(skb, 1);
- *asmptr = sk->protocol;
+ *asmptr = sk->sk_protocol;
}
SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n");
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
/* Connected mode sockets go via the LAPB machine */
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
@@ -1598,7 +1603,7 @@
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
- if (sk->type == SOCK_SEQPACKET && sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
@@ -1670,7 +1675,7 @@
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
res = put_user(amount, (int *)arg);
@@ -1681,7 +1686,7 @@
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
res = put_user(amount, (int *)arg);
break;
@@ -1689,11 +1694,12 @@
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0) {
+ if (!sk->sk_stamp.tv_sec) {
res = -ENOENT;
break;
}
- res = copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ res = copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)) ? -EFAULT : 0;
break;
}
res = -EINVAL;
@@ -1764,8 +1770,8 @@
ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
ax25_info.n2count = ax25->n2count;
ax25_info.state = ax25->state;
- ax25_info.rcv_q = atomic_read(&sk->rmem_alloc);
- ax25_info.snd_q = atomic_read(&sk->wmem_alloc);
+ ax25_info.rcv_q = atomic_read(&sk->sk_rmem_alloc);
+ ax25_info.snd_q = atomic_read(&sk->sk_wmem_alloc);
ax25_info.vs = ax25->vs;
ax25_info.vr = ax25->vr;
ax25_info.va = ax25->va;
@@ -1878,9 +1884,9 @@
if (ax25->sk != NULL) {
len += sprintf(buffer + len, " %d %d %ld\n",
- atomic_read(&ax25->sk->wmem_alloc),
- atomic_read(&ax25->sk->rmem_alloc),
- ax25->sk->socket != NULL ? SOCK_INODE(ax25->sk->socket)->i_ino : 0L);
+ atomic_read(&ax25->sk->sk_wmem_alloc),
+ atomic_read(&ax25->sk->sk_rmem_alloc),
+ ax25->sk->sk_socket != NULL ? SOCK_INODE(ax25->sk->sk_socket)->i_ino : 0L);
} else {
len += sprintf(buffer + len, " * * *\n");
}
diff -urN linux-2.5.70-bk11/net/ax25/ax25_dev.c linux-2.5.70-bk12/net/ax25/ax25_dev.c
--- linux-2.5.70-bk11/net/ax25/ax25_dev.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_dev.c 2003-06-07 04:47:49.000000000 -0700
@@ -34,21 +34,6 @@
ax25_dev *ax25_dev_list;
spinlock_t ax25_dev_lock = SPIN_LOCK_UNLOCKED;
-ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
-{
- ax25_dev *ax25_dev, *res = NULL;
-
- spin_lock_bh(&ax25_dev_lock);
- for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
- if (ax25_dev->dev == dev) {
- res = ax25_dev;
- break;
- }
- spin_unlock_bh(&ax25_dev_lock);
-
- return res;
-}
-
ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
{
ax25_dev *ax25_dev, *res = NULL;
@@ -80,6 +65,7 @@
memset(ax25_dev, 0x00, sizeof(*ax25_dev));
+ dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
ax25_dev->forward = NULL;
@@ -152,6 +138,7 @@
s = s->next;
}
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
ax25_register_sysctl();
}
diff -urN linux-2.5.70-bk11/net/ax25/ax25_ds_in.c linux-2.5.70-bk12/net/ax25/ax25_ds_in.c
--- linux-2.5.70-bk11/net/ax25/ax25_ds_in.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_ds_in.c 2003-06-07 04:47:49.000000000 -0700
@@ -65,13 +65,13 @@
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_ESTABLISHED;
+ ax25->sk->sk_state = TCP_ESTABLISHED;
/*
* For WAIT_SABM connections we will produce an accept
* ready socket here
*/
if (!sock_flag(ax25->sk, SOCK_DEAD))
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
}
ax25_dama_on(ax25);
diff -urN linux-2.5.70-bk11/net/ax25/ax25_ds_timer.c linux-2.5.70-bk12/net/ax25/ax25_ds_timer.c
--- linux-2.5.70-bk11/net/ax25/ax25_ds_timer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_ds_timer.c 2003-06-07 04:47:49.000000000 -0700
@@ -104,7 +104,7 @@
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!ax25->sk || sock_flag(ax25->sk, SOCK_DESTROY) ||
- (ax25->sk->state == TCP_LISTEN &&
+ (ax25->sk->sk_state == TCP_LISTEN &&
sock_flag(ax25->sk, SOCK_DEAD))) {
ax25_destroy_socket(ax25);
return;
@@ -116,7 +116,8 @@
* Check the state of the receive buffer.
*/
if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->rmem_alloc) < (ax25->sk->rcvbuf / 2) &&
+ if (atomic_read(&ax25->sk->sk_rmem_alloc) <
+ (ax25->sk->sk_rcvbuf / 2) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
@@ -156,11 +157,11 @@
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_CLOSE;
- ax25->sk->err = 0;
- ax25->sk->shutdown |= SEND_SHUTDOWN;
+ ax25->sk->sk_state = TCP_CLOSE;
+ ax25->sk->sk_err = 0;
+ ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
}
diff -urN linux-2.5.70-bk11/net/ax25/ax25_in.c linux-2.5.70-bk12/net/ax25/ax25_in.c
--- linux-2.5.70-bk11/net/ax25/ax25_in.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_in.c 2003-06-07 04:47:49.000000000 -0700
@@ -147,7 +147,8 @@
}
if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
- if ((!ax25->pidincl && ax25->sk->protocol == pid) || ax25->pidincl) {
+ if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
+ ax25->pidincl) {
if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
queued = 1;
else
@@ -277,7 +278,8 @@
/* Now find a suitable dgram socket */
sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
if (sk != NULL) {
- if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ sk->sk_rcvbuf) {
kfree_skb(skb);
} else {
/*
@@ -355,7 +357,7 @@
sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
if (sk != NULL) {
- if (sk->ack_backlog == sk->max_ack_backlog ||
+ if (sk->sk_ack_backlog == sk->sk_max_ack_backlog ||
(make = ax25_make_new(sk, ax25_dev)) == NULL) {
if (mine)
ax25_return_dm(dev, &src, &dest, &dp);
@@ -366,12 +368,12 @@
ax25 = ax25_sk(make);
skb_set_owner_r(skb, make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
- make->state = TCP_ESTABLISHED;
- make->pair = sk;
+ make->sk_state = TCP_ESTABLISHED;
+ make->sk_pair = sk;
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
} else {
if (!mine) {
kfree_skb(skb);
@@ -435,7 +437,7 @@
if (sk) {
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
} else
kfree_skb(skb);
diff -urN linux-2.5.70-bk11/net/ax25/ax25_route.c linux-2.5.70-bk12/net/ax25/ax25_route.c
--- linux-2.5.70-bk11/net/ax25/ax25_route.c 2003-05-26 18:00:43.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_route.c 2003-06-07 04:47:49.000000000 -0700
@@ -435,7 +435,7 @@
}
if (ax25->sk != NULL)
- ax25->sk->zapped = 0;
+ ax25->sk->sk_zapped = 0;
put:
ax25_put_route(ax25_rt);
diff -urN linux-2.5.70-bk11/net/ax25/ax25_std_in.c linux-2.5.70-bk12/net/ax25/ax25_std_in.c
--- linux-2.5.70-bk11/net/ax25/ax25_std_in.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_std_in.c 2003-06-07 04:47:49.000000000 -0700
@@ -73,10 +73,10 @@
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_ESTABLISHED;
+ ax25->sk->sk_state = TCP_ESTABLISHED;
/* For WAIT_SABM connections we will produce an accept ready socket here */
if (!sock_flag(ax25->sk, SOCK_DEAD))
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
}
}
break;
diff -urN linux-2.5.70-bk11/net/ax25/ax25_std_timer.c linux-2.5.70-bk12/net/ax25/ax25_std_timer.c
--- linux-2.5.70-bk11/net/ax25/ax25_std_timer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_std_timer.c 2003-06-07 04:47:49.000000000 -0700
@@ -38,7 +38,7 @@
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!ax25->sk || sock_flag(ax25->sk, SOCK_DESTROY) ||
- (ax25->sk->state == TCP_LISTEN &&
+ (ax25->sk->sk_state == TCP_LISTEN &&
sock_flag(ax25->sk, SOCK_DEAD))) {
ax25_destroy_socket(ax25);
return;
@@ -51,7 +51,8 @@
* Check the state of the receive buffer.
*/
if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->rmem_alloc) < (ax25->sk->rcvbuf / 2) &&
+ if (atomic_read(&ax25->sk->sk_rmem_alloc) <
+ (ax25->sk->sk_rcvbuf / 2) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
@@ -93,11 +94,11 @@
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_CLOSE;
- ax25->sk->err = 0;
- ax25->sk->shutdown |= SEND_SHUTDOWN;
+ ax25->sk->sk_state = TCP_CLOSE;
+ ax25->sk->sk_err = 0;
+ ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
}
diff -urN linux-2.5.70-bk11/net/ax25/ax25_subr.c linux-2.5.70-bk12/net/ax25/ax25_subr.c
--- linux-2.5.70-bk11/net/ax25/ax25_subr.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ax25/ax25_subr.c 2003-06-07 04:47:49.000000000 -0700
@@ -282,11 +282,11 @@
ax25_link_failed(ax25, reason);
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_CLOSE;
- ax25->sk->err = reason;
- ax25->sk->shutdown |= SEND_SHUTDOWN;
+ ax25->sk->sk_state = TCP_CLOSE;
+ ax25->sk->sk_err = reason;
+ ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
}
diff -urN linux-2.5.70-bk11/net/bluetooth/af_bluetooth.c linux-2.5.70-bk12/net/bluetooth/af_bluetooth.c
--- linux-2.5.70-bk11/net/bluetooth/af_bluetooth.c 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/af_bluetooth.c 2003-06-07 04:47:49.000000000 -0700
@@ -126,15 +126,15 @@
return NULL;
}
memset(pi, 0, pi_size);
- sk->protinfo = pi;
+ sk->sk_protinfo = pi;
}
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
- sk->zapped = 0;
- sk->protocol = proto;
- sk->state = BT_OPEN;
+ sk->sk_zapped = 0;
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
return sk;
}
@@ -142,7 +142,7 @@
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
- sk->next = l->head;
+ sk->sk_next = l->head;
l->head = sk;
sock_hold(sk);
write_unlock_bh(&l->lock);
@@ -153,9 +153,9 @@
struct sock **skp;
write_lock_bh(&l->lock);
- for (skp = &l->head; *skp; skp = &((*skp)->next)) {
+ for (skp = &l->head; *skp; skp = &((*skp)->sk_next)) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -170,15 +170,15 @@
sock_hold(sk);
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
bt_sk(sk)->parent = parent;
- parent->ack_backlog++;
+ parent->sk_ack_backlog++;
}
static void bt_accept_unlink(struct sock *sk)
{
- BT_DBG("sk %p state %d", sk, sk->state);
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
list_del_init(&bt_sk(sk)->accept_q);
- bt_sk(sk)->parent->ack_backlog--;
+ bt_sk(sk)->parent->sk_ack_backlog--;
bt_sk(sk)->parent = NULL;
sock_put(sk);
}
@@ -194,13 +194,13 @@
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
lock_sock(sk);
- if (sk->state == BT_CLOSED) {
+ if (sk->sk_state == BT_CLOSED) {
release_sock(sk);
bt_accept_unlink(sk);
continue;
}
- if (sk->state == BT_CONNECTED || !newsock) {
+ if (sk->sk_state == BT_CONNECTED || !newsock) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@@ -226,7 +226,7 @@
return -EOPNOTSUPP;
if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
@@ -254,30 +254,30 @@
BT_DBG("sock %p, sk %p", sock, sk);
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
- if (sk->err || !skb_queue_empty(&sk->error_queue))
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
- if (!skb_queue_empty(&sk->receive_queue) ||
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
!list_empty(&bt_sk(sk)->accept_q) ||
- (sk->shutdown & RCV_SHUTDOWN))
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
- if (sk->state == BT_CLOSED)
+ if (sk->sk_state == BT_CLOSED)
mask |= POLLHUP;
- if (sk->state == BT_CONNECT || sk->state == BT_CONNECT2)
+ if (sk->sk_state == BT_CONNECT || sk->sk_state == BT_CONNECT2)
return mask;
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
return mask;
}
@@ -290,8 +290,8 @@
BT_DBG("sk %p", sk);
- add_wait_queue(sk->sleep, &wait);
- while (sk->state != BT_CONNECTED) {
+ add_wait_queue(sk->sk_sleep, &wait);
+ while (sk->sk_state != BT_CONNECTED) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
@@ -303,10 +303,10 @@
lock_sock(sk);
err = 0;
- if (sk->state == BT_CONNECTED)
+ if (sk->sk_state == BT_CONNECTED)
break;
- if (sk->err) {
+ if (sk->sk_err) {
err = sock_error(sk);
break;
}
@@ -317,7 +317,7 @@
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return err;
}
diff -urN linux-2.5.70-bk11/net/bluetooth/bnep/core.c linux-2.5.70-bk12/net/bluetooth/bnep/core.c
--- linux-2.5.70-bk11/net/bluetooth/bnep/core.c 2003-05-26 18:00:43.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/bnep/core.c 2003-06-07 04:47:49.000000000 -0700
@@ -465,21 +465,21 @@
set_fs(KERNEL_DS);
init_waitqueue_entry(&wait, current);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&s->killed)) {
set_current_state(TASK_INTERRUPTIBLE);
// RX
- while ((skb = skb_dequeue(&sk->receive_queue))) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
bnep_rx_frame(s, skb);
}
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
break;
// TX
- while ((skb = skb_dequeue(&sk->write_queue)))
+ while ((skb = skb_dequeue(&sk->sk_write_queue)))
if (bnep_tx_frame(s, skb))
break;
netif_wake_queue(dev);
@@ -487,7 +487,7 @@
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
/* Cleanup session */
down_write(&bnep_session_sem);
@@ -609,11 +609,11 @@
if (s) {
/* Wakeup user-space which is polling for socket errors.
* This is temporary hack untill we have shutdown in L2CAP */
- s->sock->sk->err = EUNATCH;
+ s->sock->sk->sk_err = EUNATCH;
/* Kill session thread */
atomic_inc(&s->killed);
- wake_up_interruptible(s->sock->sk->sleep);
+ wake_up_interruptible(s->sock->sk->sk_sleep);
} else
err = -ENOENT;
diff -urN linux-2.5.70-bk11/net/bluetooth/bnep/netdev.c linux-2.5.70-bk12/net/bluetooth/bnep/netdev.c
--- linux-2.5.70-bk11/net/bluetooth/bnep/netdev.c 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/bnep/netdev.c 2003-06-07 04:47:49.000000000 -0700
@@ -121,8 +121,8 @@
r->len = htons(skb->len - len);
}
- skb_queue_tail(&sk->write_queue, skb);
- wake_up_interruptible(sk->sleep);
+ skb_queue_tail(&sk->sk_write_queue, skb);
+ wake_up_interruptible(sk->sk_sleep);
#endif
}
@@ -209,13 +209,13 @@
/*
* We cannot send L2CAP packets from here as we are potentially in a bh.
* So we have to queue them and wake up session thread which is sleeping
- * on the sk->sleep.
+ * on the sk->sk_sleep.
*/
dev->trans_start = jiffies;
- skb_queue_tail(&sk->write_queue, skb);
- wake_up_interruptible(sk->sleep);
+ skb_queue_tail(&sk->sk_write_queue, skb);
+ wake_up_interruptible(sk->sk_sleep);
- if (skb_queue_len(&sk->write_queue) >= BNEP_TX_QUEUE_LEN) {
+ if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
BT_DBG("tx queue is full");
/* Stop queuing.
diff -urN linux-2.5.70-bk11/net/bluetooth/bnep/sock.c linux-2.5.70-bk12/net/bluetooth/bnep/sock.c
--- linux-2.5.70-bk11/net/bluetooth/bnep/sock.c 2003-05-26 18:01:03.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/bnep/sock.c 2003-06-07 04:47:49.000000000 -0700
@@ -93,7 +93,7 @@
if (!nsock)
return err;
- if (nsock->sk->state != BT_CONNECTED)
+ if (nsock->sk->sk_state != BT_CONNECTED)
return -EBADFD;
err = bnep_add_connection(&ca, nsock);
@@ -179,8 +179,8 @@
sock->state = SS_UNCONNECTED;
- sk->destruct = NULL;
- sk->protocol = protocol;
+ sk->sk_destruct = NULL;
+ sk->sk_protocol = protocol;
return 0;
}
diff -urN linux-2.5.70-bk11/net/bluetooth/hci_sock.c linux-2.5.70-bk12/net/bluetooth/hci_sock.c
--- linux-2.5.70-bk11/net/bluetooth/hci_sock.c 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/hci_sock.c 2003-06-07 04:47:49.000000000 -0700
@@ -95,11 +95,11 @@
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
- for (sk = hci_sk_list.head; sk; sk = sk->next) {
+ for (sk = hci_sk_list.head; sk; sk = sk->sk_next) {
struct hci_filter *flt;
struct sk_buff *nskb;
- if (sk->state != BT_BOUND || hci_pi(sk)->hdev != hdev)
+ if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
continue;
/* Don't send frame to the socket it came from */
@@ -157,8 +157,8 @@
sock_orphan(sk);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
sock_put(sk);
return 0;
@@ -283,7 +283,7 @@
}
hci_pi(sk)->hdev = hdev;
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -330,7 +330,7 @@
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- if (sk->state == BT_CLOSED)
+ if (sk->sk_state == BT_CLOSED)
return 0;
if (!(skb = skb_recv_datagram(sk, flags, noblock, &err)))
@@ -587,7 +587,7 @@
return -ENOMEM;
sock->state = SS_UNCONNECTED;
- sk->state = BT_OPEN;
+ sk->sk_state = BT_OPEN;
bt_sock_link(&hci_sk_list, sk);
return 0;
@@ -610,13 +610,13 @@
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
- for (sk = hci_sk_list.head; sk; sk = sk->next) {
+ for (sk = hci_sk_list.head; sk; sk = sk->sk_next) {
bh_lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
- sk->err = EPIPE;
- sk->state = BT_OPEN;
- sk->state_change(sk);
+ sk->sk_err = EPIPE;
+ sk->sk_state = BT_OPEN;
+ sk->sk_state_change(sk);
hci_dev_put(hdev);
}
diff -urN linux-2.5.70-bk11/net/bluetooth/l2cap.c linux-2.5.70-bk12/net/bluetooth/l2cap.c
--- linux-2.5.70-bk11/net/bluetooth/l2cap.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/l2cap.c 2003-06-07 04:47:49.000000000 -0700
@@ -86,7 +86,7 @@
{
struct sock *sk = (struct sock *) arg;
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
__l2cap_sock_close(sk, ETIMEDOUT);
@@ -98,25 +98,25 @@
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
- BT_DBG("sk %p state %d timeout %ld", sk, sk->state, timeout);
+ BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
- if (!mod_timer(&sk->timer, jiffies + timeout))
+ if (!mod_timer(&sk->sk_timer, jiffies + timeout))
sock_hold(sk);
}
static void l2cap_sock_clear_timer(struct sock *sk)
{
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
- if (timer_pending(&sk->timer) && del_timer(&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
__sock_put(sk);
}
static void l2cap_sock_init_timer(struct sock *sk)
{
- init_timer(&sk->timer);
- sk->timer.function = l2cap_sock_timeout;
- sk->timer.data = (unsigned long)sk;
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.function = l2cap_sock_timeout;
+ sk->sk_timer.data = (unsigned long)sk;
}
/* ---- L2CAP connections ---- */
@@ -186,7 +186,7 @@
static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
{
struct sock *sk;
- for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next) {
if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
break;
}
@@ -200,8 +200,8 @@
{
struct sock *sk, *sk1 = NULL;
- for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
- if (state && sk->state != state)
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next) {
+ if (state && sk->sk_state != state)
continue;
if (l2cap_pi(sk)->psm == psm) {
@@ -233,11 +233,11 @@
{
BT_DBG("sk %p", sk);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
static void l2cap_sock_cleanup_listen(struct sock *parent)
@@ -250,8 +250,8 @@
while ((sk = bt_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);
- parent->state = BT_CLOSED;
- parent->zapped = 1;
+ parent->sk_state = BT_CLOSED;
+ parent->sk_zapped = 1;
}
/* Kill socket (only if zapped and orphan)
@@ -259,10 +259,10 @@
*/
static void l2cap_sock_kill(struct sock *sk)
{
- if (!sk->zapped || sk->socket)
+ if (!sk->sk_zapped || sk->sk_socket)
return;
- BT_DBG("sk %p state %d", sk, sk->state);
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&l2cap_sk_list, sk);
@@ -272,9 +272,9 @@
static void __l2cap_sock_close(struct sock *sk, int reason)
{
- BT_DBG("sk %p state %d socket %p", sk, sk->state, sk->socket);
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
- switch (sk->state) {
+ switch (sk->sk_state) {
case BT_LISTEN:
l2cap_sock_cleanup_listen(sk);
break;
@@ -282,11 +282,11 @@
case BT_CONNECTED:
case BT_CONFIG:
case BT_CONNECT2:
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_disconn_req req;
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ * 5);
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
@@ -303,7 +303,7 @@
break;
default:
- sk->zapped = 1;
+ sk->sk_zapped = 1;
break;
}
}
@@ -327,7 +327,7 @@
BT_DBG("sk %p", sk);
if (parent) {
- sk->type = parent->type;
+ sk->sk_type = parent->sk_type;
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
pi->link_mode = l2cap_pi(parent)->link_mode;
@@ -352,11 +352,11 @@
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = l2cap_sock_destruct;
- sk->sndtimeo = L2CAP_CONN_TIMEOUT;
+ sk->sk_destruct = l2cap_sock_destruct;
+ sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
- sk->protocol = proto;
- sk->state = BT_OPEN;
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
l2cap_sock_init_timer(sk);
@@ -401,7 +401,7 @@
lock_sock(sk);
- if (sk->state != BT_OPEN) {
+ if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
@@ -414,7 +414,7 @@
bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
l2cap_pi(sk)->sport = la->l2_psm;
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
}
write_unlock_bh(&l2cap_sk_list.lock);
@@ -458,18 +458,18 @@
l2cap_chan_add(conn, sk, NULL);
- sk->state = BT_CONNECT;
- l2cap_sock_set_timer(sk, sk->sndtimeo);
+ sk->sk_state = BT_CONNECT;
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
if (hcon->state == BT_CONNECTED) {
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn_req req;
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
} else {
l2cap_sock_clear_timer(sk);
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
}
}
@@ -494,12 +494,12 @@
goto done;
}
- if (sk->type == SOCK_SEQPACKET && !la->l2_psm) {
+ if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
err = -EINVAL;
goto done;
}
- switch(sk->state) {
+ switch(sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
@@ -544,7 +544,7 @@
lock_sock(sk);
- if (sk->state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
@@ -554,9 +554,9 @@
goto done;
}
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = BT_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
@@ -572,7 +572,7 @@
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
@@ -582,7 +582,7 @@
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -594,7 +594,7 @@
timeo = schedule_timeout(timeo);
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
@@ -605,7 +605,7 @@
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
@@ -648,7 +648,7 @@
BT_DBG("sk %p len %d", sk, len);
/* First fragment (with L2CAP header) */
- if (sk->type == SOCK_DGRAM)
+ if (sk->sk_type == SOCK_DGRAM)
hlen = L2CAP_HDR_SIZE + 2;
else
hlen = L2CAP_HDR_SIZE;
@@ -665,7 +665,7 @@
lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- if (sk->type == SOCK_DGRAM)
+ if (sk->sk_type == SOCK_DGRAM)
put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
@@ -713,7 +713,7 @@
BT_DBG("sock %p, sk %p", sock, sk);
- if (sk->err)
+ if (sk->sk_err)
return sock_error(sk);
if (msg->msg_flags & MSG_OOB)
@@ -725,7 +725,7 @@
lock_sock(sk);
- if (sk->state == BT_CONNECTED)
+ if (sk->sk_state == BT_CONNECTED)
err = l2cap_do_send(sk, msg, len);
else
err = -ENOTCONN;
@@ -804,7 +804,7 @@
break;
case L2CAP_CONNINFO:
- if (sk->state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
@@ -837,7 +837,7 @@
l2cap_sock_clear_timer(sk);
lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
__l2cap_sock_close(sk, ECONNRESET);
release_sock(sk);
@@ -939,10 +939,10 @@
l2cap_pi(sk)->conn = conn;
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
- } else if (sk->type == SOCK_DGRAM) {
+ } else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = 0x0002;
l2cap_pi(sk)->dcid = 0x0002;
@@ -978,14 +978,14 @@
hci_conn_put(conn->hcon);
}
- sk->state = BT_CLOSED;
- sk->err = err;
- sk->zapped = 1;
+ sk->sk_state = BT_CLOSED;
+ sk->sk_err = err;
+ sk->sk_zapped = 1;
if (parent)
- parent->data_ready(parent, 0);
+ parent->sk_data_ready(parent, 0);
else
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -1000,11 +1000,11 @@
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
- sk->state = BT_CONNECTED;
- sk->state_change(sk);
- } else if (sk->state == BT_CONNECT) {
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ } else if (sk->sk_state == BT_CONNECT) {
struct l2cap_conn_req req;
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
@@ -1030,13 +1030,13 @@
/* Outgoing channel.
* Wake up socket sleeping on connect.
*/
- sk->state = BT_CONNECTED;
- sk->state_change(sk);
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
} else {
/* Incoming channel.
* Wake up socket sleeping on accept.
*/
- parent->data_ready(parent, 0);
+ parent->sk_data_ready(parent, 0);
}
}
@@ -1051,7 +1051,7 @@
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- if (sk->type != SOCK_RAW)
+ if (sk->sk_type != SOCK_RAW)
continue;
/* Don't send frame to the socket it came from */
@@ -1352,8 +1352,8 @@
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
- if (parent->ack_backlog > parent->max_ack_backlog) {
- BT_DBG("backlog full %d", parent->ack_backlog);
+ if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto response;
}
@@ -1366,7 +1366,7 @@
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
write_unlock(&list->lock);
- sk->zapped = 1;
+ sk->sk_zapped = 1;
l2cap_sock_kill(sk);
goto response;
}
@@ -1382,12 +1382,12 @@
__l2cap_chan_add(conn, sk, parent);
dcid = l2cap_pi(sk)->scid;
- l2cap_sock_set_timer(sk, sk->sndtimeo);
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Service level security */
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
- sk->state = BT_CONNECT2;
+ sk->sk_state = BT_CONNECT2;
l2cap_pi(sk)->ident = cmd->ident;
if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
@@ -1398,7 +1398,7 @@
goto done;
}
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
result = status = 0;
done:
@@ -1435,7 +1435,7 @@
switch (result) {
case L2CAP_CR_SUCCESS:
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
@@ -1488,7 +1488,7 @@
l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
} else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
u8 req[64];
@@ -1522,7 +1522,7 @@
/* They didn't like our options. Well... we do not negotiate.
* Close channel.
*/
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ * 5);
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
@@ -1538,7 +1538,7 @@
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
}
@@ -1566,7 +1566,7 @@
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_chan_del(sk, ECONNRESET);
bh_unlock_sock(sk);
@@ -1690,7 +1690,7 @@
BT_DBG("sk %p, len %d", sk, skb->len);
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
goto drop;
if (l2cap_pi(sk)->imtu < skb->len)
@@ -1722,7 +1722,7 @@
BT_DBG("sk %p, len %d", sk, skb->len);
- if (sk->state != BT_BOUND && sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
goto drop;
if (l2cap_pi(sk)->imtu < skb->len)
@@ -1781,8 +1781,8 @@
/* Find listening sockets and check their link_mode */
read_lock(&l2cap_sk_list.lock);
- for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
- if (sk->state != BT_LISTEN)
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next) {
+ if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
@@ -1845,17 +1845,17 @@
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->state != BT_CONNECT2 ||
+ if (sk->sk_state != BT_CONNECT2 ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
bh_unlock_sock(sk);
continue;
}
if (!status) {
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
result = 0;
} else {
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
}
@@ -1892,16 +1892,16 @@
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->state != BT_CONNECT2) {
+ if (sk->sk_state != BT_CONNECT2) {
bh_unlock_sock(sk);
continue;
}
if (!status) {
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
result = 0;
} else {
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
}
@@ -2008,7 +2008,7 @@
read_lock_bh(&l2cap_sk_list.lock);
- for (sk = l2cap_sk_list.head; sk; sk = sk->next)
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next)
if (!l--)
return sk;
return NULL;
@@ -2018,7 +2018,7 @@
{
struct sock *sk = e;
(*pos)++;
- return sk->next;
+ return sk->sk_next;
}
static void l2cap_seq_stop(struct seq_file *seq, void *e)
@@ -2033,8 +2033,8 @@
seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
- sk->state, pi->psm, pi->scid, pi->dcid, pi->imtu, pi->omtu,
- pi->link_mode);
+ sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
+ pi->omtu, pi->link_mode);
return 0;
}
diff -urN linux-2.5.70-bk11/net/bluetooth/rfcomm/core.c linux-2.5.70-bk12/net/bluetooth/rfcomm/core.c
--- linux-2.5.70-bk11/net/bluetooth/rfcomm/core.c 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/rfcomm/core.c 2003-06-07 04:47:49.000000000 -0700
@@ -144,7 +144,7 @@
/* ---- L2CAP callbacks ---- */
static void rfcomm_l2state_change(struct sock *sk)
{
- BT_DBG("%p state %d", sk, sk->state);
+ BT_DBG("%p state %d", sk, sk->sk_state);
rfcomm_schedule(RFCOMM_SCHED_STATE);
}
@@ -163,8 +163,8 @@
err = sock_create(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
if (!err) {
struct sock *sk = (*sock)->sk;
- sk->data_ready = rfcomm_l2data_ready;
- sk->state_change = rfcomm_l2state_change;
+ sk->sk_data_ready = rfcomm_l2data_ready;
+ sk->sk_state_change = rfcomm_l2state_change;
}
return err;
}
@@ -1545,19 +1545,19 @@
struct sock *sk = sock->sk;
struct sk_buff *skb;
- BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->receive_queue));
+ BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->sk_receive_queue));
/* Get data directly from socket receive queue without copying it. */
- while ((skb = skb_dequeue(&sk->receive_queue))) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
rfcomm_recv_frame(s, skb);
}
- if (sk->state == BT_CLOSED) {
+ if (sk->sk_state == BT_CLOSED) {
if (!s->initiator)
rfcomm_session_put(s);
- rfcomm_session_close(s, sk->err);
+ rfcomm_session_close(s, sk->sk_err);
}
}
@@ -1587,8 +1587,8 @@
}
/* Set our callbacks */
- nsock->sk->data_ready = rfcomm_l2data_ready;
- nsock->sk->state_change = rfcomm_l2state_change;
+ nsock->sk->sk_data_ready = rfcomm_l2data_ready;
+ nsock->sk->sk_state_change = rfcomm_l2state_change;
s = rfcomm_session_add(nsock, BT_OPEN);
if (s)
@@ -1603,7 +1603,7 @@
BT_DBG("%p state %ld", s, s->state);
- switch(sk->state) {
+ switch(sk->sk_state) {
case BT_CONNECTED:
s->state = BT_CONNECT;
@@ -1616,7 +1616,7 @@
case BT_CLOSED:
s->state = BT_CLOSED;
- rfcomm_session_close(s, sk->err);
+ rfcomm_session_close(s, sk->sk_err);
break;
}
}
diff -urN linux-2.5.70-bk11/net/bluetooth/rfcomm/sock.c linux-2.5.70-bk12/net/bluetooth/rfcomm/sock.c
--- linux-2.5.70-bk11/net/bluetooth/rfcomm/sock.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/rfcomm/sock.c 2003-06-07 04:47:49.000000000 -0700
@@ -78,11 +78,11 @@
if (!sk)
return;
- atomic_add(skb->len, &sk->rmem_alloc);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, skb->len);
+ atomic_add(skb->len, &sk->sk_rmem_alloc);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
- if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
}
@@ -97,16 +97,16 @@
bh_lock_sock(sk);
if (err)
- sk->err = err;
- sk->state = d->state;
+ sk->sk_err = err;
+ sk->sk_state = d->state;
parent = bt_sk(sk)->parent;
if (!parent) {
if (d->state == BT_CONNECTED)
rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
- sk->state_change(sk);
+ sk->sk_state_change(sk);
} else
- parent->data_ready(parent, 0);
+ parent->sk_data_ready(parent, 0);
bh_unlock_sock(sk);
}
@@ -116,7 +116,7 @@
{
struct sock *sk;
- for (sk = rfcomm_sk_list.head; sk; sk = sk->next) {
+ for (sk = rfcomm_sk_list.head; sk; sk = sk->sk_next) {
if (rfcomm_pi(sk)->channel == channel &&
!bacmp(&bt_sk(sk)->src, src))
break;
@@ -132,8 +132,8 @@
{
struct sock *sk, *sk1 = NULL;
- for (sk = rfcomm_sk_list.head; sk; sk = sk->next) {
- if (state && sk->state != state)
+ for (sk = rfcomm_sk_list.head; sk; sk = sk->sk_next) {
+ if (state && sk->sk_state != state)
continue;
if (rfcomm_pi(sk)->channel == channel) {
@@ -167,8 +167,8 @@
BT_DBG("sk %p dlc %p", sk, d);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
rfcomm_dlc_lock(d);
rfcomm_pi(sk)->dlc = NULL;
@@ -180,8 +180,8 @@
rfcomm_dlc_put(d);
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
static void rfcomm_sock_cleanup_listen(struct sock *parent)
@@ -194,8 +194,8 @@
while ((sk = bt_accept_dequeue(parent, NULL)))
rfcomm_sock_close(sk);
- parent->state = BT_CLOSED;
- parent->zapped = 1;
+ parent->sk_state = BT_CLOSED;
+ parent->sk_zapped = 1;
}
/* Kill socket (only if zapped and orphan)
@@ -203,10 +203,10 @@
*/
static void rfcomm_sock_kill(struct sock *sk)
{
- if (!sk->zapped || sk->socket)
+ if (!sk->sk_zapped || sk->sk_socket)
return;
- BT_DBG("sk %p state %d refcnt %d", sk, sk->state, atomic_read(&sk->refcnt));
+ BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
@@ -223,9 +223,9 @@
lock_sock(sk);
- BT_DBG("sk %p state %d socket %p", sk, sk->state, sk->socket);
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
- switch (sk->state) {
+ switch (sk->sk_state) {
case BT_LISTEN:
rfcomm_sock_cleanup_listen(sk);
break;
@@ -237,7 +237,7 @@
rfcomm_dlc_close(d, 0);
default:
- sk->zapped = 1;
+ sk->sk_zapped = 1;
break;
};
@@ -251,7 +251,7 @@
BT_DBG("sk %p", sk);
if (parent)
- sk->type = parent->type;
+ sk->sk_type = parent->sk_type;
}
static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, int prio)
@@ -276,14 +276,14 @@
rfcomm_pi(sk)->dlc = d;
d->owner = sk;
- sk->destruct = rfcomm_sock_destruct;
- sk->sndtimeo = RFCOMM_CONN_TIMEOUT;
+ sk->sk_destruct = rfcomm_sock_destruct;
+ sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;
- sk->sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
- sk->rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
+ sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
+ sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
- sk->protocol = proto;
- sk->state = BT_OPEN;
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
bt_sock_link(&rfcomm_sk_list, sk);
@@ -324,7 +324,7 @@
lock_sock(sk);
- if (sk->state != BT_OPEN) {
+ if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
@@ -337,7 +337,7 @@
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
}
write_unlock_bh(&rfcomm_sk_list.lock);
@@ -359,15 +359,15 @@
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc))
return -EINVAL;
- if (sk->state != BT_OPEN && sk->state != BT_BOUND)
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
return -EINVAL;
lock_sock(sk);
- sk->state = BT_CONNECT;
+ sk->sk_state = BT_CONNECT;
bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
@@ -388,14 +388,14 @@
lock_sock(sk);
- if (sk->state != BT_BOUND) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = BT_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
@@ -411,7 +411,7 @@
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
@@ -421,7 +421,7 @@
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -433,7 +433,7 @@
timeo = schedule_timeout(timeo);
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
@@ -444,7 +444,7 @@
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
@@ -488,7 +488,7 @@
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- if (sk->shutdown & SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -530,23 +530,23 @@
{
DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (skb_queue_len(&sk->receive_queue) || sk->err || (sk->shutdown & RCV_SHUTDOWN) ||
+ if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) || !timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return timeo;
}
@@ -557,7 +557,7 @@
int target, err = 0, copied = 0;
long timeo;
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
return -EINVAL;
if (flags & MSG_OOB)
@@ -576,14 +576,14 @@
struct sk_buff *skb;
int chunk;
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (!skb) {
if (copied >= target)
break;
if ((err = sock_error(sk)) != 0)
break;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
err = -EAGAIN;
@@ -601,7 +601,7 @@
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
break;
@@ -610,24 +610,24 @@
size -= chunk;
if (!(flags & MSG_PEEK)) {
- atomic_sub(chunk, &sk->rmem_alloc);
+ atomic_sub(chunk, &sk->sk_rmem_alloc);
skb_pull(skb, chunk);
if (skb->len) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
kfree_skb(skb);
} else {
/* put message back and return */
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
out:
- if (atomic_read(&sk->rmem_alloc) <= (sk->rcvbuf >> 2))
+ if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
@@ -643,8 +643,8 @@
if (!sk) return 0;
lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
- if (sk->state == BT_CONNECTED)
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ if (sk->sk_state == BT_CONNECTED)
rfcomm_dlc_close(rfcomm_pi(sk)->dlc, 0);
release_sock(sk);
@@ -744,8 +744,8 @@
return 0;
/* Check for backlog size */
- if (parent->ack_backlog > parent->max_ack_backlog) {
- BT_DBG("backlog full %d", parent->ack_backlog);
+ if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto done;
}
@@ -758,7 +758,7 @@
bacpy(&bt_sk(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
bt_accept_enqueue(parent, sk);
/* Accept connection and return socket DLC */
@@ -779,7 +779,7 @@
read_lock_bh(&rfcomm_sk_list.lock);
- for (sk = rfcomm_sk_list.head; sk; sk = sk->next)
+ for (sk = rfcomm_sk_list.head; sk; sk = sk->sk_next)
if (!l--)
return sk;
return NULL;
@@ -789,7 +789,7 @@
{
struct sock *sk = e;
(*pos)++;
- return sk->next;
+ return sk->sk_next;
}
static void rfcomm_seq_stop(struct seq_file *seq, void *e)
@@ -802,7 +802,7 @@
struct sock *sk = e;
seq_printf(seq, "%s %s %d %d\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
- sk->state, rfcomm_pi(sk)->channel);
+ sk->sk_state, rfcomm_pi(sk)->channel);
return 0;
}
diff -urN linux-2.5.70-bk11/net/bluetooth/rfcomm/tty.c linux-2.5.70-bk12/net/bluetooth/rfcomm/tty.c
--- linux-2.5.70-bk11/net/bluetooth/rfcomm/tty.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/rfcomm/tty.c 2003-06-07 04:47:49.000000000 -0700
@@ -294,7 +294,7 @@
if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
/* Socket must be connected */
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
return -EBADFD;
dlc = rfcomm_pi(sk)->dlc;
@@ -314,7 +314,7 @@
if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
/* DLC is now used by device.
* Socket must be disconnected */
- sk->state = BT_CLOSED;
+ sk->sk_state = BT_CLOSED;
}
return id;
@@ -863,11 +863,8 @@
.magic = TTY_DRIVER_MAGIC,
.driver_name = "rfcomm",
-#ifdef CONFIG_DEVFS_FS
- .name = "bluetooth/rfcomm/",
-#else
+ .devfs_name = "bluetooth/rfcomm/",
.name = "rfcomm",
-#endif
.major = RFCOMM_TTY_MAJOR,
.minor_start = RFCOMM_TTY_MINOR,
.num = RFCOMM_TTY_PORTS,
diff -urN linux-2.5.70-bk11/net/bluetooth/sco.c linux-2.5.70-bk12/net/bluetooth/sco.c
--- linux-2.5.70-bk11/net/bluetooth/sco.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/bluetooth/sco.c 2003-06-07 04:47:49.000000000 -0700
@@ -81,11 +81,11 @@
{
struct sock *sk = (struct sock *) arg;
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
- sk->err = ETIMEDOUT;
- sk->state_change(sk);
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
@@ -94,25 +94,25 @@
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
- BT_DBG("sock %p state %d timeout %ld", sk, sk->state, timeout);
+ BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
- if (!mod_timer(&sk->timer, jiffies + timeout))
+ if (!mod_timer(&sk->sk_timer, jiffies + timeout))
sock_hold(sk);
}
static void sco_sock_clear_timer(struct sock *sk)
{
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
- if (timer_pending(&sk->timer) && del_timer(&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
__sock_put(sk);
}
static void sco_sock_init_timer(struct sock *sk)
{
- init_timer(&sk->timer);
- sk->timer.function = sco_sock_timeout;
- sk->timer.data = (unsigned long)sk;
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.function = sco_sock_timeout;
+ sk->sk_timer.data = (unsigned long)sk;
}
/* ---- SCO connections ---- */
@@ -232,10 +232,10 @@
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
} else {
- sk->state = BT_CONNECT;
- sco_sock_set_timer(sk, sk->sndtimeo);
+ sk->sk_state = BT_CONNECT;
+ sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock_bh(hdev);
@@ -283,7 +283,7 @@
BT_DBG("sk %p len %d", sk, skb->len);
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
@@ -299,7 +299,7 @@
{
struct sock *sk;
- for (sk = sco_sk_list.head; sk; sk = sk->next) {
+ for (sk = sco_sk_list.head; sk; sk = sk->sk_next) {
if (!bacmp(&bt_sk(sk)->src, ba))
break;
}
@@ -316,8 +316,8 @@
read_lock(&sco_sk_list.lock);
- for (sk = sco_sk_list.head; sk; sk = sk->next) {
- if (sk->state != BT_LISTEN)
+ for (sk = sco_sk_list.head; sk; sk = sk->sk_next) {
+ if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
@@ -338,11 +338,11 @@
{
BT_DBG("sk %p", sk);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
static void sco_sock_cleanup_listen(struct sock *parent)
@@ -355,8 +355,8 @@
while ((sk = bt_accept_dequeue(parent, NULL)))
sco_sock_close(sk);
- parent->state = BT_CLOSED;
- parent->zapped = 1;
+ parent->sk_state = BT_CLOSED;
+ parent->sk_zapped = 1;
}
/* Kill socket (only if zapped and orphan)
@@ -364,10 +364,10 @@
*/
static void sco_sock_kill(struct sock *sk)
{
- if (!sk->zapped || sk->socket)
+ if (!sk->sk_zapped || sk->sk_socket)
return;
- BT_DBG("sk %p state %d", sk, sk->state);
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
@@ -388,9 +388,9 @@
conn = sco_pi(sk)->conn;
- BT_DBG("sk %p state %d conn %p socket %p", sk, sk->state, conn, sk->socket);
+ BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket);
- switch (sk->state) {
+ switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
@@ -403,7 +403,7 @@
break;
default:
- sk->zapped = 1;
+ sk->sk_zapped = 1;
break;
};
@@ -417,7 +417,7 @@
BT_DBG("sk %p", sk);
if (parent)
- sk->type = parent->type;
+ sk->sk_type = parent->sk_type;
}
static struct sock *sco_sock_alloc(struct socket *sock, int proto, int prio)
@@ -430,9 +430,9 @@
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = sco_sock_destruct;
- sk->sndtimeo = SCO_CONN_TIMEOUT;
- sk->state = BT_OPEN;
+ sk->sk_destruct = sco_sock_destruct;
+ sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
+ sk->sk_state = BT_OPEN;
sco_sock_init_timer(sk);
@@ -474,7 +474,7 @@
lock_sock(sk);
- if (sk->state != BT_OPEN) {
+ if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
@@ -486,7 +486,7 @@
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
}
write_unlock_bh(&sco_sk_list.lock);
@@ -508,10 +508,10 @@
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco))
return -EINVAL;
- if (sk->state != BT_OPEN && sk->state != BT_BOUND)
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
- if (sk->type != SOCK_SEQPACKET)
+ if (sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
@@ -538,14 +538,14 @@
lock_sock(sk);
- if (sk->state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = BT_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
@@ -561,7 +561,7 @@
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
@@ -571,7 +571,7 @@
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -583,7 +583,7 @@
timeo = schedule_timeout(timeo);
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
@@ -594,7 +594,7 @@
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
@@ -633,7 +633,7 @@
BT_DBG("sock %p, sk %p", sock, sk);
- if (sk->err)
+ if (sk->sk_err)
return sock_error(sk);
if (msg->msg_flags & MSG_OOB)
@@ -641,7 +641,7 @@
lock_sock(sk);
- if (sk->state == BT_CONNECTED)
+ if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
@@ -685,7 +685,7 @@
switch (optname) {
case SCO_OPTIONS:
- if (sk->state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
@@ -701,7 +701,7 @@
break;
case SCO_CONNINFO:
- if (sk->state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
@@ -767,11 +767,11 @@
hci_conn_put(conn->hcon);
}
- sk->state = BT_CLOSED;
- sk->err = err;
- sk->state_change(sk);
+ sk->sk_state = BT_CLOSED;
+ sk->sk_err = err;
+ sk->sk_state_change(sk);
- sk->zapped = 1;
+ sk->sk_zapped = 1;
}
static void sco_conn_ready(struct sco_conn *conn)
@@ -785,8 +785,8 @@
if ((sk = conn->sk)) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
- sk->state = BT_CONNECTED;
- sk->state_change(sk);
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
parent = sco_get_sock_listen(conn->src);
@@ -809,10 +809,10 @@
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
/* Wake up parent */
- parent->data_ready(parent, 1);
+ parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
}
@@ -888,7 +888,7 @@
read_lock_bh(&sco_sk_list.lock);
- for (sk = sco_sk_list.head; sk; sk = sk->next)
+ for (sk = sco_sk_list.head; sk; sk = sk->sk_next)
if (!l--)
return sk;
return NULL;
@@ -898,7 +898,7 @@
{
struct sock *sk = e;
(*pos)++;
- return sk->next;
+ return sk->sk_next;
}
static void sco_seq_stop(struct seq_file *seq, void *e)
@@ -910,7 +910,7 @@
{
struct sock *sk = e;
seq_printf(seq, "%s %s %d\n",
- batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->state);
+ batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state);
return 0;
}
diff -urN linux-2.5.70-bk11/net/core/datagram.c linux-2.5.70-bk12/net/core/datagram.c
--- linux-2.5.70-bk11/net/core/datagram.c 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/net/core/datagram.c 2003-06-07 04:47:49.000000000 -0700
@@ -59,7 +59,7 @@
*/
static inline int connection_based(struct sock *sk)
{
- return sk->type == SOCK_SEQPACKET || sk->type == SOCK_STREAM;
+ return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
/*
@@ -70,26 +70,26 @@
int error;
DEFINE_WAIT(wait);
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
if (error)
goto out_err;
- if (!skb_queue_empty(&sk->receive_queue))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
goto out;
/* Socket shut down? */
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
goto out_noerr;
/* Sequenced packets can come disconnected.
* If so we report the problem
*/
error = -ENOTCONN;
- if (connection_based(sk) && !(sk->state == TCP_ESTABLISHED ||
- sk->state == TCP_LISTEN))
+ if (connection_based(sk) &&
+ !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
goto out_err;
/* handle signals */
@@ -99,7 +99,7 @@
error = 0;
*timeo_p = schedule_timeout(*timeo_p);
out:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return error;
interrupted:
error = sock_intr_errno(*timeo_p);
@@ -146,7 +146,9 @@
{
struct sk_buff *skb;
long timeo;
- /* Caller is allowed not to check sk->err before skb_recv_datagram() */
+ /*
+ * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
+ */
int error = sock_error(sk);
if (error)
@@ -164,14 +166,15 @@
if (flags & MSG_PEEK) {
unsigned long cpu_flags;
- spin_lock_irqsave(&sk->receive_queue.lock, cpu_flags);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irqsave(&sk->sk_receive_queue.lock,
+ cpu_flags);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
atomic_inc(&skb->users);
- spin_unlock_irqrestore(&sk->receive_queue.lock,
+ spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
cpu_flags);
} else
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
return skb;
@@ -451,26 +454,26 @@
struct sock *sk = sock->sk;
unsigned int mask;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
- if (sk->err || !skb_queue_empty(&sk->error_queue))
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->receive_queue) ||
- (sk->shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
if (connection_based(sk)) {
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/* connection hasn't started yet? */
- if (sk->state == TCP_SYN_SENT)
+ if (sk->sk_state == TCP_SYN_SENT)
return mask;
}
@@ -478,7 +481,7 @@
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
return mask;
}
diff -urN linux-2.5.70-bk11/net/core/dev.c linux-2.5.70-bk12/net/core/dev.c
--- linux-2.5.70-bk11/net/core/dev.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/net/core/dev.c 2003-06-07 04:47:50.000000000 -0700
@@ -2863,10 +2863,6 @@
*
*/
-extern void net_device_init(void);
-extern void ip_auto_config(void);
-
-
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -2999,11 +2995,6 @@
#ifdef CONFIG_NET_SCHED
pktsched_init();
#endif
- /*
- * Initialise network devices
- */
-
- net_device_init();
rc = 0;
out:
return rc;
diff -urN linux-2.5.70-bk11/net/core/filter.c linux-2.5.70-bk12/net/core/filter.c
--- linux-2.5.70-bk11/net/core/filter.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/core/filter.c 2003-06-07 04:47:50.000000000 -0700
@@ -414,10 +414,10 @@
if (!err) {
struct sk_filter *old_fp;
- spin_lock_bh(&sk->lock.slock);
- old_fp = sk->filter;
- sk->filter = fp;
- spin_unlock_bh(&sk->lock.slock);
+ spin_lock_bh(&sk->sk_lock.slock);
+ old_fp = sk->sk_filter;
+ sk->sk_filter = fp;
+ spin_unlock_bh(&sk->sk_lock.slock);
fp = old_fp;
}
diff -urN linux-2.5.70-bk11/net/core/netfilter.c linux-2.5.70-bk12/net/core/netfilter.c
--- linux-2.5.70-bk11/net/core/netfilter.c 2003-05-26 18:00:39.000000000 -0700
+++ linux-2.5.70-bk12/net/core/netfilter.c 2003-06-07 04:47:50.000000000 -0700
@@ -633,7 +633,7 @@
.fwmark = (*pskb)->nfmark
#endif
} },
- .oif = (*pskb)->sk ? (*pskb)->sk->bound_dev_if : 0,
+ .oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0,
};
struct net_device *dev_src = NULL;
int err;
@@ -648,7 +648,7 @@
if ((err=ip_route_output_key(&rt, &fl)) != 0) {
printk("route_me_harder: ip_route_output_key(dst=%u.%u.%u.%u, src=%u.%u.%u.%u, oif=%d, tos=0x%x, fwmark=0x%lx) error %d\n",
NIPQUAD(iph->daddr), NIPQUAD(iph->saddr),
- (*pskb)->sk ? (*pskb)->sk->bound_dev_if : 0,
+ (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0,
RT_TOS(iph->tos)|RTO_CONN,
#ifdef CONFIG_IP_ROUTE_FWMARK
(*pskb)->nfmark,
diff -urN linux-2.5.70-bk11/net/core/rtnetlink.c linux-2.5.70-bk12/net/core/rtnetlink.c
--- linux-2.5.70-bk11/net/core/rtnetlink.c 2003-05-26 18:01:03.000000000 -0700
+++ linux-2.5.70-bk12/net/core/rtnetlink.c 2003-06-07 04:47:50.000000000 -0700
@@ -490,10 +490,11 @@
if (rtnl_shlock_nowait())
return;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (rtnetlink_rcv_skb(skb)) {
if (skb->len)
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue,
+ skb);
else
kfree_skb(skb);
break;
@@ -504,7 +505,7 @@
up(&rtnl_sem);
netdev_run_todo();
- } while (rtnl && rtnl->receive_queue.qlen);
+ } while (rtnl && rtnl->sk_receive_queue.qlen);
}
static struct rtnetlink_link link_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
diff -urN linux-2.5.70-bk11/net/core/sock.c linux-2.5.70-bk12/net/core/sock.c
--- linux-2.5.70-bk11/net/core/sock.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/core/sock.c 2003-06-07 04:47:50.000000000 -0700
@@ -202,17 +202,17 @@
ret = -EACCES;
}
else
- sk->debug=valbool;
+ sk->sk_debug = valbool;
break;
case SO_REUSEADDR:
- sk->reuse = valbool;
+ sk->sk_reuse = valbool;
break;
case SO_TYPE:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
- sk->localroute=valbool;
+ sk->sk_localroute = valbool;
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -226,17 +226,17 @@
if (val > sysctl_wmem_max)
val = sysctl_wmem_max;
- sk->userlocks |= SOCK_SNDBUF_LOCK;
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
if ((val * 2) < SOCK_MIN_SNDBUF)
- sk->sndbuf = SOCK_MIN_SNDBUF;
+ sk->sk_sndbuf = SOCK_MIN_SNDBUF;
else
- sk->sndbuf = (val * 2);
+ sk->sk_sndbuf = val * 2;
/*
* Wake up sending tasks if we
* upped the value.
*/
- sk->write_space(sk);
+ sk->sk_write_space(sk);
break;
case SO_RCVBUF:
@@ -248,20 +248,18 @@
if (val > sysctl_rmem_max)
val = sysctl_rmem_max;
- sk->userlocks |= SOCK_RCVBUF_LOCK;
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/* FIXME: is this lower bound the right one? */
if ((val * 2) < SOCK_MIN_RCVBUF)
- sk->rcvbuf = SOCK_MIN_RCVBUF;
+ sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
else
- sk->rcvbuf = (val * 2);
+ sk->sk_rcvbuf = val * 2;
break;
case SO_KEEPALIVE:
#ifdef CONFIG_INET
- if (sk->protocol == IPPROTO_TCP)
- {
+ if (sk->sk_protocol == IPPROTO_TCP)
tcp_set_keepalive(sk, valbool);
- }
#endif
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;
@@ -271,12 +269,12 @@
break;
case SO_NO_CHECK:
- sk->no_check = valbool;
+ sk->sk_no_check = valbool;
break;
case SO_PRIORITY:
if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
- sk->priority = val;
+ sk->sk_priority = val;
else
ret = -EPERM;
break;
@@ -295,10 +293,10 @@
else {
#if (BITS_PER_LONG == 32)
if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
- sk->lingertime=MAX_SCHEDULE_TIMEOUT;
+ sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
else
#endif
- sk->lingertime=ling.l_linger*HZ;
+ sk->sk_lingertime = ling.l_linger * HZ;
sock_set_flag(sk, SOCK_LINGER);
}
break;
@@ -312,21 +310,21 @@
break;
case SO_TIMESTAMP:
- sk->rcvtstamp = valbool;
+ sk->sk_rcvtstamp = valbool;
break;
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
- sk->rcvlowat = val ? : 1;
+ sk->sk_rcvlowat = val ? : 1;
break;
case SO_RCVTIMEO:
- ret = sock_set_timeout(&sk->rcvtimeo, optval, optlen);
+ ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
break;
case SO_SNDTIMEO:
- ret = sock_set_timeout(&sk->sndtimeo, optval, optlen);
+ ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
break;
#ifdef CONFIG_NETDEVICES
@@ -347,7 +345,7 @@
*/
if (!valbool) {
- sk->bound_dev_if = 0;
+ sk->sk_bound_dev_if = 0;
} else {
if (optlen > IFNAMSIZ)
optlen = IFNAMSIZ;
@@ -360,14 +358,14 @@
sk_dst_reset(sk);
if (devname[0] == '\0') {
- sk->bound_dev_if = 0;
+ sk->sk_bound_dev_if = 0;
} else {
struct net_device *dev = dev_get_by_name(devname);
if (!dev) {
ret = -ENODEV;
break;
}
- sk->bound_dev_if = dev->ifindex;
+ sk->sk_bound_dev_if = dev->ifindex;
dev_put(dev);
}
}
@@ -390,15 +388,15 @@
break;
case SO_DETACH_FILTER:
- spin_lock_bh(&sk->lock.slock);
- filter = sk->filter;
+ spin_lock_bh(&sk->sk_lock.slock);
+ filter = sk->sk_filter;
if (filter) {
- sk->filter = NULL;
- spin_unlock_bh(&sk->lock.slock);
+ sk->sk_filter = NULL;
+ spin_unlock_bh(&sk->sk_lock.slock);
sk_filter_release(sk, filter);
break;
}
- spin_unlock_bh(&sk->lock.slock);
+ spin_unlock_bh(&sk->sk_lock.slock);
ret = -ENONET;
break;
@@ -435,11 +433,11 @@
switch(optname)
{
case SO_DEBUG:
- v.val = sk->debug;
+ v.val = sk->sk_debug;
break;
case SO_DONTROUTE:
- v.val = sk->localroute;
+ v.val = sk->sk_localroute;
break;
case SO_BROADCAST:
@@ -447,15 +445,15 @@
break;
case SO_SNDBUF:
- v.val=sk->sndbuf;
+ v.val = sk->sk_sndbuf;
break;
case SO_RCVBUF:
- v.val =sk->rcvbuf;
+ v.val = sk->sk_rcvbuf;
break;
case SO_REUSEADDR:
- v.val = sk->reuse;
+ v.val = sk->sk_reuse;
break;
case SO_KEEPALIVE:
@@ -463,13 +461,13 @@
break;
case SO_TYPE:
- v.val = sk->type;
+ v.val = sk->sk_type;
break;
case SO_ERROR:
v.val = -sock_error(sk);
if(v.val==0)
- v.val=xchg(&sk->err_soft,0);
+ v.val = xchg(&sk->sk_err_soft, 0);
break;
case SO_OOBINLINE:
@@ -477,17 +475,17 @@
break;
case SO_NO_CHECK:
- v.val = sk->no_check;
+ v.val = sk->sk_no_check;
break;
case SO_PRIORITY:
- v.val = sk->priority;
+ v.val = sk->sk_priority;
break;
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
- v.ling.l_linger = sk->lingertime / HZ;
+ v.ling.l_linger = sk->sk_lingertime / HZ;
break;
case SO_BSDCOMPAT:
@@ -495,33 +493,33 @@
break;
case SO_TIMESTAMP:
- v.val = sk->rcvtstamp;
+ v.val = sk->sk_rcvtstamp;
break;
case SO_RCVTIMEO:
lv=sizeof(struct timeval);
- if (sk->rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
+ if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
- v.tm.tv_sec = sk->rcvtimeo/HZ;
- v.tm.tv_usec = ((sk->rcvtimeo%HZ)*1000)/HZ;
+ v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000) / HZ;
}
break;
case SO_SNDTIMEO:
lv=sizeof(struct timeval);
- if (sk->sndtimeo == MAX_SCHEDULE_TIMEOUT) {
+ if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
- v.tm.tv_sec = sk->sndtimeo/HZ;
- v.tm.tv_usec = ((sk->sndtimeo%HZ)*1000)/HZ;
+ v.tm.tv_sec = sk->sk_sndtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000) / HZ;
}
break;
case SO_RCVLOWAT:
- v.val = sk->rcvlowat;
+ v.val = sk->sk_rcvlowat;
break;
case SO_SNDLOWAT:
@@ -533,9 +531,9 @@
break;
case SO_PEERCRED:
- if (len > sizeof(sk->peercred))
- len = sizeof(sk->peercred);
- if (copy_to_user(optval, &sk->peercred, len))
+ if (len > sizeof(sk->sk_peercred))
+ len = sizeof(sk->sk_peercred);
+ if (copy_to_user(optval, &sk->sk_peercred, len))
return -EFAULT;
goto lenout;
@@ -556,7 +554,7 @@
* the UNIX standard wants it for whatever reason... -DaveM
*/
case SO_ACCEPTCONN:
- v.val = (sk->state == TCP_LISTEN);
+ v.val = sk->sk_state == TCP_LISTEN;
break;
default:
@@ -597,10 +595,10 @@
if (zero_it) {
memset(sk, 0,
zero_it == 1 ? sizeof(struct sock) : zero_it);
- sk->family = family;
+ sk->sk_family = family;
sock_lock_init(sk);
}
- sk->slab = slab;
+ sk->sk_slab = slab;
}
return sk;
}
@@ -608,21 +606,22 @@
void sk_free(struct sock *sk)
{
struct sk_filter *filter;
- struct module *owner = sk->owner;
+ struct module *owner = sk->sk_owner;
- if (sk->destruct)
- sk->destruct(sk);
+ if (sk->sk_destruct)
+ sk->sk_destruct(sk);
- filter = sk->filter;
+ filter = sk->sk_filter;
if (filter) {
sk_filter_release(sk, filter);
- sk->filter = NULL;
+ sk->sk_filter = NULL;
}
- if (atomic_read(&sk->omem_alloc))
- printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));
+ if (atomic_read(&sk->sk_omem_alloc))
+ printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
+ __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
- kmem_cache_free(sk->slab, sk);
+ kmem_cache_free(sk->sk_slab, sk);
module_put(owner);
}
@@ -657,9 +656,9 @@
struct sock *sk = skb->sk;
/* In case it might be waiting for more memory. */
- atomic_sub(skb->truesize, &sk->wmem_alloc);
- if (!sk->use_write_queue)
- sk->write_space(sk);
+ atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+ if (!sk->sk_use_write_queue)
+ sk->sk_write_space(sk);
sock_put(sk);
}
@@ -670,7 +669,7 @@
{
struct sock *sk = skb->sk;
- atomic_sub(skb->truesize, &sk->rmem_alloc);
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
/*
@@ -678,7 +677,7 @@
*/
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
- if (force || atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
+ if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
struct sk_buff * skb = alloc_skb(size, priority);
if (skb) {
skb_set_owner_w(skb, sk);
@@ -693,7 +692,7 @@
*/
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
- if (force || atomic_read(&sk->rmem_alloc) < sk->rcvbuf) {
+ if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(size, priority);
if (skb) {
skb_set_owner_r(skb, sk);
@@ -709,16 +708,16 @@
void *sock_kmalloc(struct sock *sk, int size, int priority)
{
if ((unsigned)size <= sysctl_optmem_max &&
- atomic_read(&sk->omem_alloc)+size < sysctl_optmem_max) {
+ atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
void *mem;
/* First do the add, to avoid the race if kmalloc
* might sleep.
*/
- atomic_add(size, &sk->omem_alloc);
+ atomic_add(size, &sk->sk_omem_alloc);
mem = kmalloc(size, priority);
if (mem)
return mem;
- atomic_sub(size, &sk->omem_alloc);
+ atomic_sub(size, &sk->sk_omem_alloc);
}
return NULL;
}
@@ -729,7 +728,7 @@
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
kfree(mem);
- atomic_sub(size, &sk->omem_alloc);
+ atomic_sub(size, &sk->sk_omem_alloc);
}
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
@@ -739,23 +738,23 @@
{
DEFINE_WAIT(wait);
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
for (;;) {
if (!timeo)
break;
if (signal_pending(current))
break;
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
- if (atomic_read(&sk->wmem_alloc) < sk->sndbuf)
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
break;
- if (sk->shutdown & SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
break;
- if (sk->err)
+ if (sk->sk_err)
break;
timeo = schedule_timeout(timeo);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return timeo;
}
@@ -772,7 +771,7 @@
long timeo;
int err;
- gfp_mask = sk->allocation;
+ gfp_mask = sk->sk_allocation;
if (gfp_mask & __GFP_WAIT)
gfp_mask |= __GFP_REPEAT;
@@ -783,11 +782,11 @@
goto failure;
err = -EPIPE;
- if (sk->shutdown & SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
- skb = alloc_skb(header_len, sk->allocation);
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, sk->sk_allocation);
if (skb) {
int npages;
int i;
@@ -803,7 +802,7 @@
struct page *page;
skb_frag_t *frag;
- page = alloc_pages(sk->allocation, 0);
+ page = alloc_pages(sk->sk_allocation, 0);
if (!page) {
err = -ENOBUFS;
skb_shinfo(skb)->nr_frags = i;
@@ -826,8 +825,8 @@
err = -ENOBUFS;
goto failure;
}
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = -EAGAIN;
if (!timeo)
goto failure;
@@ -857,35 +856,35 @@
DEFINE_WAIT(wait);
for(;;) {
- prepare_to_wait_exclusive(&sk->lock.wq, &wait,
+ prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
TASK_UNINTERRUPTIBLE);
- spin_unlock_bh(&sk->lock.slock);
+ spin_unlock_bh(&sk->sk_lock.slock);
schedule();
- spin_lock_bh(&sk->lock.slock);
+ spin_lock_bh(&sk->sk_lock.slock);
if(!sock_owned_by_user(sk))
break;
}
- finish_wait(&sk->lock.wq, &wait);
+ finish_wait(&sk->sk_lock.wq, &wait);
}
void __release_sock(struct sock *sk)
{
- struct sk_buff *skb = sk->backlog.head;
+ struct sk_buff *skb = sk->sk_backlog.head;
do {
- sk->backlog.head = sk->backlog.tail = NULL;
+ sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
bh_unlock_sock(sk);
do {
struct sk_buff *next = skb->next;
skb->next = NULL;
- sk->backlog_rcv(sk, skb);
+ sk->sk_backlog_rcv(sk, skb);
skb = next;
} while (skb != NULL);
bh_lock_sock(sk);
- } while((skb = sk->backlog.head) != NULL);
+ } while((skb = sk->sk_backlog.head) != NULL);
}
/*
@@ -1014,101 +1013,101 @@
void sock_def_wakeup(struct sock *sk)
{
- read_lock(&sk->callback_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
- read_unlock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_error_report(struct sock *sk)
{
- read_lock(&sk->callback_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk,0,POLL_ERR);
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_readable(struct sock *sk, int len)
{
- read_lock(&sk->callback_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk,1,POLL_IN);
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_write_space(struct sock *sk)
{
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
- if((atomic_read(&sk->wmem_alloc) << 1) <= sk->sndbuf) {
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, 2, POLL_OUT);
}
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_destruct(struct sock *sk)
{
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
void sk_send_sigurg(struct sock *sk)
{
- if (sk->socket && sk->socket->file)
- if (send_sigurg(&sk->socket->file->f_owner))
+ if (sk->sk_socket && sk->sk_socket->file)
+ if (send_sigurg(&sk->sk_socket->file->f_owner))
sk_wake_async(sk, 3, POLL_PRI);
}
void sock_init_data(struct socket *sock, struct sock *sk)
{
- skb_queue_head_init(&sk->receive_queue);
- skb_queue_head_init(&sk->write_queue);
- skb_queue_head_init(&sk->error_queue);
+ skb_queue_head_init(&sk->sk_receive_queue);
+ skb_queue_head_init(&sk->sk_write_queue);
+ skb_queue_head_init(&sk->sk_error_queue);
- init_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
- sk->allocation = GFP_KERNEL;
- sk->rcvbuf = sysctl_rmem_default;
- sk->sndbuf = sysctl_wmem_default;
- sk->state = TCP_CLOSE;
- sk->zapped = 1;
- sk->socket = sock;
+ sk->sk_allocation = GFP_KERNEL;
+ sk->sk_rcvbuf = sysctl_rmem_default;
+ sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_zapped = 1;
+ sk->sk_socket = sock;
if(sock)
{
- sk->type = sock->type;
- sk->sleep = &sock->wait;
+ sk->sk_type = sock->type;
+ sk->sk_sleep = &sock->wait;
sock->sk = sk;
} else
- sk->sleep = NULL;
+ sk->sk_sleep = NULL;
- sk->dst_lock = RW_LOCK_UNLOCKED;
- sk->callback_lock = RW_LOCK_UNLOCKED;
+ sk->sk_dst_lock = RW_LOCK_UNLOCKED;
+ sk->sk_callback_lock = RW_LOCK_UNLOCKED;
- sk->state_change = sock_def_wakeup;
- sk->data_ready = sock_def_readable;
- sk->write_space = sock_def_write_space;
- sk->error_report = sock_def_error_report;
- sk->destruct = sock_def_destruct;
-
- sk->peercred.pid = 0;
- sk->peercred.uid = -1;
- sk->peercred.gid = -1;
- sk->rcvlowat = 1;
- sk->rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- sk->sndtimeo = MAX_SCHEDULE_TIMEOUT;
- sk->owner = NULL;
+ sk->sk_state_change = sock_def_wakeup;
+ sk->sk_data_ready = sock_def_readable;
+ sk->sk_write_space = sock_def_write_space;
+ sk->sk_error_report = sock_def_error_report;
+ sk->sk_destruct = sock_def_destruct;
+
+ sk->sk_peercred.pid = 0;
+ sk->sk_peercred.uid = -1;
+ sk->sk_peercred.gid = -1;
+ sk->sk_rcvlowat = 1;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_owner = NULL;
- atomic_set(&sk->refcnt, 1);
+ atomic_set(&sk->sk_refcnt, 1);
}
diff -urN linux-2.5.70-bk11/net/decnet/af_decnet.c linux-2.5.70-bk12/net/decnet/af_decnet.c
--- linux-2.5.70-bk11/net/decnet/af_decnet.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/net/decnet/af_decnet.c 2003-06-07 04:47:50.000000000 -0700
@@ -180,7 +180,7 @@
struct dn_scp *scp = DN_SK(sk);
if (scp->addrloc == port)
return -1;
- sk = sk->next;
+ sk = sk->sk_next;
}
return 0;
}
@@ -212,9 +212,9 @@
struct sock **skp;
int rv = -EUSERS;
- if (sk->next)
+ if (sk->sk_next)
BUG();
- if (sk->pprev)
+ if (sk->sk_pprev)
BUG();
write_lock_bh(&dn_hash_lock);
@@ -226,8 +226,8 @@
if ((skp = dn_find_list(sk)) == NULL)
goto out;
- sk->next = *skp;
- sk->pprev = skp;
+ sk->sk_next = *skp;
+ sk->sk_pprev = skp;
*skp = sk;
rv = 0;
out:
@@ -237,36 +237,36 @@
static void dn_unhash_sock(struct sock *sk)
{
- struct sock **skp = sk->pprev;
+ struct sock **skp = sk->sk_pprev;
if (skp == NULL)
return;
write_lock(&dn_hash_lock);
while(*skp != sk)
- skp = &((*skp)->next);
- *skp = sk->next;
+ skp = &((*skp)->sk_next);
+ *skp = sk->sk_next;
write_unlock(&dn_hash_lock);
- sk->next = NULL;
- sk->pprev = NULL;
+ sk->sk_next = NULL;
+ sk->sk_pprev = NULL;
}
static void dn_unhash_sock_bh(struct sock *sk)
{
- struct sock **skp = sk->pprev;
+ struct sock **skp = sk->sk_pprev;
if (skp == NULL)
return;
write_lock_bh(&dn_hash_lock);
while(*skp != sk)
- skp = &((*skp)->next);
- *skp = sk->next;
+ skp = &((*skp)->sk_next);
+ *skp = sk->sk_next;
write_unlock_bh(&dn_hash_lock);
- sk->next = NULL;
- sk->pprev = NULL;
+ sk->sk_next = NULL;
+ sk->sk_pprev = NULL;
}
struct sock **listen_hash(struct sockaddr_dn *addr)
@@ -292,7 +292,7 @@
*/
static void dn_rehash_sock(struct sock *sk)
{
- struct sock **skp = sk->pprev;
+ struct sock **skp = sk->sk_pprev;
struct dn_scp *scp = DN_SK(sk);
if (scp->addr.sdn_flags & SDF_WILD)
@@ -300,14 +300,14 @@
write_lock_bh(&dn_hash_lock);
while(*skp != sk)
- skp = &((*skp)->next);
- *skp = sk->next;
+ skp = &((*skp)->sk_next);
+ *skp = sk->sk_next;
DN_SK(sk)->addrloc = 0;
skp = listen_hash(&DN_SK(sk)->addr);
- sk->next = *skp;
- sk->pprev = skp;
+ sk->sk_next = *skp;
+ sk->sk_pprev = skp;
*skp = sk;
write_unlock_bh(&dn_hash_lock);
}
@@ -405,9 +405,9 @@
struct sock *sk;
read_lock(&dn_hash_lock);
- for(sk = *skp; sk != NULL; sk = sk->next) {
+ for(sk = *skp; sk; sk = sk->sk_next) {
struct dn_scp *scp = DN_SK(sk);
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
continue;
if (scp->addr.sdn_objnum) {
if (scp->addr.sdn_objnum != addr->sdn_objnum)
@@ -425,7 +425,7 @@
return sk;
}
- if (dn_wild_sk && (dn_wild_sk->state == TCP_LISTEN))
+ if (dn_wild_sk && (dn_wild_sk->sk_state == TCP_LISTEN))
sock_hold((sk = dn_wild_sk));
read_unlock(&dn_hash_lock);
@@ -440,7 +440,7 @@
read_lock(&dn_hash_lock);
sk = dn_sk_hash[cb->dst_port & DN_SK_HASH_MASK];
- for (; sk != NULL; sk = sk->next) {
+ for (; sk; sk = sk->sk_next) {
scp = DN_SK(sk);
if (cb->src != dn_saddr2dn(&scp->peer))
continue;
@@ -469,7 +469,7 @@
skb_queue_purge(&scp->other_xmit_queue);
skb_queue_purge(&scp->other_receive_queue);
- dst_release(xchg(&sk->dst_cache, NULL));
+ dst_release(xchg(&sk->sk_dst_cache, NULL));
}
struct sock *dn_alloc_sock(struct socket *sock, int gfp)
@@ -488,12 +488,12 @@
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->backlog_rcv = dn_nsp_backlog_rcv;
- sk->destruct = dn_destruct;
- sk->no_check = 1;
- sk->family = PF_DECnet;
- sk->protocol = 0;
- sk->allocation = gfp;
+ sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
+ sk->sk_destruct = dn_destruct;
+ sk->sk_no_check = 1;
+ sk->sk_family = PF_DECnet;
+ sk->sk_protocol = 0;
+ sk->sk_allocation = gfp;
/* Initialization of DECnet Session Control Port */
scp->state = DN_O; /* Open */
@@ -600,7 +600,7 @@
scp->persist = (HZ * decnet_time_wait);
- if (sk->socket)
+ if (sk->sk_socket)
return 0;
dn_stop_fast_timer(sk); /* unlikely, but possible that this is runninng */
@@ -619,16 +619,17 @@
scp->nsp_rxtshift = 0; /* reset back off */
- if (sk->socket) {
- if (sk->socket->state != SS_UNCONNECTED)
- sk->socket->state = SS_DISCONNECTING;
+ if (sk->sk_socket) {
+ if (sk->sk_socket->state != SS_UNCONNECTED)
+ sk->sk_socket->state = SS_DISCONNECTING;
}
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
switch(scp->state) {
case DN_DN:
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, sk->allocation);
+ dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
+ sk->sk_allocation);
scp->persist_fxn = dn_destroy_timer;
scp->persist = dn_nsp_persist(sk);
break;
@@ -640,7 +641,7 @@
case DN_DI:
case DN_DR:
disc_reject:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->allocation);
+ dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
case DN_NC:
case DN_NR:
case DN_RJ:
@@ -697,7 +698,7 @@
if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL)
return -ENOBUFS;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
return 0;
}
@@ -777,13 +778,13 @@
rv = -EINVAL;
lock_sock(sk);
- if (sk->zapped != 0) {
+ if (sk->sk_zapped) {
memcpy(&scp->addr, saddr, addr_len);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
rv = dn_hash_sock(sk);
if (rv) {
- sk->zapped = 1;
+ sk->sk_zapped = 1;
}
}
release_sock(sk);
@@ -798,7 +799,7 @@
struct dn_scp *scp = DN_SK(sk);
int rv;
- sk->zapped = 0;
+ sk->sk_zapped = 0;
scp->addr.sdn_flags = 0;
scp->addr.sdn_objnum = 0;
@@ -823,7 +824,7 @@
if (rv == 0) {
rv = dn_hash_sock(sk);
if (rv) {
- sk->zapped = 1;
+ sk->sk_zapped = 1;
}
}
@@ -843,7 +844,7 @@
scp->segsize_loc = dst_path_metric(__sk_dst_get(sk), RTAX_ADVMSS);
dn_send_conn_conf(sk, allocation);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CC)
@@ -861,13 +862,13 @@
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
if (err == 0) {
- sk->socket->state = SS_CONNECTED;
+ sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CC) {
- sk->socket->state = SS_UNCONNECTED;
+ sk->sk_socket->state = SS_UNCONNECTED;
}
return err;
}
@@ -884,7 +885,7 @@
if (!*timeo)
return -EALREADY;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CI || scp->state == DN_CC)
@@ -902,21 +903,21 @@
err = -ETIMEDOUT;
if (!*timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
out:
if (err == 0) {
- sk->socket->state = SS_CONNECTED;
+ sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CI && scp->state != DN_CC) {
- sk->socket->state = SS_UNCONNECTED;
+ sk->sk_socket->state = SS_UNCONNECTED;
}
return err;
}
static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
- struct socket *sock = sk->socket;
+ struct socket *sock = sk->sk_socket;
struct dn_scp *scp = DN_SK(sk);
int err = -EISCONN;
struct flowi fl;
@@ -949,8 +950,8 @@
if (addr->sdn_flags & SDF_WILD)
goto out;
- if (sk->zapped) {
- err = dn_auto_bind(sk->socket);
+ if (sk->sk_zapped) {
+ err = dn_auto_bind(sk->sk_socket);
if (err)
goto out;
}
@@ -959,17 +960,17 @@
err = -EHOSTUNREACH;
memset(&fl, 0, sizeof(fl));
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fld_dst = dn_saddr2dn(&scp->peer);
fl.fld_src = dn_saddr2dn(&scp->addr);
dn_sk_ports_copy(&fl, scp);
fl.proto = DNPROTO_NSP;
- if (dn_route_output_sock(&sk->dst_cache, &fl, sk, flags) < 0)
+ if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
goto out;
- sk->route_caps = sk->dst_cache->dev->features;
+ sk->sk_route_caps = sk->sk_dst_cache->dev->features;
sock->state = SS_CONNECTING;
scp->state = DN_CI;
- scp->segsize_loc = dst_path_metric(sk->dst_cache, RTAX_ADVMSS);
+ scp->segsize_loc = dst_path_metric(sk->sk_dst_cache, RTAX_ADVMSS);
dn_nsp_send_conninit(sk, NSP_CI);
err = -EINPROGRESS;
@@ -1002,7 +1003,7 @@
case DN_RUN:
return 0;
case DN_CR:
- return dn_confirm_accept(sk, timeo, sk->allocation);
+ return dn_confirm_accept(sk, timeo, sk->sk_allocation);
case DN_CI:
case DN_CC:
return dn_wait_run(sk, timeo);
@@ -1050,19 +1051,19 @@
struct sk_buff *skb = NULL;
int err = 0;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
*timeo = schedule_timeout(*timeo);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
}
lock_sock(sk);
if (skb != NULL)
break;
err = -EINVAL;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
break;
err = sock_intr_errno(*timeo);
if (signal_pending(current))
@@ -1070,9 +1071,9 @@
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return skb == NULL ? ERR_PTR(err) : skb;
}
@@ -1089,12 +1090,12 @@
lock_sock(sk);
- if (sk->state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
+ if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
release_sock(sk);
return -EINVAL;
}
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
skb = dn_wait_for_connect(sk, &timeo);
if (IS_ERR(skb)) {
@@ -1104,8 +1105,8 @@
}
cb = DN_SKB_CB(skb);
- sk->ack_backlog--;
- newsk = dn_alloc_sock(newsock, sk->allocation);
+ sk->sk_ack_backlog--;
+ newsk = dn_alloc_sock(newsock, sk->sk_allocation);
if (newsk == NULL) {
release_sock(sk);
kfree_skb(skb);
@@ -1113,7 +1114,7 @@
}
release_sock(sk);
- dst_release(xchg(&newsk->dst_cache, skb->dst));
+ dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
skb->dst = NULL;
DN_SK(newsk)->state = DN_CR;
@@ -1129,7 +1130,7 @@
if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
- newsk->state = TCP_LISTEN;
+ newsk->sk_state = TCP_LISTEN;
memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
/*
@@ -1168,15 +1169,16 @@
lock_sock(newsk);
err = dn_hash_sock(newsk);
if (err == 0) {
- newsk->zapped = 0;
+ newsk->sk_zapped = 0;
dn_send_conn_ack(newsk);
/*
- * Here we use sk->allocation since although the conn conf is
+ * Here we use sk->sk_allocation since although the conn conf is
* for the newsk, the context is the old socket.
*/
if (DN_SK(newsk)->accept_mode == ACC_IMMED)
- err = dn_confirm_accept(newsk, &timeo, sk->allocation);
+ err = dn_confirm_accept(newsk, &timeo,
+ sk->sk_allocation);
}
release_sock(newsk);
return err;
@@ -1246,7 +1248,7 @@
return val;
case TIOCOUTQ:
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
err = put_user(amount, (int *)arg);
@@ -1257,9 +1259,10 @@
if ((skb = skb_peek(&scp->other_receive_queue)) != NULL) {
amount = skb->len;
} else {
- struct sk_buff *skb = sk->receive_queue.next;
+ struct sk_buff *skb = sk->sk_receive_queue.next;
for(;;) {
- if (skb == (struct sk_buff *)&sk->receive_queue)
+ if (skb ==
+ (struct sk_buff *)&sk->sk_receive_queue)
break;
amount += skb->len;
skb = skb->next;
@@ -1284,15 +1287,15 @@
lock_sock(sk);
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
- if ((DN_SK(sk)->state != DN_O) || (sk->state == TCP_LISTEN))
+ if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
goto out;
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = TCP_LISTEN;
err = 0;
dn_rehash_sock(sk);
@@ -1325,7 +1328,7 @@
if (how != SHUTDOWN_MASK)
goto out;
- sk->shutdown = how;
+ sk->sk_shutdown = how;
dn_destroy_sock(sk);
err = 0;
@@ -1438,7 +1441,7 @@
if (scp->state != DN_CR)
return -EINVAL;
timeo = sock_rcvtimeo(sk, 0);
- err = dn_confirm_accept(sk, &timeo, sk->allocation);
+ err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
return err;
case DSO_CONREJECT:
@@ -1447,8 +1450,8 @@
return -EINVAL;
scp->state = DN_DR;
- sk->shutdown = SHUTDOWN_MASK;
- dn_nsp_send_disc(sk, 0x38, 0, sk->allocation);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
break;
default:
@@ -1662,7 +1665,7 @@
if (cb->nsp_flags & 0x40) {
/* SOCK_SEQPACKET reads to EOM */
- if (sk->type == SOCK_SEQPACKET)
+ if (sk->sk_type == SOCK_SEQPACKET)
return 1;
/* so does SOCK_STREAM unless WAITALL is specified */
if (!(flags & MSG_WAITALL))
@@ -1685,7 +1688,7 @@
{
struct sock *sk = sock->sk;
struct dn_scp *scp = DN_SK(sk);
- struct sk_buff_head *queue = &sk->receive_queue;
+ struct sk_buff_head *queue = &sk->sk_receive_queue;
int target = size > 1 ? 1 : 0;
int copied = 0;
int rv = 0;
@@ -1696,7 +1699,7 @@
lock_sock(sk);
- if (sk->zapped) {
+ if (sk->sk_zapped) {
rv = -EADDRNOTAVAIL;
goto out;
}
@@ -1705,7 +1708,7 @@
if (rv)
goto out;
- if (sk->shutdown & RCV_SHUTDOWN) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (!(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
rv = -EPIPE;
@@ -1728,7 +1731,7 @@
* See if there is data ready to read, sleep if there isn't
*/
for(;;) {
- if (sk->err)
+ if (sk->sk_err)
goto out;
if (skb_queue_len(&scp->other_receive_queue)) {
@@ -1800,7 +1803,7 @@
}
if (eor) {
- if (sk->type == SOCK_SEQPACKET)
+ if (sk->sk_type == SOCK_SEQPACKET)
break;
if (!(flags & MSG_WAITALL))
break;
@@ -1816,12 +1819,12 @@
rv = copied;
- if (eor && (sk->type == SOCK_SEQPACKET))
+ if (eor && (sk->sk_type == SOCK_SEQPACKET))
msg->msg_flags |= MSG_EOR;
out:
if (rv == 0)
- rv = (flags & MSG_PEEK) ? -sk->err : sock_error(sk);
+ rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
if ((rv >= 0) && msg->msg_name) {
memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
@@ -1950,13 +1953,13 @@
if (err)
goto out_err;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
err = -EPIPE;
goto out_err;
}
- if ((flags & MSG_TRYHARD) && sk->dst_cache)
- dst_negative_advice(&sk->dst_cache);
+ if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
+ dst_negative_advice(&sk->sk_dst_cache);
mss = scp->segsize_rem;
fctype = scp->services_rem & NSP_FC_MASK;
@@ -2053,7 +2056,7 @@
}
sent += len;
- dn_nsp_queue_xmit(sk, skb, sk->allocation, flags & MSG_OOB);
+ dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
skb = NULL;
scp->persist = dn_nsp_persist(sk);
@@ -2132,7 +2135,7 @@
{
struct dn_iter_state *state = seq->private;
- n = n->next;
+ n = n->sk_next;
try_again:
if (n)
goto out;
diff -urN linux-2.5.70-bk11/net/decnet/dn_nsp_in.c linux-2.5.70-bk12/net/decnet/dn_nsp_in.c
--- linux-2.5.70-bk11/net/decnet/dn_nsp_in.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/decnet/dn_nsp_in.c 2003-06-07 04:47:50.000000000 -0700
@@ -120,7 +120,7 @@
}
if (wakeup && !sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
/*
@@ -324,14 +324,14 @@
static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
{
- if (sk->ack_backlog >= sk->max_ack_backlog) {
+ if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
kfree_skb(skb);
return;
}
- sk->ack_backlog++;
- skb_queue_tail(&sk->receive_queue, skb);
- sk->state_change(sk);
+ sk->sk_ack_backlog++;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_state_change(sk);
}
static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
@@ -351,7 +351,7 @@
if ((scp->state == DN_CI) || (scp->state == DN_CD)) {
scp->persist = 0;
scp->addrrem = cb->src_port;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
scp->state = DN_RUN;
scp->services_rem = cb->services;
scp->info_rem = cb->info;
@@ -369,7 +369,7 @@
}
dn_nsp_send_link(sk, DN_NOCHANGE, 0);
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
out:
@@ -413,7 +413,7 @@
}
scp->addrrem = cb->src_port;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
switch(scp->state) {
case DN_CI:
@@ -421,7 +421,7 @@
scp->state = DN_RJ;
break;
case DN_RUN:
- sk->shutdown |= SHUTDOWN_MASK;
+ sk->sk_shutdown |= SHUTDOWN_MASK;
scp->state = DN_DN;
break;
case DN_DI:
@@ -430,9 +430,9 @@
}
if (!sock_flag(sk, SOCK_DEAD)) {
- if (sk->socket->state != SS_UNCONNECTED)
- sk->socket->state = SS_DISCONNECTING;
- sk->state_change(sk);
+ if (sk->sk_socket->state != SS_UNCONNECTED)
+ sk->sk_socket->state = SS_DISCONNECTING;
+ sk->sk_state_change(sk);
}
/*
@@ -465,7 +465,7 @@
reason = dn_ntohs(*(__u16 *)skb->data);
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
switch(scp->state) {
case DN_CI:
@@ -481,15 +481,15 @@
scp->state = DN_DIC;
break;
case DN_RUN:
- sk->shutdown |= SHUTDOWN_MASK;
+ sk->sk_shutdown |= SHUTDOWN_MASK;
case DN_CC:
scp->state = DN_CN;
}
if (!sock_flag(sk, SOCK_DEAD)) {
- if (sk->socket->state != SS_UNCONNECTED)
- sk->socket->state = SS_DISCONNECTING;
- sk->state_change(sk);
+ if (sk->sk_socket->state != SS_UNCONNECTED)
+ sk->sk_socket->state = SS_DISCONNECTING;
+ sk->sk_state_change(sk);
}
scp->persist_fxn = dn_destroy_timer;
@@ -559,7 +559,7 @@
break;
}
if (wake_up && !sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
dn_nsp_send_oth_ack(sk);
@@ -580,7 +580,8 @@
/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
err = -ENOMEM;
goto out;
}
@@ -595,16 +596,16 @@
/* This code only runs from BH or BH protected context.
* Therefore the plain read_lock is ok here. -DaveM
*/
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
if (!sock_flag(sk, SOCK_DEAD)) {
- struct socket *sock = sk->socket;
- wake_up_interruptible(sk->sleep);
+ struct socket *sock = sk->sk_socket;
+ wake_up_interruptible(sk->sk_sleep);
if (sock && sock->fasync_list &&
!test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
__kill_fasync(sock->fasync_list, sig,
(sig == SIGURG) ? POLL_PRI : POLL_IN);
}
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
out:
return err;
}
@@ -651,7 +652,7 @@
skb_pull(skb, 2);
if (seq_next(scp->numdat_rcv, segnum)) {
- if (dn_queue_skb(sk, skb, SIGIO, &sk->receive_queue) == 0) {
+ if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
seq_add(&scp->numdat_rcv, 1);
queued = 1;
}
@@ -679,9 +680,9 @@
if (scp->state == DN_CI) {
scp->state = DN_NC;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
kfree_skb(skb);
@@ -884,8 +885,8 @@
/* both data and ack frames can kick a CC socket into RUN */
if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) {
scp->state = DN_RUN;
- sk->state = TCP_ESTABLISHED;
- sk->state_change(sk);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_state_change(sk);
}
if ((cb->nsp_flags & 0x1c) == 0)
diff -urN linux-2.5.70-bk11/net/decnet/dn_nsp_out.c linux-2.5.70-bk12/net/decnet/dn_nsp_out.c
--- linux-2.5.70-bk11/net/decnet/dn_nsp_out.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/decnet/dn_nsp_out.c 2003-06-07 04:47:50.000000000 -0700
@@ -92,20 +92,20 @@
}
memset(&fl, 0, sizeof(fl));
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fld_src = dn_saddr2dn(&scp->addr);
fl.fld_dst = dn_saddr2dn(&scp->peer);
dn_sk_ports_copy(&fl, scp);
fl.proto = DNPROTO_NSP;
- if (dn_route_output_sock(&sk->dst_cache, &fl, sk, 0) == 0) {
+ if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, 0) == 0) {
dst = sk_dst_get(sk);
- sk->route_caps = dst->dev->features;
+ sk->sk_route_caps = dst->dev->features;
goto try_again;
}
- sk->err = EHOSTUNREACH;
+ sk->sk_err = EHOSTUNREACH;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
@@ -155,40 +155,42 @@
break;
}
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
*err = EINVAL;
break;
}
- if (sk->err)
+ if (sk->sk_err)
break;
len = *size + 11;
- space = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ space = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (space < len) {
- if ((sk->socket->type == SOCK_STREAM) && (space >= (16 + 11)))
+ if ((sk->sk_socket->type == SOCK_STREAM) &&
+ (space >= (16 + 11)))
len = space;
}
if (space < len) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
if (noblock) {
*err = EWOULDBLOCK;
break;
}
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
SOCK_SLEEP_PRE(sk)
- if ((sk->sndbuf - atomic_read(&sk->wmem_alloc)) < len)
+ if ((sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc)) <
+ len)
schedule();
SOCK_SLEEP_POST(sk)
continue;
}
- if ((skb = dn_alloc_skb(sk, len, sk->allocation)) == NULL)
+ if ((skb = dn_alloc_skb(sk, len, sk->sk_allocation)) == NULL)
continue;
*size = len - 11;
@@ -546,7 +548,7 @@
struct sk_buff *skb = NULL;
struct nsp_conn_ack_msg *msg;
- if ((skb = dn_alloc_skb(sk, 3, sk->allocation)) == NULL)
+ if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
return;
msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3);
@@ -662,7 +664,7 @@
if (reason == 0)
reason = scp->discdata_out.opt_status;
- dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->dst_cache, ddl,
+ dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl,
scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
}
@@ -714,14 +716,15 @@
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
struct dn_scp *scp = DN_SK(sk);
- struct sk_buff *skb = NULL;
struct nsp_conn_init_msg *msg;
unsigned char aux;
unsigned char menuver;
struct dn_skb_cb *cb;
unsigned char type = 1;
+ int allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
+ struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);
- if ((skb = dn_alloc_skb(sk, 200, (msgflg == NSP_CI) ? sk->allocation : GFP_ATOMIC)) == NULL)
+ if (!skb)
return;
cb = DN_SKB_CB(skb);
diff -urN linux-2.5.70-bk11/net/decnet/dn_timer.c linux-2.5.70-bk12/net/decnet/dn_timer.c
--- linux-2.5.70-bk11/net/decnet/dn_timer.c 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/net/decnet/dn_timer.c 2003-06-07 04:47:50.000000000 -0700
@@ -38,16 +38,16 @@
void dn_start_slow_timer(struct sock *sk)
{
- sk->timer.expires = jiffies + SLOW_INTERVAL;
- sk->timer.function = dn_slow_timer;
- sk->timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
+ sk->sk_timer.function = dn_slow_timer;
+ sk->sk_timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void dn_stop_slow_timer(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
static void dn_slow_timer(unsigned long arg)
@@ -59,8 +59,8 @@
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk->timer.expires = jiffies + HZ / 10;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + HZ / 10;
+ add_timer(&sk->sk_timer);
goto out;
}
@@ -102,9 +102,9 @@
scp->keepalive_fxn(sk);
}
- sk->timer.expires = jiffies + SLOW_INTERVAL;
+ sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
out:
bh_unlock_sock(sk);
sock_put(sk);
diff -urN linux-2.5.70-bk11/net/decnet/netfilter/dn_rtmsg.c linux-2.5.70-bk12/net/decnet/netfilter/dn_rtmsg.c
--- linux-2.5.70-bk11/net/decnet/netfilter/dn_rtmsg.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/net/decnet/netfilter/dn_rtmsg.c 2003-06-07 04:47:50.000000000 -0700
@@ -120,7 +120,7 @@
{
struct sk_buff *skb;
- while((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
dnrmg_receive_user_skb(skb);
kfree_skb(skb);
}
@@ -145,7 +145,7 @@
rv = nf_register_hook(&dnrmg_ops);
if (rv) {
- sock_release(dnrmg->socket);
+ sock_release(dnrmg->sk_socket);
}
return rv;
@@ -154,7 +154,7 @@
static void __exit fini(void)
{
nf_unregister_hook(&dnrmg_ops);
- sock_release(dnrmg->socket);
+ sock_release(dnrmg->sk_socket);
}
diff -urN linux-2.5.70-bk11/net/econet/af_econet.c linux-2.5.70-bk12/net/econet/af_econet.c
--- linux-2.5.70-bk11/net/econet/af_econet.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/econet/af_econet.c 2003-06-07 04:47:50.000000000 -0700
@@ -101,10 +101,10 @@
while ((s = *list) != NULL) {
if (s == sk) {
- *list = s->next;
+ *list = s->sk_next;
break;
}
- list = &s->next;
+ list = &s->sk_next;
}
write_unlock_bh(&econet_lock);
@@ -115,7 +115,7 @@
static void econet_insert_socket(struct sock **list, struct sock *sk)
{
write_lock_bh(&econet_lock);
- sk->next = *list;
+ sk->sk_next = *list;
sock_hold(sk);
write_unlock_bh(&econet_lock);
}
@@ -170,7 +170,7 @@
err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
if (err)
goto out_free;
- sk->stamp=skb->stamp;
+ sk->sk_stamp = skb->stamp;
if (msg->msg_name)
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
@@ -364,7 +364,7 @@
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
if (err)
goto out_free;
@@ -500,13 +500,14 @@
{
struct sock *sk=(struct sock *)data;
- if (!atomic_read(&sk->wmem_alloc) && !atomic_read(&sk->rmem_alloc)) {
+ if (!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc)) {
sk_free(sk);
return;
}
- sk->timer.expires=jiffies+10*HZ;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ add_timer(&sk->sk_timer);
printk(KERN_DEBUG "econet socket destroy delayed\n");
}
@@ -527,21 +528,22 @@
* Now the socket is dead. No more input will appear.
*/
- sk->state_change(sk); /* It is useless. Just for sanity. */
+ sk->sk_state_change(sk); /* It is useless. Just for sanity. */
sock->sk = NULL;
- sk->socket = NULL;
+ sk->sk_socket = NULL;
sock_set_flag(sk, SOCK_DEAD);
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
- if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
- sk->timer.data=(unsigned long)sk;
- sk->timer.expires=jiffies+HZ;
- sk->timer.function=econet_destroy_timer;
- add_timer(&sk->timer);
+ if (atomic_read(&sk->sk_rmem_alloc) ||
+ atomic_read(&sk->sk_wmem_alloc)) {
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = econet_destroy_timer;
+ add_timer(&sk->sk_timer);
return 0;
}
@@ -570,7 +572,7 @@
if (sk == NULL)
goto out;
- sk->reuse = 1;
+ sk->sk_reuse = 1;
sock->ops = &econet_ops;
sock_init_data(sock,sk);
@@ -578,8 +580,8 @@
if (!eo)
goto out_free;
memset(eo, 0, sizeof(*eo));
- sk->zapped=0;
- sk->family = PF_ECONET;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_ECONET;
eo->num = protocol;
econet_insert_socket(&econet_sklist, sk);
@@ -671,9 +673,10 @@
switch(cmd) {
case SIOCGSTAMP:
- if(sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- return copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)) ? -EFAULT : 0;
case SIOCSIFADDR:
case SIOCGIFADDR:
return ec_dev_ioctl(sock, cmd, (void *)arg);
@@ -733,7 +736,7 @@
(opt->net == net || opt->net == 0))
return sk;
- sk = sk->next;
+ sk = sk->sk_next;
}
return NULL;
@@ -990,9 +993,9 @@
return error;
}
- udpsock->sk->reuse = 1;
- udpsock->sk->allocation = GFP_ATOMIC; /* we're going to call it
- from interrupts */
+ udpsock->sk->sk_reuse = 1;
+ udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
+ from interrupts */
error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
sizeof(sin));
@@ -1002,7 +1005,7 @@
goto release;
}
- udpsock->sk->data_ready = aun_data_available;
+ udpsock->sk->sk_data_ready = aun_data_available;
return 0;
diff -urN linux-2.5.70-bk11/net/ipv4/af_inet.c linux-2.5.70-bk12/net/ipv4/af_inet.c
--- linux-2.5.70-bk11/net/ipv4/af_inet.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/af_inet.c 2003-06-07 04:47:50.000000000 -0700
@@ -137,13 +137,12 @@
{
struct inet_opt *inet = inet_sk(sk);
- __skb_queue_purge(&sk->receive_queue);
- __skb_queue_purge(&sk->error_queue);
+ __skb_queue_purge(&sk->sk_receive_queue);
+ __skb_queue_purge(&sk->sk_error_queue);
- if (sk->type == SOCK_STREAM && sk->state != TCP_CLOSE) {
+ if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
printk("Attempt to release TCP socket in state %d %p\n",
- sk->state,
- sk);
+ sk->sk_state, sk);
return;
}
if (!sock_flag(sk, SOCK_DEAD)) {
@@ -151,14 +150,14 @@
return;
}
- BUG_TRAP(!atomic_read(&sk->rmem_alloc));
- BUG_TRAP(!atomic_read(&sk->wmem_alloc));
- BUG_TRAP(!sk->wmem_queued);
- BUG_TRAP(!sk->forward_alloc);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+ BUG_TRAP(!sk->sk_wmem_queued);
+ BUG_TRAP(!sk->sk_forward_alloc);
if (inet->opt)
kfree(inet->opt);
- dst_release(sk->dst_cache);
+ dst_release(sk->sk_dst_cache);
#ifdef INET_REFCNT_DEBUG
atomic_dec(&inet_sock_nr);
printk(KERN_DEBUG "INET socket %p released, %d are still alive\n",
@@ -168,8 +167,8 @@
void inet_sock_release(struct sock *sk)
{
- if (sk->prot->destroy)
- sk->prot->destroy(sk);
+ if (sk->sk_prot->destroy)
+ sk->sk_prot->destroy(sk);
/* Observation: when inet_sock_release is called, processes have
* no access to socket. But net still has.
@@ -178,7 +177,7 @@
* A. Remove from hash tables.
*/
- sk->prot->unhash(sk);
+ sk->sk_prot->unhash(sk);
/* In this point socket cannot receive new packets,
* but it is possible that some packets are in flight
@@ -198,9 +197,9 @@
xfrm_sk_free_policy(sk);
#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&sk->refcnt) != 1)
+ if (atomic_read(&sk->sk_refcnt) != 1)
printk(KERN_DEBUG "Destruction inet %p delayed, c=%d\n",
- sk, atomic_read(&sk->refcnt));
+ sk, atomic_read(&sk->sk_refcnt));
#endif
sock_put(sk);
}
@@ -220,7 +219,7 @@
{
struct sock *sk = sock->sk;
- return sk->prot->setsockopt(sk, level, optname, optval, optlen);
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
/*
@@ -236,7 +235,7 @@
{
struct sock *sk = sock->sk;
- return sk->prot->getsockopt(sk, level, optname, optval, optlen);
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
/*
@@ -250,7 +249,7 @@
lock_sock(sk);
inet = inet_sk(sk);
if (!inet->num) {
- if (sk->prot->get_port(sk, 0)) {
+ if (sk->sk_prot->get_port(sk, 0)) {
release_sock(sk);
return -EAGAIN;
}
@@ -275,7 +274,7 @@
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
goto out;
- old_state = sk->state;
+ old_state = sk->sk_state;
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
@@ -287,7 +286,7 @@
if (err)
goto out;
}
- sk->max_ack_backlog = backlog;
+ sk->sk_max_ack_backlog = backlog;
err = 0;
out:
@@ -368,10 +367,10 @@
goto out_sk_free;
err = 0;
sock->ops = answer->ops;
- sk->prot = answer->prot;
- sk->no_check = answer->no_check;
+ sk->sk_prot = answer->prot;
+ sk->sk_no_check = answer->no_check;
if (INET_PROTOSW_REUSE & answer->flags)
- sk->reuse = 1;
+ sk->sk_reuse = 1;
rcu_read_unlock();
inet = inet_sk(sk);
@@ -392,11 +391,11 @@
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = inet_sock_destruct;
- sk->zapped = 0;
- sk->family = PF_INET;
- sk->protocol = protocol;
- sk->backlog_rcv = sk->prot->backlog_rcv;
+ sk->sk_destruct = inet_sock_destruct;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_INET;
+ sk->sk_protocol = protocol;
+ sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
inet->uc_ttl = -1;
inet->mc_loop = 1;
@@ -416,11 +415,11 @@
*/
inet->sport = htons(inet->num);
/* Add to protocol hash chains. */
- sk->prot->hash(sk);
+ sk->sk_prot->hash(sk);
}
- if (sk->prot->init) {
- err = sk->prot->init(sk);
+ if (sk->sk_prot->init) {
+ err = sk->sk_prot->init(sk);
if (err)
inet_sock_release(sk);
}
@@ -458,9 +457,9 @@
timeout = 0;
if (sock_flag(sk, SOCK_LINGER) &&
!(current->flags & PF_EXITING))
- timeout = sk->lingertime;
+ timeout = sk->sk_lingertime;
sock->sk = NULL;
- sk->prot->close(sk, timeout);
+ sk->sk_prot->close(sk, timeout);
}
return 0;
}
@@ -478,8 +477,8 @@
int err;
/* If the socket has its own bind function then use it. (RAW) */
- if (sk->prot->bind) {
- err = sk->prot->bind(sk, uaddr, addr_len);
+ if (sk->sk_prot->bind) {
+ err = sk->sk_prot->bind(sk, uaddr, addr_len);
goto out;
}
err = -EINVAL;
@@ -520,7 +519,7 @@
/* Check these errors (active socket, double bind). */
err = -EINVAL;
- if (sk->state != TCP_CLOSE || inet->num)
+ if (sk->sk_state != TCP_CLOSE || inet->num)
goto out_release_sock;
inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
@@ -528,16 +527,16 @@
inet->saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
- if (sk->prot->get_port(sk, snum)) {
+ if (sk->sk_prot->get_port(sk, snum)) {
inet->saddr = inet->rcv_saddr = 0;
err = -EADDRINUSE;
goto out_release_sock;
}
if (inet->rcv_saddr)
- sk->userlocks |= SOCK_BINDADDR_LOCK;
+ sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
- sk->userlocks |= SOCK_BINDPORT_LOCK;
+ sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->sport = htons(inet->num);
inet->daddr = 0;
inet->dport = 0;
@@ -555,33 +554,33 @@
struct sock *sk = sock->sk;
if (uaddr->sa_family == AF_UNSPEC)
- return sk->prot->disconnect(sk, flags);
+ return sk->sk_prot->disconnect(sk, flags);
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
- return sk->prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
+ return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
}
static long inet_wait_for_connect(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- /* Basic assumption: if someone sets sk->err, he _must_
+ /* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
* Connect() does not allow to get error notifications
* without closing the socket.
*/
- while ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return timeo;
}
@@ -599,7 +598,7 @@
lock_sock(sk);
if (uaddr->sa_family == AF_UNSPEC) {
- err = sk->prot->disconnect(sk, flags);
+ err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
}
@@ -617,10 +616,10 @@
break;
case SS_UNCONNECTED:
err = -EISCONN;
- if (sk->state != TCP_CLOSE)
+ if (sk->sk_state != TCP_CLOSE)
goto out;
- err = sk->prot->connect(sk, uaddr, addr_len);
+ err = sk->sk_prot->connect(sk, uaddr, addr_len);
if (err < 0)
goto out;
@@ -636,7 +635,7 @@
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
- if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo))
goto out;
@@ -649,10 +648,10 @@
/* Connection was closed by RST, timeout, ICMP error
* or another process disconnected us.
*/
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
goto sock_error;
- /* sk->err may be not zero now, if RECVERR was ordered by user
+ /* sk->sk_err may be not zero now, if RECVERR was ordered by user
* and error was received after socket entered established state.
* Hence, it is handled normally after connect() return successfully.
*/
@@ -666,7 +665,7 @@
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
- if (sk->prot->disconnect(sk, flags))
+ if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
}
@@ -679,14 +678,14 @@
{
struct sock *sk1 = sock->sk;
int err = -EINVAL;
- struct sock *sk2 = sk1->prot->accept(sk1, flags, &err);
+ struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
if (!sk2)
goto do_err;
lock_sock(sk2);
- BUG_TRAP((1 << sk2->state) &
+ BUG_TRAP((1 << sk2->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
sock_graft(sk2, newsock);
@@ -712,7 +711,7 @@
sin->sin_family = AF_INET;
if (peer) {
if (!inet->dport ||
- (((1 << sk->state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
+ (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
peer == 1))
return -ENOTCONN;
sin->sin_port = inet->dport;
@@ -737,8 +736,8 @@
int addr_len = 0;
int err;
- err = sk->prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, &addr_len);
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
@@ -754,7 +753,7 @@
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
- return sk->prot->sendmsg(iocb, sk, msg, size);
+ return sk->sk_prot->sendmsg(iocb, sk, msg, size);
}
@@ -766,8 +765,8 @@
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
- if (sk->prot->sendpage)
- return sk->prot->sendpage(sk, page, offset, size, flags);
+ if (sk->sk_prot->sendpage)
+ return sk->sk_prot->sendpage(sk, page, offset, size, flags);
return sock_no_sendpage(sock, page, offset, size, flags);
}
@@ -788,22 +787,22 @@
lock_sock(sk);
if (sock->state == SS_CONNECTING) {
- if ((1 << sk->state) &
+ if ((1 << sk->sk_state) &
(TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
sock->state = SS_DISCONNECTING;
else
sock->state = SS_CONNECTED;
}
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_CLOSE:
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
POLLHUP, even on eg. unconnected UDP sockets -- RR */
default:
- sk->shutdown |= how;
- if (sk->prot->shutdown)
- sk->prot->shutdown(sk, how);
+ sk->sk_shutdown |= how;
+ if (sk->sk_prot->shutdown)
+ sk->sk_prot->shutdown(sk, how);
break;
/* Remaining two branches are temporary solution for missing
@@ -815,13 +814,13 @@
break;
/* Fall through */
case TCP_SYN_SENT:
- err = sk->prot->disconnect(sk, O_NONBLOCK);
+ err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
break;
}
/* Wake up anyone sleeping in poll. */
- sk->state_change(sk);
+ sk->sk_state_change(sk);
release_sock(sk);
return err;
}
@@ -843,9 +842,9 @@
switch (cmd) {
case SIOCGSTAMP:
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
err = -ENOENT;
- else if (copy_to_user((void *)arg, &sk->stamp,
+ else if (copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
err = -EFAULT;
break;
@@ -873,8 +872,8 @@
err = devinet_ioctl(cmd, (void *)arg);
break;
default:
- if (!sk->prot->ioctl ||
- (err = sk->prot->ioctl(sk, cmd, arg)) ==
+ if (!sk->sk_prot->ioctl ||
+ (err = sk->sk_prot->ioctl(sk, cmd, arg)) ==
-ENOIOCTLCMD)
err = dev_ioctl(cmd, (void *)arg);
break;
@@ -1068,54 +1067,22 @@
static int __init init_ipv4_mibs(void)
{
- int i;
-
- net_statistics[0] =
- kmalloc_percpu(sizeof (struct linux_mib), GFP_KERNEL);
- net_statistics[1] =
- kmalloc_percpu(sizeof (struct linux_mib), GFP_KERNEL);
- ip_statistics[0] = kmalloc_percpu(sizeof (struct ip_mib), GFP_KERNEL);
- ip_statistics[1] = kmalloc_percpu(sizeof (struct ip_mib), GFP_KERNEL);
- icmp_statistics[0] =
- kmalloc_percpu(sizeof (struct icmp_mib), GFP_KERNEL);
- icmp_statistics[1] =
- kmalloc_percpu(sizeof (struct icmp_mib), GFP_KERNEL);
- tcp_statistics[0] = kmalloc_percpu(sizeof (struct tcp_mib), GFP_KERNEL);
- tcp_statistics[1] = kmalloc_percpu(sizeof (struct tcp_mib), GFP_KERNEL);
- udp_statistics[0] = kmalloc_percpu(sizeof (struct udp_mib), GFP_KERNEL);
- udp_statistics[1] = kmalloc_percpu(sizeof (struct udp_mib), GFP_KERNEL);
+ net_statistics[0] = alloc_percpu(struct linux_mib);
+ net_statistics[1] = alloc_percpu(struct linux_mib);
+ ip_statistics[0] = alloc_percpu(struct ip_mib);
+ ip_statistics[1] = alloc_percpu(struct ip_mib);
+ icmp_statistics[0] = alloc_percpu(struct icmp_mib);
+ icmp_statistics[1] = alloc_percpu(struct icmp_mib);
+ tcp_statistics[0] = alloc_percpu(struct tcp_mib);
+ tcp_statistics[1] = alloc_percpu(struct tcp_mib);
+ udp_statistics[0] = alloc_percpu(struct udp_mib);
+ udp_statistics[1] = alloc_percpu(struct udp_mib);
if (!
(net_statistics[0] && net_statistics[1] && ip_statistics[0]
&& ip_statistics[1] && tcp_statistics[0] && tcp_statistics[1]
&& udp_statistics[0] && udp_statistics[1]))
return -ENOMEM;
- /* Set all the per cpu copies of the mibs to zero */
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_possible(i)) {
- memset(per_cpu_ptr(net_statistics[0], i), 0,
- sizeof (struct linux_mib));
- memset(per_cpu_ptr(net_statistics[1], i), 0,
- sizeof (struct linux_mib));
- memset(per_cpu_ptr(ip_statistics[0], i), 0,
- sizeof (struct ip_mib));
- memset(per_cpu_ptr(ip_statistics[1], i), 0,
- sizeof (struct ip_mib));
- memset(per_cpu_ptr(icmp_statistics[0], i), 0,
- sizeof (struct icmp_mib));
- memset(per_cpu_ptr(icmp_statistics[1], i), 0,
- sizeof (struct icmp_mib));
- memset(per_cpu_ptr(tcp_statistics[0], i), 0,
- sizeof (struct tcp_mib));
- memset(per_cpu_ptr(tcp_statistics[1], i), 0,
- sizeof (struct tcp_mib));
- memset(per_cpu_ptr(udp_statistics[0], i), 0,
- sizeof (struct udp_mib));
- memset(per_cpu_ptr(udp_statistics[1], i), 0,
- sizeof (struct udp_mib));
- }
- }
-
(void) tcp_mib_init();
return 0;
diff -urN linux-2.5.70-bk11/net/ipv4/icmp.c linux-2.5.70-bk12/net/ipv4/icmp.c
--- linux-2.5.70-bk11/net/ipv4/icmp.c 2003-05-26 18:00:58.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/icmp.c 2003-06-07 04:47:51.000000000 -0700
@@ -234,13 +234,13 @@
{
local_bh_disable();
- if (unlikely(!spin_trylock(&icmp_socket->sk->lock.slock)))
+ if (unlikely(!spin_trylock(&icmp_socket->sk->sk_lock.slock)))
BUG();
}
static void icmp_xmit_unlock(void)
{
- spin_unlock_bh(&icmp_socket->sk->lock.slock);
+ spin_unlock_bh(&icmp_socket->sk->sk_lock.slock);
}
/*
@@ -344,12 +344,12 @@
icmp_param->head_len,
ipc, rt, MSG_DONTWAIT);
- if ((skb = skb_peek(&icmp_socket->sk->write_queue)) != NULL) {
+ if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = skb->h.icmph;
unsigned int csum = 0;
struct sk_buff *skb1;
- skb_queue_walk(&icmp_socket->sk->write_queue, skb1) {
+ skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum);
}
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
@@ -685,7 +685,7 @@
iph->saddr,
skb->dev->ifindex)) != NULL) {
raw_err(raw_sk, skb, info);
- raw_sk = raw_sk->next;
+ raw_sk = raw_sk->sk_next;
iph = (struct iphdr *)skb->data;
}
}
@@ -1101,8 +1101,8 @@
if (err < 0)
panic("Failed to create the ICMP control socket.\n");
- per_cpu(__icmp_socket, i)->sk->allocation = GFP_ATOMIC;
- per_cpu(__icmp_socket, i)->sk->sndbuf = SK_WMEM_MAX * 2;
+ per_cpu(__icmp_socket, i)->sk->sk_allocation = GFP_ATOMIC;
+ per_cpu(__icmp_socket, i)->sk->sk_sndbuf = SK_WMEM_MAX * 2;
inet = inet_sk(per_cpu(__icmp_socket, i)->sk);
inet->uc_ttl = -1;
inet->pmtudisc = IP_PMTUDISC_DONT;
@@ -1111,6 +1111,6 @@
* see it, we do not wish this socket to see incoming
* packets.
*/
- per_cpu(__icmp_socket, i)->sk->prot->unhash(per_cpu(__icmp_socket, i)->sk);
+ per_cpu(__icmp_socket, i)->sk->sk_prot->unhash(per_cpu(__icmp_socket, i)->sk);
}
}
diff -urN linux-2.5.70-bk11/net/ipv4/ip_input.c linux-2.5.70-bk12/net/ipv4/ip_input.c
--- linux-2.5.70-bk11/net/ipv4/ip_input.c 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/ip_input.c 2003-06-07 04:47:51.000000000 -0700
@@ -167,9 +167,9 @@
/* If socket is bound to an interface, only report
* the packet if it came from that interface.
*/
- if (sk && inet_sk(sk)->num == protocol
- && ((sk->bound_dev_if == 0)
- || (sk->bound_dev_if == skb->dev->ifindex))) {
+ if (sk && inet_sk(sk)->num == protocol &&
+ (!sk->sk_bound_dev_if ||
+ sk->sk_bound_dev_if == skb->dev->ifindex)) {
if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
skb = ip_defrag(skb);
if (skb == NULL) {
diff -urN linux-2.5.70-bk11/net/ipv4/ip_output.c linux-2.5.70-bk12/net/ipv4/ip_output.c
--- linux-2.5.70-bk11/net/ipv4/ip_output.c 2003-05-26 18:01:00.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/ip_output.c 2003-06-07 04:47:51.000000000 -0700
@@ -148,7 +148,7 @@
iph->ttl = ip_select_ttl(inet, &rt->u.dst);
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
- iph->protocol = sk->protocol;
+ iph->protocol = sk->sk_protocol;
iph->tot_len = htons(skb->len);
ip_select_ident(iph, &rt->u.dst, sk);
skb->nh.iph = iph;
@@ -159,7 +159,7 @@
}
ip_send_check(iph);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
/* Send it out. */
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
@@ -316,12 +316,12 @@
daddr = opt->faddr;
{
- struct flowi fl = { .oif = sk->bound_dev_if,
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = daddr,
.saddr = inet->saddr,
.tos = RT_CONN_FLAGS(sk) } },
- .proto = sk->protocol,
+ .proto = sk->sk_protocol,
.uli_u = { .ports =
{ .sport = inet->sport,
.dport = inet->dport } } };
@@ -351,7 +351,7 @@
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->u.dst);
- iph->protocol = sk->protocol;
+ iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
skb->nh.iph = iph;
@@ -363,7 +363,7 @@
}
mtu = dst_pmtu(&rt->u.dst);
- if (skb->len > mtu && (sk->route_caps&NETIF_F_TSO)) {
+ if (skb->len > mtu && (sk->sk_route_caps & NETIF_F_TSO)) {
unsigned int hlen;
/* Hack zone: all this must be done by TCP. */
@@ -379,7 +379,7 @@
/* Add an IP checksum. */
ip_send_check(iph);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
@@ -739,14 +739,14 @@
if (flags&MSG_PROBE)
return 0;
- if (skb_queue_empty(&sk->write_queue)) {
+ if (skb_queue_empty(&sk->sk_write_queue)) {
/*
* setup for corking.
*/
opt = ipc->opt;
if (opt) {
if (inet->cork.opt == NULL)
- inet->cork.opt = kmalloc(sizeof(struct ip_options)+40, sk->allocation);
+ inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
inet->cork.flags |= IPCORK_OPT;
inet->cork.addr = ipc->addr;
@@ -805,7 +805,7 @@
* it is not necessary. Not a big bug, but needs a fix.
*/
- if ((skb = skb_peek_tail(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
goto alloc_new_skb;
while (length > 0) {
@@ -842,10 +842,11 @@
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
- if (atomic_read(&sk->wmem_alloc) <= 2*sk->sndbuf)
+ if (atomic_read(&sk->sk_wmem_alloc) <=
+ 2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
- sk->allocation);
+ sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
}
@@ -883,7 +884,7 @@
/*
* Put the packet on the pending queue.
*/
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
@@ -922,7 +923,7 @@
} else if (i < MAX_SKB_FRAGS) {
if (copy > PAGE_SIZE)
copy = PAGE_SIZE;
- page = alloc_pages(sk->allocation, 0);
+ page = alloc_pages(sk->sk_allocation, 0);
if (page == NULL) {
err = -ENOMEM;
goto error;
@@ -933,7 +934,7 @@
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
skb->truesize += PAGE_SIZE;
- atomic_add(PAGE_SIZE, &sk->wmem_alloc);
+ atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
} else {
err = -EMSGSIZE;
goto error;
@@ -978,7 +979,7 @@
if (flags&MSG_PROBE)
return 0;
- if (skb_queue_empty(&sk->write_queue))
+ if (skb_queue_empty(&sk->sk_write_queue))
return -EINVAL;
rt = inet->cork.rt;
@@ -999,7 +1000,7 @@
return -EMSGSIZE;
}
- if ((skb = skb_peek_tail(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
return -EINVAL;
inet->cork.length += size;
@@ -1012,7 +1013,7 @@
BUG_TRAP(len == 0);
skb = sock_wmalloc(sk, fragheaderlen + hh_len + 15, 1,
- sk->allocation);
+ sk->sk_allocation);
if (unlikely(!skb)) {
err = -ENOBUFS;
goto error;
@@ -1036,7 +1037,7 @@
/*
* Put the packet on the pending queue.
*/
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
@@ -1088,14 +1089,14 @@
__u8 ttl;
int err = 0;
- if ((skb = __skb_dequeue(&sk->write_queue)) == NULL)
+ if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb->nh.raw)
__skb_pull(skb, skb->nh.raw - skb->data);
- while ((tmp_skb = __skb_dequeue(&sk->write_queue)) != NULL) {
+ while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
@@ -1147,12 +1148,12 @@
iph->id = htons(inet->id++);
}
iph->ttl = ttl;
- iph->protocol = sk->protocol;
+ iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
ip_send_check(iph);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst);
/* Netfilter gets whole the not fragmented skb. */
@@ -1186,7 +1187,7 @@
struct inet_opt *inet = inet_sk(sk);
struct sk_buff *skb;
- while ((skb = __skb_dequeue_tail(&sk->write_queue)) != NULL)
+ while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
inet->cork.flags &= ~IPCORK_OPT;
@@ -1257,7 +1258,7 @@
.uli_u = { .ports =
{ .sport = skb->h.th->dest,
.dport = skb->h.th->source } },
- .proto = sk->protocol };
+ .proto = sk->sk_protocol };
if (ip_route_output_key(&rt, &fl))
return;
}
@@ -1270,11 +1271,11 @@
*/
bh_lock_sock(sk);
inet->tos = skb->nh.iph->tos;
- sk->priority = skb->priority;
- sk->protocol = skb->nh.iph->protocol;
+ sk->sk_priority = skb->priority;
+ sk->sk_protocol = skb->nh.iph->protocol;
ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, rt, MSG_DONTWAIT);
- if ((skb = skb_peek(&sk->write_queue)) != NULL) {
+ if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
if (arg->csumoffset >= 0)
*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
skb->ip_summed = CHECKSUM_NONE;
diff -urN linux-2.5.70-bk11/net/ipv4/ip_sockglue.c linux-2.5.70-bk12/net/ipv4/ip_sockglue.c
--- linux-2.5.70-bk11/net/ipv4/ip_sockglue.c 2003-05-26 18:00:27.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/ip_sockglue.c 2003-06-07 04:47:51.000000000 -0700
@@ -194,7 +194,7 @@
{
struct ip_ra_chain *ra, *new_ra, **rap;
- if (sk->type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW)
+ if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW)
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
@@ -315,7 +315,7 @@
int copied;
err = -EAGAIN;
- skb = skb_dequeue(&sk->error_queue);
+ skb = skb_dequeue(&sk->sk_error_queue);
if (skb == NULL)
goto out;
@@ -362,15 +362,14 @@
err = copied;
/* Reset and regenerate socket error */
- spin_lock_irq(&sk->error_queue.lock);
- sk->err = 0;
- if ((skb2 = skb_peek(&sk->error_queue)) != NULL) {
- sk->err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_irq(&sk->error_queue.lock);
- sk->error_report(sk);
- } else {
- spin_unlock_irq(&sk->error_queue.lock);
- }
+ spin_lock_irq(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_irq(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
+ } else
+ spin_unlock_irq(&sk->sk_error_queue.lock);
out_free_skb:
kfree_skb(skb);
@@ -431,12 +430,13 @@
err = ip_options_get(&opt, optval, optlen, 1);
if (err)
break;
- if (sk->type == SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
struct tcp_opt *tp = tcp_sk(sk);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- if (sk->family == PF_INET ||
- (!((1<state)&(TCPF_LISTEN|TCPF_CLOSE))
- && inet->daddr != LOOPBACK4_IPV6)) {
+ if (sk->sk_family == PF_INET ||
+ (!((1 << sk->sk_state) &
+ (TCPF_LISTEN | TCPF_CLOSE)) &&
+ inet->daddr != LOOPBACK4_IPV6)) {
#endif
if (inet->opt)
tp->ext_header_len -= inet->opt->optlen;
@@ -483,7 +483,7 @@
inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
break;
case IP_TOS: /* This sets both TOS and Precedence */
- if (sk->type == SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
val &= ~3;
val |= inet->tos & 3;
}
@@ -494,7 +494,7 @@
}
if (inet->tos != val) {
inet->tos = val;
- sk->priority = rt_tos2priority(val);
+ sk->sk_priority = rt_tos2priority(val);
sk_dst_reset(sk);
}
break;
@@ -506,7 +506,7 @@
inet->uc_ttl = val;
break;
case IP_HDRINCL:
- if(sk->type!=SOCK_RAW) {
+ if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
@@ -520,10 +520,10 @@
case IP_RECVERR:
inet->recverr = !!val;
if (!val)
- skb_queue_purge(&sk->error_queue);
+ skb_queue_purge(&sk->sk_error_queue);
break;
case IP_MULTICAST_TTL:
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (optlen<1)
goto e_inval;
@@ -543,7 +543,7 @@
struct ip_mreqn mreq;
struct net_device *dev = NULL;
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
/*
* Check the arguments are allowable
@@ -581,7 +581,8 @@
break;
err = -EINVAL;
- if (sk->bound_dev_if && mreq.imr_ifindex != sk->bound_dev_if)
+ if (sk->sk_bound_dev_if &&
+ mreq.imr_ifindex != sk->sk_bound_dev_if)
break;
inet->mc_index = mreq.imr_ifindex;
@@ -998,7 +999,7 @@
release_sock(sk);
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
diff -urN linux-2.5.70-bk11/net/ipv4/ipmr.c linux-2.5.70-bk12/net/ipv4/ipmr.c
--- linux-2.5.70-bk11/net/ipv4/ipmr.c 2003-05-26 18:00:39.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/ipmr.c 2003-06-07 04:47:51.000000000 -0700
@@ -860,7 +860,8 @@
switch(optname)
{
case MRT_INIT:
- if(sk->type!=SOCK_RAW || inet_sk(sk)->num!=IPPROTO_IGMP)
+ if (sk->sk_type != SOCK_RAW ||
+ inet_sk(sk)->num != IPPROTO_IGMP)
return -EOPNOTSUPP;
if(optlen!=sizeof(int))
return -ENOPROTOOPT;
diff -urN linux-2.5.70-bk11/net/ipv4/netfilter/ip_conntrack_core.c linux-2.5.70-bk12/net/ipv4/netfilter/ip_conntrack_core.c
--- linux-2.5.70-bk11/net/ipv4/netfilter/ip_conntrack_core.c 2003-05-26 18:00:19.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/netfilter/ip_conntrack_core.c 2003-06-07 04:47:51.000000000 -0700
@@ -1288,7 +1288,7 @@
IPPROTO_TCP } };
/* We only do TCP at the moment: is there a better way? */
- if (strcmp(sk->prot->name, "TCP") != 0) {
+ if (strcmp(sk->sk_prot->name, "TCP")) {
DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
return -ENOPROTOOPT;
}
diff -urN linux-2.5.70-bk11/net/ipv4/netfilter/ip_queue.c linux-2.5.70-bk12/net/ipv4/netfilter/ip_queue.c
--- linux-2.5.70-bk11/net/ipv4/netfilter/ip_queue.c 2003-05-26 18:01:03.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/netfilter/ip_queue.c 2003-06-07 04:47:51.000000000 -0700
@@ -535,14 +535,14 @@
if (down_trylock(&ipqnl_sem))
return;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
ipq_rcv_skb(skb);
kfree_skb(skb);
}
up(&ipqnl_sem);
- } while (ipqnl && ipqnl->receive_queue.qlen);
+ } while (ipqnl && ipqnl->sk_receive_queue.qlen);
}
static int
@@ -691,7 +691,7 @@
proc_net_remove(IPQ_PROC_FS_NAME);
cleanup_ipqnl:
- sock_release(ipqnl->socket);
+ sock_release(ipqnl->sk_socket);
down(&ipqnl_sem);
up(&ipqnl_sem);
diff -urN linux-2.5.70-bk11/net/ipv4/netfilter/ipchains_core.c linux-2.5.70-bk12/net/ipv4/netfilter/ipchains_core.c
--- linux-2.5.70-bk11/net/ipv4/netfilter/ipchains_core.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/netfilter/ipchains_core.c 2003-06-07 04:47:51.000000000 -0700
@@ -1836,7 +1836,7 @@
cleanup_netlink:
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
- sock_release(ipfwsk->socket);
+ sock_release(ipfwsk->sk_socket);
cleanup_nothing:
#endif
diff -urN linux-2.5.70-bk11/net/ipv4/netfilter/ipfwadm_core.c linux-2.5.70-bk12/net/ipv4/netfilter/ipfwadm_core.c
--- linux-2.5.70-bk11/net/ipv4/netfilter/ipfwadm_core.c 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/netfilter/ipfwadm_core.c 2003-06-07 04:47:51.000000000 -0700
@@ -1435,7 +1435,7 @@
cleanup:
#ifdef CONFIG_IP_FIREWALL_NETLINK
- sock_release(ipfwsk->socket);
+ sock_release(ipfwsk->sk_socket);
#endif
unregister_netdevice_notifier(&ipfw_dev_notifier);
diff -urN linux-2.5.70-bk11/net/ipv4/netfilter/ipt_ULOG.c linux-2.5.70-bk12/net/ipv4/netfilter/ipt_ULOG.c
--- linux-2.5.70-bk11/net/ipv4/netfilter/ipt_ULOG.c 2003-05-26 18:00:37.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/netfilter/ipt_ULOG.c 2003-06-07 04:47:51.000000000 -0700
@@ -336,7 +336,7 @@
return -ENOMEM;
if (ipt_register_target(&ipt_ulog_reg) != 0) {
- sock_release(nflognl->socket);
+ sock_release(nflognl->sk_socket);
return -EINVAL;
}
@@ -351,7 +351,7 @@
DEBUGP("ipt_ULOG: cleanup_module\n");
ipt_unregister_target(&ipt_ulog_reg);
- sock_release(nflognl->socket);
+ sock_release(nflognl->sk_socket);
/* remove pending timers and free allocated skb's */
for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
diff -urN linux-2.5.70-bk11/net/ipv4/netfilter/ipt_owner.c linux-2.5.70-bk12/net/ipv4/netfilter/ipt_owner.c
--- linux-2.5.70-bk11/net/ipv4/netfilter/ipt_owner.c 2003-05-26 18:00:58.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/netfilter/ipt_owner.c 2003-06-07 04:47:51.000000000 -0700
@@ -28,7 +28,8 @@
if(files) {
spin_lock(&files->file_lock);
for (i=0; i < files->max_fds; i++) {
- if (fcheck_files(files, i) == skb->sk->socket->file) {
+ if (fcheck_files(files, i) ==
+ skb->sk->sk_socket->file) {
spin_unlock(&files->file_lock);
task_unlock(p);
read_unlock(&tasklist_lock);
@@ -59,7 +60,8 @@
if(files) {
spin_lock(&files->file_lock);
for (i=0; i < files->max_fds; i++) {
- if (fcheck_files(files, i) == skb->sk->socket->file) {
+ if (fcheck_files(files, i) ==
+ skb->sk->sk_socket->file) {
spin_unlock(&files->file_lock);
task_unlock(p);
read_unlock(&tasklist_lock);
@@ -78,7 +80,7 @@
match_sid(const struct sk_buff *skb, pid_t sid)
{
struct task_struct *g, *p;
- struct file *file = skb->sk->socket->file;
+ struct file *file = skb->sk->sk_socket->file;
int i, found=0;
read_lock(&tasklist_lock);
@@ -119,17 +121,17 @@
{
const struct ipt_owner_info *info = matchinfo;
- if (!skb->sk || !skb->sk->socket || !skb->sk->socket->file)
+ if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
return 0;
if(info->match & IPT_OWNER_UID) {
- if((skb->sk->socket->file->f_uid != info->uid) ^
+ if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
!!(info->invert & IPT_OWNER_UID))
return 0;
}
if(info->match & IPT_OWNER_GID) {
- if((skb->sk->socket->file->f_gid != info->gid) ^
+ if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
!!(info->invert & IPT_OWNER_GID))
return 0;
}
diff -urN linux-2.5.70-bk11/net/ipv4/raw.c linux-2.5.70-bk12/net/ipv4/raw.c
--- linux-2.5.70-bk11/net/ipv4/raw.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/raw.c 2003-06-07 04:47:51.000000000 -0700
@@ -89,11 +89,11 @@
(RAWV4_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v4_lock);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
write_unlock_bh(&raw_v4_lock);
}
@@ -101,12 +101,12 @@
static void raw_v4_unhash(struct sock *sk)
{
write_lock_bh(&raw_v4_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&raw_v4_lock);
@@ -118,13 +118,13 @@
{
struct sock *s = sk;
- for (s = sk; s; s = s->next) {
+ for (; s; s = s->sk_next) {
struct inet_opt *inet = inet_sk(s);
if (inet->num == num &&
!(inet->daddr && inet->daddr != raddr) &&
!(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
- !(s->bound_dev_if && s->bound_dev_if != dif))
+ !(s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
break; /* gotcha */
}
return s;
@@ -174,7 +174,7 @@
if (clone)
raw_rcv(sk, clone);
}
- sk = __raw_v4_lookup(sk->next, iph->protocol,
+ sk = __raw_v4_lookup(sk->sk_next, iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
}
@@ -195,7 +195,7 @@
2. Socket is connected (otherwise the error indication
is useless without ip_recverr and error is hard.
*/
- if (!inet->recverr && sk->state != TCP_ESTABLISHED)
+ if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
switch (type) {
@@ -231,8 +231,8 @@
}
if (inet->recverr || harderr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
}
}
@@ -288,7 +288,7 @@
goto error;
skb_reserve(skb, hh_len);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst);
skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
@@ -390,14 +390,14 @@
*/
} else {
err = -EINVAL;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->daddr;
}
ipc.addr = inet->saddr;
ipc.opt = NULL;
- ipc.oif = sk->bound_dev_if;
+ ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(msg, &ipc);
@@ -426,7 +426,7 @@
daddr = ipc.opt->faddr;
}
}
- tos = RT_TOS(inet->tos) | sk->localroute;
+ tos = RT_TOS(inet->tos) | sk->sk_localroute;
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
@@ -443,7 +443,9 @@
{ .daddr = daddr,
.saddr = saddr,
.tos = tos } },
- .proto = inet->hdrincl ? IPPROTO_RAW : sk->protocol };
+ .proto = inet->hdrincl ? IPPROTO_RAW :
+ sk->sk_protocol,
+ };
err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
}
if (err)
@@ -506,7 +508,7 @@
int ret = -EINVAL;
int chk_addr_ret;
- if (sk->state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
+ if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
goto out;
chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
ret = -EADDRNOTAVAIL;
@@ -645,18 +647,18 @@
{
switch (cmd) {
case SIOCOUTQ: {
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->len;
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
@@ -700,8 +702,8 @@
for (state->bucket = 0; state->bucket < RAWV4_HTABLE_SIZE; ++state->bucket) {
sk = raw_v4_htable[state->bucket];
- while (sk && sk->family != PF_INET)
- sk = sk->next;
+ while (sk && sk->sk_family != PF_INET)
+ sk = sk->sk_next;
if (sk)
break;
}
@@ -713,10 +715,10 @@
struct raw_iter_state* state = raw_seq_private(seq);
do {
- sk = sk->next;
+ sk = sk->sk_next;
try_again:
;
- } while (sk && sk->family != PF_INET);
+ } while (sk && sk->sk_family != PF_INET);
if (!sk && ++state->bucket < RAWV4_HTABLE_SIZE) {
sk = raw_v4_htable[state->bucket];
@@ -768,10 +770,11 @@
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
- i, src, srcp, dest, destp, sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ i, src, srcp, dest, destp, sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
return tmpbuf;
}
diff -urN linux-2.5.70-bk11/net/ipv4/route.c linux-2.5.70-bk12/net/ipv4/route.c
--- linux-2.5.70-bk11/net/ipv4/route.c 2003-05-26 18:01:00.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/route.c 2003-06-07 04:47:51.000000000 -0700
@@ -2694,16 +2694,9 @@
ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
ip_rt_max_size = (rt_hash_mask + 1) * 16;
- rt_cache_stat = kmalloc_percpu(sizeof (struct rt_cache_stat),
- GFP_KERNEL);
+ rt_cache_stat = alloc_percpu(struct rt_cache_stat);
if (!rt_cache_stat)
goto out_enomem1;
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_possible(i)) {
- memset(per_cpu_ptr(rt_cache_stat, i), 0,
- sizeof (struct rt_cache_stat));
- }
- }
devinet_init();
ip_fib_init();
@@ -2739,7 +2732,7 @@
out:
return rc;
out_enomem:
- kfree_percpu(rt_cache_stat);
+ free_percpu(rt_cache_stat);
out_enomem1:
rc = -ENOMEM;
goto out;
diff -urN linux-2.5.70-bk11/net/ipv4/syncookies.c linux-2.5.70-bk12/net/ipv4/syncookies.c
--- linux-2.5.70-bk11/net/ipv4/syncookies.c 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/syncookies.c 2003-06-07 04:47:51.000000000 -0700
@@ -103,7 +103,7 @@
child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
if (child) {
- sk_set_owner(child, sk->owner);
+ sk_set_owner(child, sk->sk_owner);
tcp_acceptq_queue(sk, req, child);
} else
tcp_openreq_free(req);
diff -urN linux-2.5.70-bk11/net/ipv4/tcp.c linux-2.5.70-bk12/net/ipv4/tcp.c
--- linux-2.5.70-bk11/net/ipv4/tcp.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/tcp.c 2003-06-07 04:47:51.000000000 -0700
@@ -294,7 +294,7 @@
{
int amt = TCP_PAGES(size);
- sk->forward_alloc += amt * TCP_MEM_QUANTUM;
+ sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM;
atomic_add(amt, &tcp_memory_allocated);
/* Under limit. */
@@ -315,18 +315,16 @@
tcp_enter_memory_pressure();
if (kind) {
- if (atomic_read(&sk->rmem_alloc) < sysctl_tcp_rmem[0])
+ if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0])
return 1;
- } else {
- if (sk->wmem_queued < sysctl_tcp_wmem[0])
- return 1;
- }
+ } else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0])
+ return 1;
if (!tcp_memory_pressure ||
sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) *
- TCP_PAGES(sk->wmem_queued +
- atomic_read(&sk->rmem_alloc) +
- sk->forward_alloc))
+ TCP_PAGES(sk->sk_wmem_queued +
+ atomic_read(&sk->sk_rmem_alloc) +
+ sk->sk_forward_alloc))
return 1;
suppress_allocation:
@@ -337,22 +335,22 @@
/* Fail only if socket is _under_ its sndbuf.
* In this case we cannot block, so that we have to fail.
*/
- if (sk->wmem_queued + size >= sk->sndbuf)
+ if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
return 1;
}
/* Alas. Undo changes. */
- sk->forward_alloc -= amt * TCP_MEM_QUANTUM;
+ sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM;
atomic_sub(amt, &tcp_memory_allocated);
return 0;
}
void __tcp_mem_reclaim(struct sock *sk)
{
- if (sk->forward_alloc >= TCP_MEM_QUANTUM) {
- atomic_sub(sk->forward_alloc / TCP_MEM_QUANTUM,
+ if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) {
+ atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM,
&tcp_memory_allocated);
- sk->forward_alloc &= TCP_MEM_QUANTUM - 1;
+ sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1;
if (tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
tcp_memory_pressure = 0;
@@ -363,8 +361,8 @@
{
struct sock *sk = skb->sk;
- atomic_sub(skb->truesize, &sk->rmem_alloc);
- sk->forward_alloc += skb->truesize;
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ sk->sk_forward_alloc += skb->truesize;
}
/*
@@ -389,8 +387,8 @@
struct sock *sk = sock->sk;
struct tcp_opt *tp = tcp_sk(sk);
- poll_wait(file, sk->sleep, wait);
- if (sk->state == TCP_LISTEN)
+ poll_wait(file, sk->sk_sleep, wait);
+ if (sk->sk_state == TCP_LISTEN)
return tcp_listen_poll(sk, wait);
/* Socket is not locked. We are protected from async events
@@ -399,7 +397,7 @@
*/
mask = 0;
- if (sk->err)
+ if (sk->sk_err)
mask = POLLERR;
/*
@@ -429,15 +427,15 @@
* NOTE. Check for TCP_CLOSE is added. The goal is to prevent
* blocking on fresh not-connected or disconnected socket. --ANK
*/
- if (sk->shutdown == SHUTDOWN_MASK || sk->state == TCP_CLOSE)
+ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM;
/* Connected? */
- if ((1 << sk->state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
/* Potential race condition. If read of tp below will
- * escape above sk->state, we can be illegally awaken
+ * escape above sk->sk_state, we can be illegally awaken
* in SYN_* states. */
if ((tp->rcv_nxt != tp->copied_seq) &&
(tp->urg_seq != tp->copied_seq ||
@@ -445,12 +443,13 @@
sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
mask |= POLLIN | POLLRDNORM;
- if (!(sk->shutdown & SEND_SHUTDOWN)) {
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE,
+ &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
* wspace test but before the flags are set,
@@ -472,15 +471,15 @@
*/
void tcp_write_space(struct sock *sk)
{
- struct socket *sock = sk->socket;
+ struct socket *sock = sk->sk_socket;
if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
- if (sock->fasync_list && !(sk->shutdown & SEND_SHUTDOWN))
+ if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
}
}
@@ -492,11 +491,11 @@
switch (cmd) {
case SIOCINQ:
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
lock_sock(sk);
- if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else if (sock_flag(sk, SOCK_URGINLINE) ||
!tp->urg_data ||
@@ -505,9 +504,9 @@
answ = tp->rcv_nxt - tp->copied_seq;
/* Subtract 1, if FIN is in queue. */
- if (answ && !skb_queue_empty(&sk->receive_queue))
+ if (answ && !skb_queue_empty(&sk->sk_receive_queue))
answ -=
- ((struct sk_buff*)sk->receive_queue.prev)->h.th->fin;
+ ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
} else
answ = tp->urg_seq - tp->copied_seq;
release_sock(sk);
@@ -516,10 +515,10 @@
answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
break;
case SIOCOUTQ:
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
- if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
answ = tp->write_seq - tp->snd_una;
@@ -538,8 +537,8 @@
struct tcp_opt *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt;
- sk->max_ack_backlog = 0;
- sk->ack_backlog = 0;
+ sk->sk_max_ack_backlog = 0;
+ sk->sk_ack_backlog = 0;
tp->accept_queue = tp->accept_queue_tail = NULL;
tp->syn_wait_lock = RW_LOCK_UNLOCKED;
tcp_delack_init(tp);
@@ -563,17 +562,17 @@
* It is OK, because this socket enters to hash table only
* after validation is complete.
*/
- sk->state = TCP_LISTEN;
- if (!sk->prot->get_port(sk, inet->num)) {
+ sk->sk_state = TCP_LISTEN;
+ if (!sk->sk_prot->get_port(sk, inet->num)) {
inet->sport = htons(inet->num);
sk_dst_reset(sk);
- sk->prot->hash(sk);
+ sk->sk_prot->hash(sk);
return 0;
}
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
write_lock_bh(&tp->syn_wait_lock);
tp->listen_opt = NULL;
write_unlock_bh(&tp->syn_wait_lock);
@@ -649,7 +648,7 @@
tcp_acceptq_removed(sk);
tcp_openreq_fastfree(req);
}
- BUG_TRAP(!sk->ack_backlog);
+ BUG_TRAP(!sk->sk_ack_backlog);
}
/*
@@ -663,24 +662,24 @@
struct task_struct *tsk = current;
DEFINE_WAIT(wait);
- while ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
- if (sk->err)
+ while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+ if (sk->sk_err)
return sock_error(sk);
- if ((1 << sk->state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
+ if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
return -EPIPE;
if (!*timeo_p)
return -EAGAIN;
if (signal_pending(tsk))
return sock_intr_errno(*timeo_p);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
tp->write_pending++;
release_sock(sk);
*timeo_p = schedule_timeout(*timeo_p);
lock_sock(sk);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
tp->write_pending--;
}
return 0;
@@ -688,7 +687,7 @@
static inline int tcp_memory_free(struct sock *sk)
{
- return sk->wmem_queued < sk->sndbuf;
+ return sk->sk_wmem_queued < sk->sk_sndbuf;
}
/*
@@ -706,21 +705,21 @@
current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
for (;;) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
if (!*timeo)
goto do_nonblock;
if (signal_pending(current))
goto do_interrupted;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
if (tcp_memory_free(sk) && !vm_wait)
break;
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
tp->write_pending++;
release_sock(sk);
if (!tcp_memory_free(sk) || vm_wait)
@@ -739,7 +738,7 @@
*timeo = current_timeo;
}
out:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return err;
do_error:
@@ -796,10 +795,12 @@
TCP_SKB_CB(skb)->end_seq = tp->write_seq;
TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
TCP_SKB_CB(skb)->sacked = 0;
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
tcp_charge_skb(sk, skb);
if (!tp->send_head)
tp->send_head = skb;
+ else if (tp->nonagle&TCP_NAGLE_PUSH)
+ tp->nonagle &= ~TCP_NAGLE_PUSH;
}
static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
@@ -816,12 +817,12 @@
int mss_now, int nonagle)
{
if (tp->send_head) {
- struct sk_buff *skb = sk->write_queue.prev;
+ struct sk_buff *skb = sk->sk_write_queue.prev;
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
tcp_mark_urg(tp, flags, skb);
__tcp_push_pending_frames(sk, tp, mss_now,
- (flags & MSG_MORE) ? 2 : nonagle);
+ (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
}
}
@@ -844,21 +845,21 @@
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
- if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
goto out_err;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
copied = 0;
err = -EPIPE;
- if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
while (psize > 0) {
- struct sk_buff *skb = sk->write_queue.prev;
+ struct sk_buff *skb = sk->sk_write_queue.prev;
struct page *page = pages[poffset / PAGE_SIZE];
int copy, i;
int offset = poffset % PAGE_SIZE;
@@ -870,7 +871,7 @@
goto wait_for_sndbuf;
skb = tcp_alloc_pskb(sk, 0, tp->mss_cache,
- sk->allocation);
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
@@ -911,16 +912,16 @@
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
- __tcp_push_pending_frames(sk, tp, mss_now, 1);
+ __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tp->send_head)
tcp_push_one(sk, mss_now);
continue;
wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
- tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, 1);
+ tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
goto do_error;
@@ -948,8 +949,8 @@
#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
- if (!(sk->route_caps & NETIF_F_SG) ||
- !(sk->route_caps & TCP_ZC_CSUM_FLAGS))
+ if (!(sk->sk_route_caps & NETIF_F_SG) ||
+ !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
return sock_no_sendpage(sock, page, offset, size, flags);
#undef TCP_ZC_CSUM_FLAGS
@@ -985,8 +986,8 @@
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
- sk->wmem_queued += copy;
- sk->forward_alloc -= copy;
+ sk->sk_wmem_queued += copy;
+ sk->sk_forward_alloc -= copy;
return 0;
}
@@ -1016,7 +1017,7 @@
{
int tmp = tp->mss_cache_std;
- if (sk->route_caps & NETIF_F_SG) {
+ if (sk->sk_route_caps & NETIF_F_SG) {
int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
if (tmp >= pgbreak &&
@@ -1044,12 +1045,12 @@
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
- if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
goto out_err;
/* This should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
@@ -1059,7 +1060,7 @@
copied = 0;
err = -EPIPE;
- if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
while (--iovlen >= 0) {
@@ -1071,7 +1072,7 @@
while (seglen > 0) {
int copy;
- skb = sk->write_queue.prev;
+ skb = sk->sk_write_queue.prev;
if (!tp->send_head ||
(copy = mss_now - skb->len) <= 0) {
@@ -1084,14 +1085,16 @@
goto wait_for_sndbuf;
skb = tcp_alloc_pskb(sk, select_size(sk, tp),
- 0, sk->allocation);
+ 0, sk->sk_allocation);
if (!skb)
goto wait_for_memory;
/*
* Check whether we can use HW checksum.
*/
- if (sk->route_caps & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM))
+ if (sk->sk_route_caps &
+ (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
+ NETIF_F_HW_CSUM))
skb->ip_summed = CHECKSUM_HW;
skb_entail(sk, tp, skb);
@@ -1122,7 +1125,7 @@
merge = 1;
} else if (i == MAX_SKB_FRAGS ||
(!i &&
- !(sk->route_caps & NETIF_F_SG))) {
+ !(sk->sk_route_caps & NETIF_F_SG))) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
@@ -1199,16 +1202,16 @@
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
- __tcp_push_pending_frames(sk, tp, mss_now, 1);
+ __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tp->send_head)
tcp_push_one(sk, mss_now);
continue;
wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
- tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, 1);
+ tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
goto do_error;
@@ -1258,7 +1261,7 @@
tp->urg_data == TCP_URG_READ)
return -EINVAL; /* Yes this is right ! */
- if (sk->state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
+ if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
return -ENOTCONN;
if (tp->urg_data & TCP_URG_VALID) {
@@ -1281,7 +1284,7 @@
return err ? -EFAULT : len;
}
- if (sk->state == TCP_CLOSE || (sk->shutdown & RCV_SHUTDOWN))
+ if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
return 0;
/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
@@ -1301,7 +1304,7 @@
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
- __skb_unlink(skb, &sk->receive_queue);
+ __skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
@@ -1317,7 +1320,7 @@
int time_to_ack = 0;
#if TCP_DEBUG
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif
@@ -1335,7 +1338,7 @@
* in queue.
*/
(copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
- !tp->ack.pingpong && !atomic_read(&sk->rmem_alloc)))
+ !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
time_to_ack = 1;
}
@@ -1345,7 +1348,7 @@
* Even if window raised up to infinity, do not send window open ACK
* in states, where we will not receive more. It is useless.
*/
- if (copied > 0 && !time_to_ack && !(sk->shutdown & RCV_SHUTDOWN)) {
+ if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
__u32 rcv_window_now = tcp_receive_window(tp);
/* Optimize, __tcp_select_window() is not cheap. */
@@ -1365,7 +1368,7 @@
tcp_send_ack(sk);
}
-/* Now socket state including sk->err is changed only under lock,
+/* Now socket state including sk->sk_err is changed only under lock,
* hence we may omit checks after joining wait queue.
* We check receive queue before schedule() only as optimization;
* it is very likely that release_sock() added new data.
@@ -1375,18 +1378,18 @@
{
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
release_sock(sk);
- if (skb_queue_empty(&sk->receive_queue))
+ if (skb_queue_empty(&sk->sk_receive_queue))
timeo = schedule_timeout(timeo);
lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return timeo;
}
@@ -1401,7 +1404,7 @@
* necessary */
local_bh_disable();
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
- sk->backlog_rcv(sk, skb);
+ sk->sk_backlog_rcv(sk, skb);
local_bh_enable();
/* Clear memory counter. */
@@ -1413,7 +1416,7 @@
struct sk_buff *skb;
u32 offset;
- skb_queue_walk(&sk->receive_queue, skb) {
+ skb_queue_walk(&sk->sk_receive_queue, skb) {
offset = seq - TCP_SKB_CB(skb)->seq;
if (skb->h.th->syn)
offset--;
@@ -1445,7 +1448,7 @@
u32 offset;
int copied = 0;
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return -ENOTCONN;
while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
if (offset < skb->len) {
@@ -1511,7 +1514,7 @@
TCP_CHECK_TIMER(sk);
err = -ENOTCONN;
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
goto out;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1549,7 +1552,7 @@
/* Next get a buffer. */
- skb = skb_peek(&sk->receive_queue);
+ skb = skb_peek(&sk->sk_receive_queue);
do {
if (!skb)
break;
@@ -1571,17 +1574,17 @@
goto found_fin_ok;
BUG_TRAP(flags & MSG_PEEK);
skb = skb->next;
- } while (skb != (struct sk_buff *)&sk->receive_queue);
+ } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->backlog.tail)
+ if (copied >= target && !sk->sk_backlog.tail)
break;
if (copied) {
- if (sk->err ||
- sk->state == TCP_CLOSE ||
- (sk->shutdown & RCV_SHUTDOWN) ||
+ if (sk->sk_err ||
+ sk->sk_state == TCP_CLOSE ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
!timeo ||
(flags & MSG_PEEK))
break;
@@ -1589,15 +1592,15 @@
if (sock_flag(sk, SOCK_DONE))
break;
- if (sk->err) {
+ if (sk->sk_err) {
copied = sock_error(sk);
break;
}
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
- if (sk->state == TCP_CLOSE) {
+ if (sk->sk_state == TCP_CLOSE) {
if (!sock_flag(sk, SOCK_DONE)) {
/* This occurs when user tries to read
* from never connected socket.
@@ -1825,7 +1828,7 @@
static int tcp_close_state(struct sock *sk)
{
- int next = (int)new_state[sk->state];
+ int next = (int)new_state[sk->sk_state];
int ns = next & TCP_STATE_MASK;
tcp_set_state(sk, ns);
@@ -1848,7 +1851,7 @@
return;
/* If we've already sent a FIN, or it's a closed state, skip this. */
- if ((1 << sk->state) &
+ if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_SYN_SENT |
TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
/* Clear out any half completed packets. FIN if needed. */
@@ -1864,26 +1867,26 @@
static inline int closing(struct sock *sk)
{
- return (1 << sk->state) &
+ return (1 << sk->sk_state) &
(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
}
static __inline__ void tcp_kill_sk_queues(struct sock *sk)
{
/* First the read buffer. */
- __skb_queue_purge(&sk->receive_queue);
+ __skb_queue_purge(&sk->sk_receive_queue);
/* Next, the error queue. */
- __skb_queue_purge(&sk->error_queue);
+ __skb_queue_purge(&sk->sk_error_queue);
/* Next, the write queue. */
- BUG_TRAP(skb_queue_empty(&sk->write_queue));
+ BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
/* Account for returned memory. */
tcp_mem_reclaim(sk);
- BUG_TRAP(!sk->wmem_queued);
- BUG_TRAP(!sk->forward_alloc);
+ BUG_TRAP(!sk->sk_wmem_queued);
+ BUG_TRAP(!sk->sk_forward_alloc);
/* It is _impossible_ for the backlog to contain anything
* when we get here. All user references to this socket
@@ -1899,33 +1902,33 @@
*/
void tcp_destroy_sock(struct sock *sk)
{
- BUG_TRAP(sk->state == TCP_CLOSE);
+ BUG_TRAP(sk->sk_state == TCP_CLOSE);
BUG_TRAP(sock_flag(sk, SOCK_DEAD));
/* It cannot be in hash table! */
- BUG_TRAP(!sk->pprev);
+ BUG_TRAP(!sk->sk_pprev);
/* If it has not 0 inet_sk(sk)->num, it must be bound */
- BUG_TRAP(!inet_sk(sk)->num || sk->prev);
+ BUG_TRAP(!inet_sk(sk)->num || sk->sk_prev);
#ifdef TCP_DEBUG
- if (sk->zapped) {
+ if (sk->sk_zapped) {
printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk);
sock_hold(sk);
}
- sk->zapped = 1;
+ sk->sk_zapped = 1;
#endif
- sk->prot->destroy(sk);
+ sk->sk_prot->destroy(sk);
tcp_kill_sk_queues(sk);
xfrm_sk_free_policy(sk);
#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&sk->refcnt) != 1) {
+ if (atomic_read(&sk->sk_refcnt) != 1) {
printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
- sk, atomic_read(&sk->refcnt));
+ sk, atomic_read(&sk->sk_refcnt));
}
#endif
@@ -1939,9 +1942,9 @@
int data_was_unread = 0;
lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
tcp_set_state(sk, TCP_CLOSE);
/* Special case. */
@@ -1954,7 +1957,7 @@
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
- while ((skb = __skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
skb->h.th->fin;
data_was_unread += len;
@@ -1977,9 +1980,9 @@
NET_INC_STATS_USER(TCPAbortOnClose);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_KERNEL);
- } else if (sock_flag(sk, SOCK_LINGER) && !sk->lingertime) {
+ } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
- sk->prot->disconnect(sk, 0);
+ sk->sk_prot->disconnect(sk, 0);
NET_INC_STATS_USER(TCPAbortOnData);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
@@ -2015,7 +2018,8 @@
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
if (!closing(sk))
break;
release_sock(sk);
@@ -2023,7 +2027,7 @@
lock_sock(sk);
} while (!signal_pending(tsk) && timeout);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
}
adjudge_to_death:
@@ -2055,7 +2059,7 @@
* linger2 option. --ANK
*/
- if (sk->state == TCP_FIN_WAIT2) {
+ if (sk->sk_state == TCP_FIN_WAIT2) {
struct tcp_opt *tp = tcp_sk(sk);
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
@@ -2073,10 +2077,10 @@
}
}
}
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
tcp_mem_reclaim(sk);
if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
- (sk->wmem_queued > SOCK_MIN_SNDBUF &&
+ (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
@@ -2088,7 +2092,7 @@
}
atomic_inc(&tcp_orphan_count);
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
tcp_destroy_sock(sk);
/* Otherwise, socket is reprieved until protocol close. */
@@ -2111,10 +2115,9 @@
{
struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
- int old_state;
int err = 0;
+ int old_state = sk->sk_state;
- old_state = sk->state;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
@@ -2128,21 +2131,21 @@
* states
*/
tcp_send_active_reset(sk, gfp_any());
- sk->err = ECONNRESET;
+ sk->sk_err = ECONNRESET;
} else if (old_state == TCP_SYN_SENT)
- sk->err = ECONNRESET;
+ sk->sk_err = ECONNRESET;
tcp_clear_xmit_timers(sk);
- __skb_queue_purge(&sk->receive_queue);
+ __skb_queue_purge(&sk->sk_receive_queue);
tcp_writequeue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
inet->dport = 0;
- if (!(sk->userlocks & SOCK_BINDADDR_LOCK))
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
- sk->shutdown = 0;
+ sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt = 0;
if ((tp->write_seq += tp->max_window + 2) == 0)
@@ -2161,9 +2164,9 @@
tcp_sack_reset(tp);
__sk_dst_reset(sk);
- BUG_TRAP(!inet->num || sk->prev);
+ BUG_TRAP(!inet->num || sk->sk_prev);
- sk->error_report(sk);
+ sk->sk_error_report(sk);
return err;
}
@@ -2192,7 +2195,8 @@
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
release_sock(sk);
if (!tp->accept_queue)
timeo = schedule_timeout(timeo);
@@ -2201,7 +2205,7 @@
if (tp->accept_queue)
break;
err = -EINVAL;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
@@ -2210,7 +2214,7 @@
if (!timeo)
break;
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return err;
}
@@ -2231,7 +2235,7 @@
* and that it has something pending.
*/
error = -EINVAL;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
goto out;
/* Find already established connection */
@@ -2255,7 +2259,7 @@
newsk = req->sk;
tcp_acceptq_removed(sk);
tcp_openreq_fastfree(req);
- BUG_TRAP(newsk->state != TCP_SYN_RECV);
+ BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
release_sock(sk);
return newsk;
@@ -2300,16 +2304,20 @@
break;
case TCP_NODELAY:
- /* You cannot try to use this and TCP_CORK in
- * tandem, so let the user know.
- */
- if (tp->nonagle == 2) {
- err = -EINVAL;
- break;
- }
- tp->nonagle = !val ? 0 : 1;
- if (val)
+ if (val) {
+ /* TCP_NODELAY is weaker than TCP_CORK, so that
+ * this option on corked socket is remembered, but
+ * it is not activated until cork is cleared.
+ *
+ * However, when TCP_NODELAY is set we make
+ * an explicit push, which overrides even TCP_CORK
+ * for currently queued segments.
+ */
+ tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
tcp_push_pending_frames(sk, tp);
+ } else {
+ tp->nonagle &= ~TCP_NAGLE_OFF;
+ }
break;
case TCP_CORK:
@@ -2321,18 +2329,15 @@
* out headers with a write() call first and then use
* sendfile to send out the data parts.
*
- * You cannot try to use TCP_NODELAY and this mechanism
- * at the same time, so let the user know.
+ * TCP_CORK can be set together with TCP_NODELAY and it is
+ * stronger than TCP_NODELAY.
*/
- if (tp->nonagle == 1) {
- err = -EINVAL;
- break;
- }
- if (val != 0) {
- tp->nonagle = 2;
+ if (val) {
+ tp->nonagle |= TCP_NAGLE_CORK;
} else {
- tp->nonagle = 0;
-
+ tp->nonagle &= ~TCP_NAGLE_CORK;
+ if (tp->nonagle&TCP_NAGLE_OFF)
+ tp->nonagle |= TCP_NAGLE_PUSH;
tcp_push_pending_frames(sk, tp);
}
break;
@@ -2343,7 +2348,8 @@
else {
tp->keepalive_time = val * HZ;
if (sock_flag(sk, SOCK_KEEPOPEN) &&
- !((1 << sk->state) & (TCPF_CLOSE | TCPF_LISTEN))) {
+ !((1 << sk->sk_state) &
+ (TCPF_CLOSE | TCPF_LISTEN))) {
__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
if (tp->keepalive_time > elapsed)
elapsed = tp->keepalive_time - elapsed;
@@ -2396,7 +2402,7 @@
case TCP_WINDOW_CLAMP:
if (!val) {
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
err = -EINVAL;
break;
}
@@ -2411,7 +2417,7 @@
tp->ack.pingpong = 1;
} else {
tp->ack.pingpong = 0;
- if ((1 << sk->state) &
+ if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
tcp_ack_scheduled(tp)) {
tp->ack.pending |= TCP_ACK_PUSHED;
@@ -2451,14 +2457,14 @@
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache_std;
- if (!val && ((1 << sk->state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
val = tp->user_mss;
break;
case TCP_NODELAY:
- val = (tp->nonagle == 1);
+ val = !!(tp->nonagle&TCP_NAGLE_OFF);
break;
case TCP_CORK:
- val = (tp->nonagle == 2);
+ val = !!(tp->nonagle&TCP_NAGLE_CORK);
break;
case TCP_KEEPIDLE:
val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
@@ -2490,7 +2496,7 @@
if (get_user(len, optlen))
return -EFAULT;
- info.tcpi_state = sk->state;
+ info.tcpi_state = sk->sk_state;
info.tcpi_ca_state = tp->ca_state;
info.tcpi_retransmits = tp->retransmits;
info.tcpi_probes = tp->probes_out;
diff -urN linux-2.5.70-bk11/net/ipv4/tcp_diag.c linux-2.5.70-bk12/net/ipv4/tcp_diag.c
--- linux-2.5.70-bk11/net/ipv4/tcp_diag.c 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/tcp_diag.c 2003-06-07 04:47:51.000000000 -0700
@@ -54,32 +54,32 @@
nlh = NLMSG_PUT(skb, pid, seq, TCPDIAG_GETSOCK, sizeof(*r));
r = NLMSG_DATA(nlh);
- if (sk->state != TCP_TIME_WAIT) {
+ if (sk->sk_state != TCP_TIME_WAIT) {
if (ext & (1<<(TCPDIAG_MEMINFO-1)))
minfo = TCPDIAG_PUT(skb, TCPDIAG_MEMINFO, sizeof(*minfo));
if (ext & (1<<(TCPDIAG_INFO-1)))
info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info));
}
- r->tcpdiag_family = sk->family;
- r->tcpdiag_state = sk->state;
+ r->tcpdiag_family = sk->sk_family;
+ r->tcpdiag_state = sk->sk_state;
r->tcpdiag_timer = 0;
r->tcpdiag_retrans = 0;
- r->id.tcpdiag_if = sk->bound_dev_if;
+ r->id.tcpdiag_if = sk->sk_bound_dev_if;
r->id.tcpdiag_cookie[0] = (u32)(unsigned long)sk;
r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
if (r->tcpdiag_state == TCP_TIME_WAIT) {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket*)sk;
- long tmo = tw->ttd - jiffies;
+ long tmo = tw->tw_ttd - jiffies;
if (tmo < 0)
tmo = 0;
- r->id.tcpdiag_sport = tw->sport;
- r->id.tcpdiag_dport = tw->dport;
- r->id.tcpdiag_src[0] = tw->rcv_saddr;
- r->id.tcpdiag_dst[0] = tw->daddr;
- r->tcpdiag_state = tw->substate;
+ r->id.tcpdiag_sport = tw->tw_sport;
+ r->id.tcpdiag_dport = tw->tw_dport;
+ r->id.tcpdiag_src[0] = tw->tw_rcv_saddr;
+ r->id.tcpdiag_dst[0] = tw->tw_daddr;
+ r->tcpdiag_state = tw->tw_substate;
r->tcpdiag_timer = 3;
r->tcpdiag_expires = (tmo*1000+HZ-1)/HZ;
r->tcpdiag_rqueue = 0;
@@ -89,9 +89,9 @@
#ifdef CONFIG_IPV6
if (r->tcpdiag_family == AF_INET6) {
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
- &tw->v6_rcv_saddr);
+ &tw->tw_v6_rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
- &tw->v6_daddr);
+ &tw->tw_v6_daddr);
}
#endif
nlh->nlmsg_len = skb->tail - b;
@@ -124,10 +124,10 @@
r->tcpdiag_timer = 4;
r->tcpdiag_retrans = tp->probes_out;
r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
- } else if (timer_pending(&sk->timer)) {
+ } else if (timer_pending(&sk->sk_timer)) {
r->tcpdiag_timer = 2;
r->tcpdiag_retrans = tp->probes_out;
- r->tcpdiag_expires = EXPIRES_IN_MS(sk->timer.expires);
+ r->tcpdiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
} else {
r->tcpdiag_timer = 0;
r->tcpdiag_expires = 0;
@@ -140,16 +140,16 @@
r->tcpdiag_inode = sock_i_ino(sk);
if (minfo) {
- minfo->tcpdiag_rmem = atomic_read(&sk->rmem_alloc);
- minfo->tcpdiag_wmem = sk->wmem_queued;
- minfo->tcpdiag_fmem = sk->forward_alloc;
- minfo->tcpdiag_tmem = atomic_read(&sk->wmem_alloc);
+ minfo->tcpdiag_rmem = atomic_read(&sk->sk_rmem_alloc);
+ minfo->tcpdiag_wmem = sk->sk_wmem_queued;
+ minfo->tcpdiag_fmem = sk->sk_forward_alloc;
+ minfo->tcpdiag_tmem = atomic_read(&sk->sk_wmem_alloc);
}
if (info) {
u32 now = tcp_time_stamp;
- info->tcpi_state = sk->state;
+ info->tcpi_state = sk->sk_state;
info->tcpi_ca_state = tp->ca_state;
info->tcpi_retransmits = tp->retransmits;
info->tcpi_probes = tp->probes_out;
@@ -264,7 +264,7 @@
out:
if (sk) {
- if (sk->state == TCP_TIME_WAIT)
+ if (sk->sk_state == TCP_TIME_WAIT)
tcp_tw_put((struct tcp_tw_bucket*)sk);
else
sock_put(sk);
@@ -325,7 +325,7 @@
yes = ntohs(inet->dport) <= op[1].no;
break;
case TCPDIAG_BC_AUTO:
- yes = !(sk->userlocks&SOCK_BINDPORT_LOCK);
+ yes = !(sk->sk_userlocks & SOCK_BINDPORT_LOCK);
break;
case TCPDIAG_BC_S_COND:
case TCPDIAG_BC_D_COND:
@@ -344,7 +344,7 @@
break;
#ifdef CONFIG_IPV6
- if (sk->family == AF_INET6) {
+ if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
if (op->code == TCPDIAG_BC_S_COND)
@@ -362,7 +362,8 @@
if (bitstring_match(addr, cond->addr, cond->prefix_len))
break;
- if (sk->family == AF_INET6 && cond->family == AF_INET) {
+ if (sk->sk_family == AF_INET6 &&
+ cond->family == AF_INET) {
if (addr[0] == 0 && addr[1] == 0 &&
addr[2] == htonl(0xffff) &&
bitstring_match(addr+3, cond->addr, cond->prefix_len))
@@ -466,7 +467,7 @@
for (sk = tcp_listening_hash[i], num = 0;
sk != NULL;
- sk = sk->next, num++) {
+ sk = sk->sk_next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
@@ -506,12 +507,12 @@
for (sk = head->chain, num = 0;
sk != NULL;
- sk = sk->next, num++) {
+ sk = sk->sk_next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
- if (!(r->tcpdiag_states&(1<state)))
+ if (!(r->tcpdiag_states & (1 << sk->sk_state)))
continue;
if (r->id.tcpdiag_sport != inet->sport &&
r->id.tcpdiag_sport)
@@ -531,12 +532,12 @@
if (r->tcpdiag_states&TCPF_TIME_WAIT) {
for (sk = tcp_ehash[i+tcp_ehash_size].chain;
sk != NULL;
- sk = sk->next, num++) {
+ sk = sk->sk_next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
- if (!(r->tcpdiag_states&(1<zapped)))
+ if (!(r->tcpdiag_states & (1 << sk->sk_zapped)))
continue;
if (r->id.tcpdiag_sport != inet->sport &&
r->id.tcpdiag_sport)
@@ -622,7 +623,7 @@
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
tcpdiag_rcv_skb(skb);
kfree_skb(skb);
}
diff -urN linux-2.5.70-bk11/net/ipv4/tcp_input.c linux-2.5.70-bk12/net/ipv4/tcp_input.c
--- linux-2.5.70-bk11/net/ipv4/tcp_input.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/tcp_input.c 2003-06-07 04:47:51.000000000 -0700
@@ -184,7 +184,7 @@
/* Buffer size and advertised window tuning.
*
- * 1. Tuning sk->sndbuf, when connection enters established state.
+ * 1. Tuning sk->sk_sndbuf, when connection enters established state.
*/
static void tcp_fixup_sndbuf(struct sock *sk)
@@ -192,8 +192,8 @@
int sndmem = tcp_sk(sk)->mss_clamp + MAX_TCP_HEADER + 16 +
sizeof(struct sk_buff);
- if (sk->sndbuf < 3*sndmem)
- sk->sndbuf = min(3*sndmem, sysctl_tcp_wmem[2]);
+ if (sk->sk_sndbuf < 3 * sndmem)
+ sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -276,8 +276,8 @@
*/
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
- if (sk->rcvbuf < 4*rcvmem)
- sk->rcvbuf = min(4*rcvmem, sysctl_tcp_rmem[2]);
+ if (sk->sk_rcvbuf < 4 * rcvmem)
+ sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
}
/* 4. Try to fixup all. It is made iimediately after connection enters
@@ -288,9 +288,9 @@
struct tcp_opt *tp = tcp_sk(sk);
int maxwin;
- if (!(sk->userlocks&SOCK_RCVBUF_LOCK))
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
tcp_fixup_rcvbuf(sk);
- if (!(sk->userlocks&SOCK_SNDBUF_LOCK))
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
tcp_fixup_sndbuf(sk);
maxwin = tcp_full_space(sk);
@@ -331,15 +331,16 @@
* do not clamp window. Try to expand rcvbuf instead.
*/
if (ofo_win) {
- if (sk->rcvbuf < sysctl_tcp_rmem[2] &&
- !(sk->userlocks&SOCK_RCVBUF_LOCK) &&
+ if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
!tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
- sk->rcvbuf = min(atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
+ sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
+ sysctl_tcp_rmem[2]);
}
- if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
app_win += ofo_win;
- if (atomic_read(&sk->rmem_alloc) >= 2*sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
app_win >>= 1;
if (app_win > tp->ack.rcv_mss)
app_win -= tp->ack.rcv_mss;
@@ -778,9 +779,9 @@
/* So, SACKs for already sent large segments will be lost.
* Not good, but alternative is to resegment the queue. */
- if (sk->route_caps&NETIF_F_TSO) {
- sk->route_caps &= ~NETIF_F_TSO;
- sk->no_largesend = 1;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ sk->sk_route_caps &= ~NETIF_F_TSO;
+ sk->sk_no_largesend = 1;
tp->mss_cache = tp->mss_cache_std;
}
@@ -1128,13 +1129,13 @@
* receiver _host_ is heavily congested (or buggy).
* Do processing similar to RTO timeout.
*/
- if ((skb = skb_peek(&sk->write_queue)) != NULL &&
+ if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
NET_INC_STATS_BH(TCPSACKReneging);
tcp_enter_loss(sk, 1);
tp->retransmits++;
- tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
+ tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
return 1;
}
@@ -1153,7 +1154,8 @@
static inline int tcp_head_timedout(struct sock *sk, struct tcp_opt *tp)
{
- return tp->packets_out && tcp_skb_timedout(tp, skb_peek(&sk->write_queue));
+ return tp->packets_out &&
+ tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue));
}
/* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1840,7 +1842,7 @@
int acked = 0;
__s32 seq_rtt = -1;
- while((skb = skb_peek(&sk->write_queue)) && (skb != tp->send_head)) {
+ while ((skb = skb_peek(&sk->sk_write_queue)) && skb != tp->send_head) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
__u8 sacked = scb->sacked;
@@ -2080,7 +2082,7 @@
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
- sk->err_soft = 0;
+ sk->sk_err_soft = 0;
tp->rcv_tstamp = tcp_time_stamp;
prior_packets = tp->packets_out;
if (!prior_packets)
@@ -2107,7 +2109,7 @@
}
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
- dst_confirm(sk->dst_cache);
+ dst_confirm(sk->sk_dst_cache);
return 1;
@@ -2339,21 +2341,21 @@
static void tcp_reset(struct sock *sk)
{
/* We want the right error as BSD sees it (and indeed as we do). */
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_SYN_SENT:
- sk->err = ECONNREFUSED;
+ sk->sk_err = ECONNREFUSED;
break;
case TCP_CLOSE_WAIT:
- sk->err = EPIPE;
+ sk->sk_err = EPIPE;
break;
case TCP_CLOSE:
return;
default:
- sk->err = ECONNRESET;
+ sk->sk_err = ECONNRESET;
}
if (!sock_flag(sk, SOCK_DEAD))
- sk->error_report(sk);
+ sk->sk_error_report(sk);
tcp_done(sk);
}
@@ -2378,10 +2380,10 @@
tcp_schedule_ack(tp);
- sk->shutdown |= RCV_SHUTDOWN;
+ sk->sk_shutdown |= RCV_SHUTDOWN;
sock_reset_flag(sk, SOCK_DONE);
- switch(sk->state) {
+ switch (sk->sk_state) {
case TCP_SYN_RECV:
case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */
@@ -2416,7 +2418,8 @@
/* Only TCP_LISTEN and TCP_CLOSE are left, in these
* cases we should never reach this piece of code.
*/
- printk("tcp_fin: Impossible, sk->state=%d\n", sk->state);
+ printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
+ __FUNCTION__, sk->sk_state);
break;
};
@@ -2429,10 +2432,11 @@
tcp_mem_reclaim(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
/* Do not send POLL_HUP for half duplex close. */
- if (sk->shutdown == SHUTDOWN_MASK || sk->state == TCP_CLOSE)
+ if (sk->sk_shutdown == SHUTDOWN_MASK ||
+ sk->sk_state == TCP_CLOSE)
sk_wake_async(sk, 1, POLL_HUP);
else
sk_wake_async(sk, 1, POLL_IN);
@@ -2650,7 +2654,7 @@
TCP_SKB_CB(skb)->end_seq);
__skb_unlink(skb, skb->list);
- __skb_queue_tail(&sk->receive_queue, skb);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if(skb->h.th->fin)
tcp_fin(skb, sk, skb->h.th);
@@ -2659,7 +2663,7 @@
static inline int tcp_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
- return (int)skb->truesize <= sk->forward_alloc ||
+ return (int)skb->truesize <= sk->sk_forward_alloc ||
tcp_mem_schedule(sk, skb->truesize, 1);
}
@@ -2714,13 +2718,13 @@
if (eaten <= 0) {
queue_and_out:
if (eaten < 0 &&
- (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!tcp_rmem_schedule(sk, skb))) {
if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
goto drop;
}
tcp_set_owner_r(skb, sk);
- __skb_queue_tail(&sk->receive_queue, skb);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
}
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if(skb->len)
@@ -2746,7 +2750,7 @@
if (eaten > 0)
__kfree_skb(skb);
else if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
return;
}
@@ -2787,7 +2791,7 @@
TCP_ECN_check_ce(tp, skb);
- if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!tcp_rmem_schedule(sk, skb)) {
if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
goto drop;
@@ -3024,18 +3028,18 @@
NET_INC_STATS_BH(PruneCalled);
- if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk, tp);
else if (tcp_memory_pressure)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
tcp_collapse_ofo_queue(sk);
- tcp_collapse(sk, sk->receive_queue.next,
- (struct sk_buff*)&sk->receive_queue,
+ tcp_collapse(sk, sk->sk_receive_queue.next,
+ (struct sk_buff*)&sk->sk_receive_queue,
tp->copied_seq, tp->rcv_nxt);
tcp_mem_reclaim(sk);
- if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
/* Collapsing did not help, destructive actions follow.
@@ -3057,7 +3061,7 @@
tcp_mem_reclaim(sk);
}
- if(atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
/* If we are really being abused, tell the caller to silently
@@ -3081,7 +3085,7 @@
struct tcp_opt *tp = tcp_sk(sk);
if (tp->ca_state == TCP_CA_Open &&
- sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
+ sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
/* Limited by application or receiver window. */
u32 win_used = max(tp->snd_cwnd_used, 2U);
if (win_used < tp->snd_cwnd) {
@@ -3105,7 +3109,7 @@
struct tcp_opt *tp = tcp_sk(sk);
if (tp->packets_out < tp->snd_cwnd &&
- !(sk->userlocks&SOCK_SNDBUF_LOCK) &&
+ !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
!tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
int sndmem = max_t(u32, tp->mss_clamp, tp->mss_cache) +
@@ -3113,12 +3117,12 @@
demanded = max_t(unsigned int, tp->snd_cwnd,
tp->reordering + 1);
sndmem *= 2*demanded;
- if (sndmem > sk->sndbuf)
- sk->sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+ if (sndmem > sk->sk_sndbuf)
+ sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
- sk->write_space(sk);
+ sk->sk_write_space(sk);
}
static inline void tcp_check_space(struct sock *sk)
@@ -3127,7 +3131,8 @@
if (tp->queue_shrunk) {
tp->queue_shrunk = 0;
- if (sk->socket && test_bit(SOCK_NOSPACE, &sk->socket->flags))
+ if (sk->sk_socket &&
+ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
tcp_new_space(sk);
}
}
@@ -3249,7 +3254,7 @@
if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
!sock_flag(sk, SOCK_URGINLINE) &&
tp->copied_seq != tp->rcv_nxt) {
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
tp->copied_seq++;
if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
__skb_unlink(skb, skb->list);
@@ -3285,7 +3290,7 @@
BUG();
tp->urg_data = TCP_URG_VALID | tmp;
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk,0);
+ sk->sk_data_ready(sk, 0);
}
}
}
@@ -3483,14 +3488,14 @@
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);
- if ((int)skb->truesize > sk->forward_alloc)
+ if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
NET_INC_STATS_BH(TCPHPHits);
/* Bulk data transfer: receiver */
__skb_pull(skb,tcp_header_len);
- __skb_queue_tail(&sk->receive_queue, skb);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_set_owner_r(skb, sk);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
}
@@ -3519,7 +3524,7 @@
if (eaten)
__kfree_skb(skb);
else
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
return 0;
}
}
@@ -3659,7 +3664,7 @@
TCP_ECN_rcv_synack(tp, th);
if (tp->ecn_flags&TCP_ECN_OK)
- sk->no_largesend = 1;
+ sk->sk_no_largesend = 1;
tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -3715,7 +3720,7 @@
tcp_set_state(sk, TCP_ESTABLISHED);
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sk_wake_async(sk, 0, POLL_OUT);
}
@@ -3787,7 +3792,7 @@
TCP_ECN_rcv_syn(tp, th);
if (tp->ecn_flags&TCP_ECN_OK)
- sk->no_largesend = 1;
+ sk->sk_no_largesend = 1;
tcp_sync_mss(sk, tp->pmtu_cookie);
tcp_initialize_rcv_mss(sk);
@@ -3840,7 +3845,7 @@
tp->saw_tstamp = 0;
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_CLOSE:
goto discard;
@@ -3928,20 +3933,20 @@
if (th->ack) {
int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
- switch(sk->state) {
+ switch(sk->sk_state) {
case TCP_SYN_RECV:
if (acceptable) {
tp->copied_seq = tp->rcv_nxt;
mb();
tcp_set_state(sk, TCP_ESTABLISHED);
- sk->state_change(sk);
+ sk->sk_state_change(sk);
/* Note, that this wakeup is only for marginal
* crossed SYN case. Passively open sockets
- * are not waked up, because sk->sleep == NULL
- * and sk->socket == NULL.
+ * are not waked up, because sk->sk_sleep ==
+ * NULL and sk->sk_socket == NULL.
*/
- if (sk->socket) {
+ if (sk->sk_socket) {
sk_wake_async(sk,0,POLL_OUT);
}
@@ -3974,12 +3979,12 @@
case TCP_FIN_WAIT1:
if (tp->snd_una == tp->write_seq) {
tcp_set_state(sk, TCP_FIN_WAIT2);
- sk->shutdown |= SEND_SHUTDOWN;
- dst_confirm(sk->dst_cache);
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ dst_confirm(sk->sk_dst_cache);
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
- sk->state_change(sk);
+ sk->sk_state_change(sk);
else {
int tmo;
@@ -4032,7 +4037,7 @@
tcp_urg(sk, skb, th);
/* step 7: process the segment text */
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_CLOSE_WAIT:
case TCP_CLOSING:
case TCP_LAST_ACK:
@@ -4044,7 +4049,7 @@
* RFC 1122 says we MUST send a reset.
* BSD 4.4 also does reset.
*/
- if (sk->shutdown & RCV_SHUTDOWN) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS_BH(TCPAbortOnData);
@@ -4060,7 +4065,7 @@
}
/* tcp_data could move socket to TIME-WAIT */
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
}
diff -urN linux-2.5.70-bk11/net/ipv4/tcp_ipv4.c linux-2.5.70-bk12/net/ipv4/tcp_ipv4.c
--- linux-2.5.70-bk11/net/ipv4/tcp_ipv4.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/tcp_ipv4.c 2003-06-07 04:47:51.000000000 -0700
@@ -161,12 +161,12 @@
struct tcp_bind_bucket *tb;
spin_lock(&head->lock);
- tb = (struct tcp_bind_bucket *)sk->prev;
- if ((child->bind_next = tb->owners) != NULL)
- tb->owners->bind_pprev = &child->bind_next;
+ tb = (struct tcp_bind_bucket *)sk->sk_prev;
+ if ((child->sk_bind_next = tb->owners) != NULL)
+ tb->owners->sk_bind_pprev = &child->sk_bind_next;
tb->owners = child;
- child->bind_pprev = &tb->owners;
- child->prev = (struct sock *)tb;
+ child->sk_bind_pprev = &tb->owners;
+ child->sk_prev = (struct sock *)tb;
spin_unlock(&head->lock);
}
@@ -181,25 +181,25 @@
unsigned short snum)
{
inet_sk(sk)->num = snum;
- if ((sk->bind_next = tb->owners) != NULL)
- tb->owners->bind_pprev = &sk->bind_next;
+ if ((sk->sk_bind_next = tb->owners) != NULL)
+ tb->owners->sk_bind_pprev = &sk->sk_bind_next;
tb->owners = sk;
- sk->bind_pprev = &tb->owners;
- sk->prev = (struct sock *)tb;
+ sk->sk_bind_pprev = &tb->owners;
+ sk->sk_prev = (struct sock *)tb;
}
static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
{
struct inet_opt *inet = inet_sk(sk);
struct sock *sk2 = tb->owners;
- int sk_reuse = sk->reuse;
+ int reuse = sk->sk_reuse;
- for ( ; sk2; sk2 = sk2->bind_next) {
+ for (; sk2; sk2 = sk2->sk_bind_next) {
if (sk != sk2 &&
!ipv6_only_sock(sk2) &&
- sk->bound_dev_if == sk2->bound_dev_if) {
- if (!sk_reuse || !sk2->reuse ||
- sk2->state == TCP_LISTEN) {
+ sk->sk_bound_dev_if == sk2->sk_bound_dev_if) {
+ if (!reuse || !sk2->sk_reuse ||
+ sk2->sk_state == TCP_LISTEN) {
struct inet_opt *inet2 = inet_sk(sk2);
if (!inet2->rcv_saddr || !inet->rcv_saddr ||
inet2->rcv_saddr == inet->rcv_saddr)
@@ -262,9 +262,10 @@
break;
}
if (tb && tb->owners) {
- if (sk->reuse > 1)
+ if (sk->sk_reuse > 1)
goto success;
- if (tb->fastreuse > 0 && sk->reuse && sk->state != TCP_LISTEN) {
+ if (tb->fastreuse > 0 &&
+ sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
goto success;
} else {
ret = 1;
@@ -276,16 +277,17 @@
if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
goto fail_unlock;
if (!tb->owners) {
- if (sk->reuse && sk->state != TCP_LISTEN)
+ if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
else
tb->fastreuse = 0;
- } else if (tb->fastreuse && (!sk->reuse || sk->state == TCP_LISTEN))
+ } else if (tb->fastreuse &&
+ (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0;
success:
- if (!sk->prev)
+ if (!sk->sk_prev)
tcp_bind_hash(sk, tb, snum);
- BUG_TRAP(sk->prev == (struct sock *)tb);
+ BUG_TRAP(sk->sk_prev == (struct sock *)tb);
ret = 0;
fail_unlock:
@@ -305,11 +307,11 @@
struct tcp_bind_bucket *tb;
spin_lock(&head->lock);
- tb = (struct tcp_bind_bucket *) sk->prev;
- if (sk->bind_next)
- sk->bind_next->bind_pprev = sk->bind_pprev;
- *(sk->bind_pprev) = sk->bind_next;
- sk->prev = NULL;
+ tb = (struct tcp_bind_bucket *)sk->sk_prev;
+ if (sk->sk_bind_next)
+ sk->sk_bind_next->sk_bind_pprev = sk->sk_bind_pprev;
+ *(sk->sk_bind_pprev) = sk->sk_bind_next;
+ sk->sk_prev = NULL;
inet->num = 0;
tcp_bucket_destroy(tb);
spin_unlock(&head->lock);
@@ -355,29 +357,29 @@
struct sock **skp;
rwlock_t *lock;
- BUG_TRAP(!sk->pprev);
- if (listen_possible && sk->state == TCP_LISTEN) {
+ BUG_TRAP(!sk->sk_pprev);
+ if (listen_possible && sk->sk_state == TCP_LISTEN) {
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock;
tcp_listen_wlock();
} else {
- skp = &tcp_ehash[(sk->hashent = tcp_sk_hashfn(sk))].chain;
- lock = &tcp_ehash[sk->hashent].lock;
+ skp = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
+ lock = &tcp_ehash[sk->sk_hashent].lock;
write_lock(lock);
}
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock(lock);
- if (listen_possible && sk->state == TCP_LISTEN)
+ if (listen_possible && sk->sk_state == TCP_LISTEN)
wake_up(&tcp_lhash_wait);
}
static void tcp_v4_hash(struct sock *sk)
{
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
__tcp_v4_hash(sk, 1);
local_bh_enable();
@@ -388,30 +390,30 @@
{
rwlock_t *lock;
- if (!sk->pprev)
+ if (!sk->sk_pprev)
goto ende;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
local_bh_disable();
tcp_listen_wlock();
lock = &tcp_lhash_lock;
} else {
- struct tcp_ehash_bucket *head = &tcp_ehash[sk->hashent];
+ struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
lock = &head->lock;
write_lock_bh(&head->lock);
}
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
}
write_unlock_bh(lock);
ende:
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
wake_up(&tcp_lhash_wait);
}
@@ -428,20 +430,20 @@
int score, hiscore;
hiscore=-1;
- for (; sk; sk = sk->next) {
+ for (; sk; sk = sk->sk_next) {
struct inet_opt *inet = inet_sk(sk);
if (inet->num == hnum && !ipv6_only_sock(sk)) {
__u32 rcv_saddr = inet->rcv_saddr;
- score = (sk->family == PF_INET ? 1 : 0);
+ score = (sk->sk_family == PF_INET ? 1 : 0);
if (rcv_saddr) {
if (rcv_saddr != daddr)
continue;
score+=2;
}
- if (sk->bound_dev_if) {
- if (sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score+=2;
}
@@ -467,10 +469,10 @@
if (sk) {
struct inet_opt *inet = inet_sk(sk);
- if (inet->num == hnum && !sk->next &&
+ if (inet->num == hnum && !sk->sk_next &&
(!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
- (sk->family == PF_INET || !ipv6_only_sock(sk)) &&
- !sk->bound_dev_if)
+ (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
+ !sk->sk_bound_dev_if)
goto sherry_cache;
sk = __tcp_v4_lookup_listener(sk, daddr, hnum, dif);
}
@@ -502,13 +504,13 @@
int hash = tcp_hashfn(daddr, hnum, saddr, sport);
head = &tcp_ehash[hash];
read_lock(&head->lock);
- for (sk = head->chain; sk; sk = sk->next) {
+ for (sk = head->chain; sk; sk = sk->sk_next) {
if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next)
+ for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->sk_next)
if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit;
out:
@@ -555,7 +557,7 @@
struct inet_opt *inet = inet_sk(sk);
u32 daddr = inet->rcv_saddr;
u32 saddr = inet->daddr;
- int dif = sk->bound_dev_if;
+ int dif = sk->sk_bound_dev_if;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
@@ -567,7 +569,7 @@
/* Check TIME-WAIT sockets first. */
for (skp = &(head + tcp_ehash_size)->chain; (sk2 = *skp) != NULL;
- skp = &sk2->next) {
+ skp = &sk2->sk_next) {
tw = (struct tcp_tw_bucket *)sk2;
if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
@@ -587,15 +589,15 @@
fall back to VJ's scheme and use initial
timestamp retrieved from peer table.
*/
- if (tw->ts_recent_stamp &&
+ if (tw->tw_ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse &&
xtime.tv_sec -
- tw->ts_recent_stamp > 1))) {
+ tw->tw_ts_recent_stamp > 1))) {
if ((tp->write_seq =
- tw->snd_nxt + 65535 + 2) == 0)
+ tw->tw_snd_nxt + 65535 + 2) == 0)
tp->write_seq = 1;
- tp->ts_recent = tw->ts_recent;
- tp->ts_recent_stamp = tw->ts_recent_stamp;
+ tp->ts_recent = tw->tw_ts_recent;
+ tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
sock_hold(sk2);
skp = &head->chain;
goto unique;
@@ -606,7 +608,7 @@
tw = NULL;
/* And established part... */
- for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->next) {
+ for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->sk_next) {
if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
goto not_unique;
}
@@ -616,14 +618,14 @@
* in hash table socket with a funny identity. */
inet->num = lport;
inet->sport = htons(lport);
- BUG_TRAP(!sk->pprev);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ BUG_TRAP(!sk->sk_pprev);
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sk->hashent = hash;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sk->sk_hashent = hash;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock(&head->lock);
if (twp) {
@@ -727,7 +729,7 @@
spin_unlock(&tcp_portalloc_lock);
tcp_bind_hash(sk, tb, rover);
- if (!sk->pprev) {
+ if (!sk->sk_pprev) {
inet_sk(sk)->sport = htons(rover);
__tcp_v4_hash(sk, 0);
}
@@ -743,9 +745,9 @@
}
head = &tcp_bhash[tcp_bhashfn(snum)];
- tb = (struct tcp_bind_bucket *)sk->prev;
+ tb = (struct tcp_bind_bucket *)sk->sk_prev;
spin_lock_bh(&head->lock);
- if (tb->owners == sk && !sk->bind_next) {
+ if (tb->owners == sk && !sk->sk_bind_next) {
__tcp_v4_hash(sk, 0);
spin_unlock_bh(&head->lock);
return 0;
@@ -784,7 +786,7 @@
}
tmp = ip_route_connect(&rt, nexthop, inet->saddr,
- RT_CONN_FLAGS(sk), sk->bound_dev_if,
+ RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
inet->sport, usin->sin_port, sk);
if (tmp < 0)
@@ -871,7 +873,7 @@
/* This unhashes the socket and releases the local port, if necessary. */
tcp_set_state(sk, TCP_CLOSE);
ip_rt_put(rt);
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
inet->dport = 0;
return err;
}
@@ -943,7 +945,7 @@
* send out by Linux are always <576bytes so they should go through
* unfragmented).
*/
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return;
/* We don't check in the destentry if pmtu discovery is forbidden
@@ -961,7 +963,7 @@
* for the case, if this connection will not able to recover.
*/
if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
- sk->err_soft = EMSGSIZE;
+ sk->sk_err_soft = EMSGSIZE;
mtu = dst_pmtu(dst);
@@ -1017,7 +1019,7 @@
ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
- if (sk->state == TCP_TIME_WAIT) {
+ if (sk->sk_state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket *)sk);
return;
}
@@ -1029,12 +1031,12 @@
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LockDroppedIcmps);
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
goto out;
tp = tcp_sk(sk);
seq = ntohl(th->seq);
- if (sk->state != TCP_LISTEN &&
+ if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
NET_INC_STATS(OutOfWindowIcmps);
goto out;
@@ -1070,7 +1072,7 @@
goto out;
}
- switch (sk->state) {
+ switch (sk->sk_state) {
struct open_request *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
@@ -1106,13 +1108,13 @@
*/
if (!sock_owned_by_user(sk)) {
TCP_INC_STATS_BH(TcpAttemptFails);
- sk->err = err;
+ sk->sk_err = err;
- sk->error_report(sk);
+ sk->sk_error_report(sk);
tcp_done(sk);
} else {
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
goto out;
}
@@ -1135,10 +1137,10 @@
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
} else { /* Only an error on timeout */
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
out:
@@ -1269,8 +1271,8 @@
{
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
- tcp_v4_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
- tw->rcv_wnd >> tw->rcv_wscale, tw->ts_recent);
+ tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
+ tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
tcp_tw_put(tw);
}
@@ -1286,7 +1288,7 @@
{
struct rtable *rt;
struct ip_options *opt = req->af.v4_req.opt;
- struct flowi fl = { .oif = sk->bound_dev_if,
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = ((opt && opt->srr) ?
opt->faddr :
@@ -1586,7 +1588,7 @@
if (!newsk)
goto exit;
- newsk->dst_cache = dst;
+ newsk->sk_dst_cache = dst;
tcp_v4_setup_caps(newsk, dst);
newtp = tcp_sk(newsk);
@@ -1641,7 +1643,7 @@
tcp_v4_iif(skb));
if (nsk) {
- if (nsk->state != TCP_TIME_WAIT) {
+ if (nsk->sk_state != TCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
@@ -1693,7 +1695,7 @@
*/
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
- if (sk->state == TCP_ESTABLISHED) { /* Fast path */
+ if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
goto reset;
@@ -1704,7 +1706,7 @@
if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
goto csum_err;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v4_hnd_req(sk, skb);
if (!nsk)
goto discard;
@@ -1789,7 +1791,7 @@
goto no_tcp_socket;
process:
- if (sk->state == TCP_TIME_WAIT)
+ if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
@@ -1870,8 +1872,8 @@
*/
static void __tcp_v4_rehash(struct sock *sk)
{
- sk->prot->unhash(sk);
- sk->prot->hash(sk);
+ sk->sk_prot->unhash(sk);
+ sk->sk_prot->hash(sk);
}
static int tcp_v4_reselect_saddr(struct sock *sk)
@@ -1888,8 +1890,8 @@
/* Query new route. */
err = ip_route_connect(&rt, daddr, 0,
- RT_TOS(inet->tos) | sk->localroute,
- sk->bound_dev_if,
+ RT_TOS(inet->tos) | sk->sk_localroute,
+ sk->sk_bound_dev_if,
IPPROTO_TCP,
inet->sport, inet->dport, sk);
if (err)
@@ -1941,7 +1943,7 @@
daddr = inet->opt->faddr;
{
- struct flowi fl = { .oif = sk->bound_dev_if,
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = daddr,
.saddr = inet->saddr,
@@ -1960,13 +1962,13 @@
}
/* Routing failed... */
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
if (!sysctl_ip_dynaddr ||
- sk->state != TCP_SYN_SENT ||
- (sk->userlocks & SOCK_BINDADDR_LOCK) ||
+ sk->sk_state != TCP_SYN_SENT ||
+ (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = tcp_v4_reselect_saddr(sk)) != 0)
- sk->err_soft=-err;
+ sk->sk_err_soft = -err;
return err;
}
@@ -2023,14 +2025,14 @@
{
struct inet_peer *peer = NULL;
- peer = inet_getpeer(tw->daddr, 1);
+ peer = inet_getpeer(tw->tw_daddr, 1);
if (peer) {
- if ((s32)(peer->tcp_ts - tw->ts_recent) <= 0 ||
+ if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
(peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
- peer->tcp_ts_stamp <= tw->ts_recent_stamp)) {
- peer->tcp_ts_stamp = tw->ts_recent_stamp;
- peer->tcp_ts = tw->ts_recent;
+ peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
+ peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
+ peer->tcp_ts = tw->tw_ts_recent;
}
inet_putpeer(peer);
return 1;
@@ -2083,15 +2085,15 @@
tp->reordering = sysctl_tcp_reordering;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
- sk->write_space = tcp_write_space;
- sk->use_write_queue = 1;
+ sk->sk_write_space = tcp_write_space;
+ sk->sk_use_write_queue = 1;
tp->af_specific = &ipv4_specific;
- sk->sndbuf = sysctl_tcp_wmem[1];
- sk->rcvbuf = sysctl_tcp_rmem[1];
+ sk->sk_sndbuf = sysctl_tcp_wmem[1];
+ sk->sk_rcvbuf = sysctl_tcp_rmem[1];
atomic_inc(&tcp_sockets_allocated);
@@ -2114,7 +2116,7 @@
__skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */
- if (sk->prev)
+ if (sk->sk_prev)
tcp_put_port(sk);
/* If sendmsg cached page exists, toss it. */
@@ -2142,7 +2144,7 @@
if (!sk)
continue;
++st->num;
- if (sk->family == st->family) {
+ if (sk->sk_family == st->family) {
rc = sk;
goto out;
}
@@ -2195,14 +2197,14 @@
get_req:
req = tp->listen_opt->syn_table[st->sbucket];
}
- sk = st->syn_wait_sk->next;
+ sk = st->syn_wait_sk->sk_next;
st->state = TCP_SEQ_STATE_LISTENING;
read_unlock_bh(&tp->syn_wait_lock);
} else
- sk = sk->next;
+ sk = sk->sk_next;
get_sk:
while (sk) {
- if (sk->family == st->family) {
+ if (sk->sk_family == st->family) {
cur = sk;
goto out;
}
@@ -2216,7 +2218,7 @@
goto get_req;
}
read_unlock_bh(&tp->syn_wait_lock);
- sk = sk->next;
+ sk = sk->sk_next;
}
if (++st->bucket < TCP_LHTABLE_SIZE) {
sk = tcp_listening_hash[st->bucket];
@@ -2248,8 +2250,8 @@
read_lock(&tcp_ehash[st->bucket].lock);
for (sk = tcp_ehash[st->bucket].chain; sk;
- sk = sk->next, ++st->num) {
- if (sk->family != st->family)
+ sk = sk->sk_next, ++st->num) {
+ if (sk->sk_family != st->family)
continue;
rc = sk;
goto out;
@@ -2257,8 +2259,8 @@
st->state = TCP_SEQ_STATE_TIME_WAIT;
for (tw = (struct tcp_tw_bucket *)
tcp_ehash[st->bucket + tcp_ehash_size].chain;
- tw; tw = (struct tcp_tw_bucket *)tw->next, ++st->num) {
- if (tw->family != st->family)
+ tw; tw = (struct tcp_tw_bucket *)tw->tw_next, ++st->num) {
+ if (tw->tw_family != st->family)
continue;
rc = tw;
goto out;
@@ -2278,11 +2280,11 @@
if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
tw = cur;
- tw = (struct tcp_tw_bucket *)tw->next;
+ tw = (struct tcp_tw_bucket *)tw->tw_next;
get_tw:
- while (tw && tw->family != st->family) {
+ while (tw && tw->tw_family != st->family) {
++st->num;
- tw = (struct tcp_tw_bucket *)tw->next;
+ tw = (struct tcp_tw_bucket *)tw->tw_next;
}
if (tw) {
cur = tw;
@@ -2298,11 +2300,11 @@
goto out;
}
} else
- sk = sk->next;
+ sk = sk->sk_next;
- while (sk && sk->family != st->family) {
+ while (sk && sk->sk_family != st->family) {
++st->num;
- sk = sk->next;
+ sk = sk->sk_next;
}
if (!sk) {
st->state = TCP_SEQ_STATE_TIME_WAIT;
@@ -2482,7 +2484,7 @@
uid,
0, /* non standard timer */
0, /* open_requests have no inode */
- atomic_read(&sk->refcnt),
+ atomic_read(&sk->sk_refcnt),
req);
}
@@ -2503,9 +2505,9 @@
} else if (tp->pending == TCP_TIME_PROBE0) {
timer_active = 4;
timer_expires = tp->timeout;
- } else if (timer_pending(&sp->timer)) {
+ } else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
- timer_expires = sp->timer.expires;
+ timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
@@ -2513,14 +2515,14 @@
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5d %8d %lu %d %p %u %u %u %u %d",
- i, src, srcp, dest, destp, sp->state,
+ i, src, srcp, dest, destp, sp->sk_state,
tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
timer_active, timer_expires - jiffies,
tp->retransmits,
sock_i_uid(sp),
tp->probes_out,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp,
+ atomic_read(&sp->sk_refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
tp->snd_cwnd,
tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
@@ -2530,21 +2532,21 @@
{
unsigned int dest, src;
__u16 destp, srcp;
- int ttd = tw->ttd - jiffies;
+ int ttd = tw->tw_ttd - jiffies;
if (ttd < 0)
ttd = 0;
- dest = tw->daddr;
- src = tw->rcv_saddr;
- destp = ntohs(tw->dport);
- srcp = ntohs(tw->sport);
+ dest = tw->tw_daddr;
+ src = tw->tw_rcv_saddr;
+ destp = ntohs(tw->tw_dport);
+ srcp = ntohs(tw->tw_sport);
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p",
- i, src, srcp, dest, destp, tw->substate, 0, 0,
+ i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
3, ttd, 0, 0, 0, 0,
- atomic_read(&tw->refcnt), tw);
+ atomic_read(&tw->tw_refcnt), tw);
}
#define TMPSZ 150
@@ -2627,12 +2629,12 @@
int err = sock_create(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
if (err < 0)
panic("Failed to create the TCP control socket.\n");
- tcp_socket->sk->allocation = GFP_ATOMIC;
+ tcp_socket->sk->sk_allocation = GFP_ATOMIC;
inet_sk(tcp_socket->sk)->uc_ttl = -1;
/* Unhash it so that IP input processing does not even
* see it, we do not wish this socket to see incoming
* packets.
*/
- tcp_socket->sk->prot->unhash(tcp_socket->sk);
+ tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
}
diff -urN linux-2.5.70-bk11/net/ipv4/tcp_minisocks.c linux-2.5.70-bk12/net/ipv4/tcp_minisocks.c
--- linux-2.5.70-bk11/net/ipv4/tcp_minisocks.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/tcp_minisocks.c 2003-06-07 04:47:51.000000000 -0700
@@ -61,32 +61,33 @@
struct tcp_bind_bucket *tb;
/* Unlink from established hashes. */
- ehead = &tcp_ehash[tw->hashent];
+ ehead = &tcp_ehash[tw->tw_hashent];
write_lock(&ehead->lock);
- if (!tw->pprev) {
+ if (!tw->tw_pprev) {
write_unlock(&ehead->lock);
return;
}
- if(tw->next)
- tw->next->pprev = tw->pprev;
- *(tw->pprev) = tw->next;
- tw->pprev = NULL;
+ if (tw->tw_next)
+ tw->tw_next->sk_pprev = tw->tw_pprev;
+ *(tw->tw_pprev) = tw->tw_next;
+ tw->tw_pprev = NULL;
write_unlock(&ehead->lock);
/* Disassociate with bind bucket. */
- bhead = &tcp_bhash[tcp_bhashfn(tw->num)];
+ bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
spin_lock(&bhead->lock);
- tb = tw->tb;
- if(tw->bind_next)
- tw->bind_next->bind_pprev = tw->bind_pprev;
- *(tw->bind_pprev) = tw->bind_next;
- tw->tb = NULL;
+ tb = tw->tw_tb;
+ if (tw->tw_bind_next)
+ tw->tw_bind_next->sk_bind_pprev = tw->tw_bind_pprev;
+ *(tw->tw_bind_pprev) = tw->tw_bind_next;
+ tw->tw_tb = NULL;
tcp_bucket_destroy(tb);
spin_unlock(&bhead->lock);
#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&tw->refcnt) != 1) {
- printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw, atomic_read(&tw->refcnt));
+ if (atomic_read(&tw->tw_refcnt) != 1) {
+ printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
+ atomic_read(&tw->tw_refcnt));
}
#endif
tcp_tw_put(tw);
@@ -128,33 +129,34 @@
int paws_reject = 0;
tp.saw_tstamp = 0;
- if (th->doff > (sizeof(struct tcphdr)>>2) && tw->ts_recent_stamp) {
+ if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
tcp_parse_options(skb, &tp, 0);
if (tp.saw_tstamp) {
- tp.ts_recent = tw->ts_recent;
- tp.ts_recent_stamp = tw->ts_recent_stamp;
+ tp.ts_recent = tw->tw_ts_recent;
+ tp.ts_recent_stamp = tw->tw_ts_recent_stamp;
paws_reject = tcp_paws_check(&tp, th->rst);
}
}
- if (tw->substate == TCP_FIN_WAIT2) {
+ if (tw->tw_substate == TCP_FIN_WAIT2) {
/* Just repeat all the checks of tcp_rcv_state_process() */
/* Out of window, send ACK */
if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tw->rcv_nxt, tw->rcv_nxt + tw->rcv_wnd))
+ tw->tw_rcv_nxt,
+ tw->tw_rcv_nxt + tw->tw_rcv_wnd))
return TCP_TW_ACK;
if (th->rst)
goto kill;
- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->rcv_nxt))
+ if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
goto kill_with_rst;
/* Dup ACK? */
- if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt) ||
+ if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
tcp_tw_put(tw);
return TCP_TW_SUCCESS;
@@ -163,7 +165,8 @@
/* New data or FIN. If new data arrive after half-duplex close,
* reset.
*/
- if (!th->fin || TCP_SKB_CB(skb)->end_seq != tw->rcv_nxt+1) {
+ if (!th->fin ||
+ TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
kill_with_rst:
tcp_tw_deschedule(tw);
tcp_tw_put(tw);
@@ -171,11 +174,11 @@
}
/* FIN arrived, enter true time-wait state. */
- tw->substate = TCP_TIME_WAIT;
- tw->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tw->tw_substate = TCP_TIME_WAIT;
+ tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp.saw_tstamp) {
- tw->ts_recent_stamp = xtime.tv_sec;
- tw->ts_recent = tp.rcv_tsval;
+ tw->tw_ts_recent_stamp = xtime.tv_sec;
+ tw->tw_ts_recent = tp.rcv_tsval;
}
/* I am shamed, but failed to make it more elegant.
@@ -183,10 +186,10 @@
* to generalize to IPv6. Taking into account that IPv6
* do not undertsnad recycling in any case, it not
* a big problem in practice. --ANK */
- if (tw->family == AF_INET &&
- sysctl_tcp_tw_recycle && tw->ts_recent_stamp &&
+ if (tw->tw_family == AF_INET &&
+ sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
tcp_v4_tw_remember_stamp(tw))
- tcp_tw_schedule(tw, tw->timeout);
+ tcp_tw_schedule(tw, tw->tw_timeout);
else
tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
return TCP_TW_ACK;
@@ -210,7 +213,7 @@
*/
if (!paws_reject &&
- (TCP_SKB_CB(skb)->seq == tw->rcv_nxt &&
+ (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
/* In window segment, it may be only reset or bare ack. */
@@ -229,8 +232,8 @@
tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
if (tp.saw_tstamp) {
- tw->ts_recent = tp.rcv_tsval;
- tw->ts_recent_stamp = xtime.tv_sec;
+ tw->tw_ts_recent = tp.rcv_tsval;
+ tw->tw_ts_recent_stamp = xtime.tv_sec;
}
tcp_tw_put(tw);
@@ -255,9 +258,9 @@
*/
if (th->syn && !th->rst && !th->ack && !paws_reject &&
- (after(TCP_SKB_CB(skb)->seq, tw->rcv_nxt) ||
- (tp.saw_tstamp && (s32)(tw->ts_recent - tp.rcv_tsval) < 0))) {
- u32 isn = tw->snd_nxt+65535+2;
+ (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
+ (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) {
+ u32 isn = tw->tw_snd_nxt + 65535 + 2;
if (isn == 0)
isn++;
TCP_SKB_CB(skb)->when = isn;
@@ -293,7 +296,7 @@
*/
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
- struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->hashent];
+ struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
struct tcp_bind_hashbucket *bhead;
struct sock **head, *sktw;
@@ -303,33 +306,33 @@
*/
bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
spin_lock(&bhead->lock);
- tw->tb = (struct tcp_bind_bucket *)sk->prev;
- BUG_TRAP(sk->prev!=NULL);
- if ((tw->bind_next = tw->tb->owners) != NULL)
- tw->tb->owners->bind_pprev = &tw->bind_next;
- tw->tb->owners = (struct sock*)tw;
- tw->bind_pprev = &tw->tb->owners;
+ tw->tw_tb = (struct tcp_bind_bucket *)sk->sk_prev;
+ BUG_TRAP(sk->sk_prev);
+ if ((tw->tw_bind_next = tw->tw_tb->owners) != NULL)
+ tw->tw_tb->owners->sk_bind_pprev = &tw->tw_bind_next;
+ tw->tw_tb->owners = (struct sock *)tw;
+ tw->tw_bind_pprev = &tw->tw_tb->owners;
spin_unlock(&bhead->lock);
write_lock(&ehead->lock);
/* Step 2: Remove SK from established hash. */
- if (sk->pprev) {
- if(sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
}
/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
head = &(ehead + tcp_ehash_size)->chain;
sktw = (struct sock *)tw;
- if((sktw->next = *head) != NULL)
- (*head)->pprev = &sktw->next;
+ if ((sktw->sk_next = *head) != NULL)
+ (*head)->sk_pprev = &sktw->sk_next;
*head = sktw;
- sktw->pprev = head;
- atomic_inc(&tw->refcnt);
+ sktw->sk_pprev = head;
+ atomic_inc(&tw->tw_refcnt);
write_unlock(&ehead->lock);
}
@@ -354,33 +357,33 @@
int rto = (tp->rto<<2) - (tp->rto>>1);
/* Give us an identity. */
- tw->daddr = inet->daddr;
- tw->rcv_saddr = inet->rcv_saddr;
- tw->bound_dev_if= sk->bound_dev_if;
- tw->num = inet->num;
- tw->state = TCP_TIME_WAIT;
- tw->substate = state;
- tw->sport = inet->sport;
- tw->dport = inet->dport;
- tw->family = sk->family;
- tw->reuse = sk->reuse;
- tw->rcv_wscale = tp->rcv_wscale;
- atomic_set(&tw->refcnt, 1);
-
- tw->hashent = sk->hashent;
- tw->rcv_nxt = tp->rcv_nxt;
- tw->snd_nxt = tp->snd_nxt;
- tw->rcv_wnd = tcp_receive_window(tp);
- tw->ts_recent = tp->ts_recent;
- tw->ts_recent_stamp= tp->ts_recent_stamp;
- tw->pprev_death = NULL;
+ tw->tw_daddr = inet->daddr;
+ tw->tw_rcv_saddr = inet->rcv_saddr;
+ tw->tw_bound_dev_if = sk->sk_bound_dev_if;
+ tw->tw_num = inet->num;
+ tw->tw_state = TCP_TIME_WAIT;
+ tw->tw_substate = state;
+ tw->tw_sport = inet->sport;
+ tw->tw_dport = inet->dport;
+ tw->tw_family = sk->sk_family;
+ tw->tw_reuse = sk->sk_reuse;
+ tw->tw_rcv_wscale = tp->rcv_wscale;
+ atomic_set(&tw->tw_refcnt, 1);
+
+ tw->tw_hashent = sk->sk_hashent;
+ tw->tw_rcv_nxt = tp->rcv_nxt;
+ tw->tw_snd_nxt = tp->snd_nxt;
+ tw->tw_rcv_wnd = tcp_receive_window(tp);
+ tw->tw_ts_recent = tp->ts_recent;
+ tw->tw_ts_recent_stamp = tp->ts_recent_stamp;
+ tw->tw_pprev_death = NULL;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- if(tw->family == PF_INET6) {
+ if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- ipv6_addr_copy(&tw->v6_daddr, &np->daddr);
- ipv6_addr_copy(&tw->v6_rcv_saddr, &np->rcv_saddr);
+ ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
+ ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
}
#endif
/* Linkage updates. */
@@ -391,9 +394,9 @@
timeo = rto;
if (recycle_ok) {
- tw->timeout = rto;
+ tw->tw_timeout = rto;
} else {
- tw->timeout = TCP_TIMEWAIT_LEN;
+ tw->tw_timeout = TCP_TIMEWAIT_LEN;
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
}
@@ -443,10 +446,10 @@
goto out;
while((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
- tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death;
- if (tw->next_death)
- tw->next_death->pprev_death = tw->pprev_death;
- tw->pprev_death = NULL;
+ tcp_tw_death_row[tcp_tw_death_row_slot] = tw->tw_next_death;
+ if (tw->tw_next_death)
+ tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
+ tw->tw_pprev_death = NULL;
spin_unlock(&tw_death_lock);
tcp_timewait_kill(tw);
@@ -474,11 +477,11 @@
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
spin_lock(&tw_death_lock);
- if (tw->pprev_death) {
- if(tw->next_death)
- tw->next_death->pprev_death = tw->pprev_death;
- *tw->pprev_death = tw->next_death;
- tw->pprev_death = NULL;
+ if (tw->tw_pprev_death) {
+ if (tw->tw_next_death)
+ tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
+ *tw->tw_pprev_death = tw->tw_next_death;
+ tw->tw_pprev_death = NULL;
tcp_tw_put(tw);
if (--tcp_tw_count == 0)
del_timer(&tcp_tw_timer);
@@ -530,14 +533,14 @@
spin_lock(&tw_death_lock);
/* Unlink it, if it was scheduled */
- if (tw->pprev_death) {
- if(tw->next_death)
- tw->next_death->pprev_death = tw->pprev_death;
- *tw->pprev_death = tw->next_death;
- tw->pprev_death = NULL;
+ if (tw->tw_pprev_death) {
+ if (tw->tw_next_death)
+ tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
+ *tw->tw_pprev_death = tw->tw_next_death;
+ tw->tw_pprev_death = NULL;
tcp_tw_count--;
} else
- atomic_inc(&tw->refcnt);
+ atomic_inc(&tw->tw_refcnt);
if (slot >= TCP_TW_RECYCLE_SLOTS) {
/* Schedule to slow timer */
@@ -548,11 +551,11 @@
if (slot >= TCP_TWKILL_SLOTS)
slot = TCP_TWKILL_SLOTS-1;
}
- tw->ttd = jiffies + timeo;
+ tw->tw_ttd = jiffies + timeo;
slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
tpp = &tcp_tw_death_row[slot];
} else {
- tw->ttd = jiffies + (slot<<TCP_TW_RECYCLE_TICK);
+ tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
if (tcp_twcal_hand < 0) {
tcp_twcal_hand = 0;
@@ -567,10 +570,10 @@
tpp = &tcp_twcal_row[slot];
}
- if((tw->next_death = *tpp) != NULL)
- (*tpp)->pprev_death = &tw->next_death;
+ if ((tw->tw_next_death = *tpp) != NULL)
+ (*tpp)->tw_pprev_death = &tw->tw_next_death;
*tpp = tw;
- tw->pprev_death = tpp;
+ tw->tw_pprev_death = tpp;
if (tcp_tw_count++ == 0)
mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
@@ -597,8 +600,8 @@
struct tcp_tw_bucket *tw;
while((tw = tcp_twcal_row[slot]) != NULL) {
- tcp_twcal_row[slot] = tw->next_death;
- tw->pprev_death = NULL;
+ tcp_twcal_row[slot] = tw->tw_next_death;
+ tw->tw_pprev_death = NULL;
tcp_timewait_kill(tw);
tcp_tw_put(tw);
@@ -639,18 +642,18 @@
/* allocate the newsk from the same slab of the master sock,
* if not, at sk_free time we'll try to free it from the wrong
* slabcache (i.e. is it TCPv4 or v6?) -acme */
- struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->slab);
+ struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_slab);
if(newsk != NULL) {
struct tcp_opt *newtp;
struct sk_filter *filter;
memcpy(newsk, sk, sizeof(struct tcp_sock));
- newsk->state = TCP_SYN_RECV;
+ newsk->sk_state = TCP_SYN_RECV;
/* SANITY */
- newsk->pprev = NULL;
- newsk->prev = NULL;
+ newsk->sk_pprev = NULL;
+ newsk->sk_prev = NULL;
/* Clone the TCP header template */
inet_sk(newsk)->dport = req->rmt_port;
@@ -658,29 +661,29 @@
sock_lock_init(newsk);
bh_lock_sock(newsk);
- newsk->dst_lock = RW_LOCK_UNLOCKED;
- atomic_set(&newsk->rmem_alloc, 0);
- skb_queue_head_init(&newsk->receive_queue);
- atomic_set(&newsk->wmem_alloc, 0);
- skb_queue_head_init(&newsk->write_queue);
- atomic_set(&newsk->omem_alloc, 0);
- newsk->wmem_queued = 0;
- newsk->forward_alloc = 0;
+ newsk->sk_dst_lock = RW_LOCK_UNLOCKED;
+ atomic_set(&newsk->sk_rmem_alloc, 0);
+ skb_queue_head_init(&newsk->sk_receive_queue);
+ atomic_set(&newsk->sk_wmem_alloc, 0);
+ skb_queue_head_init(&newsk->sk_write_queue);
+ atomic_set(&newsk->sk_omem_alloc, 0);
+ newsk->sk_wmem_queued = 0;
+ newsk->sk_forward_alloc = 0;
sock_reset_flag(newsk, SOCK_DONE);
- newsk->userlocks = sk->userlocks & ~SOCK_BINDPORT_LOCK;
- newsk->backlog.head = newsk->backlog.tail = NULL;
- newsk->callback_lock = RW_LOCK_UNLOCKED;
- skb_queue_head_init(&newsk->error_queue);
- newsk->write_space = tcp_write_space;
+ newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+ newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
+ skb_queue_head_init(&newsk->sk_error_queue);
+ newsk->sk_write_space = tcp_write_space;
- if ((filter = newsk->filter) != NULL)
+ if ((filter = newsk->sk_filter) != NULL)
sk_filter_charge(newsk, filter);
if (unlikely(xfrm_sk_clone_policy(newsk))) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
- newsk->destruct = NULL;
+ newsk->sk_destruct = NULL;
sk_free(newsk);
return NULL;
}
@@ -744,9 +747,9 @@
memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
/* Back to base struct sock members. */
- newsk->err = 0;
- newsk->priority = 0;
- atomic_set(&newsk->refcnt, 2);
+ newsk->sk_err = 0;
+ newsk->sk_priority = 0;
+ atomic_set(&newsk->sk_refcnt, 2);
#ifdef INET_REFCNT_DEBUG
atomic_inc(&inet_sock_nr);
#endif
@@ -755,9 +758,9 @@
if (sock_flag(newsk, SOCK_KEEPOPEN))
tcp_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
- newsk->socket = NULL;
- newsk->sleep = NULL;
- newsk->owner = NULL;
+ newsk->sk_socket = NULL;
+ newsk->sk_sleep = NULL;
+ newsk->sk_owner = NULL;
newtp->tstamp_ok = req->tstamp_ok;
if((newtp->sack_ok = req->sack_ok) != 0) {
@@ -791,7 +794,7 @@
newtp->mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
if (newtp->ecn_flags&TCP_ECN_OK)
- newsk->no_largesend = 1;
+ newsk->sk_no_largesend = 1;
TCP_INC_STATS_BH(TcpPassiveOpens);
}
@@ -967,7 +970,7 @@
if (child == NULL)
goto listen_overflow;
- sk_set_owner(child, sk->owner);
+ sk_set_owner(child, sk->sk_owner);
tcp_synq_unlink(tp, req, prev);
tcp_synq_removed(sk, req);
@@ -999,14 +1002,14 @@
struct sk_buff *skb)
{
int ret = 0;
- int state = child->state;
+ int state = child->sk_state;
if (!sock_owned_by_user(child)) {
ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
/* Wakeup parent, send SIGIO */
- if (state == TCP_SYN_RECV && child->state != state)
- parent->data_ready(parent, 0);
+ if (state == TCP_SYN_RECV && child->sk_state != state)
+ parent->sk_data_ready(parent, 0);
} else {
/* Alas, it is possible again, because we do lookup
* in main socket hash table and lock on listening
diff -urN linux-2.5.70-bk11/net/ipv4/tcp_output.c linux-2.5.70-bk12/net/ipv4/tcp_output.c
--- linux-2.5.70-bk11/net/ipv4/tcp_output.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/tcp_output.c 2003-06-07 04:47:51.000000000 -0700
@@ -48,7 +48,7 @@
void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
tp->send_head = skb->next;
- if (tp->send_head == (struct sk_buff *) &sk->write_queue)
+ if (tp->send_head == (struct sk_buff *)&sk->sk_write_queue)
tp->send_head = NULL;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp->packets_out++ == 0)
@@ -309,13 +309,13 @@
/* Advance write_seq and place onto the write_queue. */
tp->write_seq = TCP_SKB_CB(skb)->end_seq;
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
tcp_charge_skb(sk, skb);
if (!force_queue && tp->send_head == NULL && tcp_snd_test(tp, skb, cur_mss, tp->nonagle)) {
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- if (tcp_transmit_skb(sk, skb_clone(skb, sk->allocation)) == 0) {
+ if (!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation))) {
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tcp_minshall_update(tp, cur_mss, skb);
if (tp->packets_out++ == 0)
@@ -336,10 +336,10 @@
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *skb = tp->send_head;
- if (tcp_snd_test(tp, skb, cur_mss, 1)) {
+ if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- if (tcp_transmit_skb(sk, skb_clone(skb, sk->allocation)) == 0) {
+ if (!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation))) {
tp->send_head = NULL;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp->packets_out++ == 0)
@@ -587,7 +587,7 @@
tp->pmtu_cookie = pmtu;
tp->mss_cache = tp->mss_cache_std = mss_now;
- if (sk->route_caps&NETIF_F_TSO) {
+ if (sk->sk_route_caps & NETIF_F_TSO) {
int large_mss;
large_mss = 65535 - tp->af_specific->net_header_len -
@@ -620,7 +620,7 @@
* In time closedown will finish, we empty the write queue and all
* will be happy.
*/
- if(sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct sk_buff *skb;
int sent_pkts = 0;
@@ -632,7 +632,7 @@
mss_now = tcp_current_mss(sk, 1);
while((skb = tp->send_head) &&
- tcp_snd_test(tp, skb, mss_now, tcp_skb_is_last(sk, skb) ? nonagle : 1)) {
+ tcp_snd_test(tp, skb, mss_now, tcp_skb_is_last(sk, skb) ? nonagle : TCP_NAGLE_PUSH)) {
if (skb->len > mss_now) {
if (tcp_fragment(sk, skb, mss_now))
break;
@@ -886,16 +886,17 @@
/* Do not sent more than we queued. 1/4 is reserved for possible
* copying overhead: frgagmentation, tunneling, mangling etc.
*/
- if (atomic_read(&sk->wmem_alloc) > min(sk->wmem_queued+(sk->wmem_queued>>2),sk->sndbuf))
+ if (atomic_read(&sk->sk_wmem_alloc) >
+ min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
return -EAGAIN;
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
BUG();
- if (sk->route_caps&NETIF_F_TSO) {
- sk->route_caps &= ~NETIF_F_TSO;
- sk->no_largesend = 1;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ sk->sk_route_caps &= ~NETIF_F_TSO;
+ sk->sk_no_largesend = 1;
tp->mss_cache = tp->mss_cache_std;
}
@@ -924,7 +925,7 @@
if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
(skb->len < (cur_mss >> 1)) &&
(skb->next != tp->send_head) &&
- (skb->next != (struct sk_buff *)&sk->write_queue) &&
+ (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
(skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
(sysctl_tcp_retrans_collapse != 0))
tcp_retrans_try_collapse(sk, skb, cur_mss);
@@ -1013,7 +1014,8 @@
else
NET_INC_STATS_BH(TCPSlowStartRetrans);
- if (skb == skb_peek(&sk->write_queue))
+ if (skb ==
+ skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
}
@@ -1059,7 +1061,7 @@
if(tcp_retransmit_skb(sk, skb))
break;
- if (skb == skb_peek(&sk->write_queue))
+ if (skb == skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
NET_INC_STATS_BH(TCPForwardRetrans);
@@ -1073,7 +1075,7 @@
void tcp_send_fin(struct sock *sk)
{
struct tcp_opt *tp = tcp_sk(sk);
- struct sk_buff *skb = skb_peek_tail(&sk->write_queue);
+ struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
unsigned int mss_now;
/* Optimization, tack on the FIN if we have a queue of
@@ -1106,7 +1108,7 @@
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
tcp_send_skb(sk, skb, 1, mss_now);
}
- __tcp_push_pending_frames(sk, tp, mss_now, 1);
+ __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
}
/* We get here when a process closes a file descriptor (either due to
@@ -1149,7 +1151,7 @@
{
struct sk_buff* skb;
- skb = skb_peek(&sk->write_queue);
+ skb = skb_peek(&sk->sk_write_queue);
if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
return -EFAULT;
@@ -1159,8 +1161,8 @@
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
if (nskb == NULL)
return -ENOMEM;
- __skb_unlink(skb, &sk->write_queue);
- __skb_queue_head(&sk->write_queue, nskb);
+ __skb_unlink(skb, &sk->sk_write_queue);
+ __skb_queue_head(&sk->sk_write_queue, nskb);
tcp_free_skb(sk, skb);
tcp_charge_skb(sk, nskb);
skb = nskb;
@@ -1275,7 +1277,7 @@
tp->rcv_ssthresh = tp->rcv_wnd;
- sk->err = 0;
+ sk->sk_err = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->snd_wnd = 0;
tcp_init_wl(tp, tp->write_seq, 0);
@@ -1300,7 +1302,7 @@
tcp_connect_init(sk);
- buff = alloc_skb(MAX_TCP_HEADER + 15, sk->allocation);
+ buff = alloc_skb(MAX_TCP_HEADER + 15, sk->sk_allocation);
if (unlikely(buff == NULL))
return -ENOBUFS;
@@ -1319,7 +1321,7 @@
/* Send it off. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
tp->retrans_stamp = TCP_SKB_CB(buff)->when;
- __skb_queue_tail(&sk->write_queue, buff);
+ __skb_queue_tail(&sk->sk_write_queue, buff);
tcp_charge_skb(sk, buff);
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
@@ -1388,7 +1390,7 @@
void tcp_send_ack(struct sock *sk)
{
/* If we have been reset, we may not send again. */
- if(sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *buff;
@@ -1456,7 +1458,7 @@
int tcp_write_wakeup(struct sock *sk)
{
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -1481,9 +1483,9 @@
return -1;
/* SWS override triggered forced fragmentation.
* Disable TSO, the connection is too sick. */
- if (sk->route_caps&NETIF_F_TSO) {
- sk->no_largesend = 1;
- sk->route_caps &= ~NETIF_F_TSO;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ sk->sk_no_largesend = 1;
+ sk->sk_route_caps &= ~NETIF_F_TSO;
tp->mss_cache = tp->mss_cache_std;
}
}
diff -urN linux-2.5.70-bk11/net/ipv4/tcp_timer.c linux-2.5.70-bk12/net/ipv4/tcp_timer.c
--- linux-2.5.70-bk11/net/ipv4/tcp_timer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/tcp_timer.c 2003-06-07 04:47:51.000000000 -0700
@@ -57,9 +57,9 @@
tp->delack_timer.data = (unsigned long) sk;
tp->ack.pending = 0;
- init_timer(&sk->timer);
- sk->timer.function=&tcp_keepalive_timer;
- sk->timer.data = (unsigned long) sk;
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.function = &tcp_keepalive_timer;
+ sk->sk_timer.data = (unsigned long)sk;
}
void tcp_clear_xmit_timers(struct sock *sk)
@@ -77,14 +77,14 @@
del_timer(&tp->delack_timer))
__sock_put(sk);
- if(timer_pending(&sk->timer) && del_timer(&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
__sock_put(sk);
}
static void tcp_write_err(struct sock *sk)
{
- sk->err = sk->err_soft ? : ETIMEDOUT;
- sk->error_report(sk);
+ sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+ sk->sk_error_report(sk);
tcp_done(sk);
NET_INC_STATS_BH(TCPAbortOnTimeout);
@@ -112,11 +112,11 @@
orphans <<= 1;
/* If some dubious ICMP arrived, penalize even more. */
- if (sk->err_soft)
+ if (sk->sk_err_soft)
orphans <<= 1;
if (orphans >= sysctl_tcp_max_orphans ||
- (sk->wmem_queued > SOCK_MIN_SNDBUF &&
+ (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
if (net_ratelimit())
printk(KERN_INFO "Out of socket memory\n");
@@ -142,7 +142,7 @@
int retries = sysctl_tcp_orphan_retries; /* May be zero. */
/* We know from an ICMP that something is wrong. */
- if (sk->err_soft && !alive)
+ if (sk->sk_err_soft && !alive)
retries = 0;
/* However, if socket sent something recently, select some safe
@@ -159,9 +159,9 @@
struct tcp_opt *tp = tcp_sk(sk);
int retry_until;
- if ((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV)) {
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (tp->retransmits)
- dst_negative_advice(&sk->dst_cache);
+ dst_negative_advice(&sk->sk_dst_cache);
retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries;
} else {
if (tp->retransmits >= sysctl_tcp_retries1) {
@@ -185,7 +185,7 @@
Golden words :-).
*/
- dst_negative_advice(&sk->dst_cache);
+ dst_negative_advice(&sk->sk_dst_cache);
}
retry_until = sysctl_tcp_retries2;
@@ -224,7 +224,7 @@
tcp_mem_reclaim(sk);
- if (sk->state == TCP_CLOSE || !(tp->ack.pending&TCP_ACK_TIMER))
+ if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
goto out;
if ((long)(tp->ack.timeout - jiffies) > 0) {
@@ -241,7 +241,7 @@
skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
- sk->backlog_rcv(sk, skb);
+ sk->sk_backlog_rcv(sk, skb);
tp->ucopy.memory = 0;
}
@@ -325,10 +325,10 @@
if (tp->packets_out == 0)
goto out;
- BUG_TRAP(!skb_queue_empty(&sk->write_queue));
+ BUG_TRAP(!skb_queue_empty(&sk->sk_write_queue));
if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
- !((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV))) {
+ !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
/* Receiver dastardly shrinks window. Our retransmits
* become zero probes, but we should not timeout this
* connection. If the socket is an orphan, time it out,
@@ -347,7 +347,7 @@
goto out;
}
tcp_enter_loss(sk, 0);
- tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
+ tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
__sk_dst_reset(sk);
goto out_reset_timer;
}
@@ -381,7 +381,7 @@
tcp_enter_loss(sk, 0);
}
- if (tcp_retransmit_skb(sk, skb_peek(&sk->write_queue)) > 0) {
+ if (tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)) > 0) {
/* Retransmission failed because of local congestion,
* do not backoff.
*/
@@ -433,7 +433,7 @@
goto out_unlock;
}
- if (sk->state == TCP_CLOSE || !tp->pending)
+ if (sk->sk_state == TCP_CLOSE || !tp->pending)
goto out;
if ((long)(tp->timeout - jiffies) > 0) {
@@ -556,19 +556,19 @@
void tcp_delete_keepalive_timer (struct sock *sk)
{
- if (timer_pending(&sk->timer) && del_timer (&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer (&sk->sk_timer))
__sock_put(sk);
}
void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
{
- if (!mod_timer(&sk->timer, jiffies+len))
+ if (!mod_timer(&sk->sk_timer, jiffies + len))
sock_hold(sk);
}
void tcp_set_keepalive(struct sock *sk, int val)
{
- if ((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN))
+ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
return;
if (val && !sock_flag(sk, SOCK_KEEPOPEN))
@@ -592,12 +592,12 @@
goto out;
}
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
tcp_synack_timer(sk);
goto out;
}
- if (sk->state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
+ if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
if (tp->linger2 >= 0) {
int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;
@@ -610,7 +610,7 @@
goto death;
}
- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->state == TCP_CLOSE)
+ if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
goto out;
elapsed = keepalive_time_when(tp);
diff -urN linux-2.5.70-bk11/net/ipv4/udp.c linux-2.5.70-bk12/net/ipv4/udp.c
--- linux-2.5.70-bk11/net/ipv4/udp.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv4/udp.c 2003-06-07 04:47:51.000000000 -0700
@@ -148,7 +148,7 @@
do {
if (++size >= best_size_so_far)
goto next;
- } while ((sk2 = sk2->next) != NULL);
+ } while ((sk2 = sk2->sk_next) != NULL);
best_size_so_far = size;
best = result;
next:;
@@ -171,28 +171,28 @@
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
- sk2 = sk2->next) {
+ sk2 = sk2->sk_next) {
struct inet_opt *inet2 = inet_sk(sk2);
if (inet2->num == snum &&
sk2 != sk &&
!ipv6_only_sock(sk2) &&
- sk2->bound_dev_if == sk->bound_dev_if &&
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
(!inet2->rcv_saddr ||
!inet->rcv_saddr ||
inet2->rcv_saddr == inet->rcv_saddr) &&
- (!sk2->reuse || !sk->reuse))
+ (!sk2->sk_reuse || !sk->sk_reuse))
goto fail;
}
}
inet->num = snum;
- if (sk->pprev == NULL) {
+ if (!sk->sk_pprev) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -211,13 +211,13 @@
static void udp_v4_unhash(struct sock *sk)
{
write_lock_bh(&udp_hash_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
inet_sk(sk)->num = 0;
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -232,11 +232,12 @@
unsigned short hnum = ntohs(dport);
int badness = -1;
- for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
+ for (sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk;
+ sk = sk->sk_next) {
struct inet_opt *inet = inet_sk(sk);
if (inet->num == hnum && !ipv6_only_sock(sk)) {
- int score = (sk->family == PF_INET ? 1 : 0);
+ int score = (sk->sk_family == PF_INET ? 1 : 0);
if (inet->rcv_saddr) {
if (inet->rcv_saddr != daddr)
continue;
@@ -252,8 +253,8 @@
continue;
score+=2;
}
- if(sk->bound_dev_if) {
- if(sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score+=2;
}
@@ -288,7 +289,8 @@
{
struct sock *s = sk;
unsigned short hnum = ntohs(loc_port);
- for(; s; s = s->next) {
+
+ for (; s; s = s->sk_next) {
struct inet_opt *inet = inet_sk(s);
if (inet->num != hnum ||
@@ -296,7 +298,7 @@
(inet->dport != rmt_port && inet->dport) ||
(inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
ipv6_only_sock(s) ||
- (s->bound_dev_if && s->bound_dev_if != dif))
+ (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
continue;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
continue;
@@ -370,13 +372,13 @@
* 4.1.3.3.
*/
if (!inet->recverr) {
- if (!harderr || sk->state != TCP_ESTABLISHED)
+ if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else {
ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
}
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
out:
sock_put(sk);
}
@@ -404,7 +406,7 @@
int err = 0;
/* Grab the skbuff where UDP header space exists. */
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
/*
@@ -416,12 +418,12 @@
uh->len = htons(up->len);
uh->check = 0;
- if (sk->no_check == UDP_CSUM_NOXMIT) {
+ if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
skb->ip_summed = CHECKSUM_NONE;
goto send;
}
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
/*
* Only one fragment on the socket.
*/
@@ -454,7 +456,7 @@
sizeof(struct udphdr), skb->csum);
}
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
csum = csum_add(csum, skb->csum);
}
uh->check = csum_tcpudp_magic(up->saddr, up->daddr,
@@ -544,7 +546,7 @@
if (dport == 0)
return -EINVAL;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
daddr = inet->daddr;
dport = inet->dport;
@@ -555,7 +557,7 @@
}
ipc.addr = inet->saddr;
- ipc.oif = sk->bound_dev_if;
+ ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(msg, &ipc);
if (err)
@@ -577,7 +579,7 @@
connected = 0;
}
tos = RT_TOS(inet->tos);
- if (sk->localroute || (msg->msg_flags&MSG_DONTROUTE) ||
+ if (sk->sk_localroute || (msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
@@ -701,7 +703,8 @@
ret = ip_append_page(sk, page, offset, size, flags);
if (ret == -EOPNOTSUPP) {
release_sock(sk);
- return sock_no_sendpage(sk->socket, page, offset, size, flags);
+ return sock_no_sendpage(sk->sk_socket, page, offset,
+ size, flags);
}
if (ret < 0) {
udp_flush_pending_frames(sk);
@@ -728,7 +731,7 @@
{
case SIOCOUTQ:
{
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
@@ -738,8 +741,8 @@
unsigned long amount;
amount = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL) {
/*
* We will only return the amount
@@ -748,7 +751,7 @@
*/
amount = skb->len - sizeof(struct udphdr);
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
@@ -844,12 +847,12 @@
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- if (skb == skb_peek(&sk->receive_queue)) {
- __skb_unlink(skb, &sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ if (skb == skb_peek(&sk->sk_receive_queue)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
@@ -877,7 +880,7 @@
sk_dst_reset(sk);
- oif = sk->bound_dev_if;
+ oif = sk->sk_bound_dev_if;
saddr = inet->saddr;
if (MULTICAST(usin->sin_addr.s_addr)) {
if (!oif)
@@ -901,7 +904,7 @@
inet->rcv_saddr = rt->rt_src;
inet->daddr = rt->rt_dst;
inet->dport = usin->sin_port;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
inet->id = jiffies;
sk_dst_set(sk, &rt->u.dst);
@@ -915,15 +918,15 @@
* 1003.1g - break association.
*/
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
inet->daddr = 0;
inet->dport = 0;
- sk->bound_dev_if = 0;
- if (!(sk->userlocks & SOCK_BINDADDR_LOCK))
+ sk->sk_bound_dev_if = 0;
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
- if (!(sk->userlocks&SOCK_BINDPORT_LOCK)) {
- sk->prot->unhash(sk);
+ if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
+ sk->sk_prot->unhash(sk);
inet->sport = 0;
}
sk_dst_reset(sk);
@@ -1054,7 +1057,7 @@
/* FALLTHROUGH -- it's a UDP Packet */
}
- if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (__udp_checksum_complete(skb)) {
UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
@@ -1094,7 +1097,7 @@
do {
struct sk_buff *skb1 = skb;
- sknext = udp_v4_mcast_next(sk->next, uh->dest, daddr,
+ sknext = udp_v4_mcast_next(sk->sk_next, uh->dest, daddr,
uh->source, saddr, dif);
if(sknext)
skb1 = skb_clone(skb, GFP_ATOMIC);
@@ -1356,8 +1359,9 @@
struct udp_iter_state *state = seq->private;
for (; state->bucket < UDP_HTABLE_SIZE; ++state->bucket)
- for (i = 0, sk = udp_hash[state->bucket]; sk; ++i, sk = sk->next) {
- if (sk->family != state->family)
+ for (i = 0, sk = udp_hash[state->bucket]; sk;
+ ++i, sk = sk->sk_next) {
+ if (sk->sk_family != state->family)
continue;
if (l--)
continue;
@@ -1387,12 +1391,11 @@
state = seq->private;
sk = v;
- sk = sk->next;
+ sk = sk->sk_next;
- for (; sk; sk = sk->next) {
- if (sk->family == state->family)
+ for (; sk; sk = sk->sk_next)
+ if (sk->sk_family == state->family)
goto out;
- }
if (++state->bucket >= UDP_HTABLE_SIZE)
goto out;
@@ -1480,10 +1483,11 @@
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
- bucket, src, srcp, dest, destp, sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ bucket, src, srcp, dest, destp, sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
}
static int udp4_seq_show(struct seq_file *seq, void *v)
diff -urN linux-2.5.70-bk11/net/ipv6/af_inet6.c linux-2.5.70-bk12/net/ipv6/af_inet6.c
--- linux-2.5.70-bk11/net/ipv6/af_inet6.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/af_inet6.c 2003-06-07 04:47:52.000000000 -0700
@@ -144,9 +144,9 @@
{
struct ipv6_pinfo *rc = (&((struct tcp6_sock *)sk)->inet6);
- if (sk->protocol == IPPROTO_UDP)
+ if (sk->sk_protocol == IPPROTO_UDP)
rc = (&((struct udp6_sock *)sk)->inet6);
- else if (sk->protocol == IPPROTO_RAW)
+ else if (sk->sk_protocol == IPPROTO_RAW)
rc = (&((struct raw6_sock *)sk)->inet6);
return rc;
}
@@ -198,10 +198,10 @@
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->prot = answer->prot;
- sk->no_check = answer->no_check;
+ sk->sk_prot = answer->prot;
+ sk->sk_no_check = answer->no_check;
if (INET_PROTOSW_REUSE & answer->flags)
- sk->reuse = 1;
+ sk->sk_reuse = 1;
rcu_read_unlock();
inet = inet_sk(sk);
@@ -212,12 +212,12 @@
inet->hdrincl = 1;
}
- sk->destruct = inet6_sock_destruct;
- sk->zapped = 0;
- sk->family = PF_INET6;
- sk->protocol = protocol;
+ sk->sk_destruct = inet6_sock_destruct;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_INET6;
+ sk->sk_protocol = protocol;
- sk->backlog_rcv = answer->prot->backlog_rcv;
+ sk->sk_backlog_rcv = answer->prot->backlog_rcv;
tcp6sk = (struct tcp6_sock *)sk;
tcp6sk->pinet6 = np = inet6_sk_generic(sk);
@@ -253,10 +253,10 @@
* creation time automatically shares.
*/
inet->sport = ntohs(inet->num);
- sk->prot->hash(sk);
+ sk->sk_prot->hash(sk);
}
- if (sk->prot->init) {
- int err = sk->prot->init(sk);
+ if (sk->sk_prot->init) {
+ int err = sk->sk_prot->init(sk);
if (err != 0) {
inet_sock_release(sk);
return err;
@@ -293,8 +293,8 @@
int addr_type = 0;
/* If the socket has its own bind function then use it. */
- if(sk->prot->bind)
- return sk->prot->bind(sk, uaddr, addr_len);
+ if (sk->sk_prot->bind)
+ return sk->sk_prot->bind(sk, uaddr, addr_len);
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
@@ -327,7 +327,7 @@
lock_sock(sk);
/* Check these errors (active socket, double bind). */
- if (sk->state != TCP_CLOSE || inet->num) {
+ if (sk->sk_state != TCP_CLOSE || inet->num) {
release_sock(sk);
return -EINVAL;
}
@@ -338,11 +338,11 @@
/* Override any existing binding, if another one
* is supplied by user.
*/
- sk->bound_dev_if = addr->sin6_scope_id;
+ sk->sk_bound_dev_if = addr->sin6_scope_id;
}
/* Binding to link-local address requires an interface */
- if (sk->bound_dev_if == 0) {
+ if (!sk->sk_bound_dev_if) {
release_sock(sk);
return -EINVAL;
}
@@ -357,16 +357,16 @@
ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
/* Make sure we are allowed to bind here. */
- if (sk->prot->get_port(sk, snum) != 0) {
+ if (sk->sk_prot->get_port(sk, snum)) {
inet_reset_saddr(sk);
release_sock(sk);
return -EADDRINUSE;
}
if (addr_type != IPV6_ADDR_ANY)
- sk->userlocks |= SOCK_BINDADDR_LOCK;
+ sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
- sk->userlocks |= SOCK_BINDPORT_LOCK;
+ sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->sport = ntohs(inet->num);
inet->dport = 0;
inet->daddr = 0;
@@ -437,7 +437,8 @@
if (peer) {
if (!inet->dport)
return -ENOTCONN;
- if (((1<state)&(TCPF_CLOSE|TCPF_SYN_SENT)) && peer == 1)
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
+ peer == 1)
return -ENOTCONN;
sin->sin6_port = inet->dport;
ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
@@ -452,7 +453,7 @@
sin->sin6_port = inet->sport;
}
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- sin->sin6_scope_id = sk->bound_dev_if;
+ sin->sin6_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*sin);
return(0);
}
@@ -465,9 +466,9 @@
switch(cmd)
{
case SIOCGSTAMP:
- if(sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- err = copy_to_user((void *)arg, &sk->stamp,
+ err = copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval));
if (err)
return -EFAULT;
@@ -485,7 +486,8 @@
case SIOCSIFDSTADDR:
return addrconf_set_dstaddr((void *) arg);
default:
- if(sk->prot->ioctl==0 || (err=sk->prot->ioctl(sk, cmd, arg))==-ENOIOCTLCMD)
+ if (!sk->sk_prot->ioctl ||
+ (err = sk->sk_prot->ioctl(sk, cmd, arg)) == -ENOIOCTLCMD)
return(dev_ioctl(cmd,(void *) arg));
return err;
}
@@ -640,32 +642,23 @@
}
int
-snmp6_mib_init(void *ptr[2], size_t mibsize)
+snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign)
{
- int i;
-
if (ptr == NULL)
return -EINVAL;
- ptr[0] = kmalloc_percpu(mibsize, GFP_KERNEL);
+ ptr[0] = __alloc_percpu(mibsize, mibalign);
if (!ptr[0])
goto err0;
- ptr[1] = kmalloc_percpu(mibsize, GFP_KERNEL);
+ ptr[1] = __alloc_percpu(mibsize, mibalign);
if (!ptr[1])
goto err1;
- /* Zero percpu version of the mibs */
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_possible(i)) {
- memset(per_cpu_ptr(ptr[0], i), 0, mibsize);
- memset(per_cpu_ptr(ptr[1], i), 0, mibsize);
- }
- }
return 0;
err1:
- kfree_percpu(ptr[0]);
+ free_percpu(ptr[0]);
ptr[0] = NULL;
err0:
return -ENOMEM;
@@ -676,18 +669,21 @@
{
if (ptr == NULL)
return;
- kfree_percpu(ptr[0]);
- kfree_percpu(ptr[1]);
+ free_percpu(ptr[0]);
+ free_percpu(ptr[1]);
ptr[0] = ptr[1] = NULL;
}
static int __init init_ipv6_mibs(void)
{
- if (snmp6_mib_init((void **)ipv6_statistics, sizeof (struct ipv6_mib)) < 0)
+ if (snmp6_mib_init((void **)ipv6_statistics, sizeof (struct ipv6_mib),
+ __alignof__(struct ipv6_mib)) < 0)
goto err_ip_mib;
- if (snmp6_mib_init((void **)icmpv6_statistics, sizeof (struct icmpv6_mib)) < 0)
+ if (snmp6_mib_init((void **)icmpv6_statistics, sizeof (struct icmpv6_mib),
+ __alignof__(struct ipv6_mib)) < 0)
goto err_icmp_mib;
- if (snmp6_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib)) < 0)
+ if (snmp6_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib),
+ __alignof__(struct ipv6_mib)) < 0)
goto err_udp_mib;
return 0;
diff -urN linux-2.5.70-bk11/net/ipv6/datagram.c linux-2.5.70-bk12/net/ipv6/datagram.c
--- linux-2.5.70-bk11/net/ipv6/datagram.c 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/datagram.c 2003-06-07 04:47:52.000000000 -0700
@@ -117,7 +117,7 @@
int copied;
err = -EAGAIN;
- skb = skb_dequeue(&sk->error_queue);
+ skb = skb_dequeue(&sk->sk_error_queue);
if (skb == NULL)
goto out;
@@ -190,14 +190,14 @@
err = copied;
/* Reset and regenerate socket error */
- spin_lock_irq(&sk->error_queue.lock);
- sk->err = 0;
- if ((skb2 = skb_peek(&sk->error_queue)) != NULL) {
- sk->err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_irq(&sk->error_queue.lock);
- sk->error_report(sk);
+ spin_lock_irq(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_irq(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
} else {
- spin_unlock_irq(&sk->error_queue.lock);
+ spin_unlock_irq(&sk->sk_error_queue.lock);
}
out_free_skb:
diff -urN linux-2.5.70-bk11/net/ipv6/icmp.c linux-2.5.70-bk12/net/ipv6/icmp.c
--- linux-2.5.70-bk11/net/ipv6/icmp.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/icmp.c 2003-06-07 04:47:52.000000000 -0700
@@ -96,13 +96,13 @@
static __inline__ void icmpv6_xmit_lock(void)
{
local_bh_disable();
- if (unlikely(!spin_trylock(&icmpv6_socket->sk->lock.slock)))
+ if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock)))
BUG();
}
static __inline__ void icmpv6_xmit_unlock(void)
{
- spin_unlock_bh(&icmpv6_socket->sk->lock.slock);
+ spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
}
/*
@@ -213,14 +213,14 @@
struct icmp6hdr *icmp6h;
int err = 0;
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
icmp6h = (struct icmp6hdr*) skb->h.raw;
memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
icmp6h->icmp6_cksum = 0;
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
skb->csum = csum_partial((char *)icmp6h,
sizeof(struct icmp6hdr), skb->csum);
icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
@@ -230,7 +230,7 @@
} else {
u32 tmp_csum = 0;
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
}
@@ -519,7 +519,7 @@
if ((sk = raw_v6_htable[hash]) != NULL) {
while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr))) {
rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
- sk = sk->next;
+ sk = sk->sk_next;
}
}
read_unlock(&raw_v6_lock);
@@ -687,9 +687,9 @@
}
sk = __icmpv6_socket[i]->sk;
- sk->allocation = GFP_ATOMIC;
- sk->sndbuf = SK_WMEM_MAX*2;
- sk->prot->unhash(sk);
+ sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_sndbuf = SK_WMEM_MAX * 2;
+ sk->sk_prot->unhash(sk);
}
diff -urN linux-2.5.70-bk11/net/ipv6/ip6_output.c linux-2.5.70-bk12/net/ipv6/ip6_output.c
--- linux-2.5.70-bk11/net/ipv6/ip6_output.c 2003-05-26 18:00:21.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/ip6_output.c 2003-06-07 04:47:52.000000000 -0700
@@ -153,7 +153,7 @@
struct ipv6hdr *iph = skb->nh.ipv6h;
struct dst_entry *dst;
struct flowi fl = {
- .oif = skb->sk ? skb->sk->bound_dev_if : 0,
+ .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
.nl_u =
{ .ip6_u =
{ .daddr = iph->daddr,
@@ -457,7 +457,7 @@
struct frag_hdr *fhdr2;
- skb = skb_copy(last_skb, sk->allocation);
+ skb = skb_copy(last_skb, sk->sk_allocation);
if (skb == NULL) {
IP6_INC_STATS(Ip6FragFails);
@@ -1222,13 +1222,14 @@
if (flags&MSG_PROBE)
return 0;
- if (skb_queue_empty(&sk->write_queue)) {
+ if (skb_queue_empty(&sk->sk_write_queue)) {
/*
* setup for corking
*/
if (opt) {
if (np->cork.opt == NULL)
- np->cork.opt = kmalloc(opt->tot_len, sk->allocation);
+ np->cork.opt = kmalloc(opt->tot_len,
+ sk->sk_allocation);
memcpy(np->cork.opt, opt, opt->tot_len);
inet->cork.flags |= IPCORK_OPT;
/* need source address above miyazawa*/
@@ -1268,7 +1269,7 @@
inet->cork.length += length;
- if ((skb = skb_peek_tail(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
goto alloc_new_skb;
while (length > 0) {
@@ -1295,10 +1296,11 @@
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
- if (atomic_read(&sk->wmem_alloc) <= 2*sk->sndbuf)
+ if (atomic_read(&sk->sk_wmem_alloc) <=
+ 2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
- sk->allocation);
+ sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
}
@@ -1335,7 +1337,7 @@
/*
* Put the packet on the pending queue
*/
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
@@ -1374,7 +1376,7 @@
} else if(i < MAX_SKB_FRAGS) {
if (copy > PAGE_SIZE)
copy = PAGE_SIZE;
- page = alloc_pages(sk->allocation, 0);
+ page = alloc_pages(sk->sk_allocation, 0);
if (page == NULL) {
err = -ENOMEM;
goto error;
@@ -1385,7 +1387,7 @@
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
skb->truesize += PAGE_SIZE;
- atomic_add(PAGE_SIZE, &sk->wmem_alloc);
+ atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
} else {
err = -EMSGSIZE;
goto error;
@@ -1423,14 +1425,14 @@
unsigned char proto = fl->proto;
int err = 0;
- if ((skb = __skb_dequeue(&sk->write_queue)) == NULL)
+ if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb->nh.raw)
__skb_pull(skb, skb->nh.raw - skb->data);
- while ((tmp_skb = __skb_dequeue(&sk->write_queue)) != NULL) {
+ while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
@@ -1496,7 +1498,7 @@
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
- while ((skb = __skb_dequeue_tail(&sk->write_queue)) != NULL)
+ while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
inet->cork.flags &= ~IPCORK_OPT;
diff -urN linux-2.5.70-bk11/net/ipv6/ipv6_sockglue.c linux-2.5.70-bk12/net/ipv6/ipv6_sockglue.c
--- linux-2.5.70-bk11/net/ipv6/ipv6_sockglue.c 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/ipv6_sockglue.c 2003-06-07 04:47:52.000000000 -0700
@@ -81,7 +81,7 @@
struct ip6_ra_chain *ra, *new_ra, **rap;
/* RA packet may be delivered ONLY to IPPROTO_RAW socket */
- if (sk->type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
+ if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
return -EINVAL;
new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
@@ -134,7 +134,7 @@
int val, valbool;
int retv = -ENOPROTOOPT;
- if(level==SOL_IP && sk->type != SOCK_RAW)
+ if (level == SOL_IP && sk->sk_type != SOCK_RAW)
return udp_prot.setsockopt(sk, level, optname, optval, optlen);
if(level!=SOL_IPV6)
@@ -156,11 +156,11 @@
struct ipv6_txoptions *opt;
struct sk_buff *pktopt;
- if (sk->protocol != IPPROTO_UDP &&
- sk->protocol != IPPROTO_TCP)
+ if (sk->sk_protocol != IPPROTO_UDP &&
+ sk->sk_protocol != IPPROTO_TCP)
break;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;
}
@@ -174,26 +174,26 @@
fl6_free_socklist(sk);
ipv6_sock_mc_close(sk);
- if (sk->protocol == IPPROTO_TCP) {
+ if (sk->sk_protocol == IPPROTO_TCP) {
struct tcp_opt *tp = tcp_sk(sk);
local_bh_disable();
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
sock_prot_inc_use(&tcp_prot);
local_bh_enable();
- sk->prot = &tcp_prot;
+ sk->sk_prot = &tcp_prot;
tp->af_specific = &ipv4_specific;
- sk->socket->ops = &inet_stream_ops;
- sk->family = PF_INET;
+ sk->sk_socket->ops = &inet_stream_ops;
+ sk->sk_family = PF_INET;
tcp_sync_mss(sk, tp->pmtu_cookie);
} else {
local_bh_disable();
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
sock_prot_inc_use(&udp_prot);
local_bh_enable();
- sk->prot = &udp_prot;
- sk->socket->ops = &inet_dgram_ops;
- sk->family = PF_INET;
+ sk->sk_prot = &udp_prot;
+ sk->sk_socket->ops = &inet_dgram_ops;
+ sk->sk_family = PF_INET;
}
opt = xchg(&np->opt, NULL);
if (opt)
@@ -202,7 +202,7 @@
if (pktopt)
kfree_skb(pktopt);
- sk->destruct = inet_sock_destruct;
+ sk->sk_destruct = inet_sock_destruct;
#ifdef INET_REFCNT_DEBUG
atomic_dec(&inet6_sock_nr);
#endif
@@ -264,7 +264,7 @@
int junk;
fl.fl6_flowlabel = 0;
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
if (optlen == 0)
goto update;
@@ -295,10 +295,11 @@
goto done;
update:
retv = 0;
- if (sk->type == SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
if (opt) {
struct tcp_opt *tp = tcp_sk(sk);
- if (!((1<state)&(TCPF_LISTEN|TCPF_CLOSE))
+ if (!((1 << sk->sk_state) &
+ (TCPF_LISTEN | TCPF_CLOSE))
&& inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
tcp_sync_mss(sk, tp->pmtu_cookie);
@@ -307,9 +308,9 @@
opt = xchg(&np->opt, opt);
sk_dst_reset(sk);
} else {
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
opt = xchg(&np->opt, opt);
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
sk_dst_reset(sk);
}
@@ -326,7 +327,7 @@
break;
case IPV6_MULTICAST_HOPS:
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
@@ -340,9 +341,9 @@
break;
case IPV6_MULTICAST_IF:
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
- if (sk->bound_dev_if && sk->bound_dev_if != val)
+ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
goto e_inval;
if (__dev_get_by_index(val) == NULL) {
@@ -488,7 +489,7 @@
case IPV6_RECVERR:
np->recverr = valbool;
if (!val)
- skb_queue_purge(&sk->error_queue);
+ skb_queue_purge(&sk->sk_error_queue);
retv = 0;
break;
case IPV6_FLOWINFO_SEND:
@@ -528,7 +529,7 @@
int len;
int val;
- if(level==SOL_IP && sk->type != SOCK_RAW)
+ if (level == SOL_IP && sk->sk_type != SOCK_RAW)
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
if(level!=SOL_IPV6)
return -ENOPROTOOPT;
@@ -536,12 +537,12 @@
return -EFAULT;
switch (optname) {
case IPV6_ADDRFORM:
- if (sk->protocol != IPPROTO_UDP &&
- sk->protocol != IPPROTO_TCP)
+ if (sk->sk_protocol != IPPROTO_UDP &&
+ sk->sk_protocol != IPPROTO_TCP)
return -EINVAL;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
- val = sk->family;
+ val = sk->sk_family;
break;
case MCAST_MSFILTER:
{
@@ -564,7 +565,7 @@
struct msghdr msg;
struct sk_buff *skb;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
diff -urN linux-2.5.70-bk11/net/ipv6/mcast.c linux-2.5.70-bk12/net/ipv6/mcast.c
--- linux-2.5.70-bk11/net/ipv6/mcast.c 2003-05-26 18:00:56.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/mcast.c 2003-06-07 04:47:52.000000000 -0700
@@ -2176,8 +2176,8 @@
}
sk = igmp6_socket->sk;
- sk->allocation = GFP_ATOMIC;
- sk->prot->unhash(sk);
+ sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_prot->unhash(sk);
np = inet6_sk(sk);
np->hop_limit = 1;
diff -urN linux-2.5.70-bk11/net/ipv6/ndisc.c linux-2.5.70-bk12/net/ipv6/ndisc.c
--- linux-2.5.70-bk11/net/ipv6/ndisc.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/ndisc.c 2003-06-07 04:47:52.000000000 -0700
@@ -1466,11 +1466,11 @@
sk = ndisc_socket->sk;
np = inet6_sk(sk);
- sk->allocation = GFP_ATOMIC;
+ sk->sk_allocation = GFP_ATOMIC;
np->hop_limit = 255;
/* Do not loopback ndisc messages */
np->mc_loop = 0;
- sk->prot->unhash(sk);
+ sk->sk_prot->unhash(sk);
/*
* Initialize the neighbour table
diff -urN linux-2.5.70-bk11/net/ipv6/netfilter/ip6_queue.c linux-2.5.70-bk12/net/ipv6/netfilter/ip6_queue.c
--- linux-2.5.70-bk11/net/ipv6/netfilter/ip6_queue.c 2003-05-26 18:00:27.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/netfilter/ip6_queue.c 2003-06-07 04:47:52.000000000 -0700
@@ -538,14 +538,14 @@
if (down_trylock(&ipqnl_sem))
return;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
ipq_rcv_skb(skb);
kfree_skb(skb);
}
up(&ipqnl_sem);
- } while (ipqnl && ipqnl->receive_queue.qlen);
+ } while (ipqnl && ipqnl->sk_receive_queue.qlen);
}
static int
@@ -694,7 +694,7 @@
proc_net_remove(IPQ_PROC_FS_NAME);
cleanup_ipqnl:
- sock_release(ipqnl->socket);
+ sock_release(ipqnl->sk_socket);
down(&ipqnl_sem);
up(&ipqnl_sem);
diff -urN linux-2.5.70-bk11/net/ipv6/netfilter/ip6t_owner.c linux-2.5.70-bk12/net/ipv6/netfilter/ip6t_owner.c
--- linux-2.5.70-bk11/net/ipv6/netfilter/ip6t_owner.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/netfilter/ip6t_owner.c 2003-06-07 04:47:52.000000000 -0700
@@ -31,7 +31,7 @@
if(files) {
spin_lock(&files->file_lock);
for (i=0; i < files->max_fds; i++) {
- if (fcheck_files(files, i) == skb->sk->socket->file) {
+ if (fcheck_files(files, i) == skb->sk->sk_socket->file) {
spin_unlock(&files->file_lock);
task_unlock(p);
read_unlock(&tasklist_lock);
@@ -50,7 +50,7 @@
match_sid(const struct sk_buff *skb, pid_t sid)
{
struct task_struct *g, *p;
- struct file *file = skb->sk->socket->file;
+ struct file *file = skb->sk->sk_socket->file;
int i, found=0;
read_lock(&tasklist_lock);
@@ -93,17 +93,17 @@
{
const struct ip6t_owner_info *info = matchinfo;
- if (!skb->sk || !skb->sk->socket || !skb->sk->socket->file)
+ if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
return 0;
if(info->match & IP6T_OWNER_UID) {
- if((skb->sk->socket->file->f_uid != info->uid) ^
+ if((skb->sk->sk_socket->file->f_uid != info->uid) ^
!!(info->invert & IP6T_OWNER_UID))
return 0;
}
if(info->match & IP6T_OWNER_GID) {
- if((skb->sk->socket->file->f_gid != info->gid) ^
+ if((skb->sk->sk_socket->file->f_gid != info->gid) ^
!!(info->invert & IP6T_OWNER_GID))
return 0;
}
diff -urN linux-2.5.70-bk11/net/ipv6/proc.c linux-2.5.70-bk12/net/ipv6/proc.c
--- linux-2.5.70-bk11/net/ipv6/proc.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/proc.c 2003-06-07 04:47:52.000000000 -0700
@@ -228,7 +228,8 @@
if (!idev || !idev->dev)
return -EINVAL;
- if (snmp6_mib_init((void **)idev->stats.icmpv6, sizeof(struct icmpv6_mib)) < 0)
+ if (snmp6_mib_init((void **)idev->stats.icmpv6, sizeof(struct icmpv6_mib),
+ __alignof__(struct ipv6_mib)) < 0)
goto err_icmp;
#ifdef CONFIG_PROC_FS
diff -urN linux-2.5.70-bk11/net/ipv6/raw.c linux-2.5.70-bk12/net/ipv6/raw.c
--- linux-2.5.70-bk11/net/ipv6/raw.c 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/raw.c 2003-06-07 04:47:52.000000000 -0700
@@ -62,11 +62,11 @@
(RAWV6_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v6_lock);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
write_unlock_bh(&raw_v6_lock);
}
@@ -74,12 +74,12 @@
static void raw_v6_unhash(struct sock *sk)
{
write_lock_bh(&raw_v6_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&raw_v6_lock);
@@ -93,7 +93,7 @@
struct sock *s = sk;
int addr_type = ipv6_addr_type(loc_addr);
- for(s = sk; s; s = s->next) {
+ for (s = sk; s; s = s->sk_next) {
if (inet_sk(s)->num == num) {
struct ipv6_pinfo *np = inet6_sk(s);
@@ -176,7 +176,7 @@
if (clone)
rawv6_rcv(sk, clone);
}
- sk = __raw_v6_lookup(sk->next, nexthdr, daddr, saddr);
+ sk = __raw_v6_lookup(sk->sk_next, nexthdr, daddr, saddr);
}
out:
read_unlock(&raw_v6_lock);
@@ -203,7 +203,7 @@
lock_sock(sk);
err = -EINVAL;
- if (sk->state != TCP_CLOSE)
+ if (sk->sk_state != TCP_CLOSE)
goto out;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
@@ -212,11 +212,11 @@
/* Override any existing binding, if another one
* is supplied by user.
*/
- sk->bound_dev_if = addr->sin6_scope_id;
+ sk->sk_bound_dev_if = addr->sin6_scope_id;
}
/* Binding to link-local address requires an interface */
- if (sk->bound_dev_if == 0)
+ if (!sk->sk_bound_dev_if)
goto out;
}
@@ -257,7 +257,7 @@
2. Socket is connected (otherwise the error indication
is useless without recverr and error is hard.
*/
- if (!np->recverr && sk->state != TCP_ESTABLISHED)
+ if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
harderr = icmpv6_err_convert(type, code, &err);
@@ -272,14 +272,14 @@
}
if (np->recverr || harderr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
}
}
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
- if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
/* FIXME: increment a raw6 drops counter here */
kfree_skb(skb);
@@ -422,12 +422,12 @@
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- if (skb == skb_peek(&sk->receive_queue)) {
- __skb_unlink(skb, &sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ if (skb == skb_peek(&sk->sk_receive_queue)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
@@ -446,7 +446,7 @@
int err = 0;
u16 *csum;
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
if (opt->offset + 1 < len)
@@ -456,7 +456,7 @@
goto out;
}
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
/*
* Only one fragment on the socket.
*/
@@ -467,7 +467,7 @@
} else {
u32 tmp_csum = 0;
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
}
@@ -508,7 +508,7 @@
goto error;
skb_reserve(skb, hh_len);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst);
skb->nh.ipv6h = iph = (struct ipv6hdr *)skb_put(skb, length);
@@ -597,8 +597,11 @@
}
}
- /* Otherwise it will be difficult to maintain sk->dst_cache. */
- if (sk->state == TCP_ESTABLISHED &&
+ /*
+ * Otherwise it will be difficult to maintain
+ * sk->sk_dst_cache.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
!ipv6_addr_cmp(daddr, &np->daddr))
daddr = &np->daddr;
@@ -607,7 +610,7 @@
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
fl.oif = sin6->sin6_scope_id;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return(-EINVAL);
proto = inet->num;
@@ -625,7 +628,7 @@
}
if (fl.oif == 0)
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
@@ -857,7 +860,7 @@
switch(cmd) {
case SIOCOUTQ:
{
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
case SIOCINQ:
@@ -865,11 +868,11 @@
struct sk_buff *skb;
int amount = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->tail - skb->h.raw;
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
@@ -928,8 +931,8 @@
for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket) {
sk = raw_v6_htable[state->bucket];
- while (sk && sk->family != PF_INET6)
- sk = sk->next;
+ while (sk && sk->sk_family != PF_INET6)
+ sk = sk->sk_next;
if (sk)
break;
}
@@ -941,10 +944,10 @@
struct raw6_iter_state* state = raw6_seq_private(seq);
do {
- sk = sk->next;
+ sk = sk->sk_next;
try_again:
;
- } while (sk && sk->family != PF_INET6);
+ } while (sk && sk->sk_family != PF_INET6);
if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) {
sk = raw_v6_htable[state->bucket];
@@ -1003,12 +1006,13 @@
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0,
sock_i_uid(sp), 0,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
}
static int raw6_seq_show(struct seq_file *seq, void *v)
diff -urN linux-2.5.70-bk11/net/ipv6/tcp_ipv6.c linux-2.5.70-bk12/net/ipv6/tcp_ipv6.c
--- linux-2.5.70-bk11/net/ipv6/tcp_ipv6.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/tcp_ipv6.c 2003-06-07 04:47:52.000000000 -0700
@@ -101,22 +101,23 @@
if (!inet_sk(sk2)->rcv_saddr && !ipv6_only_sock(sk))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
ipv6_addr_any(&inet6_sk(sk2)->rcv_saddr) &&
!(ipv6_only_sock(sk2) && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
(!ipv6_only_sock(sk) ||
- !(sk2->family == AF_INET6 ?
- ipv6_addr_type(&inet6_sk(sk2)->rcv_saddr) == IPV6_ADDR_MAPPED : 1)))
+ !(sk2->sk_family == AF_INET6 ?
+ (ipv6_addr_type(&inet6_sk(sk2)->rcv_saddr) == IPV6_ADDR_MAPPED) :
+ 1)))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
!ipv6_addr_cmp(&np->rcv_saddr,
- (sk2->state != TCP_TIME_WAIT ?
+ (sk2->sk_state != TCP_TIME_WAIT ?
&inet6_sk(sk2)->rcv_saddr :
- &((struct tcp_tw_bucket *)sk)->v6_rcv_saddr)))
+ &((struct tcp_tw_bucket *)sk)->tw_v6_rcv_saddr)))
return 1;
if (addr_type == IPV6_ADDR_MAPPED &&
@@ -135,10 +136,10 @@
struct sock *sk2 = tb->owners;
/* We must walk the whole port owner list in this case. -DaveM */
- for (; sk2; sk2 = sk2->bind_next)
- if (sk != sk2 && sk->bound_dev_if == sk2->bound_dev_if &&
- (!sk->reuse || !sk2->reuse ||
- sk2->state == TCP_LISTEN) &&
+ for (; sk2; sk2 = sk2->sk_bind_next)
+ if (sk != sk2 && sk->sk_bound_dev_if == sk2->sk_bound_dev_if &&
+ (!sk->sk_reuse || !sk2->sk_reuse ||
+ sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
@@ -196,7 +197,8 @@
break;
}
if (tb != NULL && tb->owners != NULL) {
- if (tb->fastreuse > 0 && sk->reuse != 0 && sk->state != TCP_LISTEN) {
+ if (tb->fastreuse > 0 && sk->sk_reuse &&
+ sk->sk_state != TCP_LISTEN) {
goto success;
} else {
ret = 1;
@@ -209,18 +211,18 @@
(tb = tcp_bucket_create(head, snum)) == NULL)
goto fail_unlock;
if (tb->owners == NULL) {
- if (sk->reuse && sk->state != TCP_LISTEN)
+ if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
else
tb->fastreuse = 0;
} else if (tb->fastreuse &&
- ((sk->reuse == 0) || (sk->state == TCP_LISTEN)))
+ (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0;
success:
- if (!sk->prev)
+ if (!sk->sk_prev)
tcp_bind_hash(sk, tb, snum);
- BUG_TRAP(sk->prev == (struct sock *)tb);
+ BUG_TRAP(sk->sk_prev == (struct sock *)tb);
ret = 0;
fail_unlock:
@@ -235,30 +237,30 @@
struct sock **skp;
rwlock_t *lock;
- BUG_TRAP(sk->pprev==NULL);
+ BUG_TRAP(!sk->sk_pprev);
- if(sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock;
tcp_listen_wlock();
} else {
- skp = &tcp_ehash[(sk->hashent = tcp_v6_sk_hashfn(sk))].chain;
- lock = &tcp_ehash[sk->hashent].lock;
+ skp = &tcp_ehash[(sk->sk_hashent = tcp_v6_sk_hashfn(sk))].chain;
+ lock = &tcp_ehash[sk->sk_hashent].lock;
write_lock(lock);
}
- if((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock(lock);
}
static void tcp_v6_hash(struct sock *sk)
{
- if(sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct tcp_opt *tp = tcp_sk(sk);
if (tp->af_specific == &ipv6_mapped) {
@@ -280,8 +282,8 @@
hiscore=0;
read_lock(&tcp_lhash_lock);
sk = tcp_listening_hash[tcp_lhashfn(hnum)];
- for(; sk; sk = sk->next) {
- if (inet_sk(sk)->num == hnum && sk->family == PF_INET6) {
+ for (; sk; sk = sk->sk_next) {
+ if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
score = 1;
@@ -290,8 +292,8 @@
continue;
score++;
}
- if (sk->bound_dev_if) {
- if (sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score++;
}
@@ -332,21 +334,21 @@
hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
head = &tcp_ehash[hash];
read_lock(&head->lock);
- for(sk = head->chain; sk; sk = sk->next) {
+ for (sk = head->chain; sk; sk = sk->sk_next) {
/* For IPV6 do the cheaper port and family tests first. */
if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for(sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next) {
+ for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->sk_next) {
/* FIXME: acme: check this... */
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
- if(*((__u32 *)&(tw->dport)) == ports &&
- sk->family == PF_INET6) {
- if(!ipv6_addr_cmp(&tw->v6_daddr, saddr) &&
- !ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
- (!sk->bound_dev_if || sk->bound_dev_if == dif))
+ if(*((__u32 *)&(tw->tw_dport)) == ports &&
+ sk->sk_family == PF_INET6) {
+ if(!ipv6_addr_cmp(&tw->tw_v6_daddr, saddr) &&
+ !ipv6_addr_cmp(&tw->tw_v6_rcv_saddr, daddr) &&
+ (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
goto hit;
}
}
@@ -468,7 +470,7 @@
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *daddr = &np->rcv_saddr;
struct in6_addr *saddr = &np->daddr;
- int dif = sk->bound_dev_if;
+ int dif = sk->sk_bound_dev_if;
u32 ports = TCP_COMBINED_PORTS(inet->dport, inet->num);
int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
struct tcp_ehash_bucket *head = &tcp_ehash[hash];
@@ -477,22 +479,24 @@
write_lock_bh(&head->lock);
- for(skp = &(head + tcp_ehash_size)->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
+ for (skp = &(head + tcp_ehash_size)->chain; (sk2 = *skp) != NULL;
+ skp = &sk2->sk_next) {
tw = (struct tcp_tw_bucket*)sk2;
- if(*((__u32 *)&(tw->dport)) == ports &&
- sk2->family == PF_INET6 &&
- !ipv6_addr_cmp(&tw->v6_daddr, saddr) &&
- !ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
- sk2->bound_dev_if == sk->bound_dev_if) {
+ if(*((__u32 *)&(tw->tw_dport)) == ports &&
+ sk2->sk_family == PF_INET6 &&
+ !ipv6_addr_cmp(&tw->tw_v6_daddr, saddr) &&
+ !ipv6_addr_cmp(&tw->tw_v6_rcv_saddr, daddr) &&
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
struct tcp_opt *tp = tcp_sk(sk);
- if (tw->ts_recent_stamp) {
+ if (tw->tw_ts_recent_stamp) {
/* See comment in tcp_ipv4.c */
- if ((tp->write_seq = tw->snd_nxt+65535+2) == 0)
+ tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
+ if (!tp->write_seq)
tp->write_seq = 1;
- tp->ts_recent = tw->ts_recent;
- tp->ts_recent_stamp = tw->ts_recent_stamp;
+ tp->ts_recent = tw->tw_ts_recent;
+ tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
sock_hold(sk2);
skp = &head->chain;
goto unique;
@@ -502,20 +506,20 @@
}
tw = NULL;
- for(skp = &head->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
+ for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->sk_next) {
if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
goto not_unique;
}
unique:
- BUG_TRAP(sk->pprev==NULL);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ BUG_TRAP(!sk->sk_pprev);
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sk->hashent = hash;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sk->sk_hashent = hash;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock_bh(&head->lock);
if (tw) {
@@ -552,7 +556,7 @@
spin_lock_bh(&head->lock);
- if (tb->owners == sk && sk->bind_next == NULL) {
+ if (tb->owners == sk && !sk->sk_bind_next) {
__tcp_v6_hash(sk);
spin_unlock_bh(&head->lock);
return 0;
@@ -621,15 +625,15 @@
/* If interface is set while binding, indices
* must coincide.
*/
- if (sk->bound_dev_if &&
- sk->bound_dev_if != usin->sin6_scope_id)
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != usin->sin6_scope_id)
return -EINVAL;
- sk->bound_dev_if = usin->sin6_scope_id;
+ sk->sk_bound_dev_if = usin->sin6_scope_id;
}
/* Connect to link-local address requires an interface */
- if (sk->bound_dev_if == 0)
+ if (!sk->sk_bound_dev_if)
return -EINVAL;
}
@@ -661,14 +665,14 @@
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
tp->af_specific = &ipv6_mapped;
- sk->backlog_rcv = tcp_v4_do_rcv;
+ sk->sk_backlog_rcv = tcp_v4_do_rcv;
err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
tp->ext_header_len = exthdrlen;
tp->af_specific = &ipv6_specific;
- sk->backlog_rcv = tcp_v6_do_rcv;
+ sk->sk_backlog_rcv = tcp_v6_do_rcv;
goto failure;
} else {
ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
@@ -687,7 +691,7 @@
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src,
(saddr ? saddr : &np->saddr));
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = usin->sin6_port;
fl.fl_ip_sport = inet->sport;
@@ -704,7 +708,8 @@
}
ip6_dst_store(sk, dst, NULL);
- sk->route_caps = dst->dev->features&~(NETIF_F_IP_CSUM|NETIF_F_TSO);
+ sk->sk_route_caps = dst->dev->features &
+ ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
if (saddr == NULL) {
err = ipv6_get_saddr(dst, &np->daddr, &saddr_buf);
@@ -748,7 +753,7 @@
failure:
__sk_dst_reset(sk);
inet->dport = 0;
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
return err;
}
@@ -770,7 +775,7 @@
return;
}
- if (sk->state == TCP_TIME_WAIT) {
+ if (sk->sk_state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket*)sk);
return;
}
@@ -779,12 +784,13 @@
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LockDroppedIcmps);
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
goto out;
tp = tcp_sk(sk);
seq = ntohl(th->seq);
- if (sk->state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) {
+ if (sk->sk_state != TCP_LISTEN &&
+ !between(seq, tp->snd_una, tp->snd_nxt)) {
NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
@@ -796,7 +802,7 @@
if (sock_owned_by_user(sk))
goto out;
- if ((1<state)&(TCPF_LISTEN|TCPF_CLOSE))
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
goto out;
/* icmp should have updated the destination cache entry */
@@ -814,7 +820,7 @@
fl.proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet->dport;
fl.fl_ip_sport = inet->sport;
@@ -823,7 +829,7 @@
dst_hold(dst);
if (dst->error) {
- sk->err_soft = -dst->error;
+ sk->sk_err_soft = -dst->error;
} else if (tp->pmtu_cookie > dst_pmtu(dst)) {
tcp_sync_mss(sk, dst_pmtu(dst));
tcp_simple_retransmit(sk);
@@ -835,7 +841,7 @@
icmpv6_err_convert(type, code, &err);
/* Might be for an open_request */
- switch (sk->state) {
+ switch (sk->sk_state) {
struct open_request *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
@@ -864,22 +870,20 @@
It can, it SYNs are crossed. --ANK */
if (!sock_owned_by_user(sk)) {
TCP_INC_STATS_BH(TcpAttemptFails);
- sk->err = err;
- sk->error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
+ sk->sk_err = err;
+ sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
tcp_done(sk);
- } else {
- sk->err_soft = err;
- }
+ } else
+ sk->sk_err_soft = err;
goto out;
}
if (!sock_owned_by_user(sk) && np->recverr) {
- sk->err = err;
- sk->error_report(sk);
- } else {
- sk->err_soft = err;
- }
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+ } else
+ sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
@@ -1128,8 +1132,8 @@
{
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
- tcp_v6_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
- tw->rcv_wnd>>tw->rcv_wscale, tw->ts_recent);
+ tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
+ tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
tcp_tw_put(tw);
}
@@ -1160,7 +1164,7 @@
tcp_v6_iif(skb));
if (nsk) {
- if (nsk->state != TCP_TIME_WAIT) {
+ if (nsk->sk_state != TCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
@@ -1247,10 +1251,11 @@
atomic_inc(&skb->users);
req->af.v6_req.pktopts = skb;
}
- req->af.v6_req.iif = sk->bound_dev_if;
+ req->af.v6_req.iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
- if (!sk->bound_dev_if && ipv6_addr_type(&req->af.v6_req.rmt_addr)&IPV6_ADDR_LINKLOCAL)
+ if (!sk->sk_bound_dev_if &&
+ ipv6_addr_type(&req->af.v6_req.rmt_addr) & IPV6_ADDR_LINKLOCAL)
req->af.v6_req.iif = tcp_v6_iif(skb);
if (isn == 0)
@@ -1312,7 +1317,7 @@
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
newtp->af_specific = &ipv6_mapped;
- newsk->backlog_rcv = tcp_v4_do_rcv;
+ newsk->sk_backlog_rcv = tcp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = tcp_v6_iif(skb);
@@ -1357,7 +1362,7 @@
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
}
ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = req->rmt_port;
fl.fl_ip_sport = inet_sk(sk)->sport;
@@ -1377,7 +1382,8 @@
#endif
ip6_dst_store(newsk, dst, NULL);
- sk->route_caps = dst->dev->features&~(NETIF_F_IP_CSUM|NETIF_F_TSO);
+ sk->sk_route_caps = dst->dev->features &
+ ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
newtcp6sk = (struct tcp6_sock *)newsk;
newtcp6sk->pinet6 = &newtcp6sk->inet6;
@@ -1391,7 +1397,7 @@
ipv6_addr_copy(&newnp->daddr, &req->af.v6_req.rmt_addr);
ipv6_addr_copy(&newnp->saddr, &req->af.v6_req.loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &req->af.v6_req.loc_addr);
- newsk->bound_dev_if = req->af.v6_req.iif;
+ newsk->sk_bound_dev_if = req->af.v6_req.iif;
/* Now IPv6 options...
@@ -1524,7 +1530,7 @@
if (np->rxopt.all)
opt_skb = skb_clone(skb, GFP_ATOMIC);
- if (sk->state == TCP_ESTABLISHED) { /* Fast path */
+ if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
goto reset;
@@ -1537,7 +1543,7 @@
if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
goto csum_err;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v6_hnd_req(sk, skb);
if (!nsk)
goto discard;
@@ -1586,7 +1592,7 @@
*/
tp = tcp_sk(sk);
if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
- !((1<state)&(TCPF_CLOSE|TCPF_LISTEN))) {
+ !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
if (np->rxopt.bits.rxinfo)
np->mcast_oif = tcp_v6_iif(opt_skb);
if (np->rxopt.bits.rxhlim)
@@ -1650,7 +1656,7 @@
goto no_tcp_socket;
process:
- if(sk->state == TCP_TIME_WAIT)
+ if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
@@ -1749,7 +1755,7 @@
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl6_flowlabel = np->flow_label;
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet->dport;
fl.fl_ip_sport = inet->sport;
@@ -1763,12 +1769,13 @@
if (dst->error) {
err = dst->error;
dst_release(dst);
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
return err;
}
ip6_dst_store(sk, dst, NULL);
- sk->route_caps = dst->dev->features&~(NETIF_F_IP_CSUM|NETIF_F_TSO);
+ sk->sk_route_caps = dst->dev->features &
+ ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
}
return 0;
@@ -1788,7 +1795,7 @@
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl6_flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_sport = inet->sport;
fl.fl_ip_dport = inet->dport;
@@ -1803,9 +1810,9 @@
dst = ip6_route_output(sk, &fl);
if (dst->error) {
- sk->err_soft = -dst->error;
+ sk->sk_err_soft = -dst->error;
dst_release(dst);
- return -sk->err_soft;
+ return -sk->sk_err_soft;
}
ip6_dst_store(sk, dst, NULL);
@@ -1830,8 +1837,9 @@
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
- if (sk->bound_dev_if && ipv6_addr_type(&sin6->sin6_addr)&IPV6_ADDR_LINKLOCAL)
- sin6->sin6_scope_id = sk->bound_dev_if;
+ if (sk->sk_bound_dev_if &&
+ ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ sin6->sin6_scope_id = sk->sk_bound_dev_if;
}
static int tcp_v6_remember_stamp(struct sock *sk)
@@ -1906,15 +1914,15 @@
tp->reordering = sysctl_tcp_reordering;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
tp->af_specific = &ipv6_specific;
- sk->write_space = tcp_write_space;
- sk->use_write_queue = 1;
+ sk->sk_write_space = tcp_write_space;
+ sk->sk_use_write_queue = 1;
- sk->sndbuf = sysctl_tcp_wmem[1];
- sk->rcvbuf = sysctl_tcp_rmem[1];
+ sk->sk_sndbuf = sysctl_tcp_wmem[1];
+ sk->sk_rcvbuf = sysctl_tcp_rmem[1];
atomic_inc(&tcp_sockets_allocated);
@@ -1938,7 +1946,7 @@
__skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */
- if(sk->prev != NULL)
+ if (sk->sk_prev)
tcp_put_port(sk);
/* If sendmsg cached page exists, toss it. */
@@ -2003,9 +2011,9 @@
} else if (tp->pending == TCP_TIME_PROBE0) {
timer_active = 4;
timer_expires = tp->timeout;
- } else if (timer_pending(&sp->timer)) {
+ } else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
- timer_expires = sp->timer.expires;
+ timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
@@ -2019,14 +2027,14 @@
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- sp->state,
+ sp->sk_state,
tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
timer_active, timer_expires-jiffies,
tp->retransmits,
sock_i_uid(sp),
tp->probes_out,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp,
+ atomic_read(&sp->sk_refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
);
@@ -2037,15 +2045,15 @@
{
struct in6_addr *dest, *src;
__u16 destp, srcp;
- int ttd = tw->ttd - jiffies;
+ int ttd = tw->tw_ttd - jiffies;
if (ttd < 0)
ttd = 0;
- dest = &tw->v6_daddr;
- src = &tw->v6_rcv_saddr;
- destp = ntohs(tw->dport);
- srcp = ntohs(tw->sport);
+ dest = &tw->tw_v6_daddr;
+ src = &tw->tw_v6_rcv_saddr;
+ destp = ntohs(tw->tw_dport);
+ srcp = ntohs(tw->tw_sport);
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
@@ -2055,9 +2063,9 @@
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- tw->substate, 0, 0,
+ tw->tw_substate, 0, 0,
3, ttd, 0, 0, 0, 0,
- atomic_read(&tw->refcnt), tw);
+ atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
diff -urN linux-2.5.70-bk11/net/ipv6/udp.c linux-2.5.70-bk12/net/ipv6/udp.c
--- linux-2.5.70-bk11/net/ipv6/udp.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipv6/udp.c 2003-06-07 04:47:52.000000000 -0700
@@ -70,18 +70,18 @@
if (!inet_sk(sk2)->rcv_saddr && !ipv6_only_sock(sk))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
ipv6_addr_any(&inet6_sk(sk2)->rcv_saddr) &&
!(ipv6_only_sock(sk2) && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
(!ipv6_only_sock(sk) ||
- !(sk2->family == AF_INET6 ?
+ !(sk2->sk_family == AF_INET6 ?
(ipv6_addr_type(&inet6_sk(sk2)->rcv_saddr) == IPV6_ADDR_MAPPED) : 1)))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
!ipv6_addr_cmp(&inet6_sk(sk)->rcv_saddr,
&inet6_sk(sk2)->rcv_saddr))
return 1;
@@ -126,7 +126,7 @@
do {
if (++size >= best_size_so_far)
goto next;
- } while ((sk2 = sk2->next) != NULL);
+ } while ((sk2 = sk2->sk_next) != NULL);
best_size_so_far = size;
best = result;
next:;
@@ -147,24 +147,24 @@
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
- sk2 = sk2->next) {
+ sk2 = sk2->sk_next) {
if (inet_sk(sk2)->num == snum &&
sk2 != sk &&
- sk2->bound_dev_if == sk->bound_dev_if &&
- (!sk2->reuse || !sk->reuse) &&
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+ (!sk2->sk_reuse || !sk->sk_reuse) &&
udv6_rcv_saddr_equal(sk, sk2))
goto fail;
}
}
inet_sk(sk)->num = snum;
- if (sk->pprev == NULL) {
+ if (!sk->sk_pprev) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -183,13 +183,13 @@
static void udp_v6_unhash(struct sock *sk)
{
write_lock_bh(&udp_hash_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
inet_sk(sk)->num = 0;
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -203,10 +203,11 @@
int badness = -1;
read_lock(&udp_hash_lock);
- for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
+ for (sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk;
+ sk = sk->sk_next) {
struct inet_opt *inet = inet_sk(sk);
- if (inet->num == hnum && sk->family == PF_INET6) {
+ if (inet->num == hnum && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
int score = 0;
if (inet->dport) {
@@ -224,8 +225,8 @@
continue;
score++;
}
- if(sk->bound_dev_if) {
- if(sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score++;
}
@@ -328,17 +329,19 @@
if (addr_type&IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
- if (sk->bound_dev_if && sk->bound_dev_if != usin->sin6_scope_id) {
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != usin->sin6_scope_id) {
fl6_sock_release(flowlabel);
return -EINVAL;
}
- sk->bound_dev_if = usin->sin6_scope_id;
- if (!sk->bound_dev_if && (addr_type&IPV6_ADDR_MULTICAST))
+ sk->sk_bound_dev_if = usin->sin6_scope_id;
+ if (!sk->sk_bound_dev_if &&
+ (addr_type & IPV6_ADDR_MULTICAST))
fl.oif = np->mcast_oif;
}
/* Connect to link-local address requires an interface */
- if (sk->bound_dev_if == 0)
+ if (!sk->sk_bound_dev_if)
return -EINVAL;
}
@@ -355,7 +358,7 @@
fl.proto = IPPROTO_UDP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet->dport;
fl.fl_ip_sport = inet->sport;
@@ -397,7 +400,7 @@
!ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
&np->daddr : NULL);
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
}
fl6_sock_release(flowlabel);
@@ -494,12 +497,12 @@
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- if (skb == skb_peek(&sk->receive_queue)) {
- __skb_unlink(skb, &sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ if (skb == skb_peek(&sk->sk_receive_queue)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
@@ -534,14 +537,14 @@
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
goto out;
- if (sk->state != TCP_ESTABLISHED && !np->recverr)
+ if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
goto out;
if (np->recverr)
ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
out:
sock_put(sk);
}
@@ -553,7 +556,7 @@
return -1;
}
- if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
@@ -578,10 +581,10 @@
{
struct sock *s = sk;
unsigned short num = ntohs(loc_port);
- for(; s; s = s->next) {
+ for (; s; s = s->sk_next) {
struct inet_opt *inet = inet_sk(s);
- if (inet->num == num && sk->family == PF_INET6) {
+ if (inet->num == num && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(s);
if (inet->dport) {
if (inet->dport != rmt_port)
@@ -591,7 +594,7 @@
ipv6_addr_cmp(&np->daddr, rmt_addr))
continue;
- if (s->bound_dev_if && s->bound_dev_if != dif)
+ if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&np->rcv_saddr)) {
@@ -627,8 +630,8 @@
buff = NULL;
sk2 = sk;
- while((sk2 = udp_v6_mcast_next(sk2->next, uh->dest, daddr,
- uh->source, saddr, dif))) {
+ while ((sk2 = udp_v6_mcast_next(sk2->sk_next, uh->dest, daddr,
+ uh->source, saddr, dif))) {
if (!buff) {
buff = skb_clone(skb, GFP_ATOMIC);
if (!buff)
@@ -770,7 +773,7 @@
int err = 0;
/* Grab the skbuff where UDP header space exists. */
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
/*
@@ -782,12 +785,12 @@
uh->len = htons(up->len);
uh->check = 0;
- if (sk->no_check == UDP_CSUM_NOXMIT) {
+ if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
skb->ip_summed = CHECKSUM_NONE;
goto send;
}
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
skb->csum = csum_partial((char *)uh,
sizeof(struct udphdr), skb->csum);
uh->check = csum_ipv6_magic(&fl->fl6_src,
@@ -796,7 +799,7 @@
} else {
u32 tmp_csum = 0;
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
}
tmp_csum = csum_partial((char *)uh,
@@ -886,8 +889,11 @@
}
}
- /* Otherwise it will be difficult to maintain sk->dst_cache. */
- if (sk->state == TCP_ESTABLISHED &&
+ /*
+ * Otherwise it will be difficult to maintain
+ * sk->sk_dst_cache.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
!ipv6_addr_cmp(daddr, &np->daddr))
daddr = &np->daddr;
@@ -896,7 +902,7 @@
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
fl.oif = sin6->sin6_scope_id;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
up->dport = inet->dport;
@@ -923,7 +929,7 @@
}
if (!fl.oif)
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
@@ -1144,12 +1150,13 @@
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0,
sock_i_uid(sp), 0,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
}
static int udp6_seq_show(struct seq_file *seq, void *v)
diff -urN linux-2.5.70-bk11/net/ipx/af_ipx.c linux-2.5.70-bk12/net/ipx/af_ipx.c
--- linux-2.5.70-bk11/net/ipx/af_ipx.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipx/af_ipx.c 2003-06-07 04:47:52.000000000 -0700
@@ -143,16 +143,16 @@
spin_lock_bh(&intrfc->if_sklist_lock);
s = intrfc->if_sklist;
if (s == sk) {
- intrfc->if_sklist = s->next;
+ intrfc->if_sklist = s->sk_next;
goto out_unlock;
}
- while (s && s->next) {
- if (s->next == sk) {
- s->next = sk->next;
+ while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
goto out_unlock;
}
- s = s->next;
+ s = s->sk_next;
}
out_unlock:
spin_unlock_bh(&intrfc->if_sklist_lock);
@@ -165,14 +165,14 @@
static void ipx_destroy_socket(struct sock *sk)
{
ipx_remove_socket(sk);
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
#ifdef IPX_REFCNT_DEBUG
atomic_dec(&ipx_sock_nr);
printk(KERN_DEBUG "IPX socket %p released, %d are still alive\n", sk,
atomic_read(&ipx_sock_nr));
- if (atomic_read(&sk->refcnt) != 1)
+ if (atomic_read(&sk->sk_refcnt) != 1)
printk(KERN_DEBUG "Destruction sock ipx %p delayed, cnt=%d\n",
- sk, atomic_read(&sk->refcnt));
+ sk, atomic_read(&sk->sk_refcnt));
#endif
sock_put(sk);
}
@@ -246,14 +246,14 @@
sock_hold(sk);
spin_lock_bh(&intrfc->if_sklist_lock);
ipx_sk(sk)->intrfc = intrfc;
- sk->next = NULL;
+ sk->sk_next = NULL;
if (!intrfc->if_sklist)
intrfc->if_sklist = sk;
else {
struct sock *s = intrfc->if_sklist;
- while (s->next)
- s = s->next;
- s->next = sk;
+ while (s->sk_next)
+ s = s->sk_next;
+ s->sk_next = sk;
}
spin_unlock_bh(&intrfc->if_sklist_lock);
ipxitf_put(intrfc);
@@ -266,7 +266,7 @@
struct sock *s = intrfc->if_sklist;
while (s && ipx_sk(s)->port != port)
- s = s->next;
+ s = s->sk_next;
return s;
}
@@ -303,7 +303,7 @@
if (ipxs->port == port &&
!memcmp(node, ipxs->node, IPX_NODE_LEN))
break;
- s = s->next;
+ s = s->sk_next;
}
spin_unlock_bh(&intrfc->if_sklist_lock);
ipxitf_put(intrfc);
@@ -324,14 +324,14 @@
for (s = intrfc->if_sklist; s;) {
struct ipx_opt *ipxs = ipx_sk(s);
- s->err = ENOLINK;
- s->error_report(s);
+ s->sk_err = ENOLINK;
+ s->sk_error_report(s);
ipxs->intrfc = NULL;
ipxs->port = 0;
- s->zapped = 1; /* Indicates it is no longer bound */
+ s->sk_zapped = 1; /* Indicates it is no longer bound */
t = s;
- s = s->next;
- t->next = NULL;
+ s = s->sk_next;
+ t->sk_next = NULL;
}
intrfc->if_sklist = NULL;
spin_unlock_bh(&intrfc->if_sklist_lock);
@@ -429,7 +429,7 @@
if (intrfc != ipx_internal_net)
break;
}
- s = s->next;
+ s = s->sk_next;
}
/* skb was solely for us, and we did not make a copy, so free it. */
@@ -468,7 +468,7 @@
spin_lock_bh(&intrfc->if_sklist_lock);
for (sk = intrfc->if_sklist;
sk && ipx_sk(sk)->ipx_ncp_conn != connection;
- sk = sk->next);
+ sk = sk->sk_next);
if (sk)
sock_hold(sk);
spin_unlock_bh(&intrfc->if_sklist_lock);
@@ -1385,7 +1385,7 @@
atomic_read(&ipx_sock_nr));
#endif
sock_init_data(sock, sk);
- sk->no_check = 1; /* Checksum off by default */
+ sk->sk_no_check = 1; /* Checksum off by default */
rc = 0;
out:
return rc;
@@ -1402,7 +1402,7 @@
goto out;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock->sk = NULL;
@@ -1442,7 +1442,7 @@
struct sockaddr_ipx *addr = (struct sockaddr_ipx *)uaddr;
int rc = -EINVAL;
- if (!sk->zapped || addr_len != sizeof(struct sockaddr_ipx))
+ if (!sk->sk_zapped || addr_len != sizeof(struct sockaddr_ipx))
goto out;
intrfc = ipxitf_find_using_net(addr->sipx_network);
@@ -1520,7 +1520,7 @@
#endif /* CONFIG_IPX_INTERN */
ipxitf_insert_socket(intrfc, sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
rc = 0;
out_put:
@@ -1538,7 +1538,7 @@
int rc = -EINVAL;
struct ipx_route *rt;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(*addr))
@@ -1580,7 +1580,7 @@
if (sock->type == SOCK_DGRAM) {
sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
}
if (rt)
@@ -1604,7 +1604,7 @@
if (peer) {
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
addr = &ipxs->dest_addr;
@@ -1703,7 +1703,7 @@
int flags = msg->msg_flags;
/* Socket gets bound below anyway */
-/* if (sk->zapped)
+/* if (sk->sk_zapped)
return -EIO; */ /* Socket not bound */
if (flags & ~MSG_DONTWAIT)
goto out;
@@ -1733,7 +1733,7 @@
goto out;
} else {
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
usipx = &local_sipx;
@@ -1784,7 +1784,7 @@
}
rc = -ENOTCONN;
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
@@ -1803,7 +1803,7 @@
copied);
if (rc)
goto out_free;
- sk->stamp = skb->stamp;
+ sk->sk_stamp = skb->stamp;
msg->msg_namelen = sizeof(*sipx);
@@ -1831,13 +1831,13 @@
switch (cmd) {
case TIOCOUTQ:
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
rc = put_user(amount, (int *)arg);
break;
case TIOCINQ: {
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
/* These two are safe on a single CPU system as only
* user tasks fiddle here */
if (skb)
@@ -1878,10 +1878,10 @@
rc = -EINVAL;
if (sk) {
rc = -ENOENT;
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
break;
rc = -EFAULT;
- if (!copy_to_user((void *)arg, &sk->stamp,
+ if (!copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
rc = 0;
}
diff -urN linux-2.5.70-bk11/net/ipx/ipx_proc.c linux-2.5.70-bk12/net/ipx/ipx_proc.c
--- linux-2.5.70-bk11/net/ipx/ipx_proc.c 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/net/ipx/ipx_proc.c 2003-06-07 04:47:52.000000000 -0700
@@ -174,7 +174,7 @@
if (!pos)
break;
spin_lock_bh(&i->if_sklist_lock);
- for (s = i->if_sklist; pos && s; s = s->next)
+ for (s = i->if_sklist; pos && s; s = s->sk_next)
--pos;
if (!pos) {
if (!s)
@@ -213,8 +213,8 @@
goto out;
}
sk = v;
- if (sk->next) {
- sk = sk->next;
+ if (sk->sk_next) {
+ sk = sk->sk_next;
goto out;
}
ipxs = ipx_sk(sk);
@@ -264,7 +264,7 @@
seq_printf(seq, "%08lX:%04X ", (unsigned long) htonl(ipxs->intrfc->if_netnum),
htons(ipxs->port));
#endif /* CONFIG_IPX_INTERN */
- if (s->state != TCP_ESTABLISHED)
+ if (s->sk_state != TCP_ESTABLISHED)
seq_printf(seq, "%-28s", "Not_Connected");
else {
seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ",
@@ -276,8 +276,9 @@
}
seq_printf(seq, "%08X %08X %02X %03d\n",
- atomic_read(&s->wmem_alloc), atomic_read(&s->rmem_alloc),
- s->state, SOCK_INODE(s->socket)->i_uid);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
out:
return 0;
}
diff -urN linux-2.5.70-bk11/net/ipx/ipx_route.c linux-2.5.70-bk12/net/ipx/ipx_route.c
--- linux-2.5.70-bk11/net/ipx/ipx_route.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/net/ipx/ipx_route.c 2003-06-07 04:47:52.000000000 -0700
@@ -238,7 +238,7 @@
}
/* Apply checksum. Not allowed on 802.3 links. */
- if (sk->no_check || intrfc->if_dlink_type == IPX_FRAME_8023)
+ if (sk->sk_no_check || intrfc->if_dlink_type == IPX_FRAME_8023)
ipx->ipx_checksum = 0xFFFF;
else
ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
diff -urN linux-2.5.70-bk11/net/irda/af_irda.c linux-2.5.70-bk12/net/irda/af_irda.c
--- linux-2.5.70-bk11/net/irda/af_irda.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/irda/af_irda.c 2003-06-07 04:47:52.000000000 -0700
@@ -133,12 +133,12 @@
}
/* Prevent race conditions with irda_release() and irda_shutdown() */
- if (!sock_flag(sk, SOCK_DEAD) && sk->state != TCP_CLOSE) {
- sk->state = TCP_CLOSE;
- sk->err = ECONNRESET;
- sk->shutdown |= SEND_SHUTDOWN;
+ if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = ECONNRESET;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state_change(sk);
/* Uh-oh... Should use sock_orphan ? */
sock_set_flag(sk, SOCK_DEAD);
@@ -151,7 +151,7 @@
* requests. Some apps forget to close sockets, or hang to it
* a bit too long, so we may stay in this dead state long
* enough to be noticed...
- * Note : all socket function do check sk->state, so we are
+ * Note : all socket function do check sk->sk_state, so we are
* safe...
* Jean II
*/
@@ -163,8 +163,8 @@
/* Note : once we are there, there is not much you want to do
* with the socket anymore, apart from closing it.
- * For example, bind() and connect() won't reset sk->err,
- * sk->shutdown and sk->flags to valid values...
+ * For example, bind() and connect() won't reset sk->sk_err,
+ * sk->sk_shutdown and sk->sk_flags to valid values...
* Jean II
*/
}
@@ -192,7 +192,7 @@
return;
dev_kfree_skb(skb);
- // Should be ??? skb_queue_tail(&sk->receive_queue, skb);
+ // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb);
/* How much header space do we need to reserve */
self->max_header_size = max_header_size;
@@ -201,7 +201,7 @@
self->max_sdu_size_tx = max_sdu_size;
/* Find out what the largest chunk of data that we can transmit is */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_STREAM:
if (max_sdu_size != 0) {
ERROR("%s: max_sdu_size must be 0\n", __FUNCTION__);
@@ -226,8 +226,8 @@
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
/* We are now connected! */
- sk->state = TCP_ESTABLISHED;
- sk->state_change(sk);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_state_change(sk);
}
/*
@@ -258,7 +258,7 @@
self->max_sdu_size_tx = max_sdu_size;
/* Find out what the largest chunk of data that we can transmit is */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_STREAM:
if (max_sdu_size != 0) {
ERROR("%s: max_sdu_size must be 0\n", __FUNCTION__);
@@ -284,8 +284,8 @@
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
- skb_queue_tail(&sk->receive_queue, skb);
- sk->state_change(sk);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_state_change(sk);
}
/*
@@ -344,7 +344,7 @@
self->tx_flow = flow;
IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
__FUNCTION__);
- wake_up_interruptible(sk->sleep);
+ wake_up_interruptible(sk->sk_sleep);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __FUNCTION__);
@@ -717,7 +717,7 @@
struct irda_sock *self = irda_sk(sk);
if (peer) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
saddr.sir_family = AF_IRDA;
@@ -751,13 +751,13 @@
IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
- if ((sk->type != SOCK_STREAM) && (sk->type != SOCK_SEQPACKET) &&
- (sk->type != SOCK_DGRAM))
+ if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
+ (sk->sk_type != SOCK_DGRAM))
return -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN) {
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ if (sk->sk_state != TCP_LISTEN) {
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
return 0;
}
@@ -787,7 +787,8 @@
#ifdef CONFIG_IRDA_ULTRA
/* Special care for Ultra sockets */
- if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA)) {
+ if ((sk->sk_type == SOCK_DGRAM) &&
+ (sk->sk_protocol == IRDAPROTO_ULTRA)) {
self->pid = addr->sir_lsap_sel;
if (self->pid & 0x80) {
IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __FUNCTION__);
@@ -802,7 +803,7 @@
/* Pretend we are connected */
sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
return 0;
}
@@ -839,7 +840,7 @@
ASSERT(self != NULL, return -1;);
- err = irda_create(newsock, sk->protocol);
+ err = irda_create(newsock, sk->sk_protocol);
if (err)
return err;
@@ -849,11 +850,11 @@
if ((sk = sock->sk) == NULL)
return -EINVAL;
- if ((sk->type != SOCK_STREAM) && (sk->type != SOCK_SEQPACKET) &&
- (sk->type != SOCK_DGRAM))
+ if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
+ (sk->sk_type != SOCK_DGRAM))
return -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
return -EINVAL;
/*
@@ -869,7 +870,7 @@
* calling us, the data is waiting for us ;-)
* Jean II
*/
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
int ret = 0;
DECLARE_WAITQUEUE(waitq, current);
@@ -883,10 +884,10 @@
* We don't us the macro because the condition has
* side effects : we want to make sure that only one
* skb get dequeued - Jean II */
- add_wait_queue(sk->sleep, &waitq);
+ add_wait_queue(sk->sk_sleep, &waitq);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb != NULL)
break;
if (!signal_pending(current)) {
@@ -897,13 +898,13 @@
break;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &waitq);
+ remove_wait_queue(sk->sk_sleep, &waitq);
if(ret)
return -ERESTARTSYS;
}
newsk = newsock->sk;
- newsk->state = TCP_ESTABLISHED;
+ newsk->sk_state = TCP_ESTABLISHED;
new = irda_sk(newsk);
ASSERT(new != NULL, return -1;);
@@ -935,7 +936,7 @@
skb->sk = NULL;
skb->destructor = NULL;
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->state = SS_CONNECTED;
@@ -975,23 +976,23 @@
IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self);
/* Don't allow connect for Ultra sockets */
- if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA))
+ if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
return -ESOCKTNOSUPPORT;
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
return 0; /* Connect completed during a ERESTARTSYS event */
}
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_irda))
@@ -1024,7 +1025,7 @@
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
/* Connect to remote device */
err = irttp_connect_request(self->tsap, self->dtsap_sel,
@@ -1036,13 +1037,14 @@
}
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return -EINPROGRESS;
- if (wait_event_interruptible(*(sk->sleep), (sk->state!=TCP_SYN_SENT)))
+ if (wait_event_interruptible(*(sk->sk_sleep),
+ (sk->sk_state != TCP_SYN_SENT)))
return -ERESTARTSYS;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
return sock_error(sk); /* Always set at this point */
}
@@ -1095,9 +1097,9 @@
init_waitqueue_head(&self->query_wait);
/* Initialise networking socket struct */
- sock_init_data(sock, sk); /* Note : set sk->refcnt to 1 */
- sk->family = PF_IRDA;
- sk->protocol = protocol;
+ sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */
+ sk->sk_family = PF_IRDA;
+ sk->sk_protocol = protocol;
/* Link networking socket and IrDA socket structs together */
self->sk = sk;
@@ -1194,9 +1196,9 @@
if (sk == NULL)
return 0;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
/* Destroy IrDA socket */
irda_destroy_socket(irda_sk(sk));
@@ -1207,10 +1209,10 @@
sock->sk = NULL;
/* Purge queues (see sock_init_data()) */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
/* Destroy networking socket if we are the last reference on it,
- * i.e. if(sk->refcnt == 0) -> sk_free(sk) */
+ * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
sock_put(sk);
/* Notes on socket locking and deallocation... - Jean II
@@ -1264,12 +1266,12 @@
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR))
return -EINVAL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
self = irda_sk(sk);
@@ -1277,12 +1279,12 @@
/* Check if IrTTP is wants us to slow down */
- if (wait_event_interruptible(*(sk->sleep),
- (self->tx_flow != FLOW_STOP || sk->state != TCP_ESTABLISHED)))
+ if (wait_event_interruptible(*(sk->sk_sleep),
+ (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED)))
return -ERESTARTSYS;
/* Check if we are still connected */
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Check that we don't send out to big frames */
@@ -1358,7 +1360,7 @@
* empty
*/
if (self->rx_flow == FLOW_STOP) {
- if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) {
+ if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
@@ -1398,9 +1400,8 @@
do {
int chunk;
- struct sk_buff *skb;
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
- skb=skb_dequeue(&sk->receive_queue);
if (skb==NULL) {
int ret = 0;
@@ -1411,32 +1412,32 @@
* wait_event_interruptible() macro.
* We don't us the macro because the test condition
* is messy. - Jean II */
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
- add_wait_queue(sk->sleep, &waitq);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ add_wait_queue(sk->sk_sleep, &waitq);
set_current_state(TASK_INTERRUPTIBLE);
/*
* POSIX 1003.1g mandates this order.
*/
- if (sk->err)
+ if (sk->sk_err)
ret = sock_error(sk);
- else if (sk->shutdown & RCV_SHUTDOWN)
+ else if (sk->sk_shutdown & RCV_SHUTDOWN)
;
else if (noblock)
ret = -EAGAIN;
else if (signal_pending(current))
ret = -ERESTARTSYS;
- else if (skb_peek(&sk->receive_queue) == NULL)
+ else if (skb_peek(&sk->sk_receive_queue) == NULL)
/* Wait process until data arrives */
schedule();
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &waitq);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ remove_wait_queue(sk->sk_sleep, &waitq);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
if(ret)
return(ret);
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
continue;
@@ -1444,7 +1445,7 @@
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
break;
@@ -1460,7 +1461,7 @@
if (skb->len) {
IRDA_DEBUG(1, "%s(), back on q!\n",
__FUNCTION__);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
@@ -1469,7 +1470,7 @@
IRDA_DEBUG(0, "%s() questionable!?\n", __FUNCTION__);
/* put message back and return */
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
@@ -1481,7 +1482,7 @@
* empty
*/
if (self->rx_flow == FLOW_STOP) {
- if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) {
+ if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
@@ -1512,12 +1513,12 @@
if (msg->msg_flags & ~MSG_DONTWAIT)
return -EINVAL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
self = irda_sk(sk);
@@ -1578,7 +1579,7 @@
if (msg->msg_flags & ~MSG_DONTWAIT)
return -EINVAL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
@@ -1629,9 +1630,9 @@
IRDA_DEBUG(1, "%s(%p)\n", __FUNCTION__, self);
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
if (self->iriap) {
iriap_close(self->iriap);
@@ -1664,32 +1665,32 @@
IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* Exceptional events? */
- if (sk->err)
+ if (sk->sk_err)
mask |= POLLERR;
- if (sk->shutdown & RCV_SHUTDOWN) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__);
mask |= POLLHUP;
}
/* Readable? */
- if (!skb_queue_empty(&sk->receive_queue)) {
+ if (!skb_queue_empty(&sk->sk_receive_queue)) {
IRDA_DEBUG(4, "Socket is readable\n");
mask |= POLLIN | POLLRDNORM;
}
/* Connection-based need to check for termination and startup */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_STREAM:
- if (sk->state == TCP_CLOSE) {
+ if (sk->sk_state == TCP_CLOSE) {
IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__);
mask |= POLLHUP;
}
- if (sk->state == TCP_ESTABLISHED) {
+ if (sk->sk_state == TCP_ESTABLISHED) {
if ((self->tx_flow == FLOW_START) &&
sock_writeable(sk))
{
@@ -1726,7 +1727,7 @@
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
if (put_user(amount, (unsigned int *)arg))
@@ -1738,7 +1739,7 @@
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
if (put_user(amount, (unsigned int *)arg))
return -EFAULT;
@@ -1747,9 +1748,9 @@
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- if (copy_to_user((void *)arg, &sk->stamp,
+ if (copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
return -EFAULT;
return 0;
@@ -1973,7 +1974,7 @@
return -EFAULT;
/* Only possible for a seqpacket service (TTP with SAR) */
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n",
__FUNCTION__, opt);
self->max_sdu_size_rx = opt;
diff -urN linux-2.5.70-bk11/net/key/af_key.c linux-2.5.70-bk12/net/key/af_key.c
--- linux-2.5.70-bk11/net/key/af_key.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/key/af_key.c 2003-06-07 04:47:52.000000000 -0700
@@ -46,19 +46,19 @@
int registered;
int promisc;
};
-#define pfkey_sk(__sk) ((struct pfkey_opt *)(__sk)->protinfo)
+#define pfkey_sk(__sk) ((struct pfkey_opt *)(__sk)->sk_protinfo)
static void pfkey_sock_destruct(struct sock *sk)
{
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive pfkey socket: %p\n", sk);
return;
}
- BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
- BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
kfree(pfkey_sk(sk));
@@ -114,7 +114,7 @@
static void pfkey_insert(struct sock *sk)
{
pfkey_table_grab();
- sk->next = pfkey_table;
+ sk->sk_next = pfkey_table;
pfkey_table = sk;
sock_hold(sk);
pfkey_table_ungrab();
@@ -125,9 +125,9 @@
struct sock **skp;
pfkey_table_grab();
- for (skp = &pfkey_table; *skp; skp = &((*skp)->next)) {
+ for (skp = &pfkey_table; *skp; skp = &((*skp)->sk_next)) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -165,8 +165,8 @@
}
memset(pfk, 0, sizeof(*pfk));
- sk->family = PF_KEY;
- sk->destruct = pfkey_sock_destruct;
+ sk->sk_family = PF_KEY;
+ sk->sk_destruct = pfkey_sock_destruct;
atomic_inc(&pfkey_socks_nr);
@@ -188,7 +188,7 @@
sock_orphan(sk);
sock->sk = NULL;
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
sock_put(sk);
return 0;
@@ -209,11 +209,11 @@
}
}
if (*skb2 != NULL) {
- if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
skb_orphan(*skb2);
skb_set_owner_r(*skb2, sk);
- skb_queue_tail(&sk->receive_queue, *skb2);
- sk->data_ready(sk, (*skb2)->len);
+ skb_queue_tail(&sk->sk_receive_queue, *skb2);
+ sk->sk_data_ready(sk, (*skb2)->len);
*skb2 = NULL;
err = 0;
}
@@ -241,7 +241,7 @@
return -ENOMEM;
pfkey_lock_table();
- for (sk = pfkey_table; sk; sk = sk->next) {
+ for (sk = pfkey_table; sk; sk = sk->sk_next) {
struct pfkey_opt *pfk = pfkey_sk(sk);
int err2;
@@ -2694,7 +2694,7 @@
goto out;
err = -EMSGSIZE;
- if ((unsigned)len > sk->sndbuf-32)
+ if ((unsigned)len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
@@ -2804,12 +2804,12 @@
read_lock(&pfkey_table_lock);
- for (s = pfkey_table; s; s = s->next) {
+ for (s = pfkey_table; s; s = s->sk_next) {
len += sprintf(buffer+len,"%p %-6d %-6u %-6u %-6u %-6lu",
s,
- atomic_read(&s->refcnt),
- atomic_read(&s->rmem_alloc),
- atomic_read(&s->wmem_alloc),
+ atomic_read(&s->sk_refcnt),
+ atomic_read(&s->sk_rmem_alloc),
+ atomic_read(&s->sk_wmem_alloc),
sock_i_uid(s),
sock_i_ino(s)
);
diff -urN linux-2.5.70-bk11/net/llc/af_llc.c linux-2.5.70-bk12/net/llc/af_llc.c
--- linux-2.5.70-bk11/net/llc/af_llc.c 2003-05-26 18:00:20.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/af_llc.c 2003-06-07 04:47:53.000000000 -0700
@@ -97,7 +97,7 @@
if (addr->sllc_test || addr->sllc_xid)
rc = LLC_PDU_LEN_U;
- else if (sk->type == SOCK_STREAM)
+ else if (sk->sk_type == SOCK_STREAM)
rc = LLC_PDU_LEN_I;
return rc;
}
@@ -129,11 +129,11 @@
static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
{
- sk->type = sock->type;
- sk->sleep = &sock->wait;
- sk->socket = sock;
- sock->sk = sk;
- sock->ops = &llc_ui_ops;
+ sk->sk_type = sock->type;
+ sk->sk_sleep = &sock->wait;
+ sk->sk_socket = sock;
+ sock->sk = sk;
+ sock->ops = &llc_ui_ops;
}
/**
@@ -180,8 +180,8 @@
dprintk("%s: closing local(%02X) remote(%02X)\n", __FUNCTION__,
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
- llc_ui_wait_for_disc(sk, sk->rcvtimeo);
- if (!sk->zapped)
+ llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+ if (!sk->sk_zapped)
llc_sap_unassign_sock(llc->sap, sk);
release_sock(sk);
if (llc->sap && !llc->sap->sk_list.list)
@@ -246,7 +246,7 @@
struct net_device *dev = NULL;
int rc = -EINVAL;
- if (!sk->zapped)
+ if (!sk->sk_zapped)
goto out;
/* bind to a specific mac, optional. */
if (!llc_mac_null(addr->sllc_smac)) {
@@ -281,7 +281,7 @@
memset(&laddr, 0, sizeof(laddr));
memset(&daddr, 0, sizeof(daddr));
if (!llc_mac_null(addr->sllc_mmac)) {
- if (sk->type != SOCK_DGRAM) {
+ if (sk->sk_type != SOCK_DGRAM) {
rc = -EOPNOTSUPP;
goto out;
}
@@ -304,7 +304,7 @@
memcpy(&llc->addr, addr, sizeof(llc->addr));
/* assign new connection to its SAP */
llc_sap_assign_sock(sap, sk);
- rc = sk->zapped = 0;
+ rc = sk->sk_zapped = 0;
out:
return rc;
}
@@ -334,7 +334,7 @@
int rc = -EINVAL;
dprintk("%s: binding %02X\n", __FUNCTION__, addr->sllc_ssap);
- if (!sk->zapped || addrlen != sizeof(*addr))
+ if (!sk->sk_zapped || addrlen != sizeof(*addr))
goto out;
rc = -EAFNOSUPPORT;
if (addr->sllc_family != AF_LLC)
@@ -362,16 +362,16 @@
int rc = -ENOTCONN;
lock_sock(sk);
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
rc = -EINVAL;
if (how != 2)
goto out;
rc = llc_send_disc(sk);
if (!rc)
- rc = llc_ui_wait_for_disc(sk, sk->rcvtimeo);
+ rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
/* Wake up anyone sleeping in poll */
- sk->state_change(sk);
+ sk->sk_state_change(sk);
out:
release_sock(sk);
return rc;
@@ -407,7 +407,7 @@
if (addr->sllc_family != AF_LLC)
goto out;
/* bind connection to sap if user hasn't done it. */
- if (sk->zapped) {
+ if (sk->sk_zapped) {
/* bind to sap with null dev, exclusive */
rc = llc_ui_autobind(sock, addr);
if (rc)
@@ -422,23 +422,23 @@
llc->dev = dev;
} else
dev = llc->dev;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
goto out;
rc = -EALREADY;
if (sock->state == SS_CONNECTING)
goto out;
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
llc->link = llc_ui_next_link_no(llc->sap->laddr.lsap);
rc = llc_establish_connection(sk, dev->dev_addr,
addr->sllc_dmac, addr->sllc_dsap);
if (rc) {
dprintk("%s: llc_ui_send_conn failed :-(\n", __FUNCTION__);
- sock->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
goto out;
}
- rc = llc_ui_wait_for_conn(sk, sk->rcvtimeo);
+ rc = llc_ui_wait_for_conn(sk, sk->sk_rcvtimeo);
if (rc)
dprintk("%s: llc_ui_wait_for_conn failed=%d\n", __FUNCTION__, rc);
out:
@@ -463,20 +463,20 @@
if (sock->state != SS_UNCONNECTED)
goto out;
rc = -EOPNOTSUPP;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
goto out;
rc = -EAGAIN;
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
rc = 0;
if (!(unsigned)backlog) /* BSDism */
backlog = 1;
- sk->max_ack_backlog = backlog;
- if (sk->state != TCP_LISTEN) {
- sk->ack_backlog = 0;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ if (sk->sk_state != TCP_LISTEN) {
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = TCP_LISTEN;
}
- sk->socket->flags |= __SO_ACCEPTCON;
+ sk->sk_socket->flags |= __SO_ACCEPTCON;
out:
release_sock(sk);
return rc;
@@ -487,7 +487,7 @@
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ERESTARTSYS;
@@ -497,7 +497,7 @@
if (!timeout)
break;
rc = 0;
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -505,7 +505,7 @@
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -514,11 +514,11 @@
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -EAGAIN;
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -527,7 +527,7 @@
if (!timeout)
break;
rc = 0;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -535,7 +535,7 @@
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -544,10 +544,10 @@
DECLARE_WAITQUEUE(wait, current);
int rc = 0;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -558,12 +558,12 @@
/*
* Well, if we have backlog, try to process it now.
*/
- if (sk->backlog.tail) {
+ if (sk->sk_backlog.tail) {
release_sock(sk);
lock_sock(sk);
}
rc = 0;
- if (skb_queue_empty(&sk->receive_queue)) {
+ if (skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -571,7 +571,7 @@
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -581,12 +581,12 @@
struct llc_opt *llc = llc_sk(sk);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
dprintk("%s: looping...\n", __FUNCTION__);
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ENOTCONN;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -603,7 +603,7 @@
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -626,18 +626,18 @@
dprintk("%s: accepting on %02X\n", __FUNCTION__,
llc_sk(sk)->addr.sllc_ssap);
lock_sock(sk);
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
goto out;
rc = -EINVAL;
- if (sock->state != SS_UNCONNECTED || sk->state != TCP_LISTEN)
+ if (sock->state != SS_UNCONNECTED || sk->sk_state != TCP_LISTEN)
goto out;
/* wait for a connection to arrive. */
- rc = llc_ui_wait_for_data(sk, sk->rcvtimeo);
+ rc = llc_ui_wait_for_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out;
dprintk("%s: got a new connection on %02X\n", __FUNCTION__,
llc_sk(sk)->addr.sllc_ssap);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
rc = -EINVAL;
if (!skb->sk)
goto frees;
@@ -645,9 +645,9 @@
newsk = skb->sk;
/* attach connection to a new socket. */
llc_ui_sk_init(newsock, newsk);
- newsk->pair = NULL;
- newsk->zapped = 0;
- newsk->state = TCP_ESTABLISHED;
+ newsk->sk_pair = NULL;
+ newsk->sk_zapped = 0;
+ newsk->sk_state = TCP_ESTABLISHED;
newsock->state = SS_CONNECTED;
llc = llc_sk(sk);
newllc = llc_sk(newsk);
@@ -657,8 +657,8 @@
newllc->link = llc_ui_next_link_no(newllc->laddr.lsap);
/* put original socket back into a clean listen state. */
- sk->state = TCP_LISTEN;
- sk->ack_backlog--;
+ sk->sk_state = TCP_LISTEN;
+ sk->sk_ack_backlog--;
skb->sk = NULL;
dprintk("%s: ok success on %02X, client on %02X\n", __FUNCTION__,
llc_sk(sk)->addr.sllc_ssap, newllc->addr.sllc_dsap);
@@ -699,7 +699,7 @@
llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
goto out;
}
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (!skb) /* shutdown */
goto out;
copied = skb->len;
@@ -710,7 +710,7 @@
goto dgram_free;
if (skb->len > copied) {
skb_pull(skb, copied);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
}
if (uaddr)
memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
@@ -757,7 +757,7 @@
addr = &llc->addr;
}
/* must bind connection to sap if user hasn't done it. */
- if (sk->zapped) {
+ if (sk->sk_zapped) {
/* bind to sap with null dev, exclusive. */
rc = llc_ui_autobind(sock, addr);
if (rc)
@@ -789,7 +789,7 @@
rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied);
if (rc)
goto out;
- if (sk->type == SOCK_DGRAM || addr->sllc_ua) {
+ if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_dmac,
addr->sllc_dsap);
goto out;
@@ -805,7 +805,7 @@
goto out;
}
rc = -ENOPROTOOPT;
- if (!(sk->type == SOCK_STREAM && !addr->sllc_ua))
+ if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
goto out;
rc = llc_ui_send_data(sk, skb, noblock);
if (rc)
@@ -839,13 +839,13 @@
int rc = 0;
lock_sock(sk);
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
*uaddrlen = sizeof(sllc);
memset(uaddr, 0, *uaddrlen);
if (peer) {
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if(llc->dev)
sllc.sllc_arphrd = llc->dev->type;
diff -urN linux-2.5.70-bk11/net/llc/llc_c_ac.c linux-2.5.70-bk12/net/llc/llc_c_ac.c
--- linux-2.5.70-bk11/net/llc/llc_c_ac.c 2003-05-26 18:01:00.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/llc_c_ac.c 2003-06-07 04:47:53.000000000 -0700
@@ -783,7 +783,7 @@
llc_sk(sk)->p_flag = value;
if (state_changed)
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
diff -urN linux-2.5.70-bk11/net/llc/llc_conn.c linux-2.5.70-bk12/net/llc/llc_conn.c
--- linux-2.5.70-bk11/net/llc/llc_conn.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/llc_conn.c 2003-06-07 04:47:53.000000000 -0700
@@ -44,7 +44,7 @@
struct sockaddr_llc *addr = llc_ui_skb_cb(skb);
/* save primitive for use by the user. */
- addr->sllc_family = skb->sk->family;
+ addr->sllc_family = skb->sk->sk_family;
addr->sllc_arphrd = skb->dev->type;
addr->sllc_test = prim == LLC_TEST_PRIM;
addr->sllc_xid = prim == LLC_XID_PRIM;
@@ -110,18 +110,19 @@
struct sock *parent = skb->sk;
skb->sk = sk;
- skb_queue_tail(&parent->receive_queue, skb);
- sk->state_change(parent);
+ skb_queue_tail(&parent->sk_receive_queue, skb);
+ sk->sk_state_change(parent);
}
break;
case LLC_DISC_PRIM:
sock_hold(sk);
- if (sk->type == SOCK_STREAM && sk->state == TCP_ESTABLISHED) {
- sk->shutdown = SHUTDOWN_MASK;
- sk->socket->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
+ if (sk->sk_type == SOCK_STREAM &&
+ sk->sk_state == TCP_ESTABLISHED) {
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ sk->sk_socket->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
@@ -149,28 +150,29 @@
switch (ev->cfm_prim) {
case LLC_DATA_PRIM:
if (!llc_data_accept_state(llc->state))
- sk->write_space(sk);
+ sk->sk_write_space(sk);
else
rc = llc->failed_data_req = 1;
break;
case LLC_CONN_PRIM:
- if (sk->type == SOCK_STREAM && sk->state == TCP_SYN_SENT) {
+ if (sk->sk_type == SOCK_STREAM &&
+ sk->sk_state == TCP_SYN_SENT) {
if (ev->status) {
- sk->socket->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
+ sk->sk_socket->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
} else {
- sk->socket->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_socket->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
}
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
break;
case LLC_DISC_PRIM:
sock_hold(sk);
- if (sk->type == SOCK_STREAM && sk->state == TCP_CLOSING) {
- sk->socket->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
- sk->state_change(sk);
+ if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSING) {
+ sk->sk_socket->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_state_change(sk);
}
sock_put(sk);
break;
@@ -199,7 +201,7 @@
void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
llc_conn_send_pdus(sk);
}
@@ -250,7 +252,7 @@
pdu = llc_pdu_sn_hdr(skb);
llc_pdu_set_cmd_rsp(skb, LLC_PDU_CMD);
llc_pdu_set_pf_bit(skb, first_p_bit);
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
first_p_bit = 0;
llc->vS = LLC_I_GET_NS(pdu);
howmany_resend++;
@@ -291,7 +293,7 @@
llc_pdu_set_cmd_rsp(skb, LLC_PDU_RSP);
llc_pdu_set_pf_bit(skb, first_f_bit);
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
first_f_bit = 0;
llc->vS = LLC_I_GET_NS(pdu);
howmany_resend++;
@@ -351,7 +353,7 @@
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&sk->write_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
if (!LLC_PDU_TYPE_IS_I(pdu) &&
@@ -391,7 +393,7 @@
if (!rc && trans->next_state != NO_STATE_CHANGE) {
llc->state = trans->next_state;
if (!llc_data_accept_state(llc->state))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
}
out:
@@ -489,7 +491,7 @@
struct sock *rc;
read_lock_bh(&sap->sk_list.lock);
- for (rc = sap->sk_list.list; rc; rc = rc->next) {
+ for (rc = sap->sk_list.list; rc; rc = rc->sk_next) {
struct llc_opt *llc = llc_sk(rc);
if (llc->laddr.lsap == laddr->lsap &&
@@ -518,10 +520,10 @@
struct sock *rc;
read_lock_bh(&sap->sk_list.lock);
- for (rc = sap->sk_list.list; rc; rc = rc->next) {
+ for (rc = sap->sk_list.list; rc; rc = rc->sk_next) {
struct llc_opt *llc = llc_sk(rc);
- if (rc->type == SOCK_STREAM && rc->state == TCP_LISTEN &&
+ if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
llc->laddr.lsap == laddr->lsap &&
llc_mac_match(llc->laddr.mac, laddr->mac))
break;
@@ -545,10 +547,10 @@
struct sock *rc;
read_lock_bh(&sap->sk_list.lock);
- for (rc = sap->sk_list.list; rc; rc = rc->next) {
+ for (rc = sap->sk_list.list; rc; rc = rc->sk_next) {
struct llc_opt *llc = llc_sk(rc);
- if (rc->type == SOCK_DGRAM &&
+ if (rc->sk_type == SOCK_DGRAM &&
llc->laddr.lsap == laddr->lsap &&
llc_mac_match(llc->laddr.mac, laddr->mac))
break;
diff -urN linux-2.5.70-bk11/net/llc/llc_if.c linux-2.5.70-bk12/net/llc/llc_if.c
--- linux-2.5.70-bk11/net/llc/llc_if.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/llc_if.c 2003-06-07 04:47:53.000000000 -0700
@@ -223,7 +223,7 @@
memcpy(laddr.mac, lmac, sizeof(laddr.mac));
existing = llc_lookup_established(llc->sap, &daddr, &laddr);
if (existing) {
- if (existing->state == TCP_ESTABLISHED) {
+ if (existing->sk_state == TCP_ESTABLISHED) {
sk = existing;
goto out_put;
} else
@@ -261,7 +261,7 @@
struct sk_buff *skb;
sock_hold(sk);
- if (sk->type != SOCK_STREAM || sk->state != TCP_ESTABLISHED ||
+ if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_ESTABLISHED ||
llc_sk(sk)->state == LLC_CONN_STATE_ADM ||
llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC)
goto out;
@@ -272,7 +272,7 @@
skb = alloc_skb(0, GFP_ATOMIC);
if (!skb)
goto out;
- sk->state = TCP_CLOSING;
+ sk->sk_state = TCP_CLOSING;
ev = llc_conn_ev(skb);
ev->type = LLC_CONN_EV_TYPE_PRIM;
ev->prim = LLC_DISC_PRIM;
diff -urN linux-2.5.70-bk11/net/llc/llc_mac.c linux-2.5.70-bk12/net/llc/llc_mac.c
--- linux-2.5.70-bk11/net/llc/llc_mac.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/llc_mac.c 2003-06-07 04:47:53.000000000 -0700
@@ -126,7 +126,7 @@
goto drop;
}
- sk = llc_sk_alloc(parent->family, GFP_ATOMIC);
+ sk = llc_sk_alloc(parent->sk_family, GFP_ATOMIC);
if (!sk) {
sock_put(parent);
goto drop;
diff -urN linux-2.5.70-bk11/net/llc/llc_main.c linux-2.5.70-bk12/net/llc/llc_main.c
--- linux-2.5.70-bk11/net/llc/llc_main.c 2003-05-26 18:00:38.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/llc_main.c 2003-06-07 04:47:53.000000000 -0700
@@ -206,7 +206,7 @@
llc->rw = 128; /* rx win size (opt and equal to
* tx_win of remote LLC) */
skb_queue_head_init(&llc->pdu_unack_q);
- sk->backlog_rcv = llc_backlog_rcv;
+ sk->sk_backlog_rcv = llc_backlog_rcv;
llc_sk(sk) = llc;
out:
return rc;
@@ -258,15 +258,15 @@
#ifdef DEBUG_LLC_CONN_ALLOC
printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __FUNCTION__,
skb_queue_len(&llc->pdu_unack_q),
- skb_queue_len(&sk->write_queue));
+ skb_queue_len(&sk->sk_write_queue));
#endif
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&llc->pdu_unack_q);
#ifdef LLC_REFCNT_DEBUG
- if (atomic_read(&sk->refcnt) != 1) {
+ if (atomic_read(&sk->sk_refcnt) != 1) {
printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
- sk, __FUNCTION__, atomic_read(&sk->refcnt));
+ sk, __FUNCTION__, atomic_read(&sk->sk_refcnt));
printk(KERN_DEBUG "%d LLC sockets are still alive\n",
atomic_read(&llc_sock_nr));
} else {
@@ -290,7 +290,7 @@
struct llc_opt *llc = llc_sk(sk);
llc_conn_ac_stop_all_timers(sk, NULL);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&llc->pdu_unack_q);
llc->remote_busy_flag = 0;
llc->cause_flag = 0;
@@ -323,7 +323,7 @@
write_lock_bh(&sap->sk_list.lock);
- for (sk = sap->sk_list.list; sk; sk = sk->next) {
+ for (sk = sap->sk_list.list; sk; sk = sk->sk_next) {
llc_sk(sk)->state = LLC_CONN_STATE_TEMP;
if (llc_send_disc(sk))
diff -urN linux-2.5.70-bk11/net/llc/llc_proc.c linux-2.5.70-bk12/net/llc/llc_proc.c
--- linux-2.5.70-bk11/net/llc/llc_proc.c 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/llc_proc.c 2003-06-07 04:47:53.000000000 -0700
@@ -44,7 +44,7 @@
sap = list_entry(sap_entry, struct llc_sap, node);
read_lock_bh(&sap->sk_list.lock);
- for (sk = sap->sk_list.list; sk; sk = sk->next)
+ for (sk = sap->sk_list.list; sk; sk = sk->sk_next)
if (!pos--) {
if (!sk)
read_unlock_bh(&sap->sk_list.lock);
@@ -76,8 +76,8 @@
goto out;
}
sk = v;
- if (sk->next) {
- sk = sk->next;
+ if (sk->sk_next) {
+ sk = sk->sk_next;
goto out;
}
llc = llc_sk(sk);
@@ -124,7 +124,7 @@
sk = v;
llc = llc_sk(sk);
- seq_printf(seq, "%2X %2X ", sk->type,
+ seq_printf(seq, "%2X %2X ", sk->sk_type,
!llc_mac_null(llc->addr.sllc_mmac));
if (llc->dev && llc_mac_null(llc->addr.sllc_mmac))
@@ -136,8 +136,10 @@
seq_printf(seq, "@%02X ", llc->sap->laddr.lsap);
llc_ui_format_mac(seq, llc->addr.sllc_dmac);
seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->addr.sllc_dsap,
- atomic_read(&sk->wmem_alloc), atomic_read(&sk->rmem_alloc),
- sk->state, sk->socket ? SOCK_INODE(sk->socket)->i_uid : -1,
+ atomic_read(&sk->sk_wmem_alloc),
+ atomic_read(&sk->sk_rmem_alloc),
+ sk->sk_state,
+ sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1,
llc->link);
out:
return 0;
@@ -181,7 +183,7 @@
timer_pending(&llc->pf_cycle_timer.timer),
timer_pending(&llc->rej_sent_timer.timer),
timer_pending(&llc->busy_state_timer.timer),
- !!sk->backlog.tail, sock_owned_by_user(sk));
+ !!sk->sk_backlog.tail, sock_owned_by_user(sk));
out:
return 0;
}
diff -urN linux-2.5.70-bk11/net/llc/llc_sap.c linux-2.5.70-bk12/net/llc/llc_sap.c
--- linux-2.5.70-bk11/net/llc/llc_sap.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/net/llc/llc_sap.c 2003-06-07 04:47:53.000000000 -0700
@@ -34,11 +34,11 @@
{
write_lock_bh(&sap->sk_list.lock);
llc_sk(sk)->sap = sap;
- sk->next = sap->sk_list.list;
- if (sk->next)
- sap->sk_list.list->pprev = &sk->next;
+ sk->sk_next = sap->sk_list.list;
+ if (sk->sk_next)
+ sap->sk_list.list->sk_pprev = &sk->sk_next;
sap->sk_list.list = sk;
- sk->pprev = &sap->sk_list.list;
+ sk->sk_pprev = &sap->sk_list.list;
sock_hold(sk);
write_unlock_bh(&sap->sk_list.lock);
}
@@ -53,14 +53,14 @@
void llc_sap_unassign_sock(struct llc_sap *sap, struct sock *sk)
{
write_lock_bh(&sap->sk_list.lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
/*
* This only makes sense if the socket was inserted on the
- * list, if sk->pprev is NULL it wasn't
+ * list, if sk->sk_pprev is NULL it wasn't
*/
sock_put(sk);
}
@@ -195,7 +195,7 @@
ev->ind_cfm_flag = 0;
llc_sap_next_state(sap, skb);
if (ev->ind_cfm_flag == LLC_IND) {
- if (skb->sk->state == TCP_LISTEN)
+ if (skb->sk->sk_state == TCP_LISTEN)
kfree_skb(skb);
else {
llc_save_primitive(skb, ev->prim);
diff -urN linux-2.5.70-bk11/net/netlink/af_netlink.c linux-2.5.70-bk12/net/netlink/af_netlink.c
--- linux-2.5.70-bk11/net/netlink/af_netlink.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/netlink/af_netlink.c 2003-06-07 04:47:53.000000000 -0700
@@ -66,7 +66,7 @@
void (*data_ready)(struct sock *sk, int bytes);
};
-#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->protinfo)
+#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)
static struct sock *nl_table[MAX_LINKS];
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
@@ -88,14 +88,14 @@
static void netlink_sock_destruct(struct sock *sk)
{
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Freeing alive netlink socket %p\n", sk);
return;
}
- BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
- BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
BUG_TRAP(!nlk_sk(sk)->cb);
kfree(nlk_sk(sk));
@@ -162,7 +162,7 @@
struct sock *sk;
read_lock(&nl_table_lock);
- for (sk=nl_table[protocol]; sk; sk=sk->next) {
+ for (sk = nl_table[protocol]; sk; sk = sk->sk_next) {
if (nlk_sk(sk)->pid == pid) {
sock_hold(sk);
read_unlock(&nl_table_lock);
@@ -182,7 +182,7 @@
struct sock *osk;
netlink_table_grab();
- for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
+ for (osk = nl_table[sk->sk_protocol]; osk; osk = osk->sk_next) {
if (nlk_sk(osk)->pid == pid)
break;
}
@@ -190,8 +190,8 @@
err = -EBUSY;
if (nlk_sk(sk)->pid == 0) {
nlk_sk(sk)->pid = pid;
- sk->next = nl_table[sk->protocol];
- nl_table[sk->protocol] = sk;
+ sk->sk_next = nl_table[sk->sk_protocol];
+ nl_table[sk->sk_protocol] = sk;
sock_hold(sk);
err = 0;
}
@@ -205,9 +205,9 @@
struct sock **skp;
netlink_table_grab();
- for (skp = &nl_table[sk->protocol]; *skp; skp = &((*skp)->next)) {
+ for (skp = &nl_table[sk->sk_protocol]; *skp; skp = &((*skp)->sk_next)) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -246,10 +246,10 @@
spin_lock_init(&nlk->cb_lock);
init_waitqueue_head(&nlk->wait);
- sk->destruct = netlink_sock_destruct;
+ sk->sk_destruct = netlink_sock_destruct;
atomic_inc(&netlink_sock_nr);
- sk->protocol=protocol;
+ sk->sk_protocol = protocol;
return 0;
}
@@ -280,11 +280,13 @@
sock->sk = NULL;
wake_up_interruptible_all(&nlk->wait);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
if (nlk->pid && !nlk->groups) {
- struct netlink_notify n = { .protocol = sk->protocol,
- .pid = nlk->pid };
+ struct netlink_notify n = {
+ .protocol = sk->sk_protocol,
+ .pid = nlk->pid,
+ };
notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
}
@@ -301,7 +303,7 @@
retry:
netlink_table_grab();
- for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
+ for (osk = nl_table[sk->sk_protocol]; osk; osk = osk->sk_next) {
if (nlk_sk(osk)->pid == pid) {
/* Bind collision, search negative pid values. */
if (pid > 0)
@@ -322,7 +324,8 @@
static inline int netlink_capable(struct socket *sock, unsigned flag)
{
- return (nl_nonroot[sock->sk->protocol] & flag) || capable(CAP_NET_ADMIN);
+ return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
+ capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
@@ -412,8 +415,8 @@
static void netlink_overrun(struct sock *sk)
{
if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
- sk->err = ENOBUFS;
- sk->error_report(sk);
+ sk->sk_err = ENOBUFS;
+ sk->sk_error_report(sk);
}
}
@@ -422,7 +425,7 @@
struct sock *sk;
struct netlink_opt *nlk;
int len = skb->len;
- int protocol = ssk->protocol;
+ int protocol = ssk->sk_protocol;
long timeo;
DECLARE_WAITQUEUE(wait, current);
@@ -443,7 +446,7 @@
}
#endif
- if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) {
if (!timeo) {
if (!nlk->pid)
@@ -456,7 +459,7 @@
__set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&nlk->wait, &wait);
- if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
timeo = schedule_timeout(timeo);
@@ -474,8 +477,8 @@
skb_orphan(skb);
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, len);
sock_put(sk);
return len;
@@ -490,16 +493,16 @@
#ifdef NL_EMULATE_DEV
if (nlk->handler) {
skb_orphan(skb);
- nlk->handler(sk->protocol, skb);
+ nlk->handler(sk->sk_protocol, skb);
return 0;
} else
#endif
- if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf &&
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!test_bit(0, &nlk->state)) {
skb_orphan(skb);
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, skb->len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
return 0;
}
return -1;
@@ -510,14 +513,14 @@
{
struct sock *sk;
struct sk_buff *skb2 = NULL;
- int protocol = ssk->protocol;
+ int protocol = ssk->sk_protocol;
int failure = 0, delivered = 0;
/* While we sleep in clone, do not allow to change socket list */
netlink_lock_table();
- for (sk = nl_table[protocol]; sk; sk = sk->next) {
+ for (sk = nl_table[protocol]; sk; sk = sk->sk_next) {
struct netlink_opt *nlk = nlk_sk(sk);
if (ssk == sk)
@@ -569,10 +572,10 @@
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
struct sock *sk;
- int protocol = ssk->protocol;
+ int protocol = ssk->sk_protocol;
read_lock(&nl_table_lock);
- for (sk = nl_table[protocol]; sk; sk = sk->next) {
+ for (sk = nl_table[protocol]; sk; sk = sk->sk_next) {
struct netlink_opt *nlk = nlk_sk(sk);
if (ssk == sk)
continue;
@@ -580,8 +583,8 @@
if (nlk->pid == pid || !(nlk->groups & group))
continue;
- sk->err = code;
- sk->error_report(sk);
+ sk->sk_err = code;
+ sk->sk_error_report(sk);
}
read_unlock(&nl_table_lock);
}
@@ -590,7 +593,7 @@
{
struct netlink_opt *nlk = nlk_sk(sk);
- if (skb_queue_len(&sk->receive_queue) == 0)
+ if (!skb_queue_len(&sk->sk_receive_queue))
clear_bit(0, &nlk->state);
if (!test_bit(0, &nlk->state))
wake_up_interruptible(&nlk->wait);
@@ -637,7 +640,7 @@
}
err = -EMSGSIZE;
- if ((unsigned)len > sk->sndbuf-32)
+ if ((unsigned)len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
skb = alloc_skb(len, GFP_KERNEL);
@@ -726,7 +729,7 @@
siocb->scm->creds = *NETLINK_CREDS(skb);
skb_free_datagram(sk, skb);
- if (nlk->cb && atomic_read(&sk->rmem_alloc) <= sk->rcvbuf / 2)
+ if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
netlink_dump(sk);
scm_recv(sock, msg, siocb->scm, flags);
@@ -770,7 +773,7 @@
return NULL;
}
sk = sock->sk;
- sk->data_ready = netlink_data_ready;
+ sk->sk_data_ready = netlink_data_ready;
if (input)
nlk_sk(sk)->data_ready = input;
@@ -821,16 +824,16 @@
if (len > 0) {
spin_unlock(&nlk->cb_lock);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, len);
return 0;
}
nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
nlh->nlmsg_flags |= NLM_F_MULTI;
memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, skb->len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
cb->done(cb);
nlk->cb = NULL;
@@ -861,7 +864,7 @@
atomic_inc(&skb->users);
cb->skb = skb;
- sk = netlink_lookup(ssk->protocol, NETLINK_CB(skb).pid);
+ sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
if (sk == NULL) {
netlink_destroy_callback(cb);
return -ECONNREFUSED;
@@ -922,7 +925,7 @@
return -ENOBUFS;
nlk_sk(sk)->handler = function;
write_lock_bh(&nl_emu_lock);
- netlink_kernel[unit] = sk->socket;
+ netlink_kernel[unit] = sk->sk_socket;
write_unlock_bh(&nl_emu_lock);
return 0;
}
@@ -978,18 +981,18 @@
for (i=0; i<MAX_LINKS; i++) {
- for (s = nl_table[i]; s; s = s->next) {
+ for (s = nl_table[i]; s; s = s->sk_next) {
struct netlink_opt *nlk = nlk_sk(s);
len+=sprintf(buffer+len,"%p %-3d %-6d %08x %-8d %-8d %p %d",
s,
- s->protocol,
+ s->sk_protocol,
nlk->pid,
nlk->groups,
- atomic_read(&s->rmem_alloc),
- atomic_read(&s->wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ atomic_read(&s->sk_wmem_alloc),
nlk->cb,
- atomic_read(&s->refcnt)
+ atomic_read(&s->sk_refcnt)
);
buffer[len++]='\n';
diff -urN linux-2.5.70-bk11/net/netrom/af_netrom.c linux-2.5.70-bk12/net/netrom/af_netrom.c
--- linux-2.5.70-bk11/net/netrom/af_netrom.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/netrom/af_netrom.c 2003-06-07 04:47:53.000000000 -0700
@@ -94,19 +94,19 @@
spin_lock_bh(&nr_list_lock);
if ((s = nr_list) == sk) {
- nr_list = s->next;
+ nr_list = s->sk_next;
spin_unlock_bh(&nr_list_lock);
return;
}
- while (s != NULL && s->next != NULL) {
- if (s->next == sk) {
- s->next = sk->next;
+ while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
spin_unlock_bh(&nr_list_lock);
return;
}
- s = s->next;
+ s = s->sk_next;
}
spin_unlock_bh(&nr_list_lock);
@@ -120,7 +120,7 @@
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
if (nr_sk(s)->device == dev)
nr_disconnect(s, ENETUNREACH);
}
@@ -149,7 +149,7 @@
static void nr_insert_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
- sk->next = nr_list;
+ sk->sk_next = nr_list;
nr_list = sk;
spin_unlock_bh(&nr_list_lock);
}
@@ -163,9 +163,9 @@
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
- s->state == TCP_LISTEN) {
+ s->sk_state == TCP_LISTEN) {
spin_unlock_bh(&nr_list_lock);
return s;
}
@@ -183,7 +183,7 @@
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
nr_cb *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
@@ -205,7 +205,7 @@
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
nr_cb *nr = nr_sk(s);
if (nr->your_index == index && nr->your_id == id &&
@@ -274,7 +274,7 @@
nr_clear_queues(sk); /* Flush the queues */
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
@@ -285,13 +285,14 @@
kfree_skb(skb);
}
- if (atomic_read(&sk->wmem_alloc) != 0 || atomic_read(&sk->rmem_alloc) != 0) {
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + 10 * HZ;
- sk->timer.function = nr_destroy_timer;
- sk->timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ sk->sk_timer.function = nr_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
} else
sk_free(sk);
}
@@ -407,10 +408,10 @@
{
struct sock *sk = sock->sk;
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
return 0;
}
@@ -433,7 +434,7 @@
sock_init_data(sock, sk);
sock->ops = &nr_proto_ops;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
@@ -462,7 +463,7 @@
struct sock *sk;
nr_cb *nr, *onr;
- if (osk->type != SOCK_SEQPACKET)
+ if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
if ((sk = nr_alloc_sock()) == NULL)
@@ -472,16 +473,16 @@
sock_init_data(NULL, sk);
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
@@ -533,16 +534,16 @@
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr->state = NR_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
- sk->socket = NULL;
+ sk->sk_socket = NULL;
break;
default:
- sk->socket = NULL;
+ sk->sk_socket = NULL;
break;
}
@@ -559,7 +560,7 @@
struct net_device *dev;
ax25_address *user, *source;
- if (sk->zapped == 0)
+ if (!sk->sk_zapped)
return -EINVAL;
if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct
@@ -601,7 +602,7 @@
nr->device = dev;
nr_insert_socket(sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
SOCK_DEBUG(sk, "NET/ROM: socket is bound\n");
return 0;
}
@@ -615,20 +616,20 @@
ax25_address *user, *source = NULL;
struct net_device *dev;
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
return 0; /* Connect completed during a ERESTARTSYS event */
}
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25))
@@ -637,8 +638,8 @@
if (addr->sax25_family != AF_NETROM)
return -EINVAL;
- if (sk->zapped) { /* Must bind first - autobinding in this may or may not work */
- sk->zapped = 0;
+ if (sk->sk_zapped) { /* Must bind first - autobinding in this may or may not work */
+ sk->sk_zapped = 0;
if ((dev = nr_dev_first()) == NULL)
return -ENETUNREACH;
@@ -668,8 +669,8 @@
circuit++;
/* Move to connecting socket, start sending Connect Requests */
- sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sock->state = SS_CONNECTING;
+ sk->sk_state = TCP_SYN_SENT;
nr_establish_data_link(sk);
@@ -678,21 +679,21 @@
nr_start_heartbeat(sk);
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return -EINPROGRESS;
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
- if (sk->state == TCP_SYN_SENT) {
+ if (sk->sk_state == TCP_SYN_SENT) {
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (sk->state != TCP_SYN_SENT)
+ if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(tsk)) {
schedule();
@@ -701,10 +702,10 @@
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
}
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
return sock_error(sk); /* Always set at this point */
}
@@ -727,12 +728,12 @@
return -EINVAL;
lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
@@ -741,9 +742,9 @@
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -759,16 +760,16 @@
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
out:
@@ -783,7 +784,7 @@
nr_cb *nr = nr_sk(sk);
if (peer != 0) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 1;
@@ -893,7 +894,8 @@
user = (ax25_address *)(skb->data + 21);
- if (sk == NULL || sk->ack_backlog == sk->max_ack_backlog || (make = nr_make_new(sk)) == NULL) {
+ if (!sk || sk->sk_ack_backlog == sk->sk_max_ack_backlog ||
+ (make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
return 0;
}
@@ -901,7 +903,7 @@
window = skb->data[20];
skb->sk = make;
- make->state = TCP_ESTABLISHED;
+ make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
nr_make = nr_sk(make);
@@ -941,18 +943,18 @@
nr_make->vr = 0;
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
- sk->ack_backlog++;
- make->pair = sk;
+ sk->sk_ack_backlog++;
+ make->sk_pair = sk;
nr_insert_socket(make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
nr_start_heartbeat(make);
nr_start_idletimer(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
return 1;
}
@@ -972,10 +974,10 @@
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR))
return -EINVAL;
- if (sk->zapped)
+ if (sk->sk_zapped)
return -EADDRNOTAVAIL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
@@ -992,7 +994,7 @@
if (sax.sax25_family != AF_NETROM)
return -EINVAL;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sax.sax25_family = AF_NETROM;
sax.sax25_call = nr->dest_addr;
@@ -1038,7 +1040,7 @@
memcpy_fromiovec(asmptr, msg->msg_iov, len);
SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n");
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
@@ -1062,7 +1064,7 @@
* us! We do one quick check first though
*/
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
@@ -1099,7 +1101,7 @@
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
return put_user(amount, (int *)arg);
@@ -1109,16 +1111,16 @@
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (int *)arg);
}
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- return copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &sk->sk_stamp, sizeof(struct timeval)) ? -EFAULT : 0;
}
return -EINVAL;
@@ -1160,7 +1162,7 @@
len += sprintf(buffer, "user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n");
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
nr_cb *nr = nr_sk(s);
if ((dev = nr->device) == NULL)
@@ -1194,9 +1196,9 @@
nr->n2count,
nr->n2,
nr->window,
- atomic_read(&s->wmem_alloc),
- atomic_read(&s->rmem_alloc),
- s->socket != NULL ? SOCK_INODE(s->socket)->i_ino : 0L);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
pos = begin + len;
diff -urN linux-2.5.70-bk11/net/netrom/nr_in.c linux-2.5.70-bk12/net/netrom/nr_in.c
--- linux-2.5.70-bk11/net/netrom/nr_in.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/netrom/nr_in.c 2003-06-07 04:47:53.000000000 -0700
@@ -89,9 +89,9 @@
nr->state = NR_STATE_3;
nr->n2count = 0;
nr->window = skb->data[20];
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
break;
}
diff -urN linux-2.5.70-bk11/net/netrom/nr_out.c linux-2.5.70-bk12/net/netrom/nr_out.c
--- linux-2.5.70-bk11/net/netrom/nr_out.c 2003-05-26 18:00:40.000000000 -0700
+++ linux-2.5.70-bk12/net/netrom/nr_out.c 2003-06-07 04:47:53.000000000 -0700
@@ -65,12 +65,12 @@
if (skb->len > 0)
skbn->data[4] |= NR_MORE_FLAG;
- skb_queue_tail(&sk->write_queue, skbn); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
kfree_skb(skb);
} else {
- skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
nr_kick(sk);
@@ -135,7 +135,7 @@
if (nr->condition & NR_COND_PEER_RX_BUSY)
return;
- if (skb_peek(&sk->write_queue) == NULL)
+ if (!skb_peek(&sk->sk_write_queue))
return;
start = (skb_peek(&nr->ack_queue) == NULL) ? nr->va : nr->vs;
@@ -154,11 +154,11 @@
/*
* Dequeue the frame and copy it.
*/
- skb = skb_dequeue(&sk->write_queue);
+ skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
break;
}
@@ -176,7 +176,8 @@
*/
skb_queue_tail(&nr->ack_queue, skb);
- } while (nr->vs != end && (skb = skb_dequeue(&sk->write_queue)) != NULL);
+ } while (nr->vs != end &&
+ (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
nr->vl = nr->vr;
nr->condition &= ~NR_COND_ACK_PENDING;
diff -urN linux-2.5.70-bk11/net/netrom/nr_subr.c linux-2.5.70-bk12/net/netrom/nr_subr.c
--- linux-2.5.70-bk11/net/netrom/nr_subr.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/netrom/nr_subr.c 2003-06-07 04:47:53.000000000 -0700
@@ -36,7 +36,7 @@
{
nr_cb *nr = nr_sk(sk);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&nr->ack_queue);
skb_queue_purge(&nr->reseq_queue);
skb_queue_purge(&nr->frag_queue);
@@ -75,7 +75,7 @@
while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb);
skb_prev = skb;
@@ -272,12 +272,12 @@
nr_sk(sk)->state = NR_STATE_0;
- sk->state = TCP_CLOSE;
- sk->err = reason;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = reason;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
diff -urN linux-2.5.70-bk11/net/netrom/nr_timer.c linux-2.5.70-bk12/net/netrom/nr_timer.c
--- linux-2.5.70-bk11/net/netrom/nr_timer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/netrom/nr_timer.c 2003-06-07 04:47:53.000000000 -0700
@@ -92,13 +92,13 @@
void nr_start_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.function = &nr_heartbeat_expiry;
- sk->timer.expires = jiffies + 5 * HZ;
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.function = &nr_heartbeat_expiry;
+ sk->sk_timer.expires = jiffies + 5 * HZ;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void nr_stop_t1timer(struct sock *sk)
@@ -123,7 +123,7 @@
void nr_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
int nr_t1timer_running(struct sock *sk)
@@ -142,7 +142,7 @@
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
- (sk->state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
nr_destroy_socket(sk);
return;
}
@@ -152,7 +152,7 @@
/*
* Check for the state of the receive buffer.
*/
- if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
+ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(nr->condition & NR_COND_OWN_RX_BUSY)) {
nr->condition &= ~NR_COND_OWN_RX_BUSY;
nr->condition &= ~NR_COND_ACK_PENDING;
@@ -206,12 +206,12 @@
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
- sk->state = TCP_CLOSE;
- sk->err = 0;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = 0;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
diff -urN linux-2.5.70-bk11/net/packet/af_packet.c linux-2.5.70-bk12/net/packet/af_packet.c
--- linux-2.5.70-bk11/net/packet/af_packet.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/packet/af_packet.c 2003-06-07 04:47:53.000000000 -0700
@@ -191,12 +191,12 @@
#endif
};
-#define pkt_sk(__sk) ((struct packet_opt *)(__sk)->protinfo)
+#define pkt_sk(__sk) ((struct packet_opt *)(__sk)->sk_protinfo)
void packet_sock_destruct(struct sock *sk)
{
- BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
- BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive packet socket: %p\n", sk);
@@ -356,7 +356,7 @@
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
if (err)
goto out_free;
@@ -418,7 +418,7 @@
structure, so that corresponding packet head
never delivered to user.
*/
- if (sk->type != SOCK_DGRAM)
+ if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb->mac.raw);
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
@@ -428,13 +428,14 @@
snaplen = skb->len;
- if (sk->filter) {
+ if (sk->sk_filter) {
unsigned res = snaplen;
struct sk_filter *filter;
bh_lock_sock(sk);
- if ((filter = sk->filter) != NULL)
- res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
+ if ((filter = sk->sk_filter) != NULL)
+ res = sk_run_filter(skb, sk->sk_filter->insns,
+ sk->sk_filter->len);
bh_unlock_sock(sk);
if (res == 0)
@@ -443,7 +444,8 @@
snaplen = res;
}
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
@@ -475,17 +477,17 @@
skb_set_owner_r(skb, sk);
skb->dev = NULL;
- spin_lock(&sk->receive_queue.lock);
+ spin_lock(&sk->sk_receive_queue.lock);
po->stats.tp_packets++;
- __skb_queue_tail(&sk->receive_queue, skb);
- spin_unlock(&sk->receive_queue.lock);
- sk->data_ready(sk,skb->len);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk, skb->len);
return 0;
drop_n_acct:
- spin_lock(&sk->receive_queue.lock);
+ spin_lock(&sk->sk_receive_queue.lock);
po->stats.tp_drops++;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
@@ -518,7 +520,7 @@
po = pkt_sk(sk);
if (dev->hard_header) {
- if (sk->type != SOCK_DGRAM)
+ if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb->mac.raw);
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
@@ -530,13 +532,14 @@
snaplen = skb->len;
- if (sk->filter) {
+ if (sk->sk_filter) {
unsigned res = snaplen;
struct sk_filter *filter;
bh_lock_sock(sk);
- if ((filter = sk->filter) != NULL)
- res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
+ if ((filter = sk->sk_filter) != NULL)
+ res = sk_run_filter(skb, sk->sk_filter->insns,
+ sk->sk_filter->len);
bh_unlock_sock(sk);
if (res == 0)
@@ -545,7 +548,7 @@
snaplen = res;
}
- if (sk->type == SOCK_DGRAM) {
+ if (sk->sk_type == SOCK_DGRAM) {
macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
} else {
unsigned maclen = skb->nh.raw - skb->data;
@@ -555,7 +558,8 @@
if (macoff + snaplen > po->frame_size) {
if (po->copy_thresh &&
- atomic_read(&sk->rmem_alloc) + skb->truesize < (unsigned)sk->rcvbuf) {
+ atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
+ (unsigned)sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
@@ -572,7 +576,7 @@
if (snaplen > skb->len-skb->data_len)
snaplen = skb->len-skb->data_len;
- spin_lock(&sk->receive_queue.lock);
+ spin_lock(&sk->sk_receive_queue.lock);
h = po->iovec[po->head];
if (h->tp_status)
@@ -581,11 +585,11 @@
po->stats.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
- __skb_queue_tail(&sk->receive_queue, copy_skb);
+ __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
}
if (!po->stats.tp_drops)
status &= ~TP_STATUS_LOSING;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
memcpy((u8*)h + macoff, skb->data, snaplen);
@@ -621,7 +625,7 @@
}
}
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
@@ -634,9 +638,9 @@
ring_is_full:
po->stats.tp_drops++;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
if (copy_skb)
kfree_skb(copy_skb);
goto drop_n_restore;
@@ -713,7 +717,7 @@
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
@@ -755,9 +759,9 @@
return 0;
write_lock_bh(&packet_sklist_lock);
- for (skp = &packet_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &packet_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -799,7 +803,7 @@
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
sock_put(sk);
return 0;
@@ -843,9 +847,9 @@
sock_hold(sk);
po->running = 1;
} else {
- sk->err = ENETDOWN;
+ sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->error_report(sk);
+ sk->sk_error_report(sk);
}
} else {
dev_add_pack(&po->prot_hook);
@@ -959,10 +963,10 @@
if (!po)
goto out_free;
memset(po, 0, sizeof(*po));
- sk->family = PF_PACKET;
+ sk->sk_family = PF_PACKET;
po->num = protocol;
- sk->destruct = packet_sock_destruct;
+ sk->sk_destruct = packet_sock_destruct;
atomic_inc(&packet_socks_nr);
/*
@@ -985,7 +989,7 @@
}
write_lock_bh(&packet_sklist_lock);
- sk->next = packet_sklist;
+ sk->sk_next = packet_sklist;
packet_sklist = sk;
sock_hold(sk);
write_unlock_bh(&packet_sklist_lock);
@@ -1342,10 +1346,10 @@
if (len > sizeof(struct tpacket_stats))
len = sizeof(struct tpacket_stats);
- spin_lock_bh(&sk->receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
st = po->stats;
memset(&po->stats, 0, sizeof(st));
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
st.tp_packets += st.tp_drops;
if (copy_to_user(optval, &st, len))
@@ -1368,7 +1372,7 @@
struct net_device *dev = (struct net_device*)data;
read_lock(&packet_sklist_lock);
- for (sk = packet_sklist; sk; sk = sk->next) {
+ for (sk = packet_sklist; sk; sk = sk->sk_next) {
struct packet_opt *po = pkt_sk(sk);
switch (msg) {
@@ -1380,9 +1384,9 @@
__dev_remove_pack(&po->prot_hook);
__sock_put(sk);
po->running = 0;
- sk->err = ENETDOWN;
+ sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->error_report(sk);
+ sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
po->ifindex = -1;
@@ -1424,7 +1428,7 @@
switch(cmd) {
case SIOCOUTQ:
{
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
case SIOCINQ:
@@ -1432,17 +1436,17 @@
struct sk_buff *skb;
int amount = 0;
- spin_lock_bh(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len;
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
case SIOCGSTAMP:
- if (sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- if (copy_to_user((void *)arg, &sk->stamp,
+ if (copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
return -EFAULT;
break;
@@ -1482,14 +1486,14 @@
struct packet_opt *po = pkt_sk(sk);
unsigned int mask = datagram_poll(file, sock, wait);
- spin_lock_bh(&sk->receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->iovec) {
unsigned last = po->head ? po->head-1 : po->iovmax;
if (po->iovec[last]->tp_status)
mask |= POLLIN | POLLRDNORM;
}
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return mask;
}
@@ -1635,20 +1639,20 @@
err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
- spin_lock_bh(&sk->receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
pg_vec = XC(po->pg_vec, pg_vec);
io_vec = XC(po->iovec, io_vec);
po->iovmax = req->tp_frame_nr-1;
po->head = 0;
po->frame_size = req->tp_frame_size;
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
order = XC(po->pg_vec_order, order);
req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);
po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = po->iovec ? tpacket_rcv : packet_rcv;
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
#undef XC
if (atomic_read(&po->mapped))
printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
@@ -1778,17 +1782,17 @@
read_lock(&packet_sklist_lock);
- for (s = packet_sklist; s; s = s->next) {
+ for (s = packet_sklist; s; s = s->sk_next) {
struct packet_opt *po = pkt_sk(s);
len+=sprintf(buffer+len,"%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu",
s,
- atomic_read(&s->refcnt),
- s->type,
+ atomic_read(&s->sk_refcnt),
+ s->sk_type,
ntohs(po->num),
po->ifindex,
po->running,
- atomic_read(&s->rmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
sock_i_uid(s),
sock_i_ino(s)
);
diff -urN linux-2.5.70-bk11/net/rose/af_rose.c linux-2.5.70-bk12/net/rose/af_rose.c
--- linux-2.5.70-bk11/net/rose/af_rose.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/rose/af_rose.c 2003-06-07 04:47:53.000000000 -0700
@@ -155,19 +155,19 @@
spin_lock_bh(&rose_list_lock);
if ((s = rose_list) == sk) {
- rose_list = s->next;
+ rose_list = s->sk_next;
spin_unlock_bh(&rose_list_lock);
return;
}
- while (s != NULL && s->next != NULL) {
- if (s->next == sk) {
- s->next = sk->next;
+ while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
spin_unlock_bh(&rose_list_lock);
return;
}
- s = s->next;
+ s = s->sk_next;
}
spin_unlock_bh(&rose_list_lock);
}
@@ -181,7 +181,7 @@
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (rose->neighbour == neigh) {
@@ -201,7 +201,7 @@
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (rose->device == dev) {
@@ -244,7 +244,7 @@
{
spin_lock_bh(&rose_list_lock);
- sk->next = rose_list;
+ sk->sk_next = rose_list;
rose_list = sk;
spin_unlock_bh(&rose_list_lock);
}
@@ -258,23 +258,23 @@
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, call) &&
- !rose->source_ndigis && s->state == TCP_LISTEN) {
+ !rose->source_ndigis && s->sk_state == TCP_LISTEN) {
spin_unlock_bh(&rose_list_lock);
return s;
}
}
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, &null_ax25_address) &&
- s->state == TCP_LISTEN) {
+ s->sk_state == TCP_LISTEN) {
spin_unlock_bh(&rose_list_lock);
return s;
}
@@ -292,7 +292,7 @@
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (rose->lci == lci && rose->neighbour == neigh) {
@@ -355,7 +355,7 @@
rose_clear_queues(sk); /* Flush the queues */
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
@@ -366,13 +366,14 @@
kfree_skb(skb);
}
- if (atomic_read(&sk->wmem_alloc) != 0 || atomic_read(&sk->rmem_alloc) != 0) {
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + 10 * HZ;
- sk->timer.function = rose_destroy_timer;
- sk->timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ sk->sk_timer.function = rose_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
} else
sk_free(sk);
}
@@ -504,15 +505,15 @@
{
struct sock *sk = sock->sk;
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
rose_cb *rose = rose_sk(sk);
rose->dest_ndigis = 0;
memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
memset(&rose->dest_call, 0, AX25_ADDR_LEN);
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
return 0;
}
@@ -541,7 +542,7 @@
#endif
sock->ops = &rose_proto_ops;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
@@ -562,7 +563,7 @@
struct sock *sk;
rose_cb *rose, *orose;
- if (osk->type != SOCK_SEQPACKET)
+ if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
if ((sk = rose_alloc_sock()) == NULL)
@@ -578,16 +579,16 @@
rose->fraglen = 0;
#endif
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
@@ -635,9 +636,9 @@
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_start_t3timer(sk);
rose->state = ROSE_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
@@ -647,7 +648,7 @@
}
sock->sk = NULL;
- sk->socket = NULL; /* Not used, but we should do this. **/
+ sk->sk_socket = NULL; /* Not used, but we should do this. **/
return 0;
}
@@ -661,7 +662,7 @@
ax25_address *user, *source;
int n;
- if (sk->zapped == 0)
+ if (!sk->sk_zapped)
return -EINVAL;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
@@ -706,7 +707,7 @@
rose_insert_socket(sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
SOCK_DEBUG(sk, "ROSE: socket is bound\n");
return 0;
}
@@ -721,20 +722,20 @@
struct net_device *dev;
int n;
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
return 0; /* Connect completed during a ERESTARTSYS event */
}
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
@@ -762,8 +763,8 @@
if (!rose->lci)
return -ENETUNREACH;
- if (sk->zapped) { /* Must bind first - autobinding in this may or may not work */
- sk->zapped = 0;
+ if (sk->sk_zapped) { /* Must bind first - autobinding in this may or may not work */
+ sk->sk_zapped = 0;
if ((dev = rose_dev_first()) == NULL)
return -ENETUNREACH;
@@ -795,7 +796,7 @@
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
rose->state = ROSE_STATE_1;
@@ -806,21 +807,21 @@
rose_start_t1timer(sk);
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return -EINPROGRESS;
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
- if (sk->state == TCP_SYN_SENT) {
+ if (sk->sk_state == TCP_SYN_SENT) {
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (sk->state != TCP_SYN_SENT)
+ if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(tsk)) {
schedule();
@@ -829,10 +830,10 @@
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
}
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
return sock_error(sk); /* Always set at this point */
}
@@ -855,12 +856,12 @@
return -EINVAL;
lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
@@ -869,9 +870,9 @@
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -887,17 +888,17 @@
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
out:
@@ -915,7 +916,7 @@
int n;
if (peer != 0) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
@@ -963,13 +964,14 @@
/*
* We can't accept the Call Request.
*/
- if (sk == NULL || sk->ack_backlog == sk->max_ack_backlog || (make = rose_make_new(sk)) == NULL) {
+ if (!sk || sk->sk_ack_backlog == sk->sk_max_ack_backlog ||
+ (make = rose_make_new(sk)) == NULL) {
rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
return 0;
}
skb->sk = make;
- make->state = TCP_ESTABLISHED;
+ make->sk_state = TCP_ESTABLISHED;
make_rose = rose_sk(make);
make_rose->lci = lci;
@@ -1002,17 +1004,17 @@
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
- sk->ack_backlog++;
- make->pair = sk;
+ sk->sk_ack_backlog++;
+ make->sk_pair = sk;
rose_insert_socket(make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
rose_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
return 1;
}
@@ -1032,10 +1034,10 @@
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR))
return -EINVAL;
- if (sk->zapped)
+ if (sk->sk_zapped)
return -EADDRNOTAVAIL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
@@ -1062,7 +1064,7 @@
if (srose.srose_family != AF_ROSE)
return -EINVAL;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose.srose_family = AF_ROSE;
@@ -1121,7 +1123,7 @@
SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
@@ -1163,16 +1165,16 @@
if (skb->len > 0)
skbn->data[2] |= M_BIT;
- skb_queue_tail(&sk->write_queue, skbn); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
skb->free = 1;
kfree_skb(skb, FREE_WRITE);
} else {
- skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
#else
- skb_queue_tail(&sk->write_queue, skb); /* Shove it onto the queue */
+ skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif
rose_kick(sk);
@@ -1196,7 +1198,7 @@
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
@@ -1255,7 +1257,7 @@
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
return put_user(amount, (unsigned int *)arg);
@@ -1265,16 +1267,17 @@
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (unsigned int *)arg);
}
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- return copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)) ? -EFAULT : 0;
}
return -EINVAL;
@@ -1359,7 +1362,7 @@
len += sprintf(buffer, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if ((dev = rose->device) == NULL)
@@ -1393,9 +1396,9 @@
rose->hb / HZ,
ax25_display_timer(&rose->idletimer) / (60 * HZ),
rose->idle / (60 * HZ),
- atomic_read(&s->wmem_alloc),
- atomic_read(&s->rmem_alloc),
- s->socket != NULL ? SOCK_INODE(s->socket)->i_ino : 0L);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
pos = begin + len;
diff -urN linux-2.5.70-bk11/net/rose/rose_in.c linux-2.5.70-bk12/net/rose/rose_in.c
--- linux-2.5.70-bk11/net/rose/rose_in.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/rose/rose_in.c 2003-06-07 04:47:53.000000000 -0700
@@ -53,9 +53,9 @@
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
break;
case ROSE_CLEAR_REQUEST:
@@ -183,7 +183,8 @@
rose_stop_idletimer(sk);
break;
}
- if (atomic_read(&sk->rmem_alloc) > (sk->rcvbuf / 2))
+ if (atomic_read(&sk->sk_rmem_alloc) >
+ (sk->sk_rcvbuf / 2))
rose->condition |= ROSE_COND_OWN_RX_BUSY;
}
/*
diff -urN linux-2.5.70-bk11/net/rose/rose_out.c linux-2.5.70-bk12/net/rose/rose_out.c
--- linux-2.5.70-bk11/net/rose/rose_out.c 2003-05-26 18:01:03.000000000 -0700
+++ linux-2.5.70-bk12/net/rose/rose_out.c 2003-06-07 04:47:53.000000000 -0700
@@ -58,7 +58,7 @@
if (rose->condition & ROSE_COND_PEER_RX_BUSY)
return;
- if (skb_peek(&sk->write_queue) == NULL)
+ if (!skb_peek(&sk->sk_write_queue))
return;
start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;
@@ -74,11 +74,11 @@
* the window is full.
*/
- skb = skb_dequeue(&sk->write_queue);
+ skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
break;
}
@@ -96,7 +96,8 @@
*/
skb_queue_tail(&rose->ack_queue, skb);
- } while (rose->vs != end && (skb = skb_dequeue(&sk->write_queue)) != NULL);
+ } while (rose->vs != end &&
+ (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
rose->vl = rose->vr;
rose->condition &= ~ROSE_COND_ACK_PENDING;
diff -urN linux-2.5.70-bk11/net/rose/rose_route.c linux-2.5.70-bk12/net/rose/rose_route.c
--- linux-2.5.70-bk11/net/rose/rose_route.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/rose/rose_route.c 2003-06-07 04:47:53.000000000 -0700
@@ -907,11 +907,11 @@
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
- sk->state = TCP_CLOSE;
- sk->err = 0;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = 0;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
diff -urN linux-2.5.70-bk11/net/rose/rose_subr.c linux-2.5.70-bk12/net/rose/rose_subr.c
--- linux-2.5.70-bk11/net/rose/rose_subr.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/rose/rose_subr.c 2003-06-07 04:47:53.000000000 -0700
@@ -33,7 +33,7 @@
*/
void rose_clear_queues(struct sock *sk)
{
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&rose_sk(sk)->ack_queue);
}
@@ -70,7 +70,7 @@
*/
while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb);
skb_prev = skb;
@@ -506,12 +506,12 @@
if (diagnostic != -1)
rose->diagnostic = diagnostic;
- sk->state = TCP_CLOSE;
- sk->err = reason;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = reason;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
diff -urN linux-2.5.70-bk11/net/rose/rose_timer.c linux-2.5.70-bk12/net/rose/rose_timer.c
--- linux-2.5.70-bk11/net/rose/rose_timer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/rose/rose_timer.c 2003-06-07 04:47:53.000000000 -0700
@@ -35,13 +35,13 @@
void rose_start_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.function = &rose_heartbeat_expiry;
- sk->timer.expires = jiffies + 5 * HZ;
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.function = &rose_heartbeat_expiry;
+ sk->sk_timer.expires = jiffies + 5 * HZ;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void rose_start_t1timer(struct sock *sk)
@@ -113,7 +113,7 @@
void rose_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
void rose_stop_timer(struct sock *sk)
@@ -137,7 +137,7 @@
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
- (sk->state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
rose_destroy_socket(sk);
return;
}
@@ -147,7 +147,7 @@
/*
* Check for the state of the receive buffer.
*/
- if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
+ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(rose->condition & ROSE_COND_OWN_RX_BUSY)) {
rose->condition &= ~ROSE_COND_OWN_RX_BUSY;
rose->condition &= ~ROSE_COND_ACK_PENDING;
@@ -204,12 +204,12 @@
rose_start_t3timer(sk);
- sk->state = TCP_CLOSE;
- sk->err = 0;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = 0;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
diff -urN linux-2.5.70-bk11/net/rxrpc/transport.c linux-2.5.70-bk12/net/rxrpc/transport.c
--- linux-2.5.70-bk11/net/rxrpc/transport.c 2003-05-26 18:00:45.000000000 -0700
+++ linux-2.5.70-bk12/net/rxrpc/transport.c 2003-06-07 04:47:53.000000000 -0700
@@ -112,9 +112,9 @@
/* set the socket up */
sock = trans->socket->sk;
- sock->user_data = trans;
- sock->data_ready = rxrpc_data_ready;
- sock->error_report = rxrpc_error_report;
+ sock->sk_user_data = trans;
+ sock->sk_data_ready = rxrpc_data_ready;
+ sock->sk_error_report = rxrpc_error_report;
down_write(&rxrpc_proc_transports_sem);
list_add_tail(&trans->proc_link,&rxrpc_proc_transports);
@@ -184,7 +184,7 @@
/* close the socket */
if (trans->socket) {
- trans->socket->sk->user_data = NULL;
+ trans->socket->sk->sk_user_data = NULL;
sock_release(trans->socket);
trans->socket = NULL;
}
@@ -255,16 +255,16 @@
{
struct rxrpc_transport *trans;
- _enter("%p{t=%p},%d",sk,sk->user_data,count);
+ _enter("%p{t=%p},%d",sk,sk->sk_user_data,count);
/* queue the transport for attention by krxiod */
- trans = (struct rxrpc_transport *) sk->user_data;
+ trans = (struct rxrpc_transport *) sk->sk_user_data;
if (trans)
rxrpc_krxiod_queue_transport(trans);
/* wake up anyone waiting on the socket */
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
_leave("");
@@ -279,18 +279,18 @@
{
struct rxrpc_transport *trans;
- _enter("%p{t=%p}",sk,sk->user_data);
+ _enter("%p{t=%p}",sk,sk->sk_user_data);
/* queue the transport for attention by krxiod */
- trans = (struct rxrpc_transport *) sk->user_data;
+ trans = (struct rxrpc_transport *) sk->sk_user_data;
if (trans) {
trans->error_rcvd = 1;
rxrpc_krxiod_queue_transport(trans);
}
/* wake up anyone waiting on the socket */
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
_leave("");
diff -urN linux-2.5.70-bk11/net/sched/sch_atm.c linux-2.5.70-bk12/net/sched/sch_atm.c
--- linux-2.5.70-bk11/net/sched/sch_atm.c 2003-05-26 18:00:39.000000000 -0700
+++ linux-2.5.70-bk12/net/sched/sch_atm.c 2003-06-07 04:47:53.000000000 -0700
@@ -508,7 +508,8 @@
ATM_SKB(skb)->vcc = flow->vcc;
memcpy(skb_push(skb,flow->hdr_len),flow->hdr,
flow->hdr_len);
- atomic_add(skb->truesize,&flow->vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize,
+ &flow->vcc->sk->sk_wmem_alloc);
/* atm.atm_options are already set by atm_tc_enqueue */
(void) flow->vcc->send(flow->vcc,skb);
}
diff -urN linux-2.5.70-bk11/net/sctp/associola.c linux-2.5.70-bk12/net/sctp/associola.c
--- linux-2.5.70-bk11/net/sctp/associola.c 2003-05-26 18:00:21.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/associola.c 2003-06-07 04:47:53.000000000 -0700
@@ -177,10 +177,10 @@
* RFC 6 - A SCTP receiver MUST be able to receive a minimum of
* 1500 bytes in one SCTP packet.
*/
- if (sk->rcvbuf < SCTP_DEFAULT_MINWINDOW)
+ if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW)
asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
else
- asoc->rwnd = sk->rcvbuf;
+ asoc->rwnd = sk->sk_rcvbuf;
asoc->a_rwnd = asoc->rwnd;
@@ -299,7 +299,7 @@
/* Decrement the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
/* Mark as dead, so other users can know this structure is
* going away.
@@ -857,7 +857,7 @@
/* Decrement the backlog value for a TCP-style socket. */
if (sctp_style(oldsk, TCP))
- oldsk->ack_backlog--;
+ oldsk->sk_ack_backlog--;
/* Release references to the old endpoint and the sock. */
sctp_endpoint_put(assoc->ep);
@@ -1026,7 +1026,7 @@
case SCTP_STATE_SHUTDOWN_RECEIVED:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >=
- min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu)))
+ min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu)))
return 1;
break;
default:
@@ -1109,7 +1109,7 @@
* the endpoint.
*/
scope = sctp_scope(&asoc->peer.active_path->ipaddr);
- flags = (PF_INET6 == asoc->base.sk->family) ? SCTP_ADDR6_ALLOWED : 0;
+ flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
diff -urN linux-2.5.70-bk11/net/sctp/endpointola.c linux-2.5.70-bk12/net/sctp/endpointola.c
--- linux-2.5.70-bk11/net/sctp/endpointola.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/endpointola.c 2003-06-07 04:47:53.000000000 -0700
@@ -151,12 +151,12 @@
/* FIXME - Should the min and max window size be configurable
* sysctl parameters as opposed to be constants?
*/
- sk->rcvbuf = SCTP_DEFAULT_MAXWINDOW;
- sk->sndbuf = SCTP_DEFAULT_MAXWINDOW * 2;
+ sk->sk_rcvbuf = SCTP_DEFAULT_MAXWINDOW;
+ sk->sk_sndbuf = SCTP_DEFAULT_MAXWINDOW * 2;
/* Use SCTP specific send buffer space queues. */
- sk->write_space = sctp_write_space;
- sk->use_write_queue = 1;
+ sk->sk_write_space = sctp_write_space;
+ sk->sk_use_write_queue = 1;
/* Initialize the secret key used with cookie. */
get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
@@ -178,7 +178,7 @@
/* Increment the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
}
/* Free the endpoint structure. Delay cleanup until
@@ -195,7 +195,7 @@
{
SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
- ep->base.sk->state = SCTP_SS_CLOSED;
+ ep->base.sk->sk_state = SCTP_SS_CLOSED;
/* Unlink this endpoint, so we can't find it again! */
sctp_unhash_endpoint(ep);
@@ -209,7 +209,7 @@
sctp_bind_addr_free(&ep->base.bind_addr);
/* Remove and free the port */
- if (ep->base.sk->prev != NULL)
+ if (ep->base.sk->sk_prev)
sctp_put_port(ep->base.sk);
/* Give up our hold on the sock. */
diff -urN linux-2.5.70-bk11/net/sctp/input.c linux-2.5.70-bk12/net/sctp/input.c
--- linux-2.5.70-bk11/net/sctp/input.c 2003-05-26 18:00:21.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/input.c 2003-06-07 04:47:53.000000000 -0700
@@ -447,10 +447,10 @@
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
} else { /* Only an error on timeout */
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
out_unlock:
diff -urN linux-2.5.70-bk11/net/sctp/ipv6.c linux-2.5.70-bk12/net/sctp/ipv6.c
--- linux-2.5.70-bk11/net/sctp/ipv6.c 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/ipv6.c 2003-06-07 04:47:53.000000000 -0700
@@ -123,10 +123,10 @@
np = inet6_sk(sk);
icmpv6_err_convert(type, code, &err);
if (!sock_owned_by_user(sk) && np->recverr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
} else { /* Only an error on timeout */
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
out_unlock:
@@ -146,7 +146,7 @@
memset(&fl, 0, sizeof(fl));
- fl.proto = sk->protocol;
+ fl.proto = sk->sk_protocol;
/* Fill in the dest address from the route entry passed with the skb
* and the source address from the transport.
@@ -159,7 +159,7 @@
if (ipv6_addr_type(&fl.fl6_src) & IPV6_ADDR_LINKLOCAL)
fl.oif = transport->saddr.v6.sin6_scope_id;
else
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_sport = inet_sk(sk)->sport;
fl.fl_ip_dport = transport->ipaddr.v6.sin6_port;
@@ -366,13 +366,13 @@
addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
}
-/* Initialize sk->rcv_saddr from sctp_addr. */
+/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
}
-/* Initialize sk->daddr from sctp_addr. */
+/* Initialize sk->sk_daddr from sctp_addr. */
static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
inet6_sk(sk)->daddr = addr->v6.sin6_addr;
@@ -500,25 +500,25 @@
struct sctp6_sock *newsctp6sk;
newsk = sk_alloc(PF_INET6, GFP_KERNEL, sizeof(struct sctp6_sock),
- sk->slab);
+ sk->sk_slab);
if (!newsk)
goto out;
sock_init_data(NULL, newsk);
sk_set_owner(newsk, THIS_MODULE);
- newsk->type = SOCK_STREAM;
+ newsk->sk_type = SOCK_STREAM;
- newsk->prot = sk->prot;
- newsk->no_check = sk->no_check;
- newsk->reuse = sk->reuse;
-
- newsk->destruct = inet_sock_destruct;
- newsk->zapped = 0;
- newsk->family = PF_INET6;
- newsk->protocol = IPPROTO_SCTP;
- newsk->backlog_rcv = sk->prot->backlog_rcv;
- newsk->shutdown = sk->shutdown;
+ newsk->sk_prot = sk->sk_prot;
+ newsk->sk_no_check = sk->sk_no_check;
+ newsk->sk_reuse = sk->sk_reuse;
+
+ newsk->sk_destruct = inet_sock_destruct;
+ newsk->sk_zapped = 0;
+ newsk->sk_family = PF_INET6;
+ newsk->sk_protocol = IPPROTO_SCTP;
+ newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+ newsk->sk_shutdown = sk->sk_shutdown;
newsctp6sk = (struct sctp6_sock *)newsk;
newsctp6sk->pinet6 = &newsctp6sk->inet6;
@@ -556,7 +556,7 @@
atomic_inc(&inet_sock_nr);
#endif
- if (0 != newsk->prot->init(newsk)) {
+ if (newsk->sk_prot->init(newsk)) {
inet_sock_release(newsk);
newsk = NULL;
}
@@ -716,8 +716,8 @@
*/
if (addr->v6.sin6_scope_id)
- sk->bound_dev_if = addr->v6.sin6_scope_id;
- if (!sk->bound_dev_if)
+ sk->sk_bound_dev_if = addr->v6.sin6_scope_id;
+ if (!sk->sk_bound_dev_if)
return 0;
}
af = opt->pf->af;
@@ -746,8 +746,8 @@
*/
if (addr->v6.sin6_scope_id)
- sk->bound_dev_if = addr->v6.sin6_scope_id;
- if (!sk->bound_dev_if)
+ sk->sk_bound_dev_if = addr->v6.sin6_scope_id;
+ if (!sk->sk_bound_dev_if)
return 0;
}
af = opt->pf->af;
diff -urN linux-2.5.70-bk11/net/sctp/output.c linux-2.5.70-bk12/net/sctp/output.c
--- linux-2.5.70-bk11/net/sctp/output.c 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/output.c 2003-06-07 04:47:53.000000000 -0700
@@ -138,7 +138,7 @@
if (!packet->has_cookie_echo) {
error = sctp_packet_transmit(packet);
if (error < 0)
- chunk->skb->sk->err = -error;
+ chunk->skb->sk->sk_err = -error;
/* If we have an empty packet, then we can NOT ever
* return PMTU_FULL.
@@ -429,7 +429,7 @@
/* Set up the IP options. */
/* BUG: not implemented
- * For v4 this all lives somewhere in sk->opt...
+ * For v4 this all lives somewhere in sk->sk_opt...
*/
/* Dump that on IP! */
diff -urN linux-2.5.70-bk11/net/sctp/outqueue.c linux-2.5.70-bk12/net/sctp/outqueue.c
--- linux-2.5.70-bk11/net/sctp/outqueue.c 2003-05-26 18:00:28.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/outqueue.c 2003-06-07 04:47:53.000000000 -0700
@@ -492,7 +492,7 @@
error = sctp_outq_flush(q, /* rtx_timeout */ 1);
if (error)
- q->asoc->base.sk->err = -error;
+ q->asoc->base.sk->sk_err = -error;
}
/*
diff -urN linux-2.5.70-bk11/net/sctp/protocol.c linux-2.5.70-bk12/net/sctp/protocol.c
--- linux-2.5.70-bk11/net/sctp/protocol.c 2003-05-26 18:00:41.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/protocol.c 2003-06-07 04:47:53.000000000 -0700
@@ -267,13 +267,13 @@
addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr;
}
-/* Initialize sk->rcv_saddr from sctp_addr. */
+/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr;
}
-/* Initialize sk->daddr from sctp_addr. */
+/* Initialize sk->sk_daddr from sctp_addr. */
static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr;
@@ -512,25 +512,25 @@
struct inet_opt *newinet;
newsk = sk_alloc(PF_INET, GFP_KERNEL, sizeof(struct sctp_sock),
- sk->slab);
+ sk->sk_slab);
if (!newsk)
goto out;
sock_init_data(NULL, newsk);
sk_set_owner(newsk, THIS_MODULE);
- newsk->type = SOCK_STREAM;
+ newsk->sk_type = SOCK_STREAM;
- newsk->prot = sk->prot;
- newsk->no_check = sk->no_check;
- newsk->reuse = sk->reuse;
- newsk->shutdown = sk->shutdown;
-
- newsk->destruct = inet_sock_destruct;
- newsk->zapped = 0;
- newsk->family = PF_INET;
- newsk->protocol = IPPROTO_SCTP;
- newsk->backlog_rcv = sk->prot->backlog_rcv;
+ newsk->sk_prot = sk->sk_prot;
+ newsk->sk_no_check = sk->sk_no_check;
+ newsk->sk_reuse = sk->sk_reuse;
+ newsk->sk_shutdown = sk->sk_shutdown;
+
+ newsk->sk_destruct = inet_sock_destruct;
+ newsk->sk_zapped = 0;
+ newsk->sk_family = PF_INET;
+ newsk->sk_protocol = IPPROTO_SCTP;
+ newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
newinet = inet_sk(newsk);
@@ -555,7 +555,7 @@
atomic_inc(&inet_sock_nr);
#endif
- if (0 != newsk->prot->init(newsk)) {
+ if (newsk->sk_prot->init(newsk)) {
inet_sock_release(newsk);
newsk = NULL;
}
@@ -601,7 +601,7 @@
"SCTP: Failed to create the SCTP control socket.\n");
return err;
}
- sctp_ctl_socket->sk->allocation = GFP_ATOMIC;
+ sctp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
inet_sk(sctp_ctl_socket->sk)->uc_ttl = -1;
return 0;
@@ -880,34 +880,22 @@
{
int i;
- sctp_statistics[0] = kmalloc_percpu(sizeof (struct sctp_mib),
- GFP_KERNEL);
+ sctp_statistics[0] = alloc_percpu(struct sctp_mib);
if (!sctp_statistics[0])
return -ENOMEM;
- sctp_statistics[1] = kmalloc_percpu(sizeof (struct sctp_mib),
- GFP_KERNEL);
+ sctp_statistics[1] = alloc_percpu(struct sctp_mib);
if (!sctp_statistics[1]) {
- kfree_percpu(sctp_statistics[0]);
+ free_percpu(sctp_statistics[0]);
return -ENOMEM;
}
-
- /* Zero all percpu versions of the mibs */
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_possible(i)) {
- memset(per_cpu_ptr(sctp_statistics[0], i), 0,
- sizeof (struct sctp_mib));
- memset(per_cpu_ptr(sctp_statistics[1], i), 0,
- sizeof (struct sctp_mib));
- }
- }
return 0;
}
static void cleanup_sctp_mibs(void)
{
- kfree_percpu(sctp_statistics[0]);
- kfree_percpu(sctp_statistics[1]);
+ free_percpu(sctp_statistics[0]);
+ free_percpu(sctp_statistics[1]);
}
/* Initialize the universe into something sensible. */
diff -urN linux-2.5.70-bk11/net/sctp/sm_make_chunk.c linux-2.5.70-bk12/net/sctp/sm_make_chunk.c
--- linux-2.5.70-bk11/net/sctp/sm_make_chunk.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/sm_make_chunk.c 2003-06-07 04:47:53.000000000 -0700
@@ -1895,7 +1895,7 @@
*/
switch (param.p->type) {
case SCTP_PARAM_IPV6_ADDRESS:
- if( PF_INET6 != asoc->base.sk->family)
+ if (PF_INET6 != asoc->base.sk->sk_family)
break;
/* Fall through. */
case SCTP_PARAM_IPV4_ADDRESS:
diff -urN linux-2.5.70-bk11/net/sctp/sm_sideeffect.c linux-2.5.70-bk12/net/sctp/sm_sideeffect.c
--- linux-2.5.70-bk11/net/sctp/sm_sideeffect.c 2003-05-26 18:00:42.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/sm_sideeffect.c 2003-06-07 04:47:53.000000000 -0700
@@ -229,7 +229,7 @@
transport, GFP_ATOMIC);
if (error)
- asoc->base.sk->err = -error;
+ asoc->base.sk->sk_err = -error;
out_unlock:
sctp_bh_unlock_sock(asoc->base.sk);
@@ -269,7 +269,7 @@
(void *)timeout_type, GFP_ATOMIC);
if (error)
- asoc->base.sk->err = -error;
+ asoc->base.sk->sk_err = -error;
out_unlock:
sctp_bh_unlock_sock(asoc->base.sk);
@@ -339,7 +339,7 @@
transport, GFP_ATOMIC);
if (error)
- asoc->base.sk->err = -error;
+ asoc->base.sk->sk_err = -error;
out_unlock:
sctp_bh_unlock_sock(asoc->base.sk);
@@ -616,16 +616,16 @@
asoc->state_timestamp = jiffies;
if (sctp_style(sk, TCP)) {
- /* Change the sk->state of a TCP-style socket that has
+ /* Change the sk->sk_state of a TCP-style socket that has
* sucessfully completed a connect() call.
*/
if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
- sk->state = SCTP_SS_ESTABLISHED;
+ sk->sk_state = SCTP_SS_ESTABLISHED;
/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
sctp_sstate(sk, ESTABLISHED))
- sk->shutdown |= RCV_SHUTDOWN;
+ sk->sk_shutdown |= RCV_SHUTDOWN;
}
if (sctp_state(asoc, ESTABLISHED) ||
@@ -644,7 +644,7 @@
* notifications.
*/
if (!sctp_style(sk, UDP))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
}
diff -urN linux-2.5.70-bk11/net/sctp/sm_statefuns.c linux-2.5.70-bk12/net/sctp/sm_statefuns.c
--- linux-2.5.70-bk11/net/sctp/sm_statefuns.c 2003-05-26 18:00:22.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/sm_statefuns.c 2003-06-07 04:47:54.000000000 -0700
@@ -213,7 +213,8 @@
* ABORT.
*/
if (!sctp_sstate(sk, LISTENING) ||
- (sctp_style(sk, TCP) && (sk->ack_backlog >= sk->max_ack_backlog)))
+ (sctp_style(sk, TCP) &&
+ (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)))
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Verify the INIT chunk before processing it. */
diff -urN linux-2.5.70-bk11/net/sctp/socket.c linux-2.5.70-bk12/net/sctp/socket.c
--- linux-2.5.70-bk11/net/sctp/socket.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/socket.c 2003-06-07 04:47:54.000000000 -0700
@@ -327,7 +327,7 @@
* added.
*/
-/* Unprotected by locks. Call only with socket lock sk->lock held! See
+/* Unprotected by locks. Call only with socket lock sk->sk_lock held! See
* sctp_bindx() for a lock-protected call.
*/
@@ -537,8 +537,8 @@
goto err_bindx_rem;
};
- /* FIXME - There is probably a need to check if sk->saddr and
- * sk->rcv_addr are currently set to one of the addresses to
+ /* FIXME - There is probably a need to check if sk->sk_saddr and
+ * sk->sk_rcv_addr are currently set to one of the addresses to
* be removed. This is something which needs to be looked into
* when we are fixing the outstanding issues with multi-homing
* socket routing and failover schemes. Refer to comments in
@@ -713,7 +713,7 @@
printk("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
sctp_lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
ep = sctp_sk(sk)->ep;
@@ -732,7 +732,7 @@
sctp_association_free(asoc);
} else if (sock_flag(sk, SOCK_LINGER) &&
- !sk->lingertime)
+ !sk->sk_lingertime)
sctp_primitive_ABORT(asoc, NULL);
else
sctp_primitive_SHUTDOWN(asoc, NULL);
@@ -741,7 +741,7 @@
}
/* Clean up any skbs sitting on the receive queue. */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sctp_sk(sk)->pd_lobby);
/* On a TCP-style socket, block for at most linger_time if set. */
@@ -1073,7 +1073,7 @@
/* API 7.1.7, the sndbuf size per association bounds the
* maximum size of data that can be sent in a single send call.
*/
- if (msg_len > sk->sndbuf) {
+ if (msg_len > sk->sk_sndbuf) {
err = -EMSGSIZE;
goto out_free;
}
@@ -1296,7 +1296,7 @@
sctp_ulpevent_read_sndrcvinfo(event, msg);
#if 0
/* FIXME: we should be calling IP/IPv6 layers. */
- if (sk->protinfo.af_inet.cmsg_flags)
+ if (sk->sk_protinfo.af_inet.cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
@@ -1311,7 +1311,7 @@
if (flags & MSG_PEEK)
goto out_free;
sctp_skb_pull(skb, copied);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
/* When only partial message is copied to the user, increase
* rwnd by that amount. If all the data in the skb is read,
@@ -1819,7 +1819,7 @@
af = sctp_get_af_specific(to.sa.sa_family);
af->to_sk_daddr(&to, sk);
- timeo = sock_sndtimeo(sk, sk->socket->file->f_flags & O_NONBLOCK);
+ timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
err = sctp_wait_for_connect(asoc, &timeo);
out_unlock:
@@ -1865,7 +1865,7 @@
goto out;
}
- timeo = sock_rcvtimeo(sk, sk->socket->file->f_flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
error = sctp_wait_for_accept(sk, timeo);
if (error)
@@ -1916,7 +1916,7 @@
sp = sctp_sk(sk);
/* Initialize the SCTP per socket area. */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_SEQPACKET:
sp->type = SCTP_SOCKET_UDP;
break;
@@ -1988,7 +1988,7 @@
/* User specified fragmentation limit. */
sp->user_frag = 0;
- sp->pf = sctp_get_pf_specific(sk->family);
+ sp->pf = sctp_get_pf_specific(sk->sk_family);
/* Control variables for partial data delivery. */
sp->pd_mode = 0;
@@ -2184,7 +2184,7 @@
return -EINVAL;
/* Create a new socket. */
- err = sock_create(sk->family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
+ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
if (err < 0)
return err;
@@ -2814,12 +2814,12 @@
* used by other socket (pp->sk != NULL); that other
* socket is going to be sk2.
*/
- int sk_reuse = sk->reuse;
+ int reuse = sk->sk_reuse;
struct sock *sk2 = pp->sk;
SCTP_DEBUG_PRINTK("sctp_get_port() found a "
"possible match\n");
- if (pp->fastreuse != 0 && sk->reuse != 0)
+ if (pp->fastreuse && sk->sk_reuse)
goto success;
/* Run through the list of sockets bound to the port
@@ -2832,11 +2832,11 @@
* that this port/socket (sk) combination are already
* in an endpoint.
*/
- for ( ; sk2 != NULL; sk2 = sk2->bind_next) {
+ for (; sk2; sk2 = sk2->sk_bind_next) {
struct sctp_endpoint *ep2;
ep2 = sctp_sk(sk2)->ep;
- if (sk_reuse && sk2->reuse)
+ if (reuse && sk2->sk_reuse)
continue;
if (sctp_bind_addr_match(&ep2->base.bind_addr, addr,
@@ -2860,12 +2860,12 @@
goto fail_unlock;
/* In either case (hit or miss), make sure fastreuse is 1 only
- * if sk->reuse is too (that is, if the caller requested
+ * if sk->sk_reuse is too (that is, if the caller requested
* SO_REUSEADDR on this socket -sk-).
*/
if (!pp->sk)
- pp->fastreuse = sk->reuse ? 1 : 0;
- else if (pp->fastreuse && sk->reuse == 0)
+ pp->fastreuse = sk->sk_reuse ? 1 : 0;
+ else if (pp->fastreuse && !sk->sk_reuse)
pp->fastreuse = 0;
/* We are set, so fill up all the data in the hash table
@@ -2874,12 +2874,12 @@
*/
success:
inet_sk(sk)->num = snum;
- if (sk->prev == NULL) {
- if ((sk->bind_next = pp->sk) != NULL)
- pp->sk->bind_pprev = &sk->bind_next;
+ if (!sk->sk_prev) {
+ if ((sk->sk_bind_next = pp->sk) != NULL)
+ pp->sk->sk_bind_pprev = &sk->sk_bind_next;
pp->sk = sk;
- sk->bind_pprev = &pp->sk;
- sk->prev = (struct sock *) pp;
+ sk->sk_bind_pprev = &pp->sk;
+ sk->sk_prev = (struct sock *) pp;
}
ret = 0;
@@ -2907,7 +2907,7 @@
af->from_sk(&addr, sk);
addr.v4.sin_port = htons(snum);
- /* Note: sk->num gets filled in if ephemeral port request. */
+ /* Note: sk->sk_num gets filled in if ephemeral port request. */
ret = sctp_get_port_local(sk, &addr);
return (ret ? 1 : 0);
@@ -2948,7 +2948,7 @@
if (sctp_autobind(sk))
return -EAGAIN;
}
- sk->state = SCTP_SS_LISTENING;
+ sk->sk_state = SCTP_SS_LISTENING;
sctp_hash_endpoint(ep);
return 0;
}
@@ -2981,8 +2981,8 @@
if (sctp_autobind(sk))
return -EAGAIN;
}
- sk->state = SCTP_SS_LISTENING;
- sk->max_ack_backlog = backlog;
+ sk->sk_state = SCTP_SS_LISTENING;
+ sk->sk_max_ack_backlog = backlog;
sctp_hash_endpoint(ep);
return 0;
}
@@ -3056,7 +3056,7 @@
struct sctp_opt *sp = sctp_sk(sk);
unsigned int mask;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
@@ -3068,14 +3068,14 @@
mask = 0;
/* Is there any exceptional events? */
- if (sk->err || !skb_queue_empty(&sk->error_queue))
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* Is it readable? Reconsider this code with TCP-style support. */
- if (!skb_queue_empty(&sk->receive_queue) ||
- (sk->shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
/* The association is either gone or not ready. */
@@ -3086,7 +3086,7 @@
if (sctp_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
/*
* Since the socket is not locked, the buffer
* might be made available after the writeable check and
@@ -3133,11 +3133,11 @@
sctp_bind_bucket_t *pp;
sctp_spin_lock(&head->lock);
- pp = (sctp_bind_bucket_t *) sk->prev;
- if (sk->bind_next)
- sk->bind_next->bind_pprev = sk->bind_pprev;
- *(sk->bind_pprev) = sk->bind_next;
- sk->prev = NULL;
+ pp = (sctp_bind_bucket_t *)sk->sk_prev;
+ if (sk->sk_bind_next)
+ sk->sk_bind_next->sk_bind_pprev = sk->sk_bind_pprev;
+ *(sk->sk_bind_pprev) = sk->sk_bind_next;
+ sk->sk_prev = NULL;
inet_sk(sk)->num = 0;
if (pp->sk) {
if (pp->next)
@@ -3299,18 +3299,18 @@
int error;
DEFINE_WAIT(wait);
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
if (error)
goto out;
- if (!skb_queue_empty(&sk->receive_queue))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
goto ready;
/* Socket shut down? */
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
goto out;
/* Sequenced packets can come disconnected. If so we report the
@@ -3336,14 +3336,14 @@
sctp_lock_sock(sk);
ready:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return 0;
interrupted:
error = sock_intr_errno(*timeo_p);
out:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
*err = error;
return error;
}
@@ -3359,7 +3359,7 @@
struct sk_buff *skb;
long timeo;
- /* Caller is allowed not to check sk->err before calling. */
+ /* Caller is allowed not to check sk->sk_err before calling. */
error = sock_error(sk);
if (error)
goto no_packet;
@@ -3380,21 +3380,21 @@
if (flags & MSG_PEEK) {
unsigned long cpu_flags;
- sctp_spin_lock_irqsave(&sk->receive_queue.lock,
+ sctp_spin_lock_irqsave(&sk->sk_receive_queue.lock,
cpu_flags);
- skb = skb_peek(&sk->receive_queue);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
atomic_inc(&skb->users);
- sctp_spin_unlock_irqrestore(&sk->receive_queue.lock,
+ sctp_spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
cpu_flags);
} else {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
}
if (skb)
return skb;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
/* User doesn't want to wait. */
@@ -3437,7 +3437,7 @@
struct sock *sk = asoc->base.sk;
int amt = 0;
- amt = sk->sndbuf - asoc->sndbuf_used;
+ amt = sk->sk_sndbuf - asoc->sndbuf_used;
if (amt < 0)
amt = 0;
return amt;
@@ -3465,29 +3465,29 @@
*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
- sk->wmem_queued += SCTP_DATA_SNDSIZE(chunk);
+ sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
- struct socket *sock = sk->socket;
+ struct socket *sock = sk->sk_socket;
if ((sctp_wspace(asoc) > 0) && sock) {
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
if (sock->fasync_list &&
- !(sk->shutdown & SEND_SHUTDOWN))
+ !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
}
}
@@ -3508,7 +3508,7 @@
asoc = chunk->asoc;
sk = asoc->base.sk;
asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk);
- sk->wmem_queued -= SCTP_DATA_SNDSIZE(chunk);
+ sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk);
__sctp_write_space(asoc);
sctp_association_put(asoc);
@@ -3535,7 +3535,7 @@
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
- if (sk->err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
+ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
@@ -3602,7 +3602,7 @@
{
int amt = 0;
- amt = sk->sndbuf - sk->wmem_queued;
+ amt = sk->sk_sndbuf - sk->sk_wmem_queued;
if (amt < 0)
amt = 0;
return amt;
@@ -3629,9 +3629,9 @@
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
- if (sk->err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
+ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
@@ -3681,7 +3681,8 @@
for (;;) {
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
sctp_release_sock(sk);
@@ -3706,7 +3707,7 @@
break;
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return err;
}
@@ -3716,7 +3717,7 @@
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
sctp_release_sock(sk);
@@ -3724,7 +3725,7 @@
sctp_lock_sock(sk);
} while (!signal_pending(current) && timeout);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
@@ -3743,8 +3744,8 @@
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
*/
- newsk->sndbuf = oldsk->sndbuf;
- newsk->rcvbuf = oldsk->rcvbuf;
+ newsk->sk_sndbuf = oldsk->sk_sndbuf;
+ newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
*newsp = *oldsp;
/* Restore the ep value that was overwritten with the above structure
@@ -3756,11 +3757,11 @@
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
- sctp_skb_for_each(skb, &oldsk->receive_queue, tmp) {
+ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, skb->list);
- __skb_queue_tail(&newsk->receive_queue, skb);
+ __skb_queue_tail(&newsk->sk_receive_queue, skb);
}
}
@@ -3780,7 +3781,7 @@
if (assoc->ulpq.pd_mode) {
queue = &newsp->pd_lobby;
} else
- queue = &newsk->receive_queue;
+ queue = &newsk->sk_receive_queue;
/* Walk through the pd_lobby, looking for skbs that
* need moved to the new socket.
@@ -3814,9 +3815,9 @@
* is called, set RCV_SHUTDOWN flag.
*/
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
- newsk->shutdown |= RCV_SHUTDOWN;
+ newsk->sk_shutdown |= RCV_SHUTDOWN;
- newsk->state = SCTP_SS_ESTABLISHED;
+ newsk->sk_state = SCTP_SS_ESTABLISHED;
}
/* This proto struct describes the ULP interface for SCTP. */
diff -urN linux-2.5.70-bk11/net/sctp/transport.c linux-2.5.70-bk12/net/sctp/transport.c
--- linux-2.5.70-bk11/net/sctp/transport.c 2003-05-26 18:00:25.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/transport.c 2003-06-07 04:47:54.000000000 -0700
@@ -245,7 +245,7 @@
if (dst) {
transport->pmtu = dst_pmtu(dst);
- /* Initialize sk->rcv_saddr, if the transport is the
+ /* Initialize sk->sk_rcv_saddr, if the transport is the
* association's active path for getsockname().
*/
if (asoc && (transport == asoc->peer.active_path))
diff -urN linux-2.5.70-bk11/net/sctp/ulpqueue.c linux-2.5.70-bk12/net/sctp/ulpqueue.c
--- linux-2.5.70-bk11/net/sctp/ulpqueue.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/sctp/ulpqueue.c 2003-06-07 04:47:54.000000000 -0700
@@ -163,7 +163,7 @@
sp->pd_mode = 0;
if (!skb_queue_empty(&sp->pd_lobby)) {
struct list_head *list;
- sctp_skb_list_tail(&sp->pd_lobby, &sk->receive_queue);
+ sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
INIT_LIST_HEAD(list);
return 1;
@@ -189,7 +189,7 @@
/* If the socket is just going to throw this away, do not
* even try to deliver it.
*/
- if (sock_flag(sk, SOCK_DEAD) || (sk->shutdown & RCV_SHUTDOWN))
+ if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
goto out_free;
/* Check if the user wishes to receive this event. */
@@ -202,13 +202,13 @@
*/
if (!sctp_sk(sk)->pd_mode) {
- queue = &sk->receive_queue;
+ queue = &sk->sk_receive_queue;
} else if (ulpq->pd_mode) {
if (event->msg_flags & MSG_NOTIFICATION)
queue = &sctp_sk(sk)->pd_lobby;
else {
clear_pd = event->msg_flags & MSG_EOR;
- queue = &sk->receive_queue;
+ queue = &sk->sk_receive_queue;
}
} else
queue = &sctp_sk(sk)->pd_lobby;
@@ -229,8 +229,8 @@
if (clear_pd)
sctp_ulpq_clear_pd(ulpq);
- if (queue == &sk->receive_queue)
- sk->data_ready(sk, 0);
+ if (queue == &sk->sk_receive_queue)
+ sk->sk_data_ready(sk, 0);
return 1;
out_free:
@@ -773,7 +773,7 @@
freed = 0;
- if (skb_queue_empty(&asoc->base.sk->receive_queue)) {
+ if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
freed = sctp_ulpq_renege_order(ulpq, needed);
if (freed < needed) {
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
@@ -812,9 +812,9 @@
SCTP_PARTIAL_DELIVERY_ABORTED,
gfp);
if (ev)
- __skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
+ __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
/* If there is data waiting, send it up the socket now. */
if (sctp_ulpq_clear_pd(ulpq) || ev)
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
}
diff -urN linux-2.5.70-bk11/net/socket.c linux-2.5.70-bk12/net/socket.c
--- linux-2.5.70-bk11/net/socket.c 2003-05-26 18:00:37.000000000 -0700
+++ linux-2.5.70-bk12/net/socket.c 2003-06-07 04:47:54.000000000 -0700
@@ -889,11 +889,11 @@
*
* 1. fasync_list is modified only under process context socket lock
* i.e. under semaphore.
- * 2. fasync_list is used under read_lock(&sk->callback_lock)
+ * 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
* or under socket lock.
* 3. fasync_list can be used from softirq context, so that
* modification under socket lock have to be enhanced with
- * write_lock_bh(&sk->callback_lock).
+ * write_lock_bh(&sk->sk_callback_lock).
* --ANK (990710)
*/
@@ -930,9 +930,9 @@
{
if(fa!=NULL)
{
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
fa->fa_fd=fd;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
kfree(fna);
goto out;
@@ -941,17 +941,17 @@
fna->fa_fd=fd;
fna->magic=FASYNC_MAGIC;
fna->fa_next=sock->fasync_list;
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
sock->fasync_list=fna;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
}
else
{
if (fa!=NULL)
{
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
*prev=fa->fa_next;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
kfree(fa);
}
}
diff -urN linux-2.5.70-bk11/net/sunrpc/svcsock.c linux-2.5.70-bk12/net/sunrpc/svcsock.c
--- linux-2.5.70-bk11/net/sunrpc/svcsock.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/sunrpc/svcsock.c 2003-06-07 04:47:54.000000000 -0700
@@ -496,9 +496,9 @@
* DaveM said I could!
*/
lock_sock(sock->sk);
- sock->sk->sndbuf = snd * 2;
- sock->sk->rcvbuf = rcv * 2;
- sock->sk->userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
+ sock->sk->sk_sndbuf = snd * 2;
+ sock->sk->sk_rcvbuf = rcv * 2;
+ sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
release_sock(sock->sk);
#endif
}
@@ -508,7 +508,7 @@
static void
svc_udp_data_ready(struct sock *sk, int count)
{
- struct svc_sock *svsk = (struct svc_sock *)(sk->user_data);
+ struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
if (!svsk)
goto out;
@@ -517,8 +517,8 @@
set_bit(SK_DATA, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
}
/*
@@ -527,7 +527,7 @@
static void
svc_write_space(struct sock *sk)
{
- struct svc_sock *svsk = (struct svc_sock *)(sk->user_data);
+ struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
if (svsk) {
dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
@@ -535,10 +535,10 @@
svc_sock_enqueue(svsk);
}
- if (sk->sleep && waitqueue_active(sk->sleep)) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
printk(KERN_WARNING "RPC svc_write_space: some sleeping on %p\n",
svsk);
- wake_up_interruptible(sk->sleep);
+ wake_up_interruptible(sk->sk_sleep);
}
}
@@ -589,7 +589,7 @@
rqstp->rq_addr.sin_port = skb->h.uh->source;
rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
- svsk->sk_sk->stamp = skb->stamp;
+ svsk->sk_sk->sk_stamp = skb->stamp;
if (skb_is_nonlinear(skb)) {
/* we have to copy */
@@ -652,8 +652,8 @@
static void
svc_udp_init(struct svc_sock *svsk)
{
- svsk->sk_sk->data_ready = svc_udp_data_ready;
- svsk->sk_sk->write_space = svc_write_space;
+ svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
+ svsk->sk_sk->sk_write_space = svc_write_space;
svsk->sk_recvfrom = svc_udp_recvfrom;
svsk->sk_sendto = svc_udp_sendto;
@@ -679,21 +679,21 @@
struct svc_sock *svsk;
dprintk("svc: socket %p TCP (listen) state change %d\n",
- sk, sk->state);
+ sk, sk->sk_state);
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
/* Aborted connection, SYN_RECV or whatever... */
goto out;
}
- if (!(svsk = (struct svc_sock *) sk->user_data)) {
+ if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
printk("svc: socket %p: no user data\n", sk);
goto out;
}
set_bit(SK_CONN, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
}
/*
@@ -705,17 +705,17 @@
struct svc_sock *svsk;
dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
- sk, sk->state, sk->user_data);
+ sk, sk->sk_state, sk->sk_user_data);
- if (!(svsk = (struct svc_sock *) sk->user_data)) {
+ if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
printk("svc: socket %p: no user data\n", sk);
goto out;
}
set_bit(SK_CLOSE, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
}
static void
@@ -724,14 +724,14 @@
struct svc_sock * svsk;
dprintk("svc: socket %p TCP data ready (svsk %p)\n",
- sk, sk->user_data);
- if (!(svsk = (struct svc_sock *)(sk->user_data)))
+ sk, sk->sk_user_data);
+ if (!(svsk = (struct svc_sock *)(sk->sk_user_data)))
goto out;
set_bit(SK_DATA, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
}
/*
@@ -797,7 +797,7 @@
/* make sure that a write doesn't block forever when
* low on memory
*/
- newsock->sk->sndtimeo = HZ*30;
+ newsock->sk->sk_sndtimeo = HZ*30;
if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
goto failed;
@@ -1035,15 +1035,15 @@
svsk->sk_recvfrom = svc_tcp_recvfrom;
svsk->sk_sendto = svc_tcp_sendto;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
dprintk("setting up TCP socket for listening\n");
- sk->data_ready = svc_tcp_listen_data_ready;
+ sk->sk_data_ready = svc_tcp_listen_data_ready;
set_bit(SK_CONN, &svsk->sk_flags);
} else {
dprintk("setting up TCP socket for reading\n");
- sk->state_change = svc_tcp_state_change;
- sk->data_ready = svc_tcp_data_ready;
- sk->write_space = svc_write_space;
+ sk->sk_state_change = svc_tcp_state_change;
+ sk->sk_data_ready = svc_tcp_data_ready;
+ sk->sk_write_space = svc_write_space;
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -1290,7 +1290,7 @@
/* Register socket with portmapper */
if (*errp >= 0 && pmap_register)
- *errp = svc_register(serv, inet->protocol,
+ *errp = svc_register(serv, inet->sk_protocol,
ntohs(inet_sk(inet)->sport));
if (*errp < 0) {
@@ -1299,12 +1299,12 @@
}
set_bit(SK_BUSY, &svsk->sk_flags);
- inet->user_data = svsk;
+ inet->sk_user_data = svsk;
svsk->sk_sock = sock;
svsk->sk_sk = inet;
- svsk->sk_ostate = inet->state_change;
- svsk->sk_odata = inet->data_ready;
- svsk->sk_owspace = inet->write_space;
+ svsk->sk_ostate = inet->sk_state_change;
+ svsk->sk_odata = inet->sk_data_ready;
+ svsk->sk_owspace = inet->sk_write_space;
svsk->sk_server = serv;
svsk->sk_lastrecv = get_seconds();
INIT_LIST_HEAD(&svsk->sk_deferred);
@@ -1363,7 +1363,7 @@
return error;
if (sin != NULL) {
- sock->sk->reuse = 1; /* allow address reuse */
+ sock->sk->sk_reuse = 1; /* allow address reuse */
error = sock->ops->bind(sock, (struct sockaddr *) sin,
sizeof(*sin));
if (error < 0)
@@ -1398,9 +1398,9 @@
serv = svsk->sk_server;
sk = svsk->sk_sk;
- sk->state_change = svsk->sk_ostate;
- sk->data_ready = svsk->sk_odata;
- sk->write_space = svsk->sk_owspace;
+ sk->sk_state_change = svsk->sk_ostate;
+ sk->sk_data_ready = svsk->sk_odata;
+ sk->sk_write_space = svsk->sk_owspace;
spin_lock_bh(&serv->sv_lock);
diff -urN linux-2.5.70-bk11/net/sunrpc/xprt.c linux-2.5.70-bk12/net/sunrpc/xprt.c
--- linux-2.5.70-bk11/net/sunrpc/xprt.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/sunrpc/xprt.c 2003-06-07 04:47:54.000000000 -0700
@@ -129,7 +129,7 @@
static inline struct rpc_xprt *
xprt_from_sock(struct sock *sk)
{
- return (struct rpc_xprt *) sk->user_data;
+ return (struct rpc_xprt *) sk->sk_user_data;
}
/*
@@ -367,18 +367,18 @@
if (!sk)
return;
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
xprt->inet = NULL;
xprt->sock = NULL;
- sk->user_data = NULL;
- sk->data_ready = xprt->old_data_ready;
- sk->state_change = xprt->old_state_change;
- sk->write_space = xprt->old_write_space;
- write_unlock_bh(&sk->callback_lock);
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = xprt->old_data_ready;
+ sk->sk_state_change = xprt->old_state_change;
+ sk->sk_write_space = xprt->old_write_space;
+ write_unlock_bh(&sk->sk_callback_lock);
xprt_disconnect(xprt);
- sk->no_check = 0;
+ sk->sk_no_check = 0;
sock_release(sock);
}
@@ -448,7 +448,7 @@
status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
sizeof(xprt->addr), O_NONBLOCK);
dprintk("RPC: %4d connect status %d connected %d sock state %d\n",
- task->tk_pid, -status, xprt_connected(xprt), inet->state);
+ task->tk_pid, -status, xprt_connected(xprt), inet->sk_state);
if (status >= 0)
return;
@@ -458,12 +458,13 @@
case -EALREADY:
/* Protect against TCP socket state changes */
lock_sock(inet);
- if (inet->state != TCP_ESTABLISHED) {
+ if (inet->sk_state != TCP_ESTABLISHED) {
dprintk("RPC: %4d waiting for connection\n",
task->tk_pid);
task->tk_timeout = RPC_CONNECT_TIMEOUT;
/* if the socket is already closing, delay briefly */
- if ((1 << inet->state) & ~(TCPF_SYN_SENT|TCPF_SYN_RECV))
+ if ((1 << inet->sk_state) &
+ ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status,
NULL);
@@ -679,7 +680,7 @@
struct sk_buff *skb;
int err, repsize, copied;
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
dprintk("RPC: udp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk))) {
printk("RPC: udp_data_ready request not found!\n");
@@ -728,9 +729,9 @@
dropit:
skb_free_datagram(sk, skb);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
- read_unlock(&sk->callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
}
/*
@@ -935,7 +936,7 @@
struct rpc_xprt *xprt;
read_descriptor_t rd_desc;
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
dprintk("RPC: tcp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk))) {
printk("RPC: tcp_data_ready socket info not found!\n");
@@ -949,7 +950,7 @@
rd_desc.count = 65536;
tcp_read_sock(sk, &rd_desc, tcp_data_recv);
out:
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
static void
@@ -957,15 +958,15 @@
{
struct rpc_xprt *xprt;
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: tcp_state_change client %p...\n", xprt);
dprintk("RPC: state %x conn %d dead %d zapped %d\n",
- sk->state, xprt_connected(xprt),
- sock_flag(sk, SOCK_DEAD), sk->zapped);
+ sk->sk_state, xprt_connected(xprt),
+ sock_flag(sk, SOCK_DEAD), sk->sk_zapped);
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_ESTABLISHED:
spin_lock_bh(&xprt->sock_lock);
if (!xprt_test_and_set_connected(xprt)) {
@@ -989,9 +990,9 @@
break;
}
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
- read_unlock(&sk->callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
}
/*
@@ -1006,8 +1007,8 @@
struct rpc_xprt *xprt;
struct socket *sock;
- read_lock(&sk->callback_lock);
- if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->socket))
+ read_lock(&sk->sk_callback_lock);
+ if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
goto out;
if (xprt->shutdown)
goto out;
@@ -1030,10 +1031,10 @@
if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending)
rpc_wake_up_task(xprt->snd_task);
spin_unlock_bh(&xprt->sock_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
out:
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
/*
@@ -1465,28 +1466,28 @@
if (xprt->inet)
return;
- write_lock_bh(&sk->callback_lock);
- sk->user_data = xprt;
- xprt->old_data_ready = sk->data_ready;
- xprt->old_state_change = sk->state_change;
- xprt->old_write_space = sk->write_space;
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = xprt;
+ xprt->old_data_ready = sk->sk_data_ready;
+ xprt->old_state_change = sk->sk_state_change;
+ xprt->old_write_space = sk->sk_write_space;
if (xprt->prot == IPPROTO_UDP) {
- sk->data_ready = udp_data_ready;
- sk->no_check = UDP_CSUM_NORCV;
+ sk->sk_data_ready = udp_data_ready;
+ sk->sk_no_check = UDP_CSUM_NORCV;
xprt_set_connected(xprt);
} else {
struct tcp_opt *tp = tcp_sk(sk);
tp->nonagle = 1; /* disable Nagle's algorithm */
- sk->data_ready = tcp_data_ready;
- sk->state_change = tcp_state_change;
+ sk->sk_data_ready = tcp_data_ready;
+ sk->sk_state_change = tcp_state_change;
xprt_clear_connected(xprt);
}
- sk->write_space = xprt_write_space;
+ sk->sk_write_space = xprt_write_space;
/* Reset to new socket */
xprt->sock = sock;
xprt->inet = sk;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
return;
}
@@ -1502,13 +1503,13 @@
if (xprt->stream)
return;
if (xprt->rcvsize) {
- sk->userlocks |= SOCK_RCVBUF_LOCK;
- sk->rcvbuf = xprt->rcvsize * RPC_MAXCONG * 2;
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ sk->sk_rcvbuf = xprt->rcvsize * RPC_MAXCONG * 2;
}
if (xprt->sndsize) {
- sk->userlocks |= SOCK_SNDBUF_LOCK;
- sk->sndbuf = xprt->sndsize * RPC_MAXCONG * 2;
- sk->write_space(sk);
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ sk->sk_sndbuf = xprt->sndsize * RPC_MAXCONG * 2;
+ sk->sk_write_space(sk);
}
}
diff -urN linux-2.5.70-bk11/net/unix/af_unix.c linux-2.5.70-bk12/net/unix/af_unix.c
--- linux-2.5.70-bk11/net/unix/af_unix.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/unix/af_unix.c 2003-06-07 04:47:54.000000000 -0700
@@ -145,7 +145,7 @@
return hash&(UNIX_HASH_SIZE-1);
}
-#define unix_peer(sk) ((sk)->pair)
+#define unix_peer(sk) ((sk)->sk_pair)
static inline int unix_our_peer(unix_socket *sk, unix_socket *osk)
{
@@ -215,15 +215,15 @@
unix_socket **list = u->list;
if (list) {
- if (sk->next)
- sk->next->prev = sk->prev;
- if (sk->prev)
- sk->prev->next = sk->next;
+ if (sk->sk_next)
+ sk->sk_next->sk_prev = sk->sk_prev;
+ if (sk->sk_prev)
+ sk->sk_prev->sk_next = sk->sk_next;
if (*list == sk)
- *list = sk->next;
+ *list = sk->sk_next;
u->list = NULL;
- sk->prev = NULL;
- sk->next = NULL;
+ sk->sk_prev = NULL;
+ sk->sk_next = NULL;
__sock_put(sk);
}
}
@@ -234,10 +234,10 @@
BUG_TRAP(!u->list);
u->list = list;
- sk->prev = NULL;
- sk->next = *list;
+ sk->sk_prev = NULL;
+ sk->sk_next = *list;
if (*list)
- (*list)->prev = sk;
+ (*list)->sk_prev = sk;
*list=sk;
sock_hold(sk);
}
@@ -261,7 +261,7 @@
{
unix_socket *s;
- for (s=unix_socket_table[hash^type]; s; s=s->next) {
+ for (s = unix_socket_table[hash ^ type]; s; s = s->sk_next) {
struct unix_sock *u = unix_sk(s);
if (u->addr->len == len &&
@@ -290,8 +290,8 @@
unix_socket *s;
read_lock(&unix_table_lock);
- for (s=unix_socket_table[i->i_ino & (UNIX_HASH_SIZE-1)]; s; s=s->next)
- {
+ for (s = unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]; s;
+ s = s->sk_next) {
struct dentry *dentry = unix_sk(s)->dentry;
if(dentry && dentry->d_inode == i)
@@ -306,18 +306,18 @@
static inline int unix_writable(struct sock *sk)
{
- return ((atomic_read(&sk->wmem_alloc)<<2) <= sk->sndbuf);
+ return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
static void unix_write_space(struct sock *sk)
{
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
if (unix_writable(sk)) {
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk, 2, POLL_OUT);
}
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -326,8 +326,8 @@
* may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
- if (skb_queue_len(&sk->receive_queue)) {
- skb_queue_purge(&sk->receive_queue);
+ if (skb_queue_len(&sk->sk_receive_queue)) {
+ skb_queue_purge(&sk->sk_receive_queue);
wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
/* If one link of bidirectional dgram pipe is disconnected,
@@ -335,8 +335,8 @@
* when peer was not connected to us.
*/
if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
- other->err = ECONNRESET;
- other->error_report(other);
+ other->sk_err = ECONNRESET;
+ other->sk_error_report(other);
}
}
}
@@ -345,11 +345,11 @@
{
struct unix_sock *u = unix_sk(sk);
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
- BUG_TRAP(atomic_read(&sk->wmem_alloc) == 0);
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
BUG_TRAP(!u->list);
- BUG_TRAP(sk->socket==NULL);
+ BUG_TRAP(!sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive unix socket: %p\n", sk);
return;
@@ -378,13 +378,13 @@
/* Clear state */
unix_state_wlock(sk);
sock_orphan(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
dentry = u->dentry;
u->dentry = NULL;
mnt = u->mnt;
u->mnt = NULL;
- state = sk->state;
- sk->state = TCP_CLOSE;
+ state = sk->sk_state;
+ sk->sk_state = TCP_CLOSE;
unix_state_wunlock(sk);
wake_up_interruptible_all(&u->peer_wait);
@@ -392,16 +392,17 @@
skpair=unix_peer(sk);
if (skpair!=NULL) {
- if (sk->type==SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
unix_state_wlock(skpair);
- skpair->shutdown=SHUTDOWN_MASK; /* No more writes*/
- if (!skb_queue_empty(&sk->receive_queue) || embrion)
- skpair->err = ECONNRESET;
+ /* No more writes */
+ skpair->sk_shutdown = SHUTDOWN_MASK;
+ if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ skpair->sk_err = ECONNRESET;
unix_state_wunlock(skpair);
- skpair->state_change(skpair);
- read_lock(&skpair->callback_lock);
+ skpair->sk_state_change(skpair);
+ read_lock(&skpair->sk_callback_lock);
sk_wake_async(skpair,1,POLL_HUP);
- read_unlock(&skpair->callback_lock);
+ read_unlock(&skpair->sk_callback_lock);
}
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
@@ -409,8 +410,7 @@
/* Try to flush out this socket. Throw out buffers at least */
- while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
- {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state==TCP_LISTEN)
unix_release_sock(skb->sk, 1);
/* passed fds are erased in the kfree_skb hook */
@@ -456,16 +456,16 @@
if (!u->addr)
goto out; /* No listens on an unbound socket */
unix_state_wlock(sk);
- if (sk->state != TCP_CLOSE && sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
- if (backlog > sk->max_ack_backlog)
+ if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
- sk->max_ack_backlog=backlog;
- sk->state=TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
- sk->peercred.pid = current->pid;
- sk->peercred.uid = current->euid;
- sk->peercred.gid = current->egid;
+ sk->sk_peercred.pid = current->pid;
+ sk->sk_peercred.uid = current->euid;
+ sk->sk_peercred.gid = current->egid;
err = 0;
out_unlock:
@@ -495,10 +495,9 @@
sock_init_data(sock,sk);
sk_set_owner(sk, THIS_MODULE);
- sk->write_space = unix_write_space;
-
- sk->max_ack_backlog = sysctl_unix_max_dgram_qlen;
- sk->destruct = unix_sock_destructor;
+ sk->sk_write_space = unix_write_space;
+ sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
+ sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
u->dentry = NULL;
u->mnt = NULL;
@@ -589,7 +588,7 @@
yield();
goto retry;
}
- addr->hash ^= sk->type;
+ addr->hash ^= sk->sk_type;
__unix_remove_socket(sk);
u->addr = addr;
@@ -623,13 +622,13 @@
if (!u)
goto put_fail;
- if (u->type == type)
+ if (u->sk_type == type)
update_atime(nd.dentry->d_inode);
path_release(&nd);
err=-EPROTOTYPE;
- if (u->type != type) {
+ if (u->sk_type != type) {
sock_put(u);
goto fail;
}
@@ -693,7 +692,7 @@
memcpy(addr->name, sunaddr, addr_len);
addr->len = addr_len;
- addr->hash = hash^sk->type;
+ addr->hash = hash ^ sk->sk_type;
atomic_set(&addr->refcnt, 1);
if (sunaddr->sun_path[0]) {
@@ -736,7 +735,8 @@
/*
* All right, let's create it.
*/
- mode = S_IFSOCK | (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
+ mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
if (err)
goto out_mknod_dput;
@@ -752,7 +752,7 @@
if (!sunaddr->sun_path[0]) {
err = -EADDRINUSE;
if (__unix_find_socket_byname(sunaddr, addr_len,
- sk->type, hash)) {
+ sk->sk_type, hash)) {
unix_release_addr(addr);
goto out_unlock;
}
@@ -818,7 +818,7 @@
if (!unix_may_send(sk, other))
goto out_unlock;
- err = security_unix_may_send(sk->socket, other->socket);
+ err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
@@ -863,8 +863,9 @@
prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
sched = !sock_flag(other, SOCK_DEAD) &&
- !(other->shutdown & RCV_SHUTDOWN) &&
- skb_queue_len(&other->receive_queue) > other->max_ack_backlog;
+ !(other->sk_shutdown & RCV_SHUTDOWN) &&
+ (skb_queue_len(&other->sk_receive_queue) >
+ other->sk_max_ack_backlog);
unix_state_runlock(other);
@@ -918,7 +919,7 @@
restart:
/* Find listening sock. */
- other=unix_find_other(sunaddr, addr_len, sk->type, hash, &err);
+ other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
if (!other)
goto out;
@@ -933,10 +934,11 @@
}
err = -ECONNREFUSED;
- if (other->state != TCP_LISTEN)
+ if (other->sk_state != TCP_LISTEN)
goto out_unlock;
- if (skb_queue_len(&other->receive_queue) > other->max_ack_backlog) {
+ if (skb_queue_len(&other->sk_receive_queue) >
+ other->sk_max_ack_backlog) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
@@ -961,7 +963,7 @@
Well, and we have to recheck the state after socket locked.
*/
- st = sk->state;
+ st = sk->sk_state;
switch (st) {
case TCP_CLOSE:
@@ -978,14 +980,14 @@
unix_state_wlock(sk);
- if (sk->state != st) {
+ if (sk->sk_state != st) {
unix_state_wunlock(sk);
unix_state_runlock(other);
sock_put(other);
goto restart;
}
- err = security_unix_stream_connect(sock, other->socket, newsk);
+ err = security_unix_stream_connect(sock, other->sk_socket, newsk);
if (err) {
unix_state_wunlock(sk);
goto out_unlock;
@@ -994,14 +996,14 @@
/* The way is open! Fastly set all the necessary fields... */
sock_hold(sk);
- unix_peer(newsk)=sk;
- newsk->state=TCP_ESTABLISHED;
- newsk->type=SOCK_STREAM;
- newsk->peercred.pid = current->pid;
- newsk->peercred.uid = current->euid;
- newsk->peercred.gid = current->egid;
+ unix_peer(newsk) = sk;
+ newsk->sk_state = TCP_ESTABLISHED;
+ newsk->sk_type = SOCK_STREAM;
+ newsk->sk_peercred.pid = current->pid;
+ newsk->sk_peercred.uid = current->euid;
+ newsk->sk_peercred.gid = current->egid;
newu = unix_sk(newsk);
- newsk->sleep = &newu->peer_wait;
+ newsk->sk_sleep = &newu->peer_wait;
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1015,24 +1017,24 @@
}
/* Set credentials */
- sk->peercred = other->peercred;
+ sk->sk_peercred = other->sk_peercred;
sock_hold(newsk);
- unix_peer(sk)=newsk;
- sock->state=SS_CONNECTED;
- sk->state=TCP_ESTABLISHED;
+ unix_peer(sk) = newsk;
+ sock->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
unix_state_wunlock(sk);
/* take ten and and send info to listening sock */
- spin_lock(&other->receive_queue.lock);
- __skb_queue_tail(&other->receive_queue,skb);
+ spin_lock(&other->sk_receive_queue.lock);
+ __skb_queue_tail(&other->sk_receive_queue, skb);
/* Undo artificially decreased inflight after embrion
* is installed to listening socket. */
atomic_inc(&newu->inflight);
- spin_unlock(&other->receive_queue.lock);
+ spin_unlock(&other->sk_receive_queue.lock);
unix_state_runlock(other);
- other->data_ready(other, 0);
+ other->sk_data_ready(other, 0);
sock_put(other);
return 0;
@@ -1059,16 +1061,15 @@
sock_hold(skb);
unix_peer(ska)=skb;
unix_peer(skb)=ska;
- ska->peercred.pid = skb->peercred.pid = current->pid;
- ska->peercred.uid = skb->peercred.uid = current->euid;
- ska->peercred.gid = skb->peercred.gid = current->egid;
-
- if (ska->type != SOCK_DGRAM)
- {
- ska->state=TCP_ESTABLISHED;
- skb->state=TCP_ESTABLISHED;
- socka->state=SS_CONNECTED;
- sockb->state=SS_CONNECTED;
+ ska->sk_peercred.pid = skb->sk_peercred.pid = current->pid;
+ ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
+ ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
+
+ if (ska->sk_type != SOCK_DGRAM) {
+ ska->sk_state = TCP_ESTABLISHED;
+ skb->sk_state = TCP_ESTABLISHED;
+ socka->state = SS_CONNECTED;
+ sockb->state = SS_CONNECTED;
}
return 0;
}
@@ -1085,7 +1086,7 @@
goto out;
err = -EINVAL;
- if (sk->state!=TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1232,7 +1233,7 @@
goto out;
err = -EMSGSIZE;
- if ((unsigned)len > sk->sndbuf - 32)
+ if ((unsigned)len > sk->sk_sndbuf - 32)
goto out;
skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
@@ -1256,7 +1257,8 @@
if (sunaddr == NULL)
goto out_free;
- other = unix_find_other(sunaddr, namelen, sk->type, hash, &err);
+ other = unix_find_other(sunaddr, namelen, sk->sk_type,
+ hash, &err);
if (other==NULL)
goto out_free;
}
@@ -1294,15 +1296,16 @@
}
err = -EPIPE;
- if (other->shutdown&RCV_SHUTDOWN)
+ if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
- err = security_unix_may_send(sk->socket, other->socket);
+ err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
if (unix_peer(other) != sk &&
- skb_queue_len(&other->receive_queue) > other->max_ack_backlog) {
+ (skb_queue_len(&other->sk_receive_queue) >
+ other->sk_max_ack_backlog)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
@@ -1317,9 +1320,9 @@
goto restart;
}
- skb_queue_tail(&other->receive_queue, skb);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_runlock(other);
- other->data_ready(other, len);
+ other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
@@ -1359,7 +1362,7 @@
goto out_err;
if (msg->msg_namelen) {
- err = (sk->state==TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP);
+ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
sunaddr = NULL;
@@ -1369,7 +1372,7 @@
goto out_err;
}
- if (sk->shutdown&SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
while(sent < len)
@@ -1382,8 +1385,8 @@
size=len-sent;
/* Keep two messages in the pipe so it schedules better */
- if (size > sk->sndbuf/2 - 64)
- size = sk->sndbuf/2 - 64;
+ if (size > sk->sk_sndbuf / 2 - 64)
+ size = sk->sk_sndbuf / 2 - 64;
if (size > SKB_MAX_ALLOC)
size = SKB_MAX_ALLOC;
@@ -1418,12 +1421,12 @@
unix_state_rlock(other);
if (sock_flag(other, SOCK_DEAD) ||
- (other->shutdown & RCV_SHUTDOWN))
+ (other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
- skb_queue_tail(&other->receive_queue, skb);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_runlock(other);
- other->data_ready(other, size);
+ other->sk_data_ready(other, size);
sent+=size;
}
sock_put(other);
@@ -1544,23 +1547,23 @@
unix_state_rlock(sk);
for (;;) {
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- if (skb_queue_len(&sk->receive_queue) ||
- sk->err ||
- (sk->shutdown & RCV_SHUTDOWN) ||
+ if (skb_queue_len(&sk->sk_receive_queue) ||
+ sk->sk_err ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
!timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
unix_state_runlock(sk);
timeo = schedule_timeout(timeo);
unix_state_rlock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
unix_state_runlock(sk);
return timeo;
}
@@ -1583,7 +1586,7 @@
long timeo;
err = -EINVAL;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
err = -EOPNOTSUPP;
@@ -1611,7 +1614,7 @@
int chunk;
struct sk_buff *skb;
- skb=skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb==NULL)
{
if (copied >= target)
@@ -1623,7 +1626,7 @@
if ((err = sock_error(sk)) != 0)
break;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
err = -EAGAIN;
if (!timeo)
@@ -1643,7 +1646,7 @@
if (check_creds) {
/* Never glue messages from different writers */
if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} else {
@@ -1661,7 +1664,7 @@
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
break;
@@ -1680,7 +1683,7 @@
/* put the skb back if we didn't use it up.. */
if (skb->len)
{
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
@@ -1697,7 +1700,7 @@
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
/* put message back and return */
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
@@ -1717,14 +1720,14 @@
if (mode) {
unix_state_wlock(sk);
- sk->shutdown |= mode;
+ sk->sk_shutdown |= mode;
other=unix_peer(sk);
if (other)
sock_hold(other);
unix_state_wunlock(sk);
- sk->state_change(sk);
+ sk->sk_state_change(sk);
- if (other && sk->type == SOCK_STREAM) {
+ if (other && sk->sk_type == SOCK_STREAM) {
int peer_mode = 0;
if (mode&RCV_SHUTDOWN)
@@ -1732,15 +1735,15 @@
if (mode&SEND_SHUTDOWN)
peer_mode |= RCV_SHUTDOWN;
unix_state_wlock(other);
- other->shutdown |= peer_mode;
+ other->sk_shutdown |= peer_mode;
unix_state_wunlock(other);
- other->state_change(other);
- read_lock(&other->callback_lock);
+ other->sk_state_change(other);
+ read_lock(&other->sk_callback_lock);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other,1,POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other,1,POLL_IN);
- read_unlock(&other->callback_lock);
+ read_unlock(&other->sk_callback_lock);
}
if (other)
sock_put(other);
@@ -1757,21 +1760,22 @@
switch(cmd)
{
case SIOCOUTQ:
- amount = atomic_read(&sk->wmem_alloc);
+ amount = atomic_read(&sk->sk_wmem_alloc);
err = put_user(amount, (int *)arg);
break;
case SIOCINQ:
{
struct sk_buff *skb;
- if (sk->state==TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
err = -EINVAL;
break;
}
- spin_lock(&sk->receive_queue.lock);
- if((skb=skb_peek(&sk->receive_queue))!=NULL)
+ spin_lock(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
amount=skb->len;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
err = put_user(amount, (int *)arg);
break;
}
@@ -1788,21 +1792,22 @@
struct sock *sk = sock->sk;
unsigned int mask;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
- if (sk->err)
+ if (sk->sk_err)
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->receive_queue) || (sk->shutdown&RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
- if (sk->type == SOCK_STREAM && sk->state==TCP_CLOSE)
+ if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/*
@@ -1837,13 +1842,13 @@
len+=sprintf(buffer+len,"%p: %08X %08X %08X %04X %02X %5lu",
s,
- atomic_read(&s->refcnt),
+ atomic_read(&s->sk_refcnt),
0,
- s->state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
- s->type,
- s->socket ?
- (s->state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
- (s->state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
+ s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
+ s->sk_type,
+ s->sk_socket ?
+ (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
+ (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
if (u->addr) {
diff -urN linux-2.5.70-bk11/net/unix/garbage.c linux-2.5.70-bk12/net/unix/garbage.c
--- linux-2.5.70-bk11/net/unix/garbage.c 2003-05-26 18:00:27.000000000 -0700
+++ linux-2.5.70-bk12/net/unix/garbage.c 2003-06-07 04:47:54.000000000 -0700
@@ -219,8 +219,8 @@
* negative inflight counter to close race window.
* It is trick of course and dirty one.
*/
- if(s->socket && s->socket->file)
- open_count = file_count(s->socket->file);
+ if (s->sk_socket && s->sk_socket->file)
+ open_count = file_count(s->sk_socket->file);
if (open_count > atomic_read(&unix_sk(s)->inflight))
maybe_unmark_and_push(s);
}
@@ -234,15 +234,14 @@
unix_socket *x = pop_stack();
unix_socket *sk;
- spin_lock(&x->receive_queue.lock);
- skb=skb_peek(&x->receive_queue);
+ spin_lock(&x->sk_receive_queue.lock);
+ skb = skb_peek(&x->sk_receive_queue);
/*
* Loop through all but first born
*/
- while(skb && skb != (struct sk_buff *)&x->receive_queue)
- {
+ while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
/*
* Do we have file descriptors ?
*/
@@ -266,12 +265,11 @@
}
}
/* We have to scan not-yet-accepted ones too */
- if (x->state == TCP_LISTEN) {
+ if (x->sk_state == TCP_LISTEN)
maybe_unmark_and_push(skb->sk);
- }
skb=skb->next;
}
- spin_unlock(&x->receive_queue.lock);
+ spin_unlock(&x->sk_receive_queue.lock);
sock_put(x);
}
@@ -283,10 +281,11 @@
if (u->gc_tree == GC_ORPHAN) {
struct sk_buff *nextsk;
- spin_lock(&s->receive_queue.lock);
- skb=skb_peek(&s->receive_queue);
- while(skb && skb != (struct sk_buff *)&s->receive_queue)
- {
+
+ spin_lock(&s->sk_receive_queue.lock);
+ skb = skb_peek(&s->sk_receive_queue);
+ while (skb &&
+ skb != (struct sk_buff *)&s->sk_receive_queue) {
nextsk=skb->next;
/*
* Do we have file descriptors ?
@@ -298,7 +297,7 @@
}
skb=nextsk;
}
- spin_unlock(&s->receive_queue.lock);
+ spin_unlock(&s->sk_receive_queue.lock);
}
u->gc_tree = GC_ORPHAN;
}
diff -urN linux-2.5.70-bk11/net/wanrouter/af_wanpipe.c linux-2.5.70-bk12/net/wanrouter/af_wanpipe.c
--- linux-2.5.70-bk11/net/wanrouter/af_wanpipe.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/wanrouter/af_wanpipe.c 2003-06-07 04:47:54.000000000 -0700
@@ -84,12 +84,12 @@
* When the user sends a packet via send() system call
* the wanpipe_sendmsg() function is executed.
*
- * Each packet is enqueud into sk->write_queue transmit
+ * Each packet is enqueud into sk->sk_write_queue transmit
* queue. When the packet is enqueued, a delayed transmit
* timer is triggerd which acts as a Bottom Half hander.
*
* wanpipe_delay_transmit() function (BH), dequeues packets
- * from the sk->write_queue transmit queue and sends it
+ * from the sk->sk_write_queue transmit queue and sends it
* to the deriver via dev->hard_start_xmit(skb, dev) function.
* Note, this function is actual a function pointer of if_send()
* routine in the wanpipe driver.
@@ -99,7 +99,7 @@
* In order to provide 100% guaranteed packet delivery,
* an atomic 'packet_sent' counter is implemented. Counter
* is incremented for each packet enqueued
- * into sk->write_queue. Counter is decremented each
+ * into sk->sk_write_queue. Counter is decremented each
* time wanpipe_delayed_transmit() function successfuly
* passes the packet to the driver. Before each send(), a poll
* routine checks the sock resources The maximum value of
@@ -263,16 +263,16 @@
}
break;
case WAN_PACKET_CMD:
- sk->state = chan->state;
+ sk->sk_state = chan->state;
/* Bug fix: update Mar6.
* Do not set the sock lcn number here, since
* cmd is not guaranteed to be executed on the
* board, thus Lcn could be wrong */
- sk->data_ready(sk,skb->len);
+ sk->sk_data_ready(sk, skb->len);
kfree_skb(skb);
break;
case WAN_PACKET_ERR:
- sk->state = chan->state;
+ sk->sk_state = chan->state;
if (sock_queue_err_skb(sk,skb)<0){
return -ENOMEM;
}
@@ -284,11 +284,11 @@
}
//??????????????????????
-// if (sk->state == WANSOCK_DISCONNECTED){
-// if (sk->zapped){
+// if (sk->sk_state == WANSOCK_DISCONNECTED){
+// if (sk->sk_zapped) {
// //printk(KERN_INFO "wansock: Disconnected, killing early\n");
// wanpipe_unlink_driver(sk);
-// sk->bound_dev_if = 0;
+// sk->sk_bound_dev_if = 0;
// }
// }
@@ -359,7 +359,7 @@
/* Initialize the new sock structure
*/
- newsk->bound_dev_if = dev->ifindex;
+ newsk->sk_bound_dev_if = dev->ifindex;
newwp = wp_sk(newsk);
newwp->card = wp->card;
@@ -395,7 +395,7 @@
chan->lcn = mbox_ptr->cmd.lcn;
card->u.x.svc_to_dev_map[(chan->lcn%MAX_X25_LCN)] = dev;
- newsk->zapped=0;
+ newsk->sk_zapped = 0;
newwp->num = htons(X25_PROT);
if (wanpipe_do_bind(newsk, dev, newwp->num)) {
@@ -403,7 +403,7 @@
release_device(dev);
return -EINVAL;
}
- newsk->state = WANSOCK_CONNECTING;
+ newsk->sk_state = WANSOCK_CONNECTING;
/* Fill in the standard sock address info */
@@ -416,23 +416,23 @@
sll->sll_halen = 0;
skb->dev = dev;
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
/* We must do this manually, since the sock_queue_rcv_skb()
* function sets the skb->dev to NULL. However, we use
* the dev field in the accept function.*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >=
- (unsigned)sk->rcvbuf){
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
wanpipe_unlink_driver(newsk);
wanpipe_kill_sock_irq (newsk);
- --sk->ack_backlog;
+ --sk->sk_ack_backlog;
return -ENOMEM;
}
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk,skb->len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
return 0;
}
@@ -456,22 +456,22 @@
{
struct sock *sk;
- if (osk->type != SOCK_RAW)
+ if (osk->sk_type != SOCK_RAW)
return NULL;
if ((sk = wanpipe_alloc_socket()) == NULL)
return NULL;
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
wp_sk(sk)->num = wp_sk(osk)->num;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = WANSOCK_CONNECTING;
- sk->sleep = osk->sleep;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = WANSOCK_CONNECTING;
+ sk->sk_sleep = osk->sk_sleep;
return sk;
}
@@ -521,17 +521,17 @@
*
* This function implements a sendto() system call,
* for AF_WANPIPE socket family.
- * During socket bind() sk->bound_dev_if is initialized
+ * During socket bind() sk->sk_bound_dev_if is initialized
* to a correct network device. This number is used
* to find a network device to which the packet should
* be passed to.
*
- * Each packet is queued into sk->write_queue and
+ * Each packet is queued into sk->sk_write_queue and
* delayed transmit bottom half handler is marked for
* execution.
*
* A socket must be in WANSOCK_CONNECTED state before
- * a packet is queued into sk->write_queue.
+ * a packet is queued into sk->sk_write_queue.
*===========================================================*/
static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
@@ -547,10 +547,10 @@
int ifindex, err, reserve = 0;
- if (!sk->zapped)
+ if (!sk->sk_zapped)
return -ENETDOWN;
- if (sk->state != WANSOCK_CONNECTED)
+ if (sk->sk_state != WANSOCK_CONNECTED)
return -ENOTCONN;
if (msg->msg_flags&~MSG_DONTWAIT)
@@ -564,7 +564,7 @@
wp = wp_sk(sk);
if (saddr == NULL) {
- ifindex = sk->bound_dev_if;
+ ifindex = sk->sk_bound_dev_if;
proto = wp->num;
addr = NULL;
@@ -573,7 +573,7 @@
return -EINVAL;
}
- ifindex = sk->bound_dev_if;
+ ifindex = sk->sk_bound_dev_if;
proto = saddr->sll_protocol;
addr = saddr->sll_addr;
}
@@ -619,19 +619,20 @@
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->pkt_type = WAN_PACKET_DATA;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_free;
- if (atomic_read(&sk->wmem_alloc) + skb->truesize > (unsigned int)sk->sndbuf){
+ if (atomic_read(&sk->sk_wmem_alloc) + skb->truesize >
+ (unsigned int)sk->sk_sndbuf){
kfree_skb(skb);
return -ENOBUFS;
}
- skb_queue_tail(&sk->write_queue,skb);
+ skb_queue_tail(&sk->sk_write_queue,skb);
atomic_inc(&wp->packet_sent);
if (!(test_and_set_bit(0, &wp->timer))){
@@ -652,7 +653,7 @@
* wanpipe_delayed_tarnsmit
*
* Transmit bottom half handler. It dequeues packets
- * from sk->write_queue and passes them to the
+ * from sk->sk_write_queue and passes them to the
* driver. If the driver is busy, the packet is
* re-enqueued.
*
@@ -675,7 +676,7 @@
return;
}
- if (sk->state != WANSOCK_CONNECTED || !sk->zapped){
+ if (sk->sk_state != WANSOCK_CONNECTED || !sk->sk_zapped) {
clear_bit(0, &wp->timer);
DBG_PRINTK(KERN_INFO "wansock: Tx Timer, State not CONNECTED\n");
return;
@@ -701,13 +702,13 @@
}
/* Check for a packet in the fifo and send */
- if ((skb=skb_dequeue(&sk->write_queue)) != NULL){
+ if ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL){
if (dev->hard_start_xmit(skb, dev) != 0){
/* Driver failed to transmit, re-enqueue
* the packet and retry again later */
- skb_queue_head(&sk->write_queue,skb);
+ skb_queue_head(&sk->sk_write_queue,skb);
clear_bit(0,&wanpipe_tx_critical);
return;
}else{
@@ -718,11 +719,11 @@
*/
atomic_dec(&wp->packet_sent);
- if (skb_peek(&sk->write_queue) == NULL){
+ if (skb_peek(&sk->sk_write_queue) == NULL) {
/* If there is nothing to send, kick
* the poll routine, which will trigger
* the application to send more data */
- sk->data_ready(sk,0);
+ sk->sk_data_ready(sk, 0);
clear_bit(0, &wp->timer);
}else{
/* Reschedule as fast as possible */
@@ -763,10 +764,10 @@
int err=0;
DECLARE_WAITQUEUE(wait, current);
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (dev == NULL){
printk(KERN_INFO "wansock: Exec failed no dev %i\n",
- sk->bound_dev_if);
+ sk->sk_bound_dev_if);
return -ENODEV;
}
dev_put(dev);
@@ -799,7 +800,7 @@
atomic_set(&chan->command, cmd);
}
- add_wait_queue(sk->sleep,&wait);
+ add_wait_queue(sk->sk_sleep,&wait);
current->state = TASK_INTERRUPTIBLE;
for (;;){
if (((mbox_cmd_t*)wp->mbox)->cmd.result != 0x7F) {
@@ -813,7 +814,7 @@
schedule();
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep,&wait);
+ remove_wait_queue(sk->sk_sleep,&wait);
return err;
}
@@ -830,27 +831,29 @@
struct sock *sk=(struct sock *)data;
wanpipe_opt *wp = wp_sk(sk);
- if ((!atomic_read(&sk->wmem_alloc) && !atomic_read(&sk->rmem_alloc)) ||
+ if ((!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc)) ||
(++wp->force == 5)) {
- if (atomic_read(&sk->wmem_alloc) || atomic_read(&sk->rmem_alloc))
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc))
printk(KERN_INFO "wansock: Warning, Packet Discarded due to sock shutdown!\n");
kfree(wp);
wp_sk(sk) = NULL;
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i ! :delay.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
return;
}
- sk->timer.expires=jiffies+5*HZ;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + 5 * HZ;
+ add_timer(&sk->sk_timer);
printk(KERN_INFO "wansock: packet sk destroy delayed\n");
}
@@ -866,11 +869,11 @@
struct net_device *dev;
wanpipe_common_t *chan=NULL;
- sk->zapped=0;
- sk->state = WANSOCK_DISCONNECTED;
+ sk->sk_zapped = 0;
+ sk->sk_state = WANSOCK_DISCONNECTED;
wp_sk(sk)->dev = NULL;
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev){
printk(KERN_INFO "wansock: No dev on release\n");
return;
@@ -915,7 +918,7 @@
chan->mbox = wp->mbox;
chan->tx_timer = &wp->tx_timer;
wp->dev = dev;
- sk->zapped = 1;
+ sk->sk_zapped = 1;
clear_bit(0,&chan->common_critical);
}
@@ -966,23 +969,23 @@
*/
if (wp->num == htons(X25_PROT) &&
- sk->state != WANSOCK_DISCONNECTED && sk->zapped) {
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ sk->sk_state != WANSOCK_DISCONNECTED && sk->sk_zapped) {
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
wanpipe_common_t *chan;
if (dev){
chan=dev->priv;
atomic_set(&chan->disconnect,1);
DBG_PRINTK(KERN_INFO "wansock: Sending Clear Indication %i\n",
- sk->state);
+ sk->sk_state);
dev_put(dev);
}
}
set_bit(1,&wanpipe_tx_critical);
write_lock(&wanpipe_sklist_lock);
- for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -999,35 +1002,37 @@
* Now the socket is dead. No more input will appear.
*/
- sk->state_change(sk); /* It is useless. Just for sanity. */
+ sk->sk_state_change(sk); /* It is useless. Just for sanity. */
sock->sk = NULL;
- sk->socket = NULL;
+ sk->sk_socket = NULL;
sock_set_flag(sk, SOCK_DEAD);
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
- skb_queue_purge(&sk->error_queue);
-
- if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
- del_timer(&sk->timer);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&sk->sk_error_queue);
+
+ if (atomic_read(&sk->sk_rmem_alloc) ||
+ atomic_read(&sk->sk_wmem_alloc)) {
+ del_timer(&sk->sk_timer);
printk(KERN_INFO "wansock: Killing in Timer R %i , W %i\n",
- atomic_read(&sk->rmem_alloc),atomic_read(&sk->wmem_alloc));
- sk->timer.data=(unsigned long)sk;
- sk->timer.expires=jiffies+HZ;
- sk->timer.function=wanpipe_destroy_timer;
- add_timer(&sk->timer);
+ atomic_read(&sk->sk_rmem_alloc),
+ atomic_read(&sk->sk_wmem_alloc));
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = wanpipe_destroy_timer;
+ add_timer(&sk->sk_timer);
return 0;
}
kfree(wp);
wp_sk(sk) = NULL;
- if (atomic_read(&sk->refcnt) != 1){
+ if (atomic_read(&sk->sk_refcnt) != 1) {
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i !:release.\n",
- atomic_read(&sk->refcnt));
- atomic_set(&sk->refcnt,1);
+ atomic_read(&sk->sk_refcnt));
+ atomic_set(&sk->sk_refcnt, 1);
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1046,10 +1051,10 @@
static void check_write_queue(struct sock *sk)
{
- if (sk->state != WANSOCK_CONNECTED)
+ if (sk->sk_state != WANSOCK_CONNECTED)
return;
- if (!atomic_read(&sk->wmem_alloc))
+ if (!atomic_read(&sk->sk_wmem_alloc))
return;
printk(KERN_INFO "wansock: MAJOR ERROR, Data lost on sock release !!!\n");
@@ -1071,8 +1076,9 @@
struct sk_buff *skb=NULL;
struct sock *deadsk=NULL;
- if (sk->state == WANSOCK_LISTEN || sk->state == WANSOCK_BIND_LISTEN){
- while ((skb=skb_dequeue(&sk->receive_queue))!=NULL){
+ if (sk->sk_state == WANSOCK_LISTEN ||
+ sk->sk_state == WANSOCK_BIND_LISTEN) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if ((deadsk = get_newsk_from_skb(skb))){
DBG_PRINTK (KERN_INFO "wansock: RELEASE: FOUND DEAD SOCK\n");
sock_set_flag(deadsk, SOCK_DEAD);
@@ -1080,15 +1086,15 @@
}
kfree_skb(skb);
}
- if (sk->zapped)
+ if (sk->sk_zapped)
wanpipe_unlink_card(sk);
}else{
- if (sk->zapped)
+ if (sk->sk_zapped)
wanpipe_unlink_driver(sk);
}
- sk->state = WANSOCK_DISCONNECTED;
- sk->bound_dev_if = 0;
- sk->zapped=0;
+ sk->sk_state = WANSOCK_DISCONNECTED;
+ sk->sk_bound_dev_if = 0;
+ sk->sk_zapped = 0;
wp = wp_sk(sk);
if (wp && wp->mbox) {
@@ -1108,11 +1114,11 @@
static void start_cleanup_timer (struct sock *sk)
{
- del_timer(&sk->timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.expires = jiffies + HZ;
- sk->timer.function = wanpipe_kill_sock_timer;
- add_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = wanpipe_kill_sock_timer;
+ add_timer(&sk->sk_timer);
}
@@ -1137,15 +1143,15 @@
* appropriate locks */
if (test_bit(1,&wanpipe_tx_critical)){
- sk->timer.expires=jiffies+10;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + 10;
+ add_timer(&sk->sk_timer);
return;
}
write_lock(&wanpipe_sklist_lock);
- for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -1154,8 +1160,8 @@
if (wp_sk(sk)->num == htons(X25_PROT) &&
- sk->state != WANSOCK_DISCONNECTED){
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ sk->sk_state != WANSOCK_DISCONNECTED) {
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
wanpipe_common_t *chan;
if (dev){
chan=dev->priv;
@@ -1166,20 +1172,21 @@
release_driver(sk);
- sk->socket = NULL;
+ sk->sk_socket = NULL;
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
- skb_queue_purge(&sk->error_queue);
-
- if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
- del_timer(&sk->timer);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&sk->sk_error_queue);
+
+ if (atomic_read(&sk->sk_rmem_alloc) ||
+ atomic_read(&sk->sk_wmem_alloc)) {
+ del_timer(&sk->sk_timer);
printk(KERN_INFO "wansock: Killing SOCK in Timer\n");
- sk->timer.data=(unsigned long)sk;
- sk->timer.expires=jiffies+HZ;
- sk->timer.function=wanpipe_destroy_timer;
- add_timer(&sk->timer);
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = wanpipe_destroy_timer;
+ add_timer(&sk->sk_timer);
return;
}
@@ -1188,10 +1195,10 @@
wp_sk(sk) = NULL;
}
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i ! :timer.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1210,16 +1217,16 @@
* appropriate locks */
write_lock(&wanpipe_sklist_lock);
- for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
}
write_unlock(&wanpipe_sklist_lock);
- sk->socket = NULL;
+ sk->sk_socket = NULL;
if (wp_sk(sk)) {
@@ -1227,10 +1234,10 @@
wp_sk(sk) = NULL;
}
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i ! :timer.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1244,17 +1251,17 @@
if (!sk)
return;
- sk->socket = NULL;
+ sk->sk_socket = NULL;
if (wp_sk(sk)) {
kfree(wp_sk(sk));
wp_sk(sk) = NULL;
}
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i !:listen.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1277,7 +1284,7 @@
wanpipe_common_t *chan=NULL;
int err=0;
- if (sk->zapped){
+ if (sk->sk_zapped) {
err = -EALREADY;
goto bind_unlock_exit;
}
@@ -1293,29 +1300,29 @@
if (dev) {
if (dev->flags&IFF_UP) {
chan=dev->priv;
- sk->state = chan->state;
+ sk->sk_state = chan->state;
if (wp->num == htons(X25_PROT) &&
- sk->state != WANSOCK_DISCONNECTED &&
- sk->state != WANSOCK_CONNECTING){
+ sk->sk_state != WANSOCK_DISCONNECTED &&
+ sk->sk_state != WANSOCK_CONNECTING) {
DBG_PRINTK(KERN_INFO
"wansock: Binding to Device not DISCONNECTED %i\n",
- sk->state);
+ sk->sk_state);
release_device(dev);
err = -EAGAIN;
goto bind_unlock_exit;
}
wanpipe_link_driver(dev,sk);
- sk->bound_dev_if = dev->ifindex;
+ sk->sk_bound_dev_if = dev->ifindex;
/* X25 Specific option */
if (wp->num == htons(X25_PROT))
wp_sk(sk)->svc = chan->svc;
} else {
- sk->err = ENETDOWN;
- sk->error_report(sk);
+ sk->sk_err = ENETDOWN;
+ sk->sk_error_report(sk);
release_device(dev);
err = -EINVAL;
}
@@ -1386,7 +1393,7 @@
if (sll->sll_protocol)
wp->num = sll->sll_protocol;
- sk->state = WANSOCK_BIND_LISTEN;
+ sk->sk_state = WANSOCK_BIND_LISTEN;
return 0;
}else if (!strcmp(sll->sll_device,"svc_connect")){
@@ -1527,16 +1534,16 @@
if ((sk = wanpipe_alloc_socket()) == NULL)
return -ENOBUFS;
- sk->reuse = 1;
+ sk->sk_reuse = 1;
sock->ops = &wanpipe_ops;
sock_init_data(sock,sk);
- sk->zapped=0;
- sk->family = PF_WANPIPE;
- wp_sk(sk)->num = protocol;
- sk->state = WANSOCK_DISCONNECTED;
- sk->ack_backlog = 0;
- sk->bound_dev_if=0;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_WANPIPE;
+ wp_sk(sk)->num = protocol;
+ sk->sk_state = WANSOCK_DISCONNECTED;
+ sk->sk_ack_backlog = 0;
+ sk->sk_bound_dev_if = 0;
atomic_inc(&wanpipe_socks_nr);
@@ -1544,7 +1551,7 @@
* can also change the list */
set_bit(1,&wanpipe_tx_critical);
write_lock(&wanpipe_sklist_lock);
- sk->next = wanpipe_sklist;
+ sk->sk_next = wanpipe_sklist;
wanpipe_sklist = sk;
sock_hold(sk);
write_unlock(&wanpipe_sklist_lock);
@@ -1586,7 +1593,7 @@
*/
if (flags & MSG_OOB){
- skb=skb_dequeue(&sk->error_queue);
+ skb = skb_dequeue(&sk->sk_error_queue);
}else{
skb=skb_recv_datagram(sk,flags,1,&err);
}
@@ -1653,7 +1660,7 @@
struct net_device *dev = NULL;
wanpipe_common_t *chan=NULL;
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev)
return;
@@ -1663,7 +1670,8 @@
return;
if (atomic_read(&chan->receive_block)){
- if (atomic_read(&sk->rmem_alloc) < ((unsigned)sk->rcvbuf*0.9) ){
+ if (atomic_read(&sk->sk_rmem_alloc) <
+ ((unsigned)sk->sk_rcvbuf * 0.9)) {
printk(KERN_INFO "wansock: Queuing task for wanpipe\n");
atomic_set(&chan->receive_block,0);
wanpipe_queue_tq(&chan->wanpipe_task);
@@ -1689,9 +1697,9 @@
struct wan_sockaddr_ll *sll = (struct wan_sockaddr_ll*)uaddr;
sll->sll_family = AF_WANPIPE;
- sll->sll_ifindex = sk->bound_dev_if;
+ sll->sll_ifindex = sk->sk_bound_dev_if;
sll->sll_protocol = wp_sk(sk)->num;
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
@@ -1725,7 +1733,7 @@
struct net_device *dev = (struct net_device *)data;
struct wanpipe_opt *po;
- for (sk = wanpipe_sklist; sk; sk = sk->next) {
+ for (sk = wanpipe_sklist; sk; sk = sk->sk_next) {
if ((po = wp_sk(sk)) == NULL)
continue;
@@ -1735,25 +1743,25 @@
switch (msg) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
- if (dev->ifindex == sk->bound_dev_if) {
+ if (dev->ifindex == sk->sk_bound_dev_if) {
printk(KERN_INFO "wansock: Device down %s\n",dev->name);
- if (sk->zapped){
+ if (sk->sk_zapped) {
wanpipe_unlink_driver(sk);
- sk->err = ENETDOWN;
- sk->error_report(sk);
+ sk->sk_err = ENETDOWN;
+ sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
printk(KERN_INFO "wansock: Unregistering Device: %s\n",
dev->name);
wanpipe_unlink_driver(sk);
- sk->bound_dev_if = 0;
+ sk->sk_bound_dev_if = 0;
}
}
break;
case NETDEV_UP:
- if (dev->ifindex == sk->bound_dev_if &&
- po->num && !sk->zapped) {
+ if (dev->ifindex == sk->sk_bound_dev_if &&
+ po->num && !sk->sk_zapped) {
printk(KERN_INFO "wansock: Registering Device: %s\n",
dev->name);
wanpipe_link_driver(dev,sk);
@@ -1781,20 +1789,21 @@
switch(cmd)
{
case SIOCGSTAMP:
- if(sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
err = -EFAULT;
- if (!copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)))
+ if (!copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)))
err = 0;
return err;
case SIOC_WANPIPE_CHECK_TX:
- return atomic_read(&sk->wmem_alloc);
+ return atomic_read(&sk->sk_wmem_alloc);
case SIOC_WANPIPE_SOCK_STATE:
- if (sk->state == WANSOCK_CONNECTED)
+ if (sk->sk_state == WANSOCK_CONNECTED)
return 0;
return 1;
@@ -1825,7 +1834,7 @@
case SIOC_WANPIPE_SET_NONBLOCK:
- if (sk->state != WANSOCK_DISCONNECTED)
+ if (sk->sk_state != WANSOCK_DISCONNECTED)
return -EINVAL;
sock->file->f_flags |= O_NONBLOCK;
@@ -1876,7 +1885,7 @@
int cnt=0, err=0;
wan_debug_t *dbg_data = (wan_debug_t *)arg;
- for (sk = wanpipe_sklist; sk; sk = sk->next){
+ for (sk = wanpipe_sklist; sk; sk = sk->sk_next) {
wanpipe_opt *wp = wp_sk(sk);
if (sk == origsk){
@@ -1885,25 +1894,31 @@
if ((err=put_user(1, &dbg_data->debug[cnt].free)))
return err;
- if ((err=put_user(sk->state, &dbg_data->debug[cnt].sk_state)))
+ if ((err = put_user(sk->sk_state,
+ &dbg_data->debug[cnt].state_sk)))
return err;
- if ((err=put_user(sk->rcvbuf, &dbg_data->debug[cnt].rcvbuf)))
+ if ((err = put_user(sk->sk_rcvbuf,
+ &dbg_data->debug[cnt].rcvbuf)))
return err;
- if ((err=put_user(atomic_read(&sk->rmem_alloc), &dbg_data->debug[cnt].rmem)))
+ if ((err = put_user(atomic_read(&sk->sk_rmem_alloc),
+ &dbg_data->debug[cnt].rmem)))
return err;
- if ((err=put_user(atomic_read(&sk->wmem_alloc), &dbg_data->debug[cnt].wmem)))
+ if ((err = put_user(atomic_read(&sk->sk_wmem_alloc),
+ &dbg_data->debug[cnt].wmem)))
return err;
- if ((err=put_user(sk->sndbuf, &dbg_data->debug[cnt].sndbuf)))
+ if ((err = put_user(sk->sk_sndbuf,
+ &dbg_data->debug[cnt].sndbuf)))
return err;
if ((err=put_user(sk_count, &dbg_data->debug[cnt].sk_count)))
return err;
if ((err=put_user(wp->poll_cnt, &dbg_data->debug[cnt].poll_cnt)))
return err;
- if ((err=put_user(sk->bound_dev_if, &dbg_data->debug[cnt].bound)))
+ if ((err = put_user(sk->sk_bound_dev_if,
+ &dbg_data->debug[cnt].bound)))
return err;
- if (sk->bound_dev_if){
- dev = dev_get_by_index(sk->bound_dev_if);
+ if (sk->sk_bound_dev_if) {
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev)
continue;
@@ -2014,7 +2029,7 @@
if (!wp_sk(sk)->mbox) {
void *mbox_ptr;
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev)
return -ENODEV;
@@ -2078,28 +2093,28 @@
++wp_sk(sk)->poll_cnt;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
- if (sk->err || !skb_queue_empty(&sk->error_queue)){
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) {
mask |= POLLPRI;
return mask;
}
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->receive_queue)){
+ if (!skb_queue_empty(&sk->sk_receive_queue)) {
mask |= POLLIN | POLLRDNORM;
}
/* connection hasn't started yet */
- if (sk->state == WANSOCK_CONNECTING){
+ if (sk->sk_state == WANSOCK_CONNECTING) {
return mask;
}
- if (sk->state == WANSOCK_DISCONNECTED){
+ if (sk->sk_state == WANSOCK_DISCONNECTED) {
mask = POLLPRI;
return mask;
}
@@ -2120,7 +2135,7 @@
if (sock_writeable(sk)){
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
}else{
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
}
return mask;
@@ -2142,10 +2157,10 @@
if (wp_sk(sk)->num != htons(X25_PROT))
return -EINVAL;
- if (sk->state == WANSOCK_BIND_LISTEN) {
+ if (sk->sk_state == WANSOCK_BIND_LISTEN) {
- sk->max_ack_backlog = backlog;
- sk->state = WANSOCK_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = WANSOCK_LISTEN;
return 0;
}else{
printk(KERN_INFO "wansock: Listening sock was not binded\n");
@@ -2174,7 +2189,7 @@
card->sk=sk;
card->func=wanpipe_listen_rcv;
- sk->zapped=1;
+ sk->sk_zapped = 1;
return 0;
}
@@ -2225,7 +2240,7 @@
case SIOC_WANPIPE_ACCEPT_CALL:
- if (sk->state != WANSOCK_CONNECTING){
+ if (sk->sk_state != WANSOCK_CONNECTING) {
err = -EHOSTDOWN;
break;
}
@@ -2238,7 +2253,7 @@
* Do not set the sock lcn number here, since
* it is done in wanpipe_listen_rcv().
*/
- if (sk->state == WANSOCK_CONNECTED){
+ if (sk->sk_state == WANSOCK_CONNECTED) {
wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
DBG_PRINTK(KERN_INFO "\nwansock: Accept OK %i\n",
wp->lcn);
@@ -2254,7 +2269,7 @@
case SIOC_WANPIPE_CLEAR_CALL:
- if (sk->state == WANSOCK_DISCONNECTED){
+ if (sk->sk_state == WANSOCK_DISCONNECTED) {
err = -EINVAL;
break;
}
@@ -2264,7 +2279,8 @@
* if so, check whether user wants to wait until data
* is transmitted, or clear a call and drop packets */
- if (atomic_read(&sk->wmem_alloc) || check_driver_busy(sk)){
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ check_driver_busy(sk)) {
mbox_cmd_t *mbox = wp->mbox;
if (mbox->cmd.qdm & 0x80){
mbox->cmd.result = 0x35;
@@ -2273,14 +2289,14 @@
}
}
- sk->state = WANSOCK_DISCONNECTING;
+ sk->sk_state = WANSOCK_DISCONNECTING;
err = execute_command(sk,X25_CLEAR_CALL,0);
if (err < 0)
break;
err = -ECONNREFUSED;
- if (sk->state == WANSOCK_DISCONNECTED){
+ if (sk->sk_state == WANSOCK_DISCONNECTED) {
DBG_PRINTK(KERN_INFO "\nwansock: CLEAR OK %i\n",
wp->lcn);
wp->lcn = 0;
@@ -2290,7 +2306,7 @@
case SIOC_WANPIPE_RESET_CALL:
- if (sk->state != WANSOCK_CONNECTED){
+ if (sk->sk_state != WANSOCK_CONNECTED) {
err = -EINVAL;
break;
}
@@ -2300,7 +2316,8 @@
* if so, check whether user wants to wait until data
* is transmitted, or reset a call and drop packets */
- if (atomic_read(&sk->wmem_alloc) || check_driver_busy(sk)){
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ check_driver_busy(sk)) {
mbox_cmd_t *mbox = wp->mbox;
if (mbox->cmd.qdm & 0x80){
mbox->cmd.result = 0x35;
@@ -2324,7 +2341,7 @@
if (err < 0)
break;
- if (sk->state == WANSOCK_CONNECTED){
+ if (sk->sk_state == WANSOCK_CONNECTED) {
wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
@@ -2332,7 +2349,8 @@
wp->lcn);
err = 0;
- }else if (sk->state == WANSOCK_CONNECTING && (flags & O_NONBLOCK)){
+ } else if (sk->sk_state == WANSOCK_CONNECTING &&
+ (flags & O_NONBLOCK)) {
wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
DBG_PRINTK(KERN_INFO "\nwansock: Place Call OK: Waiting %i\n",
wp->lcn);
@@ -2355,7 +2373,7 @@
static int check_driver_busy (struct sock *sk)
{
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
wanpipe_common_t *chan;
if (!dev)
@@ -2394,19 +2412,19 @@
if ((sk = sock->sk) == NULL)
return -EINVAL;
- if (sk->type != SOCK_RAW)
+ if (sk->sk_type != SOCK_RAW)
return -EOPNOTSUPP;
- if (sk->state != WANSOCK_LISTEN)
+ if (sk->sk_state != WANSOCK_LISTEN)
return -EINVAL;
if (wp_sk(sk)->num != htons(X25_PROT))
return -EINVAL;
- add_wait_queue(sk->sleep,&wait);
+ add_wait_queue(sk->sk_sleep,&wait);
current->state = TASK_INTERRUPTIBLE;
for (;;){
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb){
err=0;
break;
@@ -2418,7 +2436,7 @@
schedule();
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep,&wait);
+ remove_wait_queue(sk->sk_sleep,&wait);
if (err != 0)
return err;
@@ -2430,18 +2448,18 @@
set_bit(1,&wanpipe_tx_critical);
write_lock(&wanpipe_sklist_lock);
- newsk->next = wanpipe_sklist;
+ newsk->sk_next = wanpipe_sklist;
wanpipe_sklist = newsk;
sock_hold(sk);
write_unlock(&wanpipe_sklist_lock);
clear_bit(1,&wanpipe_tx_critical);
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
kfree_skb(skb);
@@ -2496,16 +2514,16 @@
if (wp_sk(sk)->num != htons(X25_PROT))
return -EINVAL;
- if (sk->state == WANSOCK_CONNECTED)
+ if (sk->sk_state == WANSOCK_CONNECTED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- if (sk->state != WAN_DISCONNECTED){
+ if (sk->sk_state != WAN_DISCONNECTED) {
printk(KERN_INFO "wansock: Trying to connect on channel NON DISCONNECT\n");
return -ECONNREFUSED;
}
- sk->state = WANSOCK_DISCONNECTED;
- sock->state = SS_UNCONNECTED;
+ sk->sk_state = WANSOCK_DISCONNECTED;
+ sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct wan_sockaddr_ll))
return -EINVAL;
@@ -2513,16 +2531,16 @@
if (addr->sll_family != AF_WANPIPE)
return -EINVAL;
- if ((dev = dev_get_by_index(sk->bound_dev_if)) == NULL)
+ if ((dev = dev_get_by_index(sk->sk_bound_dev_if)) == NULL)
return -ENETUNREACH;
dev_put(dev);
- if (!sk->zapped) /* Must bind first - autobinding does not work */
+ if (!sk->sk_zapped) /* Must bind first - autobinding does not work */
return -EINVAL;
sock->state = SS_CONNECTING;
- sk->state = WANSOCK_CONNECTING;
+ sk->sk_state = WANSOCK_CONNECTING;
if (!wp_sk(sk)->mbox) {
if (wp_sk (sk)->svc)
@@ -2536,15 +2554,15 @@
if ((err=wanpipe_exec_cmd(sk, X25_PLACE_CALL,flags)) != 0){
sock->state = SS_UNCONNECTED;
- sk->state = WANSOCK_CONNECTED;
+ sk->sk_state = WANSOCK_CONNECTED;
return err;
}
- if (sk->state != WANSOCK_CONNECTED && (flags & O_NONBLOCK)){
+ if (sk->sk_state != WANSOCK_CONNECTED && (flags & O_NONBLOCK)) {
return 0;
}
- if (sk->state != WANSOCK_CONNECTED) {
+ if (sk->sk_state != WANSOCK_CONNECTED) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
diff -urN linux-2.5.70-bk11/net/x25/af_x25.c linux-2.5.70-bk12/net/x25/af_x25.c
--- linux-2.5.70-bk11/net/x25/af_x25.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/x25/af_x25.c 2003-06-07 04:47:54.000000000 -0700
@@ -158,15 +158,15 @@
write_lock_bh(&x25_list_lock);
if ((s = x25_list) == sk)
- x25_list = s->next;
- else while (s && s->next) {
- if (s->next == sk) {
- s->next = sk->next;
+ x25_list = s->sk_next;
+ else while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
sock_put(sk);
break;
}
- s = s->next;
+ s = s->sk_next;
}
write_unlock_bh(&x25_list_lock);
@@ -181,7 +181,7 @@
write_lock_bh(&x25_list_lock);
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
x25_disconnect(s, ENETUNREACH, 0, 0);
@@ -230,7 +230,7 @@
static void x25_insert_socket(struct sock *sk)
{
write_lock_bh(&x25_list_lock);
- sk->next = x25_list;
+ sk->sk_next = x25_list;
x25_list = sk;
sock_hold(sk);
write_unlock_bh(&x25_list_lock);
@@ -246,12 +246,12 @@
read_lock_bh(&x25_list_lock);
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if ((!strcmp(addr->x25_addr,
x25_sk(s)->source_addr.x25_addr) ||
!strcmp(addr->x25_addr,
null_x25_address.x25_addr)) &&
- s->state == TCP_LISTEN)
+ s->sk_state == TCP_LISTEN)
break;
if (s)
@@ -267,7 +267,7 @@
{
struct sock *s;
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb)
break;
if (s)
@@ -339,7 +339,7 @@
x25_remove_socket(sk);
x25_clear_queues(sk); /* Flush the queues */
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/*
* Queue the unaccepted socket for death
@@ -352,13 +352,14 @@
kfree_skb(skb);
}
- if (atomic_read(&sk->wmem_alloc) || atomic_read(&sk->rmem_alloc)) {
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + 10 * HZ;
- sk->timer.function = x25_destroy_timer;
- sk->timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ sk->sk_timer.function = x25_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
} else
sk_free(sk);
release_sock(sk);
@@ -428,10 +429,10 @@
struct sock *sk = sock->sk;
int rc = -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
rc = 0;
}
@@ -488,8 +489,8 @@
init_timer(&x25->timer);
sock->ops = &x25_proto_ops;
- sk->protocol = protocol;
- sk->backlog_rcv = x25_backlog_rcv;
+ sk->sk_protocol = protocol;
+ sk->sk_backlog_rcv = x25_backlog_rcv;
x25->t21 = sysctl_x25_call_request_timeout;
x25->t22 = sysctl_x25_reset_request_timeout;
@@ -513,7 +514,7 @@
struct sock *sk = NULL;
struct x25_opt *x25, *ox25;
- if (osk->type != SOCK_SEQPACKET)
+ if (osk->sk_type != SOCK_SEQPACKET)
goto out;
if ((sk = x25_alloc_socket()) == NULL)
@@ -521,17 +522,17 @@
x25 = x25_sk(sk);
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
- sk->backlog_rcv = osk->backlog_rcv;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
+ sk->sk_backlog_rcv = osk->sk_backlog_rcv;
ox25 = x25_sk(osk);
x25->t21 = ox25->t21;
@@ -571,16 +572,16 @@
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25_start_t23timer(sk);
x25->state = X25_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
}
- sock->sk = NULL;
- sk->socket = NULL; /* Not used, but we should do this */
+ sock->sk = NULL;
+ sk->sk_socket = NULL; /* Not used, but we should do this */
out:
return 0;
}
@@ -590,14 +591,14 @@
struct sock *sk = sock->sk;
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
- if (!sk->zapped ||
+ if (!sk->sk_zapped ||
addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25)
return -EINVAL;
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
return 0;
@@ -608,7 +609,7 @@
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ERESTARTSYS;
@@ -616,11 +617,11 @@
break;
rc = sock_error(sk);
if (rc) {
- sk->socket->state = SS_UNCONNECTED;
+ sk->sk_socket->state = SS_UNCONNECTED;
break;
}
rc = 0;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
schedule();
lock_sock(sk);
@@ -628,7 +629,7 @@
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -642,22 +643,22 @@
int rc = 0;
lock_sock(sk);
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
goto out; /* Connect completed during a ERESTARTSYS event */
}
rc = -ECONNREFUSED;
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
goto out;
}
rc = -EISCONN; /* No reconnect on a seqpacket socket */
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
goto out;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rc = -EINVAL;
@@ -681,7 +682,7 @@
goto out_put_neigh;
rc = -EINVAL;
- if (sk->zapped) /* Must bind first - autobinding does not work */
+ if (sk->sk_zapped) /* Must bind first - autobinding does not work */
goto out_put_neigh;
if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
@@ -691,7 +692,7 @@
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
x25->state = X25_STATE_1;
@@ -702,7 +703,7 @@
/* Now the loop */
rc = -EINPROGRESS;
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
goto out_put_neigh;
rc = x25_wait_for_connection_establishment(sk);
@@ -726,10 +727,10 @@
DECLARE_WAITQUEUE(wait, current);
int rc = 0;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -738,7 +739,7 @@
if (!timeout)
break;
rc = 0;
- if (skb_queue_empty(&sk->receive_queue)) {
+ if (skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -746,7 +747,7 @@
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -757,29 +758,29 @@
struct sk_buff *skb;
int rc = -EINVAL;
- if (!sk || sk->state != TCP_LISTEN)
+ if (!sk || sk->sk_state != TCP_LISTEN)
goto out;
rc = -EOPNOTSUPP;
- if (sk->type != SOCK_SEQPACKET)
+ if (sk->sk_type != SOCK_SEQPACKET)
goto out;
- rc = x25_wait_for_data(sk, sk->rcvtimeo);
+ rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out;
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
rc = -EINVAL;
if (!skb->sk)
goto out;
- newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk = skb->sk;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
newsock->state = SS_CONNECTED;
rc = 0;
@@ -795,7 +796,7 @@
struct x25_opt *x25 = x25_sk(sk);
if (peer) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sx25->sx25_addr = x25->dest_addr;
} else
@@ -836,7 +837,7 @@
/*
* We can't accept the Call Request.
*/
- if (!sk || sk->ack_backlog == sk->max_ack_backlog)
+ if (!sk || sk->sk_ack_backlog == sk->sk_max_ack_backlog)
goto out_clear_request;
/*
@@ -865,7 +866,7 @@
skb_pull(skb, len);
skb->sk = make;
- make->state = TCP_ESTABLISHED;
+ make->sk_state = TCP_ESTABLISHED;
makex25 = x25_sk(make);
makex25->lci = lci;
@@ -887,17 +888,17 @@
makex25->state = X25_STATE_3;
- sk->ack_backlog++;
- make->pair = sk;
+ sk->sk_ack_backlog++;
+ make->sk_pair = sk;
x25_insert_socket(make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
x25_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
rc = 1;
sock_put(sk);
out:
@@ -930,11 +931,11 @@
goto out;
rc = -EADDRNOTAVAIL;
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
rc = -EPIPE;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
goto out;
}
@@ -961,7 +962,7 @@
* to SIGPIPE, EPIPE;
*/
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
sx25.sx25_family = AF_X25;
@@ -1046,7 +1047,7 @@
SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out_kfree_skb;
if (msg->msg_flags & MSG_OOB)
@@ -1101,7 +1102,7 @@
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if (flags & MSG_OOB) {
@@ -1183,8 +1184,8 @@
switch (cmd) {
case TIOCOUTQ: {
- int amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ int amount = sk->sk_sndbuf -
+ atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
rc = put_user(amount, (unsigned int *)arg);
@@ -1198,7 +1199,7 @@
* These two are safe on a single CPU system as
* only user tasks fiddle here
*/
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
rc = put_user(amount, (unsigned int *)arg);
break;
@@ -1207,9 +1208,9 @@
case SIOCGSTAMP:
if (sk) {
rc = -ENOENT;
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
break;
- rc = copy_to_user((void *)arg, &sk->stamp,
+ rc = copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)) ? -EFAULT : 0;
}
rc = -EINVAL;
@@ -1256,7 +1257,8 @@
sizeof(facilities)))
break;
rc = -EINVAL;
- if (sk->state != TCP_LISTEN && sk->state != TCP_CLOSE)
+ if (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_CLOSE)
break;
if (facilities.pacsize_in < X25_PS16 ||
facilities.pacsize_in > X25_PS4096)
@@ -1360,7 +1362,7 @@
write_lock_bh(&x25_list_lock);
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if (x25_sk(s)->neighbour == nb)
x25_disconnect(s, ENETUNREACH, 0, 0);
diff -urN linux-2.5.70-bk11/net/x25/x25_in.c linux-2.5.70-bk12/net/x25/x25_in.c
--- linux-2.5.70-bk11/net/x25/x25_in.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/x25/x25_in.c 2003-06-07 04:47:54.000000000 -0700
@@ -85,9 +85,9 @@
}
skb_set_owner_r(skbn, sk);
- skb_queue_tail(&sk->receive_queue, skbn);
+ skb_queue_tail(&sk->sk_receive_queue, skbn);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skbn->len);
+ sk->sk_data_ready(sk, skbn->len);
return 0;
}
@@ -112,7 +112,7 @@
x25->vr = 0;
x25->vl = 0;
x25->state = X25_STATE_3;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
/*
* Parse the data in the frame.
*/
@@ -130,7 +130,7 @@
x25->calluserdata.cudlength = skb->len;
}
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
break;
}
case X25_CLEAR_REQUEST:
@@ -255,7 +255,8 @@
x25->state = X25_STATE_4;
break;
}
- if (atomic_read(&sk->rmem_alloc) > (sk->rcvbuf / 2))
+ if (atomic_read(&sk->sk_rmem_alloc) >
+ (sk->sk_rcvbuf / 2))
x25->condition |= X25_COND_OWN_RX_BUSY;
}
/*
diff -urN linux-2.5.70-bk11/net/x25/x25_out.c linux-2.5.70-bk12/net/x25/x25_out.c
--- linux-2.5.70-bk11/net/x25/x25_out.c 2003-05-26 18:00:23.000000000 -0700
+++ linux-2.5.70-bk12/net/x25/x25_out.c 2003-06-07 04:47:54.000000000 -0700
@@ -111,13 +111,13 @@
skbn->data[2] |= X25_STD_M_BIT;
}
- skb_queue_tail(&sk->write_queue, skbn);
+ skb_queue_tail(&sk->sk_write_queue, skbn);
sent += len;
}
kfree_skb(skb);
} else {
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
sent = skb->len - header_len;
}
return sent;
@@ -169,7 +169,7 @@
if (x25->condition & X25_COND_PEER_RX_BUSY)
return;
- if (skb_peek(&sk->write_queue) == NULL)
+ if (!skb_peek(&sk->sk_write_queue))
return;
modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
@@ -187,11 +187,11 @@
* the window is full.
*/
- skb = skb_dequeue(&sk->write_queue);
+ skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
break;
}
@@ -209,7 +209,8 @@
*/
skb_queue_tail(&x25->ack_queue, skb);
- } while (x25->vs != end && (skb = skb_dequeue(&sk->write_queue)) != NULL);
+ } while (x25->vs != end &&
+ (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
x25->vl = x25->vr;
x25->condition &= ~X25_COND_ACK_PENDING;
diff -urN linux-2.5.70-bk11/net/x25/x25_proc.c linux-2.5.70-bk12/net/x25/x25_proc.c
--- linux-2.5.70-bk11/net/x25/x25_proc.c 2003-05-26 18:00:24.000000000 -0700
+++ linux-2.5.70-bk12/net/x25/x25_proc.c 2003-06-07 04:47:54.000000000 -0700
@@ -94,7 +94,7 @@
{
struct sock *s;
- for (s = x25_list; pos && s; s = s->next)
+ for (s = x25_list; pos && s; s = s->sk_next)
--pos;
return s;
@@ -120,7 +120,7 @@
goto out;
}
s = v;
- s = s->next;
+ s = s->sk_next;
out:
return s;
}
@@ -158,8 +158,9 @@
devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
- atomic_read(&s->wmem_alloc), atomic_read(&s->rmem_alloc),
- s->socket ? SOCK_INODE(s->socket)->i_ino : 0L);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
out:
return 0;
}
diff -urN linux-2.5.70-bk11/net/x25/x25_subr.c linux-2.5.70-bk12/net/x25/x25_subr.c
--- linux-2.5.70-bk11/net/x25/x25_subr.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/x25/x25_subr.c 2003-06-07 04:47:54.000000000 -0700
@@ -49,7 +49,7 @@
{
struct x25_opt *x25 = x25_sk(sk);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&x25->ack_queue);
skb_queue_purge(&x25->interrupt_in_queue);
skb_queue_purge(&x25->interrupt_out_queue);
@@ -90,7 +90,7 @@
*/
while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
if (!skb_prev)
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb);
skb_prev = skb;
@@ -340,12 +340,12 @@
x25->causediag.cause = cause;
x25->causediag.diagnostic = diagnostic;
- sk->state = TCP_CLOSE;
- sk->err = reason;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = reason;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
@@ -358,7 +358,7 @@
{
struct x25_opt *x25 = x25_sk(sk);
- if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
+ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(x25->condition & X25_COND_OWN_RX_BUSY)) {
x25->condition &= ~X25_COND_OWN_RX_BUSY;
x25->condition &= ~X25_COND_ACK_PENDING;
diff -urN linux-2.5.70-bk11/net/x25/x25_timer.c linux-2.5.70-bk12/net/x25/x25_timer.c
--- linux-2.5.70-bk11/net/x25/x25_timer.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/x25/x25_timer.c 2003-06-07 04:47:54.000000000 -0700
@@ -45,18 +45,18 @@
void x25_start_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.function = &x25_heartbeat_expiry;
- sk->timer.expires = jiffies + 5 * HZ;
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.function = &x25_heartbeat_expiry;
+ sk->sk_timer.expires = jiffies + 5 * HZ;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void x25_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
void x25_start_t2timer(struct sock *sk)
@@ -143,7 +143,7 @@
* get removed.
*/
if (sock_flag(sk, SOCK_DESTROY) ||
- (sk->state == TCP_LISTEN &&
+ (sk->sk_state == TCP_LISTEN &&
sock_flag(sk, SOCK_DEAD))) {
x25_destroy_socket(sk);
goto unlock;
diff -urN linux-2.5.70-bk11/net/xfrm/xfrm_policy.c linux-2.5.70-bk12/net/xfrm/xfrm_policy.c
--- linux-2.5.70-bk11/net/xfrm/xfrm_policy.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/xfrm/xfrm_policy.c 2003-06-07 04:47:54.000000000 -0700
@@ -15,6 +15,7 @@
#include
#include
+#include
#include
#include
@@ -74,16 +75,25 @@
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
struct xfrm_type_map *typemap;
struct xfrm_type *type;
+ int modload_attempted = 0;
if (unlikely(afinfo == NULL))
return NULL;
typemap = afinfo->type_map;
+retry:
read_lock(&typemap->lock);
type = typemap->map[proto];
if (unlikely(type && !try_module_get(type->owner)))
type = NULL;
read_unlock(&typemap->lock);
+ if (!type && !modload_attempted) {
+ request_module("xfrm-type-%d-%d",
+ (int) family, (int) proto);
+ modload_attempted = 1;
+ goto retry;
+ }
+
xfrm_policy_put_afinfo(afinfo);
return type;
}
@@ -411,10 +421,9 @@
struct xfrm_policy *pol;
read_lock_bh(&xfrm_policy_lock);
- if ((pol = sk->policy[dir]) != NULL) {
- int match;
-
- match = xfrm_selector_match(&pol->selector, fl, sk->family);
+ if ((pol = sk->sk_policy[dir]) != NULL) {
+ int match = xfrm_selector_match(&pol->selector, fl,
+ sk->sk_family);
if (match)
xfrm_pol_hold(pol);
else
@@ -450,8 +459,8 @@
struct xfrm_policy *old_pol;
write_lock_bh(&xfrm_policy_lock);
- old_pol = sk->policy[dir];
- sk->policy[dir] = pol;
+ old_pol = sk->sk_policy[dir];
+ sk->sk_policy[dir] = pol;
if (pol) {
pol->curlft.add_time = (unsigned long)xtime.tv_sec;
pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
@@ -491,14 +500,13 @@
int __xfrm_sk_clone_policy(struct sock *sk)
{
- struct xfrm_policy *p0, *p1;
- p0 = sk->policy[0];
- p1 = sk->policy[1];
- sk->policy[0] = NULL;
- sk->policy[1] = NULL;
- if (p0 && (sk->policy[0] = clone_policy(p0, 0)) == NULL)
+ struct xfrm_policy *p0 = sk->sk_policy[0],
+ *p1 = sk->sk_policy[1];
+
+ sk->sk_policy[0] = sk->sk_policy[1] = NULL;
+ if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
return -ENOMEM;
- if (p1 && (sk->policy[1] = clone_policy(p1, 1)) == NULL)
+ if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
return -ENOMEM;
return 0;
}
@@ -644,7 +652,7 @@
restart:
genid = atomic_read(&flow_cache_genid);
policy = NULL;
- if (sk && sk->policy[1])
+ if (sk && sk->sk_policy[1])
policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
if (!policy) {
@@ -832,7 +840,7 @@
}
pol = NULL;
- if (sk && sk->policy[dir])
+ if (sk && sk->sk_policy[dir])
pol = xfrm_sk_policy_lookup(sk, dir, &fl);
if (!pol)
diff -urN linux-2.5.70-bk11/net/xfrm/xfrm_state.c linux-2.5.70-bk12/net/xfrm/xfrm_state.c
--- linux-2.5.70-bk11/net/xfrm/xfrm_state.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/xfrm/xfrm_state.c 2003-06-07 04:47:54.000000000 -0700
@@ -701,7 +701,8 @@
err = -EINVAL;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
- pol = km->compile_policy(sk->family, optname, data, optlen, &err);
+ pol = km->compile_policy(sk->sk_family, optname, data,
+ optlen, &err);
if (err >= 0)
break;
}
diff -urN linux-2.5.70-bk11/net/xfrm/xfrm_user.c linux-2.5.70-bk12/net/xfrm/xfrm_user.c
--- linux-2.5.70-bk11/net/xfrm/xfrm_user.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/net/xfrm/xfrm_user.c 2003-06-07 04:47:54.000000000 -0700
@@ -946,10 +946,11 @@
down(&xfrm_cfg_sem);
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (xfrm_user_rcv_skb(skb)) {
if (skb->len)
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue,
+ skb);
else
kfree_skb(skb);
break;
@@ -959,7 +960,7 @@
up(&xfrm_cfg_sem);
- } while (xfrm_nl && xfrm_nl->receive_queue.qlen);
+ } while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen);
}
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)
@@ -1126,7 +1127,7 @@
static void __exit xfrm_user_exit(void)
{
xfrm_unregister_km(&netlink_mgr);
- sock_release(xfrm_nl->socket);
+ sock_release(xfrm_nl->sk_socket);
}
module_init(xfrm_user_init);
diff -urN linux-2.5.70-bk11/scripts/kconfig/conf.c linux-2.5.70-bk12/scripts/kconfig/conf.c
--- linux-2.5.70-bk11/scripts/kconfig/conf.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/scripts/kconfig/conf.c 2003-06-07 04:47:54.000000000 -0700
@@ -73,6 +73,13 @@
line[0] = '\n';
line[1] = 0;
+ if (!sym_is_changable(sym)) {
+ printf("%s\n", def);
+ line[0] = '\n';
+ line[1] = 0;
+ return;
+ }
+
switch (input_mode) {
case ask_new:
case ask_silent:
@@ -82,12 +89,6 @@
}
check_stdin();
case ask_all:
- if (!sym_is_changable(sym)) {
- printf("%s\n", def);
- line[0] = '\n';
- line[1] = 0;
- return;
- }
fflush(stdout);
fgets(line, 128, stdin);
return;
diff -urN linux-2.5.70-bk11/scripts/kconfig/symbol.c linux-2.5.70-bk12/scripts/kconfig/symbol.c
--- linux-2.5.70-bk11/scripts/kconfig/symbol.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/scripts/kconfig/symbol.c 2003-06-07 04:47:54.000000000 -0700
@@ -271,6 +271,8 @@
if (sym_get_type(sym) == S_BOOLEAN) {
if (newval.tri == mod)
newval.tri = yes;
+ if (sym->visible == mod)
+ sym->visible = yes;
if (sym->rev_dep.tri == mod)
sym->rev_dep.tri = yes;
}
diff -urN linux-2.5.70-bk11/sound/core/Makefile linux-2.5.70-bk12/sound/core/Makefile
--- linux-2.5.70-bk11/sound/core/Makefile 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/core/Makefile 2003-06-07 04:47:54.000000000 -0700
@@ -45,6 +45,7 @@
obj-$(CONFIG_SND_MPU401) += snd-rawmidi.o snd.o snd-timer.o
obj-$(CONFIG_SND_ALS100) += snd-pcm.o snd-timer.o snd-page-alloc.o snd.o snd-rawmidi.o snd-hwdep.o
obj-$(CONFIG_SND_AZT2320) += snd-pcm.o snd-timer.o snd-page-alloc.o snd.o snd-rawmidi.o snd-hwdep.o
+obj-$(CONFIG_SND_AZT3328) += snd-pcm.o snd-timer.o snd-page-alloc.o snd.o snd-rawmidi.o snd-hwdep.o
obj-$(CONFIG_SND_CMI8330) += snd-pcm.o snd-timer.o snd-page-alloc.o snd.o
obj-$(CONFIG_SND_DT019X) += snd-pcm.o snd-timer.o snd-page-alloc.o snd.o snd-rawmidi.o snd-hwdep.o
obj-$(CONFIG_SND_ES18XX) += snd-pcm.o snd-timer.o snd-page-alloc.o snd.o snd-rawmidi.o snd-hwdep.o
diff -urN linux-2.5.70-bk11/sound/core/init.c linux-2.5.70-bk12/sound/core/init.c
--- linux-2.5.70-bk11/sound/core/init.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/core/init.c 2003-06-07 04:47:54.000000000 -0700
@@ -26,7 +26,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -50,6 +49,8 @@
snd_iprintf(buffer, "%s\n", entry->card->id);
}
+static void snd_card_free_thread(void * __card);
+
/**
* snd_card_new - create and initialize a soundcard structure
* @idx: card index (address) [0 ... (SNDRV_CARDS-1)]
@@ -115,6 +116,7 @@
INIT_LIST_HEAD(&card->ctl_files);
spin_lock_init(&card->files_lock);
init_waitqueue_head(&card->shutdown_sleep);
+ INIT_WORK(&card->free_workq, snd_card_free_thread, card);
#ifdef CONFIG_PM
init_MUTEX(&card->power_lock);
init_waitqueue_head(&card->power_sleep);
@@ -327,16 +329,15 @@
*/
int snd_card_free_in_thread(snd_card_t * card)
{
- DECLARE_WORK(works, snd_card_free_thread, card);
-
if (card->files == NULL) {
snd_card_free(card);
return 0;
}
- if (schedule_work(&works))
+
+ if (schedule_work(&card->free_workq))
return 0;
- snd_printk(KERN_ERR "kernel_thread failed in snd_card_free_in_thread for card %i\n", card->number);
+ snd_printk(KERN_ERR "schedule_work() failed in snd_card_free_in_thread for card %i\n", card->number);
/* try to free the structure immediately */
snd_card_free(card);
return -EFAULT;
@@ -354,6 +355,10 @@
id++;
}
id = card->id;
+ while (*spos != '\0' && !isalnum(*spos))
+ spos++;
+ if (isdigit(*spos))
+ *id++ = isalpha(card->shortname[0]) ? card->shortname[0] : 'D';
while (*spos != '\0' && (size_t)(id - card->id) < sizeof(card->id) - 1) {
if (isalnum(*spos))
*id++ = *spos;
@@ -362,6 +367,9 @@
*id = '\0';
id = card->id;
+
+ if (*id == '\0')
+ strcpy(id, "default");
while (1) {
if (loops-- == 0) {
@@ -421,7 +429,7 @@
write_unlock(&snd_card_rwlock);
return 0;
}
- if (!card->id[0])
+ if (card->id[0] == '\0')
choose_default_id(card);
snd_cards[card->number] = card;
snd_cards_count++;
diff -urN linux-2.5.70-bk11/sound/core/pcm_native.c linux-2.5.70-bk12/sound/core/pcm_native.c
--- linux-2.5.70-bk11/sound/core/pcm_native.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/core/pcm_native.c 2003-06-07 04:47:54.000000000 -0700
@@ -862,7 +862,7 @@
static int snd_pcm_pause(snd_pcm_substream_t *substream, int push)
{
- return snd_pcm_action(&snd_pcm_action_pause, substream, 0);
+ return snd_pcm_action(&snd_pcm_action_pause, substream, push);
}
#ifdef CONFIG_PM
diff -urN linux-2.5.70-bk11/sound/core/seq/Makefile linux-2.5.70-bk12/sound/core/seq/Makefile
--- linux-2.5.70-bk11/sound/core/seq/Makefile 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/core/seq/Makefile 2003-06-07 04:47:54.000000000 -0700
@@ -43,6 +43,7 @@
obj-$(call sequencer,$(CONFIG_SND_MPU401)) += $(RAWMIDI_OBJS)
obj-$(call sequencer,$(CONFIG_SND_ALS100)) += $(RAWMIDI_OBJS) $(OPL3_OBJS)
obj-$(call sequencer,$(CONFIG_SND_AZT2320)) += $(RAWMIDI_OBJS) $(OPL3_OBJS)
+obj-$(call sequencer,$(CONFIG_SND_AZT3328)) += $(RAWMIDI_OBJS) $(OPL3_OBJS)
obj-$(call sequencer,$(CONFIG_SND_DT019X)) += $(RAWMIDI_OBJS) $(OPL3_OBJS)
obj-$(call sequencer,$(CONFIG_SND_ES18XX)) += $(RAWMIDI_OBJS) $(OPL3_OBJS)
obj-$(call sequencer,$(CONFIG_SND_OPL3SA2)) += $(RAWMIDI_OBJS) $(OPL3_OBJS)
diff -urN linux-2.5.70-bk11/sound/core/seq/instr/Makefile linux-2.5.70-bk12/sound/core/seq/instr/Makefile
--- linux-2.5.70-bk11/sound/core/seq/instr/Makefile 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/sound/core/seq/instr/Makefile 2003-06-07 04:47:54.000000000 -0700
@@ -19,6 +19,7 @@
# Toplevel Module Dependency
obj-$(call sequencer,$(CONFIG_SND_ALS100)) += snd-ainstr-fm.o
obj-$(call sequencer,$(CONFIG_SND_AZT2320)) += snd-ainstr-fm.o
+obj-$(call sequencer,$(CONFIG_SND_AZT3328)) += snd-ainstr-fm.o
obj-$(call sequencer,$(CONFIG_SND_DT019X)) += snd-ainstr-fm.o
obj-$(call sequencer,$(CONFIG_SND_ES18XX)) += snd-ainstr-fm.o
obj-$(call sequencer,$(CONFIG_SND_OPL3SA2)) += snd-ainstr-fm.o
diff -urN linux-2.5.70-bk11/sound/drivers/mpu401/Makefile linux-2.5.70-bk12/sound/drivers/mpu401/Makefile
--- linux-2.5.70-bk11/sound/drivers/mpu401/Makefile 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/drivers/mpu401/Makefile 2003-06-07 04:47:54.000000000 -0700
@@ -10,6 +10,7 @@
obj-$(CONFIG_SND_MPU401) += snd-mpu401.o snd-mpu401-uart.o
obj-$(CONFIG_SND_ALS100) += snd-mpu401-uart.o
obj-$(CONFIG_SND_AZT2320) += snd-mpu401-uart.o
+obj-$(CONFIG_SND_AZT3328) += snd-mpu401-uart.o
obj-$(CONFIG_SND_DT019X) += snd-mpu401-uart.o
obj-$(CONFIG_SND_ES18XX) += snd-mpu401-uart.o
obj-$(CONFIG_SND_OPL3SA2) += snd-mpu401-uart.o
diff -urN linux-2.5.70-bk11/sound/drivers/opl3/Makefile linux-2.5.70-bk12/sound/drivers/opl3/Makefile
--- linux-2.5.70-bk11/sound/drivers/opl3/Makefile 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/drivers/opl3/Makefile 2003-06-07 04:47:54.000000000 -0700
@@ -17,6 +17,7 @@
# Toplevel Module Dependency
obj-$(CONFIG_SND_ALS100) += $(OPL3_OBJS)
obj-$(CONFIG_SND_AZT2320) += $(OPL3_OBJS)
+obj-$(CONFIG_SND_AZT3328) += $(OPL3_OBJS)
obj-$(CONFIG_SND_DT019X) += $(OPL3_OBJS)
obj-$(CONFIG_SND_ES18XX) += $(OPL3_OBJS)
obj-$(CONFIG_SND_OPL3SA2) += $(OPL3_OBJS)
diff -urN linux-2.5.70-bk11/sound/i2c/other/ak4xxx-adda.c linux-2.5.70-bk12/sound/i2c/other/ak4xxx-adda.c
--- linux-2.5.70-bk11/sound/i2c/other/ak4xxx-adda.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/i2c/other/ak4xxx-adda.c 2003-06-07 04:47:54.000000000 -0700
@@ -42,12 +42,12 @@
/* save the data */
if (ak->type == SND_AK4524 || ak->type == SND_AK4528) {
if ((reg != 0x04 && reg != 0x05) || (reg & 0x80) == 0)
- ak->images[chip][reg] = val;
+ snd_akm4xxx_set(ak, chip, reg, val);
else
- ak->ipga_gain[chip][reg-4] = val;
+ snd_akm4xxx_set_ipga(ak, chip, reg, val);
} else {
/* AK4529, or else */
- ak->images[chip][reg] = val;
+ snd_akm4xxx_set(ak, chip, reg, val);
}
ak->ops.unlock(ak, chip);
}
@@ -72,12 +72,12 @@
continue;
/* DAC volumes */
for (reg = 0x04; reg < (ak->type == SND_AK4528 ? 0x06 : 0x08); reg++)
- snd_akm4xxx_write(ak, chip, reg, ak->images[chip][reg]);
+ snd_akm4xxx_write(ak, chip, reg, snd_akm4xxx_get(ak, chip, reg));
if (ak->type == SND_AK4528)
continue;
/* IPGA */
for (reg = 0x04; reg < 0x06; reg++)
- snd_akm4xxx_write(ak, chip, reg, ak->ipga_gain[chip][reg-4]);
+ snd_akm4xxx_write(ak, chip, reg, snd_akm4xxx_get_ipga(ak, chip, reg));
}
break;
case SND_AK4529:
@@ -89,7 +89,7 @@
return;
for (reg = 0x00; reg < 0x0a; reg++)
if (reg != 0x01)
- snd_akm4xxx_write(ak, 0, reg, ak->images[0][reg]);
+ snd_akm4xxx_write(ak, 0, reg, snd_akm4xxx_get(ak, 0, reg));
break;
case SND_AK4381:
for (chip = 0; chip < ak->num_dacs/2; chip++) {
@@ -97,7 +97,7 @@
if (state)
continue;
for (reg = 0x01; reg < 0x05; reg++)
- snd_akm4xxx_write(ak, chip, reg, ak->images[chip][reg]);
+ snd_akm4xxx_write(ak, chip, reg, snd_akm4xxx_get(ak, chip, reg));
}
break;
}
@@ -240,7 +240,7 @@
int addr = AK_GET_ADDR(kcontrol->private_value);
int invert = AK_GET_INVERT(kcontrol->private_value);
unsigned int mask = AK_GET_MASK(kcontrol->private_value);
- unsigned char val = ak->images[chip][addr];
+ unsigned char val = snd_akm4xxx_get(ak, chip, addr);
ucontrol->value.integer.value[0] = invert ? mask - val : val;
return 0;
@@ -258,7 +258,7 @@
if (invert)
nval = mask - nval;
- change = ak->images[chip][addr] != nval;
+ change = snd_akm4xxx_get(ak, chip, addr) != nval;
if (change)
snd_akm4xxx_write(ak, chip, addr, nval);
return change;
@@ -278,7 +278,7 @@
akm4xxx_t *ak = _snd_kcontrol_chip(kcontrol);
int chip = AK_GET_CHIP(kcontrol->private_value);
int addr = AK_GET_ADDR(kcontrol->private_value);
- ucontrol->value.integer.value[0] = ak->ipga_gain[chip][addr-4] & 0x7f;
+ ucontrol->value.integer.value[0] = snd_akm4xxx_get_ipga(ak, chip, addr) & 0x7f;
return 0;
}
@@ -288,7 +288,7 @@
int chip = AK_GET_CHIP(kcontrol->private_value);
int addr = AK_GET_ADDR(kcontrol->private_value);
unsigned char nval = (ucontrol->value.integer.value[0] % 37) | 0x80;
- int change = ak->ipga_gain[chip][addr] != nval;
+ int change = snd_akm4xxx_get_ipga(ak, chip, addr) != nval;
if (change)
snd_akm4xxx_write(ak, chip, addr, nval);
return change;
@@ -314,7 +314,7 @@
int chip = AK_GET_CHIP(kcontrol->private_value);
int addr = AK_GET_ADDR(kcontrol->private_value);
int shift = AK_GET_SHIFT(kcontrol->private_value);
- ucontrol->value.enumerated.item[0] = (ak->images[chip][addr] >> shift) & 3;
+ ucontrol->value.enumerated.item[0] = (snd_akm4xxx_get(ak, chip, addr) >> shift) & 3;
return 0;
}
@@ -327,8 +327,8 @@
unsigned char nval = ucontrol->value.enumerated.item[0] & 3;
int change;
- nval = (nval << shift) | (ak->images[chip][addr] & ~(3 << shift));
- change = ak->images[chip][addr] != nval;
+ nval = (nval << shift) | (snd_akm4xxx_get(ak, chip, addr) & ~(3 << shift));
+ change = snd_akm4xxx_get(ak, chip, addr) != nval;
if (change)
snd_akm4xxx_write(ak, chip, addr, nval);
return change;
diff -urN linux-2.5.70-bk11/sound/isa/cs423x/cs4236.c linux-2.5.70-bk12/sound/isa/cs423x/cs4236.c
--- linux-2.5.70-bk11/sound/isa/cs423x/cs4236.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/isa/cs423x/cs4236.c 2003-06-07 04:47:54.000000000 -0700
@@ -281,6 +281,9 @@
struct pnp_resource_table * cfg = kmalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
int err;
+ if (!cfg)
+ return -ENOMEM;
+
acard->wss = pnp_request_card_device(card, id->devs[0].id, NULL);
if (acard->wss == NULL) {
kfree(cfg);
diff -urN linux-2.5.70-bk11/sound/pci/Kconfig linux-2.5.70-bk12/sound/pci/Kconfig
--- linux-2.5.70-bk11/sound/pci/Kconfig 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/Kconfig 2003-06-07 04:47:54.000000000 -0700
@@ -9,6 +9,12 @@
help
Say 'Y' or 'M' to include support for ALI PCI Audio M5451 sound core.
+config SND_AZT3328
+ tristate "Aztech AZF3328 / PCI168 (EXPERIMENTAL)"
+ depends on SND && EXPERIMENTAL
+ help
+ Say 'Y' or 'M' to include support for Aztech AZF3328 (PCI168) soundcards.
+
config SND_CS46XX
tristate "Cirrus Logic (Sound Fusion) CS4280/CS461x/CS462x/CS463x"
depends on SND && SOUND_GAMEPORT
@@ -181,7 +187,7 @@
tristate "Digigram VX222"
depends on SND
help
- Say 'Y' or 'M' to include support for Digigram VX222 soundcards
+ Say 'Y' or 'M' to include support for Digigram VX222 soundcards.
endmenu
diff -urN linux-2.5.70-bk11/sound/pci/Makefile linux-2.5.70-bk12/sound/pci/Makefile
--- linux-2.5.70-bk11/sound/pci/Makefile 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/Makefile 2003-06-07 04:47:54.000000000 -0700
@@ -4,6 +4,7 @@
#
snd-als4000-objs := als4000.o
+snd-azt3328-objs := azt3328.o
snd-cmipci-objs := cmipci.o
snd-cs4281-objs := cs4281.o
snd-ens1370-objs := ens1370.o
@@ -33,5 +34,6 @@
obj-$(CONFIG_SND_RME96) += snd-rme96.o
obj-$(CONFIG_SND_SONICVIBES) += snd-sonicvibes.o
obj-$(CONFIG_SND_VIA82XX) += snd-via82xx.o
+obj-$(CONFIG_SND_AZT3328) += snd-azt3328.o
obj-$(CONFIG_SND) += ac97/ ali5451/ cs46xx/ emu10k1/ korg1212/ nm256/ rme9652/ trident/ ymfpci/ ice1712/ vx222/
diff -urN linux-2.5.70-bk11/sound/pci/ac97/ac97_codec.c linux-2.5.70-bk12/sound/pci/ac97/ac97_codec.c
--- linux-2.5.70-bk11/sound/pci/ac97/ac97_codec.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/ac97/ac97_codec.c 2003-06-07 04:47:54.000000000 -0700
@@ -103,6 +103,7 @@
{ 0x41445363, 0xffffffff, "AD1886A", patch_ad1881, NULL },
{ 0x41445370, 0xffffffff, "AD1980", patch_ad1980, NULL },
{ 0x41445372, 0xffffffff, "AD1981A", patch_ad1881, NULL },
+{ 0x41445374, 0xffffffff, "AD1981B", patch_ad1881, NULL },
{ 0x41445375, 0xffffffff, "AD1985", patch_ad1980, NULL },
{ 0x414c4300, 0xfffffff0, "RL5306", NULL, NULL },
{ 0x414c4310, 0xfffffff0, "RL5382", NULL, NULL },
@@ -112,6 +113,7 @@
{ 0x414c4730, 0xffffffff, "ALC101", NULL, NULL },
{ 0x414c4740, 0xfffffff0, "ALC202", NULL, NULL },
{ 0x414c4750, 0xfffffff0, "ALC250", NULL, NULL },
+{ 0x414c4770, 0xfffffff0, "ALC203", NULL, NULL },
{ 0x434d4941, 0xffffffff, "CMI9738", NULL, NULL },
{ 0x434d4961, 0xffffffff, "CMI9739", patch_cm9739, NULL },
{ 0x43525900, 0xfffffff8, "CS4297", NULL, NULL },
@@ -122,6 +124,7 @@
{ 0x43525948, 0xfffffff8, "CS4201", NULL, NULL },
{ 0x43525958, 0xfffffff8, "CS4205", patch_cirrus_spdif, NULL },
{ 0x43525960, 0xfffffff8, "CS4291", NULL, NULL },
+{ 0x43525970, 0xfffffff8, "CS4202", NULL, NULL },
{ 0x43585421, 0xffffffff, "HSD11246", NULL, NULL }, // SmartMC II
{ 0x43585428, 0xfffffff8, "Cx20468", patch_conexant, NULL }, // SmartAMC fixme: the mask might be different
{ 0x44543031, 0xfffffff0, "DT0398", NULL, NULL },
@@ -130,10 +133,13 @@
{ 0x48525300, 0xffffff00, "HMP9701", NULL, NULL },
{ 0x49434501, 0xffffffff, "ICE1230", NULL, NULL },
{ 0x49434511, 0xffffffff, "ICE1232", NULL, NULL }, // alias VIA VT1611A?
+{ 0x49434514, 0xffffffff, "ICE1232A", NULL, NULL },
{ 0x49434551, 0xffffffff, "VT1616", NULL, NULL },
+{ 0x49434552, 0xffffffff, "VT1616i", NULL, NULL }, // VT1616 compatible (chipset integrated)
{ 0x49544520, 0xffffffff, "IT2226E", NULL, NULL },
{ 0x4e534300, 0xffffffff, "LM4540/43/45/46/48", NULL, NULL }, // only guess --jk
{ 0x4e534331, 0xffffffff, "LM4549", NULL, NULL },
+{ 0x4e534350, 0xffffffff, "LM4550", NULL, NULL },
{ 0x50534304, 0xffffffff, "UCB1400", NULL, NULL },
{ 0x53494c20, 0xffffffe0, "Si3036/8", NULL, NULL },
{ 0x54524102, 0xffffffff, "TR28022", NULL, NULL },
@@ -159,6 +165,7 @@
{ 0x83847609, 0xffffffff, "STAC9721/23", patch_sigmatel_stac9721, NULL },
{ 0x83847644, 0xffffffff, "STAC9744", patch_sigmatel_stac9744, NULL },
{ 0x83847650, 0xffffffff, "STAC9750/51", NULL, NULL }, // patch?
+{ 0x83847652, 0xffffffff, "STAC9752/53", NULL, NULL }, // patch?
{ 0x83847656, 0xffffffff, "STAC9756/57", patch_sigmatel_stac9756, NULL },
{ 0x83847666, 0xffffffff, "STAC9766/67", NULL, NULL }, // patch?
{ 0, 0, NULL, NULL, NULL }
@@ -183,7 +190,7 @@
/* 14 */ "Binaura 3D Audio Enhancement",
/* 15 */ "ESS Technology Stereo Enhancement",
/* 16 */ "Harman International VMAx",
- /* 17 */ "Nvidea 3D Stereo Enhancement",
+ /* 17 */ "Nvidea/IC Ensemble/KS Waves 3D Stereo Enhancement",
/* 18 */ "Philips Incredible Sound",
/* 19 */ "Texas Instruments 3D Stereo Enhancement",
/* 20 */ "VLSI Technology 3D Stereo Enhancement",
@@ -1672,18 +1679,18 @@
ac97->spec.ad18xx.pcmreg[2] = 0x9f1f;
}
} else {
+ unsigned int pcm_ctrls = 2;
/* FIXME: C-Media chips have no PCM volume!! */
if (/*ac97->id == 0x434d4941 ||*/
ac97->id == 0x434d4942 ||
ac97->id == 0x434d4961)
- goto no_pcm;
- for (idx = 0; idx < 2; idx++)
+ pcm_ctrls = 1;
+ for (idx = 0; idx < pcm_ctrls; idx++)
if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_pcm[idx], ac97))) < 0)
return err;
}
snd_ac97_write_cache(ac97, AC97_PCM, 0x9f1f);
- no_pcm:
/* build Capture controls */
for (idx = 0; idx < 3; idx++)
if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_capture[idx], ac97))) < 0)
@@ -2074,6 +2081,10 @@
ac97->card = card;
spin_lock_init(&ac97->reg_lock);
+ if (ac97->pci) {
+ pci_read_config_word(ac97->pci, PCI_SUBSYSTEM_VENDOR_ID, &ac97->subsystem_vendor);
+ pci_read_config_word(ac97->pci, PCI_SUBSYSTEM_ID, &ac97->subsystem_device);
+ }
if (ac97->reset) {
ac97->reset(ac97);
goto __access_ok;
@@ -2713,7 +2724,7 @@
}
spin_lock(&ac97->reg_lock);
- old = ac97->regs[reg] & ~mask;
+ old = ac97->regs[reg] & mask;
spin_unlock(&ac97->reg_lock);
if (old != bits) {
snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0);
diff -urN linux-2.5.70-bk11/sound/pci/azt3328.c linux-2.5.70-bk12/sound/pci/azt3328.c
--- linux-2.5.70-bk11/sound/pci/azt3328.c 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.5.70-bk12/sound/pci/azt3328.c 2003-06-07 04:47:54.000000000 -0700
@@ -0,0 +1,1621 @@
+/*
+ * azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
+ * Copyright (C) 2002 by Andreas Mohr
+ *
+ * Framework borrowed from Bart Hartgers's als4000.c.
+ * Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801),
+ * found in a Fujitsu-Siemens PC ("Cordant", aluminum case).
+ * Other versions are:
+ * PCI168 A(W), sub ID 1800
+ * PCI168 A/AP, sub ID 8000
+ * Please give me feedback in case you try my driver with one of these!!
+ *
+ * GPL LICENSE
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * NOTES
+ * Since Aztech does not provide any chipset documentation,
+ * even on repeated request to various addresses,
+ * and the answer that was finally given was negative
+ * (and I was stupid enough to manage to get hold of a PCI168 soundcard
+ * in the first place >:-P}),
+ * I was forced to base this driver on reverse engineering
+ * (3 weeks' worth of evenings filled with driver work).
+ * (and no, I did NOT go the easy way: to pick up a PCI128 for 9 Euros)
+ *
+ * The AZF3328 chip (note: AZF3328, *not* AZT3328, that's just the driver name
+ * for compatibility reasons) has the following features:
+ *
+ * - builtin AC97 conformant codec (SNR over 80dB)
+ * (really AC97 compliant?? I really doubt it when looking
+ * at the mixer register layout)
+ * - builtin genuine OPL3
+ * - full duplex 16bit playback/record at independent sampling rate
+ * - MPU401 (+ legacy address support) FIXME: how to enable legacy addr??
+ * - game port (legacy address support)
+ * - built-in General DirectX timer having a 20 bits counter
+ * with 1us resolution (FIXME: where is it?)
+ * - I2S serial port for external DAC
+ * - supports 33MHz PCI spec 2.1, PCI power management 1.0, compliant with ACPI
+ * - supports hardware volume control
+ * - single chip low cost solution (128 pin QFP)
+ * - supports programmable Sub-vendor and Sub-system ID
+ * required for Microsoft's logo compliance (FIXME: where?)
+ * - PCI168 AP(W) card: power amplifier with 4 Watts/channel at 4 Ohms
+ *
+ * Certain PCI versions of this card are susceptible to DMA traffic underruns
+ * in some systems (resulting in sound crackling/clicking/popping),
+ * probably because they don't have a DMA FIFO buffer or so.
+ * Overview (PCI ID/PCI subID/PCI rev.):
+ * - no DMA crackling on SiS735: 0x50DC/0x1801/16
+ * - unknown performance: 0x50DC/0x1801/10
+ *
+ * Crackling happens with VIA chipsets or, in my case, an SiS735, which is
+ * supposed to be very fast and supposed to get rid of crackling much
+ * better than a VIA, yet ironically I still get crackling, like many other
+ * people with the same chipset.
+ * Possible remedies:
+ * - plug card into a different PCI slot, preferrably one that isn't shared
+ * too much (this helps a lot, but not completely!)
+ * - get rid of PCI VGA card, use AGP instead
+ * - upgrade or downgrade BIOS
+ * - fiddle with PCI latency settings (setpci -v -s BUSID latency_timer=XX)
+ * Not too helpful.
+ * - Disable ACPI/power management/"Auto Detect RAM/PCI Clk" in BIOS
+ *
+ * BUGS
+ * - when Ctrl-C'ing mpg321, the playback loops a bit
+ * (premature DMA playback reset?)
+ * - full-duplex sometimes breaks (IRQ management issues?).
+ * Once even a spontaneous REBOOT happened!!!
+ *
+ * TODO
+ * - test MPU401 MIDI playback etc.
+ * - power management (CONFIG_PM). See e.g. intel8x0 or cs4281.
+ * This would be nice since the chip runs a bit hot, and it's *required*
+ * anyway for proper ACPI power management. In other words: rest
+ * assured that I *will* implement this very soon; as soon as Linux 2.5.x
+ * has power management that's bugfree enough to work properly on my desktop.
+ * - figure out what all unknown port bits are responsible for
+ */
+
+#include <sound/driver.h>
+#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gameport.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <sound/rawmidi.h>
+#include <sound/mpu401.h>
+#define SNDRV_GET_ID
+#include <sound/initval.h>
+#include "azt3328.h"
+
+MODULE_AUTHOR("Andreas Mohr ");
+MODULE_DESCRIPTION("Aztech AZF3328 (PCI168)");
+MODULE_LICENSE("GPL");
+MODULE_CLASSES("{sound}");
+MODULE_DEVICES("{{Aztech,AZF3328}}");
+
+#define DEBUG_MISC 0
+#define DEBUG_CALLS 0
+#define DEBUG_MIXER 0
+#define DEBUG_PLAY_REC 0
+#define DEBUG_IO 0
+#define MIXER_TESTING 0
+
+#if DEBUG_MISC
+#define snd_azf3328_dbgmisc(format, args...) printk(KERN_ERR format, ##args)
+#else
+#define snd_azf3328_dbgmisc(format, args...)
+#endif
+
+#if DEBUG_CALLS
+#define snd_azf3328_dbgcalls(format, args...) printk(format, ##args)
+#define snd_azf3328_dbgcallenter() printk(KERN_ERR "entering %s\n", __FUNCTION__)
+#define snd_azf3328_dbgcallleave() printk(KERN_ERR "leaving %s\n", __FUNCTION__)
+#else
+#define snd_azf3328_dbgcalls(format, args...)
+#define snd_azf3328_dbgcallenter()
+#define snd_azf3328_dbgcallleave()
+#endif
+
+#if DEBUG_MIXER
+#define snd_azf3328_dbgmixer(format, args...) printk(format, ##args)
+#else
+#define snd_azf3328_dbgmixer(format, args...)
+#endif
+
+#if DEBUG_PLAY_REC
+#define snd_azf3328_dbgplay(format, args...) printk(KERN_ERR format, ##args)
+#else
+#define snd_azf3328_dbgplay(format, args...)
+#endif
+
+#if DEBUG_IO
+#define snd_azf3328_dbgio(chip, where) \
+ printk(KERN_ERR "%s: IDX_IO_PLAY_FLAGS %04x, IDX_IO_PLAY_IRQMASK %04x, IDX_IO_IRQSTATUS %04x\n", where, inw(chip->codec_port+IDX_IO_PLAY_FLAGS), inw(chip->codec_port+IDX_IO_PLAY_IRQMASK), inw(chip->codec_port+IDX_IO_IRQSTATUS))
+#else
+#define snd_azf3328_dbgio(chip, where)
+#endif
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
+static int joystick[SNDRV_CARDS] =
+ {-1}; /* "unset" as default */
+
+MODULE_PARM(index, "1-" __MODULE_STRING(SNDRV_CARDS) "i");
+MODULE_PARM_DESC(index, "Index value for AZF3328 soundcard.");
+MODULE_PARM_SYNTAX(index, SNDRV_INDEX_DESC);
+MODULE_PARM(id, "1-" __MODULE_STRING(SNDRV_CARDS) "s");
+MODULE_PARM_DESC(id, "ID string for AZF3328 soundcard.");
+MODULE_PARM_SYNTAX(id, SNDRV_ID_DESC);
+MODULE_PARM(enable, "1-" __MODULE_STRING(SNDRV_CARDS) "i");
+MODULE_PARM_DESC(enable, "Enable AZF3328 soundcard.");
+MODULE_PARM_SYNTAX(enable, SNDRV_INDEX_DESC);
+MODULE_PARM(joystick, "1-" __MODULE_STRING(SNDRV_CARDS) "i");
+MODULE_PARM_DESC(joystick, "Forced joystick port enable for AZF3328 soundcard. (0 = force disable)");
+MODULE_PARM_SYNTAX(joystick, SNDRV_ENABLED);
+
+typedef struct _snd_azf3328 azf3328_t;
+#define chip_t azf3328_t
+
+struct _snd_azf3328 {
+ int irq;
+
+ unsigned long codec_port;
+ struct resource *res_codec_port;
+ unsigned long io2_port;
+ struct resource *res_io2_port;
+ unsigned long mpu_port;
+ struct resource *res_mpu_port;
+ unsigned long synth_port;
+ struct resource *res_synth_port;
+ unsigned long mixer_port;
+ struct resource *res_mixer_port;
+ unsigned long game_port;
+
+ struct pci_dev *pci;
+ snd_card_t *card;
+
+ snd_pcm_t *pcm;
+ snd_rawmidi_t *rmidi;
+ snd_pcm_substream_t *playback_substream;
+ snd_pcm_substream_t *capture_substream;
+ unsigned int is_playing;
+ unsigned int is_recording;
+
+ spinlock_t reg_lock;
+};
+
+static struct pci_device_id snd_azf3328_ids[] __devinitdata = {
+ { 0x122D, 0x50DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* PCI168/3328 */
+ { 0x122D, 0x80DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* 3328 */
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, snd_azf3328_ids);
+
+void snd_azf3328_io2_write(azf3328_t *chip, int reg, unsigned char value)
+{
+ outb(value, chip->io2_port + reg);
+}
+
+unsigned char snd_azf3328_io2_read(azf3328_t *chip, int reg)
+{
+ return inb(chip->io2_port + reg);
+}
+
+void snd_azf3328_mixer_write(azf3328_t *chip, int reg, unsigned long value, int type)
+{
+ switch(type) {
+ case WORD_VALUE:
+ outw(value, chip->mixer_port + reg);
+ break;
+ case DWORD_VALUE:
+ outl(value, chip->mixer_port + reg);
+ break;
+ case BYTE_VALUE:
+ outb(value, chip->mixer_port + reg);
+ break;
+ }
+}
+
+unsigned long snd_azf3328_mixer_read(azf3328_t *chip, int reg, int type)
+{
+ unsigned long res = 0;
+
+ switch(type) {
+ case WORD_VALUE:
+ res = (unsigned long)inw(chip->mixer_port + reg);
+ break;
+ case DWORD_VALUE:
+ res = (unsigned long)inl(chip->mixer_port + reg);
+ break;
+ case BYTE_VALUE:
+ res = (unsigned long)inb(chip->mixer_port + reg);
+ break;
+ }
+
+ return res;
+}
+
+void snd_azf3328_mixer_set_mute(azf3328_t *chip, int reg, int do_mute)
+{
+ unsigned char oldval;
+
+ /* the mute bit is on the *second* (i.e. right) register of a
+ * left/right channel setting */
+ oldval = inb(chip->mixer_port + reg + 1);
+ if (do_mute)
+ oldval |= 0x80;
+ else
+ oldval &= ~0x80;
+ outb(oldval, chip->mixer_port + reg + 1);
+}
+
+void snd_azf3328_mixer_write_volume_gradually(azf3328_t *chip, int reg, unsigned char dst_vol_left, unsigned char dst_vol_right, int chan_sel, int delay)
+{
+ unsigned char curr_vol_left = 0, curr_vol_right = 0;
+ int left_done = 0, right_done = 0;
+
+ snd_azf3328_dbgcallenter();
+ if (chan_sel & SET_CHAN_LEFT)
+ curr_vol_left = inb(chip->mixer_port + reg + 1);
+ else
+ left_done = 1;
+ if (chan_sel & SET_CHAN_RIGHT)
+ curr_vol_right = inb(chip->mixer_port + reg + 0);
+ else
+ right_done = 1;
+
+ /* take care of muting flag (0x80) contained in left channel */
+ if (curr_vol_left & 0x80)
+ dst_vol_left |= 0x80;
+ else
+ dst_vol_left &= ~0x80;
+
+ do
+ {
+ if (!left_done)
+ {
+ if (curr_vol_left > dst_vol_left)
+ curr_vol_left--;
+ else
+ if (curr_vol_left < dst_vol_left)
+ curr_vol_left++;
+ else
+ left_done = 1;
+ outb(curr_vol_left, chip->mixer_port + reg + 1);
+ }
+ if (!right_done)
+ {
+ if (curr_vol_right > dst_vol_right)
+ curr_vol_right--;
+ else
+ if (curr_vol_right < dst_vol_right)
+ curr_vol_right++;
+ else
+ right_done = 1;
+ /* during volume change, the right channel is crackling
+ * somewhat more than the left channel, unfortunately.
+ * This seems to be a hardware issue. */
+ outb(curr_vol_right, chip->mixer_port + reg + 0);
+ }
+ if (delay)
+ mdelay(delay);
+ }
+ while ((!left_done) || (!right_done));
+ snd_azf3328_dbgcallleave();
+}
+
+/*
+ * general mixer element
+ */
+typedef struct azf3328_mixer_reg {
+ unsigned int reg;
+ unsigned int lchan_shift, rchan_shift;
+ unsigned int mask;
+ unsigned int invert: 1;
+ unsigned int stereo: 1;
+ unsigned int enum_c: 4;
+} azf3328_mixer_reg_t;
+
+#define COMPOSE_MIXER_REG(reg,lchan_shift,rchan_shift,mask,invert,stereo,enum_c) \
+ ((reg) | (lchan_shift << 8) | (rchan_shift << 12) | (mask << 16) | (invert << 24) | (stereo << 25) | (enum_c << 26))
+
+static void snd_azf3328_mixer_reg_decode(azf3328_mixer_reg_t *r, unsigned long val)
+{
+ r->reg = val & 0xff;
+ r->lchan_shift = (val >> 8) & 0x0f;
+ r->rchan_shift = (val >> 12) & 0x0f;
+ r->mask = (val >> 16) & 0xff;
+ r->invert = (val >> 24) & 1;
+ r->stereo = (val >> 25) & 1;
+ r->enum_c = (val >> 26) & 0x0f;
+}
+
+/*
+ * mixer switches/volumes
+ */
+
+#define AZF3328_MIXER_SWITCH(xname, reg, shift, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_azf3328_info_mixer, \
+ .get = snd_azf3328_get_mixer, .put = snd_azf3328_put_mixer, \
+ .private_value = COMPOSE_MIXER_REG(reg, shift, 0, 0x1, invert, 0, 0), \
+}
+
+#define AZF3328_MIXER_VOL_STEREO(xname, reg, mask, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_azf3328_info_mixer, \
+ .get = snd_azf3328_get_mixer, .put = snd_azf3328_put_mixer, \
+ .private_value = COMPOSE_MIXER_REG(reg, 8, 0, mask, invert, 1, 0), \
+}
+
+#define AZF3328_MIXER_VOL_MONO(xname, reg, mask, is_right_chan) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_azf3328_info_mixer, \
+ .get = snd_azf3328_get_mixer, .put = snd_azf3328_put_mixer, \
+ .private_value = COMPOSE_MIXER_REG(reg, is_right_chan ? 0 : 8, 0, mask, 1, 0, 0), \
+}
+
+#define AZF3328_MIXER_VOL_SPECIAL(xname, reg, mask, shift, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_azf3328_info_mixer, \
+ .get = snd_azf3328_get_mixer, .put = snd_azf3328_put_mixer, \
+ .private_value = COMPOSE_MIXER_REG(reg, shift, 0, mask, invert, 0, 0), \
+}
+
+#define AZF3328_MIXER_ENUM(xname, reg, enum_c, shift) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_azf3328_info_mixer_enum, \
+ .get = snd_azf3328_get_mixer_enum, .put = snd_azf3328_put_mixer_enum, \
+ .private_value = COMPOSE_MIXER_REG(reg, shift, 0, 0, 0, 0, enum_c), \
+}
+
+static int snd_azf3328_info_mixer(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t *uinfo)
+{
+ azf3328_mixer_reg_t reg;
+
+ snd_azf3328_dbgcallenter();
+	snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
+ uinfo->type = reg.mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = reg.stereo + 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = reg.mask;
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_get_mixer(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * ucontrol)
+{
+ azf3328_t *chip = snd_kcontrol_chip(kcontrol);
+ azf3328_mixer_reg_t reg;
+ unsigned int oreg, val;
+
+ snd_azf3328_dbgcallenter();
+	snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
+
+ oreg = inw(chip->mixer_port + reg.reg);
+ val = (oreg >> reg.lchan_shift) & reg.mask;
+ if (reg.invert)
+ val = reg.mask - val;
+ ucontrol->value.integer.value[0] = val;
+ if (reg.stereo) {
+ val = (oreg >> reg.rchan_shift) & reg.mask;
+ if (reg.invert)
+ val = reg.mask - val;
+ ucontrol->value.integer.value[1] = val;
+ }
+ snd_azf3328_dbgmixer("get: %02x is %04x -> vol %02lx|%02lx (shift %02d|%02d, mask %02x, inv. %d, stereo %d)\n", reg.reg, oreg, ucontrol->value.integer.value[0], ucontrol->value.integer.value[1], reg.lchan_shift, reg.rchan_shift, reg.mask, reg.invert, reg.stereo);
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_put_mixer(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * ucontrol)
+{
+ azf3328_t *chip = snd_kcontrol_chip(kcontrol);
+ azf3328_mixer_reg_t reg;
+ unsigned int oreg, nreg, val;
+
+ snd_azf3328_dbgcallenter();
+	snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
+ oreg = inw(chip->mixer_port + reg.reg);
+ val = ucontrol->value.integer.value[0] & reg.mask;
+ if (reg.invert)
+ val = reg.mask - val;
+ nreg = oreg & ~(reg.mask << reg.lchan_shift);
+ nreg |= (val << reg.lchan_shift);
+ if (reg.stereo) {
+ val = ucontrol->value.integer.value[1] & reg.mask;
+ if (reg.invert)
+ val = reg.mask - val;
+ nreg &= ~(reg.mask << reg.rchan_shift);
+ nreg |= (val << reg.rchan_shift);
+ }
+ if (reg.mask >= 0x07) /* it's a volume control, so better take care */
+ snd_azf3328_mixer_write_volume_gradually(chip, reg.reg, nreg >> 8, nreg & 0xff, SET_CHAN_LEFT|SET_CHAN_RIGHT, 0); /* just set both channels, doesn't matter */
+ else
+ outw(nreg, chip->mixer_port + reg.reg);
+
+ snd_azf3328_dbgmixer("put: %02x to %02lx|%02lx, oreg %04x; shift %02d|%02d -> nreg %04x; after: %04x\n", reg.reg, ucontrol->value.integer.value[0], ucontrol->value.integer.value[1], oreg, reg.lchan_shift, reg.rchan_shift, nreg, inw(chip->mixer_port + reg.reg));
+ snd_azf3328_dbgcallleave();
+ return (nreg != oreg);
+}
+
+static int snd_azf3328_info_mixer_enum(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t * uinfo)
+{
+ azf3328_mixer_reg_t reg;
+ static char *texts1[2] = { "ModemOut1", "ModemOut2" };
+ static char *texts2[2] = { "MonoSelectSource1", "MonoSelectSource2" };
+ static char *texts3[8] = {
+ "Mic", "CD", "Video", "Aux", "Line",
+ "Mix", "Mix Mono", "Phone"
+ };
+
+	snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = (reg.reg == IDX_MIXER_REC_SELECT) ? 2 : 1;
+ uinfo->value.enumerated.items = reg.enum_c;
+ if (uinfo->value.enumerated.item > reg.enum_c - 1U)
+ uinfo->value.enumerated.item = reg.enum_c - 1U;
+ if (reg.reg == IDX_MIXER_ADVCTL2)
+ {
+ if (reg.lchan_shift == 8) /* modem out sel */
+ strcpy(uinfo->value.enumerated.name, texts1[uinfo->value.enumerated.item]);
+ else /* mono sel source */
+ strcpy(uinfo->value.enumerated.name, texts2[uinfo->value.enumerated.item]);
+ }
+ else
+ strcpy(uinfo->value.enumerated.name, texts3[uinfo->value.enumerated.item]
+);
+ return 0;
+}
+
+static int snd_azf3328_get_mixer_enum(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * ucontrol)
+{
+ azf3328_mixer_reg_t reg;
+ azf3328_t *chip = snd_kcontrol_chip(kcontrol);
+ unsigned short val;
+
+	snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
+ val = inw(chip->mixer_port + reg.reg);
+ if (reg.reg == IDX_MIXER_REC_SELECT)
+ {
+ ucontrol->value.enumerated.item[0] = (val >> 8) & (reg.enum_c - 1);
+ ucontrol->value.enumerated.item[1] = (val >> 0) & (reg.enum_c - 1);
+ }
+ else
+ ucontrol->value.enumerated.item[0] = (val >> reg.lchan_shift) & (reg.enum_c - 1);
+ snd_azf3328_dbgmixer("get_enum: %02x is %04x -> %d|%d (shift %02d, enum_c %d)\n", reg.reg, val, ucontrol->value.enumerated.item[0], ucontrol->value.enumerated.item[1], reg.lchan_shift, reg.enum_c);
+ return 0;
+}
+
+static int snd_azf3328_put_mixer_enum(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * ucontrol)
+{
+ azf3328_mixer_reg_t reg;
+ azf3328_t *chip = snd_kcontrol_chip(kcontrol);
+ unsigned int oreg, nreg, val;
+
+	snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
+ oreg = inw(chip->mixer_port + reg.reg);
+ val = oreg;
+ if (reg.reg == IDX_MIXER_REC_SELECT)
+ {
+ if (ucontrol->value.enumerated.item[0] > reg.enum_c - 1U ||
+ ucontrol->value.enumerated.item[1] > reg.enum_c - 1U)
+ return -EINVAL;
+ val = (ucontrol->value.enumerated.item[0] << 8) |
+ (ucontrol->value.enumerated.item[1] << 0);
+ }
+ else
+ {
+ if (ucontrol->value.enumerated.item[0] > reg.enum_c - 1U)
+ return -EINVAL;
+ val &= ~((reg.enum_c - 1) << reg.lchan_shift);
+ val |= (ucontrol->value.enumerated.item[0] << reg.lchan_shift);
+ }
+ outw(val, chip->mixer_port + reg.reg);
+ nreg = val;
+
+ snd_azf3328_dbgmixer("put_enum: %02x to %04x, oreg %04x\n", reg.reg, val, oreg);
+ return (nreg != oreg);
+}
+
+#define NUM_CONTROLS(ary) (sizeof(ary) / sizeof(snd_kcontrol_new_t))
+
+static snd_kcontrol_new_t snd_azf3328_mixer_controls[] __devinitdata = {
+ AZF3328_MIXER_SWITCH("Master Playback Switch", IDX_MIXER_PLAY_MASTER, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("Master Playback Volume", IDX_MIXER_PLAY_MASTER, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("Wave Playback Switch", IDX_MIXER_WAVEOUT, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("Wave Playback Volume", IDX_MIXER_WAVEOUT, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("Wave Playback 3D Bypass", IDX_MIXER_ADVCTL2, 7, 1),
+ AZF3328_MIXER_SWITCH("FM Playback Switch", IDX_MIXER_FMSYNTH, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("FM Playback Volume", IDX_MIXER_FMSYNTH, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("CD Playback Switch", IDX_MIXER_CDAUDIO, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("CD Playback Volume", IDX_MIXER_CDAUDIO, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("Capture Switch", IDX_MIXER_REC_VOLUME, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("Capture Volume", IDX_MIXER_REC_VOLUME, 0x0f, 0),
+ AZF3328_MIXER_ENUM("Capture Source", IDX_MIXER_REC_SELECT, 8, 0),
+ AZF3328_MIXER_SWITCH("Mic Playback Switch", IDX_MIXER_MIC, 15, 1),
+ AZF3328_MIXER_VOL_MONO("Mic Playback Volume", IDX_MIXER_MIC, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("Mic Boost (+20dB)", IDX_MIXER_MIC, 6, 0),
+ AZF3328_MIXER_SWITCH("Line Playback Switch", IDX_MIXER_LINEIN, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("Line Playback Volume", IDX_MIXER_LINEIN, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("PCBeep Playback Switch", IDX_MIXER_PCBEEP, 15, 1),
+ AZF3328_MIXER_VOL_SPECIAL("PCBeep Playback Volume", IDX_MIXER_PCBEEP, 0x0f, 1, 1),
+ AZF3328_MIXER_SWITCH("Video Playback Switch", IDX_MIXER_VIDEO, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("Video Playback Volume", IDX_MIXER_VIDEO, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("Aux Playback Switch", IDX_MIXER_AUX, 15, 1),
+ AZF3328_MIXER_VOL_STEREO("Aux Playback Volume", IDX_MIXER_AUX, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("Modem Playback Switch", IDX_MIXER_MODEMOUT, 15, 1),
+ AZF3328_MIXER_VOL_MONO("Modem Playback Volume", IDX_MIXER_MODEMOUT, 0x1f, 1),
+ AZF3328_MIXER_SWITCH("Modem Capture Switch", IDX_MIXER_MODEMIN, 15, 1),
+ AZF3328_MIXER_VOL_MONO("Modem Capture Volume", IDX_MIXER_MODEMIN, 0x1f, 1),
+ AZF3328_MIXER_ENUM("Modem Out Select", IDX_MIXER_ADVCTL2, 2, 8),
+ AZF3328_MIXER_ENUM("Mono Select Source", IDX_MIXER_ADVCTL2, 2, 9),
+ AZF3328_MIXER_VOL_SPECIAL("Tone Control - Treble", IDX_MIXER_BASSTREBLE, 0x07, 1, 0),
+ AZF3328_MIXER_VOL_SPECIAL("Tone Control - Bass", IDX_MIXER_BASSTREBLE, 0x07, 9, 0),
+ AZF3328_MIXER_SWITCH("3D Control - Toggle", IDX_MIXER_ADVCTL2, 13, 0),
+ AZF3328_MIXER_VOL_SPECIAL("3D Control - Volume", IDX_MIXER_ADVCTL1, 0x07, 1, 0), /* "3D Width" */
+ AZF3328_MIXER_VOL_SPECIAL("3D Control - Space", IDX_MIXER_ADVCTL1, 0x03, 8, 0), /* "Hifi 3D" */
+#if MIXER_TESTING
+ AZF3328_MIXER_SWITCH("0", IDX_MIXER_ADVCTL2, 0, 0),
+ AZF3328_MIXER_SWITCH("1", IDX_MIXER_ADVCTL2, 1, 0),
+ AZF3328_MIXER_SWITCH("2", IDX_MIXER_ADVCTL2, 2, 0),
+ AZF3328_MIXER_SWITCH("3", IDX_MIXER_ADVCTL2, 3, 0),
+ AZF3328_MIXER_SWITCH("4", IDX_MIXER_ADVCTL2, 4, 0),
+ AZF3328_MIXER_SWITCH("5", IDX_MIXER_ADVCTL2, 5, 0),
+ AZF3328_MIXER_SWITCH("6", IDX_MIXER_ADVCTL2, 6, 0),
+ AZF3328_MIXER_SWITCH("7", IDX_MIXER_ADVCTL2, 7, 0),
+ AZF3328_MIXER_SWITCH("8", IDX_MIXER_ADVCTL2, 8, 0),
+ AZF3328_MIXER_SWITCH("9", IDX_MIXER_ADVCTL2, 9, 0),
+ AZF3328_MIXER_SWITCH("10", IDX_MIXER_ADVCTL2, 10, 0),
+ AZF3328_MIXER_SWITCH("11", IDX_MIXER_ADVCTL2, 11, 0),
+ AZF3328_MIXER_SWITCH("12", IDX_MIXER_ADVCTL2, 12, 0),
+ AZF3328_MIXER_SWITCH("13", IDX_MIXER_ADVCTL2, 13, 0),
+ AZF3328_MIXER_SWITCH("14", IDX_MIXER_ADVCTL2, 14, 0),
+ AZF3328_MIXER_SWITCH("15", IDX_MIXER_ADVCTL2, 15, 0),
+#endif
+};
+
+#define AZF3328_INIT_VALUES (sizeof(snd_azf3328_init_values)/sizeof(unsigned int)/2)
+
+static unsigned int snd_azf3328_init_values[][2] = {
+ { IDX_MIXER_PLAY_MASTER, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_MODEMOUT, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_BASSTREBLE, 0x0000 },
+ { IDX_MIXER_PCBEEP, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_MODEMIN, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_MIC, MIXER_MUTE_MASK|0x001f },
+ { IDX_MIXER_LINEIN, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_CDAUDIO, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_VIDEO, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_AUX, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_WAVEOUT, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_FMSYNTH, MIXER_MUTE_MASK|0x1f1f },
+ { IDX_MIXER_REC_VOLUME, MIXER_MUTE_MASK|0x0707 },
+};
+
+static int __devinit snd_azf3328_mixer_new(azf3328_t *chip)
+{
+ snd_card_t *card;
+ snd_kcontrol_new_t *sw;
+ unsigned int idx;
+ int err;
+
+ snd_azf3328_dbgcallenter();
+ snd_assert(chip != NULL && chip->card != NULL, return -EINVAL);
+
+ card = chip->card;
+
+ /* mixer reset */
+ snd_azf3328_mixer_write(chip, IDX_MIXER_RESET, 0x0, WORD_VALUE);
+
+ /* mute and zero volume channels */
+ for (idx = 0; idx < AZF3328_INIT_VALUES; idx++) {
+ snd_azf3328_mixer_write(chip, snd_azf3328_init_values[idx][0], snd_azf3328_init_values[idx][1], WORD_VALUE);
+ }
+
+ /* add mixer controls */
+ sw = snd_azf3328_mixer_controls;
+ for (idx = 0; idx < NUM_CONTROLS(snd_azf3328_mixer_controls); idx++, sw++) {
+ if ((err = snd_ctl_add(chip->card, snd_ctl_new1(sw, chip))) < 0)
+ return err;
+ }
+ snd_component_add(card, "AZF3328 mixer");
+ strcpy(card->mixername, "AZF3328 mixer");
+
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_hw_params(snd_pcm_substream_t * substream,
+ snd_pcm_hw_params_t * hw_params)
+{
+ int res;
+ snd_azf3328_dbgcallenter();
+ res = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+ snd_azf3328_dbgcallleave();
+ return res;
+}
+
+static int snd_azf3328_hw_free(snd_pcm_substream_t * substream)
+{
+ snd_azf3328_dbgcallenter();
+ snd_pcm_lib_free_pages(substream);
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static void snd_azf3328_setfmt(azf3328_t *chip,
+ unsigned int reg,
+ unsigned int bitrate,
+ unsigned int format_width,
+ unsigned int channels
+)
+{
+ unsigned int val = 0xff00;
+ unsigned long flags;
+
+ snd_azf3328_dbgcallenter();
+ switch (bitrate) {
+ case 5512: val |= 0x0d; break; /* the AZF3328 names it "5510" for some strange reason */
+ case 6620: val |= 0x0b; break;
+ case 8000: val |= 0x00; break;
+ case 9600: val |= 0x08; break;
+ case 11025: val |= 0x01; break;
+ case 16000: val |= 0x02; break;
+ case 22050: val |= 0x03; break;
+ case 32000: val |= 0x04; break;
+ case 44100: val |= 0x05; break;
+ case 48000: val |= 0x06; break;
+ case 64000: val |= 0x07; break;
+ default:
+ snd_printk("unknown bitrate %d, assuming 44.1kHz!\n", bitrate);
+ val |= 0x05; /* 44100 */
+ break;
+ }
+ /* val = 0xff07; 3m27.993s (65301Hz; -> 64000Hz???) */
+ /* val = 0xff09; 17m15.098s (13123,478Hz; -> 12000Hz???) */
+ /* val = 0xff0a; 47m30.599s (4764,891Hz; -> 4800Hz???) */
+ /* val = 0xff0c; 57m0.510s (4010,263Hz; -> 4000Hz???) */
+ /* val = 0xff05; 5m11.556s (... -> 44100Hz) */
+ /* val = 0xff03; 10m21.529s (21872,463Hz; -> 22050Hz???) */
+ /* val = 0xff0f; 20m41.883s (10937,993Hz; -> 11025Hz???) */
+ /* val = 0xff0d; 41m23.135s (5523,600Hz; -> 5512Hz???) */
+ /* val = 0xff0e; 28m30.777s (8017Hz; -> 8000Hz???) */
+ if (channels == 2)
+ val |= SOUNDFORMAT_FLAG_2CHANNELS;
+
+ if (format_width == 16)
+ val |= SOUNDFORMAT_FLAG_16BIT;
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+
+ /* set bitrate/format */
+ outw(val, chip->codec_port+reg);
+
+ /* changing the bitrate/format settings switches off the
+ * audio output with an annoying click in case of 8/16bit format change
+ * (maybe shutting down DAC/ADC?), thus immediately
+ * do some tweaking to reenable it and get rid of the clicking
+ * (FIXME: yes, it works, but what exactly am I doing here?? :)
+ * FIXME: does this have some side effects for full-duplex
+ * or other dramatic side effects? */
+ if (reg == IDX_IO_PLAY_SOUNDFORMAT) /* only do it for playback */
+ outw(inw(chip->codec_port + IDX_IO_PLAY_FLAGS)|DMA_PLAY_SOMETHING1|DMA_PLAY_SOMETHING2|SOMETHING_ALMOST_ALWAYS_SET|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE, chip->codec_port + IDX_IO_PLAY_FLAGS);
+
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+ snd_azf3328_dbgcallleave();
+}
+
+static void snd_azf3328_setdmaa(azf3328_t *chip,
+ long unsigned int addr,
+ unsigned int count,
+ unsigned int size,
+ int do_recording)
+{
+ long unsigned int addr1;
+ long unsigned int addr2;
+ unsigned int count1;
+ unsigned int count2;
+ unsigned long flags;
+ int reg_offs = do_recording ? 0x20 : 0x00;
+
+ snd_azf3328_dbgcallenter();
+ /* AZF3328 uses a two buffer pointer DMA playback approach */
+ if (!chip->is_playing)
+ {
+ addr1 = addr;
+ addr2 = addr+(size/2);
+ count1 = (size/2)-1;
+ count2 = (size/2)-1;
+#if DEBUG_PLAY_REC
+ snd_azf3328_dbgplay("setting dma: buf1 %08lx[%d], buf2 %08lx[%d]\n", addr1, count1, addr2, count2);
+#endif
+ spin_lock_irqsave(&chip->reg_lock, flags);
+ outl(addr1, chip->codec_port+reg_offs+IDX_IO_PLAY_DMA_START_1);
+ outl(addr2, chip->codec_port+reg_offs+IDX_IO_PLAY_DMA_START_2);
+ outw(count1, chip->codec_port+reg_offs+IDX_IO_PLAY_DMA_LEN_1);
+ outw(count2, chip->codec_port+reg_offs+IDX_IO_PLAY_DMA_LEN_2);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+ }
+ snd_azf3328_dbgcallleave();
+}
+
+static int snd_azf3328_playback_prepare(snd_pcm_substream_t *substream)
+{
+#if 0
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+ unsigned int size = snd_pcm_lib_buffer_bytes(substream);
+ unsigned int count = snd_pcm_lib_period_bytes(substream);
+#endif
+
+ snd_azf3328_dbgcallenter();
+#if 0
+ snd_azf3328_setfmt(chip, IDX_IO_PLAY_SOUNDFORMAT, runtime->rate, snd_pcm_format_width(runtime->format), runtime->channels);
+ snd_azf3328_setdmaa(chip, runtime->dma_addr, count, size, 0);
+#endif
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_capture_prepare(snd_pcm_substream_t * substream)
+{
+#if 0
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+ unsigned int size = snd_pcm_lib_buffer_bytes(substream);
+ unsigned int count = snd_pcm_lib_period_bytes(substream);
+#endif
+
+ snd_azf3328_dbgcallenter();
+#if 0
+ snd_azf3328_setfmt(chip, IDX_IO_REC_SOUNDFORMAT, runtime->rate, snd_pcm_format_width(runtime->format), runtime->channels);
+ snd_azf3328_setdmaa(chip, runtime->dma_addr, count, size, 1);
+#endif
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_playback_trigger(snd_pcm_substream_t * substream, int cmd)
+{
+ unsigned long flags;
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+ int result = 0;
+ unsigned int status1;
+
+ snd_azf3328_dbgcalls("snd_azf3328_playback_trigger cmd %d\n", cmd);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+
+ snd_azf3328_dbgio(chip, "trigger1");
+
+ /* mute WaveOut */
+ snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1);
+
+ snd_azf3328_setfmt(chip, IDX_IO_PLAY_SOUNDFORMAT, runtime->rate, snd_pcm_format_width(runtime->format), runtime->channels);
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+ /* stop playback */
+ status1 = inw(chip->codec_port+IDX_IO_PLAY_FLAGS);
+ status1 &= ~DMA_RESUME;
+ outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
+
+ /* FIXME: clear interrupts or what??? */
+ outw(0xffff, chip->codec_port+IDX_IO_PLAY_IRQMASK);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 0);
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+#if WIN9X
+ /* FIXME: enable playback/recording??? */
+ status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
+ outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
+
+ /* start playback again */
+ /* FIXME: what is this value (0x0010)??? */
+ status1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING;
+ outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
+#else /* NT4 */
+ outw(0x00, chip->codec_port+IDX_IO_PLAY_FLAGS);
+ outw(DMA_PLAY_SOMETHING1, chip->codec_port+IDX_IO_PLAY_FLAGS);
+ outw(DMA_PLAY_SOMETHING1|DMA_PLAY_SOMETHING2, chip->codec_port+IDX_IO_PLAY_FLAGS);
+ outw(DMA_RESUME|SOMETHING_ALMOST_ALWAYS_SET|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE, chip->codec_port+IDX_IO_PLAY_FLAGS);
+#endif
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ /* now unmute WaveOut */
+ snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 0);
+
+ snd_azf3328_dbgio(chip, "trigger2");
+ chip->is_playing = 1;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* mute WaveOut */
+ snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1);
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+ /* stop playback */
+ status1 = inw(chip->codec_port+IDX_IO_PLAY_FLAGS);
+
+ status1 &= ~DMA_RESUME;
+ outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
+
+ status1 |= DMA_PLAY_SOMETHING1;
+ outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
+
+ status1 &= ~DMA_PLAY_SOMETHING1;
+ outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ /* now unmute WaveOut */
+ snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 0);
+ chip->is_playing = 0;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ snd_printk("FIXME: SNDRV_PCM_TRIGGER_PAUSE_PUSH NIY!\n");
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_printk("FIXME: SNDRV_PCM_TRIGGER_PAUSE_RELEASE NIY!\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_azf3328_dbgcallleave();
+ return result;
+}
+
+/* this is just analogous to playback; I'm not quite sure whether recording
+ * should actually be triggered like that */
+static int snd_azf3328_capture_trigger(snd_pcm_substream_t * substream, int cmd)
+{
+ unsigned long flags;
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+ int result = 0;
+ unsigned int status1;
+
+ snd_azf3328_dbgcalls("snd_azf3328_capture_trigger cmd %d\n", cmd);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+
+ snd_azf3328_dbgio(chip, "trigger1");
+
+ snd_azf3328_setfmt(chip, IDX_IO_REC_SOUNDFORMAT, runtime->rate, snd_pcm_format_width(runtime->format), runtime->channels);
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+ /* stop recording */
+ status1 = inw(chip->codec_port+IDX_IO_REC_FLAGS);
+ status1 &= ~DMA_RESUME;
+ outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
+
+ /* FIXME: clear interrupts or what??? */
+ outw(0xffff, chip->codec_port+IDX_IO_REC_IRQMASK);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 1);
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+#if WIN9X
+ /* FIXME: enable playback/recording??? */
+ status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
+ outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
+
+	/* start recording again */
+ /* FIXME: what is this value (0x0010)??? */
+ status1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING;
+ outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
+#else
+ outw(0x00, chip->codec_port+IDX_IO_REC_FLAGS);
+ outw(DMA_PLAY_SOMETHING1, chip->codec_port+IDX_IO_REC_FLAGS);
+ outw(DMA_PLAY_SOMETHING1|DMA_PLAY_SOMETHING2, chip->codec_port+IDX_IO_REC_FLAGS);
+ outw(DMA_RESUME|SOMETHING_ALMOST_ALWAYS_SET|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE, chip->codec_port+IDX_IO_REC_FLAGS);
+#endif
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ snd_azf3328_dbgio(chip, "trigger2");
+ chip->is_playing = 1;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ spin_lock_irqsave(&chip->reg_lock, flags);
+ /* stop recording */
+ status1 = inw(chip->codec_port+IDX_IO_REC_FLAGS);
+
+ status1 &= ~DMA_RESUME;
+ outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
+
+ status1 |= DMA_PLAY_SOMETHING1;
+ outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
+
+ status1 &= ~DMA_PLAY_SOMETHING1;
+ outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ chip->is_playing = 0;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ snd_printk("FIXME: SNDRV_PCM_TRIGGER_PAUSE_PUSH NIY!\n");
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_printk("FIXME: SNDRV_PCM_TRIGGER_PAUSE_RELEASE NIY!\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_azf3328_dbgcallleave();
+ return result;
+}
+
+static snd_pcm_uframes_t snd_azf3328_playback_pointer(snd_pcm_substream_t * substream)
+{
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ unsigned long bufptr, playptr;
+ unsigned long result;
+ snd_pcm_uframes_t frmres;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+#if QUERY_HARDWARE
+ bufptr = inl(chip->codec_port+IDX_IO_PLAY_DMA_START_1);
+#else
+ bufptr = substream->runtime->dma_addr;
+#endif
+ playptr = inl(chip->codec_port+IDX_IO_PLAY_DMA_CURRPOS);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ result = playptr - bufptr;
+ frmres = bytes_to_frames( substream->runtime, result );
+ snd_azf3328_dbgplay("result %lx, playptr %lx (base %x), frames %ld\n", result, playptr, substream->runtime->dma_addr, frmres);
+ return frmres;
+}
+
+static snd_pcm_uframes_t snd_azf3328_capture_pointer(snd_pcm_substream_t * substream)
+{
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ unsigned long bufptr, recptr;
+ unsigned long result;
+ snd_pcm_uframes_t frmres;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->reg_lock, flags);
+#if QUERY_HARDWARE
+ bufptr = inl(chip->codec_port+IDX_IO_REC_DMA_START_1);
+#else
+ bufptr = substream->runtime->dma_addr;
+#endif
+ recptr = inl(chip->codec_port+IDX_IO_REC_DMA_CURRPOS);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ result = recptr - bufptr;
+ frmres = bytes_to_frames( substream->runtime, result );
+ snd_azf3328_dbgplay("result %lx, rec ptr %lx (base %x), frames %ld\n", result, recptr, substream->runtime->dma_addr, frmres);
+ return frmres;
+}
+
+static irqreturn_t snd_azf3328_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ azf3328_t *chip = snd_magic_cast(azf3328_t, dev_id, return IRQ_NONE);
+ unsigned int status, which;
+ static unsigned long count = 0;
+
+ status = inw(chip->codec_port+IDX_IO_IRQSTATUS);
+
+ /* fast path out, to ease interrupt sharing */
+ if (!(status & (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_MPU401|IRQ_SOMEIRQ)))
+ return IRQ_NONE; /* must be interrupt for another device */
+
+ snd_azf3328_dbgplay("Interrupt %ld!\nIDX_IO_PLAY_FLAGS %04x, IDX_IO_PLAY_IRQMASK %04x, IDX_IO_IRQSTATUS %04x\n", count, inw(chip->codec_port+IDX_IO_PLAY_FLAGS), inw(chip->codec_port+IDX_IO_PLAY_IRQMASK), inw(chip->codec_port+IDX_IO_IRQSTATUS));
+
+ if (status & IRQ_PLAYBACK)
+ {
+ spin_lock(&chip->reg_lock);
+ which = inw(chip->codec_port+IDX_IO_PLAY_IRQMASK);
+ if (which & IRQ_FINISHED_PLAYBUF_1)
+ /* ack IRQ */
+ outw(which | IRQ_FINISHED_PLAYBUF_1, chip->codec_port+IDX_IO_PLAY_IRQMASK);
+ if (which & IRQ_FINISHED_PLAYBUF_2)
+ /* ack IRQ */
+ outw(which | IRQ_FINISHED_PLAYBUF_2, chip->codec_port+IDX_IO_PLAY_IRQMASK);
+ if (which & IRQ_PLAY_SOMETHING)
+ {
+ snd_azf3328_dbgplay("azt3328: unknown play IRQ type occurred, please report!\n");
+ }
+ if (chip->pcm && chip->playback_substream)
+ {
+ snd_azf3328_dbgplay("which %x, playptr %lx\n", which, inl(chip->codec_port+IDX_IO_PLAY_DMA_CURRPOS));
+ snd_pcm_period_elapsed(chip->playback_substream);
+ snd_azf3328_dbgplay("period done, playptr %lx.\n", inl(chip->codec_port+IDX_IO_PLAY_DMA_CURRPOS));
+ }
+ else
+ snd_azf3328_dbgplay("azt3328: ouch, irq handler problem!\n");
+ spin_unlock(&chip->reg_lock);
+ }
+ if (status & IRQ_RECORDING)
+ {
+ spin_lock(&chip->reg_lock);
+ which = inw(chip->codec_port+IDX_IO_REC_IRQMASK);
+ if (which & IRQ_FINISHED_RECBUF_1)
+ /* ack interrupt */
+ outw(which | IRQ_FINISHED_RECBUF_1, chip->codec_port+IDX_IO_REC_IRQMASK);
+ if (which & IRQ_FINISHED_RECBUF_2)
+ /* ack interrupt */
+ outw(which | IRQ_FINISHED_RECBUF_2, chip->codec_port+IDX_IO_REC_IRQMASK);
+ if (which & IRQ_REC_SOMETHING)
+ {
+ snd_azf3328_dbgplay("azt3328: unknown rec IRQ type occurred, please report!\n");
+ }
+ if (chip->pcm && chip->capture_substream)
+ {
+ snd_azf3328_dbgplay("which %x, recptr %lx\n", which, inl(chip->codec_port+IDX_IO_REC_DMA_CURRPOS));
+ spin_unlock(&chip->reg_lock);
+ snd_pcm_period_elapsed(chip->capture_substream);
+ spin_lock(&chip->reg_lock);
+ snd_azf3328_dbgplay("period done, recptr %lx.\n", inl(chip->codec_port+IDX_IO_REC_DMA_CURRPOS));
+ }
+ spin_unlock(&chip->reg_lock);
+ }
+ if (status & IRQ_MPU401)
+ snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data, regs);
+ if (status & IRQ_SOMEIRQ)
+ snd_azf3328_dbgplay("azt3328: unknown IRQ type occurred, please report!\n");
+ count++;
+ return IRQ_HANDLED;
+}
+
+/*****************************************************************/
+
+static snd_pcm_hardware_t snd_azf3328_playback =
+{
+ /* FIXME!! Correct? */
+ .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
+ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_KNOT,
+ .rate_min = 5512,
+ .rate_max = 64000,
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = 65536,
+ .period_bytes_min = 64,
+ .period_bytes_max = 65536,
+ .periods_min = 1,
+ .periods_max = 1024,
+ /* FIXME: maybe that card actually has a FIFO?
+ * Hmm, it seems newer revisions do have one, but we still don't know
+ * its size... */
+ .fifo_size = 0,
+};
+
+static snd_pcm_hardware_t snd_azf3328_capture =
+{
+ /* FIXME */
+ .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
+ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_KNOT,
+ .rate_min = 5512,
+ .rate_max = 64000,
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = 65536,
+ .period_bytes_min = 64,
+ .period_bytes_max = 65536,
+ .periods_min = 1,
+ .periods_max = 1024,
+ .fifo_size = 0,
+};
+
+
+static unsigned int snd_azf3328_fixed_rates[] = {
+ 5512, 6620, 8000, 9600, 11025, 16000, 22050, 32000, 44100, 48000, 64000
+};
+static snd_pcm_hw_constraint_list_t snd_azf3328_hw_constraints_rates = {
+ .count = ARRAY_SIZE(snd_azf3328_fixed_rates),
+ .list = snd_azf3328_fixed_rates,
+ .mask = 0,
+};
+
+/*****************************************************************/
+
+static int snd_azf3328_playback_open(snd_pcm_substream_t * substream)
+{
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+
+ snd_azf3328_dbgcallenter();
+ chip->playback_substream = substream;
+ runtime->hw = snd_azf3328_playback;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &snd_azf3328_hw_constraints_rates);
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_capture_open(snd_pcm_substream_t * substream)
+{
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+
+ snd_azf3328_dbgcallenter();
+ chip->capture_substream = substream;
+ runtime->hw = snd_azf3328_capture;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &snd_azf3328_hw_constraints_rates);
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_playback_close(snd_pcm_substream_t * substream)
+{
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+
+ snd_azf3328_dbgcallenter();
+
+ chip->playback_substream = NULL;
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static int snd_azf3328_capture_close(snd_pcm_substream_t * substream)
+{
+ azf3328_t *chip = snd_pcm_substream_chip(substream);
+
+ snd_azf3328_dbgcallenter();
+ chip->capture_substream = NULL;
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+/******************************************************************/
+
+static snd_pcm_ops_t snd_azf3328_playback_ops = {
+ .open = snd_azf3328_playback_open,
+ .close = snd_azf3328_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_azf3328_hw_params,
+ .hw_free = snd_azf3328_hw_free,
+ .prepare = snd_azf3328_playback_prepare,
+ .trigger = snd_azf3328_playback_trigger,
+ .pointer = snd_azf3328_playback_pointer
+};
+
+static snd_pcm_ops_t snd_azf3328_capture_ops = {
+ .open = snd_azf3328_capture_open,
+ .close = snd_azf3328_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_azf3328_hw_params,
+ .hw_free = snd_azf3328_hw_free,
+ .prepare = snd_azf3328_capture_prepare,
+ .trigger = snd_azf3328_capture_trigger,
+ .pointer = snd_azf3328_capture_pointer
+};
+
+static void snd_azf3328_pcm_free(snd_pcm_t *pcm)
+{
+ azf3328_t *chip = snd_magic_cast(azf3328_t, pcm->private_data, return);
+ chip->pcm = NULL;
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static int __devinit snd_azf3328_pcm(azf3328_t *chip, int device)
+{
+ snd_pcm_t *pcm;
+ int err;
+
+ snd_azf3328_dbgcallenter();
+ if ((err = snd_pcm_new(chip->card, "AZF3328 DSP", device, 1, 1, &pcm)) < 0)
+ return err;
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_azf3328_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_azf3328_capture_ops);
+
+ pcm->private_data = chip;
+ pcm->private_free = snd_azf3328_pcm_free;
+ pcm->info_flags = 0;
+ strcpy(pcm->name, chip->card->shortname);
+ chip->pcm = pcm;
+
+ snd_pcm_lib_preallocate_pci_pages_for_all(chip->pci, pcm, 64*1024, 64*1024);
+
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+/******************************************************************/
+
+static int snd_azf3328_free(azf3328_t *chip)
+{
+ if (chip->irq < 0)
+ goto __end_hw;
+
+ /* interrupt setup - mask everything */
+ /* FIXME */
+
+ synchronize_irq(chip->irq);
+ __end_hw:
+ if (chip->res_codec_port) {
+ release_resource(chip->res_codec_port);
+ kfree_nocheck(chip->res_codec_port);
+ }
+ if (chip->res_io2_port) {
+ release_resource(chip->res_io2_port);
+ kfree_nocheck(chip->res_io2_port);
+ }
+ if (chip->res_mpu_port) {
+ release_resource(chip->res_mpu_port);
+ kfree_nocheck(chip->res_mpu_port);
+ }
+ if (chip->res_synth_port) {
+ release_resource(chip->res_synth_port);
+ kfree_nocheck(chip->res_synth_port);
+ }
+ if (chip->res_mixer_port) {
+ release_resource(chip->res_mixer_port);
+ kfree_nocheck(chip->res_mixer_port);
+ }
+ if (chip->irq >= 0)
+ free_irq(chip->irq, (void *)chip);
+
+ snd_magic_kfree(chip);
+ return 0;
+}
+
+static int snd_azf3328_dev_free(snd_device_t *device)
+{
+ azf3328_t *chip = snd_magic_cast(azf3328_t, device->device_data, return -ENXIO);
+ return snd_azf3328_free(chip);
+}
+
+#if 0
+/* check whether a bit can be modified */
+static void snd_azf3328_test_bit(unsigned int reg, int bit)
+{
+ unsigned char val, valoff, valon;
+
+ val = inb(reg);
+
+ outb(val & ~(1 << bit), reg);
+ valoff = inb(reg);
+
+ outb(val|(1 << bit), reg);
+ valon = inb(reg);
+
+ outb(val, reg);
+
+ printk(KERN_ERR "reg %04x bit %d: %02x %02x %02x\n", reg, bit, val, valoff, valon);
+}
+#endif
+
+static int __devinit snd_azf3328_create(snd_card_t * card,
+ struct pci_dev *pci,
+ unsigned long device_type,
+ azf3328_t ** rchip)
+{
+ unsigned long flags;
+ azf3328_t *chip;
+ int err;
+ static snd_device_ops_t ops = {
+ .dev_free = snd_azf3328_dev_free,
+ };
+ u16 tmp;
+
+ *rchip = NULL;
+
+ if ((err = pci_enable_device(pci)) < 0)
+ return err;
+
+ chip = snd_magic_kcalloc(azf3328_t, 0, GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+ spin_lock_init(&chip->reg_lock);
+ chip->card = card;
+ chip->pci = pci;
+ chip->irq = -1;
+
+ /* check if we can restrict PCI DMA transfers to 24 bits */
+ if (!pci_dma_supported(pci, 0x00ffffff)) {
+ snd_printk("architecture does not support 24bit PCI busmaster DMA\n");
+ return -ENXIO;
+ }
+ pci_set_dma_mask(pci, 0x00ffffff);
+
+ chip->codec_port = pci_resource_start(pci, 0);
+ if ((chip->res_codec_port = request_region(chip->codec_port, 0x80, "Aztech AZF3328 I/O")) == NULL) {
+ snd_azf3328_free(chip);
+ snd_printk("unable to grab I/O port at 0x%lx-0x%lx\n", chip->codec_port, chip->codec_port + 0x80 - 1);
+ return -EBUSY;
+ }
+ chip->io2_port = pci_resource_start(pci, 1);
+ if ((chip->res_io2_port = request_region(chip->io2_port, 0x08, "Aztech AZF3328 I/O 2")) == NULL) {
+ snd_azf3328_free(chip);
+ snd_printk("unable to grab I/O 2 port at 0x%lx-0x%lx\n", chip->io2_port, chip->io2_port + 0x08 - 1);
+ return -EBUSY;
+ }
+ chip->mpu_port = pci_resource_start(pci, 2);
+ if ((chip->res_mpu_port = request_region(chip->mpu_port, 0x04, "Aztech AZF3328 MPU401")) == NULL) {
+ snd_azf3328_free(chip);
+ snd_printk("unable to grab MPU401 port at 0x%lx-0x%lx\n", chip->mpu_port, chip->mpu_port + 0x04 - 1);
+ return -EBUSY;
+ }
+ chip->synth_port = pci_resource_start(pci, 3);
+ if ((chip->res_synth_port = request_region(chip->synth_port, 0x08, "Aztech AZF3328 OPL3")) == NULL) {
+ snd_azf3328_free(chip);
+ snd_printk("unable to grab OPL3 port at 0x%lx-0x%lx\n", chip->synth_port, chip->synth_port + 0x08 - 1);
+ return -EBUSY;
+ }
+ chip->mixer_port = pci_resource_start(pci, 4);
+ if ((chip->res_mixer_port = request_region(chip->mixer_port, 0x40, "Aztech AZF3328 Mixer")) == NULL) {
+ snd_azf3328_free(chip);
+ snd_printk("unable to grab mixer port at 0x%lx-0x%lx\n", chip->mixer_port, chip->mixer_port + 0x40 - 1);
+ return -EBUSY;
+ }
+
+ if (request_irq(pci->irq, snd_azf3328_interrupt, SA_INTERRUPT|SA_SHIRQ, card->shortname, (void *)chip)) {
+ snd_azf3328_free(chip);
+ snd_printk("unable to grab IRQ %d\n", pci->irq);
+ return -EBUSY;
+ }
+ chip->irq = pci->irq;
+ pci_set_master(pci);
+ synchronize_irq(chip->irq);
+
+ snd_azf3328_dbgmisc("codec_port 0x%lx, io2_port 0x%lx, mpu_port 0x%lx, synth_port 0x%lx, mixer_port 0x%lx, irq %d\n", chip->codec_port, chip->io2_port, chip->mpu_port, chip->synth_port, chip->mixer_port, chip->irq);
+
+ snd_azf3328_dbgmisc("io2 %02x %02x %02x %02x %02x %02x\n", snd_azf3328_io2_read(chip, 0), snd_azf3328_io2_read(chip, 1), snd_azf3328_io2_read(chip, 2), snd_azf3328_io2_read(chip, 3), snd_azf3328_io2_read(chip, 4), snd_azf3328_io2_read(chip, 5));
+
+ for (tmp=0; tmp <= 0x01; tmp += 1)
+ snd_azf3328_dbgmisc("0x%02x: opl 0x%04x, mpu300 0x%04x, mpu310 0x%04x, mpu320 0x%04x, mpu330 0x%04x\n", tmp, inb(0x388 + tmp), inb(0x300 + tmp), inb(0x310 + tmp), inb(0x320 + tmp), inb(0x330 + tmp));
+
+ /* create mixer interface & switches */
+ if ((err = snd_azf3328_mixer_new(chip)) < 0)
+ return err;
+
+ if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ snd_azf3328_free(chip);
+ return err;
+ }
+
+#if 0
+ /* set very low bitrate to reduce noise and power consumption? */
+ snd_azf3328_setfmt(chip, IDX_IO_PLAY_SOUNDFORMAT, 5512, 8, 1);
+#endif
+
+ /* standard chip init stuff */
+ spin_lock_irqsave(&chip->reg_lock, flags);
+ outb(DMA_PLAY_SOMETHING2|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE, chip->codec_port + IDX_IO_PLAY_FLAGS);
+ outb(DMA_PLAY_SOMETHING2|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE, chip->codec_port + IDX_IO_SOMETHING_FLAGS);
+ outb(DMA_PLAY_SOMETHING2|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE, chip->codec_port + IDX_IO_REC_FLAGS);
+ outb(0x0, chip->codec_port + IDX_IO_IRQ63H);
+
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+
+ *rchip = chip;
+ return 0;
+}
+
+static void __devinit snd_azf3328_config_joystick(azf3328_t *chip, int joystick)
+{
+ int i, detected = 0, activate = 0;
+ char *msg = NULL;
+ unsigned char val;
+
+ if (joystick == -1) /* auto detection/activation */
+ {
+ for (i=0x200; i <= 0x207; i++)
+ if (inb(i) != 0xff)
+ detected = 1; /* other joy found, don't activate */
+ }
+
+ if ((joystick == -1) && (detected == 1))
+ {
+ activate = 0;
+ msg = "DISABLED (address occupied by another joystick port)";
+ }
+ else
+ if ((joystick == -1) && (detected == 0))
+ {
+ activate = 1;
+ msg = "ENABLED (via autodetect)";
+ }
+ else
+ if (joystick == 0)
+ {
+ activate = 0;
+ msg = "DISABLED (forced)";
+ }
+ else
+ if (joystick == 1)
+ {
+ activate = 1;
+ msg = "ENABLED (Warning: forced!)";
+ }
+ val = inb(chip->io2_port + IDX_IO2_LEGACY_ADDR);
+ if (activate)
+ val |= LEGACY_JOY;
+ else
+ val &= ~LEGACY_JOY;
+
+ outb(val, chip->io2_port + IDX_IO2_LEGACY_ADDR);
+#ifdef MODULE
+ printk("azt3328: Joystick port: %s.\n", msg);
+#endif
+}
+
+static int __devinit snd_azf3328_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ static int dev;
+ snd_card_t *card;
+ azf3328_t *chip;
+ opl3_t *opl3;
+ int err;
+
+ snd_azf3328_dbgcallenter();
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+ if (!enable[dev]) {
+ dev++;
+ return -ENOENT;
+ }
+
+ card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0 );
+ if (card == NULL)
+ return -ENOMEM;
+
+ strcpy(card->driver, "AZF3328");
+ strcpy(card->shortname, "Aztech AZF3328 (PCI168)");
+
+ if ((err = snd_azf3328_create(card, pci, pci_id->driver_data, &chip)) < 0) {
+ snd_card_free(card);
+ return err;
+ }
+
+ if ((err = snd_mpu401_uart_new( card, 0, MPU401_HW_MPU401,
+ chip->mpu_port, 1, pci->irq, 0,
+ &chip->rmidi)) < 0) {
+ snd_card_free(card);
+ snd_printk("azf3328: no MPU-401 device at 0x%lx?\n", chip->mpu_port);
+ return err;
+ }
+
+ if ((err = snd_azf3328_pcm(chip, 0)) < 0) {
+ snd_card_free(card);
+ return err;
+ }
+
+ if (snd_opl3_create(card, chip->synth_port, chip->synth_port+2,
+ OPL3_HW_AUTO, 1, &opl3) < 0) {
+ snd_printk("azf3328: no OPL3 device at 0x%lx-0x%lx?\n",
+ chip->synth_port, chip->synth_port+2 );
+ } else {
+ if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ snd_card_free(card);
+ return err;
+ }
+ }
+
+ snd_azf3328_dbgio(chip, "create");
+
+ sprintf(card->longname, "%s at 0x%lx, irq %i",
+ card->shortname, chip->codec_port, chip->irq);
+
+ if ((err = snd_card_register(card)) < 0) {
+ snd_card_free(card);
+ return err;
+ }
+
+#ifdef MODULE
+ printk(
+"azt3328: Experimental driver for Aztech AZF3328-based soundcards such as PCI168.\n"
+"azt3328: ZERO support from Aztech: you might think hard about future purchase.\n"
+"azt3328: Feel free to contact hw7oshyuv3001@sneakemail.com for bug reports etc.!\n");
+#endif
+
+ snd_azf3328_config_joystick(chip, joystick[dev]);
+
+ pci_set_drvdata(pci, chip);
+ dev++;
+
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static void __devexit snd_azf3328_remove(struct pci_dev *pci)
+{
+ azf3328_t *chip = snd_magic_cast(azf3328_t, pci_get_drvdata(pci), return);
+
+ snd_azf3328_dbgcallenter();
+
+ /* reset (close) mixer */
+ snd_azf3328_mixer_set_mute(chip, IDX_MIXER_PLAY_MASTER, 1); /* first mute master volume */
+ snd_azf3328_mixer_write(chip, IDX_MIXER_RESET, 0x0, WORD_VALUE);
+
+ if (chip)
+ snd_card_free(chip->card);
+ pci_set_drvdata(pci, NULL);
+ snd_azf3328_dbgcallleave();
+}
+
+static struct pci_driver driver = {
+ .name = "AZF3328",
+ .id_table = snd_azf3328_ids,
+ .probe = snd_azf3328_probe,
+ .remove = __devexit_p(snd_azf3328_remove),
+};
+
+static int __init alsa_card_azf3328_init(void)
+{
+ int err;
+
+ snd_azf3328_dbgcallenter();
+
+ if ((err = pci_module_init(&driver)) < 0)
+ {
+#ifdef MODULE
+ printk(KERN_ERR "azt3328: no AZF3328 based soundcards found or device busy\n");
+#endif
+ return err;
+ }
+ snd_azf3328_dbgcallleave();
+ return 0;
+}
+
+static void __exit alsa_card_azf3328_exit(void)
+{
+ snd_azf3328_dbgcallenter();
+ pci_unregister_driver(&driver);
+ snd_azf3328_dbgcallleave();
+}
+
+module_init(alsa_card_azf3328_init)
+module_exit(alsa_card_azf3328_exit)
+
+#ifndef MODULE
+
+/* format is: snd-azf3328=enable,index,id */
+
+static int __init alsa_card_azf3328_setup(char *str)
+{
+ static unsigned __initdata nr_dev = 0;
+
+ snd_azf3328_dbgcallenter();
+
+ if (nr_dev >= SNDRV_CARDS)
+ return 0;
+ (void)(get_option(&str,&enable[nr_dev]) == 2 &&
+ get_option(&str,&index[nr_dev]) == 2 &&
+ get_id(&str,&id[nr_dev]) == 2);
+ nr_dev++;
+ snd_azf3328_dbgcallleave();
+ return 1;
+}
+
+__setup("snd-azf3328=", alsa_card_azf3328_setup);
+
+#endif /* ifndef MODULE */
diff -urN linux-2.5.70-bk11/sound/pci/azt3328.h linux-2.5.70-bk12/sound/pci/azt3328.h
--- linux-2.5.70-bk11/sound/pci/azt3328.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.5.70-bk12/sound/pci/azt3328.h 2003-06-07 04:47:54.000000000 -0700
@@ -0,0 +1,165 @@
+#ifndef __SOUND_AZF3328_H
+#define __SOUND_AZF3328_H
+
+/* type argument to use for the I/O functions */
+#define WORD_VALUE 0x1000
+#define DWORD_VALUE 0x2000
+#define BYTE_VALUE 0x4000
+
+/*** main I/O area port indices ***/
+/* (only 0x70 of 0x80 bytes saved/restored by Windows driver) */
+/* the driver initialisation suggests a layout of 3 main areas:
+ * from 0x00 (playback), from 0x20 (recording) and from 0x40 (maybe DirectX
+ * timer ???). and probably another area from 0x60 to 0x6f
+ * (IRQ management, power management etc. ???). */
+/* playback area */
+#define IDX_IO_PLAY_FLAGS 0x00
+ /* able to reactivate output after output muting due to 8/16bit
+ * output change, just like 0x0002.
+ * 0x0001 is the only bit that's able to start the DMA counter */
+ #define DMA_RESUME 0x0001 /* paused if cleared ? */
+ /* 0x0002 *temporarily* set during DMA stopping. hmm
+ * both 0x0002 and 0x0004 set in playback setup. */
+ /* able to reactivate output after output muting due to 8/16bit
+ * output change, just like 0x0001. */
+ #define DMA_PLAY_SOMETHING1 0x0002 /* \ alternated (toggled) */
+ /* 0x0004: NOT able to reactivate output */
+ #define DMA_PLAY_SOMETHING2 0x0004 /* / bits */
+ #define SOMETHING_ALMOST_ALWAYS_SET 0x0008 /* ???; can be modified */
+ #define DMA_EPILOGUE_SOMETHING 0x0010
+ #define DMA_SOMETHING_ELSE 0x0020 /* ??? */
+ #define SOMETHING_UNMODIFIABLE 0xffc0 /* unused ? not modifiable */
+#define IDX_IO_PLAY_IRQMASK 0x02
+ /* write back to flags in case flags are set, in order to ACK IRQ in handler
+ * (bit 1 of port 0x64 indicates interrupt for one of these three types)
+ * sometimes in this case it just writes 0xffff to globally ACK all IRQs
+ * settings written are not reflected when reading back, though.
+ * seems to be IRQ, too (frequently used: port |= 0x07 !), but who knows ? */
+ #define IRQ_PLAY_SOMETHING 0x0001 /* something & ACK */
+ #define IRQ_FINISHED_PLAYBUF_1 0x0002 /* 1st dmabuf finished & ACK */
+ #define IRQ_FINISHED_PLAYBUF_2 0x0004 /* 2nd dmabuf finished & ACK */
+ #define IRQMASK_SOME_STATUS_1 0x0008 /* \ related bits */
+ #define IRQMASK_SOME_STATUS_2 0x0010 /* / (checked together in loop) */
+ #define IRQMASK_UNMODIFIABLE 0xffe0 /* unused ? not modifiable */
+#define IDX_IO_PLAY_DMA_START_1 0x04 /* start address of 1st DMA play area */
+#define IDX_IO_PLAY_DMA_START_2 0x08 /* start address of 2nd DMA play area */
+#define IDX_IO_PLAY_DMA_LEN_1 0x0c /* length of 1st DMA play area */
+#define IDX_IO_PLAY_DMA_LEN_2 0x0e /* length of 2nd DMA play area */
+#define IDX_IO_PLAY_DMA_CURRPOS 0x10 /* current DMA position */
+#define IDX_IO_PLAY_DMA_CURROFS 0x14 /* offset within current DMA play area */
+#define IDX_IO_PLAY_SOUNDFORMAT 0x16
+ /* all unspecified bits can't be modified */
+ #define SOUNDFORMAT_FREQUENCY_MASK 0x000f
+ /* all _SUSPECTED_ values are not used by Windows drivers, so we don't
+ * have any hard facts, only rough measurements */
+ #define SOUNDFORMAT_FREQ_SUSPECTED_4000 0x0c
+ #define SOUNDFORMAT_FREQ_SUSPECTED_4800 0x0a
+ #define SOUNDFORMAT_FREQ_5510 0x0d
+ #define SOUNDFORMAT_FREQ_6620 0x0b
+ #define SOUNDFORMAT_FREQ_8000 0x00 /* also 0x0e ? */
+ #define SOUNDFORMAT_FREQ_9600 0x08
+ #define SOUNDFORMAT_FREQ_SUSPECTED_12000 0x09
+ #define SOUNDFORMAT_FREQ_11025 0x01 /* also 0x0f ? */
+ #define SOUNDFORMAT_FREQ_16000 0x02
+ #define SOUNDFORMAT_FREQ_22050 0x03
+ #define SOUNDFORMAT_FREQ_32000 0x04
+ #define SOUNDFORMAT_FREQ_44100 0x05
+ #define SOUNDFORMAT_FREQ_48000 0x06
+ #define SOUNDFORMAT_FREQ_SUSPECTED_64000 0x07
+ #define SOUNDFORMAT_FLAG_16BIT 0x0010
+ #define SOUNDFORMAT_FLAG_2CHANNELS 0x0020
+/* recording area (see also: playback bit flag definitions) */
+#define IDX_IO_REC_FLAGS 0x20 /* ?? */
+#define IDX_IO_REC_IRQMASK 0x22 /* ?? */
+ #define IRQ_REC_SOMETHING 0x0001 /* something & ACK */
+ #define IRQ_FINISHED_RECBUF_1 0x0002 /* 1st dmabuf finished & ACK */
+ #define IRQ_FINISHED_RECBUF_2 0x0004 /* 2nd dmabuf finished & ACK */
+ /* hmm, maybe these are just the corresponding *recording* flags ?
+ * but OTOH they are most likely at port 0x22 instead */
+ #define IRQMASK_SOME_STATUS_1 0x0008 /* \ related bits */
+ #define IRQMASK_SOME_STATUS_2 0x0010 /* / (checked together in loop) */
+#define IDX_IO_REC_DMA_START_1 0x24
+#define IDX_IO_REC_DMA_START_2 0x28
+#define IDX_IO_REC_DMA_LEN_1 0x2c
+#define IDX_IO_REC_DMA_LEN_2 0x2e
+#define IDX_IO_REC_DMA_CURRPOS 0x30
+#define IDX_IO_REC_DMA_CURROFS 0x34
+#define IDX_IO_REC_SOUNDFORMAT 0x36
+/* some third area ? (after playback and recording) */
+#define IDX_IO_SOMETHING_FLAGS 0x40 /* gets set to 0x34 just like port 0x0 and 0x20 on card init */
+/* general */
+#define IDX_IO_60H 0x60 /* writing 0xffff returns 0xffff */
+#define IDX_IO_62H 0x62 /* writing to WORD 0x0062 can hang the box ! --> responsible for IRQ management as a whole ?? */
+#define IDX_IO_IRQ63H 0x63 /* FIXME !! */
+ #define IO_IRQ63H_SOMETHING 0x04 /* being set in IRQ handler in case port 0x00 had 0x0020 set upon IRQ handler */
+#define IDX_IO_IRQSTATUS 0x64
+ #define IRQ_PLAYBACK 0x0001
+ #define IRQ_RECORDING 0x0002
+ #define IRQ_MPU401 0x0010
+ #define IRQ_SOMEIRQ 0x0020 /* ???? */
+ #define IRQ_WHO_KNOWS_UNUSED 0x00e0 /* probably unused */
+#define IDX_IO_66H 0x66 /* writing 0xffff returns 0x0000 */
+#define IDX_IO_SOME_VALUE 0x68 /* this is always set to 0x3ff, and writable; maybe some buffer limit, but I couldn't find out more */
+#define IDX_IO_6AH 0x6A /* this WORD can be set to have bits 0x0028 activated; actually inhibits PCM playback !!! maybe power management ?? */
+#define IDX_IO_6CH 0x6C /* this WORD can have all its bits activated ? */
+#define IDX_IO_6EH 0x6E /* writing 0xffff returns 0x83fe */
+/* further I/O indices not saved/restored, so probably not used */
+
+/*** I/O 2 area port indices ***/
+/* (only 0x06 of 0x08 bytes saved/restored by Windows driver) */
+#define IDX_IO2_LEGACY_ADDR 0x04
+ #define LEGACY_SOMETHING 0x01 /* OPL3 ?? */
+ #define LEGACY_JOY 0x08
+
+/*** mixer I/O area port indices ***/
+/* (only 0x22 of 0x40 bytes saved/restored by Windows driver)
+ * generally spoken: AC97 register index = AZF3328 mixer reg index + 2
+ * (in other words: AZF3328 NOT fully AC97 compliant) */
+ #define MIXER_VOLUME_RIGHT_MASK 0x001f
+ #define MIXER_VOLUME_LEFT_MASK 0x1f00
+ #define MIXER_MUTE_MASK 0x8000
+#define IDX_MIXER_RESET 0x00 /* does NOT seem to have AC97 ID bits */
+#define IDX_MIXER_PLAY_MASTER 0x02
+#define IDX_MIXER_MODEMOUT 0x04
+#define IDX_MIXER_BASSTREBLE 0x06
+ #define MIXER_BASSTREBLE_TREBLE_VOLUME_MASK 0x000e
+ #define MIXER_BASSTREBLE_BASS_VOLUME_MASK 0x0e00
+#define IDX_MIXER_PCBEEP 0x08
+#define IDX_MIXER_MODEMIN 0x0a
+#define IDX_MIXER_MIC 0x0c
+ #define MIXER_MIC_MICGAIN_20DB_ENHANCEMENT_MASK 0x0040
+#define IDX_MIXER_LINEIN 0x0e
+#define IDX_MIXER_CDAUDIO 0x10
+#define IDX_MIXER_VIDEO 0x12
+#define IDX_MIXER_AUX 0x14
+#define IDX_MIXER_WAVEOUT 0x16
+#define IDX_MIXER_FMSYNTH 0x18
+#define IDX_MIXER_REC_SELECT 0x1a
+ #define MIXER_REC_SELECT_MIC 0x00
+ #define MIXER_REC_SELECT_CD 0x01
+ #define MIXER_REC_SELECT_VIDEO 0x02
+ #define MIXER_REC_SELECT_AUX 0x03
+ #define MIXER_REC_SELECT_LINEIN 0x04
+ #define MIXER_REC_SELECT_MIXSTEREO 0x05
+ #define MIXER_REC_SELECT_MIXMONO 0x06
+ #define MIXER_REC_SELECT_MONOIN 0x07
+#define IDX_MIXER_REC_VOLUME 0x1c
+#define IDX_MIXER_ADVCTL1 0x1e
+ /* unlisted bits are unmodifiable */
+ #define MIXER_ADVCTL1_3DWIDTH_MASK 0x000e
+ #define MIXER_ADVCTL1_HIFI3D_MASK 0x0300
+#define IDX_MIXER_ADVCTL2 0x20 /* resembles AC97_GENERAL_PURPOSE reg ! */
+ /* unlisted bits are unmodifiable */
+ #define MIXER_ADVCTL2_BIT7 0x0080 /* WaveOut 3D Bypass ? mutes WaveOut at LineOut */
+ #define MIXER_ADVCTL2_BIT8 0x0100 /* is this Modem Out Select ? */
+ #define MIXER_ADVCTL2_BIT9 0x0200 /* Mono Select Source ? */
+ #define MIXER_ADVCTL2_BIT13 0x2000 /* 3D enable ? */
+ #define MIXER_ADVCTL2_BIT15 0x8000 /* unknown */
+
+#define IDX_MIXER_SOMETHING30H 0x30 /* used, but unknown ??? */
+
+/* driver internal flags */
+#define SET_CHAN_LEFT 1
+#define SET_CHAN_RIGHT 2
+
+#endif /* __SOUND_AZF3328_H */
diff -urN linux-2.5.70-bk11/sound/pci/ens1370.c linux-2.5.70-bk12/sound/pci/ens1370.c
--- linux-2.5.70-bk11/sound/pci/ens1370.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/ens1370.c 2003-06-07 04:47:54.000000000 -0700
@@ -1634,8 +1634,10 @@
/* try reset AK4531 */
outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x02), ES_REG(ensoniq, 1370_CODEC));
+ inw(ES_REG(ensoniq, 1370_CODEC));
udelay(100);
outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x03), ES_REG(ensoniq, 1370_CODEC));
+ inw(ES_REG(ensoniq, 1370_CODEC));
udelay(100);
memset(&ak4531, 0, sizeof(ak4531));
@@ -1975,6 +1977,7 @@
}
/* AC'97 warm reset to start the bitclk */
outl(ensoniq->ctrl | ES_1371_SYNC_RES, ES_REG(ensoniq, CONTROL));
+ inl(ES_REG(ensoniq, CONTROL));
udelay(20);
outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
/* Init the sample rate converter */
diff -urN linux-2.5.70-bk11/sound/pci/fm801.c linux-2.5.70-bk12/sound/pci/fm801.c
--- linux-2.5.70-bk11/sound/pci/fm801.c 2003-05-26 18:00:46.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/fm801.c 2003-06-07 04:47:54.000000000 -0700
@@ -959,6 +959,7 @@
/* codec cold reset + AC'97 warm reset */
outw((1<<5)|(1<<6), FM801_REG(chip, CODEC_CTRL));
+ inw(FM801_REG(chip, CODEC_CTRL)); /* flush posting data */
udelay(100);
outw(0, FM801_REG(chip, CODEC_CTRL));
diff -urN linux-2.5.70-bk11/sound/pci/ice1712/Makefile linux-2.5.70-bk12/sound/pci/ice1712/Makefile
--- linux-2.5.70-bk11/sound/pci/ice1712/Makefile 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/ice1712/Makefile 2003-06-07 04:47:54.000000000 -0700
@@ -4,7 +4,7 @@
#
snd-ice1712-objs := ice1712.o delta.o hoontech.o ews.o ak4xxx.o
-snd-ice1724-objs := ice1724.o amp.o revo.o ak4xxx.o
+snd-ice1724-objs := ice1724.o amp.o revo.o aureon.o ak4xxx.o
# Toplevel Module Dependency
obj-$(CONFIG_SND_ICE1712) += snd-ice1712.o
diff -urN linux-2.5.70-bk11/sound/pci/ice1712/ak4xxx.c linux-2.5.70-bk12/sound/pci/ice1712/ak4xxx.c
--- linux-2.5.70-bk11/sound/pci/ice1712/ak4xxx.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/ice1712/ak4xxx.c 2003-06-07 04:47:54.000000000 -0700
@@ -97,17 +97,6 @@
udelay(1);
}
- /* save the data */
- if (ak->type == SND_AK4524 || ak->type == SND_AK4528) {
- if ((addr != 0x04 && addr != 0x05) || (data & 0x80) == 0)
- ak->images[chip][addr] = data;
- else
- ak->ipga_gain[chip][addr-4] = data;
- } else {
- /* AK4529, or else */
- ak->images[chip][addr] = data;
- }
-
if (priv->cs_mask == priv->cs_addr) {
if (priv->cif) {
/* assert a cs pulse to trigger */
diff -urN linux-2.5.70-bk11/sound/pci/ice1712/aureon.c linux-2.5.70-bk12/sound/pci/ice1712/aureon.c
--- linux-2.5.70-bk11/sound/pci/ice1712/aureon.c 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.5.70-bk12/sound/pci/ice1712/aureon.c 2003-06-07 04:47:54.000000000 -0700
@@ -0,0 +1,507 @@
+/*
+ * ALSA driver for ICEnsemble VT1724 (Envy24HT)
+ *
+ * Lowlevel functions for Terratec Aureon cards
+ *
+ * Copyright (c) 2003 Takashi Iwai
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * NOTES:
+ *
+ * - we reuse the akm4xxx_t record for storing the wm8770 codec data.
+ * both wm and akm codecs are pretty similar, so we can integrate
+ * both controls in the future, once if wm codecs are reused in
+ * many boards.
+ *
+ * - writing over SPI is implemented but reading is not yet.
+ * the SPDIF-in channel status, etc. can be read from CS chip.
+ *
+ * - DAC digital volumes are not implemented in the mixer.
+ * if they show better response than DAC analog volumes, we can use them
+ * instead.
+ *
+ * - Aureon boards are equipped with AC97 codec, too. it's used to do
+ * the analog mixing but not easily controllable (it's not connected
+ * directly from envy24ht chip). so let's leave it as it is.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ice1712.h"
+#include "envy24ht.h"
+#include "aureon.h"
+
+/* WM8770 registers */
+#define WM_DAC_ATTEN 0x00 /* DAC1-8 analog attenuation */
+#define WM_DAC_MASTER_ATTEN 0x08 /* DAC master analog attenuation */
+#define WM_DAC_DIG_ATTEN 0x09 /* DAC1-8 digital attenuation */
+#define WM_DAC_DIG_MATER_ATTEN 0x11 /* DAC master digital attenuation */
+#define WM_PHASE_SWAP 0x12 /* DAC phase */
+#define WM_DAC_CTRL1 0x13 /* DAC control bits */
+#define WM_MUTE 0x14 /* mute controls */
+#define WM_DAC_CTRL2 0x15 /* de-emphasis and zefo-flag */
+#define WM_INT_CTRL 0x16 /* interface control */
+#define WM_MASTER 0x17 /* master clock and mode */
+#define WM_POWERDOWN 0x18 /* power-down controls */
+#define WM_ADC_GAIN 0x19 /* ADC gain L(19)/R(1a) */
+#define WM_ADC_MUX 0x1b /* input MUX */
+#define WM_OUT_MUX1 0x1c /* output MUX */
+#define WM_OUT_MUX2 0x1e /* output MUX */
+#define WM_RESET 0x1f /* software reset */
+
+
+/*
+ * write data in the SPI mode
+ */
+static void aureon_spi_write(ice1712_t *ice, unsigned int cs, unsigned int data, int bits)
+{
+ unsigned int tmp;
+ int i;
+
+ tmp = snd_ice1712_gpio_read(ice);
+
+ snd_ice1712_gpio_set_mask(ice, ~(AUREON_WM_RW|AUREON_WM_DATA|AUREON_WM_CLK|
+ AUREON_WM_CS|AUREON_CS8415_CS));
+ tmp |= AUREON_WM_RW;
+ tmp &= ~cs;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+
+ for (i = bits - 1; i >= 0; i--) {
+ tmp &= ~AUREON_WM_CLK;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+ if (data & (1 << i))
+ tmp |= AUREON_WM_DATA;
+ else
+ tmp &= ~AUREON_WM_DATA;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+ tmp |= AUREON_WM_CLK;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+ }
+
+ tmp &= ~AUREON_WM_CLK;
+ tmp |= cs;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+ tmp |= AUREON_WM_CLK;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+}
+
+
+/*
+ * get the current register value of WM codec
+ */
+static unsigned short wm_get(ice1712_t *ice, int reg)
+{
+ reg <<= 1;
+ return ((unsigned short)ice->akm[0].images[reg] << 8) |
+ ice->akm[0].images[reg + 1];
+}
+
+/*
+ * set the register value of WM codec and remember it
+ */
+static void wm_put(ice1712_t *ice, int reg, unsigned short val)
+{
+ aureon_spi_write(ice, AUREON_WM_CS, (reg << 9) | (val & 0x1ff), 16);
+ reg <<= 1;
+ ice->akm[0].images[reg] = val >> 8;
+ ice->akm[0].images[reg + 1] = val;
+}
+
+/*
+ * DAC volume attenuation mixer control
+ */
+static int wm_dac_vol_info(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0; /* mute */
+ uinfo->value.integer.max = 101; /* 0dB */
+ return 0;
+}
+
+static int wm_dac_vol_get(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t *ucontrol)
+{
+ ice1712_t *ice = snd_kcontrol_chip(kcontrol);
+ int idx;
+ unsigned short vol;
+
+ down(&ice->gpio_mutex);
+ if (kcontrol->private_value)
+ idx = WM_DAC_MASTER_ATTEN;
+ else
+ idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + WM_DAC_ATTEN;
+ vol = wm_get(ice, idx) & 0x7f;
+ if (vol <= 0x1a)
+ ucontrol->value.integer.value[0] = 0;
+ else
+ ucontrol->value.integer.value[0] = vol - 0x1a;
+ up(&ice->gpio_mutex);
+ return 0;
+}
+
+static int wm_dac_vol_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t *ucontrol)
+{
+ ice1712_t *ice = snd_kcontrol_chip(kcontrol);
+ int idx;
+ unsigned short ovol, nvol;
+ int change;
+
+ snd_ice1712_save_gpio_status(ice);
+ if (kcontrol->private_value)
+ idx = WM_DAC_MASTER_ATTEN;
+ else
+ idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + WM_DAC_ATTEN;
+ nvol = ucontrol->value.integer.value[0] + 0x1a;
+ ovol = wm_get(ice, idx) & 0x7f;
+ change = (ovol != nvol);
+ if (change) {
+ if (nvol <= 0x1a && ovol <= 0x1a)
+ change = 0;
+ else
+ wm_put(ice, idx, nvol | 0x100);
+ }
+ snd_ice1712_restore_gpio_status(ice);
+ return change;
+}
+
+/*
+ * ADC gain mixer control
+ */
+static int wm_adc_vol_info(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0; /* -12dB */
+ uinfo->value.integer.max = 0x1f; /* 19dB */
+ return 0;
+}
+
+static int wm_adc_vol_get(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t *ucontrol)
+{
+ ice1712_t *ice = snd_kcontrol_chip(kcontrol);
+ int idx;
+ unsigned short vol;
+
+ down(&ice->gpio_mutex);
+ idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + WM_ADC_GAIN;
+ vol = wm_get(ice, idx) & 0x1f;
+ ucontrol->value.integer.value[0] = vol;
+ up(&ice->gpio_mutex);
+ return 0;
+}
+
+static int wm_adc_vol_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t *ucontrol)
+{
+ ice1712_t *ice = snd_kcontrol_chip(kcontrol);
+ int idx;
+ unsigned short ovol, nvol;
+ int change;
+
+ snd_ice1712_save_gpio_status(ice);
+ idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + WM_ADC_GAIN;
+ nvol = ucontrol->value.integer.value[0];
+ ovol = wm_get(ice, idx) & 0x1f;
+ change = (ovol != nvol);
+ if (change)
+ wm_put(ice, idx, nvol);
+ snd_ice1712_restore_gpio_status(ice);
+ return change;
+}
+
+/*
+ * ADC input mux mixer control
+ */
+static int wm_adc_mux_info(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t *uinfo)
+{
+ static char *texts[] = {
+ "CD Left",
+ "CD Right",
+ "Aux Left",
+ "Aux Right",
+ "Line Left",
+ "Line Right",
+ "Mic Left",
+ "Mic Right",
+ };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 2;
+ uinfo->value.enumerated.items = 8;
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+ strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+static int wm_adc_mux_get(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t *ucontrol)
+{
+ ice1712_t *ice = snd_kcontrol_chip(kcontrol);
+ unsigned short val;
+
+ down(&ice->gpio_mutex);
+ val = wm_get(ice, WM_ADC_MUX);
+ ucontrol->value.integer.value[0] = val & 7;
+ ucontrol->value.integer.value[1] = (val >> 4) & 7;
+ up(&ice->gpio_mutex);
+ return 0;
+}
+
+static int wm_adc_mux_put(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t *ucontrol)
+{
+ ice1712_t *ice = snd_kcontrol_chip(kcontrol);
+ unsigned short oval, nval;
+ int change;
+
+ snd_ice1712_save_gpio_status(ice);
+ oval = wm_get(ice, WM_ADC_MUX);
+ nval = oval & ~0x77;
+ nval |= ucontrol->value.integer.value[0] & 7;
+ nval |= (ucontrol->value.integer.value[1] & 7) << 4;
+ change = (oval != nval);
+ if (change)
+ wm_put(ice, WM_ADC_MUX, nval);
+ snd_ice1712_restore_gpio_status(ice);
+ return 0;
+}
+
+/*
+ * mixers
+ */
+
+static snd_kcontrol_new_t aureon51_dac_control __devinitdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "DAC Volume",
+ .count = 6,
+ .info = wm_dac_vol_info,
+ .get = wm_dac_vol_get,
+ .put = wm_dac_vol_put,
+};
+
+static snd_kcontrol_new_t aureon71_dac_control __devinitdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "DAC Volume",
+ .count = 8,
+ .info = wm_dac_vol_info,
+ .get = wm_dac_vol_get,
+ .put = wm_dac_vol_put,
+};
+
+static snd_kcontrol_new_t wm_controls[] __devinitdata = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Playback Volume",
+ .info = wm_dac_vol_info,
+ .get = wm_dac_vol_get,
+ .put = wm_dac_vol_put,
+ .private_value = 1,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "ADC Volume",
+ .count = 2,
+ .info = wm_adc_vol_info,
+ .get = wm_adc_vol_get,
+ .put = wm_adc_vol_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Capture Route",
+ .info = wm_adc_mux_info,
+ .get = wm_adc_mux_get,
+ .put = wm_adc_mux_put,
+ },
+};
+
+
+static int __devinit aureon_add_controls(ice1712_t *ice)
+{
+ unsigned int i;
+ int err;
+
+ if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AUREON51_SKY)
+ err = snd_ctl_add(ice->card, snd_ctl_new1(&aureon51_dac_control, ice));
+ else
+ err = snd_ctl_add(ice->card, snd_ctl_new1(&aureon71_dac_control, ice));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(wm_controls); i++) {
+ err = snd_ctl_add(ice->card, snd_ctl_new1(&wm_controls[i], ice));
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+
+/*
+ * initialize the chip
+ */
+static int __devinit aureon_init(ice1712_t *ice)
+{
+ static unsigned short wm_inits[] = {
+ 0x16, 0x122, /* I2S, normal polarity, 24bit */
+ 0x17, 0x022, /* 256fs, slave mode */
+ 0x18, 0x000, /* All power-up */
+ 0x00, 0, /* DAC1 analog mute */
+ 0x01, 0, /* DAC2 analog mute */
+ 0x02, 0, /* DAC3 analog mute */
+ 0x03, 0, /* DAC4 analog mute */
+ 0x04, 0, /* DAC5 analog mute */
+ 0x05, 0, /* DAC6 analog mute */
+ 0x06, 0, /* DAC7 analog mute */
+ 0x07, 0, /* DAC8 analog mute */
+ 0x08, 0x100, /* master analog mute */
+ 0x09, 0xff, /* DAC1 digital full */
+ 0x0a, 0xff, /* DAC2 digital full */
+ 0x0b, 0xff, /* DAC3 digital full */
+ 0x0c, 0xff, /* DAC4 digital full */
+ 0x0d, 0xff, /* DAC5 digital full */
+ 0x0e, 0xff, /* DAC6 digital full */
+ 0x0f, 0xff, /* DAC7 digital full */
+ 0x10, 0xff, /* DAC8 digital full */
+ 0x11, 0x1ff, /* master digital full */
+ 0x12, 0x000, /* phase normal */
+ 0x13, 0x090, /* unmute DAC L/R */
+ 0x14, 0x000, /* all unmute */
+ 0x15, 0x000, /* no deemphasis, no ZFLG */
+ 0x19, 0x000, /* -12dB ADC/L */
+ 0x1a, 0x000, /* -12dB ADC/R */
+ 0x1b, 0x000, /* ADC Mux */
+ 0x1c, 0x009, /* Out Mux1 */
+ 0x1d, 0x009, /* Out Mux2 */
+ };
+ static unsigned short cs_inits[] = {
+ 0x0441, /* RUN */
+ 0x0100, /* no mute */
+ 0x0200, /* */
+ 0x0600, /* slave, 24bit */
+ };
+ unsigned int tmp;
+ unsigned int i;
+
+ if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AUREON51_SKY)
+ ice->num_total_dacs = 6;
+ else
+ ice->num_total_dacs = 8;
+
+ /* to remeber the register values */
+ ice->akm = snd_kcalloc(sizeof(akm4xxx_t), GFP_KERNEL);
+ if (! ice->akm)
+ return -ENOMEM;
+ ice->akm_codecs = 1;
+
+ snd_ice1712_gpio_set_dir(ice, 0xbfffff); /* fix this for the time being */
+
+ /* reset the wm codec as the SPI mode */
+ snd_ice1712_save_gpio_status(ice);
+ snd_ice1712_gpio_set_mask(ice, ~(AUREON_WM_RESET|AUREON_WM_CS|AUREON_CS8415_CS));
+ tmp = snd_ice1712_gpio_read(ice);
+ tmp &= ~AUREON_WM_RESET;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+ tmp |= AUREON_WM_CS | AUREON_CS8415_CS;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+ tmp |= AUREON_WM_RESET;
+ snd_ice1712_gpio_write(ice, tmp);
+ udelay(1);
+
+ /* initialize WM8770 codec */
+ for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2)
+ wm_put(ice, wm_inits[i], wm_inits[i+1]);
+
+ /* initialize CS8415A codec */
+ for (i = 0; i < ARRAY_SIZE(cs_inits); i++)
+ aureon_spi_write(ice, AUREON_CS8415_CS,
+ cs_inits[i] | 0x200000, 24);
+
+ snd_ice1712_restore_gpio_status(ice);
+
+ return 0;
+}
+
+
+/*
+ * Aureon board don't provide the EEPROM data except for the vendor IDs.
+ * hence the driver needs to sets up it properly.
+ */
+
+static unsigned char aureon51_eeprom[] __devinitdata = {
+ 0x12, /* SYSCONF: clock 512, mpu401, spdif-in/ADC, 3DACs */
+ 0x80, /* ACLINK: I2S */
+ 0xf8, /* I2S: vol, 96k, 24bit, 192k */
+ 0xc2, /* SPDIF: out-en, out-int, spdif-in */
+ 0xff, /* GPIO_DIR */
+ 0xff, /* GPIO_DIR1 */
+ 0xbf, /* GPIO_DIR2 */
+ 0xff, /* GPIO_MASK */
+ 0xff, /* GPIO_MASK1 */
+ 0xff, /* GPIO_MASK2 */
+ 0x00, /* GPIO_STATE */
+ 0x00, /* GPIO_STATE1 */
+ 0x00, /* GPIO_STATE2 */
+};
+
+static unsigned char aureon71_eeprom[] __devinitdata = {
+ 0x13, /* SYSCONF: clock 512, mpu401, spdif-in/ADC, 4DACs */
+ 0x80, /* ACLINK: I2S */
+ 0xf8, /* I2S: vol, 96k, 24bit, 192k */
+ 0xc2, /* SPDIF: out-en, out-int, spdif-in */
+ 0xff, /* GPIO_DIR */
+ 0xff, /* GPIO_DIR1 */
+ 0xbf, /* GPIO_DIR2 */
+ 0x00, /* GPIO_MASK */
+ 0x00, /* GPIO_MASK1 */
+ 0x00, /* GPIO_MASK2 */
+ 0x00, /* GPIO_STATE */
+ 0x00, /* GPIO_STATE1 */
+ 0x00, /* GPIO_STATE2 */
+};
+
+/* entry point */
+struct snd_ice1712_card_info snd_vt1724_aureon_cards[] __devinitdata = {
+ {
+ .subvendor = VT1724_SUBDEVICE_AUREON51_SKY,
+ .name = "Terratec Aureon 5.1-Sky",
+ .chip_init = aureon_init,
+ .build_controls = aureon_add_controls,
+ .eeprom_size = sizeof(aureon51_eeprom),
+ .eeprom_data = aureon51_eeprom,
+ },
+ {
+ .subvendor = VT1724_SUBDEVICE_AUREON71_SPACE,
+ .name = "Terratec Aureon 7.1-Space",
+ .chip_init = aureon_init,
+ .build_controls = aureon_add_controls,
+ .eeprom_size = sizeof(aureon71_eeprom),
+ .eeprom_data = aureon71_eeprom,
+ },
+ { } /* terminator */
+};
diff -urN linux-2.5.70-bk11/sound/pci/ice1712/aureon.h linux-2.5.70-bk12/sound/pci/ice1712/aureon.h
--- linux-2.5.70-bk11/sound/pci/ice1712/aureon.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.5.70-bk12/sound/pci/ice1712/aureon.h 2003-06-07 04:47:54.000000000 -0700
@@ -0,0 +1,47 @@
+#ifndef __SOUND_AUREON_H
+#define __SOUND_AUREON_H
+
+/*
+ * ALSA driver for VIA VT1724 (Envy24HT)
+ *
+ * Lowlevel functions for Terratec Aureon cards
+ *
+ * Copyright (c) 2003 Takashi Iwai
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define AUREON_DEVICE_DESC "{Terratec,Aureon 5.1 Sky},"\
+ "{Terratec,Aureon 7.1 Space},"
+
+#define VT1724_SUBDEVICE_AUREON51_SKY 0x3b154711 /* Aureon 5.1 Sky */
+#define VT1724_SUBDEVICE_AUREON71_SPACE 0x3b154511 /* Aureon 7.1 Space */
+
+extern struct snd_ice1712_card_info snd_vt1724_aureon_cards[];
+
+/* GPIO bits */
+#define AUREON_CS8415_CS (1 << 23)
+#define AUREON_CS8415_CDTO (1 << 22)
+#define AUREON_WM_RESET (1 << 20)
+#define AUREON_WM_CLK (1 << 19)
+#define AUREON_WM_DATA (1 << 18)
+#define AUREON_WM_RW (1 << 17)
+#define AUREON_AC97_RESET (1 << 16)
+#define AUREON_DIGITAL_SEL1 (1 << 15)
+#define AUREON_HP_SEL (1 << 14)
+#define AUREON_WM_CS (1 << 12)
+
+#endif /* __SOUND_AUREON_H */
diff -urN linux-2.5.70-bk11/sound/pci/ice1712/ice1712.c linux-2.5.70-bk12/sound/pci/ice1712/ice1712.c
--- linux-2.5.70-bk11/sound/pci/ice1712/ice1712.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/ice1712/ice1712.c 2003-06-07 04:47:54.000000000 -0700
@@ -298,11 +298,13 @@
static void snd_ice1712_set_gpio_dir(ice1712_t *ice, unsigned int data)
{
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DIRECTION, data);
+ inb(ICEREG(ice, DATA)); /* dummy read for pci-posting */
}
static void snd_ice1712_set_gpio_mask(ice1712_t *ice, unsigned int data)
{
snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, data);
+ inb(ICEREG(ice, DATA)); /* dummy read for pci-posting */
}
static unsigned int snd_ice1712_get_gpio_data(ice1712_t *ice)
@@ -313,6 +315,7 @@
static void snd_ice1712_set_gpio_data(ice1712_t *ice, unsigned int val)
{
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, val);
+ inb(ICEREG(ice, DATA)); /* dummy read for pci-posting */
}
diff -urN linux-2.5.70-bk11/sound/pci/ice1712/ice1724.c linux-2.5.70-bk12/sound/pci/ice1712/ice1724.c
--- linux-2.5.70-bk11/sound/pci/ice1712/ice1724.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/ice1712/ice1724.c 2003-06-07 04:47:54.000000000 -0700
@@ -42,6 +42,7 @@
/* lowlevel routines */
#include "amp.h"
#include "revo.h"
+#include "aureon.h"
MODULE_AUTHOR("Jaroslav Kysela ");
MODULE_DESCRIPTION("ICEnsemble ICE1724 (Envy24HT)");
@@ -50,6 +51,7 @@
MODULE_DEVICES("{"
REVO_DEVICE_DESC
AMP_AUDIO2000_DEVICE_DESC
+ AUREON_DEVICE_DESC
"{VIA,VT1724},"
"{ICEnsemble,Generic ICE1724},"
"{ICEnsemble,Generic Envy24HT}}");
@@ -172,6 +174,7 @@
static void snd_vt1724_set_gpio_dir(ice1712_t *ice, unsigned int data)
{
outl(data, ICEREG1724(ice, GPIO_DIRECTION));
+ inw(ICEREG1724(ice, GPIO_DIRECTION)); /* dummy read for pci-posting */
}
/* set the gpio mask (0 = writable) */
@@ -179,12 +182,14 @@
{
outw(data, ICEREG1724(ice, GPIO_WRITE_MASK));
outb((data >> 16) & 0xff, ICEREG1724(ice, GPIO_WRITE_MASK_22));
+ inw(ICEREG1724(ice, GPIO_WRITE_MASK)); /* dummy read for pci-posting */
}
static void snd_vt1724_set_gpio_data(ice1712_t *ice, unsigned int data)
{
outw(data, ICEREG1724(ice, GPIO_DATA));
outb(data >> 16, ICEREG1724(ice, GPIO_DATA_22));
+ inw(ICEREG1724(ice, GPIO_DATA)); /* dummy read for pci-posting */
}
static unsigned int snd_vt1724_get_gpio_data(ice1712_t *ice)
@@ -415,14 +420,16 @@
val &= ~VT1724_MT_I2S_MCLK_128X; /* 256x MCLK */
if (val != old) {
outb(val, ICEMT1724(ice, I2S_FORMAT));
- /* FIXME: is this revo only? */
- /* assert PRST# to converters; MT05 bit 7 */
- outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD));
- spin_unlock_irqrestore(&ice->reg_lock, flags);
- mdelay(5);
- spin_lock_irqsave(&ice->reg_lock, flags);
- /* deassert PRST# */
- outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD));
+ if (ice->eeprom.subvendor == VT1724_SUBDEVICE_REVOLUTION71) {
+ /* FIXME: is this revo only? */
+ /* assert PRST# to converters; MT05 bit 7 */
+ outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD));
+ spin_unlock_irqrestore(&ice->reg_lock, flags);
+ mdelay(5);
+ spin_lock_irqsave(&ice->reg_lock, flags);
+ /* deassert PRST# */
+ outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD));
+ }
}
}
spin_unlock_irqrestore(&ice->reg_lock, flags);
@@ -1549,6 +1556,7 @@
static struct snd_ice1712_card_info *card_tables[] __devinitdata = {
snd_vt1724_revo_cards,
snd_vt1724_amp_cards,
+ snd_vt1724_aureon_cards,
0,
};
@@ -1572,6 +1580,7 @@
{
int dev = 0xa0; /* EEPROM device address */
unsigned int i, size;
+ struct snd_ice1712_card_info **tbl, *c;
if ((inb(ICEREG1724(ice, I2C_CTRL)) & VT1724_I2C_EEPROM) == 0) {
snd_printk("ICE1724 has not detected EEPROM\n");
@@ -1581,6 +1590,23 @@
(snd_vt1724_read_i2c(ice, dev, 0x01) << 8) |
(snd_vt1724_read_i2c(ice, dev, 0x02) << 16) |
(snd_vt1724_read_i2c(ice, dev, 0x03) << 24);
+
+ /* if the EEPROM is given by the driver, use it */
+ for (tbl = card_tables; *tbl; tbl++) {
+ for (c = *tbl; c->subvendor; c++) {
+ if (c->subvendor == ice->eeprom.subvendor) {
+ if (! c->eeprom_size || ! c->eeprom_data)
+ goto found;
+ snd_printdd("using the defined eeprom..\n");
+ ice->eeprom.version = 2;
+ ice->eeprom.size = c->eeprom_size + 6;
+ memcpy(ice->eeprom.data, c->eeprom_data, c->eeprom_size);
+ goto read_skipped;
+ }
+ }
+ }
+
+ found:
ice->eeprom.size = snd_vt1724_read_i2c(ice, dev, 0x04);
if (ice->eeprom.size < 6)
ice->eeprom.size = 32;
@@ -1597,6 +1623,7 @@
for (i = 0; i < size; i++)
ice->eeprom.data[i] = snd_vt1724_read_i2c(ice, dev, i + 6);
+ read_skipped:
ice->eeprom.gpiomask = eeprom_triple(ice, ICE_EEP2_GPIO_MASK);
ice->eeprom.gpiostate = eeprom_triple(ice, ICE_EEP2_GPIO_STATE);
ice->eeprom.gpiodir = eeprom_triple(ice, ICE_EEP2_GPIO_DIR);
diff -urN linux-2.5.70-bk11/sound/pci/ice1712/revo.c linux-2.5.70-bk12/sound/pci/ice1712/revo.c
--- linux-2.5.70-bk11/sound/pci/ice1712/revo.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/ice1712/revo.c 2003-06-07 04:47:54.000000000 -0700
@@ -59,14 +59,14 @@
reg = 1;
shift = 3;
}
- tmp = ak->images[0][reg];
+ tmp = snd_akm4xxx_get(ak, 0, reg);
old = (tmp >> shift) & 0x03;
if (old == dfs)
return;
/* reset DFS */
snd_akm4xxx_reset(ak, 1);
- tmp = ak->images[0][reg];
+ tmp = snd_akm4xxx_get(ak, 0, reg);
tmp &= ~(0x03 << shift);
tmp |= dfs << shift;
snd_akm4xxx_write(ak, 0, reg, tmp);
@@ -121,6 +121,7 @@
static int __devinit revo_init(ice1712_t *ice)
{
akm4xxx_t *ak;
+ int err;
/* determine I2C, DACs and ADCs */
switch (ice->eeprom.subvendor) {
@@ -139,8 +140,10 @@
ice->akm_codecs = 2;
switch (ice->eeprom.subvendor) {
case VT1724_SUBDEVICE_REVOLUTION71:
- snd_ice1712_akm4xxx_init(ak, &akm_revo_front, &akm_revo_front_priv, ice);
- snd_ice1712_akm4xxx_init(ak + 1, &akm_revo_surround, &akm_revo_surround_priv, ice);
+ if ((err = snd_ice1712_akm4xxx_init(ak, &akm_revo_front, &akm_revo_front_priv, ice)) < 0)
+ return err;
+ if ((err = snd_ice1712_akm4xxx_init(ak + 1, &akm_revo_surround, &akm_revo_surround_priv, ice)) < 0)
+ return err;
/* unmute all codecs */
snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE, VT1724_REVO_MUTE);
break;
diff -urN linux-2.5.70-bk11/sound/pci/via82xx.c linux-2.5.70-bk12/sound/pci/via82xx.c
--- linux-2.5.70-bk11/sound/pci/via82xx.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/pci/via82xx.c 2003-06-07 04:47:54.000000000 -0700
@@ -564,6 +564,7 @@
{
outb(VIA_REG_CTRL_PAUSE | VIA_REG_CTRL_TERMINATE | VIA_REG_CTRL_RESET,
VIADEV_REG(viadev, OFFSET_CONTROL));
+ inb(VIADEV_REG(viadev, OFFSET_CONTROL));
udelay(50);
/* disable interrupts */
outb(0x00, VIADEV_REG(viadev, OFFSET_CONTROL));
diff -urN linux-2.5.70-bk11/sound/ppc/pmac.c linux-2.5.70-bk12/sound/ppc/pmac.c
--- linux-2.5.70-bk11/sound/ppc/pmac.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/ppc/pmac.c 2003-06-07 04:47:54.000000000 -0700
@@ -1211,9 +1211,12 @@
spin_lock_irqsave(&chip->reg_lock, flags);
snd_pmac_beep_stop(chip);
spin_unlock_irqrestore(&chip->reg_lock, flags);
- disable_irq(chip->irq);
- disable_irq(chip->tx_irq);
- disable_irq(chip->rx_irq);
+ if (chip->irq >= 0)
+ disable_irq(chip->irq);
+ if (chip->tx_irq >= 0)
+ disable_irq(chip->tx_irq);
+ if (chip->rx_irq >= 0)
+ disable_irq(chip->rx_irq);
snd_pmac_sound_feature(chip, 0);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
}
@@ -1237,9 +1240,12 @@
snd_pmac_pcm_set_format(chip);
- enable_irq(chip->irq);
- enable_irq(chip->tx_irq);
- enable_irq(chip->rx_irq);
+ if (chip->irq >= 0)
+ enable_irq(chip->irq);
+ if (chip->tx_irq >= 0)
+ enable_irq(chip->tx_irq);
+ if (chip->rx_irq >= 0)
+ enable_irq(chip->rx_irq);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
}
diff -urN linux-2.5.70-bk11/sound/ppc/tumbler.c linux-2.5.70-bk12/sound/ppc/tumbler.c
--- linux-2.5.70-bk11/sound/ppc/tumbler.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/ppc/tumbler.c 2003-06-07 04:47:54.000000000 -0700
@@ -27,6 +27,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -79,7 +80,7 @@
int active_state;
} pmac_gpio_t;
-typedef struct pmac_tumber_t {
+typedef struct pmac_tumbler_t {
pmac_keywest_t i2c;
pmac_gpio_t audio_reset;
pmac_gpio_t amp_mute;
@@ -92,11 +93,12 @@
unsigned int mix_vol[VOL_IDX_LAST_MIX][2]; /* stereo volumes for tas3004 */
int drc_range;
int drc_enable;
+#ifdef CONFIG_PMAC_PBOOK
+ struct work_struct resume_workq;
+#endif
} pmac_tumbler_t;
-#define number_of(ary) (sizeof(ary) / sizeof(ary[0]))
-
/*
*/
@@ -168,16 +170,16 @@
left_vol = 0;
else {
left_vol = mix->master_vol[0];
- if (left_vol >= number_of(master_volume_table))
- left_vol = number_of(master_volume_table) - 1;
+ if (left_vol >= ARRAY_SIZE(master_volume_table))
+ left_vol = ARRAY_SIZE(master_volume_table) - 1;
left_vol = master_volume_table[left_vol];
}
if (! mix->master_switch[1])
right_vol = 0;
else {
right_vol = mix->master_vol[1];
- if (right_vol >= number_of(master_volume_table))
- right_vol = number_of(master_volume_table) - 1;
+ if (right_vol >= ARRAY_SIZE(master_volume_table))
+ right_vol = ARRAY_SIZE(master_volume_table) - 1;
right_vol = master_volume_table[right_vol];
}
@@ -203,7 +205,7 @@
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = number_of(master_volume_table) - 1;
+ uinfo->value.integer.max = ARRAY_SIZE(master_volume_table) - 1;
return 0;
}
@@ -479,7 +481,7 @@
.index = VOL_IDX_PCM_MONO,
.reg = TAS_REG_PCM,
.bytes = 3,
- .max = number_of(mixer_volume_table),
+ .max = ARRAY_SIZE(mixer_volume_table),
.table = mixer_volume_table,
};
@@ -487,7 +489,7 @@
.index = VOL_IDX_BASS,
.reg = TAS_REG_BASS,
.bytes = 1,
- .max = number_of(bass_volume_table),
+ .max = ARRAY_SIZE(bass_volume_table),
.table = bass_volume_table,
};
@@ -495,7 +497,7 @@
.index = VOL_IDX_TREBLE,
.reg = TAS_REG_TREBLE,
.bytes = 1,
- .max = number_of(treble_volume_table),
+ .max = ARRAY_SIZE(treble_volume_table),
.table = treble_volume_table,
};
@@ -504,7 +506,7 @@
.index = VOL_IDX_BASS,
.reg = TAS_REG_BASS,
.bytes = 1,
- .max = number_of(snapper_bass_volume_table),
+ .max = ARRAY_SIZE(snapper_bass_volume_table),
.table = snapper_bass_volume_table,
};
@@ -512,7 +514,7 @@
.index = VOL_IDX_TREBLE,
.reg = TAS_REG_TREBLE,
.bytes = 1,
- .max = number_of(snapper_treble_volume_table),
+ .max = ARRAY_SIZE(snapper_treble_volume_table),
.table = snapper_treble_volume_table,
};
@@ -546,8 +548,8 @@
unsigned char block[9];
vol = mix->mix_vol[idx][ch];
- if (vol >= number_of(mixer_volume_table)) {
- vol = number_of(mixer_volume_table) - 1;
+ if (vol >= ARRAY_SIZE(mixer_volume_table)) {
+ vol = ARRAY_SIZE(mixer_volume_table) - 1;
mix->mix_vol[idx][ch] = vol;
}
@@ -579,7 +581,7 @@
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = number_of(mixer_volume_table) - 1;
+ uinfo->value.integer.max = ARRAY_SIZE(mixer_volume_table) - 1;
return 0;
}
@@ -877,29 +879,47 @@
#ifdef CONFIG_PMAC_PBOOK
/* resume mixer */
-static void tumbler_resume(pmac_t *chip)
+/* we call the i2c transfer in a workqueue because it may need either schedule()
+ * or completion from timer interrupts.
+ */
+static void tumbler_resume_work(void *arg)
{
+ pmac_t *chip = (pmac_t *)arg;
pmac_tumbler_t *mix = chip->mixer_data;
- snd_assert(mix, return);
+
tumbler_reset_audio(chip);
- if (mix->i2c.client)
- tumbler_init_client(&mix->i2c);
+ if (mix->i2c.client) {
+ if (tumbler_init_client(&mix->i2c) < 0)
+ printk(KERN_ERR "tumbler_init_client error\n");
+ } else
+ printk(KERN_ERR "tumbler: i2c is not initialized\n");
if (chip->model == PMAC_TUMBLER) {
tumbler_set_mono_volume(mix, &tumbler_pcm_vol_info);
tumbler_set_mono_volume(mix, &tumbler_bass_vol_info);
tumbler_set_mono_volume(mix, &tumbler_treble_vol_info);
+ tumbler_set_drc(mix);
} else {
snapper_set_mix_vol(mix, VOL_IDX_PCM);
snapper_set_mix_vol(mix, VOL_IDX_PCM2);
snapper_set_mix_vol(mix, VOL_IDX_ADC);
tumbler_set_mono_volume(mix, &tumbler_bass_vol_info);
tumbler_set_mono_volume(mix, &tumbler_treble_vol_info);
+ snapper_set_drc(mix);
}
- tumbler_set_drc(mix);
tumbler_set_master_volume(mix);
if (chip->update_automute)
chip->update_automute(chip, 0);
}
+
+static void tumbler_resume(pmac_t *chip)
+{
+ pmac_tumbler_t *mix = chip->mixer_data;
+ snd_assert(mix, return);
+ INIT_WORK(&mix->resume_workq, tumbler_resume_work, chip);
+ if (schedule_work(&mix->resume_workq))
+ return;
+ printk(KERN_ERR "ALSA tumbler: cannot schedule resume-workqueue.\n");
+}
#endif
/* initialize tumbler */
@@ -1001,12 +1021,12 @@
sprintf(chip->card->mixername, "PowerMac %s", chipname);
if (chip->model == PMAC_TUMBLER) {
- for (i = 0; i < number_of(tumbler_mixers); i++) {
+ for (i = 0; i < ARRAY_SIZE(tumbler_mixers); i++) {
if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&tumbler_mixers[i], chip))) < 0)
return err;
}
} else {
- for (i = 0; i < number_of(snapper_mixers); i++) {
+ for (i = 0; i < ARRAY_SIZE(snapper_mixers); i++) {
if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snapper_mixers[i], chip))) < 0)
return err;
}
diff -urN linux-2.5.70-bk11/sound/usb/usbaudio.c linux-2.5.70-bk12/sound/usb/usbaudio.c
--- linux-2.5.70-bk11/sound/usb/usbaudio.c 2003-06-07 04:47:40.000000000 -0700
+++ linux-2.5.70-bk12/sound/usb/usbaudio.c 2003-06-07 04:47:54.000000000 -0700
@@ -55,6 +55,7 @@
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 }; /* Vendor ID for this card */
static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 }; /* Product ID for this card */
+static int nrpacks = 4; /* max. number of packets per urb */
MODULE_PARM(index, "1-" __MODULE_STRING(SNDRV_CARDS) "i");
MODULE_PARM_DESC(index, "Index value for the USB audio adapter.");
@@ -71,6 +72,9 @@
MODULE_PARM(pid, "1-" __MODULE_STRING(SNDRV_CARDS) "i");
MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
MODULE_PARM_SYNTAX(pid, SNDRV_ENABLED ",allows:{{-1,0xffff}},base:16");
+MODULE_PARM(nrpacks, "i");
+MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
+MODULE_PARM_SYNTAX(nrpacks, SNDRV_ENABLED ",allows:{{2,10}}");
/*
@@ -98,7 +102,7 @@
*
*/
-#define NRPACKS 4 /* 4ms per urb */
+#define MAX_PACKS 10
#define MAX_URBS 5 /* max. 20ms long packets */
#define SYNC_URBS 2 /* always two urbs for sync */
#define MIN_PACKS_URB 1 /* minimum 1 packet per urb */
@@ -176,7 +180,7 @@
unsigned int nurbs; /* # urbs */
snd_urb_ctx_t dataurb[MAX_URBS]; /* data urb table */
snd_urb_ctx_t syncurb[SYNC_URBS]; /* sync urb table */
- char syncbuf[SYNC_URBS * NRPACKS * 3]; /* sync buffer; it's so small - let's get static */
+ char syncbuf[SYNC_URBS * MAX_PACKS * 3]; /* sync buffer; it's so small - let's get static */
char *tmpbuf; /* temporary buffer for playback */
u64 formats; /* format bitmasks (all or'ed) */
@@ -839,7 +843,7 @@
/* allocate a temporary buffer for playback */
if (is_playback) {
- subs->tmpbuf = kmalloc(maxsize * NRPACKS, GFP_KERNEL);
+ subs->tmpbuf = kmalloc(maxsize * nrpacks, GFP_KERNEL);
if (! subs->tmpbuf) {
snd_printk(KERN_ERR "cannot malloc tmpbuf\n");
return -ENOMEM;
@@ -850,16 +854,16 @@
total_packs = (frames_to_bytes(runtime, runtime->period_size) + maxsize - 1) / maxsize;
if (total_packs < 2 * MIN_PACKS_URB)
total_packs = 2 * MIN_PACKS_URB;
- subs->nurbs = (total_packs + NRPACKS - 1) / NRPACKS;
+ subs->nurbs = (total_packs + nrpacks - 1) / nrpacks;
if (subs->nurbs > MAX_URBS) {
/* too much... */
subs->nurbs = MAX_URBS;
- total_packs = MAX_URBS * NRPACKS;
+ total_packs = MAX_URBS * nrpacks;
}
n = total_packs;
for (i = 0; i < subs->nurbs; i++) {
- npacks[i] = n > NRPACKS ? NRPACKS : n;
- n -= NRPACKS;
+ npacks[i] = n > nrpacks ? nrpacks : n;
+ n -= nrpacks;
}
if (subs->nurbs <= 1) {
/* too little - we need at least two packets
@@ -918,14 +922,14 @@
snd_urb_ctx_t *u = &subs->syncurb[i];
u->index = i;
u->subs = subs;
- u->packets = NRPACKS;
+ u->packets = nrpacks;
u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
if (! u->urb) {
release_substream_urbs(subs, 0);
return -ENOMEM;
}
- u->urb->transfer_buffer = subs->syncbuf + i * NRPACKS * 3;
- u->urb->transfer_buffer_length = NRPACKS * 3;
+ u->urb->transfer_buffer = subs->syncbuf + i * nrpacks * 3;
+ u->urb->transfer_buffer_length = nrpacks * 3;
u->urb->dev = subs->dev;
u->urb->pipe = subs->syncpipe;
u->urb->transfer_flags = URB_ISO_ASAP | UNLINK_FLAGS;
@@ -1096,16 +1100,6 @@
attr = fmt->ep_attr & EP_ATTR_MASK;
if ((is_playback && attr == EP_ATTR_ASYNC) ||
(! is_playback && attr == EP_ATTR_ADAPTIVE)) {
- /*
- * QUIRK: plantronics headset has adaptive-in
- * although it's really not...
- */
- if ((dev->descriptor.idVendor == 0x047f &&
- dev->descriptor.idProduct == 0x0ca1) ||
- /* Griffin iMic (note that there is an older model 77d:223) */
- (dev->descriptor.idVendor == 0x077d &&
- dev->descriptor.idProduct == 0x07af))
- goto _ok;
/* check endpoint */
if (altsd->bNumEndpoints < 2 ||
get_endpoint(alts, 1)->bmAttributes != 0x01 ||
@@ -1129,7 +1123,6 @@
subs->syncinterval = get_endpoint(alts, 1)->bRefresh;
}
- _ok:
if ((err = init_usb_pitch(dev, subs->interface, alts, fmt)) < 0 ||
(err = init_usb_sample_rate(dev, subs->interface, alts, fmt,
runtime->rate)) < 0)
@@ -1497,7 +1490,7 @@
/* set the period time minimum 1ms */
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
1000 * MIN_PACKS_URB,
- /*(NRPACKS * MAX_URBS) * 1000*/ UINT_MAX);
+ /*(nrpacks * MAX_URBS) * 1000*/ UINT_MAX);
if (check_hw_params_convention(subs)) {
hwc_debug("setting extra hw constraints...\n");
@@ -1656,11 +1649,9 @@
* entry point for linux usb interface
*/
-#ifndef OLD_USB
static int usb_audio_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void usb_audio_disconnect(struct usb_interface *intf);
-#endif
static struct usb_device_id usb_audio_ids [] = {
#include "usbquirks.h"
@@ -1677,9 +1668,6 @@
.name = "snd-usb-audio",
.probe = usb_audio_probe,
.disconnect = usb_audio_disconnect,
-#ifdef OLD_USB
- .driver_list = LIST_HEAD_INIT(usb_audio_driver.driver_list),
-#endif
.id_table = usb_audio_ids,
};
@@ -1944,7 +1932,7 @@
switch (format) {
case 0: /* some devices don't define this correctly... */
snd_printdd(KERN_INFO "%d:%u:%d : format type 0 is detected, processed as PCM\n",
- dev->devnum, iface_no, altno);
+ dev->devnum, fp->iface, fp->altsetting);
/* fall-through */
case USB_AUDIO_FORMAT_PCM:
if (sample_width > sample_bytes * 8) {
@@ -2238,6 +2226,33 @@
fp->maxpacksize = get_endpoint(alts, 0)->wMaxPacketSize;
fp->attributes = csep[3];
+ /* some quirks for attributes here */
+
+ /* workaround for AudioTrak Optoplay */
+ if (dev->descriptor.idVendor == 0x0a92 &&
+ dev->descriptor.idProduct == 0x0053) {
+ /* Optoplay sets the sample rate attribute although
+ * it seems not supporting it in fact.
+ */
+ fp->attributes &= ~EP_CS_ATTR_SAMPLE_RATE;
+ }
+ /*
+ * plantronics headset and Griffin iMic have set adaptive-in
+ * although it's really not...
+ */
+ if ((dev->descriptor.idVendor == 0x047f &&
+ dev->descriptor.idProduct == 0x0ca1) ||
+ /* Griffin iMic (note that there is an older model 77d:223) */
+ (dev->descriptor.idVendor == 0x077d &&
+ dev->descriptor.idProduct == 0x07af)) {
+ fp->ep_attr &= ~EP_ATTR_MASK;
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fp->ep_attr |= EP_ATTR_ADAPTIVE;
+ else
+ fp->ep_attr |= EP_ATTR_SYNC;
+ }
+
+ /* ok, let's parse further... */
if (parse_audio_format(dev, fp, format, fmt, stream) < 0) {
if (fp->rate_table)
kfree(fp->rate_table);
@@ -2461,6 +2476,11 @@
/*
* audio-interface quirks
+ *
+ * returns zero if no standard audio/MIDI parsing is needed.
+ * returns a postive value if standard audio/midi interfaces are parsed
+ * after this.
+ * returns a negative value at error.
*/
static int snd_usb_create_quirk(snd_usb_audio_t *chip,
struct usb_interface *iface,
@@ -2749,7 +2769,6 @@
}
}
-#ifndef OLD_USB
/*
* new 2.5 USB kernel API
*/
@@ -2770,12 +2789,14 @@
snd_usb_audio_disconnect(interface_to_usbdev(intf),
dev_get_drvdata(&intf->dev));
}
-#endif
-
static int __init snd_usb_audio_init(void)
{
+ if (nrpacks < 2 || nrpacks > MAX_PACKS) {
+ printk(KERN_WARNING "invalid nrpacks value.\n");
+ return -EINVAL;
+ }
usb_register(&usb_audio_driver);
return 0;
}