Name: Remove rwlocks Author: David S. Miller Status: Experimental D: Dave started removing rwlocks throughout the kernel: they're very rarely D: worthwhile. D: D: Rules for the conversion are easy: D: (1) If the lock is never grabbed with *_irq* variants, then simply D: change to a spinlock (assume code never locks recursively). D: (2) If the lock is taken with _*irq* variants, then change all D: read locks which don't use _*irq* variants to use them, then D: do (1). D: D: ChangeSet@1.1523, 2003-12-15 00:40:17-08:00, davem@nuts.ninka.net D: [PPC64]: Make imlist lock into spinlock, also mark as static. D: ChangeSet@1.1524, 2003-12-15 00:42:52-08:00, davem@nuts.ninka.net D: [BLUETOOTH]: Make HCI USB driver's completion_lock into a spinlock. D: ChangeSet@1.1525, 2003-12-15 00:58:13-08:00, davem@nuts.ninka.net D: [IEEE1394]: Change rwlock_t's into spinlock_t's. D: ChangeSet@1.1526, 2003-12-15 01:00:28-08:00, davem@nuts.ninka.net D: [ISDN]: Convert rwlocks to spinlocks in CAPI driver. D: ChangeSet@1.1527, 2003-12-15 01:02:15-08:00, davem@nuts.ninka.net D: [MACINTOSH]: Convert adb_handler_lock to spinlock. D: ChangeSet@1.1528, 2003-12-15 01:08:00-08:00, davem@nuts.ninka.net D: [MEDIA]: Convert zr36120 driver rwlock to spinlock. D: ChangeSet@1.1529, 2003-12-15 01:12:01-08:00, davem@nuts.ninka.net D: [MPT_FUSION]: Convert rwlocks to spinlocks, actually two were not even used so deleted. D: ChangeSet@1.1530, 2003-12-15 01:23:50-08:00, davem@nuts.ninka.net D: [NET]: Convert rwlocks to spinlocks in bonding driver. D: ChangeSet@1.1531, 2003-12-15 02:04:56-08:00, davem@nuts.ninka.net D: [BLUETOOTH]: Fix HCI USB driver build. D: ChangeSet@1.1532, 2003-12-15 02:39:45-08:00, davem@nuts.ninka.net D: [IEEE1394 and BONDING]: Fix typo, spinlock_init() --> spin_lock_init(). D: ChangeSet@1.1533, 2003-12-15 02:44:59-08:00, davem@nuts.ninka.net D: [NET]: Convert dev_base_lock to spinlock. 
D: ChangeSet@1.1534, 2003-12-15 04:25:16-08:00, davem@nuts.ninka.net D: [IA64]: Convert unwind script rwlock to spinlock. diff -Nru a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c --- a/arch/ppc64/mm/imalloc.c Mon Dec 15 04:26:12 2003 +++ b/arch/ppc64/mm/imalloc.c Mon Dec 15 04:26:12 2003 @@ -15,7 +15,7 @@ #include #include -rwlock_t imlist_lock = RW_LOCK_UNLOCKED; +static spinlock_t imlist_lock = SPIN_LOCK_UNLOCKED; struct vm_struct * imlist = NULL; struct vm_struct *get_im_area(unsigned long size) @@ -27,13 +27,13 @@ if (!area) return NULL; addr = IMALLOC_START; - write_lock(&imlist_lock); + spin_lock(&imlist_lock); for (p = &imlist; (tmp = *p) ; p = &tmp->next) { if (size + addr < (unsigned long) tmp->addr) break; addr = tmp->size + (unsigned long) tmp->addr; if (addr > IMALLOC_END-size) { - write_unlock(&imlist_lock); + spin_unlock(&imlist_lock); kfree(area); return NULL; } @@ -43,7 +43,7 @@ area->size = size; area->next = *p; *p = area; - write_unlock(&imlist_lock); + spin_unlock(&imlist_lock); return area; } @@ -57,16 +57,16 @@ printk(KERN_ERR "Trying to ifree() bad address (%p)\n", addr); return; } - write_lock(&imlist_lock); + spin_lock(&imlist_lock); for (p = &imlist ; (tmp = *p) ; p = &tmp->next) { if (tmp->addr == addr) { *p = tmp->next; kfree(tmp); - write_unlock(&imlist_lock); + spin_unlock(&imlist_lock); return; } } - write_unlock(&imlist_lock); + spin_unlock(&imlist_lock); printk(KERN_ERR "Trying to ifree() nonexistent area (%p)\n", addr); } diff -Nru a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c --- a/drivers/bluetooth/hci_usb.c Mon Dec 15 04:26:16 2003 +++ b/drivers/bluetooth/hci_usb.c Mon Dec 15 04:26:16 2003 @@ -294,7 +294,7 @@ if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) return 0; - write_lock_irqsave(&husb->completion_lock, flags); + spin_lock_irqsave(&husb->completion_lock, flags); err = hci_usb_intr_rx_submit(husb); if (!err) { @@ -309,7 +309,7 @@ clear_bit(HCI_RUNNING, &hdev->flags); } - 
write_unlock_irqrestore(&husb->completion_lock, flags); + spin_unlock_irqrestore(&husb->completion_lock, flags); return err; } @@ -377,8 +377,8 @@ BT_DBG("%s", hdev->name); /* Synchronize with completion handlers */ - write_lock_irqsave(&husb->completion_lock, flags); - write_unlock_irqrestore(&husb->completion_lock, flags); + spin_lock_irqsave(&husb->completion_lock, flags); + spin_unlock_irqrestore(&husb->completion_lock, flags); hci_usb_unlink_urbs(husb); hci_usb_flush(hdev); @@ -554,6 +554,7 @@ { struct hci_dev *hdev = (struct hci_dev *) skb->dev; struct hci_usb *husb; + unsigned long flags; if (!hdev) { BT_ERR("frame for uknown device (hdev=NULL)"); @@ -587,12 +588,12 @@ return 0; } - read_lock(&husb->completion_lock); + spin_lock_irqsave(&husb->completion_lock, flags); skb_queue_tail(__transmit_q(husb, skb->pkt_type), skb); hci_usb_tx_wakeup(husb); - read_unlock(&husb->completion_lock); + spin_unlock_irqrestore(&husb->completion_lock, flags); return 0; } @@ -678,6 +679,7 @@ struct hci_usb *husb = (void *) urb->context; struct hci_dev *hdev = &husb->hdev; int err, count = urb->actual_length; + unsigned long flags; BT_DBG("%s urb %p type %d status %d count %d flags %x", hdev->name, urb, _urb->type, urb->status, count, urb->transfer_flags); @@ -685,7 +687,7 @@ if (!test_bit(HCI_RUNNING, &hdev->flags)) return; - read_lock(&husb->completion_lock); + spin_lock_irqsave(&husb->completion_lock, flags); if (urb->status || !count) goto resubmit; @@ -722,7 +724,7 @@ BT_DBG("%s urb %p type %d resubmit status %d", hdev->name, urb, _urb->type, err); - read_unlock(&husb->completion_lock); + spin_unlock_irqrestore(&husb->completion_lock, flags); } static void hci_usb_tx_complete(struct urb *urb, struct pt_regs *regs) @@ -747,14 +749,14 @@ else hdev->stat.err_tx++; - read_lock(&husb->completion_lock); + spin_lock_irqsave(&husb->completion_lock, flags); _urb_unlink(_urb); _urb_queue_tail(__completed_q(husb, _urb->type), _urb); hci_usb_tx_wakeup(husb); - 
read_unlock(&husb->completion_lock); + spin_unlock_irqrestore(&husb->completion_lock, flags); } static void hci_usb_destruct(struct hci_dev *hdev) @@ -885,7 +887,7 @@ } #endif - husb->completion_lock = RW_LOCK_UNLOCKED; + spin_lock_init(&husb->completion_lock); for (i = 0; i < 4; i++) { skb_queue_head_init(&husb->transmit_q[i]); diff -Nru a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h --- a/drivers/bluetooth/hci_usb.h Mon Dec 15 04:26:16 2003 +++ b/drivers/bluetooth/hci_usb.h Mon Dec 15 04:26:16 2003 @@ -122,7 +122,7 @@ struct sk_buff_head transmit_q[4]; struct sk_buff *reassembly[4]; // Reassembly buffers - rwlock_t completion_lock; + spinlock_t completion_lock; atomic_t pending_tx[4]; // Number of pending requests struct _urb_queue pending_q[4]; // Pending requests diff -Nru a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c --- a/drivers/ieee1394/highlevel.c Mon Dec 15 04:26:20 2003 +++ b/drivers/ieee1394/highlevel.c Mon Dec 15 04:26:20 2003 @@ -38,10 +38,10 @@ static LIST_HEAD(hl_drivers); -static rwlock_t hl_drivers_lock = RW_LOCK_UNLOCKED; +static spinlock_t hl_drivers_lock = SPIN_LOCK_UNLOCKED; static LIST_HEAD(addr_space); -static rwlock_t addr_space_lock = RW_LOCK_UNLOCKED; +static spinlock_t addr_space_lock = SPIN_LOCK_UNLOCKED; /* addr_space list will have zero and max already included as bounds */ static struct hpsb_address_ops dummy_ops = { NULL, NULL, NULL, NULL }; @@ -53,18 +53,19 @@ { struct hl_host_info *hi = NULL; struct list_head *lh; + unsigned long flags; if (!hl || !host) return NULL; - read_lock(&hl->host_info_lock); + spin_lock_irqsave(&hl->host_info_lock, flags); list_for_each (lh, &hl->host_info_list) { hi = list_entry(lh, struct hl_host_info, list); if (hi->host == host) break; hi = NULL; } - read_unlock(&hl->host_info_lock); + spin_unlock_irqrestore(&hl->host_info_lock, flags); return hi; } @@ -112,9 +113,9 @@ hi->host = host; - write_lock_irqsave(&hl->host_info_lock, flags); + 
spin_lock_irqsave(&hl->host_info_lock, flags); list_add_tail(&hi->list, &hl->host_info_list); - write_unlock_irqrestore(&hl->host_info_lock, flags); + spin_unlock_irqrestore(&hl->host_info_lock, flags); return data; } @@ -148,9 +149,9 @@ hi = hl_get_hostinfo(hl, host); if (hi) { unsigned long flags; - write_lock_irqsave(&hl->host_info_lock, flags); + spin_lock_irqsave(&hl->host_info_lock, flags); list_del(&hi->list); - write_unlock_irqrestore(&hl->host_info_lock, flags); + spin_unlock_irqrestore(&hl->host_info_lock, flags); kfree(hi); } @@ -187,11 +188,12 @@ struct list_head *lh; struct hl_host_info *hi; void *data = NULL; + unsigned long flags; if (!hl) return NULL; - read_lock(&hl->host_info_lock); + spin_lock_irqsave(&hl->host_info_lock, flags); list_for_each (lh, &hl->host_info_list) { hi = list_entry(lh, struct hl_host_info, list); if (hi->key == key) { @@ -199,7 +201,7 @@ break; } } - read_unlock(&hl->host_info_lock); + spin_unlock_irqrestore(&hl->host_info_lock, flags); return data; } @@ -210,11 +212,12 @@ struct list_head *lh; struct hl_host_info *hi; struct hpsb_host *host = NULL; + unsigned long flags; if (!hl) return NULL; - read_lock(&hl->host_info_lock); + spin_lock_irqsave(&hl->host_info_lock, flags); list_for_each (lh, &hl->host_info_list) { hi = list_entry(lh, struct hl_host_info, list); if (hi->key == key) { @@ -222,7 +225,7 @@ break; } } - read_unlock(&hl->host_info_lock); + spin_unlock_irqrestore(&hl->host_info_lock, flags); return host; } @@ -236,11 +239,11 @@ INIT_LIST_HEAD(&hl->addr_list); INIT_LIST_HEAD(&hl->host_info_list); - rwlock_init(&hl->host_info_lock); + spinlock_init(&hl->host_info_lock); - write_lock_irqsave(&hl_drivers_lock, flags); + spin_lock_irqsave(&hl_drivers_lock, flags); list_add_tail(&hl->hl_list, &hl_drivers); - write_unlock_irqrestore(&hl_drivers_lock, flags); + spin_unlock_irqrestore(&hl_drivers_lock, flags); if (hl->add_host) { down(&hpsb_hosts_lock); @@ -260,17 +263,17 @@ struct hpsb_address_serve *as; unsigned long 
flags; - write_lock_irqsave(&addr_space_lock, flags); + spin_lock_irqsave(&addr_space_lock, flags); list_for_each_safe (lh, next, &hl->addr_list) { as = list_entry(lh, struct hpsb_address_serve, addr_list); list_del(&as->as_list); kfree(as); } - write_unlock_irqrestore(&addr_space_lock, flags); + spin_unlock_irqrestore(&addr_space_lock, flags); - write_lock_irqsave(&hl_drivers_lock, flags); + spin_lock_irqsave(&hl_drivers_lock, flags); list_del(&hl->hl_list); - write_unlock_irqrestore(&hl_drivers_lock, flags); + spin_unlock_irqrestore(&hl_drivers_lock, flags); if (hl->remove_host) { down(&hpsb_hosts_lock); @@ -309,7 +312,7 @@ as->start = start; as->end = end; - write_lock_irqsave(&addr_space_lock, flags); + spin_lock_irqsave(&addr_space_lock, flags); entry = addr_space.next; while (list_entry(entry, struct hpsb_address_serve, as_list)->end @@ -323,7 +326,7 @@ } entry = entry->next; } - write_unlock_irqrestore(&addr_space_lock, flags); + spin_unlock_irqrestore(&addr_space_lock, flags); if (retval == 0) { kfree(as); @@ -339,7 +342,7 @@ struct list_head *entry; unsigned long flags; - write_lock_irqsave(&addr_space_lock, flags); + spin_lock_irqsave(&addr_space_lock, flags); entry = hl->addr_list.next; @@ -355,7 +358,7 @@ } } - write_unlock_irqrestore(&addr_space_lock, flags); + spin_unlock_irqrestore(&addr_space_lock, flags); return retval; } @@ -393,22 +396,24 @@ { struct list_head *entry; struct hpsb_highlevel *hl; + unsigned long flags; - read_lock(&hl_drivers_lock); + spin_lock_irqsave(&hl_drivers_lock, flags); list_for_each(entry, &hl_drivers) { hl = list_entry(entry, struct hpsb_highlevel, hl_list); if (hl->add_host) hl->add_host(host); } - read_unlock(&hl_drivers_lock); + spin_unlock_irqrestore(&hl_drivers_lock, flags); } void highlevel_remove_host(struct hpsb_host *host) { struct list_head *entry; struct hpsb_highlevel *hl; + unsigned long flags; - read_lock(&hl_drivers_lock); + spin_lock_irqsave(&hl_drivers_lock, flags); list_for_each(entry, &hl_drivers) { hl 
= list_entry(entry, struct hpsb_highlevel, hl_list); @@ -417,22 +422,23 @@ hpsb_destroy_hostinfo(hl, host); } } - read_unlock(&hl_drivers_lock); + spin_unlock_irqrestore(&hl_drivers_lock, flags); } void highlevel_host_reset(struct hpsb_host *host) { struct list_head *entry; struct hpsb_highlevel *hl; + unsigned long flags; - read_lock(&hl_drivers_lock); + spin_lock_irqsave(&hl_drivers_lock, flags); list_for_each(entry, &hl_drivers) { hl = list_entry(entry, struct hpsb_highlevel, hl_list); if (hl->host_reset) hl->host_reset(host); } - read_unlock(&hl_drivers_lock); + spin_unlock_irqrestore(&hl_drivers_lock, flags); } void highlevel_iso_receive(struct hpsb_host *host, void *data, @@ -441,8 +447,9 @@ struct list_head *entry; struct hpsb_highlevel *hl; int channel = (((quadlet_t *)data)[0] >> 8) & 0x3f; + unsigned long flags; - read_lock(&hl_drivers_lock); + spin_lock_irqsave(&hl_drivers_lock, flags); entry = hl_drivers.next; while (entry != &hl_drivers) { @@ -452,7 +459,7 @@ } entry = entry->next; } - read_unlock(&hl_drivers_lock); + spin_unlock_irqrestore(&hl_drivers_lock, flags); } void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction, @@ -461,8 +468,9 @@ struct list_head *entry; struct hpsb_highlevel *hl; int cts = ((quadlet_t *)data)[0] >> 4; + unsigned long flags; - read_lock(&hl_drivers_lock); + spin_lock_irqsave(&hl_drivers_lock, flags); entry = hl_drivers.next; while (entry != &hl_drivers) { @@ -473,7 +481,7 @@ } entry = entry->next; } - read_unlock(&hl_drivers_lock); + spin_unlock_irqrestore(&hl_drivers_lock, flags); } int highlevel_read(struct hpsb_host *host, int nodeid, void *data, @@ -483,8 +491,9 @@ struct list_head *entry; unsigned int partlength; int rcode = RCODE_ADDRESS_ERROR; + unsigned long flags; - read_lock(&addr_space_lock); + spin_lock_irqsave(&addr_space_lock, flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -513,7 +522,7 @@ as = list_entry(entry, struct hpsb_address_serve, 
as_list); } - read_unlock(&addr_space_lock); + spin_unlock_irqrestore(&addr_space_lock, flags); if (length && (rcode == RCODE_COMPLETE)) { rcode = RCODE_ADDRESS_ERROR; @@ -529,8 +538,9 @@ struct list_head *entry; unsigned int partlength; int rcode = RCODE_ADDRESS_ERROR; + unsigned long flags; - read_lock(&addr_space_lock); + spin_lock_irqsave(&addr_space_lock, flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -559,7 +569,7 @@ as = list_entry(entry, struct hpsb_address_serve, as_list); } - read_unlock(&addr_space_lock); + spin_unlock_irqrestore(&addr_space_lock, flags); if (length && (rcode == RCODE_COMPLETE)) { rcode = RCODE_ADDRESS_ERROR; @@ -575,8 +585,9 @@ struct hpsb_address_serve *as; struct list_head *entry; int rcode = RCODE_ADDRESS_ERROR; + unsigned long flags; - read_lock(&addr_space_lock); + spin_lock_irqsave(&addr_space_lock, flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -597,7 +608,7 @@ as = list_entry(entry, struct hpsb_address_serve, as_list); } - read_unlock(&addr_space_lock); + spin_unlock_irqrestore(&addr_space_lock, flags); return rcode; } @@ -608,8 +619,9 @@ struct hpsb_address_serve *as; struct list_head *entry; int rcode = RCODE_ADDRESS_ERROR; + unsigned long flags; - read_lock(&addr_space_lock); + spin_lock_irqsave(&addr_space_lock, flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -631,7 +643,7 @@ as = list_entry(entry, struct hpsb_address_serve, as_list); } - read_unlock(&addr_space_lock); + spin_unlock_irqrestore(&addr_space_lock, flags); return rcode; } diff -Nru a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h --- a/drivers/ieee1394/highlevel.h Mon Dec 15 04:26:19 2003 +++ b/drivers/ieee1394/highlevel.h Mon Dec 15 04:26:19 2003 @@ -60,7 +60,7 @@ struct list_head addr_list; struct list_head host_info_list; - rwlock_t host_info_lock; + spinlock_t host_info_lock; }; struct hpsb_address_ops 
{ diff -Nru a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c --- a/drivers/ieee1394/ieee1394_core.c Mon Dec 15 04:26:19 2003 +++ b/drivers/ieee1394/ieee1394_core.c Mon Dec 15 04:26:19 2003 @@ -1003,7 +1003,7 @@ struct module *module; } ieee1394_chardevs[16]; -static rwlock_t ieee1394_chardevs_lock = RW_LOCK_UNLOCKED; +static spinlock_t ieee1394_chardevs_lock = SPIN_LOCK_UNLOCKED; static int ieee1394_dispatch_open(struct inode *inode, struct file *file); @@ -1022,7 +1022,7 @@ if ( (blocknum < 0) || (blocknum > 15) ) return -EINVAL; - write_lock(&ieee1394_chardevs_lock); + spin_lock(&ieee1394_chardevs_lock); if (ieee1394_chardevs[blocknum].file_ops == NULL) { /* grab the minor block */ @@ -1035,7 +1035,7 @@ retval = -EBUSY; } - write_unlock(&ieee1394_chardevs_lock); + spin_unlock(&ieee1394_chardevs_lock); return retval; } @@ -1046,14 +1046,14 @@ if ( (blocknum < 0) || (blocknum > 15) ) return; - write_lock(&ieee1394_chardevs_lock); + spin_lock(&ieee1394_chardevs_lock); if (ieee1394_chardevs[blocknum].file_ops) { ieee1394_chardevs[blocknum].file_ops = NULL; ieee1394_chardevs[blocknum].module = NULL; } - write_unlock(&ieee1394_chardevs_lock); + spin_unlock(&ieee1394_chardevs_lock); } /* @@ -1076,7 +1076,7 @@ if ((blocknum < 0) || (blocknum > 15)) return ret; - read_lock(&ieee1394_chardevs_lock); + spin_lock(&ieee1394_chardevs_lock); *module = ieee1394_chardevs[blocknum].module; *file_ops = ieee1394_chardevs[blocknum].file_ops; @@ -1091,7 +1091,7 @@ ret = 1; out: - read_unlock(&ieee1394_chardevs_lock); + spin_unlock(&ieee1394_chardevs_lock); return ret; } diff -Nru a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c --- a/drivers/isdn/capi/capi.c Mon Dec 15 04:26:23 2003 +++ b/drivers/isdn/capi/capi.c Mon Dec 15 04:26:23 2003 @@ -137,11 +137,11 @@ /* -------- global variables ---------------------------------------- */ -static rwlock_t capidev_list_lock = RW_LOCK_UNLOCKED; +static spinlock_t capidev_list_lock = SPIN_LOCK_UNLOCKED; static 
LIST_HEAD(capidev_list); #ifdef CONFIG_ISDN_CAPI_MIDDLEWARE -static rwlock_t capiminor_list_lock = RW_LOCK_UNLOCKED; +static spinlock_t capiminor_list_lock = SPIN_LOCK_UNLOCKED; static LIST_HEAD(capiminor_list); #endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */ @@ -219,7 +219,7 @@ skb_queue_head_init(&mp->inqueue); skb_queue_head_init(&mp->outqueue); - write_lock_irqsave(&capiminor_list_lock, flags); + spin_lock_irqsave(&capiminor_list_lock, flags); list_for_each(l, &capiminor_list) { p = list_entry(l, struct capiminor, list); if (p->minor > minor) { @@ -229,7 +229,7 @@ } minor++; } - write_unlock_irqrestore(&capiminor_list_lock, flags); + spin_unlock_irqrestore(&capiminor_list_lock, flags); if (l == &capiminor_list) { kfree(mp); return NULL; @@ -241,9 +241,9 @@ { unsigned long flags; - write_lock_irqsave(&capiminor_list_lock, flags); + spin_lock_irqsave(&capiminor_list_lock, flags); list_del(&mp->list); - write_unlock_irqrestore(&capiminor_list_lock, flags); + spin_unlock_irqrestore(&capiminor_list_lock, flags); if (mp->ttyskb) kfree_skb(mp->ttyskb); mp->ttyskb = 0; @@ -257,14 +257,15 @@ { struct list_head *l; struct capiminor *p = NULL; + unsigned long flags; - read_lock(&capiminor_list_lock); + spin_lock_irqsave(&capiminor_list_lock, flags); list_for_each(l, &capiminor_list) { p = list_entry(l, struct capiminor, list); if (p->minor == minor) break; } - read_unlock(&capiminor_list_lock); + spin_unlock_irqrestore(&capiminor_list_lock, flags); if (l == &capiminor_list) return NULL; @@ -369,9 +370,9 @@ skb_queue_head_init(&cdev->recvqueue); init_waitqueue_head(&cdev->recvwait); - write_lock_irqsave(&capidev_list_lock, flags); + spin_lock_irqsave(&capidev_list_lock, flags); list_add_tail(&cdev->list, &capidev_list); - write_unlock_irqrestore(&capidev_list_lock, flags); + spin_unlock_irqrestore(&capidev_list_lock, flags); return cdev; } @@ -385,9 +386,9 @@ } skb_queue_purge(&cdev->recvqueue); - write_lock_irqsave(&capidev_list_lock, flags); + 
spin_lock_irqsave(&capidev_list_lock, flags); list_del(&cdev->list); - write_unlock_irqrestore(&capidev_list_lock, flags); + spin_unlock_irqrestore(&capidev_list_lock, flags); kfree(cdev); } @@ -1349,8 +1350,9 @@ struct capidev *cdev; struct list_head *l; int len = 0; + unsigned long flags; - read_lock(&capidev_list_lock); + spin_lock_irqsave(&capidev_list_lock, flags); list_for_each(l, &capidev_list) { cdev = list_entry(l, struct capidev, list); len += sprintf(page+len, "0 %d %lu %lu %lu %lu\n", @@ -1369,7 +1371,7 @@ } endloop: - read_unlock(&capidev_list_lock); + spin_unlock_irqrestore(&capidev_list_lock, flags); if (len < count) *eof = 1; if (len > count) len = count; @@ -1388,8 +1390,9 @@ struct capincci *np; struct list_head *l; int len = 0; + unsigned long flags; - read_lock(&capidev_list_lock); + spin_lock_irqsave(&capidev_list_lock, flags); list_for_each(l, &capidev_list) { cdev = list_entry(l, struct capidev, list); for (np=cdev->nccis; np; np = np->next) { @@ -1406,7 +1409,7 @@ } } endloop: - read_unlock(&capidev_list_lock); + spin_unlock_irqrestore(&capidev_list_lock, flags); *start = page+off; if (len < count) *eof = 1; diff -Nru a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c --- a/drivers/macintosh/adb.c Mon Dec 15 04:26:27 2003 +++ b/drivers/macintosh/adb.c Mon Dec 15 04:26:27 2003 @@ -110,12 +110,12 @@ * and handler_id fields of adb_handler[i] for all i, and changes to the * handler field. * Accesses to the handler field are protected by the adb_handler_lock - * rwlock. It is held across all calls to any handler, so that by the + * spinlock. It is held across all calls to any handler, so that by the * time adb_unregister returns, we know that the old handler isn't being * called. 
*/ static DECLARE_MUTEX(adb_handler_sem); -static rwlock_t adb_handler_lock = RW_LOCK_UNLOCKED; +static spinlock_t adb_handler_lock = SPIN_LOCK_UNLOCKED; #if 0 static void printADBreply(struct adb_request *req) @@ -400,9 +400,9 @@ } down(&adb_handler_sem); - write_lock_irq(&adb_handler_lock); + spin_lock_irq(&adb_handler_lock); memset(adb_handler, 0, sizeof(adb_handler)); - write_unlock_irq(&adb_handler_lock); + spin_unlock_irq(&adb_handler_lock); /* That one is still a bit synchronous, oh well... */ if (adb_controller->reset_bus) @@ -532,9 +532,9 @@ default_id); continue; } - write_lock_irq(&adb_handler_lock); + spin_lock_irq(&adb_handler_lock); adb_handler[i].handler = handler; - write_unlock_irq(&adb_handler_lock); + spin_unlock_irq(&adb_handler_lock); ids->id[ids->nids++] = i; } } @@ -548,17 +548,17 @@ int ret = -ENODEV; down(&adb_handler_sem); - write_lock_irq(&adb_handler_lock); + spin_lock_irq(&adb_handler_lock); if (adb_handler[index].handler) { while(adb_handler[index].busy) { - write_unlock_irq(&adb_handler_lock); + spin_unlock_irq(&adb_handler_lock); yield(); - write_lock_irq(&adb_handler_lock); + spin_lock_irq(&adb_handler_lock); } ret = 0; adb_handler[index].handler = 0; } - write_unlock_irq(&adb_handler_lock); + spin_unlock_irq(&adb_handler_lock); up(&adb_handler_sem); return ret; } @@ -585,11 +585,11 @@ printk(" %x", buf[i]); printk(", id = %d\n", id); } - write_lock_irqsave(&adb_handler_lock, flags); + spin_lock_irqsave(&adb_handler_lock, flags); handler = adb_handler[id].handler; if (handler != NULL) adb_handler[id].busy = 1; - write_unlock_irqrestore(&adb_handler_lock, flags); + spin_unlock_irqrestore(&adb_handler_lock, flags); if (handler != NULL) { (*handler)(buf, nb, regs, autopoll); wmb(); diff -Nru a/drivers/media/video/zr36120.c b/drivers/media/video/zr36120.c --- a/drivers/media/video/zr36120.c Mon Dec 15 04:26:31 2003 +++ b/drivers/media/video/zr36120.c Mon Dec 15 04:26:31 2003 @@ -186,6 +186,8 @@ static void reap_states(struct zoran* ztv) 
{ + unsigned long flags; + /* count frames */ ztv->fieldnr++; @@ -222,11 +224,11 @@ } /* item completed, skip to next item in queue */ - write_lock(&ztv->lock); + spin_lock_irqsave(&ztv->lock, flags); newitem = ztv->workqueue->next; ztv->workqueue->next = 0; /* mark completed */ ztv->workqueue = newitem; - write_unlock(&ztv->lock); + spin_unlock_irqrestore(&ztv->lock, flags); } /* @@ -236,14 +238,15 @@ if (ztv->workqueue) { struct vidinfo* newitem; + again: DEBUG(printk(CARD_DEBUG "starting %s at %p\n",CARD,ztv->workqueue->kindof==FBUFFER_GRAB?"grab":"read",ztv->workqueue)); /* loadup the frame settings */ - read_lock(&ztv->lock); + spin_lock_irqsave(&ztv->lock, flags); zoran_set_geo(ztv,ztv->workqueue); - read_unlock(&ztv->lock); + spin_unlock_irqrestore(&ztv->lock, flags); switch (ztv->workqueue->kindof) { case FBUFFER_GRAB: @@ -257,11 +260,11 @@ break; default: printk(CARD_INFO "what is this doing on the queue? (kindof=%d)\n",CARD,ztv->workqueue->kindof); - write_lock(&ztv->lock); + spin_lock_irqsave(&ztv->lock, flags); newitem = ztv->workqueue->next; ztv->workqueue->next = 0; ztv->workqueue = newitem; - write_unlock(&ztv->lock); + spin_unlock_irqrestore(&ztv->lock, flags); if (newitem) goto again; /* yeah, sure.. 
*/ } @@ -284,9 +287,9 @@ { DEBUG(printk(CARD_DEBUG "starting overlay\n",CARD)); - read_lock(&ztv->lock); + spin_lock_irqsave(&ztv->lock, flags); zoran_set_geo(ztv,&ztv->overinfo); - read_unlock(&ztv->lock); + spin_unlock_irqrestore(&ztv->lock, flags); zror(ZORAN_OCR_OVLEN, ZORAN_OCR); zrand(~ZORAN_VSTR_SNAPSHOT,ZORAN_VSTR); @@ -307,7 +310,7 @@ struct vidinfo* lastitem; /* protect the workqueue */ - write_lock(&ztv->lock); + spin_lock_irqsave(&ztv->lock, flags); lastitem = ztv->workqueue; if (lastitem) while (lastitem->next) lastitem = lastitem->next; @@ -322,7 +325,7 @@ lastitem->next = item; lastitem = item; } - write_unlock(&ztv->lock); + spin_unlock_irqrestore(&ztv->lock, flags); if (ztv->workqueue) goto again; /* hey, _i_ graduated :) */ } @@ -853,7 +856,7 @@ for (;;) { struct vidinfo* item; - write_lock_irq(&ztv->lock); + spin_lock_irq(&ztv->lock); for (item=ztv->grabinfo; item!=ztv->grabinfo+ZORAN_MAX_FBUFFERS; item++) { if (!unused && item->status == FBUFFER_FREE) @@ -865,7 +868,7 @@ break; /* no more free buffers, wait for them. */ - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); if (nonblock) return -EWOULDBLOCK; interruptible_sleep_on(&ztv->grabq); @@ -877,7 +880,7 @@ if (!done) { /* no? than this will take a while... */ if (nonblock) { - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); return -EWOULDBLOCK; } @@ -897,7 +900,7 @@ oldframe->next = unused; } } - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); /* tell the state machine we want it filled /NOW/ */ zoran_cap(ztv, 1); @@ -912,7 +915,7 @@ done = unused; } else - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); /* Yes! we got data! 
*/ max = done->bpl * done->h; @@ -1154,10 +1157,10 @@ palette2fmt[p.palette].bpp != ztv->overinfo.bpp) return -EINVAL; - write_lock_irq(&ztv->lock); + spin_lock_irq(&ztv->lock); ztv->overinfo.format = p.palette; ztv->picture = p; - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); /* tell the decoder */ i2c_control_device(&ztv->i2c, I2C_DRIVERID_VIDEODECODER, DECODER_SET_PICTURE, &p); @@ -1168,7 +1171,7 @@ { struct video_window vw; DEBUG(printk(CARD_DEBUG "VIDIOCGWIN\n",CARD)); - read_lock(&ztv->lock); + spin_lock_irq(&ztv->lock); vw.x = ztv->overinfo.x; vw.y = ztv->overinfo.y; vw.width = ztv->overinfo.w; @@ -1177,7 +1180,7 @@ vw.flags = 0; if (ztv->vidInterlace) vw.flags|=VIDEO_WINDOW_INTERLACE; - read_unlock(&ztv->lock); + spin_unlock_irq(&ztv->lock); if (copy_to_user(arg,&vw,sizeof(vw))) return -EFAULT; break; @@ -1222,12 +1225,12 @@ vw.y = ztv->overinfo.y; /* by now we are committed to the new data... */ - write_lock_irq(&ztv->lock); + spin_lock_irq(&ztv->lock); ztv->overinfo.x = vw.x; ztv->overinfo.y = vw.y; ztv->overinfo.w = vw.width; ztv->overinfo.h = vw.height; - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); /* * Impose display clips @@ -1274,13 +1277,13 @@ { struct video_buffer v; DEBUG(printk(CARD_DEBUG "VIDIOCGFBUF\n",CARD)); - read_lock(&ztv->lock); + spin_lock_irq(&ztv->lock); v.base = (void *)ztv->overinfo.busadr; v.height = ztv->sheight; v.width = ztv->swidth; v.depth = ztv->depth; v.bytesperline = ztv->overinfo.bpl; - read_unlock(&ztv->lock); + spin_unlock_irq(&ztv->lock); if(copy_to_user(arg, &v,sizeof(v))) return -EFAULT; break; @@ -1300,14 +1303,14 @@ return -EINVAL; if (ztv->running) return -EBUSY; - write_lock_irq(&ztv->lock); + spin_lock_irq(&ztv->lock); ztv->overinfo.busadr = (ulong)v.base; ztv->sheight = v.height; ztv->swidth = v.width; ztv->depth = v.depth; /* bits per pixel */ ztv->overinfo.bpp = ((v.depth+1)&0x38)/8;/* bytes per pixel */ ztv->overinfo.bpl = v.bytesperline; /* bytes per line */ - 
write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); break; } @@ -1368,7 +1371,7 @@ return -EBUSY; /* setup the other parameters if they are given */ - write_lock_irq(&ztv->lock); + spin_lock_irq(&ztv->lock); frame->w = vm.width; frame->h = vm.height; frame->format = vm.format; @@ -1384,7 +1387,7 @@ oldframe->next = frame; } } - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); zoran_cap(ztv, 1); break; } @@ -1601,7 +1604,7 @@ for (;;) { struct vidinfo* item; - write_lock_irq(&ztv->lock); + spin_lock_irq(&ztv->lock); for (item=ztv->readinfo; item!=ztv->readinfo+ZORAN_VBI_BUFFERS; item++) { if (!unused && item->status == FBUFFER_FREE) unused = item; @@ -1612,7 +1615,7 @@ break; /* no more free buffers, wait for them. */ - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); if (nonblock) return -EWOULDBLOCK; interruptible_sleep_on(&ztv->vbiq); @@ -1624,7 +1627,7 @@ if (!done) { /* no? than this will take a while... */ if (nonblock) { - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); return -EWOULDBLOCK; } @@ -1639,7 +1642,7 @@ oldframe->next = unused; } } - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); /* tell the state machine we want it filled /NOW/ */ zoran_cap(ztv, 1); @@ -1654,7 +1657,7 @@ done = unused; } else - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); /* Yes! we got data! 
*/ max = done->bpl * -done->h; @@ -1803,14 +1806,14 @@ f.flags != VBI_INTERLACED) return -EINVAL; - write_lock_irq(&ztv->lock); + spin_lock_irq(&ztv->lock); ztv->readinfo[0].y = f.start[0]; ztv->readinfo[0].w = -f.samples_per_line; ztv->readinfo[0].h = -f.count[0]; ztv->readinfo[0].bpl = f.samples_per_line*ztv->readinfo[0].bpp; for (i=1; ireadinfo[i] = ztv->readinfo[i]; - write_unlock_irq(&ztv->lock); + spin_unlock_irq(&ztv->lock); break; } default: @@ -1955,7 +1958,7 @@ ztv->tuner_type = 0; ztv->running = 0; ztv->users = 0; - ztv->lock = RW_LOCK_UNLOCKED; + spin_lock_init(&ztv->lock); ztv->workqueue = 0; ztv->fieldnr = 0; ztv->lastfieldnr = 0; diff -Nru a/drivers/media/video/zr36120.h b/drivers/media/video/zr36120.h --- a/drivers/media/video/zr36120.h Mon Dec 15 04:26:31 2003 +++ b/drivers/media/video/zr36120.h Mon Dec 15 04:26:31 2003 @@ -129,7 +129,7 @@ int users; /* howmany video/vbi open? */ int tuner_type; /* tuner type, when found */ int running; /* are we rolling? */ - rwlock_t lock; + spinlock_t lock; long state; /* what is requested of us? 
*/ #define STATE_OVERLAY 0 #define STATE_VBI 1 diff -Nru a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h --- a/drivers/message/fusion/mptbase.h Mon Dec 15 04:26:35 2003 +++ b/drivers/message/fusion/mptbase.h Mon Dec 15 04:26:35 2003 @@ -372,7 +372,6 @@ struct _VirtDevice *forw; struct _VirtDevice *back; struct scsi_device *device; - rwlock_t VdevLock; int ref_cnt; u8 tflags; u8 ioc_id; @@ -439,7 +438,6 @@ typedef struct _VirtDevTracker { struct _VirtDevice *head; struct _VirtDevice *tail; - rwlock_t VlistLock; int pad; } VirtDevTracker; diff -Nru a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c --- a/drivers/message/fusion/mptlan.c Mon Dec 15 04:26:35 2003 +++ b/drivers/message/fusion/mptlan.c Mon Dec 15 04:26:35 2003 @@ -180,7 +180,7 @@ #ifdef QLOGIC_NAA_WORKAROUND static struct NAA_Hosed *mpt_bad_naa = NULL; -rwlock_t bad_naa_lock; +static spinlock_t bad_naa_lock = SPIN_LOCK_INIT; #endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -809,7 +809,7 @@ /* Munge the NAA for Tx packets to QLogic boards, which don't follow RFC 2625. The longer I look at this, the more my opinion of Qlogic drops. */ - read_lock_irq(&bad_naa_lock); + spin_lock_irq(&bad_naa_lock); for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { if ((nh->ieee[0] == skb->mac.raw[0]) && (nh->ieee[1] == skb->mac.raw[1]) && @@ -823,7 +823,7 @@ break; } } - read_unlock_irq(&bad_naa_lock); + spin_unlock_irq(&bad_naa_lock); } #endif @@ -1451,12 +1451,6 @@ show_mptmod_ver(LANAME, LANVER); -#ifdef QLOGIC_NAA_WORKAROUND - /* Init the global r/w lock for the bad_naa list. We want to do this - before any boards are initialized and may be used. 
*/ - rwlock_init(&bad_naa_lock); -#endif - if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) { printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n"); return -EBUSY; @@ -1620,7 +1614,7 @@ "system with non-RFC 2625 NAA value (%04x).\n", source_naa)); - write_lock_irq(&bad_naa_lock); + spin_lock_irq(&bad_naa_lock); for (prevnh = nh = mpt_bad_naa; nh != NULL; prevnh=nh, nh=nh->next) { if ((nh->ieee[0] == fch->saddr[0]) && @@ -1666,7 +1660,7 @@ " set, but nh isn't null. Evil " "funkiness abounds.\n"); } - write_unlock_irq(&bad_naa_lock); + spin_unlock_irq(&bad_naa_lock); } } #endif diff -Nru a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c --- a/drivers/message/fusion/mptscsih.c Mon Dec 15 04:26:35 2003 +++ b/drivers/message/fusion/mptscsih.c Mon Dec 15 04:26:35 2003 @@ -3243,7 +3243,6 @@ return ENOMEM; } else { memset(vdev, 0, sizeof(VirtDevice)); - rwlock_init(&vdev->VdevLock); Q_INIT(&vdev->WaitQ, void); Q_INIT(&vdev->SentQ, void); Q_INIT(&vdev->DoneQ, void); diff -Nru a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c --- a/drivers/net/bonding/bond_3ad.c Mon Dec 15 04:26:39 2003 +++ b/drivers/net/bonding/bond_3ad.c Mon Dec 15 04:26:39 2003 @@ -2121,7 +2121,7 @@ struct port *port; struct aggregator *aggregator; - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); //check if there are any slaves if (bond->next == (struct slave *)bond) { @@ -2166,7 +2166,7 @@ } end: - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); if ((bond->device->flags & IFF_UP) == IFF_UP) { @@ -2375,21 +2375,21 @@ return 0; } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); slave = bond->prev; /* check if bond is empty */ if ((slave == (struct slave *) bond) || (bond->slave_cnt == 0)) { printk(KERN_DEBUG "ERROR: bond is empty\n"); dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } if (bond_3ad_get_active_agg_info(bond, &ad_info)) { printk(KERN_DEBUG "ERROR: 
bond_3ad_get_active_agg_info failed\n"); dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -2400,7 +2400,7 @@ /*the aggregator is empty*/ printk(KERN_DEBUG "ERROR: active aggregator is empty\n"); dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -2408,7 +2408,7 @@ if ((slave == NULL) || (slave->dev == NULL)) { /* no suitable interface, frame not sent */ dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -2427,7 +2427,7 @@ if (slave == NULL) { printk(KERN_ERR "bonding: Error: slave is NULL\n"); dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } } @@ -2435,7 +2435,7 @@ if (slave == (slave_t *)bond) { printk(KERN_ERR "bonding: Error: Couldn't find a slave to tx on for aggregator ID %d\n", agg_id); dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -2448,7 +2448,7 @@ if (slave == NULL) { printk(KERN_ERR "bonding: Error: slave is NULL\n"); dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -2463,14 +2463,14 @@ skb->dev = slave->dev; skb->priority = 1; dev_queue_xmit(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } } while ((slave = slave->next) != start_at); /* no suitable interface, frame not sent */ dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -2484,7 +2484,7 @@ goto out; } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); slave = bond_get_slave_by_dev((struct bonding *)dev->priv, skb->real_dev); if (slave == NULL) { @@ -2496,7 +2496,7 @@ ret = NET_RX_SUCCESS; out_unlock: - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); out: dev_kfree_skb(skb); diff -Nru a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c --- a/drivers/net/bonding/bond_alb.c Mon Dec 15 04:26:39 2003 +++ b/drivers/net/bonding/bond_alb.c Mon Dec 
15 04:26:39 2003 @@ -171,7 +171,7 @@ slave_info->head = TLB_NULL_INDEX; } -/* Caller must hold bond lock for read */ +/* Caller must hold bond lock */ static inline void tlb_clear_slave(struct bonding *bond, struct slave *slave, u8 save_load) { @@ -252,7 +252,7 @@ _unlock_tx_hashtbl(bond); } -/* Caller must hold bond lock for read */ +/* Caller must hold bond lock */ static struct slave* tlb_get_least_loaded_slave(struct bonding *bond) { @@ -294,7 +294,7 @@ return least_loaded; } -/* Caller must hold bond lock for read */ +/* Caller must hold bond lock */ struct slave* tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len) { @@ -426,7 +426,7 @@ return ret; } -/* Caller must hold bond lock for read */ +/* Caller must hold bond lock */ static struct slave* rlb_next_rx_slave(struct bonding *bond) { @@ -464,7 +464,7 @@ /* teach the switch the mac of a disabled slave * on the primary for fault tolerance * - * Caller must hold bond->ptrlock for write or bond lock for write + * Caller must hold bond->ptrlock or bond lock */ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) @@ -483,7 +483,7 @@ /* slave being removed should not be active at this point * - * Caller must hold bond lock for read + * Caller must hold bond lock */ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) @@ -533,11 +533,11 @@ _unlock_rx_hashtbl(bond); - write_lock(&bond->ptrlock); + spin_lock_bh(&bond->ptrlock); if (slave != bond->current_slave) { rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr); } - write_unlock(&bond->ptrlock); + spin_unlock_bh(&bond->ptrlock); } static void @@ -673,7 +673,7 @@ _unlock_rx_hashtbl(bond); } -/* Caller must hold both bond and ptr locks for read */ +/* Caller must hold both bond and ptr locks */ struct slave* rlb_choose_channel(struct bonding *bond, struct arp_pkt *arp) { @@ -803,7 +803,7 @@ return tx_slave; } -/* Caller must hold bond lock for read */ +/* Caller must hold bond lock */ static 
void rlb_rebalance(struct bonding *bond) { @@ -979,7 +979,7 @@ return 0; } -/* Caller must hold bond lock for write or ptrlock for write*/ +/* Caller must hold bond lock or ptrlock */ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, @@ -1095,7 +1095,7 @@ * assumption: this function is called before @slave is attached to the * bond slave list. * - * caller must hold the bond lock for write since the mac addresses are compared + * caller must hold the bond lock since the mac addresses are compared * and may be swapped. */ static int @@ -1296,16 +1296,16 @@ /* make sure that the current_slave and the slaves list do * not change during tx */ - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); if (bond->slave_cnt == 0) { /* no suitable interface, frame not sent */ dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); switch (ntohs(skb->protocol)) { case ETH_P_IP: @@ -1389,8 +1389,8 @@ dev_kfree_skb(skb); } - read_unlock(&bond->ptrlock); - read_unlock(&bond->lock); + spin_unlock(&bond->ptrlock); + spin_unlock_bh(&bond->lock); return 0; } @@ -1400,7 +1400,7 @@ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *slave = NULL; - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); if ((bond->slave_cnt == 0) || !(bond->device->flags & IFF_UP)) { bond_info->tx_rebalance_counter = 0; @@ -1418,20 +1418,20 @@ * sending the learning packets, the ptrlock must be held for * read. 
*/ - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); slave = bond_get_first_slave(bond); while (slave) { alb_send_learning_packets(slave,slave->dev->dev_addr); slave = bond_get_next_slave(bond, slave); } - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); bond_info->lp_counter = 0; } /* rebalance tx traffic */ if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) { - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); slave = bond_get_first_slave(bond); while (slave) { tlb_clear_slave(bond, slave, 1); @@ -1443,7 +1443,7 @@ } slave = bond_get_next_slave(bond, slave); } - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); bond_info->tx_rebalance_counter = 0; } @@ -1454,7 +1454,7 @@ * write lock to protect from other code that also * sets the promiscuity. */ - write_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); if (bond_info->primary_is_promisc && (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) { @@ -1468,7 +1468,7 @@ dev_set_promiscuity(bond->current_slave->dev, -1); bond_info->primary_is_promisc = 0; } - write_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); if (bond_info->rlb_rebalance == 1) { bond_info->rlb_rebalance = 0; @@ -1491,7 +1491,7 @@ } out: - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); if (bond->device->flags & IFF_UP) { /* re-arm the timer */ @@ -1514,14 +1514,14 @@ return err; } - /* caller must hold the bond lock for write since the mac addresses + /* caller must hold the bond lock since the mac addresses * are compared and may be swapped. 
*/ - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); err = alb_handle_addr_collision_on_attach(bond, slave); - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); if (err) { return err; @@ -1539,7 +1539,7 @@ return 0; } -/* Caller must hold bond lock for write */ +/* Caller must hold bond lock */ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave) { @@ -1555,7 +1555,7 @@ } } -/* Caller must hold bond lock for read */ +/* Caller must hold bond lock */ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link) @@ -1589,7 +1589,7 @@ * Set the bond->current_slave to @new_slave and handle * mac address swapping and promiscuity changes as needed. * - * Caller must hold bond ptrlock for write (or bond lock for write) + * Caller must hold bond ptrlock (or bond lock) */ void bond_alb_assign_current_slave(struct bonding *bond, struct slave *new_slave) diff -Nru a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c --- a/drivers/net/bonding/bond_main.c Mon Dec 15 04:26:39 2003 +++ b/drivers/net/bonding/bond_main.c Mon Dec 15 04:26:39 2003 @@ -937,11 +937,11 @@ { int has_active_interface = 0; - read_lock_bh(&bond->lock); - read_lock(&bond->ptrlock); + spin_lock_bh(&bond->lock); + spin_lock(&bond->ptrlock); has_active_interface = (bond->current_slave != NULL); - read_unlock(&bond->ptrlock); - read_unlock_bh(&bond->lock); + spin_unlock(&bond->ptrlock); + spin_unlock_bh(&bond->lock); return (has_active_interface ? BMSR_LSTATUS : 0); } @@ -1029,7 +1029,7 @@ { bonding_t *bond = (struct bonding *) master->priv; - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); if (miimon > 0) { /* link check interval, in milliseconds. 
*/ del_timer(&bond->mii_timer); @@ -1051,7 +1051,7 @@ bond_mc_list_destroy (bond); - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); /* Release the bonded slaves */ bond_release_all(master); @@ -1238,7 +1238,7 @@ bonding_t *bond = master->priv; struct dev_mc_list *dmi; - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); /* * Do promisc before checking multicast_mode @@ -1251,7 +1251,7 @@ if (multicast_mode == BOND_MULTICAST_DISABLED) { bond->flags = master->flags; - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); return; } @@ -1281,7 +1281,7 @@ bond_mc_list_destroy (bond); bond_mc_list_copy (master->mc_list, bond, GFP_ATOMIC); - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); } /* @@ -1510,7 +1510,7 @@ dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0); } - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); bond_attach_slave(bond, new_slave); new_slave->delay = 0; @@ -1680,7 +1680,7 @@ bond->current_slave = new_slave; } - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); if (app_abi_ver < 1) { /* @@ -1776,7 +1776,7 @@ } bond = (struct bonding *) master_dev->priv; - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); slave = (slave_t *)bond; oldactive = bond->current_slave; @@ -1791,7 +1791,7 @@ * Changing to the current active: do nothing; return success. 
*/ if (newactive && (newactive == oldactive)) { - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -1803,7 +1803,7 @@ } else { ret = -EINVAL; } - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); return ret; } @@ -1990,7 +1990,7 @@ return -EINVAL; } - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); bond->current_arp_slave = NULL; our_slave = (slave_t *)bond; old_current = bond->current_slave; @@ -2060,7 +2060,7 @@ } } - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); if (our_slave == (slave_t *)bond) { /* if we get here, it's because the device was not found */ @@ -2143,7 +2143,7 @@ bond = (struct bonding *) master->priv; - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); if (bond->next == (struct slave *) bond) { err = -EINVAL; goto out; @@ -2177,7 +2177,7 @@ * all the undo steps that should not be called from * within a lock. */ - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); /* unset promiscuity level from slave */ if (master->flags & IFF_PROMISC) { @@ -2217,7 +2217,7 @@ kfree(our_slave); /* re-acquire the lock before getting the next slave */ - write_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); } /* zero the mac address of the master so it will be @@ -2229,7 +2229,7 @@ printk (KERN_INFO "%s: released all slaves\n", master->name); out: - write_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); return err; } @@ -2242,7 +2242,7 @@ int slave_died = 0; int do_failover = 0; - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); /* we will try to read the link status of each of our slaves, and * set their IFF_RUNNING flag appropriately. 
For each slave not @@ -2252,9 +2252,9 @@ slave = (slave_t *)bond; - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); oldcurrent = bond->current_slave; - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); while ((slave = slave->prev) != (slave_t *)bond) { struct net_device *dev = slave->dev; @@ -2436,7 +2436,7 @@ } /* end of while */ if (do_failover) { - write_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); reselect_active_interface(bond); if (oldcurrent && !bond->current_slave) { @@ -2445,10 +2445,10 @@ master->name); } - write_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); } - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); /* re-arm the timer */ mod_timer(&bond->mii_timer, jiffies + (miimon * HZ / 1000)); } @@ -2488,11 +2488,11 @@ return; } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); oldcurrent = bond->current_slave; - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); /* see if any of the previous devices are up now (i.e. they have * xmt and rcv traffic). 
the current_slave does not come into @@ -2574,7 +2574,7 @@ } if (do_failover) { - write_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); reselect_active_interface(bond); if (oldcurrent && !bond->current_slave) { @@ -2583,10 +2583,10 @@ master->name); } - write_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); } - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); rtnl_exunlock(); rtnl_shunlock(); @@ -2627,7 +2627,7 @@ return; } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); /* determine if any slave has come up or any backup slave has * gone down @@ -2642,7 +2642,7 @@ the_delta_in_ticks) { slave->link = BOND_LINK_UP; - write_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); if ((bond->current_slave == NULL) && ((jiffies - slave->dev->trans_start) <= the_delta_in_ticks)) { @@ -2673,10 +2673,10 @@ slave->dev->name); } - write_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); } } else { - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); if ((slave != bond->current_slave) && (bond->current_arp_slave == NULL) && (((jiffies - slave->dev->last_rx) >= @@ -2691,7 +2691,7 @@ * down - this gives each slave a chance to * tx/rx traffic before being taken out */ - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); slave->link = BOND_LINK_DOWN; if (slave->link_failure_count < UINT_MAX) { slave->link_failure_count++; @@ -2702,14 +2702,14 @@ master->name, slave->dev->name); } else { - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); } } } - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); slave = bond->current_slave; - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); if (slave != NULL) { @@ -2735,10 +2735,10 @@ "active interface %s, disabling it", master->name, slave->dev->name); - write_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); reselect_active_interface(bond); slave = bond->current_slave; - write_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); bond->current_arp_slave = slave; if (slave != NULL) 
{ slave->jiffies = jiffies; @@ -2756,9 +2756,9 @@ bond->primary_slave->dev->name); /* primary is up so switch to it */ - write_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); change_active_interface(bond, bond->primary_slave); - write_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); slave = bond->primary_slave; slave->jiffies = jiffies; } else { @@ -2825,7 +2825,7 @@ } } - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); mod_timer(&bond->arp_timer, next_timer); } @@ -2849,11 +2849,11 @@ info->num_slaves = 0; info->miimon = miimon; - read_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); for (slave = bond->prev; slave != (slave_t *)bond; slave = slave->prev) { info->num_slaves++; } - read_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -2869,13 +2869,13 @@ return -ENODEV; } - read_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); for (slave = bond->prev; slave != (slave_t *)bond && cur_ndx < info->slave_id; slave = slave->prev) { cur_ndx++; } - read_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); if (slave != (slave_t *)bond) { strcpy(info->slave_name, slave->dev->name); @@ -3098,15 +3098,15 @@ return 0; } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); slave = start_at = bond->current_slave; - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); if (slave == NULL) { /* we're at the root, get the first slave */ /* no suitable interface, frame not sent */ - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); dev_kfree_skb(skb); return 0; } @@ -3138,7 +3138,7 @@ dev_kfree_skb(skb); /* frame sent to all suitable interfaces */ - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -3152,16 +3152,16 @@ return 0; } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); slave = start_at = bond->current_slave; - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); if 
(slave == NULL) { /* we're at the root, get the first slave */ /* no suitable interface, frame not sent */ dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -3174,18 +3174,18 @@ skb->priority = 1; dev_queue_xmit(skb); - write_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); bond->current_slave = slave->next; - write_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } } while ((slave = slave->next) != start_at); /* no suitable interface, frame not sent */ dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -3206,14 +3206,14 @@ return 0; } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); slave = bond->prev; /* we're at the root, get the first slave */ if (bond->slave_cnt == 0) { /* no suitable interface, frame not sent */ dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -3234,14 +3234,14 @@ skb->priority = 1; dev_queue_xmit(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } } while ((slave = slave->next) != start_at); /* no suitable interface, frame not sent */ dev_kfree_skb(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } @@ -3292,19 +3292,19 @@ } } - read_lock(&bond->lock); + spin_lock_bh(&bond->lock); - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); if (bond->current_slave != NULL) { /* one usable interface */ skb->dev = bond->current_slave->dev; - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); skb->priority = 1; ret = dev_queue_xmit(skb); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->lock); return 0; } else { - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); } /* no suitable interface, frame not sent */ @@ -3312,7 +3312,7 @@ printk(KERN_INFO "There was no suitable interface, so we don't transmit\n"); #endif dev_kfree_skb(skb); - read_unlock(&bond->lock); + 
spin_unlock_bh(&bond->lock); return 0; } @@ -3324,7 +3324,7 @@ memset(stats, 0, sizeof(struct net_device_stats)); - read_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); for (slave = bond->prev; slave != (slave_t *)bond; slave = slave->prev) { sstats = slave->dev->get_stats(slave->dev); @@ -3357,7 +3357,7 @@ } - read_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); return stats; } @@ -3373,7 +3373,7 @@ /* make sure the bond won't be taken away */ read_lock(&dev_base_lock); - read_lock_bh(&bond->lock); + spin_lock_bh(&bond->lock); if (*pos == 0) { return SEQ_START_TOKEN; @@ -3409,7 +3409,7 @@ { struct bonding *bond = seq->private; - read_unlock_bh(&bond->lock); + spin_unlock_bh(&bond->lock); read_unlock(&dev_base_lock); } @@ -3417,9 +3417,9 @@ { struct slave *curr; - read_lock(&bond->ptrlock); + spin_lock(&bond->ptrlock); curr = bond->current_slave; - read_unlock(&bond->ptrlock); + spin_unlock(&bond->ptrlock); seq_printf(seq, "Bonding Mode: %s\n", bond_mode_name()); @@ -3933,8 +3933,8 @@ bond = dev->priv; /* initialize rwlocks */ - rwlock_init(&bond->lock); - rwlock_init(&bond->ptrlock); + spinlock_init(&bond->lock); + spinlock_init(&bond->ptrlock); /* Initialize pointers */ bond->next = bond->prev = (slave_t *)bond; diff -Nru a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h --- a/drivers/net/bonding/bonding.h Mon Dec 15 04:26:39 2003 +++ b/drivers/net/bonding/bonding.h Mon Dec 15 04:26:39 2003 @@ -95,8 +95,8 @@ slave_t *primary_slave; slave_t *current_arp_slave; __s32 slave_cnt; - rwlock_t lock; - rwlock_t ptrlock; + spinlock_t lock; + spinlock_t ptrlock; struct timer_list mii_timer; struct timer_list arp_timer; struct net_device_stats stats; @@ -119,7 +119,7 @@ /** * These functions can be used for iterating the slave list * (which is circular) - * Caller must hold bond lock for read + * Caller must hold bond lock */ extern inline struct slave* bond_get_first_slave(struct bonding *bond) @@ -132,7 +132,7 @@ } /** - * Caller must hold bond 
lock for read + * Caller must hold bond lock */ extern inline struct slave* bond_get_next_slave(struct bonding *bond, struct slave *slave) @@ -147,7 +147,7 @@ /** * Returns NULL if the net_device does not belong to any of the bond's slaves * - * Caller must hold bond lock for read + * Caller must hold bond lock */ extern inline struct slave* bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) diff -Nru a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c --- a/drivers/bluetooth/hci_usb.c Mon Dec 15 04:26:43 2003 +++ b/drivers/bluetooth/hci_usb.c Mon Dec 15 04:26:43 2003 @@ -732,6 +732,7 @@ struct _urb *_urb = container_of(urb, struct _urb, urb); struct hci_usb *husb = (void *) urb->context; struct hci_dev *hdev = &husb->hdev; + unsigned long flags; BT_DBG("%s urb %p status %d flags %x", hdev->name, urb, urb->status, urb->transfer_flags); diff -Nru a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c --- a/drivers/ieee1394/highlevel.c Mon Dec 15 04:26:47 2003 +++ b/drivers/ieee1394/highlevel.c Mon Dec 15 04:26:47 2003 @@ -239,7 +239,7 @@ INIT_LIST_HEAD(&hl->addr_list); INIT_LIST_HEAD(&hl->host_info_list); - spinlock_init(&hl->host_info_lock); + spin_lock_init(&hl->host_info_lock); spin_lock_irqsave(&hl_drivers_lock, flags); list_add_tail(&hl->hl_list, &hl_drivers); @@ -491,9 +491,9 @@ struct list_head *entry; unsigned int partlength; int rcode = RCODE_ADDRESS_ERROR; - unsigned long flags; + unsigned long cpu_flags; - spin_lock_irqsave(&addr_space_lock, flags); + spin_lock_irqsave(&addr_space_lock, cpu_flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -522,7 +522,7 @@ as = list_entry(entry, struct hpsb_address_serve, as_list); } - spin_unlock_irqrestore(&addr_space_lock, flags); + spin_unlock_irqrestore(&addr_space_lock, cpu_flags); if (length && (rcode == RCODE_COMPLETE)) { rcode = RCODE_ADDRESS_ERROR; @@ -538,9 +538,9 @@ struct list_head *entry; unsigned int partlength; int rcode = 
RCODE_ADDRESS_ERROR; - unsigned long flags; + unsigned long cpu_flags; - spin_lock_irqsave(&addr_space_lock, flags); + spin_lock_irqsave(&addr_space_lock, cpu_flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -569,7 +569,7 @@ as = list_entry(entry, struct hpsb_address_serve, as_list); } - spin_unlock_irqrestore(&addr_space_lock, flags); + spin_unlock_irqrestore(&addr_space_lock, cpu_flags); if (length && (rcode == RCODE_COMPLETE)) { rcode = RCODE_ADDRESS_ERROR; @@ -585,9 +585,9 @@ struct hpsb_address_serve *as; struct list_head *entry; int rcode = RCODE_ADDRESS_ERROR; - unsigned long flags; + unsigned long cpu_flags; - spin_lock_irqsave(&addr_space_lock, flags); + spin_lock_irqsave(&addr_space_lock, cpu_flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -608,7 +608,7 @@ as = list_entry(entry, struct hpsb_address_serve, as_list); } - spin_unlock_irqrestore(&addr_space_lock, flags); + spin_unlock_irqrestore(&addr_space_lock, cpu_flags); return rcode; } @@ -619,9 +619,9 @@ struct hpsb_address_serve *as; struct list_head *entry; int rcode = RCODE_ADDRESS_ERROR; - unsigned long flags; + unsigned long cpu_flags; - spin_lock_irqsave(&addr_space_lock, flags); + spin_lock_irqsave(&addr_space_lock, cpu_flags); entry = addr_space.next; as = list_entry(entry, struct hpsb_address_serve, as_list); @@ -643,7 +643,7 @@ as = list_entry(entry, struct hpsb_address_serve, as_list); } - spin_unlock_irqrestore(&addr_space_lock, flags); + spin_unlock_irqrestore(&addr_space_lock, cpu_flags); return rcode; } diff -Nru a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c --- a/drivers/net/bonding/bond_main.c Mon Dec 15 04:26:47 2003 +++ b/drivers/net/bonding/bond_main.c Mon Dec 15 04:26:47 2003 @@ -3933,8 +3933,8 @@ bond = dev->priv; /* initialize rwlocks */ - spinlock_init(&bond->lock); - spinlock_init(&bond->ptrlock); + spin_lock_init(&bond->lock); + spin_lock_init(&bond->ptrlock); /* 
Initialize pointers */ bond->next = bond->prev = (slave_t *)bond; diff -Nru a/arch/mips/kernel/ioctl32.c b/arch/mips/kernel/ioctl32.c --- a/arch/mips/kernel/ioctl32.c Mon Dec 15 04:26:51 2003 +++ b/arch/mips/kernel/ioctl32.c Mon Dec 15 04:26:51 2003 @@ -358,15 +358,15 @@ if (copy_from_user(&ifr32, uir32, sizeof(struct ifreq32))) return -EFAULT; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_index(ifr32.ifr_ifindex); if (!dev) { - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return -ENODEV; } strcpy(ifr32.ifr_name, dev->name); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (copy_to_user(uir32, &ifr32, sizeof(struct ifreq32))) return -EFAULT; diff -Nru a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c --- a/arch/s390/kernel/compat_ioctl.c Mon Dec 15 04:26:51 2003 +++ b/arch/s390/kernel/compat_ioctl.c Mon Dec 15 04:26:51 2003 @@ -151,15 +151,15 @@ if (copy_from_user(&ifr32, uir32, sizeof(struct ifreq32))) return -EFAULT; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_index(ifr32.ifr_ifindex); if (!dev) { - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return -ENODEV; } strcpy(ifr32.ifr_name, dev->name); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (copy_to_user(uir32, &ifr32, sizeof(struct ifreq32))) return -EFAULT; diff -Nru a/arch/sparc64/solaris/ioctl.c b/arch/sparc64/solaris/ioctl.c --- a/arch/sparc64/solaris/ioctl.c Mon Dec 15 04:26:51 2003 +++ b/arch/sparc64/solaris/ioctl.c Mon Dec 15 04:26:51 2003 @@ -678,9 +678,9 @@ struct net_device *d; int i = 0; - read_lock_bh(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (d = dev_base; d; d = d->next) i++; - read_unlock_bh(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (put_user (i, (int *)A(arg))) return -EFAULT; diff -Nru a/drivers/net/Space.c b/drivers/net/Space.c --- a/drivers/net/Space.c Mon Dec 15 04:26:51 2003 +++ b/drivers/net/Space.c Mon 
Dec 15 04:26:51 2003 @@ -460,14 +460,14 @@ * The @dev_base list is protected by @dev_base_lock and the rtln * semaphore. * - * Pure readers hold dev_base_lock for reading. + * Pure readers hold dev_base_lock. * * Writers must hold the rtnl semaphore while they loop through the - * dev_base list, and hold dev_base_lock for writing when they do the + * dev_base list, and hold dev_base_lock when they do the * actual updates. This allows pure readers to access the list even * while a writer is preparing to update it. * - * To put it another way, dev_base_lock is held for writing only to + * To put it another way, dev_base_lock is held only to * protect against pure readers; the rtnl semaphore provides the * protection against other writers. * @@ -476,7 +476,7 @@ * semaphore held. */ struct net_device *dev_base; -rwlock_t dev_base_lock = RW_LOCK_UNLOCKED; +spinlock_t dev_base_lock = SPIN_LOCK_UNLOCKED; EXPORT_SYMBOL(dev_base); EXPORT_SYMBOL(dev_base_lock); diff -Nru a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c --- a/drivers/net/bonding/bond_main.c Mon Dec 15 04:26:51 2003 +++ b/drivers/net/bonding/bond_main.c Mon Dec 15 04:26:51 2003 @@ -3372,8 +3372,8 @@ struct slave *slave; /* make sure the bond won't be taken away */ - read_lock(&dev_base_lock); - spin_lock_bh(&bond->lock); + spin_lock_bh(&dev_base_lock); + spin_lock(&bond->lock); if (*pos == 0) { return SEQ_START_TOKEN; @@ -3409,8 +3409,8 @@ { struct bonding *bond = seq->private; - spin_unlock_bh(&bond->lock); - read_unlock(&dev_base_lock); + spin_unlock(&bond->lock); + spin_unlock_bh(&dev_base_lock); } static void bond_info_show_master(struct seq_file *seq, struct bonding *bond) diff -Nru a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c --- a/drivers/net/wireless/strip.c Mon Dec 15 04:26:51 2003 +++ b/drivers/net/wireless/strip.c Mon Dec 15 04:26:51 2003 @@ -1976,7 +1976,7 @@ && memcmp(&strip_info->true_dev_addr, zero_address.c, sizeof(zero_address))) { struct net_device *dev; - 
read_lock_bh(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = dev_base; while (dev) { if (dev->type == strip_info->dev->type && @@ -1986,12 +1986,12 @@ printk(KERN_INFO "%s: Transferred packet ownership to %s.\n", strip_info->dev->name, dev->name); - read_unlock_bh(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return (dev); } dev = dev->next; } - read_unlock_bh(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } return (strip_info->dev); } diff -Nru a/drivers/parisc/led.c b/drivers/parisc/led.c --- a/drivers/parisc/led.c Mon Dec 15 04:26:51 2003 +++ b/drivers/parisc/led.c Mon Dec 15 04:26:51 2003 @@ -363,8 +363,8 @@ rx_total = tx_total = 0; /* we are running as tasklet, so locking dev_base - * for reading should be OK */ - read_lock(&dev_base_lock); + * should be OK */ + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev; dev = dev->next) { struct net_device_stats *stats; struct in_device *in_dev = __in_dev_get(dev); @@ -378,7 +378,7 @@ rx_total += stats->rx_packets; tx_total += stats->tx_packets; } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); retval = 0; diff -Nru a/include/linux/netdevice.h b/include/linux/netdevice.h --- a/include/linux/netdevice.h Mon Dec 15 04:26:51 2003 +++ b/include/linux/netdevice.h Mon Dec 15 04:26:51 2003 @@ -492,7 +492,7 @@ extern struct net_device loopback_dev; /* The loopback */ extern struct net_device *dev_base; /* All devices */ -extern rwlock_t dev_base_lock; /* Device list lock */ +extern spinlock_t dev_base_lock; /* Device list lock */ extern int netdev_boot_setup_add(char *name, struct ifmap *map); extern int netdev_boot_setup_check(struct net_device *dev); diff -Nru a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c --- a/net/8021q/vlanproc.c Mon Dec 15 04:26:51 2003 +++ b/net/8021q/vlanproc.c Mon Dec 15 04:26:51 2003 @@ -253,7 +253,7 @@ struct net_device *dev; loff_t i = 1; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); if (*pos == 0) return SEQ_START_TOKEN; @@ -275,7 +275,7 @@ 
static void vlan_seq_stop(struct seq_file *seq, void *v) { - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static int vlan_seq_show(struct seq_file *seq, void *v) diff -Nru a/net/core/dev.c b/net/core/dev.c --- a/net/core/dev.c Mon Dec 15 04:26:51 2003 +++ b/net/core/dev.c Mon Dec 15 04:26:51 2003 @@ -442,11 +442,11 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_name(name); if (dev) dev_hold(dev); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev; } @@ -473,9 +473,9 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_name(name); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev != NULL; } @@ -515,11 +515,11 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_index(ifindex); if (dev) dev_hold(dev); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev; } @@ -591,11 +591,11 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_flags(if_flags, mask); if (dev) dev_hold(dev); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev; } @@ -723,9 +723,9 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_name(name); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (!dev && capable(CAP_SYS_MODULE)) request_module("%s", name); @@ -860,10 +860,10 @@ if (dev) { dev_do_clear_fastroute(dev); } else { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev; dev = dev->next) dev_do_clear_fastroute(dev); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } } #endif @@ -1766,15 +1766,15 @@ if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = 
__dev_get_by_index(ifr.ifr_ifindex); if (!dev) { - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return -ENODEV; } strcpy(ifr.ifr_name, dev->name); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) return -EFAULT; @@ -1855,7 +1855,7 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN; } @@ -1867,7 +1867,7 @@ void dev_seq_stop(struct seq_file *seq, void *v) { - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) @@ -2453,9 +2453,9 @@ case SIOCGIFINDEX: case SIOCGIFTXQLEN: dev_load(ifr.ifr_name); - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); ret = dev_ifsioc(&ifr, cmd); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (!ret) { if (colon) *colon = ':'; @@ -2707,11 +2707,11 @@ dev->next = NULL; dev_init_scheduler(dev); - write_lock_bh(&dev_base_lock); + spin_lock_bh(&dev_base_lock); *dp = dev; dev_hold(dev); dev->reg_state = NETREG_REGISTERING; - write_unlock_bh(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); /* Notify protocols, that a new device appeared. */ notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); @@ -2929,9 +2929,9 @@ /* And unlink it from device chain. 
*/ for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) { if (d == dev) { - write_lock_bh(&dev_base_lock); + spin_lock_bh(&dev_base_lock); *dp = d->next; - write_unlock_bh(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); break; } } diff -Nru a/net/core/dev_mcast.c b/net/core/dev_mcast.c --- a/net/core/dev_mcast.c Mon Dec 15 04:26:51 2003 +++ b/net/core/dev_mcast.c Mon Dec 15 04:26:51 2003 @@ -227,9 +227,9 @@ int len = 0; struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev; dev = dev->next) { - spin_lock_bh(&dev->xmit_lock); + spin_lock(&dev->xmit_lock); for (m = dev->mc_list; m; m = m->next) { int i; @@ -247,16 +247,16 @@ begin = pos; } if (pos > offset + length) { - spin_unlock_bh(&dev->xmit_lock); + spin_unlock(&dev->xmit_lock); goto done; } } - spin_unlock_bh(&dev->xmit_lock); + spin_unlock(&dev->xmit_lock); } *eof = 1; done: - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); *start = buffer + (offset - begin); len -= (offset - begin); if (len > length) diff -Nru a/net/core/net-sysfs.c b/net/core/net-sysfs.c --- a/net/core/net-sysfs.c Mon Dec 15 04:26:51 2003 +++ b/net/core/net-sysfs.c Mon Dec 15 04:26:51 2003 @@ -36,10 +36,10 @@ struct net_device *net = to_net_dev(cd); ssize_t ret = -EINVAL; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); if (dev_isalive(net)) ret = (*format)(net, buf); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return ret; } @@ -111,10 +111,10 @@ struct net_device *net = to_net_dev(dev); ssize_t ret = -EINVAL; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); if (dev_isalive(net)) ret = format_addr(buf, net->dev_addr, net->addr_len); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return ret; } @@ -201,13 +201,13 @@ offset % sizeof(unsigned long) != 0) WARN_ON(1); - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); if (dev_isalive(dev) && dev->get_stats && (stats = (*dev->get_stats)(dev))) ret = 
sprintf(buf, fmt_ulong, *(unsigned long *)(((u8 *) stats) + offset)); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return ret; } @@ -287,11 +287,11 @@ const struct iw_statistics *iw; ssize_t ret = -EINVAL; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); if (dev_isalive(dev) && dev->get_wireless_stats && (iw = dev->get_wireless_stats(dev)) != NULL) ret = (*format)(iw, buf); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return ret; } diff -Nru a/net/core/rtnetlink.c b/net/core/rtnetlink.c --- a/net/core/rtnetlink.c Mon Dec 15 04:26:51 2003 +++ b/net/core/rtnetlink.c Mon Dec 15 04:26:51 2003 @@ -220,14 +220,14 @@ int s_idx = cb->args[0]; struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { if (idx < s_idx) continue; if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 0) <= 0) break; } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); cb->args[0] = idx; return skb->len; diff -Nru a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c --- a/net/decnet/af_decnet.c Mon Dec 15 04:26:51 2003 +++ b/net/decnet/af_decnet.c Mon Dec 15 04:26:51 2003 @@ -738,14 +738,14 @@ if (!(saddr->sdn_flags & SDF_WILD)) { if (dn_ntohs(saddr->sdn_nodeaddrl)) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for(dev = dev_base; dev; dev = dev->next) { if (!dev->dn_ptr) continue; if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) break; } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (dev == NULL) return -EADDRNOTAVAIL; } diff -Nru a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c --- a/net/decnet/dn_dev.c Mon Dec 15 04:26:51 2003 +++ b/net/decnet/dn_dev.c Mon Dec 15 04:26:51 2003 @@ -771,7 +771,7 @@ s_idx = cb->args[0]; s_dn_idx = dn_idx = cb->args[1]; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for(dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { if (idx < 
s_idx) continue; @@ -792,7 +792,7 @@ } } done: - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); cb->args[0] = idx; cb->args[1] = dn_idx; @@ -832,9 +832,9 @@ dev = dn_dev_get_default(); last_chance: if (dev) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); rv = dn_dev_get_first(dev, addr); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); dev_put(dev); if (rv == 0 || dev == &loopback_dev) return rv; @@ -1359,10 +1359,10 @@ { if (*pos) { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = dn_dev_get_idx(seq, *pos - 1); if (dev == NULL) - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev; } return SEQ_START_TOKEN; @@ -1378,7 +1378,7 @@ } else { dev = dn_dev_get_next(seq, dev); if (dev == NULL) - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } ++*pos; return dev; @@ -1387,7 +1387,7 @@ static void dn_dev_seq_stop(struct seq_file *seq, void *v) { if (v && v != SEQ_START_TOKEN) - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static char *dn_type2asc(char type) diff -Nru a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c --- a/net/decnet/dn_fib.c Mon Dec 15 04:26:51 2003 +++ b/net/decnet/dn_fib.c Mon Dec 15 04:26:51 2003 @@ -644,7 +644,7 @@ ASSERT_RTNL(); /* Scan device list */ - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for(dev = dev_base; dev; dev = dev->next) { dn_db = dev->dn_ptr; if (dn_db == NULL) @@ -656,7 +656,7 @@ } } } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (found_it == 0) { fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); diff -Nru a/net/decnet/dn_route.c b/net/decnet/dn_route.c --- a/net/decnet/dn_route.c Mon Dec 15 04:26:51 2003 +++ b/net/decnet/dn_route.c Mon Dec 15 04:26:51 2003 @@ -848,7 +848,7 @@ int best_match = 0; int ret; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { if (ifa->ifa_scope > 
scope) continue; @@ -862,7 +862,7 @@ if (best_match == 0) saddr = ifa->ifa_local; } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return saddr; } @@ -928,14 +928,14 @@ dev_put(dev_out); goto out; } - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for(dev_out = dev_base; dev_out; dev_out = dev_out->next) { if (!dev_out->dn_ptr) continue; if (dn_dev_islocal(dev_out, oldflp->fld_src)) break; } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); if (dev_out == NULL) goto out; dev_hold(dev_out); diff -Nru a/net/ipv4/devinet.c b/net/ipv4/devinet.c --- a/net/ipv4/devinet.c Mon Dec 15 04:26:51 2003 +++ b/net/ipv4/devinet.c Mon Dec 15 04:26:51 2003 @@ -356,11 +356,11 @@ { struct net_device *dev; struct in_device *in_dev = NULL; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); dev = __dev_get_by_index(ifindex); if (dev) in_dev = in_dev_get(dev); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return in_dev; } @@ -782,7 +782,7 @@ in this case. It is importnat that lo is the first interface in dev_base list. 
*/ - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); read_lock(&inetdev_lock); for (dev = dev_base; dev; dev = dev->next) { if ((in_dev = __in_dev_get(dev)) == NULL) @@ -801,7 +801,7 @@ } out_unlock_both: read_unlock(&inetdev_lock); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); out: return addr; out_unlock_inetdev: @@ -966,7 +966,7 @@ int s_ip_idx, s_idx = cb->args[0]; s_ip_idx = ip_idx = cb->args[1]; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { if (idx < s_idx) continue; @@ -995,7 +995,7 @@ } done: - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); cb->args[0] = idx; cb->args[1] = ip_idx; @@ -1042,7 +1042,7 @@ ipv4_devconf.accept_redirects = !on; ipv4_devconf_dflt.forwarding = on; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev; dev = dev->next) { struct in_device *in_dev; read_lock(&inetdev_lock); @@ -1051,7 +1051,7 @@ in_dev->cnf.forwarding = on; read_unlock(&inetdev_lock); } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); rt_cache_flush(0); } diff -Nru a/net/ipv4/igmp.c b/net/ipv4/igmp.c --- a/net/ipv4/igmp.c Mon Dec 15 04:26:51 2003 +++ b/net/ipv4/igmp.c Mon Dec 15 04:26:51 2003 @@ -2166,7 +2166,7 @@ static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } @@ -2190,7 +2190,7 @@ state->in_dev = NULL; } state->dev = NULL; - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static int igmp_mc_seq_show(struct seq_file *seq, void *v) @@ -2341,7 +2341,7 @@ static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); return *pos ? 
igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } @@ -2369,7 +2369,7 @@ state->idev = NULL; } state->dev = NULL; - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static int igmp_mcf_seq_show(struct seq_file *seq, void *v) diff -Nru a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c --- a/net/ipv6/addrconf.c Mon Dec 15 04:26:51 2003 +++ b/net/ipv6/addrconf.c Mon Dec 15 04:26:51 2003 @@ -435,7 +435,7 @@ return; } - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev=dev_base; dev; dev=dev->next) { read_lock(&addrconf_lock); idev = __in6_dev_get(dev); @@ -445,7 +445,7 @@ } read_unlock(&addrconf_lock); } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } @@ -802,7 +802,7 @@ * dev == NULL or search failed for specified dev */ - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); read_lock(&addrconf_lock); for (dev = dev_base; dev; dev=dev->next) { idev = __in6_dev_get(dev); @@ -838,7 +838,7 @@ out_unlock_base: read_unlock(&addrconf_lock); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); out: err = -EADDRNOTAVAIL; @@ -2564,7 +2564,7 @@ s_idx = cb->args[0]; s_ip_idx = ip_idx = cb->args[1]; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { if (idx < s_idx) @@ -2625,7 +2625,7 @@ read_unlock_bh(&idev->lock); in6_dev_put(idev); } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); cb->args[0] = idx; cb->args[1] = ip_idx; return skb->len; @@ -2733,7 +2733,7 @@ struct net_device *dev; struct inet6_dev *idev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { if (idx < s_idx) continue; @@ -2745,7 +2745,7 @@ if (err <= 0) break; } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); cb->args[0] = idx; return skb->len; diff -Nru a/net/ipv6/anycast.c b/net/ipv6/anycast.c --- a/net/ipv6/anycast.c Mon Dec 15 04:26:51 2003 +++ b/net/ipv6/anycast.c Mon Dec 
15 04:26:51 2003 @@ -431,11 +431,11 @@ { if (dev) return ipv6_chk_acast_dev(dev, addr); - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev=dev_base; dev; dev=dev->next) if (ipv6_chk_acast_dev(dev, addr)) break; - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev != 0; } @@ -506,7 +506,7 @@ static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); return ac6_get_idx(seq, *pos); } @@ -525,7 +525,7 @@ read_unlock_bh(&state->idev->lock); in6_dev_put(state->idev); } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static int ac6_seq_show(struct seq_file *seq, void *v) diff -Nru a/net/ipv6/mcast.c b/net/ipv6/mcast.c --- a/net/ipv6/mcast.c Mon Dec 15 04:26:51 2003 +++ b/net/ipv6/mcast.c Mon Dec 15 04:26:51 2003 @@ -2137,7 +2137,7 @@ static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); return igmp6_mc_get_idx(seq, *pos); } @@ -2158,7 +2158,7 @@ state->idev = NULL; } state->dev = NULL; - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static int igmp6_mc_seq_show(struct seq_file *seq, void *v) @@ -2297,7 +2297,7 @@ static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); return *pos ? 
igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } @@ -2325,7 +2325,7 @@ state->idev = NULL; } state->dev = NULL; - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) diff -Nru a/net/netrom/nr_route.c b/net/netrom/nr_route.c --- a/net/netrom/nr_route.c Mon Dec 15 04:26:51 2003 +++ b/net/netrom/nr_route.c Mon Dec 15 04:26:51 2003 @@ -593,7 +593,7 @@ { struct net_device *dev, *first = NULL; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev != NULL; dev = dev->next) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) @@ -601,7 +601,7 @@ } if (first) dev_hold(first); - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return first; } @@ -613,7 +613,7 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev != NULL; dev = dev->next) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { dev_hold(dev); @@ -621,7 +621,7 @@ } } out: - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev; } diff -Nru a/net/rose/rose_route.c b/net/rose/rose_route.c --- a/net/rose/rose_route.c Mon Dec 15 04:26:51 2003 +++ b/net/rose/rose_route.c Mon Dec 15 04:26:51 2003 @@ -609,13 +609,13 @@ { struct net_device *dev, *first = NULL; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev != NULL; dev = dev->next) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return first; } @@ -627,7 +627,7 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev != NULL; dev = dev->next) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && 
rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { dev_hold(dev); @@ -635,7 +635,7 @@ } } out: - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev; } @@ -643,13 +643,13 @@ { struct net_device *dev; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev != NULL; dev = dev->next) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) goto out; } out: - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); return dev != NULL; } diff -Nru a/net/sched/sch_api.c b/net/sched/sch_api.c --- a/net/sched/sch_api.c Mon Dec 15 04:26:51 2003 +++ b/net/sched/sch_api.c Mon Dec 15 04:26:51 2003 @@ -802,7 +802,7 @@ s_idx = cb->args[0]; s_q_idx = q_idx = cb->args[1]; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { if (idx < s_idx) continue; @@ -823,7 +823,7 @@ } done: - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); cb->args[0] = idx; cb->args[1] = q_idx; diff -Nru a/net/sctp/protocol.c b/net/sctp/protocol.c --- a/net/sctp/protocol.c Mon Dec 15 04:26:51 2003 +++ b/net/sctp/protocol.c Mon Dec 15 04:26:51 2003 @@ -176,14 +176,14 @@ struct list_head *pos; struct sctp_af *af; - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev; dev = dev->next) { __list_for_each(pos, &sctp_address_families) { af = list_entry(pos, struct sctp_af, list); af->copy_addrlist(&sctp_local_addr_list, dev); } } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); } static void sctp_get_local_addr_list(void) diff -Nru a/net/x25/af_x25.c b/net/x25/af_x25.c --- a/net/x25/af_x25.c Mon Dec 15 04:26:51 2003 +++ b/net/x25/af_x25.c Mon Dec 15 04:26:51 2003 @@ -1390,7 +1390,7 @@ /* * Register any pre existing devices. 
*/ - read_lock(&dev_base_lock); + spin_lock_bh(&dev_base_lock); for (dev = dev_base; dev; dev = dev->next) { if ((dev->flags & IFF_UP) && (dev->type == ARPHRD_X25 #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) @@ -1399,7 +1399,7 @@ )) x25_link_device_up(dev); } - read_unlock(&dev_base_lock); + spin_unlock_bh(&dev_base_lock); #endif /* MODULE */ return 0; } diff -Nru a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c --- a/arch/ia64/kernel/unwind.c Mon Dec 15 04:26:55 2003 +++ b/arch/ia64/kernel/unwind.c Mon Dec 15 04:26:55 2003 @@ -20,11 +20,11 @@ * SMP conventions: * o updates to the global unwind data (in structure "unw") are serialized * by the unw.lock spinlock - * o each unwind script has its own read-write lock; a thread must acquire - * a read lock before executing a script and must acquire a write lock + * o each unwind script has its own spinlock; a thread must acquire + * the lock before executing a script and must acquire the lock * before modifying a script - * o if both the unw.lock spinlock and a script's read-write lock must be - * acquired, then the read-write lock must be acquired first. + * o if both the unw.lock spinlock and a script's spinlock must be + * acquired, then the script's spinlock must be acquired first. */ #include #include @@ -1180,11 +1180,11 @@ static inline long cache_match (struct unw_script *script, unsigned long ip, unsigned long pr) { - read_lock(&script->lock); + spin_lock(&script->lock); if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0) - /* keep the read lock... */ + /* keep the lock... */ return 1; - read_unlock(&script->lock); + spin_unlock(&script->lock); return 0; } @@ -1228,7 +1228,7 @@ } /* - * On returning, a write lock for the SCRIPT is still being held. + * On returning, the spinlock for the SCRIPT is still being held. 
*/ static inline struct unw_script * script_new (unsigned long ip) @@ -1254,11 +1254,11 @@ /* * We'd deadlock here if we interrupted a thread that is holding a read lock on - * script->lock. Thus, if the write_trylock() fails, we simply bail out. The - * alternative would be to disable interrupts whenever we hold a read-lock, but + * script->lock. Thus, if the spin_trylock() fails, we simply bail out. The + * alternative would be to disable interrupts whenever we hold the lock, but * that seems silly. */ - if (!write_trylock(&script->lock)) + if (!spin_trylock(&script->lock)) return NULL; spin_lock(&unw.lock); @@ -1311,9 +1311,7 @@ script->pr_mask = sr->pr_mask; script->pr_val = sr->pr_val; /* - * We could down-grade our write-lock on script->lock here but - * the rwlock API doesn't offer atomic lock downgrading, so - * we'll just keep the write-lock and release it later when + * We just keep the lock and release it later when * we're done using the script. */ } @@ -1780,7 +1778,6 @@ static int find_save_locs (struct unw_frame_info *info) { - int have_write_lock = 0; struct unw_script *scr; if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) { @@ -1800,17 +1797,13 @@ __FUNCTION__, info->ip); return -1; } - have_write_lock = 1; } info->hint = scr->hint; info->prev_script = scr - unw.cache; run_script(scr, info); - if (have_write_lock) - write_unlock(&scr->lock); - else - read_unlock(&scr->lock); + spin_unlock(&scr->lock); return 0; } @@ -2107,14 +2100,14 @@ || tmp->ip < table->start || tmp->ip >= table->end) continue; - write_lock(&tmp->lock); + spin_lock(&tmp->lock); { if (tmp->ip >= table->start && tmp->ip < table->end) { unw.hash[index] = tmp->coll_chain; tmp->ip = 0; } } - write_unlock(&tmp->lock); + spin_unlock(&tmp->lock); } kfree(table); @@ -2208,7 +2201,7 @@ if (i > 0) unw.cache[i].lru_chain = (i - 1); unw.cache[i].coll_chain = -1; - unw.cache[i].lock = RW_LOCK_UNLOCKED; + spin_lock_init(&unw.cache[i].lock); } unw.lru_head 
= UNW_CACHE_SIZE - 1; unw.lru_tail = 0; diff -Nru a/arch/ia64/kernel/unwind_i.h b/arch/ia64/kernel/unwind_i.h --- a/arch/ia64/kernel/unwind_i.h Mon Dec 15 04:26:55 2003 +++ b/arch/ia64/kernel/unwind_i.h Mon Dec 15 04:26:55 2003 @@ -153,7 +153,7 @@ unsigned long ip; /* ip this script is for */ unsigned long pr_mask; /* mask of predicates script depends on */ unsigned long pr_val; /* predicate values this script is for */ - rwlock_t lock; + spinlock_t lock; unsigned int flags; /* see UNW_FLAG_* in unwind.h */ unsigned short lru_chain; /* used for least-recently-used chain */ unsigned short coll_chain; /* used for hash collisions */