## Automatically generated incremental diff
## From: linux-2.4.21-bk35
## To: linux-2.4.21-bk36
## Robot: $Id: make-incremental-diff,v 1.11 2002/02/20 02:59:33 hpa Exp $
diff -urN linux-2.4.21-bk35/Makefile linux-2.4.21-bk36/Makefile
--- linux-2.4.21-bk35/Makefile	2003-08-22 02:54:02.000000000 -0700
+++ linux-2.4.21-bk36/Makefile	2003-08-22 02:54:09.000000000 -0700
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 21
-EXTRAVERSION = -bk35
+EXTRAVERSION = -bk36
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
diff -urN linux-2.4.21-bk35/net/ipv4/igmp.c linux-2.4.21-bk36/net/ipv4/igmp.c
--- linux-2.4.21-bk35/net/ipv4/igmp.c	2003-08-22 02:54:07.000000000 -0700
+++ linux-2.4.21-bk36/net/ipv4/igmp.c	2003-08-22 02:54:15.000000000 -0700
@@ -373,7 +373,7 @@
 	struct net_device *dev = pmc->interface->dev;
 	struct igmpv3_report *pih;
 	struct igmpv3_grec *pgr = 0;
-	struct ip_sf_list *psf, *psf_next, *psf_prev, *psf_list;
+	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
 	int scount, first, isquery, truncate;
 
 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
@@ -384,9 +384,9 @@
 	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
 		    type == IGMPV3_CHANGE_TO_EXCLUDE;
 
-	psf_list = sdeleted ? pmc->tomb : pmc->sources;
+	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
 
-	if (!psf_list) {
+	if (!*psf_list) {
 		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
 		    type == IGMPV3_BLOCK_OLD_SOURCES)
 			return skb;
@@ -417,7 +417,7 @@
 	first = 1;
 	scount = 0;
 	psf_prev = 0;
-	for (psf=psf_list; psf; psf=psf_next) {
+	for (psf=*psf_list; psf; psf=psf_next) {
 		u32 *psrc;
 
 		psf_next = psf->sf_next;
@@ -457,7 +457,7 @@
 			if (psf_prev)
 				psf_prev->sf_next = psf->sf_next;
 			else
-				pmc->tomb = psf->sf_next;
+				*psf_list = psf->sf_next;
 			kfree(psf);
 			continue;
 		}
diff -urN linux-2.4.21-bk35/net/ipv6/mcast.c linux-2.4.21-bk36/net/ipv6/mcast.c
--- linux-2.4.21-bk35/net/ipv6/mcast.c	2003-08-22 02:54:07.000000000 -0700
+++ linux-2.4.21-bk36/net/ipv6/mcast.c	2003-08-22 02:54:15.000000000 -0700
@@ -659,10 +659,10 @@
 		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 			dev_mc_delete(dev, buf, dev->addr_len, 0);
 	}
-	spin_unlock_bh(&mc->mca_lock);
 
 	if (mc->mca_flags & MAF_NOREPORT)
 		goto done;
+	spin_unlock_bh(&mc->mca_lock);
 
 	if (dev->flags&IFF_UP)
 		igmp6_leave_group(mc);
@@ -670,10 +670,9 @@
 	spin_lock_bh(&mc->mca_lock);
 	if (del_timer(&mc->mca_timer))
 		atomic_dec(&mc->mca_refcnt);
-	spin_unlock_bh(&mc->mca_lock);
-
 done:
 	ip6_mc_clear_src(mc);
+	spin_unlock_bh(&mc->mca_lock);
 }
 
 /*
@@ -854,15 +853,10 @@
 /*
  *	device multicast group del
  */
-int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
+static int __ipv6_dev_mc_dec(struct net_device *dev, struct inet6_dev *idev, struct in6_addr *addr)
 {
-	struct inet6_dev *idev;
 	struct ifmcaddr6 *ma, **map;
 
-	idev = in6_dev_get(dev);
-	if (idev == NULL)
-		return -ENODEV;
-
 	write_lock_bh(&idev->lock);
 	for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
 		if (ipv6_addr_cmp(&ma->mca_addr, addr) == 0) {
@@ -873,20 +867,32 @@
 				igmp6_group_dropped(ma);
 				ma_put(ma);
 
-				in6_dev_put(idev);
 				return 0;
 			}
 			write_unlock_bh(&idev->lock);
-			in6_dev_put(idev);
 			return 0;
 		}
 	}
 	write_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
 
 	return -ENOENT;
 }
 
+int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
+{
+	struct inet6_dev *idev = in6_dev_get(dev);
+	int err;
+
+	if (!idev)
+		return -ENODEV;
+
+	err = __ipv6_dev_mc_dec(dev, idev, addr);
+
+	in6_dev_put(idev);
+
+	return err;
+}
+
 /*
  *	check if the interface/address pair is valid
  */
@@ -1305,7 +1311,7 @@
 	struct net_device *dev = pmc->idev->dev;
 	struct mld2_report *pmr;
 	struct mld2_grec *pgr = 0;
-	struct ip6_sf_list *psf, *psf_next, *psf_prev, *psf_list;
+	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
 	int scount, first, isquery, truncate;
 
 	if (pmc->mca_flags & MAF_NOREPORT)
@@ -1316,9 +1322,9 @@
 	truncate = type == MLD2_MODE_IS_EXCLUDE ||
 		    type == MLD2_CHANGE_TO_EXCLUDE;
 
-	psf_list = sdeleted ? pmc->mca_tomb : pmc->mca_sources;
+	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
 
-	if (!psf_list) {
+	if (!*psf_list) {
 		if (type == MLD2_ALLOW_NEW_SOURCES ||
 		    type == MLD2_BLOCK_OLD_SOURCES)
 			return skb;
@@ -1349,7 +1355,7 @@
 	first = 1;
 	scount = 0;
 	psf_prev = 0;
-	for (psf=psf_list; psf; psf=psf_next) {
+	for (psf=*psf_list; psf; psf=psf_next) {
 		struct in6_addr *psrc;
 
 		psf_next = psf->sf_next;
@@ -1389,7 +1395,7 @@
 			if (psf_prev)
 				psf_prev->sf_next = psf->sf_next;
 			else
-				pmc->mca_tomb = psf->sf_next;
+				*psf_list = psf->sf_next;
 			kfree(psf);
 			continue;
 		}
@@ -1659,11 +1665,11 @@
 		return -ESRCH;
 	}
 	spin_lock_bh(&pmc->mca_lock);
-	read_unlock_bh(&idev->lock);
 	sf_markstate(pmc);
 	if (!delta) {
 		if (!pmc->mca_sfcount[sfmode]) {
 			spin_unlock_bh(&pmc->mca_lock);
+			read_unlock_bh(&idev->lock);
 			return -EINVAL;
 		}
 		pmc->mca_sfcount[sfmode]--;
@@ -1691,6 +1697,7 @@
 	} else if (sf_setstate(pmc) || changerec)
 		mld_ifc_event(pmc->idev);
 	spin_unlock_bh(&pmc->mca_lock);
+	read_unlock_bh(&idev->lock);
 	return err;
 }
 
@@ -1782,7 +1789,6 @@
 		return -ESRCH;
 	}
 	spin_lock_bh(&pmc->mca_lock);
-	read_unlock_bh(&idev->lock);
 	sf_markstate(pmc);
 	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
 
@@ -1819,6 +1825,7 @@
 	} else if (sf_setstate(pmc))
 		mld_ifc_event(idev);
 	spin_unlock_bh(&pmc->mca_lock);
+	read_unlock_bh(&idev->lock);
 	return err;
 }
 
@@ -2015,7 +2022,12 @@
 
 	/* Delete all-nodes address. */
 	ipv6_addr_all_nodes(&maddr);
-	ipv6_dev_mc_dec(idev->dev, &maddr);
+
+	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
+	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
+	 * fail.
+	 */
+	__ipv6_dev_mc_dec(idev->dev, idev, &maddr);
 
 	write_lock_bh(&idev->lock);
 	while ((i = idev->mc_list) != NULL) {
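
Note on the recurring change above: the igmp.c and mcast.c hunks switch from a copied list head (struct ip_sf_list *psf_list) to a pointer to the head pointer (struct ip_sf_list **psf_list). In the old code, unlinking the first element of the list always wrote pmc->tomb (or pmc->mca_tomb), even when the loop had been started from pmc->sources, so the wrong head could be overwritten; with the double pointer, *psf_list = psf->sf_next updates whichever head was actually selected. The following is a minimal stand-alone sketch of that pointer-to-pointer unlink idiom; the names used (struct node, push, list_del_matching) are hypothetical and are not kernel identifiers.

/*
 * Stand-alone illustration of the head-pointer fix above.  The deletion
 * loop receives a pointer to the list head, so removing the first node
 * updates the caller's chosen head variable instead of a hard-coded field.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Unlink and free every node whose value equals 'val'.  'head' plays the
 * role of &pmc->tomb / &pmc->sources in the patch: when the first element
 * matches, *head is rewritten, so whichever list was selected stays valid. */
static void list_del_matching(struct node **head, int val)
{
	struct node *cur, *next, *prev = NULL;

	for (cur = *head; cur; cur = next) {
		next = cur->next;
		if (cur->val == val) {
			if (prev)
				prev->next = cur->next;
			else
				*head = cur->next;	/* the old bug always wrote one fixed head here */
			free(cur);
			continue;
		}
		prev = cur;
	}
}

static struct node *push(struct node *head, int val)
{
	struct node *n = malloc(sizeof(*n));

	n->val = val;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *list = NULL, *n;

	list = push(push(push(NULL, 3), 2), 2);	/* list: 2 -> 2 -> 3 */
	list_del_matching(&list, 2);		/* the head pointer itself must change */

	for (n = list; n; n = n->next)
		printf("%d\n", n->val);		/* prints: 3 */
	return 0;
}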