linux/net/ipv6/mcast.c
   1/*
   2 *      Multicast support for IPv6
   3 *      Linux INET6 implementation
   4 *
   5 *      Authors:
   6 *      Pedro Roque             <roque@di.fc.ul.pt>
   7 *
   8 *      Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
   9 *
  10 *      This program is free software; you can redistribute it and/or
  11 *      modify it under the terms of the GNU General Public License
  12 *      as published by the Free Software Foundation; either version
  13 *      2 of the License, or (at your option) any later version.
  14 */
  15
  16/* Changes:
  17 *
  18 *      yoshfuji        : fix format of router-alert option
  19 *      YOSHIFUJI Hideaki @USAGI:
  20 *              Fixed source address for MLD message based on
  21 *              <draft-ietf-magma-mld-source-05.txt>.
  22 *      YOSHIFUJI Hideaki @USAGI:
  23 *              - Ignore Queries for invalid addresses.
  24 *              - MLD for link-local addresses.
  25 *      David L Stevens <dlstevens@us.ibm.com>:
  26 *              - MLDv2 support
  27 */
  28
  29#include <linux/module.h>
  30#include <linux/errno.h>
  31#include <linux/types.h>
  32#include <linux/string.h>
  33#include <linux/socket.h>
  34#include <linux/sockios.h>
  35#include <linux/jiffies.h>
  36#include <linux/times.h>
  37#include <linux/net.h>
  38#include <linux/in.h>
  39#include <linux/in6.h>
  40#include <linux/netdevice.h>
  41#include <linux/if_arp.h>
  42#include <linux/route.h>
  43#include <linux/init.h>
  44#include <linux/proc_fs.h>
  45#include <linux/seq_file.h>
  46#include <linux/slab.h>
  47#include <net/mld.h>
  48
  49#include <linux/netfilter.h>
  50#include <linux/netfilter_ipv6.h>
  51
  52#include <net/net_namespace.h>
  53#include <net/sock.h>
  54#include <net/snmp.h>
  55
  56#include <net/ipv6.h>
  57#include <net/protocol.h>
  58#include <net/if_inet6.h>
  59#include <net/ndisc.h>
  60#include <net/addrconf.h>
  61#include <net/ip6_route.h>
  62#include <net/inet_common.h>
  63
  64#include <net/ip6_checksum.h>
  65
  66/* Set to 3 to get tracing... */
  67#define MCAST_DEBUG 2
  68
  69#if MCAST_DEBUG >= 3
  70#define MDBG(x) printk x
  71#else
  72#define MDBG(x)
  73#endif
  74
  75/* Ensure that we have struct in6_addr aligned on 32bit word. */
  76static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
  77        BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
  78        BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
  79        BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
  80};
  81
  82static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
  83
  84/* Big mc list lock for all the sockets */
  85static DEFINE_RWLOCK(ipv6_sk_mc_lock);
  86
  87static void igmp6_join_group(struct ifmcaddr6 *ma);
  88static void igmp6_leave_group(struct ifmcaddr6 *ma);
  89static void igmp6_timer_handler(unsigned long data);
  90
  91static void mld_gq_timer_expire(unsigned long data);
  92static void mld_ifc_timer_expire(unsigned long data);
  93static void mld_ifc_event(struct inet6_dev *idev);
  94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
  95static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *addr);
  96static void mld_clear_delrec(struct inet6_dev *idev);
  97static int sf_setstate(struct ifmcaddr6 *pmc);
  98static void sf_markstate(struct ifmcaddr6 *pmc);
  99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
 100static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
 101                          int sfmode, int sfcount, struct in6_addr *psfsrc,
 102                          int delta);
 103static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca,
 104                          int sfmode, int sfcount, struct in6_addr *psfsrc,
 105                          int delta);
 106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
 107                            struct inet6_dev *idev);
 108
 109
 110#define IGMP6_UNSOLICITED_IVAL  (10*HZ)
 111#define MLD_QRV_DEFAULT         2
 112
 113#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
 114                (idev)->cnf.force_mld_version == 1 || \
 115                ((idev)->mc_v1_seen && \
 116                time_before(jiffies, (idev)->mc_v1_seen)))
 117
 118#define IPV6_MLD_MAX_MSF        64
 119
 120int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
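/*
 * Both knobs above are visible from user space: force_mld_version is the
 * per-interface /proc/sys/net/ipv6/conf/<ifname>/force_mld_version entry
 * (1 forces MLDv1 behaviour regardless of the queriers seen on the link),
 * and sysctl_mld_max_msf is exported as the net.ipv6 mld_max_msf sysctl,
 * capping how many source filters a socket may install per group.
 */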
 121
 122/*
 123 *      socket join on multicast group
 124 */
 125
 126int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 127{
 128        struct net_device *dev = NULL;
 129        struct ipv6_mc_socklist *mc_lst;
 130        struct ipv6_pinfo *np = inet6_sk(sk);
 131        struct net *net = sock_net(sk);
 132        int err;
 133
 134        if (!ipv6_addr_is_multicast(addr))
 135                return -EINVAL;
 136
 137        read_lock_bh(&ipv6_sk_mc_lock);
 138        for (mc_lst=np->ipv6_mc_list; mc_lst; mc_lst=mc_lst->next) {
 139                if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 140                    ipv6_addr_equal(&mc_lst->addr, addr)) {
 141                        read_unlock_bh(&ipv6_sk_mc_lock);
 142                        return -EADDRINUSE;
 143                }
 144        }
 145        read_unlock_bh(&ipv6_sk_mc_lock);
 146
 147        mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
 148
 149        if (mc_lst == NULL)
 150                return -ENOMEM;
 151
 152        mc_lst->next = NULL;
 153        ipv6_addr_copy(&mc_lst->addr, addr);
 154
 155        rcu_read_lock();
 156        if (ifindex == 0) {
 157                struct rt6_info *rt;
 158                rt = rt6_lookup(net, addr, NULL, 0, 0);
 159                if (rt) {
 160                        dev = rt->rt6i_dev;
 161                        dst_release(&rt->dst);
 162                }
 163        } else
 164                dev = dev_get_by_index_rcu(net, ifindex);
 165
 166        if (dev == NULL) {
 167                rcu_read_unlock();
 168                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 169                return -ENODEV;
 170        }
 171
 172        mc_lst->ifindex = dev->ifindex;
 173        mc_lst->sfmode = MCAST_EXCLUDE;
 174        rwlock_init(&mc_lst->sflock);
 175        mc_lst->sflist = NULL;
 176
 177        /*
 178         *      now add/increase the group membership on the device
 179         */
 180
 181        err = ipv6_dev_mc_inc(dev, addr);
 182
 183        if (err) {
 184                rcu_read_unlock();
 185                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 186                return err;
 187        }
 188
 189        write_lock_bh(&ipv6_sk_mc_lock);
 190        mc_lst->next = np->ipv6_mc_list;
 191        np->ipv6_mc_list = mc_lst;
 192        write_unlock_bh(&ipv6_sk_mc_lock);
 193
 194        rcu_read_unlock();
 195
 196        return 0;
 197}
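/*
 * Illustrative user-space sketch: the usual path into ipv6_sock_mc_join()
 * is setsockopt(IPV6_JOIN_GROUP) with a struct ipv6_mreq. The interface
 * name and group address below are placeholders.
 *
 *      #include <sys/socket.h>
 *      #include <netinet/in.h>
 *      #include <arpa/inet.h>
 *      #include <net/if.h>
 *
 *      int s = socket(AF_INET6, SOCK_DGRAM, 0);
 *      struct ipv6_mreq mreq = { 0 };
 *
 *      inet_pton(AF_INET6, "ff02::1:3", &mreq.ipv6mr_multiaddr);
 *      mreq.ipv6mr_interface = if_nametoindex("eth0");
 *      setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
 *
 * Passing an interface index of 0 lets the kernel pick the device via the
 * route lookup seen above.
 */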
 198
 199/*
 200 *      socket leave on multicast group
 201 */
 202int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 203{
 204        struct ipv6_pinfo *np = inet6_sk(sk);
 205        struct ipv6_mc_socklist *mc_lst, **lnk;
 206        struct net *net = sock_net(sk);
 207
 208        write_lock_bh(&ipv6_sk_mc_lock);
  209        for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) != NULL; lnk = &mc_lst->next) {
 210                if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 211                    ipv6_addr_equal(&mc_lst->addr, addr)) {
 212                        struct net_device *dev;
 213
 214                        *lnk = mc_lst->next;
 215                        write_unlock_bh(&ipv6_sk_mc_lock);
 216
 217                        rcu_read_lock();
 218                        dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
 219                        if (dev != NULL) {
 220                                struct inet6_dev *idev = __in6_dev_get(dev);
 221
 222                                (void) ip6_mc_leave_src(sk, mc_lst, idev);
 223                                if (idev)
 224                                        __ipv6_dev_mc_dec(idev, &mc_lst->addr);
 225                        } else
 226                                (void) ip6_mc_leave_src(sk, mc_lst, NULL);
 227                        rcu_read_unlock();
 228                        sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 229                        return 0;
 230                }
 231        }
 232        write_unlock_bh(&ipv6_sk_mc_lock);
 233
 234        return -EADDRNOTAVAIL;
 235}
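/*
 * The matching leave is IPV6_LEAVE_GROUP with the same struct ipv6_mreq
 * (sketch, reusing the socket and mreq from the join example above); an
 * ipv6mr_interface of 0 matches a membership on any interface, mirroring
 * the ifindex == 0 test in ipv6_sock_mc_drop().
 *
 *      setsockopt(s, IPPROTO_IPV6, IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
 */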
 236
 237/* called with rcu_read_lock() */
 238static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
 239                                             struct in6_addr *group,
 240                                             int ifindex)
 241{
 242        struct net_device *dev = NULL;
 243        struct inet6_dev *idev = NULL;
 244
 245        if (ifindex == 0) {
 246                struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
 247
 248                if (rt) {
 249                        dev = rt->rt6i_dev;
 250                        dev_hold(dev);
 251                        dst_release(&rt->dst);
 252                }
 253        } else
 254                dev = dev_get_by_index_rcu(net, ifindex);
 255
 256        if (!dev)
 257                return NULL;
 258        idev = __in6_dev_get(dev);
 259        if (!idev)
  260                return NULL;
 261        read_lock_bh(&idev->lock);
 262        if (idev->dead) {
 263                read_unlock_bh(&idev->lock);
 264                return NULL;
 265        }
 266        return idev;
 267}
 268
 269void ipv6_sock_mc_close(struct sock *sk)
 270{
 271        struct ipv6_pinfo *np = inet6_sk(sk);
 272        struct ipv6_mc_socklist *mc_lst;
 273        struct net *net = sock_net(sk);
 274
 275        write_lock_bh(&ipv6_sk_mc_lock);
 276        while ((mc_lst = np->ipv6_mc_list) != NULL) {
 277                struct net_device *dev;
 278
 279                np->ipv6_mc_list = mc_lst->next;
 280                write_unlock_bh(&ipv6_sk_mc_lock);
 281
 282                rcu_read_lock();
 283                dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
 284                if (dev) {
 285                        struct inet6_dev *idev = __in6_dev_get(dev);
 286
 287                        (void) ip6_mc_leave_src(sk, mc_lst, idev);
 288                        if (idev)
 289                                __ipv6_dev_mc_dec(idev, &mc_lst->addr);
 290                } else
 291                        (void) ip6_mc_leave_src(sk, mc_lst, NULL);
 292                rcu_read_unlock();
 293                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 294
 295                write_lock_bh(&ipv6_sk_mc_lock);
 296        }
 297        write_unlock_bh(&ipv6_sk_mc_lock);
 298}
 299
 300int ip6_mc_source(int add, int omode, struct sock *sk,
 301        struct group_source_req *pgsr)
 302{
 303        struct in6_addr *source, *group;
 304        struct ipv6_mc_socklist *pmc;
 305        struct net_device *dev;
 306        struct inet6_dev *idev;
 307        struct ipv6_pinfo *inet6 = inet6_sk(sk);
 308        struct ip6_sf_socklist *psl;
 309        struct net *net = sock_net(sk);
 310        int i, j, rv;
 311        int leavegroup = 0;
 312        int pmclocked = 0;
 313        int err;
 314
 315        source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
 316        group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
 317
 318        if (!ipv6_addr_is_multicast(group))
 319                return -EINVAL;
 320
 321        rcu_read_lock();
 322        idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
 323        if (!idev) {
 324                rcu_read_unlock();
 325                return -ENODEV;
 326        }
 327        dev = idev->dev;
 328
 329        err = -EADDRNOTAVAIL;
 330
 331        read_lock(&ipv6_sk_mc_lock);
 332        for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
 333                if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
 334                        continue;
 335                if (ipv6_addr_equal(&pmc->addr, group))
 336                        break;
 337        }
 338        if (!pmc) {             /* must have a prior join */
 339                err = -EINVAL;
 340                goto done;
 341        }
 342        /* if a source filter was set, must be the same mode as before */
 343        if (pmc->sflist) {
 344                if (pmc->sfmode != omode) {
 345                        err = -EINVAL;
 346                        goto done;
 347                }
 348        } else if (pmc->sfmode != omode) {
 349                /* allow mode switches for empty-set filters */
 350                ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
 351                ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 352                pmc->sfmode = omode;
 353        }
 354
 355        write_lock(&pmc->sflock);
 356        pmclocked = 1;
 357
 358        psl = pmc->sflist;
 359        if (!add) {
 360                if (!psl)
 361                        goto done;      /* err = -EADDRNOTAVAIL */
 362                rv = !0;
 363                for (i=0; i<psl->sl_count; i++) {
 364                        rv = memcmp(&psl->sl_addr[i], source,
 365                                sizeof(struct in6_addr));
 366                        if (rv == 0)
 367                                break;
 368                }
 369                if (rv)         /* source not found */
 370                        goto done;      /* err = -EADDRNOTAVAIL */
 371
 372                /* special case - (INCLUDE, empty) == LEAVE_GROUP */
 373                if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
 374                        leavegroup = 1;
 375                        goto done;
 376                }
 377
 378                /* update the interface filter */
 379                ip6_mc_del_src(idev, group, omode, 1, source, 1);
 380
 381                for (j=i+1; j<psl->sl_count; j++)
 382                        psl->sl_addr[j-1] = psl->sl_addr[j];
 383                psl->sl_count--;
 384                err = 0;
 385                goto done;
 386        }
 387        /* else, add a new source to the filter */
 388
 389        if (psl && psl->sl_count >= sysctl_mld_max_msf) {
 390                err = -ENOBUFS;
 391                goto done;
 392        }
 393        if (!psl || psl->sl_count == psl->sl_max) {
 394                struct ip6_sf_socklist *newpsl;
 395                int count = IP6_SFBLOCK;
 396
 397                if (psl)
 398                        count += psl->sl_max;
 399                newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
 400                if (!newpsl) {
 401                        err = -ENOBUFS;
 402                        goto done;
 403                }
 404                newpsl->sl_max = count;
 405                newpsl->sl_count = count - IP6_SFBLOCK;
 406                if (psl) {
 407                        for (i=0; i<psl->sl_count; i++)
 408                                newpsl->sl_addr[i] = psl->sl_addr[i];
 409                        sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
 410                }
 411                pmc->sflist = psl = newpsl;
 412        }
 413        rv = 1; /* > 0 for insert logic below if sl_count is 0 */
 414        for (i=0; i<psl->sl_count; i++) {
 415                rv = memcmp(&psl->sl_addr[i], source, sizeof(struct in6_addr));
 416                if (rv == 0)
 417                        break;
 418        }
  419        if (rv == 0)            /* address already present; treat as an error */
 420                goto done;
 421        for (j=psl->sl_count-1; j>=i; j--)
 422                psl->sl_addr[j+1] = psl->sl_addr[j];
 423        psl->sl_addr[i] = *source;
 424        psl->sl_count++;
 425        err = 0;
 426        /* update the interface list */
 427        ip6_mc_add_src(idev, group, omode, 1, source, 1);
 428done:
 429        if (pmclocked)
 430                write_unlock(&pmc->sflock);
 431        read_unlock(&ipv6_sk_mc_lock);
 432        read_unlock_bh(&idev->lock);
 433        rcu_read_unlock();
 434        if (leavegroup)
 435                return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
 436        return err;
 437}
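/*
 * Illustrative user-space sketch: the source-specific socket options
 * (MCAST_JOIN_SOURCE_GROUP, MCAST_LEAVE_SOURCE_GROUP, MCAST_BLOCK_SOURCE
 * and MCAST_UNBLOCK_SOURCE) are dispatched to ip6_mc_source() with the
 * corresponding add/omode combination. Addresses and the interface name
 * below are placeholders.
 *
 *      struct group_source_req gsr = { 0 };
 *      struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *      struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *      gsr.gsr_interface = if_nametoindex("eth0");
 *      grp->sin6_family = AF_INET6;
 *      inet_pton(AF_INET6, "ff3e::4321", &grp->sin6_addr);
 *      src->sin6_family = AF_INET6;
 *      inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *      setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP, &gsr, sizeof(gsr));
 */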
 438
 439int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 440{
 441        struct in6_addr *group;
 442        struct ipv6_mc_socklist *pmc;
 443        struct net_device *dev;
 444        struct inet6_dev *idev;
 445        struct ipv6_pinfo *inet6 = inet6_sk(sk);
 446        struct ip6_sf_socklist *newpsl, *psl;
 447        struct net *net = sock_net(sk);
 448        int leavegroup = 0;
 449        int i, err;
 450
 451        group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
 452
 453        if (!ipv6_addr_is_multicast(group))
 454                return -EINVAL;
 455        if (gsf->gf_fmode != MCAST_INCLUDE &&
 456            gsf->gf_fmode != MCAST_EXCLUDE)
 457                return -EINVAL;
 458
 459        rcu_read_lock();
 460        idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
 461
 462        if (!idev) {
 463                rcu_read_unlock();
 464                return -ENODEV;
 465        }
 466        dev = idev->dev;
 467
 468        err = 0;
 469        read_lock(&ipv6_sk_mc_lock);
 470
 471        if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
 472                leavegroup = 1;
 473                goto done;
 474        }
 475
 476        for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
 477                if (pmc->ifindex != gsf->gf_interface)
 478                        continue;
 479                if (ipv6_addr_equal(&pmc->addr, group))
 480                        break;
 481        }
 482        if (!pmc) {             /* must have a prior join */
 483                err = -EINVAL;
 484                goto done;
 485        }
 486        if (gsf->gf_numsrc) {
 487                newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
 488                                                          GFP_ATOMIC);
 489                if (!newpsl) {
 490                        err = -ENOBUFS;
 491                        goto done;
 492                }
 493                newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
 494                for (i=0; i<newpsl->sl_count; ++i) {
 495                        struct sockaddr_in6 *psin6;
 496
 497                        psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
 498                        newpsl->sl_addr[i] = psin6->sin6_addr;
 499                }
 500                err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
 501                        newpsl->sl_count, newpsl->sl_addr, 0);
 502                if (err) {
 503                        sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
 504                        goto done;
 505                }
 506        } else {
 507                newpsl = NULL;
 508                (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
 509        }
 510
 511        write_lock(&pmc->sflock);
 512        psl = pmc->sflist;
 513        if (psl) {
 514                (void) ip6_mc_del_src(idev, group, pmc->sfmode,
 515                        psl->sl_count, psl->sl_addr, 0);
 516                sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
 517        } else
 518                (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 519        pmc->sflist = newpsl;
 520        pmc->sfmode = gsf->gf_fmode;
 521        write_unlock(&pmc->sflock);
 522        err = 0;
 523done:
 524        read_unlock(&ipv6_sk_mc_lock);
 525        read_unlock_bh(&idev->lock);
 526        rcu_read_unlock();
 527        if (leavegroup)
 528                err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
 529        return err;
 530}
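/*
 * Illustrative user-space sketch: a complete filter state can be installed
 * in one MCAST_MSFILTER setsockopt() carrying a struct group_filter, which
 * is what arrives in ip6_mc_msfilter(). Here a single-source INCLUDE
 * filter is set up (placeholder addresses, same socket as the sketches
 * above).
 *
 *      char buf[GROUP_FILTER_SIZE(1)];
 *      struct group_filter *gf = (struct group_filter *)buf;
 *      struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gf->gf_group;
 *      struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gf->gf_slist[0];
 *
 *      memset(buf, 0, sizeof(buf));
 *      gf->gf_interface = if_nametoindex("eth0");
 *      gf->gf_fmode = MCAST_INCLUDE;
 *      gf->gf_numsrc = 1;
 *      grp->sin6_family = AF_INET6;
 *      inet_pton(AF_INET6, "ff3e::4321", &grp->sin6_addr);
 *      src->sin6_family = AF_INET6;
 *      inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *      setsockopt(s, IPPROTO_IPV6, MCAST_MSFILTER, gf, GROUP_FILTER_SIZE(1));
 */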
 531
 532int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
 533        struct group_filter __user *optval, int __user *optlen)
 534{
 535        int err, i, count, copycount;
 536        struct in6_addr *group;
 537        struct ipv6_mc_socklist *pmc;
 538        struct inet6_dev *idev;
 539        struct net_device *dev;
 540        struct ipv6_pinfo *inet6 = inet6_sk(sk);
 541        struct ip6_sf_socklist *psl;
 542        struct net *net = sock_net(sk);
 543
 544        group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
 545
 546        if (!ipv6_addr_is_multicast(group))
 547                return -EINVAL;
 548
 549        rcu_read_lock();
 550        idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
 551
 552        if (!idev) {
 553                rcu_read_unlock();
 554                return -ENODEV;
 555        }
 556        dev = idev->dev;
 557
 558        err = -EADDRNOTAVAIL;
 559        /*
 560         * changes to the ipv6_mc_list require the socket lock and
  561         * a read lock on ipv6_sk_mc_lock. We have the socket lock,
 562         * so reading the list is safe.
 563         */
 564
 565        for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
 566                if (pmc->ifindex != gsf->gf_interface)
 567                        continue;
 568                if (ipv6_addr_equal(group, &pmc->addr))
 569                        break;
 570        }
 571        if (!pmc)               /* must have a prior join */
 572                goto done;
 573        gsf->gf_fmode = pmc->sfmode;
 574        psl = pmc->sflist;
 575        count = psl ? psl->sl_count : 0;
 576        read_unlock_bh(&idev->lock);
 577        rcu_read_unlock();
 578
 579        copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
 580        gsf->gf_numsrc = count;
 581        if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
 582            copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
 583                return -EFAULT;
 584        }
  585        /* changes to psl require the socket lock, a read lock on
  586         * ipv6_sk_mc_lock and a write lock on pmc->sflock. We
 587         * have the socket lock, so reading here is safe.
 588         */
 589        for (i=0; i<copycount; i++) {
 590                struct sockaddr_in6 *psin6;
 591                struct sockaddr_storage ss;
 592
 593                psin6 = (struct sockaddr_in6 *)&ss;
 594                memset(&ss, 0, sizeof(ss));
 595                psin6->sin6_family = AF_INET6;
 596                psin6->sin6_addr = psl->sl_addr[i];
 597                if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
 598                        return -EFAULT;
 599        }
 600        return 0;
 601done:
 602        read_unlock_bh(&idev->lock);
 603        rcu_read_unlock();
 604        return err;
 605}
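/*
 * Reading the filter back with getsockopt(MCAST_MSFILTER) lands in
 * ip6_mc_msfget(): the caller pre-fills gf_interface, gf_group and, in
 * gf_numsrc, the number of gf_slist slots it supplied; the kernel returns
 * the mode, rewrites gf_numsrc to the full source count and copies out at
 * most the supplied number of sources (sketch, reusing gf from above):
 *
 *      socklen_t len = GROUP_FILTER_SIZE(1);
 *
 *      gf->gf_numsrc = 1;
 *      getsockopt(s, IPPROTO_IPV6, MCAST_MSFILTER, gf, &len);
 */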
 606
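/*
 * inet6_mc_check() implements the per-socket source-filter check used on
 * receive: it returns nonzero when a packet sent to mc_addr from src_addr
 * may be delivered on this socket. A socket with no membership for
 * mc_addr passes, otherwise the source list is interpreted according to
 * the socket's INCLUDE/EXCLUDE mode.
 */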
 607int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
 608                   const struct in6_addr *src_addr)
 609{
 610        struct ipv6_pinfo *np = inet6_sk(sk);
 611        struct ipv6_mc_socklist *mc;
 612        struct ip6_sf_socklist *psl;
 613        int rv = 1;
 614
 615        read_lock(&ipv6_sk_mc_lock);
 616        for (mc = np->ipv6_mc_list; mc; mc = mc->next) {
 617                if (ipv6_addr_equal(&mc->addr, mc_addr))
 618                        break;
 619        }
 620        if (!mc) {
 621                read_unlock(&ipv6_sk_mc_lock);
 622                return 1;
 623        }
 624        read_lock(&mc->sflock);
 625        psl = mc->sflist;
 626        if (!psl) {
 627                rv = mc->sfmode == MCAST_EXCLUDE;
 628        } else {
 629                int i;
 630
 631                for (i=0; i<psl->sl_count; i++) {
 632                        if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
 633                                break;
 634                }
 635                if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
 636                        rv = 0;
 637                if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
 638                        rv = 0;
 639        }
 640        read_unlock(&mc->sflock);
 641        read_unlock(&ipv6_sk_mc_lock);
 642
 643        return rv;
 644}
 645
 646static void ma_put(struct ifmcaddr6 *mc)
 647{
 648        if (atomic_dec_and_test(&mc->mca_refcnt)) {
 649                in6_dev_put(mc->idev);
 650                kfree(mc);
 651        }
 652}
 653
 654static void igmp6_group_added(struct ifmcaddr6 *mc)
 655{
 656        struct net_device *dev = mc->idev->dev;
 657        char buf[MAX_ADDR_LEN];
 658
 659        spin_lock_bh(&mc->mca_lock);
 660        if (!(mc->mca_flags&MAF_LOADED)) {
 661                mc->mca_flags |= MAF_LOADED;
 662                if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 663                        dev_mc_add(dev, buf);
 664        }
 665        spin_unlock_bh(&mc->mca_lock);
 666
 667        if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
 668                return;
 669
 670        if (MLD_V1_SEEN(mc->idev)) {
 671                igmp6_join_group(mc);
 672                return;
 673        }
 674        /* else v2 */
 675
 676        mc->mca_crcount = mc->idev->mc_qrv;
 677        mld_ifc_event(mc->idev);
 678}
 679
 680static void igmp6_group_dropped(struct ifmcaddr6 *mc)
 681{
 682        struct net_device *dev = mc->idev->dev;
 683        char buf[MAX_ADDR_LEN];
 684
 685        spin_lock_bh(&mc->mca_lock);
 686        if (mc->mca_flags&MAF_LOADED) {
 687                mc->mca_flags &= ~MAF_LOADED;
 688                if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 689                        dev_mc_del(dev, buf);
 690        }
 691
 692        if (mc->mca_flags & MAF_NOREPORT)
 693                goto done;
 694        spin_unlock_bh(&mc->mca_lock);
 695
 696        if (!mc->idev->dead)
 697                igmp6_leave_group(mc);
 698
 699        spin_lock_bh(&mc->mca_lock);
 700        if (del_timer(&mc->mca_timer))
 701                atomic_dec(&mc->mca_refcnt);
 702done:
 703        ip6_mc_clear_src(mc);
 704        spin_unlock_bh(&mc->mca_lock);
 705}
 706
 707/*
 708 * deleted ifmcaddr6 manipulation
 709 */
 710static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 711{
 712        struct ifmcaddr6 *pmc;
 713
 714        /* this is an "ifmcaddr6" for convenience; only the fields below
 715         * are actually used. In particular, the refcnt and users are not
 716         * used for management of the delete list. Using the same structure
 717         * for deleted items allows change reports to use common code with
 718         * non-deleted or query-response MCA's.
 719         */
 720        pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
 721        if (!pmc)
 722                return;
 723
 724        spin_lock_bh(&im->mca_lock);
 725        spin_lock_init(&pmc->mca_lock);
 726        pmc->idev = im->idev;
 727        in6_dev_hold(idev);
 728        pmc->mca_addr = im->mca_addr;
 729        pmc->mca_crcount = idev->mc_qrv;
 730        pmc->mca_sfmode = im->mca_sfmode;
 731        if (pmc->mca_sfmode == MCAST_INCLUDE) {
 732                struct ip6_sf_list *psf;
 733
 734                pmc->mca_tomb = im->mca_tomb;
 735                pmc->mca_sources = im->mca_sources;
 736                im->mca_tomb = im->mca_sources = NULL;
 737                for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
 738                        psf->sf_crcount = pmc->mca_crcount;
 739        }
 740        spin_unlock_bh(&im->mca_lock);
 741
 742        spin_lock_bh(&idev->mc_lock);
 743        pmc->next = idev->mc_tomb;
 744        idev->mc_tomb = pmc;
 745        spin_unlock_bh(&idev->mc_lock);
 746}
 747
 748static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
 749{
 750        struct ifmcaddr6 *pmc, *pmc_prev;
 751        struct ip6_sf_list *psf, *psf_next;
 752
 753        spin_lock_bh(&idev->mc_lock);
 754        pmc_prev = NULL;
 755        for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
 756                if (ipv6_addr_equal(&pmc->mca_addr, pmca))
 757                        break;
 758                pmc_prev = pmc;
 759        }
 760        if (pmc) {
 761                if (pmc_prev)
 762                        pmc_prev->next = pmc->next;
 763                else
 764                        idev->mc_tomb = pmc->next;
 765        }
 766        spin_unlock_bh(&idev->mc_lock);
 767
 768        if (pmc) {
 769                for (psf=pmc->mca_tomb; psf; psf=psf_next) {
 770                        psf_next = psf->sf_next;
 771                        kfree(psf);
 772                }
 773                in6_dev_put(pmc->idev);
 774                kfree(pmc);
 775        }
 776}
 777
 778static void mld_clear_delrec(struct inet6_dev *idev)
 779{
 780        struct ifmcaddr6 *pmc, *nextpmc;
 781
 782        spin_lock_bh(&idev->mc_lock);
 783        pmc = idev->mc_tomb;
 784        idev->mc_tomb = NULL;
 785        spin_unlock_bh(&idev->mc_lock);
 786
 787        for (; pmc; pmc = nextpmc) {
 788                nextpmc = pmc->next;
 789                ip6_mc_clear_src(pmc);
 790                in6_dev_put(pmc->idev);
 791                kfree(pmc);
 792        }
 793
 794        /* clear dead sources, too */
 795        read_lock_bh(&idev->lock);
 796        for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 797                struct ip6_sf_list *psf, *psf_next;
 798
 799                spin_lock_bh(&pmc->mca_lock);
 800                psf = pmc->mca_tomb;
 801                pmc->mca_tomb = NULL;
 802                spin_unlock_bh(&pmc->mca_lock);
 803                for (; psf; psf=psf_next) {
 804                        psf_next = psf->sf_next;
 805                        kfree(psf);
 806                }
 807        }
 808        read_unlock_bh(&idev->lock);
 809}
 810
 811
 812/*
 813 *      device multicast group inc (add if not found)
 814 */
 815int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
 816{
 817        struct ifmcaddr6 *mc;
 818        struct inet6_dev *idev;
 819
 820        /* we need to take a reference on idev */
 821        idev = in6_dev_get(dev);
 822
 823        if (idev == NULL)
 824                return -EINVAL;
 825
 826        write_lock_bh(&idev->lock);
 827        if (idev->dead) {
 828                write_unlock_bh(&idev->lock);
 829                in6_dev_put(idev);
 830                return -ENODEV;
 831        }
 832
 833        for (mc = idev->mc_list; mc; mc = mc->next) {
 834                if (ipv6_addr_equal(&mc->mca_addr, addr)) {
 835                        mc->mca_users++;
 836                        write_unlock_bh(&idev->lock);
 837                        ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
 838                                NULL, 0);
 839                        in6_dev_put(idev);
 840                        return 0;
 841                }
 842        }
 843
 844        /*
 845         *      not found: create a new one.
 846         */
 847
 848        mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
 849
 850        if (mc == NULL) {
 851                write_unlock_bh(&idev->lock);
 852                in6_dev_put(idev);
 853                return -ENOMEM;
 854        }
 855
 856        setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
 857
 858        ipv6_addr_copy(&mc->mca_addr, addr);
 859        mc->idev = idev; /* (reference taken) */
 860        mc->mca_users = 1;
 861        /* mca_stamp should be updated upon changes */
 862        mc->mca_cstamp = mc->mca_tstamp = jiffies;
 863        atomic_set(&mc->mca_refcnt, 2);
 864        spin_lock_init(&mc->mca_lock);
 865
 866        /* initial mode is (EX, empty) */
 867        mc->mca_sfmode = MCAST_EXCLUDE;
 868        mc->mca_sfcount[MCAST_EXCLUDE] = 1;
 869
 870        if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
 871            IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
 872                mc->mca_flags |= MAF_NOREPORT;
 873
 874        mc->next = idev->mc_list;
 875        idev->mc_list = mc;
 876        write_unlock_bh(&idev->lock);
 877
 878        mld_del_delrec(idev, &mc->mca_addr);
 879        igmp6_group_added(mc);
 880        ma_put(mc);
 881        return 0;
 882}
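/*
 * In-kernel callers pair ipv6_dev_mc_inc() with ipv6_dev_mc_dec() (or with
 * __ipv6_dev_mc_dec() when they already hold the inet6_dev). A minimal
 * sketch in the style of address configuration, where ifp is the
 * inet6_ifaddr being brought up and addrconf_addr_solict_mult() computes
 * its solicited-node group:
 *
 *      struct in6_addr maddr;
 *
 *      addrconf_addr_solict_mult(&ifp->addr, &maddr);
 *      ipv6_dev_mc_inc(ifp->idev->dev, &maddr);
 *      ...
 *      ipv6_dev_mc_dec(ifp->idev->dev, &maddr);
 */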
 883
 884/*
 885 *      device multicast group del
 886 */
 887int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 888{
 889        struct ifmcaddr6 *ma, **map;
 890
 891        write_lock_bh(&idev->lock);
 892        for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
 893                if (ipv6_addr_equal(&ma->mca_addr, addr)) {
 894                        if (--ma->mca_users == 0) {
 895                                *map = ma->next;
 896                                write_unlock_bh(&idev->lock);
 897
 898                                igmp6_group_dropped(ma);
 899
 900                                ma_put(ma);
 901                                return 0;
 902                        }
 903                        write_unlock_bh(&idev->lock);
 904                        return 0;
 905                }
 906        }
 907        write_unlock_bh(&idev->lock);
 908
 909        return -ENOENT;
 910}
 911
 912int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
 913{
 914        struct inet6_dev *idev;
 915        int err;
 916
 917        rcu_read_lock();
 918
 919        idev = __in6_dev_get(dev);
 920        if (!idev)
 921                err = -ENODEV;
 922        else
 923                err = __ipv6_dev_mc_dec(idev, addr);
 924
 925        rcu_read_unlock();
 926        return err;
 927}
 928
 929/*
 930 * identify MLD packets for MLD filter exceptions
 931 */
 932int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
 933{
 934        struct icmp6hdr *pic;
 935
 936        if (nexthdr != IPPROTO_ICMPV6)
 937                return 0;
 938
 939        if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
 940                return 0;
 941
 942        pic = icmp6_hdr(skb);
 943
 944        switch (pic->icmp6_type) {
 945        case ICMPV6_MGM_QUERY:
 946        case ICMPV6_MGM_REPORT:
 947        case ICMPV6_MGM_REDUCTION:
 948        case ICMPV6_MLD2_REPORT:
 949                return 1;
 950        default:
 951                break;
 952        }
 953        return 0;
 954}
 955
 956/*
 957 *      check if the interface/address pair is valid
 958 */
 959int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
 960                        const struct in6_addr *src_addr)
 961{
 962        struct inet6_dev *idev;
 963        struct ifmcaddr6 *mc;
 964        int rv = 0;
 965
 966        rcu_read_lock();
 967        idev = __in6_dev_get(dev);
 968        if (idev) {
 969                read_lock_bh(&idev->lock);
 970                for (mc = idev->mc_list; mc; mc=mc->next) {
 971                        if (ipv6_addr_equal(&mc->mca_addr, group))
 972                                break;
 973                }
 974                if (mc) {
 975                        if (src_addr && !ipv6_addr_any(src_addr)) {
 976                                struct ip6_sf_list *psf;
 977
 978                                spin_lock_bh(&mc->mca_lock);
 979                                for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
 980                                        if (ipv6_addr_equal(&psf->sf_addr, src_addr))
 981                                                break;
 982                                }
 983                                if (psf)
 984                                        rv = psf->sf_count[MCAST_INCLUDE] ||
 985                                                psf->sf_count[MCAST_EXCLUDE] !=
 986                                                mc->mca_sfcount[MCAST_EXCLUDE];
 987                                else
 988                                        rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
 989                                spin_unlock_bh(&mc->mca_lock);
 990                        } else
 991                                rv = 1; /* don't filter unspecified source */
 992                }
 993                read_unlock_bh(&idev->lock);
 994        }
 995        rcu_read_unlock();
 996        return rv;
 997}
 998
 999static void mld_gq_start_timer(struct inet6_dev *idev)
1000{
1001        int tv = net_random() % idev->mc_maxdelay;
1002
1003        idev->mc_gq_running = 1;
1004        if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1005                in6_dev_hold(idev);
1006}
1007
1008static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
1009{
1010        int tv = net_random() % delay;
1011
1012        if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1013                in6_dev_hold(idev);
1014}
1015
1016/*
1017 *      IGMP handling (alias multicast ICMPv6 messages)
1018 */
1019
1020static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1021{
1022        unsigned long delay = resptime;
1023
1024        /* Do not start timer for these addresses */
1025        if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1026            IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1027                return;
1028
1029        if (del_timer(&ma->mca_timer)) {
1030                atomic_dec(&ma->mca_refcnt);
1031                delay = ma->mca_timer.expires - jiffies;
1032        }
1033
1034        if (delay >= resptime) {
1035                if (resptime)
1036                        delay = net_random() % resptime;
1037                else
1038                        delay = 1;
1039        }
1040        ma->mca_timer.expires = jiffies + delay;
1041        if (!mod_timer(&ma->mca_timer, jiffies + delay))
1042                atomic_inc(&ma->mca_refcnt);
1043        ma->mca_flags |= MAF_TIMER_RUNNING;
1044}
1045
1046/* mark EXCLUDE-mode sources */
1047static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1048        struct in6_addr *srcs)
1049{
1050        struct ip6_sf_list *psf;
1051        int i, scount;
1052
1053        scount = 0;
1054        for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1055                if (scount == nsrcs)
1056                        break;
1057                for (i=0; i<nsrcs; i++) {
1058                        /* skip inactive filters */
1059                        if (pmc->mca_sfcount[MCAST_INCLUDE] ||
1060                            pmc->mca_sfcount[MCAST_EXCLUDE] !=
1061                            psf->sf_count[MCAST_EXCLUDE])
1062                                continue;
1063                        if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1064                                scount++;
1065                                break;
1066                        }
1067                }
1068        }
1069        pmc->mca_flags &= ~MAF_GSQUERY;
1070        if (scount == nsrcs)    /* all sources excluded */
1071                return 0;
1072        return 1;
1073}
1074
1075static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1076        struct in6_addr *srcs)
1077{
1078        struct ip6_sf_list *psf;
1079        int i, scount;
1080
1081        if (pmc->mca_sfmode == MCAST_EXCLUDE)
1082                return mld_xmarksources(pmc, nsrcs, srcs);
1083
1084        /* mark INCLUDE-mode sources */
1085
1086        scount = 0;
1087        for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1088                if (scount == nsrcs)
1089                        break;
1090                for (i=0; i<nsrcs; i++) {
1091                        if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1092                                psf->sf_gsresp = 1;
1093                                scount++;
1094                                break;
1095                        }
1096                }
1097        }
1098        if (!scount) {
1099                pmc->mca_flags &= ~MAF_GSQUERY;
1100                return 0;
1101        }
1102        pmc->mca_flags |= MAF_GSQUERY;
1103        return 1;
1104}
1105
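/*
 * Query parsing below keys off the ICMPv6 payload length: an MLDv1 query
 * is exactly 24 bytes (type, code, checksum, maximum response delay,
 * reserved field and the multicast address), while an MLDv2 query is 28
 * bytes or more, adding the S/QRV and QQIC fields plus a source count
 * followed by that many source addresses.
 */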
1106/* called with rcu_read_lock() */
1107int igmp6_event_query(struct sk_buff *skb)
1108{
1109        struct mld2_query *mlh2 = NULL;
1110        struct ifmcaddr6 *ma;
1111        struct in6_addr *group;
1112        unsigned long max_delay;
1113        struct inet6_dev *idev;
1114        struct mld_msg *mld;
1115        int group_type;
1116        int mark = 0;
1117        int len;
1118
1119        if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1120                return -EINVAL;
1121
1122        /* compute payload length excluding extension headers */
1123        len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1124        len -= skb_network_header_len(skb);
1125
 1126        /* Drop queries whose source address is not link-local */
1127        if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
1128                return -EINVAL;
1129
1130        idev = __in6_dev_get(skb->dev);
1131
1132        if (idev == NULL)
1133                return 0;
1134
1135        mld = (struct mld_msg *)icmp6_hdr(skb);
1136        group = &mld->mld_mca;
1137        group_type = ipv6_addr_type(group);
1138
1139        if (group_type != IPV6_ADDR_ANY &&
1140            !(group_type&IPV6_ADDR_MULTICAST))
1141                return -EINVAL;
1142
1143        if (len == 24) {
1144                int switchback;
1145                /* MLDv1 router present */
1146
1147                /* Translate milliseconds to jiffies */
1148                max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
1149
1150                switchback = (idev->mc_qrv + 1) * max_delay;
1151                idev->mc_v1_seen = jiffies + switchback;
1152
1153                /* cancel the interface change timer */
1154                idev->mc_ifc_count = 0;
1155                if (del_timer(&idev->mc_ifc_timer))
1156                        __in6_dev_put(idev);
1157                /* clear deleted report items */
1158                mld_clear_delrec(idev);
1159        } else if (len >= 28) {
1160                int srcs_offset = sizeof(struct mld2_query) -
1161                                  sizeof(struct icmp6hdr);
1162                if (!pskb_may_pull(skb, srcs_offset))
1163                        return -EINVAL;
1164
1165                mlh2 = (struct mld2_query *)skb_transport_header(skb);
1166                max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
1167                if (!max_delay)
1168                        max_delay = 1;
1169                idev->mc_maxdelay = max_delay;
1170                if (mlh2->mld2q_qrv)
1171                        idev->mc_qrv = mlh2->mld2q_qrv;
1172                if (group_type == IPV6_ADDR_ANY) { /* general query */
1173                        if (mlh2->mld2q_nsrcs)
1174                                return -EINVAL; /* no sources allowed */
1175
1176                        mld_gq_start_timer(idev);
1177                        return 0;
1178                }
1179                /* mark sources to include, if group & source-specific */
1180                if (mlh2->mld2q_nsrcs != 0) {
1181                        if (!pskb_may_pull(skb, srcs_offset +
1182                            ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1183                                return -EINVAL;
1184
1185                        mlh2 = (struct mld2_query *)skb_transport_header(skb);
1186                        mark = 1;
1187                }
1188        } else
1189                return -EINVAL;
1190
1191        read_lock_bh(&idev->lock);
1192        if (group_type == IPV6_ADDR_ANY) {
1193                for (ma = idev->mc_list; ma; ma=ma->next) {
1194                        spin_lock_bh(&ma->mca_lock);
1195                        igmp6_group_queried(ma, max_delay);
1196                        spin_unlock_bh(&ma->mca_lock);
1197                }
1198        } else {
1199                for (ma = idev->mc_list; ma; ma=ma->next) {
1200                        if (!ipv6_addr_equal(group, &ma->mca_addr))
1201                                continue;
1202                        spin_lock_bh(&ma->mca_lock);
1203                        if (ma->mca_flags & MAF_TIMER_RUNNING) {
1204                                /* gsquery <- gsquery && mark */
1205                                if (!mark)
1206                                        ma->mca_flags &= ~MAF_GSQUERY;
1207                        } else {
1208                                /* gsquery <- mark */
1209                                if (mark)
1210                                        ma->mca_flags |= MAF_GSQUERY;
1211                                else
1212                                        ma->mca_flags &= ~MAF_GSQUERY;
1213                        }
1214                        if (!(ma->mca_flags & MAF_GSQUERY) ||
1215                            mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1216                                igmp6_group_queried(ma, max_delay);
1217                        spin_unlock_bh(&ma->mca_lock);
1218                        break;
1219                }
1220        }
1221        read_unlock_bh(&idev->lock);
1222
1223        return 0;
1224}
1225
1226/* called with rcu_read_lock() */
1227int igmp6_event_report(struct sk_buff *skb)
1228{
1229        struct ifmcaddr6 *ma;
1230        struct inet6_dev *idev;
1231        struct mld_msg *mld;
1232        int addr_type;
1233
1234        /* Our own report looped back. Ignore it. */
1235        if (skb->pkt_type == PACKET_LOOPBACK)
1236                return 0;
1237
1238        /* send our report if the MC router may not have heard this report */
1239        if (skb->pkt_type != PACKET_MULTICAST &&
1240            skb->pkt_type != PACKET_BROADCAST)
1241                return 0;
1242
1243        if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1244                return -EINVAL;
1245
1246        mld = (struct mld_msg *)icmp6_hdr(skb);
1247
 1248        /* Drop reports whose source address is not link-local */
1249        addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1250        if (addr_type != IPV6_ADDR_ANY &&
1251            !(addr_type&IPV6_ADDR_LINKLOCAL))
1252                return -EINVAL;
1253
1254        idev = __in6_dev_get(skb->dev);
1255        if (idev == NULL)
1256                return -ENODEV;
1257
1258        /*
1259         *      Cancel the timer for this group
1260         */
1261
1262        read_lock_bh(&idev->lock);
1263        for (ma = idev->mc_list; ma; ma=ma->next) {
1264                if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1265                        spin_lock(&ma->mca_lock);
1266                        if (del_timer(&ma->mca_timer))
1267                                atomic_dec(&ma->mca_refcnt);
1268                        ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1269                        spin_unlock(&ma->mca_lock);
1270                        break;
1271                }
1272        }
1273        read_unlock_bh(&idev->lock);
1274        return 0;
1275}
1276
1277static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1278        int gdeleted, int sdeleted)
1279{
1280        switch (type) {
1281        case MLD2_MODE_IS_INCLUDE:
1282        case MLD2_MODE_IS_EXCLUDE:
1283                if (gdeleted || sdeleted)
1284                        return 0;
1285                if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1286                        if (pmc->mca_sfmode == MCAST_INCLUDE)
1287                                return 1;
1288                        /* don't include if this source is excluded
1289                         * in all filters
1290                         */
1291                        if (psf->sf_count[MCAST_INCLUDE])
1292                                return type == MLD2_MODE_IS_INCLUDE;
1293                        return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1294                                psf->sf_count[MCAST_EXCLUDE];
1295                }
1296                return 0;
1297        case MLD2_CHANGE_TO_INCLUDE:
1298                if (gdeleted || sdeleted)
1299                        return 0;
1300                return psf->sf_count[MCAST_INCLUDE] != 0;
1301        case MLD2_CHANGE_TO_EXCLUDE:
1302                if (gdeleted || sdeleted)
1303                        return 0;
1304                if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1305                    psf->sf_count[MCAST_INCLUDE])
1306                        return 0;
1307                return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1308                        psf->sf_count[MCAST_EXCLUDE];
1309        case MLD2_ALLOW_NEW_SOURCES:
1310                if (gdeleted || !psf->sf_crcount)
1311                        return 0;
1312                return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1313        case MLD2_BLOCK_OLD_SOURCES:
1314                if (pmc->mca_sfmode == MCAST_INCLUDE)
1315                        return gdeleted || (psf->sf_crcount && sdeleted);
1316                return psf->sf_crcount && !gdeleted && !sdeleted;
1317        }
1318        return 0;
1319}
1320
1321static int
1322mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1323{
1324        struct ip6_sf_list *psf;
1325        int scount = 0;
1326
1327        for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1328                if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1329                        continue;
1330                scount++;
1331        }
1332        return scount;
1333}
1334
1335static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1336{
1337        struct net *net = dev_net(dev);
1338        struct sock *sk = net->ipv6.igmp_sk;
1339        struct sk_buff *skb;
1340        struct mld2_report *pmr;
1341        struct in6_addr addr_buf;
1342        const struct in6_addr *saddr;
1343        int err;
1344        u8 ra[8] = { IPPROTO_ICMPV6, 0,
1345                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
1346                     IPV6_TLV_PADN, 0 };
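        /* The eight bytes above are the Hop-by-Hop extension header that
         * MLD reports must carry: next header ICMPv6, header length 0
         * (i.e. 8 bytes total), a Router Alert option with value 0
         * ("MLD message", RFC 2711) and a PadN option to fill the header.
         */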
1347
1348        /* we assume size > sizeof(ra) here */
1349        size += LL_ALLOCATED_SPACE(dev);
1350        /* limit our allocations to order-0 page */
1351        size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1352        skb = sock_alloc_send_skb(sk, size, 1, &err);
1353
1354        if (!skb)
1355                return NULL;
1356
1357        skb_reserve(skb, LL_RESERVED_SPACE(dev));
1358
1359        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1360                /* <draft-ietf-magma-mld-source-05.txt>:
1361                 * use unspecified address as the source address
1362                 * when a valid link-local address is not available.
1363                 */
1364                saddr = &in6addr_any;
1365        } else
1366                saddr = &addr_buf;
1367
1368        ip6_nd_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1369
1370        memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1371
1372        skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1373        skb_put(skb, sizeof(*pmr));
1374        pmr = (struct mld2_report *)skb_transport_header(skb);
1375        pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1376        pmr->mld2r_resv1 = 0;
1377        pmr->mld2r_cksum = 0;
1378        pmr->mld2r_resv2 = 0;
1379        pmr->mld2r_ngrec = 0;
1380        return skb;
1381}
1382
1383static void mld_sendpack(struct sk_buff *skb)
1384{
1385        struct ipv6hdr *pip6 = ipv6_hdr(skb);
1386        struct mld2_report *pmr =
1387                              (struct mld2_report *)skb_transport_header(skb);
1388        int payload_len, mldlen;
1389        struct inet6_dev *idev;
1390        struct net *net = dev_net(skb->dev);
1391        int err;
1392        struct flowi fl;
1393        struct dst_entry *dst;
1394
1395        rcu_read_lock();
1396        idev = __in6_dev_get(skb->dev);
1397        IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1398
1399        payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
1400        mldlen = skb->tail - skb->transport_header;
1401        pip6->payload_len = htons(payload_len);
1402
1403        pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1404                                           IPPROTO_ICMPV6,
1405                                           csum_partial(skb_transport_header(skb),
1406                                                        mldlen, 0));
1407
1408        dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1409
1410        if (!dst) {
1411                err = -ENOMEM;
1412                goto err_out;
1413        }
1414
1415        icmpv6_flow_init(net->ipv6.igmp_sk, &fl, ICMPV6_MLD2_REPORT,
1416                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1417                         skb->dev->ifindex);
1418
1419        err = xfrm_lookup(net, &dst, &fl, NULL, 0);
1420        skb_dst_set(skb, dst);
1421        if (err)
1422                goto err_out;
1423
1424        payload_len = skb->len;
1425
1426        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1427                      dst_output);
1428out:
1429        if (!err) {
1430                ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
1431                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1432                IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1433        } else
1434                IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1435
1436        rcu_read_unlock();
1437        return;
1438
1439err_out:
1440        kfree_skb(skb);
1441        goto out;
1442}
1443
1444static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1445{
1446        return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1447}
1448
1449static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1450        int type, struct mld2_grec **ppgr)
1451{
1452        struct net_device *dev = pmc->idev->dev;
1453        struct mld2_report *pmr;
1454        struct mld2_grec *pgr;
1455
1456        if (!skb)
1457                skb = mld_newpack(dev, dev->mtu);
1458        if (!skb)
1459                return NULL;
1460        pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
1461        pgr->grec_type = type;
1462        pgr->grec_auxwords = 0;
1463        pgr->grec_nsrcs = 0;
1464        pgr->grec_mca = pmc->mca_addr;  /* structure copy */
1465        pmr = (struct mld2_report *)skb_transport_header(skb);
1466        pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1467        *ppgr = pgr;
1468        return skb;
1469}
1470
1471#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
1472        skb_tailroom(skb)) : 0)
1473
1474static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1475        int type, int gdeleted, int sdeleted)
1476{
1477        struct net_device *dev = pmc->idev->dev;
1478        struct mld2_report *pmr;
1479        struct mld2_grec *pgr = NULL;
1480        struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1481        int scount, stotal, first, isquery, truncate;
1482
1483        if (pmc->mca_flags & MAF_NOREPORT)
1484                return skb;
1485
1486        isquery = type == MLD2_MODE_IS_INCLUDE ||
1487                  type == MLD2_MODE_IS_EXCLUDE;
1488        truncate = type == MLD2_MODE_IS_EXCLUDE ||
1489                    type == MLD2_CHANGE_TO_EXCLUDE;
1490
1491        stotal = scount = 0;
1492
1493        psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1494
1495        if (!*psf_list)
1496                goto empty_source;
1497
1498        pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1499
1500        /* EX and TO_EX get a fresh packet, if needed */
1501        if (truncate) {
1502                if (pmr && pmr->mld2r_ngrec &&
1503                    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1504                        if (skb)
1505                                mld_sendpack(skb);
1506                        skb = mld_newpack(dev, dev->mtu);
1507                }
1508        }
1509        first = 1;
1510        psf_prev = NULL;
1511        for (psf=*psf_list; psf; psf=psf_next) {
1512                struct in6_addr *psrc;
1513
1514                psf_next = psf->sf_next;
1515
1516                if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
1517                        psf_prev = psf;
1518                        continue;
1519                }
1520
1521                /* clear marks on query responses */
1522                if (isquery)
1523                        psf->sf_gsresp = 0;
1524
1525                if (AVAILABLE(skb) < sizeof(*psrc) +
1526                    first*sizeof(struct mld2_grec)) {
1527                        if (truncate && !first)
1528                                break;   /* truncate these */
1529                        if (pgr)
1530                                pgr->grec_nsrcs = htons(scount);
1531                        if (skb)
1532                                mld_sendpack(skb);
1533                        skb = mld_newpack(dev, dev->mtu);
1534                        first = 1;
1535                        scount = 0;
1536                }
1537                if (first) {
1538                        skb = add_grhead(skb, pmc, type, &pgr);
1539                        first = 0;
1540                }
1541                if (!skb)
1542                        return NULL;
1543                psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
1544                *psrc = psf->sf_addr;
1545                scount++; stotal++;
1546                if ((type == MLD2_ALLOW_NEW_SOURCES ||
1547                     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1548                        psf->sf_crcount--;
1549                        if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1550                                if (psf_prev)
1551                                        psf_prev->sf_next = psf->sf_next;
1552                                else
1553                                        *psf_list = psf->sf_next;
1554                                kfree(psf);
1555                                continue;
1556                        }
1557                }
1558                psf_prev = psf;
1559        }
1560
1561empty_source:
1562        if (!stotal) {
1563                if (type == MLD2_ALLOW_NEW_SOURCES ||
1564                    type == MLD2_BLOCK_OLD_SOURCES)
1565                        return skb;
1566                if (pmc->mca_crcount || isquery) {
1567                        /* make sure we have room for group header */
1568                        if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1569                                mld_sendpack(skb);
1570                                skb = NULL; /* add_grhead will get a new one */
1571                        }
1572                        skb = add_grhead(skb, pmc, type, &pgr);
1573                }
1574        }
1575        if (pgr)
1576                pgr->grec_nsrcs = htons(scount);
1577
1578        if (isquery)
1579                pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
1580        return skb;
1581}
1582
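    /* Send MLDv2 current-state (MODE_IS_*) records in response to a
     * query: for a single group when @pmc is given, otherwise for every
     * reportable group on @idev.
     */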
1583static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1584{
1585        struct sk_buff *skb = NULL;
1586        int type;
1587
1588        if (!pmc) {
1589                read_lock_bh(&idev->lock);
1590                for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1591                        if (pmc->mca_flags & MAF_NOREPORT)
1592                                continue;
1593                        spin_lock_bh(&pmc->mca_lock);
1594                        if (pmc->mca_sfcount[MCAST_EXCLUDE])
1595                                type = MLD2_MODE_IS_EXCLUDE;
1596                        else
1597                                type = MLD2_MODE_IS_INCLUDE;
1598                        skb = add_grec(skb, pmc, type, 0, 0);
1599                        spin_unlock_bh(&pmc->mca_lock);
1600                }
1601                read_unlock_bh(&idev->lock);
1602        } else {
1603                spin_lock_bh(&pmc->mca_lock);
1604                if (pmc->mca_sfcount[MCAST_EXCLUDE])
1605                        type = MLD2_MODE_IS_EXCLUDE;
1606                else
1607                        type = MLD2_MODE_IS_INCLUDE;
1608                skb = add_grec(skb, pmc, type, 0, 0);
1609                spin_unlock_bh(&pmc->mca_lock);
1610        }
1611        if (skb)
1612                mld_sendpack(skb);
1613}
1614
1615/*
1616 * remove zero-count source records from a source filter list
1617 */
1618static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1619{
1620        struct ip6_sf_list *psf_prev, *psf_next, *psf;
1621
1622        psf_prev = NULL;
1623        for (psf=*ppsf; psf; psf = psf_next) {
1624                psf_next = psf->sf_next;
1625                if (psf->sf_crcount == 0) {
1626                        if (psf_prev)
1627                                psf_prev->sf_next = psf->sf_next;
1628                        else
1629                                *ppsf = psf->sf_next;
1630                        kfree(psf);
1631                } else
1632                        psf_prev = psf;
1633        }
1634}
1635
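    /* Send MLDv2 state-change records: BLOCK/TO_IN records for groups
     * and sources held on the tomb (deleted) lists, then ALLOW/BLOCK
     * source changes and TO_EX/TO_IN filter-mode changes for active
     * groups.  Tomb entries are freed once their retransmit count
     * reaches zero and they have nothing left to report.
     */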
1636static void mld_send_cr(struct inet6_dev *idev)
1637{
1638        struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1639        struct sk_buff *skb = NULL;
1640        int type, dtype;
1641
1642        read_lock_bh(&idev->lock);
1643        spin_lock(&idev->mc_lock);
1644
1645        /* deleted MCA's */
1646        pmc_prev = NULL;
1647        for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
1648                pmc_next = pmc->next;
1649                if (pmc->mca_sfmode == MCAST_INCLUDE) {
1650                        type = MLD2_BLOCK_OLD_SOURCES;
1651                        dtype = MLD2_BLOCK_OLD_SOURCES;
1652                        skb = add_grec(skb, pmc, type, 1, 0);
1653                        skb = add_grec(skb, pmc, dtype, 1, 1);
1654                }
1655                if (pmc->mca_crcount) {
1656                        if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1657                                type = MLD2_CHANGE_TO_INCLUDE;
1658                                skb = add_grec(skb, pmc, type, 1, 0);
1659                        }
1660                        pmc->mca_crcount--;
1661                        if (pmc->mca_crcount == 0) {
1662                                mld_clear_zeros(&pmc->mca_tomb);
1663                                mld_clear_zeros(&pmc->mca_sources);
1664                        }
1665                }
1666                if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1667                    !pmc->mca_sources) {
1668                        if (pmc_prev)
1669                                pmc_prev->next = pmc_next;
1670                        else
1671                                idev->mc_tomb = pmc_next;
1672                        in6_dev_put(pmc->idev);
1673                        kfree(pmc);
1674                } else
1675                        pmc_prev = pmc;
1676        }
1677        spin_unlock(&idev->mc_lock);
1678
1679        /* change recs */
1680        for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1681                spin_lock_bh(&pmc->mca_lock);
1682                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1683                        type = MLD2_BLOCK_OLD_SOURCES;
1684                        dtype = MLD2_ALLOW_NEW_SOURCES;
1685                } else {
1686                        type = MLD2_ALLOW_NEW_SOURCES;
1687                        dtype = MLD2_BLOCK_OLD_SOURCES;
1688                }
1689                skb = add_grec(skb, pmc, type, 0, 0);
1690                skb = add_grec(skb, pmc, dtype, 0, 1);  /* deleted sources */
1691
1692                /* filter mode changes */
1693                if (pmc->mca_crcount) {
1694                        if (pmc->mca_sfmode == MCAST_EXCLUDE)
1695                                type = MLD2_CHANGE_TO_EXCLUDE;
1696                        else
1697                                type = MLD2_CHANGE_TO_INCLUDE;
1698                        skb = add_grec(skb, pmc, type, 0, 0);
1699                        pmc->mca_crcount--;
1700                }
1701                spin_unlock_bh(&pmc->mca_lock);
1702        }
1703        read_unlock_bh(&idev->lock);
1704        if (!skb)
1705                return;
1706        (void) mld_sendpack(skb);
1707}
1708
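    /* Build and send an MLDv1 message (Report, or Done to the
     * all-routers group) for @addr on @dev, using the per-net MLD
     * control socket: link-local (or unspecified) source address,
     * hop-by-hop router alert option and ICMPv6 checksum.
     */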
1709static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1710{
1711        struct net *net = dev_net(dev);
1712        struct sock *sk = net->ipv6.igmp_sk;
1713        struct inet6_dev *idev;
1714        struct sk_buff *skb;
1715        struct mld_msg *hdr;
1716        const struct in6_addr *snd_addr, *saddr;
1717        struct in6_addr addr_buf;
1718        int err, len, payload_len, full_len;
1719        u8 ra[8] = { IPPROTO_ICMPV6, 0,
1720                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
1721                     IPV6_TLV_PADN, 0 };
1722        struct flowi fl;
1723        struct dst_entry *dst;
1724
1725        if (type == ICMPV6_MGM_REDUCTION)
1726                snd_addr = &in6addr_linklocal_allrouters;
1727        else
1728                snd_addr = addr;
1729
1730        len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
1731        payload_len = len + sizeof(ra);
1732        full_len = sizeof(struct ipv6hdr) + payload_len;
1733
1734        rcu_read_lock();
1735        IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
1736                      IPSTATS_MIB_OUT, full_len);
1737        rcu_read_unlock();
1738
1739        skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
1740
1741        if (skb == NULL) {
1742                rcu_read_lock();
1743                IP6_INC_STATS(net, __in6_dev_get(dev),
1744                              IPSTATS_MIB_OUTDISCARDS);
1745                rcu_read_unlock();
1746                return;
1747        }
1748
1749        skb_reserve(skb, LL_RESERVED_SPACE(dev));
1750
1751        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1752                /* <draft-ietf-magma-mld-source-05.txt>:
1753                 * use unspecified address as the source address
1754                 * when a valid link-local address is not available.
1755                 */
1756                saddr = &in6addr_any;
1757        } else
1758                saddr = &addr_buf;
1759
1760        ip6_nd_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
1761
1762        memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1763
1764        hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
1765        memset(hdr, 0, sizeof(struct mld_msg));
1766        hdr->mld_type = type;
1767        ipv6_addr_copy(&hdr->mld_mca, addr);
1768
1769        hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1770                                         IPPROTO_ICMPV6,
1771                                         csum_partial(hdr, len, 0));
1772
1773        rcu_read_lock();
1774        idev = __in6_dev_get(skb->dev);
1775
1776        dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1777        if (!dst) {
1778                err = -ENOMEM;
1779                goto err_out;
1780        }
1781
1782        icmpv6_flow_init(sk, &fl, type,
1783                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1784                         skb->dev->ifindex);
1785
1786        err = xfrm_lookup(net, &dst, &fl, NULL, 0);
1787        if (err)
1788                goto err_out;
1789
1790        skb_dst_set(skb, dst);
1791        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1792                      dst_output);
1793out:
1794        if (!err) {
1795                ICMP6MSGOUT_INC_STATS(net, idev, type);
1796                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1797                IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1798        } else
1799                IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1800
1801        rcu_read_unlock();
1802        return;
1803
1804err_out:
1805        kfree_skb(skb);
1806        goto out;
1807}
1808
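    /* Drop one @sfmode reference on source @psfsrc of @pmc.  When the
     * last reference goes away the filter is unlinked; if the source
     * was part of the reported state, the group is reportable and MLDv1
     * compatibility is not active, it is parked on the tomb list for
     * BLOCK retransmissions.  Returns 1 if a state-change record is
     * needed, 0 otherwise, or a negative error.
     */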
1809static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1810        struct in6_addr *psfsrc)
1811{
1812        struct ip6_sf_list *psf, *psf_prev;
1813        int rv = 0;
1814
1815        psf_prev = NULL;
1816        for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1817                if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
1818                        break;
1819                psf_prev = psf;
1820        }
1821        if (!psf || psf->sf_count[sfmode] == 0) {
1822                /* source filter not found, or count wrong => bug */
1823                return -ESRCH;
1824        }
1825        psf->sf_count[sfmode]--;
1826        if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
1827                struct inet6_dev *idev = pmc->idev;
1828
1829                /* no more filters for this source */
1830                if (psf_prev)
1831                        psf_prev->sf_next = psf->sf_next;
1832                else
1833                        pmc->mca_sources = psf->sf_next;
1834                if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
1835                    !MLD_V1_SEEN(idev)) {
1836                        psf->sf_crcount = idev->mc_qrv;
1837                        psf->sf_next = pmc->mca_tomb;
1838                        pmc->mca_tomb = psf;
1839                        rv = 1;
1840                } else
1841                        kfree(psf);
1842        }
1843        return rv;
1844}
1845
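    /* Remove @sfcount sources (and, for a non-delta update, one @sfmode
     * reference) from group @pmca on @idev.  Switches the group back to
     * INCLUDE mode when its last EXCLUDE reference disappears and
     * schedules an interface change report if anything changed.
     */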
1846static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
1847                          int sfmode, int sfcount, struct in6_addr *psfsrc,
1848                          int delta)
1849{
1850        struct ifmcaddr6 *pmc;
1851        int     changerec = 0;
1852        int     i, err;
1853
1854        if (!idev)
1855                return -ENODEV;
1856        read_lock_bh(&idev->lock);
1857        for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1858                if (ipv6_addr_equal(pmca, &pmc->mca_addr))
1859                        break;
1860        }
1861        if (!pmc) {
1862                /* MCA not found?? bug */
1863                read_unlock_bh(&idev->lock);
1864                return -ESRCH;
1865        }
1866        spin_lock_bh(&pmc->mca_lock);
1867        sf_markstate(pmc);
1868        if (!delta) {
1869                if (!pmc->mca_sfcount[sfmode]) {
1870                        spin_unlock_bh(&pmc->mca_lock);
1871                        read_unlock_bh(&idev->lock);
1872                        return -EINVAL;
1873                }
1874                pmc->mca_sfcount[sfmode]--;
1875        }
1876        err = 0;
1877        for (i=0; i<sfcount; i++) {
1878                int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
1879
1880                changerec |= rv > 0;
1881                if (!err && rv < 0)
1882                        err = rv;
1883        }
1884        if (pmc->mca_sfmode == MCAST_EXCLUDE &&
1885            pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
1886            pmc->mca_sfcount[MCAST_INCLUDE]) {
1887                struct ip6_sf_list *psf;
1888
1889                /* filter mode change */
1890                pmc->mca_sfmode = MCAST_INCLUDE;
1891                pmc->mca_crcount = idev->mc_qrv;
1892                idev->mc_ifc_count = pmc->mca_crcount;
1893                for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
1894                        psf->sf_crcount = 0;
1895                mld_ifc_event(pmc->idev);
1896        } else if (sf_setstate(pmc) || changerec)
1897                mld_ifc_event(pmc->idev);
1898        spin_unlock_bh(&pmc->mca_lock);
1899        read_unlock_bh(&idev->lock);
1900        return err;
1901}
1902
1903/*
1904 * Add multicast single-source filter to the interface list
1905 */
1906static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
1907        struct in6_addr *psfsrc, int delta)
1908{
1909        struct ip6_sf_list *psf, *psf_prev;
1910
1911        psf_prev = NULL;
1912        for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1913                if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
1914                        break;
1915                psf_prev = psf;
1916        }
1917        if (!psf) {
1918                psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
1919                if (!psf)
1920                        return -ENOBUFS;
1921
1922                psf->sf_addr = *psfsrc;
1923                if (psf_prev) {
1924                        psf_prev->sf_next = psf;
1925                } else
1926                        pmc->mca_sources = psf;
1927        }
1928        psf->sf_count[sfmode]++;
1929        return 0;
1930}
1931
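    /* Record in sf_oldin whether each source is currently part of the
     * reported state, given the group's EXCLUDE/INCLUDE reference
     * counts, so that sf_setstate() can detect transitions after the
     * filter lists have been modified.
     */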
1932static void sf_markstate(struct ifmcaddr6 *pmc)
1933{
1934        struct ip6_sf_list *psf;
1935        int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
1936
1937        for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
1938                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1939                        psf->sf_oldin = mca_xcount ==
1940                                psf->sf_count[MCAST_EXCLUDE] &&
1941                                !psf->sf_count[MCAST_INCLUDE];
1942                } else
1943                        psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
1944}
1945
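    /* Compare each source's new inclusion state with sf_oldin: newly
     * active sources get ALLOW retransmissions scheduled (and any stale
     * tomb entry dropped), newly inactive ones get a "delete" record on
     * the tomb list for BLOCK retransmissions.  Returns the number of
     * sources whose state changed.
     */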
1946static int sf_setstate(struct ifmcaddr6 *pmc)
1947{
1948        struct ip6_sf_list *psf, *dpsf;
1949        int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
1950        int qrv = pmc->idev->mc_qrv;
1951        int new_in, rv;
1952
1953        rv = 0;
1954        for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1955                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1956                        new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
1957                                !psf->sf_count[MCAST_INCLUDE];
1958                } else
1959                        new_in = psf->sf_count[MCAST_INCLUDE] != 0;
1960                if (new_in) {
1961                        if (!psf->sf_oldin) {
1962                                struct ip6_sf_list *prev = NULL;
1963
1964                                for (dpsf=pmc->mca_tomb; dpsf;
1965                                     dpsf=dpsf->sf_next) {
1966                                        if (ipv6_addr_equal(&dpsf->sf_addr,
1967                                            &psf->sf_addr))
1968                                                break;
1969                                        prev = dpsf;
1970                                }
1971                                if (dpsf) {
1972                                        if (prev)
1973                                                prev->sf_next = dpsf->sf_next;
1974                                        else
1975                                                pmc->mca_tomb = dpsf->sf_next;
1976                                        kfree(dpsf);
1977                                }
1978                                psf->sf_crcount = qrv;
1979                                rv++;
1980                        }
1981                } else if (psf->sf_oldin) {
1982                        psf->sf_crcount = 0;
1983                        /*
1984                         * add or update "delete" records if an active filter
1985                         * is now inactive
1986                         */
1987                        for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
1988                                if (ipv6_addr_equal(&dpsf->sf_addr,
1989                                    &psf->sf_addr))
1990                                        break;
1991                        if (!dpsf) {
1992                                dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
1993                                if (!dpsf)
1994                                        continue;
1995                                *dpsf = *psf;
1996                                /* pmc->mca_lock held by callers */
1997                                dpsf->sf_next = pmc->mca_tomb;
1998                                pmc->mca_tomb = dpsf;
1999                        }
2000                        dpsf->sf_crcount = qrv;
2001                        rv++;
2002                }
2003        }
2004        return rv;
2005}
2006
2007/*
2008 * Add multicast source filter list to the interface list
2009 */
2010static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca,
2011                          int sfmode, int sfcount, struct in6_addr *psfsrc,
2012                          int delta)
2013{
2014        struct ifmcaddr6 *pmc;
2015        int     isexclude;
2016        int     i, err;
2017
2018        if (!idev)
2019                return -ENODEV;
2020        read_lock_bh(&idev->lock);
2021        for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2022                if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2023                        break;
2024        }
2025        if (!pmc) {
2026                /* MCA not found?? bug */
2027                read_unlock_bh(&idev->lock);
2028                return -ESRCH;
2029        }
2030        spin_lock_bh(&pmc->mca_lock);
2031
2032        sf_markstate(pmc);
2033        isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2034        if (!delta)
2035                pmc->mca_sfcount[sfmode]++;
2036        err = 0;
2037        for (i=0; i<sfcount; i++) {
2038                err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
2039                if (err)
2040                        break;
2041        }
2042        if (err) {
2043                int j;
2044
2045                if (!delta)
2046                        pmc->mca_sfcount[sfmode]--;
2047                for (j=0; j<i; j++)
2048                        (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2049        } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2050                struct ip6_sf_list *psf;
2051
2052                /* filter mode change */
2053                if (pmc->mca_sfcount[MCAST_EXCLUDE])
2054                        pmc->mca_sfmode = MCAST_EXCLUDE;
2055                else if (pmc->mca_sfcount[MCAST_INCLUDE])
2056                        pmc->mca_sfmode = MCAST_INCLUDE;
2057                /* else no filters; keep old mode for reports */
2058
2059                pmc->mca_crcount = idev->mc_qrv;
2060                idev->mc_ifc_count = pmc->mca_crcount;
2061                for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
2062                        psf->sf_crcount = 0;
2063                mld_ifc_event(idev);
2064        } else if (sf_setstate(pmc))
2065                mld_ifc_event(idev);
2066        spin_unlock_bh(&pmc->mca_lock);
2067        read_unlock_bh(&idev->lock);
2068        return err;
2069}
2070
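    /* Free every source filter on @pmc (active and tomb lists) and
     * reset the group to its default any-source state: EXCLUDE mode
     * with an empty source list.
     */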
2071static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2072{
2073        struct ip6_sf_list *psf, *nextpsf;
2074
2075        for (psf=pmc->mca_tomb; psf; psf=nextpsf) {
2076                nextpsf = psf->sf_next;
2077                kfree(psf);
2078        }
2079        pmc->mca_tomb = NULL;
2080        for (psf=pmc->mca_sources; psf; psf=nextpsf) {
2081                nextpsf = psf->sf_next;
2082                kfree(psf);
2083        }
2084        pmc->mca_sources = NULL;
2085        pmc->mca_sfmode = MCAST_EXCLUDE;
2086        pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2087        pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2088}
2089
2090
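    /* Send an unsolicited Report for a newly joined group (unless it is
     * flagged MAF_NOREPORT) and arm the per-group timer with a random
     * delay so the report is retransmitted once.
     */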
2091static void igmp6_join_group(struct ifmcaddr6 *ma)
2092{
2093        unsigned long delay;
2094
2095        if (ma->mca_flags & MAF_NOREPORT)
2096                return;
2097
2098        igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2099
2100        delay = net_random() % IGMP6_UNSOLICITED_IVAL;
2101
2102        spin_lock_bh(&ma->mca_lock);
2103        if (del_timer(&ma->mca_timer)) {
2104                atomic_dec(&ma->mca_refcnt);
2105                delay = ma->mca_timer.expires - jiffies;
2106        }
2107
2108        if (!mod_timer(&ma->mca_timer, jiffies + delay))
2109                atomic_inc(&ma->mca_refcnt);
2110        ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2111        spin_unlock_bh(&ma->mca_lock);
2112}
2113
2114static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2115                            struct inet6_dev *idev)
2116{
2117        int err;
2118
2119        /* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
2120         * so no other readers or writers of iml or its sflist
2121         */
2122        if (!iml->sflist) {
2123                /* any-source empty exclude case */
2124                return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2125        }
2126        err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2127                iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2128        sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2129        iml->sflist = NULL;
2130        return err;
2131}
2132
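    /* Leave a group: in MLDv1 compatibility mode send a Done message if
     * we were the last host to report it; otherwise queue a delete
     * record and trigger an MLDv2 interface change report.
     */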
2133static void igmp6_leave_group(struct ifmcaddr6 *ma)
2134{
2135        if (MLD_V1_SEEN(ma->idev)) {
2136                if (ma->mca_flags & MAF_LAST_REPORTER)
2137                        igmp6_send(&ma->mca_addr, ma->idev->dev,
2138                                ICMPV6_MGM_REDUCTION);
2139        } else {
2140                mld_add_delrec(ma->idev, ma);
2141                mld_ifc_event(ma->idev);
2142        }
2143}
2144
2145static void mld_gq_timer_expire(unsigned long data)
2146{
2147        struct inet6_dev *idev = (struct inet6_dev *)data;
2148
2149        idev->mc_gq_running = 0;
2150        mld_send_report(idev, NULL);
2151        __in6_dev_put(idev);
2152}
2153
2154static void mld_ifc_timer_expire(unsigned long data)
2155{
2156        struct inet6_dev *idev = (struct inet6_dev *)data;
2157
2158        mld_send_cr(idev);
2159        if (idev->mc_ifc_count) {
2160                idev->mc_ifc_count--;
2161                if (idev->mc_ifc_count)
2162                        mld_ifc_start_timer(idev, idev->mc_maxdelay);
2163        }
2164        __in6_dev_put(idev);
2165}
2166
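    /* A filter-mode or source-list change happened on the interface:
     * unless MLDv1 compatibility is active, schedule mc_qrv
     * retransmissions of the state-change report, starting almost
     * immediately.
     */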
2167static void mld_ifc_event(struct inet6_dev *idev)
2168{
2169        if (MLD_V1_SEEN(idev))
2170                return;
2171        idev->mc_ifc_count = idev->mc_qrv;
2172        mld_ifc_start_timer(idev, 1);
2173}
2174
2175
2176static void igmp6_timer_handler(unsigned long data)
2177{
2178        struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
2179
2180        if (MLD_V1_SEEN(ma->idev))
2181                igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2182        else
2183                mld_send_report(ma->idev, ma);
2184
2185        spin_lock(&ma->mca_lock);
2186        ma->mca_flags |=  MAF_LAST_REPORTER;
2187        ma->mca_flags &= ~MAF_TIMER_RUNNING;
2188        spin_unlock(&ma->mca_lock);
2189        ma_put(ma);
2190}
2191
2192/* Device changing type */
2193
2194void ipv6_mc_unmap(struct inet6_dev *idev)
2195{
2196        struct ifmcaddr6 *i;
2197
2198        /* Withdraw the multicast list while the device changes type */
2199
2200        read_lock_bh(&idev->lock);
2201        for (i = idev->mc_list; i; i = i->next)
2202                igmp6_group_dropped(i);
2203        read_unlock_bh(&idev->lock);
2204}
2205
2206void ipv6_mc_remap(struct inet6_dev *idev)
2207{
2208        ipv6_mc_up(idev);
2209}
2210
2211/* Device going down */
2212
2213void ipv6_mc_down(struct inet6_dev *idev)
2214{
2215        struct ifmcaddr6 *i;
2216
2217        /* Withdraw multicast list */
2218
2219        read_lock_bh(&idev->lock);
2220        idev->mc_ifc_count = 0;
2221        if (del_timer(&idev->mc_ifc_timer))
2222                __in6_dev_put(idev);
2223        idev->mc_gq_running = 0;
2224        if (del_timer(&idev->mc_gq_timer))
2225                __in6_dev_put(idev);
2226
2227        for (i = idev->mc_list; i; i=i->next)
2228                igmp6_group_dropped(i);
2229        read_unlock_bh(&idev->lock);
2230
2231        mld_clear_delrec(idev);
2232}
2233
2234
2235/* Device going up */
2236
2237void ipv6_mc_up(struct inet6_dev *idev)
2238{
2239        struct ifmcaddr6 *i;
2240
2241        /* Install multicast list, except for all-nodes (already installed) */
2242
2243        read_lock_bh(&idev->lock);
2244        for (i = idev->mc_list; i; i=i->next)
2245                igmp6_group_added(i);
2246        read_unlock_bh(&idev->lock);
2247}
2248
2249/* IPv6 device initialization. */
2250
2251void ipv6_mc_init_dev(struct inet6_dev *idev)
2252{
2253        write_lock_bh(&idev->lock);
2254        spin_lock_init(&idev->mc_lock);
2255        idev->mc_gq_running = 0;
2256        setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
2257                        (unsigned long)idev);
2258        idev->mc_tomb = NULL;
2259        idev->mc_ifc_count = 0;
2260        setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
2261                        (unsigned long)idev);
2262        idev->mc_qrv = MLD_QRV_DEFAULT;
2263        idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
2264        idev->mc_v1_seen = 0;
2265        write_unlock_bh(&idev->lock);
2266}
2267
2268/*
2269 *      Device is about to be destroyed: clean up.
2270 */
2271
2272void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2273{
2274        struct ifmcaddr6 *i;
2275
2276        /* Deactivate timers */
2277        ipv6_mc_down(idev);
2278
2279        /* Delete all-nodes address. */
2280        /* We cannot call ipv6_dev_mc_dec() directly, our caller in
2281         * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2282         * fail.
2283         */
2284        __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2285
2286        if (idev->cnf.forwarding)
2287                __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2288
2289        write_lock_bh(&idev->lock);
2290        while ((i = idev->mc_list) != NULL) {
2291                idev->mc_list = i->next;
2292                write_unlock_bh(&idev->lock);
2293
2294                igmp6_group_dropped(i);
2295                ma_put(i);
2296
2297                write_lock_bh(&idev->lock);
2298        }
2299        write_unlock_bh(&idev->lock);
2300}
2301
2302#ifdef CONFIG_PROC_FS
2303struct igmp6_mc_iter_state {
2304        struct seq_net_private p;
2305        struct net_device *dev;
2306        struct inet6_dev *idev;
2307};
2308
2309#define igmp6_mc_seq_private(seq)       ((struct igmp6_mc_iter_state *)(seq)->private)
2310
2311static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2312{
2313        struct ifmcaddr6 *im = NULL;
2314        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2315        struct net *net = seq_file_net(seq);
2316
2317        state->idev = NULL;
2318        for_each_netdev_rcu(net, state->dev) {
2319                struct inet6_dev *idev;
2320                idev = __in6_dev_get(state->dev);
2321                if (!idev)
2322                        continue;
2323                read_lock_bh(&idev->lock);
2324                im = idev->mc_list;
2325                if (im) {
2326                        state->idev = idev;
2327                        break;
2328                }
2329                read_unlock_bh(&idev->lock);
2330        }
2331        return im;
2332}
2333
2334static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2335{
2336        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2337
2338        im = im->next;
2339        while (!im) {
2340                if (likely(state->idev != NULL))
2341                        read_unlock_bh(&state->idev->lock);
2342
2343                state->dev = next_net_device_rcu(state->dev);
2344                if (!state->dev) {
2345                        state->idev = NULL;
2346                        break;
2347                }
2348                state->idev = __in6_dev_get(state->dev);
2349                if (!state->idev)
2350                        continue;
2351                read_lock_bh(&state->idev->lock);
2352                im = state->idev->mc_list;
2353        }
2354        return im;
2355}
2356
2357static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2358{
2359        struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2360        if (im)
2361                while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2362                        --pos;
2363        return pos ? NULL : im;
2364}
2365
2366static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2367        __acquires(RCU)
2368{
2369        rcu_read_lock();
2370        return igmp6_mc_get_idx(seq, *pos);
2371}
2372
2373static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2374{
2375        struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2376
2377        ++*pos;
2378        return im;
2379}
2380
2381static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2382        __releases(RCU)
2383{
2384        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2385
2386        if (likely(state->idev != NULL)) {
2387                read_unlock_bh(&state->idev->lock);
2388                state->idev = NULL;
2389        }
2390        state->dev = NULL;
2391        rcu_read_unlock();
2392}
2393
2394static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2395{
2396        struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2397        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2398
2399        seq_printf(seq,
2400                   "%-4d %-15s %pi6 %5d %08X %ld\n",
2401                   state->dev->ifindex, state->dev->name,
2402                   &im->mca_addr,
2403                   im->mca_users, im->mca_flags,
2404                   (im->mca_flags&MAF_TIMER_RUNNING) ?
2405                   jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
2406        return 0;
2407}
2408
2409static const struct seq_operations igmp6_mc_seq_ops = {
2410        .start  =       igmp6_mc_seq_start,
2411        .next   =       igmp6_mc_seq_next,
2412        .stop   =       igmp6_mc_seq_stop,
2413        .show   =       igmp6_mc_seq_show,
2414};
2415
2416static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
2417{
2418        return seq_open_net(inode, file, &igmp6_mc_seq_ops,
2419                            sizeof(struct igmp6_mc_iter_state));
2420}
2421
2422static const struct file_operations igmp6_mc_seq_fops = {
2423        .owner          =       THIS_MODULE,
2424        .open           =       igmp6_mc_seq_open,
2425        .read           =       seq_read,
2426        .llseek         =       seq_lseek,
2427        .release        =       seq_release_net,
2428};
2429
2430struct igmp6_mcf_iter_state {
2431        struct seq_net_private p;
2432        struct net_device *dev;
2433        struct inet6_dev *idev;
2434        struct ifmcaddr6 *im;
2435};
2436
2437#define igmp6_mcf_seq_private(seq)      ((struct igmp6_mcf_iter_state *)(seq)->private)
2438
2439static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2440{
2441        struct ip6_sf_list *psf = NULL;
2442        struct ifmcaddr6 *im = NULL;
2443        struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2444        struct net *net = seq_file_net(seq);
2445
2446        state->idev = NULL;
2447        state->im = NULL;
2448        for_each_netdev_rcu(net, state->dev) {
2449                struct inet6_dev *idev;
2450                idev = __in6_dev_get(state->dev);
2451                if (unlikely(idev == NULL))
2452                        continue;
2453                read_lock_bh(&idev->lock);
2454                im = idev->mc_list;
2455                if (likely(im != NULL)) {
2456                        spin_lock_bh(&im->mca_lock);
2457                        psf = im->mca_sources;
2458                        if (likely(psf != NULL)) {
2459                                state->im = im;
2460                                state->idev = idev;
2461                                break;
2462                        }
2463                        spin_unlock_bh(&im->mca_lock);
2464                }
2465                read_unlock_bh(&idev->lock);
2466        }
2467        return psf;
2468}
2469
2470static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
2471{
2472        struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2473
2474        psf = psf->sf_next;
2475        while (!psf) {
2476                spin_unlock_bh(&state->im->mca_lock);
2477                state->im = state->im->next;
2478                while (!state->im) {
2479                        if (likely(state->idev != NULL))
2480                                read_unlock_bh(&state->idev->lock);
2481
2482                        state->dev = next_net_device_rcu(state->dev);
2483                        if (!state->dev) {
2484                                state->idev = NULL;
2485                                goto out;
2486                        }
2487                        state->idev = __in6_dev_get(state->dev);
2488                        if (!state->idev)
2489                                continue;
2490                        read_lock_bh(&state->idev->lock);
2491                        state->im = state->idev->mc_list;
2492                }
2493                if (!state->im)
2494                        break;
2495                spin_lock_bh(&state->im->mca_lock);
2496                psf = state->im->mca_sources;
2497        }
2498out:
2499        return psf;
2500}
2501
2502static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2503{
2504        struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
2505        if (psf)
2506                while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
2507                        --pos;
2508        return pos ? NULL : psf;
2509}
2510
2511static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2512        __acquires(RCU)
2513{
2514        rcu_read_lock();
2515        return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2516}
2517
2518static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2519{
2520        struct ip6_sf_list *psf;
2521        if (v == SEQ_START_TOKEN)
2522                psf = igmp6_mcf_get_first(seq);
2523        else
2524                psf = igmp6_mcf_get_next(seq, v);
2525        ++*pos;
2526        return psf;
2527}
2528
2529static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2530        __releases(RCU)
2531{
2532        struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2533        if (likely(state->im != NULL)) {
2534                spin_unlock_bh(&state->im->mca_lock);
2535                state->im = NULL;
2536        }
2537        if (likely(state->idev != NULL)) {
2538                read_unlock_bh(&state->idev->lock);
2539                state->idev = NULL;
2540        }
2541        state->dev = NULL;
2542        rcu_read_unlock();
2543}
2544
2545static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2546{
2547        struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
2548        struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2549
2550        if (v == SEQ_START_TOKEN) {
2551                seq_printf(seq,
2552                           "%3s %6s "
2553                           "%32s %32s %6s %6s\n", "Idx",
2554                           "Device", "Multicast Address",
2555                           "Source Address", "INC", "EXC");
2556        } else {
2557                seq_printf(seq,
2558                           "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
2559                           state->dev->ifindex, state->dev->name,
2560                           &state->im->mca_addr,
2561                           &psf->sf_addr,
2562                           psf->sf_count[MCAST_INCLUDE],
2563                           psf->sf_count[MCAST_EXCLUDE]);
2564        }
2565        return 0;
2566}
2567
2568static const struct seq_operations igmp6_mcf_seq_ops = {
2569        .start  =       igmp6_mcf_seq_start,
2570        .next   =       igmp6_mcf_seq_next,
2571        .stop   =       igmp6_mcf_seq_stop,
2572        .show   =       igmp6_mcf_seq_show,
2573};
2574
2575static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
2576{
2577        return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
2578                            sizeof(struct igmp6_mcf_iter_state));
2579}
2580
2581static const struct file_operations igmp6_mcf_seq_fops = {
2582        .owner          =       THIS_MODULE,
2583        .open           =       igmp6_mcf_seq_open,
2584        .read           =       seq_read,
2585        .llseek         =       seq_lseek,
2586        .release        =       seq_release_net,
2587};
2588
2589static int __net_init igmp6_proc_init(struct net *net)
2590{
2591        int err;
2592
2593        err = -ENOMEM;
2594        if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops))
2595                goto out;
2596        if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO,
2597                                  &igmp6_mcf_seq_fops))
2598                goto out_proc_net_igmp6;
2599
2600        err = 0;
2601out:
2602        return err;
2603
2604out_proc_net_igmp6:
2605        proc_net_remove(net, "igmp6");
2606        goto out;
2607}
2608
2609static void __net_exit igmp6_proc_exit(struct net *net)
2610{
2611        proc_net_remove(net, "mcfilter6");
2612        proc_net_remove(net, "igmp6");
2613}
2614#else
2615static inline int igmp6_proc_init(struct net *net)
2616{
2617        return 0;
2618}
2619static inline void igmp6_proc_exit(struct net *net)
2620{
2621}
2622#endif
2623
2624static int __net_init igmp6_net_init(struct net *net)
2625{
2626        int err;
2627
2628        err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2629                                   SOCK_RAW, IPPROTO_ICMPV6, net);
2630        if (err < 0) {
2631                printk(KERN_ERR
2632                       "Failed to initialize the IGMP6 control socket (err %d).\n",
2633                       err);
2634                goto out;
2635        }
2636
2637        inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
2638
2639        err = igmp6_proc_init(net);
2640        if (err)
2641                goto out_sock_create;
2642out:
2643        return err;
2644
2645out_sock_create:
2646        inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2647        goto out;
2648}
2649
2650static void __net_exit igmp6_net_exit(struct net *net)
2651{
2652        inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2653        igmp6_proc_exit(net);
2654}
2655
2656static struct pernet_operations igmp6_net_ops = {
2657        .init = igmp6_net_init,
2658        .exit = igmp6_net_exit,
2659};
2660
2661int __init igmp6_init(void)
2662{
2663        return register_pernet_subsys(&igmp6_net_ops);
2664}
2665
2666void igmp6_cleanup(void)
2667{
2668        unregister_pernet_subsys(&igmp6_net_ops);
2669}
2670