path: root/sys/netinet6/in6_mcast.c
author    Stephen Hurd <shurd@FreeBSD.org>    2018-05-02 19:36:29 +0000
committer Stephen Hurd <shurd@FreeBSD.org>    2018-05-02 19:36:29 +0000
commit    f3e1324b41e54f1a3b148541ddcacdbe647d2214 (patch)
tree      605fb041714cffab9218069efe9ade83faf4843d /sys/netinet6/in6_mcast.c
parent    adb947a67ad23f81deea85fd765d98493db1713d (diff)
download  src-test2-f3e1324b41e54f1a3b148541ddcacdbe647d2214.tar.gz
          src-test2-f3e1324b41e54f1a3b148541ddcacdbe647d2214.zip
Diffstat (limited to 'sys/netinet6/in6_mcast.c')
-rw-r--r--  sys/netinet6/in6_mcast.c  263
1 file changed, 164 insertions(+), 99 deletions(-)
diff --git a/sys/netinet6/in6_mcast.c b/sys/netinet6/in6_mcast.c
index 2d37ba9802b4..fe8da6685770 100644
--- a/sys/netinet6/in6_mcast.c
+++ b/sys/netinet6/in6_mcast.c
@@ -41,13 +41,13 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
-#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/ktr.h>
@@ -59,8 +59,12 @@ __FBSDID("$FreeBSD$");
#include <net/route.h>
#include <net/vnet.h>
+
#include <netinet/in.h>
+#include <netinet/udp.h>
#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+#include <netinet/udp_var.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
@@ -89,7 +93,7 @@ typedef union sockunion sockunion_t;
static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
"IPv6 multicast PCB-layer source filter");
-static MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
+MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
"IPv6 multicast MLD-layer source filter");
@@ -107,8 +111,16 @@ RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);
* any need for in6_multi itself to be virtualized -- it is bound to an ifp
* anyway no matter what happens.
*/
-struct mtx in6_multi_mtx;
-MTX_SYSINIT(in6_multi_mtx, &in6_multi_mtx, "in6_multi_mtx", MTX_DEF);
+struct mtx in6_multi_list_mtx;
+MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF);
+
+struct mtx in6_multi_free_mtx;
+MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF);
+
+struct sx in6_multi_sx;
+SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx");
+
+
static void im6f_commit(struct in6_mfilter *);
static int im6f_get_source(struct in6_mfilter *imf,
@@ -130,7 +142,7 @@ static struct in6_msource *
const struct sockaddr *);
static void im6s_merge(struct ip6_msource *ims,
const struct in6_msource *lims, const int rollback);
-static int in6_mc_get(struct ifnet *, const struct in6_addr *,
+static int in6_getmulti(struct ifnet *, const struct in6_addr *,
struct in6_multi **);
static int in6m_get_source(struct in6_multi *inm,
const struct in6_addr *addr, const int noalloc,
@@ -389,7 +401,7 @@ im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
* Return 0 if successful, otherwise return an appropriate error code.
*/
static int
-in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
+in6_getmulti(struct ifnet *ifp, const struct in6_addr *group,
struct in6_multi **pinm)
{
struct sockaddr_in6 gsin6;
@@ -405,8 +417,8 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
* re-acquire around the call.
*/
IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK();
IF_ADDR_WLOCK(ifp);
-
inm = in6m_lookup_locked(ifp, group);
if (inm != NULL) {
/*
@@ -415,7 +427,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
*/
KASSERT(inm->in6m_refcount >= 1,
("%s: bad refcount %d", __func__, inm->in6m_refcount));
- ++inm->in6m_refcount;
+ in6m_acquire_locked(inm);
*pinm = inm;
goto out_locked;
}
@@ -429,10 +441,12 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
* Check if a link-layer group is already associated
* with this network-layer group on the given ifnet.
*/
+ IN6_MULTI_LIST_UNLOCK();
IF_ADDR_WUNLOCK(ifp);
error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma);
if (error != 0)
return (error);
+ IN6_MULTI_LIST_LOCK();
IF_ADDR_WLOCK(ifp);
/*
@@ -455,7 +469,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
panic("%s: ifma %p is inconsistent with %p (%p)",
__func__, ifma, inm, group);
#endif
- ++inm->in6m_refcount;
+ in6m_acquire_locked(inm);
*pinm = inm;
goto out_locked;
}
@@ -472,6 +486,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
*/
inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO);
if (inm == NULL) {
+ IN6_MULTI_LIST_UNLOCK();
IF_ADDR_WUNLOCK(ifp);
if_delmulti_ifma(ifma);
return (ENOMEM);
@@ -491,7 +506,8 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
ifma->ifma_protospec = inm;
*pinm = inm;
-out_locked:
+ out_locked:
+ IN6_MULTI_LIST_UNLOCK();
IF_ADDR_WUNLOCK(ifp);
return (error);
}
@@ -502,36 +518,105 @@ out_locked:
* If the refcount drops to 0, free the in6_multi record and
* delete the underlying link-layer membership.
*/
-void
-in6m_release_locked(struct in6_multi *inm)
+static void
+in6m_release(struct in6_multi *inm)
{
struct ifmultiaddr *ifma;
-
- IN6_MULTI_LOCK_ASSERT();
+ struct ifnet *ifp;
CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount);
- if (--inm->in6m_refcount > 0) {
- CTR2(KTR_MLD, "%s: refcount is now %d", __func__,
- inm->in6m_refcount);
- return;
- }
-
+ MPASS(inm->in6m_refcount == 0);
CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm);
ifma = inm->in6m_ifma;
+ ifp = inm->in6m_ifp;
/* XXX this access is not covered by IF_ADDR_LOCK */
CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma);
- KASSERT(ifma->ifma_protospec == inm,
- ("%s: ifma_protospec != inm", __func__));
- ifma->ifma_protospec = NULL;
+ KASSERT(ifma->ifma_protospec == NULL,
+ ("%s: ifma_protospec != NULL", __func__));
+ if (ifp)
+ CURVNET_SET(ifp->if_vnet);
in6m_purge(inm);
-
free(inm, M_IP6MADDR);
if_delmulti_ifma(ifma);
+ if (ifp)
+ CURVNET_RESTORE();
+}
+
+static struct grouptask free_gtask;
+static struct in6_multi_head in6m_free_list;
+static void in6m_release_task(void *arg __unused);
+static void in6m_init(void)
+{
+ SLIST_INIT(&in6m_free_list);
+ taskqgroup_config_gtask_init(NULL, &free_gtask, in6m_release_task, "in6m release task");
+}
+
+SYSINIT(in6m_init, SI_SUB_SMP + 1, SI_ORDER_FIRST,
+ in6m_init, NULL);
+
+
+void
+in6m_release_list_deferred(struct in6_multi_head *inmh)
+{
+ if (SLIST_EMPTY(inmh))
+ return;
+ mtx_lock(&in6_multi_free_mtx);
+ SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele);
+ mtx_unlock(&in6_multi_free_mtx);
+ GROUPTASK_ENQUEUE(&free_gtask);
+}
+
+void
+in6m_release_deferred(struct in6_multi *inm)
+{
+ struct in6_multi_head tmp;
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+ struct in6_ifaddr *ifa6;
+ struct in6_multi_mship *imm;
+
+ IN6_MULTI_LIST_LOCK_ASSERT();
+ KASSERT(inm->in6m_refcount > 0, ("refcount == %d inm: %p", inm->in6m_refcount, inm));
+ if (--inm->in6m_refcount == 0) {
+ ifp = inm->in6m_ifp;
+ IF_ADDR_LOCK_ASSERT(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ ifa6 = (void *)ifa;
+ LIST_FOREACH(imm, &ifa6->ia6_memberships, i6mm_chain) {
+ if (inm == imm->i6mm_maddr)
+ imm->i6mm_maddr = NULL;
+ }
+ }
+ SLIST_INIT(&tmp);
+ inm->in6m_ifma->ifma_protospec = NULL;
+ SLIST_INSERT_HEAD(&tmp, inm, in6m_nrele);
+ in6m_release_list_deferred(&tmp);
+ }
+}
+
+static void
+in6m_release_task(void *arg __unused)
+{
+ struct in6_multi_head in6m_free_tmp;
+ struct in6_multi *inm, *tinm;
+
+ SLIST_INIT(&in6m_free_tmp);
+ mtx_lock(&in6_multi_free_mtx);
+ SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele);
+ mtx_unlock(&in6_multi_free_mtx);
+ IN6_MULTI_LOCK();
+ SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) {
+ SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele);
+ in6m_release(inm);
+ }
+ IN6_MULTI_UNLOCK();
}
/*
@@ -544,7 +629,7 @@ in6m_clear_recorded(struct in6_multi *inm)
{
struct ip6_msource *ims;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
if (ims->im6s_stp) {
@@ -584,7 +669,7 @@ in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
struct ip6_msource find;
struct ip6_msource *ims, *nims;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
find.im6s_addr = *addr;
ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
@@ -911,6 +996,7 @@ in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
schanged = 0;
error = 0;
nsrc1 = nsrc0 = 0;
+ IN6_MULTI_LIST_LOCK_ASSERT();
/*
* Update the source filters first, as this may fail.
@@ -1087,65 +1173,16 @@ in6m_purge(struct in6_multi *inm)
*
* SMPng: Assume no mc locks held by caller.
*/
-struct in6_multi_mship *
-in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr,
- int *errorp, int delay)
-{
- struct in6_multi_mship *imm;
- int error;
-
- imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT);
- if (imm == NULL) {
- *errorp = ENOBUFS;
- return (NULL);
- }
-
- delay = (delay * PR_FASTHZ) / hz;
-
- error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay);
- if (error) {
- *errorp = error;
- free(imm, M_IP6MADDR);
- return (NULL);
- }
-
- return (imm);
-}
-
-/*
- * Leave a multicast address w/o sources.
- * KAME compatibility entry point.
- *
- * SMPng: Assume no mc locks held by caller.
- */
int
-in6_leavegroup(struct in6_multi_mship *imm)
-{
-
- if (imm->i6mm_maddr != NULL)
- in6_mc_leave(imm->i6mm_maddr, NULL);
- free(imm, M_IP6MADDR);
- return 0;
-}
-
-/*
- * Join a multicast group; unlocked entry point.
- *
- * SMPng: XXX: in6_mc_join() is called from in6_control() when upper
- * locks are not held. Fortunately, ifp is unlikely to have been detached
- * at this point, so we assume it's OK to recurse.
- */
-int
-in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
+in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr,
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
const int delay)
{
int error;
IN6_MULTI_LOCK();
- error = in6_mc_join_locked(ifp, mcaddr, imf, pinm, delay);
+ error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay);
IN6_MULTI_UNLOCK();
-
return (error);
}
@@ -1159,12 +1196,13 @@ in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
* code is returned.
*/
int
-in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
+in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
const int delay)
{
struct in6_mfilter timf;
struct in6_multi *inm;
+ struct ifmultiaddr *ifma;
int error;
#ifdef KTR
char ip6tbuf[INET6_ADDRSTRLEN];
@@ -1185,6 +1223,7 @@ in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
#endif
IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_UNLOCK_ASSERT();
CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__,
ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp));
@@ -1200,13 +1239,13 @@ in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
imf = &timf;
}
-
- error = in6_mc_get(ifp, mcaddr, &inm);
+ error = in6_getmulti(ifp, mcaddr, &inm);
if (error) {
- CTR1(KTR_MLD, "%s: in6_mc_get() failure", __func__);
+ CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__);
return (error);
}
+ IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
if (error) {
@@ -1224,11 +1263,19 @@ in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
out_in6m_release:
if (error) {
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
- in6m_release_locked(inm);
+ IF_ADDR_RLOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_protospec == inm) {
+ ifma->ifma_protospec = NULL;
+ break;
+ }
+ }
+ in6m_release_deferred(inm);
+ IF_ADDR_RUNLOCK(ifp);
} else {
*pinm = inm;
}
-
+ IN6_MULTI_LIST_UNLOCK();
return (error);
}
@@ -1236,14 +1283,13 @@ out_in6m_release:
* Leave a multicast group; unlocked entry point.
*/
int
-in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
+in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
int error;
IN6_MULTI_LOCK();
- error = in6_mc_leave_locked(inm, imf);
+ error = in6_leavegroup_locked(inm, imf);
IN6_MULTI_UNLOCK();
-
return (error);
}
@@ -1261,9 +1307,10 @@ in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
* makes a state change downcall into MLD.
*/
int
-in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
+in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
struct in6_mfilter timf;
+ struct ifnet *ifp;
int error;
#ifdef KTR
char ip6tbuf[INET6_ADDRSTRLEN];
@@ -1294,6 +1341,9 @@ in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
* to be allocated, and there is no opportunity to roll back
* the transaction, it MUST NOT fail.
*/
+
+ ifp = inm->in6m_ifp;
+ IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
KASSERT(error == 0, ("%s: failed to merge inm state", __func__));
@@ -1304,11 +1354,17 @@ in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
- in6m_release_locked(inm);
+ if (ifp)
+ IF_ADDR_RLOCK(ifp);
+ in6m_release_deferred(inm);
+ if (ifp)
+ IF_ADDR_RUNLOCK(ifp);
+ IN6_MULTI_LIST_UNLOCK();
return (error);
}
+
/*
* Block or unblock an ASM multicast source on an inpcb.
* This implements the delta-based API described in RFC 3678.
@@ -1446,8 +1502,7 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at MLD layer.
*/
- IN6_MULTI_LOCK();
-
+ IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
if (error)
@@ -1459,7 +1514,7 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
}
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
out_im6f_rollback:
if (error)
@@ -1535,7 +1590,8 @@ ip6_freemoptions(struct ip6_moptions *imo)
struct in6_mfilter *imf;
size_t idx, nmships;
- KASSERT(imo != NULL, ("%s: ip6_moptions is NULL", __func__));
+ if (imo == NULL)
+ return;
nmships = imo->im6o_num_memberships;
for (idx = 0; idx < nmships; ++idx) {
@@ -1543,7 +1599,7 @@ ip6_freemoptions(struct ip6_moptions *imo)
if (imf)
im6f_leave(imf);
/* XXX this will thrash the lock(s) */
- (void)in6_mc_leave(imo->im6o_membership[idx], imf);
+ (void)in6_leavegroup(imo->im6o_membership[idx], imf);
if (imf)
im6f_purge(imf);
}
@@ -2034,10 +2090,12 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at MLD layer.
*/
+ in_pcbref(inp);
+ INP_WUNLOCK(inp);
IN6_MULTI_LOCK();
if (is_new) {
- error = in6_mc_join_locked(ifp, &gsa->sin6.sin6_addr, imf,
+ error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf,
&inm, 0);
if (error) {
IN6_MULTI_UNLOCK();
@@ -2046,6 +2104,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
imo->im6o_membership[idx] = inm;
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
@@ -2057,10 +2116,13 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
+ IN6_MULTI_LIST_UNLOCK();
}
IN6_MULTI_UNLOCK();
- INP_WLOCK_ASSERT(inp);
+ INP_WLOCK(inp);
+ if (in_pcbrele_wlocked(inp))
+ return (ENXIO);
if (error) {
im6f_rollback(imf);
if (is_new)
@@ -2282,9 +2344,10 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
* Give up the multicast address record to which
* the membership points.
*/
- (void)in6_mc_leave_locked(inm, imf);
+ (void)in6_leavegroup_locked(inm, imf);
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
@@ -2296,6 +2359,7 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
+ IN6_MULTI_LIST_UNLOCK();
}
IN6_MULTI_UNLOCK();
@@ -2505,7 +2569,7 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
goto out_im6f_rollback;
INP_WLOCK_ASSERT(inp);
- IN6_MULTI_LOCK();
+ IN6_MULTI_LIST_LOCK();
/*
* Begin state merge transaction at MLD layer.
@@ -2521,7 +2585,7 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
}
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
out_im6f_rollback:
if (error)
@@ -2712,7 +2776,7 @@ sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS)
return (retval);
IN6_MULTI_LOCK();
-
+ IN6_MULTI_LIST_LOCK();
IF_ADDR_RLOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_INET6 ||
@@ -2744,6 +2808,7 @@ sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS)
}
IF_ADDR_RUNLOCK(ifp);
+ IN6_MULTI_LIST_UNLOCK();
IN6_MULTI_UNLOCK();
return (retval);
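
The core of this diff is a rework of in6_multi teardown: the single in6_multi_mtx is split into in6_multi_list_mtx, in6_multi_free_mtx, and in6_multi_sx, the in6_mc_* entry points are renamed to in6_joingroup*/in6_leavegroup*, and the final free of an in6_multi is moved off the release path onto a free list drained later by a grouptask (in6m_release_deferred() / in6m_release_task()). The following is a minimal user-space sketch of that deferred-release pattern only, not the kernel code: struct obj, obj_release_deferred(), obj_drain_free_list(), and free_mtx are hypothetical names, a pthread mutex stands in for the kernel mutexes and for GROUPTASK_ENQUEUE(), and FreeBSD's <sys/queue.h> is assumed for SLIST_CONCAT() and SLIST_FOREACH_SAFE().

/*
 * Sketch of deferred release: the last reference holder only queues the
 * object under a cheap mutex; expensive teardown runs later in one drain
 * pass.  Hypothetical names; shape mirrors in6m_release_deferred() and
 * in6m_release_task() in the diff above, not their actual bodies.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int			refcount;
	SLIST_ENTRY(obj)	nrele;		/* free-list linkage */
};
SLIST_HEAD(obj_head, obj);

static struct obj_head	free_list = SLIST_HEAD_INITIALIZER(free_list);
static pthread_mutex_t	free_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Drop a reference; if it was the last one, park the object for later. */
static void
obj_release_deferred(struct obj *o)
{
	if (--o->refcount > 0)
		return;
	pthread_mutex_lock(&free_mtx);
	SLIST_INSERT_HEAD(&free_list, o, nrele);
	pthread_mutex_unlock(&free_mtx);
	/* Kernel equivalent: GROUPTASK_ENQUEUE(&free_gtask). */
}

/* Deferred worker: detach the whole list, then tear down each entry. */
static void
obj_drain_free_list(void)
{
	struct obj_head tmp = SLIST_HEAD_INITIALIZER(tmp);
	struct obj *o, *to;

	pthread_mutex_lock(&free_mtx);
	SLIST_CONCAT(&tmp, &free_list, obj, nrele);	/* empties free_list */
	pthread_mutex_unlock(&free_mtx);

	SLIST_FOREACH_SAFE(o, &tmp, nrele, to) {
		SLIST_REMOVE_HEAD(&tmp, nrele);
		free(o);	/* expensive teardown happens only here */
	}
}

int
main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	obj_release_deferred(o);	/* last ref: object is queued */
	obj_drain_free_list();		/* later, in "task" context */
	printf("drained\n");
	return (0);
}

The point of the split is visible in the sketch: callers dropping the last reference never take the heavy lock or call into the link layer (if_delmulti_ifma() in the real code); only the single drain pass does, which is why the diff adds in6_multi_free_mtx and the free_gtask alongside the new list mutex.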