1 | /* |
2 | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /*- |
29 | * Copyright (c) 2009 Bruce Simpson. |
30 | * |
31 | * Redistribution and use in source and binary forms, with or without |
32 | * modification, are permitted provided that the following conditions |
33 | * are met: |
34 | * 1. Redistributions of source code must retain the above copyright |
35 | * notice, this list of conditions and the following disclaimer. |
36 | * 2. Redistributions in binary form must reproduce the above copyright |
37 | * notice, this list of conditions and the following disclaimer in the |
38 | * documentation and/or other materials provided with the distribution. |
39 | * 3. The name of the author may not be used to endorse or promote |
40 | * products derived from this software without specific prior written |
41 | * permission. |
42 | * |
43 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
44 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
45 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
46 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
47 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
48 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
49 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
50 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
51 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
52 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
53 | * SUCH DAMAGE. |
54 | */ |
55 | |
56 | /* |
57 | * Copyright (c) 1988 Stephen Deering. |
58 | * Copyright (c) 1992, 1993 |
59 | * The Regents of the University of California. All rights reserved. |
60 | * |
61 | * This code is derived from software contributed to Berkeley by |
62 | * Stephen Deering of Stanford University. |
63 | * |
64 | * Redistribution and use in source and binary forms, with or without |
65 | * modification, are permitted provided that the following conditions |
66 | * are met: |
67 | * 1. Redistributions of source code must retain the above copyright |
68 | * notice, this list of conditions and the following disclaimer. |
69 | * 2. Redistributions in binary form must reproduce the above copyright |
70 | * notice, this list of conditions and the following disclaimer in the |
71 | * documentation and/or other materials provided with the distribution. |
72 | * 3. All advertising materials mentioning features or use of this software |
73 | * must display the following acknowledgement: |
74 | * This product includes software developed by the University of |
75 | * California, Berkeley and its contributors. |
76 | * 4. Neither the name of the University nor the names of its contributors |
77 | * may be used to endorse or promote products derived from this software |
78 | * without specific prior written permission. |
79 | * |
80 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
81 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
82 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
83 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
84 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
85 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
86 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
87 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
88 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
89 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
90 | * SUCH DAMAGE. |
91 | * |
92 | * @(#)igmp.c 8.1 (Berkeley) 7/19/93 |
93 | */ |
94 | /* |
95 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
96 | * support for mandatory and extensible security protections. This notice |
97 | * is included in support of clause 2.2 (b) of the Apple Public License, |
98 | * Version 2.0. |
99 | */ |
100 | |
101 | #include <sys/cdefs.h> |
102 | |
103 | #include <sys/param.h> |
104 | #include <sys/systm.h> |
105 | #include <sys/mbuf.h> |
106 | #include <sys/socket.h> |
107 | #include <sys/protosw.h> |
108 | #include <sys/sysctl.h> |
109 | #include <sys/kernel.h> |
110 | #include <sys/malloc.h> |
111 | #include <sys/mcache.h> |
112 | |
113 | #include <dev/random/randomdev.h> |
114 | |
115 | #include <kern/zalloc.h> |
116 | |
117 | #include <net/if.h> |
118 | #include <net/route.h> |
119 | |
120 | #include <netinet/in.h> |
121 | #include <netinet/in_var.h> |
122 | #include <netinet6/in6_var.h> |
123 | #include <netinet/ip6.h> |
124 | #include <netinet6/ip6_var.h> |
125 | #include <netinet6/scope6_var.h> |
126 | #include <netinet/icmp6.h> |
127 | #include <netinet6/mld6.h> |
128 | #include <netinet6/mld6_var.h> |
129 | |
130 | #include <os/log.h> |
131 | |
132 | /* Lock group and attribute for mld_mtx */ |
133 | static LCK_ATTR_DECLARE(mld_mtx_attr, 0, 0); |
134 | static LCK_GRP_DECLARE(mld_mtx_grp, "mld_mtx" ); |
135 | |
136 | /* |
137 | * Locking and reference counting: |
138 | * |
139 | * mld_mtx mainly protects mli_head. In cases where both mld_mtx and |
140 | * in6_multihead_lock must be held, the former must be acquired first in order |
141 | * to maintain lock ordering. It is not a requirement that mld_mtx be |
142 | * acquired first before in6_multihead_lock, but in case both must be acquired |
143 | * in succession, the correct lock ordering must be followed. |
144 | * |
145 | * Instead of walking the if_multiaddrs list at the interface and returning |
146 | * the ifma_protospec value of a matching entry, we search the global list |
147 | * of in6_multi records and find it that way; this is done with in6_multihead |
148 | * lock held. Doing so avoids the race condition issues that many other BSDs |
149 | * suffer from (therefore in our implementation, ifma_protospec will never be |
150 | * NULL for as long as the in6_multi is valid.) |
151 | * |
 * The above creates a requirement for the in6_multi to stay in the
 * in6_multihead list even after the final MLD leave (in MLDv2 mode) until it
 * no longer needs to be retransmitted (this is not required for MLDv1.)
 * In order to handle
155 | * this, the request and reference counts of the in6_multi are bumped up when |
156 | * the state changes to MLD_LEAVING_MEMBER, and later dropped in the timeout |
157 | * handler. Each in6_multi holds a reference to the underlying mld_ifinfo. |
158 | * |
159 | * Thus, the permitted lock order is: |
160 | * |
161 | * mld_mtx, in6_multihead_lock, inm6_lock, mli_lock |
162 | * |
163 | * Any may be taken independently, but if any are held at the same time, |
164 | * the above lock order must be followed. |
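 *
 * For example, a hypothetical path that needs both the global membership
 * list and the per-interface MLD state (an illustrative sketch only; actual
 * call sites in this file vary) would nest the locks as follows:
 *
 *	MLD_LOCK();
 *	in6_multihead_lock_shared();
 *	IN6M_LOCK(inm);
 *	MLI_LOCK(mli);
 *	...
 *	MLI_UNLOCK(mli);
 *	IN6M_UNLOCK(inm);
 *	in6_multihead_lock_done();
 *	MLD_UNLOCK();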
165 | */ |
166 | static LCK_MTX_DECLARE_ATTR(mld_mtx, &mld_mtx_grp, &mld_mtx_attr); |
167 | |
168 | SLIST_HEAD(mld_in6m_relhead, in6_multi); |
169 | |
170 | static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int); |
171 | static struct mld_ifinfo *mli_alloc(zalloc_flags_t); |
172 | static void mli_free(struct mld_ifinfo *); |
173 | static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *); |
174 | static void mld_dispatch_packet(struct mbuf *); |
175 | static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *, |
176 | struct mld_tparams *); |
177 | static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *, |
178 | struct mld_tparams *); |
179 | static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *, |
180 | struct mld_tparams *, const int); |
181 | #ifdef MLD_DEBUG |
182 | static const char * mld_rec_type_to_str(const int); |
183 | #endif |
184 | static uint32_t mld_set_version(struct mld_ifinfo *, const int); |
185 | static void mld_append_relq(struct mld_ifinfo *, struct in6_multi *); |
186 | static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *); |
187 | static void mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int); |
188 | static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *, |
189 | /*const*/ struct mld_hdr *); |
190 | static int mld_v1_input_report(struct ifnet *, struct mbuf *, |
191 | const struct ip6_hdr *, /*const*/ struct mld_hdr *); |
192 | static void mld_v1_process_group_timer(struct in6_multi *, const int); |
193 | static void mld_v1_process_querier_timers(struct mld_ifinfo *); |
194 | static int mld_v1_transmit_report(struct in6_multi *, const uint8_t); |
195 | static uint32_t mld_v1_update_group(struct in6_multi *, const int); |
196 | static void mld_v2_cancel_link_timers(struct mld_ifinfo *); |
197 | static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *); |
198 | static struct mbuf * |
199 | mld_v2_encap_report(struct ifnet *, struct mbuf *); |
200 | static int mld_v2_enqueue_filter_change(struct ifqueue *, |
201 | struct in6_multi *); |
202 | static int mld_v2_enqueue_group_record(struct ifqueue *, |
203 | struct in6_multi *, const int, const int, const int, |
204 | const int); |
205 | static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, |
206 | struct mbuf *, const int, const int); |
207 | static int mld_v2_merge_state_changes(struct in6_multi *, |
208 | struct ifqueue *); |
209 | static void mld_v2_process_group_timers(struct mld_ifinfo *, |
210 | struct ifqueue *, struct ifqueue *, |
211 | struct in6_multi *, const int); |
212 | static int mld_v2_process_group_query(struct in6_multi *, |
213 | int, struct mbuf *, const int); |
214 | static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS; |
215 | static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS; |
216 | static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS; |
217 | |
218 | static const uint32_t mld_timeout_delay = 1000; /* in milliseconds */ |
static const uint32_t mld_timeout_leeway = 500; /* in milliseconds */
220 | static bool mld_timeout_run; /* MLD timer is scheduled to run */ |
221 | static bool mld_fast_timeout_run; /* MLD fast timer is scheduled to run */ |
222 | static void mld_timeout(thread_call_param_t, thread_call_param_t); |
223 | static void mld_sched_timeout(void); |
224 | static void mld_sched_fast_timeout(void); |
225 | |
226 | /* |
227 | * Normative references: RFC 2710, RFC 3590, RFC 3810. |
228 | */ |
229 | static struct timeval mld_gsrdelay = {.tv_sec = 10, .tv_usec = 0}; |
230 | static LIST_HEAD(, mld_ifinfo) mli_head; |
231 | |
232 | static int querier_present_timers_running6; |
233 | static int interface_timers_running6; |
234 | static int state_change_timers_running6; |
235 | static int current_state_timers_running6; |
236 | |
237 | static unsigned int mld_mli_list_genid; |
238 | /* |
239 | * Subsystem lock macros. |
240 | */ |
241 | #define MLD_LOCK() \ |
242 | lck_mtx_lock(&mld_mtx) |
243 | #define MLD_LOCK_ASSERT_HELD() \ |
244 | LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED) |
245 | #define MLD_LOCK_ASSERT_NOTHELD() \ |
246 | LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED) |
247 | #define MLD_UNLOCK() \ |
248 | lck_mtx_unlock(&mld_mtx) |
249 | |
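/*
 * Helper macros for deferred release of detached in6_multi records: the
 * first collects a record on a caller-supplied SLIST head while locks are
 * still held; the second walks that list and drops the references once all
 * locks have been released.
 */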
250 | #define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \ |
251 | SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \ |
252 | } |
253 | |
254 | #define MLD_REMOVE_DETACHED_IN6M(_head) { \ |
255 | struct in6_multi *_in6m, *_inm_tmp; \ |
256 | SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \ |
257 | SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \ |
258 | IN6M_REMREF(_in6m); \ |
259 | } \ |
260 | VERIFY(SLIST_EMPTY(_head)); \ |
261 | } |
262 | |
263 | static KALLOC_TYPE_DEFINE(mli_zone, struct mld_ifinfo, NET_KT_DEFAULT); |
264 | |
265 | SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ |
266 | |
267 | SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0, |
268 | "IPv6 Multicast Listener Discovery" ); |
269 | SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay, |
270 | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, |
271 | &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I" , |
272 | "Rate limit for MLDv2 Group-and-Source queries in seconds" ); |
273 | |
274 | SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED, |
275 | sysctl_mld_ifinfo, "Per-interface MLDv2 state" ); |
276 | |
277 | static int mld_v1enable = 1; |
278 | SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED, |
279 | &mld_v1enable, 0, "Enable fallback to MLDv1" ); |
280 | |
281 | static int mld_v2enable = 1; |
282 | SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable, |
283 | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, |
284 | &mld_v2enable, 0, sysctl_mld_v2enable, "I" , |
285 | "Enable MLDv2 (debug purposes only)" ); |
286 | |
287 | static int mld_use_allow = 1; |
288 | SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED, |
289 | &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves" ); |
290 | |
291 | #ifdef MLD_DEBUG |
292 | int mld_debug = 0; |
293 | SYSCTL_INT(_net_inet6_mld, OID_AUTO, |
294 | debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "" ); |
295 | #endif |
296 | /* |
297 | * Packed Router Alert option structure declaration. |
298 | */ |
299 | struct mld_raopt { |
300 | struct ip6_hbh hbh; |
301 | struct ip6_opt pad; |
302 | struct ip6_opt_router ra; |
303 | } __packed; |
304 | |
305 | /* |
306 | * Router Alert hop-by-hop option header. |
307 | */ |
308 | static struct mld_raopt mld_ra = { |
309 | .hbh = { .ip6h_nxt = 0, .ip6h_len = 0 }, |
310 | .pad = { .ip6o_type = IP6OPT_PADN, .ip6o_len = 0 }, |
311 | .ra = { |
312 | .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT, |
313 | .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2), |
314 | .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF), |
315 | (IP6OPT_RTALERT_MLD & 0xFF) } |
316 | } |
317 | }; |
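/*
 * Packet options used for outbound MLD messages; these carry the
 * Router Alert hop-by-hop option declared above.
 */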
318 | static struct ip6_pktopts mld_po; |
319 | |
320 | /* Store MLDv2 record count in the module private scratch space */ |
321 | #define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] |
322 | |
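/*
 * Stash the interface pointer in the mbuf packet header (rcvif) so that it
 * can be recovered by mld_restore_context() when the queued chain is
 * dispatched.
 */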
323 | static __inline void |
324 | mld_save_context(struct mbuf *m, struct ifnet *ifp) |
325 | { |
326 | m->m_pkthdr.rcvif = ifp; |
327 | } |
328 | |
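/*
 * Clear the interface context previously saved in the mbuf packet header.
 */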
329 | static __inline void |
330 | mld_scrub_context(struct mbuf *m) |
331 | { |
332 | m->m_pkthdr.rcvif = NULL; |
333 | } |
334 | |
335 | /* |
336 | * Restore context from a queued output chain. |
337 | * Return saved ifp. |
338 | */ |
339 | static __inline struct ifnet * |
340 | mld_restore_context(struct mbuf *m) |
341 | { |
342 | return m->m_pkthdr.rcvif; |
343 | } |
344 | |
345 | /* |
346 | * Retrieve or set threshold between group-source queries in seconds. |
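 * The value is exposed via the SYSCTL_PROC declared above as
 * net.inet6.mld.gsrdelay, e.g. something like
 * "sysctl net.inet6.mld.gsrdelay=10" from userland.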
347 | */ |
348 | static int |
349 | sysctl_mld_gsr SYSCTL_HANDLER_ARGS |
350 | { |
351 | #pragma unused(arg1, arg2) |
352 | int error; |
353 | int i; |
354 | |
355 | MLD_LOCK(); |
356 | |
357 | i = (int)mld_gsrdelay.tv_sec; |
358 | |
error = sysctl_handle_int(oidp, &i, 0, req);
360 | if (error || !req->newptr) { |
361 | goto out_locked; |
362 | } |
363 | |
364 | if (i < -1 || i >= 60) { |
365 | error = EINVAL; |
366 | goto out_locked; |
367 | } |
368 | |
369 | mld_gsrdelay.tv_sec = i; |
370 | |
371 | out_locked: |
372 | MLD_UNLOCK(); |
373 | return error; |
374 | } |
375 | /* |
376 | * Expose struct mld_ifinfo to userland, keyed by ifindex. |
377 | * For use by ifmcstat(8). |
378 | * |
379 | */ |
380 | static int |
381 | sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS |
382 | { |
383 | #pragma unused(oidp) |
384 | int *name; |
385 | int error; |
386 | u_int namelen; |
387 | struct ifnet *ifp; |
388 | struct mld_ifinfo *mli; |
389 | struct mld_ifinfo_u mli_u; |
390 | |
391 | name = (int *)arg1; |
392 | namelen = arg2; |
393 | |
394 | if (req->newptr != USER_ADDR_NULL) { |
395 | return EPERM; |
396 | } |
397 | |
398 | if (namelen != 1) { |
399 | return EINVAL; |
400 | } |
401 | |
402 | MLD_LOCK(); |
403 | |
404 | if (name[0] <= 0 || name[0] > (u_int)if_index) { |
405 | error = ENOENT; |
406 | goto out_locked; |
407 | } |
408 | |
409 | error = ENOENT; |
410 | |
411 | ifnet_head_lock_shared(); |
412 | ifp = ifindex2ifnet[name[0]]; |
413 | ifnet_head_done(); |
414 | if (ifp == NULL) { |
415 | goto out_locked; |
416 | } |
417 | |
bzero(&mli_u, sizeof(mli_u));
419 | |
420 | LIST_FOREACH(mli, &mli_head, mli_link) { |
421 | MLI_LOCK(mli); |
422 | if (ifp != mli->mli_ifp) { |
423 | MLI_UNLOCK(mli); |
424 | continue; |
425 | } |
426 | |
427 | mli_u.mli_ifindex = mli->mli_ifp->if_index; |
428 | mli_u.mli_version = mli->mli_version; |
429 | mli_u.mli_v1_timer = mli->mli_v1_timer; |
430 | mli_u.mli_v2_timer = mli->mli_v2_timer; |
431 | mli_u.mli_flags = mli->mli_flags; |
432 | mli_u.mli_rv = mli->mli_rv; |
433 | mli_u.mli_qi = mli->mli_qi; |
434 | mli_u.mli_qri = mli->mli_qri; |
435 | mli_u.mli_uri = mli->mli_uri; |
436 | MLI_UNLOCK(mli); |
437 | |
438 | error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u)); |
439 | break; |
440 | } |
441 | |
442 | out_locked: |
443 | MLD_UNLOCK(); |
444 | return error; |
445 | } |
446 | |
447 | static int |
448 | sysctl_mld_v2enable SYSCTL_HANDLER_ARGS |
449 | { |
450 | #pragma unused(arg1, arg2) |
451 | int error; |
452 | int i; |
453 | struct mld_ifinfo *mli; |
454 | struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; |
455 | |
456 | MLD_LOCK(); |
457 | |
458 | i = mld_v2enable; |
459 | |
error = sysctl_handle_int(oidp, &i, 0, req);
461 | if (error || !req->newptr) { |
462 | goto out_locked; |
463 | } |
464 | |
465 | if (i < 0 || i > 1) { |
466 | error = EINVAL; |
467 | goto out_locked; |
468 | } |
469 | |
470 | mld_v2enable = i; |
471 | /* |
472 | * If we enabled v2, the state transition will take care of upgrading |
473 | * the MLD version back to v2. Otherwise, we have to explicitly |
474 | * downgrade. Note that this functionality is to be used for debugging. |
475 | */ |
476 | if (mld_v2enable == 1) { |
477 | goto out_locked; |
478 | } |
479 | |
480 | LIST_FOREACH(mli, &mli_head, mli_link) { |
481 | MLI_LOCK(mli); |
482 | if (mld_set_version(mli, MLD_VERSION_1) > 0) { |
483 | mtp.qpt = 1; |
484 | } |
485 | MLI_UNLOCK(mli); |
486 | } |
487 | |
488 | out_locked: |
489 | MLD_UNLOCK(); |
490 | |
491 | mld_set_timeout(&mtp); |
492 | |
493 | return error; |
494 | } |
495 | |
496 | /* |
497 | * Dispatch an entire queue of pending packet chains. |
498 | * |
499 | * Must not be called with in6m_lock held. |
 * XXX This routine temporarily drops the MLD global lock and the mli lock.
 * Make sure that the calling routine takes a reference on the mli
 * before calling this routine.
 * Also, if we are traversing mli_head, remember to check the mli list
 * generation count and restart the loop if the generation count
 * has changed.
506 | */ |
507 | static void |
508 | mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit) |
509 | { |
510 | struct mbuf *m; |
511 | |
512 | MLD_LOCK_ASSERT_HELD(); |
513 | |
514 | if (mli != NULL) { |
515 | MLI_LOCK_ASSERT_HELD(mli); |
516 | } |
517 | |
518 | for (;;) { |
519 | IF_DEQUEUE(ifq, m); |
520 | if (m == NULL) { |
521 | break; |
522 | } |
523 | MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n" , __func__, |
524 | (uint64_t)VM_KERNEL_ADDRPERM(ifq), |
525 | (uint64_t)VM_KERNEL_ADDRPERM(m))); |
526 | |
527 | if (mli != NULL) { |
528 | MLI_UNLOCK(mli); |
529 | } |
530 | MLD_UNLOCK(); |
531 | |
532 | mld_dispatch_packet(m); |
533 | |
534 | MLD_LOCK(); |
535 | if (mli != NULL) { |
536 | MLI_LOCK(mli); |
537 | } |
538 | |
539 | if (--limit == 0) { |
540 | break; |
541 | } |
542 | } |
543 | |
544 | if (mli != NULL) { |
545 | MLI_LOCK_ASSERT_HELD(mli); |
546 | } |
547 | } |
548 | |
549 | /* |
550 | * Filter outgoing MLD report state by group. |
551 | * |
552 | * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1) |
553 | * and node-local addresses. However, kernel and socket consumers |
554 | * always embed the KAME scope ID in the address provided, so strip it |
555 | * when performing comparison. |
556 | * Note: This is not the same as the *multicast* scope. |
557 | * |
558 | * Return zero if the given group is one for which MLD reports |
559 | * should be suppressed, or non-zero if reports should be issued. |
560 | */ |
561 | static __inline__ int |
562 | mld_is_addr_reported(const struct in6_addr *addr) |
563 | { |
564 | VERIFY(IN6_IS_ADDR_MULTICAST(addr)); |
565 | |
566 | if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) { |
567 | return 0; |
568 | } |
569 | |
570 | if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL && !IN6_IS_ADDR_UNICAST_BASED_MULTICAST(addr)) { |
571 | struct in6_addr tmp = *addr; |
572 | in6_clearscope(&tmp); |
573 | if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) { |
574 | return 0; |
575 | } |
576 | } |
577 | |
578 | return 1; |
579 | } |
580 | |
581 | /* |
582 | * Attach MLD when PF_INET6 is attached to an interface. |
583 | */ |
584 | struct mld_ifinfo * |
585 | mld_domifattach(struct ifnet *ifp, zalloc_flags_t how) |
586 | { |
587 | struct mld_ifinfo *mli; |
588 | |
589 | os_log_debug(OS_LOG_DEFAULT, "%s: called for ifp %s\n" , __func__, |
590 | if_name(ifp)); |
591 | |
592 | mli = mli_alloc(how); |
593 | if (mli == NULL) { |
594 | return NULL; |
595 | } |
596 | |
597 | MLD_LOCK(); |
598 | |
599 | MLI_LOCK(mli); |
600 | mli_initvar(mli, ifp, 0); |
601 | mli->mli_debug |= IFD_ATTACHED; |
602 | MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */ |
603 | MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */ |
604 | MLI_UNLOCK(mli); |
605 | ifnet_lock_shared(ifp); |
606 | mld6_initsilent(ifp, mli); |
607 | ifnet_lock_done(ifp); |
608 | |
609 | LIST_INSERT_HEAD(&mli_head, mli, mli_link); |
610 | mld_mli_list_genid++; |
611 | |
612 | MLD_UNLOCK(); |
613 | |
614 | os_log_info(OS_LOG_DEFAULT, "%s: allocated mld_ifinfo for ifp %s\n" , |
615 | __func__, if_name(ifp)); |
616 | |
617 | return mli; |
618 | } |
619 | |
620 | /* |
621 | * Attach MLD when PF_INET6 is reattached to an interface. Caller is |
622 | * expected to have an outstanding reference to the mli. |
623 | */ |
624 | void |
625 | mld_domifreattach(struct mld_ifinfo *mli) |
626 | { |
627 | struct ifnet *ifp; |
628 | |
629 | MLD_LOCK(); |
630 | |
631 | MLI_LOCK(mli); |
632 | VERIFY(!(mli->mli_debug & IFD_ATTACHED)); |
633 | ifp = mli->mli_ifp; |
634 | VERIFY(ifp != NULL); |
635 | mli_initvar(mli, ifp, 1); |
636 | mli->mli_debug |= IFD_ATTACHED; |
637 | MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */ |
638 | MLI_UNLOCK(mli); |
639 | ifnet_lock_shared(ifp); |
640 | mld6_initsilent(ifp, mli); |
641 | ifnet_lock_done(ifp); |
642 | |
643 | LIST_INSERT_HEAD(&mli_head, mli, mli_link); |
644 | mld_mli_list_genid++; |
645 | |
646 | MLD_UNLOCK(); |
647 | |
648 | os_log_info(OS_LOG_DEFAULT, "%s: reattached mld_ifinfo for ifp %s\n" , |
649 | __func__, if_name(ifp)); |
650 | } |
651 | |
652 | /* |
653 | * Hook for domifdetach. |
654 | */ |
655 | void |
656 | mld_domifdetach(struct ifnet *ifp) |
657 | { |
658 | SLIST_HEAD(, in6_multi) in6m_dthead; |
659 | |
660 | SLIST_INIT(&in6m_dthead); |
661 | |
662 | os_log_info(OS_LOG_DEFAULT, "%s: called for ifp %s\n" , __func__, |
663 | if_name(ifp)); |
664 | |
665 | MLD_LOCK(); |
666 | mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead); |
667 | MLD_UNLOCK(); |
668 | |
/* Now that we've dropped all locks, release detached records */
670 | MLD_REMOVE_DETACHED_IN6M(&in6m_dthead); |
671 | } |
672 | |
673 | /* |
674 | * Called at interface detach time. Note that we only flush all deferred |
675 | * responses and record releases; all remaining inm records and their source |
676 | * entries related to this interface are left intact, in order to handle |
677 | * the reattach case. |
678 | */ |
679 | static void |
680 | mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead) |
681 | { |
682 | struct mld_ifinfo *mli, *tmli; |
683 | |
684 | MLD_LOCK_ASSERT_HELD(); |
685 | |
686 | LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) { |
687 | MLI_LOCK(mli); |
688 | if (mli->mli_ifp == ifp) { |
689 | /* |
690 | * Free deferred General Query responses. |
691 | */ |
692 | IF_DRAIN(&mli->mli_gq); |
693 | IF_DRAIN(&mli->mli_v1q); |
694 | mld_flush_relq(mli, in6m_dthead); |
695 | mli->mli_debug &= ~IFD_ATTACHED; |
696 | MLI_UNLOCK(mli); |
697 | |
698 | LIST_REMOVE(mli, mli_link); |
699 | MLI_REMREF(mli); /* release mli_head reference */ |
700 | mld_mli_list_genid++; |
701 | return; |
702 | } |
703 | MLI_UNLOCK(mli); |
704 | } |
705 | panic("%s: mld_ifinfo not found for ifp %p(%s)" , __func__, |
706 | ifp, ifp->if_xname); |
707 | } |
708 | |
709 | __private_extern__ void |
710 | mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli) |
711 | { |
712 | ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED); |
713 | |
714 | MLI_LOCK_ASSERT_NOTHELD(mli); |
715 | MLI_LOCK(mli); |
716 | if (!(ifp->if_flags & IFF_MULTICAST) && |
717 | (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) { |
718 | mli->mli_flags |= MLIF_SILENT; |
719 | } else { |
720 | mli->mli_flags &= ~MLIF_SILENT; |
721 | } |
722 | MLI_UNLOCK(mli); |
723 | } |
724 | |
725 | static void |
726 | mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach) |
727 | { |
728 | MLI_LOCK_ASSERT_HELD(mli); |
729 | |
730 | mli->mli_ifp = ifp; |
731 | if (mld_v2enable) { |
732 | mli->mli_version = MLD_VERSION_2; |
733 | } else { |
734 | mli->mli_version = MLD_VERSION_1; |
735 | } |
736 | mli->mli_flags = 0; |
737 | mli->mli_rv = MLD_RV_INIT; |
738 | mli->mli_qi = MLD_QI_INIT; |
739 | mli->mli_qri = MLD_QRI_INIT; |
740 | mli->mli_uri = MLD_URI_INIT; |
741 | |
742 | if (mld_use_allow) { |
743 | mli->mli_flags |= MLIF_USEALLOW; |
744 | } |
745 | if (!reattach) { |
746 | SLIST_INIT(&mli->mli_relinmhead); |
747 | } |
748 | |
749 | /* |
750 | * Responses to general queries are subject to bounds. |
751 | */ |
752 | mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS; |
753 | mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS; |
754 | } |
755 | |
756 | static struct mld_ifinfo * |
757 | mli_alloc(zalloc_flags_t how) |
758 | { |
759 | struct mld_ifinfo *mli = zalloc_flags(mli_zone, how | Z_ZERO); |
760 | if (mli != NULL) { |
lck_mtx_init(&mli->mli_lock, &mld_mtx_grp, &mld_mtx_attr);
762 | mli->mli_debug |= IFD_ALLOC; |
763 | } |
764 | return mli; |
765 | } |
766 | |
767 | static void |
768 | mli_free(struct mld_ifinfo *mli) |
769 | { |
770 | MLI_LOCK(mli); |
771 | if (mli->mli_debug & IFD_ATTACHED) { |
772 | panic("%s: attached mli=%p is being freed" , __func__, mli); |
773 | /* NOTREACHED */ |
774 | } else if (mli->mli_ifp != NULL) { |
775 | panic("%s: ifp not NULL for mli=%p" , __func__, mli); |
776 | /* NOTREACHED */ |
777 | } else if (!(mli->mli_debug & IFD_ALLOC)) { |
778 | panic("%s: mli %p cannot be freed" , __func__, mli); |
779 | /* NOTREACHED */ |
780 | } else if (mli->mli_refcnt != 0) { |
781 | panic("%s: non-zero refcnt mli=%p" , __func__, mli); |
782 | /* NOTREACHED */ |
783 | } |
784 | mli->mli_debug &= ~IFD_ALLOC; |
785 | MLI_UNLOCK(mli); |
786 | |
lck_mtx_destroy(&mli->mli_lock, &mld_mtx_grp);
788 | zfree(mli_zone, mli); |
789 | } |
790 | |
791 | void |
792 | mli_addref(struct mld_ifinfo *mli, int locked) |
793 | { |
794 | if (!locked) { |
795 | MLI_LOCK_SPIN(mli); |
796 | } else { |
797 | MLI_LOCK_ASSERT_HELD(mli); |
798 | } |
799 | |
800 | if (++mli->mli_refcnt == 0) { |
801 | panic("%s: mli=%p wraparound refcnt" , __func__, mli); |
802 | /* NOTREACHED */ |
803 | } |
804 | if (!locked) { |
805 | MLI_UNLOCK(mli); |
806 | } |
807 | } |
808 | |
809 | void |
810 | mli_remref(struct mld_ifinfo *mli) |
811 | { |
812 | SLIST_HEAD(, in6_multi) in6m_dthead; |
813 | struct ifnet *ifp; |
814 | |
815 | MLI_LOCK_SPIN(mli); |
816 | |
817 | if (mli->mli_refcnt == 0) { |
818 | panic("%s: mli=%p negative refcnt" , __func__, mli); |
819 | /* NOTREACHED */ |
820 | } |
821 | |
822 | --mli->mli_refcnt; |
823 | if (mli->mli_refcnt > 0) { |
824 | MLI_UNLOCK(mli); |
825 | return; |
826 | } |
827 | |
828 | ifp = mli->mli_ifp; |
829 | mli->mli_ifp = NULL; |
830 | IF_DRAIN(&mli->mli_gq); |
831 | IF_DRAIN(&mli->mli_v1q); |
832 | SLIST_INIT(&in6m_dthead); |
833 | mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead); |
834 | MLI_UNLOCK(mli); |
835 | |
/* Now that we've dropped all locks, release detached records */
837 | MLD_REMOVE_DETACHED_IN6M(&in6m_dthead); |
838 | |
839 | os_log(OS_LOG_DEFAULT, "%s: freeing mld_ifinfo for ifp %s\n" , |
840 | __func__, if_name(ifp)); |
841 | |
842 | mli_free(mli); |
843 | } |
844 | |
845 | /* |
846 | * Process a received MLDv1 general or address-specific query. |
847 | * Assumes that the query header has been pulled up to sizeof(mld_hdr). |
848 | * |
849 | * NOTE: Can't be fully const correct as we temporarily embed scope ID in |
850 | * mld_addr. This is OK as we own the mbuf chain. |
851 | */ |
852 | static int |
853 | mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, |
854 | /*const*/ struct mld_hdr *mld) |
855 | { |
856 | struct mld_ifinfo *mli; |
857 | struct in6_multi *inm; |
858 | int err = 0, is_general_query; |
859 | uint16_t timer; |
860 | struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; |
861 | |
862 | MLD_LOCK_ASSERT_NOTHELD(); |
863 | |
864 | is_general_query = 0; |
865 | |
866 | if (!mld_v1enable) { |
867 | os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 query on ifp %s\n" , |
868 | __func__, if_name(ifp)); |
869 | goto done; |
870 | } |
871 | |
872 | /* |
873 | * RFC3810 Section 6.2: MLD queries must originate from |
874 | * a router's link-local address. |
875 | */ |
876 | if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { |
877 | os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 query src %s on ifp %s\n" , |
878 | __func__, ip6_sprintf(&ip6->ip6_src), |
879 | if_name(ifp)); |
880 | goto done; |
881 | } |
882 | |
883 | /* |
884 | * Do address field validation upfront before we accept |
885 | * the query. |
886 | */ |
887 | if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { |
888 | /* |
889 | * MLDv1 General Query. |
890 | * If this was not sent to the all-nodes group, ignore it. |
891 | */ |
892 | struct in6_addr dst; |
893 | |
894 | dst = ip6->ip6_dst; |
895 | in6_clearscope(&dst); |
896 | if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) { |
897 | err = EINVAL; |
898 | goto done; |
899 | } |
900 | is_general_query = 1; |
901 | } else { |
902 | /* |
903 | * Embed scope ID of receiving interface in MLD query for |
904 | * lookup whilst we don't hold other locks. |
905 | */ |
906 | (void)in6_setscope(&mld->mld_addr, ifp, NULL); |
907 | } |
908 | |
909 | /* |
910 | * Switch to MLDv1 host compatibility mode. |
911 | */ |
912 | mli = MLD_IFINFO(ifp); |
913 | VERIFY(mli != NULL); |
914 | |
915 | MLI_LOCK(mli); |
916 | mtp.qpt = mld_set_version(mli, MLD_VERSION_1); |
917 | MLI_UNLOCK(mli); |
918 | |
919 | timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE; |
920 | if (timer == 0) { |
921 | timer = 1; |
922 | } |
923 | |
924 | if (is_general_query) { |
925 | struct in6_multistep step; |
926 | |
927 | os_log_debug(OS_LOG_DEFAULT, "%s: process v1 general query on ifp %s\n" , |
928 | __func__, if_name(ifp)); |
929 | /* |
930 | * For each reporting group joined on this |
931 | * interface, kick the report timer. |
932 | */ |
933 | in6_multihead_lock_shared(); |
934 | IN6_FIRST_MULTI(step, inm); |
935 | while (inm != NULL) { |
936 | IN6M_LOCK(inm); |
937 | if (inm->in6m_ifp == ifp) { |
938 | mtp.cst += mld_v1_update_group(inm, timer); |
939 | } |
940 | IN6M_UNLOCK(inm); |
941 | IN6_NEXT_MULTI(step, inm); |
942 | } |
943 | in6_multihead_lock_done(); |
944 | } else { |
945 | /* |
946 | * MLDv1 Group-Specific Query. |
947 | * If this is a group-specific MLDv1 query, we need only |
948 | * look up the single group to process it. |
949 | */ |
950 | in6_multihead_lock_shared(); |
951 | IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm); |
952 | in6_multihead_lock_done(); |
953 | |
954 | if (inm != NULL) { |
955 | IN6M_LOCK(inm); |
956 | os_log_debug(OS_LOG_DEFAULT, "%s: process v1 query %s on " |
957 | "ifp %s\n" , __func__, |
958 | ip6_sprintf(&mld->mld_addr), |
959 | if_name(ifp)); |
960 | mtp.cst = mld_v1_update_group(inm, timer); |
961 | IN6M_UNLOCK(inm); |
962 | IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */ |
963 | } |
964 | /* XXX Clear embedded scope ID as userland won't expect it. */ |
965 | in6_clearscope(&mld->mld_addr); |
966 | } |
967 | done: |
968 | mld_set_timeout(&mtp); |
969 | |
970 | return err; |
971 | } |
972 | |
973 | /* |
974 | * Update the report timer on a group in response to an MLDv1 query. |
975 | * |
976 | * If we are becoming the reporting member for this group, start the timer. |
977 | * If we already are the reporting member for this group, and timer is |
978 | * below the threshold, reset it. |
979 | * |
980 | * We may be updating the group for the first time since we switched |
981 | * to MLDv2. If we are, then we must clear any recorded source lists, |
982 | * and transition to REPORTING state; the group timer is overloaded |
983 | * for group and group-source query responses. |
984 | * |
985 | * Unlike MLDv2, the delay per group should be jittered |
986 | * to avoid bursts of MLDv1 reports. |
987 | */ |
988 | static uint32_t |
989 | mld_v1_update_group(struct in6_multi *inm, const int timer) |
990 | { |
991 | IN6M_LOCK_ASSERT_HELD(inm); |
992 | |
993 | MLD_PRINTF(("%s: %s/%s timer=%d\n" , __func__, |
994 | ip6_sprintf(&inm->in6m_addr), |
995 | if_name(inm->in6m_ifp), timer)); |
996 | |
997 | switch (inm->in6m_state) { |
998 | case MLD_NOT_MEMBER: |
999 | case MLD_SILENT_MEMBER: |
1000 | break; |
1001 | case MLD_REPORTING_MEMBER: |
1002 | if (inm->in6m_timer != 0 && |
1003 | inm->in6m_timer <= timer) { |
1004 | MLD_PRINTF(("%s: REPORTING and timer running, " |
1005 | "skipping.\n" , __func__)); |
1006 | break; |
1007 | } |
1008 | OS_FALLTHROUGH; |
1009 | case MLD_SG_QUERY_PENDING_MEMBER: |
1010 | case MLD_G_QUERY_PENDING_MEMBER: |
1011 | case MLD_IDLE_MEMBER: |
1012 | case MLD_LAZY_MEMBER: |
1013 | case MLD_AWAKENING_MEMBER: |
1014 | MLD_PRINTF(("%s: ->REPORTING\n" , __func__)); |
1015 | inm->in6m_state = MLD_REPORTING_MEMBER; |
1016 | inm->in6m_timer = MLD_RANDOM_DELAY(timer); |
1017 | break; |
1018 | case MLD_SLEEPING_MEMBER: |
1019 | MLD_PRINTF(("%s: ->AWAKENING\n" , __func__)); |
1020 | inm->in6m_state = MLD_AWAKENING_MEMBER; |
1021 | break; |
1022 | case MLD_LEAVING_MEMBER: |
1023 | break; |
1024 | } |
1025 | |
1026 | return inm->in6m_timer; |
1027 | } |
1028 | |
1029 | /* |
1030 | * Process a received MLDv2 general, group-specific or |
1031 | * group-and-source-specific query. |
1032 | * |
1033 | * Assumes that the query header has been pulled up to sizeof(mldv2_query). |
1034 | * |
1035 | * Return 0 if successful, otherwise an appropriate error code is returned. |
1036 | */ |
1037 | static int |
1038 | mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, |
1039 | struct mbuf *m, const int off, const int icmp6len) |
1040 | { |
1041 | struct mld_ifinfo *mli; |
1042 | struct mldv2_query *mld; |
1043 | struct in6_multi *inm; |
1044 | uint32_t maxdelay, nsrc, qqi, timer; |
1045 | int err = 0, is_general_query; |
1046 | uint8_t qrv; |
1047 | struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; |
1048 | |
1049 | MLD_LOCK_ASSERT_NOTHELD(); |
1050 | |
1051 | is_general_query = 0; |
1052 | |
1053 | if (!mld_v2enable) { |
1054 | os_log_info(OS_LOG_DEFAULT, "%s: ignore v2 query on ifp %s\n" , |
1055 | __func__, if_name(ifp)); |
1056 | goto done; |
1057 | } |
1058 | |
1059 | /* |
1060 | * RFC3810 Section 6.2: MLD queries must originate from |
1061 | * a router's link-local address. |
1062 | */ |
1063 | if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { |
1064 | os_log_info(OS_LOG_DEFAULT, |
"%s: ignore v2 query src %s on ifp %s\n",
1066 | __func__, ip6_sprintf(&ip6->ip6_src), |
1067 | if_name(ifp)); |
1068 | goto done; |
1069 | } |
1070 | |
1071 | os_log_debug(OS_LOG_DEFAULT, |
1072 | "%s: input v2 query on ifp %s\n" , __func__, |
1073 | if_name(ifp)); |
1074 | |
1075 | mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off); |
1076 | |
1077 | maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ |
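/*
 * Per RFC 3810 Section 5.1.3, a Maximum Response Code greater than 32767
 * is mantissa/exponent encoded: maxdelay = (mant | 0x1000) << (exp + 3).
 */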
1078 | if (maxdelay > SHRT_MAX) { |
1079 | maxdelay = (MLD_MRC_MANT((uint16_t)maxdelay) | 0x1000) << |
1080 | (MLD_MRC_EXP((uint16_t)maxdelay) + 3); |
1081 | } |
1082 | timer = maxdelay / MLD_TIMER_SCALE; |
1083 | if (timer == 0) { |
1084 | timer = 1; |
1085 | } |
1086 | |
1087 | qrv = MLD_QRV(mld->mld_misc); |
1088 | if (qrv < 2) { |
1089 | MLD_PRINTF(("%s: clamping qrv %d to %d\n" , __func__, |
1090 | qrv, MLD_RV_INIT)); |
1091 | qrv = MLD_RV_INIT; |
1092 | } |
1093 | |
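/*
 * QQIC values of 128 or greater are mantissa/exponent encoded
 * (RFC 3810 Section 5.1.9); decode them into a query interval in seconds.
 */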
1094 | qqi = mld->mld_qqi; |
1095 | if (qqi >= 128) { |
1096 | qqi = MLD_QQIC_MANT(mld->mld_qqi) << |
1097 | (MLD_QQIC_EXP(mld->mld_qqi) + 3); |
1098 | } |
1099 | |
1100 | nsrc = ntohs(mld->mld_numsrc); |
1101 | if (nsrc > MLD_MAX_GS_SOURCES) { |
1102 | err = EMSGSIZE; |
1103 | goto done; |
1104 | } |
1105 | if (icmp6len < sizeof(struct mldv2_query) + |
1106 | (nsrc * sizeof(struct in6_addr))) { |
1107 | err = EMSGSIZE; |
1108 | goto done; |
1109 | } |
1110 | |
1111 | /* |
1112 | * Do further input validation upfront to avoid resetting timers |
1113 | * should we need to discard this query. |
1114 | */ |
1115 | if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { |
1116 | /* |
1117 | * A general query with a source list has undefined |
1118 | * behaviour; discard it. |
1119 | */ |
1120 | if (nsrc > 0) { |
1121 | err = EINVAL; |
1122 | goto done; |
1123 | } |
1124 | is_general_query = 1; |
1125 | } else { |
1126 | /* |
1127 | * Embed scope ID of receiving interface in MLD query for |
1128 | * lookup whilst we don't hold other locks (due to KAME |
1129 | * locking lameness). We own this mbuf chain just now. |
1130 | */ |
1131 | (void)in6_setscope(&mld->mld_addr, ifp, NULL); |
1132 | } |
1133 | |
1134 | mli = MLD_IFINFO(ifp); |
1135 | VERIFY(mli != NULL); |
1136 | |
1137 | MLI_LOCK(mli); |
1138 | /* |
1139 | * Discard the v2 query if we're in Compatibility Mode. |
1140 | * The RFC is pretty clear that hosts need to stay in MLDv1 mode |
1141 | * until the Old Version Querier Present timer expires. |
1142 | */ |
1143 | if (mli->mli_version != MLD_VERSION_2) { |
1144 | MLI_UNLOCK(mli); |
1145 | goto done; |
1146 | } |
1147 | |
1148 | mtp.qpt = mld_set_version(mli, MLD_VERSION_2); |
1149 | mli->mli_rv = qrv; |
1150 | mli->mli_qi = qqi; |
1151 | mli->mli_qri = MAX(timer, MLD_QRI_MIN); |
1152 | |
1153 | MLD_PRINTF(("%s: qrv %d qi %d qri %d\n" , __func__, mli->mli_rv, |
1154 | mli->mli_qi, mli->mli_qri)); |
1155 | |
1156 | if (is_general_query) { |
1157 | /* |
1158 | * MLDv2 General Query. |
1159 | * |
1160 | * Schedule a current-state report on this ifp for |
1161 | * all groups, possibly containing source lists. |
1162 | * |
1163 | * If there is a pending General Query response |
1164 | * scheduled earlier than the selected delay, do |
1165 | * not schedule any other reports. |
1166 | * Otherwise, reset the interface timer. |
1167 | */ |
1168 | os_log_debug(OS_LOG_DEFAULT, "%s: process v2 general query on ifp %s\n" , |
1169 | __func__, if_name(ifp)); |
1170 | if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) { |
1171 | mtp.it = mli->mli_v2_timer = MLD_RANDOM_DELAY(timer); |
1172 | } |
1173 | MLI_UNLOCK(mli); |
1174 | } else { |
1175 | MLI_UNLOCK(mli); |
1176 | /* |
1177 | * MLDv2 Group-specific or Group-and-source-specific Query. |
1178 | * |
1179 | * Group-source-specific queries are throttled on |
1180 | * a per-group basis to defeat denial-of-service attempts. |
1181 | * Queries for groups we are not a member of on this |
1182 | * link are simply ignored. |
1183 | */ |
1184 | in6_multihead_lock_shared(); |
1185 | IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm); |
1186 | in6_multihead_lock_done(); |
1187 | if (inm == NULL) { |
1188 | goto done; |
1189 | } |
1190 | |
1191 | IN6M_LOCK(inm); |
1192 | if (nsrc > 0) { |
if (!ratecheck(&inm->in6m_lastgsrtv,
    &mld_gsrdelay)) {
1195 | os_log_info(OS_LOG_DEFAULT, "%s: GS query throttled\n" , |
1196 | __func__); |
1197 | IN6M_UNLOCK(inm); |
1198 | IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */ |
1199 | goto done; |
1200 | } |
1201 | } |
1202 | os_log_debug(OS_LOG_DEFAULT, "%s: process v2 group query on ifp %s\n" , |
1203 | __func__, if_name(ifp)); |
1204 | /* |
1205 | * If there is a pending General Query response |
1206 | * scheduled sooner than the selected delay, no |
1207 | * further report need be scheduled. |
1208 | * Otherwise, prepare to respond to the |
1209 | * group-specific or group-and-source query. |
1210 | */ |
1211 | MLI_LOCK(mli); |
1212 | mtp.it = mli->mli_v2_timer; |
1213 | MLI_UNLOCK(mli); |
1214 | if (mtp.it == 0 || mtp.it >= timer) { |
1215 | (void) mld_v2_process_group_query(inm, timer, m, off); |
1216 | mtp.cst = inm->in6m_timer; |
1217 | } |
1218 | IN6M_UNLOCK(inm); |
1219 | IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */ |
1220 | /* XXX Clear embedded scope ID as userland won't expect it. */ |
1221 | in6_clearscope(&mld->mld_addr); |
1222 | } |
1223 | done: |
1224 | if (mtp.it > 0) { |
1225 | os_log_debug(OS_LOG_DEFAULT, "%s: v2 general query response scheduled in " |
1226 | "T+%d seconds on ifp %s\n" , __func__, mtp.it, |
1227 | if_name(ifp)); |
1228 | } |
1229 | mld_set_timeout(&mtp); |
1230 | |
1231 | return err; |
1232 | } |
1233 | |
1234 | /* |
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
1238 | */ |
1239 | static int |
1240 | mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, |
1241 | const int off) |
1242 | { |
1243 | struct mldv2_query *mld; |
1244 | int retval; |
1245 | uint16_t nsrc; |
1246 | |
1247 | IN6M_LOCK_ASSERT_HELD(inm); |
1248 | |
1249 | retval = 0; |
1250 | mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off); |
1251 | |
1252 | switch (inm->in6m_state) { |
1253 | case MLD_NOT_MEMBER: |
1254 | case MLD_SILENT_MEMBER: |
1255 | case MLD_SLEEPING_MEMBER: |
1256 | case MLD_LAZY_MEMBER: |
1257 | case MLD_AWAKENING_MEMBER: |
1258 | case MLD_IDLE_MEMBER: |
1259 | case MLD_LEAVING_MEMBER: |
1260 | return retval; |
1261 | case MLD_REPORTING_MEMBER: |
1262 | case MLD_G_QUERY_PENDING_MEMBER: |
1263 | case MLD_SG_QUERY_PENDING_MEMBER: |
1264 | break; |
1265 | } |
1266 | |
1267 | nsrc = ntohs(mld->mld_numsrc); |
1268 | |
1269 | /* |
1270 | * Deal with group-specific queries upfront. |
1271 | * If any group query is already pending, purge any recorded |
1272 | * source-list state if it exists, and schedule a query response |
1273 | * for this group-specific query. |
1274 | */ |
1275 | if (nsrc == 0) { |
1276 | if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || |
1277 | inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) { |
1278 | in6m_clear_recorded(inm); |
timer = min(inm->in6m_timer, timer);
1280 | } |
1281 | inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER; |
1282 | inm->in6m_timer = MLD_RANDOM_DELAY(timer); |
1283 | return retval; |
1284 | } |
1285 | |
1286 | /* |
1287 | * Deal with the case where a group-and-source-specific query has |
1288 | * been received but a group-specific query is already pending. |
1289 | */ |
1290 | if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) { |
timer = min(inm->in6m_timer, timer);
1292 | inm->in6m_timer = MLD_RANDOM_DELAY(timer); |
1293 | return retval; |
1294 | } |
1295 | |
1296 | /* |
1297 | * Finally, deal with the case where a group-and-source-specific |
1298 | * query has been received, where a response to a previous g-s-r |
1299 | * query exists, or none exists. |
1300 | * In this case, we need to parse the source-list which the Querier |
1301 | * has provided us with and check if we have any source list filter |
 * entries at T1 for these sources. If we do not, there is no need to
1303 | * schedule a report and the query may be dropped. |
1304 | * If we do, we must record them and schedule a current-state |
1305 | * report for those sources. |
1306 | */ |
1307 | if (inm->in6m_nsrc > 0) { |
1308 | struct mbuf *m; |
1309 | struct in6_addr addr; |
1310 | int i, nrecorded; |
1311 | int soff; |
1312 | |
1313 | m = m0; |
1314 | soff = off + sizeof(struct mldv2_query); |
1315 | nrecorded = 0; |
1316 | for (i = 0; i < nsrc; i++) { |
1317 | m_copydata(m, soff, sizeof(addr), &addr); |
1318 | retval = in6m_record_source(inm, &addr); |
1319 | if (retval < 0) { |
1320 | break; |
1321 | } |
1322 | nrecorded += retval; |
1323 | soff += sizeof(struct in6_addr); |
1324 | |
1325 | while (m && (soff >= m->m_len)) { |
1326 | soff -= m->m_len; |
1327 | m = m->m_next; |
1328 | } |
1329 | |
1330 | /* should not be possible: */ |
1331 | if (m == NULL) { |
1332 | break; |
1333 | } |
1334 | } |
1335 | if (nrecorded > 0) { |
1336 | MLD_PRINTF(("%s: schedule response to SG query\n" , |
1337 | __func__)); |
1338 | inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER; |
1339 | inm->in6m_timer = MLD_RANDOM_DELAY(timer); |
1340 | } |
1341 | } |
1342 | |
1343 | return retval; |
1344 | } |
1345 | |
1346 | /* |
1347 | * Process a received MLDv1 host membership report. |
1348 | * Assumes mld points to mld_hdr in pulled up mbuf chain. |
1349 | * |
1350 | * NOTE: Can't be fully const correct as we temporarily embed scope ID in |
1351 | * mld_addr. This is OK as we own the mbuf chain. |
1352 | */ |
1353 | static int |
1354 | mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, |
1355 | const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld) |
1356 | { |
1357 | struct in6_addr src, dst; |
1358 | struct in6_ifaddr *ia; |
1359 | struct in6_multi *inm; |
1360 | |
1361 | if (!mld_v1enable) { |
1362 | os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report on ifp %s\n" , |
1363 | __func__, if_name(ifp)); |
1364 | return 0; |
1365 | } |
1366 | |
1367 | if ((ifp->if_flags & IFF_LOOPBACK) || |
1368 | (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { |
1369 | return 0; |
1370 | } |
1371 | |
1372 | /* |
1373 | * MLDv1 reports must originate from a host's link-local address, |
1374 | * or the unspecified address (when booting). |
1375 | */ |
1376 | src = ip6->ip6_src; |
1377 | in6_clearscope(&src); |
1378 | if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) { |
os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report src %s on ifp %s\n",
1380 | __func__, ip6_sprintf(&ip6->ip6_src), |
1381 | if_name(ifp)); |
1382 | return EINVAL; |
1383 | } |
1384 | |
1385 | /* |
1386 | * RFC2710 Section 4: MLDv1 reports must pertain to a multicast |
1387 | * group, and must be directed to the group itself. |
1388 | */ |
1389 | dst = ip6->ip6_dst; |
1390 | in6_clearscope(&dst); |
1391 | if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) || |
1392 | !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) { |
os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report dst %s on ifp %s\n",
1394 | __func__, ip6_sprintf(&ip6->ip6_dst), |
1395 | if_name(ifp)); |
1396 | return EINVAL; |
1397 | } |
1398 | |
1399 | /* |
1400 | * Make sure we don't hear our own membership report, as fast |
1401 | * leave requires knowing that we are the only member of a |
1402 | * group. Assume we used the link-local address if available, |
1403 | * otherwise look for ::. |
1404 | * |
1405 | * XXX Note that scope ID comparison is needed for the address |
1406 | * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be |
1407 | * performed for the on-wire address. |
1408 | */ |
1409 | ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); |
1410 | if (ia != NULL) { |
1411 | IFA_LOCK(&ia->ia_ifa); |
1412 | if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) { |
1413 | IFA_UNLOCK(&ia->ia_ifa); |
ifa_remref(&ia->ia_ifa);
1415 | return 0; |
1416 | } |
1417 | IFA_UNLOCK(&ia->ia_ifa); |
ifa_remref(&ia->ia_ifa);
1419 | } else if (IN6_IS_ADDR_UNSPECIFIED(&src)) { |
1420 | return 0; |
1421 | } |
1422 | |
1423 | os_log_debug(OS_LOG_DEFAULT, "%s: process v1 report %s on ifp %s\n" , |
1424 | __func__, ip6_sprintf(&mld->mld_addr), |
1425 | if_name(ifp)); |
1426 | |
1427 | /* |
1428 | * Embed scope ID of receiving interface in MLD query for lookup |
1429 | * whilst we don't hold other locks (due to KAME locking lameness). |
1430 | */ |
1431 | if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { |
1432 | (void)in6_setscope(&mld->mld_addr, ifp, NULL); |
1433 | } |
1434 | |
1435 | /* |
1436 | * MLDv1 report suppression. |
1437 | * If we are a member of this group, and our membership should be |
1438 | * reported, and our group timer is pending or about to be reset, |
1439 | * stop our group timer by transitioning to the 'lazy' state. |
1440 | */ |
1441 | in6_multihead_lock_shared(); |
1442 | IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm); |
1443 | in6_multihead_lock_done(); |
1444 | |
1445 | if (inm != NULL) { |
1446 | struct mld_ifinfo *mli; |
1447 | |
1448 | IN6M_LOCK(inm); |
1449 | mli = inm->in6m_mli; |
1450 | VERIFY(mli != NULL); |
1451 | |
1452 | MLI_LOCK(mli); |
1453 | /* |
1454 | * If we are in MLDv2 host mode, do not allow the |
1455 | * other host's MLDv1 report to suppress our reports. |
1456 | */ |
1457 | if (mli->mli_version == MLD_VERSION_2) { |
1458 | MLI_UNLOCK(mli); |
1459 | IN6M_UNLOCK(inm); |
1460 | IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */ |
1461 | goto out; |
1462 | } |
1463 | MLI_UNLOCK(mli); |
1464 | |
1465 | inm->in6m_timer = 0; |
1466 | |
1467 | switch (inm->in6m_state) { |
1468 | case MLD_NOT_MEMBER: |
1469 | case MLD_SILENT_MEMBER: |
1470 | case MLD_SLEEPING_MEMBER: |
1471 | break; |
1472 | case MLD_REPORTING_MEMBER: |
1473 | case MLD_IDLE_MEMBER: |
1474 | case MLD_AWAKENING_MEMBER: |
1475 | MLD_PRINTF(("%s: report suppressed for %s on " |
1476 | "ifp 0x%llx(%s)\n" , __func__, |
1477 | ip6_sprintf(&mld->mld_addr), |
1478 | (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); |
1479 | OS_FALLTHROUGH; |
1480 | case MLD_LAZY_MEMBER: |
1481 | inm->in6m_state = MLD_LAZY_MEMBER; |
1482 | break; |
1483 | case MLD_G_QUERY_PENDING_MEMBER: |
1484 | case MLD_SG_QUERY_PENDING_MEMBER: |
1485 | case MLD_LEAVING_MEMBER: |
1486 | break; |
1487 | } |
1488 | IN6M_UNLOCK(inm); |
1489 | IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */ |
1490 | } |
1491 | |
1492 | out: |
1493 | /* XXX Clear embedded scope ID as userland won't expect it. */ |
1494 | in6_clearscope(&mld->mld_addr); |
1495 | |
1496 | return 0; |
1497 | } |
1498 | |
1499 | /* |
1500 | * MLD input path. |
1501 | * |
1502 | * Assume query messages which fit in a single ICMPv6 message header |
1503 | * have been pulled up. |
1504 | * Assume that userland will want to see the message, even if it |
1505 | * otherwise fails kernel input validation; do not free it. |
1506 | * Pullup may however free the mbuf chain m if it fails. |
1507 | * |
1508 | * Return IPPROTO_DONE if we freed m. Otherwise, return 0. |
1509 | */ |
1510 | int |
1511 | mld_input(struct mbuf *m, int off, int icmp6len) |
1512 | { |
1513 | struct ifnet *ifp = NULL; |
1514 | struct ip6_hdr *ip6 = NULL; |
1515 | struct mld_hdr *mld = NULL; |
1516 | int mldlen = 0; |
1517 | |
1518 | MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n" , __func__, |
1519 | (uint64_t)VM_KERNEL_ADDRPERM(m), off)); |
1520 | |
1521 | ifp = m->m_pkthdr.rcvif; |
1522 | |
1523 | /* Pullup to appropriate size. */ |
1524 | mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off); |
1525 | if (mld->mld_type == MLD_LISTENER_QUERY && |
1526 | icmp6len >= sizeof(struct mldv2_query)) { |
1527 | mldlen = sizeof(struct mldv2_query); |
1528 | } else { |
1529 | mldlen = sizeof(struct mld_hdr); |
1530 | } |
/* Check that the mldv2_query/mld_hdr fits in the first mbuf. */
1532 | IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE); |
1533 | IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen); |
1534 | if (mld == NULL) { |
1535 | icmp6stat.icp6s_badlen++; |
1536 | return IPPROTO_DONE; |
1537 | } |
1538 | ip6 = mtod(m, struct ip6_hdr *); |
1539 | |
1540 | /* |
1541 | * Userland needs to see all of this traffic for implementing |
1542 | * the endpoint discovery portion of multicast routing. |
1543 | */ |
1544 | switch (mld->mld_type) { |
1545 | case MLD_LISTENER_QUERY: |
1546 | icmp6_ifstat_inc(ifp, ifs6_in_mldquery); |
1547 | if (icmp6len == sizeof(struct mld_hdr)) { |
1548 | if (mld_v1_input_query(ifp, ip6, mld) != 0) { |
1549 | return 0; |
1550 | } |
1551 | } else if (icmp6len >= sizeof(struct mldv2_query)) { |
1552 | if (mld_v2_input_query(ifp, ip6, m, off, |
1553 | icmp6len) != 0) { |
1554 | return 0; |
1555 | } |
1556 | } |
1557 | break; |
1558 | case MLD_LISTENER_REPORT: |
1559 | icmp6_ifstat_inc(ifp, ifs6_in_mldreport); |
1560 | if (mld_v1_input_report(ifp, m, ip6, mld) != 0) { |
1561 | return 0; |
1562 | } |
1563 | break; |
1564 | case MLDV2_LISTENER_REPORT: |
1565 | icmp6_ifstat_inc(ifp, ifs6_in_mldreport); |
1566 | break; |
1567 | case MLD_LISTENER_DONE: |
1568 | icmp6_ifstat_inc(ifp, ifs6_in_mlddone); |
1569 | break; |
1570 | default: |
1571 | break; |
1572 | } |
1573 | |
1574 | return 0; |
1575 | } |
1576 | |
1577 | /* |
1578 | * Schedule MLD timer based on various parameters; caller must ensure that |
1579 | * lock ordering is maintained as this routine acquires MLD global lock. |
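 * The mld_tparams fields select which global timers are armed: qpt
 * (querier present), it (interface/general query response), cst (current
 * state) and sct (state change); see the *_timers_running6 flags above.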
1580 | */ |
1581 | void |
1582 | mld_set_timeout(struct mld_tparams *mtp) |
1583 | { |
1584 | MLD_LOCK_ASSERT_NOTHELD(); |
1585 | VERIFY(mtp != NULL); |
1586 | |
1587 | if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) { |
1588 | MLD_LOCK(); |
1589 | if (mtp->qpt != 0) { |
1590 | querier_present_timers_running6 = 1; |
1591 | } |
1592 | if (mtp->it != 0) { |
1593 | interface_timers_running6 = 1; |
1594 | } |
1595 | if (mtp->cst != 0) { |
1596 | current_state_timers_running6 = 1; |
1597 | } |
1598 | if (mtp->sct != 0) { |
1599 | state_change_timers_running6 = 1; |
1600 | } |
1601 | if (mtp->fast) { |
1602 | mld_sched_fast_timeout(); |
1603 | } else { |
1604 | mld_sched_timeout(); |
1605 | } |
1606 | MLD_UNLOCK(); |
1607 | } |
1608 | } |
1609 | |
1610 | void |
1611 | mld_set_fast_timeout(struct mld_tparams *mtp) |
1612 | { |
1613 | VERIFY(mtp != NULL); |
1614 | mtp->fast = true; |
1615 | mld_set_timeout(mtp); |
1616 | } |
1617 | |
1618 | /* |
1619 | * MLD6 timer handler (per 1 second). |
1620 | */ |
1621 | static void |
1622 | mld_timeout(thread_call_param_t arg0, thread_call_param_t arg1 __unused) |
1623 | { |
1624 | struct ifqueue scq; /* State-change packets */ |
1625 | struct ifqueue qrq; /* Query response packets */ |
1626 | struct ifnet *ifp; |
1627 | struct mld_ifinfo *mli; |
1628 | struct in6_multi *inm; |
1629 | int uri_sec = 0; |
1630 | unsigned int genid = mld_mli_list_genid; |
1631 | bool fast = arg0 != NULL; |
1632 | |
1633 | SLIST_HEAD(, in6_multi) in6m_dthead; |
1634 | |
1635 | SLIST_INIT(&in6m_dthead); |
1636 | |
1637 | /* |
1638 | * Update coarse-grained networking timestamp (in sec.); the idea |
1639 | * is to piggy-back on the timeout callout to update the counter |
1640 | * returnable via net_uptime(). |
1641 | */ |
1642 | net_update_uptime(); |
1643 | |
1644 | MLD_LOCK(); |
1645 | |
1646 | MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d, fast %d\n" , __func__, |
1647 | querier_present_timers_running6, interface_timers_running6, |
1648 | current_state_timers_running6, state_change_timers_running6, fast)); |
1649 | |
1650 | if (fast) { |
1651 | /* |
1652 | * When running the fast timer, skip processing |
1653 | * of "querier present" timers since they are |
1654 | * based on 1-second intervals. |
1655 | */ |
1656 | goto skip_query_timers; |
1657 | } |
1658 | /* |
1659 | * MLDv1 querier present timer processing. |
1660 | */ |
1661 | if (querier_present_timers_running6) { |
1662 | querier_present_timers_running6 = 0; |
1663 | LIST_FOREACH(mli, &mli_head, mli_link) { |
1664 | MLI_LOCK(mli); |
1665 | mld_v1_process_querier_timers(mli); |
1666 | if (mli->mli_v1_timer > 0) { |
1667 | querier_present_timers_running6 = 1; |
1668 | } |
1669 | MLI_UNLOCK(mli); |
1670 | } |
1671 | } |
1672 | |
1673 | /* |
1674 | * MLDv2 General Query response timer processing. |
1675 | */ |
1676 | if (interface_timers_running6) { |
1677 | MLD_PRINTF(("%s: interface timers running\n" , __func__)); |
1678 | interface_timers_running6 = 0; |
1679 | mli = LIST_FIRST(&mli_head); |
1680 | |
1681 | while (mli != NULL) { |
1682 | if (mli->mli_flags & MLIF_PROCESSED) { |
1683 | mli = LIST_NEXT(mli, mli_link); |
1684 | continue; |
1685 | } |
1686 | |
1687 | MLI_LOCK(mli); |
1688 | if (mli->mli_version != MLD_VERSION_2) { |
1689 | MLI_UNLOCK(mli); |
1690 | mli = LIST_NEXT(mli, mli_link); |
1691 | continue; |
1692 | } |
1693 | /* |
1694 | * XXX The logic below ends up calling |
1695 | * mld_dispatch_packet which can unlock mli |
1696 | * and the global MLD lock. |
1697 | * Therefore grab a reference on MLI and also |
1698 | * check for generation count to see if we should |
1699 | * iterate the list again. |
1700 | */ |
1701 | MLI_ADDREF_LOCKED(mli); |
1702 | |
1703 | if (mli->mli_v2_timer == 0) { |
1704 | /* Do nothing. */ |
1705 | } else if (--mli->mli_v2_timer == 0) { |
1706 | if (mld_v2_dispatch_general_query(mli) > 0) { |
1707 | interface_timers_running6 = 1; |
1708 | } |
1709 | } else { |
1710 | interface_timers_running6 = 1; |
1711 | } |
1712 | mli->mli_flags |= MLIF_PROCESSED; |
1713 | MLI_UNLOCK(mli); |
1714 | MLI_REMREF(mli); |
1715 | |
1716 | if (genid != mld_mli_list_genid) { |
1717 | MLD_PRINTF(("%s: MLD information list changed " |
1718 | "in the middle of iteration! Restart iteration.\n" , |
1719 | __func__)); |
1720 | mli = LIST_FIRST(&mli_head); |
1721 | genid = mld_mli_list_genid; |
1722 | } else { |
1723 | mli = LIST_NEXT(mli, mli_link); |
1724 | } |
1725 | } |
1726 | |
1727 | LIST_FOREACH(mli, &mli_head, mli_link) |
1728 | mli->mli_flags &= ~MLIF_PROCESSED; |
1729 | } |
1730 | |
1731 | skip_query_timers: |
1732 | if (!current_state_timers_running6 && |
1733 | !state_change_timers_running6) { |
1734 | goto out_locked; |
1735 | } |
1736 | |
1737 | current_state_timers_running6 = 0; |
1738 | state_change_timers_running6 = 0; |
1739 | |
1740 | MLD_PRINTF(("%s: state change timers running\n" , __func__)); |
1741 | |
memset(&qrq, 0, sizeof(struct ifqueue));
1743 | qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS; |
1744 | |
memset(&scq, 0, sizeof(struct ifqueue));
1746 | scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS; |
1747 | |
1748 | /* |
1749 | * MLD host report and state-change timer processing. |
1750 | * Note: Processing a v2 group timer may remove a node. |
1751 | */ |
1752 | mli = LIST_FIRST(&mli_head); |
1753 | |
1754 | while (mli != NULL) { |
1755 | struct in6_multistep step; |
1756 | |
1757 | if (mli->mli_flags & MLIF_PROCESSED) { |
1758 | mli = LIST_NEXT(mli, mli_link); |
1759 | continue; |
1760 | } |
1761 | |
1762 | MLI_LOCK(mli); |
1763 | ifp = mli->mli_ifp; |
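/*
* Randomized delay (in seconds) derived from the link's
* Unsolicited Report Interval; used below to stagger the
* state-change report retransmissions.
*/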
1764 | uri_sec = MLD_RANDOM_DELAY(mli->mli_uri); |
1765 | MLI_UNLOCK(mli); |
1766 | |
1767 | in6_multihead_lock_shared(); |
1768 | IN6_FIRST_MULTI(step, inm); |
1769 | while (inm != NULL) { |
1770 | IN6M_LOCK(inm); |
1771 | if (inm->in6m_ifp != ifp) { |
1772 | goto next; |
1773 | } |
1774 | |
1775 | MLI_LOCK(mli); |
1776 | switch (mli->mli_version) { |
1777 | case MLD_VERSION_1: |
1778 | mld_v1_process_group_timer(inm, |
1779 | mli->mli_version); |
1780 | break; |
1781 | case MLD_VERSION_2: |
1782 | mld_v2_process_group_timers(mli, &qrq, |
1783 | &scq, inm, uri_sec); |
1784 | break; |
1785 | } |
1786 | MLI_UNLOCK(mli); |
1787 | next: |
1788 | IN6M_UNLOCK(inm); |
1789 | IN6_NEXT_MULTI(step, inm); |
1790 | } |
1791 | in6_multihead_lock_done(); |
1792 | |
1793 | /* |
1794 | * XXX The logic below ends up calling |
1795 | * mld_dispatch_packet which can unlock mli |
1796 | * and the global MLD lock. |
1797 | * Therefore grab a reference on MLI and also |
1798 | * check for generation count to see if we should |
1799 | * iterate the list again. |
1800 | */ |
1801 | MLI_LOCK(mli); |
1802 | MLI_ADDREF_LOCKED(mli); |
1803 | if (mli->mli_version == MLD_VERSION_1) { |
mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
1805 | } else if (mli->mli_version == MLD_VERSION_2) { |
1806 | MLI_UNLOCK(mli); |
mld_dispatch_queue_locked(NULL, &qrq, 0);
mld_dispatch_queue_locked(NULL, &scq, 0);
1809 | VERIFY(qrq.ifq_len == 0); |
1810 | VERIFY(scq.ifq_len == 0); |
1811 | MLI_LOCK(mli); |
1812 | } |
1813 | /* |
1814 | * In case there are still any pending membership reports |
1815 | * which didn't get drained at version change time. |
1816 | */ |
1817 | IF_DRAIN(&mli->mli_v1q); |
1818 | /* |
1819 | * Release all deferred inm records, and drain any locally |
1820 | * enqueued packets; do it even if the current MLD version |
1821 | * for the link is no longer MLDv2, in order to handle the |
1822 | * version change case. |
1823 | */ |
1824 | mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead); |
1825 | mli->mli_flags |= MLIF_PROCESSED; |
1826 | MLI_UNLOCK(mli); |
1827 | MLI_REMREF(mli); |
1828 | |
1829 | IF_DRAIN(&qrq); |
1830 | IF_DRAIN(&scq); |
1831 | |
1832 | if (genid != mld_mli_list_genid) { |
1833 | MLD_PRINTF(("%s: MLD information list changed " |
1834 | "in the middle of iteration! Restart iteration.\n" , |
1835 | __func__)); |
1836 | mli = LIST_FIRST(&mli_head); |
1837 | genid = mld_mli_list_genid; |
1838 | } else { |
1839 | mli = LIST_NEXT(mli, mli_link); |
1840 | } |
1841 | } |
1842 | |
1843 | LIST_FOREACH(mli, &mli_head, mli_link) |
1844 | mli->mli_flags &= ~MLIF_PROCESSED; |
1845 | |
1846 | out_locked: |
1847 | /* re-arm the timer if there's work to do */ |
1848 | if (fast) { |
1849 | mld_fast_timeout_run = false; |
1850 | } else { |
1851 | mld_timeout_run = false; |
1852 | } |
1853 | mld_sched_timeout(); |
1854 | MLD_UNLOCK(); |
1855 | |
/* Now that we've dropped all locks, release detached records */
1857 | MLD_REMOVE_DETACHED_IN6M(&in6m_dthead); |
1858 | } |
1859 | |
1860 | static void |
1861 | mld_sched_timeout(void) |
1862 | { |
1863 | static thread_call_t mld_timeout_tcall; |
1864 | uint64_t deadline = 0, leeway = 0; |
1865 | |
1866 | MLD_LOCK_ASSERT_HELD(); |
1867 | if (mld_timeout_tcall == NULL) { |
1868 | mld_timeout_tcall = |
thread_call_allocate_with_options(mld_timeout,
NULL,
THREAD_CALL_PRIORITY_KERNEL,
THREAD_CALL_OPTIONS_ONCE);
1873 | } |
1874 | |
1875 | if (!mld_timeout_run && |
1876 | (querier_present_timers_running6 || current_state_timers_running6 || |
1877 | interface_timers_running6 || state_change_timers_running6)) { |
1878 | mld_timeout_run = true; |
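/* mld_timeout_delay and mld_timeout_leeway are in milliseconds. */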
clock_interval_to_deadline(mld_timeout_delay, NSEC_PER_MSEC,
&deadline);
clock_interval_to_absolutetime_interval(mld_timeout_leeway,
NSEC_PER_MSEC, &leeway);
thread_call_enter_delayed_with_leeway(mld_timeout_tcall, NULL,
1884 | deadline, leeway, |
1885 | THREAD_CALL_DELAY_LEEWAY); |
1886 | } |
1887 | } |
1888 | |
1889 | static void |
1890 | mld_sched_fast_timeout(void) |
1891 | { |
1892 | static thread_call_t mld_fast_timeout_tcall; |
1893 | |
1894 | MLD_LOCK_ASSERT_HELD(); |
1895 | if (mld_fast_timeout_tcall == NULL) { |
1896 | mld_fast_timeout_tcall = |
thread_call_allocate_with_options(mld_timeout,
mld_sched_fast_timeout,
THREAD_CALL_PRIORITY_KERNEL,
THREAD_CALL_OPTIONS_ONCE);
1901 | } |
1902 | if (!mld_fast_timeout_run && |
1903 | (current_state_timers_running6 || state_change_timers_running6)) { |
1904 | mld_fast_timeout_run = true; |
thread_call_enter(mld_fast_timeout_tcall);
1906 | } |
1907 | } |
1908 | |
1909 | /* |
1910 | * Appends an in6_multi to the list to be released later. |
1911 | * |
1912 | * Caller must be holding mli_lock. |
1913 | */ |
1914 | static void |
1915 | mld_append_relq(struct mld_ifinfo *mli, struct in6_multi *inm) |
1916 | { |
1917 | MLI_LOCK_ASSERT_HELD(mli); |
1918 | if (inm->in6m_in_nrele) { |
1919 | os_log_debug(OS_LOG_DEFAULT, "%s: inm %llx already on relq ifp %s\n" , |
1920 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm), |
1921 | mli->mli_ifp != NULL ? if_name(mli->mli_ifp) : "<null>" ); |
1922 | return; |
1923 | } |
1924 | os_log_debug(OS_LOG_DEFAULT, "%s: adding inm %llx on relq ifp %s\n" , |
1925 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm), |
1926 | mli->mli_ifp != NULL ? if_name(mli->mli_ifp) : "<null>" ); |
1927 | inm->in6m_in_nrele = true; |
1928 | SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, in6m_nrele); |
1929 | } |
1930 | |
1931 | /* |
1932 | * Free the in6_multi reference(s) for this MLD lifecycle. |
1933 | * |
1934 | * Caller must be holding mli_lock. |
1935 | */ |
1936 | static void |
1937 | mld_flush_relq(struct mld_ifinfo *mli, struct mld_in6m_relhead *in6m_dthead) |
1938 | { |
1939 | struct in6_multi *inm; |
1940 | SLIST_HEAD(, in6_multi) temp_relinmhead; |
1941 | |
1942 | /* |
1943 | * Before dropping the mli_lock, copy all the items in the |
1944 | * release list to a temporary list to prevent other threads |
1945 | * from changing mli_relinmhead while we are traversing it. |
1946 | */ |
1947 | MLI_LOCK_ASSERT_HELD(mli); |
1948 | SLIST_INIT(&temp_relinmhead); |
1949 | while ((inm = SLIST_FIRST(&mli->mli_relinmhead)) != NULL) { |
1950 | SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele); |
1951 | SLIST_INSERT_HEAD(&temp_relinmhead, inm, in6m_nrele); |
1952 | } |
1953 | MLI_UNLOCK(mli); |
1954 | in6_multihead_lock_exclusive(); |
1955 | while ((inm = SLIST_FIRST(&temp_relinmhead)) != NULL) { |
1956 | int lastref; |
1957 | |
1958 | SLIST_REMOVE_HEAD(&temp_relinmhead, in6m_nrele); |
1959 | IN6M_LOCK(inm); |
1960 | os_log_debug(OS_LOG_DEFAULT, "%s: flushing inm %llx on relq ifp %s\n" , |
1961 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm), |
1962 | inm->in6m_ifp != NULL ? if_name(inm->in6m_ifp) : "<null>" ); |
1963 | VERIFY(inm->in6m_in_nrele == true); |
1964 | inm->in6m_in_nrele = false; |
1965 | VERIFY(inm->in6m_nrelecnt != 0); |
1966 | inm->in6m_nrelecnt--; |
1967 | lastref = in6_multi_detach(inm); |
1968 | VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) && |
1969 | inm->in6m_reqcnt == 0)); |
1970 | IN6M_UNLOCK(inm); |
1971 | /* from mli_relinmhead */ |
1972 | IN6M_REMREF(inm); |
1973 | /* from in6_multihead_list */ |
1974 | if (lastref) { |
1975 | /* |
1976 | * Defer releasing our final reference, as we |
1977 | * are holding the MLD lock at this point, and |
1978 | * we could end up with locking issues later on |
1979 | * (while issuing SIOCDELMULTI) when this is the |
1980 | * final reference count. Let the caller do it |
1981 | * when it is safe. |
1982 | */ |
1983 | MLD_ADD_DETACHED_IN6M(in6m_dthead, inm); |
1984 | } |
1985 | } |
1986 | in6_multihead_lock_done(); |
1987 | MLI_LOCK(mli); |
1988 | } |
1989 | |
1990 | /* |
1991 | * Update host report group timer. |
1992 | * Will update the global pending timer flags. |
1993 | */ |
1994 | static void |
1995 | mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version) |
1996 | { |
1997 | #pragma unused(mld_version) |
1998 | int report_timer_expired; |
1999 | |
2000 | MLD_LOCK_ASSERT_HELD(); |
2001 | IN6M_LOCK_ASSERT_HELD(inm); |
2002 | MLI_LOCK_ASSERT_HELD(inm->in6m_mli); |
2003 | |
2004 | if (inm->in6m_timer == 0) { |
2005 | report_timer_expired = 0; |
2006 | } else if (--inm->in6m_timer == 0) { |
2007 | report_timer_expired = 1; |
2008 | } else { |
2009 | current_state_timers_running6 = 1; |
2010 | /* caller will schedule timer */ |
2011 | return; |
2012 | } |
2013 | |
2014 | switch (inm->in6m_state) { |
2015 | case MLD_NOT_MEMBER: |
2016 | case MLD_SILENT_MEMBER: |
2017 | case MLD_IDLE_MEMBER: |
2018 | case MLD_LAZY_MEMBER: |
2019 | case MLD_SLEEPING_MEMBER: |
2020 | case MLD_AWAKENING_MEMBER: |
2021 | break; |
2022 | case MLD_REPORTING_MEMBER: |
2023 | if (report_timer_expired) { |
2024 | inm->in6m_state = MLD_IDLE_MEMBER; |
2025 | (void) mld_v1_transmit_report(inm, |
2026 | MLD_LISTENER_REPORT); |
2027 | IN6M_LOCK_ASSERT_HELD(inm); |
2028 | MLI_LOCK_ASSERT_HELD(inm->in6m_mli); |
2029 | } |
2030 | break; |
2031 | case MLD_G_QUERY_PENDING_MEMBER: |
2032 | case MLD_SG_QUERY_PENDING_MEMBER: |
2033 | case MLD_LEAVING_MEMBER: |
2034 | break; |
2035 | } |
2036 | } |
2037 | |
2038 | /* |
2039 | * Update a group's timers for MLDv2. |
2040 | * Will update the global pending timer flags. |
2041 | * Note: Unlocked read from mli. |
2042 | */ |
2043 | static void |
2044 | mld_v2_process_group_timers(struct mld_ifinfo *mli, |
2045 | struct ifqueue *qrq, struct ifqueue *scq, |
2046 | struct in6_multi *inm, const int uri_sec) |
2047 | { |
2048 | int query_response_timer_expired; |
2049 | int state_change_retransmit_timer_expired; |
2050 | |
2051 | MLD_LOCK_ASSERT_HELD(); |
2052 | IN6M_LOCK_ASSERT_HELD(inm); |
2053 | MLI_LOCK_ASSERT_HELD(mli); |
2054 | VERIFY(mli == inm->in6m_mli); |
2055 | |
2056 | query_response_timer_expired = 0; |
2057 | state_change_retransmit_timer_expired = 0; |
2058 | |
2059 | /* |
2060 | * During a transition from compatibility mode back to MLDv2, |
2061 | * a group record in REPORTING state may still have its group |
2062 | * timer active. This is a no-op in this function; it is easier |
2063 | * to deal with it here than to complicate the timeout path. |
2064 | */ |
2065 | if (inm->in6m_timer == 0) { |
2066 | query_response_timer_expired = 0; |
2067 | } else if (--inm->in6m_timer == 0) { |
2068 | query_response_timer_expired = 1; |
2069 | } else { |
2070 | current_state_timers_running6 = 1; |
2071 | /* caller will schedule timer */ |
2072 | } |
2073 | |
2074 | if (inm->in6m_sctimer == 0) { |
2075 | state_change_retransmit_timer_expired = 0; |
2076 | } else if (--inm->in6m_sctimer == 0) { |
2077 | state_change_retransmit_timer_expired = 1; |
2078 | } else { |
2079 | state_change_timers_running6 = 1; |
2080 | /* caller will schedule timer */ |
2081 | } |
2082 | |
2083 | /* We are in timer callback, so be quick about it. */ |
2084 | if (!state_change_retransmit_timer_expired && |
2085 | !query_response_timer_expired) { |
2086 | return; |
2087 | } |
2088 | |
2089 | switch (inm->in6m_state) { |
2090 | case MLD_NOT_MEMBER: |
2091 | case MLD_SILENT_MEMBER: |
2092 | case MLD_SLEEPING_MEMBER: |
2093 | case MLD_LAZY_MEMBER: |
2094 | case MLD_AWAKENING_MEMBER: |
2095 | case MLD_IDLE_MEMBER: |
2096 | break; |
2097 | case MLD_G_QUERY_PENDING_MEMBER: |
2098 | case MLD_SG_QUERY_PENDING_MEMBER: |
2099 | /* |
2100 | * Respond to a previously pending Group-Specific |
2101 | * or Group-and-Source-Specific query by enqueueing |
2102 | * the appropriate Current-State report for |
2103 | * immediate transmission. |
2104 | */ |
2105 | if (query_response_timer_expired) { |
2106 | int retval; |
2107 | |
2108 | retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1, |
2109 | (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER), |
2110 | 0); |
2111 | MLD_PRINTF(("%s: enqueue record = %d\n" , |
2112 | __func__, retval)); |
2113 | inm->in6m_state = MLD_REPORTING_MEMBER; |
2114 | in6m_clear_recorded(inm); |
2115 | } |
2116 | OS_FALLTHROUGH; |
2117 | case MLD_REPORTING_MEMBER: |
2118 | case MLD_LEAVING_MEMBER: |
2119 | if (state_change_retransmit_timer_expired) { |
2120 | /* |
2121 | * State-change retransmission timer fired. |
2122 | * If there are any further pending retransmissions, |
2123 | * set the global pending state-change flag, and |
2124 | * reset the timer. |
2125 | */ |
2126 | if (--inm->in6m_scrv > 0) { |
2127 | inm->in6m_sctimer = (uint16_t)uri_sec; |
2128 | state_change_timers_running6 = 1; |
2129 | /* caller will schedule timer */ |
2130 | } |
2131 | /* |
2132 | * Retransmit the previously computed state-change |
2133 | * report. If there are no further pending |
2134 | * retransmissions, the mbuf queue will be consumed. |
2135 | * Update T0 state to T1 as we have now sent |
2136 | * a state-change. |
2137 | */ |
2138 | (void) mld_v2_merge_state_changes(inm, scq); |
2139 | |
2140 | in6m_commit(inm); |
2141 | MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n" , __func__, |
2142 | ip6_sprintf(&inm->in6m_addr), |
2143 | if_name(inm->in6m_ifp))); |
2144 | |
2145 | /* |
2146 | * If we are leaving the group for good, make sure |
2147 | * we release MLD's reference to it. |
2148 | * This release must be deferred using a SLIST, |
2149 | * as we are called from a loop which traverses |
* the in6_multihead list.
2151 | */ |
2152 | if (inm->in6m_state == MLD_LEAVING_MEMBER && |
2153 | inm->in6m_scrv == 0) { |
2154 | inm->in6m_state = MLD_NOT_MEMBER; |
2155 | /* |
2156 | * A reference has already been held in |
2157 | * mld_final_leave() for this inm, so |
2158 | * no need to hold another one. We also |
2159 | * bumped up its request count then, so |
2160 | * that it stays in in6_multihead. Both |
2161 | * of them will be released when it is |
2162 | * dequeued later on. |
2163 | */ |
2164 | VERIFY(inm->in6m_nrelecnt != 0); |
2165 | mld_append_relq(mli, inm); |
2166 | } |
2167 | } |
2168 | break; |
2169 | } |
2170 | } |
2171 | |
2172 | /* |
2173 | * Switch to a different version on the given interface, |
* as per Section 9.12 of RFC 3810.
2175 | */ |
2176 | static uint32_t |
2177 | mld_set_version(struct mld_ifinfo *mli, const int mld_version) |
2178 | { |
2179 | int old_version_timer; |
2180 | |
2181 | MLI_LOCK_ASSERT_HELD(mli); |
2182 | |
2183 | os_log(OS_LOG_DEFAULT, "%s: switching to v%d on ifp %s\n" , __func__, |
2184 | mld_version, if_name(mli->mli_ifp)); |
2185 | |
2186 | if (mld_version == MLD_VERSION_1) { |
2187 | /* |
2188 | * Compute the "Older Version Querier Present" timer as per |
2189 | * Section 9.12, in seconds. |
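* i.e. (Robustness Variable * Query Interval) + Query Response Interval.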
2190 | */ |
2191 | old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri; |
2192 | mli->mli_v1_timer = old_version_timer; |
2193 | } |
2194 | |
2195 | if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) { |
2196 | mli->mli_version = MLD_VERSION_1; |
2197 | mld_v2_cancel_link_timers(mli); |
2198 | } |
2199 | |
2200 | MLI_LOCK_ASSERT_HELD(mli); |
2201 | |
2202 | return mli->mli_v1_timer; |
2203 | } |
2204 | |
2205 | /* |
2206 | * Cancel pending MLDv2 timers for the given link and all groups |
2207 | * joined on it; state-change, general-query, and group-query timers. |
2208 | * |
* Only ever called on a transition from v2 to Compatibility mode. Kill
* the timers stone dead (this may be expensive for a large number of
* groups); they will be restarted if Compatibility Mode deems that they
* must be, due to query processing.
2213 | */ |
2214 | static void |
2215 | mld_v2_cancel_link_timers(struct mld_ifinfo *mli) |
2216 | { |
2217 | struct ifnet *ifp; |
2218 | struct in6_multi *inm; |
2219 | struct in6_multistep step; |
2220 | |
2221 | MLI_LOCK_ASSERT_HELD(mli); |
2222 | |
2223 | MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n" , __func__, |
2224 | (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp))); |
2225 | |
2226 | /* |
* Stop the v2 General Query Response timer on this link stone dead.
2228 | * If timer is woken up due to interface_timers_running6, |
2229 | * the flag will be cleared if there are no pending link timers. |
2230 | */ |
2231 | mli->mli_v2_timer = 0; |
2232 | |
2233 | /* |
2234 | * Now clear the current-state and state-change report timers |
2235 | * for all memberships scoped to this link. |
2236 | */ |
2237 | ifp = mli->mli_ifp; |
2238 | MLI_UNLOCK(mli); |
2239 | |
2240 | in6_multihead_lock_shared(); |
2241 | IN6_FIRST_MULTI(step, inm); |
2242 | while (inm != NULL) { |
2243 | IN6M_LOCK(inm); |
2244 | if (inm->in6m_ifp != ifp) { |
2245 | goto next; |
2246 | } |
2247 | |
2248 | switch (inm->in6m_state) { |
2249 | case MLD_NOT_MEMBER: |
2250 | case MLD_SILENT_MEMBER: |
2251 | case MLD_IDLE_MEMBER: |
2252 | case MLD_LAZY_MEMBER: |
2253 | case MLD_SLEEPING_MEMBER: |
2254 | case MLD_AWAKENING_MEMBER: |
2255 | /* |
2256 | * These states are either not relevant in v2 mode, |
2257 | * or are unreported. Do nothing. |
2258 | */ |
2259 | break; |
2260 | case MLD_LEAVING_MEMBER: |
2261 | /* |
2262 | * If we are leaving the group and switching |
2263 | * version, we need to release the final |
2264 | * reference held for issuing the INCLUDE {}. |
2265 | * During mld_final_leave(), we bumped up both the |
2266 | * request and reference counts. Since we cannot |
2267 | * call in6_multi_detach() here, defer this task to |
2268 | * the timer routine. |
2269 | */ |
2270 | VERIFY(inm->in6m_nrelecnt != 0); |
2271 | MLI_LOCK(mli); |
2272 | mld_append_relq(mli, inm); |
2273 | MLI_UNLOCK(mli); |
2274 | OS_FALLTHROUGH; |
2275 | case MLD_G_QUERY_PENDING_MEMBER: |
2276 | case MLD_SG_QUERY_PENDING_MEMBER: |
2277 | in6m_clear_recorded(inm); |
2278 | OS_FALLTHROUGH; |
2279 | case MLD_REPORTING_MEMBER: |
2280 | inm->in6m_state = MLD_REPORTING_MEMBER; |
2281 | break; |
2282 | } |
2283 | /* |
2284 | * Always clear state-change and group report timers. |
2285 | * Free any pending MLDv2 state-change records. |
2286 | */ |
2287 | inm->in6m_sctimer = 0; |
2288 | inm->in6m_timer = 0; |
2289 | IF_DRAIN(&inm->in6m_scq); |
2290 | next: |
2291 | IN6M_UNLOCK(inm); |
2292 | IN6_NEXT_MULTI(step, inm); |
2293 | } |
2294 | in6_multihead_lock_done(); |
2295 | |
2296 | MLI_LOCK(mli); |
2297 | } |
2298 | |
2299 | /* |
2300 | * Update the Older Version Querier Present timers for a link. |
2301 | * See Section 9.12 of RFC 3810. |
2302 | */ |
2303 | static void |
2304 | mld_v1_process_querier_timers(struct mld_ifinfo *mli) |
2305 | { |
2306 | MLI_LOCK_ASSERT_HELD(mli); |
2307 | |
2308 | if (mld_v2enable && mli->mli_version != MLD_VERSION_2 && |
2309 | --mli->mli_v1_timer == 0) { |
2310 | /* |
2311 | * MLDv1 Querier Present timer expired; revert to MLDv2. |
2312 | */ |
2313 | os_log(OS_LOG_DEFAULT, "%s: transition from v%d -> v%d on %s\n" , |
2314 | __func__, mli->mli_version, MLD_VERSION_2, |
2315 | if_name(mli->mli_ifp)); |
2316 | mli->mli_version = MLD_VERSION_2; |
2317 | } |
2318 | } |
2319 | |
2320 | /* |
2321 | * Transmit an MLDv1 report immediately. |
2322 | */ |
2323 | static int |
2324 | mld_v1_transmit_report(struct in6_multi *in6m, const uint8_t type) |
2325 | { |
2326 | struct ifnet *ifp; |
2327 | struct in6_ifaddr *ia; |
2328 | struct ip6_hdr *ip6; |
2329 | struct mbuf *mh, *md; |
2330 | struct mld_hdr *mld; |
2331 | int error = 0; |
2332 | |
2333 | IN6M_LOCK_ASSERT_HELD(in6m); |
2334 | MLI_LOCK_ASSERT_HELD(in6m->in6m_mli); |
2335 | |
2336 | ifp = in6m->in6m_ifp; |
2337 | /* ia may be NULL if link-local address is tentative. */ |
2338 | ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); |
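/*
* Build the report as a two-mbuf chain: the header mbuf (mh) carries
* the IPv6 header, and the second mbuf (md) carries the MLD header.
*/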
2339 | |
2340 | MGETHDR(mh, M_DONTWAIT, MT_HEADER); |
2341 | if (mh == NULL) { |
2342 | if (ia != NULL) { |
ifa_remref(&ia->ia_ifa);
2344 | } |
2345 | return ENOMEM; |
2346 | } |
2347 | MGET(md, M_DONTWAIT, MT_DATA); |
2348 | if (md == NULL) { |
2349 | m_free(mh); |
2350 | if (ia != NULL) { |
ifa_remref(&ia->ia_ifa);
2352 | } |
2353 | return ENOMEM; |
2354 | } |
2355 | mh->m_next = md; |
2356 | |
2357 | /* |
2358 | * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so |
2359 | * that ether_output() does not need to allocate another mbuf |
2360 | * for the header in the most common case. |
2361 | */ |
2362 | MH_ALIGN(mh, sizeof(struct ip6_hdr)); |
2363 | mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr); |
2364 | mh->m_len = sizeof(struct ip6_hdr); |
2365 | |
2366 | ip6 = mtod(mh, struct ip6_hdr *); |
2367 | ip6->ip6_flow = 0; |
2368 | ip6->ip6_vfc &= ~IPV6_VERSION_MASK; |
2369 | ip6->ip6_vfc |= IPV6_VERSION; |
2370 | ip6->ip6_nxt = IPPROTO_ICMPV6; |
2371 | if (ia != NULL) { |
2372 | IFA_LOCK(&ia->ia_ifa); |
2373 | } |
2374 | ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; |
2375 | ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia); |
2376 | if (ia != NULL) { |
2377 | IFA_UNLOCK(&ia->ia_ifa); |
ifa_remref(&ia->ia_ifa);
2379 | ia = NULL; |
2380 | } |
2381 | ip6->ip6_dst = in6m->in6m_addr; |
2382 | ip6_output_setdstifscope(mh, in6m->ifscope, NULL); |
2383 | |
2384 | md->m_len = sizeof(struct mld_hdr); |
2385 | mld = mtod(md, struct mld_hdr *); |
2386 | mld->mld_type = type; |
2387 | mld->mld_code = 0; |
2388 | mld->mld_cksum = 0; |
2389 | mld->mld_maxdelay = 0; |
2390 | mld->mld_reserved = 0; |
2391 | mld->mld_addr = in6m->in6m_addr; |
2392 | in6_clearscope(&mld->mld_addr); |
2393 | mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, |
2394 | sizeof(struct ip6_hdr), sizeof(struct mld_hdr)); |
2395 | |
mld_save_context(mh, ifp);
2397 | mh->m_flags |= M_MLDV1; |
2398 | |
2399 | /* |
2400 | * Due to the fact that at this point we are possibly holding |
2401 | * in6_multihead_lock in shared or exclusive mode, we can't call |
2402 | * mld_dispatch_packet() here since that will eventually call |
2403 | * ip6_output(), which will try to lock in6_multihead_lock and cause |
2404 | * a deadlock. |
2405 | * Instead we defer the work to the mld_timeout() thread, thus |
* avoiding unlocking in6_multihead_lock here.
2407 | */ |
2408 | if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) { |
2409 | os_log_error(OS_LOG_DEFAULT, "%s: v1 outbound queue full\n" , __func__); |
2410 | error = ENOMEM; |
2411 | m_freem(mh); |
2412 | } else { |
2413 | IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh); |
2414 | VERIFY(error == 0); |
2415 | } |
2416 | |
2417 | return error; |
2418 | } |
2419 | |
2420 | /* |
2421 | * Process a state change from the upper layer for the given IPv6 group. |
2422 | * |
* Each socket holds a reference on the in6_multi in its own ip6_moptions.
* The socket layer will have made the necessary updates to the group
2425 | * state, it is now up to MLD to issue a state change report if there |
2426 | * has been any change between T0 (when the last state-change was issued) |
2427 | * and T1 (now). |
2428 | * |
* We use the MLDv2 state machine at the group level. The MLD module
2430 | * however makes the decision as to which MLD protocol version to speak. |
2431 | * A state change *from* INCLUDE {} always means an initial join. |
2432 | * A state change *to* INCLUDE {} always means a final leave. |
2433 | * |
2434 | * If delay is non-zero, and the state change is an initial multicast |
2435 | * join, the state change report will be delayed by 'delay' ticks |
2436 | * in units of seconds if MLDv1 is active on the link; otherwise |
2437 | * the initial MLDv2 state change report will be delayed by whichever |
* is sooner: a pending state-change timer or the delay itself.
2439 | */ |
2440 | int |
2441 | mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp, |
2442 | const int delay) |
2443 | { |
2444 | struct mld_ifinfo *mli; |
2445 | struct ifnet *ifp; |
2446 | int error = 0; |
2447 | |
2448 | VERIFY(mtp != NULL); |
bzero(mtp, sizeof(*mtp));
2450 | |
2451 | IN6M_LOCK_ASSERT_HELD(inm); |
2452 | VERIFY(inm->in6m_mli != NULL); |
2453 | MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli); |
2454 | |
2455 | /* |
2456 | * Try to detect if the upper layer just asked us to change state |
2457 | * for an interface which has now gone away. |
2458 | */ |
2459 | VERIFY(inm->in6m_ifma != NULL); |
2460 | ifp = inm->in6m_ifma->ifma_ifp; |
2461 | /* |
2462 | * Sanity check that netinet6's notion of ifp is the same as net's. |
2463 | */ |
2464 | VERIFY(inm->in6m_ifp == ifp); |
2465 | |
2466 | mli = MLD_IFINFO(ifp); |
2467 | VERIFY(mli != NULL); |
2468 | |
2469 | /* |
2470 | * If we detect a state transition to or from MCAST_UNDEFINED |
2471 | * for this group, then we are starting or finishing an MLD |
2472 | * life cycle for this group. |
2473 | */ |
2474 | if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) { |
2475 | MLD_PRINTF(("%s: inm transition %d -> %d\n" , __func__, |
2476 | inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode)); |
2477 | if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) { |
2478 | MLD_PRINTF(("%s: initial join\n" , __func__)); |
2479 | error = mld_initial_join(inm, mli, mtp, delay); |
2480 | goto out; |
2481 | } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) { |
2482 | MLD_PRINTF(("%s: final leave\n" , __func__)); |
2483 | mld_final_leave(inm, mli, mtp); |
2484 | goto out; |
2485 | } |
2486 | } else { |
2487 | MLD_PRINTF(("%s: filter set change\n" , __func__)); |
2488 | } |
2489 | |
2490 | error = mld_handle_state_change(inm, mli, mtp); |
2491 | out: |
2492 | return error; |
2493 | } |
2494 | |
2495 | /* |
2496 | * Perform the initial join for an MLD group. |
2497 | * |
2498 | * When joining a group: |
2499 | * If the group should have its MLD traffic suppressed, do nothing. |
2500 | * MLDv1 starts sending MLDv1 host membership reports. |
2501 | * MLDv2 will schedule an MLDv2 state-change report containing the |
2502 | * initial state of the membership. |
2503 | * |
2504 | * If the delay argument is non-zero, then we must delay sending the |
2505 | * initial state change for delay ticks (in units of seconds). |
2506 | */ |
2507 | static int |
2508 | mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, |
2509 | struct mld_tparams *mtp, const int delay) |
2510 | { |
2511 | struct ifnet *ifp; |
2512 | struct ifqueue *ifq; |
2513 | int error, retval, syncstates; |
2514 | int odelay; |
2515 | |
2516 | IN6M_LOCK_ASSERT_HELD(inm); |
2517 | MLI_LOCK_ASSERT_NOTHELD(mli); |
2518 | VERIFY(mtp != NULL); |
2519 | |
2520 | MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n" , |
2521 | __func__, ip6_sprintf(&inm->in6m_addr), |
2522 | (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp), |
2523 | if_name(inm->in6m_ifp))); |
2524 | |
2525 | error = 0; |
2526 | syncstates = 1; |
2527 | |
2528 | ifp = inm->in6m_ifp; |
2529 | |
2530 | MLI_LOCK(mli); |
2531 | VERIFY(mli->mli_ifp == ifp); |
2532 | |
2533 | /* |
* Avoid MLD if the group is:
* 1. Joined on loopback, OR
* 2. On a link that is marked MLIF_SILENT, OR
* 3. rdar://problem/19227650 Is link-local scoped and
* on a cellular interface, OR
* 4. Is a type that should not be reported (node-local
* or all-nodes link-local multicast).
2541 | * All other groups enter the appropriate state machine |
2542 | * for the version in use on this link. |
2543 | */ |
2544 | if ((ifp->if_flags & IFF_LOOPBACK) || |
2545 | (mli->mli_flags & MLIF_SILENT) || |
2546 | (IFNET_IS_CELLULAR(ifp) && |
2547 | (IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr) || IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(&inm->in6m_addr))) || |
!mld_is_addr_reported(&inm->in6m_addr)) {
2549 | MLD_PRINTF(("%s: not kicking state machine for silent group\n" , |
2550 | __func__)); |
2551 | inm->in6m_state = MLD_SILENT_MEMBER; |
2552 | inm->in6m_timer = 0; |
2553 | } else { |
2554 | /* |
2555 | * Deal with overlapping in6_multi lifecycle. |
2556 | * If this group was LEAVING, then make sure |
2557 | * we drop the reference we picked up to keep the |
2558 | * group around for the final INCLUDE {} enqueue. |
2559 | * Since we cannot call in6_multi_detach() here, |
2560 | * defer this task to the timer routine. |
2561 | */ |
2562 | if (mli->mli_version == MLD_VERSION_2 && |
2563 | inm->in6m_state == MLD_LEAVING_MEMBER) { |
2564 | VERIFY(inm->in6m_nrelecnt != 0); |
2565 | mld_append_relq(mli, inm); |
2566 | } |
2567 | |
2568 | inm->in6m_state = MLD_REPORTING_MEMBER; |
2569 | |
2570 | switch (mli->mli_version) { |
2571 | case MLD_VERSION_1: |
2572 | /* |
2573 | * If a delay was provided, only use it if |
2574 | * it is greater than the delay normally |
2575 | * used for an MLDv1 state change report, |
2576 | * and delay sending the initial MLDv1 report |
2577 | * by not transitioning to the IDLE state. |
2578 | */ |
2579 | odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI); |
2580 | if (delay) { |
inm->in6m_timer = max(delay, odelay);
2582 | mtp->cst = 1; |
2583 | } else { |
2584 | inm->in6m_state = MLD_IDLE_MEMBER; |
error = mld_v1_transmit_report(inm,
2586 | MLD_LISTENER_REPORT); |
2587 | |
2588 | IN6M_LOCK_ASSERT_HELD(inm); |
2589 | MLI_LOCK_ASSERT_HELD(mli); |
2590 | |
2591 | if (error == 0) { |
2592 | inm->in6m_timer = odelay; |
2593 | mtp->cst = 1; |
2594 | } |
2595 | } |
2596 | break; |
2597 | |
2598 | case MLD_VERSION_2: |
2599 | /* |
2600 | * Defer update of T0 to T1, until the first copy |
2601 | * of the state change has been transmitted. |
2602 | */ |
2603 | syncstates = 0; |
2604 | |
2605 | /* |
2606 | * Immediately enqueue a State-Change Report for |
2607 | * this interface, freeing any previous reports. |
2608 | * Don't kick the timers if there is nothing to do, |
2609 | * or if an error occurred. |
2610 | */ |
2611 | ifq = &inm->in6m_scq; |
2612 | IF_DRAIN(ifq); |
2613 | retval = mld_v2_enqueue_group_record(ifq, inm, 1, |
2614 | 0, 0, (mli->mli_flags & MLIF_USEALLOW)); |
2615 | mtp->cst = (ifq->ifq_len > 0); |
2616 | MLD_PRINTF(("%s: enqueue record = %d\n" , |
2617 | __func__, retval)); |
2618 | if (retval <= 0) { |
2619 | error = retval * -1; |
2620 | break; |
2621 | } |
2622 | |
2623 | /* |
2624 | * Schedule transmission of pending state-change |
2625 | * report up to RV times for this link. The timer |
* will fire at the next mld_timeout (1 second),
2627 | * giving us an opportunity to merge the reports. |
2628 | * |
2629 | * If a delay was provided to this function, only |
2630 | * use this delay if sooner than the existing one. |
2631 | */ |
2632 | VERIFY(mli->mli_rv > 1); |
2633 | inm->in6m_scrv = (uint16_t)mli->mli_rv; |
2634 | if (delay) { |
2635 | if (inm->in6m_sctimer > 1) { |
2636 | inm->in6m_sctimer = |
2637 | MIN(inm->in6m_sctimer, (uint16_t)delay); |
2638 | } else { |
2639 | inm->in6m_sctimer = (uint16_t)delay; |
2640 | } |
2641 | } else { |
2642 | inm->in6m_sctimer = 1; |
2643 | } |
2644 | mtp->sct = 1; |
2645 | error = 0; |
2646 | break; |
2647 | } |
2648 | } |
2649 | MLI_UNLOCK(mli); |
2650 | |
2651 | /* |
2652 | * Only update the T0 state if state change is atomic, |
2653 | * i.e. we don't need to wait for a timer to fire before we |
2654 | * can consider the state change to have been communicated. |
2655 | */ |
2656 | if (syncstates) { |
2657 | in6m_commit(inm); |
2658 | MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n" , __func__, |
2659 | ip6_sprintf(&inm->in6m_addr), |
2660 | if_name(inm->in6m_ifp))); |
2661 | } |
2662 | |
2663 | return error; |
2664 | } |
2665 | |
2666 | /* |
2667 | * Issue an intermediate state change during the life-cycle. |
2668 | */ |
2669 | static int |
2670 | mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli, |
2671 | struct mld_tparams *mtp) |
2672 | { |
2673 | struct ifnet *ifp; |
2674 | int retval = 0; |
2675 | |
2676 | IN6M_LOCK_ASSERT_HELD(inm); |
2677 | MLI_LOCK_ASSERT_NOTHELD(mli); |
2678 | VERIFY(mtp != NULL); |
2679 | |
2680 | MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n" , |
2681 | __func__, ip6_sprintf(&inm->in6m_addr), |
2682 | (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp), |
2683 | if_name(inm->in6m_ifp))); |
2684 | |
2685 | ifp = inm->in6m_ifp; |
2686 | |
2687 | MLI_LOCK(mli); |
2688 | VERIFY(mli->mli_ifp == ifp); |
2689 | |
2690 | if ((ifp->if_flags & IFF_LOOPBACK) || |
2691 | (mli->mli_flags & MLIF_SILENT) || |
!mld_is_addr_reported(&inm->in6m_addr) ||
2693 | (mli->mli_version != MLD_VERSION_2)) { |
2694 | MLI_UNLOCK(mli); |
if (!mld_is_addr_reported(&inm->in6m_addr)) {
2696 | MLD_PRINTF(("%s: not kicking state machine for silent " |
2697 | "group\n" , __func__)); |
2698 | } |
2699 | MLD_PRINTF(("%s: nothing to do\n" , __func__)); |
2700 | in6m_commit(inm); |
2701 | MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n" , __func__, |
2702 | ip6_sprintf(&inm->in6m_addr), |
2703 | if_name(inm->in6m_ifp))); |
2704 | goto done; |
2705 | } |
2706 | |
2707 | IF_DRAIN(&inm->in6m_scq); |
2708 | |
2709 | retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0, |
2710 | (mli->mli_flags & MLIF_USEALLOW)); |
2711 | mtp->cst = (inm->in6m_scq.ifq_len > 0); |
2712 | MLD_PRINTF(("%s: enqueue record = %d\n" , __func__, retval)); |
2713 | if (retval <= 0) { |
2714 | MLI_UNLOCK(mli); |
2715 | retval *= -1; |
2716 | goto done; |
2717 | } else { |
2718 | retval = 0; |
2719 | } |
2720 | |
2721 | /* |
2722 | * If record(s) were enqueued, start the state-change |
2723 | * report timer for this group. |
2724 | */ |
2725 | inm->in6m_scrv = (uint16_t)mli->mli_rv; |
2726 | inm->in6m_sctimer = 1; |
2727 | mtp->sct = 1; |
2728 | MLI_UNLOCK(mli); |
2729 | |
2730 | done: |
2731 | return retval; |
2732 | } |
2733 | |
2734 | /* |
2735 | * Perform the final leave for a multicast address. |
2736 | * |
2737 | * When leaving a group: |
2738 | * MLDv1 sends a DONE message, if and only if we are the reporter. |
2739 | * MLDv2 enqueues a state-change report containing a transition |
2740 | * to INCLUDE {} for immediate transmission. |
2741 | */ |
2742 | static void |
2743 | mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli, |
2744 | struct mld_tparams *mtp) |
2745 | { |
2746 | int syncstates = 1; |
2747 | |
2748 | IN6M_LOCK_ASSERT_HELD(inm); |
2749 | MLI_LOCK_ASSERT_NOTHELD(mli); |
2750 | VERIFY(mtp != NULL); |
2751 | |
2752 | MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n" , |
2753 | __func__, ip6_sprintf(&inm->in6m_addr), |
2754 | (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp), |
2755 | if_name(inm->in6m_ifp))); |
2756 | |
2757 | switch (inm->in6m_state) { |
2758 | case MLD_NOT_MEMBER: |
2759 | case MLD_SILENT_MEMBER: |
2760 | case MLD_LEAVING_MEMBER: |
2761 | /* Already leaving or left; do nothing. */ |
2762 | MLD_PRINTF(("%s: not kicking state machine for silent group\n" , |
2763 | __func__)); |
2764 | break; |
2765 | case MLD_REPORTING_MEMBER: |
2766 | case MLD_IDLE_MEMBER: |
2767 | case MLD_G_QUERY_PENDING_MEMBER: |
2768 | case MLD_SG_QUERY_PENDING_MEMBER: |
2769 | MLI_LOCK(mli); |
2770 | if (mli->mli_version == MLD_VERSION_1) { |
2771 | if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || |
2772 | inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) { |
2773 | panic("%s: MLDv2 state reached, not MLDv2 " |
2774 | "mode\n" , __func__); |
2775 | /* NOTREACHED */ |
2776 | } |
/* schedule timer if enqueue is successful */
mtp->cst = (mld_v1_transmit_report(inm,
2779 | MLD_LISTENER_DONE) == 0); |
2780 | |
2781 | IN6M_LOCK_ASSERT_HELD(inm); |
2782 | MLI_LOCK_ASSERT_HELD(mli); |
2783 | |
2784 | inm->in6m_state = MLD_NOT_MEMBER; |
2785 | } else if (mli->mli_version == MLD_VERSION_2) { |
2786 | /* |
2787 | * Stop group timer and all pending reports. |
2788 | * Immediately enqueue a state-change report |
2789 | * TO_IN {} to be sent on the next timeout, |
2790 | * giving us an opportunity to merge reports. |
2791 | */ |
2792 | IF_DRAIN(&inm->in6m_scq); |
2793 | inm->in6m_timer = 0; |
2794 | inm->in6m_scrv = (uint16_t)mli->mli_rv; |
2795 | MLD_PRINTF(("%s: Leaving %s/%s with %d " |
2796 | "pending retransmissions.\n" , __func__, |
2797 | ip6_sprintf(&inm->in6m_addr), |
2798 | if_name(inm->in6m_ifp), |
2799 | inm->in6m_scrv)); |
2800 | if (inm->in6m_scrv == 0) { |
2801 | inm->in6m_state = MLD_NOT_MEMBER; |
2802 | inm->in6m_sctimer = 0; |
2803 | } else { |
2804 | int retval; |
2805 | /* |
2806 | * Stick around in the in6_multihead list; |
2807 | * the final detach will be issued by |
2808 | * mld_v2_process_group_timers() when |
2809 | * the retransmit timer expires. |
2810 | */ |
2811 | IN6M_ADDREF_LOCKED(inm); |
2812 | VERIFY(inm->in6m_debug & IFD_ATTACHED); |
2813 | inm->in6m_reqcnt++; |
2814 | VERIFY(inm->in6m_reqcnt >= 1); |
2815 | inm->in6m_nrelecnt++; |
2816 | VERIFY(inm->in6m_nrelecnt != 0); |
2817 | |
2818 | retval = mld_v2_enqueue_group_record( |
2819 | &inm->in6m_scq, inm, 1, 0, 0, |
2820 | (mli->mli_flags & MLIF_USEALLOW)); |
2821 | mtp->cst = (inm->in6m_scq.ifq_len > 0); |
2822 | KASSERT(retval != 0, |
2823 | ("%s: enqueue record = %d\n" , __func__, |
2824 | retval)); |
2825 | |
2826 | inm->in6m_state = MLD_LEAVING_MEMBER; |
2827 | inm->in6m_sctimer = 1; |
2828 | mtp->sct = 1; |
2829 | syncstates = 0; |
2830 | } |
2831 | } |
2832 | MLI_UNLOCK(mli); |
2833 | break; |
2834 | case MLD_LAZY_MEMBER: |
2835 | case MLD_SLEEPING_MEMBER: |
2836 | case MLD_AWAKENING_MEMBER: |
2837 | /* Our reports are suppressed; do nothing. */ |
2838 | break; |
2839 | } |
2840 | |
2841 | if (syncstates) { |
2842 | in6m_commit(inm); |
2843 | MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n" , __func__, |
2844 | ip6_sprintf(&inm->in6m_addr), |
2845 | if_name(inm->in6m_ifp))); |
2846 | inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; |
2847 | MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n" , |
2848 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr), |
2849 | if_name(inm->in6m_ifp))); |
2850 | } |
2851 | } |
2852 | |
2853 | /* |
2854 | * Enqueue an MLDv2 group record to the given output queue. |
2855 | * |
2856 | * If is_state_change is zero, a current-state record is appended. |
2857 | * If is_state_change is non-zero, a state-change report is appended. |
2858 | * |
2859 | * If is_group_query is non-zero, an mbuf packet chain is allocated. |
2860 | * If is_group_query is zero, and if there is a packet with free space |
* at the tail of the queue, it will be appended to, provided there
* is enough free space.
2863 | * Otherwise a new mbuf packet chain is allocated. |
2864 | * |
2865 | * If is_source_query is non-zero, each source is checked to see if |
2866 | * it was recorded for a Group-Source query, and will be omitted if |
2867 | * it is not both in-mode and recorded. |
2868 | * |
2869 | * If use_block_allow is non-zero, state change reports for initial join |
2870 | * and final leave, on an inclusive mode group with a source list, will be |
2871 | * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively. |
2872 | * |
2873 | * The function will attempt to allocate leading space in the packet |
2874 | * for the IPv6+ICMP headers to be prepended without fragmenting the chain. |
2875 | * |
2876 | * If successful the size of all data appended to the queue is returned, |
2877 | * otherwise an error code less than zero is returned, or zero if |
2878 | * no record(s) were appended. |
2879 | */ |
2880 | static int |
2881 | mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, |
2882 | const int is_state_change, const int is_group_query, |
2883 | const int is_source_query, const int use_block_allow) |
2884 | { |
2885 | struct mldv2_record mr; |
2886 | struct mldv2_record *pmr; |
2887 | struct ifnet *ifp; |
2888 | struct ip6_msource *ims, *nims; |
2889 | mbuf_ref_t m0, m, md; |
2890 | int error, is_filter_list_change; |
2891 | int minrec0len, m0srcs, msrcs, nbytes, off; |
2892 | int record_has_sources; |
2893 | int now; |
2894 | uint8_t type; |
2895 | uint8_t mode; |
2896 | |
2897 | IN6M_LOCK_ASSERT_HELD(inm); |
2898 | MLI_LOCK_ASSERT_HELD(inm->in6m_mli); |
2899 | |
2900 | error = 0; |
2901 | ifp = inm->in6m_ifp; |
2902 | is_filter_list_change = 0; |
2903 | m = NULL; |
2904 | m0 = NULL; |
2905 | m0srcs = 0; |
2906 | msrcs = 0; |
2907 | nbytes = 0; |
2908 | nims = NULL; |
2909 | record_has_sources = 1; |
2910 | pmr = NULL; |
2911 | type = MLD_DO_NOTHING; |
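/* Filter mode for the group at t1, i.e. now. */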
2912 | mode = (uint8_t)inm->in6m_st[1].iss_fmode; |
2913 | |
2914 | /* |
2915 | * If we did not transition out of ASM mode during t0->t1, |
2916 | * and there are no source nodes to process, we can skip |
2917 | * the generation of source records. |
2918 | */ |
2919 | if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 && |
2920 | inm->in6m_nsrc == 0) { |
2921 | record_has_sources = 0; |
2922 | } |
2923 | |
2924 | if (is_state_change) { |
2925 | /* |
2926 | * Queue a state change record. |
2927 | * If the mode did not change, and there are non-ASM |
2928 | * listeners or source filters present, |
2929 | * we potentially need to issue two records for the group. |
2930 | * If there are ASM listeners, and there was no filter |
2931 | * mode transition of any kind, do nothing. |
2932 | * |
2933 | * If we are transitioning to MCAST_UNDEFINED, we need |
2934 | * not send any sources. A transition to/from this state is |
2935 | * considered inclusive with some special treatment. |
2936 | * |
2937 | * If we are rewriting initial joins/leaves to use |
2938 | * ALLOW/BLOCK, and the group's membership is inclusive, |
2939 | * we need to send sources in all cases. |
2940 | */ |
2941 | if (mode != inm->in6m_st[0].iss_fmode) { |
2942 | if (mode == MCAST_EXCLUDE) { |
2943 | MLD_PRINTF(("%s: change to EXCLUDE\n" , |
2944 | __func__)); |
2945 | type = MLD_CHANGE_TO_EXCLUDE_MODE; |
2946 | } else { |
2947 | MLD_PRINTF(("%s: change to INCLUDE\n" , |
2948 | __func__)); |
2949 | if (use_block_allow) { |
2950 | /* |
2951 | * XXX |
2952 | * Here we're interested in state |
2953 | * edges either direction between |
2954 | * MCAST_UNDEFINED and MCAST_INCLUDE. |
2955 | * Perhaps we should just check |
2956 | * the group state, rather than |
2957 | * the filter mode. |
2958 | */ |
2959 | if (mode == MCAST_UNDEFINED) { |
2960 | type = MLD_BLOCK_OLD_SOURCES; |
2961 | } else { |
2962 | type = MLD_ALLOW_NEW_SOURCES; |
2963 | } |
2964 | } else { |
2965 | type = MLD_CHANGE_TO_INCLUDE_MODE; |
2966 | if (mode == MCAST_UNDEFINED) { |
2967 | record_has_sources = 0; |
2968 | } |
2969 | } |
2970 | } |
2971 | } else { |
2972 | if (record_has_sources) { |
2973 | is_filter_list_change = 1; |
2974 | } else { |
2975 | type = MLD_DO_NOTHING; |
2976 | } |
2977 | } |
2978 | } else { |
2979 | /* |
2980 | * Queue a current state record. |
2981 | */ |
2982 | if (mode == MCAST_EXCLUDE) { |
2983 | type = MLD_MODE_IS_EXCLUDE; |
2984 | } else if (mode == MCAST_INCLUDE) { |
2985 | type = MLD_MODE_IS_INCLUDE; |
2986 | VERIFY(inm->in6m_st[1].iss_asm == 0); |
2987 | } |
2988 | } |
2989 | |
2990 | /* |
2991 | * Generate the filter list changes using a separate function. |
2992 | */ |
2993 | if (is_filter_list_change) { |
2994 | return mld_v2_enqueue_filter_change(ifq, inm); |
2995 | } |
2996 | |
2997 | if (type == MLD_DO_NOTHING) { |
2998 | MLD_PRINTF(("%s: nothing to do for %s/%s\n" , |
2999 | __func__, ip6_sprintf(&inm->in6m_addr), |
3000 | if_name(inm->in6m_ifp))); |
3001 | return 0; |
3002 | } |
3003 | |
3004 | /* |
3005 | * If any sources are present, we must be able to fit at least |
3006 | * one in the trailing space of the tail packet's mbuf, |
3007 | * ideally more. |
3008 | */ |
3009 | minrec0len = sizeof(struct mldv2_record); |
3010 | if (record_has_sources) { |
3011 | minrec0len += sizeof(struct in6_addr); |
3012 | } |
3013 | MLD_PRINTF(("%s: queueing %s for %s/%s\n" , __func__, |
3014 | mld_rec_type_to_str(type), |
3015 | ip6_sprintf(&inm->in6m_addr), |
3016 | if_name(inm->in6m_ifp))); |
3017 | |
3018 | /* |
3019 | * Check if we have a packet in the tail of the queue for this |
3020 | * group into which the first group record for this group will fit. |
3021 | * Otherwise allocate a new packet. |
3022 | * Always allocate leading space for IP6+RA+ICMPV6+REPORT. |
3023 | * Note: Group records for G/GSR query responses MUST be sent |
3024 | * in their own packet. |
3025 | */ |
3026 | m0 = ifq->ifq_tail; |
3027 | if (!is_group_query && |
3028 | m0 != NULL && |
3029 | (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && |
3030 | (m0->m_pkthdr.len + minrec0len) < |
3031 | (ifp->if_mtu - MLD_MTUSPACE)) { |
3032 | m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - |
3033 | sizeof(struct mldv2_record)) / |
3034 | sizeof(struct in6_addr); |
3035 | m = m0; |
3036 | MLD_PRINTF(("%s: use existing packet\n" , __func__)); |
3037 | } else { |
3038 | if (IF_QFULL(ifq)) { |
3039 | os_log_error(OS_LOG_DEFAULT, |
3040 | "%s: outbound queue full\n" , __func__); |
3041 | return -ENOMEM; |
3042 | } |
3043 | m = NULL; |
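/*
* Number of sources that fit in a fresh packet, after reserving
* MLD_MTUSPACE for the IPv6, Hop-by-Hop Router Alert and ICMPv6
* report headers, plus one group record header.
*/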
3044 | m0srcs = (ifp->if_mtu - MLD_MTUSPACE - |
3045 | sizeof(struct mldv2_record)) / sizeof(struct in6_addr); |
3046 | if (!is_state_change && !is_group_query) { |
3047 | m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); |
3048 | } |
3049 | if (m == NULL) { |
3050 | m = m_gethdr(M_DONTWAIT, MT_DATA); |
3051 | } |
3052 | if (m == NULL) { |
3053 | return -ENOMEM; |
3054 | } |
3055 | |
3056 | mld_save_context(m, ifp); |
3057 | |
3058 | MLD_PRINTF(("%s: allocated first packet\n" , __func__)); |
3059 | } |
3060 | |
3061 | /* |
3062 | * Append group record. |
3063 | * If we have sources, we don't know how many yet. |
3064 | */ |
3065 | mr.mr_type = type; |
3066 | mr.mr_datalen = 0; |
3067 | mr.mr_numsrc = 0; |
3068 | mr.mr_addr = inm->in6m_addr; |
3069 | in6_clearscope(&mr.mr_addr); |
3070 | if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { |
3071 | if (m != m0) { |
3072 | m_freem(m); |
3073 | } |
3074 | os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed.\n" , __func__); |
3075 | return -ENOMEM; |
3076 | } |
3077 | nbytes += sizeof(struct mldv2_record); |
3078 | |
3079 | /* |
3080 | * Append as many sources as will fit in the first packet. |
3081 | * If we are appending to a new packet, the chain allocation |
3082 | * may potentially use clusters; use m_getptr() in this case. |
3083 | * If we are appending to an existing packet, we need to obtain |
3084 | * a pointer to the group record after m_append(), in case a new |
3085 | * mbuf was allocated. |
3086 | * |
3087 | * Only append sources which are in-mode at t1. If we are |
3088 | * transitioning to MCAST_UNDEFINED state on the group, and |
3089 | * use_block_allow is zero, do not include source entries. |
3090 | * Otherwise, we need to include this source in the report. |
3091 | * |
3092 | * Only report recorded sources in our filter set when responding |
3093 | * to a group-source query. |
3094 | */ |
3095 | if (record_has_sources) { |
3096 | if (m == m0) { |
3097 | md = m_last(m); |
3098 | pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + |
3099 | md->m_len - nbytes); |
3100 | } else { |
3101 | md = m_getptr(m, 0, &off); |
3102 | pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + |
3103 | off); |
3104 | } |
3105 | msrcs = 0; |
3106 | RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, |
3107 | nims) { |
3108 | MLD_PRINTF(("%s: visit node %s\n" , __func__, |
3109 | ip6_sprintf(&ims->im6s_addr))); |
3110 | now = im6s_get_mode(inm, ims, 1); |
3111 | MLD_PRINTF(("%s: node is %d\n" , __func__, now)); |
3112 | if ((now != mode) || |
3113 | (now == mode && |
3114 | (!use_block_allow && mode == MCAST_UNDEFINED))) { |
3115 | MLD_PRINTF(("%s: skip node\n" , __func__)); |
3116 | continue; |
3117 | } |
3118 | if (is_source_query && ims->im6s_stp == 0) { |
3119 | MLD_PRINTF(("%s: skip unrecorded node\n" , |
3120 | __func__)); |
3121 | continue; |
3122 | } |
3123 | MLD_PRINTF(("%s: append node\n" , __func__)); |
3124 | if (!m_append(m, sizeof(struct in6_addr), |
3125 | (void *)&ims->im6s_addr)) { |
3126 | if (m != m0) { |
3127 | m_freem(m); |
3128 | } |
3129 | os_log_error(OS_LOG_DEFAULT, |
3130 | "%s: m_append() failed\n" , |
3131 | __func__); |
3132 | return -ENOMEM; |
3133 | } |
3134 | nbytes += sizeof(struct in6_addr); |
3135 | ++msrcs; |
3136 | if (msrcs == m0srcs) { |
3137 | break; |
3138 | } |
3139 | } |
3140 | MLD_PRINTF(("%s: msrcs is %d this packet\n" , __func__, |
3141 | msrcs)); |
3142 | pmr->mr_numsrc = htons((uint16_t)msrcs); |
3143 | nbytes += (msrcs * sizeof(struct in6_addr)); |
3144 | } |
3145 | |
3146 | if (is_source_query && msrcs == 0) { |
3147 | MLD_PRINTF(("%s: no recorded sources to report\n" , __func__)); |
3148 | if (m != m0) { |
3149 | m_freem(m); |
3150 | } |
3151 | return 0; |
3152 | } |
3153 | |
3154 | /* |
3155 | * We are good to go with first packet. |
3156 | */ |
3157 | if (m != m0) { |
3158 | MLD_PRINTF(("%s: enqueueing first packet\n" , __func__)); |
3159 | m->m_pkthdr.vt_nrecs = 1; |
3160 | IF_ENQUEUE(ifq, m); |
3161 | } else { |
3162 | m->m_pkthdr.vt_nrecs++; |
3163 | } |
3164 | /* |
3165 | * No further work needed if no source list in packet(s). |
3166 | */ |
3167 | if (!record_has_sources) { |
3168 | return nbytes; |
3169 | } |
3170 | |
3171 | /* |
3172 | * Whilst sources remain to be announced, we need to allocate |
3173 | * a new packet and fill out as many sources as will fit. |
3174 | * Always try for a cluster first. |
3175 | */ |
3176 | while (nims != NULL) { |
3177 | if (IF_QFULL(ifq)) { |
3178 | os_log_error(OS_LOG_DEFAULT, "%s: outbound queue full\n" , __func__); |
3179 | return -ENOMEM; |
3180 | } |
3181 | m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); |
3182 | if (m == NULL) { |
3183 | m = m_gethdr(M_DONTWAIT, MT_DATA); |
3184 | } |
3185 | if (m == NULL) { |
3186 | return -ENOMEM; |
3187 | } |
3188 | mld_save_context(m, ifp); |
3189 | md = m_getptr(m, 0, &off); |
3190 | pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off); |
3191 | MLD_PRINTF(("%s: allocated next packet\n" , __func__)); |
3192 | |
3193 | if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { |
3194 | if (m != m0) { |
3195 | m_freem(m); |
3196 | } |
3197 | os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed.\n" , __func__); |
3198 | return -ENOMEM; |
3199 | } |
3200 | m->m_pkthdr.vt_nrecs = 1; |
3201 | nbytes += sizeof(struct mldv2_record); |
3202 | |
3203 | m0srcs = (ifp->if_mtu - MLD_MTUSPACE - |
3204 | sizeof(struct mldv2_record)) / sizeof(struct in6_addr); |
3205 | |
3206 | msrcs = 0; |
3207 | RB_FOREACH_FROM(ims, ip6_msource_tree, nims) { |
3208 | MLD_PRINTF(("%s: visit node %s\n" , |
3209 | __func__, ip6_sprintf(&ims->im6s_addr))); |
3210 | now = im6s_get_mode(inm, ims, 1); |
3211 | if ((now != mode) || |
3212 | (now == mode && |
3213 | (!use_block_allow && mode == MCAST_UNDEFINED))) { |
3214 | MLD_PRINTF(("%s: skip node\n" , __func__)); |
3215 | continue; |
3216 | } |
3217 | if (is_source_query && ims->im6s_stp == 0) { |
3218 | MLD_PRINTF(("%s: skip unrecorded node\n" , |
3219 | __func__)); |
3220 | continue; |
3221 | } |
3222 | MLD_PRINTF(("%s: append node\n" , __func__)); |
3223 | if (!m_append(m, sizeof(struct in6_addr), |
3224 | (void *)&ims->im6s_addr)) { |
3225 | if (m != m0) { |
3226 | m_freem(m); |
3227 | } |
3228 | os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n" , |
3229 | __func__); |
3230 | return -ENOMEM; |
3231 | } |
3232 | ++msrcs; |
3233 | if (msrcs == m0srcs) { |
3234 | break; |
3235 | } |
3236 | } |
3237 | pmr->mr_numsrc = htons((uint16_t)msrcs); |
3238 | nbytes += (msrcs * sizeof(struct in6_addr)); |
3239 | |
3240 | MLD_PRINTF(("%s: enqueueing next packet\n" , __func__)); |
3241 | IF_ENQUEUE(ifq, m); |
3242 | } |
3243 | |
3244 | return nbytes; |
3245 | } |
3246 | |
3247 | /* |
3248 | * Type used to mark record pass completion. |
3249 | * We exploit the fact we can cast to this easily from the |
3250 | * current filter modes on each ip_msource node. |
3251 | */ |
3252 | typedef enum { |
3253 | REC_NONE = 0x00, /* MCAST_UNDEFINED */ |
3254 | REC_ALLOW = 0x01, /* MCAST_INCLUDE */ |
3255 | REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ |
3256 | REC_FULL = REC_ALLOW | REC_BLOCK |
3257 | } rectype_t; |
3258 | |
3259 | /* |
3260 | * Enqueue an MLDv2 filter list change to the given output queue. |
3261 | * |
3262 | * Source list filter state is held in an RB-tree. When the filter list |
3263 | * for a group is changed without changing its mode, we need to compute |
3264 | * the deltas between T0 and T1 for each source in the filter set, |
3265 | * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records. |
3266 | * |
3267 | * As we may potentially queue two record types, and the entire R-B tree |
3268 | * needs to be walked at once, we break this out into its own function |
3269 | * so we can generate a tightly packed queue of packets. |
3270 | * |
3271 | * XXX This could be written to only use one tree walk, although that makes |
3272 | * serializing into the mbuf chains a bit harder. For now we do two walks |
3273 | * which makes things easier on us, and it may or may not be harder on |
3274 | * the L2 cache. |
3275 | * |
3276 | * If successful the size of all data appended to the queue is returned, |
3277 | * otherwise an error code less than zero is returned, or zero if |
3278 | * no record(s) were appended. |
3279 | */ |
3280 | static int |
3281 | mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) |
3282 | { |
3283 | static const int MINRECLEN = |
3284 | sizeof(struct mldv2_record) + sizeof(struct in6_addr); |
3285 | struct ifnet *ifp; |
3286 | struct mldv2_record mr; |
3287 | struct mldv2_record *pmr; |
3288 | struct ip6_msource *ims, *nims; |
3289 | mbuf_ref_t m, m0, md; |
3290 | int m0srcs, nbytes, npbytes, off, rsrcs, schanged; |
3291 | int nallow, nblock; |
3292 | uint8_t mode, now, then; |
3293 | rectype_t crt, drt, nrt; |
3294 | |
3295 | IN6M_LOCK_ASSERT_HELD(inm); |
3296 | |
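	/*
	 * Nothing to report if the group has no source filters, or if
	 * it was in ASM (exclude-all) mode at both t0 and t1; there are
	 * no per-source deltas in either case.
	 */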
3297 | if (inm->in6m_nsrc == 0 || |
3298 | (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) { |
3299 | return 0; |
3300 | } |
3301 | |
3302 | ifp = inm->in6m_ifp; /* interface */ |
3303 | mode = (uint8_t)inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ |
3304 | crt = REC_NONE; /* current group record type */ |
3305 | drt = REC_NONE; /* mask of completed group record types */ |
3306 | nrt = REC_NONE; /* record type for current node */ |
	m0srcs = 0; /* # sources that will fit in current mbuf chain */
3308 | npbytes = 0; /* # of bytes appended this packet */ |
3309 | nbytes = 0; /* # of bytes appended to group's state-change queue */ |
3310 | rsrcs = 0; /* # sources encoded in current record */ |
3311 | schanged = 0; /* # nodes encoded in overall filter change */ |
3312 | nallow = 0; /* # of source entries in ALLOW_NEW */ |
3313 | nblock = 0; /* # of source entries in BLOCK_OLD */ |
3314 | nims = NULL; /* next tree node pointer */ |
3315 | |
3316 | /* |
3317 | * For each possible filter record mode. |
3318 | * The first kind of source we encounter tells us which |
3319 | * is the first kind of record we start appending. |
3320 | * If a node transitioned to UNDEFINED at t1, its mode is treated |
3321 | * as the inverse of the group's filter mode. |
3322 | */ |
3323 | while (drt != REC_FULL) { |
3324 | do { |
3325 | m0 = ifq->ifq_tail; |
3326 | if (m0 != NULL && |
3327 | (m0->m_pkthdr.vt_nrecs + 1 <= |
3328 | MLD_V2_REPORT_MAXRECS) && |
3329 | (m0->m_pkthdr.len + MINRECLEN) < |
3330 | (ifp->if_mtu - MLD_MTUSPACE)) { |
3331 | m = m0; |
3332 | m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - |
3333 | sizeof(struct mldv2_record)) / |
3334 | sizeof(struct in6_addr); |
				MLD_PRINTF(("%s: use previous packet\n",
3336 | __func__)); |
3337 | } else { |
3338 | m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); |
3339 | if (m == NULL) { |
3340 | m = m_gethdr(M_DONTWAIT, MT_DATA); |
3341 | } |
3342 | if (m == NULL) { |
					os_log_error(OS_LOG_DEFAULT, "%s: m_get*() failed\n",
3344 | __func__); |
3345 | return -ENOMEM; |
3346 | } |
3347 | m->m_pkthdr.vt_nrecs = 0; |
3348 | mld_save_context(m, ifp); |
3349 | m0srcs = (ifp->if_mtu - MLD_MTUSPACE - |
3350 | sizeof(struct mldv2_record)) / |
3351 | sizeof(struct in6_addr); |
3352 | npbytes = 0; |
				MLD_PRINTF(("%s: allocated new packet\n",
3354 | __func__)); |
3355 | } |
3356 | /* |
3357 | * Append the MLD group record header to the |
3358 | * current packet's data area. |
3359 | * Recalculate pointer to free space for next |
3360 | * group record, in case m_append() allocated |
3361 | * a new mbuf or cluster. |
3362 | */ |
			memset(&mr, 0, sizeof(mr));
3364 | mr.mr_addr = inm->in6m_addr; |
3365 | in6_clearscope(&mr.mr_addr); |
3366 | if (!m_append(m, sizeof(mr), (void *)&mr)) { |
3367 | if (m != m0) { |
3368 | m_freem(m); |
3369 | } |
				os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3371 | __func__); |
3372 | return -ENOMEM; |
3373 | } |
3374 | npbytes += sizeof(struct mldv2_record); |
3375 | if (m != m0) { |
3376 | /* new packet; offset in chain */ |
3377 | md = m_getptr(m, npbytes - |
3378 | sizeof(struct mldv2_record), &off); |
3379 | pmr = (struct mldv2_record *)(mtod(md, |
3380 | uint8_t *) + off); |
3381 | } else { |
3382 | /* current packet; offset from last append */ |
3383 | md = m_last(m); |
3384 | pmr = (struct mldv2_record *)(mtod(md, |
3385 | uint8_t *) + md->m_len - |
3386 | sizeof(struct mldv2_record)); |
3387 | } |
3388 | /* |
3389 | * Begin walking the tree for this record type |
3390 | * pass, or continue from where we left off |
3391 | * previously if we had to allocate a new packet. |
3392 | * Only report deltas in-mode at t1. |
3393 | * We need not report included sources as allowed |
3394 | * if we are in inclusive mode on the group, |
3395 | * however the converse is not true. |
3396 | */ |
3397 | rsrcs = 0; |
3398 | if (nims == NULL) { |
3399 | nims = RB_MIN(ip6_msource_tree, |
3400 | &inm->in6m_srcs); |
3401 | } |
3402 | RB_FOREACH_FROM(ims, ip6_msource_tree, nims) { |
				MLD_PRINTF(("%s: visit node %s\n", __func__,
3404 | ip6_sprintf(&ims->im6s_addr))); |
3405 | now = im6s_get_mode(inm, ims, 1); |
3406 | then = im6s_get_mode(inm, ims, 0); |
				MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
3408 | __func__, then, now)); |
3409 | if (now == then) { |
					MLD_PRINTF(("%s: skip unchanged\n",
3411 | __func__)); |
3412 | continue; |
3413 | } |
3414 | if (mode == MCAST_EXCLUDE && |
3415 | now == MCAST_INCLUDE) { |
					MLD_PRINTF(("%s: skip IN src on EX "
					    "group\n", __func__));
3418 | continue; |
3419 | } |
3420 | nrt = (rectype_t)now; |
3421 | if (nrt == REC_NONE) { |
3422 | nrt = (rectype_t)(~mode & REC_FULL); |
3423 | } |
3424 | if (schanged++ == 0) { |
3425 | crt = nrt; |
3426 | } else if (crt != nrt) { |
3427 | continue; |
3428 | } |
3429 | if (!m_append(m, sizeof(struct in6_addr), |
3430 | (void *)&ims->im6s_addr)) { |
3431 | if (m != m0) { |
3432 | m_freem(m); |
3433 | } |
					os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3435 | __func__); |
3436 | return -ENOMEM; |
3437 | } |
3438 | nallow += !!(crt == REC_ALLOW); |
3439 | nblock += !!(crt == REC_BLOCK); |
3440 | if (++rsrcs == m0srcs) { |
3441 | break; |
3442 | } |
3443 | } |
3444 | /* |
3445 | * If we did not append any tree nodes on this |
3446 | * pass, back out of allocations. |
3447 | */ |
3448 | if (rsrcs == 0) { |
3449 | npbytes -= sizeof(struct mldv2_record); |
3450 | if (m != m0) { |
					MLD_PRINTF(("%s: m_free(m)\n",
3452 | __func__)); |
3453 | m_freem(m); |
3454 | } else { |
					MLD_PRINTF(("%s: m_adj(m, -mr)\n",
3456 | __func__)); |
3457 | m_adj(m, -((int)sizeof( |
3458 | struct mldv2_record))); |
3459 | } |
3460 | continue; |
3461 | } |
3462 | npbytes += (rsrcs * sizeof(struct in6_addr)); |
3463 | if (crt == REC_ALLOW) { |
3464 | pmr->mr_type = MLD_ALLOW_NEW_SOURCES; |
3465 | } else if (crt == REC_BLOCK) { |
3466 | pmr->mr_type = MLD_BLOCK_OLD_SOURCES; |
3467 | } |
3468 | pmr->mr_numsrc = htons((uint16_t)rsrcs); |
3469 | /* |
3470 | * Count the new group record, and enqueue this |
3471 | * packet if it wasn't already queued. |
3472 | */ |
3473 | m->m_pkthdr.vt_nrecs++; |
3474 | if (m != m0) { |
3475 | IF_ENQUEUE(ifq, m); |
3476 | } |
3477 | nbytes += npbytes; |
3478 | } while (nims != NULL); |
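		/*
		 * Mark the record type just emitted as complete and flip
		 * crt to the other type for the next walk of the tree.
		 */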
3479 | drt |= crt; |
3480 | crt = (~crt & REC_FULL); |
3481 | } |
3482 | |
	MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
3484 | nallow, nblock)); |
3485 | |
3486 | return nbytes; |
3487 | } |
3488 | |
3489 | static int |
3490 | mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) |
3491 | { |
3492 | struct ifqueue *gq; |
3493 | mbuf_ref_t m; /* pending state-change */ |
3494 | mbuf_ref_t m0; /* copy of pending state-change */ |
3495 | mbuf_ref_t mt; /* last state-change in packet */ |
3496 | mbuf_ref_t n; |
3497 | int docopy, domerge; |
3498 | u_int recslen; |
3499 | |
3500 | IN6M_LOCK_ASSERT_HELD(inm); |
3501 | |
3502 | docopy = 0; |
3503 | domerge = 0; |
3504 | recslen = 0; |
3505 | |
3506 | /* |
3507 | * If there are further pending retransmissions, make a writable |
3508 | * copy of each queued state-change message before merging. |
3509 | */ |
3510 | if (inm->in6m_scrv > 0) { |
3511 | docopy = 1; |
3512 | } |
3513 | |
3514 | gq = &inm->in6m_scq; |
3515 | #ifdef MLD_DEBUG |
3516 | if (gq->ifq_head == NULL) { |
		MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
3518 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm))); |
3519 | } |
3520 | #endif |
3521 | |
3522 | /* |
3523 | * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the |
3524 | * packet might not always be at the head of the ifqueue. |
3525 | */ |
3526 | m = gq->ifq_head; |
3527 | while (m != NULL) { |
3528 | /* |
3529 | * Only merge the report into the current packet if |
3530 | * there is sufficient space to do so; an MLDv2 report |
3531 | * packet may only contain 65,535 group records. |
		 * Always use a simple mbuf chain concatenation to do this,
3533 | * as large state changes for single groups may have |
3534 | * allocated clusters. |
3535 | */ |
3536 | domerge = 0; |
3537 | mt = ifscq->ifq_tail; |
3538 | if (mt != NULL) { |
3539 | recslen = m_length(m); |
3540 | |
3541 | if ((mt->m_pkthdr.vt_nrecs + |
3542 | m->m_pkthdr.vt_nrecs <= |
3543 | MLD_V2_REPORT_MAXRECS) && |
3544 | (mt->m_pkthdr.len + recslen <= |
3545 | (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) { |
3546 | domerge = 1; |
3547 | } |
3548 | } |
3549 | |
3550 | if (!domerge && IF_QFULL(gq)) { |
			os_log_info(OS_LOG_DEFAULT, "%s: outbound queue full",
3552 | __func__); |
3553 | n = m->m_nextpkt; |
3554 | if (!docopy) { |
3555 | IF_REMQUEUE(gq, m); |
3556 | m_freem(m); |
3557 | } |
3558 | m = n; |
3559 | continue; |
3560 | } |
3561 | |
3562 | if (!docopy) { |
			MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
3564 | (uint64_t)VM_KERNEL_ADDRPERM(m))); |
3565 | n = m->m_nextpkt; |
3566 | IF_REMQUEUE(gq, m); |
3567 | m0 = m; |
3568 | m = n; |
3569 | } else { |
			MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
3571 | (uint64_t)VM_KERNEL_ADDRPERM(m))); |
3572 | m0 = m_dup(m, M_NOWAIT); |
3573 | if (m0 == NULL) { |
3574 | return ENOMEM; |
3575 | } |
3576 | m0->m_nextpkt = NULL; |
3577 | m = m->m_nextpkt; |
3578 | } |
3579 | |
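		/*
		 * In the copy case the original message stays on the
		 * group's state-change queue for retransmission; only
		 * the duplicate is merged into or appended to ifscq.
		 */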
3580 | if (!domerge) { |
			MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx)\n",
3582 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0), |
3583 | (uint64_t)VM_KERNEL_ADDRPERM(ifscq))); |
3584 | IF_ENQUEUE(ifscq, m0); |
3585 | } else { |
3586 | struct mbuf *mtl; /* last mbuf of packet mt */ |
3587 | |
			MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
			    "0x%llx)\n", __func__,
3590 | (uint64_t)VM_KERNEL_ADDRPERM(m0), |
3591 | (uint64_t)VM_KERNEL_ADDRPERM(mt))); |
3592 | |
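			/*
			 * Strip the packet header from the copy and graft
			 * its mbufs onto the end of the tail packet,
			 * updating the length and record count by hand.
			 */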
3593 | mtl = m_last(mt); |
3594 | m0->m_flags &= ~M_PKTHDR; |
3595 | mt->m_pkthdr.len += recslen; |
3596 | mt->m_pkthdr.vt_nrecs += |
3597 | m0->m_pkthdr.vt_nrecs; |
3598 | |
3599 | mtl->m_next = m0; |
3600 | } |
3601 | } |
3602 | |
3603 | return 0; |
3604 | } |
3605 | |
3606 | /* |
3607 | * Respond to a pending MLDv2 General Query. |
3608 | */ |
3609 | static uint32_t |
3610 | mld_v2_dispatch_general_query(struct mld_ifinfo *mli) |
3611 | { |
3612 | struct ifnet *ifp; |
3613 | struct in6_multi *inm; |
3614 | struct in6_multistep step; |
3615 | int retval; |
3616 | |
3617 | MLI_LOCK_ASSERT_HELD(mli); |
3618 | |
3619 | VERIFY(mli->mli_version == MLD_VERSION_2); |
3620 | |
3621 | ifp = mli->mli_ifp; |
3622 | MLI_UNLOCK(mli); |
3623 | |
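	/*
	 * Walk every IPv6 membership in the system and queue a
	 * current-state record for each reporting group joined on this
	 * interface.
	 */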
3624 | in6_multihead_lock_shared(); |
3625 | IN6_FIRST_MULTI(step, inm); |
3626 | while (inm != NULL) { |
3627 | IN6M_LOCK(inm); |
3628 | if (inm->in6m_ifp != ifp) { |
3629 | goto next; |
3630 | } |
3631 | |
3632 | switch (inm->in6m_state) { |
3633 | case MLD_NOT_MEMBER: |
3634 | case MLD_SILENT_MEMBER: |
3635 | break; |
3636 | case MLD_REPORTING_MEMBER: |
3637 | case MLD_IDLE_MEMBER: |
3638 | case MLD_LAZY_MEMBER: |
3639 | case MLD_SLEEPING_MEMBER: |
3640 | case MLD_AWAKENING_MEMBER: |
3641 | inm->in6m_state = MLD_REPORTING_MEMBER; |
3642 | MLI_LOCK(mli); |
			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
			    inm, 0, 0, 0, 0);
3645 | MLI_UNLOCK(mli); |
			MLD_PRINTF(("%s: enqueue record = %d\n",
3647 | __func__, retval)); |
3648 | break; |
3649 | case MLD_G_QUERY_PENDING_MEMBER: |
3650 | case MLD_SG_QUERY_PENDING_MEMBER: |
3651 | case MLD_LEAVING_MEMBER: |
3652 | break; |
3653 | } |
3654 | next: |
3655 | IN6M_UNLOCK(inm); |
3656 | IN6_NEXT_MULTI(step, inm); |
3657 | } |
3658 | in6_multihead_lock_done(); |
3659 | |
3660 | MLI_LOCK(mli); |
	mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3662 | MLI_LOCK_ASSERT_HELD(mli); |
3663 | |
3664 | /* |
3665 | * Slew transmission of bursts over 1 second intervals. |
3666 | */ |
3667 | if (mli->mli_gq.ifq_head != NULL) { |
3668 | mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY( |
3669 | MLD_RESPONSE_BURST_INTERVAL); |
3670 | } |
3671 | |
3672 | return mli->mli_v2_timer; |
3673 | } |
3674 | |
3675 | /* |
3676 | * Transmit the next pending message in the output queue. |
3677 | * |
 * Must not be called with in6m_lock or mli_lock held.
3679 | */ |
3680 | __attribute__((noinline)) |
3681 | static void |
3682 | mld_dispatch_packet(struct mbuf *m) |
3683 | { |
3684 | struct ip6_moptions *im6o; |
3685 | struct ifnet *ifp; |
3686 | struct ifnet *__single oifp = NULL; |
3687 | mbuf_ref_t m0, md; |
3688 | struct ip6_hdr *ip6; |
3689 | struct icmp6_hdr *icmp6; |
3690 | int error; |
3691 | int off; |
3692 | int type; |
3693 | |
	MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
3695 | (uint64_t)VM_KERNEL_ADDRPERM(m))); |
3696 | |
3697 | /* |
3698 | * Check if the ifnet is still attached. |
3699 | */ |
3700 | ifp = mld_restore_context(m); |
	if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
		os_log_error(OS_LOG_DEFAULT, "%s: dropped 0x%llx as interface went away\n",
3703 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(m)); |
3704 | m_freem(m); |
3705 | ip6stat.ip6s_noroute++; |
3706 | return; |
3707 | } |
3708 | im6o = ip6_allocmoptions(Z_WAITOK); |
3709 | if (im6o == NULL) { |
3710 | m_freem(m); |
3711 | return; |
3712 | } |
3713 | |
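	/*
	 * MLD messages are sent with a hop limit of 1 and are not
	 * looped back; pin the outgoing interface to the one the
	 * report was queued for.
	 */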
3714 | im6o->im6o_multicast_hlim = 1; |
3715 | im6o->im6o_multicast_loop = 0; |
3716 | im6o->im6o_multicast_ifp = ifp; |
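	/*
	 * MLDv1 messages are queued fully formed and flagged M_MLDV1;
	 * MLDv2 group records still need the IPv6 header and report
	 * header prepended by mld_v2_encap_report() below.
	 */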
3717 | if (m->m_flags & M_MLDV1) { |
3718 | m0 = m; |
3719 | } else { |
3720 | m0 = mld_v2_encap_report(ifp, m); |
3721 | if (m0 == NULL) { |
			os_log_error(OS_LOG_DEFAULT, "%s: dropped 0x%llx\n", __func__,
3723 | (uint64_t)VM_KERNEL_ADDRPERM(m)); |
3724 | /* |
3725 | * mld_v2_encap_report() has already freed our mbuf. |
3726 | */ |
3727 | IM6O_REMREF(im6o); |
3728 | ip6stat.ip6s_odropped++; |
3729 | return; |
3730 | } |
3731 | } |
	mld_scrub_context(m0);
3733 | m->m_flags &= ~(M_PROTOFLAGS); |
3734 | m0->m_pkthdr.rcvif = lo_ifp; |
3735 | |
3736 | ip6 = mtod(m0, struct ip6_hdr *); |
3737 | (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); |
3738 | ip6_output_setdstifscope(m0, ifp->if_index, NULL); |
3739 | /* |
3740 | * Retrieve the ICMPv6 type before handoff to ip6_output(), |
3741 | * so we can bump the stats. |
3742 | */ |
3743 | md = m_getptr(m0, sizeof(struct ip6_hdr), &off); |
3744 | icmp6 = (struct icmp6_hdr *)(mtod(md, uint8_t *) + off); |
3745 | type = icmp6->icmp6_type; |
3746 | |
3747 | if (ifp->if_eflags & IFEF_TXSTART) { |
3748 | /* |
3749 | * Use control service class if the outgoing |
3750 | * interface supports transmit-start model. |
3751 | */ |
3752 | (void) m_set_service_class(m0, MBUF_SC_CTL); |
3753 | } |
3754 | |
3755 | error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o, |
3756 | &oifp, NULL); |
3757 | |
3758 | IM6O_REMREF(im6o); |
3759 | |
3760 | if (error) { |
		os_log_error(OS_LOG_DEFAULT, "%s: ip6_output(0x%llx) = %d\n", __func__,
3762 | (uint64_t)VM_KERNEL_ADDRPERM(m0), error); |
3763 | if (oifp != NULL) { |
			ifnet_release(oifp);
3765 | } |
3766 | return; |
3767 | } |
3768 | |
3769 | icmp6stat.icp6s_outhist[type]++; |
3770 | if (oifp != NULL) { |
3771 | icmp6_ifstat_inc(oifp, ifs6_out_msg); |
3772 | switch (type) { |
3773 | case MLD_LISTENER_REPORT: |
3774 | case MLDV2_LISTENER_REPORT: |
3775 | icmp6_ifstat_inc(oifp, ifs6_out_mldreport); |
3776 | break; |
3777 | case MLD_LISTENER_DONE: |
3778 | icmp6_ifstat_inc(oifp, ifs6_out_mlddone); |
3779 | break; |
3780 | } |
		ifnet_release(oifp);
3782 | } |
3783 | } |
3784 | |
3785 | /* |
3786 | * Encapsulate an MLDv2 report. |
3787 | * |
3788 | * KAME IPv6 requires that hop-by-hop options be passed separately, |
3789 | * and that the IPv6 header be prepended in a separate mbuf. |
3790 | * |
3791 | * Returns a pointer to the new mbuf chain head, or NULL if the |
3792 | * allocation failed. |
3793 | */ |
3794 | static struct mbuf * |
3795 | mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m) |
3796 | { |
3797 | struct mbuf *mh; |
3798 | struct mldv2_report *mld; |
3799 | struct ip6_hdr *ip6; |
3800 | struct in6_ifaddr *ia; |
3801 | int mldreclen; |
3802 | |
3803 | VERIFY(m->m_flags & M_PKTHDR); |
3804 | |
3805 | /* |
3806 | * RFC3590: OK to send as :: or tentative during DAD. |
3807 | */ |
3808 | ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); |
3809 | if (ia == NULL) { |
		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
3811 | } |
3812 | |
3813 | MGETHDR(mh, M_DONTWAIT, MT_HEADER); |
3814 | if (mh == NULL) { |
3815 | if (ia != NULL) { |
			ifa_remref(&ia->ia_ifa);
3817 | } |
3818 | m_freem(m); |
3819 | return NULL; |
3820 | } |
3821 | MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report)); |
3822 | |
3823 | mldreclen = m_length(m); |
	MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));
3825 | |
3826 | mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report); |
3827 | mh->m_pkthdr.len = sizeof(struct ip6_hdr) + |
3828 | sizeof(struct mldv2_report) + mldreclen; |
3829 | |
3830 | ip6 = mtod(mh, struct ip6_hdr *); |
3831 | ip6->ip6_flow = 0; |
3832 | ip6->ip6_vfc &= ~IPV6_VERSION_MASK; |
3833 | ip6->ip6_vfc |= IPV6_VERSION; |
3834 | ip6->ip6_nxt = IPPROTO_ICMPV6; |
3835 | if (ia != NULL) { |
3836 | IFA_LOCK(&ia->ia_ifa); |
3837 | } |
3838 | ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; |
3839 | ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia); |
3840 | |
3841 | if (ia != NULL) { |
3842 | IFA_UNLOCK(&ia->ia_ifa); |
		ifa_remref(&ia->ia_ifa);
3844 | ia = NULL; |
3845 | } |
3846 | ip6->ip6_dst = in6addr_linklocal_allv2routers; |
3847 | ip6_output_setdstifscope(mh, ifp->if_index, NULL); |
3848 | /* scope ID will be set in netisr */ |
3849 | |
3850 | mld = (struct mldv2_report *)(ip6 + 1); |
3851 | mld->mld_type = MLDV2_LISTENER_REPORT; |
3852 | mld->mld_code = 0; |
3853 | mld->mld_cksum = 0; |
3854 | mld->mld_v2_reserved = 0; |
3855 | mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs); |
3856 | m->m_pkthdr.vt_nrecs = 0; |
3857 | m->m_flags &= ~M_PKTHDR; |
3858 | |
3859 | mh->m_next = m; |
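	/*
	 * Compute the ICMPv6 checksum over the report header and all of
	 * the appended group records; in6_cksum() also covers the IPv6
	 * pseudo-header.
	 */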
3860 | mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, |
3861 | sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen); |
3862 | return mh; |
3863 | } |
3864 | |
3865 | #ifdef MLD_DEBUG |
3866 | static const char * |
3867 | mld_rec_type_to_str(const int type) |
3868 | { |
	switch (type) {
	case MLD_CHANGE_TO_EXCLUDE_MODE:
		return "TO_EX";
	case MLD_CHANGE_TO_INCLUDE_MODE:
		return "TO_IN";
	case MLD_MODE_IS_EXCLUDE:
		return "MODE_EX";
	case MLD_MODE_IS_INCLUDE:
		return "MODE_IN";
	case MLD_ALLOW_NEW_SOURCES:
		return "ALLOW_NEW";
	case MLD_BLOCK_OLD_SOURCES:
		return "BLOCK_OLD";
	default:
		break;
	}
	return "unknown";
3886 | } |
3887 | #endif |
3888 | |
3889 | void |
3890 | mld_init(void) |
3891 | { |
	os_log(OS_LOG_DEFAULT, "%s: initializing\n", __func__);
3893 | |
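	/*
	 * All MLD output shares one set of packet options: hop limit 1,
	 * the hop-by-hop Router Alert option, a preference for
	 * non-temporary source addresses, and no fragmentation.
	 */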
3894 | ip6_initpktopts(&mld_po); |
3895 | mld_po.ip6po_hlim = 1; |
3896 | mld_po.ip6po_hbh = &mld_ra.hbh; |
3897 | mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER; |
3898 | mld_po.ip6po_flags = IP6PO_DONTFRAG; |
3899 | LIST_INIT(&mli_head); |
3900 | } |
3901 | |