1/*
2 * Copyright (c) 2003-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. Neither the name of the project nor the names of its contributors
42 * may be used to endorse or promote products derived from this software
43 * without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58/*
59 * Copyright (c) 1982, 1986, 1988, 1993
60 * The Regents of the University of California. All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions
64 * are met:
65 * 1. Redistributions of source code must retain the above copyright
66 * notice, this list of conditions and the following disclaimer.
67 * 2. Redistributions in binary form must reproduce the above copyright
68 * notice, this list of conditions and the following disclaimer in the
69 * documentation and/or other materials provided with the distribution.
70 * 3. All advertising materials mentioning features or use of this software
71 * must display the following acknowledgement:
72 * This product includes software developed by the University of
73 * California, Berkeley and its contributors.
74 * 4. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
91 */
92
93#include <sys/param.h>
94#include <sys/systm.h>
95#include <sys/malloc.h>
96#include <sys/mbuf.h>
97#include <sys/domain.h>
98#include <sys/protosw.h>
99#include <sys/socket.h>
100#include <sys/socketvar.h>
101#include <sys/errno.h>
102#include <sys/time.h>
103#include <sys/kernel.h>
104#include <sys/syslog.h>
105#include <sys/sysctl.h>
106#include <sys/proc.h>
107#include <sys/kauth.h>
108#include <sys/mcache.h>
109
110#include <mach/mach_time.h>
111#include <mach/sdt.h>
112#include <pexpert/pexpert.h>
113#include <dev/random/randomdev.h>
114
115#include <net/if.h>
116#include <net/if_var.h>
117#include <net/if_types.h>
118#include <net/if_dl.h>
119#include <net/route.h>
120#include <net/kpi_protocol.h>
121#include <net/ntstat.h>
122#include <net/init.h>
123#include <net/net_osdep.h>
124#include <net/net_perf.h>
125
126#include <netinet/in.h>
127#include <netinet/in_systm.h>
128#if INET
129#include <netinet/ip.h>
130#include <netinet/ip_icmp.h>
131#endif /* INET */
132#include <netinet/kpi_ipfilter_var.h>
133#include <netinet/ip6.h>
134#include <netinet6/in6_var.h>
135#include <netinet6/ip6_var.h>
136#include <netinet/in_pcb.h>
137#include <netinet/icmp6.h>
138#include <netinet6/in6_ifattach.h>
139#include <netinet6/nd6.h>
140#include <netinet6/scope6_var.h>
141#include <netinet6/ip6protosw.h>
142
143#if IPSEC
144#include <netinet6/ipsec.h>
145#include <netinet6/ipsec6.h>
146extern int ipsec_bypass;
147#endif /* IPSEC */
148
149#if DUMMYNET
150#include <netinet/ip_fw.h>
151#include <netinet/ip_dummynet.h>
152#endif /* DUMMYNET */
153
154/* we need it for NLOOP. */
155#include "loop.h"
156
157#if PF
158#include <net/pfvar.h>
159#endif /* PF */
160
161struct ip6protosw *ip6_protox[IPPROTO_MAX];
162
163static lck_grp_attr_t *in6_ifaddr_rwlock_grp_attr;
164static lck_grp_t *in6_ifaddr_rwlock_grp;
165static lck_attr_t *in6_ifaddr_rwlock_attr;
166decl_lck_rw_data(, in6_ifaddr_rwlock);
167
168/* Protected by in6_ifaddr_rwlock */
169struct in6_ifaddr *in6_ifaddrs = NULL;
170
171#define IN6_IFSTAT_REQUIRE_ALIGNED_64(f) \
172 _CASSERT(!(offsetof(struct in6_ifstat, f) % sizeof (uint64_t)))
173
174#define ICMP6_IFSTAT_REQUIRE_ALIGNED_64(f) \
175 _CASSERT(!(offsetof(struct icmp6_ifstat, f) % sizeof (uint64_t)))
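
/*
 * For illustration: each *_REQUIRE_ALIGNED_64 use below is a compile-time
 * assertion that the named 64-bit counter sits at an 8-byte-aligned offset
 * within its stats structure (presumably so that 64-bit atomic updates are
 * safe). For example,
 *
 *	IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_receive);
 *
 * expands to roughly
 *
 *	_CASSERT(!(offsetof(struct in6_ifstat, ifs6_in_receive) %
 *	    sizeof (uint64_t)));
 *
 * which fails the build if the field ever becomes misaligned.
 */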
176
177struct ip6stat ip6stat;
178
179decl_lck_mtx_data(, proxy6_lock);
180decl_lck_mtx_data(static, dad6_mutex_data);
181decl_lck_mtx_data(static, nd6_mutex_data);
182decl_lck_mtx_data(static, prefix6_mutex_data);
183lck_mtx_t *dad6_mutex = &dad6_mutex_data;
184lck_mtx_t *nd6_mutex = &nd6_mutex_data;
185lck_mtx_t *prefix6_mutex = &prefix6_mutex_data;
186#ifdef ENABLE_ADDRSEL
187decl_lck_mtx_data(static, addrsel_mutex_data);
188lck_mtx_t *addrsel_mutex = &addrsel_mutex_data;
189#endif
190static lck_attr_t *ip6_mutex_attr;
191static lck_grp_t *ip6_mutex_grp;
192static lck_grp_attr_t *ip6_mutex_grp_attr;
193
194extern int loopattach_done;
195extern void addrsel_policy_init(void);
196
197static int sysctl_reset_ip6_input_stats SYSCTL_HANDLER_ARGS;
198static int sysctl_ip6_input_measure_bins SYSCTL_HANDLER_ARGS;
199static int sysctl_ip6_input_getperf SYSCTL_HANDLER_ARGS;
200static void ip6_init_delayed(void);
201static int ip6_hopopts_input(u_int32_t *, u_int32_t *, struct mbuf **, int *);
202
203#if NSTF
204extern void stfattach(void);
205#endif /* NSTF */
206
207SYSCTL_DECL(_net_inet6_ip6);
208
209static uint32_t ip6_adj_clear_hwcksum = 0;
210SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, adj_clear_hwcksum,
211 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_clear_hwcksum, 0,
212 "Invalidate hwcksum info when adjusting length");
213
214static uint32_t ip6_adj_partial_sum = 1;
215SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, adj_partial_sum,
216 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_partial_sum, 0,
217 "Perform partial sum adjustment of trailing bytes at IP layer");
218
219static int ip6_input_measure = 0;
220SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf,
221 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
222 &ip6_input_measure, 0, sysctl_reset_ip6_input_stats, "I", "Do time measurement");
223
224static uint64_t ip6_input_measure_bins = 0;
225SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf_bins,
226 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_input_measure_bins, 0,
227 sysctl_ip6_input_measure_bins, "I",
228 "bins for chaining performance data histogram");
229
230static net_perf_t net_perf;
231SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf_data,
232 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
233 0, 0, sysctl_ip6_input_getperf, "S,net_perf",
234 "IP6 input performance data (struct net_perf, net/net_perf.h)");
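
/*
 * The knobs above surface as sysctl nodes under net.inet6.ip6. A rough
 * usage sketch from userland (names follow the OID declarations above;
 * exact output formatting may differ):
 *
 *	# enable input-path timing and pick a histogram bin count
 *	sysctl -w net.inet6.ip6.input_perf=1
 *	sysctl -w net.inet6.ip6.input_perf_bins=32
 *	# read back the accumulated struct net_perf
 *	sysctl net.inet6.ip6.input_perf_data
 */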
235
236/*
237 * On platforms which require strict alignment (currently for anything but
238 * i386 or x86_64), check if the IP header pointer is 32-bit aligned; if not,
239 * copy the contents of the mbuf chain into a new chain, and free the original
240 * one. Create some head room in the first mbuf of the new chain, in case
241 * it's needed later on.
242 *
243 * RFC 2460 says that IPv6 headers are 64-bit aligned, but network interfaces
244 * mostly align to 32-bit boundaries. Care should be taken never to use 64-bit
245 * load/store operations on the fields in IPv6 headers.
246 */
247#if defined(__i386__) || defined(__x86_64__)
248#define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0)
249#else /* !__i386__ && !__x86_64__ */
250#define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { \
251 if (!IP6_HDR_ALIGNED_P(mtod(_m, caddr_t))) { \
252 struct mbuf *_n; \
253 struct ifnet *__ifp = (_ifp); \
254 atomic_add_64(&(__ifp)->if_alignerrs, 1); \
255 if (((_m)->m_flags & M_PKTHDR) && \
256 (_m)->m_pkthdr.pkt_hdr != NULL) \
257 (_m)->m_pkthdr.pkt_hdr = NULL; \
258 _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT); \
259 if (_n == NULL) { \
260 ip6stat.ip6s_toosmall++; \
261 m_freem(_m); \
262 (_m) = NULL; \
263 _action; \
264 } else { \
265 VERIFY(_n != (_m)); \
266 (_m) = _n; \
267 } \
268 } \
269} while (0)
270#endif /* !__i386__ && !__x86_64__ */
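
/*
 * Usage sketch: callers invoke the macro with the action to take when the
 * fixup fails and the mbuf has been freed, e.g. in ip6_input():
 *
 *	IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return);
 *
 * On i386/x86_64 this compiles to nothing; elsewhere it may substitute a
 * freshly defragmented mbuf for the original one.
 */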
271
272static void
273ip6_proto_input(protocol_family_t protocol, mbuf_t packet)
274{
275#pragma unused(protocol)
276#if INET
277 struct timeval start_tv;
278 if (ip6_input_measure)
279 net_perf_start_time(&net_perf, &start_tv);
280#endif /* INET */
281 ip6_input(packet);
282#if INET
283 if (ip6_input_measure) {
284 net_perf_measure_time(&net_perf, &start_tv, 1);
285 net_perf_histogram(&net_perf, 1);
286 }
287#endif /* INET */
288}
289
290/*
291 * IP6 initialization: fill in IP6 protocol switch table.
292 * All protocols not implemented in kernel go to raw IP6 protocol handler.
293 */
294void
295ip6_init(struct ip6protosw *pp, struct domain *dp)
296{
297 static int ip6_initialized = 0;
298 struct protosw *pr;
299 struct timeval tv;
300 int i;
301 domain_unguard_t unguard;
302
303 domain_proto_mtx_lock_assert_held();
304 VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);
305
306 _CASSERT((sizeof (struct ip6_hdr) +
307 sizeof (struct icmp6_hdr)) <= _MHLEN);
308
309 if (ip6_initialized)
310 return;
311 ip6_initialized = 1;
312
313 eventhandler_lists_ctxt_init(&in6_evhdlr_ctxt);
314 (void)EVENTHANDLER_REGISTER(&in6_evhdlr_ctxt, in6_event,
315 in6_eventhdlr_callback, eventhandler_entry_dummy_arg,
316 EVENTHANDLER_PRI_ANY);
317
318 eventhandler_lists_ctxt_init(&in6_clat46_evhdlr_ctxt);
319 (void)EVENTHANDLER_REGISTER(&in6_clat46_evhdlr_ctxt, in6_clat46_event,
320 in6_clat46_eventhdlr_callback, eventhandler_entry_dummy_arg,
321 EVENTHANDLER_PRI_ANY);
322
323 for (i = 0; i < IN6_EVENT_MAX; i++)
324 VERIFY(in6_event2kev_array[i].in6_event_code == i);
325
326 pr = pffindproto_locked(PF_INET6, IPPROTO_RAW, SOCK_RAW);
327 if (pr == NULL) {
328 panic("%s: Unable to find [PF_INET6,IPPROTO_RAW,SOCK_RAW]\n",
329 __func__);
330 /* NOTREACHED */
331 }
332
333 /* Initialize the entire ip6_protox[] array to IPPROTO_RAW. */
334 for (i = 0; i < IPPROTO_MAX; i++)
335 ip6_protox[i] = (struct ip6protosw *)pr;
336 /*
337 * Cycle through IP protocols and put them into the appropriate place
338 * in ip6_protox[], skipping protocols IPPROTO_{IP,RAW}.
339 */
340 VERIFY(dp == inet6domain && dp->dom_family == PF_INET6);
341 TAILQ_FOREACH(pr, &dp->dom_protosw, pr_entry) {
342 VERIFY(pr->pr_domain == dp);
343 if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) {
344 /* Be careful to only index valid IP protocols. */
345 if (pr->pr_protocol < IPPROTO_MAX)
346 ip6_protox[pr->pr_protocol] =
347 (struct ip6protosw *)pr;
348 }
349 }
350
351 ip6_mutex_grp_attr = lck_grp_attr_alloc_init();
352
353 ip6_mutex_grp = lck_grp_alloc_init("ip6", ip6_mutex_grp_attr);
354 ip6_mutex_attr = lck_attr_alloc_init();
355
356 lck_mtx_init(dad6_mutex, ip6_mutex_grp, ip6_mutex_attr);
357 lck_mtx_init(nd6_mutex, ip6_mutex_grp, ip6_mutex_attr);
358 lck_mtx_init(prefix6_mutex, ip6_mutex_grp, ip6_mutex_attr);
359 scope6_init(ip6_mutex_grp, ip6_mutex_attr);
360
361#ifdef ENABLE_ADDRSEL
362 lck_mtx_init(addrsel_mutex, ip6_mutex_grp, ip6_mutex_attr);
363#endif
364
365 lck_mtx_init(&proxy6_lock, ip6_mutex_grp, ip6_mutex_attr);
366
367 in6_ifaddr_rwlock_grp_attr = lck_grp_attr_alloc_init();
368 in6_ifaddr_rwlock_grp = lck_grp_alloc_init("in6_ifaddr_rwlock",
369 in6_ifaddr_rwlock_grp_attr);
370 in6_ifaddr_rwlock_attr = lck_attr_alloc_init();
371 lck_rw_init(&in6_ifaddr_rwlock, in6_ifaddr_rwlock_grp,
372 in6_ifaddr_rwlock_attr);
373
374 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_receive);
375 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_hdrerr);
376 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_toobig);
377 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_noroute);
378 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_addrerr);
379 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_protounknown);
380 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_truncated);
381 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_discard);
382 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_deliver);
383 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_forward);
384 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_request);
385 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_discard);
386 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_fragok);
387 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_fragfail);
388 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_fragcreat);
389 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_reass_reqd);
390 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_reass_ok);
391 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_reass_fail);
392 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mcast);
393 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mcast);
394
395 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_msg);
396 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_error);
397 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_dstunreach);
398 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_adminprohib);
399 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_timeexceed);
400 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_paramprob);
401 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_pkttoobig);
402 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_echo);
403 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_echoreply);
404 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_routersolicit);
405 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_routeradvert);
406 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_neighborsolicit);
407 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_neighboradvert);
408 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_redirect);
409 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mldquery);
410 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mldreport);
411 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mlddone);
412
413 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_msg);
414 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_error);
415 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_dstunreach);
416 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_adminprohib);
417 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_timeexceed);
418 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_paramprob);
419 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_pkttoobig);
420 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_echo);
421 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_echoreply);
422 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_routersolicit);
423 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_routeradvert);
424 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_neighborsolicit);
425 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_neighboradvert);
426 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_redirect);
427 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mldquery);
428 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mldreport);
429 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mlddone);
430
431 getmicrotime(&tv);
432 ip6_desync_factor =
433 (RandomULong() ^ tv.tv_usec) % MAX_TEMP_DESYNC_FACTOR;
434
435 in6_ifaddr_init();
436 ip6_moptions_init();
437 nd6_init();
438 frag6_init();
439 icmp6_init(NULL, dp);
440 addrsel_policy_init();
441
442 /*
443 * P2P interfaces often route the local address to the loopback
444 * interface. At this point, lo0 hasn't been initialized yet, which
445 * means that we need to delay the IPv6 configuration of lo0.
446 */
447 net_init_add(ip6_init_delayed);
448
449 unguard = domain_unguard_deploy();
450 i = proto_register_input(PF_INET6, ip6_proto_input, NULL, 0);
451 if (i != 0) {
452 panic("%s: failed to register PF_INET6 protocol: %d\n",
453 __func__, i);
454 /* NOTREACHED */
455 }
456 domain_unguard_release(unguard);
457}
458
459static void
460ip6_init_delayed(void)
461{
462 (void) in6_ifattach_prelim(lo_ifp);
463
	/* timer for regeneration of temporary addresses' randomized ID */
465 timeout(in6_tmpaddrtimer, NULL,
466 (ip6_temp_preferred_lifetime - ip6_desync_factor -
467 ip6_temp_regen_advance) * hz);
468
469#if NSTF
470 stfattach();
471#endif /* NSTF */
472}
473
474static void
475ip6_input_adjust(struct mbuf *m, struct ip6_hdr *ip6, uint32_t plen,
476 struct ifnet *inifp)
477{
478 boolean_t adjust = TRUE;
479 uint32_t tot_len = sizeof (*ip6) + plen;
480
481 ASSERT(m_pktlen(m) > tot_len);
482
483 /*
484 * Invalidate hardware checksum info if ip6_adj_clear_hwcksum
485 * is set; useful to handle buggy drivers. Note that this
486 * should not be enabled by default, as we may get here due
487 * to link-layer padding.
488 */
489 if (ip6_adj_clear_hwcksum &&
490 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
491 !(inifp->if_flags & IFF_LOOPBACK) &&
492 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
493 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
494 m->m_pkthdr.csum_data = 0;
495 ip6stat.ip6s_adj_hwcsum_clr++;
496 }
497
498 /*
499 * If partial checksum information is available, subtract
500 * out the partial sum of postpended extraneous bytes, and
501 * update the checksum metadata accordingly. By doing it
502 * here, the upper layer transport only needs to adjust any
503 * prepended extraneous bytes (else it will do both.)
504 */
505 if (ip6_adj_partial_sum &&
506 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID|CSUM_PARTIAL)) ==
507 (CSUM_DATA_VALID|CSUM_PARTIAL)) {
508 m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
509 m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
510 (tot_len - m->m_pkthdr.csum_rx_start),
511 m->m_pkthdr.csum_rx_val);
512 } else if ((m->m_pkthdr.csum_flags &
513 (CSUM_DATA_VALID|CSUM_PARTIAL)) ==
514 (CSUM_DATA_VALID|CSUM_PARTIAL)) {
515 /*
516 * If packet has partial checksum info and we decided not
517 * to subtract the partial sum of postpended extraneous
518 * bytes here (not the default case), leave that work to
519 * be handled by the other layers. For now, only TCP, UDP
520 * layers are capable of dealing with this. For all other
521 * protocols (including fragments), trim and ditch the
522 * partial sum as those layers might not implement partial
523 * checksumming (or adjustment) at all.
524 */
525 if (ip6->ip6_nxt == IPPROTO_TCP ||
526 ip6->ip6_nxt == IPPROTO_UDP) {
527 adjust = FALSE;
528 } else {
529 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
530 m->m_pkthdr.csum_data = 0;
531 ip6stat.ip6s_adj_hwcsum_clr++;
532 }
533 }
534
535 if (adjust) {
536 ip6stat.ip6s_adj++;
537 if (m->m_len == m->m_pkthdr.len) {
538 m->m_len = tot_len;
539 m->m_pkthdr.len = tot_len;
540 } else {
541 m_adj(m, tot_len - m->m_pkthdr.len);
542 }
543 }
544}
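
/*
 * Worked example (illustrative): with plen = 2, the IPv6 packet is
 * sizeof (struct ip6_hdr) + 2 = 42 bytes, below the 46-byte Ethernet
 * minimum payload, so the driver may hand us a few bytes of link-layer
 * padding. ip6_input_adjust() trims m back down to tot_len and, when
 * partial checksums are in use, subtracts the padding's contribution
 * from csum_rx_val so the transport layer sees a sum that covers only
 * real packet bytes.
 */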
545
546void
547ip6_input(struct mbuf *m)
548{
549 struct ip6_hdr *ip6;
550 int off = sizeof (struct ip6_hdr), nest;
551 u_int32_t plen;
552 u_int32_t rtalert = ~0;
553 int nxt = 0, ours = 0;
554 struct ifnet *inifp, *deliverifp = NULL;
555 ipfilter_t inject_ipfref = NULL;
556 int seen = 1;
557 struct in6_ifaddr *ia6 = NULL;
558 struct sockaddr_in6 *dst6;
559#if DUMMYNET
560 struct m_tag *tag;
561#endif /* DUMMYNET */
562 struct {
563 struct route_in6 rin6;
564#if DUMMYNET
565 struct ip_fw_args args;
566#endif /* DUMMYNET */
567 } ip6ibz;
568#define rin6 ip6ibz.rin6
569#define args ip6ibz.args
570
571 /* zero out {rin6, args} */
572 bzero(&ip6ibz, sizeof (ip6ibz));
573
574 /*
575 * Check if the packet we received is valid after interface filter
576 * processing
577 */
578 MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
579 inifp = m->m_pkthdr.rcvif;
580 VERIFY(inifp != NULL);
581
582 /* Perform IP header alignment fixup, if needed */
583 IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return);
584
585 m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
586#if IPSEC
587 /*
588 * should the inner packet be considered authentic?
589 * see comment in ah4_input().
590 */
591 m->m_flags &= ~M_AUTHIPHDR;
592 m->m_flags &= ~M_AUTHIPDGM;
593#endif /* IPSEC */
594
	/*
	 * Make sure we don't carry stale onion-peeling (nested packet)
	 * information in m_aux.
	 */
598 ip6_delaux(m);
599
600#if DUMMYNET
601 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
602 KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
603 struct dn_pkt_tag *dn_tag;
604
605 dn_tag = (struct dn_pkt_tag *)(tag+1);
606
607 args.fwa_pf_rule = dn_tag->dn_pf_rule;
608
609 m_tag_delete(m, tag);
610 }
611
612 if (args.fwa_pf_rule) {
613 ip6 = mtod(m, struct ip6_hdr *); /* In case PF got disabled */
614
615 goto check_with_pf;
616 }
617#endif /* DUMMYNET */
618
	/*
	 * No need to process the packet twice if we've already seen it.
	 */
622 inject_ipfref = ipf_get_inject_filter(m);
623 if (inject_ipfref != NULL) {
624 ip6 = mtod(m, struct ip6_hdr *);
625 nxt = ip6->ip6_nxt;
626 seen = 0;
627 goto injectit;
628 } else {
629 seen = 1;
630 }
631
632 /*
633 * mbuf statistics
634 */
635 if (m->m_flags & M_EXT) {
636 if (m->m_next != NULL)
637 ip6stat.ip6s_mext2m++;
638 else
639 ip6stat.ip6s_mext1++;
640 } else {
641#define M2MMAX (sizeof (ip6stat.ip6s_m2m) / sizeof (ip6stat.ip6s_m2m[0]))
642 if (m->m_next != NULL) {
643 if (m->m_pkthdr.pkt_flags & PKTF_LOOP) {
644 /* XXX */
645 ip6stat.ip6s_m2m[ifnet_index(lo_ifp)]++;
646 } else if (inifp->if_index < M2MMAX) {
647 ip6stat.ip6s_m2m[inifp->if_index]++;
648 } else {
649 ip6stat.ip6s_m2m[0]++;
650 }
651 } else {
652 ip6stat.ip6s_m1++;
653 }
654#undef M2MMAX
655 }
656
657 /*
658 * Drop the packet if IPv6 operation is disabled on the interface.
659 */
660 if (inifp->if_eflags & IFEF_IPV6_DISABLED)
661 goto bad;
662
663 in6_ifstat_inc_na(inifp, ifs6_in_receive);
664 ip6stat.ip6s_total++;
665
	/*
	 * The L2 bridge code and some other paths can hand us an mbuf chain
	 * that does not conform to the KAME requirement (headers contiguous
	 * in the first mbuf), so coalesce it here.
	 * XXX: fails to coalesce if the interface MTU > MCLBYTES; jumbogram?
	 */
671 if (m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) {
672 struct mbuf *n;
673
674 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
675 if (n)
676 M_COPY_PKTHDR(n, m);
677 if (n && m->m_pkthdr.len > MHLEN) {
678 MCLGET(n, M_DONTWAIT);
679 if ((n->m_flags & M_EXT) == 0) {
680 m_freem(n);
681 n = NULL;
682 }
683 }
684 if (n == NULL)
685 goto bad;
686
687 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
688 n->m_len = m->m_pkthdr.len;
689 m_freem(m);
690 m = n;
691 }
692 IP6_EXTHDR_CHECK(m, 0, sizeof (struct ip6_hdr), { goto done; });
693
694 if (m->m_len < sizeof (struct ip6_hdr)) {
695 if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == 0) {
696 ip6stat.ip6s_toosmall++;
697 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
698 goto done;
699 }
700 }
701
702 ip6 = mtod(m, struct ip6_hdr *);
703
704 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
705 ip6stat.ip6s_badvers++;
706 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
707 goto bad;
708 }
709
710 ip6stat.ip6s_nxthist[ip6->ip6_nxt]++;
711
712 /*
713 * Check against address spoofing/corruption.
714 */
715 if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP) &&
716 IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
717 ip6stat.ip6s_badscope++;
718 in6_ifstat_inc(inifp, ifs6_in_addrerr);
719 goto bad;
720 }
721 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src) ||
722 IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst)) {
723 /*
724 * XXX: "badscope" is not very suitable for a multicast source.
725 */
726 ip6stat.ip6s_badscope++;
727 in6_ifstat_inc(inifp, ifs6_in_addrerr);
728 goto bad;
729 }
730 if (IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) &&
731 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
732 /*
733 * In this case, the packet should come from the loopback
734 * interface. However, we cannot just check the if_flags,
735 * because ip6_mloopback() passes the "actual" interface
736 * as the outgoing/incoming interface.
737 */
738 ip6stat.ip6s_badscope++;
739 in6_ifstat_inc(inifp, ifs6_in_addrerr);
740 goto bad;
741 }
742
	/*
	 * The following check is not documented in the specs. A malicious
	 * party may be able to use an IPv4-mapped address to confuse the
	 * TCP/UDP stack and bypass security checks (acting as if it came
	 * from 127.0.0.1 by using the IPv6 source ::ffff:127.0.0.1). Be
	 * cautious.
	 *
	 * This check chokes if we are in a SIIT cloud. As none of the BSDs
	 * support IPv4-less kernel compilation, we cannot support a SIIT
	 * environment at all. So it makes more sense for us to reject any
	 * malicious packets in a non-SIIT environment than to attempt
	 * partial support for SIIT.
	 */
755 if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
756 IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
757 ip6stat.ip6s_badscope++;
758 in6_ifstat_inc(inifp, ifs6_in_addrerr);
759 goto bad;
760 }
761#if 0
762 /*
763 * Reject packets with IPv4 compatible addresses (auto tunnel).
764 *
765 * The code forbids auto tunnel relay case in RFC1933 (the check is
766 * stronger than RFC1933). We may want to re-enable it if mech-xx
767 * is revised to forbid relaying case.
768 */
769 if (IN6_IS_ADDR_V4COMPAT(&ip6->ip6_src) ||
770 IN6_IS_ADDR_V4COMPAT(&ip6->ip6_dst)) {
771 ip6stat.ip6s_badscope++;
772 in6_ifstat_inc(inifp, ifs6_in_addrerr);
773 goto bad;
774 }
775#endif
776
	/*
	 * Naively assume we can attribute inbound data to the route we would
	 * use to send to this destination. Asymmetric routing breaks this
	 * assumption, but it still allows us to account for traffic from
	 * a remote node in the routing table.
	 * This has a very significant performance impact, so we bypass it
	 * if nstat_collect is disabled. We may also bypass it in the future
	 * if the protocol is TCP, because TCP will have a route that we can
	 * use to attribute the data to; that does mean we would not account
	 * for forwarded TCP traffic.
	 */
788 if (nstat_collect) {
789 struct rtentry *rte =
790 ifnet_cached_rtlookup_inet6(inifp, &ip6->ip6_src);
791 if (rte != NULL) {
792 nstat_route_rx(rte, 1, m->m_pkthdr.len, 0);
793 rtfree(rte);
794 }
795 }
796
797 /* for consistency */
798 m->m_pkthdr.pkt_proto = ip6->ip6_nxt;
799
800#if DUMMYNET
801check_with_pf:
802#endif /* DUMMYNET */
803#if PF
804 /* Invoke inbound packet filter */
805 if (PF_IS_ENABLED) {
806 int error;
807#if DUMMYNET
808 error = pf_af_hook(inifp, NULL, &m, AF_INET6, TRUE, &args);
809#else /* !DUMMYNET */
810 error = pf_af_hook(inifp, NULL, &m, AF_INET6, TRUE, NULL);
811#endif /* !DUMMYNET */
812 if (error != 0 || m == NULL) {
813 if (m != NULL) {
814 panic("%s: unexpected packet %p\n",
815 __func__, m);
816 /* NOTREACHED */
817 }
818 /* Already freed by callee */
819 goto done;
820 }
821 ip6 = mtod(m, struct ip6_hdr *);
822 }
823#endif /* PF */
824
825 /* drop packets if interface ID portion is already filled */
826 if (!(inifp->if_flags & IFF_LOOPBACK) &&
827 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
828 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src) &&
829 ip6->ip6_src.s6_addr16[1]) {
830 ip6stat.ip6s_badscope++;
831 goto bad;
832 }
833 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst) &&
834 ip6->ip6_dst.s6_addr16[1]) {
835 ip6stat.ip6s_badscope++;
836 goto bad;
837 }
838 }
839
840 if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
841 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src))
842 ip6->ip6_src.s6_addr16[1] =
843 htons(m->m_pkthdr.src_ifindex);
844 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst))
845 ip6->ip6_dst.s6_addr16[1] =
846 htons(m->m_pkthdr.dst_ifindex);
847 } else {
848 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src))
849 ip6->ip6_src.s6_addr16[1] = htons(inifp->if_index);
850 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst))
851 ip6->ip6_dst.s6_addr16[1] = htons(inifp->if_index);
852 }
853
854 /*
855 * Multicast check
856 */
857 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
858 struct in6_multi *in6m = NULL;
859
860 in6_ifstat_inc_na(inifp, ifs6_in_mcast);
861 /*
862 * See if we belong to the destination multicast group on the
863 * arrival interface.
864 */
865 in6_multihead_lock_shared();
866 IN6_LOOKUP_MULTI(&ip6->ip6_dst, inifp, in6m);
867 in6_multihead_lock_done();
868 if (in6m != NULL) {
869 IN6M_REMREF(in6m);
870 ours = 1;
871 } else if (!nd6_prproxy) {
872 ip6stat.ip6s_notmember++;
873 ip6stat.ip6s_cantforward++;
874 in6_ifstat_inc(inifp, ifs6_in_discard);
875 goto bad;
876 }
877 deliverifp = inifp;
878 VERIFY(ia6 == NULL);
879 goto hbhcheck;
880 }
881
882 /*
883 * Unicast check
884 *
885 * Fast path: see if the target is ourselves.
886 */
887 lck_rw_lock_shared(&in6_ifaddr_rwlock);
888 for (ia6 = in6_ifaddrs; ia6 != NULL; ia6 = ia6->ia_next) {
889 /*
890 * No reference is held on the address, as we just need
891 * to test for a few things while holding the RW lock.
892 */
893 if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &ip6->ip6_dst))
894 break;
895 }
896
897 if (ia6 != NULL) {
898 /*
899 * For performance, test without acquiring the address lock;
900 * a lot of things in the address are set once and never
901 * changed (e.g. ia_ifp.)
902 */
903 if (!(ia6->ia6_flags & (IN6_IFF_NOTREADY | IN6_IFF_CLAT46))) {
904 /* this address is ready */
905 ours = 1;
906 deliverifp = ia6->ia_ifp;
907 /*
908 * record dst address information into mbuf.
909 */
910 (void) ip6_setdstifaddr_info(m, 0, ia6);
911 lck_rw_done(&in6_ifaddr_rwlock);
912 goto hbhcheck;
913 }
914 lck_rw_done(&in6_ifaddr_rwlock);
915 ia6 = NULL;
916 /* address is not ready, so discard the packet. */
917 nd6log((LOG_INFO, "%s: packet to an unready address %s->%s\n",
918 __func__, ip6_sprintf(&ip6->ip6_src),
919 ip6_sprintf(&ip6->ip6_dst)));
920 goto bad;
921 }
922 lck_rw_done(&in6_ifaddr_rwlock);
923
924 /*
925 * Slow path: route lookup.
926 */
927 dst6 = SIN6(&rin6.ro_dst);
928 dst6->sin6_len = sizeof (struct sockaddr_in6);
929 dst6->sin6_family = AF_INET6;
930 dst6->sin6_addr = ip6->ip6_dst;
931
932 rtalloc_scoped_ign((struct route *)&rin6,
933 RTF_PRCLONING, IFSCOPE_NONE);
934 if (rin6.ro_rt != NULL)
935 RT_LOCK_SPIN(rin6.ro_rt);
936
937#define rt6_key(r) (SIN6((r)->rt_nodes->rn_key))
938
	/*
	 * Accept the packet if the forwarding interface to the destination
	 * (according to the routing table) is the loopback interface,
	 * unless the associated route has a gateway.
	 * Note that this approach causes us to accept a packet if there is
	 * a route to the loopback interface for the destination of the
	 * packet. We think this is even useful in some situations, e.g.
	 * when using a special daemon which wants to intercept the packet.
	 *
	 * XXX: some OSes automatically make a cloned route for the
	 * destination of an outgoing packet. If the outgoing interface of
	 * the packet is a loopback one, the kernel would consider the
	 * packet to be accepted, even if we have no such address assigned
	 * on the interface. We check the cloned flag of the route entry to
	 * reject such cases, assuming that route entries for our own
	 * addresses are not made by cloning (this should be true because
	 * in6_addloop explicitly installs the host route). However, we
	 * might have to do an explicit check, though it would be less
	 * efficient. Or should we rather install a reject route for such
	 * a case?
	 */
959 if (rin6.ro_rt != NULL &&
960 (rin6.ro_rt->rt_flags & (RTF_HOST|RTF_GATEWAY)) == RTF_HOST &&
961#if RTF_WASCLONED
962 !(rin6.ro_rt->rt_flags & RTF_WASCLONED) &&
963#endif
964 rin6.ro_rt->rt_ifp->if_type == IFT_LOOP) {
965 ia6 = (struct in6_ifaddr *)rin6.ro_rt->rt_ifa;
966 /*
967 * Packets to a tentative, duplicated, or somehow invalid
968 * address must not be accepted.
969 *
970 * For performance, test without acquiring the address lock;
971 * a lot of things in the address are set once and never
972 * changed (e.g. ia_ifp.)
973 */
974 if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) {
975 /* this address is ready */
976 ours = 1;
977 deliverifp = ia6->ia_ifp; /* correct? */
978 /*
979 * record dst address information into mbuf.
980 */
981 (void) ip6_setdstifaddr_info(m, 0, ia6);
982 RT_UNLOCK(rin6.ro_rt);
983 goto hbhcheck;
984 }
985 RT_UNLOCK(rin6.ro_rt);
986 ia6 = NULL;
987 /* address is not ready, so discard the packet. */
988 nd6log((LOG_INFO, "%s: packet to an unready address %s->%s\n",
989 __func__, ip6_sprintf(&ip6->ip6_src),
990 ip6_sprintf(&ip6->ip6_dst)));
991 goto bad;
992 }
993
994 if (rin6.ro_rt != NULL)
995 RT_UNLOCK(rin6.ro_rt);
996
997 /*
998 * Now there is no reason to process the packet if it's not our own
999 * and we're not a router.
1000 */
1001 if (!ip6_forwarding) {
1002 ip6stat.ip6s_cantforward++;
1003 in6_ifstat_inc(inifp, ifs6_in_discard);
		/*
		 * Raise a kernel event if a packet received on a cellular
		 * interface is not intended for the local host.
		 * For now, limit it to ICMPv6 packets.
		 */
1009 if (inifp->if_type == IFT_CELLULAR &&
1010 ip6->ip6_nxt == IPPROTO_ICMPV6)
1011 in6_ifstat_inc(inifp, ifs6_cantfoward_icmp6);
1012 goto bad;
1013 }
1014
1015hbhcheck:
	/*
	 * Record dst address information into the mbuf if we don't have it
	 * yet. Note that we are unable to record it if the address is not
	 * listed as one of our interface addresses (e.g. multicast
	 * addresses, etc.)
	 */
1021 if (deliverifp != NULL && ia6 == NULL) {
1022 ia6 = in6_ifawithifp(deliverifp, &ip6->ip6_dst);
1023 if (ia6 != NULL) {
1024 (void) ip6_setdstifaddr_info(m, 0, ia6);
1025 IFA_REMREF(&ia6->ia_ifa);
1026 }
1027 }
1028
1029 /*
1030 * Process Hop-by-Hop options header if it's contained.
1031 * m may be modified in ip6_hopopts_input().
1032 * If a JumboPayload option is included, plen will also be modified.
1033 */
1034 plen = (u_int32_t)ntohs(ip6->ip6_plen);
1035 if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
1036 struct ip6_hbh *hbh;
1037
		/*
		 * Mark the packet to imply that the HBH option has been
		 * checked. This can only be true if the packet came in
		 * unfragmented or if the option is in the first fragment.
		 */
1043 m->m_pkthdr.pkt_flags |= PKTF_HBH_CHKED;
1044 if (ip6_hopopts_input(&plen, &rtalert, &m, &off)) {
1045#if 0 /* touches NULL pointer */
1046 in6_ifstat_inc(inifp, ifs6_in_discard);
1047#endif
			goto done;	/* m has already been freed */
1049 }
1050
1051 /* adjust pointer */
1052 ip6 = mtod(m, struct ip6_hdr *);
1053
1054 /*
1055 * if the payload length field is 0 and the next header field
1056 * indicates Hop-by-Hop Options header, then a Jumbo Payload
1057 * option MUST be included.
1058 */
1059 if (ip6->ip6_plen == 0 && plen == 0) {
1060 /*
1061 * Note that if a valid jumbo payload option is
1062 * contained, ip6_hopopts_input() must set a valid
1063 * (non-zero) payload length to the variable plen.
1064 */
1065 ip6stat.ip6s_badoptions++;
1066 in6_ifstat_inc(inifp, ifs6_in_discard);
1067 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
1068 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
1069 (caddr_t)&ip6->ip6_plen - (caddr_t)ip6);
1070 goto done;
1071 }
1072 /* ip6_hopopts_input() ensures that mbuf is contiguous */
1073 hbh = (struct ip6_hbh *)(ip6 + 1);
1074 nxt = hbh->ip6h_nxt;
1075
1076 /*
1077 * If we are acting as a router and the packet contains a
1078 * router alert option, see if we know the option value.
1079 * Currently, we only support the option value for MLD, in which
1080 * case we should pass the packet to the multicast routing
1081 * daemon.
1082 */
1083 if (rtalert != ~0 && ip6_forwarding) {
1084 switch (rtalert) {
1085 case IP6OPT_RTALERT_MLD:
1086 ours = 1;
1087 break;
1088 default:
1089 /*
1090 * RFC2711 requires unrecognized values must be
1091 * silently ignored.
1092 */
1093 break;
1094 }
1095 }
1096 } else
1097 nxt = ip6->ip6_nxt;
1098
	/*
	 * Check that the amount of data in the buffers is at least as much
	 * as the IPv6 header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
1105 if (m->m_pkthdr.len - sizeof (struct ip6_hdr) < plen) {
1106 ip6stat.ip6s_tooshort++;
1107 in6_ifstat_inc(inifp, ifs6_in_truncated);
1108 goto bad;
1109 }
1110 if (m->m_pkthdr.len > sizeof (struct ip6_hdr) + plen) {
1111 ip6_input_adjust(m, ip6, plen, inifp);
1112 }
1113
1114 /*
1115 * Forward if desirable.
1116 */
1117 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
1118 if (!ours && nd6_prproxy) {
1119 /*
1120 * If this isn't for us, this might be a Neighbor
1121 * Solicitation (dst is solicited-node multicast)
1122 * against an address in one of the proxied prefixes;
1123 * if so, claim the packet and let icmp6_input()
1124 * handle the rest.
1125 */
1126 ours = nd6_prproxy_isours(m, ip6, NULL, IFSCOPE_NONE);
1127 VERIFY(!ours ||
1128 (m->m_pkthdr.pkt_flags & PKTF_PROXY_DST));
1129 }
1130 if (!ours)
1131 goto bad;
1132 } else if (!ours) {
		/*
		 * The unicast forwarding function might return the packet
		 * if we are proxying prefix(es), and if the packet is an
		 * ICMPv6 packet that has failed the zone checks, but is
		 * targeted towards a proxied address (this is optimized by
		 * way of RTF_PROXY test.) If so, claim the packet as ours
		 * and let icmp6_input() handle the rest. The packet's hop
		 * limit value is kept intact (it's not decremented). This
		 * is for supporting Neighbor Unreachability Detection between
		 * proxied nodes on different links (src is link-local, dst
		 * is target address.)
		 */
1145 if ((m = ip6_forward(m, &rin6, 0)) == NULL)
1146 goto done;
1147 VERIFY(rin6.ro_rt != NULL);
1148 VERIFY(m->m_pkthdr.pkt_flags & PKTF_PROXY_DST);
1149 deliverifp = rin6.ro_rt->rt_ifp;
1150 ours = 1;
1151 }
1152
1153 ip6 = mtod(m, struct ip6_hdr *);
1154
1155 /*
1156 * Malicious party may be able to use IPv4 mapped addr to confuse
1157 * tcp/udp stack and bypass security checks (act as if it was from
1158 * 127.0.0.1 by using IPv6 src ::ffff:127.0.0.1). Be cautious.
1159 *
1160 * For SIIT end node behavior, you may want to disable the check.
1161 * However, you will become vulnerable to attacks using IPv4 mapped
1162 * source.
1163 */
1164 if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
1165 IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
1166 ip6stat.ip6s_badscope++;
1167 in6_ifstat_inc(inifp, ifs6_in_addrerr);
1168 goto bad;
1169 }
1170
1171 /*
1172 * Tell launch routine the next header
1173 */
1174 ip6stat.ip6s_delivered++;
1175 in6_ifstat_inc_na(deliverifp, ifs6_in_deliver);
1176
1177injectit:
1178 nest = 0;
1179
1180 /*
1181 * Perform IP header alignment fixup again, if needed. Note that
1182 * we do it once for the outermost protocol, and we assume each
1183 * protocol handler wouldn't mess with the alignment afterwards.
1184 */
1185 IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return);
1186
1187 while (nxt != IPPROTO_DONE) {
1188 struct ipfilter *filter;
1189 int (*pr_input)(struct mbuf **, int *, int);
1190
1191 /*
1192 * This would imply either IPPROTO_HOPOPTS was not the first
1193 * option or it did not come in the first fragment.
1194 */
1195 if (nxt == IPPROTO_HOPOPTS &&
1196 (m->m_pkthdr.pkt_flags & PKTF_HBH_CHKED) == 0) {
1197 /*
1198 * This implies that HBH option was not contained
1199 * in the first fragment
1200 */
1201 ip6stat.ip6s_badoptions++;
1202 goto bad;
1203 }
1204
1205 if (ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
1206 ip6stat.ip6s_toomanyhdr++;
1207 goto bad;
1208 }
1209
1210 /*
1211 * protection against faulty packet - there should be
1212 * more sanity checks in header chain processing.
1213 */
1214 if (m->m_pkthdr.len < off) {
1215 ip6stat.ip6s_tooshort++;
1216 in6_ifstat_inc(inifp, ifs6_in_truncated);
1217 goto bad;
1218 }
1219
1220#if IPSEC
		/*
		 * Enforce IPsec policy checking if we are seeing the last
		 * header. Note that we do not visit this for protocols with
		 * pcb-layer code, like UDP/TCP/raw IP.
		 */
1226 if ((ipsec_bypass == 0) &&
1227 (ip6_protox[nxt]->pr_flags & PR_LASTHDR) != 0) {
1228 if (ipsec6_in_reject(m, NULL)) {
1229 IPSEC_STAT_INCREMENT(ipsec6stat.in_polvio);
1230 goto bad;
1231 }
1232 }
1233#endif /* IPSEC */
1234
1235 /*
1236 * Call IP filter
1237 */
1238 if (!TAILQ_EMPTY(&ipv6_filters) && !IFNET_IS_INTCOPROC(inifp)) {
1239 ipf_ref();
1240 TAILQ_FOREACH(filter, &ipv6_filters, ipf_link) {
1241 if (seen == 0) {
1242 if ((struct ipfilter *)inject_ipfref ==
1243 filter)
1244 seen = 1;
1245 } else if (filter->ipf_filter.ipf_input) {
1246 errno_t result;
1247
1248 result = filter->ipf_filter.ipf_input(
1249 filter->ipf_filter.cookie,
1250 (mbuf_t *)&m, off, nxt);
1251 if (result == EJUSTRETURN) {
1252 ipf_unref();
1253 goto done;
1254 }
1255 if (result != 0) {
1256 ipf_unref();
1257 goto bad;
1258 }
1259 }
1260 }
1261 ipf_unref();
1262 }
1263
1264 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1265 struct ip6_hdr *, ip6, struct ifnet *, inifp,
1266 struct ip *, NULL, struct ip6_hdr *, ip6);
1267
1268 if ((pr_input = ip6_protox[nxt]->pr_input) == NULL) {
1269 m_freem(m);
1270 m = NULL;
1271 nxt = IPPROTO_DONE;
1272 } else if (!(ip6_protox[nxt]->pr_flags & PR_PROTOLOCK)) {
1273 lck_mtx_lock(inet6_domain_mutex);
1274 nxt = pr_input(&m, &off, nxt);
1275 lck_mtx_unlock(inet6_domain_mutex);
1276 } else {
1277 nxt = pr_input(&m, &off, nxt);
1278 }
1279 }
1280done:
1281 ROUTE_RELEASE(&rin6);
1282 return;
1283bad:
1284 m_freem(m);
1285 goto done;
1286}
1287
1288void
1289ip6_setsrcifaddr_info(struct mbuf *m, uint32_t src_idx, struct in6_ifaddr *ia6)
1290{
1291 VERIFY(m->m_flags & M_PKTHDR);
1292
1293 /*
1294 * If the source ifaddr is specified, pick up the information
1295 * from there; otherwise just grab the passed-in ifindex as the
1296 * caller may not have the ifaddr available.
1297 */
1298 if (ia6 != NULL) {
1299 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1300 m->m_pkthdr.src_ifindex = ia6->ia_ifp->if_index;
1301
1302 /* See IN6_IFF comments in in6_var.h */
1303 m->m_pkthdr.src_iff = (ia6->ia6_flags & 0xffff);
1304 } else {
1305 m->m_pkthdr.src_iff = 0;
1306 m->m_pkthdr.src_ifindex = src_idx;
1307 if (src_idx != 0)
1308 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1309 }
1310}
1311
1312void
1313ip6_setdstifaddr_info(struct mbuf *m, uint32_t dst_idx, struct in6_ifaddr *ia6)
1314{
1315 VERIFY(m->m_flags & M_PKTHDR);
1316
1317 /*
1318 * If the destination ifaddr is specified, pick up the information
1319 * from there; otherwise just grab the passed-in ifindex as the
1320 * caller may not have the ifaddr available.
1321 */
1322 if (ia6 != NULL) {
1323 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1324 m->m_pkthdr.dst_ifindex = ia6->ia_ifp->if_index;
1325
1326 /* See IN6_IFF comments in in6_var.h */
1327 m->m_pkthdr.dst_iff = (ia6->ia6_flags & 0xffff);
1328 } else {
1329 m->m_pkthdr.dst_iff = 0;
1330 m->m_pkthdr.dst_ifindex = dst_idx;
1331 if (dst_idx != 0)
1332 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1333 }
1334}
1335
1336int
1337ip6_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *ia6f)
1338{
1339 VERIFY(m->m_flags & M_PKTHDR);
1340
1341 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO))
1342 return (-1);
1343
1344 if (src_idx != NULL)
1345 *src_idx = m->m_pkthdr.src_ifindex;
1346
1347 if (ia6f != NULL)
1348 *ia6f = m->m_pkthdr.src_iff;
1349
1350 return (0);
1351}
1352
1353int
1354ip6_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *ia6f)
1355{
1356 VERIFY(m->m_flags & M_PKTHDR);
1357
1358 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO))
1359 return (-1);
1360
1361 if (dst_idx != NULL)
1362 *dst_idx = m->m_pkthdr.dst_ifindex;
1363
1364 if (ia6f != NULL)
1365 *ia6f = m->m_pkthdr.dst_iff;
1366
1367 return (0);
1368}
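
/*
 * Usage sketch (hypothetical caller): the source/destination ifaddr info
 * recorded above travels with the mbuf, so an upper layer can recover the
 * delivery interface and address flags without redoing the lookup:
 *
 *	uint32_t dst_idx, ia6_flags;
 *
 *	if (ip6_getdstifaddr_info(m, &dst_idx, &ia6_flags) == 0 &&
 *	    (ia6_flags & IN6_IFF_TEMPORARY)) {
 *		// packet was delivered to a temporary (privacy) address
 *	}
 *
 * A return of -1 simply means no PKTF_IFAINFO metadata was attached.
 */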
1369
1370/*
1371 * Hop-by-Hop options header processing. If a valid jumbo payload option is
1372 * included, the real payload length will be stored in plenp.
1373 */
1374static int
1375ip6_hopopts_input(uint32_t *plenp, uint32_t *rtalertp, struct mbuf **mp,
1376 int *offp)
1377{
1378 struct mbuf *m = *mp;
1379 int off = *offp, hbhlen;
1380 struct ip6_hbh *hbh;
1381 u_int8_t *opt;
1382
1383 /* validation of the length of the header */
1384 IP6_EXTHDR_CHECK(m, off, sizeof (*hbh), return (-1));
1385 hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
1386 hbhlen = (hbh->ip6h_len + 1) << 3;
1387
1388 IP6_EXTHDR_CHECK(m, off, hbhlen, return (-1));
1389 hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
1390 off += hbhlen;
1391 hbhlen -= sizeof (struct ip6_hbh);
1392 opt = (u_int8_t *)hbh + sizeof (struct ip6_hbh);
1393
1394 if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof (struct ip6_hbh),
1395 hbhlen, rtalertp, plenp) < 0)
1396 return (-1);
1397
1398 *offp = off;
1399 *mp = m;
1400 return (0);
1401}
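
/*
 * For reference, the Hop-by-Hop header length computation above follows
 * RFC 2460: ip6h_len counts 8-octet units beyond the first 8 octets, so
 *
 *	hbhlen = (hbh->ip6h_len + 1) << 3;
 *
 * e.g. ip6h_len == 0 means an 8-byte header (2 bytes of next-header/length
 * plus 6 bytes of options/padding), ip6h_len == 1 means 16 bytes, and so on.
 */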
1402
/*
 * Search the header for all Hop-by-Hop options and process each option.
 * This function is separate from ip6_hopopts_input() in order to
 * handle the case where the sending node itself processes its hop-by-hop
 * options header. In such a case, the function is called from ip6_output().
 *
 * The function assumes that the hbh header is located right after the IPv6
 * header (RFC 2460 p7), that opthead is a pointer into the data content of m,
 * and that the region from opthead to opthead + hbhlen is contiguous.
 */
1413int
1414ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen,
1415 u_int32_t *rtalertp, u_int32_t *plenp)
1416{
1417 struct ip6_hdr *ip6;
1418 int optlen = 0;
1419 u_int8_t *opt = opthead;
1420 u_int16_t rtalert_val;
1421 u_int32_t jumboplen;
1422 const int erroff = sizeof (struct ip6_hdr) + sizeof (struct ip6_hbh);
1423
1424 for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) {
1425 switch (*opt) {
1426 case IP6OPT_PAD1:
1427 optlen = 1;
1428 break;
1429 case IP6OPT_PADN:
1430 if (hbhlen < IP6OPT_MINLEN) {
1431 ip6stat.ip6s_toosmall++;
1432 goto bad;
1433 }
1434 optlen = *(opt + 1) + 2;
1435 break;
1436 case IP6OPT_ROUTER_ALERT:
1437 /* XXX may need check for alignment */
1438 if (hbhlen < IP6OPT_RTALERT_LEN) {
1439 ip6stat.ip6s_toosmall++;
1440 goto bad;
1441 }
1442 if (*(opt + 1) != IP6OPT_RTALERT_LEN - 2) {
1443 /* XXX stat */
1444 icmp6_error(m, ICMP6_PARAM_PROB,
1445 ICMP6_PARAMPROB_HEADER,
1446 erroff + opt + 1 - opthead);
1447 return (-1);
1448 }
1449 optlen = IP6OPT_RTALERT_LEN;
1450 bcopy((caddr_t)(opt + 2), (caddr_t)&rtalert_val, 2);
1451 *rtalertp = ntohs(rtalert_val);
1452 break;
1453 case IP6OPT_JUMBO:
1454 /* XXX may need check for alignment */
1455 if (hbhlen < IP6OPT_JUMBO_LEN) {
1456 ip6stat.ip6s_toosmall++;
1457 goto bad;
1458 }
1459 if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) {
1460 /* XXX stat */
1461 icmp6_error(m, ICMP6_PARAM_PROB,
1462 ICMP6_PARAMPROB_HEADER,
1463 erroff + opt + 1 - opthead);
1464 return (-1);
1465 }
1466 optlen = IP6OPT_JUMBO_LEN;
1467
			/*
			 * IPv6 packets that have a non-zero payload length
			 * must not contain a jumbo payload option.
			 */
1472 ip6 = mtod(m, struct ip6_hdr *);
1473 if (ip6->ip6_plen) {
1474 ip6stat.ip6s_badoptions++;
1475 icmp6_error(m, ICMP6_PARAM_PROB,
1476 ICMP6_PARAMPROB_HEADER,
1477 erroff + opt - opthead);
1478 return (-1);
1479 }
1480
1481 /*
1482 * We may see jumbolen in unaligned location, so
1483 * we'd need to perform bcopy().
1484 */
1485 bcopy(opt + 2, &jumboplen, sizeof (jumboplen));
1486 jumboplen = (u_int32_t)htonl(jumboplen);
1487
1488#if 1
			/*
			 * If there are multiple jumbo payload options,
			 * *plenp will be non-zero and the packet will be
			 * rejected.
			 * The behavior may need some debate in ipngwg:
			 * multiple options do not make sense; however,
			 * there is no explicit mention in the specification.
			 */
1497 if (*plenp != 0) {
1498 ip6stat.ip6s_badoptions++;
1499 icmp6_error(m, ICMP6_PARAM_PROB,
1500 ICMP6_PARAMPROB_HEADER,
1501 erroff + opt + 2 - opthead);
1502 return (-1);
1503 }
1504#endif
1505
1506 /*
1507 * jumbo payload length must be larger than 65535.
1508 */
1509 if (jumboplen <= IPV6_MAXPACKET) {
1510 ip6stat.ip6s_badoptions++;
1511 icmp6_error(m, ICMP6_PARAM_PROB,
1512 ICMP6_PARAMPROB_HEADER,
1513 erroff + opt + 2 - opthead);
1514 return (-1);
1515 }
1516 *plenp = jumboplen;
1517
1518 break;
1519 default: /* unknown option */
1520 if (hbhlen < IP6OPT_MINLEN) {
1521 ip6stat.ip6s_toosmall++;
1522 goto bad;
1523 }
1524 optlen = ip6_unknown_opt(opt, m,
1525 erroff + opt - opthead);
1526 if (optlen == -1) {
1527 return (-1);
1528 }
1529 optlen += 2;
1530 break;
1531 }
1532 }
1533
1534 return (0);
1535
1536bad:
1537 m_freem(m);
1538 return (-1);
1539}
1540
/*
 * Unknown option processing.
 * The third argument `off' is the offset from the IPv6 header to the option,
 * which is needed to return an ICMPv6 error even when the IPv6 header and the
 * option header are not contiguous.
 */
1547int
1548ip6_unknown_opt(uint8_t *optp, struct mbuf *m, int off)
1549{
1550 struct ip6_hdr *ip6;
1551
1552 switch (IP6OPT_TYPE(*optp)) {
1553 case IP6OPT_TYPE_SKIP: /* ignore the option */
1554 return ((int)*(optp + 1));
1555
1556 case IP6OPT_TYPE_DISCARD: /* silently discard */
1557 m_freem(m);
1558 return (-1);
1559
1560 case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */
1561 ip6stat.ip6s_badoptions++;
1562 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off);
1563 return (-1);
1564
1565 case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */
1566 ip6stat.ip6s_badoptions++;
1567 ip6 = mtod(m, struct ip6_hdr *);
1568 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1569 (m->m_flags & (M_BCAST|M_MCAST))) {
1570 m_freem(m);
1571 } else {
1572 icmp6_error(m, ICMP6_PARAM_PROB,
1573 ICMP6_PARAMPROB_OPTION, off);
1574 }
1575 return (-1);
1576 }
1577
1578 m_freem(m); /* XXX: NOTREACHED */
1579 return (-1);
1580}
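
/*
 * For reference, IP6OPT_TYPE() extracts the two high-order bits of the
 * option type, which encode the required action for an unrecognized option
 * (RFC 2460, section 4.2):
 *
 *	00  skip over the option		(IP6OPT_TYPE_SKIP)
 *	01  silently discard the packet		(IP6OPT_TYPE_DISCARD)
 *	10  discard and always send ICMPv6	(IP6OPT_TYPE_FORCEICMP)
 *	11  discard, ICMPv6 only if unicast	(IP6OPT_TYPE_ICMP)
 *
 * e.g. an unrecognized Jumbo Payload option (type 0xC2) has high bits 11,
 * so a Parameter Problem is sent unless the destination was multicast.
 */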
1581
/*
 * Create the "control" list for this pcb.
 * These functions will not modify the mbuf chain at all.
 *
 * With the KAME mbuf chain restriction:
 * The routine will be called from upper-layer handlers like tcp6_input().
 * Thus the routine assumes that the caller (tcp6_input) has already
 * called IP6_EXTHDR_CHECK() and that all the extension headers are located
 * in the very first mbuf on the mbuf chain.
 *
 * ip6_savecontrol_v4 will handle those options that can be set on a
 * v4-mapped socket.
 * ip6_savecontrol will directly call ip6_savecontrol_v4 to handle those
 * options and will handle the v6-only ones itself.
 */
1597struct mbuf **
1598ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp,
1599 int *v4only)
1600{
1601 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1602
1603 if ((inp->inp_socket->so_options & SO_TIMESTAMP) != 0) {
1604 struct timeval tv;
1605
1606 getmicrotime(&tv);
1607 mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof (tv),
1608 SCM_TIMESTAMP, SOL_SOCKET, mp);
1609 if (*mp == NULL)
1610 return (NULL);
1611 }
1612 if ((inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
1613 uint64_t time;
1614
1615 time = mach_absolute_time();
1616 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof (time),
1617 SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp);
1618 if (*mp == NULL)
1619 return (NULL);
1620 }
1621 if ((inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
1622 uint64_t time;
1623
1624 time = mach_continuous_time();
1625 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof (time),
1626 SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp);
1627 if (*mp == NULL)
1628 return (NULL);
1629 }
1630 if ((inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) != 0) {
1631 int tc = m_get_traffic_class(m);
1632
1633 mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof (tc),
1634 SO_TRAFFIC_CLASS, SOL_SOCKET, mp);
1635 if (*mp == NULL)
1636 return (NULL);
1637 }
1638
1639#define IS2292(inp, x, y) (((inp)->inp_flags & IN6P_RFC2292) ? (x) : (y))
1640 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
1641 if (v4only != NULL) {
1642 *v4only = 1;
1643 }
1644
1645 // Send ECN flags for v4-mapped addresses
1646 if ((inp->inp_flags & IN6P_TCLASS) != 0) {
1647 struct ip *ip_header = mtod(m, struct ip *);
1648 u_int8_t tos = (ip_header->ip_tos & IPTOS_ECN_MASK);
1649
1650 mp = sbcreatecontrol_mbuf((caddr_t)&tos, sizeof(tos),
1651 IPV6_TCLASS, IPPROTO_IPV6, mp);
1652 if (*mp == NULL)
1653 return (NULL);
1654 }
1655
1656 // Send IN6P_PKTINFO for v4-mapped address
1657 if ((inp->inp_flags & IN6P_PKTINFO) != 0) {
1658 struct in6_pktinfo pi6 = {
1659 .ipi6_addr = IN6ADDR_V4MAPPED_INIT,
1660 .ipi6_ifindex = (m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0,
1661 };
1662
1663 struct ip *ip_header = mtod(m, struct ip *);
1664 bcopy(&ip_header->ip_dst, &pi6.ipi6_addr.s6_addr32[3], sizeof(struct in_addr));
1665
1666 mp = sbcreatecontrol_mbuf((caddr_t)&pi6,
1667 sizeof (struct in6_pktinfo),
1668 IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO),
1669 IPPROTO_IPV6, mp);
1670 if (*mp == NULL)
1671 return (NULL);
1672 }
1673 return (mp);
1674 }
1675
1676 /* RFC 2292 sec. 5 */
1677 if ((inp->inp_flags & IN6P_PKTINFO) != 0) {
1678 struct in6_pktinfo pi6;
1679
1680 bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof (struct in6_addr));
1681 in6_clearscope(&pi6.ipi6_addr); /* XXX */
1682 pi6.ipi6_ifindex =
1683 (m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0;
1684
1685 mp = sbcreatecontrol_mbuf((caddr_t)&pi6,
1686 sizeof (struct in6_pktinfo),
1687 IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO),
1688 IPPROTO_IPV6, mp);
1689 if (*mp == NULL)
1690 return (NULL);
1691 }
1692
1693 if ((inp->inp_flags & IN6P_HOPLIMIT) != 0) {
1694 int hlim = ip6->ip6_hlim & 0xff;
1695
1696 mp = sbcreatecontrol_mbuf((caddr_t)&hlim, sizeof (int),
1697 IS2292(inp, IPV6_2292HOPLIMIT, IPV6_HOPLIMIT),
1698 IPPROTO_IPV6, mp);
1699 if (*mp == NULL)
1700 return (NULL);
1701 }
1702
1703 if (v4only != NULL)
1704 *v4only = 0;
1705 return (mp);
1706}
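
/*
 * Usage note (illustrative): ip6_savecontrol() below calls this first for
 * the options that also apply to v4-mapped traffic, then appends the
 * v6-only ancillary items itself. A receiver typically opts in with
 * something like:
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof (on));
 *
 * which sets IN6P_PKTINFO on the inpcb and makes the IPV6_PKTINFO cmsg
 * above appear on each recvmsg().
 */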
1707
1708int
1709ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp)
1710{
1711 struct mbuf **np;
1712 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1713 int v4only = 0;
1714
1715 *mp = NULL;
1716 np = ip6_savecontrol_v4(in6p, m, mp, &v4only);
1717 if (np == NULL)
1718 goto no_mbufs;
1719
1720 mp = np;
1721 if (v4only)
1722 return (0);
1723
1724 if ((in6p->inp_flags & IN6P_TCLASS) != 0) {
1725 u_int32_t flowinfo;
1726 int tclass;
1727
1728 flowinfo = (u_int32_t)ntohl(ip6->ip6_flow & IPV6_FLOWINFO_MASK);
1729 flowinfo >>= 20;
1730
1731 tclass = flowinfo & 0xff;
1732 mp = sbcreatecontrol_mbuf((caddr_t)&tclass, sizeof (tclass),
1733 IPV6_TCLASS, IPPROTO_IPV6, mp);
1734 if (*mp == NULL)
1735 goto no_mbufs;
1736 }
1737
1738 /*
1739 * IPV6_HOPOPTS socket option. Recall that we required super-user
1740 * privilege for the option (see ip6_ctloutput), but it might be too
1741 * strict, since there might be some hop-by-hop options which can be
1742 * returned to normal user.
1743 * See also RFC 2292 section 6 (or RFC 3542 section 8).
1744 */
1745 if ((in6p->inp_flags & IN6P_HOPOPTS) != 0) {
		/*
		 * Check if a hop-by-hop options header is contained in the
		 * received packet, and if so, store the options as ancillary
		 * data. Note that a hop-by-hop options header must be
		 * just after the IPv6 header, which is assured through the
		 * IPv6 input processing.
		 */
1753 ip6 = mtod(m, struct ip6_hdr *);
1754 if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
1755 struct ip6_hbh *hbh;
1756 int hbhlen = 0;
1757 hbh = (struct ip6_hbh *)(ip6 + 1);
1758 hbhlen = (hbh->ip6h_len + 1) << 3;
1759
1760 /*
1761 * XXX: We copy the whole header even if a
1762 * jumbo payload option is included, the option which
1763 * is to be removed before returning according to
1764 * RFC2292.
1765 * Note: this constraint is removed in RFC3542
1766 */
1767 mp = sbcreatecontrol_mbuf((caddr_t)hbh, hbhlen,
1768 IS2292(in6p, IPV6_2292HOPOPTS, IPV6_HOPOPTS),
1769 IPPROTO_IPV6, mp);
1770
1771 if (*mp == NULL) {
1772 goto no_mbufs;
1773 }
1774 }
1775 }

	if ((in6p->inp_flags & (IN6P_RTHDR | IN6P_DSTOPTS)) != 0) {
		int nxt = ip6->ip6_nxt, off = sizeof (struct ip6_hdr);

		/*
		 * Search the header chain for destination options headers
		 * and routing header(s), and store each one found as
		 * ancillary data.
		 * Note that the headers keep their original order in the
		 * chain of ancillary data.
		 */
		while (1) {	/* is explicit loop prevention necessary? */
			struct ip6_ext *ip6e = NULL;
			int elen;

			/*
			 * if it is not an extension header, don't try to
			 * pull it from the chain.
			 */
			switch (nxt) {
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
			case IPPROTO_HOPOPTS:
			case IPPROTO_AH: /* is it possible? */
				break;
			default:
				goto loopend;
			}

			if (off + sizeof (*ip6e) > m->m_len)
				goto loopend;
			ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + off);
			if (nxt == IPPROTO_AH)
				elen = (ip6e->ip6e_len + 2) << 2;
			else
				elen = (ip6e->ip6e_len + 1) << 3;
			if (off + elen > m->m_len)
				goto loopend;

			switch (nxt) {
			case IPPROTO_DSTOPTS:
				if (!(in6p->inp_flags & IN6P_DSTOPTS))
					break;

				mp = sbcreatecontrol_mbuf((caddr_t)ip6e, elen,
				    IS2292(in6p, IPV6_2292DSTOPTS,
				    IPV6_DSTOPTS), IPPROTO_IPV6, mp);
				if (*mp == NULL) {
					goto no_mbufs;
				}
				break;
			case IPPROTO_ROUTING:
				if (!(in6p->inp_flags & IN6P_RTHDR))
					break;

				mp = sbcreatecontrol_mbuf((caddr_t)ip6e, elen,
				    IS2292(in6p, IPV6_2292RTHDR, IPV6_RTHDR),
				    IPPROTO_IPV6, mp);
				if (*mp == NULL) {
					goto no_mbufs;
				}
				break;
			case IPPROTO_HOPOPTS:
			case IPPROTO_AH: /* is it possible? */
				break;

			default:
				/*
				 * Other cases have been filtered out above,
				 * so nothing should reach this case.  The
				 * code is kept just as a safety net (e.g. in
				 * case nxt was overwritten).
				 */
				goto loopend;

			}

			/* proceed with the next header. */
			off += elen;
			nxt = ip6e->ip6e_nxt;
			ip6e = NULL;
		}
loopend:
		;
	}
	return (0);
no_mbufs:
	/* count the failure to allocate ancillary data */
	ip6stat.ip6s_pktdropcntrl++;
	return (ENOBUFS);
}
#undef IS2292

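/*
 * Deliver a path MTU notification to a socket that has the IN6P_MTU
 * option set, as IPV6_PATHMTU ancillary data appended to its receive
 * buffer.  The notification is suppressed for unconnected TCP sockets,
 * and for connected sockets whose peer address does not match `dst'.
 */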
void
ip6_notify_pmtu(struct inpcb *in6p, struct sockaddr_in6 *dst, u_int32_t *mtu)
{
	struct socket *so;
	struct mbuf *m_mtu;
	struct ip6_mtuinfo mtuctl;

	so = in6p->inp_socket;

	if ((in6p->inp_flags & IN6P_MTU) == 0)
		return;

	if (mtu == NULL)
		return;

#ifdef DIAGNOSTIC
	if (so == NULL) {	/* I believe this is impossible */
		panic("ip6_notify_pmtu: socket is NULL");
		/* NOTREACHED */
	}
#endif

	if (IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) &&
	    (so->so_proto == NULL || so->so_proto->pr_protocol == IPPROTO_TCP))
		return;

	if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) &&
	    !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &dst->sin6_addr))
		return;

	bzero(&mtuctl, sizeof (mtuctl));	/* zero-clear for safety */
	mtuctl.ip6m_mtu = *mtu;
	mtuctl.ip6m_addr = *dst;
	if (sa6_recoverscope(&mtuctl.ip6m_addr, TRUE))
		return;

	if ((m_mtu = sbcreatecontrol((caddr_t)&mtuctl, sizeof (mtuctl),
	    IPV6_PATHMTU, IPPROTO_IPV6)) == NULL)
		return;

	if (sbappendaddr(&so->so_rcv, SA(dst), NULL, m_mtu, NULL) == 0) {
		m_freem(m_mtu);
		/* XXX: should count statistics */
	} else {
		sorwakeup(so);
	}
}

/*
 * Get a pointer to the next-header field of the header that precedes
 * the one currently being processed (i.e. the one starting at OFF).
 * XXX: This function assumes that
 *	M includes all headers,
 *	the next-header field and the header length field of each header
 *	are valid, and
 *	the sum of the header lengths equals OFF.
 * Because of these assumptions, this function must be called very
 * carefully.  Moreover, it will not be needed once a neater mechanism
 * to process extension headers is developed.
 */
char *
ip6_get_prevhdr(struct mbuf *m, int off)
{
	struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

	if (off == sizeof (struct ip6_hdr)) {
		return ((char *)&ip6->ip6_nxt);
	} else {
		int len, nxt;
		struct ip6_ext *ip6e = NULL;

		nxt = ip6->ip6_nxt;
		len = sizeof (struct ip6_hdr);
		while (len < off) {
			ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + len);

			switch (nxt) {
			case IPPROTO_FRAGMENT:
				len += sizeof (struct ip6_frag);
				break;
			case IPPROTO_AH:
				len += (ip6e->ip6e_len + 2) << 2;
				break;
			default:
				len += (ip6e->ip6e_len + 1) << 3;
				break;
			}
			nxt = ip6e->ip6e_nxt;
		}
		if (ip6e)
			return ((char *)&ip6e->ip6e_nxt);
		else
			return (NULL);
	}
}

/*
 * Get the offset of the header following the header of type `proto'
 * that starts at `off'.  The protocol of the following header is stored
 * through `nxtp'.  Returns the new offset, or -1 if the chain cannot be
 * parsed any further.  m will be retained.
 */
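/*
 * For example, a caller that knows the mbuf data starts with the IPv6
 * header can step past it with:
 *
 *	int nxt;
 *	int off = ip6_nexthdr(m, 0, IPPROTO_IPV6, &nxt);
 *
 * where `off' becomes the offset of the following header and `nxt' its
 * protocol number, or -1 if the chain cannot be parsed.
 */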
int
ip6_nexthdr(struct mbuf *m, int off, int proto, int *nxtp)
{
	struct ip6_hdr ip6;
	struct ip6_ext ip6e;
	struct ip6_frag fh;

	/* just in case */
	VERIFY(m != NULL);
	if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off)
		return (-1);

	switch (proto) {
	case IPPROTO_IPV6:
		if (m->m_pkthdr.len < off + sizeof (ip6))
			return (-1);
		m_copydata(m, off, sizeof (ip6), (caddr_t)&ip6);
		if (nxtp)
			*nxtp = ip6.ip6_nxt;
		off += sizeof (ip6);
		return (off);

	case IPPROTO_FRAGMENT:
		/*
		 * Terminate parsing if this is not the first fragment;
		 * it does not make sense to parse through it.
		 */
		if (m->m_pkthdr.len < off + sizeof (fh))
			return (-1);
		m_copydata(m, off, sizeof (fh), (caddr_t)&fh);
		/* IP6F_OFF_MASK = 0xfff8(BigEndian), 0xf8ff(LittleEndian) */
		if (fh.ip6f_offlg & IP6F_OFF_MASK)
			return (-1);
		if (nxtp)
			*nxtp = fh.ip6f_nxt;
		off += sizeof (struct ip6_frag);
		return (off);

	case IPPROTO_AH:
		if (m->m_pkthdr.len < off + sizeof (ip6e))
			return (-1);
		m_copydata(m, off, sizeof (ip6e), (caddr_t)&ip6e);
		if (nxtp)
			*nxtp = ip6e.ip6e_nxt;
		off += (ip6e.ip6e_len + 2) << 2;
		return (off);

	case IPPROTO_HOPOPTS:
	case IPPROTO_ROUTING:
	case IPPROTO_DSTOPTS:
		if (m->m_pkthdr.len < off + sizeof (ip6e))
			return (-1);
		m_copydata(m, off, sizeof (ip6e), (caddr_t)&ip6e);
		if (nxtp)
			*nxtp = ip6e.ip6e_nxt;
		off += (ip6e.ip6e_len + 1) << 3;
		return (off);

	case IPPROTO_NONE:
	case IPPROTO_ESP:
	case IPPROTO_IPCOMP:
		/* give up */
		return (-1);

	default:
		return (-1);
	}
}

/*
 * Get the offset of the last header in the chain.  m will be kept intact.
 */
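/*
 * For example, starting from the IPv6 header at offset 0:
 *
 *	int nxt = -1;
 *	int off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
 *
 * leaves `off' at the last header ip6_nexthdr() can step to (normally
 * the upper-layer header), with its protocol number stored in `nxt'.
 */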
int
ip6_lasthdr(struct mbuf *m, int off, int proto, int *nxtp)
{
	int newoff;
	int nxt;

	if (!nxtp) {
		nxt = -1;
		nxtp = &nxt;
	}
	while (1) {
		newoff = ip6_nexthdr(m, off, proto, nxtp);
		if (newoff < 0)
			return (off);
		else if (newoff < off)
			return (-1);	/* invalid */
		else if (newoff == off)
			return (newoff);

		off = newoff;
		proto = *nxtp;
	}
}

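/*
 * Per-packet auxiliary data (struct ip6aux) is carried on the mbuf as an
 * m_tag of type KERNEL_TAG_TYPE_INET6.  ip6_addaux() returns the existing
 * ip6aux or attaches a new one, ip6_findaux() only looks it up, and
 * ip6_delaux() removes it.  The first two return a pointer to the payload
 * area that immediately follows the m_tag header, or NULL.
 */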
struct ip6aux *
ip6_addaux(struct mbuf *m)
{
	struct m_tag *tag;

	/* Check if one is already allocated */
	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_INET6, NULL);
	if (tag == NULL) {
		/* Allocate a tag */
		tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_INET6,
		    sizeof (struct ip6aux), M_DONTWAIT, m);

		/* Attach it to the mbuf */
		if (tag) {
			m_tag_prepend(m, tag);
		}
	}

	return (tag ? (struct ip6aux *)(tag + 1) : NULL);
}

struct ip6aux *
ip6_findaux(struct mbuf *m)
{
	struct m_tag *tag;

	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_INET6, NULL);

	return (tag ? (struct ip6aux *)(tag + 1) : NULL);
}

void
ip6_delaux(struct mbuf *m)
{
	struct m_tag *tag;

	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_INET6, NULL);
	if (tag) {
		m_tag_delete(m, tag);
	}
}

/*
 * Drain callback
 */
void
ip6_drain(void)
{
	frag6_drain();		/* fragments */
	in6_rtqdrain();		/* protocol cloned routes */
	nd6_drain(NULL);	/* cloned routes: ND6 */
}

/*
 * System control for IP6
 */

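/*
 * Map PRC_* control-input codes to the errno value reported to the
 * transport protocols; a zero entry means the event is not treated
 * as an error.
 */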
u_char inet6ctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		0,		0,
	ENOPROTOOPT
};

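/*
 * Sysctl handler that enables or disables IPv6 input-path performance
 * measurement; the measurement state is (re)initialized whenever the
 * knob transitions from off to on.
 */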
static int
sysctl_reset_ip6_input_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, i;

	i = ip6_input_measure;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || req->newptr == USER_ADDR_NULL)
		goto done;
	/* impose bounds */
	if (i < 0 || i > 1) {
		error = EINVAL;
		goto done;
	}
	if (ip6_input_measure != i && i == 1) {
		net_perf_initialize(&net_perf, ip6_input_measure_bins);
	}
	ip6_input_measure = i;
done:
	return (error);
}

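/*
 * Sysctl handler for the histogram bin setting used by the IPv6
 * input-path performance measurement; values rejected by
 * net_perf_validate_bins() return EINVAL.
 */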
static int
sysctl_ip6_input_measure_bins SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	uint64_t i;

	i = ip6_input_measure_bins;
	error = sysctl_handle_quad(oidp, &i, 0, req);
	if (error || req->newptr == USER_ADDR_NULL)
		goto done;
	/* validate data */
	if (!net_perf_validate_bins(i)) {
		error = EINVAL;
		goto done;
	}
	ip6_input_measure_bins = i;
done:
	return (error);
}

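/*
 * Sysctl handler that exports the collected IPv6 input-path
 * performance data (net_perf) to user space.
 */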
static int
sysctl_ip6_input_getperf SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->oldptr == USER_ADDR_NULL)
		req->oldlen = (size_t)sizeof (net_perf);

	return (SYSCTL_OUT(req, &net_perf, MIN(sizeof (net_perf), req->oldlen)));
}
