1/*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30/* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61/*
62 * RFC1827/2406 Encapsulated Security Payload.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/mbuf.h>
69#include <sys/mcache.h>
70#include <sys/domain.h>
71#include <sys/protosw.h>
72#include <sys/socket.h>
73#include <sys/errno.h>
74#include <sys/time.h>
75#include <sys/kernel.h>
76#include <sys/syslog.h>
77
78#include <net/if.h>
79#include <net/if_ipsec.h>
80#include <net/route.h>
81#include <kern/cpu_number.h>
82#include <kern/locks.h>
83
84#include <netinet/in.h>
85#include <netinet/in_systm.h>
86#include <netinet/ip.h>
87#include <netinet/ip_var.h>
88#include <netinet/in_var.h>
89#include <netinet/ip_ecn.h>
90#include <netinet/in_pcb.h>
91#include <netinet/udp.h>
92#if INET6
93#include <netinet6/ip6_ecn.h>
94#endif
95
96#if INET6
97#include <netinet/ip6.h>
98#include <netinet6/in6_pcb.h>
99#include <netinet6/ip6_var.h>
100#include <netinet/icmp6.h>
101#include <netinet6/ip6protosw.h>
102#endif
103
104#include <netinet6/ipsec.h>
105#if INET6
106#include <netinet6/ipsec6.h>
107#endif
108#include <netinet6/ah.h>
109#if INET6
110#include <netinet6/ah6.h>
111#endif
112#include <netinet6/esp.h>
113#if INET6
114#include <netinet6/esp6.h>
115#endif
116#include <netkey/key.h>
117#include <netkey/keydb.h>
118#include <netkey/key_debug.h>
119
120#include <net/kpi_protocol.h>
121#include <netinet/kpi_ipfilter_var.h>
122
123#include <net/net_osdep.h>
124#include <mach/sdt.h>
125#include <corecrypto/cc.h>
126
127#include <sys/kdebug.h>
128#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
129#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
130#define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
131#define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
132#define IPLEN_FLIPPED
133
134extern lck_mtx_t *sadb_mutex;
135
136#if INET
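/*
 * Worst-case length of the fixed ESP header: the larger of the old-style
 * (RFC 1827) and new-style (RFC 2406) headers, so a single pullup makes
 * the SPI (and the sequence number, when present) contiguous in the mbuf.
 */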
137#define ESPMAXLEN \
138 (sizeof(struct esp) < sizeof(struct newesp) \
139 ? sizeof(struct newesp) : sizeof(struct esp))
140
141static struct ip *
142esp4_input_strip_udp_encap (struct mbuf *m, int iphlen)
143{
144 // strip the udp header that's encapsulating ESP
145 struct ip *ip;
146 size_t stripsiz = sizeof(struct udphdr);
147
148 ip = mtod(m, __typeof__(ip));
149 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
150 m->m_data += stripsiz;
151 m->m_len -= stripsiz;
152 m->m_pkthdr.len -= stripsiz;
153 ip = mtod(m, __typeof__(ip));
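	/*
	 * ip_len has already been byte-swapped to host order by IP input
	 * processing (see IPLEN_FLIPPED below), so adjust it without ntohs/htons.
	 */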
154 ip->ip_len = ip->ip_len - stripsiz;
155 ip->ip_p = IPPROTO_ESP;
156 return ip;
157}
158
159static struct ip6_hdr *
160esp6_input_strip_udp_encap (struct mbuf *m, int ip6hlen)
161{
162 // strip the udp header that's encapsulating ESP
163 struct ip6_hdr *ip6;
164 size_t stripsiz = sizeof(struct udphdr);
165
166 ip6 = mtod(m, __typeof__(ip6));
167 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
168 m->m_data += stripsiz;
169 m->m_len -= stripsiz;
170 m->m_pkthdr.len -= stripsiz;
171 ip6 = mtod(m, __typeof__(ip6));
172 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
173 ip6->ip6_nxt = IPPROTO_ESP;
174 return ip6;
175}
176
177void
178esp4_input(struct mbuf *m, int off)
179{
180 (void)esp4_input_extended(m, off, NULL);
181}
182
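/*
 * Extended variant of esp4_input.  When the packet was received for the
 * ipsec interface passed in as 'interface', the decapsulated mbuf is
 * handed back to the caller; otherwise the mbuf is consumed here
 * (injected, dispatched, or freed) and NULL is returned.
 */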
183struct mbuf *
184esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
185{
186 struct ip *ip;
187#if INET6
188 struct ip6_hdr *ip6;
189#endif /* INET6 */
190 struct esp *esp;
191 struct esptail esptail;
192 u_int32_t spi;
193 u_int32_t seq;
194 struct secasvar *sav = NULL;
195 size_t taillen;
196 u_int16_t nxt;
197 const struct esp_algorithm *algo;
198 int ivlen;
199 size_t hlen;
200 size_t esplen;
201 sa_family_t ifamily;
202 struct mbuf *out_m = NULL;
203
204 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
205 /* sanity check for alignment. */
206 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
207 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
208 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
209 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
210 goto bad;
211 }
212
213 if (m->m_len < off + ESPMAXLEN) {
214 m = m_pullup(m, off + ESPMAXLEN);
215 if (!m) {
216 ipseclog((LOG_DEBUG,
217 "IPv4 ESP input: can't pullup in esp4_input\n"));
218 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
219 goto bad;
220 }
221 }
222
223 m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;
224
225 /* Expect 32-bit aligned data pointer on strict-align platforms */
226 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
227
228 ip = mtod(m, struct ip *);
229 // expect udp-encap and esp packets only
230 if (ip->ip_p != IPPROTO_ESP &&
231 !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
232 ipseclog((LOG_DEBUG,
233 "IPv4 ESP input: invalid protocol type\n"));
234 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
235 goto bad;
236 }
237 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
238#ifdef _IP_VHL
239 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
240#else
241 hlen = ip->ip_hl << 2;
242#endif
243
244 /* find the sassoc. */
245 spi = esp->esp_spi;
246
247 if ((sav = key_allocsa_extended(AF_INET,
248 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
249 IPPROTO_ESP, spi, interface)) == 0) {
250 ipseclog((LOG_WARNING,
251 "IPv4 ESP input: no key association found for spi %u\n",
252 (u_int32_t)ntohl(spi)));
253 IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
254 goto bad;
255 }
256 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
257 printf("DP esp4_input called to allocate SA:0x%llx\n",
258 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
259 if (sav->state != SADB_SASTATE_MATURE
260 && sav->state != SADB_SASTATE_DYING) {
261 ipseclog((LOG_DEBUG,
262 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
263 (u_int32_t)ntohl(spi)));
264 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
265 goto bad;
266 }
267 algo = esp_algorithm_lookup(sav->alg_enc);
268 if (!algo) {
269 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
270 "unsupported encryption algorithm for spi %u\n",
271 (u_int32_t)ntohl(spi)));
272 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
273 goto bad;
274 }
275
276 /* check if we have proper ivlen information */
277 ivlen = sav->ivlen;
278 if (ivlen < 0) {
		ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
280 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
281 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
282 goto bad;
283 }
284
285 seq = ntohl(((struct newesp *)esp)->esp_seq);
286
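	/*
	 * Algorithms that supply a finalizedecrypt callback (combined-mode
	 * ciphers) verify the ICV themselves after decryption, so the
	 * separate authenticator comparison below is bypassed via delay_icv.
	 */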
287 /* Save ICV from packet for verification later */
288 size_t siz = 0;
289 unsigned char saved_icv[AH_MAXSUMSIZE];
290 if (algo->finalizedecrypt) {
291 siz = algo->icvlen;
292 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
293 goto delay_icv;
294 }
295
296 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
297 && (sav->alg_auth && sav->key_auth)))
298 goto noreplaycheck;
299
300 if (sav->alg_auth == SADB_X_AALG_NULL ||
301 sav->alg_auth == SADB_AALG_NONE)
302 goto noreplaycheck;
303
	/*
	 * check the sequence number against the replay window.
	 */
	if (!ipsec_chkreplay(seq, sav)) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}
316
317 /* check ICV */
318 {
319 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
320 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
321 const struct ah_algorithm *sumalgo;
322
323 sumalgo = ah_algorithm_lookup(sav->alg_auth);
324 if (!sumalgo)
325 goto noreplaycheck;
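		/* round the authenticator size up to a multiple of 4 bytes */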
326 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
327 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
328 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
329 goto bad;
330 }
331 if (AH_MAXSUMSIZE < siz) {
			ipseclog((LOG_DEBUG,
			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
			    (u_int32_t)siz));
335 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
336 goto bad;
337 }
338
339 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
340
341 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
342 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
343 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
344 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
345 goto bad;
346 }
347
348 if (cc_cmp_safe(siz, sum0, sum)) {
349 ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
350 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
351 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
352 goto bad;
353 }
354
355delay_icv:
356
357 /* strip off the authentication data */
358 m_adj(m, -siz);
359 ip = mtod(m, struct ip *);
360#ifdef IPLEN_FLIPPED
361 ip->ip_len = ip->ip_len - siz;
362#else
363 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
364#endif
365 m->m_flags |= M_AUTHIPDGM;
366 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
367 }
368
369 /*
370 * update sequence number.
371 */
372 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
373 if (ipsec_updatereplay(seq, sav)) {
374 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
375 goto bad;
376 }
377 }
378
379noreplaycheck:
380
381 /* process main esp header. */
382 if (sav->flags & SADB_X_EXT_OLD) {
383 /* RFC 1827 */
384 esplen = sizeof(struct esp);
385 } else {
386 /* RFC 2406 */
387 if (sav->flags & SADB_X_EXT_DERIV)
388 esplen = sizeof(struct esp);
389 else
390 esplen = sizeof(struct newesp);
391 }
392
393 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
394 ipseclog((LOG_WARNING,
395 "IPv4 ESP input: packet too short\n"));
396 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
397 goto bad;
398 }
399
400 if (m->m_len < off + esplen + ivlen) {
401 m = m_pullup(m, off + esplen + ivlen);
402 if (!m) {
403 ipseclog((LOG_DEBUG,
404 "IPv4 ESP input: can't pullup in esp4_input\n"));
405 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
406 goto bad;
407 }
408 }
409
410 /*
411 * pre-compute and cache intermediate key
412 */
413 if (esp_schedule(algo, sav) != 0) {
414 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
415 goto bad;
416 }
417
418 /*
419 * decrypt the packet.
420 */
421 if (!algo->decrypt)
422 panic("internal error: no decrypt function");
423 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
424 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
425 /* m is already freed */
426 m = NULL;
427 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
428 ipsec_logsastr(sav)));
429 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
430 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
431 goto bad;
432 }
433 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
434 IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);
435
436 m->m_flags |= M_DECRYPTED;
437
438 if (algo->finalizedecrypt)
439 {
440 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
441 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
442 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
443 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
444 goto bad;
445 }
446 }
447
448 /*
449 * find the trailer of the ESP.
450 */
451 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
452 (caddr_t)&esptail);
453 nxt = esptail.esp_nxt;
454 taillen = esptail.esp_padlen + sizeof(esptail);
455
456 if (m->m_pkthdr.len < taillen
457 || m->m_pkthdr.len - taillen < hlen) { /*?*/
458 ipseclog((LOG_WARNING,
459 "bad pad length in IPv4 ESP input: %s %s\n",
460 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
461 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
462 goto bad;
463 }
464
465 /* strip off the trailing pad area. */
466 m_adj(m, -taillen);
467 ip = mtod(m, struct ip *);
468#ifdef IPLEN_FLIPPED
469 ip->ip_len = ip->ip_len - taillen;
470#else
471 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
472#endif
473 if (ip->ip_p == IPPROTO_UDP) {
474 // offset includes the outer ip and udp header lengths.
475 if (m->m_len < off) {
476 m = m_pullup(m, off);
477 if (!m) {
478 ipseclog((LOG_DEBUG,
479 "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
480 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
481 goto bad;
482 }
483 }
484
485 // check the UDP encap header to detect changes in the source port, and then strip the header
486 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
487 // if peer is behind nat and this is the latest esp packet
488 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
489 (sav->flags & SADB_X_EXT_OLD) == 0 &&
490 seq && sav->replay &&
491 seq >= sav->replay->lastseq) {
492 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
493 if (encap_uh->uh_sport &&
494 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
495 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
496 }
497 }
498 ip = esp4_input_strip_udp_encap(m, off);
499 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
500 }
501
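	/*
	 * Two cases from here on: tunnel mode, where the outer IP and ESP
	 * headers are stripped and the inner packet is re-injected (or handed
	 * to the ipsec interface), and transport mode, where only the ESP
	 * header, IV and trailer are removed before dispatching the payload.
	 */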
502 /* was it transmitted over the IPsec tunnel SA? */
503 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
504 ifaddr_t ifa;
505 struct sockaddr_storage addr;
506
507 /*
		 * strip off all the headers that precede the ESP header.
509 * IP4 xx ESP IP4' payload -> IP4' payload
510 *
511 * XXX more sanity checks
512 * XXX relationship with gif?
513 */
514 u_int8_t tos, otos;
515 int sum;
516
517 tos = ip->ip_tos;
518 m_adj(m, off + esplen + ivlen);
519 if (ifamily == AF_INET) {
520 struct sockaddr_in *ipaddr;
521
522 if (m->m_len < sizeof(*ip)) {
523 m = m_pullup(m, sizeof(*ip));
524 if (!m) {
525 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
526 goto bad;
527 }
528 }
529 ip = mtod(m, struct ip *);
530 /* ECN consideration. */
531
532 otos = ip->ip_tos;
533 if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
534 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
535 goto bad;
536 }
537
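			/*
			 * The ECN egress processing above may have rewritten
			 * the TOS byte; patch the IP header checksum
			 * incrementally (RFC 1624 style) rather than
			 * recomputing it.
			 */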
538 if (otos != ip->ip_tos) {
539 sum = ~ntohs(ip->ip_sum) & 0xffff;
540 sum += (~otos & 0xffff) + ip->ip_tos;
541 sum = (sum >> 16) + (sum & 0xffff);
542 sum += (sum >> 16); /* add carry */
543 ip->ip_sum = htons(~sum & 0xffff);
544 }
545
546 if (!key_checktunnelsanity(sav, AF_INET,
547 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
548 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
549 "in ESP input: %s %s\n",
550 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
551 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
552 goto bad;
553 }
554
555 bzero(&addr, sizeof(addr));
556 ipaddr = (__typeof__(ipaddr))&addr;
557 ipaddr->sin_family = AF_INET;
558 ipaddr->sin_len = sizeof(*ipaddr);
559 ipaddr->sin_addr = ip->ip_dst;
560#if INET6
561 } else if (ifamily == AF_INET6) {
562 struct sockaddr_in6 *ip6addr;
563
564 /*
565 * m_pullup is prohibited in KAME IPv6 input processing
566 * but there's no other way!
567 */
568 if (m->m_len < sizeof(*ip6)) {
569 m = m_pullup(m, sizeof(*ip6));
570 if (!m) {
571 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
572 goto bad;
573 }
574 }
575
576 /*
577 * Expect 32-bit aligned data pointer on strict-align
578 * platforms.
579 */
580 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
581
582 ip6 = mtod(m, struct ip6_hdr *);
583
584 /* ECN consideration. */
585 if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
586 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
587 goto bad;
588 }
589
590 if (!key_checktunnelsanity(sav, AF_INET6,
591 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
592 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
593 "in ESP input: %s %s\n",
594 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
595 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
596 goto bad;
597 }
598
599 bzero(&addr, sizeof(addr));
600 ip6addr = (__typeof__(ip6addr))&addr;
601 ip6addr->sin6_family = AF_INET6;
602 ip6addr->sin6_len = sizeof(*ip6addr);
603 ip6addr->sin6_addr = ip6->ip6_dst;
604#endif /* INET6 */
605 } else {
606 ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
607 "in ESP input\n"));
608 goto bad;
609 }
610
611 key_sa_recordxfer(sav, m);
612 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
613 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
614 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
615 goto bad;
616 }
617
618 // update the receiving interface address based on the inner address
619 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
620 if (ifa) {
621 m->m_pkthdr.rcvif = ifa->ifa_ifp;
622 IFA_REMREF(ifa);
623 }
624
625 /* Clear the csum flags, they can't be valid for the inner headers */
626 m->m_pkthdr.csum_flags = 0;
627
628 // Input via IPSec interface
629 if (sav->sah->ipsec_if != NULL) {
630 // Return mbuf
631 if (interface != NULL &&
632 interface == sav->sah->ipsec_if) {
633 out_m = m;
634 goto done;
635 }
636
637 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
638 m = NULL;
639 goto done;
640 } else {
641 goto bad;
642 }
643 }
644
645 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
646 goto bad;
647
648 nxt = IPPROTO_DONE;
649 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
650 } else {
651 /*
652 * strip off ESP header and IV.
653 * even in m_pulldown case, we need to strip off ESP so that
654 * we can always compute checksum for AH correctly.
655 */
656 size_t stripsiz;
657
658 stripsiz = esplen + ivlen;
659
660 ip = mtod(m, struct ip *);
661 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
662 m->m_data += stripsiz;
663 m->m_len -= stripsiz;
664 m->m_pkthdr.len -= stripsiz;
665
666 ip = mtod(m, struct ip *);
667#ifdef IPLEN_FLIPPED
668 ip->ip_len = ip->ip_len - stripsiz;
669#else
670 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
671#endif
672 ip->ip_p = nxt;
673
674 key_sa_recordxfer(sav, m);
675 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
676 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
677 goto bad;
678 }
679
680 /*
681 * Set the csum valid flag, if we authenticated the
682 * packet, the payload shouldn't be corrupt unless
683 * it was corrupted before being signed on the other
684 * side.
685 */
686 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
687 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
688 m->m_pkthdr.csum_data = 0xFFFF;
689 _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
690 }
691
692 if (nxt != IPPROTO_DONE) {
693 if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
694 ipsec4_in_reject(m, NULL)) {
695 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
696 goto bad;
697 }
698 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);
699
700 /* translate encapsulated UDP port ? */
701 if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
702 struct udphdr *udp;
703
				if (nxt != IPPROTO_UDP) {	/* not a UDP packet - drop it */
705 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
706 goto bad;
707 }
708
709 if (m->m_len < off + sizeof(struct udphdr)) {
710 m = m_pullup(m, off + sizeof(struct udphdr));
711 if (!m) {
712 ipseclog((LOG_DEBUG,
713 "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
714 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
715 goto bad;
716 }
717 ip = mtod(m, struct ip *);
718 }
719 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);
720
721 lck_mtx_lock(sadb_mutex);
722 if (sav->natt_encapsulated_src_port == 0) {
723 sav->natt_encapsulated_src_port = udp->uh_sport;
724 } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
725 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
726 lck_mtx_unlock(sadb_mutex);
727 goto bad;
728 }
729 lck_mtx_unlock(sadb_mutex);
730 udp->uh_sport = htons(sav->remote_ike_port);
731 udp->uh_sum = 0;
732 }
733
734 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
735 struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
736 struct ip *, ip, struct ip6_hdr *, NULL);
737
738 // Input via IPsec interface legacy path
739 if (sav->sah->ipsec_if != NULL) {
740 int mlen;
741 if ((mlen = m_length2(m, NULL)) < hlen) {
742 ipseclog((LOG_DEBUG,
743 "IPv4 ESP input: decrypted packet too short %d < %d\n",
744 mlen, hlen));
745 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
746 goto bad;
747 }
748 ip->ip_len = htons(ip->ip_len + hlen);
749 ip->ip_off = htons(ip->ip_off);
750 ip->ip_sum = 0;
751 ip->ip_sum = ip_cksum_hdr_in(m, hlen);
752
753 // Return mbuf
754 if (interface != NULL &&
755 interface == sav->sah->ipsec_if) {
756 out_m = m;
757 goto done;
758 }
759
760 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
761 m = NULL;
762 goto done;
763 } else {
764 goto bad;
765 }
766 }
767
768 ip_proto_dispatch_in(m, off, nxt, 0);
769 } else {
770 m_freem(m);
771 }
772 m = NULL;
773 }
774
775done:
776 if (sav) {
777 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
778 printf("DP esp4_input call free SA:0x%llx\n",
779 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
780 key_freesav(sav, KEY_SADB_UNLOCKED);
781 }
782 IPSEC_STAT_INCREMENT(ipsecstat.in_success);
783 return out_m;
784bad:
785 if (sav) {
786 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
787 printf("DP esp4_input call free SA:0x%llx\n",
788 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
789 key_freesav(sav, KEY_SADB_UNLOCKED);
790 }
791 if (m) {
792 m_freem(m);
793 }
794 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
795 return out_m;
796}
797#endif /* INET */
798
799#if INET6
800
801int
802esp6_input(struct mbuf **mp, int *offp, int proto)
803{
804 return esp6_input_extended(mp, offp, proto, NULL);
805}
806
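/*
 * Extended variant of esp6_input.  Returns the next protocol to process
 * (or IPPROTO_DONE) and updates *mp/*offp; when the packet was received
 * for the ipsec interface passed in as 'interface', the decapsulated mbuf
 * is left in *mp for the caller instead of being re-injected.
 */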
807int
808esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
809{
810#pragma unused(proto)
811 struct mbuf *m = *mp;
812 int off = *offp;
813 struct ip *ip;
814 struct ip6_hdr *ip6;
815 struct esp *esp;
816 struct esptail esptail;
817 u_int32_t spi;
818 u_int32_t seq;
819 struct secasvar *sav = NULL;
820 size_t taillen;
821 u_int16_t nxt;
822 char *nproto;
823 const struct esp_algorithm *algo;
824 int ivlen;
825 size_t esplen;
826 sa_family_t ifamily;
827
828 /* sanity check for alignment. */
829 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
830 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
831 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
832 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
833 goto bad;
834 }
835
836#ifndef PULLDOWN_TEST
837 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
838 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
839#else
840 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
841 if (esp == NULL) {
842 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
843 return IPPROTO_DONE;
844 }
845#endif
846 m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;
847
	/* Expect 32-bit aligned data pointer on strict-align platforms */
849 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
850
851 ip6 = mtod(m, struct ip6_hdr *);
852
853 if (ntohs(ip6->ip6_plen) == 0) {
854 ipseclog((LOG_ERR, "IPv6 ESP input: "
855 "ESP with IPv6 jumbogram is not supported.\n"));
856 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
857 goto bad;
858 }
859
860 nproto = ip6_get_prevhdr(m, off);
861 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
862 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
863 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
864 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
865 goto bad;
866 }
867
868 /* find the sassoc. */
869 spi = esp->esp_spi;
870
871 if ((sav = key_allocsa_extended(AF_INET6,
872 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
873 IPPROTO_ESP, spi, interface)) == 0) {
874 ipseclog((LOG_WARNING,
875 "IPv6 ESP input: no key association found for spi %u\n",
876 (u_int32_t)ntohl(spi)));
877 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
878 goto bad;
879 }
880 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
881 printf("DP esp6_input called to allocate SA:0x%llx\n",
882 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
883 if (sav->state != SADB_SASTATE_MATURE
884 && sav->state != SADB_SASTATE_DYING) {
885 ipseclog((LOG_DEBUG,
886 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
887 (u_int32_t)ntohl(spi)));
888 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
889 goto bad;
890 }
891 algo = esp_algorithm_lookup(sav->alg_enc);
892 if (!algo) {
893 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
894 "unsupported encryption algorithm for spi %u\n",
895 (u_int32_t)ntohl(spi)));
896 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
897 goto bad;
898 }
899
900 /* check if we have proper ivlen information */
901 ivlen = sav->ivlen;
902 if (ivlen < 0) {
		ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
904 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
905 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
906 goto bad;
907 }
908
909 seq = ntohl(((struct newesp *)esp)->esp_seq);
910
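	/*
	 * Algorithms that supply a finalizedecrypt callback (combined-mode
	 * ciphers) verify the ICV themselves after decryption, so the
	 * separate authenticator comparison below is bypassed via delay_icv.
	 */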
911 /* Save ICV from packet for verification later */
912 size_t siz = 0;
913 unsigned char saved_icv[AH_MAXSUMSIZE];
914 if (algo->finalizedecrypt) {
915 siz = algo->icvlen;
916 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
917 goto delay_icv;
918 }
919
920 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
921 && (sav->alg_auth && sav->key_auth)))
922 goto noreplaycheck;
923
924 if (sav->alg_auth == SADB_X_AALG_NULL ||
925 sav->alg_auth == SADB_AALG_NONE)
926 goto noreplaycheck;
927
	/*
	 * check the sequence number against the replay window.
	 */
	if (!ipsec_chkreplay(seq, sav)) {
		IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		goto bad;
	}
940
941 /* check ICV */
942 {
943 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
944 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
945 const struct ah_algorithm *sumalgo;
946
947 sumalgo = ah_algorithm_lookup(sav->alg_auth);
948 if (!sumalgo)
949 goto noreplaycheck;
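		/* round the authenticator size up to a multiple of 4 bytes */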
950 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
951 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
952 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
953 goto bad;
954 }
955 if (AH_MAXSUMSIZE < siz) {
			ipseclog((LOG_DEBUG,
			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
			    (u_int32_t)siz));
959 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
960 goto bad;
961 }
962
963 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
964
965 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
966 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
967 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
968 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
969 goto bad;
970 }
971
972 if (cc_cmp_safe(siz, sum0, sum)) {
973 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
974 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
975 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
976 goto bad;
977 }
978
979delay_icv:
980
981 /* strip off the authentication data */
982 m_adj(m, -siz);
983 ip6 = mtod(m, struct ip6_hdr *);
984 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
985
986 m->m_flags |= M_AUTHIPDGM;
987 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
988 }
989
990 /*
991 * update sequence number.
992 */
993 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
994 if (ipsec_updatereplay(seq, sav)) {
995 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
996 goto bad;
997 }
998 }
999
1000noreplaycheck:
1001
1002 /* process main esp header. */
1003 if (sav->flags & SADB_X_EXT_OLD) {
1004 /* RFC 1827 */
1005 esplen = sizeof(struct esp);
1006 } else {
1007 /* RFC 2406 */
1008 if (sav->flags & SADB_X_EXT_DERIV)
1009 esplen = sizeof(struct esp);
1010 else
1011 esplen = sizeof(struct newesp);
1012 }
1013
1014 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
1015 ipseclog((LOG_WARNING,
1016 "IPv6 ESP input: packet too short\n"));
1017 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1018 goto bad;
1019 }
1020
1021#ifndef PULLDOWN_TEST
1022 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
1023#else
1024 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
1025 if (esp == NULL) {
1026 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1027 m = NULL;
1028 goto bad;
1029 }
1030#endif
1031 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
1032
1033 /*
1034 * pre-compute and cache intermediate key
1035 */
1036 if (esp_schedule(algo, sav) != 0) {
1037 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1038 goto bad;
1039 }
1040
1041 /*
1042 * decrypt the packet.
1043 */
1044 if (!algo->decrypt)
1045 panic("internal error: no decrypt function");
1046 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1047 /* m is already freed */
1048 m = NULL;
1049 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1050 ipsec_logsastr(sav)));
1051 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1052 goto bad;
1053 }
1054 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1055
1056 m->m_flags |= M_DECRYPTED;
1057
1058 if (algo->finalizedecrypt)
1059 {
1060 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
1061 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
1062 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1063 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1064 goto bad;
1065 }
1066 }
1067
1068 /*
1069 * find the trailer of the ESP.
1070 */
1071 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1072 (caddr_t)&esptail);
1073 nxt = esptail.esp_nxt;
1074 taillen = esptail.esp_padlen + sizeof(esptail);
1075
1076 if (m->m_pkthdr.len < taillen
1077 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1078 ipseclog((LOG_WARNING,
1079 "bad pad length in IPv6 ESP input: %s %s\n",
1080 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1081 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1082 goto bad;
1083 }
1084
1085 /* strip off the trailing pad area. */
1086 m_adj(m, -taillen);
1087 ip6 = mtod(m, struct ip6_hdr *);
1088 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1089
1090 if (*nproto == IPPROTO_UDP) {
1091 // offset includes the outer ip and udp header lengths.
1092 if (m->m_len < off) {
1093 m = m_pullup(m, off);
1094 if (!m) {
1095 ipseclog((LOG_DEBUG,
1096 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1097 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1098 goto bad;
1099 }
1100 }
1101
1102 // check the UDP encap header to detect changes in the source port, and then strip the header
1103 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1104 // if peer is behind nat and this is the latest esp packet
1105 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1106 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1107 seq && sav->replay &&
1108 seq >= sav->replay->lastseq) {
1109 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1110 if (encap_uh->uh_sport &&
1111 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1112 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1113 }
1114 }
1115 ip6 = esp6_input_strip_udp_encap(m, off);
1116 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1117 }
1118
1119
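	/*
	 * Two cases from here on: tunnel mode, where the outer IPv6 and ESP
	 * headers are stripped and the inner packet is re-injected (or handed
	 * to the ipsec interface), and transport mode, where only the ESP
	 * header, IV and trailer are removed and the next header is patched up.
	 */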
1120 /* was it transmitted over the IPsec tunnel SA? */
1121 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
1122 ifaddr_t ifa;
1123 struct sockaddr_storage addr;
1124
1125 /*
		 * strip off all the headers that precede the ESP header.
1127 * IP6 xx ESP IP6' payload -> IP6' payload
1128 *
1129 * XXX more sanity checks
1130 * XXX relationship with gif?
1131 */
1132 u_int32_t flowinfo; /*net endian*/
1133 flowinfo = ip6->ip6_flow;
1134 m_adj(m, off + esplen + ivlen);
1135 if (ifamily == AF_INET6) {
1136 struct sockaddr_in6 *ip6addr;
1137
1138 if (m->m_len < sizeof(*ip6)) {
1139#ifndef PULLDOWN_TEST
1140 /*
1141 * m_pullup is prohibited in KAME IPv6 input processing
1142 * but there's no other way!
1143 */
1144#else
1145 /* okay to pullup in m_pulldown style */
1146#endif
1147 m = m_pullup(m, sizeof(*ip6));
1148 if (!m) {
1149 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1150 goto bad;
1151 }
1152 }
1153 ip6 = mtod(m, struct ip6_hdr *);
1154 /* ECN consideration. */
1155 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1156 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1157 goto bad;
1158 }
1159 if (!key_checktunnelsanity(sav, AF_INET6,
1160 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1161 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1162 "in IPv6 ESP input: %s %s\n",
1163 ipsec6_logpacketstr(ip6, spi),
1164 ipsec_logsastr(sav)));
1165 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1166 goto bad;
1167 }
1168
1169 bzero(&addr, sizeof(addr));
1170 ip6addr = (__typeof__(ip6addr))&addr;
1171 ip6addr->sin6_family = AF_INET6;
1172 ip6addr->sin6_len = sizeof(*ip6addr);
1173 ip6addr->sin6_addr = ip6->ip6_dst;
1174 } else if (ifamily == AF_INET) {
1175 struct sockaddr_in *ipaddr;
1176
1177 if (m->m_len < sizeof(*ip)) {
1178 m = m_pullup(m, sizeof(*ip));
1179 if (!m) {
1180 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1181 goto bad;
1182 }
1183 }
1184
1185 u_int8_t otos;
1186 int sum;
1187
1188 ip = mtod(m, struct ip *);
1189 otos = ip->ip_tos;
1190 /* ECN consideration. */
1191 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1192 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1193 goto bad;
1194 }
1195
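			/*
			 * The ECN egress processing above may have rewritten
			 * the TOS byte; patch the IP header checksum
			 * incrementally (RFC 1624 style) rather than
			 * recomputing it.
			 */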
1196 if (otos != ip->ip_tos) {
1197 sum = ~ntohs(ip->ip_sum) & 0xffff;
1198 sum += (~otos & 0xffff) + ip->ip_tos;
1199 sum = (sum >> 16) + (sum & 0xffff);
1200 sum += (sum >> 16); /* add carry */
1201 ip->ip_sum = htons(~sum & 0xffff);
1202 }
1203
1204 if (!key_checktunnelsanity(sav, AF_INET,
1205 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1206 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1207 "in ESP input: %s %s\n",
1208 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1209 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1210 goto bad;
1211 }
1212
1213 bzero(&addr, sizeof(addr));
1214 ipaddr = (__typeof__(ipaddr))&addr;
1215 ipaddr->sin_family = AF_INET;
1216 ipaddr->sin_len = sizeof(*ipaddr);
1217 ipaddr->sin_addr = ip->ip_dst;
1218 }
1219
1220 key_sa_recordxfer(sav, m);
1221 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1222 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1223 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1224 goto bad;
1225 }
1226
1227 // update the receiving interface address based on the inner address
1228 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1229 if (ifa) {
1230 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1231 IFA_REMREF(ifa);
1232 }
1233
1234 // Input via IPSec interface
1235 if (sav->sah->ipsec_if != NULL) {
1236 // Return mbuf
1237 if (interface != NULL &&
1238 interface == sav->sah->ipsec_if) {
1239 goto done;
1240 }
1241
1242 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1243 m = NULL;
1244 nxt = IPPROTO_DONE;
1245 goto done;
1246 } else {
1247 goto bad;
1248 }
1249 }
1250
1251 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
1252 goto bad;
1253 nxt = IPPROTO_DONE;
1254 } else {
1255 /*
1256 * strip off ESP header and IV.
1257 * even in m_pulldown case, we need to strip off ESP so that
1258 * we can always compute checksum for AH correctly.
1259 */
1260 size_t stripsiz;
1261 char *prvnxtp;
1262
1263 /*
1264 * Set the next header field of the previous header correctly.
1265 */
1266 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1267 *prvnxtp = nxt;
1268
1269 stripsiz = esplen + ivlen;
1270
1271 ip6 = mtod(m, struct ip6_hdr *);
1272 if (m->m_len >= stripsiz + off) {
1273 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1274 m->m_data += stripsiz;
1275 m->m_len -= stripsiz;
1276 m->m_pkthdr.len -= stripsiz;
1277 } else {
1278 /*
			 * this involves no copy if the split point falls on
			 * an mbuf cluster boundary
1281 */
1282 struct mbuf *n;
1283
1284 n = m_split(m, off, M_DONTWAIT);
1285 if (n == NULL) {
1286 /* m is retained by m_split */
1287 goto bad;
1288 }
1289 m_adj(n, stripsiz);
1290 /* m_cat does not update m_pkthdr.len */
1291 m->m_pkthdr.len += n->m_pkthdr.len;
1292 m_cat(m, n);
1293 }
1294
1295#ifndef PULLDOWN_TEST
1296 /*
		 * KAME requires that the packet be contiguous in the
		 * mbuf. We need to make sure of that.
1299 * this kind of code should be avoided.
1300 * XXX other conditions to avoid running this part?
1301 */
1302 if (m->m_len != m->m_pkthdr.len) {
1303 struct mbuf *n = NULL;
1304 int maxlen;
1305
1306 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1307 maxlen = MHLEN;
1308 if (n)
1309 M_COPY_PKTHDR(n, m);
1310 if (n && m->m_pkthdr.len > maxlen) {
1311 MCLGET(n, M_DONTWAIT);
1312 maxlen = MCLBYTES;
1313 if ((n->m_flags & M_EXT) == 0) {
1314 m_free(n);
1315 n = NULL;
1316 }
1317 }
1318 if (!n) {
1319 printf("esp6_input: mbuf allocation failed\n");
1320 goto bad;
1321 }
1322
1323 if (m->m_pkthdr.len <= maxlen) {
1324 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1325 n->m_len = m->m_pkthdr.len;
1326 n->m_pkthdr.len = m->m_pkthdr.len;
1327 n->m_next = NULL;
1328 m_freem(m);
1329 } else {
1330 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1331 n->m_len = maxlen;
1332 n->m_pkthdr.len = m->m_pkthdr.len;
1333 n->m_next = m;
1334 m_adj(m, maxlen);
1335 m->m_flags &= ~M_PKTHDR;
1336 }
1337 m = n;
1338 }
1339#endif
1340
1341 ip6 = mtod(m, struct ip6_hdr *);
1342 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1343
1344 key_sa_recordxfer(sav, m);
1345 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1346 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1347 goto bad;
1348 }
1349
1350 /*
1351 * Set the csum valid flag, if we authenticated the
1352 * packet, the payload shouldn't be corrupt unless
1353 * it was corrupted before being signed on the other
1354 * side.
1355 */
1356 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1357 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1358 m->m_pkthdr.csum_data = 0xFFFF;
1359 _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
1360 }
1361
1362 // Input via IPSec interface
1363 if (sav->sah->ipsec_if != NULL) {
1364 // Return mbuf
1365 if (interface != NULL &&
1366 interface == sav->sah->ipsec_if) {
1367 goto done;
1368 }
1369
1370 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1371 m = NULL;
1372 nxt = IPPROTO_DONE;
1373 goto done;
1374 } else {
1375 goto bad;
1376 }
1377 }
1378
1379 }
1380
1381done:
1382 *offp = off;
1383 *mp = m;
1384 if (sav) {
1385 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1386 printf("DP esp6_input call free SA:0x%llx\n",
1387 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1388 key_freesav(sav, KEY_SADB_UNLOCKED);
1389 }
1390 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1391 return nxt;
1392
1393bad:
1394 if (sav) {
1395 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1396 printf("DP esp6_input call free SA:0x%llx\n",
1397 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1398 key_freesav(sav, KEY_SADB_UNLOCKED);
1399 }
1400 if (m) {
1401 m_freem(m);
1402 }
1403 if (interface != NULL) {
1404 *mp = NULL;
1405 }
1406 return IPPROTO_DONE;
1407}
1408
1409void
1410esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
1411{
1412 const struct newesp *espp;
1413 struct newesp esp;
1414 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1415 struct secasvar *sav;
1416 struct ip6_hdr *ip6;
1417 struct mbuf *m;
1418 int off = 0;
1419 struct sockaddr_in6 *sa6_src, *sa6_dst;
1420
1421 if (sa->sa_family != AF_INET6 ||
1422 sa->sa_len != sizeof(struct sockaddr_in6))
1423 return;
1424 if ((unsigned)cmd >= PRC_NCMDS)
1425 return;
1426
1427 /* if the parameter is from icmp6, decode it. */
1428 if (d != NULL) {
1429 ip6cp = (struct ip6ctlparam *)d;
1430 m = ip6cp->ip6c_m;
1431 ip6 = ip6cp->ip6c_ip6;
1432 off = ip6cp->ip6c_off;
1433 } else {
1434 m = NULL;
1435 ip6 = NULL;
1436 }
1437
1438 if (ip6) {
1439 /*
1440 * Notify the error to all possible sockets via pfctlinput2.
1441 * Since the upper layer information (such as protocol type,
1442 * source and destination ports) is embedded in the encrypted
1443 * data and might have been cut, we can't directly call
1444 * an upper layer ctlinput function. However, the pcbnotify
1445 * function will consider source and destination addresses
1446 * as well as the flow info value, and may be able to find
1447 * some PCB that should be notified.
1448 * Although pfctlinput2 will call esp6_ctlinput(), there is
1449 * no possibility of an infinite loop of function calls,
1450 * because we don't pass the inner IPv6 header.
1451 */
1452 bzero(&ip6cp1, sizeof(ip6cp1));
1453 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1454 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1455
1456 /*
1457 * Then go to special cases that need ESP header information.
1458 * XXX: We assume that when ip6 is non NULL,
1459 * M and OFF are valid.
1460 */
1461
1462 /* check if we can safely examine src and dst ports */
1463 if (m->m_pkthdr.len < off + sizeof(esp))
1464 return;
1465
1466 if (m->m_len < off + sizeof(esp)) {
1467 /*
			 * this should be a rare case,
			 * so we compromise on this copy...
1470 */
1471 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1472 espp = &esp;
1473 } else
1474 espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1475
1476 if (cmd == PRC_MSGSIZE) {
1477 int valid = 0;
1478
1479 /*
1480 * Check to see if we have a valid SA corresponding to
1481 * the address in the ICMP message payload.
1482 */
1483 sa6_src = ip6cp->ip6c_src;
1484 sa6_dst = (struct sockaddr_in6 *)(void *)sa;
1485 sav = key_allocsa(AF_INET6,
1486 (caddr_t)&sa6_src->sin6_addr,
1487 (caddr_t)&sa6_dst->sin6_addr,
1488 IPPROTO_ESP, espp->esp_spi);
1489 if (sav) {
1490 if (sav->state == SADB_SASTATE_MATURE ||
1491 sav->state == SADB_SASTATE_DYING)
1492 valid++;
1493 key_freesav(sav, KEY_SADB_UNLOCKED);
1494 }
1495
1496 /* XXX Further validation? */
1497
1498 /*
1499 * Depending on the value of "valid" and routing table
1500 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalculate the new MTU and create the
1502 * corresponding routing entry, or
1503 * - ignore the MTU change notification.
1504 */
1505 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1506 }
1507 } else {
1508 /* we normally notify any pcb here */
1509 }
1510}
1511#endif /* INET6 */
1512