1/*
2 * Copyright (c) 2008-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30/* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61/*
62 * IPsec controller part.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/mbuf.h>
69#include <sys/mcache.h>
70#include <sys/domain.h>
71#include <sys/protosw.h>
72#include <sys/socket.h>
73#include <sys/socketvar.h>
74#include <sys/errno.h>
75#include <sys/time.h>
76#include <sys/kernel.h>
77#include <sys/syslog.h>
78#include <sys/sysctl.h>
79#include <kern/locks.h>
80#include <sys/kauth.h>
81#include <libkern/OSAtomic.h>
82
83#include <net/if.h>
84#include <net/route.h>
85#include <net/if_ipsec.h>
86
87#include <netinet/in.h>
88#include <netinet/in_systm.h>
89#include <netinet/ip.h>
90#include <netinet/ip_var.h>
91#include <netinet/in_var.h>
92#include <netinet/udp.h>
93#include <netinet/udp_var.h>
94#include <netinet/ip_ecn.h>
95#if INET6
96#include <netinet6/ip6_ecn.h>
97#endif
98#include <netinet/tcp.h>
99#include <netinet/udp.h>
100
101#include <netinet/ip6.h>
102#if INET6
103#include <netinet6/ip6_var.h>
104#endif
105#include <netinet/in_pcb.h>
106#if INET6
107#include <netinet/icmp6.h>
108#endif
109
110#include <netinet6/ipsec.h>
111#if INET6
112#include <netinet6/ipsec6.h>
113#endif
114#include <netinet6/ah.h>
115#if INET6
116#include <netinet6/ah6.h>
117#endif
118#if IPSEC_ESP
119#include <netinet6/esp.h>
120#if INET6
121#include <netinet6/esp6.h>
122#endif
123#endif
124#include <netinet6/ipcomp.h>
125#if INET6
126#include <netinet6/ipcomp6.h>
127#endif
128#include <netkey/key.h>
129#include <netkey/keydb.h>
130#include <netkey/key_debug.h>
131
132#include <net/net_osdep.h>
133
134#if IPSEC_DEBUG
135int ipsec_debug = 1;
136#else
137int ipsec_debug = 0;
138#endif
139
140#include <sys/kdebug.h>
141#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
142#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
143#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
144#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
145#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
146
147extern lck_mtx_t *sadb_mutex;
148
149struct ipsecstat ipsecstat;
150int ip4_ah_cleartos = 1;
151int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
152int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
153int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
154int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
155int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
156int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
157struct secpolicy ip4_def_policy;
158int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
159int ip4_esp_randpad = -1;
160int esp_udp_encap_port = 0;
161static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
162extern int natt_keepalive_interval;
163extern u_int64_t natt_now;
164
165struct ipsec_tag;
166
167SYSCTL_DECL(_net_inet_ipsec);
168#if INET6
169SYSCTL_DECL(_net_inet6_ipsec6);
170#endif
171/* net.inet.ipsec */
172SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
173 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
174SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
175 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
176SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
177 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
178SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
179 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
180SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
181 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
182SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
183 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
184SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
185 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
186SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
187 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
188SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
189 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
190SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
191 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
192SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
193 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
194SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
195 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
196
197/* for performance, we bypass ipsec until a security policy is set */
198int ipsec_bypass = 1;
199SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass,0, "");
200
201/*
202 * NAT Traversal requires a UDP port for encapsulation,
203 * esp_udp_encap_port controls which port is used. Racoon
204 * must set this port to the port racoon is using locally
205 * for nat traversal.
206 */
207SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
208 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
209
210#if INET6
211struct ipsecstat ipsec6stat;
212int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
213int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
214int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
215int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
216struct secpolicy ip6_def_policy;
217int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
218int ip6_esp_randpad = -1;
219
220/* net.inet6.ipsec6 */
221SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
222 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
223SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
224 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
225SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
226 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
227SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
228 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
229SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
231SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
232 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
233SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
234 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
235SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
236 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
237SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
238 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
239#endif /* INET6 */
240
241static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *,
242 int, int, int);
243static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int,
244 struct mbuf *, int);
245static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
246#if INET6
247static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
248#endif
249static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
250static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
251static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
252#if INET6
253static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
254static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
255#endif
256static struct inpcbpolicy *ipsec_newpcbpolicy(void);
257static void ipsec_delpcbpolicy(struct inpcbpolicy *);
258static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
259static int ipsec_set_policy(struct secpolicy **pcb_sp,
260 int optname, caddr_t request, size_t len, int priv);
261static void vshiftl(unsigned char *, int, int);
262static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
263#if INET6
264static int ipsec64_encapsulate(struct mbuf *, struct secasvar *);
265static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
266static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
267#endif
268static struct ipsec_tag *ipsec_addaux(struct mbuf *);
269static struct ipsec_tag *ipsec_findaux(struct mbuf *);
270static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
271int ipsec_send_natt_keepalive(struct secasvar *sav);
272bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
273
274static int
275sysctl_def_policy SYSCTL_HANDLER_ARGS
276{
277 int old_policy = ip4_def_policy.policy;
278 int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
279
280#pragma unused(arg1, arg2)
281
282 if (ip4_def_policy.policy != IPSEC_POLICY_NONE &&
283 ip4_def_policy.policy != IPSEC_POLICY_DISCARD) {
284 ip4_def_policy.policy = old_policy;
285 return EINVAL;
286 }
287
288 /* Turn off the bypass if the default security policy changes */
289 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE)
290 ipsec_bypass = 0;
291
292 return error;
293}
294
295/*
296 * For OUTBOUND packet having a socket. Searching SPD for packet,
297 * and return a pointer to SP.
298 * OUT: NULL: no apropreate SP found, the following value is set to error.
299 * 0 : bypass
300 * EACCES : discard packet.
301 * ENOENT : ipsec_acquire() in progress, maybe.
302 * others : error occurred.
303 * others: a pointer to SP
304 *
305 * NOTE: IPv6 mapped adddress concern is implemented here.
306 */
307struct secpolicy *
308ipsec4_getpolicybysock(struct mbuf *m,
309 u_int dir,
310 struct socket *so,
311 int *error)
312{
313 struct inpcbpolicy *pcbsp = NULL;
314 struct secpolicy *currsp = NULL; /* policy on socket */
315 struct secpolicy *kernsp = NULL; /* policy on kernel */
316
317 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
318 /* sanity check */
319 if (m == NULL || so == NULL || error == NULL)
320 panic("ipsec4_getpolicybysock: NULL pointer was passed.\n");
321
322 if (so->so_pcb == NULL) {
323 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
324 return ipsec4_getpolicybyaddr(m, dir, 0, error);
325 }
326
327 switch (SOCK_DOM(so)) {
328 case PF_INET:
329 pcbsp = sotoinpcb(so)->inp_sp;
330 break;
331#if INET6
332 case PF_INET6:
333 pcbsp = sotoin6pcb(so)->in6p_sp;
334 break;
335#endif
336 }
337
338 if (!pcbsp){
339 /* Socket has not specified an IPSEC policy */
340 return ipsec4_getpolicybyaddr(m, dir, 0, error);
341 }
342
343 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0,0,0,0,0);
344
345 switch (SOCK_DOM(so)) {
346 case PF_INET:
347 /* set spidx in pcb */
348 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
349 break;
350#if INET6
351 case PF_INET6:
352 /* set spidx in pcb */
353 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
354 break;
355#endif
356 default:
357 panic("ipsec4_getpolicybysock: unsupported address family\n");
358 }
359 if (*error) {
360 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1,*error,0,0,0);
361 return NULL;
362 }
363
364 /* sanity check */
365 if (pcbsp == NULL)
366 panic("ipsec4_getpolicybysock: pcbsp is NULL.\n");
367
368 switch (dir) {
369 case IPSEC_DIR_INBOUND:
370 currsp = pcbsp->sp_in;
371 break;
372 case IPSEC_DIR_OUTBOUND:
373 currsp = pcbsp->sp_out;
374 break;
375 default:
376 panic("ipsec4_getpolicybysock: illegal direction.\n");
377 }
378
379 /* sanity check */
380 if (currsp == NULL)
381 panic("ipsec4_getpolicybysock: currsp is NULL.\n");
382
383 /* when privilieged socket */
384 if (pcbsp->priv) {
385 switch (currsp->policy) {
386 case IPSEC_POLICY_BYPASS:
387 lck_mtx_lock(sadb_mutex);
388 currsp->refcnt++;
389 lck_mtx_unlock(sadb_mutex);
390 *error = 0;
391 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2,*error,0,0,0);
392 return currsp;
393
394 case IPSEC_POLICY_ENTRUST:
395 /* look for a policy in SPD */
396 kernsp = key_allocsp(&currsp->spidx, dir);
397
398 /* SP found */
399 if (kernsp != NULL) {
400 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
401 printf("DP ipsec4_getpolicybysock called "
402 "to allocate SP:0x%llx\n",
403 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
404 *error = 0;
405 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3,*error,0,0,0);
406 return kernsp;
407 }
408
409 /* no SP found */
410 lck_mtx_lock(sadb_mutex);
411 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
412 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
413 ipseclog((LOG_INFO,
414 "fixed system default policy: %d->%d\n",
415 ip4_def_policy.policy, IPSEC_POLICY_NONE));
416 ip4_def_policy.policy = IPSEC_POLICY_NONE;
417 }
418 ip4_def_policy.refcnt++;
419 lck_mtx_unlock(sadb_mutex);
420 *error = 0;
421 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4,*error,0,0,0);
422 return &ip4_def_policy;
423
424 case IPSEC_POLICY_IPSEC:
425 lck_mtx_lock(sadb_mutex);
426 currsp->refcnt++;
427 lck_mtx_unlock(sadb_mutex);
428 *error = 0;
429 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5,*error,0,0,0);
430 return currsp;
431
432 default:
433 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
434 "Invalid policy for PCB %d\n", currsp->policy));
435 *error = EINVAL;
436 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6,*error,0,0,0);
437 return NULL;
438 }
439 /* NOTREACHED */
440 }
441
442 /* when non-privilieged socket */
443 /* look for a policy in SPD */
444 kernsp = key_allocsp(&currsp->spidx, dir);
445
446 /* SP found */
447 if (kernsp != NULL) {
448 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
449 printf("DP ipsec4_getpolicybysock called "
450 "to allocate SP:0x%llx\n",
451 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
452 *error = 0;
453 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7,*error,0,0,0);
454 return kernsp;
455 }
456
457 /* no SP found */
458 switch (currsp->policy) {
459 case IPSEC_POLICY_BYPASS:
460 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
461 "Illegal policy for non-priviliged defined %d\n",
462 currsp->policy));
463 *error = EINVAL;
464 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8,*error,0,0,0);
465 return NULL;
466
467 case IPSEC_POLICY_ENTRUST:
468 lck_mtx_lock(sadb_mutex);
469 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
470 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
471 ipseclog((LOG_INFO,
472 "fixed system default policy: %d->%d\n",
473 ip4_def_policy.policy, IPSEC_POLICY_NONE));
474 ip4_def_policy.policy = IPSEC_POLICY_NONE;
475 }
476 ip4_def_policy.refcnt++;
477 lck_mtx_unlock(sadb_mutex);
478 *error = 0;
479 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9,*error,0,0,0);
480 return &ip4_def_policy;
481
482 case IPSEC_POLICY_IPSEC:
483 lck_mtx_lock(sadb_mutex);
484 currsp->refcnt++;
485 lck_mtx_unlock(sadb_mutex);
486 *error = 0;
487 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10,*error,0,0,0);
488 return currsp;
489
490 default:
491 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
492 "Invalid policy for PCB %d\n", currsp->policy));
493 *error = EINVAL;
494 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11,*error,0,0,0);
495 return NULL;
496 }
497 /* NOTREACHED */
498}
499
500/*
501 * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet,
502 * and return a pointer to SP.
503 * OUT: positive: a pointer to the entry for security policy leaf matched.
504 * NULL: no apropreate SP found, the following value is set to error.
505 * 0 : bypass
506 * EACCES : discard packet.
507 * ENOENT : ipsec_acquire() in progress, maybe.
508 * others : error occurred.
509 */
510struct secpolicy *
511ipsec4_getpolicybyaddr(struct mbuf *m,
512 u_int dir,
513 int flag,
514 int *error)
515{
516 struct secpolicy *sp = NULL;
517
518 if (ipsec_bypass != 0)
519 return 0;
520
521 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
522
523 /* sanity check */
524 if (m == NULL || error == NULL)
525 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n");
526 {
527 struct secpolicyindex spidx;
528
529 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
530 bzero(&spidx, sizeof(spidx));
531
532 /* make a index to look for a policy */
533 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
534 (flag & IP_FORWARDING) ? 0 : 1);
535
536 if (*error != 0) {
537 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,*error,0,0,0);
538 return NULL;
539 }
540
541 sp = key_allocsp(&spidx, dir);
542 }
543
544 /* SP found */
545 if (sp != NULL) {
546 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
547 printf("DP ipsec4_getpolicybyaddr called "
548 "to allocate SP:0x%llx\n",
549 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
550 *error = 0;
551 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0);
552 return sp;
553 }
554
555 /* no SP found */
556 lck_mtx_lock(sadb_mutex);
557 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
558 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
559 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
560 ip4_def_policy.policy,
561 IPSEC_POLICY_NONE));
562 ip4_def_policy.policy = IPSEC_POLICY_NONE;
563 }
564 ip4_def_policy.refcnt++;
565 lck_mtx_unlock(sadb_mutex);
566 *error = 0;
567 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3,*error,0,0,0);
568 return &ip4_def_policy;
569}
570
571/* Match with bound interface rather than src addr.
572 * Unlike getpolicybyaddr, do not set the default policy.
573 * Return 0 if should continue processing, or -1 if packet
574 * should be dropped.
575 */
576int
577ipsec4_getpolicybyinterface(struct mbuf *m,
578 u_int dir,
579 int *flags,
580 struct ip_out_args *ipoa,
581 struct secpolicy **sp)
582{
583 struct secpolicyindex spidx;
584 int error = 0;
585
586 if (ipsec_bypass != 0)
587 return 0;
588
589 /* Sanity check */
590 if (m == NULL || ipoa == NULL || sp == NULL)
591 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n");
592
593 if (ipoa->ipoa_boundif == IFSCOPE_NONE)
594 return 0;
595
596 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
597 bzero(&spidx, sizeof(spidx));
598
599 /* make a index to look for a policy */
600 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
601 ipoa->ipoa_boundif, 4);
602
603 if (error != 0) {
604 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
605 return 0;
606 }
607
608 *sp = key_allocsp(&spidx, dir);
609
610 /* Return SP, whether NULL or not */
611 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
612 if ((*sp)->ipsec_if == NULL) {
613 /* Invalid to capture on an interface without redirect */
614 key_freesp(*sp, KEY_SADB_UNLOCKED);
615 *sp = NULL;
616 return -1;
617 } else if ((*sp)->disabled) {
618 /* Disabled policies go in the clear */
619 key_freesp(*sp, KEY_SADB_UNLOCKED);
620 *sp = NULL;
621 *flags |= IP_NOIPSEC; /* Avoid later IPSec check */
622 } else {
623 /* If policy is enabled, redirect to ipsec interface */
624 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
625 }
626 }
627
628 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0);
629
630 return 0;
631}
632
633
634#if INET6
635/*
636 * For OUTBOUND packet having a socket. Searching SPD for packet,
637 * and return a pointer to SP.
638 * OUT: NULL: no apropreate SP found, the following value is set to error.
639 * 0 : bypass
640 * EACCES : discard packet.
641 * ENOENT : ipsec_acquire() in progress, maybe.
642 * others : error occurred.
643 * others: a pointer to SP
644 */
645struct secpolicy *
646ipsec6_getpolicybysock(struct mbuf *m,
647 u_int dir,
648 struct socket *so,
649 int *error)
650{
651 struct inpcbpolicy *pcbsp = NULL;
652 struct secpolicy *currsp = NULL; /* policy on socket */
653 struct secpolicy *kernsp = NULL; /* policy on kernel */
654
655 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
656
657 /* sanity check */
658 if (m == NULL || so == NULL || error == NULL)
659 panic("ipsec6_getpolicybysock: NULL pointer was passed.\n");
660
661#if DIAGNOSTIC
662 if (SOCK_DOM(so) != PF_INET6)
663 panic("ipsec6_getpolicybysock: socket domain != inet6\n");
664#endif
665
666 pcbsp = sotoin6pcb(so)->in6p_sp;
667
668 if (!pcbsp){
669 return ipsec6_getpolicybyaddr(m, dir, 0, error);
670 }
671
672 /* set spidx in pcb */
673 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
674
675 /* sanity check */
676 if (pcbsp == NULL)
677 panic("ipsec6_getpolicybysock: pcbsp is NULL.\n");
678
679 switch (dir) {
680 case IPSEC_DIR_INBOUND:
681 currsp = pcbsp->sp_in;
682 break;
683 case IPSEC_DIR_OUTBOUND:
684 currsp = pcbsp->sp_out;
685 break;
686 default:
687 panic("ipsec6_getpolicybysock: illegal direction.\n");
688 }
689
690 /* sanity check */
691 if (currsp == NULL)
692 panic("ipsec6_getpolicybysock: currsp is NULL.\n");
693
694 /* when privilieged socket */
695 if (pcbsp->priv) {
696 switch (currsp->policy) {
697 case IPSEC_POLICY_BYPASS:
698 lck_mtx_lock(sadb_mutex);
699 currsp->refcnt++;
700 lck_mtx_unlock(sadb_mutex);
701 *error = 0;
702 return currsp;
703
704 case IPSEC_POLICY_ENTRUST:
705 /* look for a policy in SPD */
706 kernsp = key_allocsp(&currsp->spidx, dir);
707
708 /* SP found */
709 if (kernsp != NULL) {
710 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
711 printf("DP ipsec6_getpolicybysock called "
712 "to allocate SP:0x%llx\n",
713 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
714 *error = 0;
715 return kernsp;
716 }
717
718 /* no SP found */
719 lck_mtx_lock(sadb_mutex);
720 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
721 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
722 ipseclog((LOG_INFO,
723 "fixed system default policy: %d->%d\n",
724 ip6_def_policy.policy, IPSEC_POLICY_NONE));
725 ip6_def_policy.policy = IPSEC_POLICY_NONE;
726 }
727 ip6_def_policy.refcnt++;
728 lck_mtx_unlock(sadb_mutex);
729 *error = 0;
730 return &ip6_def_policy;
731
732 case IPSEC_POLICY_IPSEC:
733 lck_mtx_lock(sadb_mutex);
734 currsp->refcnt++;
735 lck_mtx_unlock(sadb_mutex);
736 *error = 0;
737 return currsp;
738
739 default:
740 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
741 "Invalid policy for PCB %d\n", currsp->policy));
742 *error = EINVAL;
743 return NULL;
744 }
745 /* NOTREACHED */
746 }
747
748 /* when non-privilieged socket */
749 /* look for a policy in SPD */
750 kernsp = key_allocsp(&currsp->spidx, dir);
751
752 /* SP found */
753 if (kernsp != NULL) {
754 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
755 printf("DP ipsec6_getpolicybysock called "
756 "to allocate SP:0x%llx\n",
757 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
758 *error = 0;
759 return kernsp;
760 }
761
762 /* no SP found */
763 switch (currsp->policy) {
764 case IPSEC_POLICY_BYPASS:
765 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
766 "Illegal policy for non-priviliged defined %d\n",
767 currsp->policy));
768 *error = EINVAL;
769 return NULL;
770
771 case IPSEC_POLICY_ENTRUST:
772 lck_mtx_lock(sadb_mutex);
773 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
774 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
775 ipseclog((LOG_INFO,
776 "fixed system default policy: %d->%d\n",
777 ip6_def_policy.policy, IPSEC_POLICY_NONE));
778 ip6_def_policy.policy = IPSEC_POLICY_NONE;
779 }
780 ip6_def_policy.refcnt++;
781 lck_mtx_unlock(sadb_mutex);
782 *error = 0;
783 return &ip6_def_policy;
784
785 case IPSEC_POLICY_IPSEC:
786 lck_mtx_lock(sadb_mutex);
787 currsp->refcnt++;
788 lck_mtx_unlock(sadb_mutex);
789 *error = 0;
790 return currsp;
791
792 default:
793 ipseclog((LOG_ERR,
794 "ipsec6_policybysock: Invalid policy for PCB %d\n",
795 currsp->policy));
796 *error = EINVAL;
797 return NULL;
798 }
799 /* NOTREACHED */
800}
801
802/*
803 * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet,
804 * and return a pointer to SP.
805 * `flag' means that packet is to be forwarded whether or not.
806 * flag = 1: forwad
807 * OUT: positive: a pointer to the entry for security policy leaf matched.
808 * NULL: no apropreate SP found, the following value is set to error.
809 * 0 : bypass
810 * EACCES : discard packet.
811 * ENOENT : ipsec_acquire() in progress, maybe.
812 * others : error occurred.
813 */
814#ifndef IP_FORWARDING
815#define IP_FORWARDING 1
816#endif
817
818struct secpolicy *
819ipsec6_getpolicybyaddr(struct mbuf *m,
820 u_int dir,
821 int flag,
822 int *error)
823{
824 struct secpolicy *sp = NULL;
825
826 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
827
828 /* sanity check */
829 if (m == NULL || error == NULL)
830 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n");
831
832 {
833 struct secpolicyindex spidx;
834
835 bzero(&spidx, sizeof(spidx));
836
837 /* make a index to look for a policy */
838 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
839 (flag & IP_FORWARDING) ? 0 : 1);
840
841 if (*error != 0)
842 return NULL;
843
844 sp = key_allocsp(&spidx, dir);
845 }
846
847 /* SP found */
848 if (sp != NULL) {
849 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
850 printf("DP ipsec6_getpolicybyaddr called "
851 "to allocate SP:0x%llx\n",
852 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
853 *error = 0;
854 return sp;
855 }
856
857 /* no SP found */
858 lck_mtx_lock(sadb_mutex);
859 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
860 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
861 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
862 ip6_def_policy.policy, IPSEC_POLICY_NONE));
863 ip6_def_policy.policy = IPSEC_POLICY_NONE;
864 }
865 ip6_def_policy.refcnt++;
866 lck_mtx_unlock(sadb_mutex);
867 *error = 0;
868 return &ip6_def_policy;
869}
870
871/* Match with bound interface rather than src addr.
872 * Unlike getpolicybyaddr, do not set the default policy.
873 * Return 0 if should continue processing, or -1 if packet
874 * should be dropped.
875 */
876int
877ipsec6_getpolicybyinterface(struct mbuf *m,
878 u_int dir,
879 int flag,
880 struct ip6_out_args *ip6oap,
881 int *noipsec,
882 struct secpolicy **sp)
883{
884 struct secpolicyindex spidx;
885 int error = 0;
886
887 if (ipsec_bypass != 0)
888 return 0;
889
890 /* Sanity check */
891 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL)
892 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n");
893
894 *noipsec = 0;
895
896 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE)
897 return 0;
898
899 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
900 bzero(&spidx, sizeof(spidx));
901
902 /* make a index to look for a policy */
903 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
904 ip6oap->ip6oa_boundif, 6);
905
906 if (error != 0) {
907 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
908 return 0;
909 }
910
911 *sp = key_allocsp(&spidx, dir);
912
913 /* Return SP, whether NULL or not */
914 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
915 if ((*sp)->ipsec_if == NULL) {
916 /* Invalid to capture on an interface without redirect */
917 key_freesp(*sp, KEY_SADB_UNLOCKED);
918 *sp = NULL;
919 return -1;
920 } else if ((*sp)->disabled) {
921 /* Disabled policies go in the clear */
922 key_freesp(*sp, KEY_SADB_UNLOCKED);
923 *sp = NULL;
924 *noipsec = 1; /* Avoid later IPSec check */
925 } else {
926 /* If policy is enabled, redirect to ipsec interface */
927 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
928 }
929 }
930
931 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0);
932
933 return 0;
934}
935#endif /* INET6 */
936
937/*
938 * set IP address into spidx from mbuf.
939 * When Forwarding packet and ICMP echo reply, this function is used.
940 *
941 * IN: get the followings from mbuf.
942 * protocol family, src, dst, next protocol
943 * OUT:
944 * 0: success.
945 * other: failure, and set errno.
946 */
947static int
948ipsec_setspidx_mbuf(
949 struct secpolicyindex *spidx,
950 u_int dir,
951 __unused u_int family,
952 struct mbuf *m,
953 int needport)
954{
955 int error;
956
957 /* sanity check */
958 if (spidx == NULL || m == NULL)
959 panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n");
960
961 bzero(spidx, sizeof(*spidx));
962
963 error = ipsec_setspidx(m, spidx, needport, 0);
964 if (error)
965 goto bad;
966 spidx->dir = dir;
967
968 return 0;
969
970 bad:
971 /* XXX initialize */
972 bzero(spidx, sizeof(*spidx));
973 return EINVAL;
974}
975
976static int
977ipsec_setspidx_interface(
978 struct secpolicyindex *spidx,
979 u_int dir,
980 struct mbuf *m,
981 int needport,
982 int ifindex,
983 int ip_version)
984{
985 int error;
986
987 /* sanity check */
988 if (spidx == NULL || m == NULL)
989 panic("ipsec_setspidx_interface: NULL pointer was passed.\n");
990
991 bzero(spidx, sizeof(*spidx));
992
993 error = ipsec_setspidx(m, spidx, needport, ip_version);
994 if (error)
995 goto bad;
996 spidx->dir = dir;
997
998 if (ifindex != 0) {
999 ifnet_head_lock_shared();
1000 spidx->internal_if = ifindex2ifnet[ifindex];
1001 ifnet_head_done();
1002 } else {
1003 spidx->internal_if = NULL;
1004 }
1005
1006 return 0;
1007
1008bad:
1009 return EINVAL;
1010}
1011
1012static int
1013ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1014{
1015 struct secpolicyindex *spidx;
1016 int error;
1017
1018 if (ipsec_bypass != 0)
1019 return 0;
1020
1021 /* sanity check */
1022 if (pcb == NULL)
1023 panic("ipsec4_setspidx_inpcb: no PCB found.\n");
1024 if (pcb->inp_sp == NULL)
1025 panic("ipsec4_setspidx_inpcb: no inp_sp found.\n");
1026 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL)
1027 panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n");
1028
1029 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1030 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1031
1032 spidx = &pcb->inp_sp->sp_in->spidx;
1033 error = ipsec_setspidx(m, spidx, 1, 0);
1034 if (error)
1035 goto bad;
1036 spidx->dir = IPSEC_DIR_INBOUND;
1037
1038 spidx = &pcb->inp_sp->sp_out->spidx;
1039 error = ipsec_setspidx(m, spidx, 1, 0);
1040 if (error)
1041 goto bad;
1042 spidx->dir = IPSEC_DIR_OUTBOUND;
1043
1044 return 0;
1045
1046bad:
1047 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1048 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1049 return error;
1050}
1051
1052#if INET6
1053static int
1054ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1055{
1056 struct secpolicyindex *spidx;
1057 int error;
1058
1059 /* sanity check */
1060 if (pcb == NULL)
1061 panic("ipsec6_setspidx_in6pcb: no PCB found.\n");
1062 if (pcb->in6p_sp == NULL)
1063 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n");
1064 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL)
1065 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n");
1066
1067 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1068 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1069
1070 spidx = &pcb->in6p_sp->sp_in->spidx;
1071 error = ipsec_setspidx(m, spidx, 1, 0);
1072 if (error)
1073 goto bad;
1074 spidx->dir = IPSEC_DIR_INBOUND;
1075
1076 spidx = &pcb->in6p_sp->sp_out->spidx;
1077 error = ipsec_setspidx(m, spidx, 1, 0);
1078 if (error)
1079 goto bad;
1080 spidx->dir = IPSEC_DIR_OUTBOUND;
1081
1082 return 0;
1083
1084bad:
1085 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1086 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1087 return error;
1088}
1089#endif
1090
1091/*
1092 * configure security policy index (src/dst/proto/sport/dport)
1093 * by looking at the content of mbuf.
1094 * the caller is responsible for error recovery (like clearing up spidx).
1095 */
1096static int
1097ipsec_setspidx(struct mbuf *m,
1098 struct secpolicyindex *spidx,
1099 int needport,
1100 int force_ip_version)
1101{
1102 struct ip *ip = NULL;
1103 struct ip ipbuf;
1104 u_int v;
1105 struct mbuf *n;
1106 int len;
1107 int error;
1108
1109 if (m == NULL)
1110 panic("ipsec_setspidx: m == 0 passed.\n");
1111
1112 /*
1113 * validate m->m_pkthdr.len. we see incorrect length if we
1114 * mistakenly call this function with inconsistent mbuf chain
1115 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1116 */
1117 len = 0;
1118 for (n = m; n; n = n->m_next)
1119 len += n->m_len;
1120 if (m->m_pkthdr.len != len) {
1121 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1122 printf("ipsec_setspidx: "
1123 "total of m_len(%d) != pkthdr.len(%d), "
1124 "ignored.\n",
1125 len, m->m_pkthdr.len));
1126 return EINVAL;
1127 }
1128
1129 if (m->m_pkthdr.len < sizeof(struct ip)) {
1130 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1131 printf("ipsec_setspidx: "
1132 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1133 m->m_pkthdr.len));
1134 return EINVAL;
1135 }
1136
1137 if (m->m_len >= sizeof(*ip))
1138 ip = mtod(m, struct ip *);
1139 else {
1140 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1141 ip = &ipbuf;
1142 }
1143
1144 if (force_ip_version) {
1145 v = force_ip_version;
1146 } else {
1147#ifdef _IP_VHL
1148 v = _IP_VHL_V(ip->ip_vhl);
1149#else
1150 v = ip->ip_v;
1151#endif
1152 }
1153 switch (v) {
1154 case 4:
1155 error = ipsec4_setspidx_ipaddr(m, spidx);
1156 if (error)
1157 return error;
1158 ipsec4_get_ulp(m, spidx, needport);
1159 return 0;
1160#if INET6
1161 case 6:
1162 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1163 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1164 printf("ipsec_setspidx: "
1165 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1166 "ignored.\n", m->m_pkthdr.len));
1167 return EINVAL;
1168 }
1169 error = ipsec6_setspidx_ipaddr(m, spidx);
1170 if (error)
1171 return error;
1172 ipsec6_get_ulp(m, spidx, needport);
1173 return 0;
1174#endif
1175 default:
1176 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1177 printf("ipsec_setspidx: "
1178 "unknown IP version %u, ignored.\n", v));
1179 return EINVAL;
1180 }
1181}
1182
1183static void
1184ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1185{
1186 struct ip ip;
1187 struct ip6_ext ip6e;
1188 u_int8_t nxt;
1189 int off;
1190 struct tcphdr th;
1191 struct udphdr uh;
1192
1193 /* sanity check */
1194 if (m == NULL)
1195 panic("ipsec4_get_ulp: NULL pointer was passed.\n");
1196 if (m->m_pkthdr.len < sizeof(ip))
1197 panic("ipsec4_get_ulp: too short\n");
1198
1199 /* set default */
1200 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1201 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1202 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1203
1204 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1205 /* ip_input() flips it into host endian XXX need more checking */
1206 if (ip.ip_off & (IP_MF | IP_OFFMASK))
1207 return;
1208
1209 nxt = ip.ip_p;
1210#ifdef _IP_VHL
1211 off = _IP_VHL_HL(ip->ip_vhl) << 2;
1212#else
1213 off = ip.ip_hl << 2;
1214#endif
1215 while (off < m->m_pkthdr.len) {
1216 switch (nxt) {
1217 case IPPROTO_TCP:
1218 spidx->ul_proto = nxt;
1219 if (!needport)
1220 return;
1221 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1222 return;
1223 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1224 ((struct sockaddr_in *)&spidx->src)->sin_port =
1225 th.th_sport;
1226 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1227 th.th_dport;
1228 return;
1229 case IPPROTO_UDP:
1230 spidx->ul_proto = nxt;
1231 if (!needport)
1232 return;
1233 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1234 return;
1235 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1236 ((struct sockaddr_in *)&spidx->src)->sin_port =
1237 uh.uh_sport;
1238 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1239 uh.uh_dport;
1240 return;
1241 case IPPROTO_AH:
1242 if (off + sizeof(ip6e) > m->m_pkthdr.len)
1243 return;
1244 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1245 off += (ip6e.ip6e_len + 2) << 2;
1246 nxt = ip6e.ip6e_nxt;
1247 break;
1248 case IPPROTO_ICMP:
1249 default:
1250 /* XXX intermediate headers??? */
1251 spidx->ul_proto = nxt;
1252 return;
1253 }
1254 }
1255}
1256
1257/* assumes that m is sane */
1258static int
1259ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1260{
1261 struct ip *ip = NULL;
1262 struct ip ipbuf;
1263 struct sockaddr_in *sin;
1264
1265 if (m->m_len >= sizeof(*ip))
1266 ip = mtod(m, struct ip *);
1267 else {
1268 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1269 ip = &ipbuf;
1270 }
1271
1272 sin = (struct sockaddr_in *)&spidx->src;
1273 bzero(sin, sizeof(*sin));
1274 sin->sin_family = AF_INET;
1275 sin->sin_len = sizeof(struct sockaddr_in);
1276 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1277 spidx->prefs = sizeof(struct in_addr) << 3;
1278
1279 sin = (struct sockaddr_in *)&spidx->dst;
1280 bzero(sin, sizeof(*sin));
1281 sin->sin_family = AF_INET;
1282 sin->sin_len = sizeof(struct sockaddr_in);
1283 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1284 spidx->prefd = sizeof(struct in_addr) << 3;
1285
1286 return 0;
1287}
1288
1289#if INET6
1290static void
1291ipsec6_get_ulp(struct mbuf *m,
1292 struct secpolicyindex *spidx,
1293 int needport)
1294{
1295 int off, nxt;
1296 struct tcphdr th;
1297 struct udphdr uh;
1298
1299 /* sanity check */
1300 if (m == NULL)
1301 panic("ipsec6_get_ulp: NULL pointer was passed.\n");
1302
1303 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1304 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1305
1306 /* set default */
1307 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1308 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1309 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1310
1311 nxt = -1;
1312 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1313 if (off < 0 || m->m_pkthdr.len < off)
1314 return;
1315
1316 switch (nxt) {
1317 case IPPROTO_TCP:
1318 spidx->ul_proto = nxt;
1319 if (!needport)
1320 break;
1321 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1322 break;
1323 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1324 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1325 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1326 break;
1327 case IPPROTO_UDP:
1328 spidx->ul_proto = nxt;
1329 if (!needport)
1330 break;
1331 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1332 break;
1333 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1334 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1335 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1336 break;
1337 case IPPROTO_ICMPV6:
1338 default:
1339 /* XXX intermediate headers??? */
1340 spidx->ul_proto = nxt;
1341 break;
1342 }
1343}
1344
1345/* assumes that m is sane */
1346static int
1347ipsec6_setspidx_ipaddr(struct mbuf *m,
1348 struct secpolicyindex *spidx)
1349{
1350 struct ip6_hdr *ip6 = NULL;
1351 struct ip6_hdr ip6buf;
1352 struct sockaddr_in6 *sin6;
1353
1354 if (m->m_len >= sizeof(*ip6))
1355 ip6 = mtod(m, struct ip6_hdr *);
1356 else {
1357 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1358 ip6 = &ip6buf;
1359 }
1360
1361 sin6 = (struct sockaddr_in6 *)&spidx->src;
1362 bzero(sin6, sizeof(*sin6));
1363 sin6->sin6_family = AF_INET6;
1364 sin6->sin6_len = sizeof(struct sockaddr_in6);
1365 bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1366 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1367 sin6->sin6_addr.s6_addr16[1] = 0;
1368 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1369 }
1370 spidx->prefs = sizeof(struct in6_addr) << 3;
1371
1372 sin6 = (struct sockaddr_in6 *)&spidx->dst;
1373 bzero(sin6, sizeof(*sin6));
1374 sin6->sin6_family = AF_INET6;
1375 sin6->sin6_len = sizeof(struct sockaddr_in6);
1376 bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1377 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1378 sin6->sin6_addr.s6_addr16[1] = 0;
1379 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1380 }
1381 spidx->prefd = sizeof(struct in6_addr) << 3;
1382
1383 return 0;
1384}
1385#endif
1386
1387static struct inpcbpolicy *
1388ipsec_newpcbpolicy(void)
1389{
1390 struct inpcbpolicy *p;
1391
1392 p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK);
1393 return p;
1394}
1395
1396static void
1397ipsec_delpcbpolicy(struct inpcbpolicy *p)
1398{
1399 FREE(p, M_SECA);
1400}
1401
1402/* initialize policy in PCB */
1403int
1404ipsec_init_policy(struct socket *so,
1405 struct inpcbpolicy **pcb_sp)
1406{
1407 struct inpcbpolicy *new;
1408
1409 /* sanity check. */
1410 if (so == NULL || pcb_sp == NULL)
1411 panic("ipsec_init_policy: NULL pointer was passed.\n");
1412
1413 new = ipsec_newpcbpolicy();
1414 if (new == NULL) {
1415 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1416 return ENOBUFS;
1417 }
1418 bzero(new, sizeof(*new));
1419
1420#ifdef __APPLE__
1421 if (kauth_cred_issuser(so->so_cred))
1422#else
1423 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1424#endif
1425 new->priv = 1;
1426 else
1427 new->priv = 0;
1428
1429 if ((new->sp_in = key_newsp()) == NULL) {
1430 ipsec_delpcbpolicy(new);
1431 return ENOBUFS;
1432 }
1433 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1434 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1435
1436 if ((new->sp_out = key_newsp()) == NULL) {
1437 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1438 ipsec_delpcbpolicy(new);
1439 return ENOBUFS;
1440 }
1441 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1442 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1443
1444 *pcb_sp = new;
1445
1446 return 0;
1447}
1448
1449/* copy old ipsec policy into new */
1450int
1451ipsec_copy_policy(struct inpcbpolicy *old,
1452 struct inpcbpolicy *new)
1453{
1454 struct secpolicy *sp;
1455
1456 if (ipsec_bypass != 0)
1457 return 0;
1458
1459 sp = ipsec_deepcopy_policy(old->sp_in);
1460 if (sp) {
1461 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1462 new->sp_in = sp;
1463 } else
1464 return ENOBUFS;
1465
1466 sp = ipsec_deepcopy_policy(old->sp_out);
1467 if (sp) {
1468 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1469 new->sp_out = sp;
1470 } else
1471 return ENOBUFS;
1472
1473 new->priv = old->priv;
1474
1475 return 0;
1476}
1477
1478/* deep-copy a policy in PCB */
1479static struct secpolicy *
1480ipsec_deepcopy_policy(struct secpolicy *src)
1481{
1482 struct ipsecrequest *newchain = NULL;
1483 struct ipsecrequest *p;
1484 struct ipsecrequest **q;
1485 struct ipsecrequest *r;
1486 struct secpolicy *dst;
1487
1488 if (src == NULL)
1489 return NULL;
1490 dst = key_newsp();
1491 if (dst == NULL)
1492 return NULL;
1493
1494 /*
1495 * deep-copy IPsec request chain. This is required since struct
1496 * ipsecrequest is not reference counted.
1497 */
1498 q = &newchain;
1499 for (p = src->req; p; p = p->next) {
1500 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1501 M_SECA, M_WAITOK | M_ZERO);
1502 if (*q == NULL)
1503 goto fail;
1504 (*q)->next = NULL;
1505
1506 (*q)->saidx.proto = p->saidx.proto;
1507 (*q)->saidx.mode = p->saidx.mode;
1508 (*q)->level = p->level;
1509 (*q)->saidx.reqid = p->saidx.reqid;
1510
1511 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1512 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1513
1514 (*q)->sp = dst;
1515
1516 q = &((*q)->next);
1517 }
1518
1519 dst->req = newchain;
1520 dst->state = src->state;
1521 dst->policy = src->policy;
1522 /* do not touch the refcnt fields */
1523
1524 return dst;
1525
1526fail:
1527 for (p = newchain; p; p = r) {
1528 r = p->next;
1529 FREE(p, M_SECA);
1530 p = NULL;
1531 }
1532 key_freesp(dst, KEY_SADB_UNLOCKED);
1533 return NULL;
1534}
1535
1536/* set policy and ipsec request if present. */
1537static int
1538ipsec_set_policy(struct secpolicy **pcb_sp,
1539 __unused int optname,
1540 caddr_t request,
1541 size_t len,
1542 int priv)
1543{
1544 struct sadb_x_policy *xpl;
1545 struct secpolicy *newsp = NULL;
1546 int error;
1547
1548 /* sanity check. */
1549 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL)
1550 return EINVAL;
1551 if (len < sizeof(*xpl))
1552 return EINVAL;
1553 xpl = (struct sadb_x_policy *)(void *)request;
1554
1555 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1556 printf("ipsec_set_policy: passed policy\n");
1557 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1558
1559 /* check policy type */
1560 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1561 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1562 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE)
1563 return EINVAL;
1564
1565 /* check privileged socket */
1566 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS)
1567 return EACCES;
1568
1569 /* allocation new SP entry */
1570 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL)
1571 return error;
1572
1573 newsp->state = IPSEC_SPSTATE_ALIVE;
1574
1575 /* clear old SP and set new SP */
1576 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1577 *pcb_sp = newsp;
1578 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1579 printf("ipsec_set_policy: new policy\n");
1580 kdebug_secpolicy(newsp));
1581
1582 return 0;
1583}
1584
1585int
1586ipsec4_set_policy(struct inpcb *inp,
1587 int optname,
1588 caddr_t request,
1589 size_t len,
1590 int priv)
1591{
1592 struct sadb_x_policy *xpl;
1593 struct secpolicy **pcb_sp;
1594 int error = 0;
1595 struct sadb_x_policy xpl_aligned_buf;
1596 u_int8_t *xpl_unaligned;
1597
1598 /* sanity check. */
1599 if (inp == NULL || request == NULL)
1600 return EINVAL;
1601 if (len < sizeof(*xpl))
1602 return EINVAL;
1603 xpl = (struct sadb_x_policy *)(void *)request;
1604
1605 /* This is a new mbuf allocated by soopt_getm() */
1606 if (IPSEC_IS_P2ALIGNED(xpl)) {
1607 xpl_unaligned = NULL;
1608 } else {
1609 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1610 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1611 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1612 }
1613
1614 if (inp->inp_sp == NULL) {
1615 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1616 if (error)
1617 return error;
1618 }
1619
1620 /* select direction */
1621 switch (xpl->sadb_x_policy_dir) {
1622 case IPSEC_DIR_INBOUND:
1623 pcb_sp = &inp->inp_sp->sp_in;
1624 break;
1625 case IPSEC_DIR_OUTBOUND:
1626 pcb_sp = &inp->inp_sp->sp_out;
1627 break;
1628 default:
1629 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1630 xpl->sadb_x_policy_dir));
1631 return EINVAL;
1632 }
1633
1634 /* turn bypass off */
1635 if (ipsec_bypass != 0)
1636 ipsec_bypass = 0;
1637
1638 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1639}
1640
1641/* delete policy in PCB */
1642int
1643ipsec4_delete_pcbpolicy(struct inpcb *inp)
1644{
1645
1646 /* sanity check. */
1647 if (inp == NULL)
1648 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n");
1649
1650 if (inp->inp_sp == NULL)
1651 return 0;
1652
1653 if (inp->inp_sp->sp_in != NULL) {
1654 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1655 inp->inp_sp->sp_in = NULL;
1656 }
1657
1658 if (inp->inp_sp->sp_out != NULL) {
1659 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1660 inp->inp_sp->sp_out = NULL;
1661 }
1662
1663 ipsec_delpcbpolicy(inp->inp_sp);
1664 inp->inp_sp = NULL;
1665
1666 return 0;
1667}
1668
1669#if INET6
1670int
1671ipsec6_set_policy(struct in6pcb *in6p,
1672 int optname,
1673 caddr_t request,
1674 size_t len,
1675 int priv)
1676{
1677 struct sadb_x_policy *xpl;
1678 struct secpolicy **pcb_sp;
1679 int error = 0;
1680 struct sadb_x_policy xpl_aligned_buf;
1681 u_int8_t *xpl_unaligned;
1682
1683 /* sanity check. */
1684 if (in6p == NULL || request == NULL)
1685 return EINVAL;
1686 if (len < sizeof(*xpl))
1687 return EINVAL;
1688 xpl = (struct sadb_x_policy *)(void *)request;
1689
1690 /* This is a new mbuf allocated by soopt_getm() */
1691 if (IPSEC_IS_P2ALIGNED(xpl)) {
1692 xpl_unaligned = NULL;
1693 } else {
1694 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1695 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1696 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1697 }
1698
1699 if (in6p->in6p_sp == NULL) {
1700 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1701 if (error)
1702 return error;
1703 }
1704
1705 /* select direction */
1706 switch (xpl->sadb_x_policy_dir) {
1707 case IPSEC_DIR_INBOUND:
1708 pcb_sp = &in6p->in6p_sp->sp_in;
1709 break;
1710 case IPSEC_DIR_OUTBOUND:
1711 pcb_sp = &in6p->in6p_sp->sp_out;
1712 break;
1713 default:
1714 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1715 xpl->sadb_x_policy_dir));
1716 return EINVAL;
1717 }
1718
1719 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1720}
1721
1722int
1723ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1724{
1725
1726 /* sanity check. */
1727 if (in6p == NULL)
1728 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n");
1729
1730 if (in6p->in6p_sp == NULL)
1731 return 0;
1732
1733 if (in6p->in6p_sp->sp_in != NULL) {
1734 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1735 in6p->in6p_sp->sp_in = NULL;
1736 }
1737
1738 if (in6p->in6p_sp->sp_out != NULL) {
1739 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1740 in6p->in6p_sp->sp_out = NULL;
1741 }
1742
1743 ipsec_delpcbpolicy(in6p->in6p_sp);
1744 in6p->in6p_sp = NULL;
1745
1746 return 0;
1747}
1748#endif
1749
1750/*
1751 * return current level.
1752 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned.
1753 */
1754u_int
1755ipsec_get_reqlevel(struct ipsecrequest *isr)
1756{
1757 u_int level = 0;
1758 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1759
1760 /* sanity check */
1761 if (isr == NULL || isr->sp == NULL)
1762 panic("ipsec_get_reqlevel: NULL pointer is passed.\n");
1763 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1764 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family)
1765 panic("ipsec_get_reqlevel: family mismatched.\n");
1766
1767/* XXX note that we have ipseclog() expanded here - code sync issue */
1768#define IPSEC_CHECK_DEFAULT(lev) \
1769 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1770 && (lev) != IPSEC_LEVEL_UNIQUE) \
1771 ? (ipsec_debug \
1772 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1773 (lev), IPSEC_LEVEL_REQUIRE) \
1774 : (void)0), \
1775 (lev) = IPSEC_LEVEL_REQUIRE, \
1776 (lev) \
1777 : (lev))
1778
1779 /* set default level */
1780 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1781#if INET
1782 case AF_INET:
1783 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1784 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1785 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1786 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1787 break;
1788#endif
1789#if INET6
1790 case AF_INET6:
1791 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1792 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1793 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1794 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1795 break;
1796#endif /* INET6 */
1797 default:
1798 panic("key_get_reqlevel: Unknown family. %d\n",
1799 ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1800 }
1801
1802#undef IPSEC_CHECK_DEFAULT
1803
1804 /* set level */
1805 switch (isr->level) {
1806 case IPSEC_LEVEL_DEFAULT:
1807 switch (isr->saidx.proto) {
1808 case IPPROTO_ESP:
1809 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1810 level = esp_net_deflev;
1811 else
1812 level = esp_trans_deflev;
1813 break;
1814 case IPPROTO_AH:
1815 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1816 level = ah_net_deflev;
1817 else
1818 level = ah_trans_deflev;
1819 break;
1820 case IPPROTO_IPCOMP:
1821 /*
1822 * we don't really care, as IPcomp document says that
1823 * we shouldn't compress small packets
1824 */
1825 level = IPSEC_LEVEL_USE;
1826 break;
1827 default:
1828 panic("ipsec_get_reqlevel: "
1829 "Illegal protocol defined %u\n",
1830 isr->saidx.proto);
1831 }
1832 break;
1833
1834 case IPSEC_LEVEL_USE:
1835 case IPSEC_LEVEL_REQUIRE:
1836 level = isr->level;
1837 break;
1838 case IPSEC_LEVEL_UNIQUE:
1839 level = IPSEC_LEVEL_REQUIRE;
1840 break;
1841
1842 default:
1843 panic("ipsec_get_reqlevel: Illegal IPsec level %u\n",
1844 isr->level);
1845 }
1846
1847 return level;
1848}
1849
1850/*
1851 * Check AH/ESP integrity.
1852 * OUT:
1853 * 0: valid
1854 * 1: invalid
1855 */
1856static int
1857ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1858{
1859 struct ipsecrequest *isr;
1860 u_int level;
1861 int need_auth, need_conf, need_icv;
1862
1863 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1864 printf("ipsec_in_reject: using SP\n");
1865 kdebug_secpolicy(sp));
1866
1867 /* check policy */
1868 switch (sp->policy) {
1869 case IPSEC_POLICY_DISCARD:
1870 case IPSEC_POLICY_GENERATE:
1871 return 1;
1872 case IPSEC_POLICY_BYPASS:
1873 case IPSEC_POLICY_NONE:
1874 return 0;
1875
1876 case IPSEC_POLICY_IPSEC:
1877 break;
1878
1879 case IPSEC_POLICY_ENTRUST:
1880 default:
1881 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
1882 }
1883
1884 need_auth = 0;
1885 need_conf = 0;
1886 need_icv = 0;
1887
1888 /* XXX should compare policy against ipsec header history */
1889
1890 for (isr = sp->req; isr != NULL; isr = isr->next) {
1891
1892 /* get current level */
1893 level = ipsec_get_reqlevel(isr);
1894
1895 switch (isr->saidx.proto) {
1896 case IPPROTO_ESP:
1897 if (level == IPSEC_LEVEL_REQUIRE) {
1898 need_conf++;
1899
1900#if 0
1901 /* this won't work with multiple input threads - isr->sav would change
1902 * with every packet and is not necessarily related to the current packet
1903 * being processed. If ESP processing is required - the esp code should
1904 * make sure that the integrity check is present and correct. I don't see
1905 * why it would be necessary to check for the presence of the integrity
1906 * check value here. I think this is just wrong.
1907 * isr->sav has been removed.
1908 * %%%%%% this needs to be re-worked at some point but I think the code below can
1909 * be ignored for now.
1910 */
1911 if (isr->sav != NULL
1912 && isr->sav->flags == SADB_X_EXT_NONE
1913 && isr->sav->alg_auth != SADB_AALG_NONE)
1914 need_icv++;
1915#endif
1916 }
1917 break;
1918 case IPPROTO_AH:
1919 if (level == IPSEC_LEVEL_REQUIRE) {
1920 need_auth++;
1921 need_icv++;
1922 }
1923 break;
1924 case IPPROTO_IPCOMP:
1925 /*
1926 * we don't really care, as IPcomp document says that
1927 * we shouldn't compress small packets, IPComp policy
1928 * should always be treated as being in "use" level.
1929 */
1930 break;
1931 }
1932 }
1933
1934 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1935 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
1936 need_auth, need_conf, need_icv, m->m_flags));
1937
1938 if ((need_conf && !(m->m_flags & M_DECRYPTED))
1939 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
1940 || (need_auth && !(m->m_flags & M_AUTHIPHDR)))
1941 return 1;
1942
1943 return 0;
1944}
1945
1946/*
1947 * Check AH/ESP integrity.
1948 * This function is called from tcp_input(), udp_input(),
1949 * and {ah,esp}4_input for tunnel mode
1950 */
1951int
1952ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
1953{
1954 struct secpolicy *sp = NULL;
1955 int error;
1956 int result;
1957
1958 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
1959 /* sanity check */
1960 if (m == NULL)
1961 return 0; /* XXX should be panic ? */
1962
1963 /* get SP for this packet.
1964 * When we are called from ip_forward(), we call
1965 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
1966 */
1967 if (so == NULL)
1968 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
1969 else
1970 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
1971
1972 if (sp == NULL)
1973 return 0; /* XXX should be panic ?
1974 * -> No, there may be error. */
1975
1976 result = ipsec_in_reject(sp, m);
1977 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1978 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
1979 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
1980 key_freesp(sp, KEY_SADB_UNLOCKED);
1981
1982 return result;
1983}
1984
1985int
1986ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
1987{
1988 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
1989 if (inp == NULL)
1990 return ipsec4_in_reject_so(m, NULL);
1991 if (inp->inp_socket)
1992 return ipsec4_in_reject_so(m, inp->inp_socket);
1993 else
1994 panic("ipsec4_in_reject: invalid inpcb/socket");
1995
1996 /* NOTREACHED */
1997 return 0;
1998}
1999
2000#if INET6
2001/*
2002 * Check AH/ESP integrity.
2003 * This function is called from tcp6_input(), udp6_input(),
2004 * and {ah,esp}6_input for tunnel mode
2005 */
2006int
2007ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2008{
2009 struct secpolicy *sp = NULL;
2010 int error;
2011 int result;
2012
2013 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2014 /* sanity check */
2015 if (m == NULL)
2016 return 0; /* XXX should be panic ? */
2017
2018 /* get SP for this packet.
2019 * When we are called from ip_forward(), we call
2020 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2021 */
2022 if (so == NULL)
2023 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2024 else
2025 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2026
2027 if (sp == NULL)
2028 return 0; /* XXX should be panic ? */
2029
2030 result = ipsec_in_reject(sp, m);
2031 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2032 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2033 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2034 key_freesp(sp, KEY_SADB_UNLOCKED);
2035
2036 return result;
2037}
2038
2039int
2040ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2041{
2042
2043 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2044 if (in6p == NULL)
2045 return ipsec6_in_reject_so(m, NULL);
2046 if (in6p->in6p_socket)
2047 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2048 else
2049 panic("ipsec6_in_reject: invalid in6p/socket");
2050
2051 /* NOTREACHED */
2052 return 0;
2053}
2054#endif
2055
2056/*
2057 * compute the byte size to be occupied by IPsec header.
2058 * in case it is tunneled, it includes the size of outer IP header.
2059 * NOTE: SP passed is free in this function.
2060 */
2061size_t
2062ipsec_hdrsiz(struct secpolicy *sp)
2063{
2064 struct ipsecrequest *isr;
2065 size_t siz, clen;
2066
2067 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2068 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2069 printf("ipsec_hdrsiz: using SP\n");
2070 kdebug_secpolicy(sp));
2071
2072 /* check policy */
2073 switch (sp->policy) {
2074 case IPSEC_POLICY_DISCARD:
2075 case IPSEC_POLICY_GENERATE:
2076 case IPSEC_POLICY_BYPASS:
2077 case IPSEC_POLICY_NONE:
2078 return 0;
2079
2080 case IPSEC_POLICY_IPSEC:
2081 break;
2082
2083 case IPSEC_POLICY_ENTRUST:
2084 default:
2085 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
2086 }
2087
2088 siz = 0;
2089
2090 for (isr = sp->req; isr != NULL; isr = isr->next) {
2091
2092 clen = 0;
2093
2094 switch (isr->saidx.proto) {
2095 case IPPROTO_ESP:
2096#if IPSEC_ESP
2097 clen = esp_hdrsiz(isr);
2098#else
2099 clen = 0; /*XXX*/
2100#endif
2101 break;
2102 case IPPROTO_AH:
2103 clen = ah_hdrsiz(isr);
2104 break;
2105 case IPPROTO_IPCOMP:
2106 clen = sizeof(struct ipcomp);
2107 break;
2108 }
2109
2110 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2111 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2112 case AF_INET:
2113 clen += sizeof(struct ip);
2114 break;
2115#if INET6
2116 case AF_INET6:
2117 clen += sizeof(struct ip6_hdr);
2118 break;
2119#endif
2120 default:
2121 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2122 "unknown AF %d in IPsec tunnel SA\n",
2123 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2124 break;
2125 }
2126 }
2127 siz += clen;
2128 }
2129
2130 return siz;
2131}
2132
2133/* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2134size_t
2135ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp)
2136{
2137 struct secpolicy *sp = NULL;
2138 int error;
2139 size_t size;
2140
2141 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2142 /* sanity check */
2143 if (m == NULL)
2144 return 0; /* XXX should be panic ? */
2145 if (inp != NULL && inp->inp_socket == NULL)
2146 panic("ipsec4_hdrsize: why is socket NULL but there is PCB.");
2147
2148 /* get SP for this packet.
2149 * When we are called from ip_forward(), we call
2150 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2151 */
2152 if (inp == NULL)
2153 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2154 else
2155 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2156
2157 if (sp == NULL)
2158 return 0; /* XXX should be panic ? */
2159
2160 size = ipsec_hdrsiz(sp);
2161 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2162 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2163 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2164 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2165 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2166 key_freesp(sp, KEY_SADB_UNLOCKED);
2167
2168 return size;
2169}
2170
2171#if INET6
2172/* This function is called from ipsec6_hdrsize_tcp(),
2173 * and maybe from ip6_forward.()
2174 */
2175size_t
2176ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p)
2177{
2178 struct secpolicy *sp = NULL;
2179 int error;
2180 size_t size;
2181
2182 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2183 /* sanity check */
2184 if (m == NULL)
2185 return 0; /* XXX shoud be panic ? */
2186 if (in6p != NULL && in6p->in6p_socket == NULL)
2187 panic("ipsec6_hdrsize: why is socket NULL but there is PCB.");
2188
2189 /* get SP for this packet */
2190 /* XXX Is it right to call with IP_FORWARDING. */
2191 if (in6p == NULL)
2192 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2193 else
2194 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2195
2196 if (sp == NULL)
2197 return 0;
2198 size = ipsec_hdrsiz(sp);
2199 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2200 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2201 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2202 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2203 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2204 key_freesp(sp, KEY_SADB_UNLOCKED);
2205
2206 return size;
2207}
2208#endif /*INET6*/
2209
2210#if INET
2211/*
2212 * encapsulate for ipsec tunnel.
2213 * ip->ip_src must be fixed later on.
2214 */
2215int
2216ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
2217{
2218 struct ip *oip;
2219 struct ip *ip;
2220 size_t hlen;
2221 size_t plen;
2222
2223 /* can't tunnel between different AFs */
2224 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2225 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2226 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2227 m_freem(m);
2228 return EINVAL;
2229 }
2230#if 0
2231 /* XXX if the dst is myself, perform nothing. */
2232 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2233 m_freem(m);
2234 return EINVAL;
2235 }
2236#endif
2237
2238 if (m->m_len < sizeof(*ip))
2239 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2240
2241 ip = mtod(m, struct ip *);
2242#ifdef _IP_VHL
2243 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2244#else
2245 hlen = ip->ip_hl << 2;
2246#endif
2247
2248 if (m->m_len != hlen)
2249 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2250
2251 /* generate header checksum */
2252 ip->ip_sum = 0;
2253#ifdef _IP_VHL
2254 ip->ip_sum = in_cksum(m, hlen);
2255#else
2256 ip->ip_sum = in_cksum(m, hlen);
2257#endif
2258
2259 plen = m->m_pkthdr.len;
2260
2261 /*
2262 * grow the mbuf to accomodate the new IPv4 header.
2263 * NOTE: IPv4 options will never be copied.
2264 */
2265 if (M_LEADINGSPACE(m->m_next) < hlen) {
2266 struct mbuf *n;
2267 MGET(n, M_DONTWAIT, MT_DATA);
2268 if (!n) {
2269 m_freem(m);
2270 return ENOBUFS;
2271 }
2272 n->m_len = hlen;
2273 n->m_next = m->m_next;
2274 m->m_next = n;
2275 m->m_pkthdr.len += hlen;
2276 oip = mtod(n, struct ip *);
2277 } else {
2278 m->m_next->m_len += hlen;
2279 m->m_next->m_data -= hlen;
2280 m->m_pkthdr.len += hlen;
2281 oip = mtod(m->m_next, struct ip *);
2282 }
2283 ip = mtod(m, struct ip *);
2284 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2285 m->m_len = sizeof(struct ip);
2286 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2287
2288 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2289 /* ECN consideration. */
2290 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2291#ifdef _IP_VHL
2292 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2293#else
2294 ip->ip_hl = sizeof(struct ip) >> 2;
2295#endif
2296 ip->ip_off &= htons(~IP_OFFMASK);
2297 ip->ip_off &= htons(~IP_MF);
2298 switch (ip4_ipsec_dfbit) {
2299 case 0: /* clear DF bit */
2300 ip->ip_off &= htons(~IP_DF);
2301 break;
2302 case 1: /* set DF bit */
2303 ip->ip_off |= htons(IP_DF);
2304 break;
2305 default: /* copy DF bit */
2306 break;
2307 }
2308 ip->ip_p = IPPROTO_IPIP;
2309 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2310 ip->ip_len = htons(plen + sizeof(struct ip));
2311 else {
2312 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2313 "leave ip_len as is (invalid packet)\n"));
2314 }
2315 if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) {
2316 ip->ip_id = 0;
2317 } else {
2318 ip->ip_id = ip_randomid();
2319 }
2320 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2321 &ip->ip_src, sizeof(ip->ip_src));
2322 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2323 &ip->ip_dst, sizeof(ip->ip_dst));
2324 ip->ip_ttl = IPDEFTTL;
2325
2326 /* XXX Should ip_src be updated later ? */
2327
2328 return 0;
2329}
2330
2331#endif /*INET*/
2332
2333#if INET6
2334int
2335ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
2336{
2337 struct ip6_hdr *oip6;
2338 struct ip6_hdr *ip6;
2339 size_t plen;
2340
2341 /* can't tunnel between different AFs */
2342 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2343 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2344 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2345 m_freem(m);
2346 return EINVAL;
2347 }
2348#if 0
2349 /* XXX if the dst is myself, perform nothing. */
2350 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2351 m_freem(m);
2352 return EINVAL;
2353 }
2354#endif
2355
2356 plen = m->m_pkthdr.len;
2357
2358 /*
2359 * grow the mbuf to accomodate the new IPv6 header.
2360 */
2361 if (m->m_len != sizeof(struct ip6_hdr))
2362 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2363 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2364 struct mbuf *n;
2365 MGET(n, M_DONTWAIT, MT_DATA);
2366 if (!n) {
2367 m_freem(m);
2368 return ENOBUFS;
2369 }
2370 n->m_len = sizeof(struct ip6_hdr);
2371 n->m_next = m->m_next;
2372 m->m_next = n;
2373 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2374 oip6 = mtod(n, struct ip6_hdr *);
2375 } else {
2376 m->m_next->m_len += sizeof(struct ip6_hdr);
2377 m->m_next->m_data -= sizeof(struct ip6_hdr);
2378 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2379 oip6 = mtod(m->m_next, struct ip6_hdr *);
2380 }
2381 ip6 = mtod(m, struct ip6_hdr *);
2382 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2383
2384 /* Fake link-local scope-class addresses */
2385 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src))
2386 oip6->ip6_src.s6_addr16[1] = 0;
2387 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst))
2388 oip6->ip6_dst.s6_addr16[1] = 0;
2389
2390 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2391 /* ECN consideration. */
2392 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2393 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2394 ip6->ip6_plen = htons(plen);
2395 else {
2396 /* ip6->ip6_plen will be updated in ip6_output() */
2397 }
2398 ip6->ip6_nxt = IPPROTO_IPV6;
2399 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2400 &ip6->ip6_src, sizeof(ip6->ip6_src));
2401 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2402 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2403 ip6->ip6_hlim = IPV6_DEFHLIM;
2404
2405 /* XXX Should ip6_src be updated later ? */
2406
2407 return 0;
2408}
2409
2410static int
2411ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav)
2412{
2413 struct ip6_hdr *ip6, *ip6i;
2414 struct ip *ip;
2415 size_t plen;
2416 u_int8_t hlim;
2417
2418 /* tunneling over IPv4 */
2419 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2420 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2421 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2422 m_freem(m);
2423 return EINVAL;
2424 }
2425#if 0
2426 /* XXX if the dst is myself, perform nothing. */
2427 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2428 m_freem(m);
2429 return EINVAL;
2430 }
2431#endif
2432
2433 plen = m->m_pkthdr.len;
2434 ip6 = mtod(m, struct ip6_hdr *);
2435 hlim = ip6->ip6_hlim;
2436 /*
2437 * grow the mbuf to accomodate the new IPv4 header.
2438 */
2439 if (m->m_len != sizeof(struct ip6_hdr))
2440 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2441 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2442 struct mbuf *n;
2443 MGET(n, M_DONTWAIT, MT_DATA);
2444 if (!n) {
2445 m_freem(m);
2446 return ENOBUFS;
2447 }
2448 n->m_len = sizeof(struct ip6_hdr);
2449 n->m_next = m->m_next;
2450 m->m_next = n;
2451 m->m_pkthdr.len += sizeof(struct ip);
2452 ip6i = mtod(n, struct ip6_hdr *);
2453 } else {
2454 m->m_next->m_len += sizeof(struct ip6_hdr);
2455 m->m_next->m_data -= sizeof(struct ip6_hdr);
2456 m->m_pkthdr.len += sizeof(struct ip);
2457 ip6i = mtod(m->m_next, struct ip6_hdr *);
2458 }
2459
2460 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2461 ip = mtod(m, struct ip *);
2462 m->m_len = sizeof(struct ip);
2463 /*
2464 * Fill in some of the IPv4 fields - we don't need all of them
2465 * because the rest will be filled in by ip_output
2466 */
2467 ip->ip_v = IPVERSION;
2468 ip->ip_hl = sizeof(struct ip) >> 2;
2469 ip->ip_id = 0;
2470 ip->ip_sum = 0;
2471 ip->ip_tos = 0;
2472 ip->ip_off = 0;
2473 ip->ip_ttl = hlim;
2474 ip->ip_p = IPPROTO_IPV6;
2475
2476 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2477 /* ECN consideration. */
2478 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2479
2480 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2481 ip->ip_len = htons(plen + sizeof(struct ip));
2482 else {
2483 ip->ip_len = htons(plen);
2484 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2485 "leave ip_len as is (invalid packet)\n"));
2486 }
2487 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2488 &ip->ip_src, sizeof(ip->ip_src));
2489 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2490 &ip->ip_dst, sizeof(ip->ip_dst));
2491
2492 return 0;
2493}
2494
2495int
2496ipsec6_update_routecache_and_output(
2497 struct ipsec_output_state *state,
2498 struct secasvar *sav)
2499{
2500 struct sockaddr_in6* dst6;
2501 struct route_in6 *ro6;
2502 struct ip6_hdr *ip6;
2503 errno_t error = 0;
2504
2505 int plen;
2506 struct ip6_out_args ip6oa;
2507 struct route_in6 ro6_new;
2508 struct flowadv *adv = NULL;
2509
2510 if (!state->m) {
2511 return EINVAL;
2512 }
2513 ip6 = mtod(state->m, struct ip6_hdr *);
2514
2515 // grab sadb_mutex, before updating sah's route cache
2516 lck_mtx_lock(sadb_mutex);
2517 ro6 = &sav->sah->sa_route;
2518 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2519 if (ro6->ro_rt) {
2520 RT_LOCK(ro6->ro_rt);
2521 }
2522 if (ROUTE_UNUSABLE(ro6) ||
2523 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2524 if (ro6->ro_rt != NULL)
2525 RT_UNLOCK(ro6->ro_rt);
2526 ROUTE_RELEASE(ro6);
2527 }
2528 if (ro6->ro_rt == 0) {
2529 bzero(dst6, sizeof(*dst6));
2530 dst6->sin6_family = AF_INET6;
2531 dst6->sin6_len = sizeof(*dst6);
2532 dst6->sin6_addr = ip6->ip6_dst;
2533 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
2534 if (ro6->ro_rt) {
2535 RT_LOCK(ro6->ro_rt);
2536 }
2537 }
2538 if (ro6->ro_rt == 0) {
2539 ip6stat.ip6s_noroute++;
2540 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2541 error = EHOSTUNREACH;
2542 // release sadb_mutex, after updating sah's route cache
2543 lck_mtx_unlock(sadb_mutex);
2544 return error;
2545 }
2546
2547 /*
2548 * adjust state->dst if tunnel endpoint is offlink
2549 *
2550 * XXX: caching rt_gateway value in the state is
2551 * not really good, since it may point elsewhere
2552 * when the gateway gets modified to a larger
2553 * sockaddr via rt_setgate(). This is currently
2554 * addressed by SA_SIZE roundup in that routine.
2555 */
2556 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
2557 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2558 RT_UNLOCK(ro6->ro_rt);
2559 ROUTE_RELEASE(&state->ro);
2560 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
2561 state->dst = (struct sockaddr *)dst6;
2562 state->tunneled = 6;
2563 // release sadb_mutex, after updating sah's route cache
2564 lck_mtx_unlock(sadb_mutex);
2565
2566 state->m = ipsec6_splithdr(state->m);
2567 if (!state->m) {
2568 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2569 error = ENOMEM;
2570 return error;
2571 }
2572
2573 ip6 = mtod(state->m, struct ip6_hdr *);
2574 switch (sav->sah->saidx.proto) {
2575 case IPPROTO_ESP:
2576#if IPSEC_ESP
2577 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2578#else
2579 m_freem(state->m);
2580 error = EINVAL;
2581#endif
2582 break;
2583 case IPPROTO_AH:
2584 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2585 break;
2586 case IPPROTO_IPCOMP:
2587 /* XXX code should be here */
2588 /*FALLTHROUGH*/
2589 default:
2590 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2591 m_freem(state->m);
2592 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2593 error = EINVAL;
2594 break;
2595 }
2596 if (error) {
2597 // If error, packet already freed by above output routines
2598 state->m = NULL;
2599 return error;
2600 }
2601
2602 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2603 if (plen > IPV6_MAXPACKET) {
2604 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2605 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2606 error = EINVAL;/*XXX*/
2607 return error;
2608 }
2609 ip6 = mtod(state->m, struct ip6_hdr *);
2610 ip6->ip6_plen = htons(plen);
2611
2612 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2613 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2614
2615 /* Increment statistics */
2616 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, mbuf_pkthdr_len(state->m), 0);
2617
2618 /* Send to ip6_output */
2619 bzero(&ro6_new, sizeof(ro6_new));
2620 bzero(&ip6oa, sizeof(ip6oa));
2621 ip6oa.ip6oa_flowadv.code = 0;
2622 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2623 if (state->outgoing_if) {
2624 ip6oa.ip6oa_boundif = state->outgoing_if;
2625 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2626 }
2627
2628 adv = &ip6oa.ip6oa_flowadv;
2629 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2630 state->m = NULL;
2631
2632 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2633 error = ENOBUFS;
2634 ifnet_disable_output(sav->sah->ipsec_if);
2635 return error;
2636 }
2637
2638 return 0;
2639}
2640
2641int
2642ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
2643{
2644 struct mbuf *m;
2645 struct ip6_hdr *ip6;
2646 struct ip *oip;
2647 struct ip *ip;
2648 size_t hlen;
2649 size_t plen;
2650
2651 m = state->m;
2652 if (!m) {
2653 return EINVAL;
2654 }
2655
2656 /* can't tunnel between different AFs */
2657 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2658 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2659 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2660 m_freem(m);
2661 return EINVAL;
2662 }
2663#if 0
2664 /* XXX if the dst is myself, perform nothing. */
2665 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2666 m_freem(m);
2667 return EINVAL;
2668 }
2669#endif
2670
2671 if (m->m_len < sizeof(*ip)) {
2672 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2673 return EINVAL;
2674 }
2675
2676 ip = mtod(m, struct ip *);
2677#ifdef _IP_VHL
2678 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2679#else
2680 hlen = ip->ip_hl << 2;
2681#endif
2682
2683 if (m->m_len != hlen) {
2684 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2685 return EINVAL;
2686 }
2687
2688 /* generate header checksum */
2689 ip->ip_sum = 0;
2690#ifdef _IP_VHL
2691 ip->ip_sum = in_cksum(m, hlen);
2692#else
2693 ip->ip_sum = in_cksum(m, hlen);
2694#endif
2695
2696 plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len
2697
2698 /*
2699 * First move the IPv4 header to the second mbuf in the chain
2700 */
2701 if (M_LEADINGSPACE(m->m_next) < hlen) {
2702 struct mbuf *n;
2703 MGET(n, M_DONTWAIT, MT_DATA);
2704 if (!n) {
2705 m_freem(m);
2706 return ENOBUFS;
2707 }
2708 n->m_len = hlen;
2709 n->m_next = m->m_next;
2710 m->m_next = n;
2711 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2712 oip = mtod(n, struct ip *);
2713 } else {
2714 m->m_next->m_len += hlen;
2715 m->m_next->m_data -= hlen;
2716 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2717 oip = mtod(m->m_next, struct ip *);
2718 }
2719 ip = mtod(m, struct ip *);
2720 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2721
2722 /*
2723 * Grow the first mbuf to accomodate the new IPv6 header.
2724 */
2725 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2726 struct mbuf *n;
2727 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2728 if (!n) {
2729 m_freem(m);
2730 return ENOBUFS;
2731 }
2732 M_COPY_PKTHDR(n, m);
2733 MH_ALIGN(n, sizeof(struct ip6_hdr));
2734 n->m_len = sizeof(struct ip6_hdr);
2735 n->m_next = m->m_next;
2736 m->m_next = NULL;
2737 m_freem(m);
2738 state->m = n;
2739 m = state->m;
2740 } else {
2741 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2742 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2743 }
2744 ip6 = mtod(m, struct ip6_hdr *);
2745 ip6->ip6_flow = 0;
2746 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2747 ip6->ip6_vfc |= IPV6_VERSION;
2748
2749 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2750 /* ECN consideration. */
2751 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
2752 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2753 ip6->ip6_plen = htons(plen);
2754 else {
2755 /* ip6->ip6_plen will be updated in ip6_output() */
2756 }
2757
2758 ip6->ip6_nxt = IPPROTO_IPV4;
2759 ip6->ip6_hlim = IPV6_DEFHLIM;
2760
2761 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2762 &ip6->ip6_src, sizeof(ip6->ip6_src));
2763 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2764 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2765
2766 return 0;
2767}
2768
2769#endif /*INET6*/
2770
2771/*
2772 * Check the variable replay window.
2773 * ipsec_chkreplay() performs replay check before ICV verification.
2774 * ipsec_updatereplay() updates replay bitmap. This must be called after
2775 * ICV verification (it also performs replay check, which is usually done
2776 * beforehand).
2777 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2778 *
2779 * based on RFC 2401.
2780 */
2781int
2782ipsec_chkreplay(u_int32_t seq, struct secasvar *sav)
2783{
2784 const struct secreplay *replay;
2785 u_int32_t diff;
2786 int fr;
2787 u_int32_t wsizeb; /* constant: bits of window size */
2788 int frlast; /* constant: last frame */
2789
2790
2791 /* sanity check */
2792 if (sav == NULL)
2793 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2794
2795 lck_mtx_lock(sadb_mutex);
2796 replay = sav->replay;
2797
2798 if (replay->wsize == 0) {
2799 lck_mtx_unlock(sadb_mutex);
2800 return 1; /* no need to check replay. */
2801 }
2802
2803 /* constant */
2804 frlast = replay->wsize - 1;
2805 wsizeb = replay->wsize << 3;
2806
2807 /* sequence number of 0 is invalid */
2808 if (seq == 0) {
2809 lck_mtx_unlock(sadb_mutex);
2810 return 0;
2811 }
2812
2813 /* first time is always okay */
2814 if (replay->count == 0) {
2815 lck_mtx_unlock(sadb_mutex);
2816 return 1;
2817 }
2818
2819 if (seq > replay->lastseq) {
2820 /* larger sequences are okay */
2821 lck_mtx_unlock(sadb_mutex);
2822 return 1;
2823 } else {
2824 /* seq is equal or less than lastseq. */
2825 diff = replay->lastseq - seq;
2826
2827 /* over range to check, i.e. too old or wrapped */
2828 if (diff >= wsizeb) {
2829 lck_mtx_unlock(sadb_mutex);
2830 return 0;
2831 }
2832
2833 fr = frlast - diff / 8;
2834
2835 /* this packet already seen ? */
2836 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2837 lck_mtx_unlock(sadb_mutex);
2838 return 0;
2839 }
2840
2841 /* out of order but good */
2842 lck_mtx_unlock(sadb_mutex);
2843 return 1;
2844 }
2845}
2846
2847/*
2848 * check replay counter whether to update or not.
2849 * OUT: 0: OK
2850 * 1: NG
2851 */
2852int
2853ipsec_updatereplay(u_int32_t seq, struct secasvar *sav)
2854{
2855 struct secreplay *replay;
2856 u_int32_t diff;
2857 int fr;
2858 u_int32_t wsizeb; /* constant: bits of window size */
2859 int frlast; /* constant: last frame */
2860
2861 /* sanity check */
2862 if (sav == NULL)
2863 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2864
2865 lck_mtx_lock(sadb_mutex);
2866 replay = sav->replay;
2867
2868 if (replay->wsize == 0)
2869 goto ok; /* no need to check replay. */
2870
2871 /* constant */
2872 frlast = replay->wsize - 1;
2873 wsizeb = replay->wsize << 3;
2874
2875 /* sequence number of 0 is invalid */
2876 if (seq == 0) {
2877 lck_mtx_unlock(sadb_mutex);
2878 return 1;
2879 }
2880
2881 /* first time */
2882 if (replay->count == 0) {
2883 replay->lastseq = seq;
2884 bzero(replay->bitmap, replay->wsize);
2885 (replay->bitmap)[frlast] = 1;
2886 goto ok;
2887 }
2888
2889 if (seq > replay->lastseq) {
2890 /* seq is larger than lastseq. */
2891 diff = seq - replay->lastseq;
2892
2893 /* new larger sequence number */
2894 if (diff < wsizeb) {
2895 /* In window */
2896 /* set bit for this packet */
2897 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
2898 (replay->bitmap)[frlast] |= 1;
2899 } else {
2900 /* this packet has a "way larger" */
2901 bzero(replay->bitmap, replay->wsize);
2902 (replay->bitmap)[frlast] = 1;
2903 }
2904 replay->lastseq = seq;
2905
2906 /* larger is good */
2907 } else {
2908 /* seq is equal or less than lastseq. */
2909 diff = replay->lastseq - seq;
2910
2911 /* over range to check, i.e. too old or wrapped */
2912 if (diff >= wsizeb) {
2913 lck_mtx_unlock(sadb_mutex);
2914 return 1;
2915 }
2916
2917 fr = frlast - diff / 8;
2918
2919 /* this packet already seen ? */
2920 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2921 lck_mtx_unlock(sadb_mutex);
2922 return 1;
2923 }
2924
2925 /* mark as seen */
2926 (replay->bitmap)[fr] |= (1 << (diff % 8));
2927
2928 /* out of order but good */
2929 }
2930
2931ok:
2932 if (replay->count == ~0) {
2933
2934 /* set overflow flag */
2935 replay->overflow++;
2936
2937 /* don't increment, no more packets accepted */
2938 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
2939 lck_mtx_unlock(sadb_mutex);
2940 return 1;
2941 }
2942
2943 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
2944 replay->overflow, ipsec_logsastr(sav)));
2945 }
2946
2947 replay->count++;
2948
2949 lck_mtx_unlock(sadb_mutex);
2950 return 0;
2951}
2952
2953/*
2954 * shift variable length buffer to left.
2955 * IN: bitmap: pointer to the buffer
2956 * nbit: the number of to shift.
2957 * wsize: buffer size (bytes).
2958 */
2959static void
2960vshiftl(unsigned char *bitmap, int nbit, int wsize)
2961{
2962 int s, j, i;
2963 unsigned char over;
2964
2965 for (j = 0; j < nbit; j += 8) {
2966 s = (nbit - j < 8) ? (nbit - j): 8;
2967 bitmap[0] <<= s;
2968 for (i = 1; i < wsize; i++) {
2969 over = (bitmap[i] >> (8 - s));
2970 bitmap[i] <<= s;
2971 bitmap[i-1] |= over;
2972 }
2973 }
2974
2975 return;
2976}
2977
2978const char *
2979ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
2980{
2981 static char buf[256] __attribute__((aligned(4)));
2982 char *p;
2983 u_int8_t *s, *d;
2984
2985 s = (u_int8_t *)(&ip->ip_src);
2986 d = (u_int8_t *)(&ip->ip_dst);
2987
2988 p = buf;
2989 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
2990 while (p && *p)
2991 p++;
2992 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
2993 s[0], s[1], s[2], s[3]);
2994 while (p && *p)
2995 p++;
2996 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
2997 d[0], d[1], d[2], d[3]);
2998 while (p && *p)
2999 p++;
3000 snprintf(p, sizeof(buf) - (p - buf), ")");
3001
3002 return buf;
3003}
3004
3005#if INET6
3006const char *
3007ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3008{
3009 static char buf[256] __attribute__((aligned(4)));
3010 char *p;
3011
3012 p = buf;
3013 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3014 while (p && *p)
3015 p++;
3016 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3017 ip6_sprintf(&ip6->ip6_src));
3018 while (p && *p)
3019 p++;
3020 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3021 ip6_sprintf(&ip6->ip6_dst));
3022 while (p && *p)
3023 p++;
3024 snprintf(p, sizeof(buf) - (p - buf), ")");
3025
3026 return buf;
3027}
3028#endif /*INET6*/
3029
3030const char *
3031ipsec_logsastr(struct secasvar *sav)
3032{
3033 static char buf[256] __attribute__((aligned(4)));
3034 char *p;
3035 struct secasindex *saidx = &sav->sah->saidx;
3036
3037 /* validity check */
3038 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3039 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family)
3040 panic("ipsec_logsastr: family mismatched.\n");
3041
3042 p = buf;
3043 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3044 while (p && *p)
3045 p++;
3046 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3047 u_int8_t *s, *d;
3048 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3049 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3050 snprintf(p, sizeof(buf) - (p - buf),
3051 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3052 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3053 }
3054#if INET6
3055 else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3056 snprintf(p, sizeof(buf) - (p - buf),
3057 "src=%s",
3058 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3059 while (p && *p)
3060 p++;
3061 snprintf(p, sizeof(buf) - (p - buf),
3062 " dst=%s",
3063 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3064 }
3065#endif
3066 while (p && *p)
3067 p++;
3068 snprintf(p, sizeof(buf) - (p - buf), ")");
3069
3070 return buf;
3071}
3072
3073void
3074ipsec_dumpmbuf(struct mbuf *m)
3075{
3076 int totlen;
3077 int i;
3078 u_char *p;
3079
3080 totlen = 0;
3081 printf("---\n");
3082 while (m) {
3083 p = mtod(m, u_char *);
3084 for (i = 0; i < m->m_len; i++) {
3085 printf("%02x ", p[i]);
3086 totlen++;
3087 if (totlen % 16 == 0)
3088 printf("\n");
3089 }
3090 m = m->m_next;
3091 }
3092 if (totlen % 16 != 0)
3093 printf("\n");
3094 printf("---\n");
3095}
3096
3097#if INET
3098/*
3099 * IPsec output logic for IPv4.
3100 */
3101static int
3102ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3103{
3104 struct ip *ip = NULL;
3105 int error = 0;
3106 struct sockaddr_in *dst4;
3107 struct route *ro4;
3108
3109 /* validity check */
3110 if (sav == NULL || sav->sah == NULL) {
3111 error = EINVAL;
3112 goto bad;
3113 }
3114
3115 /*
3116 * If there is no valid SA, we give up to process any
3117 * more. In such a case, the SA's status is changed
3118 * from DYING to DEAD after allocating. If a packet
3119 * send to the receiver by dead SA, the receiver can
3120 * not decode a packet because SA has been dead.
3121 */
3122 if (sav->state != SADB_SASTATE_MATURE
3123 && sav->state != SADB_SASTATE_DYING) {
3124 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3125 error = EINVAL;
3126 goto bad;
3127 }
3128
3129 state->outgoing_if = sav->sah->outgoing_if;
3130
3131 /*
3132 * There may be the case that SA status will be changed when
3133 * we are refering to one. So calling splsoftnet().
3134 */
3135
3136 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3137 /*
3138 * build IPsec tunnel.
3139 */
3140 state->m = ipsec4_splithdr(state->m);
3141 if (!state->m) {
3142 error = ENOMEM;
3143 goto bad;
3144 }
3145
3146 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3147 error = ipsec46_encapsulate(state, sav);
3148 if (error) {
3149 // packet already freed by encapsulation error handling
3150 state->m = NULL;
3151 return error;
3152 }
3153
3154 error = ipsec6_update_routecache_and_output(state, sav);
3155 return error;
3156
3157 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3158 error = ipsec4_encapsulate(state->m, sav);
3159 if (error) {
3160 state->m = NULL;
3161 goto bad;
3162 }
3163 ip = mtod(state->m, struct ip *);
3164
3165 // grab sadb_mutex, before updating sah's route cache
3166 lck_mtx_lock(sadb_mutex);
3167 ro4= (struct route *)&sav->sah->sa_route;
3168 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3169 if (ro4->ro_rt != NULL) {
3170 RT_LOCK(ro4->ro_rt);
3171 }
3172 if (ROUTE_UNUSABLE(ro4) ||
3173 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3174 if (ro4->ro_rt != NULL)
3175 RT_UNLOCK(ro4->ro_rt);
3176 ROUTE_RELEASE(ro4);
3177 }
3178 if (ro4->ro_rt == 0) {
3179 dst4->sin_family = AF_INET;
3180 dst4->sin_len = sizeof(*dst4);
3181 dst4->sin_addr = ip->ip_dst;
3182 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3183 if (ro4->ro_rt == 0) {
3184 OSAddAtomic(1, &ipstat.ips_noroute);
3185 error = EHOSTUNREACH;
3186 // release sadb_mutex, after updating sah's route cache
3187 lck_mtx_unlock(sadb_mutex);
3188 goto bad;
3189 }
3190 RT_LOCK(ro4->ro_rt);
3191 }
3192
3193 /*
3194 * adjust state->dst if tunnel endpoint is offlink
3195 *
3196 * XXX: caching rt_gateway value in the state is
3197 * not really good, since it may point elsewhere
3198 * when the gateway gets modified to a larger
3199 * sockaddr via rt_setgate(). This is currently
3200 * addressed by SA_SIZE roundup in that routine.
3201 */
3202 if (ro4->ro_rt->rt_flags & RTF_GATEWAY)
3203 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3204 RT_UNLOCK(ro4->ro_rt);
3205 ROUTE_RELEASE(&state->ro);
3206 route_copyout((struct route *)&state->ro, ro4, sizeof(struct route));
3207 state->dst = (struct sockaddr *)dst4;
3208 state->tunneled = 4;
3209 // release sadb_mutex, after updating sah's route cache
3210 lck_mtx_unlock(sadb_mutex);
3211 } else {
3212 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3213 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3214 error = EAFNOSUPPORT;
3215 goto bad;
3216 }
3217 }
3218
3219 state->m = ipsec4_splithdr(state->m);
3220 if (!state->m) {
3221 error = ENOMEM;
3222 goto bad;
3223 }
3224 switch (sav->sah->saidx.proto) {
3225 case IPPROTO_ESP:
3226#if IPSEC_ESP
3227 if ((error = esp4_output(state->m, sav)) != 0) {
3228 state->m = NULL;
3229 goto bad;
3230 }
3231 break;
3232#else
3233 m_freem(state->m);
3234 state->m = NULL;
3235 error = EINVAL;
3236 goto bad;
3237#endif
3238 case IPPROTO_AH:
3239 if ((error = ah4_output(state->m, sav)) != 0) {
3240 state->m = NULL;
3241 goto bad;
3242 }
3243 break;
3244 case IPPROTO_IPCOMP:
3245 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3246 state->m = NULL;
3247 goto bad;
3248 }
3249 break;
3250 default:
3251 ipseclog((LOG_ERR,
3252 "ipsec4_output: unknown ipsec protocol %d\n",
3253 sav->sah->saidx.proto));
3254 m_freem(state->m);
3255 state->m = NULL;
3256 error = EINVAL;
3257 goto bad;
3258 }
3259
3260 if (state->m == 0) {
3261 error = ENOMEM;
3262 goto bad;
3263 }
3264
3265 return 0;
3266
3267bad:
3268 return error;
3269}
3270
3271int
3272ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3273{
3274 int error = 0;
3275 struct secasvar *sav = NULL;
3276
3277 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3278
3279 if (state == NULL) {
3280 panic("state == NULL in ipsec4_output");
3281 }
3282 if (state->m == NULL) {
3283 panic("state->m == NULL in ipsec4_output");
3284 }
3285 if (state->dst == NULL) {
3286 panic("state->dst == NULL in ipsec4_output");
3287 }
3288
3289 struct ip *ip = mtod(state->m, struct ip *);
3290
3291 struct sockaddr_in src = {};
3292 src.sin_family = AF_INET;
3293 src.sin_len = sizeof(src);
3294 memcpy(&src.sin_addr, &ip->ip_src, sizeof(src.sin_addr));
3295
3296 struct sockaddr_in dst = {};
3297 dst.sin_family = AF_INET;
3298 dst.sin_len = sizeof(dst);
3299 memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr));
3300
3301 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
3302 (struct sockaddr *)&src,
3303 (struct sockaddr *)&dst);
3304 if (sav == NULL) {
3305 goto bad;
3306 }
3307
3308 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3309 goto bad;
3310 }
3311
3312 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3313 if (sav) {
3314 key_freesav(sav, KEY_SADB_UNLOCKED);
3315 }
3316 return 0;
3317
3318bad:
3319 if (sav) {
3320 key_freesav(sav, KEY_SADB_UNLOCKED);
3321 }
3322 m_freem(state->m);
3323 state->m = NULL;
3324 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3325 return error;
3326}
3327
3328int
3329ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3330{
3331 struct ip *ip = NULL;
3332 struct ipsecrequest *isr = NULL;
3333 struct secasindex saidx;
3334 struct secasvar *sav = NULL;
3335 int error = 0;
3336 struct sockaddr_in *sin;
3337
3338 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3339
3340 if (!state)
3341 panic("state == NULL in ipsec4_output");
3342 if (!state->m)
3343 panic("state->m == NULL in ipsec4_output");
3344 if (!state->dst)
3345 panic("state->dst == NULL in ipsec4_output");
3346
3347 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0,0,0,0,0);
3348
3349 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3350 printf("ipsec4_output: applied SP\n");
3351 kdebug_secpolicy(sp));
3352
3353 for (isr = sp->req; isr != NULL; isr = isr->next) {
3354 /* make SA index for search proper SA */
3355 ip = mtod(state->m, struct ip *);
3356 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3357 saidx.mode = isr->saidx.mode;
3358 saidx.reqid = isr->saidx.reqid;
3359 sin = (struct sockaddr_in *)&saidx.src;
3360 if (sin->sin_len == 0) {
3361 sin->sin_len = sizeof(*sin);
3362 sin->sin_family = AF_INET;
3363 sin->sin_port = IPSEC_PORT_ANY;
3364 bcopy(&ip->ip_src, &sin->sin_addr,
3365 sizeof(sin->sin_addr));
3366 }
3367 sin = (struct sockaddr_in *)&saidx.dst;
3368 if (sin->sin_len == 0) {
3369 sin->sin_len = sizeof(*sin);
3370 sin->sin_family = AF_INET;
3371 sin->sin_port = IPSEC_PORT_ANY;
3372 /*
3373 * Get port from packet if upper layer is UDP and nat traversal
3374 * is enabled and transport mode.
3375 */
3376
3377 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3378 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3379
3380 if (ip->ip_p == IPPROTO_UDP) {
3381 struct udphdr *udp;
3382 size_t hlen;
3383#ifdef _IP_VHL
3384 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3385#else
3386 hlen = ip->ip_hl << 2;
3387#endif
3388 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3389 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3390 if (!state->m) {
3391 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3392 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3393 goto bad;
3394 }
3395 ip = mtod(state->m, struct ip *);
3396 }
3397 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3398 sin->sin_port = udp->uh_dport;
3399 }
3400 }
3401
3402 bcopy(&ip->ip_dst, &sin->sin_addr,
3403 sizeof(sin->sin_addr));
3404 }
3405
3406 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3407 /*
3408 * IPsec processing is required, but no SA found.
3409 * I assume that key_acquire() had been called
3410 * to get/establish the SA. Here I discard
3411 * this packet because it is responsibility for
3412 * upper layer to retransmit the packet.
3413 */
3414 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3415 goto bad;
3416 }
3417
3418 /* validity check */
3419 if (sav == NULL) {
3420 switch (ipsec_get_reqlevel(isr)) {
3421 case IPSEC_LEVEL_USE:
3422 continue;
3423 case IPSEC_LEVEL_REQUIRE:
3424 /* must be not reached here. */
3425 panic("ipsec4_output: no SA found, but required.");
3426 }
3427 }
3428
3429 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3430 goto bad;
3431 }
3432 }
3433
3434 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3435 if (sav)
3436 key_freesav(sav, KEY_SADB_UNLOCKED);
3437 return 0;
3438
3439bad:
3440 if (sav)
3441 key_freesav(sav, KEY_SADB_UNLOCKED);
3442 m_freem(state->m);
3443 state->m = NULL;
3444 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3445 return error;
3446}
3447
3448#endif
3449
3450#if INET6
3451/*
3452 * IPsec output logic for IPv6, transport mode.
3453 */
3454static int
3455ipsec6_output_trans_internal(
3456 struct ipsec_output_state *state,
3457 struct secasvar *sav,
3458 u_char *nexthdrp,
3459 struct mbuf *mprev)
3460{
3461 struct ip6_hdr *ip6;
3462 int error = 0;
3463 int plen;
3464
3465 /* validity check */
3466 if (sav == NULL || sav->sah == NULL) {
3467 error = EINVAL;
3468 goto bad;
3469 }
3470
3471 /*
3472 * If there is no valid SA, we give up to process.
3473 * see same place at ipsec4_output().
3474 */
3475 if (sav->state != SADB_SASTATE_MATURE
3476 && sav->state != SADB_SASTATE_DYING) {
3477 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3478 error = EINVAL;
3479 goto bad;
3480 }
3481
3482 state->outgoing_if = sav->sah->outgoing_if;
3483
3484 switch (sav->sah->saidx.proto) {
3485 case IPPROTO_ESP:
3486#if IPSEC_ESP
3487 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3488#else
3489 m_freem(state->m);
3490 error = EINVAL;
3491#endif
3492 break;
3493 case IPPROTO_AH:
3494 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3495 break;
3496 case IPPROTO_IPCOMP:
3497 error = ipcomp6_output(state->m, nexthdrp, mprev->m_next, sav);
3498 break;
3499 default:
3500 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3501 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3502 m_freem(state->m);
3503 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3504 error = EINVAL;
3505 break;
3506 }
3507 if (error) {
3508 state->m = NULL;
3509 goto bad;
3510 }
3511 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3512 if (plen > IPV6_MAXPACKET) {
3513 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3514 "IPsec with IPv6 jumbogram is not supported\n"));
3515 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3516 error = EINVAL; /*XXX*/
3517 goto bad;
3518 }
3519 ip6 = mtod(state->m, struct ip6_hdr *);
3520 ip6->ip6_plen = htons(plen);
3521
3522 return 0;
3523bad:
3524 return error;
3525}
3526
3527int
3528ipsec6_output_trans(
3529 struct ipsec_output_state *state,
3530 u_char *nexthdrp,
3531 struct mbuf *mprev,
3532 struct secpolicy *sp,
3533 __unused int flags,
3534 int *tun)
3535{
3536 struct ip6_hdr *ip6;
3537 struct ipsecrequest *isr = NULL;
3538 struct secasindex saidx;
3539 int error = 0;
3540 struct sockaddr_in6 *sin6;
3541 struct secasvar *sav = NULL;
3542
3543 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3544
3545 if (!state)
3546 panic("state == NULL in ipsec6_output_trans");
3547 if (!state->m)
3548 panic("state->m == NULL in ipsec6_output_trans");
3549 if (!nexthdrp)
3550 panic("nexthdrp == NULL in ipsec6_output_trans");
3551 if (!mprev)
3552 panic("mprev == NULL in ipsec6_output_trans");
3553 if (!sp)
3554 panic("sp == NULL in ipsec6_output_trans");
3555 if (!tun)
3556 panic("tun == NULL in ipsec6_output_trans");
3557
3558 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3559 printf("ipsec6_output_trans: applyed SP\n");
3560 kdebug_secpolicy(sp));
3561
3562 *tun = 0;
3563 for (isr = sp->req; isr; isr = isr->next) {
3564 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3565 /* the rest will be handled by ipsec6_output_tunnel() */
3566 break;
3567 }
3568
3569 /* make SA index for search proper SA */
3570 ip6 = mtod(state->m, struct ip6_hdr *);
3571 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3572 saidx.mode = isr->saidx.mode;
3573 saidx.reqid = isr->saidx.reqid;
3574 sin6 = (struct sockaddr_in6 *)&saidx.src;
3575 if (sin6->sin6_len == 0) {
3576 sin6->sin6_len = sizeof(*sin6);
3577 sin6->sin6_family = AF_INET6;
3578 sin6->sin6_port = IPSEC_PORT_ANY;
3579 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3580 sizeof(ip6->ip6_src));
3581 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3582 /* fix scope id for comparing SPD */
3583 sin6->sin6_addr.s6_addr16[1] = 0;
3584 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3585 }
3586 }
3587 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3588 if (sin6->sin6_len == 0) {
3589 sin6->sin6_len = sizeof(*sin6);
3590 sin6->sin6_family = AF_INET6;
3591 sin6->sin6_port = IPSEC_PORT_ANY;
3592 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3593 sizeof(ip6->ip6_dst));
3594 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3595 /* fix scope id for comparing SPD */
3596 sin6->sin6_addr.s6_addr16[1] = 0;
3597 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3598 }
3599 }
3600
3601 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3602 /*
3603 * IPsec processing is required, but no SA found.
3604 * I assume that key_acquire() had been called
3605 * to get/establish the SA. Here I discard
3606 * this packet because it is responsibility for
3607 * upper layer to retransmit the packet.
3608 */
3609 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3610 error = ENOENT;
3611
3612 /*
3613 * Notify the fact that the packet is discarded
3614 * to ourselves. I believe this is better than
3615 * just silently discarding. (jinmei@kame.net)
3616 * XXX: should we restrict the error to TCP packets?
3617 * XXX: should we directly notify sockets via
3618 * pfctlinputs?
3619 */
3620 icmp6_error(state->m, ICMP6_DST_UNREACH,
3621 ICMP6_DST_UNREACH_ADMIN, 0);
3622 state->m = NULL; /* icmp6_error freed the mbuf */
3623 goto bad;
3624 }
3625
3626 /* validity check */
3627 if (sav == NULL) {
3628 switch (ipsec_get_reqlevel(isr)) {
3629 case IPSEC_LEVEL_USE:
3630 continue;
3631 case IPSEC_LEVEL_REQUIRE:
3632 /* must be not reached here. */
3633 panic("ipsec6_output_trans: no SA found, but required.");
3634 }
3635 }
3636
3637 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
3638 goto bad;
3639 }
3640 }
3641
3642 /* if we have more to go, we need a tunnel mode processing */
3643 if (isr != NULL)
3644 *tun = 1;
3645
3646 if (sav)
3647 key_freesav(sav, KEY_SADB_UNLOCKED);
3648 return 0;
3649
3650bad:
3651 if (sav)
3652 key_freesav(sav, KEY_SADB_UNLOCKED);
3653 m_freem(state->m);
3654 state->m = NULL;
3655 return error;
3656}
3657
3658/*
3659 * IPsec output logic for IPv6, tunnel mode.
3660 */
3661static int
3662ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3663{
3664 struct ip6_hdr *ip6;
3665 int error = 0;
3666 int plen;
3667 struct sockaddr_in6* dst6;
3668 struct route_in6 *ro6;
3669
3670 /* validity check */
3671 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3672 error = EINVAL;
3673 goto bad;
3674 }
3675
3676 /*
3677 * If there is no valid SA, we give up to process.
3678 * see same place at ipsec4_output().
3679 */
3680 if (sav->state != SADB_SASTATE_MATURE
3681 && sav->state != SADB_SASTATE_DYING) {
3682 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3683 error = EINVAL;
3684 goto bad;
3685 }
3686
3687 state->outgoing_if = sav->sah->outgoing_if;
3688
3689 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3690 /*
3691 * build IPsec tunnel.
3692 */
3693 state->m = ipsec6_splithdr(state->m);
3694 if (!state->m) {
3695 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3696 error = ENOMEM;
3697 goto bad;
3698 }
3699
3700 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3701 error = ipsec6_encapsulate(state->m, sav);
3702 if (error) {
3703 state->m = 0;
3704 goto bad;
3705 }
3706 ip6 = mtod(state->m, struct ip6_hdr *);
3707 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3708
3709 struct ip *ip;
3710 struct sockaddr_in* dst4;
3711 struct route *ro4 = NULL;
3712 struct route ro4_copy;
3713 struct ip_out_args ipoa;
3714
3715 bzero(&ipoa, sizeof(ipoa));
3716 ipoa.ipoa_boundif = IFSCOPE_NONE;
3717 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
3718 ipoa.ipoa_sotc = SO_TC_UNSPEC;
3719 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3720
3721 if (must_be_last)
3722 *must_be_last = 1;
3723
3724 state->tunneled = 4; /* must not process any further in ip6_output */
3725 error = ipsec64_encapsulate(state->m, sav);
3726 if (error) {
3727 state->m = 0;
3728 goto bad;
3729 }
3730 /* Now we have an IPv4 packet */
3731 ip = mtod(state->m, struct ip *);
3732
3733 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3734 lck_mtx_lock(sadb_mutex);
3735 ro4 = (struct route *)&sav->sah->sa_route;
3736 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3737 if (ro4->ro_rt) {
3738 RT_LOCK(ro4->ro_rt);
3739 }
3740 if (ROUTE_UNUSABLE(ro4) ||
3741 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3742 if (ro4->ro_rt != NULL)
3743 RT_UNLOCK(ro4->ro_rt);
3744 ROUTE_RELEASE(ro4);
3745 }
3746 if (ro4->ro_rt == NULL) {
3747 dst4->sin_family = AF_INET;
3748 dst4->sin_len = sizeof(*dst4);
3749 dst4->sin_addr = ip->ip_dst;
3750 } else {
3751 RT_UNLOCK(ro4->ro_rt);
3752 }
3753 route_copyout(&ro4_copy, ro4, sizeof(struct route));
3754 // release sadb_mutex, after updating sah's route cache and getting a local copy
3755 lck_mtx_unlock(sadb_mutex);
3756 state->m = ipsec4_splithdr(state->m);
3757 if (!state->m) {
3758 error = ENOMEM;
3759 ROUTE_RELEASE(&ro4_copy);
3760 goto bad;
3761 }
3762 switch (sav->sah->saidx.proto) {
3763 case IPPROTO_ESP:
3764#if IPSEC_ESP
3765 if ((error = esp4_output(state->m, sav)) != 0) {
3766 state->m = NULL;
3767 ROUTE_RELEASE(&ro4_copy);
3768 goto bad;
3769 }
3770 break;
3771
3772#else
3773 m_freem(state->m);
3774 state->m = NULL;
3775 error = EINVAL;
3776 ROUTE_RELEASE(&ro4_copy);
3777 goto bad;
3778#endif
3779 case IPPROTO_AH:
3780 if ((error = ah4_output(state->m, sav)) != 0) {
3781 state->m = NULL;
3782 ROUTE_RELEASE(&ro4_copy);
3783 goto bad;
3784 }
3785 break;
3786 case IPPROTO_IPCOMP:
3787 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3788 state->m = NULL;
3789 ROUTE_RELEASE(&ro4_copy);
3790 goto bad;
3791 }
3792 break;
3793 default:
3794 ipseclog((LOG_ERR,
3795 "ipsec4_output: unknown ipsec protocol %d\n",
3796 sav->sah->saidx.proto));
3797 m_freem(state->m);
3798 state->m = NULL;
3799 error = EINVAL;
3800 ROUTE_RELEASE(&ro4_copy);
3801 goto bad;
3802 }
3803
3804 if (state->m == 0) {
3805 error = ENOMEM;
3806 ROUTE_RELEASE(&ro4_copy);
3807 goto bad;
3808 }
3809 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3810 ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);
3811
3812 ip = mtod(state->m, struct ip *);
3813 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3814 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3815 state->m = NULL;
3816 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3817 lck_mtx_lock(sadb_mutex);
3818 route_copyin(&ro4_copy, ro4, sizeof(struct route));
3819 lck_mtx_unlock(sadb_mutex);
3820 if (error != 0)
3821 goto bad;
3822 goto done;
3823 } else {
3824 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3825 "unsupported inner family, spi=%u\n",
3826 (u_int32_t)ntohl(sav->spi)));
3827 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3828 error = EAFNOSUPPORT;
3829 goto bad;
3830 }
3831
3832 // grab sadb_mutex, before updating sah's route cache
3833 lck_mtx_lock(sadb_mutex);
3834 ro6 = &sav->sah->sa_route;
3835 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
3836 if (ro6->ro_rt) {
3837 RT_LOCK(ro6->ro_rt);
3838 }
3839 if (ROUTE_UNUSABLE(ro6) ||
3840 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
3841 if (ro6->ro_rt != NULL)
3842 RT_UNLOCK(ro6->ro_rt);
3843 ROUTE_RELEASE(ro6);
3844 }
3845 if (ro6->ro_rt == 0) {
3846 bzero(dst6, sizeof(*dst6));
3847 dst6->sin6_family = AF_INET6;
3848 dst6->sin6_len = sizeof(*dst6);
3849 dst6->sin6_addr = ip6->ip6_dst;
3850 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
3851 if (ro6->ro_rt) {
3852 RT_LOCK(ro6->ro_rt);
3853 }
3854 }
3855 if (ro6->ro_rt == 0) {
3856 ip6stat.ip6s_noroute++;
3857 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
3858 error = EHOSTUNREACH;
3859 // release sadb_mutex, after updating sah's route cache
3860 lck_mtx_unlock(sadb_mutex);
3861 goto bad;
3862 }
3863
3864 /*
3865 * adjust state->dst if tunnel endpoint is offlink
3866 *
3867 * XXX: caching rt_gateway value in the state is
3868 * not really good, since it may point elsewhere
3869 * when the gateway gets modified to a larger
3870 * sockaddr via rt_setgate(). This is currently
3871 * addressed by SA_SIZE roundup in that routine.
3872 */
3873 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
3874 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
3875 RT_UNLOCK(ro6->ro_rt);
3876 ROUTE_RELEASE(&state->ro);
3877 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
3878 state->dst = (struct sockaddr *)dst6;
3879 state->tunneled = 6;
3880 // release sadb_mutex, after updating sah's route cache
3881 lck_mtx_unlock(sadb_mutex);
3882 }
3883
3884 state->m = ipsec6_splithdr(state->m);
3885 if (!state->m) {
3886 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3887 error = ENOMEM;
3888 goto bad;
3889 }
3890 ip6 = mtod(state->m, struct ip6_hdr *);
3891 switch (sav->sah->saidx.proto) {
3892 case IPPROTO_ESP:
3893#if IPSEC_ESP
3894 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3895#else
3896 m_freem(state->m);
3897 error = EINVAL;
3898#endif
3899 break;
3900 case IPPROTO_AH:
3901 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3902 break;
3903 case IPPROTO_IPCOMP:
3904 /* XXX code should be here */
3905 /*FALLTHROUGH*/
3906 default:
3907 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3908 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3909 m_freem(state->m);
3910 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3911 error = EINVAL;
3912 break;
3913 }
3914 if (error) {
3915 state->m = NULL;
3916 goto bad;
3917 }
3918 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3919 if (plen > IPV6_MAXPACKET) {
3920 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3921 "IPsec with IPv6 jumbogram is not supported\n"));
3922 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3923 error = EINVAL; /*XXX*/
3924 goto bad;
3925 }
3926 ip6 = mtod(state->m, struct ip6_hdr *);
3927 ip6->ip6_plen = htons(plen);
3928done:
3929 return 0;
3930
3931bad:
3932 return error;
3933}
3934
3935int
3936ipsec6_output_tunnel(
3937 struct ipsec_output_state *state,
3938 struct secpolicy *sp,
3939 __unused int flags)
3940{
3941 struct ip6_hdr *ip6;
3942 struct ipsecrequest *isr = NULL;
3943 struct secasindex saidx;
3944 struct secasvar *sav = NULL;
3945 int error = 0;
3946
3947 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3948
3949 if (!state)
3950 panic("state == NULL in ipsec6_output_tunnel");
3951 if (!state->m)
3952 panic("state->m == NULL in ipsec6_output_tunnel");
3953 if (!sp)
3954 panic("sp == NULL in ipsec6_output_tunnel");
3955
3956 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3957 printf("ipsec6_output_tunnel: applyed SP\n");
3958 kdebug_secpolicy(sp));
3959
3960 /*
3961 * transport mode ipsec (before the 1st tunnel mode) is already
3962 * processed by ipsec6_output_trans().
3963 */
3964 for (isr = sp->req; isr; isr = isr->next) {
3965 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
3966 break;
3967 }
3968
3969 for (/* already initialized */; isr; isr = isr->next) {
3970 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3971 /* When tunnel mode, SA peers must be specified. */
3972 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3973 } else {
3974 /* make SA index to look for a proper SA */
3975 struct sockaddr_in6 *sin6;
3976
3977 bzero(&saidx, sizeof(saidx));
3978 saidx.proto = isr->saidx.proto;
3979 saidx.mode = isr->saidx.mode;
3980 saidx.reqid = isr->saidx.reqid;
3981
3982 ip6 = mtod(state->m, struct ip6_hdr *);
3983 sin6 = (struct sockaddr_in6 *)&saidx.src;
3984 if (sin6->sin6_len == 0) {
3985 sin6->sin6_len = sizeof(*sin6);
3986 sin6->sin6_family = AF_INET6;
3987 sin6->sin6_port = IPSEC_PORT_ANY;
3988 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3989 sizeof(ip6->ip6_src));
3990 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3991 /* fix scope id for comparing SPD */
3992 sin6->sin6_addr.s6_addr16[1] = 0;
3993 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3994 }
3995 }
3996 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3997 if (sin6->sin6_len == 0) {
3998 sin6->sin6_len = sizeof(*sin6);
3999 sin6->sin6_family = AF_INET6;
4000 sin6->sin6_port = IPSEC_PORT_ANY;
4001 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
4002 sizeof(ip6->ip6_dst));
4003 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
4004 /* fix scope id for comparing SPD */
4005 sin6->sin6_addr.s6_addr16[1] = 0;
4006 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
4007 }
4008 }
4009 }
4010
4011 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
4012 /*
4013 * IPsec processing is required, but no SA found.
4014 * I assume that key_acquire() had been called
4015 * to get/establish the SA. Here I discard
4016 * this packet because it is responsibility for
4017 * upper layer to retransmit the packet.
4018 */
4019 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4020 error = ENOENT;
4021 goto bad;
4022 }
4023
4024 /* validity check */
4025 if (sav == NULL) {
4026 switch (ipsec_get_reqlevel(isr)) {
4027 case IPSEC_LEVEL_USE:
4028 continue;
4029 case IPSEC_LEVEL_REQUIRE:
4030 /* must be not reached here. */
4031 panic("ipsec6_output_tunnel: no SA found, but required.");
4032 }
4033 }
4034
4035 /*
4036 * If there is no valid SA, we give up to process.
4037 * see same place at ipsec4_output().
4038 */
4039 if (sav->state != SADB_SASTATE_MATURE
4040 && sav->state != SADB_SASTATE_DYING) {
4041 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4042 error = EINVAL;
4043 goto bad;
4044 }
4045
4046 int must_be_last = 0;
4047
4048 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4049 goto bad;
4050 }
4051
4052 if (must_be_last && isr->next) {
4053 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4054 "IPv4 must be outer layer, spi=%u\n",
4055 (u_int32_t)ntohl(sav->spi)));
4056 error = EINVAL;
4057 goto bad;
4058 }
4059 }
4060
4061 if (sav)
4062 key_freesav(sav, KEY_SADB_UNLOCKED);
4063 return 0;
4064
4065bad:
4066 if (sav)
4067 key_freesav(sav, KEY_SADB_UNLOCKED);
4068 if (state->m)
4069 m_freem(state->m);
4070 state->m = NULL;
4071 return error;
4072}
4073
4074int
4075ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4076{
4077 int error = 0;
4078 struct secasvar *sav = NULL;
4079
4080 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4081
4082 if (state == NULL) {
4083 panic("state == NULL in ipsec6_output");
4084 }
4085 if (state->m == NULL) {
4086 panic("state->m == NULL in ipsec6_output");
4087 }
4088 if (nexthdrp == NULL) {
4089 panic("nexthdrp == NULL in ipsec6_output");
4090 }
4091 if (mprev == NULL) {
4092 panic("mprev == NULL in ipsec6_output");
4093 }
4094
4095 struct ip6_hdr *ip6 = mtod(state->m, struct ip6_hdr *);
4096
4097 struct sockaddr_in6 src = {};
4098 src.sin6_family = AF_INET6;
4099 src.sin6_len = sizeof(src);
4100 memcpy(&src.sin6_addr, &ip6->ip6_src, sizeof(src.sin6_addr));
4101
4102 struct sockaddr_in6 dst = {};
4103 dst.sin6_family = AF_INET6;
4104 dst.sin6_len = sizeof(dst);
4105 memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr));
4106
4107 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
4108 (struct sockaddr *)&src,
4109 (struct sockaddr *)&dst);
4110 if (sav == NULL) {
4111 goto bad;
4112 }
4113
4114 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4115 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4116 goto bad;
4117 }
4118 }
4119 else {
4120 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4121 goto bad;
4122 }
4123 }
4124
4125 if (sav) {
4126 key_freesav(sav, KEY_SADB_UNLOCKED);
4127 }
4128 return 0;
4129
4130bad:
4131 if (sav) {
4132 key_freesav(sav, KEY_SADB_UNLOCKED);
4133 }
4134 m_freem(state->m);
4135 state->m = NULL;
4136 return error;
4137}
4138#endif /*INET6*/
4139
4140#if INET
4141/*
4142 * Chop IP header and option off from the payload.
4143 */
4144struct mbuf *
4145ipsec4_splithdr(struct mbuf *m)
4146{
4147 struct mbuf *mh;
4148 struct ip *ip;
4149 int hlen;
4150
4151 if (m->m_len < sizeof(struct ip))
4152 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4153 ip = mtod(m, struct ip *);
4154#ifdef _IP_VHL
4155 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4156#else
4157 hlen = ip->ip_hl << 2;
4158#endif
4159 if (m->m_len > hlen) {
4160 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4161 if (!mh) {
4162 m_freem(m);
4163 return NULL;
4164 }
4165 M_COPY_PKTHDR(mh, m);
4166 MH_ALIGN(mh, hlen);
4167 m->m_flags &= ~M_PKTHDR;
4168 m_mchtype(m, MT_DATA);
4169 m->m_len -= hlen;
4170 m->m_data += hlen;
4171 mh->m_next = m;
4172 m = mh;
4173 m->m_len = hlen;
4174 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4175 } else if (m->m_len < hlen) {
4176 m = m_pullup(m, hlen);
4177 if (!m)
4178 return NULL;
4179 }
4180 return m;
4181}
4182#endif
4183
4184#if INET6
4185struct mbuf *
4186ipsec6_splithdr(struct mbuf *m)
4187{
4188 struct mbuf *mh;
4189 struct ip6_hdr *ip6;
4190 int hlen;
4191
4192 if (m->m_len < sizeof(struct ip6_hdr))
4193 panic("ipsec6_splithdr: first mbuf too short");
4194 ip6 = mtod(m, struct ip6_hdr *);
4195 hlen = sizeof(struct ip6_hdr);
4196 if (m->m_len > hlen) {
4197 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4198 if (!mh) {
4199 m_freem(m);
4200 return NULL;
4201 }
4202 M_COPY_PKTHDR(mh, m);
4203 MH_ALIGN(mh, hlen);
4204 m->m_flags &= ~M_PKTHDR;
4205 m_mchtype(m, MT_DATA);
4206 m->m_len -= hlen;
4207 m->m_data += hlen;
4208 mh->m_next = m;
4209 m = mh;
4210 m->m_len = hlen;
4211 bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
4212 } else if (m->m_len < hlen) {
4213 m = m_pullup(m, hlen);
4214 if (!m)
4215 return NULL;
4216 }
4217 return m;
4218}
4219#endif
4220
4221/* validate inbound IPsec tunnel packet. */
4222int
4223ipsec4_tunnel_validate(
4224 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4225 int off,
4226 u_int nxt0,
4227 struct secasvar *sav,
4228 sa_family_t *ifamily)
4229{
4230 u_int8_t nxt = nxt0 & 0xff;
4231 struct sockaddr_in *sin;
4232 struct sockaddr_in osrc, odst, i4src, i4dst;
4233 struct sockaddr_in6 i6src, i6dst;
4234 int hlen;
4235 struct secpolicy *sp;
4236 struct ip *oip;
4237
4238 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4239
4240#if DIAGNOSTIC
4241 if (m->m_len < sizeof(struct ip))
4242 panic("too short mbuf on ipsec4_tunnel_validate");
4243#endif
4244 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4245 return 0;
4246 if (m->m_pkthdr.len < off + sizeof(struct ip))
4247 return 0;
4248 /* do not decapsulate if the SA is for transport mode only */
4249 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4250 return 0;
4251
4252 oip = mtod(m, struct ip *);
4253#ifdef _IP_VHL
4254 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4255#else
4256 hlen = oip->ip_hl << 2;
4257#endif
4258 if (hlen != sizeof(struct ip))
4259 return 0;
4260
4261 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4262 if (sin->sin_family != AF_INET)
4263 return 0;
4264 if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0)
4265 return 0;
4266
4267 if (sav->sah->ipsec_if != NULL) {
4268 // the ipsec interface SAs don't have a policies.
4269 if (nxt == IPPROTO_IPV4) {
4270 *ifamily = AF_INET;
4271 } else if (nxt == IPPROTO_IPV6) {
4272 *ifamily = AF_INET6;
4273 } else {
4274 return 0;
4275 }
4276 return 1;
4277 }
4278
4279 /* XXX slow */
4280 bzero(&osrc, sizeof(osrc));
4281 bzero(&odst, sizeof(odst));
4282 osrc.sin_family = odst.sin_family = AF_INET;
4283 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4284 osrc.sin_addr = oip->ip_src;
4285 odst.sin_addr = oip->ip_dst;
4286 /*
4287 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
4288 * - if the inner destination is multicast address, there can be
4289 * multiple permissible inner source address. implementation
4290 * may want to skip verification of inner source address against
4291 * SPD selector.
4292 * - if the inner protocol is ICMP, the packet may be an error report
4293 * from routers on the other side of the VPN cloud (R in the
4294 * following diagram). in this case, we cannot verify inner source
4295 * address against SPD selector.
4296 * me -- gw === gw -- R -- you
4297 *
4298 * we consider the first bullet to be users responsibility on SPD entry
4299 * configuration (if you need to encrypt multicast traffic, set
4300 * the source range of SPD selector to 0.0.0.0/0, or have explicit
4301 * address ranges for possible senders).
4302 * the second bullet is not taken care of (yet).
4303 *
4304 * therefore, we do not do anything special about inner source.
4305 */
4306 if (nxt == IPPROTO_IPV4) {
4307 bzero(&i4src, sizeof(struct sockaddr_in));
4308 bzero(&i4dst, sizeof(struct sockaddr_in));
4309 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4310 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4311 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4312 (caddr_t)&i4src.sin_addr);
4313 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4314 (caddr_t)&i4dst.sin_addr);
4315 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4316 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4317 } else if (nxt == IPPROTO_IPV6) {
4318 bzero(&i6src, sizeof(struct sockaddr_in6));
4319 bzero(&i6dst, sizeof(struct sockaddr_in6));
4320 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4321 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4322 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4323 (caddr_t)&i6src.sin6_addr);
4324 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4325 (caddr_t)&i6dst.sin6_addr);
4326 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4327 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4328 } else
4329 return 0; /* unsupported family */
4330
4331 if (!sp)
4332 return 0;
4333
4334 key_freesp(sp, KEY_SADB_UNLOCKED);
4335
4336 return 1;
4337}
4338
4339#if INET6
4340/* validate inbound IPsec tunnel packet. */
4341int
4342ipsec6_tunnel_validate(
4343 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4344 int off,
4345 u_int nxt0,
4346 struct secasvar *sav,
4347 sa_family_t *ifamily)
4348{
4349 u_int8_t nxt = nxt0 & 0xff;
4350 struct sockaddr_in6 *sin6;
4351 struct sockaddr_in i4src, i4dst;
4352 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4353 struct secpolicy *sp;
4354 struct ip6_hdr *oip6;
4355
4356 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4357
4358#if DIAGNOSTIC
4359 if (m->m_len < sizeof(struct ip6_hdr))
4360 panic("too short mbuf on ipsec6_tunnel_validate");
4361#endif
4362 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4363 return 0;
4364
4365 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr))
4366 return 0;
4367 /* do not decapsulate if the SA is for transport mode only */
4368 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4369 return 0;
4370
4371 oip6 = mtod(m, struct ip6_hdr *);
4372 /* AF_INET should be supported, but at this moment we don't. */
4373 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4374 if (sin6->sin6_family != AF_INET6)
4375 return 0;
4376 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr))
4377 return 0;
4378
4379 if (sav->sah->ipsec_if != NULL) {
4380 // the ipsec interface SAs don't have a policies.
4381 if (nxt == IPPROTO_IPV4) {
4382 *ifamily = AF_INET;
4383 } else if (nxt == IPPROTO_IPV6) {
4384 *ifamily = AF_INET6;
4385 } else {
4386 return 0;
4387 }
4388 return 1;
4389 }
4390
4391 /* XXX slow */
4392 bzero(&osrc, sizeof(osrc));
4393 bzero(&odst, sizeof(odst));
4394 osrc.sin6_family = odst.sin6_family = AF_INET6;
4395 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4396 osrc.sin6_addr = oip6->ip6_src;
4397 odst.sin6_addr = oip6->ip6_dst;
4398
4399 /*
4400 * regarding to inner source address validation, see a long comment
4401 * in ipsec4_tunnel_validate.
4402 */
4403
4404 if (nxt == IPPROTO_IPV4) {
4405 bzero(&i4src, sizeof(struct sockaddr_in));
4406 bzero(&i4dst, sizeof(struct sockaddr_in));
4407 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4408 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4409 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4410 (caddr_t)&i4src.sin_addr);
4411 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4412 (caddr_t)&i4dst.sin_addr);
4413 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4414 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4415 } else if (nxt == IPPROTO_IPV6) {
4416 bzero(&i6src, sizeof(struct sockaddr_in6));
4417 bzero(&i6dst, sizeof(struct sockaddr_in6));
4418 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4419 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4420 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4421 (caddr_t)&i6src.sin6_addr);
4422 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4423 (caddr_t)&i6dst.sin6_addr);
4424 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4425 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4426 } else
4427 return 0; /* unsupported family */
4428 /*
4429 * when there is no suitable inbound policy for the packet of the ipsec
4430 * tunnel mode, the kernel never decapsulate the tunneled packet
4431 * as the ipsec tunnel mode even when the system wide policy is "none".
4432 * then the kernel leaves the generic tunnel module to process this
4433 * packet. if there is no rule of the generic tunnel, the packet
4434 * is rejected and the statistics will be counted up.
4435 */
4436 if (!sp)
4437 return 0;
4438 key_freesp(sp, KEY_SADB_UNLOCKED);
4439
4440 return 1;
4441}
4442#endif
4443
4444/*
4445 * Make a mbuf chain for encryption.
4446 * If the original mbuf chain contains a mbuf with a cluster,
4447 * allocate a new cluster and copy the data to the new cluster.
4448 * XXX: this hack is inefficient, but is necessary to handle cases
4449 * of TCP retransmission...
4450 */
4451struct mbuf *
4452ipsec_copypkt(struct mbuf *m)
4453{
4454 struct mbuf *n, **mpp, *mnew;
4455
4456 for (n = m, mpp = &m; n; n = n->m_next) {
4457 if (n->m_flags & M_EXT) {
4458 /*
4459 * Make a copy only if there are more than one references
4460 * to the cluster.
4461 * XXX: is this approach effective?
4462 */
4463 if (
4464 m_get_ext_free(n) != NULL ||
4465 m_mclhasreference(n)
4466 )
4467 {
4468 int remain, copied;
4469 struct mbuf *mm;
4470
4471 if (n->m_flags & M_PKTHDR) {
4472 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4473 if (mnew == NULL)
4474 goto fail;
4475 M_COPY_PKTHDR(mnew, n);
4476 }
4477 else {
4478 MGET(mnew, M_DONTWAIT, MT_DATA);
4479 if (mnew == NULL)
4480 goto fail;
4481 }
4482 mnew->m_len = 0;
4483 mm = mnew;
4484
4485 /*
4486 * Copy data. If we don't have enough space to
4487 * store the whole data, allocate a cluster
4488 * or additional mbufs.
4489 * XXX: we don't use m_copyback(), since the
4490 * function does not use clusters and thus is
4491 * inefficient.
4492 */
4493 remain = n->m_len;
4494 copied = 0;
4495 while (1) {
4496 int len;
4497 struct mbuf *mn;
4498
4499 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN))
4500 len = remain;
4501 else { /* allocate a cluster */
4502 MCLGET(mm, M_DONTWAIT);
4503 if (!(mm->m_flags & M_EXT)) {
4504 m_free(mm);
4505 goto fail;
4506 }
4507 len = remain < MCLBYTES ?
4508 remain : MCLBYTES;
4509 }
4510
4511 bcopy(n->m_data + copied, mm->m_data,
4512 len);
4513
4514 copied += len;
4515 remain -= len;
4516 mm->m_len = len;
4517
4518 if (remain <= 0) /* completed? */
4519 break;
4520
4521 /* need another mbuf */
4522 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4523 if (mn == NULL)
4524 goto fail;
4525 mn->m_pkthdr.rcvif = NULL;
4526 mm->m_next = mn;
4527 mm = mn;
4528 }
4529
4530 /* adjust chain */
4531 mm->m_next = m_free(n);
4532 n = mm;
4533 *mpp = mnew;
4534 mpp = &n->m_next;
4535
4536 continue;
4537 }
4538 }
4539 *mpp = n;
4540 mpp = &n->m_next;
4541 }
4542
4543 return(m);
4544 fail:
4545 m_freem(m);
4546 return(NULL);
4547}
4548
4549/*
4550 * Tags are allocated as mbufs for now, since our minimum size is MLEN, we
4551 * should make use of up to that much space.
4552 */
4553#define IPSEC_TAG_HEADER \
4554
4555struct ipsec_tag {
4556 struct socket *socket;
4557 u_int32_t history_count;
4558 struct ipsec_history history[];
4559#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
4560/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
4561 * are 32-bit:
4562 * Aligning to 64-bit since we case to m_tag which is 64-bit aligned.
4563 */
4564} __attribute__ ((aligned(8)));
4565#else
4566};
4567#endif
4568
4569#define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
4570#define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
4571#define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
4572 sizeof(struct ipsec_history))
4573
4574static struct ipsec_tag *
4575ipsec_addaux(
4576 struct mbuf *m)
4577{
4578 struct m_tag *tag;
4579
4580 /* Check if the tag already exists */
4581 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4582
4583 if (tag == NULL) {
4584 struct ipsec_tag *itag;
4585
4586 /* Allocate a tag */
4587 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4588 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4589
4590 if (tag) {
4591 itag = (struct ipsec_tag*)(tag + 1);
4592 itag->socket = 0;
4593 itag->history_count = 0;
4594
4595 m_tag_prepend(m, tag);
4596 }
4597 }
4598
4599 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4600}
4601
4602static struct ipsec_tag *
4603ipsec_findaux(
4604 struct mbuf *m)
4605{
4606 struct m_tag *tag;
4607
4608 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4609
4610 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4611}
4612
4613void
4614ipsec_delaux(
4615 struct mbuf *m)
4616{
4617 struct m_tag *tag;
4618
4619 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4620
4621 if (tag) {
4622 m_tag_delete(m, tag);
4623 }
4624}
4625
4626/* if the aux buffer is unnecessary, nuke it. */
4627static void
4628ipsec_optaux(
4629 struct mbuf *m,
4630 struct ipsec_tag *itag)
4631{
4632 if (itag && itag->socket == NULL && itag->history_count == 0) {
4633 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4634 }
4635}
4636
4637int
4638ipsec_setsocket(struct mbuf *m, struct socket *so)
4639{
4640 struct ipsec_tag *tag;
4641
4642 /* if so == NULL, don't insist on getting the aux mbuf */
4643 if (so) {
4644 tag = ipsec_addaux(m);
4645 if (!tag)
4646 return ENOBUFS;
4647 } else
4648 tag = ipsec_findaux(m);
4649 if (tag) {
4650 tag->socket = so;
4651 ipsec_optaux(m, tag);
4652 }
4653 return 0;
4654}
4655
4656struct socket *
4657ipsec_getsocket(struct mbuf *m)
4658{
4659 struct ipsec_tag *itag;
4660
4661 itag = ipsec_findaux(m);
4662 if (itag)
4663 return itag->socket;
4664 else
4665 return NULL;
4666}
4667
4668int
4669ipsec_addhist(
4670 struct mbuf *m,
4671 int proto,
4672 u_int32_t spi)
4673{
4674 struct ipsec_tag *itag;
4675 struct ipsec_history *p;
4676 itag = ipsec_addaux(m);
4677 if (!itag)
4678 return ENOBUFS;
4679 if (itag->history_count == IPSEC_HISTORY_MAX)
4680 return ENOSPC; /* XXX */
4681
4682 p = &itag->history[itag->history_count];
4683 itag->history_count++;
4684
4685 bzero(p, sizeof(*p));
4686 p->ih_proto = proto;
4687 p->ih_spi = spi;
4688
4689 return 0;
4690}
4691
4692struct ipsec_history *
4693ipsec_gethist(
4694 struct mbuf *m,
4695 int *lenp)
4696{
4697 struct ipsec_tag *itag;
4698
4699 itag = ipsec_findaux(m);
4700 if (!itag)
4701 return NULL;
4702 if (itag->history_count == 0)
4703 return NULL;
4704 if (lenp)
4705 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4706 return itag->history;
4707}
4708
4709void
4710ipsec_clearhist(
4711 struct mbuf *m)
4712{
4713 struct ipsec_tag *itag;
4714
4715 itag = ipsec_findaux(m);
4716 if (itag) {
4717 itag->history_count = 0;
4718 }
4719 ipsec_optaux(m, itag);
4720}
4721
4722__private_extern__ int
4723ipsec_send_natt_keepalive(
4724 struct secasvar *sav)
4725{
4726 struct mbuf *m;
4727 struct ip *ip;
4728 int error;
4729 struct ip_out_args ipoa;
4730 struct route ro;
4731 int keepalive_interval = natt_keepalive_interval;
4732
4733 bzero(&ipoa, sizeof(ipoa));
4734 ipoa.ipoa_boundif = IFSCOPE_NONE;
4735 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
4736 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4737 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4738
4739 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4740
4741 if ((esp_udp_encap_port & 0xFFFF) == 0 || sav->remote_ike_port == 0) return FALSE;
4742
4743 if (sav->natt_interval != 0) {
4744 keepalive_interval = (int)sav->natt_interval;
4745 }
4746
4747 // natt timestamp may have changed... reverify
4748 if ((natt_now - sav->natt_last_activity) < keepalive_interval) return FALSE;
4749
4750 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) return FALSE; // don't send these from the kernel
4751
4752 m = m_gethdr(M_NOWAIT, MT_DATA);
4753 if (m == NULL) return FALSE;
4754
4755 ip = (__typeof__(ip))m_mtod(m);
4756
4757 // this sends one type of NATT keepalives (Type 1, ESP keepalives, aren't sent by kernel)
4758 if ((sav->flags & SADB_X_EXT_ESP_KEEPALIVE) == 0) {
4759 struct udphdr *uh;
4760
4761 /*
4762 * Type 2: a UDP packet complete with IP header.
4763 * We must do this because UDP output requires
4764 * an inpcb which we don't have. UDP packet
4765 * contains one byte payload. The byte is set
4766 * to 0xFF.
4767 */
4768 uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4769 m->m_len = sizeof(struct udpiphdr) + 1;
4770 bzero(m_mtod(m), m->m_len);
4771 m->m_pkthdr.len = m->m_len;
4772
4773 ip->ip_len = m->m_len;
4774 ip->ip_ttl = ip_defttl;
4775 ip->ip_p = IPPROTO_UDP;
4776 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4777 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4778 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4779 } else {
4780 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4781 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4782 }
4783 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4784 uh->uh_dport = htons(sav->remote_ike_port);
4785 uh->uh_ulen = htons(1 + sizeof(*uh));
4786 uh->uh_sum = 0;
4787 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4788 }
4789
4790 // grab sadb_mutex, to get a local copy of sah's route cache
4791 lck_mtx_lock(sadb_mutex);
4792 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4793 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET)
4794 ROUTE_RELEASE(&sav->sah->sa_route);
4795
4796 route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4797 lck_mtx_unlock(sadb_mutex);
4798
4799 necp_mark_packet_as_keepalive(m, TRUE);
4800
4801 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4802
4803 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
4804 lck_mtx_lock(sadb_mutex);
4805 route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4806 lck_mtx_unlock(sadb_mutex);
4807 if (error == 0) {
4808 sav->natt_last_activity = natt_now;
4809 return TRUE;
4810 }
4811 return FALSE;
4812}
4813
4814__private_extern__ bool
4815ipsec_fill_offload_frame(ifnet_t ifp,
4816 struct secasvar *sav,
4817 struct ifnet_keepalive_offload_frame *frame,
4818 size_t frame_data_offset)
4819{
4820 u_int8_t *data = NULL;
4821 struct ip *ip = NULL;
4822 struct udphdr *uh = NULL;
4823
4824 if (sav == NULL || sav->sah == NULL || frame == NULL ||
4825 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
4826 sav->sah->saidx.dst.ss_family != AF_INET ||
4827 !(sav->flags & SADB_X_EXT_NATT) ||
4828 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
4829 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
4830 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
4831 (esp_udp_encap_port & 0xFFFF) == 0 ||
4832 sav->remote_ike_port == 0 ||
4833 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
4834 /* SA is not eligible for keepalive offload on this interface */
4835 return (FALSE);
4836 }
4837
4838 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
4839 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4840 /* Not enough room in this data frame */
4841 return (FALSE);
4842 }
4843
4844 data = frame->data;
4845 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
4846 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
4847
4848 frame->length = frame_data_offset + sizeof(struct udpiphdr) + 1;
4849 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
4850 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
4851
4852 bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
4853
4854 ip->ip_v = IPVERSION;
4855 ip->ip_hl = sizeof(struct ip) >> 2;
4856 ip->ip_off &= htons(~IP_OFFMASK);
4857 ip->ip_off &= htons(~IP_MF);
4858 switch (ip4_ipsec_dfbit) {
4859 case 0: /* clear DF bit */
4860 ip->ip_off &= htons(~IP_DF);
4861 break;
4862 case 1: /* set DF bit */
4863 ip->ip_off |= htons(IP_DF);
4864 break;
4865 default: /* copy DF bit */
4866 break;
4867 }
4868 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
4869 if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) {
4870 ip->ip_id = 0;
4871 } else {
4872 ip->ip_id = ip_randomid();
4873 }
4874 ip->ip_ttl = ip_defttl;
4875 ip->ip_p = IPPROTO_UDP;
4876 ip->ip_sum = 0;
4877 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4878 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4879 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4880 } else {
4881 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4882 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4883 }
4884 ip->ip_sum = in_cksum_hdr_opt(ip);
4885 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4886 uh->uh_dport = htons(sav->remote_ike_port);
4887 uh->uh_ulen = htons(1 + sizeof(*uh));
4888 uh->uh_sum = 0;
4889 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4890
4891 if (sav->natt_offload_interval != 0) {
4892 frame->interval = sav->natt_offload_interval;
4893 } else if (sav->natt_interval != 0) {
4894 frame->interval = sav->natt_interval;
4895 } else {
4896 frame->interval = natt_keepalive_interval;
4897 }
4898 return (TRUE);
4899}
4900