1/*
2 * Copyright (c) 2008-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30/* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61/*
62 * IPsec controller part.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/mbuf.h>
69#include <sys/mcache.h>
70#include <sys/domain.h>
71#include <sys/protosw.h>
72#include <sys/socket.h>
73#include <sys/socketvar.h>
74#include <sys/errno.h>
75#include <sys/time.h>
76#include <sys/kernel.h>
77#include <sys/syslog.h>
78#include <sys/sysctl.h>
79#include <sys/priv.h>
80#include <kern/locks.h>
81#include <sys/kauth.h>
82#include <sys/bitstring.h>
83
84#include <libkern/OSAtomic.h>
85#include <libkern/sysctl.h>
86
87#include <net/if.h>
88#include <net/route.h>
89#include <net/if_ipsec.h>
90#include <net/if_ports_used.h>
91
92#include <netinet/in.h>
93#include <netinet/in_systm.h>
94#include <netinet/ip.h>
95#include <netinet/ip_var.h>
96#include <netinet/in_var.h>
97#include <netinet/udp.h>
98#include <netinet/udp_var.h>
99#include <netinet/ip_ecn.h>
100#include <netinet6/ip6_ecn.h>
101#include <netinet/tcp.h>
102#include <netinet/udp.h>
103
104#include <netinet/ip6.h>
105#include <netinet6/ip6_var.h>
106#include <netinet/in_pcb.h>
107#include <netinet/icmp6.h>
108
109#include <netinet6/ipsec.h>
110#include <netinet6/ipsec6.h>
111#include <netinet6/ah.h>
112#include <netinet6/ah6.h>
113#if IPSEC_ESP
114#include <netinet6/esp.h>
115#include <netinet6/esp6.h>
116#endif
117#include <netkey/key.h>
118#include <netkey/keydb.h>
119#include <netkey/key_debug.h>
120
121#include <net/net_osdep.h>
122
123#include <IOKit/pwr_mgt/IOPM.h>
124
125#include <os/log_private.h>
126
127#include <kern/assert.h>
128#if SKYWALK
129#include <skywalk/os_skywalk_private.h>
130#endif // SKYWALK
131
132#if IPSEC_DEBUG
133int ipsec_debug = 1;
134#else
135int ipsec_debug = 0;
136#endif
137
138#include <sys/kdebug.h>
139#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
140#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
141#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
142#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
143#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
144
145struct ipsecstat ipsecstat;
146int ip4_ah_cleartos = 1;
147int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
148int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
149int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
150int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
151int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
152int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
153struct secpolicy ip4_def_policy;
154int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
155int ip4_esp_randpad = -1;
156int esp_udp_encap_port = 0;
157static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
158extern int natt_keepalive_interval;
159extern u_int64_t natt_now;
160
161struct ipsec_tag;
162
163void *sleep_wake_handle = NULL;
164
165SYSCTL_DECL(_net_inet_ipsec);
166SYSCTL_DECL(_net_inet6_ipsec6);
167/* net.inet.ipsec */
168SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
169 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
170SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
171 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
172SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
173 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
174SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
175 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
176SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
177 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
178SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
179 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
180SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
181 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
182SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
183 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
184SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
185 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
186SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
187 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
188SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
189 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
190SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
191 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
192
193/* for performance, we bypass ipsec until a security policy is set */
194int ipsec_bypass = 1;
195SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass, 0, "");
196
/*
 * NAT Traversal requires a UDP port for encapsulation;
 * esp_udp_encap_port controls which port is used. Racoon
 * must set this to the port racoon is using locally for
 * NAT traversal.
 */
203SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
204 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
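/*
 * Example (hypothetical values): a NAT-T capable IKE daemon doing UDP
 * encapsulation on the conventional port 4500 would announce it with
 *     sysctl -w net.inet.ipsec.esp_port=4500
 * or programmatically via sysctlbyname("net.inet.ipsec.esp_port", ...).
 */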
205
206struct ipsecstat ipsec6stat;
207int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
208int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
209int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
210int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
211struct secpolicy ip6_def_policy;
212int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
213int ip6_esp_randpad = -1;
214
215/* net.inet6.ipsec6 */
216SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
217 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
218SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
219 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
220SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
221 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
222SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
223 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
224SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
225 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
226SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
227 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
228SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
229 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
230SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
231 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
232SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
233 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
234
235SYSCTL_DECL(_net_link_generic_system);
236
237static int ipsec_setspidx_interface(struct secpolicyindex *, u_int8_t, struct mbuf *,
238 int, int, int);
239static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int8_t, u_int,
240 struct mbuf *, int);
241static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
242static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
243static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
244static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
245static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
246static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
247static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
248static struct inpcbpolicy *ipsec_newpcbpolicy(void);
249static void ipsec_delpcbpolicy(struct inpcbpolicy *);
250static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
251static int ipsec_set_policy(struct secpolicy **pcb_sp,
252 int optname, caddr_t request, size_t len, int priv);
253static void vshiftl(unsigned char *, int, size_t);
254static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
255static int ipsec64_encapsulate(struct mbuf *, struct secasvar *, uint32_t);
256static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
257static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
258static struct ipsec_tag *ipsec_addaux(struct mbuf *);
259static struct ipsec_tag *ipsec_findaux(struct mbuf *);
260static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
261int ipsec_send_natt_keepalive(struct secasvar *sav);
262bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
263
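/*
 * Sleep/wake plumbing: registerSleepWakeInterest() below is the IOKit hook
 * used to install a power-event handler, and sleep_wake_handle above stores
 * the registration token it returns (the handler itself is defined later in
 * this file, outside this excerpt).
 */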
264extern bool IOPMCopySleepWakeUUIDKey(char *, size_t);
265
266typedef IOReturn (*IOServiceInterestHandler)( void * target, void * refCon,
267 UInt32 messageType, void * provider,
268 void * messageArgument, vm_size_t argSize );
269extern void *registerSleepWakeInterest(IOServiceInterestHandler, void *, void *);
270
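/*
 * sysctl handler for net.inet.ipsec.def_policy: only IPSEC_POLICY_NONE and
 * IPSEC_POLICY_DISCARD are accepted as the system default, and the global
 * bypass is dropped once a non-NONE default is installed so that packets
 * actually consult the SPD.
 */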
271static int
272sysctl_def_policy SYSCTL_HANDLER_ARGS
273{
274 int new_policy = ip4_def_policy.policy;
    int error = sysctl_handle_int(oidp, &new_policy, 0, req);
276
277#pragma unused(arg1, arg2)
278 if (error == 0) {
279 if (new_policy != IPSEC_POLICY_NONE &&
280 new_policy != IPSEC_POLICY_DISCARD) {
281 return EINVAL;
282 }
283 ip4_def_policy.policy = new_policy;
284
285 /* Turn off the bypass if the default security policy changes */
286 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
287 ipsec_bypass = 0;
288 }
289 }
290
291 return error;
292}
293
/*
 * For an OUTBOUND packet that has a socket: search the SPD for the packet
 * and return a pointer to the SP.
 * OUT: NULL: no appropriate SP found, the following value is set to error.
 *         0      : bypass
 *         EACCES : discard packet.
 *         ENOENT : ipsec_acquire() in progress, maybe.
 *         others : error occurred.
 *      others: a pointer to SP
 *
 * NOTE: IPv6 mapped address concerns are handled here.
 */
306struct secpolicy *
307ipsec4_getpolicybysock(struct mbuf *m,
308 u_int8_t dir,
309 struct socket *so,
310 int *error)
311{
312 struct inpcbpolicy *pcbsp = NULL;
313 struct secpolicy *currsp = NULL; /* policy on socket */
314 struct secpolicy *kernsp = NULL; /* policy on kernel */
315
316 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
317 /* sanity check */
318 if (m == NULL || so == NULL || error == NULL) {
319 panic("ipsec4_getpolicybysock: NULL pointer was passed.");
320 }
321
322 if (so->so_pcb == NULL) {
323 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
324 return ipsec4_getpolicybyaddr(m, dir, 0, error);
325 }
326
327 switch (SOCK_DOM(so)) {
328 case PF_INET:
329 pcbsp = sotoinpcb(so)->inp_sp;
330 break;
331 case PF_INET6:
332 pcbsp = sotoin6pcb(so)->in6p_sp;
333 break;
334 }
335
336 if (!pcbsp) {
337 /* Socket has not specified an IPSEC policy */
338 return ipsec4_getpolicybyaddr(m, dir, 0, error);
339 }
340
341 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0, 0, 0, 0, 0);
342
343 switch (SOCK_DOM(so)) {
344 case PF_INET:
345 /* set spidx in pcb */
346 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
347 break;
348 case PF_INET6:
349 /* set spidx in pcb */
350 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
351 break;
352 default:
353 panic("ipsec4_getpolicybysock: unsupported address family");
354 }
355 if (*error) {
356 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1, *error, 0, 0, 0);
357 return NULL;
358 }
359
360 /* sanity check */
361 if (pcbsp == NULL) {
362 panic("ipsec4_getpolicybysock: pcbsp is NULL.");
363 }
364
365 switch (dir) {
366 case IPSEC_DIR_INBOUND:
367 currsp = pcbsp->sp_in;
368 break;
369 case IPSEC_DIR_OUTBOUND:
370 currsp = pcbsp->sp_out;
371 break;
372 default:
373 panic("ipsec4_getpolicybysock: illegal direction.");
374 }
375
376 /* sanity check */
377 if (currsp == NULL) {
378 panic("ipsec4_getpolicybysock: currsp is NULL.");
379 }
380
    /* when privileged socket */
382 if (pcbsp->priv) {
383 switch (currsp->policy) {
384 case IPSEC_POLICY_BYPASS:
385 lck_mtx_lock(sadb_mutex);
386 currsp->refcnt++;
387 lck_mtx_unlock(sadb_mutex);
388 *error = 0;
389 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2, *error, 0, 0, 0);
390 return currsp;
391
392 case IPSEC_POLICY_ENTRUST:
393 /* look for a policy in SPD */
394 kernsp = key_allocsp(&currsp->spidx, dir);
395
396 /* SP found */
397 if (kernsp != NULL) {
398 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
399 printf("DP ipsec4_getpolicybysock called "
400 "to allocate SP:0x%llx\n",
401 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
402 *error = 0;
403 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3, *error, 0, 0, 0);
404 return kernsp;
405 }
406
407 /* no SP found */
408 lck_mtx_lock(sadb_mutex);
409 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
410 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
411 ipseclog((LOG_INFO,
412 "fixed system default policy: %d->%d\n",
413 ip4_def_policy.policy, IPSEC_POLICY_NONE));
414 ip4_def_policy.policy = IPSEC_POLICY_NONE;
415 }
416 ip4_def_policy.refcnt++;
417 lck_mtx_unlock(sadb_mutex);
418 *error = 0;
419 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4, *error, 0, 0, 0);
420 return &ip4_def_policy;
421
422 case IPSEC_POLICY_IPSEC:
423 lck_mtx_lock(sadb_mutex);
424 currsp->refcnt++;
425 lck_mtx_unlock(sadb_mutex);
426 *error = 0;
427 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5, *error, 0, 0, 0);
428 return currsp;
429
430 default:
431 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
432 "Invalid policy for PCB %d\n", currsp->policy));
433 *error = EINVAL;
434 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6, *error, 0, 0, 0);
435 return NULL;
436 }
437 /* NOTREACHED */
438 }
439
    /* when non-privileged socket */
441 /* look for a policy in SPD */
442 kernsp = key_allocsp(&currsp->spidx, dir);
443
444 /* SP found */
445 if (kernsp != NULL) {
446 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
447 printf("DP ipsec4_getpolicybysock called "
448 "to allocate SP:0x%llx\n",
449 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
450 *error = 0;
451 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7, *error, 0, 0, 0);
452 return kernsp;
453 }
454
455 /* no SP found */
456 switch (currsp->policy) {
457 case IPSEC_POLICY_BYPASS:
        ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
            "Illegal policy for non-privileged socket %d\n",
            currsp->policy));
461 *error = EINVAL;
462 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8, *error, 0, 0, 0);
463 return NULL;
464
465 case IPSEC_POLICY_ENTRUST:
466 lck_mtx_lock(sadb_mutex);
467 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
468 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
469 ipseclog((LOG_INFO,
470 "fixed system default policy: %d->%d\n",
471 ip4_def_policy.policy, IPSEC_POLICY_NONE));
472 ip4_def_policy.policy = IPSEC_POLICY_NONE;
473 }
474 ip4_def_policy.refcnt++;
475 lck_mtx_unlock(sadb_mutex);
476 *error = 0;
477 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9, *error, 0, 0, 0);
478 return &ip4_def_policy;
479
480 case IPSEC_POLICY_IPSEC:
481 lck_mtx_lock(sadb_mutex);
482 currsp->refcnt++;
483 lck_mtx_unlock(sadb_mutex);
484 *error = 0;
485 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10, *error, 0, 0, 0);
486 return currsp;
487
488 default:
489 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
490 "Invalid policy for PCB %d\n", currsp->policy));
491 *error = EINVAL;
492 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11, *error, 0, 0, 0);
493 return NULL;
494 }
495 /* NOTREACHED */
496}
497
/*
 * For a FORWARDING packet or an OUTBOUND packet without a socket: search the
 * SPD for the packet and return a pointer to the SP.
 * OUT: positive: a pointer to the entry for the matched security policy leaf.
 *      NULL: no appropriate SP found, the following value is set to error.
 *         0      : bypass
 *         EACCES : discard packet.
 *         ENOENT : ipsec_acquire() in progress, maybe.
 *         others : error occurred.
 */
508struct secpolicy *
509ipsec4_getpolicybyaddr(struct mbuf *m,
510 u_int8_t dir,
511 int flag,
512 int *error)
513{
514 struct secpolicy *sp = NULL;
515
516 if (ipsec_bypass != 0) {
517 return 0;
518 }
519
520 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
521
522 /* sanity check */
523 if (m == NULL || error == NULL) {
524 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.");
525 }
526 {
527 struct secpolicyindex spidx;
528
529 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
        bzero(&spidx, sizeof(spidx));

        /* make an index to look for a policy */
        *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
            (flag & IP_FORWARDING) ? 0 : 1);
535
536 if (*error != 0) {
537 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, *error, 0, 0, 0);
538 return NULL;
539 }
540
541 sp = key_allocsp(&spidx, dir);
542 }
543
544 /* SP found */
545 if (sp != NULL) {
546 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
547 printf("DP ipsec4_getpolicybyaddr called "
548 "to allocate SP:0x%llx\n",
549 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
550 *error = 0;
551 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
552 return sp;
553 }
554
555 /* no SP found */
556 lck_mtx_lock(sadb_mutex);
557 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
558 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
559 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
560 ip4_def_policy.policy,
561 IPSEC_POLICY_NONE));
562 ip4_def_policy.policy = IPSEC_POLICY_NONE;
563 }
564 ip4_def_policy.refcnt++;
565 lck_mtx_unlock(sadb_mutex);
566 *error = 0;
567 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3, *error, 0, 0, 0);
568 return &ip4_def_policy;
569}
570
/*
 * Match on the bound interface rather than the source address.
 * Unlike getpolicybyaddr, do not fall back to the default policy.
 * Return 0 if processing should continue, or -1 if the packet
 * should be dropped.
 */
576int
577ipsec4_getpolicybyinterface(struct mbuf *m,
578 u_int8_t dir,
579 int *flags,
580 struct ip_out_args *ipoa,
581 struct secpolicy **sp)
582{
583 struct secpolicyindex spidx;
584 int error = 0;
585
586 if (ipsec_bypass != 0) {
587 return 0;
588 }
589
590 /* Sanity check */
591 if (m == NULL || ipoa == NULL || sp == NULL) {
592 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.");
593 }
594
595 if (ipoa->ipoa_boundif == IFSCOPE_NONE) {
596 return 0;
597 }
598
599 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
    bzero(&spidx, sizeof(spidx));

    /* make an index to look for a policy */
    error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
        ipoa->ipoa_boundif, 4);
605
606 if (error != 0) {
607 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
608 return 0;
609 }
610
611 *sp = key_allocsp(&spidx, dir);
612
613 /* Return SP, whether NULL or not */
614 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
615 if ((*sp)->ipsec_if == NULL) {
616 /* Invalid to capture on an interface without redirect */
617 key_freesp(*sp, KEY_SADB_UNLOCKED);
618 *sp = NULL;
619 return -1;
620 } else if ((*sp)->disabled) {
621 /* Disabled policies go in the clear */
622 key_freesp(*sp, KEY_SADB_UNLOCKED);
623 *sp = NULL;
624 *flags |= IP_NOIPSEC; /* Avoid later IPsec check */
625 } else {
626 /* If policy is enabled, redirect to ipsec interface */
627 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
628 }
629 }
630
631 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
632
633 return 0;
634}
635
636
/*
 * For an OUTBOUND packet that has a socket: search the SPD for the packet
 * and return a pointer to the SP.
 * OUT: NULL: no appropriate SP found, the following value is set to error.
 *         0      : bypass
 *         EACCES : discard packet.
 *         ENOENT : ipsec_acquire() in progress, maybe.
 *         others : error occurred.
 *      others: a pointer to SP
 */
647struct secpolicy *
648ipsec6_getpolicybysock(struct mbuf *m,
649 u_int8_t dir,
650 struct socket *so,
651 int *error)
652{
653 struct inpcbpolicy *pcbsp = NULL;
654 struct secpolicy *currsp = NULL; /* policy on socket */
655 struct secpolicy *kernsp = NULL; /* policy on kernel */
656
657 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
658
659 /* sanity check */
660 if (m == NULL || so == NULL || error == NULL) {
661 panic("ipsec6_getpolicybysock: NULL pointer was passed.");
662 }
663
664#if DIAGNOSTIC
665 if (SOCK_DOM(so) != PF_INET6) {
666 panic("ipsec6_getpolicybysock: socket domain != inet6");
667 }
668#endif
669
670 pcbsp = sotoin6pcb(so)->in6p_sp;
671
672 if (!pcbsp) {
673 return ipsec6_getpolicybyaddr(m, dir, 0, error);
674 }
675
676 /* set spidx in pcb */
677 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
678
679 /* sanity check */
680 if (pcbsp == NULL) {
681 panic("ipsec6_getpolicybysock: pcbsp is NULL.");
682 }
683
684 switch (dir) {
685 case IPSEC_DIR_INBOUND:
686 currsp = pcbsp->sp_in;
687 break;
688 case IPSEC_DIR_OUTBOUND:
689 currsp = pcbsp->sp_out;
690 break;
691 default:
692 panic("ipsec6_getpolicybysock: illegal direction.");
693 }
694
695 /* sanity check */
696 if (currsp == NULL) {
697 panic("ipsec6_getpolicybysock: currsp is NULL.");
698 }
699
    /* when privileged socket */
701 if (pcbsp->priv) {
702 switch (currsp->policy) {
703 case IPSEC_POLICY_BYPASS:
704 lck_mtx_lock(sadb_mutex);
705 currsp->refcnt++;
706 lck_mtx_unlock(sadb_mutex);
707 *error = 0;
708 return currsp;
709
710 case IPSEC_POLICY_ENTRUST:
711 /* look for a policy in SPD */
712 kernsp = key_allocsp(&currsp->spidx, dir);
713
714 /* SP found */
715 if (kernsp != NULL) {
716 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
717 printf("DP ipsec6_getpolicybysock called "
718 "to allocate SP:0x%llx\n",
719 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
720 *error = 0;
721 return kernsp;
722 }
723
724 /* no SP found */
725 lck_mtx_lock(sadb_mutex);
726 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
727 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
728 ipseclog((LOG_INFO,
729 "fixed system default policy: %d->%d\n",
730 ip6_def_policy.policy, IPSEC_POLICY_NONE));
731 ip6_def_policy.policy = IPSEC_POLICY_NONE;
732 }
733 ip6_def_policy.refcnt++;
734 lck_mtx_unlock(sadb_mutex);
735 *error = 0;
736 return &ip6_def_policy;
737
738 case IPSEC_POLICY_IPSEC:
739 lck_mtx_lock(sadb_mutex);
740 currsp->refcnt++;
741 lck_mtx_unlock(sadb_mutex);
742 *error = 0;
743 return currsp;
744
745 default:
746 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
747 "Invalid policy for PCB %d\n", currsp->policy));
748 *error = EINVAL;
749 return NULL;
750 }
751 /* NOTREACHED */
752 }
753
    /* when non-privileged socket */
755 /* look for a policy in SPD */
756 kernsp = key_allocsp(&currsp->spidx, dir);
757
758 /* SP found */
759 if (kernsp != NULL) {
760 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
761 printf("DP ipsec6_getpolicybysock called "
762 "to allocate SP:0x%llx\n",
763 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
764 *error = 0;
765 return kernsp;
766 }
767
768 /* no SP found */
769 switch (currsp->policy) {
770 case IPSEC_POLICY_BYPASS:
        ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
            "Illegal policy for non-privileged socket %d\n",
            currsp->policy));
774 *error = EINVAL;
775 return NULL;
776
777 case IPSEC_POLICY_ENTRUST:
778 lck_mtx_lock(sadb_mutex);
779 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
780 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
781 ipseclog((LOG_INFO,
782 "fixed system default policy: %d->%d\n",
783 ip6_def_policy.policy, IPSEC_POLICY_NONE));
784 ip6_def_policy.policy = IPSEC_POLICY_NONE;
785 }
786 ip6_def_policy.refcnt++;
787 lck_mtx_unlock(sadb_mutex);
788 *error = 0;
789 return &ip6_def_policy;
790
791 case IPSEC_POLICY_IPSEC:
792 lck_mtx_lock(sadb_mutex);
793 currsp->refcnt++;
794 lck_mtx_unlock(sadb_mutex);
795 *error = 0;
796 return currsp;
797
798 default:
        ipseclog((LOG_ERR,
            "ipsec6_getpolicybysock: Invalid policy for PCB %d\n",
            currsp->policy));
802 *error = EINVAL;
803 return NULL;
804 }
805 /* NOTREACHED */
806}
807
/*
 * For a FORWARDING packet or an OUTBOUND packet without a socket: search the
 * SPD for the packet and return a pointer to the SP.
 * `flag' indicates whether the packet is being forwarded.
 *      flag = 1: forward
 * OUT: positive: a pointer to the entry for the matched security policy leaf.
 *      NULL: no appropriate SP found, the following value is set to error.
 *         0      : bypass
 *         EACCES : discard packet.
 *         ENOENT : ipsec_acquire() in progress, maybe.
 *         others : error occurred.
 */
820#ifndef IP_FORWARDING
821#define IP_FORWARDING 1
822#endif
823
824struct secpolicy *
825ipsec6_getpolicybyaddr(struct mbuf *m,
826 u_int8_t dir,
827 int flag,
828 int *error)
829{
830 struct secpolicy *sp = NULL;
831
832 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
833
834 /* sanity check */
835 if (m == NULL || error == NULL) {
836 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.");
837 }
838
839 {
840 struct secpolicyindex spidx;
841
        bzero(&spidx, sizeof(spidx));

        /* make an index to look for a policy */
        *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
            (flag & IP_FORWARDING) ? 0 : 1);
847
848 if (*error != 0) {
849 return NULL;
850 }
851
852 sp = key_allocsp(&spidx, dir);
853 }
854
855 /* SP found */
856 if (sp != NULL) {
857 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
858 printf("DP ipsec6_getpolicybyaddr called "
859 "to allocate SP:0x%llx\n",
860 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
861 *error = 0;
862 return sp;
863 }
864
865 /* no SP found */
866 lck_mtx_lock(sadb_mutex);
867 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
868 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
869 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
870 ip6_def_policy.policy, IPSEC_POLICY_NONE));
871 ip6_def_policy.policy = IPSEC_POLICY_NONE;
872 }
873 ip6_def_policy.refcnt++;
874 lck_mtx_unlock(sadb_mutex);
875 *error = 0;
876 return &ip6_def_policy;
877}
878
/*
 * Match on the bound interface rather than the source address.
 * Unlike getpolicybyaddr, do not fall back to the default policy.
 * Return 0 if processing should continue, or -1 if the packet
 * should be dropped.
 */
884int
885ipsec6_getpolicybyinterface(struct mbuf *m,
886 u_int8_t dir,
887 int flag,
888 struct ip6_out_args *ip6oap,
889 int *noipsec,
890 struct secpolicy **sp)
891{
892 struct secpolicyindex spidx;
893 int error = 0;
894
895 if (ipsec_bypass != 0) {
896 return 0;
897 }
898
899 /* Sanity check */
900 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) {
901 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.");
902 }
903
904 *noipsec = 0;
905
906 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) {
907 return 0;
908 }
909
910 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
    bzero(&spidx, sizeof(spidx));

    /* make an index to look for a policy */
    error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
        ip6oap->ip6oa_boundif, 6);
916
917 if (error != 0) {
918 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
919 return 0;
920 }
921
922 *sp = key_allocsp(&spidx, dir);
923
924 /* Return SP, whether NULL or not */
925 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
926 if ((*sp)->ipsec_if == NULL) {
927 /* Invalid to capture on an interface without redirect */
928 key_freesp(*sp, KEY_SADB_UNLOCKED);
929 *sp = NULL;
930 return -1;
931 } else if ((*sp)->disabled) {
932 /* Disabled policies go in the clear */
933 key_freesp(*sp, KEY_SADB_UNLOCKED);
934 *sp = NULL;
935 *noipsec = 1; /* Avoid later IPsec check */
936 } else {
937 /* If policy is enabled, redirect to ipsec interface */
938 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
939 }
940 }
941
    KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
943
944 return 0;
945}
946
/*
 * Set the IP addresses into spidx from the mbuf.
 * This is used for forwarded packets and for ICMP echo replies.
 *
 * IN: get the following from the mbuf:
 *     protocol family, src, dst, next protocol
 * OUT:
 *     0: success.
 *     other: failure, and set errno.
 */
957static int
958ipsec_setspidx_mbuf(
959 struct secpolicyindex *spidx,
960 u_int8_t dir,
961 __unused u_int family,
962 struct mbuf *m,
963 int needport)
964{
965 int error;
966
967 /* sanity check */
968 if (spidx == NULL || m == NULL) {
969 panic("ipsec_setspidx_mbuf: NULL pointer was passed.");
970 }
971
    bzero(spidx, sizeof(*spidx));
973
974 error = ipsec_setspidx(m, spidx, needport, 0);
975 if (error) {
976 goto bad;
977 }
978 spidx->dir = dir;
979
980 return 0;
981
982bad:
983 /* XXX initialize */
    bzero(spidx, sizeof(*spidx));
985 return EINVAL;
986}
987
988static int
989ipsec_setspidx_interface(
990 struct secpolicyindex *spidx,
991 u_int8_t dir,
992 struct mbuf *m,
993 int needport,
994 int ifindex,
995 int ip_version)
996{
997 int error;
998
999 /* sanity check */
1000 if (spidx == NULL || m == NULL) {
1001 panic("ipsec_setspidx_interface: NULL pointer was passed.");
1002 }
1003
    bzero(spidx, sizeof(*spidx));
1005
1006 error = ipsec_setspidx(m, spidx, needport, ip_version);
1007 if (error) {
1008 goto bad;
1009 }
1010 spidx->dir = dir;
1011
1012 if (ifindex != 0) {
1013 ifnet_head_lock_shared();
1014 spidx->internal_if = ifindex2ifnet[ifindex];
1015 ifnet_head_done();
1016 } else {
1017 spidx->internal_if = NULL;
1018 }
1019
1020 return 0;
1021
1022bad:
1023 return EINVAL;
1024}
1025
1026static int
1027ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1028{
1029 struct secpolicyindex *spidx;
1030 int error;
1031
1032 if (ipsec_bypass != 0) {
1033 return 0;
1034 }
1035
1036 /* sanity check */
1037 if (pcb == NULL) {
1038 panic("ipsec4_setspidx_inpcb: no PCB found.");
1039 }
1040 if (pcb->inp_sp == NULL) {
1041 panic("ipsec4_setspidx_inpcb: no inp_sp found.");
1042 }
1043 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) {
1044 panic("ipsec4_setspidx_inpcb: no sp_in/out found.");
1045 }
1046
    bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
    bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1049
1050 spidx = &pcb->inp_sp->sp_in->spidx;
1051 error = ipsec_setspidx(m, spidx, 1, 0);
1052 if (error) {
1053 goto bad;
1054 }
1055 spidx->dir = IPSEC_DIR_INBOUND;
1056
1057 spidx = &pcb->inp_sp->sp_out->spidx;
1058 error = ipsec_setspidx(m, spidx, 1, 0);
1059 if (error) {
1060 goto bad;
1061 }
1062 spidx->dir = IPSEC_DIR_OUTBOUND;
1063
1064 return 0;
1065
1066bad:
    bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
    bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1069 return error;
1070}
1071
1072static int
1073ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1074{
1075 struct secpolicyindex *spidx;
1076 int error;
1077
1078 /* sanity check */
1079 if (pcb == NULL) {
1080 panic("ipsec6_setspidx_in6pcb: no PCB found.");
1081 }
1082 if (pcb->in6p_sp == NULL) {
1083 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.");
1084 }
1085 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) {
1086 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.");
1087 }
1088
    bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
    bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1091
1092 spidx = &pcb->in6p_sp->sp_in->spidx;
1093 error = ipsec_setspidx(m, spidx, 1, 0);
1094 if (error) {
1095 goto bad;
1096 }
1097 spidx->dir = IPSEC_DIR_INBOUND;
1098
1099 spidx = &pcb->in6p_sp->sp_out->spidx;
1100 error = ipsec_setspidx(m, spidx, 1, 0);
1101 if (error) {
1102 goto bad;
1103 }
1104 spidx->dir = IPSEC_DIR_OUTBOUND;
1105
1106 return 0;
1107
1108bad:
    bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
    bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1111 return error;
1112}
1113
1114/*
1115 * configure security policy index (src/dst/proto/sport/dport)
1116 * by looking at the content of mbuf.
1117 * the caller is responsible for error recovery (like clearing up spidx).
1118 */
1119static int
1120ipsec_setspidx(struct mbuf *m,
1121 struct secpolicyindex *spidx,
1122 int needport,
1123 int force_ip_version)
1124{
1125 struct ip *ip = NULL;
1126 struct ip ipbuf;
1127 u_int v;
1128 struct mbuf *n;
1129 int len;
1130 int error;
1131
1132 if (m == NULL) {
1133 panic("ipsec_setspidx: m == 0 passed.");
1134 }
1135
1136 /*
1137 * validate m->m_pkthdr.len. we see incorrect length if we
1138 * mistakenly call this function with inconsistent mbuf chain
1139 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1140 */
1141 len = 0;
1142 for (n = m; n; n = n->m_next) {
1143 len += n->m_len;
1144 }
1145 if (m->m_pkthdr.len != len) {
1146 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1147 printf("ipsec_setspidx: "
1148 "total of m_len(%d) != pkthdr.len(%d), "
1149 "ignored.\n",
1150 len, m->m_pkthdr.len));
1151 return EINVAL;
1152 }
1153
1154 if (m->m_pkthdr.len < sizeof(struct ip)) {
1155 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1156 printf("ipsec_setspidx: "
1157 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1158 m->m_pkthdr.len));
1159 return EINVAL;
1160 }
1161
1162 if (m->m_len >= sizeof(*ip)) {
1163 ip = mtod(m, struct ip *);
1164 } else {
1165 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1166 ip = &ipbuf;
1167 }
1168
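    /*
     * force_ip_version is nonzero only for the interface-scoped lookups
     * (ipsec_setspidx_interface() passes 4 or 6 explicitly); otherwise the
     * version is read from the IP header itself.
     */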
1169 if (force_ip_version) {
1170 v = force_ip_version;
1171 } else {
1172#ifdef _IP_VHL
1173 v = _IP_VHL_V(ip->ip_vhl);
1174#else
1175 v = ip->ip_v;
1176#endif
1177 }
1178 switch (v) {
1179 case 4:
1180 error = ipsec4_setspidx_ipaddr(m, spidx);
1181 if (error) {
1182 return error;
1183 }
1184 ipsec4_get_ulp(m, spidx, needport);
1185 return 0;
1186 case 6:
1187 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1188 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1189 printf("ipsec_setspidx: "
1190 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1191 "ignored.\n", m->m_pkthdr.len));
1192 return EINVAL;
1193 }
1194 error = ipsec6_setspidx_ipaddr(m, spidx);
1195 if (error) {
1196 return error;
1197 }
1198 ipsec6_get_ulp(m, spidx, needport);
1199 return 0;
1200 default:
1201 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1202 printf("ipsec_setspidx: "
1203 "unknown IP version %u, ignored.\n", v));
1204 return EINVAL;
1205 }
1206}
1207
1208static void
1209ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1210{
1211 struct ip ip;
1212 struct ip6_ext ip6e;
1213 u_int8_t nxt;
1214 int off;
1215 struct tcphdr th;
1216 struct udphdr uh;
1217
1218 /* sanity check */
1219 if (m == NULL) {
1220 panic("ipsec4_get_ulp: NULL pointer was passed.");
1221 }
1222 if (m->m_pkthdr.len < sizeof(ip)) {
1223 panic("ipsec4_get_ulp: too short");
1224 }
1225
1226 /* set default */
1227 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1228 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1229 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1230
1231 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1232 /* ip_input() flips it into host endian XXX need more checking */
1233 if (ip.ip_off & (IP_MF | IP_OFFMASK)) {
1234 return;
1235 }
1236
1237 nxt = ip.ip_p;
1238#ifdef _IP_VHL
    off = _IP_VHL_HL(ip.ip_vhl) << 2;
1240#else
1241 off = ip.ip_hl << 2;
1242#endif
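    /*
     * Walk the header chain until the first upper-layer protocol is found;
     * only AH is skipped over here, anything else terminates the walk and
     * becomes ul_proto.
     */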
1243 while (off < m->m_pkthdr.len) {
1244 switch (nxt) {
1245 case IPPROTO_TCP:
1246 spidx->ul_proto = nxt;
1247 if (!needport) {
1248 return;
1249 }
1250 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1251 return;
1252 }
1253 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1254 ((struct sockaddr_in *)&spidx->src)->sin_port =
1255 th.th_sport;
1256 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1257 th.th_dport;
1258 return;
1259 case IPPROTO_UDP:
1260 spidx->ul_proto = nxt;
1261 if (!needport) {
1262 return;
1263 }
1264 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1265 return;
1266 }
1267 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1268 ((struct sockaddr_in *)&spidx->src)->sin_port =
1269 uh.uh_sport;
1270 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1271 uh.uh_dport;
1272 return;
1273 case IPPROTO_AH:
1274 if (off + sizeof(ip6e) > m->m_pkthdr.len) {
1275 return;
1276 }
1277 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
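            /*
             * The AH payload-length field counts 32-bit words minus 2
             * (RFC 4302), so the header occupies (len + 2) * 4 bytes.
             */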
1278 off += (ip6e.ip6e_len + 2) << 2;
1279 nxt = ip6e.ip6e_nxt;
1280 break;
1281 case IPPROTO_ICMP:
1282 default:
1283 /* XXX intermediate headers??? */
1284 spidx->ul_proto = nxt;
1285 return;
1286 }
1287 }
1288}
1289
1290/* assumes that m is sane */
1291static int
1292ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1293{
1294 struct ip *ip = NULL;
1295 struct ip ipbuf;
1296 struct sockaddr_in *sin;
1297
1298 if (m->m_len >= sizeof(*ip)) {
1299 ip = mtod(m, struct ip *);
1300 } else {
1301 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1302 ip = &ipbuf;
1303 }
1304
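    /*
     * Record src/dst as full host addresses; the prefix lengths are the
     * address width in bits (sizeof(addr) << 3 converts bytes to bits).
     */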
1305 sin = (struct sockaddr_in *)&spidx->src;
    bzero(sin, sizeof(*sin));
    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(struct sockaddr_in);
    bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1310 spidx->prefs = sizeof(struct in_addr) << 3;
1311
1312 sin = (struct sockaddr_in *)&spidx->dst;
    bzero(sin, sizeof(*sin));
    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(struct sockaddr_in);
    bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1317 spidx->prefd = sizeof(struct in_addr) << 3;
1318
1319 return 0;
1320}
1321
1322static void
1323ipsec6_get_ulp(struct mbuf *m,
1324 struct secpolicyindex *spidx,
1325 int needport)
1326{
1327 int off, nxt;
1328 struct tcphdr th;
1329 struct udphdr uh;
1330
1331 /* sanity check */
1332 if (m == NULL) {
1333 panic("ipsec6_get_ulp: NULL pointer was passed.");
1334 }
1335
1336 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1337 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1338
1339 /* set default */
1340 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1341 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1342 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1343
1344 nxt = -1;
1345 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1346 if (off < 0 || m->m_pkthdr.len < off) {
1347 return;
1348 }
1349
1350 VERIFY(nxt <= UINT8_MAX);
1351 switch (nxt) {
1352 case IPPROTO_TCP:
1353 spidx->ul_proto = (u_int8_t)nxt;
1354 if (!needport) {
1355 break;
1356 }
1357 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1358 break;
1359 }
1360 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1361 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1362 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1363 break;
1364 case IPPROTO_UDP:
1365 spidx->ul_proto = (u_int8_t)nxt;
1366 if (!needport) {
1367 break;
1368 }
1369 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1370 break;
1371 }
1372 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1373 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1374 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1375 break;
1376 case IPPROTO_ICMPV6:
1377 default:
1378 /* XXX intermediate headers??? */
1379 spidx->ul_proto = (u_int8_t)nxt;
1380 break;
1381 }
1382}
1383
1384/* assumes that m is sane */
1385static int
1386ipsec6_setspidx_ipaddr(struct mbuf *m,
1387 struct secpolicyindex *spidx)
1388{
1389 struct ip6_hdr *ip6 = NULL;
1390 struct ip6_hdr ip6buf;
1391 struct sockaddr_in6 *sin6;
1392
1393 if (m->m_len >= sizeof(*ip6)) {
1394 ip6 = mtod(m, struct ip6_hdr *);
1395 } else {
1396 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1397 ip6 = &ip6buf;
1398 }
1399
1400 sin6 = (struct sockaddr_in6 *)&spidx->src;
    bzero(sin6, sizeof(*sin6));
    sin6->sin6_family = AF_INET6;
    sin6->sin6_len = sizeof(struct sockaddr_in6);
    bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1405 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1406 if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
1407 ip6_getsrcifaddr_info(m, &sin6->sin6_scope_id, NULL);
1408 } else if (m->m_pkthdr.pkt_ext_flags & PKTF_EXT_OUTPUT_SCOPE) {
1409 sin6->sin6_scope_id = ip6_output_getsrcifscope(m);
1410 }
1411 in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
1412 if (in6_embedded_scope) {
1413 sin6->sin6_addr.s6_addr16[1] = 0;
1414 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1415 }
1416 }
1417 spidx->prefs = sizeof(struct in6_addr) << 3;
1418
1419 sin6 = (struct sockaddr_in6 *)&spidx->dst;
    bzero(sin6, sizeof(*sin6));
    sin6->sin6_family = AF_INET6;
    sin6->sin6_len = sizeof(struct sockaddr_in6);
    bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1424 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1425 if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
1426 ip6_getdstifaddr_info(m, &sin6->sin6_scope_id, NULL);
1427 } else if (m->m_pkthdr.pkt_ext_flags & PKTF_EXT_OUTPUT_SCOPE) {
1428 sin6->sin6_scope_id = ip6_output_getdstifscope(m);
1429 }
1430 in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
1431 if (in6_embedded_scope) {
1432 sin6->sin6_addr.s6_addr16[1] = 0;
1433 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1434 }
1435 }
1436 spidx->prefd = sizeof(struct in6_addr) << 3;
1437
1438 return 0;
1439}
1440
1441static struct inpcbpolicy *
1442ipsec_newpcbpolicy(void)
1443{
1444 struct inpcbpolicy *p;
1445
1446 p = kalloc_type(struct inpcbpolicy, Z_WAITOK | Z_ZERO);
1447 return p;
1448}
1449
1450static void
1451ipsec_delpcbpolicy(struct inpcbpolicy *p)
1452{
1453 kfree_type(struct inpcbpolicy, p);
1454}
1455
1456/* initialize policy in PCB */
1457int
1458ipsec_init_policy(struct socket *so,
1459 struct inpcbpolicy **pcb_sp)
1460{
1461 struct inpcbpolicy *new;
1462
1463 /* sanity check. */
1464 if (so == NULL || pcb_sp == NULL) {
1465 panic("ipsec_init_policy: NULL pointer was passed.");
1466 }
1467
1468 new = ipsec_newpcbpolicy();
1469 if (new == NULL) {
1470 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1471 return ENOBUFS;
1472 }
1473
#ifdef __APPLE__
    if (kauth_cred_issuser(so->so_cred))
#else
    if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
#endif
    {
        new->priv = 1;
    } else {
        new->priv = 0;
    }
1482
1483 if ((new->sp_in = key_newsp()) == NULL) {
        ipsec_delpcbpolicy(new);
1485 return ENOBUFS;
1486 }
1487 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1488 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1489
1490 if ((new->sp_out = key_newsp()) == NULL) {
1491 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
        ipsec_delpcbpolicy(new);
1493 return ENOBUFS;
1494 }
1495 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1496 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1497
1498 *pcb_sp = new;
1499
1500 return 0;
1501}
1502
1503/* copy old ipsec policy into new */
1504int
1505ipsec_copy_policy(struct inpcbpolicy *old,
1506 struct inpcbpolicy *new)
1507{
1508 struct secpolicy *sp;
1509
1510 if (ipsec_bypass != 0) {
1511 return 0;
1512 }
1513
    sp = ipsec_deepcopy_policy(old->sp_in);
1515 if (sp) {
1516 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1517 new->sp_in = sp;
1518 } else {
1519 return ENOBUFS;
1520 }
1521
    sp = ipsec_deepcopy_policy(old->sp_out);
1523 if (sp) {
1524 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1525 new->sp_out = sp;
1526 } else {
1527 return ENOBUFS;
1528 }
1529
1530 new->priv = old->priv;
1531
1532 return 0;
1533}
1534
1535/* deep-copy a policy in PCB */
1536static struct secpolicy *
1537ipsec_deepcopy_policy(struct secpolicy *src)
1538{
1539 struct ipsecrequest *newchain = NULL;
1540 struct ipsecrequest *p;
1541 struct ipsecrequest **q;
1542 struct secpolicy *dst;
1543
1544 if (src == NULL) {
1545 return NULL;
1546 }
1547 dst = key_newsp();
1548 if (dst == NULL) {
1549 return NULL;
1550 }
1551
1552 /*
1553 * deep-copy IPsec request chain. This is required since struct
1554 * ipsecrequest is not reference counted.
1555 */
1556 q = &newchain;
1557 for (p = src->req; p; p = p->next) {
1558 *q = kalloc_type(struct ipsecrequest, Z_WAITOK_ZERO_NOFAIL);
1559
1560 (*q)->saidx.proto = p->saidx.proto;
1561 (*q)->saidx.mode = p->saidx.mode;
1562 (*q)->level = p->level;
1563 (*q)->saidx.reqid = p->saidx.reqid;
1564
        bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
        bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1567
1568 (*q)->sp = dst;
1569
1570 q = &((*q)->next);
1571 }
1572
1573 dst->req = newchain;
1574 dst->state = src->state;
1575 dst->policy = src->policy;
1576 /* do not touch the refcnt fields */
1577
1578 return dst;
1579}
1580
1581/* set policy and ipsec request if present. */
1582static int
1583ipsec_set_policy(struct secpolicy **pcb_sp,
1584 __unused int optname,
1585 caddr_t request,
1586 size_t len,
1587 int priv)
1588{
1589 struct sadb_x_policy *xpl;
1590 struct secpolicy *newsp = NULL;
1591 int error;
1592
1593 /* sanity check. */
1594 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) {
1595 return EINVAL;
1596 }
1597 if (len < sizeof(*xpl)) {
1598 return EINVAL;
1599 }
1600 xpl = (struct sadb_x_policy *)(void *)request;
1601
1602 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1603 printf("ipsec_set_policy: passed policy\n");
1604 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1605
1606 /* check policy type */
1607 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1608 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1609 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) {
1610 return EINVAL;
1611 }
1612
1613 /* check privileged socket */
1614 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
1615 return EACCES;
1616 }
1617
    /* allocate a new SP entry */
1619 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) {
1620 return error;
1621 }
1622
1623 newsp->state = IPSEC_SPSTATE_ALIVE;
1624
1625 /* clear old SP and set new SP */
1626 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1627 *pcb_sp = newsp;
1628 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1629 printf("ipsec_set_policy: new policy\n");
1630 kdebug_secpolicy(newsp));
1631
1632 return 0;
1633}
1634
1635int
1636ipsec4_set_policy(struct inpcb *inp,
1637 int optname,
1638 caddr_t request,
1639 size_t len,
1640 int priv)
1641{
1642 struct sadb_x_policy *xpl;
1643 struct secpolicy **pcb_sp;
1644 int error = 0;
1645 struct sadb_x_policy xpl_aligned_buf;
1646 u_int8_t *xpl_unaligned;
1647
1648 /* sanity check. */
1649 if (inp == NULL || request == NULL) {
1650 return EINVAL;
1651 }
1652 if (len < sizeof(*xpl)) {
1653 return EINVAL;
1654 }
1655 xpl = (struct sadb_x_policy *)(void *)request;
1656
1657 /* This is a new mbuf allocated by soopt_getm() */
1658 if (IPSEC_IS_P2ALIGNED(xpl)) {
1659 xpl_unaligned = NULL;
1660 } else {
1661 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
        memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
        xpl = (__typeof__(xpl))&xpl_aligned_buf;
1664 }
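    /*
     * From here on, xpl points either at the caller's buffer (if it was
     * already pointer-aligned) or at the aligned stack copy made above.
     */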
1665
1666 if (inp->inp_sp == NULL) {
        error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1668 if (error) {
1669 return error;
1670 }
1671 }
1672
1673 /* select direction */
1674 switch (xpl->sadb_x_policy_dir) {
1675 case IPSEC_DIR_INBOUND:
1676 pcb_sp = &inp->inp_sp->sp_in;
1677 break;
1678 case IPSEC_DIR_OUTBOUND:
1679 pcb_sp = &inp->inp_sp->sp_out;
1680 break;
1681 default:
1682 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1683 xpl->sadb_x_policy_dir));
1684 return EINVAL;
1685 }
1686
1687 /* turn bypass off */
1688 if (ipsec_bypass != 0) {
1689 ipsec_bypass = 0;
1690 }
1691
1692 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1693}
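/*
 * Userland reaches ipsec4_set_policy() through the IP_IPSEC_POLICY socket
 * option. A minimal sketch (userland code, not part of this file; assumes
 * the libipsec ipsec_set_policy(3)/ipsec_get_policylen(3) helpers):
 *
 *     char *pol = ipsec_set_policy("out ipsec esp/transport//require",
 *         strlen("out ipsec esp/transport//require"));
 *     if (pol != NULL) {
 *         (void)setsockopt(s, IPPROTO_IP, IP_IPSEC_POLICY, pol,
 *             (socklen_t)ipsec_get_policylen(pol));
 *         free(pol);
 *     }
 */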
1694
1695/* delete policy in PCB */
1696int
1697ipsec4_delete_pcbpolicy(struct inpcb *inp)
1698{
1699 /* sanity check. */
1700 if (inp == NULL) {
1701 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.");
1702 }
1703
1704 if (inp->inp_sp == NULL) {
1705 return 0;
1706 }
1707
1708 if (inp->inp_sp->sp_in != NULL) {
1709 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1710 inp->inp_sp->sp_in = NULL;
1711 }
1712
1713 if (inp->inp_sp->sp_out != NULL) {
1714 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1715 inp->inp_sp->sp_out = NULL;
1716 }
1717
    ipsec_delpcbpolicy(inp->inp_sp);
1719 inp->inp_sp = NULL;
1720
1721 return 0;
1722}
1723
1724int
1725ipsec6_set_policy(struct in6pcb *in6p,
1726 int optname,
1727 caddr_t request,
1728 size_t len,
1729 int priv)
1730{
1731 struct sadb_x_policy *xpl;
1732 struct secpolicy **pcb_sp;
1733 int error = 0;
1734 struct sadb_x_policy xpl_aligned_buf;
1735 u_int8_t *xpl_unaligned;
1736
1737 /* sanity check. */
1738 if (in6p == NULL || request == NULL) {
1739 return EINVAL;
1740 }
1741 if (len < sizeof(*xpl)) {
1742 return EINVAL;
1743 }
1744 xpl = (struct sadb_x_policy *)(void *)request;
1745
1746 /* This is a new mbuf allocated by soopt_getm() */
1747 if (IPSEC_IS_P2ALIGNED(xpl)) {
1748 xpl_unaligned = NULL;
1749 } else {
1750 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
        memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
        xpl = (__typeof__(xpl))&xpl_aligned_buf;
1753 }
1754
1755 if (in6p->in6p_sp == NULL) {
        error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1757 if (error) {
1758 return error;
1759 }
1760 }
1761
1762 /* select direction */
1763 switch (xpl->sadb_x_policy_dir) {
1764 case IPSEC_DIR_INBOUND:
1765 pcb_sp = &in6p->in6p_sp->sp_in;
1766 break;
1767 case IPSEC_DIR_OUTBOUND:
1768 pcb_sp = &in6p->in6p_sp->sp_out;
1769 break;
1770 default:
1771 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1772 xpl->sadb_x_policy_dir));
1773 return EINVAL;
1774 }
1775
1776 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1777}
1778
1779int
1780ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1781{
1782 /* sanity check. */
1783 if (in6p == NULL) {
1784 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.");
1785 }
1786
1787 if (in6p->in6p_sp == NULL) {
1788 return 0;
1789 }
1790
1791 if (in6p->in6p_sp->sp_in != NULL) {
1792 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1793 in6p->in6p_sp->sp_in = NULL;
1794 }
1795
1796 if (in6p->in6p_sp->sp_out != NULL) {
1797 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1798 in6p->in6p_sp->sp_out = NULL;
1799 }
1800
    ipsec_delpcbpolicy(in6p->in6p_sp);
1802 in6p->in6p_sp = NULL;
1803
1804 return 0;
1805}
1806
/*
 * return current level.
 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE is always returned.
 */
1811u_int
1812ipsec_get_reqlevel(struct ipsecrequest *isr)
1813{
1814 u_int level = 0;
1815 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1816
1817 /* sanity check */
1818 if (isr == NULL || isr->sp == NULL) {
1819 panic("ipsec_get_reqlevel: NULL pointer is passed.");
1820 }
1821 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1822 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) {
1823 panic("ipsec_get_reqlevel: family mismatched.");
1824 }
1825
1826/* XXX note that we have ipseclog() expanded here - code sync issue */
1827#define IPSEC_CHECK_DEFAULT(lev) \
1828 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1829 && (lev) != IPSEC_LEVEL_UNIQUE) \
1830 ? (ipsec_debug \
1831 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1832 (lev), IPSEC_LEVEL_REQUIRE) \
1833 : (void)0), \
1834 (lev) = IPSEC_LEVEL_REQUIRE, \
1835 (lev) \
1836 : (lev))
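/*
 * IPSEC_CHECK_DEFAULT() evaluates to `lev', but coerces it back to
 * IPSEC_LEVEL_REQUIRE (logging the fix when ipsec_debug is set) if the
 * sysctl-settable default is not one of USE/REQUIRE/UNIQUE.
 */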
1837
1838 /* set default level */
1839 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1840 case AF_INET:
1841 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1842 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1843 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1844 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1845 break;
1846 case AF_INET6:
1847 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1848 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1849 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1850 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1851 break;
1852 default:
        panic("ipsec_get_reqlevel: Unknown family. %d",
            ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1855 }
1856
1857#undef IPSEC_CHECK_DEFAULT
1858
1859 /* set level */
1860 switch (isr->level) {
1861 case IPSEC_LEVEL_DEFAULT:
1862 switch (isr->saidx.proto) {
1863 case IPPROTO_ESP:
1864 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1865 level = esp_net_deflev;
1866 } else {
1867 level = esp_trans_deflev;
1868 }
1869 break;
1870 case IPPROTO_AH:
1871 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1872 level = ah_net_deflev;
1873 } else {
1874 level = ah_trans_deflev;
1875 }
1876 break;
1877 case IPPROTO_IPCOMP:
1878 ipseclog((LOG_ERR, "ipsec_get_reqlevel: "
1879 "still got IPCOMP - exiting\n"));
1880 break;
1881 default:
1882 panic("ipsec_get_reqlevel: "
1883 "Illegal protocol defined %u\n",
1884 isr->saidx.proto);
1885 }
1886 break;
1887
1888 case IPSEC_LEVEL_USE:
1889 case IPSEC_LEVEL_REQUIRE:
1890 level = isr->level;
1891 break;
1892 case IPSEC_LEVEL_UNIQUE:
1893 level = IPSEC_LEVEL_REQUIRE;
1894 break;
1895
1896 default:
1897 panic("ipsec_get_reqlevel: Illegal IPsec level %u",
1898 isr->level);
1899 }
1900
1901 return level;
1902}
1903
1904/*
1905 * Check AH/ESP integrity.
1906 * OUT:
1907 * 0: valid
1908 * 1: invalid
1909 */
1910static int
1911ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1912{
1913 struct ipsecrequest *isr;
1914 u_int level;
1915 int need_auth, need_conf, need_icv;
1916
1917 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1918 printf("ipsec_in_reject: using SP\n");
1919 kdebug_secpolicy(sp));
1920
1921 /* check policy */
1922 switch (sp->policy) {
1923 case IPSEC_POLICY_DISCARD:
1924 case IPSEC_POLICY_GENERATE:
1925 return 1;
1926 case IPSEC_POLICY_BYPASS:
1927 case IPSEC_POLICY_NONE:
1928 return 0;
1929
1930 case IPSEC_POLICY_IPSEC:
1931 break;
1932
1933 case IPSEC_POLICY_ENTRUST:
1934 default:
        panic("ipsec_in_reject: Invalid policy found. %d", sp->policy);
1936 }
1937
1938 need_auth = 0;
1939 need_conf = 0;
1940 need_icv = 0;
1941
1942 /* XXX should compare policy against ipsec header history */
1943
1944 for (isr = sp->req; isr != NULL; isr = isr->next) {
1945 /* get current level */
1946 level = ipsec_get_reqlevel(isr);
1947
1948 switch (isr->saidx.proto) {
1949 case IPPROTO_ESP:
1950 if (level == IPSEC_LEVEL_REQUIRE) {
1951 need_conf++;
1952
1953#if 0
1954 /* this won't work with multiple input threads - isr->sav would change
1955 * with every packet and is not necessarily related to the current packet
1956 * being processed. If ESP processing is required - the esp code should
1957 * make sure that the integrity check is present and correct. I don't see
1958 * why it would be necessary to check for the presence of the integrity
1959 * check value here. I think this is just wrong.
1960 * isr->sav has been removed.
1961 * %%%%%% this needs to be re-worked at some point but I think the code below can
1962 * be ignored for now.
1963 */
1964 if (isr->sav != NULL
1965 && isr->sav->flags == SADB_X_EXT_NONE
1966 && isr->sav->alg_auth != SADB_AALG_NONE) {
1967 need_icv++;
1968 }
1969#endif
1970 }
1971 break;
1972 case IPPROTO_AH:
1973 if (level == IPSEC_LEVEL_REQUIRE) {
1974 need_auth++;
1975 need_icv++;
1976 }
1977 break;
1978 case IPPROTO_IPCOMP:
1979 /*
1980 * we don't really care, as IPcomp document says that
1981 * we shouldn't compress small packets, IPComp policy
1982 * should always be treated as being in "use" level.
1983 */
1984 break;
1985 }
1986 }
1987
1988 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1989 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
1990 need_auth, need_conf, need_icv, m->m_flags));
1991
1992 if ((need_conf && !(m->m_flags & M_DECRYPTED))
1993 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
1994 || (need_auth && !(m->m_flags & M_AUTHIPHDR))) {
1995 return 1;
1996 }
1997
1998 return 0;
1999}
2000
2001/*
2002 * Check AH/ESP integrity.
2003 * This function is called from tcp_input(), udp_input(),
2004 * and {ah,esp}4_input for tunnel mode
2005 */
2006int
2007ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
2008{
2009 struct secpolicy *sp = NULL;
2010 int error;
2011 int result;
2012
2013 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2014 /* sanity check */
2015 if (m == NULL) {
2016 return 0; /* XXX should be panic ? */
2017 }
2018 /* get SP for this packet.
2019 * When we are called from ip_forward(), we call
2020 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2021 */
2022 if (so == NULL) {
2023 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2024 } else {
2025 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2026 }
2027
2028 if (sp == NULL) {
2029 return 0; /* XXX should be panic ?
2030 * -> No, there may be error. */
2031 }
2032 result = ipsec_in_reject(sp, m);
2033 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2034 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
2035 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2036 key_freesp(sp, KEY_SADB_UNLOCKED);
2037
2038 return result;
2039}
2040
2041int
2042ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
2043{
2044 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2045 if (inp == NULL) {
2046 return ipsec4_in_reject_so(m, NULL);
2047 }
2048 if (inp->inp_socket) {
2049 return ipsec4_in_reject_so(m, inp->inp_socket);
2050 } else {
2051 panic("ipsec4_in_reject: invalid inpcb/socket");
2052 }
2053
2054 /* NOTREACHED */
2055 return 0;
2056}
2057
2058/*
2059 * Check AH/ESP integrity.
2060 * This function is called from tcp6_input(), udp6_input(),
2061 * and {ah,esp}6_input for tunnel mode
2062 */
2063int
2064ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2065{
2066 struct secpolicy *sp = NULL;
2067 int error;
2068 int result;
2069
2070 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2071 /* sanity check */
2072 if (m == NULL) {
2073 return 0; /* XXX should be panic ? */
2074 }
2075 /* get SP for this packet.
2076 * When we are called from ip_forward(), we call
2077 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2078 */
2079 if (so == NULL) {
2080 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2081 } else {
2082 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2083 }
2084
2085 if (sp == NULL) {
2086 return 0; /* XXX should be panic ? */
2087 }
2088 result = ipsec_in_reject(sp, m);
2089 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2090 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2091 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2092 key_freesp(sp, KEY_SADB_UNLOCKED);
2093
2094 return result;
2095}
2096
2097int
2098ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2099{
2100 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2101 if (in6p == NULL) {
2102 return ipsec6_in_reject_so(m, NULL);
2103 }
2104 if (in6p->in6p_socket) {
2105 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2106 } else {
2107 panic("ipsec6_in_reject: invalid in6p/socket");
2108 }
2109
2110 /* NOTREACHED */
2111 return 0;
2112}
2113
2114/*
2115 * compute the byte size to be occupied by IPsec header.
2116 * in case it is tunneled, it includes the size of outer IP header.
2117 * NOTE: the SP passed in is not freed here; callers release it with key_freesp().
2118 */
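/*
 * Illustrative example: for a policy carrying a single ESP request in
 * tunnel mode over IPv4, the estimate below works out to
 *	esp_hdrsiz(isr) + sizeof(struct ip)
 * i.e. the worst-case ESP header/trailer plus the outer IPv4 header, while
 * an AH request in transport mode contributes just ah_hdrsiz(isr).
 */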
2119size_t
2120ipsec_hdrsiz(struct secpolicy *sp)
2121{
2122 struct ipsecrequest *isr;
2123 size_t siz, clen;
2124
2125 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2126 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2127 printf("ipsec_hdrsiz: using SP\n");
2128 kdebug_secpolicy(sp));
2129
2130 /* check policy */
2131 switch (sp->policy) {
2132 case IPSEC_POLICY_DISCARD:
2133 case IPSEC_POLICY_GENERATE:
2134 case IPSEC_POLICY_BYPASS:
2135 case IPSEC_POLICY_NONE:
2136 return 0;
2137
2138 case IPSEC_POLICY_IPSEC:
2139 break;
2140
2141 case IPSEC_POLICY_ENTRUST:
2142 default:
2143 panic("ipsec_hdrsiz: Invalid policy found. %d", sp->policy);
2144 }
2145
2146 siz = 0;
2147
2148 for (isr = sp->req; isr != NULL; isr = isr->next) {
2149 clen = 0;
2150
2151 switch (isr->saidx.proto) {
2152 case IPPROTO_ESP:
2153#if IPSEC_ESP
2154 clen = esp_hdrsiz(isr);
2155#else
2156 clen = 0; /*XXX*/
2157#endif
2158 break;
2159 case IPPROTO_AH:
2160 clen = ah_hdrsiz(isr);
2161 break;
2162 default:
2163 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2164 "unknown protocol %u\n",
2165 isr->saidx.proto));
2166 break;
2167 }
2168
2169 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2170 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2171 case AF_INET:
2172 clen += sizeof(struct ip);
2173 break;
2174 case AF_INET6:
2175 clen += sizeof(struct ip6_hdr);
2176 break;
2177 default:
2178 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2179 "unknown AF %d in IPsec tunnel SA\n",
2180 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2181 break;
2182 }
2183 }
2184 siz += clen;
2185 }
2186
2187 return siz;
2188}
2189
2190/* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2191size_t
2192ipsec4_hdrsiz(struct mbuf *m, u_int8_t dir, struct inpcb *inp)
2193{
2194 struct secpolicy *sp = NULL;
2195 int error;
2196 size_t size;
2197
2198 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2199 /* sanity check */
2200 if (m == NULL) {
2201 return 0; /* XXX should be panic ? */
2202 }
2203 if (inp != NULL && inp->inp_socket == NULL) {
2204 panic("ipsec4_hdrsiz: socket is NULL but there is a PCB.");
2205 }
2206
2207 /* get SP for this packet.
2208 * When we are called from ip_forward(), we call
2209 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2210 */
2211 if (inp == NULL) {
2212 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2213 } else {
2214 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2215 }
2216
2217 if (sp == NULL) {
2218 return 0; /* XXX should be panic ? */
2219 }
2220 size = ipsec_hdrsiz(sp);
2221 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2222 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2223 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2224 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2225 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2226 key_freesp(sp, KEY_SADB_UNLOCKED);
2227
2228 return size;
2229}
2230
2231/* This function is called from ipsec6_hdrsize_tcp(),
2232 * and maybe from ip6_forward().
2233 */
2234size_t
2235ipsec6_hdrsiz(struct mbuf *m, u_int8_t dir, struct in6pcb *in6p)
2236{
2237 struct secpolicy *sp = NULL;
2238 int error;
2239 size_t size;
2240
2241 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2242 /* sanity check */
2243 if (m == NULL) {
2244 return 0; /* XXX should be panic ? */
2245 }
2246 if (in6p != NULL && in6p->in6p_socket == NULL) {
2247 panic("ipsec6_hdrsiz: socket is NULL but there is a PCB.");
2248 }
2249
2250 /* get SP for this packet */
2251 /* XXX Is it right to call with IP_FORWARDING. */
2252 if (in6p == NULL) {
2253 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2254 } else {
2255 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2256 }
2257
2258 if (sp == NULL) {
2259 return 0;
2260 }
2261 size = ipsec_hdrsiz(sp);
2262 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2263 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2264 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2265 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2266 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2267 key_freesp(sp, KEY_SADB_UNLOCKED);
2268
2269 return size;
2270}
2271
2272/*
2273 * encapsulate for ipsec tunnel.
2274 * ip->ip_src must be fixed later on.
2275 */
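/*
 * The inner IPv4 header is pushed into the following mbuf and a fresh outer
 * IPv4 header is built in the first mbuf, so the chain goes from
 *	[ inner IP | payload ... ]
 * to
 *	[ outer IP ] -> [ inner IP | payload ... ]
 * with the outer addresses taken from the SA's saidx and the ip_len, ip_id
 * and DF handling applied per RFC 2401 5.1.2.1.
 */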
2276int
2277ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
2278{
2279 struct ip *oip;
2280 struct ip *ip;
2281 size_t plen;
2282 u_int32_t hlen;
2283
2284 /* can't tunnel between different AFs */
2285 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2286 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2287 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2288 m_freem(m);
2289 return EINVAL;
2290 }
2291
2292 if (m->m_len < sizeof(*ip)) {
2293 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2294 }
2295
2296 ip = mtod(m, struct ip *);
2297#ifdef _IP_VHL
2298 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2299#else
2300 hlen = ip->ip_hl << 2;
2301#endif
2302
2303 if (m->m_len != hlen) {
2304 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2305 }
2306
2307 /* generate header checksum */
2308 ip->ip_sum = 0;
2309 ip->ip_sum = in_cksum(m, hlen);
2314
2315 plen = m->m_pkthdr.len;
2316
2317 /*
2318 * grow the mbuf to accommodate the new IPv4 header.
2319 * NOTE: IPv4 options will never be copied.
2320 */
2321 if (M_LEADINGSPACE(m->m_next) < hlen) {
2322 struct mbuf *n;
2323 MGET(n, M_DONTWAIT, MT_DATA);
2324 if (!n) {
2325 m_freem(m);
2326 return ENOBUFS;
2327 }
2328 n->m_len = hlen;
2329 n->m_next = m->m_next;
2330 m->m_next = n;
2331 m->m_pkthdr.len += hlen;
2332 oip = mtod(n, struct ip *);
2333 } else {
2334 m->m_next->m_len += hlen;
2335 m->m_next->m_data -= hlen;
2336 m->m_pkthdr.len += hlen;
2337 oip = mtod(m->m_next, struct ip *);
2338 }
2339 ip = mtod(m, struct ip *);
2340 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2341 m->m_len = sizeof(struct ip);
2342 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2343
2344 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2345 /* ECN consideration. */
2346 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2347#ifdef _IP_VHL
2348 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2349#else
2350 ip->ip_hl = sizeof(struct ip) >> 2;
2351#endif
2352 ip->ip_off &= htons(~IP_OFFMASK);
2353 ip->ip_off &= htons(~IP_MF);
2354 switch (ip4_ipsec_dfbit) {
2355 case 0: /* clear DF bit */
2356 ip->ip_off &= htons(~IP_DF);
2357 break;
2358 case 1: /* set DF bit */
2359 ip->ip_off |= htons(IP_DF);
2360 break;
2361 default: /* copy DF bit */
2362 break;
2363 }
2364 ip->ip_p = IPPROTO_IPIP;
2365 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2366 ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
2367 } else {
2368 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2369 "leave ip_len as is (invalid packet)\n"));
2370 }
2371 if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) {
2372 ip->ip_id = 0;
2373 } else {
2374 ip->ip_id = ip_randomid((uint64_t)m);
2375 }
2376 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2377 &ip->ip_src, sizeof(ip->ip_src));
2378 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2379 &ip->ip_dst, sizeof(ip->ip_dst));
2380 ip->ip_ttl = IPDEFTTL;
2381
2382 /* XXX Should ip_src be updated later ? */
2383
2384 return 0;
2385}
2386
2387
2388int
2389ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
2390{
2391 struct ip6_hdr *oip6;
2392 struct ip6_hdr *ip6;
2393 size_t plen;
2394
2395 /* can't tunnel between different AFs */
2396 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2397 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2398 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2399 m_freem(m);
2400 return EINVAL;
2401 }
2402
2403 plen = m->m_pkthdr.len;
2404
2405 /*
2406 * grow the mbuf to accommodate the new IPv6 header.
2407 */
2408 if (m->m_len != sizeof(struct ip6_hdr)) {
2409 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2410 }
2411 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2412 struct mbuf *n;
2413 MGET(n, M_DONTWAIT, MT_DATA);
2414 if (!n) {
2415 m_freem(m);
2416 return ENOBUFS;
2417 }
2418 n->m_len = sizeof(struct ip6_hdr);
2419 n->m_next = m->m_next;
2420 m->m_next = n;
2421 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2422 oip6 = mtod(n, struct ip6_hdr *);
2423 } else {
2424 m->m_next->m_len += sizeof(struct ip6_hdr);
2425 m->m_next->m_data -= sizeof(struct ip6_hdr);
2426 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2427 oip6 = mtod(m->m_next, struct ip6_hdr *);
2428 }
2429 ip6 = mtod(m, struct ip6_hdr *);
2430 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2431
2432 /* Fake link-local scope-class addresses */
2433 if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) {
2434 oip6->ip6_src.s6_addr16[1] = 0;
2435 }
2436 if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) {
2437 oip6->ip6_dst.s6_addr16[1] = 0;
2438 }
2439
2440 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2441 /* ECN consideration. */
2442 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2443 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2444 ip6->ip6_plen = htons((u_int16_t)plen);
2445 } else {
2446 /* ip6->ip6_plen will be updated in ip6_output() */
2447 }
2448 ip6->ip6_nxt = IPPROTO_IPV6;
2449 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2450 &ip6->ip6_src, sizeof(ip6->ip6_src));
2451 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2452 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2453 ip6->ip6_hlim = IPV6_DEFHLIM;
2454
2455 if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
2456 ip6->ip6_src.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
2457 ip6->ip6_dst.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
2458 }
2459
2460 /* XXX Should ip6_src be updated later ? */
2461
2462 return 0;
2463}
2464
2465static int
2466ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav, u_int32_t dscp_mapping)
2467{
2468 struct ip6_hdr *ip6, *ip6i;
2469 struct ip *ip;
2470 size_t plen;
2471
2472 /* tunneling over IPv4 */
2473 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2474 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2475 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2476 m_freem(m);
2477 return EINVAL;
2478 }
2479
2480 plen = m->m_pkthdr.len;
2481 ip6 = mtod(m, struct ip6_hdr *);
2482 /*
2483 * grow the mbuf to accommodate the new IPv4 header.
2484 */
2485 if (m->m_len != sizeof(struct ip6_hdr)) {
2486 panic("ipsec64_encapsulate: assumption failed (first mbuf length)");
2487 }
2488 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2489 struct mbuf *n;
2490 MGET(n, M_DONTWAIT, MT_DATA);
2491 if (!n) {
2492 m_freem(m);
2493 return ENOBUFS;
2494 }
2495 n->m_len = sizeof(struct ip6_hdr);
2496 n->m_next = m->m_next;
2497 m->m_next = n;
2498 m->m_pkthdr.len += sizeof(struct ip);
2499 ip6i = mtod(n, struct ip6_hdr *);
2500 } else {
2501 m->m_next->m_len += sizeof(struct ip6_hdr);
2502 m->m_next->m_data -= sizeof(struct ip6_hdr);
2503 m->m_pkthdr.len += sizeof(struct ip);
2504 ip6i = mtod(m->m_next, struct ip6_hdr *);
2505 }
2506
2507 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2508 ip = mtod(m, struct ip *);
2509 m->m_len = sizeof(struct ip);
2510 /*
2511 * Fill in some of the IPv4 fields - we don't need all of them
2512 * because the rest will be filled in by ip_output
2513 */
2514 ip->ip_v = IPVERSION;
2515 ip->ip_hl = sizeof(struct ip) >> 2;
2516 ip->ip_id = 0;
2517 ip->ip_sum = 0;
2518 ip->ip_tos = 0;
2519 ip->ip_off = 0;
2520 ip->ip_ttl = IPDEFTTL;
2521 ip->ip_p = IPPROTO_IPV6;
2522
2523 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2524 /* ECN consideration. */
2525 if (dscp_mapping == IPSEC_DSCP_MAPPING_COPY) {
2526 // Copy DSCP bits from inner IP to outer IP packet.
2527 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow);
2528 } else if (dscp_mapping == IPSEC_DSCP_MAPPING_LEGACY) {
2529 // Copy DSCP bits in legacy style.
2530 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2531 }
2532
2533 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2534 ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
2535 } else {
2536 ip->ip_len = htons((u_int16_t)plen);
2537 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2538 "leave ip_len as is (invalid packet)\n"));
2539 }
2540 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2541 &ip->ip_src, sizeof(ip->ip_src));
2542 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2543 &ip->ip_dst, sizeof(ip->ip_dst));
2544
2545 return 0;
2546}
2547
2548int
2549ipsec6_update_routecache_and_output(
2550 struct ipsec_output_state *state,
2551 struct secasvar *sav)
2552{
2553 struct sockaddr_in6* dst6;
2554 struct route_in6 *ro6;
2555 struct ip6_hdr *ip6;
2556 errno_t error = 0;
2557
2558 int plen;
2559 struct ip6_out_args ip6oa;
2560 struct route_in6 ro6_new;
2561 struct flowadv *adv = NULL;
2562
2563 if (!state->m) {
2564 return EINVAL;
2565 }
2566 ip6 = mtod(state->m, struct ip6_hdr *);
2567
2568 // grab sadb_mutex, before updating sah's route cache
2569 lck_mtx_lock(sadb_mutex);
2570 ro6 = &sav->sah->sa_route;
2571 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2572 if (ro6->ro_rt) {
2573 RT_LOCK(ro6->ro_rt);
2574 }
2575 if (ROUTE_UNUSABLE(ro6) ||
2576 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2577 if (ro6->ro_rt != NULL) {
2578 RT_UNLOCK(ro6->ro_rt);
2579 }
2580 ROUTE_RELEASE(ro6);
2581 }
2582 if (ro6->ro_rt == 0) {
2583 bzero(dst6, sizeof(*dst6));
2584 dst6->sin6_family = AF_INET6;
2585 dst6->sin6_len = sizeof(*dst6);
2586 dst6->sin6_addr = ip6->ip6_dst;
2587 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
2588 if (ro6->ro_rt) {
2589 RT_LOCK(ro6->ro_rt);
2590 }
2591 }
2592 if (ro6->ro_rt == 0) {
2593 ip6stat.ip6s_noroute++;
2594 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2595 error = EHOSTUNREACH;
2596 // release sadb_mutex, after updating sah's route cache
2597 lck_mtx_unlock(sadb_mutex);
2598 return error;
2599 }
2600
2601 /*
2602 * adjust state->dst if tunnel endpoint is offlink
2603 *
2604 * XXX: caching rt_gateway value in the state is
2605 * not really good, since it may point elsewhere
2606 * when the gateway gets modified to a larger
2607 * sockaddr via rt_setgate(). This is currently
2608 * addressed by SA_SIZE roundup in that routine.
2609 */
2610 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
2611 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2612 }
2613 RT_UNLOCK(ro6->ro_rt);
2614 ROUTE_RELEASE(&state->ro);
2615 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
2616 state->dst = (struct sockaddr *)dst6;
2617 state->tunneled = 6;
2618 // release sadb_mutex, after updating sah's route cache
2619 lck_mtx_unlock(sadb_mutex);
2620
2621 state->m = ipsec6_splithdr(state->m);
2622 if (!state->m) {
2623 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2624 error = ENOMEM;
2625 return error;
2626 }
2627
2628 ip6 = mtod(state->m, struct ip6_hdr *);
2629 switch (sav->sah->saidx.proto) {
2630 case IPPROTO_ESP:
2631#if IPSEC_ESP
2632 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2633#else
2634 m_freem(state->m);
2635 error = EINVAL;
2636#endif
2637 break;
2638 case IPPROTO_AH:
2639 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2640 break;
2641 default:
2642 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2643 m_freem(state->m);
2644 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2645 error = EINVAL;
2646 break;
2647 }
2648 if (error) {
2649 // If error, packet already freed by above output routines
2650 state->m = NULL;
2651 return error;
2652 }
2653
2654 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2655 if (plen > IPV6_MAXPACKET) {
2656 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2657 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2658 error = EINVAL;/*XXX*/
2659 return error;
2660 }
2661 ip6 = mtod(state->m, struct ip6_hdr *);
2662 ip6->ip6_plen = htons((u_int16_t)plen);
2663
2664 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6,
2665 sav->flowid);
2666 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2667
2668 /* Increment statistics */
2669 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, (u_int32_t)mbuf_pkthdr_len(state->m), 0);
2670
2671 /* Send to ip6_output */
2672 bzero(&ro6_new, sizeof(ro6_new));
2673 bzero(&ip6oa, sizeof(ip6oa));
2674 ip6oa.ip6oa_flowadv.code = 0;
2675 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2676 if (state->outgoing_if) {
2677 ip6oa.ip6oa_boundif = state->outgoing_if;
2678 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2679 ip6_output_setsrcifscope(state->m, state->outgoing_if, NULL);
2680 ip6_output_setdstifscope(state->m, state->outgoing_if, NULL);
2681 }
2682
2683 adv = &ip6oa.ip6oa_flowadv;
2684 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2685 state->m = NULL;
2686
2687 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2688 error = ENOBUFS;
2689 ifnet_disable_output(sav->sah->ipsec_if);
2690 return error;
2691 }
2692
2693 return 0;
2694}
2695
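/*
 * ipsec46_encapsulate() wraps an IPv4 packet in an outer IPv6 header
 * (4-in-6 tunnel): the original IPv4 header is moved to the second mbuf,
 * the first mbuf is regrown to hold the new ip6_hdr, the outer addresses
 * come from the SA's saidx, and ip6_plen is set to the original IPv4
 * packet length.
 */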
2696int
2697ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
2698{
2699 struct mbuf *m;
2700 struct ip6_hdr *ip6;
2701 struct ip *oip;
2702 struct ip *ip;
2703 size_t plen;
2704 u_int32_t hlen;
2705
2706 m = state->m;
2707 if (!m) {
2708 return EINVAL;
2709 }
2710
2711 /* can't tunnel between different AFs */
2712 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2713 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2714 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2715 m_freem(m);
2716 return EINVAL;
2717 }
2718
2719 if (m->m_len < sizeof(*ip)) {
2720 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2721 return EINVAL;
2722 }
2723
2724 ip = mtod(m, struct ip *);
2725#ifdef _IP_VHL
2726 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2727#else
2728 hlen = ip->ip_hl << 2;
2729#endif
2730
2731 if (m->m_len != hlen) {
2732 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2733 return EINVAL;
2734 }
2735
2736 /* generate header checksum */
2737 ip->ip_sum = 0;
2739 ip->ip_sum = in_cksum(m, hlen);
2743
2744 plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len
2745
2746 /*
2747 * First move the IPv4 header to the second mbuf in the chain
2748 */
2749 if (M_LEADINGSPACE(m->m_next) < hlen) {
2750 struct mbuf *n;
2751 MGET(n, M_DONTWAIT, MT_DATA);
2752 if (!n) {
2753 m_freem(m);
2754 return ENOBUFS;
2755 }
2756 n->m_len = hlen;
2757 n->m_next = m->m_next;
2758 m->m_next = n;
2759 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2760 oip = mtod(n, struct ip *);
2761 } else {
2762 m->m_next->m_len += hlen;
2763 m->m_next->m_data -= hlen;
2764 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2765 oip = mtod(m->m_next, struct ip *);
2766 }
2767 ip = mtod(m, struct ip *);
2768 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2769
2770 /*
2771 * Grow the first mbuf to accommodate the new IPv6 header.
2772 */
2773 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2774 struct mbuf *n;
2775 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2776 if (!n) {
2777 m_freem(m);
2778 return ENOBUFS;
2779 }
2780 M_COPY_PKTHDR(n, m);
2781 MH_ALIGN(n, sizeof(struct ip6_hdr));
2782 n->m_len = sizeof(struct ip6_hdr);
2783 n->m_next = m->m_next;
2784 m->m_next = NULL;
2785 m_freem(m);
2786 state->m = n;
2787 m = state->m;
2788 } else {
2789 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2790 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2791 }
2792 ip6 = mtod(m, struct ip6_hdr *);
2793 ip6->ip6_flow = 0;
2794 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2795 ip6->ip6_vfc |= IPV6_VERSION;
2796
2797 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2798 /* ECN consideration. */
2799 if (state->dscp_mapping == IPSEC_DSCP_MAPPING_COPY) {
2800 // Copy DSCP bits from inner IP to outer IP packet.
2801 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos);
2802 } else if (state->dscp_mapping == IPSEC_DSCP_MAPPING_LEGACY) {
2803 // Copy DSCP bits in legacy style.
2804 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
2805 }
2806 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2807 ip6->ip6_plen = htons((u_int16_t)plen);
2808 } else {
2809 /* ip6->ip6_plen will be updated in ip6_output() */
2810 }
2811
2812 ip6->ip6_nxt = IPPROTO_IPV4;
2813 ip6->ip6_hlim = IPV6_DEFHLIM;
2814
2815 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2816 &ip6->ip6_src, sizeof(ip6->ip6_src));
2817 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2818 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2819
2820 if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
2821 ip6->ip6_src.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
2822 ip6->ip6_dst.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
2823 }
2824
2825 return 0;
2826}
2827
2828/*
2829 * Check the variable replay window.
2830 * ipsec_chkreplay() performs replay check before ICV verification.
2831 * ipsec_updatereplay() updates replay bitmap. This must be called after
2832 * ICV verification (it also performs replay check, which is usually done
2833 * beforehand).
2834 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2835 *
2836 * based on RFC 2401.
2837 */
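/*
 * Illustrative example (numbers chosen arbitrarily): with replay->wsize = 4
 * the window spans wsizeb = 32 sequence numbers.  If lastseq = 1000 and a
 * packet with seq = 995 arrives, diff = 5 < 32, so the packet is inside the
 * window and maps to bit (5 % 8) of bitmap[frlast - 5/8] = bitmap[3]; if
 * that bit is already set the packet is treated as a replay.  seq = 960
 * gives diff = 40 >= 32 and is rejected as too old.
 */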
2838int
2839ipsec_chkreplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2840{
2841 const struct secreplay *replay;
2842 u_int32_t diff;
2843 size_t fr;
2844 size_t wsizeb; /* constant: bits of window size */
2845 size_t frlast; /* constant: last frame */
2846
2847
2848 /* sanity check */
2849 if (sav == NULL) {
2850 panic("ipsec_chkreplay: NULL pointer was passed.");
2851 }
2852
2853 lck_mtx_lock(sadb_mutex);
2854 replay = sav->replay[replay_index];
2855
2856 if (replay->wsize == 0) {
2857 lck_mtx_unlock(sadb_mutex);
2858 return 1; /* no need to check replay. */
2859 }
2860
2861 /* constant */
2862 frlast = replay->wsize - 1;
2863 wsizeb = replay->wsize << 3;
2864
2865 /* sequence number of 0 is invalid */
2866 if (seq == 0) {
2867 lck_mtx_unlock(sadb_mutex);
2868 return 0;
2869 }
2870
2871 /* first time is always okay */
2872 if (replay->count == 0) {
2873 lck_mtx_unlock(sadb_mutex);
2874 return 1;
2875 }
2876
2877 if (seq > replay->lastseq) {
2878 /* larger sequences are okay */
2879 lck_mtx_unlock(sadb_mutex);
2880 return 1;
2881 } else {
2882 /* seq is equal or less than lastseq. */
2883 diff = replay->lastseq - seq;
2884
2885 /* over range to check, i.e. too old or wrapped */
2886 if (diff >= wsizeb) {
2887 lck_mtx_unlock(sadb_mutex);
2888 return 0;
2889 }
2890
2891 fr = frlast - diff / 8;
2892
2893 /* this packet already seen ? */
2894 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2895 lck_mtx_unlock(sadb_mutex);
2896 return 0;
2897 }
2898
2899 /* out of order but good */
2900 lck_mtx_unlock(sadb_mutex);
2901 return 1;
2902 }
2903}
2904
2905/*
2906 * Check the replay counter and update the replay window.
2907 * OUT: 0: OK
2908 * 1: NG
2909 */
2910int
2911ipsec_updatereplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2912{
2913 struct secreplay *replay;
2914 u_int32_t diff;
2915 size_t fr;
2916 size_t wsizeb; /* constant: bits of window size */
2917 size_t frlast; /* constant: last frame */
2918
2919 /* sanity check */
2920 if (sav == NULL) {
2921 panic("ipsec_updatereplay: NULL pointer was passed.");
2922 }
2923
2924 lck_mtx_lock(sadb_mutex);
2925 replay = sav->replay[replay_index];
2926
2927 if (replay->wsize == 0) {
2928 goto ok; /* no need to check replay. */
2929 }
2930 /* constant */
2931 frlast = replay->wsize - 1;
2932 wsizeb = replay->wsize << 3;
2933
2934 /* sequence number of 0 is invalid */
2935 if (seq == 0) {
2936 lck_mtx_unlock(sadb_mutex);
2937 return 1;
2938 }
2939
2940 /* first time */
2941 if (replay->count == 0) {
2942 replay->lastseq = seq;
2943 bzero(replay->bitmap, replay->wsize);
2944 (replay->bitmap)[frlast] = 1;
2945 goto ok;
2946 }
2947
2948 if (seq > replay->lastseq) {
2949 /* seq is larger than lastseq. */
2950 diff = seq - replay->lastseq;
2951
2952 /* new larger sequence number */
2953 if (diff < wsizeb) {
2954 /* In window */
2955 /* set bit for this packet */
2956 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
2957 (replay->bitmap)[frlast] |= 1;
2958 } else {
2959 /* this packet has a "way larger" sequence number; restart the window */
2960 bzero(replay->bitmap, replay->wsize);
2961 (replay->bitmap)[frlast] = 1;
2962 }
2963 replay->lastseq = seq;
2964
2965 /* larger is good */
2966 } else {
2967 /* seq is equal or less than lastseq. */
2968 diff = replay->lastseq - seq;
2969
2970 /* over range to check, i.e. too old or wrapped */
2971 if (diff >= wsizeb) {
2972 lck_mtx_unlock(sadb_mutex);
2973 return 1;
2974 }
2975
2976 fr = frlast - diff / 8;
2977
2978 /* this packet already seen ? */
2979 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2980 lck_mtx_unlock(sadb_mutex);
2981 return 1;
2982 }
2983
2984 /* mark as seen */
2985 (replay->bitmap)[fr] |= (1 << (diff % 8));
2986
2987 /* out of order but good */
2988 }
2989
2990ok:
2991 {
2992 u_int32_t max_count = ~0;
2993 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
2994 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
2995 max_count = PER_TC_REPLAY_WINDOW_RANGE;
2996 }
2997
2998 if (replay->count == max_count) {
2999 /* set overflow flag */
3000 replay->overflow++;
3001
3002 /* don't increment, no more packets accepted */
3003 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
3004 lck_mtx_unlock(sadb_mutex);
3005 return 1;
3006 }
3007
3008 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
3009 replay->overflow, ipsec_logsastr(sav)));
3010 }
3011 }
3012
3013 replay->count++;
3014
3015 lck_mtx_unlock(sadb_mutex);
3016 return 0;
3017}
3018
3019/*
3020 * shift variable length buffer to left.
3021 * IN: bitmap: pointer to the buffer
3022 * nbit: the number of bits to shift.
3023 * wsize: buffer size (bytes).
3024 */
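/*
 * Example: with wsize = 2 and bitmap = { 0x01, 0x80 }, vshiftl(bitmap, 1, 2)
 * shifts the 16-bit window left by one bit, carrying the top bit of
 * bitmap[1] into bitmap[0] and leaving { 0x03, 0x00 }.
 */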
3025static void
3026vshiftl(unsigned char *bitmap, int nbit, size_t wsize)
3027{
3028 size_t i;
3029 int s, j;
3030 unsigned char over;
3031
3032 for (j = 0; j < nbit; j += 8) {
3033 s = (nbit - j < 8) ? (nbit - j): 8;
3034 bitmap[0] <<= s;
3035 for (i = 1; i < wsize; i++) {
3036 over = (bitmap[i] >> (8 - s));
3037 bitmap[i] <<= s;
3038 bitmap[i - 1] |= over;
3039 }
3040 }
3041
3042 return;
3043}
3044
3045const char *
3046ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
3047{
3048 static char buf[256] __attribute__((aligned(4)));
3049 char *p;
3050 u_int8_t *s, *d;
3051
3052 s = (u_int8_t *)(&ip->ip_src);
3053 d = (u_int8_t *)(&ip->ip_dst);
3054
3055 p = buf;
3056 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3057 while (p && *p) {
3058 p++;
3059 }
3060 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
3061 s[0], s[1], s[2], s[3]);
3062 while (p && *p) {
3063 p++;
3064 }
3065 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
3066 d[0], d[1], d[2], d[3]);
3067 while (p && *p) {
3068 p++;
3069 }
3070 snprintf(p, sizeof(buf) - (p - buf), ")");
3071
3072 return buf;
3073}
3074
3075const char *
3076ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3077{
3078 static char buf[256] __attribute__((aligned(4)));
3079 char *p;
3080
3081 p = buf;
3082 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3083 while (p && *p) {
3084 p++;
3085 }
3086 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3087 ip6_sprintf(&ip6->ip6_src));
3088 while (p && *p) {
3089 p++;
3090 }
3091 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3092 ip6_sprintf(&ip6->ip6_dst));
3093 while (p && *p) {
3094 p++;
3095 }
3096 snprintf(p, sizeof(buf) - (p - buf), ")");
3097
3098 return buf;
3099}
3100
3101const char *
3102ipsec_logsastr(struct secasvar *sav)
3103{
3104 static char buf[256] __attribute__((aligned(4)));
3105 char *p;
3106 struct secasindex *saidx = &sav->sah->saidx;
3107
3108 /* validity check */
3109 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3110 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) {
3111 panic("ipsec_logsastr: family mismatched.");
3112 }
3113
3114 p = buf;
3115 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3116 while (p && *p) {
3117 p++;
3118 }
3119 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3120 u_int8_t *s, *d;
3121 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3122 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3123 snprintf(p, sizeof(buf) - (p - buf),
3124 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3125 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3126 } else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3127 snprintf(p, sizeof(buf) - (p - buf),
3128 "src=%s",
3129 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3130 while (p && *p) {
3131 p++;
3132 }
3133 snprintf(p, sizeof(buf) - (p - buf),
3134 " dst=%s",
3135 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3136 }
3137 while (p && *p) {
3138 p++;
3139 }
3140 snprintf(p, sizeof(buf) - (p - buf), ")");
3141
3142 return buf;
3143}
3144
3145void
3146ipsec_dumpmbuf(struct mbuf *m)
3147{
3148 int totlen;
3149 int i;
3150 u_char *p;
3151
3152 totlen = 0;
3153 printf("---\n");
3154 while (m) {
3155 p = mtod(m, u_char *);
3156 for (i = 0; i < m->m_len; i++) {
3157 printf("%02x ", p[i]);
3158 totlen++;
3159 if (totlen % 16 == 0) {
3160 printf("\n");
3161 }
3162 }
3163 m = m->m_next;
3164 }
3165 if (totlen % 16 != 0) {
3166 printf("\n");
3167 }
3168 printf("---\n");
3169}
3170
3171#if INET
3172/*
3173 * IPsec output logic for IPv4.
3174 */
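/*
 * ipsec4_output_internal() applies a single SA to the packet in 'state'.
 * For tunnel-mode SAs the packet is first encapsulated: IPv4-in-IPv6
 * tunnels are completed and transmitted directly via
 * ipsec6_update_routecache_and_output(), while IPv4-in-IPv4 tunnels refresh
 * the route cached in the SA header before falling through.  Transport
 * processing is then done by esp4_output() or ah4_output().
 */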
3175static int
3176ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3177{
3178 struct ip *ip = NULL;
3179 int error = 0;
3180 struct sockaddr_in *dst4;
3181 struct route *ro4;
3182
3183 /* validity check */
3184 if (sav == NULL || sav->sah == NULL) {
3185 error = EINVAL;
3186 goto bad;
3187 }
3188
3189 /*
3190 * If there is no valid SA, we give up on any further
3191 * processing. In such a case, the SA's status is changed
3192 * from DYING to DEAD after allocation. If a packet is
3193 * sent to the receiver using a dead SA, the receiver
3194 * cannot decode it because the SA is already dead.
3195 */
3196 if (sav->state != SADB_SASTATE_MATURE
3197 && sav->state != SADB_SASTATE_DYING) {
3198 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3199 error = EINVAL;
3200 goto bad;
3201 }
3202
3203 state->outgoing_if = sav->sah->outgoing_if;
3204
3205 /*
3206 * The SA status may change while we are referring to it, so
3207 * the lookup has to be serialized (historically via splsoftnet()).
3208 */
3209
3210 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3211 /*
3212 * build IPsec tunnel.
3213 */
3214 state->m = ipsec4_splithdr(state->m);
3215 if (!state->m) {
3216 error = ENOMEM;
3217 goto bad;
3218 }
3219
3220 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3221 error = ipsec46_encapsulate(state, sav);
3222 if (error) {
3223 // packet already freed by encapsulation error handling
3224 state->m = NULL;
3225 return error;
3226 }
3227
3228 error = ipsec6_update_routecache_and_output(state, sav);
3229 return error;
3230 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3231 error = ipsec4_encapsulate(state->m, sav);
3232 if (error) {
3233 state->m = NULL;
3234 goto bad;
3235 }
3236 ip = mtod(state->m, struct ip *);
3237
3238 // grab sadb_mutex, before updating sah's route cache
3239 lck_mtx_lock(sadb_mutex);
3240 ro4 = (struct route *)&sav->sah->sa_route;
3241 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3242 if (ro4->ro_rt != NULL) {
3243 RT_LOCK(ro4->ro_rt);
3244 }
3245 if (ROUTE_UNUSABLE(ro4) ||
3246 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3247 if (ro4->ro_rt != NULL) {
3248 RT_UNLOCK(ro4->ro_rt);
3249 }
3250 ROUTE_RELEASE(ro4);
3251 }
3252 if (ro4->ro_rt == 0) {
3253 dst4->sin_family = AF_INET;
3254 dst4->sin_len = sizeof(*dst4);
3255 dst4->sin_addr = ip->ip_dst;
3256 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3257 if (ro4->ro_rt == 0) {
3258 OSAddAtomic(1, &ipstat.ips_noroute);
3259 error = EHOSTUNREACH;
3260 // release sadb_mutex, after updating sah's route cache
3261 lck_mtx_unlock(sadb_mutex);
3262 goto bad;
3263 }
3264 RT_LOCK(ro4->ro_rt);
3265 }
3266
3267 /*
3268 * adjust state->dst if tunnel endpoint is offlink
3269 *
3270 * XXX: caching rt_gateway value in the state is
3271 * not really good, since it may point elsewhere
3272 * when the gateway gets modified to a larger
3273 * sockaddr via rt_setgate(). This is currently
3274 * addressed by SA_SIZE roundup in that routine.
3275 */
3276 if (ro4->ro_rt->rt_flags & RTF_GATEWAY) {
3277 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3278 }
3279 RT_UNLOCK(ro4->ro_rt);
3280 ROUTE_RELEASE(&state->ro);
3281 route_copyout((struct route *)&state->ro, ro4, sizeof(struct route));
3282 state->dst = (struct sockaddr *)dst4;
3283 state->tunneled = 4;
3284 // release sadb_mutex, after updating sah's route cache
3285 lck_mtx_unlock(sadb_mutex);
3286 } else {
3287 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3288 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3289 error = EAFNOSUPPORT;
3290 goto bad;
3291 }
3292 }
3293
3294 state->m = ipsec4_splithdr(state->m);
3295 if (!state->m) {
3296 error = ENOMEM;
3297 goto bad;
3298 }
3299 switch (sav->sah->saidx.proto) {
3300 case IPPROTO_ESP:
3301#if IPSEC_ESP
3302 if ((error = esp4_output(state->m, sav)) != 0) {
3303 state->m = NULL;
3304 goto bad;
3305 }
3306 break;
3307#else
3308 m_freem(state->m);
3309 state->m = NULL;
3310 error = EINVAL;
3311 goto bad;
3312#endif
3313 case IPPROTO_AH:
3314 if ((error = ah4_output(state->m, sav)) != 0) {
3315 state->m = NULL;
3316 goto bad;
3317 }
3318 break;
3319 default:
3320 ipseclog((LOG_ERR,
3321 "ipsec4_output: unknown ipsec protocol %d\n",
3322 sav->sah->saidx.proto));
3323 m_freem(state->m);
3324 state->m = NULL;
3325 error = EPROTONOSUPPORT;
3326 goto bad;
3327 }
3328
3329 if (state->m == 0) {
3330 error = ENOMEM;
3331 goto bad;
3332 }
3333
3334#if SKYWALK
3335 state->m->m_pkthdr.pkt_flowid = sav->flowid;
3336 state->m->m_pkthdr.pkt_flags |= PKTF_FLOW_ID;
3337#endif /* SKYWALK */
3338
3339 return 0;
3340
3341bad:
3342 return error;
3343}
3344
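/*
 * Kernel-pipe (Skywalk) output path: look up an outbound transport-mode SA
 * on the interface matching the packet's source/destination addresses,
 * copy the IP header from the source packet handle into the destination
 * packet, and let esp_kpipe_output() build the ESP payload in place.
 * Tunnel mode, AH and several legacy SADB flags are rejected here.
 */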
3345int
3346ipsec4_interface_kpipe_output(ifnet_t interface, kern_packet_t sph,
3347 kern_packet_t dph)
3348{
3349 struct sockaddr_in src = {};
3350 struct sockaddr_in dst = {};
3351 struct secasvar *sav = NULL;
3352 uint8_t *sbaddr = NULL;
3353 uint8_t *dbaddr = NULL;
3354 size_t hlen = 0;
3355 uint32_t slen = 0;
3356 uint32_t dlim = 0, doff = 0, dlen = 0;
3357 int err = 0;
3358
3359 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3360
3361 MD_BUFLET_ADDR(SK_PTR_ADDR_KPKT(sph), sbaddr);
3362 kern_buflet_t sbuf = __packet_get_next_buflet(sph, NULL);
3363 VERIFY(sbuf != NULL);
3364 slen = __buflet_get_data_length(sbuf);
3365
3366 if (__improbable(slen < sizeof(struct ip))) {
3367 os_log_info(OS_LOG_DEFAULT, "ipsec4 interface kpipe output: "
3368 "source buffer shorter than ip header, %u\n", slen);
3369 err = EINVAL;
3370 goto bad;
3371 }
3372
3373 struct ip *ip = (struct ip *)(void *)sbaddr;
3374 ASSERT(IP_HDR_ALIGNED_P(ip));
3375
3376 /* Find security association matching source and destination address */
3377 src.sin_family = AF_INET;
3378 src.sin_len = sizeof(src);
3379 src.sin_addr.s_addr = ip->ip_src.s_addr;
3380
3381 dst.sin_family = AF_INET;
3382 dst.sin_len = sizeof(dst);
3383 dst.sin_addr.s_addr = ip->ip_dst.s_addr;
3384
3385 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
3386 (struct sockaddr *)&src, (struct sockaddr *)&dst);
3387 if (__improbable(sav == NULL)) {
3388 os_log_info(OS_LOG_DEFAULT, "ipsec4 interface kpipe output: "
3389 "failed to find outbound sav\n");
3390 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3391 err = ENOENT;
3392 goto bad;
3393 }
3394
3395 if (__improbable(sav->sah == NULL)) {
3396 os_log_info(OS_LOG_DEFAULT, "ipsec4 interface kpipe output: "
3397 "sah is NULL\n");
3398 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3399 err = ENOENT;
3400 goto bad;
3401 }
3402
3403 if (__improbable(sav->sah->saidx.mode != IPSEC_MODE_TRANSPORT)) {
3404 os_log_info(OS_LOG_DEFAULT, "ipsec tunnel mode not supported "
3405 "in kpipe mode, SPI=%x\n", ntohl(sav->spi));
3406 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3407 err = EINVAL;
3408 goto bad;
3409 }
3410 if (__improbable((sav->flags & (SADB_X_EXT_OLD | SADB_X_EXT_DERIV |
3411 SADB_X_EXT_NATT | SADB_X_EXT_NATT_MULTIPLEUSERS |
3412 SADB_X_EXT_CYCSEQ | SADB_X_EXT_PMASK)) != 0)) {
3413 os_log_info(OS_LOG_DEFAULT, "sadb flag %x not supported in "
3414 "kpipe mode, SPI=%x\n", sav->flags, ntohl(sav->spi));
3415 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3416 err = EINVAL;
3417 goto bad;
3418 }
3419
3420 /*
3421 * If there is no valid SA, we give up on any further
3422 * processing. In such a case, the SA's status is changed
3423 * from DYING to DEAD after allocation. If a packet is
3424 * sent to the receiver using a dead SA, the receiver
3425 * cannot decode it because the SA is already dead.
3426 */
3427 if (__improbable(sav->state != SADB_SASTATE_MATURE
3428 && sav->state != SADB_SASTATE_DYING)) {
3429 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3430 err = EINVAL;
3431 goto bad;
3432 }
3433
3434#ifdef _IP_VHL
3435 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3436#else
3437 hlen = ip->ip_hl << 2;
3438#endif
3439 /* Copy the IP header from source packet to destination packet */
3440 MD_BUFLET_ADDR(SK_PTR_ADDR_KPKT(dph), dbaddr);
3441 kern_buflet_t dbuf = __packet_get_next_buflet(dph, NULL);
3442 doff = __buflet_get_data_offset(dbuf);
3443 VERIFY(doff == 0);
3444 dlen = __buflet_get_data_length(dbuf);
3445 VERIFY(dlen == 0);
3446
3447 dlim = __buflet_get_data_limit(dbuf);
3448 if (__improbable(dlim < hlen)) {
3449 os_log_info(OS_LOG_DEFAULT, "ipsec4 interface kpipe output: "
3450 "buflet size shorter than hlen %u, SPI=%x\n", dlim, ntohl(sav->spi));
3451 err = EMSGSIZE;
3452 goto bad;
3453 }
3454
3455 VERIFY(hlen <= UINT16_MAX);
3456 memcpy(dbaddr, sbaddr, hlen);
3457 __buflet_set_data_length(dbuf, (uint16_t)hlen);
3458
3459 switch (sav->sah->saidx.proto) {
3460 case IPPROTO_ESP: {
3461 if (__improbable((err = esp_kpipe_output(sav, sph, dph)) != 0)) {
3462 goto bad;
3463 }
3464 break;
3465 }
3466 case IPPROTO_AH: {
3467 os_log_info(OS_LOG_DEFAULT, "AH not supported in kpipe mode\n");
3468 err = EPROTONOSUPPORT;
3469 goto bad;
3470 }
3471 default: {
3472 os_log_info(OS_LOG_DEFAULT, "unknown ipsec protocol %d\n",
3473 sav->sah->saidx.proto);
3474 err = EPROTONOSUPPORT;
3475 goto bad;
3476 }
3477 }
3478
3479 key_freesav(sav, KEY_SADB_UNLOCKED);
3480 return 0;
3481bad:
3482 if (sav != NULL) {
3483 key_freesav(sav, KEY_SADB_UNLOCKED);
3484 sav = NULL;
3485 }
3486
3487 return err;
3488}
3489
3490int
3491ipsec6_interface_kpipe_output(ifnet_t interface, kern_packet_t sph,
3492 kern_packet_t dph)
3493{
3494 struct sockaddr_in6 src = {};
3495 struct sockaddr_in6 dst = {};
3496 struct secasvar *sav = NULL;
3497 uint8_t *sbaddr = NULL;
3498 uint8_t *dbaddr = NULL;
3499 uint32_t slen = 0;
3500 uint32_t dlim = 0, doff = 0, dlen = 0;
3501 int err = 0;
3502
3503 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3504
3505 MD_BUFLET_ADDR(SK_PTR_ADDR_KPKT(sph), sbaddr);
3506 kern_buflet_t sbuf = __packet_get_next_buflet(sph, NULL);
3507 VERIFY(sbuf != NULL);
3508 slen = __buflet_get_data_length(sbuf);
3509
3510 if (__improbable(slen < sizeof(struct ip6_hdr))) {
3511 os_log_info(OS_LOG_DEFAULT, "ipsec6 interface kpipe output: "
3512 "source buffer shorter than ipv6 header, %u\n", slen);
3513 err = EINVAL;
3514 goto bad;
3515 }
3516
3517 struct ip6_hdr *ip6 = (struct ip6_hdr *)sbaddr;
3518
3519 /* Find security association matching source and destination address */
3520 src.sin6_family = AF_INET6;
3521 src.sin6_len = sizeof(src);
3522 memcpy(&src.sin6_addr, &ip6->ip6_src, sizeof(src.sin6_addr));
3523
3524 dst.sin6_family = AF_INET6;
3525 dst.sin6_len = sizeof(dst);
3526 memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr));
3527
3528 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
3529 (struct sockaddr *)&src, (struct sockaddr *)&dst);
3530 if (__improbable(sav == NULL)) {
3531 os_log_info(OS_LOG_DEFAULT, "ipsec6 interface kpipe output: "
3532 "failed to find outbound sav\n");
3533 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3534 err = ENOENT;
3535 goto bad;
3536 }
3537
3538 if (__improbable(sav->sah == NULL)) {
3539 os_log_info(OS_LOG_DEFAULT, "ipsec6 interface kpipe output: "
3540 "sah is NULL\n");
3541 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3542 err = ENOENT;
3543 goto bad;
3544 }
3545
3546 if (__improbable(sav->sah->saidx.mode != IPSEC_MODE_TRANSPORT)) {
3547 os_log_info(OS_LOG_DEFAULT, "ipsec tunnel mode not supported "
3548 "in kpipe mode, SPI=%x\n", ntohl(sav->spi));
3549 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3550 err = EINVAL;
3551 goto bad;
3552 }
3553 if (__improbable((sav->flags & (SADB_X_EXT_OLD | SADB_X_EXT_DERIV |
3554 SADB_X_EXT_NATT | SADB_X_EXT_NATT_MULTIPLEUSERS |
3555 SADB_X_EXT_CYCSEQ | SADB_X_EXT_PMASK)) != 0)) {
3556 os_log_info(OS_LOG_DEFAULT, "sadb flag %x not supported in "
3557 "kpipe mode, SPI=%x\n", sav->flags, ntohl(sav->spi));
3558 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3559 err = EINVAL;
3560 goto bad;
3561 }
3562
3563 /*
3564 * If there is no valid SA, we give up on any further
3565 * processing. In such a case, the SA's status is changed
3566 * from DYING to DEAD after allocation. If a packet is
3567 * sent to the receiver using a dead SA, the receiver
3568 * cannot decode it because the SA is already dead.
3569 */
3570 if (__improbable(sav->state != SADB_SASTATE_MATURE
3571 && sav->state != SADB_SASTATE_DYING)) {
3572 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3573 err = EINVAL;
3574 goto bad;
3575 }
3576
3577 /* Copy the IPv6 header from source packet to destination packet */
3578 MD_BUFLET_ADDR(SK_PTR_ADDR_KPKT(dph), dbaddr);
3579 kern_buflet_t dbuf = __packet_get_next_buflet(dph, NULL);
3580 doff = __buflet_get_data_offset(dbuf);
3581 VERIFY(doff == 0);
3582 dlen = __buflet_get_data_length(dbuf);
3583 VERIFY(dlen == 0);
3584
3585 dlim = __buflet_get_data_limit(dbuf);
3586 if (__improbable(dlim < sizeof(struct ip6_hdr))) {
3587 os_log_info(OS_LOG_DEFAULT, "ipsec6 interface kpipe output: "
3588 "buflet size %u shorter than IPv6 header, SPI=%x\n", dlim, ntohl(sav->spi));
3589 err = EMSGSIZE;
3590 goto bad;
3591 }
3592
3593 memcpy(dbaddr, sbaddr, sizeof(struct ip6_hdr));
3594 __buflet_set_data_length(dbuf, sizeof(struct ip6_hdr));
3595
3596 switch (sav->sah->saidx.proto) {
3597 case IPPROTO_ESP: {
3598 if (__improbable((err = esp_kpipe_output(sav, sph, dph)) != 0)) {
3599 goto bad;
3600 }
3601 break;
3602 }
3603 case IPPROTO_AH: {
3604 os_log_info(OS_LOG_DEFAULT, "AH not supported in kpipe mode\n");
3605 err = EPROTONOSUPPORT;
3606 goto bad;
3607 }
3608 default: {
3609 os_log_info(OS_LOG_DEFAULT, "unknown ipsec protocol %d\n",
3610 sav->sah->saidx.proto);
3611 err = EPROTONOSUPPORT;
3612 goto bad;
3613 }
3614 }
3615
3616 key_freesav(sav, KEY_SADB_UNLOCKED);
3617 return 0;
3618bad:
3619 if (sav != NULL) {
3620 key_freesav(sav, KEY_SADB_UNLOCKED);
3621 sav = NULL;
3622 }
3623
3624 return err;
3625}
3626
3627int
3628ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3629{
3630 int error = 0;
3631 struct secasvar *sav = NULL;
3632
3633 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3634
3635 if (state == NULL) {
3636 panic("state == NULL in ipsec4_output");
3637 }
3638 if (state->m == NULL) {
3639 panic("state->m == NULL in ipsec4_output");
3640 }
3641 if (state->dst == NULL) {
3642 panic("state->dst == NULL in ipsec4_output");
3643 }
3644
3645 struct ip *ip = mtod(state->m, struct ip *);
3646
3647 struct sockaddr_in src = {};
3648 src.sin_family = AF_INET;
3649 src.sin_len = sizeof(src);
3650 memcpy(&src.sin_addr, &ip->ip_src, sizeof(src.sin_addr));
3651
3652 struct sockaddr_in dst = {};
3653 dst.sin_family = AF_INET;
3654 dst.sin_len = sizeof(dst);
3655 memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr));
3656
3657 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
3658 (struct sockaddr *)&src,
3659 (struct sockaddr *)&dst);
3660 if (sav == NULL) {
3661 goto bad;
3662 }
3663
3664 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3665 goto bad;
3666 }
3667
3668 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3669 if (sav) {
3670 key_freesav(sav, KEY_SADB_UNLOCKED);
3671 }
3672 return 0;
3673
3674bad:
3675 if (sav) {
3676 key_freesav(sav, KEY_SADB_UNLOCKED);
3677 }
3678 m_freem(state->m);
3679 state->m = NULL;
3680 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3681 return error;
3682}
3683
3684int
3685ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3686{
3687 struct ip *ip = NULL;
3688 struct ipsecrequest *isr = NULL;
3689 struct secasindex saidx;
3690 struct secasvar *sav = NULL;
3691 int error = 0;
3692 struct sockaddr_in *sin;
3693
3694 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3695
3696 if (!state) {
3697 panic("state == NULL in ipsec4_output");
3698 }
3699 if (!state->m) {
3700 panic("state->m == NULL in ipsec4_output");
3701 }
3702 if (!state->dst) {
3703 panic("state->dst == NULL in ipsec4_output");
3704 }
3705
3706 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
3707
3708 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3709 printf("ipsec4_output: applied SP\n");
3710 kdebug_secpolicy(sp));
3711
3712 for (isr = sp->req; isr != NULL; isr = isr->next) {
3713 /* make SA index for search proper SA */
3714 ip = mtod(state->m, struct ip *);
3715 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3716 saidx.mode = isr->saidx.mode;
3717 saidx.reqid = isr->saidx.reqid;
3718 sin = (struct sockaddr_in *)&saidx.src;
3719 if (sin->sin_len == 0) {
3720 sin->sin_len = sizeof(*sin);
3721 sin->sin_family = AF_INET;
3722 sin->sin_port = IPSEC_PORT_ANY;
3723 bcopy(&ip->ip_src, &sin->sin_addr,
3724 sizeof(sin->sin_addr));
3725 }
3726 sin = (struct sockaddr_in *)&saidx.dst;
3727 if (sin->sin_len == 0) {
3728 sin->sin_len = sizeof(*sin);
3729 sin->sin_family = AF_INET;
3730 sin->sin_port = IPSEC_PORT_ANY;
3731 /*
3732 * Get port from packet if upper layer is UDP and nat traversal
3733 * is enabled and transport mode.
3734 */
3735
3736 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3737 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3738 if (ip->ip_p == IPPROTO_UDP) {
3739 struct udphdr *udp;
3740 u_int32_t hlen;
3741#ifdef _IP_VHL
3742 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3743#else
3744 hlen = ip->ip_hl << 2;
3745#endif
3746 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3747 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3748 if (!state->m) {
3749 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3750 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3751 goto bad;
3752 }
3753 ip = mtod(state->m, struct ip *);
3754 }
3755 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3756 sin->sin_port = udp->uh_dport;
3757 }
3758 }
3759
3760 bcopy(&ip->ip_dst, &sin->sin_addr,
3761 sizeof(sin->sin_addr));
3762 }
3763
3764 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3765 /*
3766 * IPsec processing is required, but no SA found.
3767 * I assume that key_acquire() had been called
3768 * to get/establish the SA. Here I discard
3769 * this packet because it is the responsibility of the
3770 * upper layer to retransmit it.
3771 */
3772 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3773 goto bad;
3774 }
3775
3776 /* validity check */
3777 if (sav == NULL) {
3778 switch (ipsec_get_reqlevel(isr)) {
3779 case IPSEC_LEVEL_USE:
3780 continue;
3781 case IPSEC_LEVEL_REQUIRE:
3782 /* must not be reached here. */
3783 panic("ipsec4_output: no SA found, but required.");
3784 }
3785 }
3786
3787 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3788 goto bad;
3789 }
3790 }
3791
3792 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3793 if (sav) {
3794 key_freesav(sav, KEY_SADB_UNLOCKED);
3795 }
3796 return 0;
3797
3798bad:
3799 if (sav) {
3800 key_freesav(sav, KEY_SADB_UNLOCKED);
3801 }
3802 m_freem(state->m);
3803 state->m = NULL;
3804 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3805 return error;
3806}
3807
3808#endif
3809
3810/*
3811 * IPsec output logic for IPv6, transport mode.
3812 */
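/*
 * ipsec6_output_trans_internal() applies one transport-mode SA: the AH/ESP
 * header is inserted after the mbuf pointed to by 'mprev' and *nexthdrp is
 * rewritten to the new protocol number, after which ip6_plen is patched to
 * the updated payload length.
 */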
3813static int
3814ipsec6_output_trans_internal(
3815 struct ipsec_output_state *state,
3816 struct secasvar *sav,
3817 u_char *nexthdrp,
3818 struct mbuf *mprev)
3819{
3820 struct ip6_hdr *ip6;
3821 size_t plen;
3822 int error = 0;
3823
3824 /* validity check */
3825 if (sav == NULL || sav->sah == NULL) {
3826 error = EINVAL;
3827 goto bad;
3828 }
3829
3830 /*
3831 * If there is no valid SA, we give up on processing;
3832 * see the same check in ipsec4_output().
3833 */
3834 if (sav->state != SADB_SASTATE_MATURE
3835 && sav->state != SADB_SASTATE_DYING) {
3836 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3837 error = EINVAL;
3838 goto bad;
3839 }
3840
3841 state->outgoing_if = sav->sah->outgoing_if;
3842
3843 switch (sav->sah->saidx.proto) {
3844 case IPPROTO_ESP:
3845#if IPSEC_ESP
3846 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3847#else
3848 m_freem(state->m);
3849 error = EINVAL;
3850#endif
3851 break;
3852 case IPPROTO_AH:
3853 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3854 break;
3855 default:
3856 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3857 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3858 m_freem(state->m);
3859 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3860 error = EPROTONOSUPPORT;
3861 break;
3862 }
3863 if (error) {
3864 state->m = NULL;
3865 goto bad;
3866 }
3867 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3868 if (plen > IPV6_MAXPACKET) {
3869 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3870 "IPsec with IPv6 jumbogram is not supported\n"));
3871 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3872 error = EINVAL; /*XXX*/
3873 goto bad;
3874 }
3875 ip6 = mtod(state->m, struct ip6_hdr *);
3876 ip6->ip6_plen = htons((u_int16_t)plen);
3877
3878#if SKYWALK
3879 ASSERT(state->m != NULL);
3880 state->m->m_pkthdr.pkt_flowid = sav->flowid;
3881 state->m->m_pkthdr.pkt_flags |= PKTF_FLOW_ID;
3882#endif /* SKYWALK */
3883 return 0;
3884bad:
3885 return error;
3886}
3887
3888int
3889ipsec6_output_trans(
3890 struct ipsec_output_state *state,
3891 u_char *nexthdrp,
3892 struct mbuf *mprev,
3893 struct secpolicy *sp,
3894 __unused int flags,
3895 int *tun)
3896{
3897 struct ip6_hdr *ip6;
3898 struct ipsecrequest *isr = NULL;
3899 struct secasindex saidx;
3900 int error = 0;
3901 struct sockaddr_in6 *sin6;
3902 struct secasvar *sav = NULL;
3903
3904 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3905
3906 if (!state) {
3907 panic("state == NULL in ipsec6_output_trans");
3908 }
3909 if (!state->m) {
3910 panic("state->m == NULL in ipsec6_output_trans");
3911 }
3912 if (!nexthdrp) {
3913 panic("nexthdrp == NULL in ipsec6_output_trans");
3914 }
3915 if (!mprev) {
3916 panic("mprev == NULL in ipsec6_output_trans");
3917 }
3918 if (!sp) {
3919 panic("sp == NULL in ipsec6_output_trans");
3920 }
3921 if (!tun) {
3922 panic("tun == NULL in ipsec6_output_trans");
3923 }
3924
3925 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3926 printf("ipsec6_output_trans: applied SP\n");
3927 kdebug_secpolicy(sp));
3928
3929 *tun = 0;
3930 for (isr = sp->req; isr; isr = isr->next) {
3931 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3932 /* the rest will be handled by ipsec6_output_tunnel() */
3933 break;
3934 }
3935
3936 /* make SA index to search for a proper SA */
3937 ip6 = mtod(state->m, struct ip6_hdr *);
3938 bcopy(src: &isr->saidx, dst: &saidx, n: sizeof(saidx));
3939 saidx.mode = isr->saidx.mode;
3940 saidx.reqid = isr->saidx.reqid;
3941 sin6 = (struct sockaddr_in6 *)&saidx.src;
3942 if (sin6->sin6_len == 0) {
3943 sin6->sin6_len = sizeof(*sin6);
3944 sin6->sin6_family = AF_INET6;
3945 sin6->sin6_port = IPSEC_PORT_ANY;
3946 bcopy(src: &ip6->ip6_src, dst: &sin6->sin6_addr,
3947 n: sizeof(ip6->ip6_src));
3948 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3949 /* fix scope id for comparing SPD */
3950 sin6->sin6_scope_id = ip6_output_getsrcifscope(state->m);
3951 in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
3952 if (in6_embedded_scope) {
3953 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3954 sin6->sin6_addr.s6_addr16[1] = 0;
3955 }
3956 }
3957 }
3958 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3959 if (sin6->sin6_len == 0) {
3960 sin6->sin6_len = sizeof(*sin6);
3961 sin6->sin6_family = AF_INET6;
3962 sin6->sin6_port = IPSEC_PORT_ANY;
3963 bcopy(src: &ip6->ip6_dst, dst: &sin6->sin6_addr,
3964 n: sizeof(ip6->ip6_dst));
3965 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3966 /* fix scope id for comparing SPD */
3967 sin6->sin6_scope_id = ip6_output_getdstifscope(state->m);
3968 in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
3969 if (in6_embedded_scope) {
3970 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3971 sin6->sin6_addr.s6_addr16[1] = 0;
3972 }
3973 }
3974 }
3975
3976 if (key_checkrequest(isr, &saidx, sav: &sav) == ENOENT) {
3977 /*
3978 * IPsec processing is required, but no SA was found.
3979 * I assume that key_acquire() has been called
3980 * to get/establish the SA.  Here I discard
3981 * this packet because it is the upper layer's
3982 * responsibility to retransmit the packet.
3983 */
3984 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3985 error = ENOENT;
3986
3987 /*
3988 * Notify ourselves of the fact that the packet
3989 * has been discarded.  I believe this is better
3990 * than just silently discarding it. (jinmei@kame.net)
3991 * XXX: should we restrict the error to TCP packets?
3992 * XXX: should we directly notify sockets via
3993 * pfctlinputs?
3994 */
3995 icmp6_error(state->m, ICMP6_DST_UNREACH,
3996 ICMP6_DST_UNREACH_ADMIN, 0);
3997 state->m = NULL; /* icmp6_error freed the mbuf */
3998 goto bad;
3999 }
4000
4001 /* validity check */
4002 if (sav == NULL) {
4003 switch (ipsec_get_reqlevel(isr)) {
4004 case IPSEC_LEVEL_USE:
4005 continue;
4006 case IPSEC_LEVEL_REQUIRE:
4007 /* must not be reached here. */
4008 panic("ipsec6_output_trans: no SA found, but required.");
4009 }
4010 }
4011
4012 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4013 goto bad;
4014 }
4015 }
4016
4017 /* if we have more to go, we need tunnel mode processing */
4018 if (isr != NULL) {
4019 *tun = 1;
4020 }
4021
4022 if (sav) {
4023 key_freesav(sav, KEY_SADB_UNLOCKED);
4024 }
4025 return 0;
4026
4027bad:
4028 if (sav) {
4029 key_freesav(sav, KEY_SADB_UNLOCKED);
4030 }
4031 m_freem(state->m);
4032 state->m = NULL;
4033 return error;
4034}
4035
4036/*
4037 * IPsec output logic for IPv6, tunnel mode.
4038 */
4039static int
4040ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
4041{
4042 struct ip6_hdr *ip6;
4043 struct sockaddr_in6* dst6;
4044 struct route_in6 *ro6;
4045 size_t plen;
4046 int error = 0;
4047
4048 /* validity check */
4049 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
4050 error = EINVAL;
4051 goto bad;
4052 }
4053
4054 /*
4055 * If there is no valid SA, give up processing;
4056 * see the corresponding check in ipsec4_output().
4057 */
4058 if (sav->state != SADB_SASTATE_MATURE
4059 && sav->state != SADB_SASTATE_DYING) {
4060 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4061 error = EINVAL;
4062 goto bad;
4063 }
4064
4065 state->outgoing_if = sav->sah->outgoing_if;
4066
4067 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4068 /*
4069 * build IPsec tunnel.
4070 */
4071 state->m = ipsec6_splithdr(state->m);
4072 if (!state->m) {
4073 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
4074 error = ENOMEM;
4075 goto bad;
4076 }
4077
4078 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
4079 error = ipsec6_encapsulate(m: state->m, sav);
4080 if (error) {
4081 state->m = 0;
4082 goto bad;
4083 }
4084 ip6 = mtod(state->m, struct ip6_hdr *);
4085 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
4086 struct ip *ip;
4087 struct sockaddr_in* dst4;
4088 struct route *ro4 = NULL;
4089 struct route ro4_copy;
4090 struct ip_out_args ipoa;
4091
4092 bzero(s: &ipoa, n: sizeof(ipoa));
4093 ipoa.ipoa_boundif = IFSCOPE_NONE;
4094 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
4095 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4096 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4097
4098 if (must_be_last) {
4099 *must_be_last = 1;
4100 }
4101
4102 state->tunneled = 4; /* must not process any further in ip6_output */
4103 error = ipsec64_encapsulate(m: state->m, sav, dscp_mapping: state->dscp_mapping);
4104 if (error) {
4105 state->m = 0;
4106 goto bad;
4107 }
4108 /* Now we have an IPv4 packet */
4109 ip = mtod(state->m, struct ip *);
4110
4111 // grab sadb_mutex, to update sah's route cache and get a local copy of it
4112 lck_mtx_lock(sadb_mutex);
4113 ro4 = (struct route *)&sav->sah->sa_route;
4114 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
4115 if (ro4->ro_rt) {
4116 RT_LOCK(ro4->ro_rt);
4117 }
4118 if (ROUTE_UNUSABLE(ro4) ||
4119 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
4120 if (ro4->ro_rt != NULL) {
4121 RT_UNLOCK(ro4->ro_rt);
4122 }
4123 ROUTE_RELEASE(ro4);
4124 }
4125 if (ro4->ro_rt == NULL) {
4126 dst4->sin_family = AF_INET;
4127 dst4->sin_len = sizeof(*dst4);
4128 dst4->sin_addr = ip->ip_dst;
4129 } else {
4130 RT_UNLOCK(ro4->ro_rt);
4131 }
4132 route_copyout(&ro4_copy, ro4, sizeof(struct route));
4133 // release sadb_mutex, after updating sah's route cache and getting a local copy
4134 lck_mtx_unlock(sadb_mutex);
4135 state->m = ipsec4_splithdr(state->m);
4136 if (!state->m) {
4137 error = ENOMEM;
4138 ROUTE_RELEASE(&ro4_copy);
4139 goto bad;
4140 }
4141 switch (sav->sah->saidx.proto) {
4142 case IPPROTO_ESP:
4143#if IPSEC_ESP
4144 if ((error = esp4_output(state->m, sav)) != 0) {
4145 state->m = NULL;
4146 ROUTE_RELEASE(&ro4_copy);
4147 goto bad;
4148 }
4149 break;
4150
4151#else
4152 m_freem(state->m);
4153 state->m = NULL;
4154 error = EINVAL;
4155 ROUTE_RELEASE(&ro4_copy);
4156 goto bad;
4157#endif
4158 case IPPROTO_AH:
4159 if ((error = ah4_output(state->m, sav)) != 0) {
4160 state->m = NULL;
4161 ROUTE_RELEASE(&ro4_copy);
4162 goto bad;
4163 }
4164 break;
4165 default:
4166 ipseclog((LOG_ERR,
4167 "ipsec6_output_tunnel: unknown ipsec protocol %d\n",
4168 sav->sah->saidx.proto));
4169 m_freem(state->m);
4170 state->m = NULL;
4171 error = EPROTONOSUPPORT;
4172 ROUTE_RELEASE(&ro4_copy);
4173 goto bad;
4174 }
4175
4176 if (state->m == 0) {
4177 error = ENOMEM;
4178 ROUTE_RELEASE(&ro4_copy);
4179 goto bad;
4180 }
4181 ipsec_set_pkthdr_for_interface(interface: sav->sah->ipsec_if, packet: state->m,
4182 AF_INET, flowid: sav->flowid);
4183 ipsec_set_ipoa_for_interface(interface: sav->sah->ipsec_if, ipoa: &ipoa);
4184
4185 ip = mtod(state->m, struct ip *);
4186 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
4187 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
4188 state->m = NULL;
4189 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
4190 lck_mtx_lock(sadb_mutex);
4191 route_copyin(&ro4_copy, ro4, sizeof(struct route));
4192 lck_mtx_unlock(sadb_mutex);
4193 if (error != 0) {
4194 goto bad;
4195 }
4196 goto done;
4197 } else {
4198 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4199 "unsupported inner family, spi=%u\n",
4200 (u_int32_t)ntohl(sav->spi)));
4201 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4202 error = EAFNOSUPPORT;
4203 goto bad;
4204 }
4205
4206 // grab sadb_mutex, before updating sah's route cache
4207 lck_mtx_lock(sadb_mutex);
4208 ro6 = &sav->sah->sa_route;
4209 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
4210 if (ro6->ro_rt) {
4211 RT_LOCK(ro6->ro_rt);
4212 }
4213 if (ROUTE_UNUSABLE(ro6) ||
4214 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
4215 if (ro6->ro_rt != NULL) {
4216 RT_UNLOCK(ro6->ro_rt);
4217 }
4218 ROUTE_RELEASE(ro6);
4219 }
4220 if (ro6->ro_rt == 0) {
4221 bzero(s: dst6, n: sizeof(*dst6));
4222 dst6->sin6_family = AF_INET6;
4223 dst6->sin6_len = sizeof(*dst6);
4224 dst6->sin6_addr = ip6->ip6_dst;
4225 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
4226 if (ro6->ro_rt) {
4227 RT_LOCK(ro6->ro_rt);
4228 }
4229 }
4230 if (ro6->ro_rt == 0) {
4231 ip6stat.ip6s_noroute++;
4232 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
4233 error = EHOSTUNREACH;
4234 // release sadb_mutex, after updating sah's route cache
4235 lck_mtx_unlock(sadb_mutex);
4236 goto bad;
4237 }
4238
4239 /*
4240 * adjust state->dst if the tunnel endpoint is off-link
4241 *
4242 * XXX: caching rt_gateway value in the state is
4243 * not really good, since it may point elsewhere
4244 * when the gateway gets modified to a larger
4245 * sockaddr via rt_setgate(). This is currently
4246 * addressed by SA_SIZE roundup in that routine.
4247 */
4248 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
4249 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
4250 }
4251 RT_UNLOCK(ro6->ro_rt);
4252 ROUTE_RELEASE(&state->ro);
4253 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
4254 state->dst = (struct sockaddr *)dst6;
4255 state->tunneled = 6;
4256 // release sadb_mutex, after updating sah's route cache
4257 lck_mtx_unlock(sadb_mutex);
4258 }
4259
4260 state->m = ipsec6_splithdr(state->m);
4261 if (!state->m) {
4262 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
4263 error = ENOMEM;
4264 goto bad;
4265 }
4266 ip6 = mtod(state->m, struct ip6_hdr *);
4267 switch (sav->sah->saidx.proto) {
4268 case IPPROTO_ESP:
4269#if IPSEC_ESP
4270 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4271#else
4272 m_freem(state->m);
4273 error = EINVAL;
4274#endif
4275 break;
4276 case IPPROTO_AH:
4277 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4278 break;
4279 default:
4280 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4281 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
4282 m_freem(state->m);
4283 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4284 error = EINVAL;
4285 break;
4286 }
4287 if (error) {
4288 state->m = NULL;
4289 goto bad;
4290 }
4291 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
4292 if (plen > IPV6_MAXPACKET) {
4293 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4294 "IPsec with IPv6 jumbogram is not supported\n"));
4295 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4296 error = EINVAL; /*XXX*/
4297 goto bad;
4298 }
4299 ip6 = mtod(state->m, struct ip6_hdr *);
4300 ip6->ip6_plen = htons((u_int16_t)plen);
4301done:
4302#if SKYWALK
4303 if (state->m != NULL) {
4304 state->m->m_pkthdr.pkt_flowid = sav->flowid;
4305 state->m->m_pkthdr.pkt_flags |= PKTF_FLOW_ID;
4306 }
4307#endif /* SKYWALK */
4308
4309 return 0;
4310
4311bad:
4312 return error;
4313}
4314
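/*
 * Apply the tunnel-mode portion of the security policy.  A request whose
 * outer header is IPv4 is transmitted directly via ip_output() and must be
 * the outermost (last) transform; a further request after it is rejected
 * with EINVAL.
 */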
4315int
4316ipsec6_output_tunnel(
4317 struct ipsec_output_state *state,
4318 struct secpolicy *sp,
4319 __unused int flags)
4320{
4321 struct ip6_hdr *ip6;
4322 struct ipsecrequest *isr = NULL;
4323 struct secasindex saidx;
4324 struct secasvar *sav = NULL;
4325 int error = 0;
4326
4327 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4328
4329 if (!state) {
4330 panic("state == NULL in ipsec6_output_tunnel");
4331 }
4332 if (!state->m) {
4333 panic("state->m == NULL in ipsec6_output_tunnel");
4334 }
4335 if (!sp) {
4336 panic("sp == NULL in ipsec6_output_tunnel");
4337 }
4338
4339 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
4340 printf("ipsec6_output_tunnel: applied SP\n");
4341 kdebug_secpolicy(sp));
4342
4343 /*
4344 * transport mode ipsec (before the 1st tunnel mode) is already
4345 * processed by ipsec6_output_trans().
4346 */
4347 for (isr = sp->req; isr; isr = isr->next) {
4348 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4349 break;
4350 }
4351 }
4352
4353 for (/* already initialized */; isr; isr = isr->next) {
4354 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4355 /* In tunnel mode, SA peers must be specified. */
4356 bcopy(src: &isr->saidx, dst: &saidx, n: sizeof(saidx));
4357 } else {
4358 /* make SA index to look for a proper SA */
4359 struct sockaddr_in6 *sin6;
4360
4361 bzero(s: &saidx, n: sizeof(saidx));
4362 saidx.proto = isr->saidx.proto;
4363 saidx.mode = isr->saidx.mode;
4364 saidx.reqid = isr->saidx.reqid;
4365
4366 ip6 = mtod(state->m, struct ip6_hdr *);
4367 sin6 = (struct sockaddr_in6 *)&saidx.src;
4368 if (sin6->sin6_len == 0) {
4369 sin6->sin6_len = sizeof(*sin6);
4370 sin6->sin6_family = AF_INET6;
4371 sin6->sin6_port = IPSEC_PORT_ANY;
4372 bcopy(src: &ip6->ip6_src, dst: &sin6->sin6_addr,
4373 n: sizeof(ip6->ip6_src));
4374 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
4375 /* fix scope id for comparing SPD */
4376 sin6->sin6_scope_id = ip6_output_getsrcifscope(state->m);
4377 in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
4378 if (in6_embedded_scope) {
4379 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
4380 sin6->sin6_addr.s6_addr16[1] = 0;
4381 }
4382 }
4383 }
4384 sin6 = (struct sockaddr_in6 *)&saidx.dst;
4385 if (sin6->sin6_len == 0) {
4386 sin6->sin6_len = sizeof(*sin6);
4387 sin6->sin6_family = AF_INET6;
4388 sin6->sin6_port = IPSEC_PORT_ANY;
4389 bcopy(src: &ip6->ip6_dst, dst: &sin6->sin6_addr,
4390 n: sizeof(ip6->ip6_dst));
4391 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
4392 /* fix scope id for comparing SPD */
4393 sin6->sin6_scope_id = ip6_output_getdstifscope(state->m);
4394 in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
4395 if (in6_embedded_scope) {
4396 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
4397 sin6->sin6_addr.s6_addr16[1] = 0;
4398 }
4399 }
4400 }
4401 }
4402
4403 if (key_checkrequest(isr, &saidx, sav: &sav) == ENOENT) {
4404 /*
4405 * IPsec processing is required, but no SA was found.
4406 * I assume that key_acquire() has been called
4407 * to get/establish the SA.  Here I discard
4408 * this packet because it is the upper layer's
4409 * responsibility to retransmit the packet.
4410 */
4411 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4412 error = ENOENT;
4413 goto bad;
4414 }
4415
4416 /* validity check */
4417 if (sav == NULL) {
4418 switch (ipsec_get_reqlevel(isr)) {
4419 case IPSEC_LEVEL_USE:
4420 continue;
4421 case IPSEC_LEVEL_REQUIRE:
4422 /* must not be reached here. */
4423 panic("ipsec6_output_tunnel: no SA found, but required.");
4424 }
4425 }
4426
4427 /*
4428 * If there is no valid SA, give up processing;
4429 * see the corresponding check in ipsec4_output().
4430 */
4431 if (sav->state != SADB_SASTATE_MATURE
4432 && sav->state != SADB_SASTATE_DYING) {
4433 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4434 error = EINVAL;
4435 goto bad;
4436 }
4437
4438 int must_be_last = 0;
4439
4440 if ((error = ipsec6_output_tunnel_internal(state, sav, must_be_last: &must_be_last)) != 0) {
4441 goto bad;
4442 }
4443
4444 if (must_be_last && isr->next) {
4445 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4446 "IPv4 must be outer layer, spi=%u\n",
4447 (u_int32_t)ntohl(sav->spi)));
4448 error = EINVAL;
4449 goto bad;
4450 }
4451 }
4452
4453 if (sav) {
4454 key_freesav(sav, KEY_SADB_UNLOCKED);
4455 }
4456 return 0;
4457
4458bad:
4459 if (sav) {
4460 key_freesav(sav, KEY_SADB_UNLOCKED);
4461 }
4462 if (state->m) {
4463 m_freem(state->m);
4464 }
4465 state->m = NULL;
4466 return error;
4467}
4468
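/*
 * Output path for SAs bound to an ipsec interface: look up the outbound SA
 * for the interface and the packet's addresses, then apply tunnel-mode or
 * transport-mode processing depending on the SA's mode.
 */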
4469int
4470ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4471{
4472 int error = 0;
4473 struct secasvar *sav = NULL;
4474
4475 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4476
4477 if (state == NULL) {
4478 panic("state == NULL in ipsec6_interface_output");
4479 }
4480 if (state->m == NULL) {
4481 panic("state->m == NULL in ipsec6_interface_output");
4482 }
4483 if (nexthdrp == NULL) {
4484 panic("nexthdrp == NULL in ipsec6_interface_output");
4485 }
4486 if (mprev == NULL) {
4487 panic("mprev == NULL in ipsec6_interface_output");
4488 }
4489
4490 struct ip6_hdr *ip6 = mtod(state->m, struct ip6_hdr *);
4491
4492 struct sockaddr_in6 src = {};
4493 src.sin6_family = AF_INET6;
4494 src.sin6_len = sizeof(src);
4495 memcpy(dst: &src.sin6_addr, src: &ip6->ip6_src, n: sizeof(src.sin6_addr));
4496
4497 struct sockaddr_in6 dst = {};
4498 dst.sin6_family = AF_INET6;
4499 dst.sin6_len = sizeof(dst);
4500 memcpy(dst: &dst.sin6_addr, src: &ip6->ip6_dst, n: sizeof(dst.sin6_addr));
4501
4502 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
4503 src: (struct sockaddr *)&src,
4504 dst: (struct sockaddr *)&dst);
4505 if (sav == NULL) {
4506 goto bad;
4507 }
4508
4509 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4510 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4511 goto bad;
4512 }
4513 } else {
4514 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4515 goto bad;
4516 }
4517 }
4518
4519 if (sav) {
4520 key_freesav(sav, KEY_SADB_UNLOCKED);
4521 }
4522 return 0;
4523
4524bad:
4525 if (sav) {
4526 key_freesav(sav, KEY_SADB_UNLOCKED);
4527 }
4528 m_freem(state->m);
4529 state->m = NULL;
4530 return error;
4531}
4532
4533#if INET
4534/*
4535 * Split the IP header (and options) off into its own mbuf, separate from the payload.
4536 */
4537struct mbuf *
4538ipsec4_splithdr(struct mbuf *m)
4539{
4540 struct mbuf *mh;
4541 struct ip *ip;
4542 int hlen;
4543
4544 if (m->m_len < sizeof(struct ip)) {
4545 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4546 }
4547 ip = mtod(m, struct ip *);
4548#ifdef _IP_VHL
4549 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4550#else
4551 hlen = ip->ip_hl << 2;
4552#endif
4553 if (m->m_len > hlen) {
4554 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4555 if (!mh) {
4556 m_freem(m);
4557 return NULL;
4558 }
4559 M_COPY_PKTHDR(mh, m);
4560 MH_ALIGN(mh, hlen);
4561 m->m_flags &= ~M_PKTHDR;
4562 m_mchtype(m, MT_DATA);
4563 m->m_len -= hlen;
4564 m->m_data += hlen;
4565 mh->m_next = m;
4566 m = mh;
4567 m->m_len = hlen;
4568 bcopy(src: (caddr_t)ip, mtod(m, caddr_t), n: hlen);
4569 } else if (m->m_len < hlen) {
4570 m = m_pullup(m, hlen);
4571 if (!m) {
4572 return NULL;
4573 }
4574 }
4575 return m;
4576}
4577#endif
4578
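/*
 * IPv6 counterpart of ipsec4_splithdr(): make sure the IPv6 header sits
 * alone in the leading mbuf of the chain.
 */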
4579struct mbuf *
4580ipsec6_splithdr(struct mbuf *m)
4581{
4582 struct mbuf *mh;
4583 struct ip6_hdr *ip6;
4584 int hlen;
4585
4586 if (m->m_len < sizeof(struct ip6_hdr)) {
4587 panic("ipsec6_splithdr: first mbuf too short");
4588 }
4589 ip6 = mtod(m, struct ip6_hdr *);
4590 hlen = sizeof(struct ip6_hdr);
4591 if (m->m_len > hlen) {
4592 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4593 if (!mh) {
4594 m_freem(m);
4595 return NULL;
4596 }
4597 M_COPY_PKTHDR(mh, m);
4598 MH_ALIGN(mh, hlen);
4599 m->m_flags &= ~M_PKTHDR;
4600 m_mchtype(m, MT_DATA);
4601 m->m_len -= hlen;
4602 m->m_data += hlen;
4603 mh->m_next = m;
4604 m = mh;
4605 m->m_len = hlen;
4606 bcopy(src: (caddr_t)ip6, mtod(m, caddr_t), n: hlen);
4607 } else if (m->m_len < hlen) {
4608 m = m_pullup(m, hlen);
4609 if (!m) {
4610 return NULL;
4611 }
4612 }
4613 return m;
4614}
4615
4616/* validate inbound IPsec tunnel packet. */
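/*
 * Returns non-zero when the packet should be decapsulated as an IPsec
 * tunnel-mode packet, in which case *ifamily is set to the inner address
 * family; returns 0 to leave the packet to other handlers.
 */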
4617int
4618ipsec4_tunnel_validate(
4619 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4620 int off,
4621 u_int nxt0,
4622 struct secasvar *sav,
4623 sa_family_t *ifamily)
4624{
4625 u_int8_t nxt = nxt0 & 0xff;
4626 struct sockaddr_in *sin;
4627 struct sockaddr_in osrc, odst, i4src, i4dst;
4628 struct sockaddr_in6 i6src, i6dst;
4629 int hlen;
4630 struct secpolicy *sp;
4631 struct ip *oip;
4632
4633 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4634
4635 /* do not decapsulate if the SA is for transport mode only */
4636 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4637 return 0;
4638 }
4639
4640#if DIAGNOSTIC
4641 if (m->m_len < sizeof(struct ip)) {
4642 panic("too short mbuf on ipsec4_tunnel_validate");
4643 }
4644#endif
4645 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
4646 return 0;
4647 }
4648 if (m->m_pkthdr.len < off + sizeof(struct ip)) {
4649 return 0;
4650 }
4651
4652 oip = mtod(m, struct ip *);
4653#ifdef _IP_VHL
4654 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4655#else
4656 hlen = oip->ip_hl << 2;
4657#endif
4658 if (hlen != sizeof(struct ip)) {
4659 return 0;
4660 }
4661
4662 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4663 if (sin->sin_family != AF_INET) {
4664 return 0;
4665 }
4666 if (bcmp(s1: &oip->ip_dst, s2: &sin->sin_addr, n: sizeof(oip->ip_dst)) != 0) {
4667 return 0;
4668 }
4669
4670 if (sav->sah->ipsec_if != NULL) {
4671 // the ipsec interface SAs don't have policies.
4672 if (nxt == IPPROTO_IPV4) {
4673 *ifamily = AF_INET;
4674 } else if (nxt == IPPROTO_IPV6) {
4675 *ifamily = AF_INET6;
4676 } else {
4677 return 0;
4678 }
4679 return 1;
4680 }
4681
4682 /* XXX slow */
4683 bzero(s: &osrc, n: sizeof(osrc));
4684 bzero(s: &odst, n: sizeof(odst));
4685 osrc.sin_family = odst.sin_family = AF_INET;
4686 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4687 osrc.sin_addr = oip->ip_src;
4688 odst.sin_addr = oip->ip_dst;
4689 /*
4690 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
4691 * - if the inner destination is a multicast address, there can be
4692 *   multiple permissible inner source addresses. an implementation
4693 *   may want to skip verification of the inner source address against
4694 *   the SPD selector.
4695 * - if the inner protocol is ICMP, the packet may be an error report
4696 *   from routers on the other side of the VPN cloud (R in the
4697 *   following diagram). in this case, we cannot verify the inner
4698 *   source address against the SPD selector.
4699 * me -- gw === gw -- R -- you
4700 *
4701 * we consider the first bullet to be the user's responsibility on SPD
4702 * entry configuration (if you need to encrypt multicast traffic, set
4703 * the source range of the SPD selector to 0.0.0.0/0, or have explicit
4704 * address ranges for possible senders).
4705 * the second bullet is not taken care of (yet).
4706 *
4707 * therefore, we do not do anything special about the inner source.
4708 */
4709 if (nxt == IPPROTO_IPV4) {
4710 bzero(s: &i4src, n: sizeof(struct sockaddr_in));
4711 bzero(s: &i4dst, n: sizeof(struct sockaddr_in));
4712 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4713 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4714 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4715 (caddr_t)&i4src.sin_addr);
4716 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4717 (caddr_t)&i4dst.sin_addr);
4718 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4719 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4720 } else if (nxt == IPPROTO_IPV6) {
4721 bzero(s: &i6src, n: sizeof(struct sockaddr_in6));
4722 bzero(s: &i6dst, n: sizeof(struct sockaddr_in6));
4723 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4724 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4725 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4726 (caddr_t)&i6src.sin6_addr);
4727 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4728 (caddr_t)&i6dst.sin6_addr);
4729 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4730 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4731 } else {
4732 return 0; /* unsupported family */
4733 }
4734 if (!sp) {
4735 return 0;
4736 }
4737
4738 key_freesp(sp, KEY_SADB_UNLOCKED);
4739
4740 return 1;
4741}
4742
4743/* validate inbound IPsec tunnel packet. */
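/*
 * As with ipsec4_tunnel_validate(): returns non-zero when the packet should
 * be decapsulated as an IPsec tunnel-mode packet (with *ifamily set to the
 * inner address family), 0 otherwise.
 */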
4744int
4745ipsec6_tunnel_validate(
4746 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4747 int off,
4748 u_int nxt0,
4749 struct secasvar *sav,
4750 sa_family_t *ifamily)
4751{
4752 u_int8_t nxt = nxt0 & 0xff;
4753 struct sockaddr_in6 *sin6;
4754 struct sockaddr_in i4src, i4dst;
4755 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4756 struct secpolicy *sp;
4757 struct ip6_hdr *oip6;
4758
4759 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4760
4761 /* do not decapsulate if the SA is for transport mode only */
4762 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4763 return 0;
4764 }
4765
4766#if DIAGNOSTIC
4767 if (m->m_len < sizeof(struct ip6_hdr)) {
4768 panic("too short mbuf on ipsec6_tunnel_validate");
4769 }
4770#endif
4771 if (nxt == IPPROTO_IPV4) {
4772 if (m->m_pkthdr.len < off + sizeof(struct ip)) {
4773 ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr)));
4774 return 0;
4775 }
4776 } else if (nxt == IPPROTO_IPV6) {
4777 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
4778 ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr)));
4779 return 0;
4780 }
4781 } else {
4782 ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate invalid nxt(%u) protocol", nxt));
4783 return 0;
4784 }
4785
4786 oip6 = mtod(m, struct ip6_hdr *);
4787 /* AF_INET should be supported, but at this moment it is not. */
4788 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4789 if (sin6->sin6_family != AF_INET6) {
4790 return 0;
4791 }
4792
4793 struct in6_addr tmp_sah_dst_addr = {};
4794 struct in6_addr *sah_dst_addr = &((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
4795 if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(sah_dst_addr)) {
4796 memcpy(dst: &tmp_sah_dst_addr, src: sah_dst_addr, n: sizeof(tmp_sah_dst_addr));
4797 tmp_sah_dst_addr.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
4798 sah_dst_addr = &tmp_sah_dst_addr;
4799 }
4800 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, sah_dst_addr)) {
4801 return 0;
4802 }
4803
4804 if (sav->sah->ipsec_if != NULL) {
4805 // the ipsec interface SAs don't have policies.
4806 if (nxt == IPPROTO_IPV4) {
4807 *ifamily = AF_INET;
4808 } else if (nxt == IPPROTO_IPV6) {
4809 *ifamily = AF_INET6;
4810 } else {
4811 return 0;
4812 }
4813 return 1;
4814 }
4815
4816 /* XXX slow */
4817 bzero(s: &osrc, n: sizeof(osrc));
4818 bzero(s: &odst, n: sizeof(odst));
4819 osrc.sin6_family = odst.sin6_family = AF_INET6;
4820 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4821 osrc.sin6_addr = oip6->ip6_src;
4822 odst.sin6_addr = oip6->ip6_dst;
4823
4824 /*
4825 * regarding inner source address validation, see the long comment
4826 * in ipsec4_tunnel_validate.
4827 */
4828
4829 if (nxt == IPPROTO_IPV4) {
4830 bzero(s: &i4src, n: sizeof(struct sockaddr_in));
4831 bzero(s: &i4dst, n: sizeof(struct sockaddr_in));
4832 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4833 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4834 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4835 (caddr_t)&i4src.sin_addr);
4836 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4837 (caddr_t)&i4dst.sin_addr);
4838 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4839 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4840 } else if (nxt == IPPROTO_IPV6) {
4841 bzero(s: &i6src, n: sizeof(struct sockaddr_in6));
4842 bzero(s: &i6dst, n: sizeof(struct sockaddr_in6));
4843 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4844 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4845 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4846 (caddr_t)&i6src.sin6_addr);
4847 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4848 (caddr_t)&i6dst.sin6_addr);
4849 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4850 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4851 } else {
4852 return 0; /* unsupported family */
4853 }
4854 /*
4855 * when there is no suitable inbound policy for an ipsec tunnel-mode
4856 * packet, the kernel never decapsulates it as ipsec tunnel mode,
4857 * even when the system-wide policy is "none". the kernel then
4858 * leaves the packet to the generic tunnel module; if no generic
4859 * tunnel rule matches, the packet is rejected and the statistics
4860 * are counted up.
4861 */
4862 if (!sp) {
4863 return 0;
4864 }
4865 key_freesp(sp, KEY_SADB_UNLOCKED);
4866
4867 return 1;
4868}
4869
4870/*
4871 * Make an mbuf chain suitable for encryption.
4872 * If the original mbuf chain contains an mbuf with a cluster,
4873 * allocate a new cluster and copy the data into it.
4874 * XXX: this hack is inefficient, but is necessary to handle cases
4875 * of TCP retransmission...
4876 */
4877struct mbuf *
4878ipsec_copypkt(struct mbuf *m)
4879{
4880 struct mbuf *n, **mpp, *mnew;
4881
4882 for (n = m, mpp = &m; n; n = n->m_next) {
4883 if (n->m_flags & M_EXT) {
4884 /*
4885 * Make a copy only if there is more than one reference
4886 * to the cluster.
4887 * XXX: is this approach effective?
4888 */
4889 if (
4890 m_get_ext_free(n) != NULL ||
4891 m_mclhasreference(n)
4892 ) {
4893 int remain, copied;
4894 struct mbuf *mm;
4895
4896 if (n->m_flags & M_PKTHDR) {
4897 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4898 if (mnew == NULL) {
4899 goto fail;
4900 }
4901 M_COPY_PKTHDR(mnew, n);
4902 } else {
4903 MGET(mnew, M_DONTWAIT, MT_DATA);
4904 if (mnew == NULL) {
4905 goto fail;
4906 }
4907 }
4908 mnew->m_len = 0;
4909 mm = mnew;
4910
4911 /*
4912 * Copy data. If we don't have enough space to
4913 * store the whole data, allocate a cluster
4914 * or additional mbufs.
4915 * XXX: we don't use m_copyback(), since the
4916 * function does not use clusters and thus is
4917 * inefficient.
4918 */
4919 remain = n->m_len;
4920 copied = 0;
4921 while (1) {
4922 int len;
4923 struct mbuf *mn;
4924
4925 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) {
4926 len = remain;
4927 } else { /* allocate a cluster */
4928 MCLGET(mm, M_DONTWAIT);
4929 if (!(mm->m_flags & M_EXT)) {
4930 m_free(mm);
4931 goto fail;
4932 }
4933 len = remain < MCLBYTES ?
4934 remain : MCLBYTES;
4935 }
4936
4937 bcopy(src: m_mtod_current(m: n) + copied, dst: m_mtod_current(m: mm), n: len);
4938
4939 copied += len;
4940 remain -= len;
4941 mm->m_len = len;
4942
4943 if (remain <= 0) { /* completed? */
4944 break;
4945 }
4946
4947 /* need another mbuf */
4948 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4949 if (mn == NULL) {
4950 goto fail;
4951 }
4952 mn->m_pkthdr.rcvif = NULL;
4953 mm->m_next = mn;
4954 mm = mn;
4955 }
4956
4957 /* adjust chain */
4958 mm->m_next = m_free(n);
4959 n = mm;
4960 *mpp = mnew;
4961 mpp = &n->m_next;
4962
4963 continue;
4964 }
4965 }
4966 *mpp = n;
4967 mpp = &n->m_next;
4968 }
4969
4970 return m;
4971fail:
4972 m_freem(m);
4973 return NULL;
4974}
4975
4976/* Used to avoid processing the packet over again */
4977#define IPSEC_HISTORY_MAX 8
4978
4979struct ipsec_tag {
4980 struct socket *socket;
4981 u_int32_t history_count;
4982};
4983
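/*
 * Attach the IPsec m_tag to the mbuf, creating it if it does not already
 * exist.  The tag carries the originating socket and the count of IPsec
 * transforms already applied to the packet.
 */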
4984static struct ipsec_tag *
4985ipsec_addaux(
4986 struct mbuf *m)
4987{
4988 struct m_tag *tag;
4989
4990 /* Check if the tag already exists */
4991 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC);
4992
4993 if (tag == NULL) {
4994 struct ipsec_tag *itag;
4995
4996 /* Allocate a tag */
4997 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4998 sizeof(struct ipsec_tag), M_DONTWAIT, m);
4999
5000 if (tag) {
5001 itag = (struct ipsec_tag*)(tag->m_tag_data);
5002 itag->socket = 0;
5003 itag->history_count = 0;
5004
5005 m_tag_prepend(m, tag);
5006 }
5007 }
5008
5009 return tag ? (struct ipsec_tag*)(tag->m_tag_data) : NULL;
5010}
5011
5012static struct ipsec_tag *
5013ipsec_findaux(
5014 struct mbuf *m)
5015{
5016 struct m_tag *tag;
5017
5018 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC);
5019
5020 return tag != NULL ? (struct ipsec_tag*)(tag->m_tag_data) : NULL;
5021}
5022
5023void
5024ipsec_delaux(
5025 struct mbuf *m)
5026{
5027 struct m_tag *tag;
5028
5029 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC);
5030
5031 if (tag != NULL) {
5032 m_tag_delete(m, tag);
5033 }
5034}
5035
5036/* if the aux buffer is unnecessary, nuke it. */
5037static void
5038ipsec_optaux(
5039 struct mbuf *m,
5040 struct ipsec_tag *itag)
5041{
5042 if (itag != NULL && itag->socket == NULL && itag->history_count == 0) {
5043 ipsec_delaux(m);
5044 }
5045}
5046
5047int
5048ipsec_setsocket(struct mbuf *m, struct socket *so)
5049{
5050 struct ipsec_tag *tag;
5051
5052 /* if so == NULL, don't insist on allocating the aux tag */
5053 if (so != NULL) {
5054 tag = ipsec_addaux(m);
5055 if (!tag) {
5056 return ENOBUFS;
5057 }
5058 } else {
5059 tag = ipsec_findaux(m);
5060 }
5061 if (tag != NULL) {
5062 tag->socket = so;
5063 ipsec_optaux(m, itag: tag);
5064 }
5065 return 0;
5066}
5067
5068struct socket *
5069ipsec_getsocket(struct mbuf *m)
5070{
5071 struct ipsec_tag *itag;
5072
5073 itag = ipsec_findaux(m);
5074 if (itag) {
5075 return itag->socket;
5076 } else {
5077 return NULL;
5078 }
5079}
5080
5081int
5082ipsec_incr_history_count(
5083 struct mbuf *m,
5084 __unused int proto,
5085 __unused u_int32_t spi)
5086{
5087 struct ipsec_tag *itag;
5088
5089 itag = ipsec_addaux(m);
5090 if (itag == NULL) {
5091 return ENOBUFS;
5092 }
5093 if (itag->history_count == IPSEC_HISTORY_MAX) {
5094 return ENOSPC; /* XXX */
5095 }
5096 itag->history_count++;
5097
5098 return 0;
5099}
5100
5101u_int32_t
5102ipsec_get_history_count(
5103 struct mbuf *m)
5104{
5105 struct ipsec_tag *itag;
5106
5107 itag = ipsec_findaux(m);
5108 if (itag == NULL) {
5109 return 0;
5110 }
5111 return itag->history_count;
5112}
5113
5114struct ipsec_tag_container {
5115 struct m_tag ipsec_m_tag;
5116 struct ipsec_tag ipsec_tag;
5117};
5118
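/*
 * Type-specific allocator and deallocator for KERNEL_TAG_TYPE_IPSEC m_tags,
 * registered with the mbuf tag subsystem by ipsec_register_m_tag() so the
 * tag and its payload come from a dedicated kalloc type.
 */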
5119static struct m_tag *
5120m_tag_kalloc_ipsec(u_int32_t id, u_int16_t type, uint16_t len, int wait)
5121{
5122 struct ipsec_tag_container *tag_container;
5123 struct m_tag *tag = NULL;
5124
5125 assert3u(id, ==, KERNEL_MODULE_TAG_ID);
5126 assert3u(type, ==, KERNEL_TAG_TYPE_IPSEC);
5127 assert3u(len, ==, sizeof(struct ipsec_tag));
5128
5129 if (len != sizeof(struct ipsec_tag)) {
5130 return NULL;
5131 }
5132
5133 tag_container = kalloc_type(struct ipsec_tag_container, wait | M_ZERO);
5134 if (tag_container != NULL) {
5135 tag = &tag_container->ipsec_m_tag;
5136
5137 assert3p(tag, ==, tag_container);
5138
5139 M_TAG_INIT(tag, id, type, len, &tag_container->ipsec_tag, NULL);
5140 }
5141
5142 return tag;
5143}
5144
5145static void
5146m_tag_kfree_ipsec(struct m_tag *tag)
5147{
5148 struct ipsec_tag_container *tag_container = (struct ipsec_tag_container *)tag;
5149
5150 assert3u(tag->m_tag_len, ==, sizeof(struct ipsec_tag));
5151
5152 kfree_type(struct ipsec_tag_container, tag_container);
5153}
5154
5155void
5156ipsec_register_m_tag(void)
5157{
5158 int error;
5159
5160 error = m_register_internal_tag_type(type: KERNEL_TAG_TYPE_IPSEC, len: sizeof(struct ipsec_tag),
5161 alloc_func: m_tag_kalloc_ipsec, free_func: m_tag_kfree_ipsec);
5162
5163 assert3u(error, ==, 0);
5164}
5165
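/*
 * Send a NAT-T keepalive for the given SA: a UDP datagram carrying a single
 * 0xFF byte, built with its own IPv4 or IPv6 header because no inpcb is
 * available here.  Returns TRUE only when the keepalive was sent and the
 * SA's last-activity timestamp was refreshed.
 */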
5166__private_extern__ boolean_t
5167ipsec_send_natt_keepalive(
5168 struct secasvar *sav)
5169{
5170 struct mbuf *m = NULL;
5171 int error = 0;
5172 int keepalive_interval = natt_keepalive_interval;
5173
5174 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
5175 lck_mtx_lock(sadb_mutex);
5176
5177 if (((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) || sav->remote_ike_port == 0) {
5178 lck_mtx_unlock(sadb_mutex);
5179 return FALSE;
5180 }
5181
5182 if (sav->natt_interval != 0) {
5183 keepalive_interval = (int)sav->natt_interval;
5184 }
5185
5186 // natt timestamp may have changed... reverify
5187 if ((natt_now - sav->natt_last_activity) < keepalive_interval) {
5188 lck_mtx_unlock(sadb_mutex);
5189 return FALSE;
5190 }
5191
5192 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) {
5193 lck_mtx_unlock(sadb_mutex);
5194 return FALSE; // don't send these from the kernel
5195 }
5196
5197 lck_mtx_unlock(sadb_mutex);
5198
5199 m = m_gethdr(M_NOWAIT, MT_DATA);
5200 if (m == NULL) {
5201 return FALSE;
5202 }
5203
5204 lck_mtx_lock(sadb_mutex);
5205 if (sav->sah->saidx.dst.ss_family == AF_INET) {
5206 struct ip_out_args ipoa = {};
5207 struct route ro = {};
5208
5209 ipoa.ipoa_boundif = IFSCOPE_NONE;
5210 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
5211 ipoa.ipoa_sotc = SO_TC_UNSPEC;
5212 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
5213
5214 struct ip *ip = (__typeof__(ip))m_mtod(m);
5215
5216 /*
5217 * Type 2: a UDP packet complete with IP header.
5218 * We must do this because UDP output requires
5219 * an inpcb, which we don't have.  The UDP packet
5220 * carries a one-byte payload; the byte is set
5221 * to 0xFF.
5222 */
5223 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
5224 m->m_len = sizeof(struct udpiphdr) + 1;
5225 bzero(s: m_mtod(m), n: m->m_len);
5226 m->m_pkthdr.len = m->m_len;
5227
5228 ip->ip_len = (u_short)m->m_len;
5229 ip->ip_ttl = (u_char)ip_defttl;
5230 ip->ip_p = IPPROTO_UDP;
5231 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5232 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5233 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5234 } else {
5235 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5236 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5237 }
5238 if (sav->natt_encapsulated_src_port != 0) {
5239 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5240 } else {
5241 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5242 }
5244 uh->uh_dport = htons(sav->remote_ike_port);
5245 uh->uh_ulen = htons(1 + sizeof(*uh));
5246 uh->uh_sum = 0;
5247 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
5248
5249 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
5250 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) {
5251 ROUTE_RELEASE(&sav->sah->sa_route);
5252 }
5253
5254 route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
5255 lck_mtx_unlock(sadb_mutex);
5256
5257 necp_mark_packet_as_keepalive(packet: m, TRUE);
5258 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
5259
5260 lck_mtx_lock(sadb_mutex);
5261 route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
5262 } else if (sav->sah->saidx.dst.ss_family == AF_INET6) {
5263 struct ip6_out_args ip6oa = {};
5264 struct route_in6 ro6 = {};
5265
5266 ip6oa.ip6oa_flowadv.code = 0;
5267 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
5268 if (sav->sah->outgoing_if) {
5269 ip6oa.ip6oa_boundif = sav->sah->outgoing_if;
5270 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
5271 }
5272
5273 struct ip6_hdr *ip6 = (__typeof__(ip6))m_mtod(m);
5274
5275 /*
5276 * Type 2: a UDP packet complete with IPv6 header.
5277 * We must do this because UDP output requires
5278 * an inpcb, which we don't have.  The UDP packet
5279 * carries a one-byte payload; the byte is set
5280 * to 0xFF.
5281 */
5282 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip6));
5283 m->m_len = sizeof(struct udphdr) + sizeof(struct ip6_hdr) + 1;
5284 bzero(s: m_mtod(m), n: m->m_len);
5285 m->m_pkthdr.len = m->m_len;
5286
5287 ip6->ip6_flow = 0;
5288 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
5289 ip6->ip6_vfc |= IPV6_VERSION;
5290 ip6->ip6_nxt = IPPROTO_UDP;
5291 ip6->ip6_hlim = (u_int8_t)ip6_defhlim;
5292 ip6->ip6_plen = htons(sizeof(struct udphdr) + 1);
5293 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5294 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
5295 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
5296 ip6_output_setsrcifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_scope_id, NULL);
5297 ip6_output_setdstifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_scope_id, NULL);
5298 } else {
5299 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
5300 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
5301 ip6_output_setdstifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_scope_id, NULL);
5302 ip6_output_setsrcifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_scope_id, NULL);
5303 }
5304
5305 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
5306 ip6->ip6_src.s6_addr16[1] = 0;
5307 }
5308 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
5309 ip6->ip6_dst.s6_addr16[1] = 0;
5310 }
5311
5312 if (sav->natt_encapsulated_src_port != 0) {
5313 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5314 } else {
5315 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5316 }
5317 uh->uh_dport = htons(sav->remote_ike_port);
5318 uh->uh_ulen = htons(1 + sizeof(*uh));
5319 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip6) + sizeof(*uh)) = 0xFF;
5320 uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(uh->uh_ulen) + IPPROTO_UDP));
5321 m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
5322 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
5323
5324 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
5325 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET6) {
5326 ROUTE_RELEASE(&sav->sah->sa_route);
5327 }
5328
5329 route_copyout((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5330 lck_mtx_unlock(sadb_mutex);
5331
5332 necp_mark_packet_as_keepalive(packet: m, TRUE);
5333 error = ip6_output(m, NULL, &ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa);
5334
5335 lck_mtx_lock(sadb_mutex);
5336 route_copyin((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5337 } else {
5338 ipseclog((LOG_ERR, "nat keepalive: invalid address family %u\n", sav->sah->saidx.dst.ss_family));
5339 lck_mtx_unlock(sadb_mutex);
5340 m_freem(m);
5341 return FALSE;
5342 }
5343
5344 if (error == 0) {
5345 sav->natt_last_activity = natt_now;
5346 lck_mtx_unlock(sadb_mutex);
5347 return TRUE;
5348 }
5349
5350 lck_mtx_unlock(sadb_mutex);
5351 return FALSE;
5352}
5353
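/*
 * Build a NAT-T keepalive frame that the interface can transmit on the
 * kernel's behalf (keepalive offload).  Only IPv4 NAT-T SAs bound to the
 * given interface and flagged for keepalive offload are eligible; the frame
 * interval comes from the SA or the global default.
 */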
5354__private_extern__ bool
5355ipsec_fill_offload_frame(ifnet_t ifp,
5356 struct secasvar *sav,
5357 struct ifnet_keepalive_offload_frame *frame,
5358 size_t frame_data_offset)
5359{
5360 u_int8_t *data = NULL;
5361 struct ip *ip = NULL;
5362 struct udphdr *uh = NULL;
5363
5364 if (sav == NULL || sav->sah == NULL || frame == NULL ||
5365 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
5366 sav->sah->saidx.dst.ss_family != AF_INET ||
5367 !(sav->flags & SADB_X_EXT_NATT) ||
5368 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
5369 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
5370 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
5371 ((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) ||
5372 sav->remote_ike_port == 0 ||
5373 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
5374 /* SA is not eligible for keepalive offload on this interface */
5375 return FALSE;
5376 }
5377
5378 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
5379 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5380 /* Not enough room in this data frame */
5381 return FALSE;
5382 }
5383
5384 data = frame->data;
5385 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
5386 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
5387
5388 frame->length = (u_int8_t)(frame_data_offset + sizeof(struct udpiphdr) + 1);
5389 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
5390 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
5391
5392 bzero(s: data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
5393
5394 ip->ip_v = IPVERSION;
5395 ip->ip_hl = sizeof(struct ip) >> 2;
5396 ip->ip_off &= htons(~IP_OFFMASK);
5397 ip->ip_off &= htons(~IP_MF);
5398 switch (ip4_ipsec_dfbit) {
5399 case 0: /* clear DF bit */
5400 ip->ip_off &= htons(~IP_DF);
5401 break;
5402 case 1: /* set DF bit */
5403 ip->ip_off |= htons(IP_DF);
5404 break;
5405 default: /* copy DF bit */
5406 break;
5407 }
5408 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
5409 if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) {
5410 ip->ip_id = 0;
5411 } else {
5412 ip->ip_id = ip_randomid((uint64_t)data);
5413 }
5414 ip->ip_ttl = (u_char)ip_defttl;
5415 ip->ip_p = IPPROTO_UDP;
5416 ip->ip_sum = 0;
5417 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5418 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5419 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5420 } else {
5421 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5422 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5423 }
5424 ip->ip_sum = in_cksum_hdr_opt(ip);
5425 /* Fill out the UDP header */
5426 if (sav->natt_encapsulated_src_port != 0) {
5427 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5428 } else {
5429 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5430 }
5431 uh->uh_dport = htons(sav->remote_ike_port);
5432 uh->uh_ulen = htons(1 + sizeof(*uh));
5433 uh->uh_sum = 0;
5434 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
5435
5436 if (sav->natt_offload_interval != 0) {
5437 frame->interval = sav->natt_offload_interval;
5438 } else if (sav->natt_interval != 0) {
5439 frame->interval = sav->natt_interval;
5440 } else {
5441 frame->interval = (u_int16_t)natt_keepalive_interval;
5442 }
5443 return TRUE;
5444}
5445
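/*
 * Collect the local TCP/UDP ports in use on every IPSEC-family interface.
 * Called from the sleep/wake handler just before the system sleeps.
 */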
5446static void
5447ipsec_get_local_ports(void)
5448{
5449 errno_t error;
5450 ifnet_t *ifp_list;
5451 uint32_t count, i;
5452 static uint8_t port_bitmap[bitstr_size(IP_PORTRANGE_SIZE)];
5453
5454 error = ifnet_list_get_all(family: IFNET_FAMILY_IPSEC, interfaces: &ifp_list, count: &count);
5455 if (error != 0) {
5456 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_list_get_all() failed %d",
5457 __func__, error);
5458 return;
5459 }
5460 for (i = 0; i < count; i++) {
5461 ifnet_t ifp = ifp_list[i];
5462
5463 /*
5464 * Get all the TCP and UDP ports for IPv4 and IPv6
5465 */
5466 error = ifnet_get_local_ports_extended(ifp, PF_UNSPEC,
5467 IFNET_GET_LOCAL_PORTS_WILDCARDOK |
5468 IFNET_GET_LOCAL_PORTS_NOWAKEUPOK |
5469 IFNET_GET_LOCAL_PORTS_ANYTCPSTATEOK,
5470 bitfield: port_bitmap);
5471 if (error != 0) {
5472 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_get_local_ports_extended(%s) failed %d",
5473 __func__, if_name(ifp), error);
5474 }
5475 }
5476 ifnet_list_free(interfaces: ifp_list);
5477}
5478
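/*
 * Power-management callback registered by ipsec_monitor_sleep_wake();
 * refreshes the local port list when the system is about to sleep.
 */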
5479static IOReturn
5480ipsec_sleep_wake_handler(void *target, void *refCon, UInt32 messageType,
5481 void *provider, void *messageArgument, vm_size_t argSize)
5482{
5483#pragma unused(target, refCon, provider, messageArgument, argSize)
5484 switch (messageType) {
5485 case kIOMessageSystemWillSleep:
5486 {
5487 ipsec_get_local_ports();
5488 break;
5489 }
5490 default:
5491 break;
5492 }
5493
5494 return IOPMAckImplied;
5495}
5496
5497void
5498ipsec_monitor_sleep_wake(void)
5499{
5500 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
5501
5502 if (sleep_wake_handle == NULL) {
5503 sleep_wake_handle = registerSleepWakeInterest(ipsec_sleep_wake_handler,
5504 NULL, NULL);
5505 if (sleep_wake_handle != NULL) {
5506 ipseclog((LOG_INFO,
5507 "ipsec: monitoring sleep wake"));
5508 }
5509 }
5510}
5511
5512void
5513ipsec_init(void)
5514{
5515 ipsec_register_control();
5516}
5517