/*
 * Copyright (c) 2008-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>

#include <kern/locks.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netinet6/esp_rijndael.h>

#include <libkern/crypto/aes.h>

#include <netkey/key.h>

#include <net/net_osdep.h>

#define MAX_REALIGN_LEN 2000
#define AES_BLOCKLEN 16
#define ESP_GCM_SALT_LEN 4 // RFC 4106 Section 4
#define ESP_GCM_IVLEN 8
#define ESP_GCM_ALIGN 16

typedef struct {
	ccgcm_ctx *decrypt;
	ccgcm_ctx *encrypt;
	ccgcm_ctx ctxt[0];
} aes_gcm_ctx;

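/*
 * Return the size of the cipher context the SADB should allocate for an
 * AES-CBC SA; both the encrypt and the decrypt key schedules live in a
 * single aes_ctx.
 */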
size_t
esp_aes_schedlen(
	__unused const struct esp_algorithm *algo)
{
	return sizeof(aes_ctx);
}

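/*
 * Expand the SA's encryption key into the AES encrypt and decrypt key
 * schedules stored in sav->sched_enc. Called with the SADB mutex held.
 */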
int
esp_aes_schedule(
	__unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_ctx *ctx = (aes_ctx*)sav->sched_enc;

	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);

	return 0;
}


/* The following two functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers in place,
 * along with the IV.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can based on the amount of source data in
 * the current source mbuf and the space remaining in the current
 * destination mbuf. The crypto code requires data to be a multiple
 * of 16 bytes. A separate buffer is used when a 16 byte block spans
 * mbufs.
 *
 * m = mbuf chain
 * off = offset to ESP header
 *
 * local vars for source:
 * soff = offset from beginning of the chain to the head of the
 *        current mbuf.
 * scut = last mbuf that contains headers to be retained
 * scutoff = offset to end of the headers in scut
 * s = the current mbuf
 * sn = current offset to data in s (next source data to process)
 *
 * local vars for dest:
 * d0 = head of chain
 * d = current mbuf
 * dn = current offset in d (next location to store result)
 */

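/*
 * Decrypt the AES-CBC payload of the ESP packet in mbuf chain m: ciphertext
 * blocks are read from the source chain and the plaintext is written to a
 * freshly allocated destination chain, which is then spliced in after the
 * retained headers and IV.
 */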
int
esp_cbc_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	struct mbuf *scut;
	int scutoff;
	int i, len;


	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	VERIFY(ivoff <= INT_MAX);

	/* grab iv */
	m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN; // full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN; // 1 block only in sbuf
		}

		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					kfree_data(sp_aligned, MAX_REALIGN_LEN);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched_enc)->decrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		// next iv
		memcpy(iv, sp + len - AES_BLOCKLEN, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		kfree_data(sp_aligned, MAX_REALIGN_LEN);
		sp_aligned = NULL;
	}

	/* just in case */
	cc_clear(sizeof(iv), iv);
	cc_clear(sizeof(sbuf), sbuf);

	return 0;
}

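/*
 * Encrypt the ESP payload in mbuf chain m with AES-CBC. The IV from sav->iv
 * is written into the packet at ivoff, the payload is encrypted block by
 * block into a new destination chain, and the SA's IV is stirred afterwards
 * for the next packet.
 */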
int
esp_cbc_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *ivp, *dptr, *ivp_unaligned;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	VERIFY(ivoff <= INT_MAX);

	/* put iv into the packet */
	m_copyback(m, (int)ivoff, ivlen, sav->iv);
	ivp = (u_int8_t *) sav->iv;

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN; // full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN; // 1 block only in sbuf
		}

		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					kfree_data(sp_aligned, MAX_REALIGN_LEN);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(ivp)) {
			ivp_unaligned = NULL;
		} else {
			ivp_unaligned = ivp;
			ivp = ivp_aligned_buf;
			memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
		}
		// no need to check output pointer alignment
		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
		    (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched_enc)->encrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}
		if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
			ivp = ivp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* next iv */
		ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		kfree_data(sp_aligned, MAX_REALIGN_LEN);
		sp_aligned = NULL;
	}

	/* just in case */
	cc_clear(sizeof(sbuf), sbuf);
	key_sa_stir_iv(sav);

	return 0;
}

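/*
 * Encrypt a contiguous payload buffer with AES-CBC. The caller provides the
 * output IV buffer (filled from sav->iv) and an output buffer at least as
 * large as the input; the SA's IV is stirred on success.
 */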
int
esp_aes_cbc_encrypt_data(struct secasvar *sav, uint8_t *input_data,
    size_t input_data_len, struct newesp *esp_hdr, uint8_t *out_iv,
    size_t out_ivlen, uint8_t *output_data, size_t output_data_len)
{
	aes_encrypt_ctx *ctx = NULL;
	uint8_t *ivp = NULL;
	aes_rval rc = 0;

	ESP_CHECK_ARG(sav);
	ESP_CHECK_ARG(input_data);
	ESP_CHECK_ARG(esp_hdr);
	ESP_CHECK_ARG(out_iv);
	ESP_CHECK_ARG(output_data);

	VERIFY(input_data_len > 0);
	VERIFY(output_data_len >= input_data_len);

	VERIFY(out_ivlen == AES_BLOCKLEN);
	memcpy(out_iv, sav->iv, out_ivlen);
	ivp = (uint8_t *)sav->iv;

	if (input_data_len % AES_BLOCKLEN) {
		esp_log_err("payload length %zu must be multiple of "
		    "AES_BLOCKLEN, SPI 0x%08x", input_data_len, ntohl(sav->spi));
		return EINVAL;
	}

	ctx = (aes_encrypt_ctx *)(&(((aes_ctx *)sav->sched_enc)->encrypt));

	VERIFY((input_data_len >> 4) <= UINT32_MAX);
	if (__improbable((rc = aes_encrypt_cbc(input_data, ivp,
	    (unsigned int)(input_data_len >> 4), output_data, ctx)) != 0)) {
		esp_log_err("encrypt failed %d, SPI 0x%08x", rc, ntohl(sav->spi));
		return rc;
	}

	key_sa_stir_iv(sav);
	return 0;
}

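/*
 * Decrypt a contiguous AES-CBC payload buffer using the IV supplied by the
 * caller; the input length must be a multiple of AES_BLOCKLEN.
 */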
int
esp_aes_cbc_decrypt_data(struct secasvar *sav, uint8_t *input_data,
    size_t input_data_len, struct newesp *esp_hdr, uint8_t *iv,
    size_t ivlen, uint8_t *output_data, size_t output_data_len)
{
	aes_decrypt_ctx *ctx = NULL;
	aes_rval rc = 0;

	ESP_CHECK_ARG(sav);
	ESP_CHECK_ARG(input_data);
	ESP_CHECK_ARG(esp_hdr);
	ESP_CHECK_ARG(output_data);

	VERIFY(input_data_len > 0);
	VERIFY(output_data_len >= input_data_len);

	if (__improbable(ivlen != AES_BLOCKLEN)) {
		esp_log_err("ivlen(%zu) != AES_BLOCKLEN, SPI 0x%08x",
		    ivlen, ntohl(sav->spi));
		return EINVAL;
	}

	if (__improbable(input_data_len % AES_BLOCKLEN)) {
		esp_packet_log_err("input data length(%zu) must be a multiple of "
		    "AES_BLOCKLEN", input_data_len);
		return EINVAL;
	}

	ctx = (aes_decrypt_ctx *)(&(((aes_ctx *)sav->sched_enc)->decrypt));

	VERIFY((input_data_len >> 4) <= UINT32_MAX);
	if (__improbable((rc = aes_decrypt_cbc(input_data, iv,
	    (unsigned int)(input_data_len >> 4), output_data, ctx)) != 0)) {
		esp_log_err("decrypt failed %d, SPI 0x%08x", rc, ntohl(sav->spi));
		return rc;
	}

	return 0;
}

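/*
 * Return the space needed for the AES-GCM context: the aes_gcm_ctx header,
 * the corecrypto decrypt and encrypt GCM contexts, plus slack for aligning
 * the whole structure to ESP_GCM_ALIGN.
 */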
size_t
esp_gcm_schedlen(
	__unused const struct esp_algorithm *algo)
{
	return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN;
}

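/*
 * Set up the AES-GCM contexts for an SA. The last ESP_GCM_SALT_LEN bytes of
 * the key are the RFC 4106 salt and are not part of the AES key. For
 * explicit-IV SAs the encrypt context is seeded with salt||IV so that later
 * IVs can be generated by corecrypto; implicit-IV (RFC 8750) SAs derive the
 * nonce from the ESP sequence number instead.
 */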
int
esp_gcm_schedule( __unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched_enc, ESP_GCM_ALIGN);
	const u_int ivlen = sav->ivlen;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];
	int rc;

	ctx->decrypt = &ctx->ctxt[0];
	ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];

	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		return EINVAL;
	}

	rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt);
	if (rc) {
		return rc;
	}

	if (!implicit_iv) {
		memset(nonce, 0, ESP_GCM_SALT_LEN + ivlen);
		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
		memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen);

		rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
		cc_clear(sizeof(nonce), nonce);
		if (rc) {
			return rc;
		}
	} else {
		rc = aes_encrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->encrypt);
		if (rc) {
			return rc;
		}
	}

	rc = aes_encrypt_reset_gcm(ctx->encrypt);
	if (rc) {
		return rc;
	}

	return rc;
}

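/*
 * Return the on-the-wire IV length for this SA: zero for implicit-IV
 * (RFC 8750) SAs, otherwise the algorithm's configured IV length.
 */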
int
esp_gcm_ivlen(const struct esp_algorithm *algo,
    struct secasvar *sav)
{
	if (!algo) {
		panic("esp_gcm_ivlen: unknown algorithm");
	}

	if (sav != NULL && ((sav->flags & SADB_X_EXT_IIV) != 0)) {
		return 0;
	} else {
		return algo->ivlenval;
	}
}

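/*
 * Finalize the GCM encryption for the current packet and write the ICV
 * (authentication tag) into the caller's buffer.
 */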
int
esp_gcm_encrypt_finalize(struct secasvar *sav,
    unsigned char *tag, size_t tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched_enc, ESP_GCM_ALIGN);
	return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt);
}

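/*
 * Finalize the GCM decryption for the current packet using the received ICV
 * (authentication tag).
 */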
int
esp_gcm_decrypt_finalize(struct secasvar *sav,
    unsigned char *tag, size_t tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched_enc, ESP_GCM_ALIGN);
	return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt);
}

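/*
 * Encrypt (or, for GMAC-only SAs, merely authenticate) the ESP payload of
 * mbuf chain m in place with AES-GCM. The ESP header is always added to the
 * AAD; for GMAC-only the IV and the payload are authenticated but not
 * encrypted.
 */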
int
esp_gcm_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s = m;
	uint32_t soff = 0;      /* offset from the head of chain, to head of this mbuf */
	uint32_t sn = 0;        /* offset from the head of the mbuf, to meat */
	uint8_t *sp = NULL;
	aes_gcm_ctx *ctx;
	uint32_t len;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	struct newesp esp;
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];

	VERIFY(off <= INT_MAX);
	const size_t ivoff = off + sizeof(struct newesp);
	VERIFY(ivoff <= INT_MAX);
	const size_t bodyoff = ivoff + ivlen;
	VERIFY(bodyoff <= INT_MAX);

	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched_enc, ESP_GCM_ALIGN);

	if (aes_encrypt_reset_gcm(ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	/* Copy the ESP header */
	m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);

	/* Construct the IV */
	memset(nonce, 0, sizeof(nonce));
	if (!implicit_iv) {
		/* generate new iv */
		if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}

		/*
		 * The IV is now generated within corecrypto and
		 * is provided to ESP using aes_encrypt_inc_iv_gcm().
		 * This makes the sav->iv redundant and is no longer
		 * used in GCM operations. But we still copy the IV
		 * back to sav->iv to ensure that any future code reading
		 * this value will get the latest IV.
		 */
		memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
		m_copyback(m, (int)ivoff, ivlen, sav->iv);
	} else {
		/* Use the ESP sequence number in the header to form the
		 * nonce according to RFC 8750. The first 4 bytes are the
		 * salt value, the next 4 bytes are zeroes, and the final
		 * 4 bytes are the ESP sequence number.
		 */
		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
		memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
		if (aes_encrypt_set_iv_gcm((const unsigned char *)nonce, sizeof(nonce), ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: iv set failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}

	/* Add ESP header to Additional Authentication Data */
	if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: packet encryption ESP header AAD failure\n", __FUNCTION__));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}
	/* Add IV to Additional Authentication Data for GMAC-only mode */
	if (gmac_only) {
		if (aes_encrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: packet encryption IV AAD failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	/* Clear nonce */
	cc_clear(sizeof(nonce), nonce);

	/* skip headers/IV */
	while (s != NULL && soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (uint32_t)bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}

	/* Encrypt (or add to AAD) payload */
	while (s != NULL && soff < m->m_pkthdr.len) {
		/* skip empty mbufs */
		if ((len = s->m_len - sn) != 0) {
			sp = mtod(s, uint8_t *) + sn;

			if (!gmac_only) {
				if (aes_encrypt_gcm(sp, len, sp, ctx->encrypt)) {
					ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			} else {
				if (aes_encrypt_aad_gcm(sp, len, ctx->encrypt)) {
					ipseclog((LOG_ERR, "%s: failed to add data to AAD\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			}
		}

		sn = 0;
		soff += s->m_len;
		s = s->m_next;
	}

	if (s == NULL && soff != m->m_pkthdr.len) {
		ipseclog((LOG_ERR, "%s: not enough mbufs %d %d, SPI 0x%08x",
		    __FUNCTION__, soff, m->m_pkthdr.len, ntohl(sav->spi)));
		m_freem(m);
		return EFBIG;
	}

	return 0;
}

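/*
 * Decrypt (or, for GMAC-only SAs, merely authenticate) the ESP payload of
 * mbuf chain m in place with AES-GCM, reconstructing the nonce from the salt
 * and either the packet IV or, for implicit-IV SAs, the ESP sequence number.
 */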
int
esp_gcm_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s = m;
	uint32_t soff = 0;      /* offset from the head of chain, to head of this mbuf */
	uint32_t sn = 0;        /* offset from the head of the mbuf, to meat */
	uint8_t *sp = NULL;
	aes_gcm_ctx *ctx;
	uint32_t len;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	struct newesp esp;
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];

	VERIFY(off <= INT_MAX);
	const size_t ivoff = off + sizeof(struct newesp);
	VERIFY(ivoff <= INT_MAX);
	const size_t bodyoff = ivoff + ivlen;
	VERIFY(bodyoff <= INT_MAX);

	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

	/* Copy the ESP header */
	m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);

	/* Construct IV starting with salt */
	memset(nonce, 0, sizeof(nonce));
	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	if (!implicit_iv) {
		/* grab IV from packet */
		u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4)));
		m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);
		memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen);
		/* just in case */
		cc_clear(sizeof(iv), iv);
	} else {
		/* Use the ESP sequence number in the header to form the
		 * rest of the nonce according to RFC 8750.
		 */
		memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
	}

	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched_enc, ESP_GCM_ALIGN);
	if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
		ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}

	/* Add ESP header to Additional Authentication Data */
	if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
		ipseclog((LOG_ERR, "%s: packet decryption ESP header AAD failure\n", __FUNCTION__));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}

	/* Add IV to Additional Authentication Data for GMAC-only mode */
	if (gmac_only) {
		if (aes_decrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: packet decryption IV AAD failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	/* Clear nonce */
	cc_clear(sizeof(nonce), nonce);

	/* skip headers/IV */
	while (s != NULL && soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (uint32_t)bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}

	/* Decrypt (or just authenticate) payload */
	while (s != NULL && soff < m->m_pkthdr.len) {
		/* skip empty mbufs */
		if ((len = s->m_len - sn) != 0) {
			sp = mtod(s, uint8_t *) + sn;

			if (!gmac_only) {
				if (aes_decrypt_gcm(sp, len, sp, ctx->decrypt)) {
					ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			} else {
				if (aes_decrypt_aad_gcm(sp, len, ctx->decrypt)) {
					ipseclog((LOG_ERR, "%s: failed to add data to AAD\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			}
		}

		sn = 0;
		soff += s->m_len;
		s = s->m_next;
	}

	if (s == NULL && soff != m->m_pkthdr.len) {
		ipseclog((LOG_ERR, "%s: not enough mbufs %d %d, SPI 0x%08x",
		    __FUNCTION__, soff, m->m_pkthdr.len, ntohl(sav->spi)));
		m_freem(m);
		return EFBIG;
	}

	return 0;
}

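/*
 * AES-GCM encrypt a contiguous payload buffer. The nonce is built from the
 * key's salt and either a freshly generated IV (returned via out_iv) or, for
 * implicit-IV SAs, the ESP sequence number; the ESP header (and, for
 * GMAC-only, the IV and the payload) is authenticated as AAD.
 */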
int
esp_aes_gcm_encrypt_data(struct secasvar *sav, uint8_t *input_data,
    size_t input_data_len, struct newesp *esp_hdr, uint8_t *out_iv,
    size_t ivlen, uint8_t *output_data, size_t output_data_len)
{
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN] = {};
	int rc = 0;     // return code of corecrypto operations

	ESP_CHECK_ARG(sav);
	ESP_CHECK_ARG(input_data);
	ESP_CHECK_ARG(esp_hdr);
	ESP_CHECK_ARG(output_data);

	VERIFY(input_data_len > 0);
	VERIFY(output_data_len >= input_data_len);

	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) == SADB_X_EXT_IIV);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);

	if (__improbable(implicit_iv && gmac_only)) {
		esp_log_err("IIV and GMAC-only not supported together, SPI 0x%08x\n",
		    ntohl(sav->spi));
		return EINVAL;
	}

	aes_gcm_ctx *ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched_enc, ESP_GCM_ALIGN);
	if (__improbable((rc = aes_encrypt_reset_gcm(ctx->encrypt)) != 0)) {
		esp_log_err("Context reset failure %d, SPI 0x%08x\n",
		    rc, ntohl(sav->spi));
		return rc;
	}

	if (implicit_iv) {
		VERIFY(out_iv == NULL);
		VERIFY(ivlen == 0);

		/* Use the ESP sequence number in the header to form the
		 * nonce according to RFC 8750. The first 4 bytes are the
		 * salt value, the next 4 bytes are zeroes, and the final
		 * 4 bytes are the ESP sequence number.
		 */
		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) -
		    ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
		memcpy(nonce + sizeof(nonce) - sizeof(esp_hdr->esp_seq),
		    &esp_hdr->esp_seq, sizeof(esp_hdr->esp_seq));
		if (__improbable((rc = aes_encrypt_set_iv_gcm((const unsigned char *)nonce,
		    sizeof(nonce), ctx->encrypt)) != 0)) {
			esp_log_err("Set IV failure %d, SPI 0x%08x\n",
			    rc, ntohl(sav->spi));
			cc_clear(sizeof(nonce), nonce);
			return rc;
		}
	} else {
		ESP_CHECK_ARG(out_iv);
		VERIFY(ivlen == ESP_GCM_IVLEN);

		/* generate new iv */
		if (__improbable((rc = aes_encrypt_inc_iv_gcm((unsigned char *)nonce,
		    ctx->encrypt)) != 0)) {
			esp_log_err("IV generation failure %d, SPI 0x%08x\n",
			    rc, ntohl(sav->spi));
			cc_clear(sizeof(nonce), nonce);
			return rc;
		}

		memcpy(out_iv, (nonce + ESP_GCM_SALT_LEN), ESP_GCM_IVLEN);
	}

	/* Set Additional Authentication Data */
	if (__improbable((rc = aes_encrypt_aad_gcm((unsigned char*)esp_hdr,
	    sizeof(*esp_hdr), ctx->encrypt)) != 0)) {
		esp_log_err("Set AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
		cc_clear(sizeof(nonce), nonce);
		return rc;
	}

	/* Add IV to Additional Authentication Data for GMAC-only mode */
	if (gmac_only) {
		if (__improbable((rc = aes_encrypt_aad_gcm(nonce +
		    ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->encrypt)) != 0)) {
			esp_log_err("Packet encryption IV AAD failure %d, SPI 0x%08x\n",
			    rc, ntohl(sav->spi));
			cc_clear(sizeof(nonce), nonce);
			return rc;
		}
	}

	cc_clear(sizeof(nonce), nonce);

	if (gmac_only) {
		if (__improbable((rc = aes_encrypt_aad_gcm(input_data, (unsigned int)input_data_len,
		    ctx->encrypt)) != 0)) {
			esp_log_err("set aad failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
			return rc;
		}
		memcpy(output_data, input_data, input_data_len);
	} else {
		if (__improbable((rc = aes_encrypt_gcm(input_data, (unsigned int)input_data_len,
		    output_data, ctx->encrypt)) != 0)) {
			esp_log_err("encrypt failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
			return rc;
		}
	}

	return 0;
}

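/*
 * AES-GCM decrypt a contiguous payload buffer, mirroring
 * esp_aes_gcm_encrypt_data(): rebuild the nonce from the salt plus the
 * explicit or implicit IV, authenticate the ESP header as AAD, then decrypt
 * (or, for GMAC-only, copy through and authenticate) the payload.
 */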
int
esp_aes_gcm_decrypt_data(struct secasvar *sav, uint8_t *input_data,
    size_t input_data_len, struct newesp *esp_hdr, uint8_t *iv, size_t ivlen,
    uint8_t *output_data, size_t output_data_len)
{
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN] = {};
	aes_gcm_ctx *ctx = NULL;
	int rc = 0;

	ESP_CHECK_ARG(sav);
	ESP_CHECK_ARG(input_data);
	ESP_CHECK_ARG(esp_hdr);
	ESP_CHECK_ARG(output_data);

	VERIFY(input_data_len > 0);
	VERIFY(output_data_len >= input_data_len);

	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) == SADB_X_EXT_IIV);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);

	if (__improbable(implicit_iv && gmac_only)) {
		esp_log_err("IIV and GMAC-only not supported together, SPI 0x%08x\n",
		    ntohl(sav->spi));
		return EINVAL;
	}

	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) -
	    ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);

	if (implicit_iv) {
		VERIFY(iv == NULL);
		VERIFY(ivlen == 0);

		/* Use the ESP sequence number in the header to form the
		 * rest of the nonce according to RFC 8750.
		 */
		memcpy(nonce + sizeof(nonce) - sizeof(esp_hdr->esp_seq), &esp_hdr->esp_seq, sizeof(esp_hdr->esp_seq));
	} else {
		ESP_CHECK_ARG(iv);
		VERIFY(ivlen == ESP_GCM_IVLEN);

		memcpy(nonce + ESP_GCM_SALT_LEN, iv, ESP_GCM_IVLEN);
	}

	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched_enc, ESP_GCM_ALIGN);

	if (__improbable((rc = aes_decrypt_set_iv_gcm(nonce, sizeof(nonce),
	    ctx->decrypt)) != 0)) {
		esp_log_err("set iv failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
		cc_clear(sizeof(nonce), nonce);
		return rc;
	}

	/* Set Additional Authentication Data */
	if (__improbable((rc = aes_decrypt_aad_gcm((unsigned char *)esp_hdr, sizeof(*esp_hdr),
	    ctx->decrypt)) != 0)) {
		esp_log_err("AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
		cc_clear(sizeof(nonce), nonce);
		return rc;
	}

	/* Add IV to Additional Authentication Data for GMAC-only mode */
	if (gmac_only) {
		if (__improbable((rc = aes_decrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN,
		    ESP_GCM_IVLEN, ctx->decrypt)) != 0)) {
			esp_log_err("AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
			cc_clear(sizeof(nonce), nonce);
			return rc;
		}
	}

	cc_clear(sizeof(nonce), nonce);

	if (gmac_only) {
		if (__improbable((rc = aes_decrypt_aad_gcm(input_data, (unsigned int)input_data_len,
		    ctx->decrypt)) != 0)) {
			esp_log_err("AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
			return rc;
		}
		memcpy(output_data, input_data, input_data_len);
	} else {
		if (__improbable((rc = aes_decrypt_gcm(input_data, (unsigned int)input_data_len,
		    output_data, ctx->decrypt)) != 0)) {
			esp_log_err("decrypt failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
			return rc;
		}
	}

	return 0;
}