/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>

#include <kern/locks.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netinet6/esp_rijndael.h>

#include <libkern/crypto/aes.h>

#include <netkey/key.h>

#include <net/net_osdep.h>

#define MAX_REALIGN_LEN 2000
#define AES_BLOCKLEN 16
#define ESP_GCM_SALT_LEN 4 // RFC 4106 Section 4
#define ESP_GCM_IVLEN 8
#define ESP_GCM_ALIGN 16
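
/*
 * Per RFC 4106, the GCM nonce is the 4-byte salt taken from the tail of
 * the negotiated key material, followed by the 8-byte per-packet IV that
 * is carried explicitly in the ESP payload:
 *
 *	 0          3 4                    11
 *	+------------+----------------------+
 *	|    salt    |     explicit IV      |
 *	+------------+----------------------+
 *
 * Only the IV travels on the wire; the salt never leaves the SA.
 */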

extern lck_mtx_t *sadb_mutex;

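/*
 * GCM context block living in sav->sched. The flexible ctxt[] area holds
 * the two corecrypto contexts back to back: esp_gcm_schedule() points
 * decrypt at ctxt[0] and encrypt just past the end of the decrypt context.
 * esp_gcm_schedlen() reserves space for both, plus ESP_GCM_ALIGN bytes of
 * slack so the whole block can be realigned with P2ROUNDUP() before use.
 */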
typedef struct {
	ccgcm_ctx *decrypt;
	ccgcm_ctx *encrypt;
	ccgcm_ctx ctxt[0];
} aes_gcm_ctx;

int
esp_aes_schedlen(
	__unused const struct esp_algorithm *algo)
{
	return sizeof(aes_ctx);
}
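
/*
 * sav->sched is allocated by the ESP key-setup path with the size
 * reported by esp_aes_schedlen() above, so it has room for the pair of
 * expanded AES key schedules (one decrypt, one encrypt) built below.
 */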

int
esp_aes_schedule(
	__unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_ctx *ctx = (aes_ctx*)sav->sched;

	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);

	return 0;
}

/* The following two functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers, along
 * with the IV, in place.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can, based on the amount of source data in the current
 * source mbuf and the space remaining in the current destination
 * mbuf. The crypto code requires the data to be a multiple of 16
 * bytes; a separate buffer is used when a 16-byte block spans mbufs.
 *
 * m = mbuf chain
 * off = offset to ESP header
 *
 * local vars for source:
 *	soff = offset from beginning of the chain to the head of the
 *	    current mbuf.
 *	scut = last mbuf that contains headers to be retained
 *	scutoff = offset to end of the headers in scut
 *	s = the current mbuf
 *	sn = current offset to data in s (next source data to process)
 *
 * local vars for dest:
 *	d0 = head of chain
 *	d = current mbuf
 *	dn = current offset in d (next location to store result)
 */
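
/*
 * Illustrative walk-through (not part of the original code): suppose the
 * body spans two source mbufs holding 40 and 24 payload bytes. The loop
 * below first hands the crypto code 32 bytes (two whole AES blocks)
 * straight out of the first mbuf, then copies the block that straddles
 * the boundary (8 bytes from each mbuf) into sbuf and processes it there,
 * and finally handles the remaining 16-byte block in place in the second
 * mbuf.
 */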

int
esp_cbc_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	/* grab iv */
	m_copydata(m, ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;	// full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;		// 1 block only in sbuf
		}

		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i)
				d->m_len = i;
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		// next iv: CBC decryption chains on the last ciphertext block just consumed
		bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(iv, sizeof(iv));
	bzero(sbuf, sizeof(sbuf));

	return 0;
}

int
esp_cbc_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *ivp, *dptr, *ivp_unaligned;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	/* put iv into the packet */
	m_copyback(m, ivoff, ivlen, sav->iv);
	ivp = (u_int8_t *) sav->iv;

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;	// full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;		// 1 block only in sbuf
		}

		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i)
				d->m_len = i;
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(ivp)) {
			ivp_unaligned = NULL;
		} else {
			ivp_unaligned = ivp;
			ivp = ivp_aligned_buf;
			memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
		}
		// no need to check output pointer alignment
		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
		    (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}
		if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
			ivp = ivp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* next iv: CBC chains on the last ciphertext block just written */
		ivp = dptr + dn - AES_BLOCKLEN;	// last block encrypted

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(sbuf, sizeof(sbuf));
	key_sa_stir_iv(sav);

	return 0;
}

int
esp_gcm_schedlen(
	__unused const struct esp_algorithm *algo)
{
	return (sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN);
}

int
esp_gcm_schedule( __unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	u_int ivlen = sav->ivlen;
	unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];
	int rc;

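	/*
	 * Carve the flexible ctxt[] area into the two corecrypto contexts:
	 * the decrypt context occupies the first aes_decrypt_get_ctx_size_gcm()
	 * bytes and the encrypt context starts right after it (the index is
	 * scaled because ctxt[] is an array of ccgcm_ctx).
	 */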
	ctx->decrypt = &ctx->ctxt[0];
	ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];

	rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ctx->decrypt);
	if (rc) {
		return (rc);
	}

	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
	memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	memcpy(nonce+ESP_GCM_SALT_LEN, sav->iv, ivlen);

	rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
	/* The nonce holds the secret salt; don't leave a copy on the stack. */
	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
	if (rc) {
		return (rc);
	}

	rc = aes_encrypt_reset_gcm(ctx->encrypt);
	if (rc) {
		return (rc);
	}

	return (rc);
}

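/*
 * Finalize the GCM state for this SA: the encrypt side emits the ICV for
 * the packet just processed, while the decrypt side checks the received
 * ICV (a mismatch is reported through the return code). RFC 4106 allows
 * ICVs of 8, 12, or 16 octets; the caller passes the negotiated length
 * in tag_bytes.
 */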
int
esp_gcm_encrypt_finalize(struct secasvar *sav,
    unsigned char *tag, unsigned int tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	return (aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt));
}

int
esp_gcm_decrypt_finalize(struct secasvar *sav,
    unsigned char *tag, unsigned int tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	return (aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt));
}

int
esp_gcm_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL;
	aes_gcm_ctx *ctx;
	struct mbuf *scut;
	int scutoff;
	int i, len;
	unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];

	if (ivlen != ESP_GCM_IVLEN) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	bzero(nonce, ESP_GCM_SALT_LEN+ivlen);
	/* generate new iv */
	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);

	if (aes_encrypt_reset_gcm(ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	/*
	 * The IV is now generated within corecrypto and
	 * is provided to ESP using aes_encrypt_inc_iv_gcm().
	 * This makes sav->iv redundant; it is no longer
	 * used in GCM operations. But we still copy the IV
	 * back to sav->iv to ensure that any future code reading
	 * this value will get the latest IV.
	 */
	memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
	m_copyback(m, ivoff, ivlen, sav->iv);
	bzero(nonce, ESP_GCM_SALT_LEN+ivlen);

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

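	/*
	 * Set Additional Authentication Data. For new-style ESP, the AAD is
	 * the ESP header itself: the 4-byte SPI followed by the 4-byte
	 * sequence number, i.e. struct newesp (RFC 4106 Section 5).
	 */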
	if (!(sav->flags & SADB_X_EXT_OLD)) {
		struct newesp esp;
		m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
		if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: packet encryption AAD failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		sp = mtod(s, u_int8_t *) + sn;
		len = s->m_len - sn;

		/* destination */
		if (!d || (dn + len > d->m_len)) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);

			if (d->m_len > i)
				d->m_len = i;

			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}

		if (aes_encrypt_gcm(sp, len, dptr+dn, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
			m_freem(m);
			/* free the partially built dest chain and realign buffer, too */
			if (d0) {
				m_freem(d0);
			}
			if (sp_aligned != NULL) {
				FREE(sp_aligned, M_SECA);
				sp_aligned = NULL;
			}
			return EINVAL;
		}

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	return 0;
}

int
esp_gcm_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL;
	aes_gcm_ctx *ctx;
	struct mbuf *scut;
	int scutoff;
	int i, len;
	unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];

	if (ivlen != ESP_GCM_IVLEN) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

	/* grab iv */
	m_copydata(m, ivoff, ivlen, (caddr_t) iv);

	/* Set IV */
	memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	memcpy(nonce+ESP_GCM_SALT_LEN, iv, ivlen);

	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
		ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
		m_freem(m);
		bzero(nonce, sizeof(nonce));
		return EINVAL;
	}
	bzero(nonce, sizeof(nonce));

	/* Set Additional Authentication Data */
	if (!(sav->flags & SADB_X_EXT_OLD)) {
		struct newesp esp;
		m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
		if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
			/* free the packet, as on every other error path */
			m_freem(m);
			return EINVAL;
		}
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		sp = mtod(s, u_int8_t *) + sn;
		len = s->m_len - sn;

		/* destination */
		if (!d || (dn + len > d->m_len)) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);

			if (d->m_len > i)
				d->m_len = i;

			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* Decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment

		if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
			m_freem(m);
			/* free the partially built dest chain and realign buffer, too */
			if (d0) {
				m_freem(d0);
			}
			if (sp_aligned != NULL) {
				FREE(sp_aligned, M_SECA);
				sp_aligned = NULL;
			}
			return EINVAL;
		}

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(iv, sizeof(iv));

	return 0;
}