1 | /* |
2 | * Copyright (c) 2015-2017 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | /* TCP-cache to store and retrieve TCP-related information */ |
30 | |
31 | #include <net/flowhash.h> |
32 | #include <net/route.h> |
33 | #include <net/necp.h> |
34 | #include <netinet/in_pcb.h> |
35 | #include <netinet/mptcp_var.h> |
36 | #include <netinet/tcp_cache.h> |
37 | #include <netinet/tcp_seq.h> |
38 | #include <netinet/tcp_var.h> |
39 | #include <kern/locks.h> |
40 | #include <sys/queue.h> |
41 | #include <dev/random/randomdev.h> |
42 | |
43 | typedef union { |
44 | struct in_addr addr; |
45 | struct in6_addr addr6; |
46 | } in_4_6_addr; |
47 | |
48 | struct tcp_heuristic_key { |
49 | union { |
50 | uint8_t thk_net_signature[IFNET_SIGNATURELEN]; |
51 | in_4_6_addr thk_ip; |
52 | }; |
53 | sa_family_t thk_family; |
54 | }; |
55 | |
56 | struct tcp_heuristic { |
57 | SLIST_ENTRY(tcp_heuristic) list; |
58 | |
59 | uint32_t th_last_access; |
60 | |
61 | struct tcp_heuristic_key th_key; |
62 | |
63 | char th_val_start[0]; /* Marker for memsetting to 0 */ |
64 | |
65 | uint8_t th_tfo_data_loss; /* The number of times a SYN+data has been lost */ |
66 | uint8_t th_tfo_req_loss; /* The number of times a SYN+cookie-req has been lost */ |
67 | uint8_t th_tfo_data_rst; /* The number of times a SYN+data has received a RST */ |
68 | uint8_t th_tfo_req_rst; /* The number of times a SYN+cookie-req has received a RST */ |
69 | uint8_t th_mptcp_loss; /* The number of times a SYN+MP_CAPABLE has been lost */ |
70 | uint8_t th_ecn_loss; /* The number of times a SYN+ecn has been lost */ |
71 | uint8_t th_ecn_aggressive; /* The number of times we did an aggressive fallback */ |
72 | uint8_t th_ecn_droprst; /* The number of times ECN connections received a RST after first data pkt */ |
73 | uint8_t th_ecn_droprxmt; /* The number of times ECN connection is dropped after multiple retransmits */ |
74 | uint8_t th_ecn_synrst; /* number of times RST was received in response to an ECN enabled SYN */ |
75 | uint32_t th_tfo_enabled_time; /* The moment when we reenabled TFO after backing off */ |
76 | uint32_t th_tfo_backoff_until; /* Time until when we should not try out TFO */ |
77 | uint32_t th_tfo_backoff; /* Current backoff timer */ |
78 | uint32_t th_mptcp_backoff; /* Time until when we should not try out MPTCP */ |
79 | uint32_t th_ecn_backoff; /* Time until when we should not try out ECN */ |
80 | |
81 | uint8_t th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */ |
82 | th_mptcp_in_backoff:1; /* Are we avoiding MPTCP due to the backoff timer? */ |
83 | |
84 | char th_val_end[0]; /* Marker for memsetting to 0 */ |
85 | }; |
86 | |
87 | struct tcp_heuristics_head { |
88 | SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics; |
89 | |
90 | /* Per-hashbucket lock to avoid lock-contention */ |
91 | lck_mtx_t thh_mtx; |
92 | }; |
93 | |
94 | struct tcp_cache_key { |
95 | sa_family_t tck_family; |
96 | |
97 | struct tcp_heuristic_key tck_src; |
98 | in_4_6_addr tck_dst; |
99 | }; |
100 | |
101 | struct tcp_cache { |
102 | SLIST_ENTRY(tcp_cache) list; |
103 | |
104 | u_int32_t tc_last_access; |
105 | |
106 | struct tcp_cache_key tc_key; |
107 | |
108 | u_int8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX]; |
109 | u_int8_t tc_tfo_cookie_len; |
110 | }; |
111 | |
112 | struct tcp_cache_head { |
113 | SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches; |
114 | |
115 | /* Per-hashbucket lock to avoid lock-contention */ |
116 | lck_mtx_t tch_mtx; |
117 | }; |
118 | |
119 | struct tcp_cache_key_src { |
120 | struct ifnet *ifp; |
121 | in_4_6_addr laddr; |
122 | in_4_6_addr faddr; |
123 | int af; |
124 | }; |
125 | |
126 | static u_int32_t tcp_cache_hash_seed; |
127 | |
128 | size_t tcp_cache_size; |
129 | |
130 | /* |
131 | * The maximum depth of the hash-bucket. This way we limit the tcp_cache to |
132 | * TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection |
133 | */ |
134 | #define TCP_CACHE_BUCKET_SIZE 5 |
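
/*
 * For example, with the maximum tcp_cache_size of 1024 (see tcp_cache_init()),
 * each table is bounded at 5 * 1024 = 5120 entries.
 */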
135 | |
136 | static struct tcp_cache_head *tcp_cache; |
137 | |
138 | decl_lck_mtx_data(, tcp_cache_mtx); |
139 | |
140 | static lck_attr_t *tcp_cache_mtx_attr; |
141 | static lck_grp_t *tcp_cache_mtx_grp; |
142 | static lck_grp_attr_t *tcp_cache_mtx_grp_attr; |
143 | |
144 | static struct tcp_heuristics_head *tcp_heuristics; |
145 | |
146 | decl_lck_mtx_data(, tcp_heuristics_mtx); |
147 | |
148 | static lck_attr_t *tcp_heuristic_mtx_attr; |
149 | static lck_grp_t *tcp_heuristic_mtx_grp; |
150 | static lck_grp_attr_t *tcp_heuristic_mtx_grp_attr; |
151 | |
152 | static uint32_t tcp_backoff_maximum = 65536; |
153 | |
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_backoff_maximum, 0, "Maximum time (minutes) for which we won't try TFO");
156 | |
157 | SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, |
	static int, tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN");
159 | |
160 | SYSCTL_SKMEM_TCP_INT(OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED, |
	static int, disable_tcp_heuristics, 0, "Set to 1 to disable all TCP heuristics (TFO, ECN, MPTCP)");
162 | |
163 | static uint32_t tcp_min_to_hz(uint32_t minutes) |
164 | { |
165 | if (minutes > 65536) |
166 | return ((uint32_t)65536 * 60 * TCP_RETRANSHZ); |
167 | |
168 | return (minutes * 60 * TCP_RETRANSHZ); |
169 | } |
170 | |
171 | /* |
172 | * This number is coupled with tcp_ecn_timeout, because we want to prevent |
173 | * integer overflow. Need to find an unexpensive way to prevent integer overflow |
174 | * while still allowing a dynamic sysctl. |
175 | */ |
176 | #define TCP_CACHE_OVERFLOW_PROTECT 9 |
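
/*
 * With the default tcp_ecn_timeout of 60 minutes, the largest shifted value
 * is tcp_min_to_hz(60) << 9 = 3,600,000 * 512, which still fits in a
 * uint32_t; a much larger shift count could overflow, hence the coupling.
 */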
177 | |
178 | /* Number of SYN-losses we accept */ |
179 | #define TFO_MAX_COOKIE_LOSS 2 |
180 | #define ECN_MAX_SYN_LOSS 2 |
181 | #define MPTCP_MAX_SYN_LOSS 2 |
182 | #define ECN_MAX_DROPRST 1 |
183 | #define ECN_MAX_DROPRXMT 4 |
184 | #define ECN_MAX_SYNRST 4 |
185 | |
186 | /* Flags for setting/unsetting loss-heuristics, limited to 4 bytes */ |
187 | #define TCPCACHE_F_TFO_REQ 0x01 |
188 | #define TCPCACHE_F_TFO_DATA 0x02 |
189 | #define TCPCACHE_F_ECN 0x04 |
190 | #define TCPCACHE_F_MPTCP 0x08 |
191 | #define TCPCACHE_F_ECN_DROPRST 0x10 |
192 | #define TCPCACHE_F_ECN_DROPRXMT 0x20 |
193 | #define TCPCACHE_F_TFO_REQ_RST 0x40 |
194 | #define TCPCACHE_F_TFO_DATA_RST 0x80 |
195 | #define TCPCACHE_F_ECN_SYNRST 0x100 |
196 | |
/*
 * Counters that have reached this level are reset once the ECN backoff
 * expires, so that ECN is retried (see tcp_heuristic_do_ecn_common()).
 */
198 | #define ECN_RETRY_LIMIT 9 |
199 | |
200 | #define TCP_CACHE_INC_IFNET_STAT(_ifp_, _af_, _stat_) { \ |
201 | if ((_ifp_) != NULL) { \ |
202 | if ((_af_) == AF_INET6) { \ |
203 | (_ifp_)->if_ipv6_stat->_stat_++;\ |
204 | } else { \ |
205 | (_ifp_)->if_ipv4_stat->_stat_++;\ |
206 | }\ |
207 | }\ |
208 | } |
209 | |
210 | /* |
 * Round up to the next higher power of 2. See "Bit Twiddling Hacks".
212 | * |
213 | * Might be worth moving this to a library so that others |
214 | * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop. |
215 | */ |
216 | static u_int32_t tcp_cache_roundup2(u_int32_t a) |
217 | { |
218 | a--; |
219 | a |= a >> 1; |
220 | a |= a >> 2; |
221 | a |= a >> 4; |
222 | a |= a >> 8; |
223 | a |= a >> 16; |
224 | a++; |
225 | |
226 | return a; |
227 | } |
228 | |
229 | static void tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key) |
230 | { |
231 | struct ifnet *ifp = tcks->ifp; |
232 | uint8_t len = sizeof(key->thk_net_signature); |
233 | uint16_t flags; |
234 | |
235 | if (tcks->af == AF_INET6) { |
236 | int ret; |
237 | |
238 | key->thk_family = AF_INET6; |
239 | ret = ifnet_get_netsignature(ifp, AF_INET6, &len, &flags, |
240 | key->thk_net_signature); |
241 | |
242 | /* |
243 | * ifnet_get_netsignature only returns EINVAL if ifn is NULL |
244 | * (we made sure that in the other cases it does not). So, |
245 | * in this case we should take the connection's address. |
246 | */ |
247 | if (ret == ENOENT || ret == EINVAL) |
248 | memcpy(&key->thk_ip.addr6, &tcks->laddr.addr6, sizeof(struct in6_addr)); |
249 | } else { |
250 | int ret; |
251 | |
252 | key->thk_family = AF_INET; |
253 | ret = ifnet_get_netsignature(ifp, AF_INET, &len, &flags, |
254 | key->thk_net_signature); |
255 | |
256 | /* |
257 | * ifnet_get_netsignature only returns EINVAL if ifn is NULL |
258 | * (we made sure that in the other cases it does not). So, |
259 | * in this case we should take the connection's address. |
260 | */ |
261 | if (ret == ENOENT || ret == EINVAL) |
262 | memcpy(&key->thk_ip.addr, &tcks->laddr.addr, sizeof(struct in_addr)); |
263 | } |
264 | } |
265 | |
266 | static u_int16_t tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key) |
267 | { |
268 | u_int32_t hash; |
269 | |
270 | bzero(key, sizeof(struct tcp_cache_key)); |
271 | |
272 | tcp_cache_hash_src(tcks, &key->tck_src); |
273 | |
274 | if (tcks->af == AF_INET6) { |
275 | key->tck_family = AF_INET6; |
276 | memcpy(&key->tck_dst.addr6, &tcks->faddr.addr6, |
277 | sizeof(struct in6_addr)); |
278 | } else { |
279 | key->tck_family = AF_INET; |
280 | memcpy(&key->tck_dst.addr, &tcks->faddr.addr, |
281 | sizeof(struct in_addr)); |
282 | } |
283 | |
284 | hash = net_flowhash(key, sizeof(struct tcp_cache_key), |
285 | tcp_cache_hash_seed); |
286 | |
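	/*
	 * tcp_cache_size is a power of 2 (see tcp_cache_init()), so the
	 * mask below yields a valid bucket index.
	 */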
287 | return (hash & (tcp_cache_size - 1)); |
288 | } |
289 | |
290 | static void tcp_cache_unlock(struct tcp_cache_head *head) |
291 | { |
292 | lck_mtx_unlock(&head->tch_mtx); |
293 | } |
294 | |
295 | /* |
296 | * Make sure that everything that happens after tcp_getcache_with_lock() |
297 | * is short enough to justify that you hold the per-bucket lock!!! |
298 | * |
299 | * Otherwise, better build another lookup-function that does not hold the |
300 | * lock and you copy out the bits and bytes. |
301 | * |
302 | * That's why we provide the head as a "return"-pointer so that the caller |
303 | * can give it back to use for tcp_cache_unlock(). |
304 | */ |
305 | static struct tcp_cache *tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, |
306 | int create, struct tcp_cache_head **headarg) |
307 | { |
308 | struct tcp_cache *tpcache = NULL; |
309 | struct tcp_cache_head *head; |
310 | struct tcp_cache_key key; |
311 | u_int16_t hash; |
312 | int i = 0; |
313 | |
314 | hash = tcp_cache_hash(tcks, &key); |
315 | head = &tcp_cache[hash]; |
316 | |
317 | lck_mtx_lock(&head->tch_mtx); |
318 | |
319 | /*** First step: Look for the tcp_cache in our bucket ***/ |
320 | SLIST_FOREACH(tpcache, &head->tcp_caches, list) { |
321 | if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0) |
322 | break; |
323 | |
324 | i++; |
325 | } |
326 | |
327 | /*** Second step: If it's not there, create/recycle it ***/ |
328 | if ((tpcache == NULL) && create) { |
329 | if (i >= TCP_CACHE_BUCKET_SIZE) { |
330 | struct tcp_cache *oldest_cache = NULL; |
331 | u_int32_t max_age = 0; |
332 | |
333 | /* Look for the oldest tcp_cache in the bucket */ |
334 | SLIST_FOREACH(tpcache, &head->tcp_caches, list) { |
335 | u_int32_t age = tcp_now - tpcache->tc_last_access; |
336 | if (age > max_age) { |
337 | max_age = age; |
338 | oldest_cache = tpcache; |
339 | } |
340 | } |
341 | VERIFY(oldest_cache != NULL); |
342 | |
343 | tpcache = oldest_cache; |
344 | |
345 | /* We recycle, thus let's indicate that there is no cookie */ |
346 | tpcache->tc_tfo_cookie_len = 0; |
347 | } else { |
348 | /* Create a new cache and add it to the list */ |
349 | tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP, |
350 | M_NOWAIT | M_ZERO); |
351 | if (tpcache == NULL) |
352 | goto out_null; |
353 | |
354 | SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list); |
355 | } |
356 | |
357 | memcpy(&tpcache->tc_key, &key, sizeof(key)); |
358 | } |
359 | |
360 | if (tpcache == NULL) |
361 | goto out_null; |
362 | |
363 | /* Update timestamp for garbage collection purposes */ |
364 | tpcache->tc_last_access = tcp_now; |
365 | *headarg = head; |
366 | |
367 | return (tpcache); |
368 | |
369 | out_null: |
370 | tcp_cache_unlock(head); |
371 | return (NULL); |
372 | } |
373 | |
374 | static void tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks) |
375 | { |
376 | struct inpcb *inp = tp->t_inpcb; |
377 | memset(tcks, 0, sizeof(*tcks)); |
378 | |
379 | tcks->ifp = inp->inp_last_outifp; |
380 | |
381 | if (inp->inp_vflag & INP_IPV6) { |
382 | memcpy(&tcks->laddr.addr6, &inp->in6p_laddr, sizeof(struct in6_addr)); |
383 | memcpy(&tcks->faddr.addr6, &inp->in6p_faddr, sizeof(struct in6_addr)); |
384 | tcks->af = AF_INET6; |
385 | } else { |
386 | memcpy(&tcks->laddr.addr, &inp->inp_laddr, sizeof(struct in_addr)); |
387 | memcpy(&tcks->faddr.addr, &inp->inp_faddr, sizeof(struct in_addr)); |
388 | tcks->af = AF_INET; |
389 | } |
390 | |
391 | return; |
392 | } |
393 | |
394 | static void tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t len) |
395 | { |
396 | struct tcp_cache_head *head; |
397 | struct tcp_cache *tpcache; |
398 | |
399 | /* Call lookup/create function */ |
400 | tpcache = tcp_getcache_with_lock(tcks, 1, &head); |
401 | if (tpcache == NULL) |
402 | return; |
403 | |
404 | tpcache->tc_tfo_cookie_len = len > TFO_COOKIE_LEN_MAX ? |
405 | TFO_COOKIE_LEN_MAX : len; |
406 | memcpy(tpcache->tc_tfo_cookie, cookie, tpcache->tc_tfo_cookie_len); |
407 | |
408 | tcp_cache_unlock(head); |
409 | } |
410 | |
411 | void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len) |
412 | { |
413 | struct tcp_cache_key_src tcks; |
414 | |
415 | tcp_cache_key_src_create(tp, &tcks); |
416 | tcp_cache_set_cookie_common(&tcks, cookie, len); |
417 | } |
418 | |
419 | static int tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t *len) |
420 | { |
421 | struct tcp_cache_head *head; |
422 | struct tcp_cache *tpcache; |
423 | |
424 | /* Call lookup/create function */ |
425 | tpcache = tcp_getcache_with_lock(tcks, 1, &head); |
426 | if (tpcache == NULL) { |
427 | return (0); |
428 | } |
429 | |
430 | if (tpcache->tc_tfo_cookie_len == 0) { |
431 | tcp_cache_unlock(head); |
432 | return (0); |
433 | } |
434 | |
435 | /* |
436 | * Not enough space - this should never happen as it has been checked |
437 | * in tcp_tfo_check. So, fail here! |
438 | */ |
439 | VERIFY(tpcache->tc_tfo_cookie_len <= *len); |
440 | |
441 | memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len); |
442 | *len = tpcache->tc_tfo_cookie_len; |
443 | |
444 | tcp_cache_unlock(head); |
445 | |
446 | return (1); |
447 | } |
448 | |
449 | /* |
450 | * Get the cookie related to 'tp', and copy it into 'cookie', provided that len |
451 | * is big enough (len designates the available memory. |
452 | * Upon return, 'len' is set to the cookie's length. |
453 | * |
454 | * Returns 0 if we should request a cookie. |
455 | * Returns 1 if the cookie has been found and written. |
456 | */ |
457 | int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len) |
458 | { |
459 | struct tcp_cache_key_src tcks; |
460 | |
461 | tcp_cache_key_src_create(tp, &tcks); |
462 | return tcp_cache_get_cookie_common(&tcks, cookie, len); |
463 | } |
464 | |
465 | static unsigned int tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks) |
466 | { |
467 | struct tcp_cache_head *head; |
468 | struct tcp_cache *tpcache; |
469 | unsigned int cookie_len; |
470 | |
471 | /* Call lookup/create function */ |
472 | tpcache = tcp_getcache_with_lock(tcks, 1, &head); |
473 | if (tpcache == NULL) |
474 | return (0); |
475 | |
476 | cookie_len = tpcache->tc_tfo_cookie_len; |
477 | |
478 | tcp_cache_unlock(head); |
479 | |
480 | return cookie_len; |
481 | } |
482 | |
483 | unsigned int tcp_cache_get_cookie_len(struct tcpcb *tp) |
484 | { |
485 | struct tcp_cache_key_src tcks; |
486 | |
487 | tcp_cache_key_src_create(tp, &tcks); |
488 | return tcp_cache_get_cookie_len_common(&tcks); |
489 | } |
490 | |
491 | static u_int16_t tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key) |
492 | { |
493 | u_int32_t hash; |
494 | |
495 | bzero(key, sizeof(struct tcp_heuristic_key)); |
496 | |
497 | tcp_cache_hash_src(tcks, key); |
498 | |
499 | hash = net_flowhash(key, sizeof(struct tcp_heuristic_key), |
500 | tcp_cache_hash_seed); |
501 | |
502 | return (hash & (tcp_cache_size - 1)); |
503 | } |
504 | |
505 | static void tcp_heuristic_unlock(struct tcp_heuristics_head *head) |
506 | { |
507 | lck_mtx_unlock(&head->thh_mtx); |
508 | } |
509 | |
510 | /* |
511 | * Make sure that everything that happens after tcp_getheuristic_with_lock() |
512 | * is short enough to justify that you hold the per-bucket lock!!! |
513 | * |
514 | * Otherwise, better build another lookup-function that does not hold the |
515 | * lock and you copy out the bits and bytes. |
516 | * |
517 | * That's why we provide the head as a "return"-pointer so that the caller |
518 | * can give it back to use for tcp_heur_unlock(). |
519 | * |
520 | * |
521 | * ToDo - way too much code-duplication. We should create an interface to handle |
522 | * bucketized hashtables with recycling of the oldest element. |
523 | */ |
524 | static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks, |
525 | int create, struct tcp_heuristics_head **headarg) |
526 | { |
527 | struct tcp_heuristic *tpheur = NULL; |
528 | struct tcp_heuristics_head *head; |
529 | struct tcp_heuristic_key key; |
530 | u_int16_t hash; |
531 | int i = 0; |
532 | |
533 | hash = tcp_heuristics_hash(tcks, &key); |
534 | head = &tcp_heuristics[hash]; |
535 | |
536 | lck_mtx_lock(&head->thh_mtx); |
537 | |
538 | /*** First step: Look for the tcp_heur in our bucket ***/ |
539 | SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) { |
540 | if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0) |
541 | break; |
542 | |
543 | i++; |
544 | } |
545 | |
546 | /*** Second step: If it's not there, create/recycle it ***/ |
547 | if ((tpheur == NULL) && create) { |
548 | if (i >= TCP_CACHE_BUCKET_SIZE) { |
549 | struct tcp_heuristic *oldest_heur = NULL; |
550 | u_int32_t max_age = 0; |
551 | |
552 | /* Look for the oldest tcp_heur in the bucket */ |
553 | SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) { |
554 | u_int32_t age = tcp_now - tpheur->th_last_access; |
555 | if (age > max_age) { |
556 | max_age = age; |
557 | oldest_heur = tpheur; |
558 | } |
559 | } |
560 | VERIFY(oldest_heur != NULL); |
561 | |
562 | tpheur = oldest_heur; |
563 | |
			/* We recycle - zero everything between the start/end markers */
565 | bzero(tpheur->th_val_start, |
566 | tpheur->th_val_end - tpheur->th_val_start); |
567 | } else { |
568 | /* Create a new heuristic and add it to the list */ |
569 | tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP, |
570 | M_NOWAIT | M_ZERO); |
571 | if (tpheur == NULL) |
572 | goto out_null; |
573 | |
574 | SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list); |
575 | } |
576 | |
577 | /* |
578 | * Set to tcp_now, to make sure it won't be > than tcp_now in the |
579 | * near future. |
580 | */ |
581 | tpheur->th_ecn_backoff = tcp_now; |
582 | tpheur->th_tfo_backoff_until = tcp_now; |
583 | tpheur->th_mptcp_backoff = tcp_now; |
584 | tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout); |
585 | |
586 | memcpy(&tpheur->th_key, &key, sizeof(key)); |
587 | } |
588 | |
589 | if (tpheur == NULL) |
590 | goto out_null; |
591 | |
592 | /* Update timestamp for garbage collection purposes */ |
593 | tpheur->th_last_access = tcp_now; |
594 | *headarg = head; |
595 | |
596 | return (tpheur); |
597 | |
598 | out_null: |
599 | tcp_heuristic_unlock(head); |
600 | return (NULL); |
601 | } |
602 | |
static void tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int32_t flags)
604 | { |
605 | struct tcp_heuristics_head *head; |
606 | struct tcp_heuristic *tpheur; |
607 | |
608 | /* |
609 | * Don't attempt to create it! Keep the heuristics clean if the |
610 | * server does not support TFO. This reduces the lookup-cost on |
611 | * our side. |
612 | */ |
613 | tpheur = tcp_getheuristic_with_lock(tcks, 0, &head); |
614 | if (tpheur == NULL) |
615 | return; |
616 | |
617 | if (flags & TCPCACHE_F_TFO_DATA) { |
618 | tpheur->th_tfo_data_loss = 0; |
619 | } |
620 | |
621 | if (flags & TCPCACHE_F_TFO_REQ) { |
622 | tpheur->th_tfo_req_loss = 0; |
623 | } |
624 | |
625 | if (flags & TCPCACHE_F_TFO_DATA_RST) { |
626 | tpheur->th_tfo_data_rst = 0; |
627 | } |
628 | |
629 | if (flags & TCPCACHE_F_TFO_REQ_RST) { |
630 | tpheur->th_tfo_req_rst = 0; |
631 | } |
632 | |
633 | if (flags & TCPCACHE_F_ECN) { |
634 | tpheur->th_ecn_loss = 0; |
635 | tpheur->th_ecn_synrst = 0; |
636 | } |
637 | |
638 | if (flags & TCPCACHE_F_MPTCP) |
639 | tpheur->th_mptcp_loss = 0; |
640 | |
641 | tcp_heuristic_unlock(head); |
642 | } |
643 | |
644 | void tcp_heuristic_tfo_success(struct tcpcb *tp) |
645 | { |
646 | struct tcp_cache_key_src tcks; |
	uint32_t flag = 0;
648 | |
649 | tcp_cache_key_src_create(tp, &tcks); |
650 | |
651 | if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) |
		flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ |
		    TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
654 | if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) |
655 | flag = (TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST); |
656 | |
657 | tcp_heuristic_reset_counters(&tcks, flag); |
658 | } |
659 | |
660 | void tcp_heuristic_mptcp_success(struct tcpcb *tp) |
661 | { |
662 | struct tcp_cache_key_src tcks; |
663 | |
664 | tcp_cache_key_src_create(tp, &tcks); |
665 | tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_MPTCP); |
666 | } |
667 | |
668 | void tcp_heuristic_ecn_success(struct tcpcb *tp) |
669 | { |
670 | struct tcp_cache_key_src tcks; |
671 | |
672 | tcp_cache_key_src_create(tp, &tcks); |
673 | tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN); |
674 | } |
675 | |
676 | static void __tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur) |
677 | { |
678 | if (tpheur->th_tfo_in_backoff) |
679 | return; |
680 | |
681 | tpheur->th_tfo_in_backoff = 1; |
682 | |
683 | if (tpheur->th_tfo_enabled_time) { |
684 | uint32_t old_backoff = tpheur->th_tfo_backoff; |
685 | |
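		/*
		 * Credit the time TFO was enabled against the remaining
		 * backoff; if the unsigned subtraction wraps past zero,
		 * fall back to the initial timeout.
		 */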
686 | tpheur->th_tfo_backoff -= (tcp_now - tpheur->th_tfo_enabled_time); |
687 | if (tpheur->th_tfo_backoff > old_backoff) |
688 | tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout); |
689 | } |
690 | |
691 | tpheur->th_tfo_backoff_until = tcp_now + tpheur->th_tfo_backoff; |
692 | |
693 | /* Then, increase the backoff time */ |
694 | tpheur->th_tfo_backoff *= 2; |
695 | |
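	/* Once past the maximum, restart from the initial timeout */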
696 | if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) |
697 | tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout); |
698 | } |
699 | |
700 | static void tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks) |
701 | { |
702 | struct tcp_heuristics_head *head; |
703 | struct tcp_heuristic *tpheur; |
704 | |
705 | tpheur = tcp_getheuristic_with_lock(tcks, 1, &head); |
706 | if (tpheur == NULL) |
707 | return; |
708 | |
709 | __tcp_heuristic_tfo_middlebox_common(tpheur); |
710 | |
711 | tcp_heuristic_unlock(head); |
712 | } |
713 | |
714 | static void tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks, |
715 | u_int32_t flags) |
716 | { |
717 | struct tcp_heuristics_head *head; |
718 | struct tcp_heuristic *tpheur; |
719 | |
720 | tpheur = tcp_getheuristic_with_lock(tcks, 1, &head); |
721 | if (tpheur == NULL) |
722 | return; |
723 | |
724 | /* Limit to prevent integer-overflow during exponential backoff */ |
725 | if ((flags & TCPCACHE_F_TFO_DATA) && tpheur->th_tfo_data_loss < TCP_CACHE_OVERFLOW_PROTECT) { |
726 | tpheur->th_tfo_data_loss++; |
727 | |
728 | if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) |
729 | __tcp_heuristic_tfo_middlebox_common(tpheur); |
730 | } |
731 | |
732 | if ((flags & TCPCACHE_F_TFO_REQ) && tpheur->th_tfo_req_loss < TCP_CACHE_OVERFLOW_PROTECT) { |
733 | tpheur->th_tfo_req_loss++; |
734 | |
735 | if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) |
736 | __tcp_heuristic_tfo_middlebox_common(tpheur); |
737 | } |
738 | |
739 | if ((flags & TCPCACHE_F_TFO_DATA_RST) && tpheur->th_tfo_data_rst < TCP_CACHE_OVERFLOW_PROTECT) { |
740 | tpheur->th_tfo_data_rst++; |
741 | |
742 | if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) |
743 | __tcp_heuristic_tfo_middlebox_common(tpheur); |
744 | } |
745 | |
746 | if ((flags & TCPCACHE_F_TFO_REQ_RST) && tpheur->th_tfo_req_rst < TCP_CACHE_OVERFLOW_PROTECT) { |
747 | tpheur->th_tfo_req_rst++; |
748 | |
749 | if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) |
750 | __tcp_heuristic_tfo_middlebox_common(tpheur); |
751 | } |
752 | |
753 | if ((flags & TCPCACHE_F_ECN) && tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT) { |
754 | tpheur->th_ecn_loss++; |
755 | if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) { |
756 | tcpstat.tcps_ecn_fallback_synloss++; |
757 | TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_synloss); |
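			/*
			 * Exponential backoff: shift 0 at the second
			 * SYN-loss, 1 at the third, and so on, bounded
			 * by TCP_CACHE_OVERFLOW_PROTECT above.
			 */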
758 | tpheur->th_ecn_backoff = tcp_now + |
759 | (tcp_min_to_hz(tcp_ecn_timeout) << |
760 | (tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS)); |
761 | } |
762 | } |
763 | |
764 | if ((flags & TCPCACHE_F_MPTCP) && |
765 | tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT) { |
766 | tpheur->th_mptcp_loss++; |
767 | if (tpheur->th_mptcp_loss >= MPTCP_MAX_SYN_LOSS) { |
768 | /* |
769 | * Yes, we take tcp_ecn_timeout, to avoid adding yet |
770 | * another sysctl that is just used for testing. |
771 | */ |
772 | tpheur->th_mptcp_backoff = tcp_now + |
773 | (tcp_min_to_hz(tcp_ecn_timeout) << |
774 | (tpheur->th_mptcp_loss - MPTCP_MAX_SYN_LOSS)); |
775 | } |
776 | } |
777 | |
778 | if ((flags & TCPCACHE_F_ECN_DROPRST) && |
779 | tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT) { |
780 | tpheur->th_ecn_droprst++; |
781 | if (tpheur->th_ecn_droprst >= ECN_MAX_DROPRST) { |
782 | tcpstat.tcps_ecn_fallback_droprst++; |
783 | TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, |
784 | ecn_fallback_droprst); |
785 | tpheur->th_ecn_backoff = tcp_now + |
786 | (tcp_min_to_hz(tcp_ecn_timeout) << |
787 | (tpheur->th_ecn_droprst - ECN_MAX_DROPRST)); |
788 | |
789 | } |
790 | } |
791 | |
792 | if ((flags & TCPCACHE_F_ECN_DROPRXMT) && |
793 | tpheur->th_ecn_droprxmt < TCP_CACHE_OVERFLOW_PROTECT) { |
794 | tpheur->th_ecn_droprxmt++; |
795 | if (tpheur->th_ecn_droprxmt >= ECN_MAX_DROPRXMT) { |
796 | tcpstat.tcps_ecn_fallback_droprxmt++; |
797 | TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, |
798 | ecn_fallback_droprxmt); |
799 | tpheur->th_ecn_backoff = tcp_now + |
800 | (tcp_min_to_hz(tcp_ecn_timeout) << |
801 | (tpheur->th_ecn_droprxmt - ECN_MAX_DROPRXMT)); |
802 | } |
803 | } |
804 | if ((flags & TCPCACHE_F_ECN_SYNRST) && |
805 | tpheur->th_ecn_synrst < TCP_CACHE_OVERFLOW_PROTECT) { |
806 | tpheur->th_ecn_synrst++; |
807 | if (tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) { |
808 | tcpstat.tcps_ecn_fallback_synrst++; |
809 | TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, |
810 | ecn_fallback_synrst); |
811 | tpheur->th_ecn_backoff = tcp_now + |
812 | (tcp_min_to_hz(tcp_ecn_timeout) << |
813 | (tpheur->th_ecn_synrst - ECN_MAX_SYNRST)); |
814 | } |
815 | } |
816 | tcp_heuristic_unlock(head); |
817 | } |
818 | |
819 | void tcp_heuristic_tfo_loss(struct tcpcb *tp) |
820 | { |
821 | struct tcp_cache_key_src tcks; |
822 | uint32_t flag = 0; |
823 | |
824 | tcp_cache_key_src_create(tp, &tcks); |
825 | |
826 | if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) |
827 | flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ); |
828 | if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) |
829 | flag = TCPCACHE_F_TFO_REQ; |
830 | |
831 | tcp_heuristic_inc_counters(&tcks, flag); |
832 | } |
833 | |
834 | void tcp_heuristic_tfo_rst(struct tcpcb *tp) |
835 | { |
836 | struct tcp_cache_key_src tcks; |
837 | uint32_t flag = 0; |
838 | |
839 | tcp_cache_key_src_create(tp, &tcks); |
840 | |
841 | if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) |
842 | flag = (TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST); |
843 | if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) |
844 | flag = TCPCACHE_F_TFO_REQ_RST; |
845 | |
846 | tcp_heuristic_inc_counters(&tcks, flag); |
847 | } |
848 | |
849 | void tcp_heuristic_mptcp_loss(struct tcpcb *tp) |
850 | { |
851 | struct tcp_cache_key_src tcks; |
852 | |
853 | tcp_cache_key_src_create(tp, &tcks); |
854 | |
855 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP); |
856 | } |
857 | |
858 | void tcp_heuristic_ecn_loss(struct tcpcb *tp) |
859 | { |
860 | struct tcp_cache_key_src tcks; |
861 | |
862 | tcp_cache_key_src_create(tp, &tcks); |
863 | |
864 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN); |
865 | } |
866 | |
867 | void tcp_heuristic_ecn_droprst(struct tcpcb *tp) |
868 | { |
869 | struct tcp_cache_key_src tcks; |
870 | |
871 | tcp_cache_key_src_create(tp, &tcks); |
872 | |
873 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST); |
874 | } |
875 | |
876 | void tcp_heuristic_ecn_droprxmt(struct tcpcb *tp) |
877 | { |
878 | struct tcp_cache_key_src tcks; |
879 | |
880 | tcp_cache_key_src_create(tp, &tcks); |
881 | |
882 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT); |
883 | } |
884 | |
885 | void tcp_heuristic_ecn_synrst(struct tcpcb *tp) |
886 | { |
887 | struct tcp_cache_key_src tcks; |
888 | |
889 | tcp_cache_key_src_create(tp, &tcks); |
890 | |
891 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST); |
892 | } |
893 | |
894 | void tcp_heuristic_tfo_middlebox(struct tcpcb *tp) |
895 | { |
896 | struct tcp_cache_key_src tcks; |
897 | |
898 | tp->t_tfo_flags |= TFO_F_HEURISTIC_DONE; |
899 | |
900 | tcp_cache_key_src_create(tp, &tcks); |
901 | tcp_heuristic_tfo_middlebox_common(&tcks); |
902 | } |
903 | |
904 | static void tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks) |
905 | { |
906 | struct tcp_heuristics_head *head; |
907 | struct tcp_heuristic *tpheur; |
908 | |
909 | tpheur = tcp_getheuristic_with_lock(tcks, 1, &head); |
910 | if (tpheur == NULL) |
911 | return; |
912 | |
	/*
	 * Must be done before the counter is incremented below - otherwise
	 * we would start off with exponential backoff.
	 */
914 | tpheur->th_ecn_backoff = tcp_now + |
915 | (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive)); |
916 | |
917 | /* |
918 | * Ugly way to prevent integer overflow... limit to prevent in |
919 | * overflow during exp. backoff. |
920 | */ |
921 | if (tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT) |
922 | tpheur->th_ecn_aggressive++; |
923 | |
924 | tcp_heuristic_unlock(head); |
925 | } |
926 | |
927 | void tcp_heuristic_ecn_aggressive(struct tcpcb *tp) |
928 | { |
929 | struct tcp_cache_key_src tcks; |
930 | |
931 | tcp_cache_key_src_create(tp, &tcks); |
932 | tcp_heuristic_ecn_aggressive_common(&tcks); |
933 | } |
934 | |
935 | static boolean_t tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks) |
936 | { |
937 | struct tcp_heuristics_head *head; |
938 | struct tcp_heuristic *tpheur; |
939 | |
940 | if (disable_tcp_heuristics) |
941 | return (TRUE); |
942 | |
943 | /* Get the tcp-heuristic. */ |
944 | tpheur = tcp_getheuristic_with_lock(tcks, 0, &head); |
945 | if (tpheur == NULL) |
946 | return (TRUE); |
947 | |
948 | if (tpheur->th_tfo_in_backoff == 0) |
949 | goto tfo_ok; |
950 | |
951 | if (TSTMP_GT(tcp_now, tpheur->th_tfo_backoff_until)) { |
952 | tpheur->th_tfo_in_backoff = 0; |
953 | tpheur->th_tfo_enabled_time = tcp_now; |
954 | |
955 | goto tfo_ok; |
956 | } |
957 | |
958 | tcp_heuristic_unlock(head); |
959 | return (FALSE); |
960 | |
961 | tfo_ok: |
962 | tcp_heuristic_unlock(head); |
963 | return (TRUE); |
964 | } |
965 | |
966 | boolean_t tcp_heuristic_do_tfo(struct tcpcb *tp) |
967 | { |
968 | struct tcp_cache_key_src tcks; |
969 | |
970 | tcp_cache_key_src_create(tp, &tcks); |
971 | if (tcp_heuristic_do_tfo_common(&tcks)) |
972 | return (TRUE); |
973 | |
974 | return (FALSE); |
975 | } |
976 | |
977 | boolean_t tcp_heuristic_do_mptcp(struct tcpcb *tp) |
978 | { |
979 | struct tcp_cache_key_src tcks; |
980 | struct tcp_heuristics_head *head = NULL; |
981 | struct tcp_heuristic *tpheur; |
982 | |
983 | if (disable_tcp_heuristics) |
984 | return (TRUE); |
985 | |
986 | tcp_cache_key_src_create(tp, &tcks); |
987 | |
988 | /* Get the tcp-heuristic. */ |
989 | tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head); |
990 | if (tpheur == NULL) |
991 | return (TRUE); |
992 | |
993 | if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) |
994 | goto fallback; |
995 | |
996 | tcp_heuristic_unlock(head); |
997 | |
998 | return (TRUE); |
999 | |
1000 | fallback: |
1001 | if (head) |
1002 | tcp_heuristic_unlock(head); |
1003 | |
1004 | if (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FIRSTPARTY) |
1005 | tcpstat.tcps_mptcp_fp_heuristic_fallback++; |
1006 | else |
1007 | tcpstat.tcps_mptcp_heuristic_fallback++; |
1008 | |
1009 | return (FALSE); |
1010 | } |
1011 | |
1012 | static boolean_t tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks) |
1013 | { |
1014 | struct tcp_heuristics_head *head; |
1015 | struct tcp_heuristic *tpheur; |
1016 | boolean_t ret = TRUE; |
1017 | |
1018 | if (disable_tcp_heuristics) |
1019 | return (TRUE); |
1020 | |
1021 | /* Get the tcp-heuristic. */ |
1022 | tpheur = tcp_getheuristic_with_lock(tcks, 0, &head); |
1023 | if (tpheur == NULL) |
		return (ret);
1025 | |
1026 | if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) { |
1027 | ret = FALSE; |
1028 | } else { |
1029 | /* Reset the following counters to start re-evaluating */ |
1030 | if (tpheur->th_ecn_droprst >= ECN_RETRY_LIMIT) |
1031 | tpheur->th_ecn_droprst = 0; |
1032 | if (tpheur->th_ecn_droprxmt >= ECN_RETRY_LIMIT) |
1033 | tpheur->th_ecn_droprxmt = 0; |
1034 | if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) |
1035 | tpheur->th_ecn_synrst = 0; |
1036 | } |
1037 | |
1038 | tcp_heuristic_unlock(head); |
1039 | |
1040 | return (ret); |
1041 | } |
1042 | |
1043 | boolean_t tcp_heuristic_do_ecn(struct tcpcb *tp) |
1044 | { |
1045 | struct tcp_cache_key_src tcks; |
1046 | |
1047 | tcp_cache_key_src_create(tp, &tcks); |
1048 | return tcp_heuristic_do_ecn_common(&tcks); |
1049 | } |
1050 | |
1051 | boolean_t tcp_heuristic_do_ecn_with_address(struct ifnet *ifp, |
1052 | union sockaddr_in_4_6 *local_address) |
1053 | { |
1054 | struct tcp_cache_key_src tcks; |
1055 | |
1056 | memset(&tcks, 0, sizeof(tcks)); |
1057 | tcks.ifp = ifp; |
1058 | |
1059 | calculate_tcp_clock(); |
1060 | |
1061 | if (local_address->sa.sa_family == AF_INET6) { |
1062 | memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr)); |
1063 | tcks.af = AF_INET6; |
1064 | } else if (local_address->sa.sa_family == AF_INET) { |
1065 | memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr)); |
1066 | tcks.af = AF_INET; |
1067 | } |
1068 | |
1069 | return tcp_heuristic_do_ecn_common(&tcks); |
1070 | } |
1071 | |
1072 | void tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer, |
1073 | struct ifnet *ifp, union sockaddr_in_4_6 *local_address) |
1074 | { |
1075 | struct tcp_cache_key_src tcks; |
1076 | |
1077 | memset(&tcks, 0, sizeof(tcks)); |
1078 | tcks.ifp = ifp; |
1079 | |
1080 | calculate_tcp_clock(); |
1081 | |
1082 | if (local_address->sa.sa_family == AF_INET6) { |
1083 | memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr)); |
1084 | tcks.af = AF_INET6; |
1085 | } else if (local_address->sa.sa_family == AF_INET) { |
1086 | memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr)); |
1087 | tcks.af = AF_INET; |
1088 | } |
1089 | |
1090 | if (necp_buffer->necp_tcp_ecn_heuristics_success) { |
1091 | tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN); |
1092 | } else if (necp_buffer->necp_tcp_ecn_heuristics_loss) { |
1093 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN); |
1094 | } else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rst) { |
1095 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST); |
1096 | } else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rxmt) { |
1097 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT); |
1098 | } else if (necp_buffer->necp_tcp_ecn_heuristics_syn_rst) { |
1099 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST); |
1100 | } else if (necp_buffer->necp_tcp_ecn_heuristics_aggressive) { |
1101 | tcp_heuristic_ecn_aggressive_common(&tcks); |
1102 | } |
1103 | |
1104 | return; |
1105 | } |
1106 | |
1107 | boolean_t tcp_heuristic_do_tfo_with_address(struct ifnet *ifp, |
1108 | union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address, |
1109 | u_int8_t *cookie, u_int8_t *cookie_len) |
1110 | { |
1111 | struct tcp_cache_key_src tcks; |
1112 | |
1113 | memset(&tcks, 0, sizeof(tcks)); |
1114 | tcks.ifp = ifp; |
1115 | |
1116 | calculate_tcp_clock(); |
1117 | |
1118 | if (remote_address->sa.sa_family == AF_INET6) { |
1119 | memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr)); |
1120 | memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr)); |
1121 | tcks.af = AF_INET6; |
1122 | } else if (remote_address->sa.sa_family == AF_INET) { |
1123 | memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr)); |
1124 | memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr)); |
1125 | tcks.af = AF_INET; |
1126 | } |
1127 | |
1128 | if (tcp_heuristic_do_tfo_common(&tcks)) { |
1129 | if (!tcp_cache_get_cookie_common(&tcks, cookie, cookie_len)) { |
1130 | *cookie_len = 0; |
1131 | } |
1132 | return TRUE; |
1133 | } |
1134 | |
1135 | return FALSE; |
1136 | } |
1137 | |
1138 | void tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer, |
1139 | struct ifnet *ifp, union sockaddr_in_4_6 *local_address, |
1140 | union sockaddr_in_4_6 *remote_address) |
1141 | { |
1142 | struct tcp_cache_key_src tcks; |
1143 | |
1144 | memset(&tcks, 0, sizeof(tcks)); |
1145 | tcks.ifp = ifp; |
1146 | |
1147 | calculate_tcp_clock(); |
1148 | |
1149 | if (remote_address->sa.sa_family == AF_INET6) { |
1150 | memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr)); |
1151 | memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr)); |
1152 | tcks.af = AF_INET6; |
1153 | } else if (remote_address->sa.sa_family == AF_INET) { |
1154 | memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr)); |
1155 | memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr)); |
1156 | tcks.af = AF_INET; |
1157 | } |
1158 | |
1159 | if (necp_buffer->necp_tcp_tfo_heuristics_success) |
1160 | tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA | |
1161 | TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST); |
1162 | |
1163 | if (necp_buffer->necp_tcp_tfo_heuristics_success_req) |
1164 | tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST); |
1165 | |
1166 | if (necp_buffer->necp_tcp_tfo_heuristics_loss) |
1167 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA); |
1168 | |
1169 | if (necp_buffer->necp_tcp_tfo_heuristics_loss_req) |
1170 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ); |
1171 | |
1172 | if (necp_buffer->necp_tcp_tfo_heuristics_rst_data) |
1173 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST); |
1174 | |
1175 | if (necp_buffer->necp_tcp_tfo_heuristics_rst_req) |
1176 | tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST); |
1177 | |
1178 | if (necp_buffer->necp_tcp_tfo_heuristics_middlebox) |
1179 | tcp_heuristic_tfo_middlebox_common(&tcks); |
1180 | |
1181 | if (necp_buffer->necp_tcp_tfo_cookie_len != 0) { |
1182 | tcp_cache_set_cookie_common(&tcks, |
1183 | necp_buffer->necp_tcp_tfo_cookie, necp_buffer->necp_tcp_tfo_cookie_len); |
1184 | } |
1185 | |
1186 | return; |
1187 | } |
1188 | |
1189 | static void sysctl_cleartfocache(void) |
1190 | { |
1191 | int i; |
1192 | |
1193 | for (i = 0; i < tcp_cache_size; i++) { |
1194 | struct tcp_cache_head *head = &tcp_cache[i]; |
1195 | struct tcp_cache *tpcache, *tmp; |
1196 | struct tcp_heuristics_head *hhead = &tcp_heuristics[i]; |
1197 | struct tcp_heuristic *tpheur, *htmp; |
1198 | |
1199 | lck_mtx_lock(&head->tch_mtx); |
1200 | SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) { |
1201 | SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list); |
1202 | _FREE(tpcache, M_TEMP); |
1203 | } |
1204 | lck_mtx_unlock(&head->tch_mtx); |
1205 | |
1206 | lck_mtx_lock(&hhead->thh_mtx); |
1207 | SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) { |
1208 | SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list); |
1209 | _FREE(tpheur, M_TEMP); |
1210 | } |
1211 | lck_mtx_unlock(&hhead->thh_mtx); |
1212 | } |
1213 | } |
1214 | |
1215 | /* This sysctl is useful for testing purposes only */ |
1216 | static int tcpcleartfo = 0; |
1217 | |
1218 | static int sysctl_cleartfo SYSCTL_HANDLER_ARGS |
1219 | { |
1220 | #pragma unused(arg1, arg2) |
1221 | int error = 0, val, oldval = tcpcleartfo; |
1222 | |
1223 | val = oldval; |
1224 | error = sysctl_handle_int(oidp, &val, 0, req); |
1225 | if (error || !req->newptr) |
1226 | return (error); |
1227 | |
1228 | /* |
1229 | * The actual value does not matter. If the value is set, it triggers |
1230 | * the clearing of the TFO cache. If a future implementation does not |
1231 | * use the route entry to hold the TFO cache, replace the route sysctl. |
1232 | */ |
1233 | |
1234 | if (val != oldval) |
1235 | sysctl_cleartfocache(); |
1236 | |
1237 | tcpcleartfo = val; |
1238 | |
1239 | return (error); |
1240 | } |
1241 | |
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
	CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
	"Toggle to clear the TFO destination based heuristic cache");
1245 | |
1246 | void tcp_cache_init(void) |
1247 | { |
1248 | uint64_t sane_size_meg = sane_size / 1024 / 1024; |
1249 | int i; |
1250 | |
1251 | /* |
1252 | * On machines with <100MB of memory this will result in a (full) cache-size |
1253 | * of 32 entries, thus 32 * 5 * 64bytes = 10KB. (about 0.01 %) |
1254 | * On machines with > 4GB of memory, we have a cache-size of 1024 entries, |
1255 | * thus about 327KB. |
1256 | * |
1257 | * Side-note: we convert to u_int32_t. If sane_size is more than |
1258 | * 16000 TB, we loose precision. But, who cares? :) |
1259 | */ |
1260 | tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2)); |
1261 | if (tcp_cache_size < 32) |
1262 | tcp_cache_size = 32; |
1263 | else if (tcp_cache_size > 1024) |
1264 | tcp_cache_size = 1024; |
1265 | |
1266 | tcp_cache = _MALLOC(sizeof(struct tcp_cache_head) * tcp_cache_size, |
1267 | M_TEMP, M_ZERO); |
1268 | if (tcp_cache == NULL) |
1269 | panic("Allocating tcp_cache failed at boot-time!" ); |
1270 | |
1271 | tcp_cache_mtx_grp_attr = lck_grp_attr_alloc_init(); |
	tcp_cache_mtx_grp = lck_grp_alloc_init("tcpcache", tcp_cache_mtx_grp_attr);
1273 | tcp_cache_mtx_attr = lck_attr_alloc_init(); |
1274 | |
1275 | tcp_heuristics = _MALLOC(sizeof(struct tcp_heuristics_head) * tcp_cache_size, |
1276 | M_TEMP, M_ZERO); |
1277 | if (tcp_heuristics == NULL) |
1278 | panic("Allocating tcp_heuristic failed at boot-time!" ); |
1279 | |
1280 | tcp_heuristic_mtx_grp_attr = lck_grp_attr_alloc_init(); |
	tcp_heuristic_mtx_grp = lck_grp_alloc_init("tcpheuristic", tcp_heuristic_mtx_grp_attr);
1282 | tcp_heuristic_mtx_attr = lck_attr_alloc_init(); |
1283 | |
1284 | for (i = 0; i < tcp_cache_size; i++) { |
1285 | lck_mtx_init(&tcp_cache[i].tch_mtx, tcp_cache_mtx_grp, |
1286 | tcp_cache_mtx_attr); |
1287 | SLIST_INIT(&tcp_cache[i].tcp_caches); |
1288 | |
1289 | lck_mtx_init(&tcp_heuristics[i].thh_mtx, tcp_heuristic_mtx_grp, |
1290 | tcp_heuristic_mtx_attr); |
1291 | SLIST_INIT(&tcp_heuristics[i].tcp_heuristics); |
1292 | } |
1293 | |
1294 | tcp_cache_hash_seed = RandomULong(); |
1295 | } |
1296 | |