1 | /* |
2 | * Copyright (c) 2010-2014 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | #include <sys/param.h> |
29 | #include <sys/systm.h> |
30 | #include <sys/kernel.h> |
31 | #include <sys/protosw.h> |
32 | #include <sys/mcache.h> |
33 | #include <sys/sysctl.h> |
34 | |
35 | #include <net/route.h> |
36 | #include <netinet/in.h> |
37 | #include <netinet/in_systm.h> |
38 | #include <netinet/ip.h> |
39 | |
40 | #if INET6 |
41 | #include <netinet/ip6.h> |
42 | #endif |
43 | #include <netinet/ip_var.h> |
44 | #include <netinet/tcp.h> |
45 | #include <netinet/tcp_fsm.h> |
46 | #include <netinet/tcp_timer.h> |
47 | #include <netinet/tcp_var.h> |
48 | #include <netinet/tcpip.h> |
49 | #include <netinet/tcp_cc.h> |
50 | |
51 | #include <libkern/OSAtomic.h> |
52 | |
53 | /* This file implements an alternate TCP congestion control algorithm |
54 | * for background transport developed by LEDBAT working group at IETF and |
55 | * described in draft: draft-ietf-ledbat-congestion-02 |
56 | */ |
57 | |
/* Forward declarations for the LEDBAT congestion-control callbacks that
 * are registered with the TCP CC framework through the tcp_cc_ledbat
 * table below.
 */
int tcp_ledbat_init(struct tcpcb *tp);
int tcp_ledbat_cleanup(struct tcpcb *tp);
void tcp_ledbat_cwnd_init(struct tcpcb *tp);
void tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_pre_fr(struct tcpcb *tp);
void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_after_idle(struct tcpcb *tp);
void tcp_ledbat_after_timeout(struct tcpcb *tp);
int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index);
69 | |
/* Callback table exported to the TCP congestion-control framework;
 * connections using background transport dispatch through this table.
 */
struct tcp_cc_algo tcp_cc_ledbat = {
	.name = "ledbat",
	.init = tcp_ledbat_init,
	.cleanup = tcp_ledbat_cleanup,
	.cwnd_init = tcp_ledbat_cwnd_init,
	.congestion_avd = tcp_ledbat_congestion_avd,
	.ack_rcvd = tcp_ledbat_ack_rcvd,
	.pre_fr = tcp_ledbat_pre_fr,
	.post_fr = tcp_ledbat_post_fr,
	.after_idle = tcp_ledbat_after_idle,
	.after_timeout = tcp_ledbat_after_timeout,
	.delay_ack = tcp_ledbat_delay_ack,
	.switch_to = tcp_ledbat_switch_cc
};
84 | |
/* Target queuing delay in milliseconds. This includes the processing
 * and scheduling delay on both of the end-hosts. A LEDBAT sender tries
 * to keep queuing delay below this limit. When the queuing delay
 * goes above this limit, a LEDBAT sender will start reducing the
 * congestion window.
 *
 * The LEDBAT draft says that target queue delay MUST be 100 ms for
 * inter-operability.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, target_qdelay, 100, "Target queuing delay");

/* Allowed increase and tether are used to place an upper bound on the
 * congestion window based on the amount of data that is outstanding.
 * This will limit the congestion window when the amount of data in
 * flight is small because the application is writing to the socket
 * intermittently and is preventing the connection from becoming idle.
 *
 * max_allowed_cwnd = allowed_increase + (tether * flight_size)
 * cwnd = min(cwnd, max_allowed_cwnd)
 *
 * 'Allowed_increase' parameter is set to 8. If the flight size is zero, then
 * we want the congestion window to be at least 8 packets to reduce the
 * delay induced by delayed ack. This helps when the receiver is acking
 * more than 2 packets at a time (stretching acks for better performance).
 *
 * 'Tether' is set to 2: tether_shift below is 1 and is applied as a left
 * shift of the flight size in update_cwnd(). We do not want this to limit
 * the growth of cwnd during slow-start.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, allowed_increase, 8,
    "Additive constant used to calculate max allowed congestion window");

/* Left shift for cwnd to get tether value of 2 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tether_shift, 1, "Tether shift for max allowed congestion window");

/* Start with an initial window of 2. This will help to get more accurate
 * minimum RTT measurement in the beginning. It will help to probe
 * the path slowly and will not add to the existing delay if the path is
 * already congested. Using 2 packets will reduce the delay induced by delayed-ack.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
    uint32_t, bg_ss_fltsz, 2, "Initial congestion window for background transport");

/* RTT samples collected per slot; defined outside this file — presumably in
 * the TCP timer/CC machinery, verify at the definition site.
 */
extern int rtt_samples_per_slot;
131 | |
132 | static void update_cwnd(struct tcpcb *tp, uint32_t incr) { |
133 | uint32_t max_allowed_cwnd = 0, flight_size = 0; |
134 | uint32_t base_rtt; |
135 | |
136 | base_rtt = get_base_rtt(tp); |
137 | |
138 | /* If we do not have a good RTT measurement yet, increment |
139 | * congestion window by the default value. |
140 | */ |
141 | if (base_rtt == 0 || tp->t_rttcur == 0) { |
142 | tp->snd_cwnd += incr; |
143 | goto check_max; |
144 | } |
145 | |
146 | if (tp->t_rttcur <= (base_rtt + target_qdelay)) { |
147 | /* |
148 | * Delay decreased or remained the same, we can increase |
149 | * the congestion window according to RFC 3465. |
150 | * |
151 | * Move background slow-start threshold to current |
152 | * congestion window so that the next time (after some idle |
153 | * period), we can attempt to do slow-start till here if there |
154 | * is no increase in rtt |
155 | */ |
156 | if (tp->bg_ssthresh < tp->snd_cwnd) |
157 | tp->bg_ssthresh = tp->snd_cwnd; |
158 | tp->snd_cwnd += incr; |
159 | |
160 | } else { |
161 | /* In response to an increase in rtt, reduce the congestion |
162 | * window by one-eighth. This will help to yield immediately |
163 | * to a competing stream. |
164 | */ |
165 | uint32_t redwin; |
166 | |
167 | redwin = tp->snd_cwnd >> 3; |
168 | tp->snd_cwnd -= redwin; |
169 | if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg) |
170 | tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg; |
171 | |
172 | /* Lower background slow-start threshold so that the connection |
173 | * will go into congestion avoidance phase |
174 | */ |
175 | if (tp->bg_ssthresh > tp->snd_cwnd) |
176 | tp->bg_ssthresh = tp->snd_cwnd; |
177 | } |
178 | check_max: |
179 | /* Calculate the outstanding flight size and restrict the |
180 | * congestion window to a factor of flight size. |
181 | */ |
182 | flight_size = tp->snd_max - tp->snd_una; |
183 | |
184 | max_allowed_cwnd = (allowed_increase * tp->t_maxseg) |
185 | + (flight_size << tether_shift); |
186 | tp->snd_cwnd = min(tp->snd_cwnd, max_allowed_cwnd); |
187 | return; |
188 | } |
189 | |
190 | int tcp_ledbat_init(struct tcpcb *tp) { |
191 | #pragma unused(tp) |
192 | OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets); |
193 | return 0; |
194 | } |
195 | |
196 | int tcp_ledbat_cleanup(struct tcpcb *tp) { |
197 | #pragma unused(tp) |
198 | OSDecrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets); |
199 | return 0; |
200 | } |
201 | |
202 | /* Initialize the congestion window for a connection |
203 | * |
204 | */ |
205 | |
206 | void |
207 | tcp_ledbat_cwnd_init(struct tcpcb *tp) { |
208 | tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz; |
209 | tp->bg_ssthresh = tp->snd_ssthresh; |
210 | } |
211 | |
212 | /* Function to handle an in-sequence ack which is fast-path processing |
213 | * of an in sequence ack in tcp_input function (called as header prediction). |
214 | * This gets called only during congestion avoidance phase. |
215 | */ |
216 | void |
217 | tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th) { |
218 | int acked = 0; |
219 | u_int32_t incr = 0; |
220 | |
221 | acked = BYTES_ACKED(th, tp); |
222 | tp->t_bytes_acked += acked; |
223 | if (tp->t_bytes_acked > tp->snd_cwnd) { |
224 | tp->t_bytes_acked -= tp->snd_cwnd; |
225 | incr = tp->t_maxseg; |
226 | } |
227 | |
228 | if (tp->snd_cwnd < tp->snd_wnd && incr > 0) { |
229 | update_cwnd(tp, incr); |
230 | } |
231 | } |
232 | /* Function to process an ack. |
233 | */ |
234 | void |
235 | tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { |
236 | /* |
237 | * RFC 3465 - Appropriate Byte Counting. |
238 | * |
239 | * If the window is currently less than ssthresh, |
240 | * open the window by the number of bytes ACKed by |
241 | * the last ACK, however clamp the window increase |
242 | * to an upper limit "L". |
243 | * |
244 | * In congestion avoidance phase, open the window by |
245 | * one segment each time "bytes_acked" grows to be |
246 | * greater than or equal to the congestion window. |
247 | */ |
248 | |
249 | u_int cw = tp->snd_cwnd; |
250 | u_int incr = tp->t_maxseg; |
251 | int acked = 0; |
252 | |
253 | acked = BYTES_ACKED(th, tp); |
254 | tp->t_bytes_acked += acked; |
255 | if (cw >= tp->bg_ssthresh) { |
256 | /* congestion-avoidance */ |
257 | if (tp->t_bytes_acked < cw) { |
258 | /* No need to increase yet. */ |
259 | incr = 0; |
260 | } |
261 | } else { |
262 | /* |
263 | * If the user explicitly enables RFC3465 |
264 | * use 2*SMSS for the "L" param. Otherwise |
265 | * use the more conservative 1*SMSS. |
266 | * |
267 | * (See RFC 3465 2.3 Choosing the Limit) |
268 | */ |
269 | u_int abc_lim; |
270 | |
271 | abc_lim = (tcp_do_rfc3465_lim2 && |
272 | tp->snd_nxt == tp->snd_max) ? incr * 2 : incr; |
273 | |
274 | incr = lmin(acked, abc_lim); |
275 | } |
276 | if (tp->t_bytes_acked >= cw) |
277 | tp->t_bytes_acked -= cw; |
278 | if (incr > 0) |
279 | update_cwnd(tp, incr); |
280 | } |
281 | |
282 | void |
283 | tcp_ledbat_pre_fr(struct tcpcb *tp) { |
284 | uint32_t win; |
285 | |
286 | win = min(tp->snd_wnd, tp->snd_cwnd) / |
287 | 2 / tp->t_maxseg; |
288 | if ( win < 2 ) |
289 | win = 2; |
290 | tp->snd_ssthresh = win * tp->t_maxseg; |
291 | if (tp->bg_ssthresh > tp->snd_ssthresh) |
292 | tp->bg_ssthresh = tp->snd_ssthresh; |
293 | |
294 | tcp_cc_resize_sndbuf(tp); |
295 | } |
296 | |
297 | void |
298 | tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) { |
299 | int32_t ss; |
300 | |
301 | ss = tp->snd_max - th->th_ack; |
302 | |
303 | /* |
304 | * Complete ack. Inflate the congestion window to |
305 | * ssthresh and exit fast recovery. |
306 | * |
307 | * Window inflation should have left us with approx. |
308 | * snd_ssthresh outstanding data. But in case we |
309 | * would be inclined to send a burst, better to do |
310 | * it via the slow start mechanism. |
311 | * |
312 | * If the flight size is zero, then make congestion |
313 | * window to be worth at least 2 segments to avoid |
314 | * delayed acknowledgement (draft-ietf-tcpm-rfc3782-bis-05). |
315 | */ |
316 | if (ss < (int32_t)tp->snd_ssthresh) |
317 | tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg; |
318 | else |
319 | tp->snd_cwnd = tp->snd_ssthresh; |
320 | tp->t_bytes_acked = 0; |
321 | } |
322 | |
323 | /* |
324 | * Function to handle connections that have been idle for |
325 | * some time. Slow start to get ack "clock" running again. |
326 | * Clear base history after idle time. |
327 | */ |
328 | void |
329 | tcp_ledbat_after_idle(struct tcpcb *tp) { |
330 | |
331 | /* Reset the congestion window */ |
332 | tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz; |
333 | } |
334 | |
335 | /* Function to change the congestion window when the retransmit |
336 | * timer fires. The behavior is the same as that for best-effort |
337 | * TCP, reduce congestion window to one segment and start probing |
338 | * the link using "slow start". The slow start threshold is set |
339 | * to half of the current window. Lower the background slow start |
340 | * threshold also. |
341 | */ |
342 | void |
343 | tcp_ledbat_after_timeout(struct tcpcb *tp) { |
344 | if (tp->t_state >= TCPS_ESTABLISHED) { |
345 | u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; |
346 | if (win < 2) |
347 | win = 2; |
348 | tp->snd_ssthresh = win * tp->t_maxseg; |
349 | |
350 | if (tp->bg_ssthresh > tp->snd_ssthresh) |
351 | tp->bg_ssthresh = tp->snd_ssthresh; |
352 | |
353 | tp->snd_cwnd = tp->t_maxseg; |
354 | tcp_cc_resize_sndbuf(tp); |
355 | } |
356 | } |
357 | |
358 | /* |
359 | * Indicate whether this ack should be delayed. |
360 | * We can delay the ack if: |
361 | * - our last ack wasn't a 0-sized window. |
362 | * - the peer hasn't sent us a TH_PUSH data packet: if he did, take this |
363 | * as a clue that we need to ACK without any delay. This helps higher |
364 | * level protocols who won't send us more data even if the window is |
365 | * open because their last "segment" hasn't been ACKed |
366 | * Otherwise the receiver will ack every other full-sized segment or when the |
367 | * delayed ack timer fires. This will help to generate better rtt estimates for |
368 | * the other end if it is a ledbat sender. |
369 | * |
370 | */ |
371 | |
372 | int |
373 | tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) { |
374 | if ((tp->t_flags & TF_RXWIN0SENT) == 0 && |
375 | (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) |
376 | return(1); |
377 | return(0); |
378 | } |
379 | |
380 | /* Change a connection to use ledbat. First, lower bg_ssthresh value |
381 | * if it needs to be. |
382 | */ |
383 | void |
384 | tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) { |
385 | #pragma unused(old_cc_index) |
386 | uint32_t cwnd; |
387 | |
388 | if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh) |
389 | tp->bg_ssthresh = tp->snd_ssthresh; |
390 | |
391 | cwnd = min(tp->snd_wnd, tp->snd_cwnd); |
392 | |
393 | if (tp->snd_cwnd > tp->bg_ssthresh) |
394 | cwnd = cwnd / tp->t_maxseg; |
395 | else |
396 | cwnd = cwnd / 2 / tp->t_maxseg; |
397 | |
398 | if (cwnd < bg_ss_fltsz) |
399 | cwnd = bg_ss_fltsz; |
400 | |
401 | tp->snd_cwnd = cwnd * tp->t_maxseg; |
402 | tp->t_bytes_acked = 0; |
403 | |
404 | OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets); |
405 | } |
406 | |