/*
 * Copyright (c) 2011-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/dlil.h>
#include <net/flowadv.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#if INET6
#include <netinet/ip6.h>
#endif

#include <net/classq/classq_sfb.h>
#include <net/flowhash.h>
#include <net/net_osdep.h>
#include <dev/random/randomdev.h>

/*
 * Stochastic Fair Blue
 *
 * Wu-chang Feng, Dilip D. Kandlur, Debanjan Saha, Kang G. Shin
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 * Based on the NS code with the following parameters:
 *
 *	bytes:		false
 *	decrement:	0.001
 *	increment:	0.005
 *	hold-time:	10ms-50ms (randomized)
 *	algorithm:	0
 *	pbox:		1
 *	pbox-time:	50-100ms (randomized)
 *	hinterval:	11-23 (randomized)
 *
 * This implementation uses L = 2 and N = 32 for 2 sets of:
 *
 *	B[L][N]: L x N array of bins (L levels, N bins per level)
 *
 * Each set effectively creates 32^2 virtual buckets (bin combinations)
 * while using only O(32*2) states.
 *
 * Given a 32-bit hash value, we divide it such that octets [0,1,2,3] are
 * used as indices for the bins across the 2 levels, where level 1 uses
 * octets [0,2] and level 2 uses octets [1,3]. The 2 values per level
 * correspond to the indices for the current and warm-up sets (section 4.4
 * of the SFB paper, on Moving Hash Functions, explains the purpose of
 * these 2 sets).
 */
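
/*
 * Worked example (illustrative, not from the original sources): with
 * SFB_BINS == 32 (SFB_BINS_SHIFT == 5), the bin index is the low 5 bits
 * of each octet.  Assuming little-endian byte order, a 32-bit hash of
 * 0x1A2B3C4D is viewed as octets h8[0..3] = { 0x4D, 0x3C, 0x2B, 0x1A },
 * so the current set (set 0) maps the flow to bin (0x4D & 0x1f) == 13
 * at level 0 and bin (0x3C & 0x1f) == 28 at level 1, while the warm-up
 * set derives its indices from h8[2] and h8[3] the same way.
 */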

/*
 * Use Murmur3A_x86_32 for the hash function. It seems to perform
 * consistently across platforms for a 1-word key (32-bit flowhash
 * value). See flowhash.h for other alternatives. We only need 16-bit
 * hash output.
 */
#define	SFB_HASH	net_flowhash_mh3_x86_32
#define	SFB_HASHMASK	HASHMASK(16)

#define	SFB_BINMASK(_x) \
	((_x) & HASHMASK(SFB_BINS_SHIFT))

#define	SFB_BINST(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].stats[_l][_n])

#define	SFB_BINFT(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].freezetime[_l][_n])

#define	SFB_FC_LIST(_sp, _n) \
	(&(*(_sp)->sfb_fc_lists)[_n])
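
/*
 * Continuing the example above (illustrative): the level-0 statistics
 * for that packet in current set c are addressed as
 * SFB_BINST(sp, 0, SFB_BINMASK(h8[0]), c), which expands to
 * &(*sp->sfb_bins)[c].stats[0][13] when h8[0] == 0x4D.
 */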

/*
 * The holdtime parameter determines the minimum time interval between
 * two successive updates of the marking probability. In the event the
 * uplink speed is not known, a default value is chosen and is randomized
 * to be within the following range.
 */
#define	HOLDTIME_BASE	(100ULL * 1000 * 1000)	/* 100ms */
#define	HOLDTIME_MIN	(10ULL * 1000 * 1000)	/* 10ms */
#define	HOLDTIME_MAX	(100ULL * 1000 * 1000)	/* 100ms */

/*
 * The pboxtime parameter determines the bandwidth allocated for rogue
 * flows, i.e. the rate limiting bandwidth. In the event the uplink speed
 * is not known, a default value is chosen and is randomized to be within
 * the following range.
 */
#define	PBOXTIME_BASE	(300ULL * 1000 * 1000)	/* 300ms */
#define	PBOXTIME_MIN	(30ULL * 1000 * 1000)	/* 30ms */
#define	PBOXTIME_MAX	(300ULL * 1000 * 1000)	/* 300ms */

/*
 * Target queueing delay is the amount of extra delay that can be added
 * to accommodate variations in the link bandwidth. The queue should be
 * large enough to induce this much delay, but no larger.
 */
#define	TARGET_QDELAY_BASE	(10ULL * 1000 * 1000)		/* 10ms */
#define	TARGET_QDELAY_MIN	(10ULL * 1000)			/* 10us */
#define	TARGET_QDELAY_MAX	(20ULL * 1000 * 1000 * 1000)	/* 20s */

/*
 * Update interval for checking the extra delay added by the queue. This
 * should be at the 90th-95th percentile of the RTT experienced by TCP
 * connections, so that it can absorb bursts of traffic.
 */
#define	UPDATE_INTERVAL_BASE	(100ULL * 1000 * 1000)		/* 100ms */
#define	UPDATE_INTERVAL_MIN	(100ULL * 1000 * 1000)		/* 100ms */
#define	UPDATE_INTERVAL_MAX	(10ULL * 1000 * 1000 * 1000)	/* 10s */

/* uniformly random value within the closed interval [tmin, tmax] */
#define	SFB_RANDOM(sp, tmin, tmax) \
	((sfb_random(sp) % ((tmax) - (tmin) + 1)) + (tmin))

#define	SFB_PKT_PBOX	0x1	/* in penalty box */

/* The following mantissa values are in SFB_FP_SHIFT Q format */
#define	SFB_MAX_PMARK	(1 << SFB_FP_SHIFT)	/* Q14 representation of 1.00 */

/*
 * These are the d1 (increment) and d2 (decrement) parameters, used to
 * determine the amount by which the marking probability is incremented
 * when the queue overflows, or decremented when the link is idle. d1 is
 * set higher than d2, because link underutilization can occur when
 * congestion management is either too conservative or too aggressive,
 * whereas packet loss occurs only when it is too conservative. By
 * weighting heavily against packet loss, the marking probability can
 * quickly react to a substantial increase in traffic load.
 */
#define	SFB_INCREMENT	82	/* Q14 representation of 0.005 */
#define	SFB_DECREMENT	16	/* Q14 representation of 0.001 */

#define	SFB_PMARK_TH	16056	/* Q14 representation of 0.98 */
#define	SFB_PMARK_WARM	3276	/* Q14 representation of 0.2 */
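
/*
 * The Q14 constants above follow from SFB_FP_SHIFT == 14, i.e.
 * value * 2^14 rounded (or truncated) to an integer:
 *
 *	0.005 * 16384 = 81.92	->  SFB_INCREMENT = 82
 *	0.001 * 16384 = 16.38	->  SFB_DECREMENT = 16
 *	0.98  * 16384 = 16056.3	->  SFB_PMARK_TH = 16056
 *	0.2   * 16384 = 3276.8	->  SFB_PMARK_WARM = 3276
 */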

#define	SFB_PMARK_INC(_bin) do { \
	(_bin)->pmark += sfb_increment; \
	if ((_bin)->pmark > SFB_MAX_PMARK) \
		(_bin)->pmark = SFB_MAX_PMARK; \
} while (0)

#define	SFB_PMARK_DEC(_bin) do { \
	if ((_bin)->pmark > 0) { \
		(_bin)->pmark -= sfb_decrement; \
		if ((_bin)->pmark < 0) \
			(_bin)->pmark = 0; \
	} \
} while (0)

/* Minimum number of bytes in queue to get flow controlled */
#define	SFB_MIN_FC_THRESHOLD_BYTES	7500

#define	SFB_SET_DELAY_HIGH(_sp_, _q_) do { \
	(_sp_)->sfb_flags |= SFBF_DELAYHIGH; \
	(_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES, \
	    (qsize((_q_)) >> 3)); \
} while (0)

#define	SFB_QUEUE_DELAYBASED(_sp_)	((_sp_)->sfb_flags & SFBF_DELAYBASED)
#define	SFB_IS_DELAYHIGH(_sp_)	((_sp_)->sfb_flags & SFBF_DELAYHIGH)
#define	SFB_QUEUE_DELAYBASED_MAXSIZE	2048	/* max pkts */
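
/*
 * Example (illustrative numbers): SFB_SET_DELAY_HIGH() sets the flow
 * control threshold to 1/8 of the bytes currently queued, floored at
 * SFB_MIN_FC_THRESHOLD_BYTES.  A queue holding 256KB yields a 32KB
 * threshold; a 40KB queue would yield 5KB and is clamped to 7500 bytes.
 */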

#define	HINTERVAL_MIN	(10)	/* 10 seconds */
#define	HINTERVAL_MAX	(20)	/* 20 seconds */
#define	SFB_HINTERVAL(sp) \
	((sfb_random(sp) % (HINTERVAL_MAX - HINTERVAL_MIN + 1)) + HINTERVAL_MIN)

#define	DEQUEUE_DECAY	7		/* ilog2 of EWMA decay rate, (128) */
#define	DEQUEUE_SPIKE(_new, _old) \
	((u_int64_t)ABS((int64_t)(_new) - (int64_t)(_old)) > ((_old) << 11))

#define	ABS(v)	(((v) > 0) ? (v) : -(v))
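
/*
 * A sketch of how the two constants above are used (see the EWMA
 * computation in sfb_getq_flow() below): with DEQUEUE_DECAY == 7, the
 * update avg = ((avg << decay) - avg + new) >> decay computes
 * avg = (127 * avg + new) / 128, i.e. each new inter-dequeue sample
 * carries a 1/128 weight.  DEQUEUE_SPIKE() flags a sample more than
 * about 2048x (1 << 11) the current average; the decay is then raised
 * so that the spike perturbs the average even less.
 */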

#define	SFB_ZONE_MAX		32	/* maximum elements in zone */
#define	SFB_ZONE_NAME		"classq_sfb"	/* zone name */

#define	SFB_BINS_ZONE_MAX	32	/* maximum elements in zone */
#define	SFB_BINS_ZONE_NAME	"classq_sfb_bins"	/* zone name */

#define	SFB_FCL_ZONE_MAX	32	/* maximum elements in zone */
#define	SFB_FCL_ZONE_NAME	"classq_sfb_fcl"	/* zone name */

/* Place the flow control entries in current bin on level 0 */
#define	SFB_FC_LEVEL	0

static unsigned int sfb_size;		/* size of zone element */
static struct zone *sfb_zone;		/* zone for sfb */

static unsigned int sfb_bins_size;	/* size of zone element */
static struct zone *sfb_bins_zone;	/* zone for sfb_bins */

static unsigned int sfb_fcl_size;	/* size of zone element */
static struct zone *sfb_fcl_zone;	/* zone for sfb_fc_lists */

/* internal function prototypes */
static u_int32_t sfb_random(struct sfb *);
static void *sfb_getq_flow(struct sfb *, class_queue_t *, u_int32_t, boolean_t,
    pktsched_pkt_t *);
static void sfb_resetq(struct sfb *, cqev_t);
static void sfb_calc_holdtime(struct sfb *, u_int64_t);
static void sfb_calc_pboxtime(struct sfb *, u_int64_t);
static void sfb_calc_hinterval(struct sfb *, u_int64_t *);
static void sfb_calc_update_interval(struct sfb *, u_int64_t);
static void sfb_swap_bins(struct sfb *, u_int32_t);
static inline int sfb_pcheck(struct sfb *, uint32_t);
static int sfb_penalize(struct sfb *, uint32_t, uint32_t *, struct timespec *);
static void sfb_adjust_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *, boolean_t);
static void sfb_decrement_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static void sfb_increment_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static inline void sfb_dq_update_bins(struct sfb *, uint32_t, uint32_t,
    struct timespec *, u_int32_t qsize);
static inline void sfb_eq_update_bins(struct sfb *, uint32_t, uint32_t);
static int sfb_drop_early(struct sfb *, uint32_t, u_int16_t *,
    struct timespec *);
static boolean_t sfb_bin_addfcentry(struct sfb *, pktsched_pkt_t *,
    uint32_t, uint8_t, uint32_t);
static void sfb_fclist_append(struct sfb *, struct sfb_fcl *);
static void sfb_fclists_clean(struct sfb *sp);
static int sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin);
static void sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *,
    struct timespec *);

SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "SFB");

static u_int64_t sfb_holdtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, holdtime, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_holdtime, "SFB freeze time in nanoseconds");

static u_int64_t sfb_pboxtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, pboxtime, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_pboxtime, "SFB penalty box time in nanoseconds");

static u_int64_t sfb_hinterval;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_hinterval, "SFB hash interval in nanoseconds");

static u_int32_t sfb_increment = SFB_INCREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_increment, SFB_INCREMENT, "SFB increment [d1]");

static u_int32_t sfb_decrement = SFB_DECREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, decrement, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_decrement, SFB_DECREMENT, "SFB decrement [d2]");

static u_int32_t sfb_allocation = 0;	/* 0 means "automatic" */
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, allocation, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_allocation, 0, "SFB bin allocation");

static u_int32_t sfb_ratelimit = 0;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_ratelimit, 0, "SFB rate limit");

#define	KBPS	(1ULL * 1000)		/* 1 Kbits per second */
#define	MBPS	(1ULL * 1000 * 1000)	/* 1 Mbits per second */
#define	GBPS	(MBPS * 1000)		/* 1 Gbits per second */

struct sfb_time_tbl {
	u_int64_t	speed;		/* uplink speed */
	u_int64_t	holdtime;	/* hold time */
	u_int64_t	pboxtime;	/* penalty box time */
};

static struct sfb_time_tbl sfb_ttbl[] = {
	{ 1 * MBPS,	HOLDTIME_BASE * 1000,	PBOXTIME_BASE * 1000 },
	{ 10 * MBPS,	HOLDTIME_BASE * 100,	PBOXTIME_BASE * 100 },
	{ 100 * MBPS,	HOLDTIME_BASE * 10,	PBOXTIME_BASE * 10 },
	{ 1 * GBPS,	HOLDTIME_BASE,		PBOXTIME_BASE },
	{ 10 * GBPS,	HOLDTIME_BASE / 10,	PBOXTIME_BASE / 10 },
	{ 100 * GBPS,	HOLDTIME_BASE / 100,	PBOXTIME_BASE / 100 },
	{ 0, 0, 0 }
};
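
/*
 * Worked example (illustrative): sfb_calc_holdtime() below walks this
 * table and keeps the last entry whose speed does not exceed the link
 * rate.  For a 500 Mbps link the walk stops at the 1 Gbps row, leaving
 * the 100 Mbps values in effect: holdtime = HOLDTIME_BASE * 10 (1 sec)
 * and pboxtime = PBOXTIME_BASE * 10 (3 sec).  Both parameters thus
 * scale inversely with the link speed, in decades.
 */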

void
sfb_init(void)
{
	_CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
	_CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

	sfb_size = sizeof (struct sfb);
	sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
	    0, SFB_ZONE_NAME);
	if (sfb_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_zone, Z_EXPAND, TRUE);
	zone_change(sfb_zone, Z_CALLERACCT, TRUE);

	sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins);
	sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
	    0, SFB_BINS_ZONE_NAME);
	if (sfb_bins_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
	zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

	sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists);
	sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
	    0, SFB_FCL_ZONE_NAME);
	if (sfb_fcl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
	zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}

static u_int32_t
sfb_random(struct sfb *sp)
{
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	return (RandomULong());
}

static void
sfb_calc_holdtime(struct sfb *sp, u_int64_t outbw)
{
	u_int64_t holdtime;

	if (sfb_holdtime != 0) {
		holdtime = sfb_holdtime;
	} else if (outbw == 0) {
		holdtime = SFB_RANDOM(sp, HOLDTIME_MIN, HOLDTIME_MAX);
	} else {
		u_int64_t n;	/* 64-bit; table values exceed 32 bits */
		unsigned int i;

		n = sfb_ttbl[0].holdtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed)
				break;
			n = sfb_ttbl[i].holdtime;
		}
		holdtime = n;
	}
	net_nsectimer(&holdtime, &sp->sfb_holdtime);
}

static void
sfb_calc_pboxtime(struct sfb *sp, u_int64_t outbw)
{
	u_int64_t pboxtime;

	if (sfb_pboxtime != 0) {
		pboxtime = sfb_pboxtime;
	} else if (outbw == 0) {
		pboxtime = SFB_RANDOM(sp, PBOXTIME_MIN, PBOXTIME_MAX);
	} else {
		u_int64_t n;	/* 64-bit; table values exceed 32 bits */
		unsigned int i;

		n = sfb_ttbl[0].pboxtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed)
				break;
			n = sfb_ttbl[i].pboxtime;
		}
		pboxtime = n;
	}
	net_nsectimer(&pboxtime, &sp->sfb_pboxtime);
	net_timerclear(&sp->sfb_pboxfreeze);
}

static void
sfb_calc_hinterval(struct sfb *sp, u_int64_t *t)
{
	u_int64_t hinterval = 0;
	struct timespec now;

	if (t != NULL) {
		/*
		 * TODO adi@apple.com: use dq_avg to derive hinterval.
		 */
		hinterval = *t;
	}

	if (sfb_hinterval != 0)
		hinterval = sfb_hinterval;
	else if (t == NULL || hinterval == 0)
		hinterval = ((u_int64_t)SFB_HINTERVAL(sp) * NSEC_PER_SEC);

	net_nsectimer(&hinterval, &sp->sfb_hinterval);

	nanouptime(&now);
	net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
}

static void
sfb_calc_update_interval(struct sfb *sp, u_int64_t out_bw)
{
#pragma unused(out_bw)
	u_int64_t update_interval = 0;
	ifclassq_calc_update_interval(&update_interval);
	net_nsectimer(&update_interval, &sp->sfb_update_interval);
}

/*
 * sfb support routines
 */
struct sfb *
sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags)
{
	struct sfb *sp;
	int i;

	VERIFY(ifp != NULL && qlim > 0);

	sp = zalloc(sfb_zone);
	if (sp == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp));
		return (NULL);
	}
	bzero(sp, sfb_size);

	if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp));
		sfb_destroy(sp);
		return (NULL);
	}
	bzero(sp->sfb_bins, sfb_bins_size);

	if ((sp->sfb_fc_lists = zalloc(sfb_fcl_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n",
		    if_name(ifp));
		sfb_destroy(sp);
		return (NULL);
	}
	bzero(sp->sfb_fc_lists, sfb_fcl_size);

	for (i = 0; i < SFB_BINS; ++i)
		STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist);

	sp->sfb_ifp = ifp;
	sp->sfb_qlim = qlim;
	sp->sfb_qid = qid;
	sp->sfb_flags = (flags & SFBF_USERFLAGS);
#if !PF_ECN
	if (sp->sfb_flags & SFBF_ECN) {
		sp->sfb_flags &= ~SFBF_ECN;
		log(LOG_ERR, "%s: SFB qid=%d, ECN not available; ignoring "
		    "SFBF_ECN flag!\n", if_name(ifp), sp->sfb_qid);
	}
#endif /* !PF_ECN */

	sfb_resetq(sp, CLASSQ_EV_INIT);

	return (sp);
}
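
/*
 * A minimal usage sketch (hypothetical caller; only the sfb_* calls
 * below are defined in this file).  A classq discipline pairs an sfb
 * instance with a class_queue_t and funnels every enqueue and dequeue
 * through it:
 *
 *	struct sfb *sp = sfb_alloc(ifp, qid, qlimit(q), SFBF_FLOWCTL);
 *	...
 *	ret = sfb_addq(sp, q, pkt, t);	<- may flow-control, mark or drop
 *	sfb_getq(sp, q, pkt);		<- dequeues and updates the bins
 *	...
 *	sfb_destroy(sp);
 */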

static void
sfb_fclist_append(struct sfb *sp, struct sfb_fcl *fcl)
{
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	VERIFY(STAILQ_EMPTY(&fcl->fclist) || fcl->cnt > 0);
	sp->sfb_stats.flow_feedback += fcl->cnt;
	fcl->cnt = 0;

	flowadv_add(&fcl->fclist);
	VERIFY(fcl->cnt == 0 && STAILQ_EMPTY(&fcl->fclist));
}

static void
sfb_fclists_clean(struct sfb *sp)
{
	int i;

	/* Move all the flow control entries to the flowadv list */
	for (i = 0; i < SFB_BINS; ++i) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
		if (!STAILQ_EMPTY(&fcl->fclist))
			sfb_fclist_append(sp, fcl);
	}
}

void
sfb_destroy(struct sfb *sp)
{
	sfb_fclists_clean(sp);
	if (sp->sfb_bins != NULL) {
		zfree(sfb_bins_zone, sp->sfb_bins);
		sp->sfb_bins = NULL;
	}
	if (sp->sfb_fc_lists != NULL) {
		zfree(sfb_fcl_zone, sp->sfb_fc_lists);
		sp->sfb_fc_lists = NULL;
	}
	zfree(sfb_zone, sp);
}

static void
sfb_resetq(struct sfb *sp, cqev_t ev)
{
	struct ifnet *ifp = sp->sfb_ifp;
	u_int64_t eff_rate;

	VERIFY(ifp != NULL);

	if (ev != CLASSQ_EV_LINK_DOWN) {
		(*sp->sfb_bins)[0].fudge = sfb_random(sp);
		(*sp->sfb_bins)[1].fudge = sfb_random(sp);
		sp->sfb_allocation = ((sfb_allocation == 0) ?
		    (sp->sfb_qlim / 3) : sfb_allocation);
		sp->sfb_drop_thresh = sp->sfb_allocation +
		    (sp->sfb_allocation >> 1);
	}

	sp->sfb_clearpkts = 0;
	sp->sfb_current = 0;

	eff_rate = ifnet_output_linkrate(ifp);
	sp->sfb_eff_rate = eff_rate;

	sfb_calc_holdtime(sp, eff_rate);
	sfb_calc_pboxtime(sp, eff_rate);
	sfb_calc_hinterval(sp, NULL);
	ifclassq_calc_target_qdelay(ifp, &sp->sfb_target_qdelay);
	sfb_calc_update_interval(sp, eff_rate);

	if (ev == CLASSQ_EV_LINK_DOWN ||
	    ev == CLASSQ_EV_LINK_UP)
		sfb_fclists_clean(sp);

	bzero(sp->sfb_bins, sizeof (*sp->sfb_bins));
	bzero(&sp->sfb_stats, sizeof (sp->sfb_stats));

	if (ev == CLASSQ_EV_LINK_DOWN || !classq_verbose)
		return;

	log(LOG_DEBUG, "%s: SFB qid=%d, holdtime=%llu nsec, "
	    "pboxtime=%llu nsec, allocation=%d, drop_thresh=%d, "
	    "hinterval=%d sec, sfb_bins=%d bytes, eff_rate=%llu bps, "
	    "target_qdelay=%llu nsec, "
	    "update_interval=%llu sec %llu nsec, flags=0x%x\n",
	    if_name(ifp), sp->sfb_qid, (u_int64_t)sp->sfb_holdtime.tv_nsec,
	    (u_int64_t)sp->sfb_pboxtime.tv_nsec,
	    (u_int32_t)sp->sfb_allocation, (u_int32_t)sp->sfb_drop_thresh,
	    (int)sp->sfb_hinterval.tv_sec, (int)sizeof (*sp->sfb_bins),
	    eff_rate, (u_int64_t)sp->sfb_target_qdelay,
	    (u_int64_t)sp->sfb_update_interval.tv_sec,
	    (u_int64_t)sp->sfb_update_interval.tv_nsec, sp->sfb_flags);
}

void
sfb_getstats(struct sfb *sp, struct sfb_stats *sps)
{
	sps->allocation = sp->sfb_allocation;
	sps->dropthresh = sp->sfb_drop_thresh;
	sps->clearpkts = sp->sfb_clearpkts;
	sps->current = sp->sfb_current;
	sps->target_qdelay = sp->sfb_target_qdelay;
	sps->min_estdelay = sp->sfb_min_qdelay;
	sps->delay_fcthreshold = sp->sfb_fc_threshold;
	sps->flags = sp->sfb_flags;

	net_timernsec(&sp->sfb_holdtime, &sp->sfb_stats.hold_time);
	net_timernsec(&sp->sfb_pboxtime, &sp->sfb_stats.pbox_time);
	net_timernsec(&sp->sfb_hinterval, &sp->sfb_stats.rehash_intval);
	net_timernsec(&sp->sfb_update_interval, &sps->update_interval);
	sps->sfbstats = sp->sfb_stats;

	_CASSERT(sizeof ((*sp->sfb_bins)[0].stats) ==
	    sizeof (sps->binstats[0].stats));

	bcopy(&(*sp->sfb_bins)[0].stats, &sps->binstats[0].stats,
	    sizeof (sps->binstats[0].stats));
	bcopy(&(*sp->sfb_bins)[1].stats, &sps->binstats[1].stats,
	    sizeof (sps->binstats[1].stats));
}

static void
sfb_swap_bins(struct sfb *sp, u_int32_t len)
{
	int i, j, s;

	if (sp->sfb_flags & SFBF_SUSPENDED)
		return;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	(*sp->sfb_bins)[s].fudge = sfb_random(sp); /* recompute perturbation */
	sp->sfb_clearpkts = len;
	sp->sfb_stats.num_rehash++;

	s = (sp->sfb_current ^= 1);	/* flip the bit (swap current) */

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, set %d is now current, "
		    "qlen=%d\n", if_name(sp->sfb_ifp), sp->sfb_qid, s, len);
	}

	/* clear freezetime for all current bins */
	bzero(&(*sp->sfb_bins)[s].freezetime,
	    sizeof ((*sp->sfb_bins)[s].freezetime));

	/* clear/adjust bin statistics and flow control lists */
	for (i = 0; i < SFB_BINS; i++) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);

		if (!STAILQ_EMPTY(&fcl->fclist))
			sfb_fclist_append(sp, fcl);

		for (j = 0; j < SFB_LEVELS; j++) {
			struct sfbbinstats *cbin, *wbin;

			cbin = SFB_BINST(sp, j, i, s);		/* current */
			wbin = SFB_BINST(sp, j, i, s ^ 1);	/* warm-up */

			cbin->pkts = 0;
			cbin->bytes = 0;
			if (cbin->pmark > SFB_MAX_PMARK)
				cbin->pmark = SFB_MAX_PMARK;
			if (cbin->pmark < 0)
				cbin->pmark = 0;

			/*
			 * Keep pmark from before to identify
			 * non-responsive flows immediately.
			 */
			if (wbin->pmark > SFB_PMARK_WARM)
				wbin->pmark = SFB_PMARK_WARM;
		}
	}
}
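
/*
 * Why the warm-up cap matters (illustrative, using the default Q14
 * constants): a non-responsive flow whose bins had saturated at
 * pmark == 1.0 restarts in the new current set at SFB_PMARK_WARM (0.2)
 * rather than at 0, so it needs only about (0.98 - 0.2) / 0.005 = 156
 * increments to cross SFB_PMARK_TH again, instead of 196 from a cold
 * start.
 */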

static inline int
sfb_pcheck(struct sfb *sp, uint32_t pkt_sfb_hash)
{
#if SFB_LEVELS != 2
	int i, n;
#endif /* SFB_LEVELS != 2 */
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;
	int s;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * For current bins, returns 1 if all pmark >= SFB_PMARK_TH,
	 * 0 otherwise; optimize for SFB_LEVELS=2.
	 */
#if SFB_LEVELS == 2
	/*
	 * Level 0: bin index at [0] for set 0; [2] for set 1
	 * Level 1: bin index at [1] for set 0; [3] for set 1
	 */
	if (SFB_BINST(sp, 0, SFB_BINMASK(pkt_sfb_hash8[(s << 1)]),
	    s)->pmark < SFB_PMARK_TH ||
	    SFB_BINST(sp, 1, SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]),
	    s)->pmark < SFB_PMARK_TH)
		return (0);
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);

		if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH)
			return (0);
	}
#endif /* SFB_LEVELS != 2 */
	return (1);
}

static int
sfb_penalize(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t *pkt_sfb_flags,
    struct timespec *now)
{
	struct timespec delta = { 0, 0 };
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	/* If minimum pmark of current bins is < SFB_PMARK_TH, we're done */
	if (!sfb_ratelimit || !sfb_pcheck(sp, pkt_sfb_hash))
		return (0);

	net_timersub(now, &sp->sfb_pboxfreeze, &delta);
	if (net_timercmp(&delta, &sp->sfb_pboxtime, <)) {
#if SFB_LEVELS != 2
		int i;
#endif /* SFB_LEVELS != 2 */
		struct sfbbinstats *bin;
		int n, w;

		w = sp->sfb_current ^ 1;
		VERIFY((w + (w ^ 1)) == 1);

		/*
		 * Update warm-up bins; optimize for SFB_LEVELS=2
		 */
#if SFB_LEVELS == 2
		/* Level 0: bin index at [0] for set 0; [2] for set 1 */
		n = SFB_BINMASK(pkt_sfb_hash8[(w << 1)]);
		bin = SFB_BINST(sp, 0, n, w);
		if (bin->pkts >= sp->sfb_allocation)
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, w), now);

		/* Level 1: bin index at [1] for set 0; [3] for set 1 */
		n = SFB_BINMASK(pkt_sfb_hash8[(w << 1) + 1]);
		bin = SFB_BINST(sp, 1, n, w);
		if (bin->pkts >= sp->sfb_allocation)
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, w), now);
#else /* SFB_LEVELS != 2 */
		for (i = 0; i < SFB_LEVELS; i++) {
			if (w == 0)	/* set 0, bin index [0,1] */
				n = SFB_BINMASK(pkt_sfb_hash8[i]);
			else		/* set 1, bin index [2,3] */
				n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);

			bin = SFB_BINST(sp, i, n, w);
			if (bin->pkts >= sp->sfb_allocation) {
				sfb_increment_bin(sp, bin,
				    SFB_BINFT(sp, i, n, w), now);
			}
		}
#endif /* SFB_LEVELS != 2 */
		return (1);
	}

	/* non-conformant or else misclassified flow; queue it anyway */
	*pkt_sfb_flags |= SFB_PKT_PBOX;
	sp->sfb_pboxfreeze = *now;

	return (0);
}

static void
sfb_adjust_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now, boolean_t inc)
{
	struct timespec delta;

	net_timersub(now, ft, &delta);
	if (net_timercmp(&delta, &sp->sfb_holdtime, <)) {
		if (classq_verbose > 1) {
			log(LOG_DEBUG, "%s: SFB qid=%d, %s update frozen "
			    "(delta=%llu nsec)\n", if_name(sp->sfb_ifp),
			    sp->sfb_qid, inc ? "increment" : "decrement",
			    (u_int64_t)delta.tv_nsec);
		}
		return;
	}

	/* increment/decrement marking probability */
	*ft = *now;
	if (inc)
		SFB_PMARK_INC(bin);
	else
		SFB_PMARK_DEC(bin);
}

static void
sfb_decrement_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
	sfb_adjust_bin(sp, bin, ft, now, FALSE);
}

static void
sfb_increment_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
	sfb_adjust_bin(sp, bin, ft, now, TRUE);
}

static inline void
sfb_dq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len,
    struct timespec *now, u_int32_t qsize)
{
#if SFB_LEVELS != 2 || SFB_FC_LEVEL != 0
	int i;
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	struct sfbbinstats *bin;
	int s, n;
	struct sfb_fcl *fcl = NULL;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2 and SFB_FC_LEVEL=0
	 */
#if SFB_LEVELS == 2 && SFB_FC_LEVEL == 0
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= pkt_len);
	bin->pkts--;
	bin->bytes -= pkt_len;

	if (bin->pkts == 0)
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);

	/* Deliver flow control feedback to the sockets */
	if (SFB_QUEUE_DELAYBASED(sp)) {
		if (!(SFB_IS_DELAYHIGH(sp)) ||
		    bin->bytes <= sp->sfb_fc_threshold ||
		    bin->pkts == 0 || qsize == 0)
			fcl = SFB_FC_LIST(sp, n);
	} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
		fcl = SFB_FC_LIST(sp, n);
	}

	if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
		sfb_fclist_append(sp, fcl);
	fcl = NULL;

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= (u_int64_t)pkt_len);
	bin->pkts--;
	bin->bytes -= pkt_len;
	if (bin->pkts == 0)
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
#else /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);

		bin = SFB_BINST(sp, i, n, s);

		VERIFY(bin->pkts > 0 && bin->bytes >= pkt_len);
		bin->pkts--;
		bin->bytes -= pkt_len;
		if (bin->pkts == 0)
			sfb_decrement_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		if (i != SFB_FC_LEVEL)
			continue;
		if (SFB_QUEUE_DELAYBASED(sp)) {
			if (!(SFB_IS_DELAYHIGH(sp)) ||
			    bin->bytes <= sp->sfb_fc_threshold)
				fcl = SFB_FC_LIST(sp, n);
		} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
			fcl = SFB_FC_LIST(sp, n);
		}
		if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
			sfb_fclist_append(sp, fcl);
		fcl = NULL;
	}
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
}

static inline void
sfb_eq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len)
{
#if SFB_LEVELS != 2
	int i, n;
#endif /* SFB_LEVELS != 2 */
	int s;
	struct sfbbinstats *bin;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
#if SFB_LEVELS == 2
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	bin = SFB_BINST(sp, 0,
	    SFB_BINMASK(pkt_sfb_hash8[(s << 1)]), s);
	bin->pkts++;
	bin->bytes += pkt_len;

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	bin = SFB_BINST(sp, 1,
	    SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]), s);
	bin->pkts++;
	bin->bytes += pkt_len;

#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);

		bin = SFB_BINST(sp, i, n, s);
		bin->pkts++;
		bin->bytes += pkt_len;
	}
#endif /* SFB_LEVELS != 2 */
}

static boolean_t
sfb_bin_addfcentry(struct sfb *sp, pktsched_pkt_t *pkt, uint32_t pkt_sfb_hash,
    uint8_t flowsrc, uint32_t flowid)
{
	struct flowadv_fcentry *fce;
	struct sfb_fcl *fcl;
	int s;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	if (flowid == 0) {
		sp->sfb_stats.null_flowid++;
		return (FALSE);
	}

	/*
	 * Use value at index 0 for set 0 and
	 * value at index 2 for set 1
	 */
	fcl = SFB_FC_LIST(sp, SFB_BINMASK(pkt_sfb_hash8[(s << 1)]));
	STAILQ_FOREACH(fce, &fcl->fclist, fce_link) {
		if ((uint8_t)fce->fce_flowsrc_type == flowsrc &&
		    fce->fce_flowid == flowid) {
			/* Already on flow control list; just return */
			return (TRUE);
		}
	}

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	fce = pktsched_alloc_fcentry(pkt, sp->sfb_ifp, M_WAITOK);
	if (fce != NULL) {
		STAILQ_INSERT_TAIL(&fcl->fclist, fce, fce_link);
		fcl->cnt++;
		sp->sfb_stats.flow_controlled++;
	}

	return (fce != NULL);
}

/*
 * Check whether this flow needs to be flow-controlled or whether this
 * packet needs to be dropped.
 */
static int
sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin)
{
	int ret = 0;

	if (SFB_QUEUE_DELAYBASED(sp)) {
		/*
		 * Mark or drop if this bin has more
		 * bytes than the flowcontrol threshold.
		 */
		if (SFB_IS_DELAYHIGH(sp) &&
		    bin->bytes >= (sp->sfb_fc_threshold << 1))
			ret = 1;
	} else {
		if (bin->pkts >= sp->sfb_allocation &&
		    bin->pkts >= sp->sfb_drop_thresh)
			ret = 1;	/* drop or mark */
	}
	return (ret);
}
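
/*
 * Worked example (illustrative): on a queue with qlim == 128 and
 * automatic allocation, sfb_resetq() sets sfb_allocation = 42 and
 * sfb_drop_thresh = 63, so in packet-based mode a bin trips once it
 * holds 63 or more packets.  In delay-based mode with a 32KB
 * fc_threshold, a bin trips once it holds 64KB or more while the
 * delay is flagged high.
 */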

/*
 * Early-drop probability is kept in the pmark of each bin of the flow.
 */
static int
sfb_drop_early(struct sfb *sp, uint32_t pkt_sfb_hash, u_int16_t *pmin,
    struct timespec *now)
{
#if SFB_LEVELS != 2
	int i;
#endif /* SFB_LEVELS != 2 */
	struct sfbbinstats *bin;
	int s, n, ret = 0;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	*pmin = (u_int16_t)-1;

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
#if SFB_LEVELS == 2
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);
	if (*pmin > (u_int16_t)bin->pmark)
		*pmin = (u_int16_t)bin->pmark;

	/* Update SFB probability */
	if (bin->pkts >= sp->sfb_allocation)
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);

	ret = sfb_bin_mark_or_drop(sp, bin);

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);
	if (*pmin > (u_int16_t)bin->pmark)
		*pmin = (u_int16_t)bin->pmark;

	if (bin->pkts >= sp->sfb_allocation)
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);

		bin = SFB_BINST(sp, i, n, s);
		if (*pmin > (u_int16_t)bin->pmark)
			*pmin = (u_int16_t)bin->pmark;

		if (bin->pkts >= sp->sfb_allocation)
			sfb_increment_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		if (i == SFB_FC_LEVEL)
			ret = sfb_bin_mark_or_drop(sp, bin);
	}
#endif /* SFB_LEVELS != 2 */

	if (sp->sfb_flags & SFBF_SUSPENDED)
		ret = 1;	/* drop or mark */

	return (ret);
}
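
/*
 * The pmin returned above is the smaller of the flow's current-set
 * pmark values, i.e. its Blue marking probability in Q14 format; for
 * example (illustrative), pmin == 8192 corresponds to a marking
 * probability of 8192/16384 = 0.5.  sfb_addq() consults it when
 * deciding whether an eligible TCP packet should be ECN-marked
 * instead of dropped.
 */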

static void
sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *q,
    struct timespec *now)
{
	struct timespec max_getqtime;

	if (!SFB_QUEUE_DELAYBASED(sp) || SFB_IS_DELAYHIGH(sp) ||
	    qsize(q) <= SFB_MIN_FC_THRESHOLD_BYTES ||
	    !net_timerisset(&sp->sfb_getqtime))
		return;

	net_timeradd(&sp->sfb_getqtime, &sp->sfb_update_interval,
	    &max_getqtime);
	if (net_timercmp(now, &max_getqtime, >)) {
		/*
		 * No packets have been dequeued in an update interval
		 * worth of time; it means that the queue is stalled.
		 */
		SFB_SET_DELAY_HIGH(sp, q);
		sp->sfb_stats.dequeue_stall++;
	}
}

#define	DTYPE_NODROP	0	/* no drop */
#define	DTYPE_FORCED	1	/* a "forced" drop */
#define	DTYPE_EARLY	2	/* an "unforced" (early) drop */

int
sfb_addq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt,
    struct pf_mtag *t)
{
#if !PF_ECN
#pragma unused(t)
#endif /* !PF_ECN */
	struct timespec now;
	int droptype, s;
	uint16_t pmin;
	int fc_adv = 0;
	int ret = CLASSQEQ_SUCCESS;
	uint32_t maxqsize = 0;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_sfb_hash;
	uint16_t *pkt_sfb_hash16;
	uint32_t *pkt_sfb_flags;
	uint32_t pkt_flowid;
	uint32_t *pkt_flags;
	uint8_t pkt_proto, pkt_flowsrc;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL);
	pkt_sfb_hash = pktsched_get_pkt_sfb_vars(pkt, &pkt_sfb_flags);
	pkt_sfb_hash16 = (uint16_t *)pkt_sfb_hash;

	if (pkt->pktsched_ptype == QP_MBUF) {
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
	}

	if (*pkt_timestamp > 0) {
		net_nsectimer(pkt_timestamp, &now);
	} else {
		nanouptime(&now);
		net_timernsec(&now, pkt_timestamp);
	}

	/* time to swap the bins? */
	if (net_timercmp(&now, &sp->sfb_nextreset, >=)) {
		net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
		sfb_swap_bins(sp, qlen(q));
		s = sp->sfb_current;
		VERIFY((s + (s ^ 1)) == 1);
	}

	if (!net_timerisset(&sp->sfb_update_time)) {
		net_timeradd(&now, &sp->sfb_update_interval,
		    &sp->sfb_update_time);
	}

	/*
	 * If getq time is not set because this is the first packet
	 * or after idle time, set it now so that we can detect a stall.
	 */
	if (qsize(q) == 0 && !net_timerisset(&sp->sfb_getqtime))
		sp->sfb_getqtime = now;

	*pkt_sfb_flags = 0;
	pkt_sfb_hash16[s] =
	    (SFB_HASH(&pkt_flowid, sizeof (pkt_flowid),
	    (*sp->sfb_bins)[s].fudge) & SFB_HASHMASK);
	pkt_sfb_hash16[s ^ 1] =
	    (SFB_HASH(&pkt_flowid, sizeof (pkt_flowid),
	    (*sp->sfb_bins)[s ^ 1].fudge) & SFB_HASHMASK);

	/* check if the queue has been stalled */
	sfb_detect_dequeue_stall(sp, q, &now);

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (sfb_drop_early(sp, *pkt_sfb_hash, &pmin, &now)) {
		/* flow control, mark or drop by sfb */
		if ((sp->sfb_flags & SFBF_FLOWCTL) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/* drop all during suspension or for non-TCP */
			if ((sp->sfb_flags & SFBF_SUSPENDED) ||
			    pkt_proto != IPPROTO_TCP) {
				droptype = DTYPE_EARLY;
				sp->sfb_stats.drop_early++;
			}
		}
#if PF_ECN
		/* XXX: only supported for mbuf */
		else if ((sp->sfb_flags & SFBF_ECN) &&
		    (pkt->pktsched_ptype == QP_MBUF) &&
		    (pkt_proto == IPPROTO_TCP) && /* only for TCP */
		    ((sfb_random(sp) & SFB_MAX_PMARK) <= pmin) &&
		    mark_ecn(m, t, sp->sfb_flags) &&
		    !(sp->sfb_flags & SFBF_SUSPENDED)) {
			/* successfully marked; do not drop. */
			sp->sfb_stats.marked_packets++;
		}
#endif /* PF_ECN */
		else {
			/* unforced drop by sfb */
			droptype = DTYPE_EARLY;
			sp->sfb_stats.drop_early++;
		}
	}

	/* non-responsive flow penalty? */
	if (droptype == DTYPE_NODROP && sfb_penalize(sp, *pkt_sfb_hash,
	    pkt_sfb_flags, &now)) {
		droptype = DTYPE_FORCED;
		sp->sfb_stats.drop_pbox++;
	}

	if (SFB_QUEUE_DELAYBASED(sp))
		maxqsize = SFB_QUEUE_DELAYBASED_MAXSIZE;
	else
		maxqsize = qlimit(q);

	/*
	 * When the queue length hits the queue limit, make it a forced
	 * drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= maxqsize) {
		if (pkt_proto == IPPROTO_TCP &&
		    qlen(q) < (maxqsize + (maxqsize >> 1)) &&
		    ((*pkt_flags & PKTF_TCP_REXMT) ||
		    (sp->sfb_flags & SFBF_LAST_PKT_DROPPED))) {
			/*
			 * At some level, dropping packets will make the
			 * flows back off and will keep memory requirements
			 * under control. But we should not cause a tail
			 * drop because it can take a long time for a
			 * TCP flow to recover. We should try to drop
			 * alternate packets instead.
			 */
			sp->sfb_flags &= ~SFBF_LAST_PKT_DROPPED;
		} else {
			droptype = DTYPE_FORCED;
			sp->sfb_stats.drop_queue++;
			sp->sfb_flags |= SFBF_LAST_PKT_DROPPED;
		}
	}

	if (fc_adv == 1 && droptype != DTYPE_FORCED &&
	    sfb_bin_addfcentry(sp, pkt, *pkt_sfb_hash, pkt_flowsrc,
	    pkt_flowid)) {
		/* deliver flow control advisory error */
		if (droptype == DTYPE_NODROP) {
			ret = CLASSQEQ_SUCCESS_FC;
			VERIFY(!(sp->sfb_flags & SFBF_SUSPENDED));
		} else if (sp->sfb_flags & SFBF_SUSPENDED) {
			/* drop due to suspension */
			ret = CLASSQEQ_DROP_SP;
		} else {
			/* drop due to flow-control */
			ret = CLASSQEQ_DROP_FC;
		}
	}
	/* if successful, enqueue this packet; otherwise drop it */
	if (droptype == DTYPE_NODROP) {
		VERIFY(pkt->pktsched_ptype == qptype(q));
		_addq(q, pkt->pktsched_pkt);
	} else {
		IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
		return ((ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP);
	}

	if (!(*pkt_sfb_flags & SFB_PKT_PBOX))
		sfb_eq_update_bins(sp, *pkt_sfb_hash,
		    pktsched_get_pkt_len(pkt));
	else
		sp->sfb_stats.pbox_packets++;

	/* successfully queued */
	return (ret);
}
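
/*
 * To summarize the enqueue path above: sfb_addq() (1) swaps the bin
 * sets if the rehash interval expired, (2) hashes the flow ID into
 * both sets, (3) runs sfb_drop_early() to flow-control, ECN-mark or
 * early-drop, (4) applies the penalty box to non-responsive flows,
 * (5) forces a drop at the queue limit (sparing alternate TCP packets),
 * and only then (6) enqueues and charges the packet to its bins via
 * sfb_eq_update_bins().
 */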

static void *
sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge,
    pktsched_pkt_t *pkt)
{
	struct timespec now;
	classq_pkt_type_t ptype;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_flags;
	uint32_t *pkt_sfb_flags;
	uint32_t *pkt_sfb_hash;
	void *p;

	if (!purge && (sp->sfb_flags & SFBF_SUSPENDED))
		return (NULL);

	nanouptime(&now);

	/* flow of 0 means head of queue */
	if ((p = ((flow == 0) ? _getq(q) : _getq_flow(q, flow))) == NULL) {
		if (!purge)
			net_timerclear(&sp->sfb_getqtime);
		return (NULL);
	}

	ptype = qptype(q);
	pktsched_pkt_encap(pkt, ptype, p);
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL,
	    NULL, NULL, NULL);
	pkt_sfb_hash = pktsched_get_pkt_sfb_vars(pkt, &pkt_sfb_flags);

	/* See comments in <rdar://problem/14040693> */
	if (ptype == QP_MBUF)
		VERIFY(*pkt_flags & PKTF_PRIV_GUARDED);

	if (!purge) {
		/* calculate EWMA of dequeues */
		if (net_timerisset(&sp->sfb_getqtime)) {
			struct timespec delta;
			u_int64_t avg, new;
			net_timersub(&now, &sp->sfb_getqtime, &delta);
			net_timernsec(&delta, &new);
			avg = sp->sfb_stats.dequeue_avg;
			if (avg > 0) {
				int decay = DEQUEUE_DECAY;
				/*
				 * If the time since the last dequeue is
				 * significantly greater than the current
				 * average, weigh the average more against
				 * the old value.
				 */
				if (DEQUEUE_SPIKE(new, avg))
					decay += 5;
				avg = (((avg << decay) - avg) + new) >> decay;
			} else {
				avg = new;
			}
			sp->sfb_stats.dequeue_avg = avg;
		}
		sp->sfb_getqtime = now;
	}

	if (!purge && SFB_QUEUE_DELAYBASED(sp)) {
		u_int64_t dequeue_ns, queue_delay = 0;
		net_timernsec(&now, &dequeue_ns);
		if (dequeue_ns > *pkt_timestamp)
			queue_delay = dequeue_ns - *pkt_timestamp;

		if (sp->sfb_min_qdelay == 0 ||
		    (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay))
			sp->sfb_min_qdelay = queue_delay;
		if (net_timercmp(&now, &sp->sfb_update_time, >=)) {
			if (sp->sfb_min_qdelay > sp->sfb_target_qdelay) {
				if (!SFB_IS_DELAYHIGH(sp))
					SFB_SET_DELAY_HIGH(sp, q);
			} else {
				sp->sfb_flags &= ~(SFBF_DELAYHIGH);
				sp->sfb_fc_threshold = 0;
			}
			net_timeradd(&now, &sp->sfb_update_interval,
			    &sp->sfb_update_time);
			sp->sfb_min_qdelay = 0;
		}
	}
	*pkt_timestamp = 0;

	/*
	 * Clearpkts are the ones which were in the queue when the hash
	 * function was perturbed. Since the perturbation value (fudge),
	 * and thus bin information for these packets is not known, we do
	 * not change accounting information while dequeuing these packets.
	 * It is important not to set the hash interval too small for this
	 * reason. A rule of thumb is to set it to K*D, where D is the time
	 * taken to drain the queue.
	 */
	if (*pkt_sfb_flags & SFB_PKT_PBOX) {
		*pkt_sfb_flags &= ~SFB_PKT_PBOX;
		if (sp->sfb_clearpkts > 0)
			sp->sfb_clearpkts--;
	} else if (sp->sfb_clearpkts > 0) {
		sp->sfb_clearpkts--;
	} else {
		sfb_dq_update_bins(sp, *pkt_sfb_hash, pktsched_get_pkt_len(pkt),
		    &now, qsize(q));
	}

	/* See comments in <rdar://problem/14040693> */
	if (ptype == QP_MBUF)
		*pkt_flags &= ~PKTF_PRIV_GUARDED;

	/*
	 * If the queue becomes empty before the update interval, reset
	 * the flow control threshold
	 */
	if (qsize(q) == 0) {
		sp->sfb_flags &= ~SFBF_DELAYHIGH;
		sp->sfb_min_qdelay = 0;
		sp->sfb_fc_threshold = 0;
		net_timerclear(&sp->sfb_update_time);
		net_timerclear(&sp->sfb_getqtime);
	}
	return (p);
}
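
/*
 * Worked example of the delay-based logic above (illustrative
 * numbers): with a 10ms target_qdelay and a 100ms update interval, a
 * packet enqueued at t = 5ms and dequeued at t = 20ms contributes a
 * 15ms sample to sfb_min_qdelay.  If, at the end of the interval, even
 * the smallest observed delay still exceeds the 10ms target, the queue
 * flips to DELAYHIGH and flow control kicks in; an empty queue resets
 * this state immediately.
 */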

void
sfb_getq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt)
{
	sfb_getq_flow(sp, q, 0, FALSE, pkt);
}

void
sfb_purgeq(struct sfb *sp, class_queue_t *q, u_int32_t flow, u_int32_t *packets,
    u_int32_t *bytes)
{
	u_int32_t cnt = 0, len = 0;
	pktsched_pkt_t pkt;

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	while (sfb_getq_flow(sp, q, flow, TRUE, &pkt) != NULL) {
		cnt++;
		len += pktsched_get_pkt_len(&pkt);
		pktsched_free_pkt(&pkt);
	}

	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}

void
sfb_updateq(struct sfb *sp, cqev_t ev)
{
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	switch (ev) {
	case CLASSQ_EV_LINK_BANDWIDTH: {
		u_int64_t eff_rate = ifnet_output_linkrate(ifp);

		/* update parameters only if rate has changed */
		if (eff_rate == sp->sfb_eff_rate)
			break;

		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, adapting to new "
			    "eff_rate=%llu bps\n", if_name(ifp), sp->sfb_qid,
			    eff_rate);
		}
		sfb_calc_holdtime(sp, eff_rate);
		sfb_calc_pboxtime(sp, eff_rate);
		ifclassq_calc_target_qdelay(ifp, &sp->sfb_target_qdelay);
		sfb_calc_update_interval(sp, eff_rate);
		break;
	}

	case CLASSQ_EV_LINK_UP:
	case CLASSQ_EV_LINK_DOWN:
		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, resetting due to "
			    "link %s\n", if_name(ifp), sp->sfb_qid,
			    (ev == CLASSQ_EV_LINK_UP) ? "UP" : "DOWN");
		}
		sfb_resetq(sp, ev);
		break;

	case CLASSQ_EV_LINK_LATENCY:
	case CLASSQ_EV_LINK_MTU:
	default:
		break;
	}
}

int
sfb_suspendq(struct sfb *sp, class_queue_t *q, boolean_t on)
{
#pragma unused(q)
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	if ((on && (sp->sfb_flags & SFBF_SUSPENDED)) ||
	    (!on && !(sp->sfb_flags & SFBF_SUSPENDED)))
		return (0);

	if (!(sp->sfb_flags & SFBF_FLOWCTL)) {
		log(LOG_ERR, "%s: SFB qid=%d, unable to %s queue since "
		    "flow-control is not enabled", if_name(ifp), sp->sfb_qid,
		    (on ? "suspend" : "resume"));
		return (ENOTSUP);
	}

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, setting state to %s",
		    if_name(ifp), sp->sfb_qid, (on ? "SUSPENDED" : "RUNNING"));
	}

	if (on) {
		sp->sfb_flags |= SFBF_SUSPENDED;
	} else {
		sp->sfb_flags &= ~SFBF_SUSPENDED;
		sfb_swap_bins(sp, qlen(q));
	}

	return (0);
}