1 | /* |
2 | * Copyright (c) 2011-2017 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | /* |
30 | * Copyright (c) 2010 Fabio Checconi, Luigi Rizzo, Paolo Valente |
31 | * All rights reserved |
32 | * |
33 | * Redistribution and use in source and binary forms, with or without |
34 | * modification, are permitted provided that the following conditions |
35 | * are met: |
36 | * 1. Redistributions of source code must retain the above copyright |
37 | * notice, this list of conditions and the following disclaimer. |
38 | * 2. Redistributions in binary form must reproduce the above copyright |
39 | * notice, this list of conditions and the following disclaimer in the |
40 | * documentation and/or other materials provided with the distribution. |
41 | * |
42 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
43 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
44 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
45 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
46 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
47 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
48 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
49 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
50 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
51 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
52 | * SUCH DAMAGE. |
53 | */ |
54 | |
55 | /* |
56 | * Quick Fair Queueing is described in |
57 | * "QFQ: Efficient Packet Scheduling with Tight Bandwidth Distribution |
58 | * Guarantees" by Fabio Checconi, Paolo Valente, and Luigi Rizzo. |
59 | * |
60 | * This code is ported from the dummynet(4) QFQ implementation. |
61 | * See also http://info.iet.unipi.it/~luigi/qfq/ |
62 | */ |
63 | |
64 | #include <sys/cdefs.h> |
65 | #include <sys/param.h> |
66 | #include <sys/malloc.h> |
67 | #include <sys/mbuf.h> |
68 | #include <sys/systm.h> |
69 | #include <sys/errno.h> |
70 | #include <sys/kernel.h> |
71 | #include <sys/syslog.h> |
72 | |
73 | #include <kern/zalloc.h> |
74 | |
75 | #include <net/if.h> |
76 | #include <net/net_osdep.h> |
77 | |
78 | #include <net/pktsched/pktsched_qfq.h> |
79 | #include <netinet/in.h> |
80 | |
81 | |
82 | /* |
83 | * function prototypes |
84 | */ |
85 | static int qfq_enqueue_ifclassq(struct ifclassq *, void *, classq_pkt_type_t, |
86 | boolean_t *); |
87 | static void *qfq_dequeue_ifclassq(struct ifclassq *, classq_pkt_type_t *); |
88 | static int qfq_request_ifclassq(struct ifclassq *, cqrq_t, void *); |
89 | static int qfq_clear_interface(struct qfq_if *); |
90 | static struct qfq_class *qfq_class_create(struct qfq_if *, u_int32_t, |
91 | u_int32_t, u_int32_t, u_int32_t, u_int32_t, classq_pkt_type_t); |
92 | static int qfq_class_destroy(struct qfq_if *, struct qfq_class *); |
93 | static int qfq_destroy_locked(struct qfq_if *); |
94 | static inline int qfq_addq(struct qfq_class *, pktsched_pkt_t *, |
95 | struct pf_mtag *); |
96 | static inline void qfq_getq(struct qfq_class *, pktsched_pkt_t *); |
97 | static void qfq_purgeq(struct qfq_if *, struct qfq_class *, u_int32_t, |
98 | u_int32_t *, u_int32_t *); |
99 | static void qfq_purge_sc(struct qfq_if *, cqrq_purge_sc_t *); |
100 | static void qfq_updateq(struct qfq_if *, struct qfq_class *, cqev_t); |
101 | static int qfq_throttle(struct qfq_if *, cqrq_throttle_t *); |
102 | static int qfq_resumeq(struct qfq_if *, struct qfq_class *); |
103 | static int qfq_suspendq(struct qfq_if *, struct qfq_class *); |
104 | static int qfq_stat_sc(struct qfq_if *, cqrq_stat_sc_t *); |
105 | static inline struct qfq_class *qfq_clh_to_clp(struct qfq_if *, u_int32_t); |
106 | static const char *qfq_style(struct qfq_if *); |
107 | |
108 | static inline int qfq_gt(u_int64_t, u_int64_t); |
109 | static inline u_int64_t qfq_round_down(u_int64_t, u_int32_t); |
110 | static inline struct qfq_group *qfq_ffs(struct qfq_if *, pktsched_bitmap_t); |
111 | static int qfq_calc_index(struct qfq_class *, u_int32_t, u_int32_t); |
112 | static inline pktsched_bitmap_t mask_from(pktsched_bitmap_t, int); |
113 | static inline u_int32_t qfq_calc_state(struct qfq_if *, struct qfq_group *); |
114 | static inline void qfq_move_groups(struct qfq_if *, pktsched_bitmap_t, |
115 | int, int); |
116 | static inline void qfq_unblock_groups(struct qfq_if *, int, u_int64_t); |
117 | static inline void qfq_make_eligible(struct qfq_if *, u_int64_t); |
118 | static inline void qfq_slot_insert(struct qfq_if *, struct qfq_group *, |
119 | struct qfq_class *, u_int64_t); |
120 | static inline void qfq_front_slot_remove(struct qfq_group *); |
121 | static inline struct qfq_class *qfq_slot_scan(struct qfq_if *, |
122 | struct qfq_group *); |
123 | static inline void qfq_slot_rotate(struct qfq_if *, struct qfq_group *, |
124 | u_int64_t); |
125 | static inline void qfq_update_eligible(struct qfq_if *, u_int64_t); |
126 | static inline int qfq_update_class(struct qfq_if *, struct qfq_group *, |
127 | struct qfq_class *); |
128 | static inline void qfq_update_start(struct qfq_if *, struct qfq_class *); |
129 | static inline void qfq_slot_remove(struct qfq_if *, struct qfq_group *, |
130 | struct qfq_class *); |
131 | static void qfq_deactivate_class(struct qfq_if *, struct qfq_class *); |
132 | static const char *qfq_state2str(int); |
133 | #if QFQ_DEBUG |
134 | static void qfq_dump_groups(struct qfq_if *, u_int32_t); |
135 | static void qfq_dump_sched(struct qfq_if *, const char *); |
136 | #endif /* QFQ_DEBUG */ |
137 | |
138 | #define QFQ_ZONE_MAX 32 /* maximum elements in zone */ |
139 | #define QFQ_ZONE_NAME "pktsched_qfq" /* zone name */ |
140 | |
141 | static unsigned int qfq_size; /* size of zone element */ |
142 | static struct zone *qfq_zone; /* zone for qfq */ |
143 | |
144 | #define QFQ_CL_ZONE_MAX 32 /* maximum elements in zone */ |
145 | #define QFQ_CL_ZONE_NAME "pktsched_qfq_cl" /* zone name */ |
146 | |
147 | static unsigned int qfq_cl_size; /* size of zone element */ |
148 | static struct zone *qfq_cl_zone; /* zone for qfq_class */ |
149 | |
150 | /* |
151 | * Maximum number of consecutive slots occupied by backlogged classes |
152 | * inside a group. This is approx lmax/lmin + 5. Used when ALTQ is |
153 | * available. |
154 | * |
155 | * XXX check because it poses constraints on MAX_INDEX |
156 | */ |
157 | #define QFQ_MAX_SLOTS 32 /* default when ALTQ is available */ |
158 | |
159 | void |
160 | qfq_init(void) |
161 | { |
162 | qfq_size = sizeof (struct qfq_if); |
163 | qfq_zone = zinit(qfq_size, QFQ_ZONE_MAX * qfq_size, |
164 | 0, QFQ_ZONE_NAME); |
165 | if (qfq_zone == NULL) { |
		panic("%s: failed allocating %s", __func__, QFQ_ZONE_NAME);
167 | /* NOTREACHED */ |
168 | } |
169 | zone_change(qfq_zone, Z_EXPAND, TRUE); |
170 | zone_change(qfq_zone, Z_CALLERACCT, TRUE); |
171 | |
172 | qfq_cl_size = sizeof (struct qfq_class); |
173 | qfq_cl_zone = zinit(qfq_cl_size, QFQ_CL_ZONE_MAX * qfq_cl_size, |
174 | 0, QFQ_CL_ZONE_NAME); |
175 | if (qfq_cl_zone == NULL) { |
		panic("%s: failed allocating %s", __func__, QFQ_CL_ZONE_NAME);
177 | /* NOTREACHED */ |
178 | } |
179 | zone_change(qfq_cl_zone, Z_EXPAND, TRUE); |
180 | zone_change(qfq_cl_zone, Z_CALLERACCT, TRUE); |
181 | } |
182 | |
183 | struct qfq_if * |
184 | qfq_alloc(struct ifnet *ifp, int how) |
185 | { |
186 | struct qfq_if *qif; |
187 | |
188 | qif = (how == M_WAITOK) ? zalloc(qfq_zone) : zalloc_noblock(qfq_zone); |
189 | if (qif == NULL) |
190 | return (NULL); |
191 | |
192 | bzero(qif, qfq_size); |
193 | qif->qif_ifq = &ifp->if_snd; |
194 | |
195 | qif->qif_maxclasses = IFCQ_SC_MAX; |
196 | /* |
197 | * TODO: adi@apple.com |
198 | * |
199 | * Ideally I would like to have the following |
200 | * but QFQ needs further modifications. |
201 | * |
202 | * qif->qif_maxslots = IFCQ_SC_MAX; |
203 | */ |
204 | qif->qif_maxslots = QFQ_MAX_SLOTS; |
205 | |
206 | if ((qif->qif_class_tbl = _MALLOC(sizeof (struct qfq_class *) * |
207 | qif->qif_maxclasses, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) { |
		log(LOG_ERR, "%s: %s unable to allocate class table array\n",
209 | if_name(ifp), qfq_style(qif)); |
210 | goto error; |
211 | } |
212 | |
213 | if ((qif->qif_groups = _MALLOC(sizeof (struct qfq_group *) * |
214 | (QFQ_MAX_INDEX + 1), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) { |
		log(LOG_ERR, "%s: %s unable to allocate group array\n",
216 | if_name(ifp), qfq_style(qif)); |
217 | goto error; |
218 | } |
219 | |
220 | if (pktsched_verbose) { |
		log(LOG_DEBUG, "%s: %s scheduler allocated\n",
222 | if_name(ifp), qfq_style(qif)); |
223 | } |
224 | |
225 | return (qif); |
226 | |
227 | error: |
228 | if (qif->qif_class_tbl != NULL) { |
229 | _FREE(qif->qif_class_tbl, M_DEVBUF); |
230 | qif->qif_class_tbl = NULL; |
231 | } |
232 | if (qif->qif_groups != NULL) { |
233 | _FREE(qif->qif_groups, M_DEVBUF); |
234 | qif->qif_groups = NULL; |
235 | } |
236 | zfree(qfq_zone, qif); |
237 | |
238 | return (NULL); |
239 | } |
240 | |
241 | int |
242 | qfq_destroy(struct qfq_if *qif) |
243 | { |
244 | struct ifclassq *ifq = qif->qif_ifq; |
245 | int err; |
246 | |
247 | IFCQ_LOCK(ifq); |
248 | err = qfq_destroy_locked(qif); |
249 | IFCQ_UNLOCK(ifq); |
250 | |
251 | return (err); |
252 | } |
253 | |
254 | static int |
255 | qfq_destroy_locked(struct qfq_if *qif) |
256 | { |
257 | int i; |
258 | |
259 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
260 | |
261 | (void) qfq_clear_interface(qif); |
262 | |
263 | VERIFY(qif->qif_class_tbl != NULL); |
264 | _FREE(qif->qif_class_tbl, M_DEVBUF); |
265 | qif->qif_class_tbl = NULL; |
266 | |
267 | VERIFY(qif->qif_groups != NULL); |
268 | for (i = 0; i <= QFQ_MAX_INDEX; i++) { |
269 | struct qfq_group *grp = qif->qif_groups[i]; |
270 | |
271 | if (grp != NULL) { |
272 | VERIFY(grp->qfg_slots != NULL); |
273 | _FREE(grp->qfg_slots, M_DEVBUF); |
274 | grp->qfg_slots = NULL; |
275 | _FREE(grp, M_DEVBUF); |
276 | qif->qif_groups[i] = NULL; |
277 | } |
278 | } |
279 | _FREE(qif->qif_groups, M_DEVBUF); |
280 | qif->qif_groups = NULL; |
281 | |
282 | if (pktsched_verbose) { |
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
284 | if_name(QFQIF_IFP(qif)), qfq_style(qif)); |
285 | } |
286 | |
287 | zfree(qfq_zone, qif); |
288 | |
289 | return (0); |
290 | } |
291 | |
292 | /* |
293 | * bring the interface back to the initial state by discarding |
294 | * all the filters and classes. |
295 | */ |
296 | static int |
297 | qfq_clear_interface(struct qfq_if *qif) |
298 | { |
299 | struct qfq_class *cl; |
300 | int i; |
301 | |
302 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
303 | |
304 | /* clear out the classes */ |
305 | for (i = 0; i < qif->qif_maxclasses; i++) |
306 | if ((cl = qif->qif_class_tbl[i]) != NULL) |
307 | qfq_class_destroy(qif, cl); |
308 | |
309 | return (0); |
310 | } |
311 | |
312 | /* discard all the queued packets on the interface */ |
313 | void |
314 | qfq_purge(struct qfq_if *qif) |
315 | { |
316 | struct qfq_class *cl; |
317 | int i; |
318 | |
319 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
320 | |
321 | for (i = 0; i < qif->qif_maxclasses; i++) { |
322 | if ((cl = qif->qif_class_tbl[i]) != NULL) |
323 | qfq_purgeq(qif, cl, 0, NULL, NULL); |
324 | } |
325 | VERIFY(IFCQ_LEN(qif->qif_ifq) == 0); |
326 | } |
327 | |
328 | static void |
329 | qfq_purge_sc(struct qfq_if *qif, cqrq_purge_sc_t *pr) |
330 | { |
331 | struct ifclassq *ifq = qif->qif_ifq; |
332 | u_int32_t i; |
333 | |
334 | IFCQ_LOCK_ASSERT_HELD(ifq); |
335 | |
336 | VERIFY(pr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(pr->sc)); |
337 | VERIFY(pr->flow != 0); |
338 | |
339 | if (pr->sc != MBUF_SC_UNSPEC) { |
340 | i = MBUF_SCIDX(pr->sc); |
341 | VERIFY(i < IFCQ_SC_MAX); |
342 | |
343 | qfq_purgeq(qif, ifq->ifcq_disc_slots[i].cl, |
344 | pr->flow, &pr->packets, &pr->bytes); |
345 | } else { |
346 | u_int32_t cnt, len; |
347 | |
348 | pr->packets = 0; |
349 | pr->bytes = 0; |
350 | |
351 | for (i = 0; i < IFCQ_SC_MAX; i++) { |
352 | qfq_purgeq(qif, ifq->ifcq_disc_slots[i].cl, |
353 | pr->flow, &cnt, &len); |
354 | pr->packets += cnt; |
355 | pr->bytes += len; |
356 | } |
357 | } |
358 | } |
359 | |
360 | void |
361 | qfq_event(struct qfq_if *qif, cqev_t ev) |
362 | { |
363 | struct qfq_class *cl; |
364 | int i; |
365 | |
366 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
367 | |
368 | for (i = 0; i < qif->qif_maxclasses; i++) |
369 | if ((cl = qif->qif_class_tbl[i]) != NULL) |
370 | qfq_updateq(qif, cl, ev); |
371 | } |
372 | |
373 | int |
374 | qfq_add_queue(struct qfq_if *qif, u_int32_t qlimit, u_int32_t weight, |
375 | u_int32_t maxsz, u_int32_t flags, u_int32_t qid, struct qfq_class **clp, |
376 | classq_pkt_type_t ptype) |
377 | { |
378 | struct qfq_class *cl; |
379 | u_int32_t w; |
380 | |
381 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
382 | |
383 | if (qfq_clh_to_clp(qif, qid) != NULL) |
384 | return (EBUSY); |
385 | |
386 | /* check parameters */ |
387 | if (weight == 0 || weight > QFQ_MAX_WEIGHT) |
388 | return (EINVAL); |
389 | |
390 | w = (QFQ_ONE_FP / (QFQ_ONE_FP / weight)); |
391 | if (qif->qif_wsum + w > QFQ_MAX_WSUM) |
392 | return (EINVAL); |
393 | |
394 | if (maxsz == 0 || maxsz > (1 << QFQ_MTU_SHIFT)) |
395 | return (EINVAL); |
396 | |
397 | cl = qfq_class_create(qif, weight, qlimit, flags, maxsz, qid, ptype); |
398 | if (cl == NULL) |
399 | return (ENOMEM); |
400 | |
401 | if (clp != NULL) |
402 | *clp = cl; |
403 | |
404 | return (0); |
405 | } |
406 | |
407 | static struct qfq_class * |
408 | qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, |
409 | u_int32_t flags, u_int32_t maxsz, u_int32_t qid, classq_pkt_type_t ptype) |
410 | { |
411 | struct ifnet *ifp; |
412 | struct ifclassq *ifq; |
413 | struct qfq_group *grp; |
414 | struct qfq_class *cl; |
415 | u_int32_t w; /* approximated weight */ |
416 | int i; |
417 | |
418 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
419 | |
420 | if (qif->qif_classes >= qif->qif_maxclasses) { |
		log(LOG_ERR, "%s: %s out of classes! (max %d)\n",
422 | if_name(QFQIF_IFP(qif)), qfq_style(qif), |
423 | qif->qif_maxclasses); |
424 | return (NULL); |
425 | } |
426 | |
427 | ifq = qif->qif_ifq; |
428 | ifp = QFQIF_IFP(qif); |
429 | |
430 | cl = zalloc(qfq_cl_zone); |
431 | if (cl == NULL) |
432 | return (NULL); |
433 | |
434 | bzero(cl, qfq_cl_size); |
435 | |
436 | if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) { |
437 | qlimit = IFCQ_MAXLEN(ifq); |
438 | if (qlimit == 0) |
439 | qlimit = DEFAULT_QLIMIT; /* use default */ |
440 | } |
441 | _qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype); |
442 | cl->cl_qif = qif; |
443 | cl->cl_flags = flags; |
444 | cl->cl_handle = qid; |
445 | |
446 | /* |
447 | * Find a free slot in the class table. If the slot matching |
448 | * the lower bits of qid is free, use this slot. Otherwise, |
449 | * use the first free slot. |
450 | */ |
451 | i = qid % qif->qif_maxclasses; |
452 | if (qif->qif_class_tbl[i] == NULL) { |
453 | qif->qif_class_tbl[i] = cl; |
454 | } else { |
455 | for (i = 0; i < qif->qif_maxclasses; i++) { |
456 | if (qif->qif_class_tbl[i] == NULL) { |
457 | qif->qif_class_tbl[i] = cl; |
458 | break; |
459 | } |
460 | } |
461 | if (i == qif->qif_maxclasses) { |
462 | zfree(qfq_cl_zone, cl); |
463 | return (NULL); |
464 | } |
465 | } |
466 | |
467 | w = weight; |
468 | VERIFY(w > 0 && w <= QFQ_MAX_WEIGHT); |
469 | cl->cl_lmax = maxsz; |
470 | cl->cl_inv_w = (QFQ_ONE_FP / w); |
471 | w = (QFQ_ONE_FP / cl->cl_inv_w); |
472 | VERIFY(qif->qif_wsum + w <= QFQ_MAX_WSUM); |
473 | |
474 | i = qfq_calc_index(cl, cl->cl_inv_w, cl->cl_lmax); |
475 | VERIFY(i <= QFQ_MAX_INDEX); |
476 | grp = qif->qif_groups[i]; |
477 | if (grp == NULL) { |
478 | grp = _MALLOC(sizeof (*grp), M_DEVBUF, M_WAITOK|M_ZERO); |
479 | if (grp != NULL) { |
480 | grp->qfg_index = i; |
481 | grp->qfg_slot_shift = |
482 | QFQ_MTU_SHIFT + QFQ_FRAC_BITS - (QFQ_MAX_INDEX - i); |
483 | grp->qfg_slots = _MALLOC(sizeof (struct qfq_class *) * |
484 | qif->qif_maxslots, M_DEVBUF, M_WAITOK|M_ZERO); |
485 | if (grp->qfg_slots == NULL) { |
				log(LOG_ERR, "%s: %s unable to allocate group "
				    "slots for index %d\n", if_name(ifp),
				    qfq_style(qif), i);
489 | } |
490 | } else { |
			log(LOG_ERR, "%s: %s unable to allocate group for "
			    "qid=%d\n", if_name(ifp), qfq_style(qif),
			    cl->cl_handle);
494 | } |
495 | if (grp == NULL || grp->qfg_slots == NULL) { |
496 | qif->qif_class_tbl[qid % qif->qif_maxclasses] = NULL; |
497 | if (grp != NULL) |
498 | _FREE(grp, M_DEVBUF); |
499 | zfree(qfq_cl_zone, cl); |
500 | return (NULL); |
501 | } else { |
502 | qif->qif_groups[i] = grp; |
503 | } |
504 | } |
505 | cl->cl_grp = grp; |
506 | qif->qif_wsum += w; |
507 | /* XXX cl->cl_S = qif->qif_V; ? */ |
508 | /* XXX compute qif->qif_i_wsum */ |
509 | |
510 | qif->qif_classes++; |
511 | |
512 | if (flags & QFCF_DEFAULTCLASS) |
513 | qif->qif_default = cl; |
514 | |
515 | if (flags & QFCF_SFB) { |
516 | cl->cl_qflags = 0; |
517 | if (flags & QFCF_ECN) { |
518 | cl->cl_qflags |= SFBF_ECN; |
519 | } |
520 | if (flags & QFCF_FLOWCTL) { |
521 | cl->cl_qflags |= SFBF_FLOWCTL; |
522 | } |
523 | if (flags & QFCF_DELAYBASED) { |
524 | cl->cl_qflags |= SFBF_DELAYBASED; |
525 | } |
526 | if (!(cl->cl_flags & QFCF_LAZY)) |
527 | cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, |
528 | qlimit(&cl->cl_q), cl->cl_qflags); |
529 | if (cl->cl_sfb != NULL || (cl->cl_flags & QFCF_LAZY)) |
530 | qtype(&cl->cl_q) = Q_SFB; |
531 | } |
532 | |
533 | if (pktsched_verbose) { |
		log(LOG_DEBUG, "%s: %s created qid=%d grp=%d weight=%d "
		    "qlimit=%d flags=%b\n", if_name(ifp), qfq_style(qif),
		    cl->cl_handle, cl->cl_grp->qfg_index, weight, qlimit,
		    flags, QFCF_BITS);
538 | } |
539 | |
540 | return (cl); |
541 | } |
542 | |
543 | int |
544 | qfq_remove_queue(struct qfq_if *qif, u_int32_t qid) |
545 | { |
546 | struct qfq_class *cl; |
547 | |
548 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
549 | |
550 | if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) |
551 | return (EINVAL); |
552 | |
553 | return (qfq_class_destroy(qif, cl)); |
554 | } |
555 | |
556 | static int |
557 | qfq_class_destroy(struct qfq_if *qif, struct qfq_class *cl) |
558 | { |
559 | struct ifclassq *ifq = qif->qif_ifq; |
560 | int i; |
561 | #if !MACH_ASSERT |
562 | #pragma unused(ifq) |
563 | #endif |
564 | |
565 | IFCQ_LOCK_ASSERT_HELD(ifq); |
566 | |
567 | qfq_purgeq(qif, cl, 0, NULL, NULL); |
568 | |
569 | if (cl->cl_inv_w != 0) { |
570 | qif->qif_wsum -= (QFQ_ONE_FP / cl->cl_inv_w); |
571 | cl->cl_inv_w = 0; /* reset weight to avoid run twice */ |
572 | } |
573 | |
574 | for (i = 0; i < qif->qif_maxclasses; i++) { |
575 | if (qif->qif_class_tbl[i] == cl) { |
576 | qif->qif_class_tbl[i] = NULL; |
577 | break; |
578 | } |
579 | } |
580 | qif->qif_classes--; |
581 | |
582 | if (cl->cl_qalg.ptr != NULL) { |
583 | if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) |
584 | sfb_destroy(cl->cl_sfb); |
585 | cl->cl_qalg.ptr = NULL; |
586 | qtype(&cl->cl_q) = Q_DROPTAIL; |
587 | qstate(&cl->cl_q) = QS_RUNNING; |
588 | } |
589 | |
590 | if (qif->qif_default == cl) |
591 | qif->qif_default = NULL; |
592 | |
593 | if (pktsched_verbose) { |
		log(LOG_DEBUG, "%s: %s destroyed qid=%d\n",
595 | if_name(QFQIF_IFP(qif)), qfq_style(qif), cl->cl_handle); |
596 | } |
597 | |
598 | zfree(qfq_cl_zone, cl); |
599 | |
600 | return (0); |
601 | } |
602 | |
603 | /* |
604 | * Calculate a mask to mimic what would be ffs_from() |
605 | */ |
606 | static inline pktsched_bitmap_t |
607 | mask_from(pktsched_bitmap_t bitmap, int from) |
608 | { |
609 | return (bitmap & ~((1UL << from) - 1)); |
610 | } |
611 | |
612 | /* |
613 | * The state computation relies on ER=0, IR=1, EB=2, IB=3 |
614 | * First compute eligibility comparing grp->qfg_S, qif->qif_V, |
615 | * then check if someone is blocking us and possibly add EB |
616 | */ |
617 | static inline u_int32_t |
618 | qfq_calc_state(struct qfq_if *qif, struct qfq_group *grp) |
619 | { |
620 | /* if S > V we are not eligible */ |
621 | u_int32_t state = qfq_gt(grp->qfg_S, qif->qif_V); |
622 | pktsched_bitmap_t mask = mask_from(qif->qif_bitmaps[ER], |
623 | grp->qfg_index); |
624 | struct qfq_group *next; |
625 | |
626 | if (mask) { |
627 | next = qfq_ffs(qif, mask); |
628 | if (qfq_gt(grp->qfg_F, next->qfg_F)) |
629 | state |= EB; |
630 | } |
631 | |
632 | return (state); |
633 | } |
634 | |
635 | /* |
636 | * In principle |
637 | * qif->qif_bitmaps[dst] |= qif->qif_bitmaps[src] & mask; |
638 | * qif->qif_bitmaps[src] &= ~mask; |
639 | * but we should make sure that src != dst |
640 | */ |
641 | static inline void |
642 | qfq_move_groups(struct qfq_if *qif, pktsched_bitmap_t mask, int src, int dst) |
643 | { |
644 | qif->qif_bitmaps[dst] |= qif->qif_bitmaps[src] & mask; |
645 | qif->qif_bitmaps[src] &= ~mask; |
646 | } |
647 | |
648 | static inline void |
649 | qfq_unblock_groups(struct qfq_if *qif, int index, u_int64_t old_finish) |
650 | { |
651 | pktsched_bitmap_t mask = mask_from(qif->qif_bitmaps[ER], index + 1); |
652 | struct qfq_group *next; |
653 | |
654 | if (mask) { |
655 | next = qfq_ffs(qif, mask); |
656 | if (!qfq_gt(next->qfg_F, old_finish)) |
657 | return; |
658 | } |
659 | |
660 | mask = (1UL << index) - 1; |
661 | qfq_move_groups(qif, mask, EB, ER); |
662 | qfq_move_groups(qif, mask, IB, IR); |
663 | } |
664 | |
665 | /* |
666 | * perhaps |
667 | * |
668 | * old_V ^= qif->qif_V; |
669 | * old_V >>= QFQ_MIN_SLOT_SHIFT; |
670 | * if (old_V) { |
671 | * ... |
672 | * } |
673 | */ |
674 | static inline void |
675 | qfq_make_eligible(struct qfq_if *qif, u_int64_t old_V) |
676 | { |
677 | pktsched_bitmap_t mask, vslot, old_vslot; |
678 | |
679 | vslot = qif->qif_V >> QFQ_MIN_SLOT_SHIFT; |
680 | old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT; |
681 | |
682 | if (vslot != old_vslot) { |
683 | mask = (2UL << (__fls(vslot ^ old_vslot))) - 1; |
684 | qfq_move_groups(qif, mask, IR, ER); |
685 | qfq_move_groups(qif, mask, IB, EB); |
686 | } |
687 | } |
688 | |
689 | /* |
690 | * XXX we should make sure that slot becomes less than 32. |
691 | * This is guaranteed by the input values. |
 * roundedS is always cl->cl_S rounded on grp->qfg_slot_shift bits.
693 | */ |
694 | static inline void |
695 | qfq_slot_insert(struct qfq_if *qif, struct qfq_group *grp, |
696 | struct qfq_class *cl, u_int64_t roundedS) |
697 | { |
698 | u_int64_t slot = (roundedS - grp->qfg_S) >> grp->qfg_slot_shift; |
699 | u_int32_t i = (grp->qfg_front + slot) % qif->qif_maxslots; |
700 | |
701 | cl->cl_next = grp->qfg_slots[i]; |
702 | grp->qfg_slots[i] = cl; |
703 | pktsched_bit_set(slot, &grp->qfg_full_slots); |
704 | } |
705 | |
706 | /* |
707 | * remove the entry from the slot |
708 | */ |
709 | static inline void |
710 | qfq_front_slot_remove(struct qfq_group *grp) |
711 | { |
712 | struct qfq_class **h = &grp->qfg_slots[grp->qfg_front]; |
713 | |
714 | *h = (*h)->cl_next; |
715 | if (!*h) |
716 | pktsched_bit_clr(0, &grp->qfg_full_slots); |
717 | } |
718 | |
719 | /* |
720 | * Returns the first full queue in a group. As a side effect, |
721 | * adjust the bucket list so the first non-empty bucket is at |
722 | * position 0 in qfg_full_slots. |
723 | */ |
724 | static inline struct qfq_class * |
725 | qfq_slot_scan(struct qfq_if *qif, struct qfq_group *grp) |
726 | { |
727 | int i; |
728 | |
729 | if (pktsched_verbose > 2) { |
		log(LOG_DEBUG, "%s: %s grp=%d full_slots=0x%x\n",
731 | if_name(QFQIF_IFP(qif)), qfq_style(qif), grp->qfg_index, |
732 | grp->qfg_full_slots); |
733 | } |
734 | |
735 | if (grp->qfg_full_slots == 0) |
736 | return (NULL); |
737 | |
738 | i = pktsched_ffs(grp->qfg_full_slots) - 1; /* zero-based */ |
739 | if (i > 0) { |
740 | grp->qfg_front = (grp->qfg_front + i) % qif->qif_maxslots; |
741 | grp->qfg_full_slots >>= i; |
742 | } |
743 | |
744 | return (grp->qfg_slots[grp->qfg_front]); |
745 | } |
746 | |
747 | /* |
748 | * adjust the bucket list. When the start time of a group decreases, |
749 | * we move the index down (modulo qif->qif_maxslots) so we don't need to |
750 | * move the objects. The mask of occupied slots must be shifted |
751 | * because we use ffs() to find the first non-empty slot. |
752 | * This covers decreases in the group's start time, but what about |
 * increases of the start time?
754 | * Here too we should make sure that i is less than 32 |
755 | */ |
756 | static inline void |
757 | qfq_slot_rotate(struct qfq_if *qif, struct qfq_group *grp, u_int64_t roundedS) |
758 | { |
760 | u_int32_t i = (grp->qfg_S - roundedS) >> grp->qfg_slot_shift; |
761 | |
762 | grp->qfg_full_slots <<= i; |
763 | grp->qfg_front = (grp->qfg_front - i) % qif->qif_maxslots; |
764 | } |
765 | |
766 | static inline void |
767 | qfq_update_eligible(struct qfq_if *qif, u_int64_t old_V) |
768 | { |
769 | pktsched_bitmap_t ineligible; |
770 | |
771 | ineligible = qif->qif_bitmaps[IR] | qif->qif_bitmaps[IB]; |
772 | if (ineligible) { |
773 | if (!qif->qif_bitmaps[ER]) { |
774 | struct qfq_group *grp; |
775 | grp = qfq_ffs(qif, ineligible); |
776 | if (qfq_gt(grp->qfg_S, qif->qif_V)) |
777 | qif->qif_V = grp->qfg_S; |
778 | } |
779 | qfq_make_eligible(qif, old_V); |
780 | } |
781 | } |
782 | |
783 | /* |
784 | * Updates the class, returns true if also the group needs to be updated. |
785 | */ |
786 | static inline int |
787 | qfq_update_class(struct qfq_if *qif, struct qfq_group *grp, |
788 | struct qfq_class *cl) |
789 | { |
790 | #pragma unused(qif) |
791 | cl->cl_S = cl->cl_F; |
792 | if (qempty(&cl->cl_q)) { |
793 | qfq_front_slot_remove(grp); |
794 | } else { |
795 | u_int32_t len; |
796 | u_int64_t roundedS; |
797 | |
798 | len = m_pktlen((struct mbuf *)qhead(&cl->cl_q)); |
799 | cl->cl_F = cl->cl_S + (u_int64_t)len * cl->cl_inv_w; |
800 | roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); |
801 | if (roundedS == grp->qfg_S) |
802 | return (0); |
803 | |
804 | qfq_front_slot_remove(grp); |
805 | qfq_slot_insert(qif, grp, cl, roundedS); |
806 | } |
807 | return (1); |
808 | } |
809 | |
810 | /* |
811 | * note: CLASSQDQ_POLL returns the next packet without removing the packet |
812 | * from the queue. CLASSQDQ_REMOVE is a normal dequeue operation. |
813 | * CLASSQDQ_REMOVE must return the same packet if called immediately |
814 | * after CLASSQDQ_POLL. |
815 | */ |
816 | void |
817 | qfq_dequeue(struct qfq_if *qif, pktsched_pkt_t *pkt) |
818 | { |
819 | pktsched_bitmap_t er_bits = qif->qif_bitmaps[ER]; |
820 | struct ifclassq *ifq = qif->qif_ifq; |
821 | struct qfq_group *grp; |
822 | struct qfq_class *cl; |
823 | u_int64_t old_V; |
824 | u_int32_t len; |
825 | |
826 | IFCQ_LOCK_ASSERT_HELD(ifq); |
827 | |
828 | pkt->pktsched_pkt = NULL; |
829 | |
830 | for (;;) { |
831 | if (er_bits == 0) { |
832 | #if QFQ_DEBUG |
833 | if (qif->qif_queued && pktsched_verbose > 1) |
				qfq_dump_sched(qif, "start dequeue");
835 | #endif /* QFQ_DEBUG */ |
836 | /* no eligible and ready packet */ |
837 | return; |
838 | } |
839 | grp = qfq_ffs(qif, er_bits); |
840 | /* if group is non-empty, use it */ |
841 | if (grp->qfg_full_slots != 0) |
842 | break; |
843 | pktsched_bit_clr(grp->qfg_index, &er_bits); |
844 | #if QFQ_DEBUG |
845 | qif->qif_emptygrp++; |
846 | #endif /* QFQ_DEBUG */ |
847 | } |
848 | VERIFY(!IFCQ_IS_EMPTY(ifq)); |
849 | |
850 | cl = grp->qfg_slots[grp->qfg_front]; |
851 | VERIFY(cl != NULL && !qempty(&cl->cl_q)); |
852 | |
853 | qfq_getq(cl, pkt); |
854 | VERIFY(pkt->pktsched_pkt != NULL); /* qalg must be work conserving */ |
855 | len = pktsched_get_pkt_len(pkt); |
856 | |
857 | #if QFQ_DEBUG |
858 | qif->qif_queued--; |
859 | #endif /* QFQ_DEBUG */ |
860 | |
861 | IFCQ_DEC_LEN(ifq); |
862 | IFCQ_DEC_BYTES(ifq, len); |
863 | if (qempty(&cl->cl_q)) |
864 | cl->cl_period++; |
865 | PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len); |
866 | IFCQ_XMIT_ADD(ifq, 1, len); |
867 | |
868 | old_V = qif->qif_V; |
869 | qif->qif_V += (u_int64_t)len * QFQ_IWSUM; |
870 | |
871 | if (pktsched_verbose > 2) { |
		log(LOG_DEBUG, "%s: %s qid=%d dequeue pkt=0x%llx F=0x%llx "
		    "V=0x%llx\n", if_name(QFQIF_IFP(qif)), qfq_style(qif),
		    cl->cl_handle,
		    (uint64_t)VM_KERNEL_ADDRPERM(pkt->pktsched_pkt), cl->cl_F,
		    qif->qif_V);
877 | } |
878 | |
879 | if (qfq_update_class(qif, grp, cl)) { |
880 | u_int64_t old_F = grp->qfg_F; |
881 | |
882 | cl = qfq_slot_scan(qif, grp); |
883 | if (!cl) { /* group gone, remove from ER */ |
884 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); |
885 | } else { |
886 | u_int32_t s; |
887 | u_int64_t roundedS = |
888 | qfq_round_down(cl->cl_S, grp->qfg_slot_shift); |
889 | |
890 | if (grp->qfg_S == roundedS) |
891 | goto skip_unblock; |
892 | |
893 | grp->qfg_S = roundedS; |
894 | grp->qfg_F = roundedS + (2ULL << grp->qfg_slot_shift); |
895 | |
896 | /* remove from ER and put in the new set */ |
897 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); |
898 | s = qfq_calc_state(qif, grp); |
899 | pktsched_bit_set(grp->qfg_index, &qif->qif_bitmaps[s]); |
900 | } |
901 | /* we need to unblock even if the group has gone away */ |
902 | qfq_unblock_groups(qif, grp->qfg_index, old_F); |
903 | } |
904 | |
905 | skip_unblock: |
906 | qfq_update_eligible(qif, old_V); |
907 | |
908 | #if QFQ_DEBUG |
909 | if (!qif->qif_bitmaps[ER] && qif->qif_queued && pktsched_verbose > 1) |
		qfq_dump_sched(qif, "end dequeue");
911 | #endif /* QFQ_DEBUG */ |
912 | } |
913 | |
914 | /* |
915 | * Assign a reasonable start time for a new flow k in group i. |
916 | * Admissible values for hat(F) are multiples of sigma_i |
917 | * no greater than V+sigma_i . Larger values mean that |
918 | * we had a wraparound so we consider the timestamp to be stale. |
919 | * |
920 | * If F is not stale and F >= V then we set S = F. |
921 | * Otherwise we should assign S = V, but this may violate |
922 | * the ordering in ER. So, if we have groups in ER, set S to |
923 | * the F_j of the first group j which would be blocking us. |
924 | * We are guaranteed not to move S backward because |
925 | * otherwise our group i would still be blocked. |
926 | */ |
927 | static inline void |
928 | qfq_update_start(struct qfq_if *qif, struct qfq_class *cl) |
929 | { |
930 | pktsched_bitmap_t mask; |
931 | u_int64_t limit, roundedF; |
932 | int slot_shift = cl->cl_grp->qfg_slot_shift; |
933 | |
934 | roundedF = qfq_round_down(cl->cl_F, slot_shift); |
935 | limit = qfq_round_down(qif->qif_V, slot_shift) + (1UL << slot_shift); |
936 | |
937 | if (!qfq_gt(cl->cl_F, qif->qif_V) || qfq_gt(roundedF, limit)) { |
938 | /* timestamp was stale */ |
939 | mask = mask_from(qif->qif_bitmaps[ER], cl->cl_grp->qfg_index); |
940 | if (mask) { |
941 | struct qfq_group *next = qfq_ffs(qif, mask); |
942 | if (qfq_gt(roundedF, next->qfg_F)) { |
943 | cl->cl_S = next->qfg_F; |
944 | return; |
945 | } |
946 | } |
947 | cl->cl_S = qif->qif_V; |
948 | } else { /* timestamp is not stale */ |
949 | cl->cl_S = cl->cl_F; |
950 | } |
951 | } |
952 | |
953 | int |
954 | qfq_enqueue(struct qfq_if *qif, struct qfq_class *cl, pktsched_pkt_t *pkt, |
955 | struct pf_mtag *t) |
956 | { |
957 | struct ifclassq *ifq = qif->qif_ifq; |
958 | struct qfq_group *grp; |
959 | u_int64_t roundedS; |
960 | int len, ret, s; |
961 | |
962 | IFCQ_LOCK_ASSERT_HELD(ifq); |
963 | VERIFY(cl == NULL || cl->cl_qif == qif); |
964 | |
965 | if (cl == NULL) { |
966 | cl = qfq_clh_to_clp(qif, 0); |
967 | if (cl == NULL) { |
968 | cl = qif->qif_default; |
969 | if (cl == NULL) { |
970 | IFCQ_CONVERT_LOCK(ifq); |
971 | return (CLASSQEQ_DROP); |
972 | } |
973 | } |
974 | } |
975 | |
976 | VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); |
977 | len = pktsched_get_pkt_len(pkt); |
978 | |
979 | ret = qfq_addq(cl, pkt, t); |
980 | if ((ret != 0) && (ret != CLASSQEQ_SUCCESS_FC)) { |
981 | VERIFY(ret == CLASSQEQ_DROP || |
982 | ret == CLASSQEQ_DROP_FC || |
983 | ret == CLASSQEQ_DROP_SP); |
984 | PKTCNTR_ADD(&cl->cl_dropcnt, 1, len); |
985 | IFCQ_DROP_ADD(ifq, 1, len); |
986 | return (ret); |
987 | } |
988 | IFCQ_INC_LEN(ifq); |
989 | IFCQ_INC_BYTES(ifq, len); |
990 | |
991 | #if QFQ_DEBUG |
992 | qif->qif_queued++; |
993 | #endif /* QFQ_DEBUG */ |
994 | |
995 | /* queue was not idle, we're done */ |
996 | if (qlen(&cl->cl_q) > 1) |
997 | goto done; |
998 | |
999 | /* queue was idle */ |
1000 | grp = cl->cl_grp; |
1001 | qfq_update_start(qif, cl); /* adjust start time */ |
1002 | |
1003 | /* compute new finish time and rounded start */ |
1004 | cl->cl_F = cl->cl_S + (u_int64_t)len * cl->cl_inv_w; |
1005 | roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); |
1006 | |
1007 | /* |
1008 | * Insert cl in the correct bucket. |
1009 | * |
1010 | * If cl->cl_S >= grp->qfg_S we don't need to adjust the bucket list |
1011 | * and simply go to the insertion phase. Otherwise grp->qfg_S is |
1012 | * decreasing, we must make room in the bucket list, and also |
1013 | * recompute the group state. Finally, if there were no flows |
1014 | * in this group and nobody was in ER make sure to adjust V. |
1015 | */ |
1016 | if (grp->qfg_full_slots != 0) { |
1017 | if (!qfq_gt(grp->qfg_S, cl->cl_S)) |
1018 | goto skip_update; |
1019 | |
1020 | /* create a slot for this cl->cl_S */ |
1021 | qfq_slot_rotate(qif, grp, roundedS); |
1022 | |
1023 | /* group was surely ineligible, remove */ |
1024 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IR]); |
1025 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IB]); |
1026 | } else if (!qif->qif_bitmaps[ER] && qfq_gt(roundedS, qif->qif_V)) { |
1027 | qif->qif_V = roundedS; |
1028 | } |
1029 | |
1030 | grp->qfg_S = roundedS; |
1031 | grp->qfg_F = |
1032 | roundedS + (2ULL << grp->qfg_slot_shift); /* i.e. 2 sigma_i */ |
1033 | s = qfq_calc_state(qif, grp); |
1034 | pktsched_bit_set(grp->qfg_index, &qif->qif_bitmaps[s]); |
1035 | |
1036 | if (pktsched_verbose > 2) { |
		log(LOG_DEBUG, "%s: %s qid=%d enqueue m=0x%llx state=%s 0x%x "
		    "S=0x%llx F=0x%llx V=0x%llx\n", if_name(QFQIF_IFP(qif)),
1039 | qfq_style(qif), cl->cl_handle, |
1040 | (uint64_t)VM_KERNEL_ADDRPERM(pkt->pktsched_pkt), |
1041 | qfq_state2str(s), |
1042 | qif->qif_bitmaps[s], cl->cl_S, cl->cl_F, qif->qif_V); |
1043 | } |
1044 | |
1045 | skip_update: |
1046 | qfq_slot_insert(qif, grp, cl, roundedS); |
1047 | |
1048 | done: |
1049 | /* successfully queued. */ |
1050 | return (ret); |
1051 | } |
1052 | |
1053 | static inline void |
1054 | qfq_slot_remove(struct qfq_if *qif, struct qfq_group *grp, |
1055 | struct qfq_class *cl) |
1056 | { |
1058 | struct qfq_class **pprev; |
1059 | u_int32_t i, offset; |
1060 | u_int64_t roundedS; |
1061 | |
1062 | roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); |
1063 | offset = (roundedS - grp->qfg_S) >> grp->qfg_slot_shift; |
1064 | i = (grp->qfg_front + offset) % qif->qif_maxslots; |
1065 | |
1066 | pprev = &grp->qfg_slots[i]; |
1067 | while (*pprev && *pprev != cl) |
1068 | pprev = &(*pprev)->cl_next; |
1069 | |
1070 | *pprev = cl->cl_next; |
1071 | if (!grp->qfg_slots[i]) |
1072 | pktsched_bit_clr(offset, &grp->qfg_full_slots); |
1073 | } |
1074 | |
1075 | /* |
1076 | * Called to forcibly destroy a queue. |
1077 | * If the queue is not in the front bucket, or if it has |
1078 | * other queues in the front bucket, we can simply remove |
1079 | * the queue with no other side effects. |
1080 | * Otherwise we must propagate the event up. |
1081 | * XXX description to be completed. |
1082 | */ |
1083 | static void |
1084 | qfq_deactivate_class(struct qfq_if *qif, struct qfq_class *cl) |
1085 | { |
1086 | struct qfq_group *grp = cl->cl_grp; |
1087 | pktsched_bitmap_t mask; |
1088 | u_int64_t roundedS; |
1089 | int s; |
1090 | |
1091 | if (pktsched_verbose) { |
		log(LOG_DEBUG, "%s: %s deactivate qid=%d grp=%d "
		    "full_slots=0x%x front=%d bitmaps={ER=0x%x,EB=0x%x,"
		    "IR=0x%x,IB=0x%x}\n",
1095 | if_name(QFQIF_IFP(cl->cl_qif)), qfq_style(cl->cl_qif), |
1096 | cl->cl_handle, grp->qfg_index, grp->qfg_full_slots, |
1097 | grp->qfg_front, qif->qif_bitmaps[ER], qif->qif_bitmaps[EB], |
1098 | qif->qif_bitmaps[IR], qif->qif_bitmaps[IB]); |
1099 | #if QFQ_DEBUG |
1100 | if (pktsched_verbose > 1) |
			qfq_dump_sched(qif, "start deactivate");
1102 | #endif /* QFQ_DEBUG */ |
1103 | } |
1104 | |
1105 | cl->cl_F = cl->cl_S; /* not needed if the class goes away */ |
1106 | qfq_slot_remove(qif, grp, cl); |
1107 | |
1108 | if (grp->qfg_full_slots == 0) { |
1109 | /* |
1110 | * Nothing left in the group, remove from all sets. |
1111 | * Do ER last because if we were blocking other groups |
1112 | * we must unblock them. |
1113 | */ |
1114 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IR]); |
1115 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[EB]); |
1116 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IB]); |
1117 | |
1118 | if (pktsched_bit_tst(grp->qfg_index, &qif->qif_bitmaps[ER]) && |
1119 | !(qif->qif_bitmaps[ER] & ~((1UL << grp->qfg_index) - 1))) { |
1120 | mask = qif->qif_bitmaps[ER] & |
1121 | ((1UL << grp->qfg_index) - 1); |
1122 | if (mask) |
1123 | mask = ~((1UL << __fls(mask)) - 1); |
1124 | else |
1125 | mask = (pktsched_bitmap_t)~0UL; |
1126 | qfq_move_groups(qif, mask, EB, ER); |
1127 | qfq_move_groups(qif, mask, IB, IR); |
1128 | } |
1129 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); |
1130 | } else if (!grp->qfg_slots[grp->qfg_front]) { |
1131 | cl = qfq_slot_scan(qif, grp); |
1132 | roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); |
1133 | if (grp->qfg_S != roundedS) { |
1134 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); |
1135 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IR]); |
1136 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[EB]); |
1137 | pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IB]); |
1138 | grp->qfg_S = roundedS; |
1139 | grp->qfg_F = roundedS + (2ULL << grp->qfg_slot_shift); |
1140 | s = qfq_calc_state(qif, grp); |
1141 | pktsched_bit_set(grp->qfg_index, &qif->qif_bitmaps[s]); |
1142 | } |
1143 | } |
1144 | qfq_update_eligible(qif, qif->qif_V); |
1145 | |
1146 | #if QFQ_DEBUG |
1147 | if (pktsched_verbose > 1) |
		qfq_dump_sched(qif, "end deactivate");
1149 | #endif /* QFQ_DEBUG */ |
1150 | } |
1151 | |
1152 | static const char * |
1153 | qfq_state2str(int s) |
1154 | { |
1155 | const char *c; |
1156 | |
	switch (s) {
	case ER:
		c = "ER";
		break;
	case IR:
		c = "IR";
		break;
	case EB:
		c = "EB";
		break;
	case IB:
		c = "IB";
		break;
	default:
		c = "?";
		break;
	}
1174 | return (c); |
1175 | } |
1176 | |
1177 | static inline int |
1178 | qfq_addq(struct qfq_class *cl, pktsched_pkt_t *pkt, struct pf_mtag *t) |
1179 | { |
1180 | struct qfq_if *qif = cl->cl_qif; |
1181 | struct ifclassq *ifq = qif->qif_ifq; |
1182 | |
1183 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1184 | |
1185 | if (q_is_sfb(&cl->cl_q)) { |
1186 | if (cl->cl_sfb == NULL) { |
1187 | struct ifnet *ifp = QFQIF_IFP(qif); |
1188 | |
1189 | VERIFY(cl->cl_flags & QFCF_LAZY); |
1190 | cl->cl_flags &= ~QFCF_LAZY; |
1191 | |
1192 | IFCQ_CONVERT_LOCK(ifq); |
1193 | cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, |
1194 | qlimit(&cl->cl_q), cl->cl_qflags); |
1195 | if (cl->cl_sfb == NULL) { |
1196 | /* fall back to droptail */ |
1197 | qtype(&cl->cl_q) = Q_DROPTAIL; |
1198 | cl->cl_flags &= ~QFCF_SFB; |
1199 | cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL); |
1200 | |
				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d grp=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
1204 | qfq_style(qif), cl->cl_handle, |
1205 | cl->cl_grp->qfg_index); |
1206 | } else if (qif->qif_throttle != IFNET_THROTTLE_OFF) { |
1207 | /* if there's pending throttling, set it */ |
1208 | cqrq_throttle_t tr = { 1, qif->qif_throttle }; |
1209 | int err = qfq_throttle(qif, &tr); |
1210 | |
1211 | if (err == EALREADY) |
1212 | err = 0; |
1213 | if (err != 0) { |
1214 | tr.level = IFNET_THROTTLE_OFF; |
1215 | (void) qfq_throttle(qif, &tr); |
1216 | } |
1217 | } |
1218 | } |
1219 | if (cl->cl_sfb != NULL) |
1220 | return (sfb_addq(cl->cl_sfb, &cl->cl_q, pkt, t)); |
1221 | } else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) { |
1222 | IFCQ_CONVERT_LOCK(ifq); |
1223 | return (CLASSQEQ_DROP); |
1224 | } |
1225 | |
1226 | #if PF_ECN |
1227 | if (cl->cl_flags & QFCF_CLEARDSCP) { |
1228 | /* not supported for non-mbuf type packets */ |
1229 | VERIFY(pkt->pktsched_ptype == QP_MBUF); |
1230 | write_dsfield(m, t, 0); |
1231 | } |
1232 | #endif /* PF_ECN */ |
1233 | |
1234 | VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); |
1235 | _addq(&cl->cl_q, pkt->pktsched_pkt); |
1236 | return (0); |
1237 | } |
1238 | |
1239 | static inline void |
1240 | qfq_getq(struct qfq_class *cl, pktsched_pkt_t *pkt) |
1241 | { |
1242 | IFCQ_LOCK_ASSERT_HELD(cl->cl_qif->qif_ifq); |
1243 | |
1244 | if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) |
1245 | return (sfb_getq(cl->cl_sfb, &cl->cl_q, pkt)); |
1246 | |
1247 | return (pktsched_pkt_encap(pkt, qptype(&cl->cl_q), _getq(&cl->cl_q))); |
1248 | } |
1249 | |
1250 | static void |
1251 | qfq_purgeq(struct qfq_if *qif, struct qfq_class *cl, u_int32_t flow, |
1252 | u_int32_t *packets, u_int32_t *bytes) |
1253 | { |
1254 | struct ifclassq *ifq = qif->qif_ifq; |
1255 | u_int32_t cnt = 0, len = 0, qlen; |
1256 | |
1257 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1258 | |
1259 | if ((qlen = qlen(&cl->cl_q)) == 0) |
1260 | goto done; |
1261 | |
1262 | IFCQ_CONVERT_LOCK(ifq); |
1263 | if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) |
1264 | sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len); |
1265 | else |
1266 | _flushq_flow(&cl->cl_q, flow, &cnt, &len); |
1267 | |
1268 | if (cnt > 0) { |
1269 | VERIFY(qlen(&cl->cl_q) == (qlen - cnt)); |
1270 | #if QFQ_DEBUG |
1271 | VERIFY(qif->qif_queued >= cnt); |
1272 | qif->qif_queued -= cnt; |
1273 | #endif /* QFQ_DEBUG */ |
1274 | |
1275 | PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len); |
1276 | IFCQ_DROP_ADD(ifq, cnt, len); |
1277 | |
1278 | VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0); |
1279 | IFCQ_LEN(ifq) -= cnt; |
1280 | |
1281 | if (qempty(&cl->cl_q)) |
1282 | qfq_deactivate_class(qif, cl); |
1283 | |
1284 | if (pktsched_verbose) { |
			log(LOG_DEBUG, "%s: %s purge qid=%d weight=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
1287 | if_name(QFQIF_IFP(qif)), |
1288 | qfq_style(qif), cl->cl_handle, |
1289 | (u_int32_t)(QFQ_ONE_FP / cl->cl_inv_w), qlen, |
1290 | qlen(&cl->cl_q), cnt, len, flow); |
1291 | } |
1292 | } |
1293 | done: |
1294 | if (packets != NULL) |
1295 | *packets = cnt; |
1296 | if (bytes != NULL) |
1297 | *bytes = len; |
1298 | } |
1299 | |
1300 | static void |
1301 | qfq_updateq(struct qfq_if *qif, struct qfq_class *cl, cqev_t ev) |
1302 | { |
1303 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
1304 | |
1305 | if (pktsched_verbose) { |
		log(LOG_DEBUG, "%s: %s update qid=%d weight=%d event=%s\n",
1307 | if_name(QFQIF_IFP(qif)), qfq_style(qif), |
1308 | cl->cl_handle, (u_int32_t)(QFQ_ONE_FP / cl->cl_inv_w), |
1309 | ifclassq_ev2str(ev)); |
1310 | } |
1311 | |
1312 | if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) |
1313 | return (sfb_updateq(cl->cl_sfb, ev)); |
1314 | } |
1315 | |
1316 | int |
1317 | qfq_get_class_stats(struct qfq_if *qif, u_int32_t qid, |
1318 | struct qfq_classstats *sp) |
1319 | { |
1320 | struct qfq_class *cl; |
1321 | |
1322 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
1323 | |
1324 | if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) |
1325 | return (EINVAL); |
1326 | |
1327 | sp->class_handle = cl->cl_handle; |
1328 | sp->index = cl->cl_grp->qfg_index; |
1329 | sp->weight = (QFQ_ONE_FP / cl->cl_inv_w); |
1330 | sp->lmax = cl->cl_lmax; |
1331 | sp->qlength = qlen(&cl->cl_q); |
1332 | sp->qlimit = qlimit(&cl->cl_q); |
1333 | sp->period = cl->cl_period; |
1334 | sp->xmitcnt = cl->cl_xmitcnt; |
1335 | sp->dropcnt = cl->cl_dropcnt; |
1336 | |
1337 | sp->qtype = qtype(&cl->cl_q); |
1338 | sp->qstate = qstate(&cl->cl_q); |
1339 | |
1340 | if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) |
1341 | sfb_getstats(cl->cl_sfb, &sp->sfb); |
1342 | |
1343 | return (0); |
1344 | } |
1345 | |
1346 | static int |
1347 | qfq_stat_sc(struct qfq_if *qif, cqrq_stat_sc_t *sr) |
1348 | { |
1349 | struct ifclassq *ifq = qif->qif_ifq; |
1350 | struct qfq_class *cl; |
1351 | u_int32_t i; |
1352 | |
1353 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1354 | |
1355 | VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc)); |
1356 | |
1357 | i = MBUF_SCIDX(sr->sc); |
1358 | VERIFY(i < IFCQ_SC_MAX); |
1359 | |
1360 | cl = ifq->ifcq_disc_slots[i].cl; |
1361 | sr->packets = qlen(&cl->cl_q); |
1362 | sr->bytes = qsize(&cl->cl_q); |
1363 | |
1364 | return (0); |
1365 | } |
1366 | |
1367 | /* convert a class handle to the corresponding class pointer */ |
1368 | static inline struct qfq_class * |
1369 | qfq_clh_to_clp(struct qfq_if *qif, u_int32_t chandle) |
1370 | { |
1371 | struct qfq_class *cl; |
1372 | int i; |
1373 | |
1374 | IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); |
1375 | |
1376 | /* |
1377 | * First, try optimistically the slot matching the lower bits of |
1378 | * the handle. If it fails, do the linear table search. |
1379 | */ |
1380 | i = chandle % qif->qif_maxclasses; |
1381 | if ((cl = qif->qif_class_tbl[i]) != NULL && cl->cl_handle == chandle) |
1382 | return (cl); |
1383 | for (i = 0; i < qif->qif_maxclasses; i++) |
1384 | if ((cl = qif->qif_class_tbl[i]) != NULL && |
1385 | cl->cl_handle == chandle) |
1386 | return (cl); |
1387 | |
1388 | return (NULL); |
1389 | } |
1390 | |
1391 | static const char * |
1392 | qfq_style(struct qfq_if *qif) |
1393 | { |
1394 | #pragma unused(qif) |
	return ("QFQ");
1396 | } |
1397 | |
1398 | /* |
1399 | * Generic comparison function, handling wraparound |
1400 | */ |
1401 | static inline int |
1402 | qfq_gt(u_int64_t a, u_int64_t b) |
1403 | { |
1404 | return ((int64_t)(a - b) > 0); |
1405 | } |
1406 | |
1407 | /* |
1408 | * Round a precise timestamp to its slotted value |
1409 | */ |
1410 | static inline u_int64_t |
1411 | qfq_round_down(u_int64_t ts, u_int32_t shift) |
1412 | { |
1413 | return (ts & ~((1ULL << shift) - 1)); |
1414 | } |
1415 | |
1416 | /* |
1417 | * Return the pointer to the group with lowest index in the bitmap |
1418 | */ |
1419 | static inline struct qfq_group * |
1420 | qfq_ffs(struct qfq_if *qif, pktsched_bitmap_t bitmap) |
1421 | { |
1422 | int index = pktsched_ffs(bitmap) - 1; /* zero-based */ |
1423 | VERIFY(index >= 0 && index <= QFQ_MAX_INDEX && |
1424 | qif->qif_groups[index] != NULL); |
1425 | return (qif->qif_groups[index]); |
1426 | } |
1427 | |
1428 | /* |
1429 | * Calculate a flow index, given its weight and maximum packet length. |
1430 | * index = log_2(maxlen/weight) but we need to apply the scaling. |
1431 | * This is used only once at flow creation. |
1432 | */ |
1433 | static int |
1434 | qfq_calc_index(struct qfq_class *cl, u_int32_t inv_w, u_int32_t maxlen) |
1435 | { |
	u_int64_t slot_size = (u_int64_t)maxlen * inv_w;
1437 | pktsched_bitmap_t size_map; |
1438 | int index = 0; |
1439 | |
1440 | size_map = (pktsched_bitmap_t)(slot_size >> QFQ_MIN_SLOT_SHIFT); |
1441 | if (!size_map) |
1442 | goto out; |
1443 | |
1444 | index = __fls(size_map) + 1; /* basically a log_2() */ |
1445 | index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1))); |
1446 | |
1447 | if (index < 0) |
1448 | index = 0; |
1449 | out: |
1450 | if (pktsched_verbose) { |
		log(LOG_DEBUG, "%s: %s qid=%d grp=%d W=%u, L=%u, I=%d\n",
1452 | if_name(QFQIF_IFP(cl->cl_qif)), qfq_style(cl->cl_qif), |
1453 | cl->cl_handle, index, (u_int32_t)(QFQ_ONE_FP/inv_w), |
1454 | maxlen, index); |
1455 | } |
1456 | return (index); |
1457 | } |
1458 | |
1459 | #if QFQ_DEBUG |
1460 | static void |
1461 | qfq_dump_groups(struct qfq_if *qif, u_int32_t mask) |
1462 | { |
1463 | int i, j; |
1464 | |
1465 | for (i = 0; i < QFQ_MAX_INDEX + 1; i++) { |
1466 | struct qfq_group *g = qif->qif_groups[i]; |
1467 | |
1468 | if (0 == (mask & (1 << i))) |
1469 | continue; |
1470 | if (g == NULL) |
1471 | continue; |
1472 | |
		log(LOG_DEBUG, "%s: %s [%2d] full_slots 0x%x\n",
		    if_name(QFQIF_IFP(qif)), qfq_style(qif), i,
		    g->qfg_full_slots);
		log(LOG_DEBUG, "%s: %s S 0x%20llx F 0x%llx %c\n",
1477 | if_name(QFQIF_IFP(qif)), qfq_style(qif), |
1478 | g->qfg_S, g->qfg_F, mask & (1 << i) ? '1' : '0'); |
1479 | |
1480 | for (j = 0; j < qif->qif_maxslots; j++) { |
1481 | if (g->qfg_slots[j]) { |
				log(LOG_DEBUG, "%s: %s bucket %d 0x%llx "
				    "qid %d\n", if_name(QFQIF_IFP(qif)),
1484 | qfq_style(qif), j, |
1485 | (uint64_t)VM_KERNEL_ADDRPERM( |
1486 | g->qfg_slots[j]), |
1487 | g->qfg_slots[j]->cl_handle); |
1488 | } |
1489 | } |
1490 | } |
1491 | } |
1492 | |
1493 | static void |
1494 | qfq_dump_sched(struct qfq_if *qif, const char *msg) |
1495 | { |
	log(LOG_DEBUG, "%s: %s --- in %s: ---\n",
	    if_name(QFQIF_IFP(qif)), qfq_style(qif), msg);
	log(LOG_DEBUG, "%s: %s emptygrp %d queued %d V 0x%llx\n",
	    if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_emptygrp,
	    qif->qif_queued, qif->qif_V);
	log(LOG_DEBUG, "%s: %s ER 0x%08x\n",
	    if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[ER]);
	log(LOG_DEBUG, "%s: %s EB 0x%08x\n",
	    if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[EB]);
	log(LOG_DEBUG, "%s: %s IR 0x%08x\n",
	    if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[IR]);
	log(LOG_DEBUG, "%s: %s IB 0x%08x\n",
	    if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[IB]);
	qfq_dump_groups(qif, 0xffffffff);
}
1511 | #endif /* QFQ_DEBUG */ |
1512 | |
1513 | /* |
1514 | * qfq_enqueue_ifclassq is an enqueue function to be registered to |
1515 | * (*ifcq_enqueue) in struct ifclassq. |
1516 | */ |
1517 | static int |
1518 | qfq_enqueue_ifclassq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, |
1519 | boolean_t *pdrop) |
1520 | { |
1521 | u_int32_t i = 0; |
1522 | int ret; |
1523 | pktsched_pkt_t pkt; |
1524 | struct pf_mtag *t = NULL; |
1525 | |
1526 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1527 | |
1528 | switch (ptype) { |
1529 | case QP_MBUF: { |
1530 | struct mbuf *m = p; |
1531 | if (!(m->m_flags & M_PKTHDR)) { |
1532 | /* should not happen */ |
			log(LOG_ERR, "%s: packet does not have pkthdr\n",
1534 | if_name(ifq->ifcq_ifp)); |
1535 | IFCQ_CONVERT_LOCK(ifq); |
1536 | m_freem(m); |
1537 | *pdrop = TRUE; |
1538 | return (ENOBUFS); |
1539 | } |
1540 | i = MBUF_SCIDX(mbuf_get_service_class(m)); |
1541 | t = m_pftag(m); |
1542 | break; |
1543 | } |
1544 | |
1545 | |
1546 | default: |
1547 | VERIFY(0); |
1548 | /* NOTREACHED */ |
1549 | } |
1550 | |
1551 | VERIFY((u_int32_t)i < IFCQ_SC_MAX); |
1552 | |
1553 | pktsched_pkt_encap(&pkt, ptype, p); |
1554 | |
1555 | ret = qfq_enqueue(ifq->ifcq_disc, |
1556 | ifq->ifcq_disc_slots[i].cl, &pkt, t); |
1557 | |
1558 | if ((ret != 0) && (ret != CLASSQEQ_SUCCESS_FC)) { |
1559 | pktsched_free_pkt(&pkt); |
1560 | *pdrop = TRUE; |
1561 | } else { |
1562 | *pdrop = FALSE; |
1563 | } |
1564 | |
1565 | switch (ret) { |
1566 | case CLASSQEQ_DROP: |
1567 | ret = ENOBUFS; |
1568 | break; |
1569 | case CLASSQEQ_DROP_FC: |
1570 | ret = EQFULL; |
1571 | break; |
1572 | case CLASSQEQ_DROP_SP: |
1573 | ret = EQSUSPENDED; |
1574 | break; |
1575 | case CLASSQEQ_SUCCESS_FC: |
1576 | ret = EQFULL; |
1577 | break; |
1578 | case CLASSQEQ_SUCCESS: |
1579 | ret = 0; |
1580 | break; |
1581 | default: |
1582 | VERIFY(0); |
1583 | } |
1584 | return (ret); |
1585 | } |
1586 | |
1587 | /* |
1588 | * qfq_dequeue_ifclassq is a dequeue function to be registered to |
 * (*ifcq_dequeue) in struct ifclassq.
1590 | * |
1591 | * note: CLASSQDQ_POLL returns the next packet without removing the packet |
1592 | * from the queue. CLASSQDQ_REMOVE is a normal dequeue operation. |
1593 | * CLASSQDQ_REMOVE must return the same packet if called immediately |
1594 | * after CLASSQDQ_POLL. |
1595 | */ |
1596 | static void * |
1597 | qfq_dequeue_ifclassq(struct ifclassq *ifq, classq_pkt_type_t *ptype) |
1598 | { |
1599 | pktsched_pkt_t pkt; |
1600 | bzero(&pkt, sizeof (pkt)); |
1601 | qfq_dequeue(ifq->ifcq_disc, &pkt); |
1602 | *ptype = pkt.pktsched_ptype; |
1603 | return (pkt.pktsched_pkt); |
1604 | } |
1605 | |
1606 | static int |
1607 | qfq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg) |
1608 | { |
1609 | struct qfq_if *qif = (struct qfq_if *)ifq->ifcq_disc; |
1610 | int err = 0; |
1611 | |
1612 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1613 | |
1614 | switch (req) { |
1615 | case CLASSQRQ_PURGE: |
1616 | qfq_purge(qif); |
1617 | break; |
1618 | |
1619 | case CLASSQRQ_PURGE_SC: |
1620 | qfq_purge_sc(qif, (cqrq_purge_sc_t *)arg); |
1621 | break; |
1622 | |
1623 | case CLASSQRQ_EVENT: |
1624 | qfq_event(qif, (cqev_t)arg); |
1625 | break; |
1626 | |
1627 | case CLASSQRQ_THROTTLE: |
1628 | err = qfq_throttle(qif, (cqrq_throttle_t *)arg); |
1629 | break; |
1630 | case CLASSQRQ_STAT_SC: |
1631 | err = qfq_stat_sc(qif, (cqrq_stat_sc_t *)arg); |
1632 | break; |
1633 | } |
1634 | return (err); |
1635 | } |
1636 | |
1637 | int |
1638 | qfq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, |
1639 | classq_pkt_type_t ptype) |
1640 | { |
1641 | struct ifnet *ifp = ifq->ifcq_ifp; |
1642 | struct qfq_class *cl0, *cl1, *cl2, *cl3, *cl4; |
1643 | struct qfq_class *cl5, *cl6, *cl7, *cl8, *cl9; |
1644 | struct qfq_if *qif; |
1645 | u_int32_t maxlen = 0, qflags = 0; |
1646 | int err = 0; |
1647 | |
1648 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1649 | VERIFY(ifq->ifcq_disc == NULL); |
1650 | VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); |
1651 | |
1652 | if (flags & PKTSCHEDF_QALG_SFB) |
1653 | qflags |= QFCF_SFB; |
1654 | if (flags & PKTSCHEDF_QALG_ECN) |
1655 | qflags |= QFCF_ECN; |
1656 | if (flags & PKTSCHEDF_QALG_FLOWCTL) |
1657 | qflags |= QFCF_FLOWCTL; |
1658 | if (flags & PKTSCHEDF_QALG_DELAYBASED) |
1659 | qflags |= QFCF_DELAYBASED; |
1660 | |
1661 | qif = qfq_alloc(ifp, M_WAITOK); |
1662 | if (qif == NULL) |
1663 | return (ENOMEM); |
1664 | |
1665 | if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) |
1666 | maxlen = if_sndq_maxlen; |
1667 | |
1668 | if ((err = qfq_add_queue(qif, maxlen, 300, 1200, |
1669 | qflags | QFCF_LAZY, SCIDX_BK_SYS, &cl0, ptype)) != 0) |
1670 | goto cleanup; |
1671 | |
1672 | if ((err = qfq_add_queue(qif, maxlen, 600, 1400, |
1673 | qflags | QFCF_LAZY, SCIDX_BK, &cl1, ptype)) != 0) |
1674 | goto cleanup; |
1675 | |
1676 | if ((err = qfq_add_queue(qif, maxlen, 2400, 600, |
1677 | qflags | QFCF_DEFAULTCLASS, SCIDX_BE, &cl2, ptype)) != 0) |
1678 | goto cleanup; |
1679 | |
1680 | if ((err = qfq_add_queue(qif, maxlen, 2700, 600, |
1681 | qflags | QFCF_LAZY, SCIDX_RD, &cl3, ptype)) != 0) |
1682 | goto cleanup; |
1683 | |
1684 | if ((err = qfq_add_queue(qif, maxlen, 3000, 400, |
1685 | qflags | QFCF_LAZY, SCIDX_OAM, &cl4, ptype)) != 0) |
1686 | goto cleanup; |
1687 | |
1688 | if ((err = qfq_add_queue(qif, maxlen, 8000, 1000, |
1689 | qflags | QFCF_LAZY, SCIDX_AV, &cl5, ptype)) != 0) |
1690 | goto cleanup; |
1691 | |
1692 | if ((err = qfq_add_queue(qif, maxlen, 15000, 1200, |
1693 | qflags | QFCF_LAZY, SCIDX_RV, &cl6, ptype)) != 0) |
1694 | goto cleanup; |
1695 | |
1696 | if ((err = qfq_add_queue(qif, maxlen, 20000, 1400, |
1697 | qflags | QFCF_LAZY, SCIDX_VI, &cl7, ptype)) != 0) |
1698 | goto cleanup; |
1699 | |
1700 | if ((err = qfq_add_queue(qif, maxlen, 23000, 200, |
1701 | qflags | QFCF_LAZY, SCIDX_VO, &cl8, ptype)) != 0) |
1702 | goto cleanup; |
1703 | |
1704 | if ((err = qfq_add_queue(qif, maxlen, 25000, 200, |
1705 | qflags, SCIDX_CTL, &cl9, ptype)) != 0) |
1706 | goto cleanup; |
1707 | |
1708 | err = ifclassq_attach(ifq, PKTSCHEDT_QFQ, qif, |
1709 | qfq_enqueue_ifclassq, qfq_dequeue_ifclassq, NULL, |
1710 | NULL, NULL, qfq_request_ifclassq); |
1711 | |
1712 | /* cache these for faster lookup */ |
1713 | if (err == 0) { |
1714 | ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK_SYS; |
1715 | ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0; |
1716 | |
1717 | ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK; |
1718 | ifq->ifcq_disc_slots[SCIDX_BK].cl = cl1; |
1719 | |
1720 | ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE; |
1721 | ifq->ifcq_disc_slots[SCIDX_BE].cl = cl2; |
1722 | |
1723 | ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_RD; |
1724 | ifq->ifcq_disc_slots[SCIDX_RD].cl = cl3; |
1725 | |
1726 | ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_OAM; |
1727 | ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl4; |
1728 | |
1729 | ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_AV; |
1730 | ifq->ifcq_disc_slots[SCIDX_AV].cl = cl5; |
1731 | |
1732 | ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_RV; |
1733 | ifq->ifcq_disc_slots[SCIDX_RV].cl = cl6; |
1734 | |
1735 | ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI; |
1736 | ifq->ifcq_disc_slots[SCIDX_VI].cl = cl7; |
1737 | |
1738 | ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO; |
1739 | ifq->ifcq_disc_slots[SCIDX_VO].cl = cl8; |
1740 | |
1741 | ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_CTL; |
1742 | ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl9; |
1743 | } |
1744 | |
1745 | cleanup: |
1746 | if (err != 0) |
1747 | (void) qfq_destroy_locked(qif); |
1748 | |
1749 | return (err); |
1750 | } |
1751 | |
1752 | int |
1753 | qfq_teardown_ifclassq(struct ifclassq *ifq) |
1754 | { |
1755 | struct qfq_if *qif = ifq->ifcq_disc; |
1756 | int i; |
1757 | |
1758 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1759 | VERIFY(qif != NULL && ifq->ifcq_type == PKTSCHEDT_QFQ); |
1760 | |
1761 | (void) qfq_destroy_locked(qif); |
1762 | |
1763 | ifq->ifcq_disc = NULL; |
1764 | for (i = 0; i < IFCQ_SC_MAX; i++) { |
1765 | ifq->ifcq_disc_slots[i].qid = 0; |
1766 | ifq->ifcq_disc_slots[i].cl = NULL; |
1767 | } |
1768 | |
1769 | return (ifclassq_detach(ifq)); |
1770 | } |
1771 | |
1772 | int |
1773 | qfq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot, |
1774 | struct if_ifclassq_stats *ifqs) |
1775 | { |
1776 | struct qfq_if *qif = ifq->ifcq_disc; |
1777 | |
1778 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1779 | VERIFY(ifq->ifcq_type == PKTSCHEDT_QFQ); |
1780 | |
1781 | if (slot >= IFCQ_SC_MAX) |
1782 | return (EINVAL); |
1783 | |
1784 | return (qfq_get_class_stats(qif, ifq->ifcq_disc_slots[slot].qid, |
1785 | &ifqs->ifqs_qfq_stats)); |
1786 | } |
1787 | |
1788 | static int |
1789 | qfq_throttle(struct qfq_if *qif, cqrq_throttle_t *tr) |
1790 | { |
1791 | struct ifclassq *ifq = qif->qif_ifq; |
1792 | struct qfq_class *cl; |
1793 | int err = 0; |
1794 | |
1795 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1796 | |
1797 | if (!tr->set) { |
1798 | tr->level = qif->qif_throttle; |
1799 | return (0); |
1800 | } |
1801 | |
1802 | if (tr->level == qif->qif_throttle) |
1803 | return (EALREADY); |
1804 | |
1805 | /* Current throttling levels only involve BK_SYS class */ |
1806 | cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl; |
1807 | |
1808 | switch (tr->level) { |
1809 | case IFNET_THROTTLE_OFF: |
1810 | err = qfq_resumeq(qif, cl); |
1811 | break; |
1812 | |
1813 | case IFNET_THROTTLE_OPPORTUNISTIC: |
1814 | err = qfq_suspendq(qif, cl); |
1815 | break; |
1816 | |
1817 | default: |
1818 | VERIFY(0); |
1819 | /* NOTREACHED */ |
1820 | } |
1821 | |
1822 | if (err == 0 || err == ENXIO) { |
1823 | if (pktsched_verbose) { |
			log(LOG_DEBUG, "%s: %s throttling level %sset %d->%d\n",
			    if_name(QFQIF_IFP(qif)), qfq_style(qif),
			    (err == 0) ? "" : "lazy ", qif->qif_throttle,
1827 | tr->level); |
1828 | } |
1829 | qif->qif_throttle = tr->level; |
1830 | if (err != 0) |
1831 | err = 0; |
1832 | else |
1833 | qfq_purgeq(qif, cl, 0, NULL, NULL); |
1834 | } else { |
		log(LOG_ERR, "%s: %s unable to set throttling level "
		    "%d->%d [error=%d]\n", if_name(QFQIF_IFP(qif)),
1837 | qfq_style(qif), qif->qif_throttle, tr->level, err); |
1838 | } |
1839 | |
1840 | return (err); |
1841 | } |
1842 | |
1843 | static int |
1844 | qfq_resumeq(struct qfq_if *qif, struct qfq_class *cl) |
1845 | { |
1846 | struct ifclassq *ifq = qif->qif_ifq; |
1847 | int err = 0; |
1848 | #if !MACH_ASSERT |
1849 | #pragma unused(ifq) |
1850 | #endif |
1851 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1852 | |
1853 | if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) |
1854 | err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE); |
1855 | |
1856 | if (err == 0) |
1857 | qstate(&cl->cl_q) = QS_RUNNING; |
1858 | |
1859 | return (err); |
1860 | } |
1861 | |
1862 | static int |
1863 | qfq_suspendq(struct qfq_if *qif, struct qfq_class *cl) |
1864 | { |
1865 | struct ifclassq *ifq = qif->qif_ifq; |
1866 | int err = 0; |
1867 | #if !MACH_ASSERT |
1868 | #pragma unused(ifq) |
1869 | #endif |
1870 | IFCQ_LOCK_ASSERT_HELD(ifq); |
1871 | |
1872 | if (q_is_sfb(&cl->cl_q)) { |
1873 | if (cl->cl_sfb != NULL) { |
1874 | err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE); |
1875 | } else { |
1876 | VERIFY(cl->cl_flags & QFCF_LAZY); |
1877 | err = ENXIO; /* delayed throttling */ |
1878 | } |
1879 | } |
1880 | |
1881 | if (err == 0 || err == ENXIO) |
1882 | qstate(&cl->cl_q) = QS_SUSPENDED; |
1883 | |
1884 | return (err); |
1885 | } |
1886 | |