/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/radix.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist) \
	do { \
		if ((flags & ~(oklist)) & \
		    PFR_FLAG_ALLMASK) \
			return (EINVAL); \
	} while (0)

#define COPYIN(from, to, size, flags) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	copyin((from), (to), (size)) : \
	(bcopy((void *)(uintptr_t)(from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	copyout((from), (to), (size)) : \
	(bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
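
/*
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: ioctl paths hand us
 * user-space buffers that must go through copyin()/copyout(), while
 * in-kernel callers pass kernel addresses that can simply be bcopy()'d.
 * The bcopy() arm is wrapped in a comma expression yielding 0 so both
 * arms produce an errno-style result.
 */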

#define FILLIN_SIN(sin, addr) \
	do { \
		(sin).sin_len = sizeof (sin); \
		(sin).sin_family = AF_INET; \
		(sin).sin_addr = (addr); \
	} while (0)

#define FILLIN_SIN6(sin6, addr) \
	do { \
		(sin6).sin6_len = sizeof (sin6); \
		(sin6).sin6_family = AF_INET6; \
		(sin6).sin6_addr = (addr); \
	} while (0)

#define SWAP(type, a1, a2) \
	do { \
		type tmp = a1; \
		a1 = a2; \
		a2 = tmp; \
	} while (0)

#define SUNION2PF(su, af) (((af) == AF_INET) ? \
	(struct pf_addr *)&(su)->sin.sin_addr : \
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define AF_BITS(af) (((af) == AF_INET) ? 32 : 128)
#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
	((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES (-1)
#define ENQUEUE_UNMARKED_ONLY (1)
#define INVERT_NEG_FLAG (1)
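
/*
 * NO_ADDRESSES marks a shadow table that was built without
 * PFR_FLAG_ADDRSTOO (no address list attached); the other two
 * constants name boolean arguments of pfr_enqueue_addrs() and
 * pfr_clstats_kentries() at their call sites.
 */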

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		user_addr_t pfrw1_addr;
		user_addr_t pfrw1_astats;
		struct pfr_kentryworkq *pfrw1_workq;
		struct pfr_kentry *pfrw1_kentry;
		struct pfi_dynaddr *pfrw1_dyn;
	} pfrw_1;
	int pfrw_free;
	int pfrw_flags;
};
#define pfrw_addr pfrw_1.pfrw1_addr
#define pfrw_astats pfrw_1.pfrw1_astats
#define pfrw_workq pfrw_1.pfrw1_workq
#define pfrw_kentry pfrw_1.pfrw1_kentry
#define pfrw_dyn pfrw_1.pfrw1_dyn
#define pfrw_cnt pfrw_free
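
/*
 * A pfr_walktree describes one pass over a table's radix trees:
 * pfrw_op selects the action applied to each entry, and the pfrw_*
 * macros above name the union member that op uses (PFRW_GET_ADDRS
 * copies entries out to pfrw_addr, PFRW_ENQUEUE collects them on
 * pfrw_workq, and so on).  Since pfrw_cnt aliases pfrw_free, a given
 * walk either counts entries or counts down free output slots.
 */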

#define senderr(e) do { rv = (e); goto _bad; } while (0)

struct pool pfr_ktable_pl;
struct pool pfr_kentry_pl;

static struct pool pfr_kentry_pl2;
static struct sockaddr_in pfr_sin;
static struct sockaddr_in6 pfr_sin6;
static union sockaddr_union pfr_mask;
static struct pf_addr pfr_ffaddr;

static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
static int pfr_validate_addr(struct pfr_addr *);
static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
    int *, int);
static void pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, boolean_t);
static void pfr_destroy_kentries(struct pfr_kentryworkq *);
static void pfr_destroy_kentry(struct pfr_kentry *);
static void pfr_insert_kentries(struct pfr_ktable *,
    struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_walktree(struct radix_node *, void *);
static int pfr_validate_table(struct pfr_table *, int, int);
static int pfr_fix_anchor(char *);
static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
static void pfr_insert_ktables(struct pfr_ktableworkq *);
static void pfr_insert_ktable(struct pfr_ktable *);
static void pfr_setflags_ktables(struct pfr_ktableworkq *);
static void pfr_setflags_ktable(struct pfr_ktable *, int);
static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void pfr_destroy_ktable(struct pfr_ktable *, int);
static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
static int pfr_table_count(struct pfr_table *, int);
static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
    pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static struct pfr_ktablehead pfr_ktables;
static struct pfr_table pfr_nulltable;
static int pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#if 0
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return 0;
}

int
pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	int i, rv, xadd = 0;
	user_addr_t addr = _addr;
	u_int64_t tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL) {
		return ENOMEM;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
			} else if (p == NULL) {
				ad.pfra_fback = PFR_FB_ADDED;
			} else if (p->pfrke_not != ad.pfra_not) {
				ad.pfra_fback = PFR_FB_CONFLICT;
			} else {
				ad.pfra_fback = PFR_FB_NONE;
			}
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL) {
				senderr(ENOMEM);
			}
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else {
		pfr_destroy_kentries(&workq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	pfr_destroy_ktable(tmpkt, 0);
	return 0;
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	pfr_destroy_ktable(tmpkt, 0);
	return rv;
}

int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	/*
	 * There are two algorithms to choose from here,
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * One is O(N) and is better for large 'n'; the other is
	 * O(n*log(N)) and is better for small 'n'.
	 *
	 * The following code decides which one to use.
	 */
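	/*
	 * Worked example (illustrative): with N = 1024 entries the loop
	 * below leaves log = 12 (one more than the number of halvings,
	 * roughly log2(N)), so deleting more than 1024 / 12 = 85
	 * addresses triggers the O(N) mark-and-sweep, while smaller
	 * requests do n individual O(log N) lookups instead.
	 */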
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1) {
		log++;
	}
	if (size > kt->pfrkt_cnt / log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++, addr += sizeof(ad)) {
			if (COPYIN(addr, &ad, sizeof(ad), flags)) {
				return EFAULT;
			}
			if (pfr_validate_addr(&ad)) {
				return EINVAL;
			}
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL) {
				p->pfrke_mark = 0;
			}
		}
	}
	SLIST_INIT(&workq);
	for (addr = _addr, i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL) {
				ad.pfra_fback = PFR_FB_NONE;
			} else if (p->pfrke_not != ad.pfra_not) {
				ad.pfra_fback = PFR_FB_CONFLICT;
			} else if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
			} else {
				ad.pfra_fback = PFR_FB_DELETED;
			}
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
_bad:
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	return rv;
}

int
pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq addq, delq, changeq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xadd = 0, xdel = 0, xchange = 0;
	u_int64_t tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL) {
		return ENOMEM;
	}
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL) {
				senderr(ENOMEM);
			}
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size + xdel) {
			*size2 = size + xdel;
			senderr(0);
		}
		i = 0;
		addr = _addr + size;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
			addr += sizeof(ad);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else {
		pfr_destroy_kentries(&addq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}
	if ((flags & PFR_FLAG_FEEDBACK) && size2) {
		*size2 = size + xdel;
	}
	pfr_destroy_ktable(tmpkt, 0);
	return 0;
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	pfr_destroy_ktable(tmpkt, 0);
	return rv;
}

int
pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	int i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}

	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			return EFAULT;
		}
		if (pfr_validate_addr(&ad)) {
			return EINVAL;
		}
		if (ADDR_NETWORK(&ad)) {
			return EINVAL;
		}
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE) {
			pfr_copyout_addr(&ad, p);
		}
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not) {
			xmatch++;
		}
		if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
			return EFAULT;
		}
	}
	if (nmatch != NULL) {
		*nmatch = xmatch;
	}
	return 0;
}

int
pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	int rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return 0;
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv) {
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
	if (rv) {
		return rv;
	}

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return ENOTTY;
	}
	*size = kt->pfrkt_cnt;
	return 0;
}

int
pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	struct pfr_kentryworkq workq;
	int rv;
	u_int64_t tzero = pf_calendar_time_second();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return 0;
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv) {
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv) {
		return rv;
	}

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return ENOTTY;
	}
	*size = kt->pfrkt_cnt;
	return 0;
}

int
pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, 0, 0);
	}
	if (nzero != NULL) {
		*nzero = xzero;
	}
	return 0;
_bad:
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	return rv;
}

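/*
 * pfr_validate_addr() accepts only canonical entries: the prefix length
 * must fit the address family, every host bit beyond the prefix must be
 * zero (10.0.0.1/24 is rejected while 10.0.0.0/24 is accepted), the
 * "not" flag must be 0 or 1, and the feedback field must arrive cleared.
 */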
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#if INET
	case AF_INET:
		if (ad->pfra_net > 32) {
			return -1;
		}
		break;
#endif /* INET */
	case AF_INET6:
		if (ad->pfra_net > 128) {
			return -1;
		}
		break;
	default:
		return -1;
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net / 8] & (0xFF >> (ad->pfra_net % 8)))) {
		return -1;
	}
	for (i = (ad->pfra_net + 7) / 8; i < (int)sizeof(ad->pfra_u); i++) {
		if (((caddr_t)ad)[i]) {
			return -1;
		}
	}
	if (ad->pfra_not && ad->pfra_not != 1) {
		return -1;
	}
	if (ad->pfra_fback) {
		return -1;
	}
	return 0;
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL) {
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w)) {
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
		}
	}
	if (kt->pfrkt_ip6 != NULL) {
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w)) {
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
		}
	}
	if (naddr != NULL) {
		*naddr = w.pfrw_cnt;
	}
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) {
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	}
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) {
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
	}
}


static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union sa, mask;
	struct radix_node_head *head;
	struct pfr_kentry *ke;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	} else {
		return NULL;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		if (exact && ke && KENTRY_NETWORK(ke)) {
			ke = NULL;
		}
	}
	return ke;
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, boolean_t intr)
{
	struct pfr_kentry *ke;

	if (intr) {
		ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
	} else {
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
	}
	if (ke == NULL) {
		return NULL;
	}
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = (u_int8_t)intr;
	return ke;
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool) {
		pool_put(&pfr_kentry_pl2, ke);
	} else {
		pool_put(&pfr_kentry_pl, ke);
	}
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL) {
		return 0;
	}
	p = pfr_create_kentry(ad, TRUE);
	if (p == NULL) {
		return EINVAL;
	}

	rv = pfr_route_kentry(kt, p);
	if (rv) {
		return rv;
	}

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return 0;
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p;
	int n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
	struct pfr_kentry *p;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange) {
			p->pfrke_not = !p->pfrke_not;
		}
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
	struct pfr_addr ad;
	int i;

	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			break;
		}
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
			break;
		}
	}
}

static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32 - net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32 - net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
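
/*
 * Illustrative sketch (never compiled, following the #if 0 convention
 * used earlier in this file): pfr_prepare_network() expands a prefix
 * length into a sockaddr-style netmask for the radix tree.
 */
#if 0
	union sockaddr_union m;

	pfr_prepare_network(&m, AF_INET, 24);
	/* m.sin.sin_addr.s_addr == htonl(0xffffff00), i.e. 255.255.255.0 */

	pfr_prepare_network(&m, AF_INET6, 48);
	/* first 48 bits set: the mask ffff:ffff:ffff:: */
#endif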

static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union mask;
	struct radix_node *rn;
	struct radix_node_head *head;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET) {
		head = kt->pfrkt_ip4;
	} else if (ke->pfrke_af == AF_INET6) {
		head = kt->pfrkt_ip6;
	} else {
		return -1;
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else {
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	}

	return rn == NULL ? -1 : 0;
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union mask;
	struct radix_node *rn;
	struct radix_node_head *head;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (ke->pfrke_af == AF_INET) {
		head = kt->pfrkt_ip4;
	} else if (ke->pfrke_af == AF_INET6) {
		head = kt->pfrkt_ip6;
	} else {
		return -1;
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else {
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	}

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return -1;
	}
	return 0;
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL) {
		return;
	}
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET) {
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	} else if (ad->pfra_af == AF_INET6) {
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
	}
}

static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry *ke = (struct pfr_kentry *)rn;
	struct pfr_walktree *w = arg;
	int flags = w->pfrw_flags;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(ke != NULL);
	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark) {
			break;
		}
		OS_FALLTHROUGH;
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad))) {
				return EFAULT;
			}
			w->pfrw_addr += sizeof(ad);
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			bzero(&as, sizeof(as));

			pfr_copyout_addr(&as.pfras_a, ke);

			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags)) {
				return EFAULT;
			}
			w->pfrw_astats += sizeof(as);
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not) {
			break; /* negative entries are ignored */
		}
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return 1; /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0) {
				break;
			}
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
				&ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
				&pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0) {
				break;
			}
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
				&ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
				&pfr_mask, AF_INET6);
		}
		break;
	}
	return 0;
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	int xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor)) {
		return EINVAL;
	}
	if (pfr_table_count(filter, flags) < 0) {
		return ENOENT;
	}

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0) {
			continue;
		}
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			continue;
		}
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}

int
pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq addq, changeq;
	struct pfr_ktable *p, *q, *r, key;
	int i, rv, xadd = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			senderr(EFAULT);
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL)) {
			senderr(EINVAL);
		}
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL) {
				senderr(ENOMEM);
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0]) {
				goto _skip;
			}

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL) {
				senderr(ENOMEM);
			}
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q)) {
					goto _skip;
				}
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else {
		pfr_destroy_ktables(&addq, 0);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	return 0;
_bad:
	pfr_destroy_ktables(&addq, 0);
	return rv;
}

int
pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q)) {
					goto _skip;
				}
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}

int
pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	int n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor)) {
		return EINVAL;
	}
	n = nn = pfr_table_count(filter, flags);
	if (n < 0) {
		return ENOENT;
	}
	if (n > *size) {
		*size = n;
		return 0;
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		if (n-- <= 0) {
			continue;
		}
		if (COPYOUT(&p->pfrkt_t, tbl, sizeof(p->pfrkt_t), flags)) {
			return EFAULT;
		}
		tbl += sizeof(p->pfrkt_t);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return ENOTTY;
	}
	*size = nn;
	return 0;
}

int
pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	struct pfr_ktableworkq workq;
	int n, nn;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor)) {
		return EINVAL;
	}
	n = nn = pfr_table_count(filter, flags);
	if (n < 0) {
		return ENOENT;
	}
	if (n > *size) {
		*size = n;
		return 0;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		if (n-- <= 0) {
			continue;
		}
		if (COPYOUT(&p->pfrkt_ts, tbl, sizeof(p->pfrkt_ts), flags)) {
			return EFAULT;
		}
		tbl += sizeof(p->pfrkt_ts);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS) {
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	}
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return ENOTTY;
	}
	*size = nn;
	return 0;
}

int
pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, key;
	int i, xzero = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL) {
		*nzero = xzero;
	}
	return 0;
}

int
pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xchange = 0, xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag)) {
		return EINVAL;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags) {
				goto _skip;
			}
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q)) {
					goto _skip;
				}
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) {
				xdel++;
			} else {
				xchange++;
			}
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL) {
		return ENOMEM;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL) {
			*ticket = ++rs->tticket;
		}
		rs->topen = 1;
	} else {
		pf_release_ruleset(rs);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}

int
pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq tableq;
	struct pfr_kentryworkq addrq;
	struct pfr_ktable *kt, *rt, *shadow, key;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	struct pf_ruleset *rs;
	int i, rv, xadd = 0, xaddr = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO)) {
		return EINVAL;
	}
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		return EBUSY;
	}
	pf_release_ruleset(rs);
	rs = NULL;
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)(void *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL) {
			return ENOMEM;
		}
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0]) {
			goto _skip;
		}

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return ENOMEM;
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) {
		xadd++;
	}
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return ENOMEM;
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL) {
			continue;
		}
		p = pfr_create_kentry(&ad, FALSE);
		if (p == NULL) {
			senderr(ENOMEM);
		}
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL) {
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		}
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (naddr != NULL) {
		*naddr = xaddr;
	}
	return 0;
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return rv;
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		goto done;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
done:
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return 0;
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable *p, *q;
	struct pfr_ktableworkq workq;
	struct pf_ruleset *rs;
	int xadd = 0, xchange = 0;
	u_int64_t tzero = pf_calendar_time_second();
	int err = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		err = EBUSY;
		goto done;
	}

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) {
			xchange++;
		} else {
			xadd++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}

done:
	if (rs != NULL) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return err;
}

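/*
 * Commit a shadow table built by pfr_ina_define(): if the live table is
 * active (and so may already hold addresses) the two are merged entry
 * by entry (add, delete, or flip the "not" flag and clear stats);
 * otherwise the radix heads are simply swapped, which is O(1).
 */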
static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
	struct pfr_ktable *shadow = kt->pfrkt_shadow;
	int nflags;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			pfr_clstats_ktable(kt, tzero, 1);
		}
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry *p, *q, *next;
		struct pfr_addr ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq); /* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not) {
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				}
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
	    ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

void
pfr_table_copyin_cleanup(struct pfr_table *tbl)
{
	tbl->pfrt_anchor[sizeof(tbl->pfrt_anchor) - 1] = '\0';
	tbl->pfrt_name[sizeof(tbl->pfrt_name) - 1] = '\0';
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	size_t i;

	if (!tbl->pfrt_name[0]) {
		return -1;
	}
	if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0) {
		return -1;
	}
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE - 1]) {
		return -1;
	}
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) {
		if (tbl->pfrt_name[i]) {
			return -1;
		}
	}
	if (pfr_fix_anchor(tbl->pfrt_anchor)) {
		return -1;
	}
	if (tbl->pfrt_flags & ~allowedflags) {
		return -1;
	}
	return 0;
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	size_t i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/') {
			off++;
		}
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1]) {
		return -1;
	}
	for (i = strlen(anchor); i < siz; i++) {
		if (anchor[i]) {
			return -1;
		}
	}
	return 0;
}
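
/*
 * Example: an anchor of "//foo/bar" is rewritten in place to
 * "foo/bar"; any buffer that is not NUL-padded out to MAXPATHLEN is
 * rejected, so stale bytes past the string cannot leak into lookups.
 */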
1995
1996static int
1997pfr_table_count(struct pfr_table *filter, int flags)
1998{
1999 struct pf_ruleset *rs;
2000
2001 if (flags & PFR_FLAG_ALLRSETS) {
2002 return pfr_ktable_cnt;
2003 }
2004 if (filter->pfrt_anchor[0]) {
2005 int r = -1;
2006 rs = pf_find_ruleset(filter->pfrt_anchor);
2007 r = (rs != NULL) ? rs->tables : -1;
2008 if (rs) {
2009 pf_release_ruleset(r: rs);
2010 rs = NULL;
2011 }
2012 return r;
2013 }
2014 return pf_main_ruleset.tables;
2015}
2016
2017static int
2018pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
2019{
2020 if (flags & PFR_FLAG_ALLRSETS) {
2021 return 0;
2022 }
2023 if (strcmp(s1: filter->pfrt_anchor, s2: kt->pfrkt_anchor)) {
2024 return 1;
2025 }
2026 return 0;
2027}
2028
2029static void
2030pfr_insert_ktables(struct pfr_ktableworkq *workq)
2031{
2032 struct pfr_ktable *p;
2033
2034 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2035
2036 SLIST_FOREACH(p, workq, pfrkt_workq)
2037 pfr_insert_ktable(p);
2038}
2039
2040static void
2041pfr_insert_ktable(struct pfr_ktable *kt)
2042{
2043 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2044
2045 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
2046 pfr_ktable_cnt++;
2047 if (kt->pfrkt_root != NULL) {
2048 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) {
2049 pfr_setflags_ktable(kt->pfrkt_root,
2050 kt->pfrkt_root->pfrkt_flags | PFR_TFLAG_REFDANCHOR);
2051 }
2052 }
2053}
2054
2055static void
2056pfr_setflags_ktables(struct pfr_ktableworkq *workq)
2057{
2058 struct pfr_ktable *p, *q;
2059
2060 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2061
2062 for (p = SLIST_FIRST(workq); p; p = q) {
2063 q = SLIST_NEXT(p, pfrkt_workq);
2064 pfr_setflags_ktable(p, p->pfrkt_nflags);
2065 }
2066}
2067
2068static void
2069pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
2070{
2071 struct pfr_kentryworkq addrq;
2072
2073 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2074
2075 if (!(newf & PFR_TFLAG_REFERENCED) &&
2076 !(newf & PFR_TFLAG_REFDANCHOR) &&
2077 !(newf & PFR_TFLAG_PERSIST)) {
2078 newf &= ~PFR_TFLAG_ACTIVE;
2079 }
2080 if (!(newf & PFR_TFLAG_ACTIVE)) {
2081 newf &= ~PFR_TFLAG_USRMASK;
2082 }
2083 if (!(newf & PFR_TFLAG_SETMASK)) {
2084 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
2085 if (kt->pfrkt_root != NULL) {
2086 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) {
2087 pfr_setflags_ktable(kt: kt->pfrkt_root,
2088 newf: kt->pfrkt_root->pfrkt_flags &
2089 ~PFR_TFLAG_REFDANCHOR);
2090 }
2091 }
2092 pfr_destroy_ktable(kt, 1);
2093 pfr_ktable_cnt--;
2094 return;
2095 }
2096 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
2097 pfr_enqueue_addrs(kt, workq: &addrq, NULL, sweep: 0);
2098 pfr_remove_kentries(kt, workq: &addrq);
2099 }
2100 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
2101 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
2102 kt->pfrkt_shadow = NULL;
2103 }
2104 kt->pfrkt_flags = newf;
2105}
2106
static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
{
	struct pfr_ktable *p;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrkt_workq)
	pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}

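/*
 * Allocate and initialize a kernel table from the user-visible
 * pfr_table descriptor.  With attachruleset set, the table is bound to
 * the ruleset of its anchor (creating the ruleset if needed).  One
 * radix head is set up per supported address family; any failure tears
 * the table back down and returns NULL.
 */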
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
	struct pfr_ktable *kt;
	struct pf_ruleset *rs;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
	if (kt == NULL) {
		return NULL;
	}
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return NULL;
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return NULL;
	}
	kt->pfrkt_tzero = tzero;

	return kt;
}

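/*
 * Destroy each table on the work queue.  pfr_destroy_ktable optionally
 * flushes the address entries first, then frees both radix heads,
 * recursively destroys any shadow table, drops the ruleset reference,
 * and returns the ktable to its pool.
 */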
static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable *p, *q;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL) {
		zfree(radix_node_head_zone, kt->pfrkt_ip4);
	}
	if (kt->pfrkt_ip6 != NULL) {
		zfree(radix_node_head_zone, kt->pfrkt_ip6);
	}
	if (kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	}
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_release_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

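/*
 * Ordering predicate for the pfr_ktables red-black tree: tables sort by
 * name first, then by anchor path.
 */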
static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) {
		return d;
	}
	return strcmp(p->pfrkt_anchor, q->pfrkt_anchor);
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* struct pfr_ktable starts like a struct pfr_table */
	return RB_FIND(pfr_ktablehead, &pfr_ktables,
	           (struct pfr_ktable *)(void *)tbl);
}

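/*
 * Test whether an address matches a table; returns 1 on a match that is
 * not negated.  An inactive table with a root defers to the root table,
 * and the match/nomatch counters of the table actually searched are
 * updated.
 */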
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry *ke = NULL;
	int match;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return 0;
	}

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
#endif /* INET */
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match) {
		kt->pfrkt_match++;
	} else {
		kt->pfrkt_nomatch++;
	}
	return match;
}

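/*
 * Charge a packet to the table (and matching entry) statistics.  If the
 * lookup outcome no longer agrees with what the rule saw at match time
 * (notrule), the packet is accounted under PFR_OP_XPASS and the entry
 * counters are left alone.
 */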
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *ke = NULL;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return;
	}

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
#endif /* INET */
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS) {
			printf("pfr_update_stats: assertion failed.\n");
		}
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

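/*
 * Look up or create the named table for a ruleset and take a rule
 * reference on it.  A table defined inside an anchor is linked to a
 * root table of the same name in the main ruleset.  The first rule
 * reference marks the table PFR_TFLAG_REFERENCED.
 */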
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_anchor *ac = rs->anchor;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
		if (kt == NULL) {
			return NULL;
		}
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return NULL;
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) {
		pfr_setflags_ktable(kt, kt->pfrkt_flags | PFR_TFLAG_REFERENCED);
	}
	return kt;
}

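/*
 * Drop a rule reference on a table; releasing the last reference clears
 * PFR_TFLAG_REFERENCED, which may deactivate and destroy the table via
 * pfr_setflags_ktable.
 */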
void
pfr_detach_table(struct pfr_ktable *kt)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) {
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	} else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) {
		pfr_setflags_ktable(kt, kt->pfrkt_flags & ~PFR_TFLAG_REFERENCED);
	}
}

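/*
 * Select an address from the table for use by address pools.  *pidx
 * names the current block; counter carries the last address handed out
 * so iteration can resume inside a block.  Nested, more specific blocks
 * are stepped over by advancing the counter past them.  Returns 0 on
 * success, 1 when the table offers no further address, -1 on error.
 */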
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry *ke, *ke2;
	struct pf_addr *addr;
	union sockaddr_union mask;
	int idx = -1, use_counter = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (af == AF_INET) {
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	} else if (af == AF_INET6) {
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	} else {
		return -1;
	}

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return -1;
	}

	if (pidx != NULL) {
		idx = *pidx;
	}
	if (counter != NULL && idx >= 0) {
		use_counter = 1;
	}
	if (idx < 0) {
		idx = 0;
	}

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return 1;
	}
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is the supplied address within the block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return 0;
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET) {
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		} else if (af == AF_INET6) {
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		} else {
			return -1; /* never happens */
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return 0;
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

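/*
 * Return the idx'th entry of the given address family, found by walking
 * the radix tree with a PFRW_POOL_GET walker.
 */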
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree w;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#if INET
	case AF_INET:
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
		return w.pfrw_kentry;
#endif /* INET */
	case AF_INET6:
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
		return w.pfrw_kentry;
	default:
		return NULL;
	}
}

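/*
 * Refresh the dynamic-address state from the current table contents:
 * both radix trees are walked with a PFRW_DYNADDR_UPDATE walker and the
 * per-family address counts are recomputed.
 */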
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET) {
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
	}
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) {
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
}
