1/*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32/*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67#include <machine/endian.h>
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/mbuf.h>
71#include <sys/filio.h>
72#include <sys/fcntl.h>
73#include <sys/socket.h>
74#include <sys/socketvar.h>
75#include <sys/kernel.h>
76#include <sys/time.h>
77#include <sys/proc_internal.h>
78#include <sys/malloc.h>
79#include <sys/kauth.h>
80#include <sys/conf.h>
81#include <sys/mcache.h>
82#include <sys/queue.h>
83
84#include <mach/vm_param.h>
85
86#include <net/dlil.h>
87#include <net/if.h>
88#include <net/if_types.h>
89#include <net/net_api_stats.h>
90#include <net/route.h>
91
92#include <netinet/in.h>
93#include <netinet/in_var.h>
94#include <netinet/in_systm.h>
95#include <netinet/ip.h>
96#include <netinet/ip_var.h>
97#include <netinet/ip_icmp.h>
98#include <netinet/if_ether.h>
99
100#if DUMMYNET
101#include <netinet/ip_dummynet.h>
102#else
103struct ip_fw_args;
104#endif /* DUMMYNET */
105
106#include <libkern/crypto/md5.h>
107
108#include <machine/machine_routines.h>
109
110#include <miscfs/devfs/devfs.h>
111
112#include <net/pfvar.h>
113
114#if NPFSYNC
115#include <net/if_pfsync.h>
116#endif /* NPFSYNC */
117
118#if PFLOG
119#include <net/if_pflog.h>
120#endif /* PFLOG */
121
122#if INET6
123#include <netinet/ip6.h>
124#include <netinet/in_pcb.h>
125#endif /* INET6 */
126
127#include <dev/random/randomdev.h>
128
129#if 0
130static void pfdetach(void);
131#endif
132static int pfopen(dev_t, int, int, struct proc *);
133static int pfclose(dev_t, int, int, struct proc *);
134static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
135static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
136 struct pfioc_table_64 *, struct proc *);
137static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
138 struct pfioc_tokens_64 *, struct proc *);
139static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
140static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
141 struct proc *);
142static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
143static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
144 struct pfioc_states_64 *, struct proc *);
145static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
146static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
147static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
148static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
149static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
150static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
151 struct pfioc_trans_64 *, struct proc *);
152static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
153 struct pfioc_src_nodes_64 *, struct proc *);
154static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
155 struct proc *);
156static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
157 struct pfioc_iface_64 *, struct proc *);
158static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
159 u_int8_t, u_int8_t, u_int8_t);
160static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
161static void pf_empty_pool(struct pf_palist *);
162static int pf_begin_rules(u_int32_t *, int, const char *);
163static int pf_rollback_rules(u_int32_t, int, char *);
164static int pf_setup_pfsync_matching(struct pf_ruleset *);
165static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
166static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
167static int pf_commit_rules(u_int32_t, int, char *);
168static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
169 int);
170static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
171static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
172 struct pf_state *);
173static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
174 struct pf_state *);
175static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
176static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
177static void pf_expire_states_and_src_nodes(struct pf_rule *);
178static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
179 int, struct pf_rule *);
180static void pf_addrwrap_setup(struct pf_addr_wrap *);
181static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
182 struct pf_ruleset *);
183static void pf_delete_rule_by_owner(char *, u_int32_t);
184static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
185static void pf_ruleset_cleanup(struct pf_ruleset *, int);
186static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
187 int, struct pf_rule **);
188
189#define PF_CDEV_MAJOR (-1)
190
/*
 * Character device switch for the pf control devices (/dev/pf and
 * /dev/pfm): only open, close and ioctl are implemented; all other
 * entry points are error stubs.
 */
static struct cdevsw pf_cdevsw = {
	/* open */ pfopen,
	/* close */ pfclose,
	/* read */ eno_rdwrt,
	/* write */ eno_rdwrt,
	/* ioctl */ pfioctl,
	/* stop */ eno_stop,
	/* reset */ eno_reset,
	/* tty */ NULL,
	/* select */ eno_select,
	/* mmap */ eno_mmap,
	/* strategy */ eno_strat,
	/* getc */ eno_getc,
	/* putc */ eno_putc,
	/* type */ 0
};
207
208static void pf_attach_hooks(void);
209#if 0
210/* currently unused along with pfdetach() */
211static void pf_detach_hooks(void);
212#endif
213
214/*
215 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
216 * and used in pf_af_hook() for performance optimization, such that packets
217 * will enter pf_test() or pf_test6() only when PF is running.
218 */
219int pf_is_enabled = 0;
220
221u_int32_t pf_hash_seed;
222int16_t pf_nat64_configured = 0;
223
224/*
225 * These are the pf enabled reference counting variables
226 */
227static u_int64_t pf_enabled_ref_count;
228static u_int32_t nr_tokens = 0;
229static u_int64_t pffwrules;
230static u_int32_t pfdevcnt;
231
232SLIST_HEAD(list_head, pfioc_kernel_token);
233static struct list_head token_list_head;
234
235struct pf_rule pf_default_rule;
236
237#define TAGID_MAX 50000
238static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
239 TAILQ_HEAD_INITIALIZER(pf_tags);
240
241#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
242#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
243#endif
244static u_int16_t tagname2tag(struct pf_tags *, char *);
245static void tag2tagname(struct pf_tags *, u_int16_t, char *);
246static void tag_unref(struct pf_tags *, u_int16_t);
247static int pf_rtlabel_add(struct pf_addr_wrap *);
248static void pf_rtlabel_remove(struct pf_addr_wrap *);
249static void pf_rtlabel_copyout(struct pf_addr_wrap *);
250
251#if INET
252static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
253 struct ip_fw_args *);
254#endif /* INET */
255#if INET6
256static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
257 struct ip_fw_args *);
258#endif /* INET6 */
259
/* Print x (a parenthesized printf argument list) when the pf debug
 * level is at least n. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
/* Declares a zero-initialized pointer `s_un' to a union holding both
 * the 32-bit and 64-bit layouts of struct s. */
#define PFIOCX_STRUCT_DECL(s) \
struct { \
	union { \
		struct s##_32 _s##_32; \
		struct s##_64 _s##_64; \
	} _u; \
} *s##_un = NULL \

/* Allocates the union and copies the user structure at `a' into the
 * variant selected by `p64' (assumed in scope at the call site); runs
 * `_action' on allocation failure. */
#define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
	VERIFY(s##_un == NULL); \
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
	if (s##_un == NULL) { \
		_action \
	} else { \
		if (p64) \
			bcopy(a, &s##_un->_u._s##_64, \
			    sizeof (struct s##_64)); \
		else \
			bcopy(a, &s##_un->_u._s##_32, \
			    sizeof (struct s##_32)); \
	} \
}

/* Copies the (possibly updated) kernel copy back out to `a' and frees
 * the union; also depends on `p64' at the call site. */
#define PFIOCX_STRUCT_END(s, a) { \
	VERIFY(s##_un != NULL); \
	if (p64) \
		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
	else \
		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
	_FREE(s##_un, M_TEMP); \
	s##_un = NULL; \
}

#define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)

/*
 * Helper macros for regular ioctl structures.
 */
/* Allocates a kernel copy of the fixed-layout user structure at `a'
 * into pointer `v'; runs `_action' on allocation failure. */
#define PFIOC_STRUCT_BEGIN(a, v, _action) { \
	VERIFY((v) == NULL); \
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
	if ((v) == NULL) { \
		_action \
	} else { \
		bcopy(a, v, sizeof (*(v))); \
	} \
}

/* Copies the kernel structure back to `a' and frees it. */
#define PFIOC_STRUCT_END(v, a) { \
	VERIFY((v) != NULL); \
	bcopy(v, a, sizeof (*(v))); \
	_FREE(v, M_TEMP); \
	(v) = NULL; \
}

/* NOTE(review): these duplicate PFIOCX_STRUCT_ADDR32/64 above. */
#define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
323
324static lck_attr_t *pf_perim_lock_attr;
325static lck_grp_t *pf_perim_lock_grp;
326static lck_grp_attr_t *pf_perim_lock_grp_attr;
327
328static lck_attr_t *pf_lock_attr;
329static lck_grp_t *pf_lock_grp;
330static lck_grp_attr_t *pf_lock_grp_attr;
331
332struct thread *pf_purge_thread;
333
334extern void pfi_kifaddr_update(void *);
335
336/* pf enable ref-counting helper functions */
337static u_int64_t generate_token(struct proc *);
338static int remove_token(struct pfioc_remove_token *);
339static void invalidate_all_tokens(void);
340
341static u_int64_t
342generate_token(struct proc *p)
343{
344 u_int64_t token_value;
345 struct pfioc_kernel_token *new_token;
346
347 new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
348 M_WAITOK|M_ZERO);
349
350 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
351
352 if (new_token == NULL) {
353 /* malloc failed! bail! */
354 printf("%s: unable to allocate pf token structure!", __func__);
355 return (0);
356 }
357
358 token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
359
360 new_token->token.token_value = token_value;
361 new_token->token.pid = proc_pid(p);
362 proc_name(new_token->token.pid, new_token->token.proc_name,
363 sizeof (new_token->token.proc_name));
364 new_token->token.timestamp = pf_calendar_time_second();
365
366 SLIST_INSERT_HEAD(&token_list_head, new_token, next);
367 nr_tokens++;
368
369 return (token_value);
370}
371
372static int
373remove_token(struct pfioc_remove_token *tok)
374{
375 struct pfioc_kernel_token *entry, *tmp;
376
377 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
378
379 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
380 if (tok->token_value == entry->token.token_value) {
381 SLIST_REMOVE(&token_list_head, entry,
382 pfioc_kernel_token, next);
383 _FREE(entry, M_TEMP);
384 nr_tokens--;
385 return (0); /* success */
386 }
387 }
388
389 printf("pf : remove failure\n");
390 return (ESRCH); /* failure */
391}
392
393static void
394invalidate_all_tokens(void)
395{
396 struct pfioc_kernel_token *entry, *tmp;
397
398 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
399
400 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
401 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
402 _FREE(entry, M_TEMP);
403 }
404
405 nr_tokens = 0;
406}
407
/*
 * One-time pf initialization: sets up the perimeter and main locks,
 * the fixed-size allocation pools, the table/interface/OS-fingerprint
 * subsystems, the default rule and its timeouts, the purge thread, and
 * the /dev/pf and /dev/pfm device nodes.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* perimeter rw-lock: taken as writer around DIOCSTART/DIOCSTOP */
	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	/* pf_lock mutex: protects pf data structures proper */
	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	/* allocation pools for rules, states, source nodes, pool addrs */
	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* lower the table-entry hard limit on small-memory systems */
	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* service-class constants must encode their SCIDX_* index */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf: regular control device */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	/* /dev/pfm: exclusive-open device whose rules are PFRULE_PFM */
	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
530
#if 0
/*
 * Full pf teardown: stop the purge thread, flush rulesets, states,
 * source nodes, tables and anchors, then destroy the pools and
 * subsystems.  Compiled out — pf cannot currently be detached.
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);

	/* clear states: mark everything for purge, then sweep once */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: detach from states, expire, then sweep */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors: emptying rulesets removes each anchor */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
604
605static int
606pfopen(dev_t dev, int flags, int fmt, struct proc *p)
607{
608#pragma unused(flags, fmt, p)
609 if (minor(dev) >= PFDEV_MAX)
610 return (ENXIO);
611
612 if (minor(dev) == PFDEV_PFM) {
613 lck_mtx_lock(pf_lock);
614 if (pfdevcnt != 0) {
615 lck_mtx_unlock(pf_lock);
616 return (EBUSY);
617 }
618 pfdevcnt++;
619 lck_mtx_unlock(pf_lock);
620 }
621 return (0);
622}
623
624static int
625pfclose(dev_t dev, int flags, int fmt, struct proc *p)
626{
627#pragma unused(flags, fmt, p)
628 if (minor(dev) >= PFDEV_MAX)
629 return (ENXIO);
630
631 if (minor(dev) == PFDEV_PFM) {
632 lck_mtx_lock(pf_lock);
633 VERIFY(pfdevcnt > 0);
634 pfdevcnt--;
635 lck_mtx_unlock(pf_lock);
636 }
637 return (0);
638}
639
640static struct pf_pool *
641pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
642 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
643 u_int8_t check_ticket)
644{
645 struct pf_ruleset *ruleset;
646 struct pf_rule *rule;
647 int rs_num;
648
649 ruleset = pf_find_ruleset(anchor);
650 if (ruleset == NULL)
651 return (NULL);
652 rs_num = pf_get_ruleset_number(rule_action);
653 if (rs_num >= PF_RULESET_MAX)
654 return (NULL);
655 if (active) {
656 if (check_ticket && ticket !=
657 ruleset->rules[rs_num].active.ticket)
658 return (NULL);
659 if (r_last)
660 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
661 pf_rulequeue);
662 else
663 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
664 } else {
665 if (check_ticket && ticket !=
666 ruleset->rules[rs_num].inactive.ticket)
667 return (NULL);
668 if (r_last)
669 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
670 pf_rulequeue);
671 else
672 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
673 }
674 if (!r_last) {
675 while ((rule != NULL) && (rule->nr != rule_number))
676 rule = TAILQ_NEXT(rule, entries);
677 }
678 if (rule == NULL)
679 return (NULL);
680
681 return (&rule->rpool);
682}
683
684static void
685pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
686{
687 struct pf_pooladdr *mv_pool_pa;
688
689 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
690 TAILQ_REMOVE(poola, mv_pool_pa, entries);
691 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
692 }
693}
694
695static void
696pf_empty_pool(struct pf_palist *poola)
697{
698 struct pf_pooladdr *empty_pool_pa;
699
700 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
701 pfi_dynaddr_remove(&empty_pool_pa->addr);
702 pf_tbladdr_remove(&empty_pool_pa->addr);
703 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
704 TAILQ_REMOVE(poola, empty_pool_pa, entries);
705 pool_put(&pf_pooladdr_pl, empty_pool_pa);
706 }
707}
708
/*
 * Unlink `rule' from `rulequeue' (when non-NULL) and destroy it once
 * nothing references it any more.  A rule that still has states or
 * source nodes attached is only detached here; final destruction
 * happens on a later call (with rulequeue == NULL) after the last
 * reference is gone.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as no longer on a queue */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer destruction while states or source nodes still point here */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* tables were not detached in the branch above; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
749
/*
 * Return the numeric tag id for `tagname', taking a reference.  An
 * existing entry just gets its refcount bumped; otherwise a new id is
 * chosen by scanning the (tag-sorted) list for the first unused id.
 * Returns 0 when no id <= TAGID_MAX is available or allocation fails.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* list is kept sorted by tag id, so the first gap wins */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
792
793static void
794tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
795{
796 struct pf_tagname *tag;
797
798 TAILQ_FOREACH(tag, head, entries)
799 if (tag->tag == tagid) {
800 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
801 return;
802 }
803}
804
805static void
806tag_unref(struct pf_tags *head, u_int16_t tag)
807{
808 struct pf_tagname *p, *next;
809
810 if (tag == 0)
811 return;
812
813 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
814 next = TAILQ_NEXT(p, entries);
815 if (tag == p->tag) {
816 if (--p->ref == 0) {
817 TAILQ_REMOVE(head, p, entries);
818 _FREE(p, M_TEMP);
819 }
820 break;
821 }
822 }
823}
824
825u_int16_t
826pf_tagname2tag(char *tagname)
827{
828 return (tagname2tag(&pf_tags, tagname));
829}
830
831void
832pf_tag2tagname(u_int16_t tagid, char *p)
833{
834 tag2tagname(&pf_tags, tagid, p);
835}
836
837void
838pf_tag_ref(u_int16_t tag)
839{
840 struct pf_tagname *t;
841
842 TAILQ_FOREACH(t, &pf_tags, entries)
843 if (t->tag == tag)
844 break;
845 if (t != NULL)
846 t->ref++;
847}
848
849void
850pf_tag_unref(u_int16_t tag)
851{
852 tag_unref(&pf_tags, tag);
853}
854
/* Route labels are not supported on this platform; stub that always
 * reports success. */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}
861
/* Route labels are not supported on this platform; no-op stub. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
867
/* Route labels are not supported on this platform; no-op stub. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
873
874static int
875pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
876{
877 struct pf_ruleset *rs;
878 struct pf_rule *rule;
879
880 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
881 return (EINVAL);
882 rs = pf_find_or_create_ruleset(anchor);
883 if (rs == NULL)
884 return (EINVAL);
885 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
886 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
887 rs->rules[rs_num].inactive.rcount--;
888 }
889 *ticket = ++rs->rules[rs_num].inactive.ticket;
890 rs->rules[rs_num].inactive.open = 1;
891 return (0);
892}
893
894static int
895pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
896{
897 struct pf_ruleset *rs;
898 struct pf_rule *rule;
899
900 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
901 return (EINVAL);
902 rs = pf_find_ruleset(anchor);
903 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
904 rs->rules[rs_num].inactive.ticket != ticket)
905 return (0);
906 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
907 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
908 rs->rules[rs_num].inactive.rcount--;
909 }
910 rs->rules[rs_num].inactive.open = 0;
911 return (0);
912}
913
/* Hash a fixed-size struct member into the MD5 context `ctx'
 * (assumed in scope at the call site). */
#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

/* Hash a NUL-terminated string member, excluding the terminator. */
#define PF_MD5_UPD_STR(st, elm) \
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

/* Hash a 32-bit member in network byte order; `stor' is caller-provided
 * scratch storage. */
#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
		(stor) = htonl((st)->elm); \
		MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

/* Hash a 16-bit member in network byte order; `stor' is caller-provided
 * scratch storage. */
#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
		(stor) = htons((st)->elm); \
		MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
929
/*
 * Fold the comparison-relevant parts of a rule address — and, for
 * TCP/UDP, its port range — into the ruleset MD5 checksum.  The update
 * order is significant: it defines the checksum exchanged with pfsync
 * peers, so fields must not be reordered.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	/* hash only the variant fields relevant to this address type */
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	/* ports only matter for protocols that have them */
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
966
/*
 * Fold one rule into the ruleset MD5 checksum.  Multi-byte numeric
 * fields are hashed in network byte order (via the scratch variables
 * x and y) so the checksum is identical across hosts.  The update
 * order defines the wire checksum and must not change.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;	/* scratch for 16-bit network-order hashing */
	u_int32_t y;	/* scratch for 32-bit network-order hashing */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1005
/*
 * Commit the transaction opened under `ticket': swap the inactive
 * ruleset into place as the active one, then purge the previous active
 * rules.  For the main ruleset the pfsync checksum is recomputed
 * first.  Returns EBUSY when the ticket does not name an open
 * transaction.  Called with pf_lock held.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/* outgoing PFM (pfm device) rules no longer count toward pffwrules */
	if(old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM)
				pffwrules--;
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}
1072
1073static void
1074pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1075 int minordev)
1076{
1077 bcopy(src, dst, sizeof (struct pf_rule));
1078
1079 dst->label[sizeof (dst->label) - 1] = '\0';
1080 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1081 dst->qname[sizeof (dst->qname) - 1] = '\0';
1082 dst->pqname[sizeof (dst->pqname) - 1] = '\0';
1083 dst->tagname[sizeof (dst->tagname) - 1] = '\0';
1084 dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
1085 dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';
1086
1087 dst->cuid = kauth_cred_getuid(p->p_ucred);
1088 dst->cpid = p->p_pid;
1089
1090 dst->anchor = NULL;
1091 dst->kif = NULL;
1092 dst->overload_tbl = NULL;
1093
1094 TAILQ_INIT(&dst->rpool.list);
1095 dst->rpool.cur = NULL;
1096
1097 /* initialize refcounting */
1098 dst->states = 0;
1099 dst->src_nodes = 0;
1100
1101 dst->entries.tqe_prev = NULL;
1102 dst->entries.tqe_next = NULL;
1103 if ((uint8_t)minordev == PFDEV_PFM)
1104 dst->rule_flag |= PFRULE_PFM;
1105}
1106
1107static void
1108pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1109{
1110 bcopy(src, dst, sizeof (struct pf_rule));
1111
1112 dst->anchor = NULL;
1113 dst->kif = NULL;
1114 dst->overload_tbl = NULL;
1115
1116 TAILQ_INIT(&dst->rpool.list);
1117 dst->rpool.cur = NULL;
1118
1119 dst->entries.tqe_prev = NULL;
1120 dst->entries.tqe_next = NULL;
1121}
1122
/*
 * Flatten a kernel pf_state (plus its state key) into the pfsync_state
 * export representation used by pfsync and the DIOCGETSTATE(S) ioctls.
 * The creation field is exported as age (seconds since creation) and the
 * expire field as remaining lifetime, clamped at zero.
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* (unsigned)-1 marks the absence of an optional rule reference */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	/* export age rather than the absolute creation timestamp */
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	/* convert absolute expiry to time remaining, clamped at zero */
	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;

}
1181
/*
 * Rebuild a kernel pf_state and its state key from an imported
 * pfsync_state (DIOCADDSTATE / pfsync).  The rule pointers are reset to
 * the default rule, counters are zeroed, and the flowhash is recomputed
 * locally rather than trusted from the import.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	/* recompute locally; never trust an imported hash */
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof (sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	if (sp->expire > 0)
		/*
		 * Reconstruct the absolute expiry from the remaining
		 * lifetime carried in sp->expire.  NOTE(review): this
		 * assumes sp->timeout is a valid PFTM_* index and that
		 * sp->expire does not exceed the default-rule timeout;
		 * neither is validated here — confirm callers sanitize
		 * the imported structure.
		 */
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	/* fresh state: counters start at zero on this host */
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1221
1222static void
1223pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1224{
1225 bcopy(src, dst, sizeof (struct pf_pooladdr));
1226
1227 dst->entries.tqe_prev = NULL;
1228 dst->entries.tqe_next = NULL;
1229 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1230 dst->kif = NULL;
1231}
1232
1233static void
1234pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1235{
1236 bcopy(src, dst, sizeof (struct pf_pooladdr));
1237
1238 dst->entries.tqe_prev = NULL;
1239 dst->entries.tqe_next = NULL;
1240 dst->kif = NULL;
1241}
1242
/*
 * Build the per-ruleset shadow arrays used for pfsync rule matching and
 * compute an MD5 digest over all inactive rules; the digest is published
 * in pf_status.pf_chksum so pfsync peers can verify matching rulesets.
 * Called with pf_lock held (from pf_commit_rules() for the main ruleset).
 * Returns 0 or ENOMEM.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* Discard any stale shadow array before rebuilding it. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		/*
		 * NOTE(review): rule->nr is used unchecked as an index
		 * into ptr_array; this assumes nr < inactive.rcount for
		 * every queued rule (and that the queue is empty whenever
		 * rcount == 0, since ptr_array is NULL then) — confirm
		 * the rule-numbering invariant is maintained by all
		 * callers that populate the inactive queue.
		 */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
	return (0);
}
1282
1283static void
1284pf_start(void)
1285{
1286 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1287
1288 VERIFY(pf_is_enabled == 0);
1289
1290 pf_is_enabled = 1;
1291 pf_status.running = 1;
1292 pf_status.since = pf_calendar_time_second();
1293 if (pf_status.stateid == 0) {
1294 pf_status.stateid = pf_time_second();
1295 pf_status.stateid = pf_status.stateid << 32;
1296 }
1297 wakeup(pf_purge_thread_fn);
1298 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1299}
1300
1301static void
1302pf_stop(void)
1303{
1304 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1305
1306 VERIFY(pf_is_enabled);
1307
1308 pf_status.running = 0;
1309 pf_is_enabled = 0;
1310 pf_status.since = pf_calendar_time_second();
1311 wakeup(pf_purge_thread_fn);
1312 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1313}
1314
/*
 * Character-device ioctl entry point for /dev/pf.  Performs superuser,
 * securelevel and FWRITE screening, takes the pf perimeter lock
 * (exclusive for writers, shared for readers) plus pf_lock, then
 * dispatches each command either inline or to a pfioctl_ioc_*() helper
 * that handles the 32-/64-bit user structure variants.
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	/*
	 * NOTE(review): the pragma above looks stale — dev is in fact
	 * used below via minor(dev).  p64 is not referenced directly in
	 * this function; presumably it is consumed inside the
	 * PFIOCX_STRUCT_* macros — verify against their definitions.
	 */
	int p64 = proc_is64bit(p);
	int error = 0;
	int minordev = minor(dev);

	/* Only the superuser may operate on pf. */
	if (kauth_cred_issuser(kauth_cred_get()) == 0)
		return (EPERM);

	/*
	 * At securelevel > 1 only read-only commands (and dummy table
	 * operations) are permitted.
	 */
	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		}
		default:
			return (EPERM);
		}

	/*
	 * Commands that modify pf state require the descriptor to have
	 * been opened for writing; dummy table operations are allowed
	 * but upgraded to take the exclusive (write) lock path below.
	 */
	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof (action));

			/* clearing counters is a write operation */
			if (action == PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		}
		default:
			return (EACCES);
		}

	/* Writers get the perimeter lock exclusive, readers shared. */
	if (flags & FWRITE)
		lck_rw_lock_exclusive(pf_perim_lock);
	else
		lck_rw_lock_shared(pf_perim_lock);

	lck_mtx_lock(pf_lock);

	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:		/* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			/* token (or 0 on failure) is returned to the caller */
			bcopy(&token, addr, sizeof (token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			/* unconditional stop drops every reference */
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:		/* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy(addr, &pfrt, sizeof (pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, addr, sizeof (pfrt));

			/* last reference gone: shut pf down */
			if (error == 0 && pf_enabled_ref_count == 0)
				pf_stop();
		}
		break;

	case DIOCGETSTARTERS: {		/* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:		/* struct pfioc_rule */
	case DIOCGETRULES:		/* struct pfioc_rule */
	case DIOCGETRULE:		/* struct pfioc_rule */
	case DIOCCHANGERULE:		/* struct pfioc_rule */
	case DIOCINSERTRULE:		/* struct pfioc_rule */
	case DIOCDELETERULE: {		/* struct pfioc_rule */
		struct pfioc_rule *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:		/* struct pfioc_state_kill */
	case DIOCKILLSTATES: {		/* struct pfioc_state_kill */
		struct pfioc_state_kill *psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:		/* struct pfioc_state */
	case DIOCGETSTATE: {		/* struct pfioc_state */
		struct pfioc_state *ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {		/* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {		/* struct pf_status */
		struct pf_status *s = NULL;

		/*
		 * Note: the copy source here is the global pf_status,
		 * not the user address.
		 */
		PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;);
		pfi_update_status(s->ifname, s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {		/* struct pfioc_if */
		struct pfioc_if	*pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		/* reset counters but preserve the status interface name */
		bzero(pf_status.counters, sizeof (pf_status.counters));
		bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
		bzero(pf_status.scounters, sizeof (pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname)
			pfi_update_status(pf_status.ifname, NULL);
		break;
	}

	case DIOCNATLOOK: {		/* struct pfioc_natlook */
		struct pfioc_natlook *pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:		/* struct pfioc_tm */
	case DIOCGETTIMEOUT: {		/* struct pfioc_tm */
		struct pfioc_tm	pt;

		/* small enough to be on stack */
		bcopy(addr, &pt, sizeof (pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, addr, sizeof (pt));
		break;
	}

	case DIOCGETLIMIT:		/* struct pfioc_limit */
	case DIOCSETLIMIT: {		/* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy(addr, &pl, sizeof (pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, addr, sizeof (pl));
		break;
	}

	case DIOCSETDEBUG: {		/* u_int32_t */
		bcopy(addr, &pf_status.debug, sizeof (u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ps.ifname[IFNAMSIZ - 1] = '\0';
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				/* report the interface's max output bw */
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof (baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:		/* struct pfioc_pooladdr */
	case DIOCADDADDR:		/* struct pfioc_pooladdr */
	case DIOCGETADDRS:		/* struct pfioc_pooladdr */
	case DIOCGETADDR:		/* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {		/* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;)
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:		/* struct pfioc_ruleset */
	case DIOCGETRULESET: {		/* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:		/* struct pfioc_table */
	case DIOCRADDTABLES:		/* struct pfioc_table */
	case DIOCRDELTABLES:		/* struct pfioc_table */
	case DIOCRGETTABLES:		/* struct pfioc_table */
	case DIOCRGETTSTATS:		/* struct pfioc_table */
	case DIOCRCLRTSTATS:		/* struct pfioc_table */
	case DIOCRSETTFLAGS:		/* struct pfioc_table */
	case DIOCRCLRADDRS:		/* struct pfioc_table */
	case DIOCRADDADDRS:		/* struct pfioc_table */
	case DIOCRDELADDRS:		/* struct pfioc_table */
	case DIOCRSETADDRS:		/* struct pfioc_table */
	case DIOCRGETADDRS:		/* struct pfioc_table */
	case DIOCRGETASTATS:		/* struct pfioc_table */
	case DIOCRCLRASTATS:		/* struct pfioc_table */
	case DIOCRTSTADDRS:		/* struct pfioc_table */
	case DIOCRINADEFINE: {		/* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:		/* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {		/* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:		/* struct pfioc_trans */
	case DIOCXROLLBACK:		/* struct pfioc_trans */
	case DIOCXCOMMIT: {		/* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {		/* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
		    error = ENOMEM; break;);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		/* detach every state from its source nodes ... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ... then mark all source nodes for immediate expiry */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {	/* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {		/* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy(addr, &hid, sizeof (hid));
		/* 0 requests a randomly generated host id */
		if (hid == 0)
			pf_status.hostid = random();
		else
			pf_status.hostid = hid;
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:		/* struct pfioc_iface */
	case DIOCSETIFFLAG:		/* struct pfioc_iface */
	case DIOCCLRIFFLAG: {		/* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	return (error);
}
1858
1859static int
1860pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
1861 struct pfioc_table_64 *io64, struct proc *p)
1862{
1863 int p64 = proc_is64bit(p);
1864 int error = 0;
1865
1866 if (!p64)
1867 goto struct32;
1868
1869 /*
1870 * 64-bit structure processing
1871 */
1872 switch (cmd) {
1873 case DIOCRCLRTABLES:
1874 if (io64->pfrio_esize != 0) {
1875 error = ENODEV;
1876 break;
1877 }
1878 pfr_table_copyin_cleanup(&io64->pfrio_table);
1879 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
1880 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1881 break;
1882
1883 case DIOCRADDTABLES:
1884 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1885 error = ENODEV;
1886 break;
1887 }
1888 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
1889 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1890 break;
1891
1892 case DIOCRDELTABLES:
1893 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1894 error = ENODEV;
1895 break;
1896 }
1897 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
1898 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1899 break;
1900
1901 case DIOCRGETTABLES:
1902 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1903 error = ENODEV;
1904 break;
1905 }
1906 pfr_table_copyin_cleanup(&io64->pfrio_table);
1907 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
1908 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1909 break;
1910
1911 case DIOCRGETTSTATS:
1912 if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
1913 error = ENODEV;
1914 break;
1915 }
1916 pfr_table_copyin_cleanup(&io64->pfrio_table);
1917 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
1918 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1919 break;
1920
1921 case DIOCRCLRTSTATS:
1922 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1923 error = ENODEV;
1924 break;
1925 }
1926 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
1927 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1928 break;
1929
1930 case DIOCRSETTFLAGS:
1931 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1932 error = ENODEV;
1933 break;
1934 }
1935 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
1936 io64->pfrio_setflag, io64->pfrio_clrflag,
1937 &io64->pfrio_nchange, &io64->pfrio_ndel,
1938 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1939 break;
1940
1941 case DIOCRCLRADDRS:
1942 if (io64->pfrio_esize != 0) {
1943 error = ENODEV;
1944 break;
1945 }
1946 pfr_table_copyin_cleanup(&io64->pfrio_table);
1947 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
1948 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1949 break;
1950
1951 case DIOCRADDADDRS:
1952 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1953 error = ENODEV;
1954 break;
1955 }
1956 pfr_table_copyin_cleanup(&io64->pfrio_table);
1957 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1958 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
1959 PFR_FLAG_USERIOCTL);
1960 break;
1961
1962 case DIOCRDELADDRS:
1963 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1964 error = ENODEV;
1965 break;
1966 }
1967 pfr_table_copyin_cleanup(&io64->pfrio_table);
1968 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1969 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
1970 PFR_FLAG_USERIOCTL);
1971 break;
1972
1973 case DIOCRSETADDRS:
1974 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1975 error = ENODEV;
1976 break;
1977 }
1978 pfr_table_copyin_cleanup(&io64->pfrio_table);
1979 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1980 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
1981 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
1982 PFR_FLAG_USERIOCTL, 0);
1983 break;
1984
1985 case DIOCRGETADDRS:
1986 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1987 error = ENODEV;
1988 break;
1989 }
1990 pfr_table_copyin_cleanup(&io64->pfrio_table);
1991 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1992 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1993 break;
1994
1995 case DIOCRGETASTATS:
1996 if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
1997 error = ENODEV;
1998 break;
1999 }
2000 pfr_table_copyin_cleanup(&io64->pfrio_table);
2001 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2002 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2003 break;
2004
2005 case DIOCRCLRASTATS:
2006 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
2007 error = ENODEV;
2008 break;
2009 }
2010 pfr_table_copyin_cleanup(&io64->pfrio_table);
2011 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2012 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2013 PFR_FLAG_USERIOCTL);
2014 break;
2015
2016 case DIOCRTSTADDRS:
2017 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
2018 error = ENODEV;
2019 break;
2020 }
2021 pfr_table_copyin_cleanup(&io64->pfrio_table);
2022 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2023 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2024 PFR_FLAG_USERIOCTL);
2025 break;
2026
2027 case DIOCRINADEFINE:
2028 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
2029 error = ENODEV;
2030 break;
2031 }
2032 pfr_table_copyin_cleanup(&io64->pfrio_table);
2033 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2034 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2035 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2036 break;
2037
2038 default:
2039 VERIFY(0);
2040 /* NOTREACHED */
2041 }
2042 goto done;
2043
2044struct32:
2045 /*
2046 * 32-bit structure processing
2047 */
2048 switch (cmd) {
2049 case DIOCRCLRTABLES:
2050 if (io32->pfrio_esize != 0) {
2051 error = ENODEV;
2052 break;
2053 }
2054 pfr_table_copyin_cleanup(&io32->pfrio_table);
2055 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2056 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2057 break;
2058
2059 case DIOCRADDTABLES:
2060 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2061 error = ENODEV;
2062 break;
2063 }
2064 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2065 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2066 break;
2067
2068 case DIOCRDELTABLES:
2069 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2070 error = ENODEV;
2071 break;
2072 }
2073 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2074 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2075 break;
2076
2077 case DIOCRGETTABLES:
2078 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2079 error = ENODEV;
2080 break;
2081 }
2082 pfr_table_copyin_cleanup(&io32->pfrio_table);
2083 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2084 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2085 break;
2086
2087 case DIOCRGETTSTATS:
2088 if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
2089 error = ENODEV;
2090 break;
2091 }
2092 pfr_table_copyin_cleanup(&io32->pfrio_table);
2093 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2094 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2095 break;
2096
2097 case DIOCRCLRTSTATS:
2098 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2099 error = ENODEV;
2100 break;
2101 }
2102 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2103 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2104 break;
2105
2106 case DIOCRSETTFLAGS:
2107 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2108 error = ENODEV;
2109 break;
2110 }
2111 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2112 io32->pfrio_setflag, io32->pfrio_clrflag,
2113 &io32->pfrio_nchange, &io32->pfrio_ndel,
2114 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2115 break;
2116
2117 case DIOCRCLRADDRS:
2118 if (io32->pfrio_esize != 0) {
2119 error = ENODEV;
2120 break;
2121 }
2122 pfr_table_copyin_cleanup(&io32->pfrio_table);
2123 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2124 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2125 break;
2126
2127 case DIOCRADDADDRS:
2128 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2129 error = ENODEV;
2130 break;
2131 }
2132 pfr_table_copyin_cleanup(&io32->pfrio_table);
2133 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2134 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2135 PFR_FLAG_USERIOCTL);
2136 break;
2137
2138 case DIOCRDELADDRS:
2139 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2140 error = ENODEV;
2141 break;
2142 }
2143 pfr_table_copyin_cleanup(&io32->pfrio_table);
2144 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2145 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2146 PFR_FLAG_USERIOCTL);
2147 break;
2148
2149 case DIOCRSETADDRS:
2150 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2151 error = ENODEV;
2152 break;
2153 }
2154 pfr_table_copyin_cleanup(&io32->pfrio_table);
2155 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2156 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2157 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2158 PFR_FLAG_USERIOCTL, 0);
2159 break;
2160
2161 case DIOCRGETADDRS:
2162 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2163 error = ENODEV;
2164 break;
2165 }
2166 pfr_table_copyin_cleanup(&io32->pfrio_table);
2167 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2168 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2169 break;
2170
2171 case DIOCRGETASTATS:
2172 if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
2173 error = ENODEV;
2174 break;
2175 }
2176 pfr_table_copyin_cleanup(&io32->pfrio_table);
2177 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2178 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2179 break;
2180
2181 case DIOCRCLRASTATS:
2182 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2183 error = ENODEV;
2184 break;
2185 }
2186 pfr_table_copyin_cleanup(&io32->pfrio_table);
2187 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2188 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2189 PFR_FLAG_USERIOCTL);
2190 break;
2191
2192 case DIOCRTSTADDRS:
2193 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2194 error = ENODEV;
2195 break;
2196 }
2197 pfr_table_copyin_cleanup(&io32->pfrio_table);
2198 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2199 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2200 PFR_FLAG_USERIOCTL);
2201 break;
2202
2203 case DIOCRINADEFINE:
2204 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2205 error = ENODEV;
2206 break;
2207 }
2208 pfr_table_copyin_cleanup(&io32->pfrio_table);
2209 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2210 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2211 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2212 break;
2213
2214 default:
2215 VERIFY(0);
2216 /* NOTREACHED */
2217 }
2218
2219done:
2220 return (error);
2221}
2222
/*
 * Handle token-list ioctls (currently only DIOCGETSTARTERS): copy the
 * list of PF "starter" tokens out to user space.  tok32/tok64 are the
 * 32-bit and 64-bit ioctl argument layouts; only the one matching the
 * calling process' ABI (p64) is consulted and updated.
 *
 * Returns 0 on success, ENOENT if no tokens exist, ENOMEM if the
 * temporary buffer cannot be allocated, or a copyout(9) error.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof (struct pfioc_token) * nr_tokens;
		/* ocnt = caller-supplied buffer size; cnt = space remaining */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* size probe: report the required buffer size only */
			if (p64)
				tok64->size = size;
			else
				tok32->size = size;
			break;
		}

		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
		tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* flatten the kernel token list into the temporary array */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof (*tokens))
				break; /* no more buffer space left */

			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof (struct pfioc_token);

			cnt -= sizeof (struct pfioc_token);
		}

		/* copy out only the bytes actually filled in (ocnt - cnt) */
		if (cnt < ocnt)
			error = copyout(tokens, token_buf, ocnt - cnt);

		/* report the number of bytes copied back to the caller */
		if (p64)
			tok64->size = ocnt - cnt;
		else
			tok32->size = ocnt - cnt;

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
2296
2297static void
2298pf_expire_states_and_src_nodes(struct pf_rule *rule)
2299{
2300 struct pf_state *state;
2301 struct pf_src_node *sn;
2302 int killed = 0;
2303
2304 /* expire the states */
2305 state = TAILQ_FIRST(&state_list);
2306 while (state) {
2307 if (state->rule.ptr == rule)
2308 state->timeout = PFTM_PURGE;
2309 state = TAILQ_NEXT(state, entry_list);
2310 }
2311 pf_purge_expired_states(pf_status.states);
2312
2313 /* expire the src_nodes */
2314 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2315 if (sn->rule.ptr != rule)
2316 continue;
2317 if (sn->states != 0) {
2318 RB_FOREACH(state, pf_state_tree_id,
2319 &tree_id) {
2320 if (state->src_node == sn)
2321 state->src_node = NULL;
2322 if (state->nat_src_node == sn)
2323 state->nat_src_node = NULL;
2324 }
2325 sn->states = 0;
2326 }
2327 sn->expire = 1;
2328 killed++;
2329 }
2330 if (killed)
2331 pf_purge_expired_src_nodes();
2332}
2333
2334static void
2335pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2336 struct pf_rule *rule)
2337{
2338 struct pf_rule *r;
2339 int nr = 0;
2340
2341 pf_expire_states_and_src_nodes(rule);
2342
2343 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2344 if (ruleset->rules[rs_num].active.rcount-- == 0)
2345 panic("%s: rcount value broken!", __func__);
2346 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2347
2348 while (r) {
2349 r->nr = nr++;
2350 r = TAILQ_NEXT(r, entries);
2351 }
2352}
2353
2354
2355static void
2356pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2357{
2358 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2359 ruleset->rules[rs].active.ticket =
2360 ++ruleset->rules[rs].inactive.ticket;
2361}
2362
2363/*
2364 * req_dev encodes the PF interface. Currently, possible values are
2365 * 0 or PFRULE_PFM
2366 */
/*
 * Delete the active rule whose ticket matches pr->rule.ticket, looking in
 * the (possibly anchored) ruleset named by pr->anchor.  If the deletion
 * empties an ownerless intermediate anchor, walk up and delete the
 * now-useless anchor rule from the parent as well (the delete_rule loop).
 *
 * Returns 0 on success, ENOENT when no rule carries the ticket, EACCES
 * when the caller's owner or device does not match the rule's, or the
 * error from pf_find_ruleset_with_owner().
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL)
		return (error);

	/* scan every ruleset type for the rule carrying this ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (ENOENT);
	else
		i--;	/* undo the final loop increment; i indexes the hit */

	if (strcmp(rule->owner, pr->rule.owner))
		return (EACCES);

delete_rule:
	/*
	 * If this is an anchor rule in an ownerless sub-ruleset that would
	 * become empty after this deletion, delete it and then repeat the
	 * whole check one level up in the parent ruleset.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL)
			ruleset = &pf_main_ruleset;
		else
			ruleset = &parent_ruleset;

		/* find the anchor rule in the parent pointing at the child */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL)
			panic("%s: rule not found!", __func__);

		/*
		 * if request device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return (0);
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev)
			return EACCES;
		if (rule->rule_flag & PFRULE_PFM)
			pffwrules--;
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return (0);
}
2453
2454/*
2455 * req_dev encodes the PF interface. Currently, possible values are
2456 * 0 or PFRULE_PFM
2457 */
/*
 * Delete every rule added by 'owner' through the device encoded in
 * req_dev, across all ruleset types, recursing into anchors.  The walk
 * steps into non-empty anchors owned by 'owner' (or ownerless ones),
 * deletes empty anchor rules outright, and pops back out via
 * pf_deleterule_anchor_step_out() when a queue is exhausted.  'deleted'
 * tracks whether a pf_ruleset_cleanup() is still pending for the
 * ruleset currently being walked.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule, *next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			/* grab the successor first; 'rule' may be deleted */
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* finish pending cleanup before descending */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM)
							pffwrules--;
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else
					rule = next;
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM)
						pffwrules--;
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			if (rule == NULL) {
				/* queue exhausted: clean up, then pop out of the anchor */
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset)
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
			}
		}
	}
}
2524
2525static void
2526pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2527 int rs, struct pf_rule **rule_ptr)
2528{
2529 struct pf_ruleset *ruleset = *ruleset_ptr;
2530 struct pf_rule *rule = *rule_ptr;
2531
2532 /* step out of anchor */
2533 struct pf_ruleset *rs_copy = ruleset;
2534 ruleset = ruleset->anchor->parent?
2535 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2536
2537 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2538 while (rule && (rule->anchor != rs_copy->anchor))
2539 rule = TAILQ_NEXT(rule, entries);
2540 if (rule == NULL)
2541 panic("%s: parent rule of anchor not found!", __func__);
2542 if (rule->anchor->ruleset.rules[rs].active.rcount > 0)
2543 rule = TAILQ_NEXT(rule, entries);
2544
2545 *ruleset_ptr = ruleset;
2546 *rule_ptr = rule;
2547}
2548
2549static void
2550pf_addrwrap_setup(struct pf_addr_wrap *aw)
2551{
2552 VERIFY(aw);
2553 bzero(&aw->p, sizeof aw->p);
2554}
2555
/*
 * Resolve and validate the dynamic parts of a freshly copied-in rule:
 * interface binding, tags, logging, address wrappers (route labels,
 * dynamic addresses, tables), the anchor call, the overload table and
 * the address pool.  Errors are accumulated so all attachments are
 * attempted; on failure the rule and everything already attached to it
 * are released via pf_rm_rule() before returning.
 *
 * Returns 0 on success, or EINVAL/EBUSY on the first recorded failure.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset) {
	struct pf_pooladdr *apa;
	int error = 0;

	/* bind the rule to its interface, taking a kif reference */
	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			pool_put(&pf_rule_pl, rule);
			return (EINVAL);
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	/* resolve tag names to tag ids */
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	/* route-to et al. require an explicit direction */
	if (rule->rt && !rule->direction)
		error = EINVAL;
#if PFLOG
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
#endif /* PFLOG */
	/* clear the address wrappers before attaching dynamic state */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr))
		error = EBUSY;
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
		error = EINVAL;
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->src.addr))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
		error = EINVAL;
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
		error = EINVAL;
	/* attach the tables referenced by the staged pool addresses */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
		if (pf_tbladdr_setup(ruleset, &apa->addr))
			error = EINVAL;

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	/* move the staged pool addresses into the rule's own pool */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation rules (outside anchors) and routing need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		pf_rm_rule(NULL, rule);
		return (error);
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	/* start with zeroed counters */
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return (0);
}
2636
2637static int
2638pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2639{
2640 int error = 0;
2641 u_int32_t req_dev = 0;
2642
2643 switch (cmd) {
2644 case DIOCADDRULE: {
2645 struct pf_ruleset *ruleset;
2646 struct pf_rule *rule, *tail;
2647 int rs_num;
2648
2649 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
2650 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
2651 ruleset = pf_find_ruleset(pr->anchor);
2652 if (ruleset == NULL) {
2653 error = EINVAL;
2654 break;
2655 }
2656 rs_num = pf_get_ruleset_number(pr->rule.action);
2657 if (rs_num >= PF_RULESET_MAX) {
2658 error = EINVAL;
2659 break;
2660 }
2661 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2662 error = EINVAL;
2663 break;
2664 }
2665 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2666 error = EBUSY;
2667 break;
2668 }
2669 if (pr->pool_ticket != ticket_pabuf) {
2670 error = EBUSY;
2671 break;
2672 }
2673 rule = pool_get(&pf_rule_pl, PR_WAITOK);
2674 if (rule == NULL) {
2675 error = ENOMEM;
2676 break;
2677 }
2678 pf_rule_copyin(&pr->rule, rule, p, minordev);
2679#if !INET
2680 if (rule->af == AF_INET) {
2681 pool_put(&pf_rule_pl, rule);
2682 error = EAFNOSUPPORT;
2683 break;
2684 }
2685#endif /* INET */
2686#if !INET6
2687 if (rule->af == AF_INET6) {
2688 pool_put(&pf_rule_pl, rule);
2689 error = EAFNOSUPPORT;
2690 break;
2691 }
2692#endif /* INET6 */
2693 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2694 pf_rulequeue);
2695 if (tail)
2696 rule->nr = tail->nr + 1;
2697 else
2698 rule->nr = 0;
2699
2700 if ((error = pf_rule_setup(pr, rule, ruleset)))
2701 break;
2702
2703 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2704 rule, entries);
2705 ruleset->rules[rs_num].inactive.rcount++;
2706 if (rule->rule_flag & PFRULE_PFM)
2707 pffwrules++;
2708
2709 if (rule->action == PF_NAT64)
2710 atomic_add_16(&pf_nat64_configured, 1);
2711
2712 if (pr->anchor_call[0] == '\0') {
2713 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2714 if (rule->rule_flag & PFRULE_PFM) {
2715 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2716 }
2717 }
2718
2719#if DUMMYNET
2720 if (rule->action == PF_DUMMYNET) {
2721 struct dummynet_event dn_event;
2722 uint32_t direction = DN_INOUT;;
2723 bzero(&dn_event, sizeof(dn_event));
2724
2725 dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2726
2727 if (rule->direction == PF_IN)
2728 direction = DN_IN;
2729 else if (rule->direction == PF_OUT)
2730 direction = DN_OUT;
2731
2732 dn_event.dn_event_rule_config.dir = direction;
2733 dn_event.dn_event_rule_config.af = rule->af;
2734 dn_event.dn_event_rule_config.proto = rule->proto;
2735 dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2736 dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2737 strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2738 sizeof(dn_event.dn_event_rule_config.ifname));
2739
2740 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2741 }
2742#endif
2743 break;
2744 }
2745
2746 case DIOCGETRULES: {
2747 struct pf_ruleset *ruleset;
2748 struct pf_rule *tail;
2749 int rs_num;
2750
2751 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
2752 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
2753 ruleset = pf_find_ruleset(pr->anchor);
2754 if (ruleset == NULL) {
2755 error = EINVAL;
2756 break;
2757 }
2758 rs_num = pf_get_ruleset_number(pr->rule.action);
2759 if (rs_num >= PF_RULESET_MAX) {
2760 error = EINVAL;
2761 break;
2762 }
2763 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2764 pf_rulequeue);
2765 if (tail)
2766 pr->nr = tail->nr + 1;
2767 else
2768 pr->nr = 0;
2769 pr->ticket = ruleset->rules[rs_num].active.ticket;
2770 break;
2771 }
2772
2773 case DIOCGETRULE: {
2774 struct pf_ruleset *ruleset;
2775 struct pf_rule *rule;
2776 int rs_num, i;
2777
2778 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
2779 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
2780 ruleset = pf_find_ruleset(pr->anchor);
2781 if (ruleset == NULL) {
2782 error = EINVAL;
2783 break;
2784 }
2785 rs_num = pf_get_ruleset_number(pr->rule.action);
2786 if (rs_num >= PF_RULESET_MAX) {
2787 error = EINVAL;
2788 break;
2789 }
2790 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2791 error = EBUSY;
2792 break;
2793 }
2794 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2795 while ((rule != NULL) && (rule->nr != pr->nr))
2796 rule = TAILQ_NEXT(rule, entries);
2797 if (rule == NULL) {
2798 error = EBUSY;
2799 break;
2800 }
2801 pf_rule_copyout(rule, &pr->rule);
2802 if (pf_anchor_copyout(ruleset, rule, pr)) {
2803 error = EBUSY;
2804 break;
2805 }
2806 pfi_dynaddr_copyout(&pr->rule.src.addr);
2807 pfi_dynaddr_copyout(&pr->rule.dst.addr);
2808 pf_tbladdr_copyout(&pr->rule.src.addr);
2809 pf_tbladdr_copyout(&pr->rule.dst.addr);
2810 pf_rtlabel_copyout(&pr->rule.src.addr);
2811 pf_rtlabel_copyout(&pr->rule.dst.addr);
2812 for (i = 0; i < PF_SKIP_COUNT; ++i)
2813 if (rule->skip[i].ptr == NULL)
2814 pr->rule.skip[i].nr = -1;
2815 else
2816 pr->rule.skip[i].nr =
2817 rule->skip[i].ptr->nr;
2818
2819 if (pr->action == PF_GET_CLR_CNTR) {
2820 rule->evaluations = 0;
2821 rule->packets[0] = rule->packets[1] = 0;
2822 rule->bytes[0] = rule->bytes[1] = 0;
2823 }
2824 break;
2825 }
2826
2827 case DIOCCHANGERULE: {
2828 struct pfioc_rule *pcr = pr;
2829 struct pf_ruleset *ruleset;
2830 struct pf_rule *oldrule = NULL, *newrule = NULL;
2831 struct pf_pooladdr *pa;
2832 u_int32_t nr = 0;
2833 int rs_num;
2834
2835 if (!(pcr->action == PF_CHANGE_REMOVE ||
2836 pcr->action == PF_CHANGE_GET_TICKET) &&
2837 pcr->pool_ticket != ticket_pabuf) {
2838 error = EBUSY;
2839 break;
2840 }
2841
2842 if (pcr->action < PF_CHANGE_ADD_HEAD ||
2843 pcr->action > PF_CHANGE_GET_TICKET) {
2844 error = EINVAL;
2845 break;
2846 }
2847 pcr->anchor[sizeof (pcr->anchor) - 1] = '\0';
2848 pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0';
2849 ruleset = pf_find_ruleset(pcr->anchor);
2850 if (ruleset == NULL) {
2851 error = EINVAL;
2852 break;
2853 }
2854 rs_num = pf_get_ruleset_number(pcr->rule.action);
2855 if (rs_num >= PF_RULESET_MAX) {
2856 error = EINVAL;
2857 break;
2858 }
2859
2860 if (pcr->action == PF_CHANGE_GET_TICKET) {
2861 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2862 break;
2863 } else {
2864 if (pcr->ticket !=
2865 ruleset->rules[rs_num].active.ticket) {
2866 error = EINVAL;
2867 break;
2868 }
2869 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2870 error = EINVAL;
2871 break;
2872 }
2873 }
2874
2875 if (pcr->action != PF_CHANGE_REMOVE) {
2876 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
2877 if (newrule == NULL) {
2878 error = ENOMEM;
2879 break;
2880 }
2881 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
2882#if !INET
2883 if (newrule->af == AF_INET) {
2884 pool_put(&pf_rule_pl, newrule);
2885 error = EAFNOSUPPORT;
2886 break;
2887 }
2888#endif /* INET */
2889#if !INET6
2890 if (newrule->af == AF_INET6) {
2891 pool_put(&pf_rule_pl, newrule);
2892 error = EAFNOSUPPORT;
2893 break;
2894 }
2895#endif /* INET6 */
2896 if (newrule->ifname[0]) {
2897 newrule->kif = pfi_kif_get(newrule->ifname);
2898 if (newrule->kif == NULL) {
2899 pool_put(&pf_rule_pl, newrule);
2900 error = EINVAL;
2901 break;
2902 }
2903 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
2904 } else
2905 newrule->kif = NULL;
2906
2907 if (newrule->tagname[0])
2908 if ((newrule->tag =
2909 pf_tagname2tag(newrule->tagname)) == 0)
2910 error = EBUSY;
2911 if (newrule->match_tagname[0])
2912 if ((newrule->match_tag = pf_tagname2tag(
2913 newrule->match_tagname)) == 0)
2914 error = EBUSY;
2915 if (newrule->rt && !newrule->direction)
2916 error = EINVAL;
2917#if PFLOG
2918 if (!newrule->log)
2919 newrule->logif = 0;
2920 if (newrule->logif >= PFLOGIFS_MAX)
2921 error = EINVAL;
2922#endif /* PFLOG */
2923 pf_addrwrap_setup(&newrule->src.addr);
2924 pf_addrwrap_setup(&newrule->dst.addr);
2925 if (pf_rtlabel_add(&newrule->src.addr) ||
2926 pf_rtlabel_add(&newrule->dst.addr))
2927 error = EBUSY;
2928 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
2929 error = EINVAL;
2930 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
2931 error = EINVAL;
2932 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
2933 error = EINVAL;
2934 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
2935 error = EINVAL;
2936 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
2937 error = EINVAL;
2938 TAILQ_FOREACH(pa, &pf_pabuf, entries)
2939 if (pf_tbladdr_setup(ruleset, &pa->addr))
2940 error = EINVAL;
2941
2942 if (newrule->overload_tblname[0]) {
2943 if ((newrule->overload_tbl = pfr_attach_table(
2944 ruleset, newrule->overload_tblname)) ==
2945 NULL)
2946 error = EINVAL;
2947 else
2948 newrule->overload_tbl->pfrkt_flags |=
2949 PFR_TFLAG_ACTIVE;
2950 }
2951
2952 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
2953 if (((((newrule->action == PF_NAT) ||
2954 (newrule->action == PF_RDR) ||
2955 (newrule->action == PF_BINAT) ||
2956 (newrule->rt > PF_FASTROUTE)) &&
2957 !newrule->anchor)) &&
2958 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
2959 error = EINVAL;
2960
2961 if (error) {
2962 pf_rm_rule(NULL, newrule);
2963 break;
2964 }
2965 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
2966 newrule->evaluations = 0;
2967 newrule->packets[0] = newrule->packets[1] = 0;
2968 newrule->bytes[0] = newrule->bytes[1] = 0;
2969 }
2970 pf_empty_pool(&pf_pabuf);
2971
2972 if (pcr->action == PF_CHANGE_ADD_HEAD)
2973 oldrule = TAILQ_FIRST(
2974 ruleset->rules[rs_num].active.ptr);
2975 else if (pcr->action == PF_CHANGE_ADD_TAIL)
2976 oldrule = TAILQ_LAST(
2977 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
2978 else {
2979 oldrule = TAILQ_FIRST(
2980 ruleset->rules[rs_num].active.ptr);
2981 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
2982 oldrule = TAILQ_NEXT(oldrule, entries);
2983 if (oldrule == NULL) {
2984 if (newrule != NULL)
2985 pf_rm_rule(NULL, newrule);
2986 error = EINVAL;
2987 break;
2988 }
2989 }
2990
2991 if (pcr->action == PF_CHANGE_REMOVE) {
2992 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
2993 ruleset->rules[rs_num].active.rcount--;
2994 } else {
2995 if (oldrule == NULL)
2996 TAILQ_INSERT_TAIL(
2997 ruleset->rules[rs_num].active.ptr,
2998 newrule, entries);
2999 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3000 pcr->action == PF_CHANGE_ADD_BEFORE)
3001 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3002 else
3003 TAILQ_INSERT_AFTER(
3004 ruleset->rules[rs_num].active.ptr,
3005 oldrule, newrule, entries);
3006 ruleset->rules[rs_num].active.rcount++;
3007 }
3008
3009 nr = 0;
3010 TAILQ_FOREACH(oldrule,
3011 ruleset->rules[rs_num].active.ptr, entries)
3012 oldrule->nr = nr++;
3013
3014 ruleset->rules[rs_num].active.ticket++;
3015
3016 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3017 pf_remove_if_empty_ruleset(ruleset);
3018
3019 break;
3020 }
3021
3022 case DIOCINSERTRULE: {
3023 struct pf_ruleset *ruleset;
3024 struct pf_rule *rule, *tail, *r;
3025 int rs_num;
3026 int is_anchor;
3027
3028 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3029 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3030 is_anchor = (pr->anchor_call[0] != '\0');
3031
3032 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3033 pr->rule.owner, is_anchor, &error)) == NULL)
3034 break;
3035
3036 rs_num = pf_get_ruleset_number(pr->rule.action);
3037 if (rs_num >= PF_RULESET_MAX) {
3038 error = EINVAL;
3039 break;
3040 }
3041 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3042 error = EINVAL;
3043 break;
3044 }
3045
3046 /* make sure this anchor rule doesn't exist already */
3047 if (is_anchor) {
3048 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3049 while (r) {
3050 if (r->anchor &&
3051 ((strcmp(r->anchor->name,
3052 pr->anchor_call)) == 0)) {
3053 if (((strcmp(pr->rule.owner,
3054 r->owner)) == 0) ||
3055 ((strcmp(r->owner, "")) == 0))
3056 error = EEXIST;
3057 else
3058 error = EPERM;
3059 break;
3060 }
3061 r = TAILQ_NEXT(r, entries);
3062 }
3063 if (error != 0)
3064 return (error);
3065 }
3066
3067 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3068 if (rule == NULL) {
3069 error = ENOMEM;
3070 break;
3071 }
3072 pf_rule_copyin(&pr->rule, rule, p, minordev);
3073#if !INET
3074 if (rule->af == AF_INET) {
3075 pool_put(&pf_rule_pl, rule);
3076 error = EAFNOSUPPORT;
3077 break;
3078 }
3079#endif /* INET */
3080#if !INET6
3081 if (rule->af == AF_INET6) {
3082 pool_put(&pf_rule_pl, rule);
3083 error = EAFNOSUPPORT;
3084 break;
3085 }
3086
3087#endif /* INET6 */
3088 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3089 while ((r != NULL) && (rule->priority >= (unsigned)r->priority))
3090 r = TAILQ_NEXT(r, entries);
3091 if (r == NULL) {
3092 if ((tail =
3093 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3094 pf_rulequeue)) != NULL)
3095 rule->nr = tail->nr + 1;
3096 else
3097 rule->nr = 0;
3098 } else {
3099 rule->nr = r->nr;
3100 }
3101
3102 if ((error = pf_rule_setup(pr, rule, ruleset)))
3103 break;
3104
3105 if (rule->anchor != NULL)
3106 strlcpy(rule->anchor->owner, rule->owner,
3107 PF_OWNER_NAME_SIZE);
3108
3109 if (r) {
3110 TAILQ_INSERT_BEFORE(r, rule, entries);
3111 while (r && ++r->nr)
3112 r = TAILQ_NEXT(r, entries);
3113 } else
3114 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3115 rule, entries);
3116 ruleset->rules[rs_num].active.rcount++;
3117
3118 /* Calculate checksum for the main ruleset */
3119 if (ruleset == &pf_main_ruleset)
3120 error = pf_setup_pfsync_matching(ruleset);
3121
3122 pf_ruleset_cleanup(ruleset, rs_num);
3123 rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3124
3125 pr->rule.ticket = rule->ticket;
3126 pf_rule_copyout(rule, &pr->rule);
3127 if (rule->rule_flag & PFRULE_PFM)
3128 pffwrules++;
3129 if (rule->action == PF_NAT64)
3130 atomic_add_16(&pf_nat64_configured, 1);
3131
3132 if (pr->anchor_call[0] == '\0') {
3133 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3134 if (rule->rule_flag & PFRULE_PFM) {
3135 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3136 }
3137 }
3138 break;
3139 }
3140
3141 case DIOCDELETERULE: {
3142 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3143 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3144
3145 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3146 error = EINVAL;
3147 break;
3148 }
3149
3150 /* get device through which request is made */
3151 if ((uint8_t)minordev == PFDEV_PFM)
3152 req_dev |= PFRULE_PFM;
3153
3154 if (pr->rule.ticket) {
3155 if ((error = pf_delete_rule_by_ticket(pr, req_dev)))
3156 break;
3157 } else
3158 pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3159 pr->nr = pffwrules;
3160 if (pr->rule.action == PF_NAT64)
3161 atomic_add_16(&pf_nat64_configured, -1);
3162 break;
3163 }
3164
3165 default:
3166 VERIFY(0);
3167 /* NOTREACHED */
3168 }
3169
3170 return (error);
3171}
3172
/*
 * Handle the state-killing ioctls.  DIOCCLRSTATES purges every state
 * whose interface name and rule owner match the (optional) filters in
 * psk; DIOCKILLSTATES additionally matches address family, protocol,
 * source/destination address masks and ports.  In both cases the number
 * of states killed is returned to the caller in psk->psk_af.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* ensure user-supplied strings are NUL-terminated */
	psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		/* fetch the successor before possibly unlinking s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the kill-count return field */
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/* orient src/dst by the state's direction */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* zero af/proto act as wildcards */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the kill-count return field */
		psk->psk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3299
/*
 * Handle single-state ioctls.  DIOCADDSTATE imports a pfsync-format
 * state from user space and inserts it into the state table;
 * DIOCGETSTATE looks a state up by (id, creatorid) and exports it.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof (struct pf_state));
		/* allocate a state key attached to s */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			/* undo both allocations made above */
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			/* NOTE(review): only s is returned to its pool here;
			 * sk appears to be handled by the insert path —
			 * confirm pf_insert_state frees the key on failure */
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* look up by the caller-supplied (id, creatorid) pair */
		bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3373
3374static int
3375pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
3376 struct pfioc_states_64 *ps64, struct proc *p)
3377{
3378 int p64 = proc_is64bit(p);
3379 int error = 0;
3380
3381 switch (cmd) {
3382 case DIOCGETSTATES: { /* struct pfioc_states */
3383 struct pf_state *state;
3384 struct pfsync_state *pstore;
3385 user_addr_t buf;
3386 u_int32_t nr = 0;
3387 int len, size;
3388
3389 len = (p64 ? ps64->ps_len : ps32->ps_len);
3390 if (len == 0) {
3391 size = sizeof (struct pfsync_state) * pf_status.states;
3392 if (p64)
3393 ps64->ps_len = size;
3394 else
3395 ps32->ps_len = size;
3396 break;
3397 }
3398
3399 pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK | M_ZERO);
3400 if (pstore == NULL) {
3401 error = ENOMEM;
3402 break;
3403 }
3404 buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
3405
3406 state = TAILQ_FIRST(&state_list);
3407 while (state) {
3408 if (state->timeout != PFTM_UNLINKED) {
3409 if ((nr + 1) * sizeof (*pstore) > (unsigned)len)
3410 break;
3411
3412 pf_state_export(pstore,
3413 state->state_key, state);
3414 error = copyout(pstore, buf, sizeof (*pstore));
3415 if (error) {
3416 _FREE(pstore, M_TEMP);
3417 goto fail;
3418 }
3419 buf += sizeof (*pstore);
3420 nr++;
3421 }
3422 state = TAILQ_NEXT(state, entry_list);
3423 }
3424
3425 size = sizeof (struct pfsync_state) * nr;
3426 if (p64)
3427 ps64->ps_len = size;
3428 else
3429 ps32->ps_len = size;
3430
3431 _FREE(pstore, M_TEMP);
3432 break;
3433 }
3434
3435 default:
3436 VERIFY(0);
3437 /* NOTREACHED */
3438 }
3439fail:
3440 return (error);
3441}
3442
3443static int
3444pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
3445{
3446#pragma unused(p)
3447 int error = 0;
3448
3449 switch (cmd) {
3450 case DIOCNATLOOK: {
3451 struct pf_state_key *sk;
3452 struct pf_state *state;
3453 struct pf_state_key_cmp key;
3454 int m = 0, direction = pnl->direction;
3455
3456 key.proto = pnl->proto;
3457 key.proto_variant = pnl->proto_variant;
3458
3459 if (!pnl->proto ||
3460 PF_AZERO(&pnl->saddr, pnl->af) ||
3461 PF_AZERO(&pnl->daddr, pnl->af) ||
3462 ((pnl->proto == IPPROTO_TCP ||
3463 pnl->proto == IPPROTO_UDP) &&
3464 (!pnl->dxport.port || !pnl->sxport.port)))
3465 error = EINVAL;
3466 else {
3467 /*
3468 * userland gives us source and dest of connection,
3469 * reverse the lookup so we ask for what happens with
3470 * the return traffic, enabling us to find it in the
3471 * state tree.
3472 */
3473 if (direction == PF_IN) {
3474 key.af_gwy = pnl->af;
3475 PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
3476 pnl->af);
3477 memcpy(&key.ext_gwy.xport, &pnl->dxport,
3478 sizeof (key.ext_gwy.xport));
3479 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
3480 memcpy(&key.gwy.xport, &pnl->sxport,
3481 sizeof (key.gwy.xport));
3482 state = pf_find_state_all(&key, PF_IN, &m);
3483 } else {
3484 key.af_lan = pnl->af;
3485 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
3486 memcpy(&key.lan.xport, &pnl->dxport,
3487 sizeof (key.lan.xport));
3488 PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
3489 pnl->af);
3490 memcpy(&key.ext_lan.xport, &pnl->sxport,
3491 sizeof (key.ext_lan.xport));
3492 state = pf_find_state_all(&key, PF_OUT, &m);
3493 }
3494 if (m > 1)
3495 error = E2BIG; /* more than one state */
3496 else if (state != NULL) {
3497 sk = state->state_key;
3498 if (direction == PF_IN) {
3499 PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
3500 sk->af_lan);
3501 memcpy(&pnl->rsxport, &sk->lan.xport,
3502 sizeof (pnl->rsxport));
3503 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
3504 pnl->af);
3505 memcpy(&pnl->rdxport, &pnl->dxport,
3506 sizeof (pnl->rdxport));
3507 } else {
3508 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
3509 sk->af_gwy);
3510 memcpy(&pnl->rdxport, &sk->gwy.xport,
3511 sizeof (pnl->rdxport));
3512 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
3513 pnl->af);
3514 memcpy(&pnl->rsxport, &pnl->sxport,
3515 sizeof (pnl->rsxport));
3516 }
3517 } else
3518 error = ENOENT;
3519 }
3520 break;
3521 }
3522
3523 default:
3524 VERIFY(0);
3525 /* NOTREACHED */
3526 }
3527
3528 return (error);
3529}
3530
3531static int
3532pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3533{
3534#pragma unused(p)
3535 int error = 0;
3536
3537 switch (cmd) {
3538 case DIOCSETTIMEOUT: {
3539 int old;
3540
3541 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3542 pt->seconds < 0) {
3543 error = EINVAL;
3544 goto fail;
3545 }
3546 old = pf_default_rule.timeout[pt->timeout];
3547 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3548 pt->seconds = 1;
3549 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3550 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3551 wakeup(pf_purge_thread_fn);
3552 pt->seconds = old;
3553 break;
3554 }
3555
3556 case DIOCGETTIMEOUT: {
3557 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3558 error = EINVAL;
3559 goto fail;
3560 }
3561 pt->seconds = pf_default_rule.timeout[pt->timeout];
3562 break;
3563 }
3564
3565 default:
3566 VERIFY(0);
3567 /* NOTREACHED */
3568 }
3569fail:
3570 return (error);
3571}
3572
3573static int
3574pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3575{
3576#pragma unused(p)
3577 int error = 0;
3578
3579 switch (cmd) {
3580 case DIOCGETLIMIT: {
3581
3582 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3583 error = EINVAL;
3584 goto fail;
3585 }
3586 pl->limit = pf_pool_limits[pl->index].limit;
3587 break;
3588 }
3589
3590 case DIOCSETLIMIT: {
3591 int old_limit;
3592
3593 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3594 pf_pool_limits[pl->index].pp == NULL) {
3595 error = EINVAL;
3596 goto fail;
3597 }
3598 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3599 pl->limit, NULL, 0);
3600 old_limit = pf_pool_limits[pl->index].limit;
3601 pf_pool_limits[pl->index].limit = pl->limit;
3602 pl->limit = old_limit;
3603 break;
3604 }
3605
3606 default:
3607 VERIFY(0);
3608 /* NOTREACHED */
3609 }
3610fail:
3611 return (error);
3612}
3613
3614static int
3615pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
3616{
3617#pragma unused(p)
3618 struct pf_pooladdr *pa = NULL;
3619 struct pf_pool *pool = NULL;
3620 int error = 0;
3621
3622 switch (cmd) {
3623 case DIOCBEGINADDRS: {
3624 pf_empty_pool(&pf_pabuf);
3625 pp->ticket = ++ticket_pabuf;
3626 break;
3627 }
3628
3629 case DIOCADDADDR: {
3630 pp->anchor[sizeof (pp->anchor) - 1] = '\0';
3631 if (pp->ticket != ticket_pabuf) {
3632 error = EBUSY;
3633 break;
3634 }
3635#if !INET
3636 if (pp->af == AF_INET) {
3637 error = EAFNOSUPPORT;
3638 break;
3639 }
3640#endif /* INET */
3641#if !INET6
3642 if (pp->af == AF_INET6) {
3643 error = EAFNOSUPPORT;
3644 break;
3645 }
3646#endif /* INET6 */
3647 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3648 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3649 pp->addr.addr.type != PF_ADDR_TABLE) {
3650 error = EINVAL;
3651 break;
3652 }
3653 pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3654 if (pa == NULL) {
3655 error = ENOMEM;
3656 break;
3657 }
3658 pf_pooladdr_copyin(&pp->addr, pa);
3659 if (pa->ifname[0]) {
3660 pa->kif = pfi_kif_get(pa->ifname);
3661 if (pa->kif == NULL) {
3662 pool_put(&pf_pooladdr_pl, pa);
3663 error = EINVAL;
3664 break;
3665 }
3666 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
3667 }
3668 pf_addrwrap_setup(&pa->addr);
3669 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
3670 pfi_dynaddr_remove(&pa->addr);
3671 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
3672 pool_put(&pf_pooladdr_pl, pa);
3673 error = EINVAL;
3674 break;
3675 }
3676 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
3677 break;
3678 }
3679
3680 case DIOCGETADDRS: {
3681 pp->nr = 0;
3682 pp->anchor[sizeof (pp->anchor) - 1] = '\0';
3683 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3684 pp->r_num, 0, 1, 0);
3685 if (pool == NULL) {
3686 error = EBUSY;
3687 break;
3688 }
3689 TAILQ_FOREACH(pa, &pool->list, entries)
3690 pp->nr++;
3691 break;
3692 }
3693
3694 case DIOCGETADDR: {
3695 u_int32_t nr = 0;
3696
3697 pp->anchor[sizeof (pp->anchor) - 1] = '\0';
3698 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3699 pp->r_num, 0, 1, 1);
3700 if (pool == NULL) {
3701 error = EBUSY;
3702 break;
3703 }
3704 pa = TAILQ_FIRST(&pool->list);
3705 while ((pa != NULL) && (nr < pp->nr)) {
3706 pa = TAILQ_NEXT(pa, entries);
3707 nr++;
3708 }
3709 if (pa == NULL) {
3710 error = EBUSY;
3711 break;
3712 }
3713 pf_pooladdr_copyout(pa, &pp->addr);
3714 pfi_dynaddr_copyout(&pp->addr.addr);
3715 pf_tbladdr_copyout(&pp->addr.addr);
3716 pf_rtlabel_copyout(&pp->addr.addr);
3717 break;
3718 }
3719
3720 case DIOCCHANGEADDR: {
3721 struct pfioc_pooladdr *pca = pp;
3722 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
3723 struct pf_ruleset *ruleset;
3724
3725 if (pca->action < PF_CHANGE_ADD_HEAD ||
3726 pca->action > PF_CHANGE_REMOVE) {
3727 error = EINVAL;
3728 break;
3729 }
3730 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3731 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3732 pca->addr.addr.type != PF_ADDR_TABLE) {
3733 error = EINVAL;
3734 break;
3735 }
3736
3737 pca->anchor[sizeof (pca->anchor) - 1] = '\0';
3738 ruleset = pf_find_ruleset(pca->anchor);
3739 if (ruleset == NULL) {
3740 error = EBUSY;
3741 break;
3742 }
3743 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
3744 pca->r_num, pca->r_last, 1, 1);
3745 if (pool == NULL) {
3746 error = EBUSY;
3747 break;
3748 }
3749 if (pca->action != PF_CHANGE_REMOVE) {
3750 newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3751 if (newpa == NULL) {
3752 error = ENOMEM;
3753 break;
3754 }
3755 pf_pooladdr_copyin(&pca->addr, newpa);
3756#if !INET
3757 if (pca->af == AF_INET) {
3758 pool_put(&pf_pooladdr_pl, newpa);
3759 error = EAFNOSUPPORT;
3760 break;
3761 }
3762#endif /* INET */
3763#if !INET6
3764 if (pca->af == AF_INET6) {
3765 pool_put(&pf_pooladdr_pl, newpa);
3766 error = EAFNOSUPPORT;
3767 break;
3768 }
3769#endif /* INET6 */
3770 if (newpa->ifname[0]) {
3771 newpa->kif = pfi_kif_get(newpa->ifname);
3772 if (newpa->kif == NULL) {
3773 pool_put(&pf_pooladdr_pl, newpa);
3774 error = EINVAL;
3775 break;
3776 }
3777 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
3778 } else
3779 newpa->kif = NULL;
3780 pf_addrwrap_setup(&newpa->addr);
3781 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
3782 pf_tbladdr_setup(ruleset, &newpa->addr)) {
3783 pfi_dynaddr_remove(&newpa->addr);
3784 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
3785 pool_put(&pf_pooladdr_pl, newpa);
3786 error = EINVAL;
3787 break;
3788 }
3789 }
3790
3791 if (pca->action == PF_CHANGE_ADD_HEAD)
3792 oldpa = TAILQ_FIRST(&pool->list);
3793 else if (pca->action == PF_CHANGE_ADD_TAIL)
3794 oldpa = TAILQ_LAST(&pool->list, pf_palist);
3795 else {
3796 int i = 0;
3797
3798 oldpa = TAILQ_FIRST(&pool->list);
3799 while ((oldpa != NULL) && (i < (int)pca->nr)) {
3800 oldpa = TAILQ_NEXT(oldpa, entries);
3801 i++;
3802 }
3803 if (oldpa == NULL) {
3804 error = EINVAL;
3805 break;
3806 }
3807 }
3808
3809 if (pca->action == PF_CHANGE_REMOVE) {
3810 TAILQ_REMOVE(&pool->list, oldpa, entries);
3811 pfi_dynaddr_remove(&oldpa->addr);
3812 pf_tbladdr_remove(&oldpa->addr);
3813 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
3814 pool_put(&pf_pooladdr_pl, oldpa);
3815 } else {
3816 if (oldpa == NULL)
3817 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3818 else if (pca->action == PF_CHANGE_ADD_HEAD ||
3819 pca->action == PF_CHANGE_ADD_BEFORE)
3820 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3821 else
3822 TAILQ_INSERT_AFTER(&pool->list, oldpa,
3823 newpa, entries);
3824 }
3825
3826 pool->cur = TAILQ_FIRST(&pool->list);
3827 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
3828 pca->af);
3829 break;
3830 }
3831
3832 default:
3833 VERIFY(0);
3834 /* NOTREACHED */
3835 }
3836
3837 return (error);
3838}
3839
3840static int
3841pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
3842{
3843#pragma unused(p)
3844 int error = 0;
3845
3846 switch (cmd) {
3847 case DIOCGETRULESETS: {
3848 struct pf_ruleset *ruleset;
3849 struct pf_anchor *anchor;
3850
3851 pr->path[sizeof (pr->path) - 1] = '\0';
3852 pr->name[sizeof (pr->name) - 1] = '\0';
3853 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
3854 error = EINVAL;
3855 break;
3856 }
3857 pr->nr = 0;
3858 if (ruleset->anchor == NULL) {
3859 /* XXX kludge for pf_main_ruleset */
3860 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
3861 if (anchor->parent == NULL)
3862 pr->nr++;
3863 } else {
3864 RB_FOREACH(anchor, pf_anchor_node,
3865 &ruleset->anchor->children)
3866 pr->nr++;
3867 }
3868 break;
3869 }
3870
3871 case DIOCGETRULESET: {
3872 struct pf_ruleset *ruleset;
3873 struct pf_anchor *anchor;
3874 u_int32_t nr = 0;
3875
3876 pr->path[sizeof (pr->path) - 1] = '\0';
3877 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
3878 error = EINVAL;
3879 break;
3880 }
3881 pr->name[0] = 0;
3882 if (ruleset->anchor == NULL) {
3883 /* XXX kludge for pf_main_ruleset */
3884 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
3885 if (anchor->parent == NULL && nr++ == pr->nr) {
3886 strlcpy(pr->name, anchor->name,
3887 sizeof (pr->name));
3888 break;
3889 }
3890 } else {
3891 RB_FOREACH(anchor, pf_anchor_node,
3892 &ruleset->anchor->children)
3893 if (nr++ == pr->nr) {
3894 strlcpy(pr->name, anchor->name,
3895 sizeof (pr->name));
3896 break;
3897 }
3898 }
3899 if (!pr->name[0])
3900 error = EBUSY;
3901 break;
3902 }
3903
3904 default:
3905 VERIFY(0);
3906 /* NOTREACHED */
3907 }
3908
3909 return (error);
3910}
3911
3912static int
3913pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
3914 struct pfioc_trans_64 *io64, struct proc *p)
3915{
3916 int p64 = proc_is64bit(p);
3917 int error = 0, esize, size;
3918 user_addr_t buf;
3919
3920 esize = (p64 ? io64->esize : io32->esize);
3921 size = (p64 ? io64->size : io32->size);
3922 buf = (p64 ? io64->array : io32->array);
3923
3924 switch (cmd) {
3925 case DIOCXBEGIN: {
3926 struct pfioc_trans_e *ioe;
3927 struct pfr_table *table;
3928 int i;
3929
3930 if (esize != sizeof (*ioe)) {
3931 error = ENODEV;
3932 goto fail;
3933 }
3934 ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
3935 table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
3936 for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
3937 if (copyin(buf, ioe, sizeof (*ioe))) {
3938 _FREE(table, M_TEMP);
3939 _FREE(ioe, M_TEMP);
3940 error = EFAULT;
3941 goto fail;
3942 }
3943 ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
3944 switch (ioe->rs_num) {
3945 case PF_RULESET_ALTQ:
3946 break;
3947 case PF_RULESET_TABLE:
3948 bzero(table, sizeof (*table));
3949 strlcpy(table->pfrt_anchor, ioe->anchor,
3950 sizeof (table->pfrt_anchor));
3951 if ((error = pfr_ina_begin(table,
3952 &ioe->ticket, NULL, 0))) {
3953 _FREE(table, M_TEMP);
3954 _FREE(ioe, M_TEMP);
3955 goto fail;
3956 }
3957 break;
3958 default:
3959 if ((error = pf_begin_rules(&ioe->ticket,
3960 ioe->rs_num, ioe->anchor))) {
3961 _FREE(table, M_TEMP);
3962 _FREE(ioe, M_TEMP);
3963 goto fail;
3964 }
3965 break;
3966 }
3967 if (copyout(ioe, buf, sizeof (*ioe))) {
3968 _FREE(table, M_TEMP);
3969 _FREE(ioe, M_TEMP);
3970 error = EFAULT;
3971 goto fail;
3972 }
3973 }
3974 _FREE(table, M_TEMP);
3975 _FREE(ioe, M_TEMP);
3976 break;
3977 }
3978
3979 case DIOCXROLLBACK: {
3980 struct pfioc_trans_e *ioe;
3981 struct pfr_table *table;
3982 int i;
3983
3984 if (esize != sizeof (*ioe)) {
3985 error = ENODEV;
3986 goto fail;
3987 }
3988 ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
3989 table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
3990 for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
3991 if (copyin(buf, ioe, sizeof (*ioe))) {
3992 _FREE(table, M_TEMP);
3993 _FREE(ioe, M_TEMP);
3994 error = EFAULT;
3995 goto fail;
3996 }
3997 ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
3998 switch (ioe->rs_num) {
3999 case PF_RULESET_ALTQ:
4000 break;
4001 case PF_RULESET_TABLE:
4002 bzero(table, sizeof (*table));
4003 strlcpy(table->pfrt_anchor, ioe->anchor,
4004 sizeof (table->pfrt_anchor));
4005 if ((error = pfr_ina_rollback(table,
4006 ioe->ticket, NULL, 0))) {
4007 _FREE(table, M_TEMP);
4008 _FREE(ioe, M_TEMP);
4009 goto fail; /* really bad */
4010 }
4011 break;
4012 default:
4013 if ((error = pf_rollback_rules(ioe->ticket,
4014 ioe->rs_num, ioe->anchor))) {
4015 _FREE(table, M_TEMP);
4016 _FREE(ioe, M_TEMP);
4017 goto fail; /* really bad */
4018 }
4019 break;
4020 }
4021 }
4022 _FREE(table, M_TEMP);
4023 _FREE(ioe, M_TEMP);
4024 break;
4025 }
4026
4027 case DIOCXCOMMIT: {
4028 struct pfioc_trans_e *ioe;
4029 struct pfr_table *table;
4030 struct pf_ruleset *rs;
4031 user_addr_t _buf = buf;
4032 int i;
4033
4034 if (esize != sizeof (*ioe)) {
4035 error = ENODEV;
4036 goto fail;
4037 }
4038 ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
4039 table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
4040 /* first makes sure everything will succeed */
4041 for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
4042 if (copyin(buf, ioe, sizeof (*ioe))) {
4043 _FREE(table, M_TEMP);
4044 _FREE(ioe, M_TEMP);
4045 error = EFAULT;
4046 goto fail;
4047 }
4048 ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
4049 switch (ioe->rs_num) {
4050 case PF_RULESET_ALTQ:
4051 break;
4052 case PF_RULESET_TABLE:
4053 rs = pf_find_ruleset(ioe->anchor);
4054 if (rs == NULL || !rs->topen || ioe->ticket !=
4055 rs->tticket) {
4056 _FREE(table, M_TEMP);
4057 _FREE(ioe, M_TEMP);
4058 error = EBUSY;
4059 goto fail;
4060 }
4061 break;
4062 default:
4063 if (ioe->rs_num < 0 || ioe->rs_num >=
4064 PF_RULESET_MAX) {
4065 _FREE(table, M_TEMP);
4066 _FREE(ioe, M_TEMP);
4067 error = EINVAL;
4068 goto fail;
4069 }
4070 rs = pf_find_ruleset(ioe->anchor);
4071 if (rs == NULL ||
4072 !rs->rules[ioe->rs_num].inactive.open ||
4073 rs->rules[ioe->rs_num].inactive.ticket !=
4074 ioe->ticket) {
4075 _FREE(table, M_TEMP);
4076 _FREE(ioe, M_TEMP);
4077 error = EBUSY;
4078 goto fail;
4079 }
4080 break;
4081 }
4082 }
4083 buf = _buf;
4084 /* now do the commit - no errors should happen here */
4085 for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
4086 if (copyin(buf, ioe, sizeof (*ioe))) {
4087 _FREE(table, M_TEMP);
4088 _FREE(ioe, M_TEMP);
4089 error = EFAULT;
4090 goto fail;
4091 }
4092 ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
4093 switch (ioe->rs_num) {
4094 case PF_RULESET_ALTQ:
4095 break;
4096 case PF_RULESET_TABLE:
4097 bzero(table, sizeof (*table));
4098 strlcpy(table->pfrt_anchor, ioe->anchor,
4099 sizeof (table->pfrt_anchor));
4100 if ((error = pfr_ina_commit(table, ioe->ticket,
4101 NULL, NULL, 0))) {
4102 _FREE(table, M_TEMP);
4103 _FREE(ioe, M_TEMP);
4104 goto fail; /* really bad */
4105 }
4106 break;
4107 default:
4108 if ((error = pf_commit_rules(ioe->ticket,
4109 ioe->rs_num, ioe->anchor))) {
4110 _FREE(table, M_TEMP);
4111 _FREE(ioe, M_TEMP);
4112 goto fail; /* really bad */
4113 }
4114 break;
4115 }
4116 }
4117 _FREE(table, M_TEMP);
4118 _FREE(ioe, M_TEMP);
4119 break;
4120 }
4121
4122 default:
4123 VERIFY(0);
4124 /* NOTREACHED */
4125 }
4126fail:
4127 return (error);
4128}
4129
4130static int
4131pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
4132 struct pfioc_src_nodes_64 *psn64, struct proc *p)
4133{
4134 int p64 = proc_is64bit(p);
4135 int error = 0;
4136
4137 switch (cmd) {
4138 case DIOCGETSRCNODES: {
4139 struct pf_src_node *n, *pstore;
4140 user_addr_t buf;
4141 u_int32_t nr = 0;
4142 int space, size;
4143
4144 space = (p64 ? psn64->psn_len : psn32->psn_len);
4145 if (space == 0) {
4146 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
4147 nr++;
4148
4149 size = sizeof (struct pf_src_node) * nr;
4150 if (p64)
4151 psn64->psn_len = size;
4152 else
4153 psn32->psn_len = size;
4154 break;
4155 }
4156
4157 pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
4158 if (pstore == NULL) {
4159 error = ENOMEM;
4160 break;
4161 }
4162 buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
4163
4164 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
4165 uint64_t secs = pf_time_second(), diff;
4166
4167 if ((nr + 1) * sizeof (*pstore) > (unsigned)space)
4168 break;
4169
4170 bcopy(n, pstore, sizeof (*pstore));
4171 if (n->rule.ptr != NULL)
4172 pstore->rule.nr = n->rule.ptr->nr;
4173 pstore->creation = secs - pstore->creation;
4174 if (pstore->expire > secs)
4175 pstore->expire -= secs;
4176 else
4177 pstore->expire = 0;
4178
4179 /* adjust the connection rate estimate */
4180 diff = secs - n->conn_rate.last;
4181 if (diff >= n->conn_rate.seconds)
4182 pstore->conn_rate.count = 0;
4183 else
4184 pstore->conn_rate.count -=
4185 n->conn_rate.count * diff /
4186 n->conn_rate.seconds;
4187
4188 _RB_PARENT(pstore, entry) = NULL;
4189 RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
4190 pstore->kif = NULL;
4191
4192 error = copyout(pstore, buf, sizeof (*pstore));
4193 if (error) {
4194 _FREE(pstore, M_TEMP);
4195 goto fail;
4196 }
4197 buf += sizeof (*pstore);
4198 nr++;
4199 }
4200
4201 size = sizeof (struct pf_src_node) * nr;
4202 if (p64)
4203 psn64->psn_len = size;
4204 else
4205 psn32->psn_len = size;
4206
4207 _FREE(pstore, M_TEMP);
4208 break;
4209 }
4210
4211 default:
4212 VERIFY(0);
4213 /* NOTREACHED */
4214 }
4215fail:
4216 return (error);
4217
4218}
4219
4220static int
4221pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4222 struct proc *p)
4223{
4224#pragma unused(p)
4225 int error = 0;
4226
4227 switch (cmd) {
4228 case DIOCKILLSRCNODES: {
4229 struct pf_src_node *sn;
4230 struct pf_state *s;
4231 int killed = 0;
4232
4233 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4234 if (PF_MATCHA(psnk->psnk_src.neg,
4235 &psnk->psnk_src.addr.v.a.addr,
4236 &psnk->psnk_src.addr.v.a.mask,
4237 &sn->addr, sn->af) &&
4238 PF_MATCHA(psnk->psnk_dst.neg,
4239 &psnk->psnk_dst.addr.v.a.addr,
4240 &psnk->psnk_dst.addr.v.a.mask,
4241 &sn->raddr, sn->af)) {
4242 /* Handle state to src_node linkage */
4243 if (sn->states != 0) {
4244 RB_FOREACH(s, pf_state_tree_id,
4245 &tree_id) {
4246 if (s->src_node == sn)
4247 s->src_node = NULL;
4248 if (s->nat_src_node == sn)
4249 s->nat_src_node = NULL;
4250 }
4251 sn->states = 0;
4252 }
4253 sn->expire = 1;
4254 killed++;
4255 }
4256 }
4257
4258 if (killed > 0)
4259 pf_purge_expired_src_nodes();
4260
4261 psnk->psnk_af = killed;
4262 break;
4263 }
4264
4265 default:
4266 VERIFY(0);
4267 /* NOTREACHED */
4268 }
4269
4270 return (error);
4271}
4272
4273static int
4274pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4275 struct pfioc_iface_64 *io64, struct proc *p)
4276{
4277 int p64 = proc_is64bit(p);
4278 int error = 0;
4279
4280 switch (cmd) {
4281 case DIOCIGETIFACES: {
4282 user_addr_t buf;
4283 int esize;
4284
4285 buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4286 esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4287
4288 /* esize must be that of the user space version of pfi_kif */
4289 if (esize != sizeof (struct pfi_uif)) {
4290 error = ENODEV;
4291 break;
4292 }
4293 if (p64)
4294 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4295 else
4296 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4297 error = pfi_get_ifaces(
4298 p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4299 p64 ? &io64->pfiio_size : &io32->pfiio_size);
4300 break;
4301 }
4302
4303 case DIOCSETIFFLAG: {
4304 if (p64)
4305 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4306 else
4307 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4308
4309 error = pfi_set_flags(
4310 p64 ? io64->pfiio_name : io32->pfiio_name,
4311 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4312 break;
4313 }
4314
4315 case DIOCCLRIFFLAG: {
4316 if (p64)
4317 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4318 else
4319 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4320
4321 error = pfi_clear_flags(
4322 p64 ? io64->pfiio_name : io32->pfiio_name,
4323 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4324 break;
4325 }
4326
4327 default:
4328 VERIFY(0);
4329 /* NOTREACHED */
4330 }
4331
4332 return (error);
4333}
4334
4335int
4336pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
4337 unsigned int af, int input, struct ip_fw_args *fwa)
4338{
4339 int error = 0;
4340 struct mbuf *nextpkt;
4341 net_thread_marks_t marks;
4342 struct ifnet * pf_ifp = ifp;
4343
4344 /* Always allow traffic on co-processor interfaces. */
4345 if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp))
4346 return (0);
4347
4348 marks = net_thread_marks_push(NET_THREAD_HELD_PF);
4349
4350 if (marks != net_thread_marks_none) {
4351 lck_rw_lock_shared(pf_perim_lock);
4352 if (!pf_is_enabled)
4353 goto done;
4354 lck_mtx_lock(pf_lock);
4355 }
4356
4357 if (mppn != NULL && *mppn != NULL)
4358 VERIFY(*mppn == *mp);
4359 if ((nextpkt = (*mp)->m_nextpkt) != NULL)
4360 (*mp)->m_nextpkt = NULL;
4361
4362 /*
4363 * For packets destined to locally hosted IP address
4364 * ip_output_list sets Mbuf's pkt header's rcvif to
4365 * the interface hosting the IP address.
4366 * While on the output path ifp passed to pf_af_hook
4367 * to such local communication is the loopback interface,
4368 * the input path derives ifp from mbuf packet header's
4369 * rcvif.
4370 * This asymmetry caues issues with PF.
4371 * To handle that case, we have a limited change here to
4372 * pass interface as loopback if packets are looped in.
4373 */
4374 if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
4375 pf_ifp = lo_ifp;
4376 }
4377
4378 switch (af) {
4379#if INET
4380 case AF_INET: {
4381 error = pf_inet_hook(pf_ifp, mp, input, fwa);
4382 break;
4383 }
4384#endif /* INET */
4385#if INET6
4386 case AF_INET6:
4387 error = pf_inet6_hook(pf_ifp, mp, input, fwa);
4388 break;
4389#endif /* INET6 */
4390 default:
4391 break;
4392 }
4393
4394 /* When packet valid, link to the next packet */
4395 if (*mp != NULL && nextpkt != NULL) {
4396 struct mbuf *m = *mp;
4397 while (m->m_nextpkt != NULL)
4398 m = m->m_nextpkt;
4399 m->m_nextpkt = nextpkt;
4400 }
4401 /* Fix up linkage of previous packet in the chain */
4402 if (mppn != NULL) {
4403 if (*mp != NULL)
4404 *mppn = *mp;
4405 else
4406 *mppn = nextpkt;
4407 }
4408
4409 if (marks != net_thread_marks_none)
4410 lck_mtx_unlock(pf_lock);
4411
4412done:
4413 if (marks != net_thread_marks_none)
4414 lck_rw_done(pf_perim_lock);
4415
4416 net_thread_marks_pop(marks);
4417 return (error);
4418}
4419
4420
4421#if INET
4422static int
4423pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4424 struct ip_fw_args *fwa)
4425{
4426 struct mbuf *m = *mp;
4427#if BYTE_ORDER != BIG_ENDIAN
4428 struct ip *ip = mtod(m, struct ip *);
4429#endif
4430 int error = 0;
4431
4432 /*
4433 * If the packet is outbound, is originated locally, is flagged for
4434 * delayed UDP/TCP checksum calculation, and is about to be processed
4435 * for an interface that doesn't support the appropriate checksum
4436 * offloading, then calculated the checksum here so that PF can adjust
4437 * it properly.
4438 */
4439 if (!input && m->m_pkthdr.rcvif == NULL) {
4440 static const int mask = CSUM_DELAY_DATA;
4441 const int flags = m->m_pkthdr.csum_flags &
4442 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4443
4444 if (flags & mask) {
4445 in_delayed_cksum(m);
4446 m->m_pkthdr.csum_flags &= ~mask;
4447 }
4448 }
4449
4450#if BYTE_ORDER != BIG_ENDIAN
4451 HTONS(ip->ip_len);
4452 HTONS(ip->ip_off);
4453#endif
4454 if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4455 if (*mp != NULL) {
4456 m_freem(*mp);
4457 *mp = NULL;
4458 error = EHOSTUNREACH;
4459 } else {
4460 error = ENOBUFS;
4461 }
4462 }
4463#if BYTE_ORDER != BIG_ENDIAN
4464 else {
4465 if (*mp != NULL) {
4466 ip = mtod(*mp, struct ip *);
4467 NTOHS(ip->ip_len);
4468 NTOHS(ip->ip_off);
4469 }
4470 }
4471#endif
4472 return (error);
4473}
4474#endif /* INET */
4475
4476#if INET6
4477int
4478pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4479 struct ip_fw_args *fwa)
4480{
4481 int error = 0;
4482
4483 /*
4484 * If the packet is outbound, is originated locally, is flagged for
4485 * delayed UDP/TCP checksum calculation, and is about to be processed
4486 * for an interface that doesn't support the appropriate checksum
4487 * offloading, then calculated the checksum here so that PF can adjust
4488 * it properly.
4489 */
4490 if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4491 static const int mask = CSUM_DELAY_IPV6_DATA;
4492 const int flags = (*mp)->m_pkthdr.csum_flags &
4493 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4494
4495 if (flags & mask) {
4496 /*
4497 * Checksum offload should not have been enabled
4498 * when extension headers exist, thus 0 for optlen.
4499 */
4500 in6_delayed_cksum(*mp);
4501 (*mp)->m_pkthdr.csum_flags &= ~mask;
4502 }
4503 }
4504
4505 if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4506 if (*mp != NULL) {
4507 m_freem(*mp);
4508 *mp = NULL;
4509 error = EHOSTUNREACH;
4510 } else {
4511 error = ENOBUFS;
4512 }
4513 }
4514 return (error);
4515}
4516#endif /* INET6 */
4517
4518int
4519pf_ifaddr_hook(struct ifnet *ifp)
4520{
4521 struct pfi_kif *kif = ifp->if_pf_kif;
4522
4523 if (kif != NULL) {
4524 lck_rw_lock_shared(pf_perim_lock);
4525 lck_mtx_lock(pf_lock);
4526
4527 pfi_kifaddr_update(kif);
4528
4529 lck_mtx_unlock(pf_lock);
4530 lck_rw_done(pf_perim_lock);
4531 }
4532 return (0);
4533}
4534
4535/*
4536 * Caller acquires dlil lock as writer (exclusive)
4537 */
4538void
4539pf_ifnet_hook(struct ifnet *ifp, int attach)
4540{
4541 lck_rw_lock_shared(pf_perim_lock);
4542 lck_mtx_lock(pf_lock);
4543 if (attach)
4544 pfi_attach_ifnet(ifp);
4545 else
4546 pfi_detach_ifnet(ifp);
4547 lck_mtx_unlock(pf_lock);
4548 lck_rw_done(pf_perim_lock);
4549}
4550
4551static void
4552pf_attach_hooks(void)
4553{
4554 ifnet_head_lock_shared();
4555 /*
4556 * Check against ifnet_addrs[] before proceeding, in case this
4557 * is called very early on, e.g. during dlil_init() before any
4558 * network interface is attached.
4559 */
4560 if (ifnet_addrs != NULL) {
4561 int i;
4562
4563 for (i = 0; i <= if_index; i++) {
4564 struct ifnet *ifp = ifindex2ifnet[i];
4565 if (ifp != NULL) {
4566 pfi_attach_ifnet(ifp);
4567 }
4568 }
4569 }
4570 ifnet_head_done();
4571}
4572
4573#if 0
4574/* currently unused along with pfdetach() */
4575static void
4576pf_detach_hooks(void)
4577{
4578 ifnet_head_lock_shared();
4579 if (ifnet_addrs != NULL) {
4580 for (i = 0; i <= if_index; i++) {
4581 int i;
4582
4583 struct ifnet *ifp = ifindex2ifnet[i];
4584 if (ifp != NULL && ifp->if_pf_kif != NULL) {
4585 pfi_detach_ifnet(ifp);
4586 }
4587 }
4588 }
4589 ifnet_head_done();
4590}
4591#endif
4592
4593/*
4594 * 'D' group ioctls.
4595 *
4596 * The switch statement below does nothing at runtime, as it serves as a
4597 * compile time check to ensure that all of the socket 'D' ioctls (those
4598 * in the 'D' group going thru soo_ioctl) that are made available by the
4599 * networking stack is unique. This works as long as this routine gets
4600 * updated each time a new interface ioctl gets added.
4601 *
4602 * Any failures at compile time indicates duplicated ioctl values.
4603 */
4604static __attribute__((unused)) void
4605pfioctl_cassert(void)
4606{
4607 /*
4608 * This is equivalent to _CASSERT() and the compiler wouldn't
4609 * generate any instructions, thus for compile time only.
4610 */
4611 switch ((u_long)0) {
4612 case 0:
4613
4614 /* bsd/net/pfvar.h */
4615 case DIOCSTART:
4616 case DIOCSTOP:
4617 case DIOCADDRULE:
4618 case DIOCGETSTARTERS:
4619 case DIOCGETRULES:
4620 case DIOCGETRULE:
4621 case DIOCSTARTREF:
4622 case DIOCSTOPREF:
4623 case DIOCCLRSTATES:
4624 case DIOCGETSTATE:
4625 case DIOCSETSTATUSIF:
4626 case DIOCGETSTATUS:
4627 case DIOCCLRSTATUS:
4628 case DIOCNATLOOK:
4629 case DIOCSETDEBUG:
4630 case DIOCGETSTATES:
4631 case DIOCCHANGERULE:
4632 case DIOCINSERTRULE:
4633 case DIOCDELETERULE:
4634 case DIOCSETTIMEOUT:
4635 case DIOCGETTIMEOUT:
4636 case DIOCADDSTATE:
4637 case DIOCCLRRULECTRS:
4638 case DIOCGETLIMIT:
4639 case DIOCSETLIMIT:
4640 case DIOCKILLSTATES:
4641 case DIOCSTARTALTQ:
4642 case DIOCSTOPALTQ:
4643 case DIOCADDALTQ:
4644 case DIOCGETALTQS:
4645 case DIOCGETALTQ:
4646 case DIOCCHANGEALTQ:
4647 case DIOCGETQSTATS:
4648 case DIOCBEGINADDRS:
4649 case DIOCADDADDR:
4650 case DIOCGETADDRS:
4651 case DIOCGETADDR:
4652 case DIOCCHANGEADDR:
4653 case DIOCGETRULESETS:
4654 case DIOCGETRULESET:
4655 case DIOCRCLRTABLES:
4656 case DIOCRADDTABLES:
4657 case DIOCRDELTABLES:
4658 case DIOCRGETTABLES:
4659 case DIOCRGETTSTATS:
4660 case DIOCRCLRTSTATS:
4661 case DIOCRCLRADDRS:
4662 case DIOCRADDADDRS:
4663 case DIOCRDELADDRS:
4664 case DIOCRSETADDRS:
4665 case DIOCRGETADDRS:
4666 case DIOCRGETASTATS:
4667 case DIOCRCLRASTATS:
4668 case DIOCRTSTADDRS:
4669 case DIOCRSETTFLAGS:
4670 case DIOCRINADEFINE:
4671 case DIOCOSFPFLUSH:
4672 case DIOCOSFPADD:
4673 case DIOCOSFPGET:
4674 case DIOCXBEGIN:
4675 case DIOCXCOMMIT:
4676 case DIOCXROLLBACK:
4677 case DIOCGETSRCNODES:
4678 case DIOCCLRSRCNODES:
4679 case DIOCSETHOSTID:
4680 case DIOCIGETIFACES:
4681 case DIOCSETIFFLAG:
4682 case DIOCCLRIFFLAG:
4683 case DIOCKILLSRCNODES:
4684 case DIOCGIFSPEED:
4685 ;
4686 }
4687}
4688