/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel memory
 *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */
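/*
 * An illustrative sketch of the lifecycle (addresses are hypothetical):
 * a lookup of 10.1.2.3 through a cloning 10.1.2.0/24 route creates a
 * host route to 10.1.2.3 marked RTF_WASCLONED.  When the last reference
 * to that host route is dropped, in_clsroute() below tags it RTPRF_OURS
 * and arms an expiry timer; in_rtqtimo() later walks the tree and
 * deletes it once that expiry has passed.
 */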

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <kern/locks.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_arp.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/nd6.h>

#include <net/sockaddr_utils.h>

extern int tvtohz(struct timeval *);

static int in_rtqtimo_run;    /* in_rtqtimo is scheduled to run */
static void in_rtqtimo(void *);
static void in_sched_rtqtimo(struct timeval *);

static struct radix_node *in_addroute(void *, void *, struct radix_node_head *,
    struct radix_node *);
static struct radix_node *in_deleteroute(void *, void *,
    struct radix_node_head *);
static struct radix_node *in_matroute(void *, struct radix_node_head *);
static struct radix_node *in_matroute_args(void *, struct radix_node_head *,
    rn_matchf_t *f, void *);
static void in_clsroute(struct radix_node *, struct radix_node_head *);
static int in_rtqkill(struct radix_node *, void *);

static int in_ifadownkill(struct radix_node *, void *);

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *treenodes)
{
    struct rtentry *rt = (struct rtentry *)treenodes;
    struct sockaddr_in *sin = SIN(rt_key(rt));
    struct radix_node *ret;
    char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
    uint32_t flags = rt->rt_flags;
    boolean_t verbose = (rt_verbose > 0);

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
    RT_LOCK_ASSERT_HELD(rt);

    if (verbose) {
        rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
    }

    /*
     * For IP, all unicast non-host routes are automatically cloning.
     */
    if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
        rt->rt_flags |= RTF_MULTICAST;
    }

    if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
        rt->rt_flags |= RTF_PRCLONING;
    }

    /*
     * A little bit of help for both IP output and input:
     * For host routes, we make sure that RTF_BROADCAST
     * is set for anything that looks like a broadcast address.
     * This way, we can avoid an expensive call to in_broadcast()
     * in ip_output() most of the time (because the route passed
     * to ip_output() is almost always a host route).
     *
     * We also do the same for local addresses, with the thought
     * that this might one day be used to speed up ip_input().
     *
     * We also mark routes to multicast addresses as such, because
     * it's easy to do and might be useful (but this is much more
     * dubious since it's so easy to inspect the address).  (This
     * is done above.)
     */
    if (rt->rt_flags & RTF_HOST) {
        if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
            rt->rt_flags |= RTF_BROADCAST;
        } else {
            /* Become a regular mutex */
            RT_CONVERT_LOCK(rt);
            IFA_LOCK_SPIN(rt->rt_ifa);
            if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
                sin->sin_addr.s_addr) {
                rt->rt_flags |= RTF_LOCAL;
            }
            IFA_UNLOCK(rt->rt_ifa);
        }
    }

    if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
        rt->rt_ifp) {
        rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
        if (INTF_ADJUST_MTU_FOR_CLAT46(rt->rt_ifp)) {
            rt->rt_rmx.rmx_mtu = IN6_LINKMTU(rt->rt_ifp);
            /* Further adjust the size for CLAT46 expansion */
            rt->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
        }
    }

    ret = rn_addroute(v_arg, n_arg, head, treenodes);
    if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
        struct rtentry *rt2;
        /*
         * We are trying to add a host route, but can't.
         * Find out if it is because of an
         * ARP entry and delete it if so.
         */
        rt2 = rtalloc1_scoped_locked(rt_key(rt), 0,
            RTF_CLONING | RTF_PRCLONING, sin_get_ifscope(rt_key(rt)));
        if (rt2 != NULL) {
            char dbufc[MAX_IPv4_STR_LEN];

            RT_LOCK(rt2);
            if (verbose) {
                rt_str(rt2, dbufc, sizeof(dbufc), NULL, 0);
            }

            if ((rt2->rt_flags & RTF_LLINFO) &&
                (rt2->rt_flags & RTF_HOST) &&
                rt2->rt_gateway != NULL &&
                rt2->rt_gateway->sa_family == AF_LINK) {
                if (verbose) {
                    os_log_debug(OS_LOG_DEFAULT, "%s: unable to insert "
                        "route to %s;%s, flags=0x%x, due to "
                        "existing ARP route %s->%s "
                        "flags=0x%x, attempting to delete\n",
                        __func__, dbuf,
                        (rt->rt_ifp != NULL) ?
                        rt->rt_ifp->if_xname : "",
                        rt->rt_flags, dbufc,
                        (rt2->rt_ifp != NULL) ?
                        rt2->rt_ifp->if_xname : "",
                        rt2->rt_flags);
                }
                /*
                 * Safe to drop rt_lock and use rt_key,
                 * rt_gateway, since holding rnh_lock here
                 * prevents another thread from calling
                 * rt_setgate() on this route.
                 */
                RT_UNLOCK(rt2);
                (void) rtrequest_locked(RTM_DELETE, rt_key(rt2),
                    rt2->rt_gateway, rt_mask(rt2),
                    rt2->rt_flags, NULL);
                ret = rn_addroute(v_arg, n_arg, head,
                    treenodes);
            } else {
                RT_UNLOCK(rt2);
            }
            rtfree_locked(rt2);
        }
    }

    if (!verbose) {
        goto done;
    }

    if (ret != NULL) {
        if (flags != rt->rt_flags) {
            os_log_debug(OS_LOG_DEFAULT, "%s: route to %s->%s->%s inserted, "
                "oflags=0x%x, flags=0x%x\n", __func__,
                dbuf, gbuf, (rt->rt_ifp != NULL) ?
                rt->rt_ifp->if_xname : "", flags,
                rt->rt_flags);
        } else {
            os_log_debug(OS_LOG_DEFAULT, "%s: route to %s->%s->%s inserted, "
                "flags=0x%x\n", __func__, dbuf, gbuf,
                (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
                rt->rt_flags);
        }
    } else {
        os_log_debug(OS_LOG_DEFAULT, "%s: unable to insert route to %s->%s->%s, "
            "flags=0x%x, already exists\n", __func__, dbuf, gbuf,
            (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
            rt->rt_flags);
    }
done:
    return ret;
}

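/*
 * Remove a route from the radix tree; with rt_verbose set, log what was
 * deleted.  Called with rnh_lock held.
 */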
static struct radix_node *
in_deleteroute(void *v_arg, void *netmask_arg, struct radix_node_head *head)
{
    struct radix_node *rn;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    rn = rn_delete(v_arg, netmask_arg, head);
    if (rt_verbose > 0 && rn != NULL) {
        char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
        struct rtentry *rt = (struct rtentry *)rn;

        RT_LOCK(rt);
        rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
        os_log_debug(OS_LOG_DEFAULT, "%s: route to %s->%s->%s deleted, "
            "flags=0x%x\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ?
            rt->rt_ifp->if_xname : "", rt->rt_flags);
        RT_UNLOCK(rt);
    }
    return rn;
}

/*
 * Validate (unexpire) an expiring AF_INET route.
 */
struct radix_node *
in_validate(struct radix_node *rn)
{
    struct rtentry *rt = (struct rtentry *)rn;

    RT_LOCK_ASSERT_HELD(rt);

    /* Is this the first reference? */
    if (rt->rt_refcnt == 0) {
        if (rt_verbose > 2) {
            char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];

            rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
            os_log_debug(OS_LOG_DEFAULT, "%s: route to %s->%s->%s validated, "
                "flags=0x%x\n", __func__, dbuf, gbuf,
                (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
                rt->rt_flags);
        }

        /*
         * It's one of ours; unexpire it.  If the timer is already
         * scheduled, let it run later as it won't re-arm itself
         * if there's nothing to do.
         */
        if (rt->rt_flags & RTPRF_OURS) {
            rt->rt_flags &= ~RTPRF_OURS;
            rt_setexpire(rt, 0);
        }
    }
    return rn;
}

/*
 * Similar to in_matroute_args except without the leaf-matching parameters.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
    return in_matroute_args(v_arg, head, NULL, NULL);
}

/*
 * This code is the inverse of in_clsroute: on first reference, if we
 * were managing the route, stop doing so and turn the expiration timer
 * back off.
 */
static struct radix_node *
in_matroute_args(void *v_arg, struct radix_node_head *head,
    rn_matchf_t *f, void *w)
{
    struct radix_node *rn = rn_match_args(v_arg, head, f, w);

    if (rn != NULL) {
        RT_LOCK_SPIN((struct rtentry *)rn);
        in_validate(rn);
        RT_UNLOCK((struct rtentry *)rn);
    }
    return rn;
}

/* one hour is ``really old'' */
static uint32_t rtq_reallyold = 60 * 60;
SYSCTL_UINT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire,
    CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_reallyold, 0,
    "Default expiration time on dynamically learned routes");

/* never automatically crank down to less */
static uint32_t rtq_minreallyold = 10;
SYSCTL_UINT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire,
    CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

/* 128 cached routes is ``too many'' */
static uint32_t rtq_toomany = 128;
SYSCTL_UINT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache,
    CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_toomany, 0,
    "Upper limit on dynamically learned routes");

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
#pragma unused(head)
    char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
    struct rtentry *rt = (struct rtentry *)rn;
    boolean_t verbose = (rt_verbose > 1);

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
    RT_LOCK_ASSERT_HELD(rt);

    if (!(rt->rt_flags & RTF_UP)) {
        return;    /* prophylactic measures */
    }
    if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) {
        return;
    }

    if (rt->rt_flags & RTPRF_OURS) {
        return;
    }

    if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC))) {
        return;
    }

    if (verbose) {
        rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
    }

    /*
     * Delete the route immediately if RTF_DELCLONE is set or
     * if route caching is disabled (rtq_reallyold set to 0).
     * Otherwise, let it expire and be deleted by in_rtqkill().
     */
    if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
        int err;

        if (verbose) {
            os_log_debug(OS_LOG_DEFAULT, "%s: deleting route to %s->%s->%s, "
                "flags=0x%x\n", __func__, dbuf, gbuf,
                (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
                rt->rt_flags);
        }
        /*
         * Delete the route from the radix tree, but since we are
         * called when the route's reference count is 0, don't let
         * it be deallocated until we return from this routine; we
         * do that by telling rtrequest that we're interested in it.
         * Safe to drop rt_lock and use rt_key, rt_gateway since
         * holding rnh_lock here prevents another thread from
         * calling rt_setgate() on this route.
         */
        RT_UNLOCK(rt);
        err = rtrequest_locked(RTM_DELETE, rt_key(rt),
            rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt);
        if (err == 0) {
            /* Now let the caller free it */
            RT_LOCK(rt);
            RT_REMREF_LOCKED(rt);
        } else {
            RT_LOCK(rt);
            rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
            os_log_error(OS_LOG_DEFAULT, "%s: error deleting route to "
                "%s->%s->%s, flags=0x%x, err=%d\n", __func__,
                dbuf, gbuf, (rt->rt_ifp != NULL) ?
                rt->rt_ifp->if_xname : "", rt->rt_flags,
                err);
        }
    } else {
        uint64_t timenow;

        timenow = net_uptime();
        rt->rt_flags |= RTPRF_OURS;
        rt_setexpire(rt, timenow + rtq_reallyold);

        if (rt_verbose > 1) {
            os_log_debug(OS_LOG_DEFAULT, "%s: route to %s->%s->%s invalidated, "
                "flags=0x%x, expire=T+%u\n", __func__, dbuf, gbuf,
                (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
                rt->rt_flags, rt->rt_expire - timenow);
        }

        /* We have at least one entry; arm the timer if not already */
        in_sched_rtqtimo(NULL);
    }
}

struct rtqk_arg {
    struct radix_node_head *rnh;
    int updating;
    int draining;
    uint32_t killed;
    uint32_t found;
    uint64_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * when the timeout has not yet expired.  When updating, this makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
    struct rtqk_arg *ap = rock;
    struct rtentry *rt = (struct rtentry *)rn;
    boolean_t verbose = (rt_verbose > 1);
    uint64_t timenow;
    int err;

    timenow = net_uptime();
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    RT_LOCK(rt);
    if (rt->rt_flags & RTPRF_OURS) {
        char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];

        if (verbose) {
            rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
        }

        ap->found++;
        VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
        VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
        if (ap->draining || rt->rt_expire <= timenow) {
            if (rt->rt_refcnt > 0) {
                panic("%s: route %p marked with RTPRF_OURS "
                    "with non-zero refcnt (%u)", __func__,
                    rt, rt->rt_refcnt);
                /* NOTREACHED */
            }

            if (verbose) {
                os_log_debug(OS_LOG_DEFAULT, "%s: deleting route to "
                    "%s->%s->%s, flags=0x%x, draining=%d\n",
                    __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ?
                    rt->rt_ifp->if_xname : "", rt->rt_flags,
                    ap->draining);
            }
            RT_ADDREF_LOCKED(rt);    /* for us to free below */
            /*
             * Delete this route since we're done with it;
             * the route may be freed afterwards, so we
             * can no longer refer to 'rt' upon returning
             * from rtrequest().  Safe to drop rt_lock and
             * use rt_key, rt_gateway since holding rnh_lock
             * here prevents another thread from calling
             * rt_setgate() on this route.
             */
            RT_UNLOCK(rt);
            err = rtrequest_locked(RTM_DELETE, rt_key(rt),
                rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
            if (err != 0) {
                RT_LOCK(rt);
                if (!verbose) {
                    rt_str(rt, dbuf, sizeof(dbuf),
                        gbuf, sizeof(gbuf));
                }
                os_log_error(OS_LOG_DEFAULT, "%s: error deleting route to "
                    "%s->%s->%s, flags=0x%x, err=%d\n", __func__,
                    dbuf, gbuf, (rt->rt_ifp != NULL) ?
                    rt->rt_ifp->if_xname : "", rt->rt_flags,
                    err);
                RT_UNLOCK(rt);
            } else {
                ap->killed++;
            }
            rtfree_locked(rt);
        } else {
            uint64_t expire = (rt->rt_expire - timenow);

            if (ap->updating && expire > rtq_reallyold) {
                rt_setexpire(rt, timenow + rtq_reallyold);
                if (verbose) {
                    os_log_debug(OS_LOG_DEFAULT, "%s: route to "
                        "%s->%s->%s, flags=0x%x, adjusted "
                        "expire=T+%u (was T+%u)\n",
                        __func__, dbuf, gbuf,
                        (rt->rt_ifp != NULL) ?
                        rt->rt_ifp->if_xname : "",
                        rt->rt_flags,
                        (rt->rt_expire - timenow), expire);
                }
            }
            ap->nextstop = lmin(ap->nextstop, rt->rt_expire);
            RT_UNLOCK(rt);
        }
    } else {
        RT_UNLOCK(rt);
    }

    return 0;
}

#define RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

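/*
 * Timer callback: walk the AF_INET tree with in_rtqkill() to reap
 * expired RTPRF_OURS routes.  If more than rtq_toomany remain, shrink
 * rtq_reallyold and walk again in updating mode; re-arm the timer only
 * while there is still work left to do.
 */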
static void
in_rtqtimo(void *targ)
{
#pragma unused(targ)
    struct radix_node_head *rnh;
    struct rtqk_arg arg;
    struct timeval atv;
    static uint64_t last_adjusted_timeout = 0;
    boolean_t verbose = (rt_verbose > 1);
    uint64_t timenow;
    uint32_t ours;

    lck_mtx_lock(rnh_lock);
    rnh = rt_tables[AF_INET];
    VERIFY(rnh != NULL);

    /* Get the timestamp after we acquire the lock for better accuracy */
    timenow = net_uptime();
    if (verbose) {
        os_log_debug(OS_LOG_DEFAULT, "%s: initial nextstop is T+%u seconds\n",
            __func__, rtq_timeout);
    }
    bzero(&arg, sizeof(arg));
    arg.rnh = rnh;
    arg.nextstop = timenow + rtq_timeout;
    rnh->rnh_walktree(rnh, in_rtqkill, &arg);
    if (verbose) {
        os_log_debug(OS_LOG_DEFAULT, "%s: found %u, killed %u\n", __func__,
            arg.found, arg.killed);
    }
    /*
     * Attempt to be somewhat dynamic about this:
     * If there are ``too many'' routes sitting around taking up space,
     * then crank down the timeout, and see if we can't make some more
     * go away.  However, we make sure that we will never adjust more
     * than once in rtq_timeout seconds, to keep from cranking down too
     * hard.
     */
    ours = (arg.found - arg.killed);
    if (ours > rtq_toomany &&
        ((timenow - last_adjusted_timeout) >= (uint64_t)rtq_timeout) &&
        rtq_reallyold > rtq_minreallyold) {
        rtq_reallyold = 2 * rtq_reallyold / 3;
        if (rtq_reallyold < rtq_minreallyold) {
            rtq_reallyold = rtq_minreallyold;
        }

        last_adjusted_timeout = timenow;
        if (verbose) {
            os_log_debug(OS_LOG_DEFAULT, "%s: adjusted rtq_reallyold to %d "
                "seconds\n", __func__, rtq_reallyold);
        }
        arg.found = arg.killed = 0;
        arg.updating = 1;
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
    }

    atv.tv_usec = 0;
    atv.tv_sec = arg.nextstop - timenow;
    /* re-arm the timer only if there's work to do */
    in_rtqtimo_run = 0;
    if (ours > 0) {
        in_sched_rtqtimo(&atv);
    } else if (verbose) {
        os_log_debug(OS_LOG_DEFAULT, "%s: not rescheduling timer\n", __func__);
    }
    lck_mtx_unlock(rnh_lock);
}

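/*
 * Schedule in_rtqtimo() to run after the given interval, unless a run
 * is already pending.  With no interval supplied, default to one tenth
 * of rtq_timeout (but at least one second).  Called with rnh_lock held.
 */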
static void
in_sched_rtqtimo(struct timeval *atv)
{
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    if (!in_rtqtimo_run) {
        struct timeval tv;

        if (atv == NULL) {
            tv.tv_usec = 0;
            tv.tv_sec = MAX(rtq_timeout / 10, 1);
            atv = &tv;
        }
        if (rt_verbose > 1) {
            os_log_debug(OS_LOG_DEFAULT, "%s: timer scheduled in "
                "T+%llus.%lluu\n", __func__,
                (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
        }
        in_rtqtimo_run = 1;
        timeout(in_rtqtimo, NULL, tvtohz(atv));
    }
}

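/*
 * Drain the cache: delete every RTPRF_OURS route right away, without
 * waiting for its expiry to pass (in_rtqkill() with draining set).
 */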
void
in_rtqdrain(void)
{
    struct radix_node_head *rnh;
    struct rtqk_arg arg;

    if (rt_verbose > 1) {
        os_log_debug(OS_LOG_DEFAULT, "%s: draining routes\n", __func__);
    }

    lck_mtx_lock(rnh_lock);
    rnh = rt_tables[AF_INET];
    VERIFY(rnh != NULL);
    bzero(&arg, sizeof(arg));
    arg.rnh = rnh;
    arg.draining = 1;
    rnh->rnh_walktree(rnh, in_rtqkill, &arg);
    lck_mtx_unlock(rnh_lock);
}

/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
    struct radix_node_head *rnh;

    /* If called from route_init(), make sure it is called exactly once */
    VERIFY(head != (void **)&rt_tables[AF_INET] || *head == NULL);

    if (!rn_inithead(head, off)) {
        return 0;
    }

    /*
     * We can get here from nfs_subs.c as well, in which case this
     * won't be for the real routing table and thus we're done;
     * this also takes care of the case when we're called more than
     * once from anywhere but route_init().
     */
    if (head != (void **)&rt_tables[AF_INET]) {
        return 1;    /* only do this for the real routing table */
    }
    rnh = *head;
    rnh->rnh_addaddr = in_addroute;
    rnh->rnh_deladdr = in_deleteroute;
    rnh->rnh_matchaddr = in_matroute;
    rnh->rnh_matchaddr_args = in_matroute_args;
    rnh->rnh_close = in_clsroute;
    return 1;
}

/*
 * This zaps old routes when the interface goes down or an interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 */
struct in_ifadown_arg {
    struct radix_node_head *rnh;
    struct ifaddr *ifa;
    int del;
};

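/*
 * rn_walktree() callback for in_ifadown(): delete any route whose ifa
 * matches the one going away (all such routes when 'del' is set,
 * otherwise all but RTF_STATIC ones), clearing the cloning flags first
 * so that rtrequest() does not prune the tree out from under the walk.
 */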
static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
    char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
    struct in_ifadown_arg *ap = xap;
    struct rtentry *rt = (struct rtentry *)rn;
    boolean_t verbose = (rt_verbose > 1);
    int err;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    RT_LOCK(rt);
    if (rt->rt_ifa == ap->ifa &&
        (ap->del || !(rt->rt_flags & RTF_STATIC))) {
        rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
        if (verbose) {
            os_log_debug(OS_LOG_DEFAULT, "%s: deleting route to %s->%s->%s, "
                "flags=0x%x\n", __func__, dbuf, gbuf,
                (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
                rt->rt_flags);
        }

        RT_ADDREF_LOCKED(rt);    /* for us to free below */
        /*
         * We need to disable the automatic prune that happens
         * in this case in rtrequest() because it will blow
         * away the pointers that rn_walktree() needs in order
         * to continue our descent.  We will end up deleting all
         * the routes that rtrequest() would have in any case,
         * so that behavior is not needed there.  Safe to drop
         * rt_lock and use rt_key, rt_gateway, since holding
         * rnh_lock here prevents another thread from calling
         * rt_setgate() on this route.
         */
        rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
        RT_UNLOCK(rt);
        err = rtrequest_locked(RTM_DELETE, rt_key(rt),
            rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
        if (err != 0) {
            RT_LOCK(rt);
            if (!verbose) {
                rt_str(rt, dbuf, sizeof(dbuf),
                    gbuf, sizeof(gbuf));
            }
            os_log_error(OS_LOG_DEFAULT, "%s: error deleting route to "
                "%s->%s->%s, flags=0x%x, err=%d\n", __func__,
                dbuf, gbuf, (rt->rt_ifp != NULL) ?
                rt->rt_ifp->if_xname : "", rt->rt_flags,
                err);
            RT_UNLOCK(rt);
        }
        rtfree_locked(rt);
    } else {
        RT_UNLOCK(rt);
    }
    return 0;
}

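/*
 * Walk the AF_INET tree and zap routes through the given address via
 * in_ifadownkill(); 'delete' additionally removes static routes.
 * Returns 1 if ifa is not an AF_INET address, 0 otherwise.  Called
 * with rnh_lock held.
 */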
int
in_ifadown(struct ifaddr *ifa, int delete)
{
    struct in_ifadown_arg arg;
    struct radix_node_head *rnh;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    /*
     * Holding rnh_lock here prevents the possibility of
     * ifa from changing (e.g. in_ifinit), so it is safe
     * to access its ifa_addr without locking.
     */
    if (ifa->ifa_addr->sa_family != AF_INET) {
        return 1;
    }

    /* trigger route cache reevaluation */
    routegenid_inet_update();

    arg.rnh = rnh = rt_tables[AF_INET];
    arg.ifa = ifa;
    arg.del = delete;
    rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
    IFA_LOCK_SPIN(ifa);
    ifa->ifa_flags &= ~IFA_ROUTE;
    IFA_UNLOCK(ifa);
    return 0;
}