/*
 * Copyright (c) 2004-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <kern/debug.h>
#include <netinet/in_arp.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel_types.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/protosw.h>
#include <string.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/dlil.h>
#include <net/if_types.h>
#include <net/if_llreach.h>
#include <net/route.h>
#include <net/nwk_wq.h>

#include <netinet/if_ether.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <kern/zalloc.h>

#include <kern/thread.h>
#include <kern/sched_prim.h>

#define	CONST_LLADDR(s)	((const u_char*)((s)->sdl_data + (s)->sdl_nlen))

static const size_t MAX_HW_LEN = 10;

/*
 * Synchronization notes:
 *
 * The global list of ARP entries is stored in llinfo_arp; an entry
 * gets inserted into the list when the route is created and gets
 * removed from the list when it is deleted; this is done as part
 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in arp_rtrequest().
 *
 * Because rnh_lock and rt_lock for the entry are held during those
 * operations, the same locks (and thus lock ordering) must be used
 * elsewhere to access the relevant data structure fields:
 *
 * la_le.{le_next,le_prev}, la_rt
 *
 *	- Routing lock (rnh_lock)
 *
 * la_holdq, la_asked, la_llreach, la_lastused, la_flags
 *
 *	- Routing entry lock (rt_lock)
 *
 * Due to the dependency on rt_lock, llinfo_arp has the same lifetime
 * as the route entry itself.  When a route is deleted (RTM_DELETE),
 * it is simply removed from the global list but the memory is not
 * freed until the route itself is freed.
 */
struct llinfo_arp {
	/*
	 * The following are protected by rnh_lock
	 */
	LIST_ENTRY(llinfo_arp) la_le;
	struct rtentry *la_rt;
	/*
	 * The following are protected by rt_lock
	 */
	class_queue_t la_holdq;		/* packets awaiting resolution */
	struct if_llreach *la_llreach;	/* link-layer reachability record */
	u_int64_t la_lastused;		/* last used timestamp */
	u_int32_t la_asked;		/* # of requests sent */
	u_int32_t la_maxtries;		/* retry limit */
	u_int64_t la_probeexp;		/* probe deadline timestamp */
	u_int32_t la_prbreq_cnt;	/* probe request count */
	u_int32_t la_flags;
#define	LLINFO_RTRFAIL_EVTSENT	0x1	/* sent an ARP event */
#define	LLINFO_PROBING		0x2	/* waiting for an ARP reply */
};
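
/*
 * Illustrative sketch (example only, not compiled) of the access pattern
 * the synchronization notes above imply: hold rnh_lock to walk the global
 * list linkage, then take rt_lock per entry before touching the
 * rt_lock-protected fields.  Compare in_arpdrain() and arp_timeout()
 * below, which follow exactly this shape.
 */
#if 0	/* example only */
static void
arp_walk_example(void)
{
	struct llinfo_arp *la;

	lck_mtx_lock(rnh_lock);		/* protects la_le linkage */
	LIST_FOREACH(la, &llinfo_arp, la_le) {
		struct rtentry *rt = la->la_rt;

		RT_LOCK(rt);		/* protects la_holdq, la_flags, ... */
		/* ... inspect or update the entry ... */
		RT_UNLOCK(rt);
	}
	lck_mtx_unlock(rnh_lock);
}
#endif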

static LIST_HEAD(, llinfo_arp) llinfo_arp;

static thread_call_t arp_timeout_tcall;
static int arp_timeout_run;	/* arp_timeout is scheduled to run */
static void arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1);
static void arp_sched_timeout(struct timeval *);

static thread_call_t arp_probe_tcall;
static int arp_probe_run;	/* arp_probe is scheduled to run */
static void arp_probe(thread_call_param_t arg0, thread_call_param_t arg1);
static void arp_sched_probe(struct timeval *);

static void arptfree(struct llinfo_arp *, void *);
static errno_t arp_lookup_route(const struct in_addr *, int,
    int, route_t *, unsigned int);
static int arp_getstat SYSCTL_HANDLER_ARGS;

static struct llinfo_arp *arp_llinfo_alloc(int);
static void arp_llinfo_free(void *);
static uint32_t arp_llinfo_flushq(struct llinfo_arp *);
static void arp_llinfo_purge(struct rtentry *);
static void arp_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
static void arp_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
static void arp_llinfo_refresh(struct rtentry *);

static __inline void arp_llreach_use(struct llinfo_arp *);
static __inline int arp_llreach_reachable(struct llinfo_arp *);
static void arp_llreach_alloc(struct rtentry *, struct ifnet *, void *,
    unsigned int, boolean_t, uint32_t *);

extern int tvtohz(struct timeval *);

static int arpinit_done;

SYSCTL_DECL(_net_link_ether);
SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "");

static int arpt_prune = (5*60*1);	/* walk list every 5 minutes */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, "");

#define	ARP_PROBE_TIME	7		/* seconds */
static u_int32_t arpt_probe = ARP_PROBE_TIME;
SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, probe_intvl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_probe, 0, "");

static int arpt_keep = (20*60);	/* once resolved, good for 20 more minutes */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, "");

static int arpt_down = 20;	/* once declared down, don't send for 20 sec */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, "");

static int arp_llreach_base = 120;	/* seconds */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0,
    "default ARP link-layer reachability max lifetime (in seconds)");

#define	ARP_UNICAST_LIMIT 3	/* # of probes until ARP refresh broadcast */
static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT,
    "number of unicast ARP refresh probes before using broadcast");

static u_int32_t arp_maxtries = 5;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, "");

static u_int32_t arp_maxhold = 16;
SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, maxhold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, "");

static int useloopback = 1;	/* use loopback interface for local traffic */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback,
    CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, "");

static int arp_proxyall = 0;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, "");

static int arp_sendllconflict = 0;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, "");

static int log_arp_warnings = 0;	/* Thread safe: no accumulated state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &log_arp_warnings, 0,
    "log arp warning messages");

static int keep_announcements = 1;	/* Thread safe: no aging of state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &keep_announcements, 0,
    "keep arp announcements");

static int send_conflicting_probes = 1;	/* Thread safe: no accumulated state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &send_conflicting_probes, 0,
    "send conflicting link-local arp probes");

static int arp_verbose;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, "");
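
/*
 * The knobs above surface in userland under net.link.ether.inet (the
 * node declared above).  For example, assuming the standard sysctl(8)
 * utility:
 *
 *	sysctl net.link.ether.inet.max_age	# reads arpt_keep
 *	sysctl -w net.link.ether.inet.verbose=1	# enables arp_verbose logging
 */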

/*
 * Generally protected by rnh_lock; use atomic operations on fields
 * that are also modified outside of that lock (if needed).
 */
struct arpstat arpstat __attribute__((aligned(sizeof (uint64_t))));
SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, arp_getstat, "S,arpstat",
    "ARP statistics (struct arpstat, net/if_arp.h)");

static struct zone *llinfo_arp_zone;
#define	LLINFO_ARP_ZONE_MAX	256		/* maximum elements in zone */
#define	LLINFO_ARP_ZONE_NAME	"llinfo_arp"	/* name for zone */

void
arp_init(void)
{
	VERIFY(!arpinit_done);

	LIST_INIT(&llinfo_arp);

	llinfo_arp_zone = zinit(sizeof (struct llinfo_arp),
	    LLINFO_ARP_ZONE_MAX * sizeof (struct llinfo_arp), 0,
	    LLINFO_ARP_ZONE_NAME);
	if (llinfo_arp_zone == NULL)
		panic("%s: failed allocating llinfo_arp_zone", __func__);

	zone_change(llinfo_arp_zone, Z_EXPAND, TRUE);
	zone_change(llinfo_arp_zone, Z_CALLERACCT, FALSE);

	arpinit_done = 1;
}

static struct llinfo_arp *
arp_llinfo_alloc(int how)
{
	struct llinfo_arp *la;

	la = (how == M_WAITOK) ? zalloc(llinfo_arp_zone) :
	    zalloc_noblock(llinfo_arp_zone);
	if (la != NULL) {
		bzero(la, sizeof (*la));
		/*
		 * The type of queue (Q_DROPHEAD) here is just a hint;
		 * the actual logic that works on this queue performs
		 * a head drop, details in arp_llinfo_addq().
		 */
		_qinit(&la->la_holdq, Q_DROPHEAD, (arp_maxhold == 0) ?
		    (uint32_t)-1 : arp_maxhold, QP_MBUF);
	}

	return (la);
}

static void
arp_llinfo_free(void *arg)
{
	struct llinfo_arp *la = arg;

	if (la->la_le.le_next != NULL || la->la_le.le_prev != NULL) {
		panic("%s: trying to free %p when it is in use", __func__, la);
		/* NOTREACHED */
	}

	/* Free any held packets */
	(void) arp_llinfo_flushq(la);

	/* Purge any link-layer info caching */
	VERIFY(la->la_rt->rt_llinfo == la);
	if (la->la_rt->rt_llinfo_purge != NULL)
		la->la_rt->rt_llinfo_purge(la->la_rt);

	zfree(llinfo_arp_zone, la);
}

static void
arp_llinfo_addq(struct llinfo_arp *la, struct mbuf *m)
{
	if (qlen(&la->la_holdq) >= qlimit(&la->la_holdq)) {
		struct mbuf *_m;
		/* prune less than CTL, else take what's at the head */
		_m = _getq_scidx_lt(&la->la_holdq, SCIDX_CTL);
		if (_m == NULL)
			_m = _getq(&la->la_holdq);
		VERIFY(_m != NULL);
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: dropping packet (scidx %u)\n",
			    __func__, MBUF_SCIDX(mbuf_get_service_class(_m)));
		}
		m_freem(_m);
		atomic_add_32(&arpstat.dropped, 1);
		atomic_add_32(&arpstat.held, -1);
	}
	_addq(&la->la_holdq, m);
	atomic_add_32(&arpstat.held, 1);
	if (arp_verbose) {
		log(LOG_DEBUG, "%s: enqueued packet (scidx %u), qlen now %u\n",
		    __func__, MBUF_SCIDX(mbuf_get_service_class(m)),
		    qlen(&la->la_holdq));
	}
}
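
/*
 * Example: with the default arp_maxhold of 16, a 17th packet queued while
 * resolution is pending forces the head drop above -- preferring to evict
 * a below-CTL service-class packet before whatever sits at the head -- so
 * the newest packet is always enqueued.
 */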

static uint32_t
arp_llinfo_flushq(struct llinfo_arp *la)
{
	uint32_t held = qlen(&la->la_holdq);

	if (held != 0) {
		atomic_add_32(&arpstat.purged, held);
		atomic_add_32(&arpstat.held, -held);
		_flushq(&la->la_holdq);
	}
	la->la_prbreq_cnt = 0;
	VERIFY(qempty(&la->la_holdq));
	return (held);
}

static void
arp_llinfo_purge(struct rtentry *rt)
{
	struct llinfo_arp *la = rt->rt_llinfo;

	RT_LOCK_ASSERT_HELD(rt);
	VERIFY(rt->rt_llinfo_purge == arp_llinfo_purge && la != NULL);

	if (la->la_llreach != NULL) {
		RT_CONVERT_LOCK(rt);
		ifnet_llreach_free(la->la_llreach);
		la->la_llreach = NULL;
	}
	la->la_lastused = 0;
}

static void
arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
{
	struct llinfo_arp *la = rt->rt_llinfo;
	struct if_llreach *lr = la->la_llreach;

	if (lr == NULL) {
		bzero(ri, sizeof (*ri));
		ri->ri_rssi = IFNET_RSSI_UNKNOWN;
		ri->ri_lqm = IFNET_LQM_THRESH_OFF;
		ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
	} else {
		IFLR_LOCK(lr);
		/* Export to rt_reach_info structure */
		ifnet_lr2ri(lr, ri);
		/* Export ARP send expiration (calendar) time */
		ri->ri_snd_expire =
		    ifnet_llreach_up2calexp(lr, la->la_lastused);
		IFLR_UNLOCK(lr);
	}
}

static void
arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
{
	struct llinfo_arp *la = rt->rt_llinfo;
	struct if_llreach *lr = la->la_llreach;

	if (lr == NULL) {
		bzero(iflri, sizeof (*iflri));
		iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
		iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
		iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
	} else {
		IFLR_LOCK(lr);
		/* Export to ifnet_llreach_info structure */
		ifnet_lr2iflri(lr, iflri);
		/* Export ARP send expiration (uptime) time */
		iflri->iflri_snd_expire =
		    ifnet_llreach_up2upexp(lr, la->la_lastused);
		IFLR_UNLOCK(lr);
	}
}

static void
arp_llinfo_refresh(struct rtentry *rt)
{
	uint64_t timenow = net_uptime();
	/*
	 * We can't expedite the refresh for entries that never
	 * expire, are static, or carry no link-layer info.
	 */
	if ((rt->rt_expire == 0) ||
	    (rt->rt_flags & RTF_STATIC) ||
	    !(rt->rt_flags & RTF_LLINFO)) {
		return;
	}

	if (rt->rt_expire > timenow)
		rt->rt_expire = timenow;
	return;
}
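
/*
 * Net effect of arp_llinfo_refresh(): pulling rt_expire back to "now"
 * makes the entry look expired, so the next arp_lookup_ip() on it kicks
 * off a fresh round of probing instead of waiting out arpt_keep.
 */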

void
arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen)
{
	/* Nothing more to do if it's disabled */
	if (arp_llreach_base == 0)
		return;

	ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen);
}

static __inline void
arp_llreach_use(struct llinfo_arp *la)
{
	if (la->la_llreach != NULL)
		la->la_lastused = net_uptime();
}

static __inline int
arp_llreach_reachable(struct llinfo_arp *la)
{
	struct if_llreach *lr;
	const char *why = NULL;

	/* Nothing more to do if it's disabled; pretend it's reachable */
	if (arp_llreach_base == 0)
		return (1);

	if ((lr = la->la_llreach) == NULL) {
		/*
		 * Link-layer reachability record isn't present for this
		 * ARP entry; pretend it's reachable and use it as is.
		 */
		return (1);
	} else if (ifnet_llreach_reachable(lr)) {
		/*
		 * Record is present, it's not shared with other ARP
		 * entries and a packet has recently been received
		 * from the remote host; consider it reachable.
		 */
		if (lr->lr_reqcnt == 1)
			return (1);

		/* Prime it up, if this is the first time */
		if (la->la_lastused == 0) {
			VERIFY(la->la_llreach != NULL);
			arp_llreach_use(la);
		}

		/*
		 * Record is present and shared with one or more ARP
		 * entries, and a packet has recently been received
		 * from the remote host.  Since it's shared by more
		 * than one IP address, we can't rely on the link-
		 * layer reachability alone; consider it reachable if
		 * this ARP entry has been used "recently."
		 */
		if (ifnet_llreach_reachable_delta(lr, la->la_lastused))
			return (1);

		why = "has alias(es) and hasn't been used in a while";
	} else {
		why = "haven't heard from it in a while";
	}

	if (arp_verbose > 1) {
		char tmp[MAX_IPv4_STR_LEN];
		u_int64_t now = net_uptime();

		log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; "
		    "%s [lastused %lld, lastrcvd %lld] secs ago\n",
		    if_name(lr->lr_ifp), inet_ntop(AF_INET,
		    &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof (tmp)), why,
		    (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1),
		    (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1));
	}
	return (0);
}

/*
 * Obtain a link-layer source cache entry for the sender.
 *
 * NOTE: This is currently only for ARP/Ethernet.
 */
static void
arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr,
    unsigned int alen, boolean_t solicited, uint32_t *p_rt_event_code)
{
	VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
	VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);

	if (arp_llreach_base != 0 && rt->rt_expire != 0 &&
	    !(rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
	    ifp->if_addrlen == IF_LLREACH_MAXLEN &&	/* Ethernet */
	    alen == ifp->if_addrlen) {
		struct llinfo_arp *la = rt->rt_llinfo;
		struct if_llreach *lr;
		const char *why = NULL, *type = "";

		/* Become a regular mutex, just in case */
		RT_CONVERT_LOCK(rt);

		if ((lr = la->la_llreach) != NULL) {
			type = (solicited ? "ARP reply" : "ARP announcement");
			/*
			 * If target has changed, create a new record;
			 * otherwise keep existing record.
			 */
			IFLR_LOCK(lr);
			if (bcmp(addr, lr->lr_key.addr, alen) != 0) {
				IFLR_UNLOCK(lr);
				/* Purge any link-layer info caching */
				VERIFY(rt->rt_llinfo_purge != NULL);
				rt->rt_llinfo_purge(rt);
				lr = NULL;
				why = " for different target HW address; "
				    "using new llreach record";
				*p_rt_event_code = ROUTE_LLENTRY_CHANGED;
			} else {
				/*
				 * If we were doing unicast probing, we need to
				 * deliver an event for neighbor cache resolution
				 */
				if (lr->lr_probes != 0)
					*p_rt_event_code = ROUTE_LLENTRY_RESOLVED;

				lr->lr_probes = 0;	/* reset probe count */
				IFLR_UNLOCK(lr);
				if (solicited) {
					why = " for same target HW address; "
					    "keeping existing llreach record";
				}
			}
		}

		if (lr == NULL) {
			lr = la->la_llreach = ifnet_llreach_alloc(ifp,
			    ETHERTYPE_IP, addr, alen, arp_llreach_base);
			if (lr != NULL) {
				lr->lr_probes = 0;	/* reset probe count */
				if (why == NULL)
					why = "creating new llreach record";
			}
			*p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
		}

		if (arp_verbose > 1 && lr != NULL && why != NULL) {
			char tmp[MAX_IPv4_STR_LEN];

			log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp),
			    type, why, inet_ntop(AF_INET,
			    &SIN(rt_key(rt))->sin_addr, tmp, sizeof (tmp)));
		}
	}
}

struct arptf_arg {
	boolean_t draining;
	boolean_t probing;
	uint32_t killed;
	uint32_t aging;
	uint32_t sticky;
	uint32_t found;
	uint32_t qlen;
	uint32_t qsize;
};

/*
 * Free an arp entry.
 */
static void
arptfree(struct llinfo_arp *la, void *arg)
{
	struct arptf_arg *ap = arg;
	struct rtentry *rt = la->la_rt;
	uint64_t timenow;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* rnh_lock acquired by caller protects rt from going away */
	RT_LOCK(rt);

	VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
	VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);

	ap->found++;
	timenow = net_uptime();

	/* If we're probing, flush out held packets upon probe expiration */
	if (ap->probing && (la->la_flags & LLINFO_PROBING) &&
	    la->la_probeexp <= timenow) {
		struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
		if (sdl != NULL)
			sdl->sdl_alen = 0;
		(void) arp_llinfo_flushq(la);
		/*
		 * Enqueue work item to invoke callback for this route entry
		 */
		route_event_enqueue_nwk_wq_entry(rt, NULL,
		    ROUTE_LLENTRY_UNREACH, NULL, TRUE);
	}

	/*
	 * The following is mostly being used to arm the timer
	 * again and for logging.
	 * qlen is used to re-arm the timer.  Pure probe requests
	 * therefore count as zero-length packets: they contribute
	 * to the queue length but not to its size.
	 */
	ap->qlen += qlen(&la->la_holdq);
	ap->qlen += la->la_prbreq_cnt;
	ap->qsize += qsize(&la->la_holdq);

	if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
		ap->sticky++;
		/* ARP entry is permanent? */
		if (rt->rt_expire == 0) {
			RT_UNLOCK(rt);
			return;
		}
	}

	/* ARP entry hasn't expired and we're not draining? */
	if (!ap->draining && rt->rt_expire > timenow) {
		RT_UNLOCK(rt);
		ap->aging++;
		return;
	}

	if (rt->rt_refcnt > 0) {
		/*
		 * ARP entry has expired, with outstanding refcnt.
		 * If we're not draining, force ARP query to be
		 * generated next time this entry is used.
		 */
		if (!ap->draining && !ap->probing) {
			struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
			if (sdl != NULL)
				sdl->sdl_alen = 0;
			la->la_asked = 0;
			rt->rt_flags &= ~RTF_REJECT;
		}
		RT_UNLOCK(rt);
	} else if (!(rt->rt_flags & RTF_STATIC) && !ap->probing) {
		/*
		 * ARP entry has no outstanding refcnt, and we're either
		 * draining or it has expired; delete it from the routing
		 * table.  Safe to drop rt_lock and use rt_key, since holding
		 * rnh_lock here prevents another thread from calling
		 * rt_setgate() on this route.
		 */
		RT_UNLOCK(rt);
		rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
		    rt_mask(rt), 0, NULL);
		arpstat.timeouts++;
		ap->killed++;
	} else {
		/* ARP entry is static; let it linger */
		RT_UNLOCK(rt);
	}
}

void
in_arpdrain(void *arg)
{
#pragma unused(arg)
	struct llinfo_arp *la, *ola;
	struct arptf_arg farg;

	if (arp_verbose)
		log(LOG_DEBUG, "%s: draining ARP entries\n", __func__);

	lck_mtx_lock(rnh_lock);
	la = llinfo_arp.lh_first;
	bzero(&farg, sizeof (farg));
	farg.draining = TRUE;
	while ((ola = la) != NULL) {
		la = la->la_le.le_next;
		arptfree(ola, &farg);
	}
	if (arp_verbose) {
		log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
		    "%u pkts held (%u bytes)\n", __func__, farg.found,
		    farg.aging, farg.sticky, farg.killed, farg.qlen,
		    farg.qsize);
	}
	lck_mtx_unlock(rnh_lock);
}

/*
 * Timeout routine.  Age arp_tab entries periodically.
 */
static void
arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1)
{
#pragma unused(arg0, arg1)
	struct llinfo_arp *la, *ola;
	struct timeval atv;
	struct arptf_arg farg;

	lck_mtx_lock(rnh_lock);
	la = llinfo_arp.lh_first;
	bzero(&farg, sizeof (farg));
	while ((ola = la) != NULL) {
		la = la->la_le.le_next;
		arptfree(ola, &farg);
	}
	if (arp_verbose) {
		log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
		    "%u pkts held (%u bytes)\n", __func__, farg.found,
		    farg.aging, farg.sticky, farg.killed, farg.qlen,
		    farg.qsize);
	}
	atv.tv_usec = 0;
	atv.tv_sec = MAX(arpt_prune, 5);
	/* re-arm the timer if there's work to do */
	arp_timeout_run = 0;
	if (farg.aging > 0)
		arp_sched_timeout(&atv);
	else if (arp_verbose)
		log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
	lck_mtx_unlock(rnh_lock);
}

static void
arp_sched_timeout(struct timeval *atv)
{
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	if (!arp_timeout_run) {
		struct timeval tv;
		uint64_t deadline = 0;

		if (arp_timeout_tcall == NULL) {
			arp_timeout_tcall =
			    thread_call_allocate(arp_timeout, NULL);
			VERIFY(arp_timeout_tcall != NULL);
		}

		if (atv == NULL) {
			tv.tv_usec = 0;
			tv.tv_sec = MAX(arpt_prune / 5, 1);
			atv = &tv;
		}
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: timer scheduled in "
			    "T+%llus.%lluu\n", __func__,
			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
		}
		arp_timeout_run = 1;

		clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
		    mach_absolute_time(), &deadline);
		(void) thread_call_enter_delayed(arp_timeout_tcall, deadline);
	}
}
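
/*
 * Worked example of the scheduling arithmetic above: with the default
 * arpt_prune of 300 seconds, a NULL atv means MAX(arpt_prune / 5, 1) =
 * 60 seconds for the first arming, while arp_timeout() re-arms itself
 * with MAX(arpt_prune, 5) = 300 seconds; either interval is converted
 * to nanoseconds and turned into a mach absolute-time deadline by
 * clock_deadline_for_periodic_event().
 */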

/*
 * Probe routine.
 */
static void
arp_probe(thread_call_param_t arg0, thread_call_param_t arg1)
{
#pragma unused(arg0, arg1)
	struct llinfo_arp *la, *ola;
	struct timeval atv;
	struct arptf_arg farg;

	lck_mtx_lock(rnh_lock);
	la = llinfo_arp.lh_first;
	bzero(&farg, sizeof (farg));
	farg.probing = TRUE;
	while ((ola = la) != NULL) {
		la = la->la_le.le_next;
		arptfree(ola, &farg);
	}
	if (arp_verbose) {
		log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
		    "%u pkts held (%u bytes)\n", __func__, farg.found,
		    farg.aging, farg.sticky, farg.killed, farg.qlen,
		    farg.qsize);
	}
	atv.tv_usec = 0;
	atv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
	/* re-arm the probe if there's work to do */
	arp_probe_run = 0;
	if (farg.qlen > 0)
		arp_sched_probe(&atv);
	else if (arp_verbose)
		log(LOG_DEBUG, "%s: not rescheduling probe\n", __func__);
	lck_mtx_unlock(rnh_lock);
}

static void
arp_sched_probe(struct timeval *atv)
{
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	if (!arp_probe_run) {
		struct timeval tv;
		uint64_t deadline = 0;

		if (arp_probe_tcall == NULL) {
			arp_probe_tcall =
			    thread_call_allocate(arp_probe, NULL);
			VERIFY(arp_probe_tcall != NULL);
		}

		if (atv == NULL) {
			tv.tv_usec = 0;
			tv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
			atv = &tv;
		}
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: probe scheduled in "
			    "T+%llus.%lluu\n", __func__,
			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
		}
		arp_probe_run = 1;

		clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
		    mach_absolute_time(), &deadline);
		(void) thread_call_enter_delayed(arp_probe_tcall, deadline);
	}
}

/*
 * ifa_rtrequest() callback
 */
static void
arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
{
#pragma unused(sa)
	struct sockaddr *gate = rt->rt_gateway;
	struct llinfo_arp *la = rt->rt_llinfo;
	static struct sockaddr_dl null_sdl =
	    { .sdl_len = sizeof (null_sdl), .sdl_family = AF_LINK };
	uint64_t timenow;
	char buf[MAX_IPv4_STR_LEN];

	VERIFY(arpinit_done);
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (rt->rt_flags & RTF_GATEWAY)
		return;

	timenow = net_uptime();
	switch (req) {
	case RTM_ADD:
		/*
		 * XXX: If this is a manually added route to an interface,
		 * such as an older version of routed or gated might provide,
		 * restore the cloning bit.
		 */
		if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL &&
		    SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST)
			rt->rt_flags |= RTF_CLONING;

		if (rt->rt_flags & RTF_CLONING) {
			/*
			 * Case 1: This route should come from a route to iface.
			 */
			if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
				gate = rt->rt_gateway;
				SDL(gate)->sdl_type = rt->rt_ifp->if_type;
				SDL(gate)->sdl_index = rt->rt_ifp->if_index;
				/*
				 * In case we're called before 1.0 sec.
				 * has elapsed.
				 */
				rt_setexpire(rt, MAX(timenow, 1));
			}
			break;
		}
		/* Announce a new entry if requested. */
		if (rt->rt_flags & RTF_ANNOUNCE) {
			if (la != NULL)
				arp_llreach_use(la); /* Mark use timestamp */
			RT_UNLOCK(rt);
			dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST,
			    SDL(gate), rt_key(rt), NULL, rt_key(rt), 0);
			RT_LOCK(rt);
			arpstat.txannounces++;
		}
		/* FALLTHRU */
	case RTM_RESOLVE:
		if (gate->sa_family != AF_LINK ||
		    gate->sa_len < sizeof (null_sdl)) {
			arpstat.invalidreqs++;
			log(LOG_ERR, "%s: route to %s has bad gateway address "
			    "(sa_family %u sa_len %u) on %s\n",
			    __func__, inet_ntop(AF_INET,
			    &SIN(rt_key(rt))->sin_addr.s_addr, buf,
			    sizeof (buf)), gate->sa_family, gate->sa_len,
			    if_name(rt->rt_ifp));
			break;
		}
		SDL(gate)->sdl_type = rt->rt_ifp->if_type;
		SDL(gate)->sdl_index = rt->rt_ifp->if_index;

		if (la != NULL)
			break; /* This happens on a route change */

		/*
		 * Case 2: This route may come from cloning, or a manual route
		 * add with a LL address.
		 */
		rt->rt_llinfo = la = arp_llinfo_alloc(M_WAITOK);
		if (la == NULL) {
			arpstat.reqnobufs++;
			break;
		}
		rt->rt_llinfo_get_ri = arp_llinfo_get_ri;
		rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri;
		rt->rt_llinfo_purge = arp_llinfo_purge;
		rt->rt_llinfo_free = arp_llinfo_free;
		rt->rt_llinfo_refresh = arp_llinfo_refresh;
		rt->rt_flags |= RTF_LLINFO;
		la->la_rt = rt;
		LIST_INSERT_HEAD(&llinfo_arp, la, la_le);
		arpstat.inuse++;

		/* We have at least one entry; arm the timer if not already */
		arp_sched_timeout(NULL);

		/*
		 * This keeps the multicast addresses from showing up
		 * in `arp -a' listings as unresolved.  It's not actually
		 * functional.  The same is done for broadcast.  For an
		 * IPv4 link-local address, keep the entry around even
		 * after it has expired.
		 */
		if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
			RT_UNLOCK(rt);
			dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate,
			    sizeof (struct sockaddr_dl));
			RT_LOCK(rt);
			rt_setexpire(rt, 0);
		} else if (in_broadcast(SIN(rt_key(rt))->sin_addr,
		    rt->rt_ifp)) {
			struct sockaddr_dl *gate_ll = SDL(gate);
			size_t broadcast_len;
			ifnet_llbroadcast_copy_bytes(rt->rt_ifp,
			    LLADDR(gate_ll), sizeof (gate_ll->sdl_data),
			    &broadcast_len);
			gate_ll->sdl_alen = broadcast_len;
			gate_ll->sdl_family = AF_LINK;
			gate_ll->sdl_len = sizeof (struct sockaddr_dl);
			/* In case we're called before 1.0 sec. has elapsed */
			rt_setexpire(rt, MAX(timenow, 1));
		} else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))->
		    sin_addr.s_addr))) {
			rt->rt_flags |= RTF_STATIC;
		}

		/* Set default maximum number of retries */
		la->la_maxtries = arp_maxtries;

		/* Become a regular mutex, just in case */
		RT_CONVERT_LOCK(rt);
		IFA_LOCK_SPIN(rt->rt_ifa);
		if (SIN(rt_key(rt))->sin_addr.s_addr ==
		    (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
			IFA_UNLOCK(rt->rt_ifa);
			/*
			 * This test used to be
			 *	if (loif.if_flags & IFF_UP)
			 * It allowed local traffic to be forced through the
			 * hardware by configuring the loopback down.  However,
			 * it causes problems during network configuration
			 * for boards that can't receive packets they send.
			 * It is now necessary to clear "useloopback" and
			 * remove the route to force traffic out to the
			 * hardware.
			 */
			rt_setexpire(rt, 0);
			ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(SDL(gate)),
			    SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
			if (useloopback) {
				if (rt->rt_ifp != lo_ifp) {
					/*
					 * Purge any link-layer info caching.
					 */
					if (rt->rt_llinfo_purge != NULL)
						rt->rt_llinfo_purge(rt);

					/*
					 * Adjust route ref count for the
					 * interfaces.
					 */
					if (rt->rt_if_ref_fn != NULL) {
						rt->rt_if_ref_fn(lo_ifp, 1);
						rt->rt_if_ref_fn(rt->rt_ifp, -1);
					}
				}
				rt->rt_ifp = lo_ifp;
				/*
				 * If rmx_mtu is not locked, update it
				 * to the MTU used by the new interface.
				 */
				if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
					rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
			}
		} else {
			IFA_UNLOCK(rt->rt_ifa);
		}
		break;

	case RTM_DELETE:
		if (la == NULL)
			break;
		/*
		 * Unchain it but defer the actual freeing until the route
		 * itself is to be freed.  rt->rt_llinfo still points to
		 * llinfo_arp, and likewise, la->la_rt still points to this
		 * route entry, except that RTF_LLINFO is now cleared.
		 */
		LIST_REMOVE(la, la_le);
		la->la_le.le_next = NULL;
		la->la_le.le_prev = NULL;
		arpstat.inuse--;

		/*
		 * Purge any link-layer info caching.
		 */
		if (rt->rt_llinfo_purge != NULL)
			rt->rt_llinfo_purge(rt);

		rt->rt_flags &= ~RTF_LLINFO;
		(void) arp_llinfo_flushq(la);
	}
}

/*
 * convert hardware address to hex string for logging errors.
 */
static const char *
sdl_addr_to_hex(const struct sockaddr_dl *sdl, char *orig_buf, int buflen)
{
	char *buf = orig_buf;
	int i;
	const u_char *lladdr = (u_char *)(size_t)sdl->sdl_data;
	int maxbytes = buflen / 3;

	if (maxbytes > sdl->sdl_alen) {
		maxbytes = sdl->sdl_alen;
	}
	*buf = '\0';
	for (i = 0; i < maxbytes; i++) {
		snprintf(buf, 3, "%02x", lladdr[i]);
		buf += 2;
		*buf = (i == maxbytes - 1) ? '\0' : ':';
		buf++;
	}
	return (orig_buf);
}
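
/*
 * Example: a 6-byte Ethernet address comes back as "00:11:22:33:44:55"
 * (17 characters plus the NUL), which is why callers size their buffers
 * as 3 * MAX_HW_LEN -- three output bytes per address byte.
 */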

/*
 * arp_lookup_route will lookup the route for a given address.
 *
 * The address must be for a host on a local network on this interface.
 * If the returned route is non-NULL, the route is locked and the caller
 * is responsible for unlocking it and releasing its reference.
 */
static errno_t
arp_lookup_route(const struct in_addr *addr, int create, int proxy,
    route_t *route, unsigned int ifscope)
{
	struct sockaddr_inarp sin =
	    { sizeof (sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 };
	const char *why = NULL;
	errno_t error = 0;
	route_t rt;

	*route = NULL;

	sin.sin_addr.s_addr = addr->s_addr;
	sin.sin_other = proxy ? SIN_PROXY : 0;

	/*
	 * If the destination is a link-local address, don't
	 * constrain the lookup (don't scope it).
	 */
	if (IN_LINKLOCAL(ntohl(addr->s_addr)))
		ifscope = IFSCOPE_NONE;

	rt = rtalloc1_scoped((struct sockaddr *)&sin, create, 0, ifscope);
	if (rt == NULL)
		return (ENETUNREACH);

	RT_LOCK(rt);

	if (rt->rt_flags & RTF_GATEWAY) {
		why = "host is not on local network";
		error = ENETUNREACH;
	} else if (!(rt->rt_flags & RTF_LLINFO)) {
		why = "could not allocate llinfo";
		error = ENOMEM;
	} else if (rt->rt_gateway->sa_family != AF_LINK) {
		why = "gateway route is not ours";
		error = EPROTONOSUPPORT;
	}

	if (error != 0) {
		if (create && (arp_verbose || log_arp_warnings)) {
			char tmp[MAX_IPv4_STR_LEN];
			log(LOG_DEBUG, "%s: link#%d %s failed: %s\n",
			    __func__, ifscope, inet_ntop(AF_INET, addr, tmp,
			    sizeof (tmp)), why);
		}

		/*
		 * If there are no references to this route, and it is
		 * a cloned route, and not static, and ARP had created
		 * the route, then purge it from the routing table as
		 * it is probably bogus.
		 */
		if (rt->rt_refcnt == 1 &&
		    (rt->rt_flags & (RTF_WASCLONED | RTF_STATIC)) ==
		    RTF_WASCLONED) {
			/*
			 * Prevent another thread from modifying rt_key,
			 * rt_gateway via rt_setgate() after rt_lock is
			 * dropped by marking the route as defunct.
			 */
			rt->rt_flags |= RTF_CONDEMNED;
			RT_UNLOCK(rt);
			rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
			    rt_mask(rt), rt->rt_flags, NULL);
			rtfree(rt);
		} else {
			RT_REMREF_LOCKED(rt);
			RT_UNLOCK(rt);
		}
		return (error);
	}

	/*
	 * Caller releases reference and does RT_UNLOCK(rt).
	 */
	*route = rt;
	return (0);
}
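
/*
 * Sketch of the caller contract (compare the lookups in arp_lookup_ip()
 * and arp_ip_handle_input() below): on success the route comes back
 * locked with a reference held, and the caller must undo both.  The
 * "dst" and "ifp" variables here are hypothetical caller-side stand-ins.
 */
#if 0	/* example only; not compiled */
	route_t rt = NULL;

	if (arp_lookup_route(&dst->sin_addr, 1, 0, &rt, ifp->if_index) == 0) {
		RT_LOCK_ASSERT_HELD(rt);
		/* ... use rt ... */
		RT_REMREF_LOCKED(rt);	/* drop the callee's reference */
		RT_UNLOCK(rt);
	}
#endif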

boolean_t
arp_is_entry_probing(route_t p_route)
{
	struct llinfo_arp *llinfo = p_route->rt_llinfo;

	if (llinfo != NULL &&
	    llinfo->la_llreach != NULL &&
	    llinfo->la_llreach->lr_probes != 0)
		return (TRUE);

	return (FALSE);
}

/*
 * This is the ARP pre-output routine; care must be taken to ensure that
 * the "hint" route never gets freed via rtfree(), since the caller may
 * have stored it inside a struct route with a reference held for that
 * placeholder.
 */
errno_t
arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
    struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
    mbuf_t packet)
{
	route_t route = NULL;	/* output route */
	errno_t result = 0;
	struct sockaddr_dl *gateway;
	struct llinfo_arp *llinfo = NULL;
	boolean_t usable, probing = FALSE;
	uint64_t timenow;
	struct if_llreach *lr;
	struct ifaddr *rt_ifa;
	struct sockaddr *sa;
	uint32_t rtflags;
	struct sockaddr_dl sdl;
	boolean_t send_probe_notif = FALSE;

	if (ifp == NULL || net_dest == NULL)
		return (EINVAL);

	if (net_dest->sin_family != AF_INET)
		return (EAFNOSUPPORT);

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
		return (ENETDOWN);

	/*
	 * If we were given a route, verify the route and grab the gateway
	 */
	if (hint != NULL) {
		/*
		 * Callee holds a reference on the route and returns
		 * with the route entry locked, upon success.
		 */
		result = route_to_gwroute((const struct sockaddr *)
		    net_dest, hint, &route);
		if (result != 0)
			return (result);
		if (route != NULL)
			RT_LOCK_ASSERT_HELD(route);
	}

	if ((packet != NULL && (packet->m_flags & M_BCAST)) ||
	    in_broadcast(net_dest->sin_addr, ifp)) {
		size_t broadcast_len;
		bzero(ll_dest, ll_dest_len);
		result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest),
		    ll_dest_len - offsetof(struct sockaddr_dl, sdl_data),
		    &broadcast_len);
		if (result == 0) {
			ll_dest->sdl_alen = broadcast_len;
			ll_dest->sdl_family = AF_LINK;
			ll_dest->sdl_len = sizeof (struct sockaddr_dl);
		}
		goto release;
	}
	if ((packet != NULL && (packet->m_flags & M_MCAST)) ||
	    ((ifp->if_flags & IFF_MULTICAST) &&
	    IN_MULTICAST(ntohl(net_dest->sin_addr.s_addr)))) {
		if (route != NULL)
			RT_UNLOCK(route);
		result = dlil_resolve_multi(ifp,
		    (const struct sockaddr *)net_dest,
		    (struct sockaddr *)ll_dest, ll_dest_len);
		if (route != NULL)
			RT_LOCK(route);
		goto release;
	}

	/*
	 * If we didn't find a route, or the route doesn't have
	 * link layer information, trigger the creation of the
	 * route and link layer information.
	 */
	if (route == NULL || route->rt_llinfo == NULL) {
		/* Clean up now while we can */
		if (route != NULL) {
			if (route == hint) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
			} else {
				RT_UNLOCK(route);
				rtfree(route);
			}
		}
		/*
		 * Callee holds a reference on the route and returns
		 * with the route entry locked, upon success.
		 */
		result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route,
		    ifp->if_index);
		if (result == 0)
			RT_LOCK_ASSERT_HELD(route);
	}

	if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) {
		/* In case result is 0 but no route, return an error */
		if (result == 0)
			result = EHOSTUNREACH;

		if (route != NULL && route->rt_llinfo == NULL) {
			char tmp[MAX_IPv4_STR_LEN];
			log(LOG_ERR, "%s: can't allocate llinfo for %s\n",
			    __func__, inet_ntop(AF_INET, &net_dest->sin_addr,
			    tmp, sizeof (tmp)));
		}
		goto release;
	}

	/*
	 * Now that we have the right route, is it filled in?
	 */
	gateway = SDL(route->rt_gateway);
	timenow = net_uptime();
	VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
	VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);

	usable = ((route->rt_expire == 0 || route->rt_expire > timenow) &&
	    gateway != NULL && gateway->sdl_family == AF_LINK &&
	    gateway->sdl_alen != 0);

	if (usable) {
		boolean_t unreachable = !arp_llreach_reachable(llinfo);

		/* Entry is usable, so fill in info for caller */
		bcopy(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len));
		result = 0;
		arp_llreach_use(llinfo); /* Mark use timestamp */

		lr = llinfo->la_llreach;
		if (lr == NULL)
			goto release;
		rt_ifa = route->rt_ifa;

		/* Become a regular mutex, just in case */
		RT_CONVERT_LOCK(route);
		IFLR_LOCK_SPIN(lr);

		if ((unreachable || (llinfo->la_flags & LLINFO_PROBING)) &&
		    lr->lr_probes < arp_unicast_lim) {
			/*
			 * Mark the entry with the la_probeexp deadline so
			 * that the probe timer gets scheduled (if it isn't
			 * already).  This gets cleared the moment we get
			 * an ARP reply.
			 */
			probing = TRUE;
			if (lr->lr_probes == 0) {
				llinfo->la_probeexp = (timenow + arpt_probe);
				llinfo->la_flags |= LLINFO_PROBING;
				/*
				 * Provide notification that ARP unicast
				 * probing has started.
				 * We only do it for the first unicast probe
				 * attempt.
				 */
				send_probe_notif = TRUE;
			}

			/*
			 * Start the unicast probe and anticipate a reply;
			 * afterwards, return existing entry to caller and
			 * let it be used anyway.  If peer is non-existent
			 * we'll broadcast ARP next time around.
			 */
			lr->lr_probes++;
			bzero(&sdl, sizeof (sdl));
			sdl.sdl_alen = ifp->if_addrlen;
			bcopy(&lr->lr_key.addr, LLADDR(&sdl),
			    ifp->if_addrlen);
			IFLR_UNLOCK(lr);
			IFA_LOCK_SPIN(rt_ifa);
			IFA_ADDREF_LOCKED(rt_ifa);
			sa = rt_ifa->ifa_addr;
			IFA_UNLOCK(rt_ifa);
			rtflags = route->rt_flags;
			RT_UNLOCK(route);
			dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
			    (const struct sockaddr_dl *)&sdl,
			    (const struct sockaddr *)net_dest, rtflags);
			IFA_REMREF(rt_ifa);
			RT_LOCK(route);
			goto release;
		} else {
			IFLR_UNLOCK(lr);
			if (!unreachable &&
			    !(llinfo->la_flags & LLINFO_PROBING)) {
				/*
				 * Normal case where peer is still reachable,
				 * we're not probing and if_addrlen is anything
				 * but IF_LLREACH_MAXLEN.
				 */
				goto release;
			}
		}
	}

	if (ifp->if_flags & IFF_NOARP) {
		result = ENOTSUP;
		goto release;
	}

	/*
	 * Route wasn't complete/valid; we need to send out ARP request.
	 * If we've exceeded the limit of la_holdq, drop from the head
	 * of queue and add this packet to the tail.  If we end up with
	 * RTF_REJECT below, we'll dequeue this from tail and have the
	 * caller free the packet instead.  It's safe to do that since
	 * we still hold the route's rt_lock.
	 */
	if (packet != NULL)
		arp_llinfo_addq(llinfo, packet);
	else
		llinfo->la_prbreq_cnt++;
	/*
	 * Regardless of permanent vs. expirable entry, we need to
	 * avoid having packets sit in la_holdq forever; thus mark the
	 * entry with la_probeexp deadline to trigger the probe timer
	 * to be scheduled (if not already).  This gets cleared the
	 * moment we get an ARP reply.
	 */
	probing = TRUE;
	if ((qlen(&llinfo->la_holdq) + llinfo->la_prbreq_cnt) == 1) {
		llinfo->la_probeexp = (timenow + arpt_probe);
		llinfo->la_flags |= LLINFO_PROBING;
	}

	if (route->rt_expire) {
		route->rt_flags &= ~RTF_REJECT;
		if (llinfo->la_asked == 0 || route->rt_expire != timenow) {
			rt_setexpire(route, timenow);
			if (llinfo->la_asked++ < llinfo->la_maxtries) {
				struct kev_msg ev_msg;
				struct kev_in_arpfailure in_arpfailure;
				boolean_t sendkev = FALSE;

				rt_ifa = route->rt_ifa;
				lr = llinfo->la_llreach;
				/* Become a regular mutex, just in case */
				RT_CONVERT_LOCK(route);
				/* Update probe count, if applicable */
				if (lr != NULL) {
					IFLR_LOCK_SPIN(lr);
					lr->lr_probes++;
					IFLR_UNLOCK(lr);
				}
				if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
				    route->rt_flags & RTF_ROUTER &&
				    llinfo->la_asked > 1) {
					sendkev = TRUE;
					llinfo->la_flags |=
					    LLINFO_RTRFAIL_EVTSENT;
				}
				IFA_LOCK_SPIN(rt_ifa);
				IFA_ADDREF_LOCKED(rt_ifa);
				sa = rt_ifa->ifa_addr;
				IFA_UNLOCK(rt_ifa);
				arp_llreach_use(llinfo); /* Mark use tstamp */
				rtflags = route->rt_flags;
				RT_UNLOCK(route);
				dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
				    NULL, (const struct sockaddr *)net_dest,
				    rtflags);
				IFA_REMREF(rt_ifa);
				if (sendkev) {
					bzero(&ev_msg, sizeof (ev_msg));
					bzero(&in_arpfailure,
					    sizeof (in_arpfailure));
					in_arpfailure.link_data.if_family =
					    ifp->if_family;
					in_arpfailure.link_data.if_unit =
					    ifp->if_unit;
					strlcpy(in_arpfailure.link_data.if_name,
					    ifp->if_name, IFNAMSIZ);
					ev_msg.vendor_code = KEV_VENDOR_APPLE;
					ev_msg.kev_class = KEV_NETWORK_CLASS;
					ev_msg.kev_subclass = KEV_INET_SUBCLASS;
					ev_msg.event_code =
					    KEV_INET_ARPRTRFAILURE;
					ev_msg.dv[0].data_ptr = &in_arpfailure;
					ev_msg.dv[0].data_length =
					    sizeof (struct kev_in_arpfailure);
					dlil_post_complete_msg(NULL, &ev_msg);
				}
				result = EJUSTRETURN;
				RT_LOCK(route);
				goto release;
			} else {
				route->rt_flags |= RTF_REJECT;
				rt_setexpire(route,
				    route->rt_expire + arpt_down);
				llinfo->la_asked = 0;
				/*
				 * Remove the packet that was just added above;
				 * don't free it since we're not returning
				 * EJUSTRETURN.  The caller will handle the
				 * freeing.  Since we haven't dropped rt_lock
				 * from the time of _addq() above, this packet
				 * must be at the tail.
				 */
				if (packet != NULL) {
					struct mbuf *_m =
					    _getq_tail(&llinfo->la_holdq);
					atomic_add_32(&arpstat.held, -1);
					VERIFY(_m == packet);
				}
				result = EHOSTUNREACH;

				/*
				 * Enqueue work item to invoke callback for
				 * this route entry
				 */
				route_event_enqueue_nwk_wq_entry(route, NULL,
				    ROUTE_LLENTRY_UNREACH, NULL, TRUE);
				goto release;
			}
		}
	}

	/* The packet is now held inside la_holdq */
	result = EJUSTRETURN;

release:
	if (result == EHOSTUNREACH)
		atomic_add_32(&arpstat.dropped, 1);

	if (route != NULL) {
		if (send_probe_notif) {
			route_event_enqueue_nwk_wq_entry(route, NULL,
			    ROUTE_LLENTRY_PROBED, NULL, TRUE);

			if (route->rt_flags & RTF_ROUTER) {
				struct radix_node_head *rnh = NULL;
				struct route_event rt_ev;
				route_event_init(&rt_ev, route, NULL,
				    ROUTE_LLENTRY_PROBED);
				/*
				 * We already have a reference on rt.  The
				 * function frees it before returning.
				 */
				RT_UNLOCK(route);
				lck_mtx_lock(rnh_lock);
				rnh = rt_tables[AF_INET];

				if (rnh != NULL)
					(void) rnh->rnh_walktree(rnh,
					    route_event_walktree,
					    (void *)&rt_ev);
				lck_mtx_unlock(rnh_lock);
				RT_LOCK(route);
			}
		}

		if (route == hint) {
			RT_REMREF_LOCKED(route);
			RT_UNLOCK(route);
		} else {
			RT_UNLOCK(route);
			rtfree(route);
		}
	}
	if (probing) {
		/* Do this after we drop rt_lock to preserve ordering */
		lck_mtx_lock(rnh_lock);
		arp_sched_probe(NULL);
		lck_mtx_unlock(rnh_lock);
	}
	return (result);
}
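
/*
 * Sketch of how a pre-output path might consume arp_lookup_ip():
 * EJUSTRETURN means the packet was queued on la_holdq pending resolution
 * and must not be freed, while any other error leaves ownership with the
 * caller.  The "dest_ip", "hint_rt" and "m" variables here are
 * hypothetical caller-side stand-ins.
 */
#if 0	/* example only; not compiled */
	struct sockaddr_dl ll_dest;
	errno_t err;

	err = arp_lookup_ip(ifp, dest_ip, &ll_dest, sizeof (ll_dest),
	    hint_rt, m);
	if (err == 0) {
		/* ll_dest is filled in; transmit m */
	} else if (err == EJUSTRETURN) {
		/* m is held awaiting the ARP reply; do not free it */
	} else {
		m_freem(m);	/* resolution failed; drop */
	}
#endif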
1578
1579errno_t
1580arp_ip_handle_input(ifnet_t ifp, u_short arpop,
1581 const struct sockaddr_dl *sender_hw, const struct sockaddr_in *sender_ip,
1582 const struct sockaddr_in *target_ip)
1583{
1584 char ipv4str[MAX_IPv4_STR_LEN];
1585 struct sockaddr_dl proxied;
1586 struct sockaddr_dl *gateway, *target_hw = NULL;
1587 struct ifaddr *ifa;
1588 struct in_ifaddr *ia;
1589 struct in_ifaddr *best_ia = NULL;
1590 struct sockaddr_in best_ia_sin;
1591 route_t route = NULL;
1592 char buf[3 * MAX_HW_LEN]; /* enough for MAX_HW_LEN byte hw address */
1593 struct llinfo_arp *llinfo;
1594 errno_t error;
1595 int created_announcement = 0;
1596 int bridged = 0, is_bridge = 0;
1597 uint32_t rt_evcode = 0;
1598
1599 /*
1600 * Here and other places within this routine where we don't hold
1601 * rnh_lock, trade accuracy for speed for the common scenarios
1602 * and avoid the use of atomic updates.
1603 */
1604 arpstat.received++;
1605
1606 /* Do not respond to requests for 0.0.0.0 */
1607 if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST)
1608 goto done;
1609
1610 if (ifp->if_bridge)
1611 bridged = 1;
1612 if (ifp->if_type == IFT_BRIDGE)
1613 is_bridge = 1;
1614
1615 if (arpop == ARPOP_REPLY)
1616 arpstat.rxreplies++;
1617
1618 /*
1619 * Determine if this ARP is for us
1620 * For a bridge, we want to check the address irrespective
1621 * of the receive interface.
1622 */
1623 lck_rw_lock_shared(in_ifaddr_rwlock);
1624 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr), ia_hash) {
1625 IFA_LOCK_SPIN(&ia->ia_ifa);
1626 if (((bridged && ia->ia_ifp->if_bridge != NULL) ||
1627 (ia->ia_ifp == ifp)) &&
1628 ia->ia_addr.sin_addr.s_addr == target_ip->sin_addr.s_addr) {
1629 best_ia = ia;
1630 best_ia_sin = best_ia->ia_addr;
1631 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1632 IFA_UNLOCK(&ia->ia_ifa);
1633 lck_rw_done(in_ifaddr_rwlock);
1634 goto match;
1635 }
1636 IFA_UNLOCK(&ia->ia_ifa);
1637 }
1638
1639 TAILQ_FOREACH(ia, INADDR_HASH(sender_ip->sin_addr.s_addr), ia_hash) {
1640 IFA_LOCK_SPIN(&ia->ia_ifa);
1641 if (((bridged && ia->ia_ifp->if_bridge != NULL) ||
1642 (ia->ia_ifp == ifp)) &&
1643 ia->ia_addr.sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1644 best_ia = ia;
1645 best_ia_sin = best_ia->ia_addr;
1646 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1647 IFA_UNLOCK(&ia->ia_ifa);
1648 lck_rw_done(in_ifaddr_rwlock);
1649 goto match;
1650 }
1651 IFA_UNLOCK(&ia->ia_ifa);
1652 }
1653
1654#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \
1655 (ia->ia_ifp->if_bridge == ifp->if_softc && \
1656 bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \
1657 addr == ia->ia_addr.sin_addr.s_addr)
1658 /*
1659 * Check the case when bridge shares its MAC address with
1660 * some of its children, so packets are claimed by bridge
1661 * itself (bridge_input() does it first), but they are really
1662 * meant to be destined to the bridge member.
1663 */
1664 if (is_bridge) {
1665 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr),
1666 ia_hash) {
1667 IFA_LOCK_SPIN(&ia->ia_ifa);
1668 if (BDG_MEMBER_MATCHES_ARP(target_ip->sin_addr.s_addr,
1669 ifp, ia)) {
1670 ifp = ia->ia_ifp;
1671 best_ia = ia;
1672 best_ia_sin = best_ia->ia_addr;
1673 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1674 IFA_UNLOCK(&ia->ia_ifa);
1675 lck_rw_done(in_ifaddr_rwlock);
1676 goto match;
1677 }
1678 IFA_UNLOCK(&ia->ia_ifa);
1679 }
1680 }
1681#undef BDG_MEMBER_MATCHES_ARP
1682 lck_rw_done(in_ifaddr_rwlock);
1683
1684 /*
1685 * No match, use the first inet address on the receive interface
1686 * as a dummy address for the rest of the function; we may be
1687 * proxying for another address.
1688 */
1689 ifnet_lock_shared(ifp);
1690 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1691 IFA_LOCK_SPIN(ifa);
1692 if (ifa->ifa_addr->sa_family != AF_INET) {
1693 IFA_UNLOCK(ifa);
1694 continue;
1695 }
1696 best_ia = (struct in_ifaddr *)ifa;
1697 best_ia_sin = best_ia->ia_addr;
1698 IFA_ADDREF_LOCKED(ifa);
1699 IFA_UNLOCK(ifa);
1700 ifnet_lock_done(ifp);
1701 goto match;
1702 }
1703 ifnet_lock_done(ifp);
1704
1705 /*
1706 * If we're not a bridge member, or if we are but there's no
1707 * IPv4 address to use for the interface, drop the packet.
1708 */
1709 if (!bridged || best_ia == NULL)
1710 goto done;
1711
1712match:
1713 /* If the packet is from this interface, ignore the packet */
1714 if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp),
1715 sender_hw->sdl_alen) == 0)
1716 goto done;
1717
1718 /* Check for a conflict */
1719 if (!bridged &&
1720 sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) {
1721 struct kev_msg ev_msg;
1722 struct kev_in_collision *in_collision;
1723 u_char storage[sizeof (struct kev_in_collision) + MAX_HW_LEN];
1724
1725 bzero(&ev_msg, sizeof (struct kev_msg));
1726 bzero(storage, (sizeof (struct kev_in_collision) + MAX_HW_LEN));
1727 in_collision = (struct kev_in_collision *)(void *)storage;
1728 log(LOG_ERR, "%s duplicate IP address %s sent from "
1729 "address %s\n", if_name(ifp),
1730 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
1731 sizeof (ipv4str)), sdl_addr_to_hex(sender_hw, buf,
1732 sizeof (buf)));
1733
1734 /* Send a kernel event so anyone can learn of the conflict */
1735 in_collision->link_data.if_family = ifp->if_family;
1736 in_collision->link_data.if_unit = ifp->if_unit;
1737 strlcpy(&in_collision->link_data.if_name[0],
1738 ifp->if_name, IFNAMSIZ);
1739 in_collision->ia_ipaddr = sender_ip->sin_addr;
1740 in_collision->hw_len = (sender_hw->sdl_alen < MAX_HW_LEN) ?
1741 sender_hw->sdl_alen : MAX_HW_LEN;
1742 bcopy(CONST_LLADDR(sender_hw), (caddr_t)in_collision->hw_addr,
1743 in_collision->hw_len);
1744 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1745 ev_msg.kev_class = KEV_NETWORK_CLASS;
1746 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1747 ev_msg.event_code = KEV_INET_ARPCOLLISION;
1748 ev_msg.dv[0].data_ptr = in_collision;
1749 ev_msg.dv[0].data_length =
1750 sizeof (struct kev_in_collision) + in_collision->hw_len;
1751 ev_msg.dv[1].data_length = 0;
1752 dlil_post_complete_msg(NULL, &ev_msg);
1753 atomic_add_32(&arpstat.dupips, 1);
1754 goto respond;
1755 }
1756
1757 /*
1758 * Look up the routing entry. If it doesn't exist and we are the
1759 * target, and the sender isn't 0.0.0.0, go ahead and create one.
1760 * Callee holds a reference on the route and returns with the route
1761 * entry locked, upon success.
1762 */
1763 error = arp_lookup_route(&sender_ip->sin_addr,
1764 (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr &&
1765 sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index);
1766
1767 if (error == 0)
1768 RT_LOCK_ASSERT_HELD(route);
1769
1770 if (error || route == NULL || route->rt_gateway == NULL) {
1771 if (arpop != ARPOP_REQUEST)
1772 goto respond;
1773
1774 if (arp_sendllconflict && send_conflicting_probes != 0 &&
1775 (ifp->if_eflags & IFEF_ARPLL) &&
1776 IN_LINKLOCAL(ntohl(target_ip->sin_addr.s_addr)) &&
1777 sender_ip->sin_addr.s_addr == INADDR_ANY) {
1778 /*
1779 * Verify this ARP probe doesn't conflict with
1780 * an IPv4LL we know of on another interface.
1781 */
			if (route != NULL) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				route = NULL;
			}
			/*
			 * Callee holds a reference on the route and returns
			 * with the route entry locked, upon success.
			 */
			error = arp_lookup_route(&target_ip->sin_addr, 0, 0,
			    &route, ifp->if_index);

			if (error != 0 || route == NULL ||
			    route->rt_gateway == NULL)
				goto respond;

			RT_LOCK_ASSERT_HELD(route);

			gateway = SDL(route->rt_gateway);
			if (route->rt_ifp != ifp && gateway->sdl_alen != 0 &&
			    (gateway->sdl_alen != sender_hw->sdl_alen ||
			    bcmp(CONST_LLADDR(gateway), CONST_LLADDR(sender_hw),
			    gateway->sdl_alen) != 0)) {
				/*
				 * A node is probing for an IPv4LL we know
				 * exists on a different interface. We respond
				 * with a conflicting probe to force the new
				 * device to pick a different IPv4LL address.
				 */
				if (arp_verbose || log_arp_warnings) {
					log(LOG_INFO, "arp: %s on %s sent "
					    "probe for %s, already on %s\n",
					    sdl_addr_to_hex(sender_hw, buf,
					    sizeof (buf)), if_name(ifp),
					    inet_ntop(AF_INET,
					    &target_ip->sin_addr, ipv4str,
					    sizeof (ipv4str)),
					    if_name(route->rt_ifp));
					log(LOG_INFO, "arp: sending "
					    "conflicting probe to %s on %s\n",
					    sdl_addr_to_hex(sender_hw, buf,
					    sizeof (buf)), if_name(ifp));
				}
				/* Mark use timestamp */
				if (route->rt_llinfo != NULL)
					arp_llreach_use(route->rt_llinfo);
				/* We're done with the route */
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				route = NULL;
				/*
				 * Send a conservative unicast "ARP probe".
				 * This should force the other device to pick
				 * a different IPv4LL address. It won't do so
				 * if the device has already committed to the
				 * address, and it doesn't imply that we own
				 * the address ourselves. The link address is
				 * always present; it's never freed.
				 */
				ifnet_lock_shared(ifp);
				ifa = ifp->if_lladdr;
				IFA_ADDREF(ifa);
				ifnet_lock_done(ifp);
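				/*
				 * ARPOP_REQUEST from our link-layer address,
				 * with the probe's 0.0.0.0 sender IP, sent
				 * unicast back to the prober for target_ip.
				 */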
				dlil_send_arp_internal(ifp, ARPOP_REQUEST,
				    SDL(ifa->ifa_addr),
				    (const struct sockaddr *)sender_ip,
				    sender_hw,
				    (const struct sockaddr *)target_ip);
				IFA_REMREF(ifa);
				ifa = NULL;
				atomic_add_32(&arpstat.txconflicts, 1);
			}
			goto respond;
		} else if (keep_announcements != 0 &&
		    target_ip->sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
			/*
			 * Don't create an entry if the sender address is
			 * link-local and link-local ARP is disabled on
			 * this interface.
			 */
			if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
			    (ifp->if_eflags & IFEF_ARPLL)) {
				if (route != NULL) {
					RT_REMREF_LOCKED(route);
					RT_UNLOCK(route);
					route = NULL;
				}
				/*
				 * Callee holds a reference on the route and
				 * returns with the route entry locked, upon
				 * success.
				 */
				error = arp_lookup_route(&sender_ip->sin_addr,
				    1, 0, &route, ifp->if_index);

				if (error == 0)
					RT_LOCK_ASSERT_HELD(route);

				if (error == 0 && route != NULL &&
				    route->rt_gateway != NULL)
					created_announcement = 1;
			}
			if (created_announcement == 0)
				goto respond;
		} else {
			goto respond;
		}
	}

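	/*
	 * From here on we hold a locked, referenced route entry for the
	 * sender. rt_expire and rmx_expire must agree: both zero
	 * (permanent) or both nonzero.
	 */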
	RT_LOCK_ASSERT_HELD(route);
	VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
	VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);

	gateway = SDL(route->rt_gateway);
	if (!bridged && route->rt_ifp != ifp) {
		if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
		    !(ifp->if_eflags & IFEF_ARPLL)) {
			if (arp_verbose || log_arp_warnings)
				log(LOG_ERR, "arp: %s is on %s but got "
				    "reply from %s on %s\n",
				    inet_ntop(AF_INET, &sender_ip->sin_addr,
				    ipv4str, sizeof (ipv4str)),
				    if_name(route->rt_ifp),
				    sdl_addr_to_hex(sender_hw, buf,
				    sizeof (buf)), if_name(ifp));
			goto respond;
		} else {
			/* Don't change a permanent address */
			if (route->rt_expire == 0)
				goto respond;

			/*
			 * We're about to check and/or change the route's ifp
			 * and ifa, so do the lock dance: drop rt_lock, hold
			 * rnh_lock and re-hold rt_lock to avoid violating the
			 * lock ordering. We have an extra reference on the
			 * route, so it won't go away while we do this.
			 */
			RT_UNLOCK(route);
			lck_mtx_lock(rnh_lock);
			RT_LOCK(route);
			/*
			 * Don't change the cloned route away from the
			 * parent's interface if the address did resolve
			 * or if the route is defunct. rt_ifp on both the
			 * parent and the clone can be freely accessed now
			 * that we have acquired rnh_lock.
			 */
			gateway = SDL(route->rt_gateway);
			if ((gateway->sdl_alen != 0 &&
			    route->rt_parent != NULL &&
			    route->rt_parent->rt_ifp == route->rt_ifp) ||
			    (route->rt_flags & RTF_CONDEMNED)) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				route = NULL;
				lck_mtx_unlock(rnh_lock);
				goto respond;
			}
			if (route->rt_ifp != ifp) {
				/*
				 * Purge any link-layer info caching.
				 */
				if (route->rt_llinfo_purge != NULL)
					route->rt_llinfo_purge(route);

				/* Adjust route ref count for the interfaces */
				if (route->rt_if_ref_fn != NULL) {
					route->rt_if_ref_fn(ifp, 1);
					route->rt_if_ref_fn(route->rt_ifp, -1);
				}
			}
			/* Change the interface the existing route is on */
			route->rt_ifp = ifp;
			/*
			 * If rmx_mtu is not locked, update it
			 * to the MTU used by the new interface.
			 */
			if (!(route->rt_rmx.rmx_locks & RTV_MTU)) {
				route->rt_rmx.rmx_mtu = route->rt_ifp->if_mtu;
				if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
					route->rt_rmx.rmx_mtu =
					    IN6_LINKMTU(route->rt_ifp);
					/*
					 * Further adjust the size for
					 * CLAT46 expansion.
					 */
					route->rt_rmx.rmx_mtu -=
					    CLAT46_HDR_EXPANSION_OVERHD;
				}
			}

			rtsetifa(route, &best_ia->ia_ifa);
			gateway->sdl_index = ifp->if_index;
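			/*
			 * Drop both locks, then re-take only rt_lock; our
			 * extra reference keeps the route from being freed
			 * in between.
			 */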
			RT_UNLOCK(route);
			lck_mtx_unlock(rnh_lock);
			RT_LOCK(route);
			/* Don't bother if the route is down */
			if (!(route->rt_flags & RTF_UP))
				goto respond;
			/* Refresh gateway pointer */
			gateway = SDL(route->rt_gateway);
		}
		RT_LOCK_ASSERT_HELD(route);
	}

	if (gateway->sdl_alen != 0 && bcmp(LLADDR(gateway),
	    CONST_LLADDR(sender_hw), gateway->sdl_alen) != 0) {
		if (route->rt_expire != 0 &&
		    (arp_verbose || log_arp_warnings)) {
			char buf2[3 * MAX_HW_LEN];
			log(LOG_INFO, "arp: %s moved from %s to %s on %s\n",
			    inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
			    sizeof (ipv4str)),
			    sdl_addr_to_hex(gateway, buf, sizeof (buf)),
			    sdl_addr_to_hex(sender_hw, buf2, sizeof (buf2)),
			    if_name(ifp));
		} else if (route->rt_expire == 0) {
			if (arp_verbose || log_arp_warnings) {
				log(LOG_ERR, "arp: %s attempts to modify "
				    "permanent entry for %s on %s\n",
				    sdl_addr_to_hex(sender_hw, buf,
				    sizeof (buf)),
				    inet_ntop(AF_INET, &sender_ip->sin_addr,
				    ipv4str, sizeof (ipv4str)),
				    if_name(ifp));
			}
			goto respond;
		}
	}

	/* Copy the sender hardware address into the route's gateway address */
	gateway->sdl_alen = sender_hw->sdl_alen;
	bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen);

	/* Update the expire time for the route and clear the reject flag */
	if (route->rt_expire != 0)
		rt_setexpire(route, net_uptime() + arpt_keep);
	route->rt_flags &= ~RTF_REJECT;

	/* cache the gateway (sender HW) address */
	arp_llreach_alloc(route, ifp, LLADDR(gateway), gateway->sdl_alen,
	    (arpop == ARPOP_REPLY), &rt_evcode);
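	/*
	 * arp_llreach_alloc reports link-layer reachability changes through
	 * rt_evcode; a nonzero code triggers the route event work below.
	 */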

	llinfo = route->rt_llinfo;
	/* send a notification that the route is back up */
	if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
	    route->rt_flags & RTF_ROUTER &&
	    llinfo->la_flags & LLINFO_RTRFAIL_EVTSENT) {
		struct kev_msg ev_msg;
		struct kev_in_arpalive in_arpalive;

		llinfo->la_flags &= ~LLINFO_RTRFAIL_EVTSENT;
		RT_UNLOCK(route);
		bzero(&ev_msg, sizeof(ev_msg));
		bzero(&in_arpalive, sizeof(in_arpalive));
		in_arpalive.link_data.if_family = ifp->if_family;
		in_arpalive.link_data.if_unit = ifp->if_unit;
		strlcpy(in_arpalive.link_data.if_name, ifp->if_name, IFNAMSIZ);
		ev_msg.vendor_code = KEV_VENDOR_APPLE;
		ev_msg.kev_class = KEV_NETWORK_CLASS;
		ev_msg.kev_subclass = KEV_INET_SUBCLASS;
		ev_msg.event_code = KEV_INET_ARPRTRALIVE;
		ev_msg.dv[0].data_ptr = &in_arpalive;
		ev_msg.dv[0].data_length = sizeof(struct kev_in_arpalive);
		dlil_post_complete_msg(NULL, &ev_msg);
		RT_LOCK(route);
	}
	/* Update the llinfo, send out all queued packets at once */
	llinfo->la_asked = 0;
	llinfo->la_flags &= ~LLINFO_PROBING;
	llinfo->la_prbreq_cnt = 0;

	if (rt_evcode) {
		/*
		 * Enqueue work item to invoke callback for this route entry
		 */
		route_event_enqueue_nwk_wq_entry(route, NULL, rt_evcode,
		    NULL, TRUE);

		if (route->rt_flags & RTF_ROUTER) {
			struct radix_node_head *rnh = NULL;
			struct route_event rt_ev;
			route_event_init(&rt_ev, route, NULL, rt_evcode);
			/*
			 * We already have a reference on rt. The function
			 * frees it before returning.
			 */
			RT_UNLOCK(route);
			lck_mtx_lock(rnh_lock);
			rnh = rt_tables[AF_INET];

			if (rnh != NULL)
				(void) rnh->rnh_walktree(rnh,
				    route_event_walktree, (void *)&rt_ev);
			lck_mtx_unlock(rnh_lock);
			RT_LOCK(route);
		}
	}

	if (!qempty(&llinfo->la_holdq)) {
		uint32_t held;
		struct mbuf *m0 =
		    _getq_all(&llinfo->la_holdq, NULL, &held, NULL);
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: sending %u held packets\n",
			    __func__, held);
		}
		atomic_add_32(&arpstat.held, -held);
		VERIFY(qempty(&llinfo->la_holdq));
		RT_UNLOCK(route);
		dlil_output(ifp, PF_INET, m0, (caddr_t)route,
		    rt_key(route), 0, NULL);
		RT_REMREF(route);
		route = NULL;
	}

respond:
	if (route != NULL) {
		/* Mark use timestamp if we're going to send a reply */
		if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL)
			arp_llreach_use(route->rt_llinfo);
		RT_REMREF_LOCKED(route);
		RT_UNLOCK(route);
		route = NULL;
	}

	if (arpop != ARPOP_REQUEST)
		goto done;

	/* See comments at the beginning of this routine */
	arpstat.rxrequests++;

	/* If we are not the target, check if we should proxy */
	if (target_ip->sin_addr.s_addr != best_ia_sin.sin_addr.s_addr) {
		/*
		 * Find a proxy route; callee holds a reference on the
		 * route and returns with the route entry locked, upon
		 * success.
		 */
		error = arp_lookup_route(&target_ip->sin_addr, 0, SIN_PROXY,
		    &route, ifp->if_index);

		if (error == 0) {
			RT_LOCK_ASSERT_HELD(route);
			/*
			 * Return proxied ARP replies only on the interface
			 * or bridge cluster where this network resides.
			 * Otherwise we may conflict with the host we are
			 * proxying for.
			 */
			if (route->rt_ifp != ifp &&
			    (route->rt_ifp->if_bridge != ifp->if_bridge ||
			    ifp->if_bridge == NULL)) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
				goto done;
			}
			proxied = *SDL(route->rt_gateway);
			target_hw = &proxied;
		} else {
			/*
			 * We don't have a route entry indicating we should
			 * proxy for this host. If we aren't configured to
			 * proxy for all hosts, we're done.
			 */
			if (!arp_proxyall)
				goto done;

			/*
			 * See if we have a route to the target ip before
			 * we proxy it.
			 */
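			/*
			 * The intermediate (size_t) cast is presumably here
			 * to quiet pointer-cast warnings when converting the
			 * sockaddr_in to a generic sockaddr.
			 */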
			route = rtalloc1_scoped((struct sockaddr *)
			    (size_t)target_ip, 0, 0, ifp->if_index);
			if (!route)
				goto done;

			/*
			 * Don't proxy for hosts already on the same interface.
			 */
			RT_LOCK(route);
			if (route->rt_ifp == ifp) {
				RT_UNLOCK(route);
				rtfree(route);
				goto done;
			}
		}
		/* Mark use timestamp */
		if (route->rt_llinfo != NULL)
			arp_llreach_use(route->rt_llinfo);
		RT_REMREF_LOCKED(route);
		RT_UNLOCK(route);
	}

	dlil_send_arp(ifp, ARPOP_REPLY,
	    target_hw, (const struct sockaddr *)target_ip,
	    sender_hw, (const struct sockaddr *)sender_ip, 0);

done:
	if (best_ia != NULL)
		IFA_REMREF(&best_ia->ia_ifa);
	return (0);
}

void
arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
{
	struct sockaddr *sa;

	IFA_LOCK(ifa);
	ifa->ifa_rtrequest = arp_rtrequest;
	ifa->ifa_flags |= RTF_CLONING;
	sa = ifa->ifa_addr;
	IFA_UNLOCK(ifa);
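	/*
	 * Announce the address: a gratuitous ARP request with sender and
	 * target both set to our own address, broadcast (NULL target hw).
	 */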
	dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa, NULL, sa, 0);
}

static int
arp_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
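	/* A NULL oldptr means the caller is only probing for the size */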
	if (req->oldptr == USER_ADDR_NULL)
		req->oldlen = (size_t)sizeof (struct arpstat);

	return (SYSCTL_OUT(req, &arpstat, MIN(sizeof (arpstat), req->oldlen)));
}
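
/*
 * Example (userland) of reading these counters: a minimal sketch, assuming
 * this handler is registered under the name "net.link.ether.inet.stats"
 * (check the SYSCTL_PROC registration earlier in this file) and that
 * struct arpstat is visible via <net/if_arp.h>:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <net/if_arp.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct arpstat st;
 *		size_t len = sizeof (st);
 *
 *		// Assumed MIB name; see the registration in this file.
 *		if (sysctlbyname("net.link.ether.inet.stats", &st, &len,
 *		    NULL, 0) == -1)
 *			return (1);
 *		printf("rx requests: %u, dup IPs: %u\n",
 *		    st.rxrequests, st.dupips);
 *		return (0);
 *	}
 */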