/*
 * Copyright (c) 2000-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/net_api_stats.h>
#include <net/route.h>
#include <net/content_filter.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#include <netinet6/in6_pcb.h>


#if IPSEC
#include <netinet6/ipsec.h>
#endif /* IPSEC */

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

int rip_detach(struct socket *);
int rip_abort(struct socket *);
int rip_disconnect(struct socket *);
int rip_bind(struct socket *, struct sockaddr *, struct proc *);
int rip_connect(struct socket *, struct sockaddr *, struct proc *);
int rip_shutdown(struct socket *);

struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for dummynet */
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */

/*
 * Nominal space allocated to a raw ip socket.
 */
#define RIPSNDQ		8192
#define RIPRCVQ		8192

static KALLOC_TYPE_DEFINE(ripzone, struct inpcb, NET_KT_DEFAULT);

/*
 * Raw interface to IP protocol.
 */
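
/*
 * Illustrative userspace sketch (not part of this file): how a process
 * typically reaches this code path.  The calls below are the standard
 * socket(2)/<netinet/in.h> API; rip_attach() below enforces the privilege
 * check and the 8-bit protocol number limit.
 *
 *	// Requires privilege; 'proto' is stored in inp_ip_p and matched
 *	// against the ip_p of inbound packets in rip_input_inner().
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 */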

/*
 * Initialize raw connection block q.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
	static int rip_initialized = 0;
	struct inpcbinfo *pcbinfo;

	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

	if (rip_initialized) {
		return;
	}
	rip_initialized = 1;

	LIST_INIT(&ripcb);
	ripcbinfo.ipi_listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for ipi_hashbase == NULL.
	 */
	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

	ripcbinfo.ipi_zone = ripzone;

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for raw IP PCB mutexes
	 */
	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", LCK_GRP_ATTR_NULL);

	/*
	 * allocate the lock attribute for raw IP PCB mutexes
	 */
	lck_attr_setdefault(&pcbinfo->ipi_lock_attr);
	lck_rw_init(&pcbinfo->ipi_lock, pcbinfo->ipi_lock_grp,
	    &pcbinfo->ipi_lock_attr);

	in_pcbinfo_attach(&ripcbinfo);
}

static uint32_t
rip_inp_input(struct inpcb *inp, struct mbuf *m, int iphlen)
{
	struct ip *ip = mtod(m, struct ip *);
	struct ifnet *ifp = m->m_pkthdr.rcvif;
	struct sockaddr_in ripsrc = {
		.sin_len = sizeof(ripsrc),
		.sin_family = AF_INET,
		.sin_port = 0,
		.sin_addr = { .s_addr = 0 },
		.sin_zero = {0, 0, 0, 0, 0, 0, 0, 0, }
	};
	struct mbuf *opts = NULL;
	boolean_t is_wake_pkt = false;
	uint32_t num_delivered = 0;

#if NECP
	if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
	    &ip->ip_dst, &ip->ip_src, ifp, 0, NULL, NULL, NULL, NULL)) {
		/* do not inject data to pcb */
		goto done;
	}
#endif /* NECP */

	ripsrc.sin_addr = ip->ip_src;

	if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT)) {
		is_wake_pkt = true;
	}

	if ((inp->inp_flags & INP_CONTROLOPTS) != 0 ||
	    SOFLOW_ENABLED(inp->inp_socket) ||
	    SO_RECV_CONTROL_OPTS(inp->inp_socket)) {
		if (ip_savecontrol(inp, &opts, ip, m) != 0) {
			m_freem(opts);
			goto done;
		}
	}
	if (inp->inp_flags & INP_STRIPHDR
#if CONTENT_FILTER
	    /*
	     * If socket is subject to Content Filter, delay stripping until reinject
	     */
	    && (!CFIL_DGRAM_FILTERED(inp->inp_socket))
#endif
	    ) {
		m->m_len -= iphlen;
		m->m_pkthdr.len -= iphlen;
		m->m_data += iphlen;
	}
	so_recv_data_stat(inp->inp_socket, m, 0);
	if (sbappendaddr(&inp->inp_socket->so_rcv,
	    (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
		num_delivered = 1;
		sorwakeup(inp->inp_socket);
		if (is_wake_pkt) {
			soevent(inp->in6p_socket,
			    SO_FILT_HINT_LOCKED | SO_FILT_HINT_WAKE_PKT);
		}
	} else {
		ipstat.ips_raw_sappend_fail++;
	}
done:
	return num_delivered;
}

/*
 * The first pass is for IPv4 sockets and the second pass is for IPv6 sockets.
 */
static bool
rip_input_inner(struct mbuf *m, int iphlen, bool is_ipv4_pass, uint32_t *total_delivered)
{
	struct inpcb *inp;
	struct inpcb *last = NULL;
	struct ip *ip = mtod(m, struct ip *);
	struct ifnet *ifp = m->m_pkthdr.rcvif;
	bool need_ipv6_pass = false;
	uint32_t num_delivered = 0;

	lck_rw_lock_shared(&ripcbinfo.ipi_lock);
	LIST_FOREACH(inp, &ripcb, inp_list) {
		if (is_ipv4_pass) {
			if ((inp->inp_vflag & (INP_IPV4 | INP_IPV6)) != INP_IPV4) {
				/* Note that we need to do an IPv6 pass */
				need_ipv6_pass = true;
				continue;
			}
		} else {
			if ((inp->inp_vflag & (INP_IPV4 | INP_IPV6)) != (INP_IPV4 | INP_IPV6)) {
				continue;
			}
		}
		if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p)) {
			continue;
		}
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr) {
			continue;
		}
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr) {
			continue;
		}
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}
		if (last != NULL) {
			struct mbuf *n = m_copym_mode(m, 0, (int)M_COPYALL, M_DONTWAIT, NULL, NULL, M_COPYM_MUST_COPY_HDR);

			if (n == NULL) {
				continue;
			}
			num_delivered += rip_inp_input(last, n, iphlen);
		}
		last = inp;
	}

	/*
	 * Consume the original mbuf 'm' if:
	 * - it is the first pass and there is no IPv6 raw socket
	 * - it is the second pass for IPv6
	 */
	if (need_ipv6_pass == false || is_ipv4_pass == false) {
		if (last != NULL) {
			num_delivered += rip_inp_input(last, m, iphlen);
		} else {
			m_freem(m);
		}
	} else {
		if (last != NULL) {
			struct mbuf *n = m_copym_mode(m, 0, (int)M_COPYALL, M_DONTWAIT, NULL, NULL, M_COPYM_MUST_COPY_HDR);

			if (n != NULL) {
				num_delivered += rip_inp_input(last, n, iphlen);
			}
		}
	}
	/*
	 * The list was kept locked across the loop because a socket filter
	 * may force the socket lock to be released when calling
	 * sbappendaddr() -- see rdar://7627704
	 */
	lck_rw_done(&ripcbinfo.ipi_lock);

	*total_delivered += num_delivered;

	return need_ipv6_pass;
}

/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
void
rip_input(struct mbuf *m, int iphlen)
{
	uint32_t num_delivered = 0;
	bool need_v6_pass = false;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	/*
	 * First pass for raw IPv4 sockets that are protected by the inet_domain_mutex lock
	 */
	need_v6_pass = rip_input_inner(m, iphlen, true, &num_delivered);

	/*
	 * For the IPv6 pass we need to switch to the inet6_domain_mutex lock
	 * to protect the raw IPv6 sockets
	 */
	if (need_v6_pass) {
		lck_mtx_unlock(inet_domain_mutex);

		lck_mtx_lock(inet6_domain_mutex);
		rip_input_inner(m, iphlen, false, &num_delivered);
		lck_mtx_unlock(inet6_domain_mutex);

		lck_mtx_lock(inet_domain_mutex);
	}

	if (num_delivered > 0) {
		OSAddAtomic(1, &ipstat.ips_delivered);
	} else {
		OSAddAtomic(1, &ipstat.ips_noproto);
	}
}

/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
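/*
 * Illustrative userspace sketch (not part of this file) of the two output
 * modes handled below.  Without IP_HDRINCL the kernel prepends the IP
 * header; with it, the caller supplies a complete header and rip_output()
 * only validates it and fills in ip_id when it is zero.
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));
 *	// buf must now begin with a valid struct ip whose ip_len covers
 *	// the bytes handed to sendto().
 *	sendto(s, buf, len, 0, (struct sockaddr *)&sin, sizeof(sin));
 */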
int
rip_output(
	struct mbuf *m,
	struct socket *so,
	u_int32_t dst,
	struct mbuf *control)
{
	struct ip *ip;
	struct inpcb *inp = sotoinpcb(so);
	int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
	int inp_flags = inp ? inp->inp_flags : 0;
	struct ip_out_args ipoa;
	struct ip_moptions *imo;
	int tos = IPTOS_UNSPEC;
	int error = 0;
#if CONTENT_FILTER
	struct m_tag *cfil_tag = NULL;
	bool cfil_faddr_use = false;
	uint32_t cfil_so_state_change_cnt = 0;
	uint32_t cfil_so_options = 0;
	int cfil_inp_flags = 0;
	struct sockaddr *cfil_faddr = NULL;
	struct sockaddr_in *cfil_sin;
	u_int32_t cfil_dst = 0;
#endif

#if CONTENT_FILTER
	/*
	 * If socket is subject to Content Filter and no addr is passed in,
	 * retrieve CFIL saved state from mbuf and use it if necessary.
	 */
	if (CFIL_DGRAM_FILTERED(so) && dst == INADDR_ANY) {
		cfil_tag = cfil_dgram_get_socket_state(m, &cfil_so_state_change_cnt, &cfil_so_options, &cfil_faddr, &cfil_inp_flags);
		if (cfil_tag) {
			cfil_sin = SIN(cfil_faddr);
			flags = (cfil_so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
			inp_flags = cfil_inp_flags;
			if (inp && inp->inp_faddr.s_addr == INADDR_ANY) {
				/*
				 * Socket is unconnected, simply use the saved faddr as 'addr' to go through
				 * the connect/disconnect logic.
				 */
				dst = cfil_sin->sin_addr.s_addr;
			} else if ((so->so_state_change_cnt != cfil_so_state_change_cnt) &&
			    (inp->inp_fport != cfil_sin->sin_port ||
			    inp->inp_faddr.s_addr != cfil_sin->sin_addr.s_addr)) {
				/*
				 * Socket is connected but socket state and dest addr/port changed.
				 * We need to use the saved faddr and socket options.
				 */
				cfil_faddr_use = true;
				cfil_dst = cfil_sin->sin_addr.s_addr;
			}
			m_tag_free(cfil_tag);
		}
	}
#endif

	if (so->so_state & SS_ISCONNECTED) {
		if (dst != INADDR_ANY) {
			if (m != NULL) {
				m_freem(m);
			}
			if (control != NULL) {
				m_freem(control);
			}
			return EISCONN;
		}
		dst = cfil_faddr_use ? cfil_dst : inp->inp_faddr.s_addr;
	} else {
		if (dst == INADDR_ANY) {
			if (m != NULL) {
				m_freem(m);
			}
			if (control != NULL) {
				m_freem(control);
			}
			return ENOTCONN;
		}
	}

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

	int sotc = SO_TC_UNSPEC;
	int netsvctype = _NET_SERVICE_TYPE_UNSPEC;


	if (control != NULL) {
		tos = so_tos_from_control(control);
		sotc = so_tc_from_control(control, &netsvctype);

		m_freem(control);
		control = NULL;
	}
	if (sotc == SO_TC_UNSPEC) {
		sotc = so->so_traffic_class;
		netsvctype = so->so_netsvctype;
	}

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    ) {
		if (m != NULL) {
			m_freem(m);
		}
		VERIFY(control == NULL);
		return inp == NULL ? EINVAL : EPROTOTYPE;
	}

	flags |= IP_OUTARGS;
	/* If socket was bound to an ifindex, tell ip_output about it */
	if (inp->inp_flags & INP_BOUND_IF) {
		ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
		ipoa.ipoa_flags |= IPOAF_BOUND_IF;
	}
	if (INP_NO_CELLULAR(inp)) {
		ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
	}
	if (INP_NO_EXPENSIVE(inp)) {
		ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
	}
	if (INP_NO_CONSTRAINED(inp)) {
		ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
	}
	if (INP_AWDL_UNRESTRICTED(inp)) {
		ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
	}
	if (INP_MANAGEMENT_ALLOWED(inp)) {
		ipoa.ipoa_flags |= IPOAF_MANAGEMENT_ALLOWED;
	}
	ipoa.ipoa_sotc = sotc;
	ipoa.ipoa_netsvctype = netsvctype;

	if (inp->inp_flowhash == 0) {
		inp_calc_flowhash(inp);
		ASSERT(inp->inp_flowhash != 0);
	}

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return EMSGSIZE;
		}
		M_PREPEND(m, sizeof(struct ip), M_WAIT, 1);
		if (m == NULL) {
			return ENOBUFS;
		}
		ip = mtod(m, struct ip *);
		if (tos != IPTOS_UNSPEC) {
			ip->ip_tos = (uint8_t)(tos & IPTOS_MASK);
		} else {
			ip->ip_tos = inp->inp_ip_tos;
		}
		if (inp->inp_flags2 & INP2_DONTFRAG) {
			ip->ip_off = IP_DF;
		} else {
			ip->ip_off = 0;
		}
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = (uint16_t)m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return EMSGSIZE;
		}
		ip = mtod(m, struct ip *);
		/*
		 * don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash
		 */
		if (m->m_pkthdr.len < sizeof(struct ip) ||
		    ((IP_VHL_HL(ip->ip_vhl) != (sizeof(*ip) >> 2)) && inp->inp_options) ||
		    (ip->ip_len > m->m_pkthdr.len) ||
		    (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
			m_freem(m);
			return EINVAL;
		}
		if (ip->ip_id == 0 && !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off)))) {
			ip->ip_id = ip_randomid((uint64_t)m);
		}
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
		OSAddAtomic(1, &ipstat.ips_rawout);
	}

	if (inp->inp_laddr.s_addr != INADDR_ANY) {
		ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
	}

#if NECP
	{
		necp_kernel_policy_id policy_id;
		necp_kernel_policy_id skip_policy_id;
		u_int32_t route_rule_id;
		u_int32_t pass_flags;

		/*
		 * We need a route to perform NECP route rule checks
		 */
		if ((net_qos_policy_restricted != 0 &&
		    ROUTE_UNUSABLE(&inp->inp_route))
#if CONTENT_FILTER
		    || cfil_faddr_use
#endif
		    ) {
			struct sockaddr_in to;
			struct sockaddr_in from;
			struct in_addr laddr = ip->ip_src;

			ROUTE_RELEASE(&inp->inp_route);

			bzero(&from, sizeof(struct sockaddr_in));
			from.sin_family = AF_INET;
			from.sin_len = sizeof(struct sockaddr_in);
			from.sin_addr = laddr;

			bzero(&to, sizeof(struct sockaddr_in));
			to.sin_family = AF_INET;
			to.sin_len = sizeof(struct sockaddr_in);
			to.sin_addr.s_addr = ip->ip_dst.s_addr;

			if ((error = in_pcbladdr(inp, (struct sockaddr *)&to,
			    &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) {
				printf("%s in_pcbladdr(%p) error %d\n",
				    __func__, inp, error);
				m_freem(m);
				return error;
			}

			inp_update_necp_policy(inp, (struct sockaddr *)&from,
			    (struct sockaddr *)&to, ipoa.ipoa_boundif);
			inp->inp_policyresult.results.qos_marking_gencount = 0;
		}

		if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
		    &ip->ip_src, &ip->ip_dst, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) {
			m_freem(m);
			return EHOSTUNREACH;
		}

		necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags);

		if (net_qos_policy_restricted != 0) {
			struct ifnet *rt_ifp = NULL;

			if (inp->inp_route.ro_rt != NULL) {
				rt_ifp = inp->inp_route.ro_rt->rt_ifp;
			}

			necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id);
		}
	}
#endif /* NECP */
	if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
		ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
	}
#if IPSEC
	if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) {
		m_freem(m);
		return ENOBUFS;
	}
#endif /* IPSEC */

	if (ROUTE_UNUSABLE(&inp->inp_route)) {
		ROUTE_RELEASE(&inp->inp_route);
	}

	set_packet_service_class(m, so, sotc, 0);
	m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
	m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
	m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
	    PKTF_FLOW_RAWSOCK);
	m->m_pkthdr.pkt_proto = inp->inp_ip_p;
	m->m_pkthdr.tx_rawip_pid = so->last_pid;
	if (so->so_flags & SOF_DELEGATED) {
		m->m_pkthdr.tx_rawip_e_pid = so->e_pid;
	} else {
		m->m_pkthdr.tx_rawip_e_pid = 0;
	}
#if (DEBUG || DEVELOPMENT)
	if (so->so_flags & SOF_MARK_WAKE_PKT) {
		so->so_flags &= ~SOF_MARK_WAKE_PKT;
		m->m_pkthdr.pkt_flags |= PKTF_WAKE_PKT;
	}
#endif /* (DEBUG || DEVELOPMENT) */

	imo = inp->inp_moptions;
	if (imo != NULL) {
		IMO_ADDREF(imo);
	}
	/*
	 * The domain lock is held across ip_output, so it is okay
	 * to pass the PCB cached route pointer directly to IP and
	 * the modules beneath it.
	 */
	// TODO: PASS DOWN ROUTE RULE ID
	error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
	    imo, &ipoa);

	if (imo != NULL) {
		IMO_REMREF(imo);
	}

	if (inp->inp_route.ro_rt != NULL) {
		struct rtentry *rt = inp->inp_route.ro_rt;
		struct ifnet *outif;

		if ((rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) ||
		    inp->inp_socket == NULL ||
#if CONTENT_FILTER
		    /* Discard temporary route for cfil case */
		    cfil_faddr_use ||
#endif
		    !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
			rt = NULL;	/* unusable */
		}
		/*
		 * Always discard the cached route for an unconnected
		 * socket or if it is a multicast route.
		 */
		if (rt == NULL) {
			ROUTE_RELEASE(&inp->inp_route);
		}

		/*
		 * If this is a connected socket and the destination
		 * route is unicast, update outif with that of the
		 * route interface used by IP.
		 */
		if (rt != NULL &&
		    (outif = rt->rt_ifp) != inp->inp_last_outifp) {
			inp->inp_last_outifp = outif;
		}
	} else {
		ROUTE_RELEASE(&inp->inp_route);
	}

	/*
	 * If the output interface was cellular/expensive/constrained, and
	 * this socket is denied access to it, generate an event.
	 */
	if (error != 0 && (ipoa.ipoa_flags & IPOAF_R_IFDENIED) &&
	    (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) {
		soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
	}

	return error;
}


/*
 * Raw IP socket option processing.
 */
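/*
 * Illustrative userspace sketch (not part of this file): options handled
 * directly below.  IP_STRIPHDR is an Apple extension that makes the input
 * path strip the IP header before the datagram is queued on the socket.
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, IP_STRIPHDR, &on, sizeof(on));
 */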
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	/* Allow <SOL_SOCKET,SO_FLUSH> at this level */
	if (sopt->sopt_level != IPPROTO_IP &&
	    !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
		return EINVAL;
	}

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_STRIPHDR:
			optval = inp->inp_flags & INP_STRIPHDR;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;


#if DUMMYNET
		case IP_DUMMYNET_GET:
			if (!DUMMYNET_LOADED) {
				ip_dn_init();
			}
			if (DUMMYNET_LOADED) {
				error = ip_dn_ctl_ptr(sopt);
			} else {
				error = ENOPROTOOPT;
			}
			break;
#endif /* DUMMYNET */

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error) {
				break;
			}
			if (optval) {
				inp->inp_flags |= INP_HDRINCL;
			} else {
				inp->inp_flags &= ~INP_HDRINCL;
			}
			break;

		case IP_STRIPHDR:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error) {
				break;
			}
			if (optval) {
				inp->inp_flags |= INP_STRIPHDR;
			} else {
				inp->inp_flags &= ~INP_STRIPHDR;
			}
			break;


#if DUMMYNET
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (!DUMMYNET_LOADED) {
				ip_dn_init();
			}
			if (DUMMYNET_LOADED) {
				error = ip_dn_ctl_ptr(sopt);
			} else {
				error = ENOPROTOOPT;
			}
			break;
#endif /* DUMMYNET */

		case SO_FLUSH:
			if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval))) != 0) {
				break;
			}

			error = inp_flush(inp, optval);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return error;
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(
	int cmd,
	struct sockaddr *sa,
	__unused void *vip,
	__unused struct ifnet *ifp)
{
	struct in_ifaddr *ia = NULL;
	struct ifnet *iaifp = NULL;
	int err = 0;
	int flags, done = 0;

	switch (cmd) {
	case PRC_IFDOWN:
		lck_rw_lock_shared(&in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa &&
			    (ia->ia_flags & IFA_ROUTE)) {
				done = 1;
				ifa_addref(&ia->ia_ifa);
				IFA_UNLOCK(&ia->ia_ifa);
				lck_rw_done(&in_ifaddr_rwlock);
				lck_mtx_lock(rnh_lock);
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 1);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 1);
				lck_mtx_unlock(rnh_lock);
				ifa_remref(&ia->ia_ifa);
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		if (!done) {
			lck_rw_done(&in_ifaddr_rwlock);
		}
		break;

	case PRC_IFUP:
		lck_rw_lock_shared(&in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa) {
				/* keep it locked */
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
		    (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
			if (ia != NULL) {
				IFA_UNLOCK(&ia->ia_ifa);
			}
			lck_rw_done(&in_ifaddr_rwlock);
			return;
		}
		ifa_addref(&ia->ia_ifa);
		IFA_UNLOCK(&ia->ia_ifa);
		lck_rw_done(&in_ifaddr_rwlock);

		flags = RTF_UP;
		iaifp = ia->ia_ifa.ifa_ifp;

		if ((iaifp->if_flags & IFF_LOOPBACK)
		    || (iaifp->if_flags & IFF_POINTOPOINT)) {
			flags |= RTF_HOST;
		}

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0) {
			IFA_LOCK_SPIN(&ia->ia_ifa);
			ia->ia_flags |= IFA_ROUTE;
			IFA_UNLOCK(&ia->ia_ifa);
		}
		ifa_remref(&ia->ia_ifa);
		break;
	}
}

u_int32_t rip_sendspace = RIPSNDQ;
u_int32_t rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
SYSCTL_UINT(_net_inet_raw, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    &ripcbinfo.ipi_count, 0, "Number of active PCBs");
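
/*
 * Illustrative userspace sketch (not part of this file): the knobs above
 * are reachable through sysctl(3), e.g.:
 *
 *	u_int32_t val = 0;
 *	size_t len = sizeof(val);
 *	sysctlbyname("net.inet.raw.maxdgram", &val, &len, NULL, 0);
 */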

static int
rip_attach(struct socket *so, int proto, struct proc *p)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	if (inp) {
		panic("rip_attach");
	}
	if ((so->so_state & SS_PRIV) == 0) {
		return EPERM;
	}
	if (proto > UINT8_MAX) {
		return EINVAL;
	}

	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error) {
		return error;
	}
	error = in_pcballoc(so, &ripcbinfo, p);
	if (error) {
		return error;
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	VERIFY(proto <= UINT8_MAX);
	inp->inp_ip_p = (u_char)proto;
	inp->inp_ip_ttl = (u_char)ip_defttl;
	return 0;
}

__private_extern__ int
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	if (inp == 0) {
		panic("rip_detach");
	}
	in_pcbdetach(inp);
	return 0;
}

__private_extern__ int
rip_abort(struct socket *so)
{
	soisdisconnected(so);
	return rip_detach(so);
}

__private_extern__ int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		return ENOTCONN;
	}
	return rip_abort(so);
}

__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in sin;
	struct ifaddr *ifa = NULL;
	struct ifnet *outif = NULL;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    ) {
		return inp == NULL ? EINVAL : EPROTOTYPE;
	}

	if (nam->sa_len != sizeof(struct sockaddr_in)) {
		return EINVAL;
	}

	/* Sanitized local copy for interface address searches */
	bzero(&sin, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

	if (TAILQ_EMPTY(&ifnet_head) ||
	    (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) ||
	    (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) {
		return EADDRNOTAVAIL;
	} else if (ifa) {
		/*
		 * Opportunistically determine the outbound
		 * interface that may be used; this may not
		 * hold true if we end up using a route
		 * going over a different interface, e.g.
		 * when sending to a local address.  This
		 * will get updated again after sending.
		 */
		IFA_LOCK(ifa);
		outif = ifa->ifa_ifp;
		IFA_UNLOCK(ifa);
		ifa_remref(ifa);
	}
	inp->inp_laddr = sin.sin_addr;
	inp->inp_last_outifp = outif;

	return 0;
}

__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)(void *)nam;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    ) {
		return inp == NULL ? EINVAL : EPROTOTYPE;
	}
	if (nam->sa_len != sizeof(*addr)) {
		return EINVAL;
	}
	if (TAILQ_EMPTY(&ifnet_head)) {
		return EADDRNOTAVAIL;
	}
	if ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK)) {
		return EAFNOSUPPORT;
	}

	if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
		so->so_flags1 |= SOF1_CONNECT_COUNTED;
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
	}

	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);

	return 0;
}

__private_extern__ int
rip_shutdown(struct socket *so)
{
	socantsendmore(so);
	return 0;
}

__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct proc *p)
{
#pragma unused(flags, p)
	struct inpcb *inp = sotoinpcb(so);
	u_int32_t dst = INADDR_ANY;
	int error = 0;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE))
#endif /* NECP */
	    ) {
		if (inp == NULL) {
			error = EINVAL;
		} else {
			error = EPROTOTYPE;
		}
		goto bad;
	}

	if (nam != NULL) {
		dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr;
	}
	return rip_output(m, so, dst, control);

bad:
	VERIFY(error != 0);

	if (m != NULL) {
		m_freem(m);
	}
	if (control != NULL) {
		m_freem(control);
	}

	return error;
}

/*
 * Note: rip_unlock is called from various protocols instead of the generic
 * socket_unlock; it handles the socket dealloc on last reference.
 */
int
rip_unlock(struct socket *so, int refcount, void *debug)
{
	void *lr_saved;
	struct inpcb *inp = sotoinpcb(so);

	if (debug == NULL) {
		lr_saved = __builtin_return_address(0);
	} else {
		lr_saved = debug;
	}

	if (refcount) {
		if (so->so_usecount <= 0) {
			panic("rip_unlock: bad refcount so=%p val=%x lrh= %s",
			    so, so->so_usecount, solockhistory_nr(so));
			/* NOTREACHED */
		}
		so->so_usecount--;
		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
			/* cleanup after last reference */
			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
			lck_rw_lock_exclusive(&ripcbinfo.ipi_lock);
			if (inp->inp_state != INPCB_STATE_DEAD) {
				if (SOCK_CHECK_DOM(so, PF_INET6)) {
					in6_pcbdetach(inp);
				} else {
					in_pcbdetach(inp);
				}
			}
			in_pcbdispose(inp);
			lck_rw_done(&ripcbinfo.ipi_lock);
			return 0;
		}
	}
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
	return 0;
}

static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n, sz;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(&ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n / 8) * sizeof(struct xinpcb);
		lck_rw_done(&ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	sz = n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
	if (inp_list == NULL) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			inp_list[i++] = inp;
		}
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xi.xi_inp);
			if (inp->inp_socket) {
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			}
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}

	lck_rw_done(&ripcbinfo.ipi_lock);
	kfree_type(struct inpcb *, sz, inp_list);
	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO /*XXX*/, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
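
/*
 * Illustrative userspace sketch (not part of this file): consuming the
 * pcblist sysctl above.  The reply is a struct xinpgen header, a run of
 * struct xinpcb records, then a trailing struct xinpgen whose xig_gen can
 * be compared with the leading one to detect changes mid-request.
 *
 *	size_t len = 0;
 *	if (sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0) == 0) {
 *		void *buf = malloc(len);
 *		if (buf != NULL &&
 *		    sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0) == 0) {
 *			// walk records, each xi_len bytes long
 *		}
 *	}
 */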

#if XNU_TARGET_OS_OSX

static int
rip_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n, sz;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(&ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n / 8) * sizeof(struct xinpcb64);
		lck_rw_done(&ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	sz = n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = kalloc_type(struct inpcb *, n, Z_WAITOK);
	if (inp_list == NULL) {
		lck_rw_done(&ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			inp_list[i++] = inp;
		}
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb64 xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			inpcb_to_xinpcb64(inp, &xi);
			if (inp->inp_socket) {
				sotoxsocket64(inp->inp_socket, &xi.xi_socket);
			}
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}

	lck_rw_done(&ripcbinfo.ipi_lock);
	kfree_type(struct inpcb *, sz, inp_list);
	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");

#endif /* XNU_TARGET_OS_OSX */


static int
rip_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);

	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
	.pru_abort = rip_abort,
	.pru_attach = rip_attach,
	.pru_bind = rip_bind,
	.pru_connect = rip_connect,
	.pru_control = in_control,
	.pru_detach = rip_detach,
	.pru_disconnect = rip_disconnect,
	.pru_peeraddr = in_getpeeraddr,
	.pru_send = rip_send,
	.pru_shutdown = rip_shutdown,
	.pru_sockaddr = in_getsockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive,
};
/* DSEP Review Done pl-20051213-v02 @3253 */