/*
 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)subr_log.c  8.3 (Berkeley) 2/14/95
 */

/*
 * Error log buffer for kernel printf's.
 */

#include <machine/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vnode.h>
#include <stdbool.h>
#include <firehose/tracepoint_private.h>
#include <firehose/chunk_private.h>
#include <firehose/ioctl_private.h>
#include <os/firehose_buffer_private.h>

#include <os/log_private.h>
#include <sys/ioctl.h>
#include <sys/msgbuf.h>
#include <sys/file_internal.h>
#include <sys/errno.h>
#include <sys/select.h>
#include <sys/kernel.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/simple_lock.h>
#include <sys/lock.h>
#include <sys/signalvar.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <kern/kalloc.h>
#include <pexpert/pexpert.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/task.h>
#include <kern/locks.h>

#define LOG_NBIO        0x02    /* non-blocking log reads */
#define LOG_ASYNC       0x04    /* send SIGIO when new data arrives */
#define LOG_RDWAIT      0x08    /* a reader is sleeping on the buffer */

/* All globals should be accessed under bsd_log_lock() or bsd_log_lock_safe() */

/* logsoftc only valid while log_open=1 */
struct logsoftc {
    int             sc_state;   /* see above for possibilities */
    struct selinfo  sc_selp;    /* thread waiting for select */
    int             sc_pgid;    /* process/group for async I/O */
    struct msgbuf   *sc_mbp;
} logsoftc;

char smsg_bufc[CONFIG_MSG_BSIZE]; /* static buffer */
struct msgbuf msgbuf = {.msg_magic = MSG_MAGIC, .msg_size = sizeof(smsg_bufc), .msg_bufx = 0, .msg_bufr = 0, .msg_bufc = smsg_bufc};
struct msgbuf *msgbufp __attribute__((used)) = &msgbuf;
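
/*
 * The message buffer is a simple ring: msg_bufx is the write index and
 * msg_bufr the read index, both wrapping at msg_size.  The buffer is treated
 * as empty when the two indices are equal (see logread() and
 * log_putc_locked() below).  A rough sketch of the invariant, matching the
 * FIONREAD computation in logioctl():
 *
 *     unread bytes = (msg_bufx - msg_bufr + msg_size) % msg_size
 */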

/* oslogsoftc only valid while oslog_open=true */
struct oslogsoftc {
    int             sc_state;   /* see above for possibilities */
    struct selinfo  sc_selp;    /* thread waiting for select */
    int             sc_pgid;    /* process/group for async I/O */
} oslogsoftc;

static bool log_open = false;
static bool oslog_open = false;
static bool os_log_wakeup = false;

uint32_t oslog_msgbuf_dropped_charcount = 0;

SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_firehose_addr = 0;
SECURITY_READ_ONLY_LATE(uint8_t) __firehose_buffer_kernel_chunk_count =
    FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
SECURITY_READ_ONLY_LATE(uint8_t) __firehose_num_kernel_io_pages =
    FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;

/* defined in osfmk/kern/printf.c */
extern bool bsd_log_lock(bool);
extern void bsd_log_lock_safe(void);
extern void bsd_log_unlock(void);

extern void logwakeup(void);
extern void oslogwakeup(void);
extern bool os_log_disabled(void);

/* XXX wants a linker set so these can be static */
extern d_open_t logopen;
extern d_close_t logclose;
extern d_read_t logread;
extern d_ioctl_t logioctl;
extern d_select_t logselect;

/* XXX wants a linker set so these can be static */
extern d_open_t oslogopen;
extern d_close_t oslogclose;
extern d_select_t oslogselect;
extern d_ioctl_t oslogioctl;

/*
 * Serialize log access.  Note that the log can be written at interrupt level,
 * so any log manipulations that can be done from, or affect, another processor
 * at interrupt level must be guarded with a spin lock.
 */
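/*
 * Typical locking pattern (an illustrative sketch, not a new interface):
 * callers that may run in a context where sleeping or lock contention is
 * unsafe use the try-variant bsd_log_lock(oslog_is_safe()) and drop the data
 * on failure, while ordinary thread-context callers block with
 * bsd_log_lock_safe():
 *
 *     if (!bsd_log_lock(oslog_is_safe())) {
 *         // cannot safely take the lock here; count the drop and bail
 *         os_atomic_inc(&oslog_msgbuf_dropped_charcount, relaxed);
 *         return;
 *     }
 *     // ... touch msgbufp / logsoftc ...
 *     bsd_log_unlock();
 */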

static int sysctl_kern_msgbuf(struct sysctl_oid *oidp,
    void *arg1, int arg2, struct sysctl_req *req);

/*ARGSUSED*/
int
logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p)
{
    bsd_log_lock_safe();

    if (log_open) {
        bsd_log_unlock();
        return EBUSY;
    }

    /*
     * Legacy logging has to be supported as long as userspace supports it.
     */
    if ((atm_get_diagnostic_config() & ATM_ENABLE_LEGACY_LOGGING)) {
        logsoftc.sc_mbp = msgbufp;
        logsoftc.sc_pgid = proc_getpid(p); /* signal process only */
        log_open = true;
        bsd_log_unlock();
        return 0;
    }

    bsd_log_unlock();
    return ENOTSUP;
}

/*ARGSUSED*/
int
logclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p)
{
    bsd_log_lock_safe();

    logsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC);
    selthreadclear(&logsoftc.sc_selp);
    log_open = false;

    bsd_log_unlock();
    return 0;
}

int
oslogopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p)
{
    bsd_log_lock_safe();
    if (oslog_open) {
        bsd_log_unlock();
        return EBUSY;
    }
    oslogsoftc.sc_pgid = proc_getpid(p); /* signal process only */
    oslog_open = true;

    bsd_log_unlock();
    return 0;
}

int
oslogclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p)
{
    bsd_log_lock_safe();
    oslogsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC);
    selthreadclear(&oslogsoftc.sc_selp);
    oslog_open = false;
    bsd_log_unlock();
    return 0;
}

/*ARGSUSED*/
int
logread(__unused dev_t dev, struct uio *uio, int flag)
{
    int error;
    ssize_t resid;

    bsd_log_lock_safe();

    struct msgbuf *mbp = logsoftc.sc_mbp;

    while (mbp->msg_bufr == mbp->msg_bufx) {
        if ((flag & IO_NDELAY) || (logsoftc.sc_state & LOG_NBIO)) {
            bsd_log_unlock();
            return EWOULDBLOCK;
        }
        logsoftc.sc_state |= LOG_RDWAIT;
        bsd_log_unlock();
        /*
         * If the wakeup is missed then wait for 5 sec and reevaluate.
         * If it times out, carry on.
         */
        error = tsleep((caddr_t)mbp, (PZERO + 1) | PCATCH, "klog", 5 * hz);
        if (error && error != EWOULDBLOCK) {
            return error;
        }
        bsd_log_lock_safe();
    }
    logsoftc.sc_state &= ~LOG_RDWAIT;

    while ((resid = uio_resid(uio)) > 0) {
        size_t l;

        if (mbp->msg_bufx >= mbp->msg_bufr) {
            l = mbp->msg_bufx - mbp->msg_bufr;
        } else {
            l = mbp->msg_size - mbp->msg_bufr;
        }
        if ((l = MIN(l, (size_t)resid)) == 0) {
            break;
        }

        const size_t readpos = mbp->msg_bufr;

        bsd_log_unlock();
        error = uiomove((caddr_t)&mbp->msg_bufc[readpos], (int)l, uio);
        if (error) {
            return error;
        }
        bsd_log_lock_safe();

        mbp->msg_bufr = (int)(readpos + l);
        if (mbp->msg_bufr >= mbp->msg_size) {
            mbp->msg_bufr = 0;
        }
    }

    bsd_log_unlock();
    return 0;
}
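
/*
 * For reference, a minimal userspace consumer of the legacy log device might
 * look roughly like the sketch below (illustrative only; it assumes the
 * device node is /dev/klog and that legacy logging is enabled, otherwise
 * logopen() above returns ENOTSUP):
 *
 *     #include <fcntl.h>
 *     #include <unistd.h>
 *
 *     int fd = open("/dev/klog", O_RDONLY);      // served by logopen()/logread()
 *     if (fd >= 0) {
 *         char buf[1024];
 *         ssize_t n = read(fd, buf, sizeof(buf)); // blocks until data arrives
 *         if (n > 0) {
 *             write(STDOUT_FILENO, buf, (size_t)n);
 *         }
 *         close(fd);
 *     }
 */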

/*ARGSUSED*/
int
logselect(__unused dev_t dev, int rw, void * wql, struct proc *p)
{
    if (rw != FREAD) {
        return 0;
    }

    bsd_log_lock_safe();
    if (logsoftc.sc_mbp->msg_bufr == logsoftc.sc_mbp->msg_bufx) {
        selrecord(p, &logsoftc.sc_selp, wql);
        bsd_log_unlock();
        return 0;
    }
    bsd_log_unlock();

    return 1;
}

int
oslogselect(__unused dev_t dev, int rw, void * wql, struct proc *p)
{
    if (rw != FREAD) {
        return 0;
    }

    bsd_log_lock_safe();
    if (os_log_wakeup) {
        bsd_log_unlock();
        return 1;
    }
    selrecord(p, &oslogsoftc.sc_selp, wql);
    bsd_log_unlock();

    return 0;
}

void
logwakeup(void)
{
    /*
     * Legacy logging is rarely enabled during a typical system run.  Check
     * log_open without taking a lock as a shortcut.
     */
    if (!log_open || !oslog_is_safe()) {
        return;
    }

    bsd_log_lock_safe();

    if (!log_open) {
        bsd_log_unlock();
        return;
    }

    selwakeup(&logsoftc.sc_selp);
    if (logsoftc.sc_state & LOG_ASYNC) {
        int pgid = logsoftc.sc_pgid;
        bsd_log_unlock();
        if (pgid < 0) {
            gsignal(-pgid, SIGIO);
        } else {
            proc_signal(pgid, SIGIO);
        }
        bsd_log_lock_safe();
    }

    if (log_open && (logsoftc.sc_state & LOG_RDWAIT)) {
        wakeup((caddr_t)logsoftc.sc_mbp);
        logsoftc.sc_state &= ~LOG_RDWAIT;
    }

    bsd_log_unlock();
}

void
oslogwakeup(void)
{
    if (!oslog_is_safe()) {
        return;
    }

    bsd_log_lock_safe();
    if (!oslog_open) {
        bsd_log_unlock();
        return;
    }
    selwakeup(&oslogsoftc.sc_selp);
    os_log_wakeup = true;
    bsd_log_unlock();
}

/*ARGSUSED*/
int
logioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unused struct proc *p)
{
    bsd_log_lock_safe();

    const struct msgbuf *mbp = logsoftc.sc_mbp;
    int l;

    switch (com) {
    /* return number of characters immediately available */
    case FIONREAD:
        l = mbp->msg_bufx - mbp->msg_bufr;
        if (l < 0) {
            l += mbp->msg_size;
        }
        *(off_t *)data = l;
        break;

    case FIONBIO:
        if (*(int *)data) {
            logsoftc.sc_state |= LOG_NBIO;
        } else {
            logsoftc.sc_state &= ~LOG_NBIO;
        }
        break;

    case FIOASYNC:
        if (*(int *)data) {
            logsoftc.sc_state |= LOG_ASYNC;
        } else {
            logsoftc.sc_state &= ~LOG_ASYNC;
        }
        break;

    case TIOCSPGRP:
        logsoftc.sc_pgid = *(int *)data;
        break;

    case TIOCGPGRP:
        *(int *)data = logsoftc.sc_pgid;
        break;

    default:
        bsd_log_unlock();
        return -1;
    }

    bsd_log_unlock();
    return 0;
}
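
/*
 * A hedged sketch of how a userspace reader could arm the LOG_ASYNC path
 * above (illustrative only, reusing the fd from the earlier /dev/klog
 * sketch): enable async notification with FIOASYNC and register a pid (or a
 * negated process group id, see the sign handling in logwakeup()) with
 * TIOCSPGRP, after which new log data raises SIGIO.
 *
 *     #include <sys/ioctl.h>
 *     #include <unistd.h>
 *
 *     int on = 1;
 *     int pgid = getpid();          // > 0: deliver SIGIO to this pid
 *     ioctl(fd, FIOASYNC, &on);     // sets LOG_ASYNC
 *     ioctl(fd, TIOCSPGRP, &pgid);  // stored in logsoftc.sc_pgid
 */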

/*ARGSUSED*/
int
oslogioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unused struct proc *p)
{
    int ret = 0;
    mach_vm_size_t buffer_size = (__firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE);
    firehose_buffer_map_info_t map_info = {0, 0};
    firehose_buffer_t kernel_firehose_buffer = NULL;
    mach_vm_address_t user_addr = 0;
    mach_port_t mem_entry_ptr = MACH_PORT_NULL;
    bool has_more;

    switch (com) {
    /* return number of characters immediately available */

    case LOGBUFFERMAP:
        kernel_firehose_buffer = (firehose_buffer_t)kernel_firehose_addr;

        ret = mach_make_memory_entry_64(kernel_map,
            &buffer_size,
            (mach_vm_offset_t) kernel_firehose_buffer,
            (MAP_MEM_VM_SHARE | VM_PROT_READ),
            &mem_entry_ptr,
            MACH_PORT_NULL);
        if (ret == KERN_SUCCESS) {
            ret = mach_vm_map_kernel(get_task_map(current_task()),
                &user_addr,
                buffer_size,
                0, /* mask */
                VM_MAP_KERNEL_FLAGS_ANYWHERE(),
                mem_entry_ptr,
                0, /* offset */
                FALSE, /* copy */
                VM_PROT_READ,
                VM_PROT_READ,
                VM_INHERIT_SHARE);
        }

        if (ret == KERN_SUCCESS) {
            map_info.fbmi_addr = (uint64_t) (user_addr);
            map_info.fbmi_size = buffer_size;
            bcopy(&map_info, data, sizeof(firehose_buffer_map_info_t));
        }
        break;
    case LOGFLUSHED:
        has_more = __firehose_merge_updates(*(firehose_push_reply_t *)(data));
        bsd_log_lock_safe();
        os_log_wakeup = has_more;
        if (os_log_wakeup) {
            selwakeup(&oslogsoftc.sc_selp);
        }
        bsd_log_unlock();
        break;
    default:
        return -1;
    }
    return 0;
}
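
/*
 * Rough outline of the expected userspace flow for the oslog device (a
 * sketch under assumptions: the node name /dev/oslog and the open mode are
 * not defined by this file; only the ioctls handled above are used, and
 * oslogopen() admits a single client at a time):
 *
 *     firehose_buffer_map_info_t info;
 *     int fd = open("/dev/oslog", O_RDWR);
 *     ioctl(fd, LOGBUFFERMAP, &info);        // map the kernel firehose buffer read-only
 *     // ... select(fd) for readability, drain chunks starting at info.fbmi_addr ...
 *     firehose_push_reply_t reply = ...;     // progress made by the consumer
 *     ioctl(fd, LOGFLUSHED, &reply);         // tell the kernel what was flushed
 */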

__startup_func
static void
oslog_init_firehose(void)
{
    if (os_log_disabled()) {
        printf("Firehose disabled: Logging disabled by ATM\n");
        return;
    }

    if (!PE_parse_boot_argn("firehose_chunk_count", &__firehose_buffer_kernel_chunk_count, sizeof(__firehose_buffer_kernel_chunk_count))) {
        __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
    }
    if (!PE_parse_boot_argn("firehose_io_pages", &__firehose_num_kernel_io_pages, sizeof(__firehose_num_kernel_io_pages))) {
        __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
    }
    if (!__firehose_kernel_configuration_valid(__firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages)) {
        printf("illegal firehose configuration %u/%u, using defaults\n", __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages);
        __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
        __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
    }
    vm_size_t size = __firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE;

    kmem_alloc(kernel_map, &kernel_firehose_addr, size + ptoa(2),
        KMA_NOFAIL | KMA_PERMANENT | KMA_GUARD_FIRST | KMA_GUARD_LAST |
        KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_LOG);

    kernel_firehose_addr += PAGE_SIZE;
    /* register buffer with firehose */
    kernel_firehose_addr = (vm_offset_t)__firehose_buffer_create((size_t *) &size);

    printf("Firehose configured: %u chunks, %u io pages\n",
        __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages);
}
STARTUP(OSLOG, STARTUP_RANK_SECOND, oslog_init_firehose);
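
/*
 * Both knobs parsed above can be supplied via the boot-args, e.g.
 * (illustrative values only, still subject to
 * __firehose_kernel_configuration_valid()):
 *
 *     nvram boot-args="firehose_chunk_count=64 firehose_io_pages=16"
 */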

/*
 * log_putc_locked
 *
 * Description: Output a character to the log; assumes the bsd_log_lock() or
 *              bsd_log_lock_safe() is held by the caller.
 *
 * Parameters:  mbp     Message buffer to write to
 *              c       Character to output
 *
 * Returns:     (void)
 *
 * Notes:       This function is used for multibyte output to the log; it
 *              should be used preferentially where possible to ensure that
 *              log entries do not end up interspersed due to preemption or
 *              SMP reentrancy.
 */
void
log_putc_locked(struct msgbuf *mbp, char c)
{
    mbp->msg_bufc[mbp->msg_bufx++] = c;
    if (mbp->msg_bufx >= mbp->msg_size) {
        mbp->msg_bufx = 0;
    }
}
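
/*
 * For example (a sketch of the intended usage, not a new interface): a caller
 * emitting a whole message takes the log lock once and feeds every byte
 * through log_putc_locked(), so the entry cannot be interleaved with output
 * from another CPU:
 *
 *     bsd_log_lock_safe();
 *     for (const char *cp = msg; *cp != '\0'; cp++) {
 *         log_putc_locked(msgbufp, *cp);
 *     }
 *     bsd_log_unlock();
 *     logwakeup();    // nudge any /dev/klog reader afterwards
 */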

/*
 * log_putc
 *
 * Description: Output a character to the log; assumes the bsd_log_lock() or
 *              bsd_log_lock_safe() is NOT held by the caller.
 *
 * Parameters:  c       Character to output
 *
 * Returns:     (void)
 *
 * Notes:       This function is used for single byte output to the log.  It
 *              primarily exists to maintain binary backward compatibility.
 */
void
log_putc(char c)
{
    if (!bsd_log_lock(oslog_is_safe())) {
        os_atomic_inc(&oslog_msgbuf_dropped_charcount, relaxed);
        return;
    }

    log_putc_locked(msgbufp, c);
    int unread_count = msgbufp->msg_bufx - msgbufp->msg_bufr;

    bsd_log_unlock();

    if (unread_count < 0) {
        unread_count = 0 - unread_count;
    }
    if (c == '\n' || unread_count >= (msgbufp->msg_size / 2)) {
        logwakeup();
    }
}

/*
 * The kernel log buffer size can be increased by adding
 *     msgbuf=n
 * to the kernel command line; the current size can be read (and changed) with
 *     sysctl kern.msgbuf
 * If there is no parameter on the kernel command line, the buffer is
 * allocated statically and is CONFIG_MSG_BSIZE characters in size; otherwise
 * memory is dynamically allocated.  Memory management must already be up.
 */
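/*
 * For instance, from userspace (illustrative only):
 *
 *     $ sysctl kern.msgbuf                 # read the current buffer size
 *     $ sudo sysctl kern.msgbuf=131072     # resize via sysctl_kern_msgbuf()/log_setsize()
 *
 * or programmatically:
 *
 *     int size = 0;
 *     size_t len = sizeof(size);
 *     sysctlbyname("kern.msgbuf", &size, &len, NULL, 0);
 */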
static int
log_setsize(size_t size)
{
    int i, count;
    char *p;

    if (size == 0 || size > MAX_MSG_BSIZE) {
        return EINVAL;
    }

    int new_logsize = (int)size;
    char *new_logdata = kalloc_data(size, Z_WAITOK | Z_ZERO);
    if (!new_logdata) {
        printf("Cannot resize system message buffer: Not enough memory\n");
        return ENOMEM;
    }

    bsd_log_lock_safe();

    char *old_logdata = msgbufp->msg_bufc;
    int old_logsize = msgbufp->msg_size;
    int old_bufr = msgbufp->msg_bufr;
    int old_bufx = msgbufp->msg_bufx;

    /* start "new_logsize" bytes before the write pointer */
    if (new_logsize <= old_bufx) {
        count = new_logsize;
        p = old_logdata + old_bufx - count;
    } else {
        /*
         * if the new buffer is bigger, copy what we have and let the
         * zero-filled (Z_ZERO) allocation above handle the difference
         */
        count = MIN(new_logsize, old_logsize);
        p = old_logdata + old_logsize - (count - old_bufx);
    }
    for (i = 0; i < count; i++) {
        if (p >= old_logdata + old_logsize) {
            p = old_logdata;
        }
        new_logdata[i] = *p++;
    }

    int new_bufx = i;
    if (new_bufx >= new_logsize) {
        new_bufx = 0;
    }
    msgbufp->msg_bufx = new_bufx;

    int new_bufr = old_bufx - old_bufr; /* how much were we trailing bufx by? */
    if (new_bufr < 0) {
        new_bufr += old_logsize;
    }
    new_bufr = new_bufx - new_bufr; /* now relative to oldest data in new buffer */
    if (new_bufr < 0) {
        new_bufr += new_logsize;
    }
    msgbufp->msg_bufr = new_bufr;

    msgbufp->msg_size = new_logsize;
    msgbufp->msg_bufc = new_logdata;

    bsd_log_unlock();

    /*
     * This memory is now dead - clear it so that it compresses better
     * in case of suspend to disk etc.
     */
    bzero(old_logdata, old_logsize);
    if (old_logdata != smsg_bufc) {
        /* dynamic memory that must be freed */
        kfree_data(old_logdata, old_logsize);
    }

    printf("System message buffer configured: %lu bytes\n", size);

    return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, msgbuf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_kern_msgbuf, "I", "");

static int
sysctl_kern_msgbuf(struct sysctl_oid *oidp __unused,
    void *arg1 __unused, int arg2 __unused, struct sysctl_req *req)
{
    int old_bufsize, bufsize;
    int error;

    bsd_log_lock_safe();
    old_bufsize = bufsize = msgbufp->msg_size;
    bsd_log_unlock();

    error = sysctl_io_number(req, bufsize, sizeof(bufsize), &bufsize, NULL);
    if (error) {
        return error;
    }

    if (bufsize < 0) {
        return EINVAL;
    }

    if (bufsize != old_bufsize) {
        error = log_setsize(bufsize);
    }

    return error;
}

/*
 * This should be called by /sbin/dmesg only via libproc.
 * It returns as much data still in the buffer as possible.
 */
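/*
 * (Userspace is not expected to call this directly; a dmesg-style reader
 * would go through a libproc wrapper, e.g. something like proc_kmsgbuf().
 * The sketch below is illustrative only and the exact libproc entry point
 * and its return semantics are assumptions, not defined by this file:
 *
 *     char buf[16 * 1024];
 *     int n = proc_kmsgbuf(buf, sizeof(buf));   // assumed wrapper over this path
 * )
 */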
int
log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t *retval)
{
    uint32_t i;
    uint32_t localbuff_size;
    int error = 0, newl, skip;
    char *localbuff, *p, *copystart, ch;
    size_t copysize;

    bsd_log_lock_safe();
    localbuff_size = (msgbufp->msg_size + 2); /* + '\n' + '\0' */
    bsd_log_unlock();

    /* Allocate a temporary non-circular buffer for copyout */
    localbuff = kalloc_data(localbuff_size, Z_WAITOK);
    if (!localbuff) {
        printf("log_dmesg: unable to allocate memory\n");
        return ENOMEM;
    }

    /* in between here, the log could become bigger, but that's fine */
    bsd_log_lock_safe();

    /*
     * The message buffer is circular; start at the write pointer, and
     * make one loop up to write pointer - 1.
     */
    p = msgbufp->msg_bufc + msgbufp->msg_bufx;
    for (i = newl = skip = 0; p != msgbufp->msg_bufc + msgbufp->msg_bufx - 1; ++p) {
        if (p >= msgbufp->msg_bufc + msgbufp->msg_size) {
            p = msgbufp->msg_bufc;
        }
        ch = *p;
        /* Skip "\n<.*>" syslog sequences. */
        if (skip) {
            if (ch == '>') {
                newl = skip = 0;
            }
            continue;
        }
        if (newl && ch == '<') {
            skip = 1;
            continue;
        }
        if (ch == '\0') {
            continue;
        }
        newl = (ch == '\n');
        localbuff[i++] = ch;
        /* The original version of this routine contained a buffer
         * overflow.  At the time, a "small" targeted fix was desired
         * so the change below to check the buffer bounds was made.
         * TODO: rewrite this needlessly convoluted routine.
         */
        if (i == (localbuff_size - 2)) {
            break;
        }
    }
    if (!newl) {
        localbuff[i++] = '\n';
    }
    localbuff[i++] = 0;

    if (buffersize >= i) {
        copystart = localbuff;
        copysize = i;
    } else {
        copystart = localbuff + i - buffersize;
        copysize = buffersize;
    }

    bsd_log_unlock();

    error = copyout(copystart, buffer, copysize);
    if (!error) {
        *retval = (int32_t)copysize;
    }

    kfree_data(localbuff, localbuff_size);
    return error;
}

#ifdef CONFIG_XNUPOST

size_t find_pattern_in_buffer(const char *, size_t, size_t);

/*
 * Returns the number of occurrences of pattern found in the system log buffer.
 * Stops searching once the count reaches expected_count.
 */
size_t
find_pattern_in_buffer(const char *pattern, size_t len, size_t expected_count)
{
    if (pattern == NULL || len == 0 || expected_count == 0) {
        return 0;
    }

    size_t msg_bufx = msgbufp->msg_bufx;
    size_t msg_size = msgbufp->msg_size;
    size_t match_count = 0;

    for (size_t i = 0; i < msg_size; i++) {
        boolean_t match = TRUE;
        for (size_t j = 0; j < len; j++) {
            size_t pos = (msg_bufx + i + j) % msg_size;
            if (msgbufp->msg_bufc[pos] != pattern[j]) {
                match = FALSE;
                break;
            }
        }
        if (match && ++match_count >= expected_count) {
            break;
        }
    }

    return match_count;
}
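
/*
 * Illustrative use (e.g. from an XNUPOST test; the pattern string is
 * hypothetical):
 *
 *     size_t hits = find_pattern_in_buffer("test-marker", strlen("test-marker"), 1);
 *     // hits >= 1 once "test-marker" has been printf()'d into the message buffer
 */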

__startup_func
static void
oslog_init_msgbuf(void)
{
    size_t msgbuf_size = 0;

    if (PE_parse_boot_argn("msgbuf", &msgbuf_size, sizeof(msgbuf_size))) {
        (void) log_setsize(msgbuf_size);
    }
}
STARTUP(OSLOG, STARTUP_RANK_SECOND, oslog_init_msgbuf);

#endif