/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef __x86_64__
#include <i386/mp.h>
#include <i386/cpu_data.h>
#include <i386/bit_routines.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <i386/serial_io.h>
#endif /* __x86_64__ */

#include <libkern/OSAtomic.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <console/video_console.h>
#include <console/serial_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/cpu_data.h>
#include <libkern/section_keywords.h>

#if __arm__ || __arm64__
#include <machine/machine_routines.h>
#include <arm/cpu_data_internal.h>
#endif

#ifdef CONFIG_XNUPOST
#include <tests/xnupost.h>
kern_return_t console_serial_test(void);
kern_return_t console_serial_alloc_rel_tests(void);
kern_return_t console_serial_parallel_log_tests(void);
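
/*
 * The XNUPOST tests below exercise console_cpu_alloc()/console_cpu_free()
 * on two threads running in parallel with the regular per-cpu buffers,
 * hence the two spare slots.
 */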
#define MAX_CPU_SLOTS (MAX_CPUS + 2)
#endif

#ifndef MAX_CPU_SLOTS
#define MAX_CPU_SLOTS (MAX_CPUS)
#endif

static struct {
    char * buffer;    /* ring buffer, followed by the per-cpu buffers */
    int len;          /* ring capacity in bytes (KERN_CONSOLE_RING_SIZE) */
    int used;         /* bytes currently queued in the ring */
    char * write_ptr; /* producer position */
    char * read_ptr;  /* consumer position */
    decl_simple_lock_data(, read_lock);  /* serializes drainers */
    decl_simple_lock_data(, write_lock); /* protects used/write_ptr/read_ptr */
} console_ring;

hw_lock_data_t cnputc_lock; /* serializes access to the console device, see _cnputs() */
static volatile uint32_t console_output = 0; /* non-zero while ring data is being written to the device */

/*
 * New allocation mechanism for console buffers
 * Total allocation: 1 * PAGE_SIZE
 * - Each cpu gets a CPU_CONS_BUF_SIZE buffer
 * - The kernel-wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
 *
 * On return from console_init() the memory is set up as follows:
 * +----------------------------+-------------+-------------+-------------+-------------+
 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
 * +----------------------------+-------------+-------------+-------------+-------------+
 * Each cpu allocation finds the first free slot (marked f2eec075) and uses that buffer.
 *
 */

#define CPU_CONS_BUF_SIZE 256
#define CPU_BUF_FREE_HEX 0xf2eec075

#define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE * (MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
#define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))
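
/*
 * Worked example (illustrative only; actual values depend on the target's
 * PAGE_SIZE and MAX_CPUS): with CPU_CONS_BUF_SIZE = 256, MAX_CPU_SLOTS = 8
 * and PAGE_SIZE = 4096, the raw need is 256 * (8 + 1) = 2304 bytes, which
 * vm_map_round_page() rounds up to KERN_CONSOLE_BUF_SIZE = 4096. The ring
 * then gets KERN_CONSOLE_RING_SIZE = 4096 - (256 * 8) = 2048 bytes.
 */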

/*
 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
 * Synchronous serial logging with preemption+interrupts disabled fundamentally prevents us
 * from hitting expected scheduling deadlines, but we can at least tone it down a bit.
 *
 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
 *
 * Keep interrupt-disabled periods shorter than 1 ms.
 */
#define MAX_INT_DISABLED_FLUSH_SIZE 8
#define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)
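
/*
 * Rationale for the value 8: at ~11.5 characters per millisecond, flushing
 * MAX_INT_DISABLED_FLUSH_SIZE (8) characters costs roughly 0.7 ms with
 * interrupts disabled, which stays under the 1 ms budget above.
 */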

typedef struct console_buf {
    char * buf_base;
    char * buf_end;
    char * buf_ptr;
#define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
    char buf[CPU_BUFFER_LEN];
} console_buf_t;
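
/*
 * Note: buf fills the remainder of the slot, so sizeof(console_buf_t) is
 * exactly CPU_CONS_BUF_SIZE. For example, on LP64 the three bookkeeping
 * pointers take 24 bytes, leaving CPU_BUFFER_LEN = 256 - 24 = 232 bytes
 * of character storage.
 */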

extern int serial_getc(void);
extern void serial_putc(char);

static void _serial_putc(int, int, int);

SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
    {
        .putc = _serial_putc, .getc = _serial_getc,
    },
    {
        .putc = vcputc, .getc = vcgetc,
    },
};

SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]);

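/*
 * Index into cons_ops[]: start on the video console (VC_CONS_OPS); the
 * serial console setup path switches this to the serial entry
 * (SERIAL_CONS_OPS, see serial_protos.h) when a serial console is enabled.
 */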
uint32_t cons_ops_index = VC_CONS_OPS;

#if defined(__x86_64__) || defined(__arm__)
// NMI static variables
#define NMI_STRING_SIZE 32
char nmi_string[NMI_STRING_SIZE] = "afDIGHr84A84jh19Kphgp428DNPdnapq";
static int nmi_counter = 0;
#endif /* defined(__x86_64__) || defined(__arm__) */

static bool console_suspended = false;

/* Wrapper for ml_set_interrupts_enabled */
static void
console_restore_interrupts_state(boolean_t state)
{
#if INTERRUPT_MASKED_DEBUG
    /*
     * The serial console holds interrupts disabled for far too long
     * and would trip the spin-debugger. If we are about to reenable
     * interrupts, clear the timer and avoid panicking on the delay.
     * Otherwise, let the code that printed with interrupts disabled
     * take the panic when it reenables interrupts.
     * Hopefully one day this is fixed so that this workaround is unnecessary.
     */
    if (state == TRUE)
        ml_spin_debug_clear_self();
#endif /* INTERRUPT_MASKED_DEBUG */
    ml_set_interrupts_enabled(state);
}

static void
console_ring_lock_init(void)
{
    simple_lock_init(&console_ring.read_lock, 0);
    simple_lock_init(&console_ring.write_lock, 0);
}

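/*
 * console_init() is reached from several early-boot paths, including
 * console_cpu_alloc() below. The compare-and-swap on console_ring.len
 * guarantees that only the first caller allocates and initializes the
 * ring; later callers return immediately.
 */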
void
console_init(void)
{
    int ret, i;
    uint32_t * p;

    if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len))
        return;

    assert(console_ring.len > 0);

    ret = kmem_alloc(kernel_map, (vm_offset_t *)&console_ring.buffer, KERN_CONSOLE_BUF_SIZE, VM_KERN_MEMORY_OSFMK);
    if (ret != KERN_SUCCESS) {
        panic("console_init() failed to allocate ring buffer, error %d\n", ret);
    }

    /* set up memory for the per-cpu console buffers */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        *p = CPU_BUF_FREE_HEX;
    }

    console_ring.used = 0;
    console_ring.read_ptr = console_ring.buffer;
    console_ring.write_ptr = console_ring.buffer;
    console_ring_lock_init();
    hw_lock_init(&cnputc_lock);
}

void *
console_cpu_alloc(__unused boolean_t boot_processor)
{
    console_buf_t * cbp;
    int i;
    uint32_t * p = NULL;

    console_init();
    assert(console_ring.buffer != NULL);

    /* select the next free slot from the per-cpu buffers at the end of console_ring.buffer */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p))
            break;
    }
    assert(i < MAX_CPU_SLOTS);

    cbp = (console_buf_t *)(uintptr_t)p;
    if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
        printf("console_cpu_alloc() failed to allocate cpu buffer\n");
        return NULL;
    }

    cbp->buf_base = (char *)&cbp->buf;
    cbp->buf_ptr = cbp->buf_base;
    cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
    return (void *)cbp;
}

void
console_cpu_free(void * buf)
{
    assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
    assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
    if (buf != NULL)
        *(uint32_t *)buf = CPU_BUF_FREE_HEX;
}

static inline int
console_ring_space(void)
{
    return console_ring.len - console_ring.used;
}

static boolean_t
console_ring_put(char ch)
{
    if (console_ring.used < console_ring.len) {
        console_ring.used++;
        *console_ring.write_ptr++ = ch;
        if (console_ring.write_ptr - console_ring.buffer == console_ring.len)
            console_ring.write_ptr = console_ring.buffer;
        return TRUE;
    } else {
        return FALSE;
    }
}

static inline boolean_t
cpu_buffer_put(console_buf_t * cbp, char ch)
{
    if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) {
        *(cbp->buf_ptr++) = ch;
        return TRUE;
    } else {
        return FALSE;
    }
}

static inline int
cpu_buffer_size(console_buf_t * cbp)
{
    return (int)(cbp->buf_ptr - cbp->buf_base);
}

static inline void
_cnputs(char * c, int size)
{
    /* The console device output routines are assumed to be
     * non-reentrant.
     */
#ifdef __x86_64__
    uint32_t lock_timeout_ticks = UINT32_MAX;
#else
    uint32_t lock_timeout_ticks = LockTimeOut;
#endif

    mp_disable_preemption();
    if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks)) {
        /* If we timed out on the lock, and we're in the debugger,
         * copy lock data for debugging and break the lock.
         */
        hw_lock_data_t _shadow_lock;
        memcpy(&_shadow_lock, &cnputc_lock, sizeof(cnputc_lock));
        if (kernel_debugger_entry_count) {
            /* Since hw_lock_to takes a pre-emption count... */
            mp_enable_preemption();
            hw_lock_init(&cnputc_lock);
            hw_lock_lock(&cnputc_lock);
        } else {
            panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock,
                  _shadow_lock.lock_data, current_thread());
        }
    }

    while (size-- > 0) {
        cons_ops[cons_ops_index].putc(0, 0, *c);
        if (*c == '\n')
            cons_ops[cons_ops_index].putc(0, 0, '\r');
        c++;
    }

    hw_lock_unlock(&cnputc_lock);
    mp_enable_preemption();
}

void
cnputc_unbuffered(char c)
{
    _cnputs(&c, 1);
}

void
cnputcusr(char c)
{
    cnputsusr(&c, 1);
}

void
cnputsusr(char *s, int size)
{
    if (size > 1) {
        console_write(s, size);
        return;
    }

    boolean_t state;

    /* Spin (with pre-emption enabled) waiting for console_ring_try_empty()
     * to complete output. There is a small window here where we could
     * end up with a stale value of console_output, but it's unlikely,
     * and _cnputs(), which outputs to the console device, is internally
     * synchronized. There's something of a conflict between the
     * character-at-a-time (with pre-emption enabled) unbuffered
     * output model here and the buffered output from cnputc(),
     * whose consumers include printf() (which outputs a sequence
     * with pre-emption disabled, and should be safe to call with
     * interrupts off); we don't want to disable pre-emption indefinitely
     * here, and spinlocks and mutexes are inappropriate.
     */
    while (console_output != 0) {
        delay(1);
    }

    /*
     * We disable interrupts to avoid issues caused by rendezvous IPIs
     * and an interruptible core holding the lock while an uninterruptible
     * core wants it. Stackshot is the prime example of this.
     */
    state = ml_set_interrupts_enabled(FALSE);
    _cnputs(s, 1);
    console_restore_interrupts_state(state);
}

static void
console_ring_try_empty(void)
{
#ifdef __x86_64__
    boolean_t handle_tlb_flushes = (ml_get_interrupts_enabled() == FALSE);
#endif /* __x86_64__ */

    int nchars_out = 0;
    int total_chars_out = 0;
    int size_before_wrap = 0;

    do {
#ifdef __x86_64__
        if (handle_tlb_flushes)
            handle_pending_TLB_flushes();
#endif /* __x86_64__ */

        /*
         * Try to get the read lock on the ring buffer to empty it.
         * If this fails, someone else is already emptying...
         */
        if (!simple_lock_try(&console_ring.read_lock)) {
            /*
             * If multiple cores are spinning trying to empty the buffer,
             * we may suffer lock starvation (get the read lock, but
             * never the write lock, with other cores unable to get the
             * read lock). As a result, insert a delay on failure, to
             * let other cores have a turn.
             */
            delay(1);
            return;
        }

        boolean_t state = ml_set_interrupts_enabled(FALSE);

        /* Indicate that we're in the process of writing a block of data to the console. */
        (void)hw_atomic_add(&console_output, 1);

        simple_lock_try_lock_loop(&console_ring.write_lock);

        /* Try a small chunk at a time, so we allow writes from other cpus into the buffer. */
        nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);

        /* Account for data to be read before wrap-around. */
        size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
        if (nchars_out > size_before_wrap)
            nchars_out = size_before_wrap;

        if (nchars_out > 0) {
            _cnputs(console_ring.read_ptr, nchars_out);
            console_ring.read_ptr =
                console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
            console_ring.used -= nchars_out;
            total_chars_out += nchars_out;
        }

        simple_unlock(&console_ring.write_lock);

        (void)hw_atomic_sub(&console_output, 1);

        simple_unlock(&console_ring.read_lock);

        console_restore_interrupts_state(state);

        /*
         * In case we end up being the console drain thread
         * for far too long, break out. Except in panic/suspend cases,
         * where we should clear out the full buffer.
         */
        if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE))
            break;

    } while (nchars_out > 0);
}


void
console_suspend()
{
    console_suspended = true;
    console_ring_try_empty();
}

void
console_resume()
{
    console_suspended = false;
}

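/*
 * Copy a block of characters into the shared ring, draining the ring
 * (with interrupts briefly disabled) whenever there is not enough space,
 * then kick off a final flush. Writes are chunked so that other cpus can
 * interleave their output.
 */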
void
console_write(char * str, int size)
{
    console_init();
    int chunk_size = size;
    int i = 0;

    if (size > console_ring.len)
        chunk_size = CPU_CONS_BUF_SIZE;

    while (size > 0) {
        /* Never copy more than the caller has left to write. */
        if (chunk_size > size)
            chunk_size = size;

        boolean_t state = ml_set_interrupts_enabled(FALSE);

        simple_lock_try_lock_loop(&console_ring.write_lock);
        while (chunk_size > console_ring_space()) {
            simple_unlock(&console_ring.write_lock);
            console_restore_interrupts_state(state);

            console_ring_try_empty();

            state = ml_set_interrupts_enabled(FALSE);
            simple_lock_try_lock_loop(&console_ring.write_lock);
        }

        for (i = 0; i < chunk_size; i++)
            console_ring_put(str[i]);

        str = &str[i];
        size -= chunk_size;
        simple_unlock(&console_ring.write_lock);
        console_restore_interrupts_state(state);
    }

    console_ring_try_empty();
}

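/*
 * Buffered single-character output: the character accumulates in the
 * per-cpu buffer, which is flushed into the shared ring (and from there
 * to the console device) on newline or when the per-cpu buffer fills.
 */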
void
cnputc(char c)
{
    console_buf_t * cbp;
    cpu_data_t * cpu_data_p;
    boolean_t state;
    boolean_t needs_print = TRUE;
    char * cp;

restart:
    mp_disable_preemption();
    cpu_data_p = current_cpu_datap();
    cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
    if (console_suspended || cbp == NULL) {
        mp_enable_preemption();
        /* Put directly if console ring is not initialized or we're heading into suspend */
        _cnputs(&c, 1);
        return;
    }

#ifndef __x86_64__
    /* Is there a panic backtrace going on? */
    if (cpu_data_p->PAB_active) {
        /* If another processor was in the process of emptying the
         * console ring buffer when it received the panic backtrace
         * signal, that processor will be spinning in DebugXCall()
         * waiting for the panicking processor to finish printing
         * the backtrace. But the panicking processor will never
         * be able to obtain the ring buffer lock since it is
         * owned by a processor that's spinning in DebugXCall().
         * Blow away any locks that other processors may have on
         * the console ring buffer so that the backtrace can
         * complete.
         */
        console_ring_lock_init();
    }
#endif /* !__x86_64__ */

    state = ml_set_interrupts_enabled(FALSE);

    /*
     * Add to the cpu buffer.
     * If the cpu buffer is full, we'll flush, then try
     * another put. If it fails a second time... screw
     * it.
     */
    if (needs_print && !cpu_buffer_put(cbp, c)) {
        simple_lock_try_lock_loop(&console_ring.write_lock);

        if (cpu_buffer_size(cbp) > console_ring_space()) {
            simple_unlock(&console_ring.write_lock);
            console_restore_interrupts_state(state);
            mp_enable_preemption();

            console_ring_try_empty();
            goto restart;
        }

        for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
            console_ring_put(*cp);
        cbp->buf_ptr = cbp->buf_base;
        simple_unlock(&console_ring.write_lock);

        cpu_buffer_put(cbp, c);
    }

    needs_print = FALSE;

    if (c != '\n') {
        console_restore_interrupts_state(state);
        mp_enable_preemption();
        return;
    }

    /* We printed a newline, time to flush the CPU buffer to the global buffer. */
    simple_lock_try_lock_loop(&console_ring.write_lock);

    /*
     * Is there enough space in the shared ring buffer?
     * Try to empty if not.
     * Note, we want the entire local buffer to fit to
     * avoid another cpu interjecting.
     */

    if (cpu_buffer_size(cbp) > console_ring_space()) {
        simple_unlock(&console_ring.write_lock);
        console_restore_interrupts_state(state);
        mp_enable_preemption();

        console_ring_try_empty();

        goto restart;
    }

    for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
        console_ring_put(*cp);

    cbp->buf_ptr = cbp->buf_base;
    simple_unlock(&console_ring.write_lock);

    console_restore_interrupts_state(state);
    mp_enable_preemption();

    console_ring_try_empty();

    return;
}

int
_serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw)
{
    int c;
    do {
        c = serial_getc();
    } while (wait && c < 0);

#if defined(__x86_64__) || defined(__arm__)
    // Check for the NMI string
    if (c == nmi_string[nmi_counter]) {
        nmi_counter++;
        if (nmi_counter == NMI_STRING_SIZE) {
            // We've got the NMI string, now do an NMI
            Debugger("Automatic NMI");
            nmi_counter = 0;
            return '\n';
        }
    } else if (c != -1) {
        nmi_counter = 0;
    }
#endif /* defined(__x86_64__) || defined(__arm__) */

    return c;
}

static void
_serial_putc(__unused int a, __unused int b, int c)
{
    serial_putc(c);
}

int
cngetc(void)
{
    return cons_ops[cons_ops_index].getc(0, 0, TRUE, FALSE);
}

int
cnmaygetc(void)
{
    return cons_ops[cons_ops_index].getc(0, 0, FALSE, FALSE);
}

int
vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw)
{
    char c;

    if (0 == (*PE_poll_input)(0, &c))
        return c;
    else
        return 0;
}

#ifdef CONFIG_XNUPOST
static uint32_t cons_test_ops_count = 0;

/*
 * Do multiple cpu buffer allocs and frees, intentionally
 * allowing pre-emption in between.
 */
static void
alloc_free_func(void * arg, wait_result_t wres __unused)
{
    console_buf_t * cbp = NULL;
    int count = (int)arg;

    T_LOG("Doing %d iterations of console cpu alloc and free.", count);

    while (count-- > 0) {
        (void)hw_atomic_add(&cons_test_ops_count, 1);
        cbp = (console_buf_t *)console_cpu_alloc(0);
        if (cbp == NULL) {
            T_ASSERT_NOTNULL(cbp, "cpu allocation failed");
        }
        console_cpu_free(cbp);
        cbp = NULL;
        /* give another thread a chance to come in */
        delay(10);
    }
}

/*
 * Log to console by multiple methods - printf, unbuffered write, console_write()
 */
static void
log_to_console_func(void * arg __unused, wait_result_t wres __unused)
{
    uint64_t thread_id = current_thread()->thread_id;
    char somedata[10] = "123456789";
    for (int i = 0; i < 26; i++) {
        (void)hw_atomic_add(&cons_test_ops_count, 1);
        printf(" thid: %llu printf iteration %d\n", thread_id, i);
        cnputc_unbuffered((char)('A' + i));
        cnputc_unbuffered('\n');
        console_write((char *)somedata, sizeof(somedata));
        delay(10);
    }
    printf("finished the log_to_console_func operations\n\n");
}

kern_return_t
console_serial_parallel_log_tests(void)
{
    thread_t thread;
    kern_return_t kr;
    cons_test_ops_count = 0;

    kr = kernel_thread_start(log_to_console_func, NULL, &thread);
    T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");

    delay(100);

    log_to_console_func(NULL, 0);

    /* wait until the other thread has also finished */
    while (cons_test_ops_count < 52) {
        delay(1000);
    }

    thread_deallocate(thread);
    T_LOG("parallel_logging tests are now complete. From this point forward we expect full lines\n");
    return KERN_SUCCESS;
}

kern_return_t
console_serial_alloc_rel_tests(void)
{
    unsigned long i, free_buf_count = 0;
    uint32_t * p;
    console_buf_t * cbp;
    thread_t thread;
    kern_return_t kr;

    T_LOG("doing alloc/release tests");

    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        cbp = (console_buf_t *)(void *)p;
        /* p should either be an allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
        T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
        if (*p == CPU_BUF_FREE_HEX) {
            free_buf_count++;
        }
    }

    T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free");
    cons_test_ops_count = 0;

    kr = kernel_thread_start(alloc_free_func, (void *)1000, &thread);
    T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");

    /* yield the cpu to give the other thread a chance to get on-core */
    delay(100);

    alloc_free_func((void *)1000, 0);

    /* wait until the other thread finishes its tasks */
    while (cons_test_ops_count < 2000) {
        delay(1000);
    }

    thread_deallocate(thread);
    /* verify again that at least 2 slots are free */
    free_buf_count = 0;
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        cbp = (console_buf_t *)(void *)p;
        /* p should either be an allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
        T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
        if (*p == CPU_BUF_FREE_HEX) {
            free_buf_count++;
        }
    }
    T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free after alloc free tests");

    return KERN_SUCCESS;
}

kern_return_t
console_serial_test(void)
{
    unsigned long i;
    char buffer[CPU_BUFFER_LEN];
    uint32_t * p;
    console_buf_t * cbp;

    T_LOG("Checking console_ring status.");
    T_ASSERT_EQ_INT(console_ring.len, KERN_CONSOLE_RING_SIZE, "Console ring size is not correct.");
    T_ASSERT_GT_INT(KERN_CONSOLE_BUF_SIZE, KERN_CONSOLE_RING_SIZE, "kernel console buffer size is < allocation.");

    /* select the next slot from the per-cpu buffers at the end of console_ring.buffer */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        cbp = (console_buf_t *)(void *)p;
        /* p should either be an allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
        T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "verified initialization of cpu buffers p=%p", (void *)p);
    }

    /* set the buffer up to be chars */
    for (i = 0; i < CPU_BUFFER_LEN; i++) {
        buffer[i] = (char)('0' + (i % 10));
    }
    buffer[CPU_BUFFER_LEN - 1] = '\0';

    T_LOG("Printing %d char string to serial one char at a time.", CPU_BUFFER_LEN);
    for (i = 0; i < CPU_BUFFER_LEN; i++) {
        printf("%c", buffer[i]);
    }
    printf("End\n");
    T_LOG("Printing %d char string to serial as a whole", CPU_BUFFER_LEN);
    printf("%s\n", buffer);

    T_LOG("Using console_write call repeatedly for 100 iterations");
    for (i = 0; i < 100; i++) {
        console_write(&buffer[0], 14);
        if ((i % 6) == 0)
            printf("\n");
    }
    printf("\n");

    T_LOG("Using T_LOG to print buffer %s", buffer);
    return KERN_SUCCESS;
}
#endif /* CONFIG_XNUPOST */
