/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File: kern/machine.c
 * Author: Avadis Tevanian, Jr.
 * Date: 1987
 *
 * Support for machine independent machine abstraction.
 */

#include <string.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/host_info.h>
#include <mach/host_reboot.h>
#include <mach/host_priv_server.h>
#include <mach/processor_server.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/cpu_quiesce.h>
#include <kern/ipc_host.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <machine/commpage.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>

#if CONFIG_DTRACE
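/* Hook invoked as a CPU comes up (TRUE) or goes down (FALSE); see processor_up() and processor_doshutdown() below. */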
extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
#endif

/*
 * Exported variables:
 */

struct machine_info machine_info;

/* Forwards */
void processor_doshutdown(
    processor_t processor);

/*
 * processor_up:
 *
 * Flag processor as up and running, and available
 * for scheduling.
 */
void
processor_up(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    s = splsched();
    init_ast_check(processor);
    pset = processor->processor_set;
    pset_lock(pset);
    ++pset->online_processor_count;
    pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
    (void)hw_atomic_add(&processor_avail_count, 1);
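    /* Publish the updated active CPU count to the commpage so user space sees the new processor. */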
    commpage_update_active_cpus();
    pset_unlock(pset);
    ml_cpu_up();
    splx(s);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook)
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
#endif
}
#include <atm/atm_internal.h>

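/*
 * host_reboot:
 *
 * Reboot or halt the machine on behalf of the privileged host port.
 * HOST_REBOOT_DEBUGGER drops into the kernel debugger (DEVELOPMENT/DEBUG
 * kernels only), HOST_REBOOT_UPSDELAY requests a UPS-delay halt via the
 * platform expert, and HOST_REBOOT_HALT halts all CPUs instead of
 * restarting them.
 */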
kern_return_t
host_reboot(
    host_priv_t host_priv,
    int options)
{
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    assert(host_priv == &realhost);

#if DEVELOPMENT || DEBUG
    if (options & HOST_REBOOT_DEBUGGER) {
        Debugger("Debugger");
        return (KERN_SUCCESS);
    }
#endif

    if (options & HOST_REBOOT_UPSDELAY) {
        // UPS power cutoff path
        PEHaltRestart(kPEUPSDelayHaltCPU);
    } else {
        halt_all_cpus(!(options & HOST_REBOOT_HALT));
    }

    return (KERN_SUCCESS);
}

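/*
 * Processor set assignment is not supported; the request always fails.
 */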
kern_return_t
processor_assign(
    __unused processor_t processor,
    __unused processor_set_t new_pset,
    __unused boolean_t wait)
{
    return (KERN_FAILURE);
}

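/*
 * processor_shutdown:
 *
 * Shut down the given processor. Succeeds immediately if it is already
 * off line or already shutting down, fails if it is in the middle of
 * being started, and waits for a dispatching processor to settle before
 * marking it PROCESSOR_SHUTDOWN and completing the shutdown on that CPU.
 */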
kern_return_t
processor_shutdown(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    s = splsched();
    pset = processor->processor_set;
    pset_lock(pset);
    if (processor->state == PROCESSOR_OFF_LINE) {
        /*
         * Success if already shutdown.
         */
        pset_unlock(pset);
        splx(s);

        return (KERN_SUCCESS);
    }

    if (processor->state == PROCESSOR_START) {
        /*
         * Failure if currently being started.
         */
        pset_unlock(pset);
        splx(s);

        return (KERN_FAILURE);
    }

    /*
     * If the processor is dispatching, let it finish.
     */
    while (processor->state == PROCESSOR_DISPATCHING) {
        pset_unlock(pset);
        splx(s);
        delay(1);
        s = splsched();
        pset_lock(pset);
    }

    /*
     * Success if already being shutdown.
     */
    if (processor->state == PROCESSOR_SHUTDOWN) {
        pset_unlock(pset);
        splx(s);

        return (KERN_SUCCESS);
    }

    pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);

    pset_unlock(pset);

    processor_doshutdown(processor);
    splx(s);

    cpu_exit_wait(processor->cpu_id);

    return (KERN_SUCCESS);
}

/*
 * Called with interrupts disabled.
 */
void
processor_doshutdown(
    processor_t processor)
{
    thread_t old_thread, self = current_thread();
    processor_t prev;
    processor_set_t pset;

    /*
     * Get onto the processor to shutdown
     */
    prev = thread_bind(processor);
    thread_block(THREAD_CONTINUE_NULL);

    assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook)
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
#endif

    ml_cpu_down();

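    /*
     * If this is the last available processor, take and release the
     * hibernation VM locks before it goes away; this acts as a barrier
     * against hibernation work that may be holding them.
     */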
#if HIBERNATION
    if (processor_avail_count < 2) {
        hibernate_vm_lock();
        hibernate_vm_unlock();
    }
#endif

    pset = processor->processor_set;
    pset_lock(pset);
    pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
    --pset->online_processor_count;
    (void)hw_atomic_sub(&processor_avail_count, 1);
    commpage_update_active_cpus();
    SCHED(processor_queue_shutdown)(processor);
    /* pset lock dropped */
    SCHED(rt_queue_shutdown)(processor);

    /*
     * Continue processor shutdown in shutdown context.
     *
     * We save the current context in machine_processor_shutdown in such a way
     * that when this thread is next invoked it will return from here instead of
     * from the machine_switch_context() in thread_invoke like a normal context switch.
     *
     * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
     * thread invoked back to this one. (Usually, it's another processor's idle thread.)
     *
     * TODO: Make this a real thread_run of the idle_thread, so we don't have to keep this in sync
     * with thread_invoke.
     */
    thread_bind(prev);
    old_thread = machine_processor_shutdown(self, processor_offline, processor);

    thread_dispatch(old_thread, self);
}

/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context.
 * This performs a minimal thread_invoke() to the idle thread,
 * so it needs to be kept in sync with what thread_invoke() does.
 *
 * The onlining half of this is done in load_context().
 */
void
processor_offline(
    processor_t processor)
{
    assert(processor == current_processor());
    assert(processor->active_thread == current_thread());

    thread_t old_thread = processor->active_thread;
    thread_t new_thread = processor->idle_thread;

    if (!new_thread->kernel_stack) {
        /* the idle thread has a reserved stack, so this will never fail */
        if (!stack_alloc_try(new_thread))
            panic("processor_offline");
    }

    processor->active_thread = new_thread;
    processor_state_update_idle(processor);
    processor->starting_pri = IDLEPRI;
    processor->deadline = UINT64_MAX;
    new_thread->last_processor = processor;

    uint64_t ctime = mach_absolute_time();

    processor->last_dispatch = ctime;
    old_thread->last_run_time = ctime;

    /* Update processor->thread_timer and ->kernel_timer to point to the new thread */
    processor_timer_switch_thread(ctime, &new_thread->system_timer);
    PROCESSOR_DATA(processor, kernel_timer) = &new_thread->system_timer;
    timer_stop(PROCESSOR_DATA(processor, current_state), ctime);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
        old_thread->reason, (uintptr_t)thread_tid(new_thread),
        old_thread->sched_pri, new_thread->sched_pri, 0);

    machine_set_current_thread(new_thread);

    thread_dispatch(old_thread, new_thread);

    cpu_quiescent_counter_leave(processor->last_dispatch);

    PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

    cpu_sleep();
    panic("zombie processor");
    /*NOTREACHED*/
}

kern_return_t
host_get_boot_info(
    host_priv_t host_priv,
    kernel_boot_info_t boot_info)
{
    const char *src = "";
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    assert(host_priv == &realhost);

    /*
     * Copy first operator string terminated by '\0' followed by
     * standardized strings generated from boot string.
     */
    src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
    if (src != boot_info)
        (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);

    return (KERN_SUCCESS);
}

#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif

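/*
 * ml_io_read:
 *
 * Perform a volatile read of the given width (1, 2, 4 or 8 bytes) from a
 * memory-mapped I/O virtual address. On x86_64, if reportphyreaddelayabs
 * is set, the access is timed with interrupts disabled; an access that
 * exceeds the threshold either panics (when phyreadpanic is set and
 * machine timeouts are not suspended) or fires a DTrace probe.
 */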
unsigned long long ml_io_read(uintptr_t vaddr, int size) {
    unsigned long long result = 0;
    unsigned char s1;
    unsigned short s2;

#if defined(__x86_64__)
    uint64_t sabs, eabs;
    boolean_t istate, timeread = FALSE;
#if DEVELOPMENT || DEBUG
    pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
    if (__improbable(reportphyreaddelayabs != 0)) {
        istate = ml_set_interrupts_enabled(FALSE);
        sabs = mach_absolute_time();
        timeread = TRUE;
    }
#endif /* x86_64 */

    switch (size) {
    case 1:
        s1 = *(volatile unsigned char *)vaddr;
        result = s1;
        break;
    case 2:
        s2 = *(volatile unsigned short *)vaddr;
        result = s2;
        break;
    case 4:
        result = *(volatile unsigned int *)vaddr;
        break;
    case 8:
        result = *(volatile unsigned long long *)vaddr;
        break;
    default:
        panic("Invalid size %d for ml_io_read(%p)\n", size, (void *)vaddr);
        break;
    }

#if defined(__x86_64__)
    if (__improbable(timeread == TRUE)) {
        eabs = mach_absolute_time();
        (void)ml_set_interrupts_enabled(istate);

        if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
            if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
                panic("Read from IO virtual addr 0x%lx took %llu ns, result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", vaddr, (eabs - sabs), result, sabs, eabs, reportphyreaddelayabs);
            }
#if CONFIG_DTRACE
            DTRACE_PHYSLAT3(physread, uint64_t, (eabs - sabs),
                uint64_t, vaddr, uint32_t, size);
#endif /* CONFIG_DTRACE */
        }
    }
#endif /* x86_64 */
    return result;
}

unsigned int ml_io_read8(uintptr_t vaddr) {
    return (unsigned) ml_io_read(vaddr, 1);
}

unsigned int ml_io_read16(uintptr_t vaddr) {
    return (unsigned) ml_io_read(vaddr, 2);
}

unsigned int ml_io_read32(uintptr_t vaddr) {
    return (unsigned) ml_io_read(vaddr, 4);
}

unsigned long long ml_io_read64(uintptr_t vaddr) {
    return ml_io_read(vaddr, 8);
}
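
/*
 * Example (hypothetical register and base address, shown for illustration
 * only): reading a 32-bit device status register that has already been
 * mapped at a known I/O virtual address.
 *
 *     uint32_t status = ml_io_read32(io_base + DEV_STATUS_OFFSET);
 */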