1 | /* |
2 | * Copyright (c) 2000-2017 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | |
57 | #include <kern/ast.h> |
58 | #include <kern/counters.h> |
59 | #include <kern/cpu_quiesce.h> |
60 | #include <kern/misc_protos.h> |
61 | #include <kern/queue.h> |
62 | #include <kern/sched_prim.h> |
63 | #include <kern/thread.h> |
64 | #include <kern/processor.h> |
65 | #include <kern/spl.h> |
66 | #include <kern/sfi.h> |
67 | #if CONFIG_TELEMETRY |
68 | #include <kern/telemetry.h> |
69 | #endif |
70 | #include <kern/waitq.h> |
71 | #include <kern/ledger.h> |
72 | #include <kern/machine.h> |
73 | #include <kperf/kperf_kpc.h> |
74 | #include <mach/policy.h> |
75 | #include <security/mac_mach_internal.h> // for MACF AST hook |
76 | #include <stdatomic.h> |
77 | |
78 | static void __attribute__((noinline, noreturn, disable_tail_calls)) |
79 | thread_preempted(__unused void* parameter, __unused wait_result_t result) |
80 | { |
81 | /* |
	 * We've been scheduled again after a userspace preemption;
	 * try again to return to userspace.
84 | */ |
85 | thread_exception_return(); |
86 | } |
87 | |
88 | /* |
89 | * AST_URGENT was detected while in kernel mode |
90 | * Called with interrupts disabled, returns the same way |
91 | * Must return to caller |
92 | */ |
93 | void |
94 | ast_taken_kernel(void) |
95 | { |
96 | assert(ml_get_interrupts_enabled() == FALSE); |
97 | |
98 | thread_t thread = current_thread(); |
99 | |
100 | /* Idle threads handle preemption themselves */ |
101 | if ((thread->state & TH_IDLE)) { |
102 | ast_off(AST_PREEMPTION); |
103 | return; |
104 | } |
105 | |
106 | /* |
107 | * It's possible for this to be called after AST_URGENT |
108 | * has already been handled, due to races in enable_preemption |
109 | */ |
	if (ast_peek(AST_URGENT) != AST_URGENT) {
		return;
	}
112 | |
113 | /* |
114 | * Don't preempt if the thread is already preparing to block. |
115 | * TODO: the thread can cheese this with clear_wait() |
116 | */ |
117 | if (waitq_wait_possible(thread) == FALSE) { |
118 | /* Consume AST_URGENT or the interrupt will call us again */ |
119 | ast_consume(AST_URGENT); |
120 | return; |
121 | } |
122 | |
123 | /* TODO: Should we csw_check again to notice if conditions have changed? */ |
124 | |
125 | ast_t urgent_reason = ast_consume(AST_PREEMPTION); |
126 | |
127 | assert(urgent_reason & AST_PREEMPT); |
128 | |
129 | counter(c_ast_taken_block++); |
130 | |
131 | thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason); |
132 | |
133 | assert(ml_get_interrupts_enabled() == FALSE); |
134 | } |
135 | |
136 | /* |
137 | * An AST flag was set while returning to user mode |
138 | * Called with interrupts disabled, returns with interrupts enabled |
139 | * May call continuation instead of returning |
140 | */ |
141 | void |
142 | ast_taken_user(void) |
143 | { |
144 | assert(ml_get_interrupts_enabled() == FALSE); |
145 | |
146 | thread_t thread = current_thread(); |
147 | |
	/* We are about to return to userspace; there must not be a pending wait */
149 | assert(waitq_wait_possible(thread)); |
150 | assert((thread->state & TH_IDLE) == 0); |
151 | |
152 | /* TODO: Add more 'return to userspace' assertions here */ |
153 | |
154 | /* |
155 | * If this thread was urgently preempted in userspace, |
156 | * take the preemption before processing the ASTs. |
157 | * The trap handler will call us again if we have more ASTs, so it's |
158 | * safe to block in a continuation here. |
159 | */ |
160 | if (ast_peek(AST_URGENT) == AST_URGENT) { |
161 | ast_t urgent_reason = ast_consume(AST_PREEMPTION); |
162 | |
163 | assert(urgent_reason & AST_PREEMPT); |
164 | |
165 | /* TODO: Should we csw_check again to notice if conditions have changed? */ |
166 | |
167 | thread_block_reason(thread_preempted, NULL, urgent_reason); |
168 | /* NOTREACHED */ |
169 | } |
170 | |
171 | /* |
	 * AST_KEVENT does not send an IPI when the AST is set for a thread running
	 * in parallel on a different processor; only the AST bit on the thread is set.
	 *
	 * Force a propagate to pick up concurrent updates made without an IPI.
176 | */ |
177 | ast_propagate(thread); |
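
	/*
	 * A hedged sketch of the remote-setter side this pairs with (the
	 * helper calls are assumptions for illustration, not the actual
	 * kevent implementation):
	 *
	 *	// on another CPU, without an IPI:
	 *	atomic_fetch_or(&thread->kevent_ast_bits, bits);
	 *	thread_ast_set(thread, AST_KEVENT);	// per-thread bit only
	 */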
178 | |
179 | /* |
	 * Consume all non-preemption processor ASTs that match these reasons,
	 * because we handle them here.
182 | * |
183 | * If one of the AST handlers blocks in a continuation, |
184 | * we'll reinstate the unserviced thread-level AST flags |
185 | * from the thread to the processor on context switch. |
186 | * If one of the AST handlers sets another AST, |
187 | * the trap handler will call ast_taken_user again. |
188 | * |
189 | * We expect the AST handlers not to thread_exception_return |
190 | * without an ast_propagate or context switch to reinstate |
191 | * the per-processor ASTs. |
192 | * |
193 | * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs? |
194 | */ |
195 | ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE); |
196 | |
197 | ml_set_interrupts_enabled(TRUE); |
198 | |
199 | #if CONFIG_DTRACE |
200 | if (reasons & AST_DTRACE) { |
201 | dtrace_ast(); |
202 | } |
203 | #endif |
204 | |
205 | #ifdef MACH_BSD |
206 | if (reasons & AST_BSD) { |
207 | thread_ast_clear(thread, AST_BSD); |
208 | bsd_ast(thread); |
209 | } |
210 | #endif |
211 | |
212 | #if CONFIG_MACF |
213 | if (reasons & AST_MACF) { |
214 | thread_ast_clear(thread, AST_MACF); |
215 | mac_thread_userret(thread); |
216 | } |
217 | #endif |
218 | |
219 | if (reasons & AST_APC) { |
220 | thread_ast_clear(thread, AST_APC); |
221 | thread_apc_ast(thread); |
222 | } |
223 | |
224 | if (reasons & AST_GUARD) { |
225 | thread_ast_clear(thread, AST_GUARD); |
226 | guard_ast(thread); |
227 | } |
228 | |
229 | if (reasons & AST_LEDGER) { |
230 | thread_ast_clear(thread, AST_LEDGER); |
231 | ledger_ast(thread); |
232 | } |
233 | |
234 | if (reasons & AST_KPERF) { |
235 | thread_ast_clear(thread, AST_KPERF); |
236 | kperf_kpc_thread_ast(thread); |
237 | } |
238 | |
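	/*
	 * Swap the pending kevent bits out atomically so that a concurrent
	 * remote setter either lands in this harvest or re-sets AST_KEVENT
	 * and is picked up on the next pass through ast_taken_user().
	 */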
239 | if (reasons & AST_KEVENT) { |
240 | thread_ast_clear(thread, AST_KEVENT); |
241 | uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0); |
		if (bits) {
			kevent_ast(thread, bits);
		}
243 | } |
244 | |
245 | #if CONFIG_TELEMETRY |
246 | if (reasons & AST_TELEMETRY_ALL) { |
247 | ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL; |
248 | thread_ast_clear(thread, AST_TELEMETRY_ALL); |
249 | telemetry_ast(thread, telemetry_reasons); |
250 | } |
251 | #endif |
252 | |
253 | spl_t s = splsched(); |
254 | |
255 | #if CONFIG_SCHED_SFI |
256 | /* |
257 | * SFI is currently a per-processor AST, not a per-thread AST |
258 | * TODO: SFI should be a per-thread AST |
259 | */ |
260 | if (ast_consume(AST_SFI) == AST_SFI) { |
261 | sfi_ast(thread); |
262 | } |
263 | #endif |
264 | |
	/* We are about to return to userspace; there must not be a pending wait */
266 | assert(waitq_wait_possible(thread)); |
267 | |
268 | /* |
269 | * We've handled all per-thread ASTs, time to handle non-urgent preemption. |
270 | * |
271 | * We delay reading the preemption bits until now in case the thread |
272 | * blocks while handling per-thread ASTs. |
273 | * |
	 * If one of the AST handlers managed to set a new AST bit,
	 * thread_exception_return will call ast_taken_user again.
276 | */ |
277 | ast_t preemption_reasons = ast_consume(AST_PREEMPTION); |
278 | |
279 | if (preemption_reasons & AST_PREEMPT) { |
280 | /* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */ |
281 | |
282 | thread_lock(thread); |
283 | preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM)); |
284 | thread_unlock(thread); |
285 | |
286 | #if CONFIG_SCHED_SFI |
287 | /* csw_check might tell us that SFI is needed */ |
288 | if (preemption_reasons & AST_SFI) { |
289 | sfi_ast(thread); |
290 | } |
291 | #endif |
292 | |
293 | if (preemption_reasons & AST_PREEMPT) { |
294 | counter(c_ast_taken_block++); |
295 | /* switching to a continuation implicitly re-enables interrupts */ |
296 | thread_block_reason(thread_preempted, NULL, preemption_reasons); |
297 | /* NOTREACHED */ |
298 | } |
299 | } |
300 | |
301 | if (ast_consume(AST_UNQUIESCE) == AST_UNQUIESCE) { |
302 | cpu_quiescent_counter_ast(); |
303 | } |
304 | |
305 | cpu_quiescent_counter_assert_ast(); |
306 | |
307 | splx(s); |
308 | |
309 | /* |
	 * This is a good place for assertions that must hold
	 * on every return to userspace.
312 | */ |
313 | assert((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) == 0); |
314 | assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0); |
315 | assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0); |
316 | assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0); |
317 | assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0); |
318 | |
319 | assert(thread->promotions == 0); |
320 | assert(thread->was_promoted_on_wakeup == 0); |
321 | assert(thread->waiting_for_mutex == NULL); |
322 | assert(thread->rwlock_count == 0); |
323 | } |
324 | |
325 | /* |
326 | * Handle preemption IPI or IPI in response to setting an AST flag |
327 | * Triggered by cause_ast_check |
328 | * Called at splsched |
329 | */ |
330 | void |
331 | ast_check(processor_t processor) |
332 | { |
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_SHUTDOWN) {
		return;
	}
336 | |
337 | thread_t thread = processor->active_thread; |
338 | |
339 | assert(thread == current_thread()); |
340 | |
341 | thread_lock(thread); |
342 | |
343 | /* |
	 * Propagate the thread's ASTs to the processor
	 * (handles the IPI sent in response to setting an AST flag).
346 | */ |
347 | ast_propagate(thread); |
348 | |
349 | boolean_t needs_callout = false; |
350 | processor->current_pri = thread->sched_pri; |
351 | processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread); |
352 | processor->current_recommended_pset_type = recommended_pset_type(thread); |
353 | perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread); |
354 | if (thread_class != processor->current_perfctl_class) { |
		/*
		 * We updated the perfctl class of this thread from another core.
		 * Since we don't do CLPC callouts from another core, do a callout
		 * here to let CLPC know that the currently running thread has a
		 * new class.
		 */
360 | needs_callout = true; |
361 | } |
362 | processor->current_perfctl_class = thread_class; |
363 | |
364 | ast_t preempt; |
365 | |
	if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE) {
		ast_on(preempt);
	}
368 | |
369 | thread_unlock(thread); |
370 | |
371 | if (needs_callout) { |
372 | machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE, |
373 | mach_approximate_time(), 0, thread); |
374 | } |
375 | } |
376 | |
377 | /* |
378 | * Set AST flags on current processor |
379 | * Called at splsched |
380 | */ |
381 | void |
382 | ast_on(ast_t reasons) |
383 | { |
384 | ast_t *pending_ast = ast_pending(); |
385 | |
386 | *pending_ast |= reasons; |
387 | } |
388 | |
389 | /* |
390 | * Clear AST flags on current processor |
391 | * Called at splsched |
392 | */ |
393 | void |
394 | ast_off(ast_t reasons) |
395 | { |
396 | ast_t *pending_ast = ast_pending(); |
397 | |
398 | *pending_ast &= ~reasons; |
399 | } |
400 | |
401 | /* |
402 | * Consume the requested subset of the AST flags set on the processor |
403 | * Return the bits that were set |
404 | * Called at splsched |
405 | */ |
406 | ast_t |
407 | ast_consume(ast_t reasons) |
408 | { |
409 | ast_t *pending_ast = ast_pending(); |
410 | |
411 | reasons &= *pending_ast; |
412 | *pending_ast &= ~reasons; |
413 | |
414 | return reasons; |
415 | } |
416 | |
417 | /* |
418 | * Read the requested subset of the AST flags set on the processor |
 * Return the bits that were set; don't modify the processor
420 | * Called at splsched |
421 | */ |
422 | ast_t |
423 | ast_peek(ast_t reasons) |
424 | { |
425 | ast_t *pending_ast = ast_pending(); |
426 | |
427 | reasons &= *pending_ast; |
428 | |
429 | return reasons; |
430 | } |
431 | |
432 | /* |
 * Reset the current processor's per-thread AST flags to those set on the thread
434 | * Called at splsched |
435 | */ |
436 | void |
437 | ast_context(thread_t thread) |
438 | { |
439 | ast_t *pending_ast = ast_pending(); |
440 | |
441 | *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast); |
442 | } |
443 | |
444 | /* |
445 | * Propagate ASTs set on a thread to the current processor |
446 | * Called at splsched |
447 | */ |
448 | void |
449 | ast_propagate(thread_t thread) |
450 | { |
451 | ast_on(thread->ast); |
452 | } |
453 | |
454 | void |
455 | ast_dtrace_on(void) |
456 | { |
457 | ast_on(AST_DTRACE); |
458 | } |
459 | |