/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/proc_require.h>

#include <machine/limits.h>
#include <sys/codesign.h> /* CS_CDHASH_LEN */

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args_locked(task_t, void (*)(thread_t, void *), void *);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, bool external);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
extern uint64_t proc_uniqueid_task(void *p, void *t);
extern int proc_pidversion(void *p);
extern int proc_getcdhash(void *p, char *cdhash);

int mach_to_bsd_errno(kern_return_t mach_err);
kern_return_t bsd_to_mach_failure(int bsd_err);

#if MACH_BSD
extern void psignal(void *, int);
#endif

/*
 *
 */
void *
get_bsdtask_info(task_t t)
{
	void *proc_from_task = task_get_proc_raw(t);
	proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
	return task_has_proc(t) ? proc_from_task : NULL;
}

void
task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}
/*
 *
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	return get_thread_ro(th)->tro_proc;
}

/*
 *
 */
void
set_bsdtask_info(task_t t, void * v)
{
	void *proc_from_task = task_get_proc_raw(t);
	if (v == NULL) {
		task_clear_has_proc(t);
	} else {
		if (v != proc_from_task) {
			panic("set_bsdtask_info trying to set random bsd_info %p", v);
		}
		task_set_has_proc(t);
	}
}

__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}

__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	return th->t_tro;
}

thread_ro_t
get_thread_ro(thread_t th)
{
	thread_ro_t tro = th->t_tro;

	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}

__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	return get_thread_ro_unchecked(current_thread());
}

thread_ro_t
current_thread_ro(void)
{
	return get_thread_ro(current_thread());
}

void
clear_thread_ro_proc(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}

struct uthread *
get_bsdthread_info(thread_t th)
{
	return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
}

thread_t
get_machthread(struct uthread *uth)
{
	return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
}
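
/*
 * Illustrative sketch (an assumption about the allocation layout, not
 * stated here): the two accessors above are inverses because the
 * uthread is co-allocated immediately after the thread structure:
 *
 *	thread_t th = current_thread();
 *	struct uthread *uth = get_bsdthread_info(th);
 *	assert(get_machthread(uth) == th);
 */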

/*
 * This is used to remember any FS error from VNOP_PAGEIN code when
 * invoked under vm_fault(). The value is an errno style value. It can
 * be retrieved by exception handlers using thread_get_state().
 */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}

#if defined(__x86_64__)
/*
 * Returns non-zero if the thread has a non-NULL task
 * and that task has an LDT.
 */
int
thread_task_has_ldt(thread_t th)
{
	task_t task = get_threadtask(th);
	return task && task->i386_ldt != 0;
}
#endif /* __x86_64__ */

/*
 * XXX
 */
int get_thread_lock_count(thread_t th); /* forced forward */
int
get_thread_lock_count(thread_t th __unused)
{
	/*
	 * TODO: one day: resurrect counting locks held to disallow
	 * holding locks across upcalls.
	 *
	 * never worked on arm.
	 */
	return 0;
}

/*
 * Returns a thread reference.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = THREAD_NULL;
	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return THREAD_NULL;
	}

	thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		task_unlock(task);
		return THREAD_NULL;
	}

	thread_reference(thread);
	task_unlock(task);
	return thread;
}
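
/*
 * Example (illustrative, not from the original source): the caller
 * owns the reference returned by get_firstthread() and must drop it
 * with thread_deallocate() when done:
 *
 *	thread_t th = get_firstthread(task);
 *	if (th != THREAD_NULL) {
 *		...inspect th...
 *		thread_deallocate(th);
 *	}
 */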

kern_return_t
get_signalact(
	task_t task,
	thread_t *result_out,
	int setast)
{
	kern_return_t result = KERN_SUCCESS;
	thread_t inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}

kern_return_t
check_actforsig(
	task_t task,
	thread_t thread,
	int setast)
{
	kern_return_t result = KERN_FAILURE;
	thread_t inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}

ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}

vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}
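
/*
 * Example (illustrative): callers that are not running in the task's
 * context should prefer the reference-taking variant above and pair
 * it with vm_map_deallocate():
 *
 *	vm_map_t map = get_task_map_reference(task);
 *	if (map != VM_MAP_NULL) {
 *		...use map...
 *		vm_map_deallocate(map);
 *	}
 */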

/*
 *
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

int
get_task_numacts(task_t t)
{
	return t->thread_count;
}

/* does this machine need 64bit register set for signal handler */
int
is_64signalregset(void)
{
	if (task_has_64Bit_data(current_task())) {
		return 1;
	}

	return 0;
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned. Also does a pmap switch if the thread provided is the
 * current thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	return old_map;
}
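
/*
 * Example (illustrative sketch): exec-style code might install a new
 * map and then drop the returned reference to the old one once it is
 * no longer needed:
 *
 *	vm_map_t old_map = swap_task_map(task, thread, new_map);
 *	vm_map_deallocate(old_map);
 */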

/*
 *
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}

/*
 *
 */
uint64_t
get_task_resident_size(task_t task)
{
	uint64_t val;

	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
	return val;
}

uint64_t
get_task_compressed(task_t task)
{
	uint64_t val;

	ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
	return val;
}

uint64_t
get_task_resident_max(task_t task)
{
	uint64_t val;

	ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
	return val;
}

/*
 * Get the balance for a given field in the task ledger.
 * Returns 0 if the entry is invalid.
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}

uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t balance = 0;
	uint64_t volatile_size = 0;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	return volatile_size;
}

/*
 *
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}

#if CONFIG_LEDGER_INTERVAL_MAX
/*
 *
 */
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

/*
 *
 */
uint64_t
get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 *
 */
uint64_t
get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}

uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}

uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}

uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}

#if CONFIG_FREEZE
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
#endif /* CONFIG_FREEZE */

uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}

uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}

uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}

uint64_t
get_task_tagged_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_tagged_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}

uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}

task_t
get_threadtask(thread_t th)
{
	return get_thread_ro(th)->tro_task;
}

task_t
get_threadtask_early(thread_t th)
{
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}

/*
 *
 */
vm_map_offset_t
get_map_min(
	vm_map_t map)
{
	return vm_map_min(map);
}

/*
 *
 */
vm_map_offset_t
get_map_max(
	vm_map_t map)
{
	return vm_map_max(map);
}

vm_map_size_t
get_vmmap_size(
	vm_map_t map)
{
	return vm_map_adjusted_size(map);
}

int
get_task_page_size(
	task_t task)
{
	return vm_map_page_size(task->map);
}

#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}

int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
#endif /* CONFIG_COREDUMP */

int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}

boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}

/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike above, it also
 * checks to see if the thread is safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
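
/*
 * Example (illustrative): a long-running kernel loop can poll for
 * abort requests with this routine; note that a safe abort is
 * consumed by the poll itself:
 *
 *	while (work_remains) {
 *		if (current_thread_aborted()) {
 *			return EINTR;
 *		}
 *		...do a bounded chunk of work...
 *	}
 */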

/* Iterate over a task that is already protected by a held lock. */
void
task_act_iterate_wth_args_locked(
	task_t task,
	void (*func_callback)(thread_t, void *),
	void *func_arg)
{
	for (thread_t inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}
}

void
task_act_iterate_wth_args(
	task_t task,
	void (*func_callback)(thread_t, void *),
	void *func_arg)
{
	task_lock(task);
	task_act_iterate_wth_args_locked(task, func_callback, func_arg);
	task_unlock(task);
}
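
/*
 * Example (illustrative, with a hypothetical callback): counting the
 * threads of a task via the iterator above:
 *
 *	static void
 *	count_one(thread_t thread __unused, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nthreads = 0;
 *	task_act_iterate_wth_args(task, count_one, &nthreads);
 */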

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	struct recount_times_mach term_times = recount_task_terminated_times(task);
	struct recount_times_mach total_times = recount_task_times(task);

	tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
	tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_total_system = total_times.rtm_system;
	ptinfo->pti_total_user = total_times.rtm_user;

	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if (vpp != NULL) {
				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
			}
			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount) {
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
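
/*
 * Example (illustrative): the return value above is a byte count, so
 * a caller recovers the number of thread IDs copied out by dividing:
 *
 *	uint64_t ids[16];
 *	int nbytes = fill_taskthreadlist(task, ids, 16, false);
 *	int nthreads = nbytes / (int)sizeof(uint64_t);
 */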

int
fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
{
	int err = 0;

	thread_t thread = current_thread();

	/*
	 * Looking up threads is pretty expensive and not realtime-safe
	 * right now, requiring locking the task and iterating over all
	 * threads. As long as that is the case, we officially only
	 * support getting this info for the current thread.
	 */
	if (task != current_task() || thread_id != thread->thread_id) {
		return -1;
	}

#if SCHED_HYGIENE_DEBUG
	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
#else
	(void)thread;
	thread_sched_info->int_time_ns = 0;
#endif

	return err;
}

int
get_numthreads(task_t task)
{
	return task->thread_count;
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	struct task_power_info_extra extra = { 0 };
	task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;
	ri->ri_runnable_time = extra.runnable_time;
	ri->ri_cycles = extra.cycles;
	ri->ri_instructions = extra.instructions;
	ri->ri_pcycles = extra.pcycles;
	ri->ri_pinstructions = extra.pinstructions;
	ri->ri_user_ptime = extra.user_ptime;
	ri->ri_system_ptime = extra.system_ptime;
	ri->ri_energy_nj = extra.energy;
	ri->ri_penergy_nj = extra.penergy;
	ri->ri_secure_time_in_system = extra.secure_time;
	ri->ri_secure_ptime_in_system = extra.secure_ptime;

	ri->ri_phys_footprint = get_task_phys_footprint(task);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ri->ri_wired_size = get_task_wired_mem(task);

	ri->ri_pageins = counter_load(&task->pageins);

	task_unlock(task);
	return 0;
}

void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O Stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return 0;
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up the QoS time of all the threads to the task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return 0;
}

uint64_t
get_task_logical_writes(task_t task, bool external)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;
	int entry = external ? task_ledgers.logical_writes_to_external :
	    task_ledgers.logical_writes;

	task_lock(task);
	ledger_get_entry_info(task->ledger, entry, &lei);
	task_unlock(task);

	return lei.lei_balance;
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_dispatchqueue_label_offset(task_t task)
{
	uint64_t dq_label_offset = 0;
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
	}

	return dq_label_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		return proc_uniqueid_task(bsd_info, task);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		return proc_pidversion(bsd_info);
	} else {
		return INT_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}

void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
#endif

int
fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
{
	ipc_space_t space = task->itk_space;
	if (space == NULL) {
		return -1;
	}

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return -1;
	}

	*table_size = ipc_entry_table_count(is_active_table(space));
	*table_free = space->is_table_free;

	is_read_unlock(space);

	return 0;
}

int
get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
{
	int result = 0;
	void *bsd_info = NULL;

	task_lock(task);
	bsd_info = get_bsdtask_info(task);
	result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
	task_unlock(task);

	return result;
}

/* moved from ubc_subr.c */
int
mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	case KERN_MEMORY_FAILURE:
	case KERN_MEMORY_ERROR:
	case KERN_PROTECTION_FAILURE:
		return EFAULT;

	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
	case KERN_DENIED:
		return EPERM;

	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
	case KERN_NOT_FOUND:
	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO; /* 5 == KERN_FAILURE */
	}
}
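
/*
 * Example (illustrative; some_mach_call is hypothetical): a typical
 * use is translating a Mach return code at the BSD boundary:
 *
 *	kern_return_t kr = some_mach_call(task);
 *	if (kr != KERN_SUCCESS) {
 *		return mach_to_bsd_errno(kr);
 *	}
 */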

kern_return_t
bsd_to_mach_failure(int bsd_err)
{
	switch (bsd_err) {
	case EIO:
	case EACCES:
	case ENOMEM:
	case EFAULT:
		return KERN_MEMORY_ERROR;

	case EINVAL:
		return KERN_INVALID_ARGUMENT;

	case ETIMEDOUT:
	case EBUSY:
		return KERN_OPERATION_TIMED_OUT;

	case ECONNRESET:
		return KERN_RPC_SERVER_TERMINATED;

	case ENOTSUP:
		return KERN_NOT_SUPPORTED;

	case ENETDOWN:
		return KERN_NODE_DOWN;

	case ENOENT:
		return KERN_NOT_FOUND;

	case EINTR:
		return KERN_ABORTED;

	case EPERM:
		return KERN_DENIED;

	case EEXIST:
		return KERN_ALREADY_IN_SET;

	default:
		return KERN_FAILURE;
	}
}