1/*
2 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * File: ubc_subr.c
30 * Author: Umesh Vaishampayan [umeshv@apple.com]
31 * 05-Aug-1999 umeshv Created.
32 *
33 * Functions related to Unified Buffer cache.
34 *
35 * Caller of UBC functions MUST have a valid reference on the vnode.
36 *
37 */
38
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/lock.h>
43#include <sys/mman.h>
44#include <sys/mount_internal.h>
45#include <sys/vnode_internal.h>
46#include <sys/ubc_internal.h>
47#include <sys/ucred.h>
48#include <sys/proc_internal.h>
49#include <sys/kauth.h>
50#include <sys/buf.h>
51#include <sys/user.h>
52#include <sys/codesign.h>
53#include <sys/codedir_internal.h>
54#include <sys/fsevents.h>
55#include <sys/fcntl.h>
56
57#include <mach/mach_types.h>
58#include <mach/memory_object_types.h>
59#include <mach/memory_object_control.h>
60#include <mach/vm_map.h>
61#include <mach/mach_vm.h>
62#include <mach/upl.h>
63
64#include <kern/kern_types.h>
65#include <kern/kalloc.h>
66#include <kern/zalloc.h>
67#include <kern/thread.h>
68#include <vm/pmap.h>
69#include <vm/vm_kern.h>
70#include <vm/vm_protos.h> /* last */
71
72#include <libkern/crypto/sha1.h>
73#include <libkern/crypto/sha2.h>
74#include <libkern/libkern.h>
75
76#include <security/mac_framework.h>
77#include <stdbool.h>
78
79/* XXX These should be in a BSD accessible Mach header, but aren't. */
80extern kern_return_t memory_object_pages_resident(memory_object_control_t,
81 boolean_t *);
82extern kern_return_t memory_object_signed(memory_object_control_t control,
83 boolean_t is_signed);
84extern boolean_t memory_object_is_signed(memory_object_control_t);
85
86/* XXX Same for those. */
87
88extern void Debugger(const char *message);
89
90
91/* XXX no one uses this interface! */
92kern_return_t ubc_page_op_with_control(
93 memory_object_control_t control,
94 off_t f_offset,
95 int ops,
96 ppnum_t *phys_entryp,
97 int *flagsp);
98
99
100#if DIAGNOSTIC
101#if defined(assert)
102#undef assert
103#endif
104#define assert(cond) \
105 ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
106#else
107#include <kern/assert.h>
108#endif /* DIAGNOSTIC */
109
110static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
111static int ubc_umcallback(vnode_t, void *);
112static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
113static void ubc_cs_free(struct ubc_info *uip);
114
115static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
116static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
117
118struct zone *ubc_info_zone;
119static uint32_t cs_blob_generation_count = 1;
120
121/*
122 * CODESIGNING
123 * Routines to navigate code signing data structures in the kernel...
124 */
125
126extern int cs_debug;
127
128#define PAGE_SHIFT_4K (12)
129
130static boolean_t
131cs_valid_range(
132 const void *start,
133 const void *end,
134 const void *lower_bound,
135 const void *upper_bound)
136{
137 if (upper_bound < lower_bound ||
138 end < start) {
139 return FALSE;
140 }
141
142 if (start < lower_bound ||
143 end > upper_bound) {
144 return FALSE;
145 }
146
147 return TRUE;
148}
149
150typedef void (*cs_md_init)(void *ctx);
151typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
152typedef void (*cs_md_final)(void *hash, void *ctx);
153
154struct cs_hash {
155 uint8_t cs_type; /* type code as per code signing */
156 size_t cs_size; /* size of effective hash (may be truncated) */
157 size_t cs_digest_size; /* size of native hash */
158 cs_md_init cs_init;
159 cs_md_update cs_update;
160 cs_md_final cs_final;
161};
162
163uint8_t cs_hash_type(
164 struct cs_hash const * const cs_hash)
165{
166 return cs_hash->cs_type;
167}
168
169static const struct cs_hash cs_hash_sha1 = {
170 .cs_type = CS_HASHTYPE_SHA1,
171 .cs_size = CS_SHA1_LEN,
172 .cs_digest_size = SHA_DIGEST_LENGTH,
173 .cs_init = (cs_md_init)SHA1Init,
174 .cs_update = (cs_md_update)SHA1Update,
175 .cs_final = (cs_md_final)SHA1Final,
176};
177#if CRYPTO_SHA2
178static const struct cs_hash cs_hash_sha256 = {
179 .cs_type = CS_HASHTYPE_SHA256,
180 .cs_size = SHA256_DIGEST_LENGTH,
181 .cs_digest_size = SHA256_DIGEST_LENGTH,
182 .cs_init = (cs_md_init)SHA256_Init,
183 .cs_update = (cs_md_update)SHA256_Update,
184 .cs_final = (cs_md_final)SHA256_Final,
185};
186static const struct cs_hash cs_hash_sha256_truncate = {
187 .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
188 .cs_size = CS_SHA256_TRUNCATED_LEN,
189 .cs_digest_size = SHA256_DIGEST_LENGTH,
190 .cs_init = (cs_md_init)SHA256_Init,
191 .cs_update = (cs_md_update)SHA256_Update,
192 .cs_final = (cs_md_final)SHA256_Final,
193};
194static const struct cs_hash cs_hash_sha384 = {
195 .cs_type = CS_HASHTYPE_SHA384,
196 .cs_size = SHA384_DIGEST_LENGTH,
197 .cs_digest_size = SHA384_DIGEST_LENGTH,
198 .cs_init = (cs_md_init)SHA384_Init,
199 .cs_update = (cs_md_update)SHA384_Update,
200 .cs_final = (cs_md_final)SHA384_Final,
201};
202#endif
203
204static struct cs_hash const *
205cs_find_md(uint8_t type)
206{
207 if (type == CS_HASHTYPE_SHA1) {
208 return &cs_hash_sha1;
209#if CRYPTO_SHA2
210 } else if (type == CS_HASHTYPE_SHA256) {
211 return &cs_hash_sha256;
212 } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
213 return &cs_hash_sha256_truncate;
214 } else if (type == CS_HASHTYPE_SHA384) {
215 return &cs_hash_sha384;
216#endif
217 }
218 return NULL;
219}
220
221union cs_hash_union {
222 SHA1_CTX sha1ctxt;
223 SHA256_CTX sha256ctx;
224 SHA384_CTX sha384ctx;
225};
226
227
228/*
229 * Choose among different hash algorithms.
230 * Higher is better, 0 => don't use at all.
231 */
232static const uint32_t hashPriorities[] = {
233 CS_HASHTYPE_SHA1,
234 CS_HASHTYPE_SHA256_TRUNCATED,
235 CS_HASHTYPE_SHA256,
236 CS_HASHTYPE_SHA384,
237};
238
239static unsigned int
240hash_rank(const CS_CodeDirectory *cd)
241{
242 uint32_t type = cd->hashType;
243 unsigned int n;
244
245 for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n)
246 if (hashPriorities[n] == type)
247 return n + 1;
248 return 0; /* not supported */
249}
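
/*
 * Illustrative sketch, not from the original source and kept out of the
 * build with #if 0: how hash_rank() is intended to be used when choosing
 * between code directories. `example_pick_stronger_cd' and its arguments
 * are hypothetical; both pointers are assumed to be already-validated
 * code directories.
 */
#if 0
static const CS_CodeDirectory *
example_pick_stronger_cd(const CS_CodeDirectory *cd_a, const CS_CodeDirectory *cd_b)
{
	/* higher rank wins; a rank of 0 means the hash type is unsupported */
	return (hash_rank(cd_a) >= hash_rank(cd_b)) ? cd_a : cd_b;
}
#endif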
250
251
252/*
253 * Locating a page hash
254 */
255static const unsigned char *
256hashes(
257 const CS_CodeDirectory *cd,
258 uint32_t page,
259 size_t hash_len,
260 const char *lower_bound,
261 const char *upper_bound)
262{
263 const unsigned char *base, *top, *hash;
264 uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
265
266 assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
267
268 if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
269 /* Get first scatter struct */
270 const SC_Scatter *scatter = (const SC_Scatter*)
271 ((const char*)cd + ntohl(cd->scatterOffset));
272 uint32_t hashindex=0, scount, sbase=0;
273 /* iterate all scatter structs */
274 do {
275 if((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
276 if(cs_debug) {
277 printf("CODE SIGNING: Scatter extends past Code Directory\n");
278 }
279 return NULL;
280 }
281
282 scount = ntohl(scatter->count);
283 uint32_t new_base = ntohl(scatter->base);
284
285 /* last scatter? */
286 if (scount == 0) {
287 return NULL;
288 }
289
290 if((hashindex > 0) && (new_base <= sbase)) {
291 if(cs_debug) {
292 printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
293 sbase, new_base);
294 }
295 return NULL; /* unordered scatter array */
296 }
297 sbase = new_base;
298
299 /* this scatter beyond page we're looking for? */
300 if (sbase > page) {
301 return NULL;
302 }
303
304 if (sbase+scount >= page) {
305 /* Found the scatter struct that is
306 * referencing our page */
307
308 /* base = address of first hash covered by scatter */
309 base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
310 hashindex * hash_len;
311 /* top = address of first hash after this scatter */
312 top = base + scount * hash_len;
313 if (!cs_valid_range(base, top, lower_bound,
314 upper_bound) ||
315 hashindex > nCodeSlots) {
316 return NULL;
317 }
318
319 break;
320 }
321
322 /* this scatter struct is before the page we're looking
323 * for. Iterate. */
324 hashindex+=scount;
325 scatter++;
326 } while(1);
327
328 hash = base + (page - sbase) * hash_len;
329 } else {
330 base = (const unsigned char *)cd + ntohl(cd->hashOffset);
331 top = base + nCodeSlots * hash_len;
332 if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
333 page > nCodeSlots) {
334 return NULL;
335 }
336 assert(page < nCodeSlots);
337
338 hash = base + page * hash_len;
339 }
340
341 if (!cs_valid_range(hash, hash + hash_len,
342 lower_bound, upper_bound)) {
343 hash = NULL;
344 }
345
346 return hash;
347}
348
349/*
350 * cs_validate_codedirectory
351 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
354 *
355 * Parameters: cd Pointer to code directory buffer
356 * length Length of buffer
357 *
358 * Returns: 0 Success
359 * EBADEXEC Invalid code signature
360 */
361
362static int
363cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
364{
365 struct cs_hash const *hashtype;
366
367 if (length < sizeof(*cd))
368 return EBADEXEC;
369 if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
370 return EBADEXEC;
371 if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT)
372 return EBADEXEC;
373 hashtype = cs_find_md(cd->hashType);
374 if (hashtype == NULL)
375 return EBADEXEC;
376
377 if (cd->hashSize != hashtype->cs_size)
378 return EBADEXEC;
379
380 if (length < ntohl(cd->hashOffset))
381 return EBADEXEC;
382
383 /* check that nSpecialSlots fits in the buffer in front of hashOffset */
384 if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots))
385 return EBADEXEC;
386
 /* check that the code slots fit in the buffer */
388 if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots))
389 return EBADEXEC;
390
391 if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
392
393 if (length < ntohl(cd->scatterOffset))
394 return EBADEXEC;
395
396 const SC_Scatter *scatter = (const SC_Scatter *)
397 (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
398 uint32_t nPages = 0;
399
400 /*
 * Check each scatter buffer: since we don't know the
 * length of the scatter buffer array, we have to
 * check each entry.
404 */
405 while(1) {
 /* check that the end of each scatter buffer is within the length */
407 if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
408 return EBADEXEC;
409 uint32_t scount = ntohl(scatter->count);
410 if (scount == 0)
411 break;
412 if (nPages + scount < nPages)
413 return EBADEXEC;
414 nPages += scount;
415 scatter++;
416
 /* XXX check that bases don't overlap */
418 /* XXX check that targetOffset doesn't overlap */
419 }
420#if 0 /* rdar://12579439 */
421 if (nPages != ntohl(cd->nCodeSlots))
422 return EBADEXEC;
423#endif
424 }
425
426 if (length < ntohl(cd->identOffset))
427 return EBADEXEC;
428
 /* identifier is a NUL-terminated string */
430 if (cd->identOffset) {
431 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
432 if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
433 return EBADEXEC;
434 }
435
 /* team identifier is a NUL-terminated string */
437 if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
438 if (length < ntohl(cd->teamOffset))
439 return EBADEXEC;
440
441 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
442 if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
443 return EBADEXEC;
444 }
445
446 return 0;
447}
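
/*
 * Illustrative sketch (hypothetical helper, excluded from the build):
 * once cs_validate_codedirectory() succeeds, identOffset is known to point
 * at a NUL-terminated string inside the buffer, so it can be used directly.
 */
#if 0
static const char *
example_cd_identifier(const void *buf, size_t len)
{
	const CS_CodeDirectory *cd = (const CS_CodeDirectory *)buf;

	if (cs_validate_codedirectory(cd, len) != 0 || cd->identOffset == 0)
		return NULL;
	/* safe: validation bounded identOffset and found the terminating NUL */
	return (const char *)cd + ntohl(cd->identOffset);
}
#endif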
448
/*
 * cs_validate_blob
 *
 * Check that the buffer is large enough to hold the blob header and the
 * blob's declared length.
 */
452
453static int
454cs_validate_blob(const CS_GenericBlob *blob, size_t length)
455{
456 if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
457 return EBADEXEC;
458 return 0;
459}
460
461/*
462 * cs_validate_csblob
463 *
 * Validate the superblob/embedded code directory to make sure that
 * all internal pointers are valid.
466 *
467 * Will validate both a superblob csblob and a "raw" code directory.
468 *
469 *
470 * Parameters: buffer Pointer to code signature
471 * length Length of buffer
472 * rcd returns pointer to code directory
473 *
474 * Returns: 0 Success
475 * EBADEXEC Invalid code signature
476 */
477
478static int
479cs_validate_csblob(
480 const uint8_t *addr,
481 const size_t blob_size,
482 const CS_CodeDirectory **rcd,
483 const CS_GenericBlob **rentitlements)
484{
485 const CS_GenericBlob *blob;
486 int error;
487 size_t length;
488
489 *rcd = NULL;
490 *rentitlements = NULL;
491
492 blob = (const CS_GenericBlob *)(const void *)addr;
493
494 length = blob_size;
495 error = cs_validate_blob(blob, length);
496 if (error)
497 return error;
498 length = ntohl(blob->length);
499
500 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
501 const CS_SuperBlob *sb;
502 uint32_t n, count;
503 const CS_CodeDirectory *best_cd = NULL;
504 unsigned int best_rank = 0;
505#if PLATFORM_WatchOS
506 const CS_CodeDirectory *sha1_cd = NULL;
507#endif
508
509 if (length < sizeof(CS_SuperBlob))
510 return EBADEXEC;
511
512 sb = (const CS_SuperBlob *)blob;
513 count = ntohl(sb->count);
514
515 /* check that the array of BlobIndex fits in the rest of the data */
516 if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count)
517 return EBADEXEC;
518
519 /* now check each BlobIndex */
520 for (n = 0; n < count; n++) {
521 const CS_BlobIndex *blobIndex = &sb->index[n];
522 uint32_t type = ntohl(blobIndex->type);
523 uint32_t offset = ntohl(blobIndex->offset);
524 if (length < offset)
525 return EBADEXEC;
526
527 const CS_GenericBlob *subBlob =
528 (const CS_GenericBlob *)(const void *)(addr + offset);
529
530 size_t subLength = length - offset;
531
532 if ((error = cs_validate_blob(subBlob, subLength)) != 0)
533 return error;
534 subLength = ntohl(subBlob->length);
535
536 /* extra validation for CDs, that is also returned */
537 if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
538 const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
539 if ((error = cs_validate_codedirectory(candidate, subLength)) != 0)
540 return error;
541 unsigned int rank = hash_rank(candidate);
542 if (cs_debug > 3)
543 printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
544 if (best_cd == NULL || rank > best_rank) {
545 best_cd = candidate;
546 best_rank = rank;
547
548 if (cs_debug > 2)
549 printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
550 *rcd = best_cd;
551 } else if (best_cd != NULL && rank == best_rank) {
552 /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
553 printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
554 return EBADEXEC;
555 }
556#if PLATFORM_WatchOS
557 if (candidate->hashType == CS_HASHTYPE_SHA1) {
558 if (sha1_cd != NULL) {
559 printf("multiple sha1 CodeDirectories in signature; rejecting\n");
560 return EBADEXEC;
561 }
562 sha1_cd = candidate;
563 }
564#endif
565 } else if (type == CSSLOT_ENTITLEMENTS) {
566 if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
567 return EBADEXEC;
568 }
569 if (*rentitlements != NULL) {
570 printf("multiple entitlements blobs\n");
571 return EBADEXEC;
572 }
573 *rentitlements = subBlob;
574 }
575 }
576
577#if PLATFORM_WatchOS
578 /* To keep watchOS fast enough, we have to resort to sha1 for
579 * some code.
580 *
581 * At the time of writing this comment, known sha1 attacks are
582 * collision attacks (not preimage or second preimage
583 * attacks), which do not apply to platform binaries since
584 * they have a fixed hash in the trust cache. Given this
585 * property, we only prefer sha1 code directories for adhoc
586 * signatures, which always have to be in a trust cache to be
587 * valid (can-load-cdhash does not exist for watchOS). Those
588 * are, incidentally, also the platform binaries, for which we
589 * care about the performance hit that sha256 would bring us.
590 *
591 * Platform binaries may still contain a (not chosen) sha256
592 * code directory, which keeps software updates that switch to
593 * sha256-only small.
594 */
595
596 if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
597 if (sha1_cd->flags != (*rcd)->flags) {
598 printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
599 (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
600 *rcd = NULL;
601 return EBADEXEC;
602 }
603
604 *rcd = sha1_cd;
605 }
606#endif
607
608 } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
609
610 if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0)
611 return error;
612 *rcd = (const CS_CodeDirectory *)blob;
613 } else {
614 return EBADEXEC;
615 }
616
617 if (*rcd == NULL)
618 return EBADEXEC;
619
620 return 0;
621}
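
/*
 * Illustrative sketch (hypothetical, kept out of the build): a caller that
 * validates a code signature buffer with cs_validate_csblob() and then
 * inspects the code directory it selected. `sig' and `sig_len' stand for a
 * signature already copied into the kernel.
 */
#if 0
static int
example_validate_signature(const uint8_t *sig, size_t sig_len)
{
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	int error;

	/* accepts either a CSMAGIC_EMBEDDED_SIGNATURE superblob or a raw CD */
	error = cs_validate_csblob(sig, sig_len, &cd, &entitlements);
	if (error)
		return error;

	/* on success, `cd' is the highest-ranked code directory in the blob */
	if (cs_debug)
		printf("example: selected hash type %d\n", cd->hashType);
	return 0;
}
#endif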
622
623/*
 * csblob_find_blob_bytes
 *
 * Find a blob in the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use csblob_find_blob() instead.
 *
 * Will also find a "raw" code directory stored on its own, in addition
 * to searching a superblob.
 *
 * Parameters: buffer Pointer to code signature
 * length Length of buffer
 * type type of blob to find
 * magic the magic number for that blob
 *
 * Returns: pointer Success
 * NULL Blob not found
640 */
641
642const CS_GenericBlob *
643csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
644{
645 const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
646
647 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
648 const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
649 size_t n, count = ntohl(sb->count);
650
651 for (n = 0; n < count; n++) {
652 if (ntohl(sb->index[n].type) != type)
653 continue;
654 uint32_t offset = ntohl(sb->index[n].offset);
655 if (length - sizeof(const CS_GenericBlob) < offset)
656 return NULL;
657 blob = (const CS_GenericBlob *)(const void *)(addr + offset);
658 if (ntohl(blob->magic) != magic)
659 continue;
660 return blob;
661 }
662 } else if (type == CSSLOT_CODEDIRECTORY
663 && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
664 && magic == CSMAGIC_CODEDIRECTORY)
665 return blob;
666 return NULL;
667}
668
669
670const CS_GenericBlob *
671csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
672{
673 if ((csblob->csb_flags & CS_VALID) == 0)
674 return NULL;
675 return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
676}
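
/*
 * Illustrative sketch (hypothetical, excluded from the build): fetching the
 * entitlements blob out of an already-loaded cs_blob with csblob_find_blob().
 */
#if 0
static const CS_GenericBlob *
example_entitlements_blob(struct cs_blob *csblob)
{
	/* returns NULL if the blob is not CS_VALID or the slot is absent */
	return csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
	    CSMAGIC_EMBEDDED_ENTITLEMENTS);
}
#endif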
677
678static const uint8_t *
679find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
680{
681 /* there is no zero special slot since that is the first code slot */
682 if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
683 return NULL;
684
685 return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot));
686}
687
688static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
689
690int
691csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
692{
693 uint8_t computed_hash[CS_HASH_MAX_SIZE];
694 const CS_GenericBlob *entitlements;
695 const CS_CodeDirectory *code_dir;
696 const uint8_t *embedded_hash;
697 union cs_hash_union context;
698
699 *out_start = NULL;
700 *out_length = 0;
701
702 if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash))
703 return EBADEXEC;
704
705 code_dir = csblob->csb_cd;
706
707 if ((csblob->csb_flags & CS_VALID) == 0) {
708 entitlements = NULL;
709 } else {
710 entitlements = csblob->csb_entitlements_blob;
711 }
712 embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
713
714 if (embedded_hash == NULL) {
715 if (entitlements)
716 return EBADEXEC;
717 return 0;
718 } else if (entitlements == NULL) {
719 if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
720 return EBADEXEC;
721 } else {
722 return 0;
723 }
724 }
725
726 csblob->csb_hashtype->cs_init(&context);
727 csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
728 csblob->csb_hashtype->cs_final(computed_hash, &context);
729
730 if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0)
731 return EBADEXEC;
732
733 *out_start = __DECONST(void *, entitlements);
734 *out_length = ntohl(entitlements->length);
735
736 return 0;
737}
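
/*
 * Illustrative sketch (hypothetical, excluded from the build): how a caller
 * retrieves entitlements; on success the returned pointer aims into the
 * csblob itself and its hash has already been checked against the special
 * slot in the code directory.
 */
#if 0
static void
example_dump_entitlements(struct cs_blob *csblob)
{
	void *ents = NULL;
	size_t ents_len = 0;

	if (csblob_get_entitlements(csblob, &ents, &ents_len) == 0 && ents != NULL)
		printf("example: %lu bytes of entitlements\n", (unsigned long)ents_len);
}
#endif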
738
739/*
740 * CODESIGNING
741 * End of routines to navigate code signing data structures in the kernel.
742 */
743
744
745
746/*
747 * ubc_init
748 *
749 * Initialization of the zone for Unified Buffer Cache.
750 *
751 * Parameters: (void)
752 *
753 * Returns: (void)
754 *
755 * Implicit returns:
756 * ubc_info_zone(global) initialized for subsequent allocations
757 */
758__private_extern__ void
759ubc_init(void)
760{
761 int i;
762
763 i = (vm_size_t) sizeof (struct ubc_info);
764
765 ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
766
767 zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
768}
769
770
771/*
772 * ubc_info_init
773 *
774 * Allocate and attach an empty ubc_info structure to a vnode
775 *
776 * Parameters: vp Pointer to the vnode
777 *
778 * Returns: 0 Success
779 * vnode_size:ENOMEM Not enough space
780 * vnode_size:??? Other error from vnode_getattr
781 *
782 */
783int
784ubc_info_init(struct vnode *vp)
785{
786 return(ubc_info_init_internal(vp, 0, 0));
787}
788
789
790/*
791 * ubc_info_init_withsize
792 *
793 * Allocate and attach a sized ubc_info structure to a vnode
794 *
795 * Parameters: vp Pointer to the vnode
796 * filesize The size of the file
797 *
798 * Returns: 0 Success
799 * vnode_size:ENOMEM Not enough space
800 * vnode_size:??? Other error from vnode_getattr
801 */
802int
803ubc_info_init_withsize(struct vnode *vp, off_t filesize)
804{
805 return(ubc_info_init_internal(vp, 1, filesize));
806}
807
808
809/*
810 * ubc_info_init_internal
811 *
812 * Allocate and attach a ubc_info structure to a vnode
813 *
814 * Parameters: vp Pointer to the vnode
815 * withfsize{0,1} Zero if the size should be obtained
816 * from the vnode; otherwise, use filesize
817 * filesize The size of the file, if withfsize == 1
818 *
819 * Returns: 0 Success
820 * vnode_size:ENOMEM Not enough space
821 * vnode_size:??? Other error from vnode_getattr
822 *
823 * Notes: We call a blocking zalloc(), and the zone was created as an
824 * expandable and collectable zone, so if no memory is available,
825 * it is possible for zalloc() to block indefinitely. zalloc()
826 * may also panic if the zone of zones is exhausted, since it's
827 * NOT expandable.
828 *
829 * We unconditionally call vnode_pager_setup(), even if this is
830 * a reuse of a ubc_info; in that case, we should probably assert
831 * that it does not already have a pager association, but do not.
832 *
833 * Since memory_object_create_named() can only fail from receiving
834 * an invalid pager argument, the explicit check and panic is
835 * merely precautionary.
836 */
837static int
838ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
839{
840 struct ubc_info *uip;
841 void * pager;
842 int error = 0;
843 kern_return_t kret;
844 memory_object_control_t control;
845
846 uip = vp->v_ubcinfo;
847
848 /*
849 * If there is not already a ubc_info attached to the vnode, we
850 * attach one; otherwise, we will reuse the one that's there.
851 */
852 if (uip == UBC_INFO_NULL) {
853
854 uip = (struct ubc_info *) zalloc(ubc_info_zone);
855 bzero((char *)uip, sizeof(struct ubc_info));
856
857 uip->ui_vnode = vp;
858 uip->ui_flags = UI_INITED;
859 uip->ui_ucred = NOCRED;
860 }
861 assert(uip->ui_flags != UI_NONE);
862 assert(uip->ui_vnode == vp);
863
864 /* now set this ubc_info in the vnode */
865 vp->v_ubcinfo = uip;
866
867 /*
868 * Allocate a pager object for this vnode
869 *
870 * XXX The value of the pager parameter is currently ignored.
871 * XXX Presumably, this API changed to avoid the race between
872 * XXX setting the pager and the UI_HASPAGER flag.
873 */
874 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
875 assert(pager);
876
877 /*
878 * Explicitly set the pager into the ubc_info, after setting the
879 * UI_HASPAGER flag.
880 */
881 SET(uip->ui_flags, UI_HASPAGER);
882 uip->ui_pager = pager;
883
884 /*
 * Note: We cannot use VNOP_GETATTR() to get an accurate
 * value of ui_size because this may be an NFS vnode, and
 * nfs_getattr() can call vinvalbuf(); if this happens,
 * ubc_info is not set up to deal with that event.
 * So use a bogus size.
890 */
891
892 /*
893 * create a vnode - vm_object association
 * memory_object_create_named() creates a "named" reference on the
 * memory object; we hold this reference as long as the vnode is
 * "alive."  Since memory_object_create_named() took its own reference
897 * on the vnode pager we passed it, we can drop the reference
898 * vnode_pager_setup() returned here.
899 */
900 kret = memory_object_create_named(pager,
901 (memory_object_size_t)uip->ui_size, &control);
902 vnode_pager_deallocate(pager);
903 if (kret != KERN_SUCCESS)
904 panic("ubc_info_init: memory_object_create_named returned %d", kret);
905
906 assert(control);
907 uip->ui_control = control; /* cache the value of the mo control */
908 SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */
909
910 if (withfsize == 0) {
911 /* initialize the size */
912 error = vnode_size(vp, &uip->ui_size, vfs_context_current());
913 if (error)
914 uip->ui_size = 0;
915 } else {
916 uip->ui_size = filesize;
917 }
918 vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */
919
920 return (error);
921}
922
923
924/*
925 * ubc_info_free
926 *
927 * Free a ubc_info structure
928 *
929 * Parameters: uip A pointer to the ubc_info to free
930 *
931 * Returns: (void)
932 *
933 * Notes: If there is a credential that has subsequently been associated
934 * with the ubc_info via a call to ubc_setcred(), the reference
935 * to the credential is dropped.
936 *
937 * It's actually impossible for a ubc_info.ui_control to take the
938 * value MEMORY_OBJECT_CONTROL_NULL.
939 */
940static void
941ubc_info_free(struct ubc_info *uip)
942{
943 if (IS_VALID_CRED(uip->ui_ucred)) {
944 kauth_cred_unref(&uip->ui_ucred);
945 }
946
947 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
948 memory_object_control_deallocate(uip->ui_control);
949
950 cluster_release(uip);
951 ubc_cs_free(uip);
952
953 zfree(ubc_info_zone, uip);
954 return;
955}
956
957
958void
959ubc_info_deallocate(struct ubc_info *uip)
960{
961 ubc_info_free(uip);
962}
963
964errno_t mach_to_bsd_errno(kern_return_t mach_err)
965{
966 switch (mach_err) {
967 case KERN_SUCCESS:
968 return 0;
969
970 case KERN_INVALID_ADDRESS:
971 case KERN_INVALID_ARGUMENT:
972 case KERN_NOT_IN_SET:
973 case KERN_INVALID_NAME:
974 case KERN_INVALID_TASK:
975 case KERN_INVALID_RIGHT:
976 case KERN_INVALID_VALUE:
977 case KERN_INVALID_CAPABILITY:
978 case KERN_INVALID_HOST:
979 case KERN_MEMORY_PRESENT:
980 case KERN_INVALID_PROCESSOR_SET:
981 case KERN_INVALID_POLICY:
982 case KERN_ALREADY_WAITING:
983 case KERN_DEFAULT_SET:
984 case KERN_EXCEPTION_PROTECTED:
985 case KERN_INVALID_LEDGER:
986 case KERN_INVALID_MEMORY_CONTROL:
987 case KERN_INVALID_SECURITY:
988 case KERN_NOT_DEPRESSED:
989 case KERN_LOCK_OWNED:
990 case KERN_LOCK_OWNED_SELF:
991 return EINVAL;
992
993 case KERN_PROTECTION_FAILURE:
994 case KERN_NOT_RECEIVER:
995 case KERN_NO_ACCESS:
996 case KERN_POLICY_STATIC:
997 return EACCES;
998
999 case KERN_NO_SPACE:
1000 case KERN_RESOURCE_SHORTAGE:
1001 case KERN_UREFS_OVERFLOW:
1002 case KERN_INVALID_OBJECT:
1003 return ENOMEM;
1004
1005 case KERN_FAILURE:
1006 return EIO;
1007
1008 case KERN_MEMORY_FAILURE:
1009 case KERN_POLICY_LIMIT:
1010 case KERN_CODESIGN_ERROR:
1011 return EPERM;
1012
1013 case KERN_MEMORY_ERROR:
1014 return EBUSY;
1015
1016 case KERN_ALREADY_IN_SET:
1017 case KERN_NAME_EXISTS:
1018 case KERN_RIGHT_EXISTS:
1019 return EEXIST;
1020
1021 case KERN_ABORTED:
1022 return EINTR;
1023
1024 case KERN_TERMINATED:
1025 case KERN_LOCK_SET_DESTROYED:
1026 case KERN_LOCK_UNSTABLE:
1027 case KERN_SEMAPHORE_DESTROYED:
1028 return ENOENT;
1029
1030 case KERN_RPC_SERVER_TERMINATED:
1031 return ECONNRESET;
1032
1033 case KERN_NOT_SUPPORTED:
1034 return ENOTSUP;
1035
1036 case KERN_NODE_DOWN:
1037 return ENETDOWN;
1038
1039 case KERN_NOT_WAITING:
1040 return ENOENT;
1041
1042 case KERN_OPERATION_TIMED_OUT:
1043 return ETIMEDOUT;
1044
1045 default:
1046 return EIO;
1047 }
1048}
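
/*
 * Illustrative sketch (hypothetical, excluded from the build): translating a
 * Mach result into a BSD errno for the VFS layer. The control object and
 * size are assumed to come from a ubc_info (e.g. via ubc_getobject()); the
 * flag combination mirrors how ubc_msync_internal() drives
 * memory_object_lock_request() later in this file.
 */
#if 0
static errno_t
example_flush_object(memory_object_control_t control, memory_object_size_t size)
{
	kern_return_t kr;

	kr = memory_object_lock_request(control, 0, size, NULL, NULL,
	    MEMORY_OBJECT_RETURN_DIRTY, MEMORY_OBJECT_IO_SYNC, VM_PROT_NO_CHANGE);

	return mach_to_bsd_errno(kr);
}
#endif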
1049
1050/*
1051 * ubc_setsize_ex
1052 *
 * Tell the VM that the size of the file represented by the vnode has
1054 * changed
1055 *
1056 * Parameters: vp The vp whose backing file size is
1057 * being changed
1058 * nsize The new size of the backing file
1059 * opts Options
1060 *
1061 * Returns: EINVAL for new size < 0
1062 * ENOENT if no UBC info exists
1063 * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1064 * Other errors (mapped to errno_t) returned by VM functions
1065 *
1066 * Notes: This function will indicate success if the new size is the
1067 * same or larger than the old size (in this case, the
1068 * remainder of the file will require modification or use of
1069 * an existing upl to access successfully).
1070 *
1071 * This function will fail if the new file size is smaller,
1072 * and the memory region being invalidated was unable to
1073 * actually be invalidated and/or the last page could not be
1074 * flushed, if the new size is not aligned to a page
1075 * boundary. This is usually indicative of an I/O error.
1076 */
1077errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1078{
1079 off_t osize; /* ui_size before change */
1080 off_t lastpg, olastpgend, lastoff;
1081 struct ubc_info *uip;
1082 memory_object_control_t control;
1083 kern_return_t kret = KERN_SUCCESS;
1084
1085 if (nsize < (off_t)0)
1086 return EINVAL;
1087
1088 if (!UBCINFOEXISTS(vp))
1089 return ENOENT;
1090
1091 uip = vp->v_ubcinfo;
1092 osize = uip->ui_size;
1093
1094 if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize)
1095 return EAGAIN;
1096
1097 /*
1098 * Update the size before flushing the VM
1099 */
1100 uip->ui_size = nsize;
1101
1102 if (nsize >= osize) { /* Nothing more to do */
1103 if (nsize > osize) {
1104 lock_vnode_and_post(vp, NOTE_EXTEND);
1105 }
1106
1107 return 0;
1108 }
1109
1110 /*
1111 * When the file shrinks, invalidate the pages beyond the
1112 * new size. Also get rid of garbage beyond nsize on the
1113 * last page. The ui_size already has the nsize, so any
1114 * subsequent page-in will zero-fill the tail properly
1115 */
1116 lastpg = trunc_page_64(nsize);
1117 olastpgend = round_page_64(osize);
1118 control = uip->ui_control;
1119 assert(control);
1120 lastoff = (nsize & PAGE_MASK_64);
1121
1122 if (lastoff) {
1123 upl_t upl;
1124 upl_page_info_t *pl;
1125
1126 /*
1127 * new EOF ends up in the middle of a page
1128 * zero the tail of this page if it's currently
1129 * present in the cache
1130 */
1131 kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE, VM_KERN_MEMORY_FILE);
1132
1133 if (kret != KERN_SUCCESS)
1134 panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
1135
1136 if (upl_valid_page(pl, 0))
1137 cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1138
1139 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1140
1141 lastpg += PAGE_SIZE_64;
1142 }
1143 if (olastpgend > lastpg) {
1144 int flags;
1145
1146 if (lastpg == 0)
1147 flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
1148 else
1149 flags = MEMORY_OBJECT_DATA_FLUSH;
1150 /*
1151 * invalidate the pages beyond the new EOF page
1152 *
1153 */
1154 kret = memory_object_lock_request(control,
1155 (memory_object_offset_t)lastpg,
1156 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1157 MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1158 if (kret != KERN_SUCCESS)
1159 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1160 }
1161 return mach_to_bsd_errno(kret);
1162}
1163
1164// Returns true for success
1165int ubc_setsize(vnode_t vp, off_t nsize)
1166{
1167 return ubc_setsize_ex(vp, nsize, 0) == 0;
1168}
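
/*
 * Illustrative sketch (hypothetical, excluded from the build): a truncation
 * path telling the UBC about the new file size. A shrink invalidates the
 * cached pages past `new_size' and zeroes the tail of the last page; growth
 * just records the size. Note that the legacy ubc_setsize() wrapper above
 * returns non-zero on success, not an errno.
 */
#if 0
static errno_t
example_truncate_cache(vnode_t vp, off_t new_size)
{
	errno_t error;

	error = ubc_setsize_ex(vp, new_size, 0);
	if (error)
		printf("example: ubc_setsize_ex failed (%d)\n", error);
	return error;
}
#endif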
1169
1170/*
1171 * ubc_getsize
1172 *
 * Get the size of the file associated with the specified vnode
1174 *
1175 * Parameters: vp The vnode whose size is of interest
1176 *
1177 * Returns: 0 There is no ubc_info associated with
1178 * this vnode, or the size is zero
1179 * !0 The size of the file
1180 *
1181 * Notes: Using this routine, it is not possible for a caller to
 * successfully distinguish between a vnode associated with a
 * zero-length file, and a vnode with no associated ubc_info. The
1184 * caller therefore needs to not care, or needs to ensure that
1185 * they have previously successfully called ubc_info_init() or
1186 * ubc_info_init_withsize().
1187 */
1188off_t
1189ubc_getsize(struct vnode *vp)
1190{
1191 /* people depend on the side effect of this working this way
 * as they call this for directories
1193 */
1194 if (!UBCINFOEXISTS(vp))
1195 return ((off_t)0);
1196 return (vp->v_ubcinfo->ui_size);
1197}
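
/*
 * Illustrative sketch (hypothetical, excluded from the build): because
 * ubc_getsize() returns 0 both when there is no ubc_info and for a genuinely
 * empty file, a caller that cares about the difference checks for the
 * ubc_info explicitly first.
 */
#if 0
static int
example_cached_size(vnode_t vp, off_t *sizep)
{
	if (!UBCINFOEXISTS(vp))
		return ENOENT;
	*sizep = ubc_getsize(vp);
	return 0;
}
#endif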
1198
1199
1200/*
1201 * ubc_umount
1202 *
1203 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1204 * mount point
1205 *
1206 * Parameters: mp The mount point
1207 *
1208 * Returns: 0 Success
1209 *
1210 * Notes: There is no failure indication for this function.
1211 *
1212 * This function is used in the unmount path; since it may block
1213 * I/O indefinitely, it should not be used in the forced unmount
1214 * path, since a device unavailability could also block that
1215 * indefinitely.
1216 *
1217 * Because there is no device ejection interlock on USB, FireWire,
1218 * or similar devices, it's possible that an ejection that begins
1219 * subsequent to the vnode_iterate() completing, either on one of
1220 * those devices, or a network mount for which the server quits
1221 * responding, etc., may cause the caller to block indefinitely.
1222 */
1223__private_extern__ int
1224ubc_umount(struct mount *mp)
1225{
1226 vnode_iterate(mp, 0, ubc_umcallback, 0);
1227 return(0);
1228}
1229
1230
1231/*
1232 * ubc_umcallback
1233 *
1234 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1235 * and vnode_iterate() for details of implementation.
1236 */
1237static int
1238ubc_umcallback(vnode_t vp, __unused void * args)
1239{
1240
1241 if (UBCINFOEXISTS(vp)) {
1242
1243 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1244 }
1245 return (VNODE_RETURNED);
1246}
1247
1248
1249/*
1250 * ubc_getcred
1251 *
1252 * Get the credentials currently active for the ubc_info associated with the
1253 * vnode.
1254 *
1255 * Parameters: vp The vnode whose ubc_info credentials
1256 * are to be retrieved
1257 *
1258 * Returns: !NOCRED The credentials
1259 * NOCRED If there is no ubc_info for the vnode,
1260 * or if there is one, but it has not had
1261 * any credentials associated with it via
1262 * a call to ubc_setcred()
1263 */
1264kauth_cred_t
1265ubc_getcred(struct vnode *vp)
1266{
1267 if (UBCINFOEXISTS(vp))
1268 return (vp->v_ubcinfo->ui_ucred);
1269
1270 return (NOCRED);
1271}
1272
1273
1274/*
1275 * ubc_setthreadcred
1276 *
1277 * If they are not already set, set the credentials of the ubc_info structure
1278 * associated with the vnode to those of the supplied thread; otherwise leave
1279 * them alone.
1280 *
1281 * Parameters: vp The vnode whose ubc_info creds are to
1282 * be set
1283 * p The process whose credentials are to
1284 * be used, if not running on an assumed
1285 * credential
1286 * thread The thread whose credentials are to
1287 * be used
1288 *
1289 * Returns: 1 This vnode has no associated ubc_info
1290 * 0 Success
1291 *
1292 * Notes: This function takes a proc parameter to account for bootstrap
1293 * issues where a task or thread may call this routine, either
1294 * before credentials have been initialized by bsd_init(), or if
 * there is no BSD info associated with a Mach thread yet. This
1296 * is known to happen in both the initial swap and memory mapping
1297 * calls.
1298 *
1299 * This function is generally used only in the following cases:
1300 *
1301 * o a memory mapped file via the mmap() system call
1302 * o a swap store backing file
1303 * o subsequent to a successful write via vn_write()
1304 *
1305 * The information is then used by the NFS client in order to
1306 * cons up a wire message in either the page-in or page-out path.
1307 *
1308 * There are two potential problems with the use of this API:
1309 *
 * o Because the write path only sets it on a successful
1311 * write, there is a race window between setting the
1312 * credential and its use to evict the pages to the
1313 * remote file server
1314 *
1315 * o Because a page-in may occur prior to a write, the
1316 * credential may not be set at this time, if the page-in
1317 * is not the result of a mapping established via mmap().
1318 *
1319 * In both these cases, this will be triggered from the paging
1320 * path, which will instead use the credential of the current
1321 * process, which in this case is either the dynamic_pager or
1322 * the kernel task, both of which utilize "root" credentials.
1323 *
1324 * This may potentially permit operations to occur which should
 * be denied, or it may cause operations which should be permitted
 * to be denied, depending on the configuration of the NFS
1327 * server.
1328 */
1329int
1330ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
1331{
1332 struct ubc_info *uip;
1333 kauth_cred_t credp;
1334 struct uthread *uthread = get_bsdthread_info(thread);
1335
1336 if (!UBCINFOEXISTS(vp))
1337 return (1);
1338
1339 vnode_lock(vp);
1340
1341 uip = vp->v_ubcinfo;
1342 credp = uip->ui_ucred;
1343
1344 if (!IS_VALID_CRED(credp)) {
1345 /* use per-thread cred, if assumed identity, else proc cred */
1346 if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
1347 uip->ui_ucred = kauth_cred_proc_ref(p);
1348 } else {
1349 uip->ui_ucred = uthread->uu_ucred;
1350 kauth_cred_ref(uip->ui_ucred);
1351 }
1352 }
1353 vnode_unlock(vp);
1354
1355 return (0);
1356}
1357
1358
1359/*
1360 * ubc_setcred
1361 *
1362 * If they are not already set, set the credentials of the ubc_info structure
1363 * associated with the vnode to those of the process; otherwise leave them
1364 * alone.
1365 *
1366 * Parameters: vp The vnode whose ubc_info creds are to
1367 * be set
1368 * p The process whose credentials are to
1369 * be used
1370 *
1371 * Returns: 0 This vnode has no associated ubc_info
1372 * 1 Success
1373 *
1374 * Notes: The return values for this function are inverted from nearly
1375 * all other uses in the kernel.
1376 *
1377 * See also ubc_setthreadcred(), above.
1378 *
1379 * This function is considered deprecated, and generally should
1380 * not be used, as it is incompatible with per-thread credentials;
1381 * it exists for legacy KPI reasons.
1382 *
1383 * DEPRECATION: ubc_setcred() is being deprecated. Please use
1384 * ubc_setthreadcred() instead.
1385 */
1386int
1387ubc_setcred(struct vnode *vp, proc_t p)
1388{
1389 struct ubc_info *uip;
1390 kauth_cred_t credp;
1391
1392 /* If there is no ubc_info, deny the operation */
1393 if ( !UBCINFOEXISTS(vp))
1394 return (0);
1395
1396 /*
1397 * Check to see if there is already a credential reference in the
1398 * ubc_info; if there is not, take one on the supplied credential.
1399 */
1400 vnode_lock(vp);
1401 uip = vp->v_ubcinfo;
1402 credp = uip->ui_ucred;
1403 if (!IS_VALID_CRED(credp)) {
1404 uip->ui_ucred = kauth_cred_proc_ref(p);
1405 }
1406 vnode_unlock(vp);
1407
1408 return (1);
1409}
1410
1411/*
1412 * ubc_getpager
1413 *
1414 * Get the pager associated with the ubc_info associated with the vnode.
1415 *
1416 * Parameters: vp The vnode to obtain the pager from
1417 *
1418 * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager
1419 * VNODE_PAGER_NULL There is no ubc_info for this vnode
1420 *
1421 * Notes: For each vnode that has a ubc_info associated with it, that
1422 * ubc_info SHALL have a pager associated with it, so in the
1423 * normal case, it's impossible to return VNODE_PAGER_NULL for
1424 * a vnode with an associated ubc_info.
1425 */
1426__private_extern__ memory_object_t
1427ubc_getpager(struct vnode *vp)
1428{
1429 if (UBCINFOEXISTS(vp))
1430 return (vp->v_ubcinfo->ui_pager);
1431
1432 return (0);
1433}
1434
1435
1436/*
1437 * ubc_getobject
1438 *
1439 * Get the memory object control associated with the ubc_info associated with
1440 * the vnode
1441 *
1442 * Parameters: vp The vnode to obtain the memory object
1443 * from
1444 * flags DEPRECATED
1445 *
1446 * Returns: !MEMORY_OBJECT_CONTROL_NULL
1447 * MEMORY_OBJECT_CONTROL_NULL
1448 *
1449 * Notes: Historically, if the flags were not "do not reactivate", this
1450 * function would look up the memory object using the pager if
1451 * it did not exist (this could be the case if the vnode had
1452 * been previously reactivated). The flags would also permit a
1453 * hold to be requested, which would have created an object
1454 * reference, if one had not already existed. This usage is
1455 * deprecated, as it would permit a race between finding and
1456 * taking the reference vs. a single reference being dropped in
1457 * another thread.
1458 */
1459memory_object_control_t
1460ubc_getobject(struct vnode *vp, __unused int flags)
1461{
1462 if (UBCINFOEXISTS(vp))
1463 return((vp->v_ubcinfo->ui_control));
1464
1465 return (MEMORY_OBJECT_CONTROL_NULL);
1466}
1467
1468/*
1469 * ubc_blktooff
1470 *
1471 * Convert a given block number to a memory backing object (file) offset for a
1472 * given vnode
1473 *
1474 * Parameters: vp The vnode in which the block is located
1475 * blkno The block number to convert
1476 *
1477 * Returns: !-1 The offset into the backing object
1478 * -1 There is no ubc_info associated with
1479 * the vnode
1480 * -1 An error occurred in the underlying VFS
1481 * while translating the block to an
1482 * offset; the most likely cause is that
1483 * the caller specified a block past the
1484 * end of the file, but this could also be
1485 * any other error from VNOP_BLKTOOFF().
1486 *
1487 * Note: Representing the error in band loses some information, but does
1488 * not occlude a valid offset, since an off_t of -1 is normally
1489 * used to represent EOF. If we had a more reliable constant in
1490 * our header files for it (i.e. explicitly cast to an off_t), we
1491 * would use it here instead.
1492 */
1493off_t
1494ubc_blktooff(vnode_t vp, daddr64_t blkno)
1495{
1496 off_t file_offset = -1;
1497 int error;
1498
1499 if (UBCINFOEXISTS(vp)) {
1500 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1501 if (error)
1502 file_offset = -1;
1503 }
1504
1505 return (file_offset);
1506}
1507
1508
1509/*
1510 * ubc_offtoblk
1511 *
1512 * Convert a given offset in a memory backing object into a block number for a
1513 * given vnode
1514 *
1515 * Parameters: vp The vnode in which the offset is
1516 * located
1517 * offset The offset into the backing object
1518 *
1519 * Returns: !-1 The returned block number
1520 * -1 There is no ubc_info associated with
1521 * the vnode
1522 * -1 An error occurred in the underlying VFS
 * while translating the offset to a block;
 * the most likely cause is that the caller
 * specified an offset past the end of the
 * file, but this could also be
1527 * any other error from VNOP_OFFTOBLK().
1528 *
1529 * Note: Representing the error in band loses some information, but does
1530 * not occlude a valid block number, since block numbers exceed
1531 * the valid range for offsets, due to their relative sizes. If
1532 * we had a more reliable constant than -1 in our header files
 * for it (i.e. explicitly cast to a daddr64_t), we would use it
1534 * here instead.
1535 */
1536daddr64_t
1537ubc_offtoblk(vnode_t vp, off_t offset)
1538{
1539 daddr64_t blkno = -1;
1540 int error = 0;
1541
1542 if (UBCINFOEXISTS(vp)) {
1543 error = VNOP_OFFTOBLK(vp, offset, &blkno);
1544 if (error)
1545 blkno = -1;
1546 }
1547
1548 return (blkno);
1549}
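
/*
 * Illustrative sketch (hypothetical, excluded from the build): round-tripping
 * an offset through ubc_offtoblk()/ubc_blktooff(). Both helpers report
 * failure in-band as -1, so the results are checked before use.
 */
#if 0
static void
example_offset_block_roundtrip(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	off_t blkoff;

	blkno = ubc_offtoblk(vp, offset);
	if (blkno == (daddr64_t)-1)
		return;

	blkoff = ubc_blktooff(vp, blkno);
	if (blkoff != (off_t)-1)
		printf("example: offset 0x%llx falls in the block starting at 0x%llx\n",
		    (unsigned long long)offset, (unsigned long long)blkoff);
}
#endif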
1550
1551
1552/*
1553 * ubc_pages_resident
1554 *
1555 * Determine whether or not a given vnode has pages resident via the memory
1556 * object control associated with the ubc_info associated with the vnode
1557 *
1558 * Parameters: vp The vnode we want to know about
1559 *
1560 * Returns: 1 Yes
1561 * 0 No
1562 */
1563int
1564ubc_pages_resident(vnode_t vp)
1565{
1566 kern_return_t kret;
1567 boolean_t has_pages_resident;
1568
1569 if (!UBCINFOEXISTS(vp))
1570 return (0);
1571
1572 /*
1573 * The following call may fail if an invalid ui_control is specified,
1574 * or if there is no VM object associated with the control object. In
1575 * either case, reacting to it as if there were no pages resident will
1576 * result in correct behavior.
1577 */
1578 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1579
1580 if (kret != KERN_SUCCESS)
1581 return (0);
1582
1583 if (has_pages_resident == TRUE)
1584 return (1);
1585
1586 return (0);
1587}
1588
1589/*
1590 * ubc_msync
1591 *
1592 * Clean and/or invalidate a range in the memory object that backs this vnode
1593 *
1594 * Parameters: vp The vnode whose associated ubc_info's
1595 * associated memory object is to have a
1596 * range invalidated within it
1597 * beg_off The start of the range, as an offset
1598 * end_off The end of the range, as an offset
1599 * resid_off The address of an off_t supplied by the
1600 * caller; may be set to NULL to ignore
1601 * flags See ubc_msync_internal()
1602 *
1603 * Returns: 0 Success
1604 * !0 Failure; an errno is returned
1605 *
1606 * Implicit Returns:
1607 * *resid_off, modified If non-NULL, the contents are ALWAYS
1608 * modified; they are initialized to the
1609 * beg_off, and in case of an I/O error,
1610 * the difference between beg_off and the
1611 * current value will reflect what was
1612 * able to be written before the error
1613 * occurred. If no error is returned, the
1614 * value of the resid_off is undefined; do
1615 * NOT use it in place of end_off if you
1616 * intend to increment from the end of the
1617 * last call and call iteratively.
1618 *
1619 * Notes: see ubc_msync_internal() for more detailed information.
1620 *
1621 */
1622errno_t
1623ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1624{
1625 int retval;
1626 int io_errno = 0;
1627
1628 if (resid_off)
1629 *resid_off = beg_off;
1630
1631 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1632
1633 if (retval == 0 && io_errno == 0)
1634 return (EINVAL);
1635 return (io_errno);
1636}
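
/*
 * Illustrative sketch (hypothetical, excluded from the build): pushing the
 * dirty pages of a range back to the pager and waiting for the I/O, without
 * invalidating the cached pages. The end offset is exclusive and the range
 * is rounded to page boundaries internally.
 */
#if 0
static errno_t
example_push_dirty_range(vnode_t vp, off_t start, off_t end)
{
	return ubc_msync(vp, start, end, NULL, UBC_PUSHDIRTY | UBC_SYNC);
}
#endif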
1637
1638
1639/*
1640 * ubc_msync_internal
1641 *
1642 * Clean and/or invalidate a range in the memory object that backs this vnode
1643 *
1644 * Parameters: vp The vnode whose associated ubc_info's
1645 * associated memory object is to have a
1646 * range invalidated within it
1647 * beg_off The start of the range, as an offset
1648 * end_off The end of the range, as an offset
1649 * resid_off The address of an off_t supplied by the
1650 * caller; may be set to NULL to ignore
1651 * flags MUST contain at least one of the flags
1652 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1653 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1654 * UBC_SYNC may also be specified to cause
1655 * this function to block until the
1656 * operation is complete. The behavior
1657 * of UBC_SYNC is otherwise undefined.
1658 * io_errno The address of an int to contain the
1659 * errno from a failed I/O operation, if
1660 * one occurs; may be set to NULL to
1661 * ignore
1662 *
1663 * Returns: 1 Success
1664 * 0 Failure
1665 *
1666 * Implicit Returns:
1667 * *resid_off, modified The contents of this offset MAY be
1668 * modified; in case of an I/O error, the
1669 * difference between beg_off and the
1670 * current value will reflect what was
1671 * able to be written before the error
1672 * occurred.
1673 * *io_errno, modified The contents of this offset are set to
1674 * an errno, if an error occurs; if the
1675 * caller supplies an io_errno parameter,
1676 * they should be careful to initialize it
1677 * to 0 before calling this function to
1678 * enable them to distinguish an error
1679 * with a valid *resid_off from an invalid
1680 * one, and to avoid potentially falsely
1681 * reporting an error, depending on use.
1682 *
1683 * Notes: If there is no ubc_info associated with the vnode supplied,
1684 * this function immediately returns success.
1685 *
1686 * If the value of end_off is less than or equal to beg_off, this
1687 * function immediately returns success; that is, end_off is NOT
1688 * inclusive.
1689 *
1690 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1691 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1692 * attempt to block on in-progress I/O by calling this function
1693 * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1694 * in order to block pending on the I/O already in progress.
1695 *
1696 * The start offset is truncated to the page boundary and the
1697 * size is adjusted to include the last page in the range; that
1698 * is, end_off on exactly a page boundary will not change if it
1699 * is rounded, and the range of bytes written will be from the
 * truncated beg_off to the rounded (end_off - 1).
1701 */
1702static int
1703ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1704{
1705 memory_object_size_t tsize;
1706 kern_return_t kret;
1707 int request_flags = 0;
1708 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
1709
1710 if ( !UBCINFOEXISTS(vp))
1711 return (0);
1712 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
1713 return (0);
1714 if (end_off <= beg_off)
1715 return (1);
1716
1717 if (flags & UBC_INVALIDATE)
1718 /*
1719 * discard the resident pages
1720 */
1721 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1722
1723 if (flags & UBC_SYNC)
1724 /*
1725 * wait for all the I/O to complete before returning
1726 */
1727 request_flags |= MEMORY_OBJECT_IO_SYNC;
1728
1729 if (flags & UBC_PUSHDIRTY)
1730 /*
1731 * we only return the dirty pages in the range
1732 */
1733 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1734
1735 if (flags & UBC_PUSHALL)
1736 /*
1737 * then return all the interesting pages in the range (both
1738 * dirty and precious) to the pager
1739 */
1740 flush_flags = MEMORY_OBJECT_RETURN_ALL;
1741
1742 beg_off = trunc_page_64(beg_off);
1743 end_off = round_page_64(end_off);
1744 tsize = (memory_object_size_t)end_off - beg_off;
1745
1746 /* flush and/or invalidate pages in the range requested */
1747 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
1748 beg_off, tsize,
1749 (memory_object_offset_t *)resid_off,
1750 io_errno, flush_flags, request_flags,
1751 VM_PROT_NO_CHANGE);
1752
1753 return ((kret == KERN_SUCCESS) ? 1 : 0);
1754}
1755
1756
1757/*
1758 * ubc_map
1759 *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1761 * to it for the ubc system, if there isn't one already, so it will not be
1762 * recycled while it's in use, and set flags on the ubc_info to indicate that
1763 * we have done this
1764 *
1765 * Parameters: vp The vnode to map
1766 * flags The mapping flags for the vnode; this
1767 * will be a combination of one or more of
1768 * PROT_READ, PROT_WRITE, and PROT_EXEC
1769 *
1770 * Returns: 0 Success
1771 * EPERM Permission was denied
1772 *
1773 * Notes: An I/O reference on the vnode must already be held on entry
1774 *
1775 * If there is no ubc_info associated with the vnode, this function
1776 * will return success.
1777 *
1778 * If a permission error occurs, this function will return
1779 * failure; all other failures will cause this function to return
1780 * success.
1781 *
1782 * IMPORTANT: This is an internal use function, and its symbols
1783 * are not exported, hence its error checking is not very robust.
1784 * It is primarily used by:
1785 *
1786 * o mmap(), when mapping a file
1787 * o When mapping a shared file (a shared library in the
1788 * shared segment region)
1789 * o When loading a program image during the exec process
1790 *
1791 * ...all of these uses ignore the return code, and any fault that
1792 * results later because of a failure is handled in the fix-up path
1793 * of the fault handler. The interface exists primarily as a
1794 * performance hint.
1795 *
 * Given that third party implementations of the types of interfaces
1797 * that would use this function, such as alternative executable
1798 * formats, etc., are unsupported, this function is not exported
1799 * for general use.
1800 *
1801 * The extra reference is held until the VM system unmaps the
1802 * vnode from its own context to maintain a vnode reference in
1803 * cases like open()/mmap()/close(), which leave the backing
1804 * object referenced by a mapped memory region in a process
1805 * address space.
1806 */
1807__private_extern__ int
1808ubc_map(vnode_t vp, int flags)
1809{
1810 struct ubc_info *uip;
1811 int error = 0;
1812 int need_ref = 0;
1813 int need_wakeup = 0;
1814
1815 if (UBCINFOEXISTS(vp)) {
1816
1817 vnode_lock(vp);
1818 uip = vp->v_ubcinfo;
1819
1820 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
1821 SET(uip->ui_flags, UI_MAPWAITING);
1822 (void) msleep(&uip->ui_flags, &vp->v_lock,
1823 PRIBIO, "ubc_map", NULL);
1824 }
1825 SET(uip->ui_flags, UI_MAPBUSY);
1826 vnode_unlock(vp);
1827
1828 error = VNOP_MMAP(vp, flags, vfs_context_current());
1829
1830 /*
1831 * rdar://problem/22587101 required that we stop propagating
1832 * EPERM up the stack. Otherwise, we would have to funnel up
1833 * the error at all the call sites for memory_object_map().
1834 * The risk is in having to undo the map/object/entry state at
1835 * all these call sites. It would also affect more than just mmap()
1836 * e.g. vm_remap().
1837 *
1838 * if (error != EPERM)
1839 * error = 0;
1840 */
1841
1842 error = 0;
1843
1844 vnode_lock_spin(vp);
1845
1846 if (error == 0) {
1847 if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
1848 need_ref = 1;
1849 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
1850 if (flags & PROT_WRITE) {
1851 SET(uip->ui_flags, UI_MAPPEDWRITE);
1852 }
1853 }
1854 CLR(uip->ui_flags, UI_MAPBUSY);
1855
1856 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
1857 CLR(uip->ui_flags, UI_MAPWAITING);
1858 need_wakeup = 1;
1859 }
1860 vnode_unlock(vp);
1861
1862 if (need_wakeup)
1863 wakeup(&uip->ui_flags);
1864
1865 if (need_ref) {
1866 /*
1867 * Make sure we get a ref as we can't unwind from here
1868 */
1869 if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE))
1870 panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
1871 }
1872 }
1873 return (error);
1874}
1875
1876
1877/*
1878 * ubc_destroy_named
1879 *
1880 * Destroy the named memory object associated with the ubc_info control object
1881 * associated with the designated vnode, if there is a ubc_info associated
1882 * with the vnode, and a control object is associated with it
1883 *
1884 * Parameters: vp The designated vnode
1885 *
1886 * Returns: (void)
1887 *
1888 * Notes: This function is called on vnode termination for all vnodes,
1889 * and must therefore not assume that there is a ubc_info that is
1890 * associated with the vnode, nor that there is a control object
1891 * associated with the ubc_info.
1892 *
1893 * If all the conditions necessary are present, this function
1894 * calls memory_object_destroy(), which will in turn end up
1895 * calling ubc_unmap() to release any vnode references that were
1896 * established via ubc_map().
1897 *
1898 * IMPORTANT: This is an internal use function that is used
1899 * exclusively by the internal use function vclean().
1900 */
1901__private_extern__ void
1902ubc_destroy_named(vnode_t vp)
1903{
1904 memory_object_control_t control;
1905 struct ubc_info *uip;
1906 kern_return_t kret;
1907
1908 if (UBCINFOEXISTS(vp)) {
1909 uip = vp->v_ubcinfo;
1910
1911 /* Terminate the memory object */
1912 control = ubc_getobject(vp, UBC_HOLDOBJECT);
1913 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1914 kret = memory_object_destroy(control, 0);
1915 if (kret != KERN_SUCCESS)
1916 panic("ubc_destroy_named: memory_object_destroy failed");
1917 }
1918 }
1919}
1920
1921
1922/*
1923 * ubc_isinuse
1924 *
1925 * Determine whether or not a vnode is currently in use by ubc at a level in
1926 * excess of the requested busycount
1927 *
1928 * Parameters: vp The vnode to check
1929 * busycount The threshold busy count, used to bias
1930 * the count usually already held by the
1931 * caller to avoid races
1932 *
1933 * Returns: 1 The vnode is in use over the threshold
1934 * 0 The vnode is not in use over the
1935 * threshold
1936 *
1937 * Notes: Because the vnode is only held locked while actually checking
1938 * the use count, this function only represents a snapshot of the
1939 * current state of the vnode. If more accurate information is
1940 * required, an additional busycount should be held by the caller
1941 * and a non-zero busycount used.
1942 *
1943 * If there is no ubc_info associated with the vnode, this
1944 * function will report that the vnode is not in use by ubc.
1945 */
1946int
1947ubc_isinuse(struct vnode *vp, int busycount)
1948{
1949 if ( !UBCINFOEXISTS(vp))
1950 return (0);
1951 return(ubc_isinuse_locked(vp, busycount, 0));
1952}
1953
1954
1955/*
1956 * ubc_isinuse_locked
1957 *
1958 * Determine whether or not a vnode is currently in use by ubc at a level in
1959 * excess of the requested busycount
1960 *
1961 * Parameters: vp The vnode to check
1962 * busycount The threshold busy count, used to bias
1963 * the count usually already held by the
1964 * caller to avoid races
1965 * locked True if the vnode is already locked by
1966 * the caller
1967 *
1968 * Returns: 1 The vnode is in use over the threshold
1969 * 0 The vnode is not in use over the
1970 * threshold
1971 *
1972 * Notes: If the vnode is not locked on entry, it is locked while
1973 * actually checking the use count. If this is the case, this
1974 * function only represents a snapshot of the current state of
1975 * the vnode. If more accurate information is required, the
1976 * vnode lock should be held by the caller, otherwise an
1977 * additional busycount should be held by the caller and a
1978 * non-zero busycount used.
1979 *
1980 * If there is no ubc_info associated with the vnode, this
1981 * function will report that the vnode is not in use by ubc.
1982 */
1983int
1984ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
1985{
1986 int retval = 0;
1987
1988
1989 if (!locked)
1990 vnode_lock_spin(vp);
1991
1992 if ((vp->v_usecount - vp->v_kusecount) > busycount)
1993 retval = 1;
1994
1995 if (!locked)
1996 vnode_unlock(vp);
1997 return (retval);
1998}
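/*
 * Illustrative sketch (not part of the original source): a caller that
 * already holds one persistent use count on the vnode passes a busycount
 * of 1 so its own reference is not reported as "in use", per the notes
 * above.  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_vnode_busy_beyond_my_ref(vnode_t vp)
{
	/* bias by 1 to exclude the use count this caller is assumed to hold */
	return ubc_isinuse(vp, 1);
}
#endif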
1999
2000
2001/*
2002 * ubc_unmap
2003 *
2004 * Reverse the effects of a ubc_map() call for a given vnode
2005 *
2006 * Parameters: vp vnode to unmap from ubc
2007 *
2008 * Returns: (void)
2009 *
2010 * Notes: This is an internal use function used by vnode_pager_unmap().
2011 * It will attempt to obtain a reference on the supplied vnode,
2012 * and if it can do so, and there is an associated ubc_info, and
2013 * the flags indicate that it was mapped via ubc_map(), then the
2014 * flag is cleared, the mapping removed, and the reference taken
2015 * by ubc_map() is released.
2016 *
2017 * IMPORTANT: This MUST only be called by the VM
2018 * to prevent race conditions.
2019 */
2020__private_extern__ void
2021ubc_unmap(struct vnode *vp)
2022{
2023 struct ubc_info *uip;
2024 int need_rele = 0;
2025 int need_wakeup = 0;
2026
2027 if (vnode_getwithref(vp))
2028 return;
2029
2030 if (UBCINFOEXISTS(vp)) {
2031 bool want_fsevent = false;
2032
2033 vnode_lock(vp);
2034 uip = vp->v_ubcinfo;
2035
2036 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2037 SET(uip->ui_flags, UI_MAPWAITING);
2038 (void) msleep(&uip->ui_flags, &vp->v_lock,
2039 PRIBIO, "ubc_unmap", NULL);
2040 }
2041 SET(uip->ui_flags, UI_MAPBUSY);
2042
2043 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
2044 if (ISSET(uip->ui_flags, UI_MAPPEDWRITE))
2045 want_fsevent = true;
2046
2047 need_rele = 1;
2048
2049 /*
2050 * We want to clear the mapped flags after we've called
2051 * VNOP_MNOMAP to avoid certain races and allow
2052 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2053 */
2054 }
2055 vnode_unlock(vp);
2056
2057 if (need_rele) {
2058 vfs_context_t ctx = vfs_context_current();
2059
2060 (void)VNOP_MNOMAP(vp, ctx);
2061
2062#if CONFIG_FSE
2063 /*
2064 * Why do we want an fsevent here? Normally the
2065 * content modified fsevent is posted when a file is
2066 * closed and only if it's written to via conventional
2067 * means. It's perfectly legal to close a file and
2068 * keep your mappings and we don't currently track
2069 * whether it was written to via a mapping.
2070 * Therefore, we need to post an fsevent here if the
2071 * file was mapped writable. This may result in false
2072 * events, i.e. we post a notification when nothing
2073 * has really changed.
2074 */
2075 if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2076 add_fsevent(FSE_CONTENT_MODIFIED, ctx,
2077 FSE_ARG_VNODE, vp,
2078 FSE_ARG_DONE);
2079 }
2080#endif
2081
2082 vnode_rele(vp);
2083 }
2084
2085 vnode_lock_spin(vp);
2086
2087 if (need_rele)
2088 CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
2089
2090 CLR(uip->ui_flags, UI_MAPBUSY);
2091
2092 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2093 CLR(uip->ui_flags, UI_MAPWAITING);
2094 need_wakeup = 1;
2095 }
2096 vnode_unlock(vp);
2097
2098 if (need_wakeup)
2099 wakeup(&uip->ui_flags);
2100
2101 }
2102 /*
2103 * the drop of the vnode ref will cleanup
2104 */
2105 vnode_put(vp);
2106}
2107
2108
2109/*
2110 * ubc_page_op
2111 *
2112 * Manipulate individual page state for a vnode with an associated ubc_info
2113 * with an associated memory object control.
2114 *
2115 * Parameters: vp The vnode backing the page
2116 * f_offset A file offset interior to the page
2117 * ops The operations to perform, as a bitmap
2118 * (see below for more information)
2119 * phys_entryp The address of a ppnum_t; may be NULL
2120 * to ignore
2121 * flagsp A pointer to an int to contain flags;
2122 * may be NULL to ignore
2123 *
2124 * Returns: KERN_SUCCESS Success
2125 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2126 * object associated
2127 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2128 * not physically contiguous
2129 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2130 * physically contiguous
2131 * KERN_FAILURE If the page cannot be looked up
2132 *
2133 * Implicit Returns:
2134 * *phys_entryp (modified) If phys_entryp is non-NULL and
2135 * UPL_POP_PHYSICAL
2136 * *flagsp (modified) If flagsp is non-NULL and there was
2137 * !UPL_POP_PHYSICAL and a KERN_SUCCESS
2138 *
2139 * Notes: For object boundaries, it is considerably more efficient to
2140 * ensure that f_offset is in fact on a page boundary; an
2141 * unaligned offset forces internal use of the hash table to
2142 * identify the page and therefore skips a number of early
2143 * optimizations. Since this is a page operation anyway, the
2144 * caller should try to pass only a page aligned offset.
2145 *
2146 * *flagsp may be modified even if this function fails. If it is
2147 * modified, it will contain the condition of the page before the
2148 * requested operation was attempted; these will only include the
2149 * bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2150 * UPL_POP_SET, or UPL_POP_CLR bits.
2151 *
2152 * The flags field may contain a specific operation, such as
2153 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2154 *
2155 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2156 * *phys_entryp and successful, set
2157 * *phys_entryp
2158 * o UPL_POP_DUMP Dump the specified page
2159 *
2160 * Otherwise, it is treated as a bitmap of one or more page
2161 * operations to perform on the final memory object; allowable
2162 * bit values are:
2163 *
2164 * o UPL_POP_DIRTY The page is dirty
2165 * o UPL_POP_PAGEOUT The page is paged out
2166 * o UPL_POP_PRECIOUS The page is precious
2167 * o UPL_POP_ABSENT The page is absent
2168 * o UPL_POP_BUSY The page is busy
2169 *
2170 * If the page status is only being queried and not modified, then
2171 * no other bits should be specified. However, if it is being
2172 * modified, exactly ONE of the following bits should be set:
2173 *
2174 * o UPL_POP_SET Set the current bitmap bits
2175 * o UPL_POP_CLR Clear the current bitmap bits
2176 *
2177 * Thus to effect a combination of setting and clearing, it may be
2178 * necessary to call this function twice. If this is done, the
2179 * set should be used before the clear, since clearing may trigger
2180 * a wakeup on the destination page, and if the page is backed by
2181 * an encrypted swap file, setting will trigger the decryption
2182 * needed before the wakeup occurs.
2183 */
2184kern_return_t
2185ubc_page_op(
2186 struct vnode *vp,
2187 off_t f_offset,
2188 int ops,
2189 ppnum_t *phys_entryp,
2190 int *flagsp)
2191{
2192 memory_object_control_t control;
2193
2194 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2195 if (control == MEMORY_OBJECT_CONTROL_NULL)
2196 return KERN_INVALID_ARGUMENT;
2197
2198 return (memory_object_page_op(control,
2199 (memory_object_offset_t)f_offset,
2200 ops,
2201 phys_entryp,
2202 flagsp));
2203}
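/*
 * Illustrative sketch (not part of the original source): querying a page's
 * state with ubc_page_op() and then marking it dirty.  Per the notes above,
 * a query passes neither UPL_POP_SET nor UPL_POP_CLR, while a modification
 * passes exactly one of them.  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_page_query_then_dirty(vnode_t vp, off_t f_offset)
{
	int flags = 0;

	/* query only: read back the page's state through *flagsp */
	if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS &&
	    (flags & UPL_POP_DIRTY))
		printf("page at %lld already dirty\n", f_offset);

	/* modify: set the dirty bit on the page */
	(void) ubc_page_op(vp, f_offset, UPL_POP_DIRTY | UPL_POP_SET,
	    NULL, NULL);
}
#endif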
2204
2205
2206/*
2207 * ubc_range_op
2208 *
2209 * Manipulate page state for a range of memory for a vnode with an associated
2210 * ubc_info with an associated memory object control, when page level state is
2211 * not required to be returned from the call (i.e. there are no phys_entryp or
2212 * flagsp parameters to this call, and it takes a range which may contain
2213 * multiple pages, rather than an offset interior to a single page).
2214 *
2215 * Parameters: vp The vnode backing the page
2216 * f_offset_beg A file offset interior to the start page
2217 * f_offset_end A file offset interior to the end page
2218 * ops The operations to perform, as a bitmap
2219 * (see below for more information)
2220 * range The address of an int; may be NULL to
2221 * ignore
2222 *
2223 * Returns: KERN_SUCCESS Success
2224 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2225 * object associated
2226 * KERN_INVALID_OBJECT If the object is physically contiguous
2227 *
2228 * Implicit Returns:
2229 * *range (modified) If range is non-NULL, its contents will
2230 * be modified to contain the number of
2231 * bytes successfully operated upon.
2232 *
2233 * Notes: IMPORTANT: This function cannot be used on a range that
2234 * consists of physically contiguous pages.
2235 *
2236 * For object boundaries, it is considerably more efficient to
2237 * ensure that f_offset_beg and f_offset_end are in fact on page
2238 * boundaries; unaligned offsets force internal use of the hash
2239 * table to identify the pages and therefore skip a number of
2240 * early optimizations. Since this is an operation on a set of
2241 * pages anyway, the caller should try to pass only page aligned
2242 * offsets.
2243 *
2244 * *range will be modified only if this function succeeds.
2245 *
2246 * The flags field MUST contain a specific operation; allowable
2247 * values are:
2248 *
2249 * o UPL_ROP_ABSENT Returns the extent of the range
2250 * presented which is absent, starting
2251 * with the start address presented
2252 *
2253 * o UPL_ROP_PRESENT Returns the extent of the range
2254 * presented which is present (resident),
2255 * starting with the start address
2256 * presented
2257 * o UPL_ROP_DUMP Dump the pages which are found in the
2258 * target object for the target range.
2259 *
2260 * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2261 * multiple regions in the range, only the first matching region
2262 * is returned.
2263 */
2264kern_return_t
2265ubc_range_op(
2266 struct vnode *vp,
2267 off_t f_offset_beg,
2268 off_t f_offset_end,
2269 int ops,
2270 int *range)
2271{
2272 memory_object_control_t control;
2273
2274 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2275 if (control == MEMORY_OBJECT_CONTROL_NULL)
2276 return KERN_INVALID_ARGUMENT;
2277
2278 return (memory_object_range_op(control,
2279 (memory_object_offset_t)f_offset_beg,
2280 (memory_object_offset_t)f_offset_end,
2281 ops,
2282 range));
2283}
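/*
 * Illustrative sketch (not part of the original source): using
 * UPL_ROP_PRESENT to find how many bytes starting at 'start' are already
 * resident, per the notes above.  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static off_t
example_resident_extent(vnode_t vp, off_t start, off_t end)
{
	int bytes = 0;

	if (ubc_range_op(vp, start, end, UPL_ROP_PRESENT, &bytes) != KERN_SUCCESS)
		return 0;
	/* only the first matching region is reported */
	return (off_t)bytes;
}
#endif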
2284
2285
2286/*
2287 * ubc_create_upl
2288 *
2289 * Given a vnode, cause the population of a portion of the vm_object; based on
2290 * the nature of the request, the pages returned may contain valid data, or
2291 * they may be uninitialized.
2292 *
2293 * Parameters: vp The vnode from which to create the upl
2294 * f_offset The start offset into the backing store
2295 * represented by the vnode
2296 * bufsize The size of the upl to create
2297 * uplp Pointer to the upl_t to receive the
2298 * created upl; MUST NOT be NULL
2299 * plp Pointer to receive the internal page
2300 * list for the created upl; MAY be NULL
2301 * to ignore
2302 *
2303 * Returns: KERN_SUCCESS The requested upl has been created
2304 * KERN_INVALID_ARGUMENT The bufsize argument is not an even
2305 * multiple of the page size
2306 * KERN_INVALID_ARGUMENT There is no ubc_info associated with
2307 * the vnode, or there is no memory object
2308 * control associated with the ubc_info
2309 * memory_object_upl_request:KERN_INVALID_VALUE
2310 * The supplied upl_flags argument is
2311 * invalid
2312 * Implicit Returns:
2313 * *uplp (modified)
2314 * *plp (modified) If non-NULL, the value of *plp will be
2315 * modified to point to the internal page
2316 * list; this modification may occur even
2317 * if this function is unsuccessful, in
2318 * which case the contents may be invalid
2319 *
2320 * Note: If successful, the returned *uplp MUST subsequently be freed
2321 * via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2322 * ubc_upl_abort(), or ubc_upl_abort_range().
2323 */
2324kern_return_t
2325ubc_create_upl_external(
2326 struct vnode *vp,
2327 off_t f_offset,
2328 int bufsize,
2329 upl_t *uplp,
2330 upl_page_info_t **plp,
2331 int uplflags)
2332{
2333 return (ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt()));
2334}
2335
2336kern_return_t
2337ubc_create_upl_kernel(
2338 struct vnode *vp,
2339 off_t f_offset,
2340 int bufsize,
2341 upl_t *uplp,
2342 upl_page_info_t **plp,
2343 int uplflags,
2344 vm_tag_t tag)
2345{
2346 memory_object_control_t control;
2347 kern_return_t kr;
2348
2349 if (plp != NULL)
2350 *plp = NULL;
2351 *uplp = NULL;
2352
2353 if (bufsize & 0xfff)
2354 return KERN_INVALID_ARGUMENT;
2355
2356 if (bufsize > MAX_UPL_SIZE_BYTES)
2357 return KERN_INVALID_ARGUMENT;
2358
2359 if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
2360
2361 if (uplflags & UPL_UBC_MSYNC) {
2362 uplflags &= UPL_RET_ONLY_DIRTY;
2363
2364 uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
2365 UPL_SET_INTERNAL | UPL_SET_LITE;
2366
2367 } else if (uplflags & UPL_UBC_PAGEOUT) {
2368 uplflags &= UPL_RET_ONLY_DIRTY;
2369
2370 if (uplflags & UPL_RET_ONLY_DIRTY)
2371 uplflags |= UPL_NOBLOCK;
2372
2373 uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
2374 UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
2375 } else {
2376 uplflags |= UPL_RET_ONLY_ABSENT |
2377 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2378 UPL_SET_INTERNAL | UPL_SET_LITE;
2379
2380 /*
2381 * if the requested size == PAGE_SIZE, we don't want to set
2382 * the UPL_NOBLOCK since we may be trying to recover from a
2383 * previous partial pagein I/O that occurred because we were low
2384 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2385 * since we're only asking for a single page, we can block w/o fear
2386 * of tying up pages while waiting for more to become available
2387 */
2388 if (bufsize > PAGE_SIZE)
2389 uplflags |= UPL_NOBLOCK;
2390 }
2391 } else {
2392 uplflags &= ~UPL_FOR_PAGEOUT;
2393
2394 if (uplflags & UPL_WILL_BE_DUMPED) {
2395 uplflags &= ~UPL_WILL_BE_DUMPED;
2396 uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
2397 } else
2398 uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
2399 }
2400 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2401 if (control == MEMORY_OBJECT_CONTROL_NULL)
2402 return KERN_INVALID_ARGUMENT;
2403
2404 kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
2405 if (kr == KERN_SUCCESS && plp != NULL)
2406 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
2407 return kr;
2408}
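/*
 * Illustrative sketch (not part of the original source): the expected
 * lifecycle of a UPL created through this interface -- create, optionally
 * map into the kernel, then always retire it via commit or abort as the
 * note above requires.  The helper name, the UPL_UBC_PAGEIN usage and the
 * VM_KERN_MEMORY_FILE tag choice are assumptions.
 */
#if 0	/* example only, not compiled */
static int
example_upl_lifecycle(vnode_t vp, off_t f_offset)
{
	upl_t upl = NULL;
	upl_page_info_t *pl = NULL;
	vm_offset_t kaddr = 0;

	if (ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
	    UPL_UBC_PAGEIN, VM_KERN_MEMORY_FILE) != KERN_SUCCESS)
		return EIO;

	if (ubc_upl_map(upl, &kaddr) == KERN_SUCCESS) {
		/* ... inspect or fill the page at 'kaddr' ... */
		(void) ubc_upl_unmap(upl);
	}

	/* every successfully created upl must end in a commit or an abort */
	(void) ubc_upl_abort_range(upl, 0, PAGE_SIZE,
	    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	return 0;
}
#endif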
2409
2410
2411/*
2412 * ubc_upl_maxbufsize
2413 *
2414 * Return the maximum bufsize ubc_create_upl( ) will take.
2415 *
2416 * Parameters: none
2417 *
2418 * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take.
2419 */
2420upl_size_t
2421ubc_upl_maxbufsize(
2422 void)
2423{
2424 return(MAX_UPL_SIZE_BYTES);
2425}
2426
2427/*
2428 * ubc_upl_map
2429 *
2430 * Map the page list associated with the supplied upl into the kernel virtual
2431 * address space at the virtual address indicated by the dst_addr argument;
2432 * the entire upl is mapped
2433 *
2434 * Parameters: upl The upl to map
2435 * dst_addr The address at which to map the upl
2436 *
2437 * Returns: KERN_SUCCESS The upl has been mapped
2438 * KERN_INVALID_ARGUMENT The upl is UPL_NULL
2439 * KERN_FAILURE The upl is already mapped
2440 * vm_map_enter:KERN_INVALID_ARGUMENT
2441 * A failure code from vm_map_enter() due
2442 * to an invalid argument
2443 */
2444kern_return_t
2445ubc_upl_map(
2446 upl_t upl,
2447 vm_offset_t *dst_addr)
2448{
2449 return (vm_upl_map(kernel_map, upl, dst_addr));
2450}
2451
2452
2453/*
2454 * ubc_upl_unmap
2455 *
2456 * Unmap the page list associated with the supplied upl from the kernel virtual
2457 * address space; the entire upl is unmapped.
2458 *
2459 * Parameters: upl The upl to unmap
2460 *
2461 * Returns: KERN_SUCCESS The upl has been unmapped
2462 * KERN_FAILURE The upl is not currently mapped
2463 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL
2464 */
2465kern_return_t
2466ubc_upl_unmap(
2467 upl_t upl)
2468{
2469 return(vm_upl_unmap(kernel_map, upl));
2470}
2471
2472
2473/*
2474 * ubc_upl_commit
2475 *
2476 * Commit the contents of the upl to the backing store
2477 *
2478 * Parameters: upl The upl to commit
2479 *
2480 * Returns: KERN_SUCCESS The upl has been committed
2481 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2482 * KERN_FAILURE The supplied upl does not represent
2483 * device memory, and the offset plus the
2484 * size would exceed the actual size of
2485 * the upl
2486 *
2487 * Notes: In practice, the only return value for this function should be
2488 * KERN_SUCCESS, unless there has been data structure corruption;
2489 * since the upl is deallocated regardless of success or failure,
2490 * there's really nothing to do about this other than panic.
2491 *
2492 * IMPORTANT: Use of this function should not be mixed with use of
2493 * ubc_upl_commit_range(), due to the unconditional deallocation
2494 * by this function.
2495 */
2496kern_return_t
2497ubc_upl_commit(
2498 upl_t upl)
2499{
2500 upl_page_info_t *pl;
2501 kern_return_t kr;
2502
2503 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2504 kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2505 upl_deallocate(upl);
2506 return kr;
2507}
2508
2509
2510/*
2511 * ubc_upl_commit_range
2512 *
2513 * Commit the contents of the specified range of the upl to the backing store
2514 *
2515 * Parameters: upl The upl to commit
2516 * offset The offset into the upl
2517 * size The size of the region to be committed,
2518 * starting at the specified offset
2519 * flags commit type (see below)
2520 *
2521 * Returns: KERN_SUCCESS The range has been committed
2522 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2523 * KERN_FAILURE The supplied upl does not represent
2524 * device memory, and the offset plus the
2525 * size would exceed the actual size of
2526 * the upl
2527 *
2528 * Notes: IMPORTANT: If the commit is successful, and the object is now
2529 * empty, the upl will be deallocated. Since the caller cannot
2530 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2531 * should generally only be used when the offset is 0 and the size
2532 * is equal to the upl size.
2533 *
2534 * The flags argument is a bitmap of flags on the range of pages in
2535 * the upl to be committed; allowable flags are:
2536 *
2537 * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is
2538 * both empty and has been
2539 * successfully committed
2540 * o UPL_COMMIT_CLEAR_DIRTY Clear each page's dirty
2541 * bit; will prevent a
2542 * later pageout
2543 * o UPL_COMMIT_SET_DIRTY Set each page's dirty
2544 * bit; will cause a later
2545 * pageout
2546 * o UPL_COMMIT_INACTIVATE Clear each page's
2547 * reference bit; the page
2548 * will not be accessed
2549 * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages
2550 * become busy when an
2551 * IOMemoryDescriptor is
2552 * mapped or redirected,
2553 * and we have to wait for
2554 * an IOKit driver
2555 *
2556 * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2557 * not be specified by the caller.
2558 *
2559 * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2560 * mutually exclusive, and should not be combined.
2561 */
2562kern_return_t
2563ubc_upl_commit_range(
2564 upl_t upl,
2565 upl_offset_t offset,
2566 upl_size_t size,
2567 int flags)
2568{
2569 upl_page_info_t *pl;
2570 boolean_t empty;
2571 kern_return_t kr;
2572
2573 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
2574 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2575
2576 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2577 return KERN_INVALID_ARGUMENT;
2578 }
2579
2580 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2581
2582 kr = upl_commit_range(upl, offset, size, flags,
2583 pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2584
2585 if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
2586 upl_deallocate(upl);
2587
2588 return kr;
2589}
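/*
 * Illustrative sketch (not part of the original source): committing an
 * entire UPL after a successful write-back.  UPL_COMMIT_FREE_ON_EMPTY is
 * safe here because the commit starts at offset 0 and covers the whole UPL,
 * matching the IMPORTANT note above.  'upl' and 'size' are assumed to come
 * from an earlier ubc_create_upl() covering exactly that range.
 */
#if 0	/* example only, not compiled */
static void
example_commit_whole_upl(upl_t upl, upl_size_t size)
{
	(void) ubc_upl_commit_range(upl, 0, size,
	    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
}
#endif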
2590
2591
2592/*
2593 * ubc_upl_abort_range
2594 *
2595 * Abort the contents of the specified range of the specified upl
2596 *
2597 * Parameters: upl The upl to abort
2598 * offset The offset into the upl
2599 * size The size of the region to be aborted,
2600 * starting at the specified offset
2601 * abort_flags abort type (see below)
2602 *
2603 * Returns: KERN_SUCCESS The range has been aborted
2604 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2605 * KERN_FAILURE The supplied upl does not represent
2606 * device memory, and the offset plus the
2607 * size would exceed the actual size of
2608 * the upl
2609 *
2610 * Notes: IMPORTANT: If the abort is successful, and the object is now
2611 * empty, the upl will be deallocated. Since the caller cannot
2612 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2613 * should generally only be used when the offset is 0 and the size
2614 * is equal to the upl size.
2615 *
2616 * The abort_flags argument is a bitmap of flags on the range of
2617 * pages in the upl to be aborted; allowable flags are:
2618 *
2619 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2620 * empty and has been successfully
2621 * aborted
2622 * o UPL_ABORT_RESTART The operation must be restarted
2623 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2624 * o UPL_ABORT_ERROR An I/O error occurred
2625 * o UPL_ABORT_DUMP_PAGES Just free the pages
2626 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2627 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2628 *
2629 * The UPL_ABORT_NOTIFY_EMPTY flag is for internal use and should
2630 * not be specified by the caller. It is intended to fulfill the
2631 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2632 * ubc_upl_commit_range(), but is never referenced internally.
2633 *
2634 * The UPL_ABORT_ALLOW_ACCESS flag is defined, but neither set nor
2635 * referenced; do not use it.
2636 */
2637kern_return_t
2638ubc_upl_abort_range(
2639 upl_t upl,
2640 upl_offset_t offset,
2641 upl_size_t size,
2642 int abort_flags)
2643{
2644 kern_return_t kr;
2645 boolean_t empty = FALSE;
2646
2647 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
2648 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2649
2650 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2651
2652 if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
2653 upl_deallocate(upl);
2654
2655 return kr;
2656}
2657
2658
2659/*
2660 * ubc_upl_abort
2661 *
2662 * Abort the contents of the specified upl
2663 *
2664 * Parameters: upl The upl to abort
2665 * abort_type abort type (see below)
2666 *
2667 * Returns: KERN_SUCCESS The range has been aborted
2668 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2669 * KERN_FAILURE The supplied upl does not represent
2670 * device memory, and the offset plus the
2671 * size would exceed the actual size of
2672 * the upl
2673 *
2674 * Notes: IMPORTANT: If the abort is successful, and the object is now
2675 * empty, the upl will be deallocated. Since the caller cannot
2676 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2677 * should generally only be used when the offset is 0 and the size
2678 * is equal to the upl size.
2679 *
2680 * The abort_type is a bitmap of flags on the range of
2681 * pages in the upl to be aborted; allowable flags are:
2682 *
2683 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2684 * empty and has been successfully
2685 * aborted
2686 * o UPL_ABORT_RESTART The operation must be restarted
2687 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2688 * o UPL_ABORT_ERROR An I/O error occurred
2689 * o UPL_ABORT_DUMP_PAGES Just free the pages
2690 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2691 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2692 *
2693 * The UPL_ABORT_NOTIFY_EMPTY flag is for internal use and should
2694 * not be specified by the caller. It is intended to fulfill the
2695 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2696 * ubc_upl_commit_range(), but is never referenced internally.
2697 *
2698 * The UPL_ABORT_ALLOW_ACCESS flag is defined, but neither set nor
2699 * referenced; do not use it.
2700 */
2701kern_return_t
2702ubc_upl_abort(
2703 upl_t upl,
2704 int abort_type)
2705{
2706 kern_return_t kr;
2707
2708 kr = upl_abort(upl, abort_type);
2709 upl_deallocate(upl);
2710 return kr;
2711}
2712
2713
2714/*
2715 * ubc_upl_pageinfo
2716 *
2717 * Retrieve the internal page list for the specified upl
2718 *
2719 * Parameters: upl The upl to obtain the page list from
2720 *
2721 * Returns: !NULL The (upl_page_info_t *) for the page
2722 * list internal to the upl
2723 * NULL Error/no page list associated
2724 *
2725 * Notes: IMPORTANT: The function is only valid on internal objects
2726 * where the list request was made with the UPL_INTERNAL flag.
2727 *
2728 * This function is a utility helper function, since some callers
2729 * may not have direct access to the header defining the macro,
2730 * due to abstraction layering constraints.
2731 */
2732upl_page_info_t *
2733ubc_upl_pageinfo(
2734 upl_t upl)
2735{
2736 return (UPL_GET_INTERNAL_PAGE_LIST(upl));
2737}
2738
2739
2740int
2741UBCINFOEXISTS(const struct vnode * vp)
2742{
2743 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
2744}
2745
2746
2747void
2748ubc_upl_range_needed(
2749 upl_t upl,
2750 int index,
2751 int count)
2752{
2753 upl_range_needed(upl, index, count);
2754}
2755
2756boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
2757{
2758 if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
2759 return FALSE;
2760 if (writable)
2761 *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
2762 return TRUE;
2763}
2764
2765boolean_t ubc_is_mapped_writable(const struct vnode *vp)
2766{
2767 boolean_t writable;
2768 return ubc_is_mapped(vp, &writable) && writable;
2769}
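/*
 * Illustrative sketch (not part of the original source): checking whether a
 * vnode is currently mapped, and whether any of those mappings are writable,
 * e.g. from a filesystem's VNOP_MNOMAP handler as anticipated by the comment
 * in ubc_unmap().  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_report_mapping(vnode_t vp)
{
	boolean_t writable = FALSE;

	if (ubc_is_mapped(vp, &writable))
		printf("vnode is mapped%s\n", writable ? " writable" : "");
}
#endif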
2770
2771
2772/*
2773 * CODE SIGNING
2774 */
2775static volatile SInt32 cs_blob_size = 0;
2776static volatile SInt32 cs_blob_count = 0;
2777static SInt32 cs_blob_size_peak = 0;
2778static UInt32 cs_blob_size_max = 0;
2779static SInt32 cs_blob_count_peak = 0;
2780
2781SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
2782SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
2783SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
2784SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
2785SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
2786
2787/*
2788 * Function: csblob_parse_teamid
2789 *
2790 * Description: This function returns a pointer to the team id
2791 * stored within the codedirectory of the csblob.
2792 * If the codedirectory predates team-ids, it returns
2793 * NULL.
2794 * This does not copy the name but returns a pointer to
2795 * it within the CD. Subsequently, the CD must be
2796 * available when this is used.
2797*/
2798
2799static const char *
2800csblob_parse_teamid(struct cs_blob *csblob)
2801{
2802 const CS_CodeDirectory *cd;
2803
2804 cd = csblob->csb_cd;
2805
2806 if (ntohl(cd->version) < CS_SUPPORTSTEAMID)
2807 return NULL;
2808
2809 if (cd->teamOffset == 0)
2810 return NULL;
2811
2812 const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
2813 if (cs_debug > 1)
2814 printf("found team-id %s in cdblob\n", name);
2815
2816 return name;
2817}
2818
2819
2820kern_return_t
2821ubc_cs_blob_allocate(
2822 vm_offset_t *blob_addr_p,
2823 vm_size_t *blob_size_p)
2824{
2825 kern_return_t kr = KERN_FAILURE;
2826
2827 {
2828 *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
2829
2830 if (*blob_addr_p == 0) {
2831 kr = KERN_NO_SPACE;
2832 } else {
2833 kr = KERN_SUCCESS;
2834 }
2835 }
2836
2837 return kr;
2838}
2839
2840void
2841ubc_cs_blob_deallocate(
2842 vm_offset_t blob_addr,
2843 vm_size_t blob_size)
2844{
2845#if PMAP_CS
2846 if (blob_size > pmap_cs_blob_limit) {
2847 kmem_free(kernel_map, blob_addr, blob_size);
2848 } else
2849#endif
2850 {
2851 kfree((void *) blob_addr, blob_size);
2852 }
2853}
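/*
 * Illustrative sketch (not part of the original source): pairing
 * ubc_cs_blob_allocate() with ubc_cs_blob_deallocate().  The size is passed
 * by reference because some configurations may adjust it; the (possibly
 * updated) value must be used for the matching deallocation.  The helper
 * name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_blob_buffer(vm_size_t wanted)
{
	vm_offset_t addr = 0;
	vm_size_t size = wanted;

	if (ubc_cs_blob_allocate(&addr, &size) != KERN_SUCCESS)
		return ENOMEM;
	/* ... copy a code signature into (void *)addr ... */
	ubc_cs_blob_deallocate(addr, size);
	return 0;
}
#endif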
2854
2855/*
2856 * Some codesigned files use a lowest common denominator page size of
2857 * 4KiB, but can be used on systems that have a runtime page size of
2858 * 16KiB. Since faults will only occur on 16KiB ranges in
2859 * cs_validate_range(), we can convert the original Code Directory to
2860 * a multi-level scheme where groups of 4 hashes are combined to form
2861 * a new hash, which represents 16KiB in the on-disk file. This can
2862 * reduce the wired memory requirement for the Code Directory by
2863 * 75%. Care must be taken for binaries that use the "fourk" VM pager
2864 * for unaligned access, which may still attempt to validate on
2865 * non-16KiB multiples for compatibility with 3rd party binaries.
2866 */
2867static boolean_t
2868ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
2869{
2870 const CS_CodeDirectory *cd;
2871
2872
2873 /*
2874 * Only applies to binaries that ship as part of the OS,
2875 * primarily the shared cache.
2876 */
2877 if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
2878 return FALSE;
2879 }
2880
2881 /*
2882 * If the runtime page size matches the code signing page
2883 * size, there is no work to do.
2884 */
2885 if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
2886 return FALSE;
2887 }
2888
2889 cd = blob->csb_cd;
2890
2891 /*
2892 * The number of hashes must be an integral multiple of the number of code signing pages per VM page
2893 */
2894 if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
2895 return FALSE;
2896 }
2897
2898 /*
2899 * Scatter lists must also have ranges that have an integral number of hashes
2900 */
2901 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
2902
2903 const SC_Scatter *scatter = (const SC_Scatter*)
2904 ((const char*)cd + ntohl(cd->scatterOffset));
2905 /* iterate all scatter structs to make sure they are all aligned */
2906 do {
2907 uint32_t sbase = ntohl(scatter->base);
2908 uint32_t scount = ntohl(scatter->count);
2909
2910 /* last scatter? */
2911 if (scount == 0) {
2912 break;
2913 }
2914
2915 if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
2916 return FALSE;
2917 }
2918
2919 if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
2920 return FALSE;
2921 }
2922
2923 scatter++;
2924 } while(1);
2925 }
2926
2927 /* Covered range must be a multiple of the new page size */
2928 if (ntohl(cd->codeLimit) & PAGE_MASK) {
2929 return FALSE;
2930 }
2931
2932 /* All checks pass */
2933 return TRUE;
2934}
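/*
 * Worked example (not part of the original source) of the conversion that
 * ubc_cs_convert_to_multilevel_hash() performs below: with 4KiB code signing
 * pages (csb_hash_pageshift == 12) on a 16KiB VM page system
 * (PAGE_SHIFT == 14), every 1 << (14 - 12) == 4 adjacent hashes collapse
 * into one, so nCodeSlots shrinks by 4x and the wired hash area of the Code
 * Directory shrinks by roughly 75%.
 */
#if 0	/* example only, not compiled */
static uint32_t
example_multilevel_slot_count(uint32_t nCodeSlots)
{
	uint32_t hashes_per_new_hash_shift = PAGE_SHIFT - 12;	/* 2 on 16KiB systems */

	return nCodeSlots >> hashes_per_new_hash_shift;		/* e.g. 4096 -> 1024 */
}
#endif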
2935
2936/*
2937 * Given a cs_blob with an already chosen best code directory, this
2938 * function allocates memory and copies into it only the blobs that
2939 * will be needed by the kernel, namely the single chosen code
2940 * directory (and not any of its alternatives) and the entitlement
2941 * blob.
2942 *
2943 * This saves significant memory with agile signatures, and additional
2944 * memory for 3rd Party Code because we also omit the CMS blob.
2945 *
2946 * To support multilevel and other potential code directory rewriting,
2947 * the size of a new code directory can be specified. Since that code
2948 * directory will replace the existing code directory,
2949 * ubc_cs_reconstitute_code_signature does not copy the original code
2950 * directory when a size is given, and the caller must fill it in.
2951 */
2952static int
2953ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
2954 vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
2955 CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
2956{
2957 const CS_CodeDirectory *old_cd, *cd;
2958 CS_CodeDirectory *new_cd;
2959 const CS_GenericBlob *entitlements;
2960 vm_offset_t new_blob_addr;
2961 vm_size_t new_blob_size;
2962 vm_size_t new_cdsize;
2963 kern_return_t kr;
2964 int error;
2965
2966 old_cd = blob->csb_cd;
2967
2968 new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : ntohl(old_cd->length);
2969
2970 new_blob_size = sizeof(CS_SuperBlob);
2971 new_blob_size += sizeof(CS_BlobIndex);
2972 new_blob_size += new_cdsize;
2973
2974 if (blob->csb_entitlements_blob) {
2975 /* We need to add a slot for the entitlements */
2976 new_blob_size += sizeof(CS_BlobIndex);
2977 new_blob_size += ntohl(blob->csb_entitlements_blob->length);
2978 }
2979
2980 kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
2981 if (kr != KERN_SUCCESS) {
2982 if (cs_debug > 1) {
2983 printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
2984 kr);
2985 }
2986 return ENOMEM;
2987 }
2988
2989 CS_SuperBlob *new_superblob;
2990
2991 new_superblob = (CS_SuperBlob *)new_blob_addr;
2992 new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
2993 new_superblob->length = htonl((uint32_t)new_blob_size);
2994 if (blob->csb_entitlements_blob) {
2995 vm_size_t ent_offset, cd_offset;
2996
2997 cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
2998 ent_offset = cd_offset + new_cdsize;
2999
3000 new_superblob->count = htonl(2);
3001 new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3002 new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
3003 new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
3004 new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
3005
3006 memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
3007
3008 new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
3009 } else {
3010 // Blob is the code directory, directly.
3011 new_cd = (CS_CodeDirectory *)new_blob_addr;
3012 }
3013
3014 if (optional_new_cd_size == 0) {
3015 // Copy code directory, and revalidate.
3016 memcpy(new_cd, old_cd, new_cdsize);
3017
3018 vm_size_t length = new_blob_size;
3019
3020 error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
3021
3022 if (error) {
3023 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3024 error);
3025
3026 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3027 return error;
3028 }
3029 *new_entitlements_p = entitlements;
3030 } else {
3031 // Caller will fill out and validate code directory.
3032 memset(new_cd, 0, new_cdsize);
3033 *new_entitlements_p = NULL;
3034 }
3035
3036 *new_blob_addr_p = new_blob_addr;
3037 *new_blob_size_p = new_blob_size;
3038 *new_cd_p = new_cd;
3039
3040 return 0;
3041}
3042
3043static int
3044ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3045{
3046 const CS_CodeDirectory *old_cd, *cd;
3047 CS_CodeDirectory *new_cd;
3048 const CS_GenericBlob *entitlements;
3049 vm_offset_t new_blob_addr;
3050 vm_size_t new_blob_size;
3051 vm_size_t new_cdsize;
3052 int error;
3053
3054 uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3055
3056 if (cs_debug > 1) {
3057 printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3058 (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3059 }
3060
3061 old_cd = blob->csb_cd;
3062
3063 /* Up to the hashes, we can copy all data */
3064 new_cdsize = ntohl(old_cd->hashOffset);
3065 new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3066
3067 error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
3068 &new_blob_addr, &new_blob_size, &new_cd,
3069 &entitlements);
3070 if (error != 0) {
3071 printf("CODE SIGNING: Failed to reconstitute code signature: %d\n", error);
3072 return error;
3073 }
3074
3075 memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3076
3077 /* Update fields in the Code Directory structure */
3078 new_cd->length = htonl((uint32_t)new_cdsize);
3079
3080 uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3081 nCodeSlots >>= hashes_per_new_hash_shift;
3082 new_cd->nCodeSlots = htonl(nCodeSlots);
3083
3084 new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */
3085
3086 if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3087 SC_Scatter *scatter = (SC_Scatter*)
3088 ((char *)new_cd + ntohl(new_cd->scatterOffset));
3089 /* iterate all scatter structs to scale their counts */
3090 do {
3091 uint32_t scount = ntohl(scatter->count);
3092 uint32_t sbase = ntohl(scatter->base);
3093
3094 /* last scatter? */
3095 if (scount == 0) {
3096 break;
3097 }
3098
3099 scount >>= hashes_per_new_hash_shift;
3100 scatter->count = htonl(scount);
3101
3102 sbase >>= hashes_per_new_hash_shift;
3103 scatter->base = htonl(sbase);
3104
3105 scatter++;
3106 } while(1);
3107 }
3108
3109 /* For each group of hashes, hash them together */
3110 const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3111 unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3112
3113 uint32_t hash_index;
3114 for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3115 union cs_hash_union mdctx;
3116
3117 uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3118 const unsigned char *src = src_base + hash_index * source_hash_len;
3119 unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3120
3121 blob->csb_hashtype->cs_init(&mdctx);
3122 blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3123 blob->csb_hashtype->cs_final(dst, &mdctx);
3124 }
3125
3126 error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
3127 if (error != 0) {
3128
3129 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3130 error);
3131
3132 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3133 return error;
3134 }
3135
3136 /* New Code Directory is ready for use, swap it out in the blob structure */
3137 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3138
3139 blob->csb_mem_size = new_blob_size;
3140 blob->csb_mem_kaddr = new_blob_addr;
3141 blob->csb_cd = cd;
3142 blob->csb_entitlements_blob = entitlements;
3143
3144 /* The blob has some cached attributes of the Code Directory, so update those */
3145
3146 blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */
3147
3148 blob->csb_hash_pagesize = PAGE_SIZE;
3149 blob->csb_hash_pagemask = PAGE_MASK;
3150 blob->csb_hash_pageshift = PAGE_SHIFT;
3151 blob->csb_end_offset = ntohl(cd->codeLimit);
3152 if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3153 const SC_Scatter *scatter = (const SC_Scatter*)
3154 ((const char*)cd + ntohl(cd->scatterOffset));
3155 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3156 } else {
3157 blob->csb_start_offset = 0;
3158 }
3159
3160 return 0;
3161}
3162
3163/*
3164 * Validate the code signature blob, create a struct cs_blob wrapper
3165 * and return it together with a pointer to the chosen code directory
3166 * and entitlements blob.
3167 *
3168 * Note that this takes ownership of the memory at *addr, mainly because
3169 * this function can actually replace the passed in blob with another
3170 * one, e.g. when performing multilevel hashing optimization.
3171 */
3172int
3173cs_blob_create_validated(
3174 vm_address_t * const addr,
3175 vm_size_t size,
3176 struct cs_blob ** const ret_blob,
3177 CS_CodeDirectory const ** const ret_cd)
3178{
3179 struct cs_blob *blob;
3180 int error = EINVAL;
3181 const CS_CodeDirectory *cd;
3182 const CS_GenericBlob *entitlements;
3183 union cs_hash_union mdctx;
3184 size_t length;
3185
3186 if (ret_blob)
3187 *ret_blob = NULL;
3188
3189 blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
3190 if (blob == NULL) {
3191 return ENOMEM;
3192 }
3193
3194 /* fill in the new blob */
3195 blob->csb_mem_size = size;
3196 blob->csb_mem_offset = 0;
3197 blob->csb_mem_kaddr = *addr;
3198 blob->csb_flags = 0;
3199 blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
3200 blob->csb_platform_binary = 0;
3201 blob->csb_platform_path = 0;
3202 blob->csb_teamid = NULL;
3203 blob->csb_entitlements_blob = NULL;
3204 blob->csb_entitlements = NULL;
3205 blob->csb_reconstituted = false;
3206
3207 /* Transfer ownership. Even on error, this function will deallocate */
3208 *addr = 0;
3209
3210 /*
3211 * Validate the blob's contents
3212 */
3213 length = (size_t) size;
3214 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
3215 length, &cd, &entitlements);
3216 if (error) {
3217
3218 if (cs_debug)
3219 printf("CODESIGNING: csblob invalid: %d\n", error);
3220 /*
3221 * The vnode checker can't make the rest of this function
3222 * succeed if csblob validation failed, so bail */
3223 goto out;
3224
3225 } else {
3226 const unsigned char *md_base;
3227 uint8_t hash[CS_HASH_MAX_SIZE];
3228 int md_size;
3229
3230 blob->csb_cd = cd;
3231 blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
3232 blob->csb_hashtype = cs_find_md(cd->hashType);
3233 if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash))
3234 panic("validated CodeDirectory but unsupported type");
3235
3236 blob->csb_hash_pageshift = cd->pageSize;
3237 blob->csb_hash_pagesize = (1U << cd->pageSize);
3238 blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
3239 blob->csb_hash_firstlevel_pagesize = 0;
3240 blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
3241 blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
3242 if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3243 const SC_Scatter *scatter = (const SC_Scatter*)
3244 ((const char*)cd + ntohl(cd->scatterOffset));
3245 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
3246 } else {
3247 blob->csb_start_offset = 0;
3248 }
3249 /* compute the blob's cdhash */
3250 md_base = (const unsigned char *) cd;
3251 md_size = ntohl(cd->length);
3252
3253 blob->csb_hashtype->cs_init(&mdctx);
3254 blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
3255 blob->csb_hashtype->cs_final(hash, &mdctx);
3256
3257 memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
3258 }
3259
3260 error = 0;
3261
3262out:
3263 if (error != 0) {
3264 cs_blob_free(blob);
3265 blob = NULL;
3266 cd = NULL;
3267 }
3268
3269 if (ret_blob != NULL) {
3270 *ret_blob = blob;
3271 }
3272 if (ret_cd != NULL) {
3273 *ret_cd = cd;
3274 }
3275
3276 return error;
3277}
3278
3279/*
3280 * Free a cs_blob previously created by cs_blob_create_validated.
3281 */
3282void
3283cs_blob_free(
3284 struct cs_blob * const blob)
3285{
3286 if (blob != NULL) {
3287 if (blob->csb_mem_kaddr) {
3288 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3289 blob->csb_mem_kaddr = 0;
3290 }
3291 if (blob->csb_entitlements != NULL) {
3292 osobject_release(blob->csb_entitlements);
3293 blob->csb_entitlements = NULL;
3294 }
3295 kfree(blob, sizeof (*blob));
3296 }
3297}
3298
3299int
3300ubc_cs_blob_add(
3301 struct vnode *vp,
3302 cpu_type_t cputype,
3303 off_t base_offset,
3304 vm_address_t *addr,
3305 vm_size_t size,
3306 struct image_params *imgp,
3307 __unused int flags,
3308 struct cs_blob **ret_blob)
3309{
3310 kern_return_t kr;
3311 struct ubc_info *uip;
3312 struct cs_blob *blob, *oblob;
3313 int error;
3314 CS_CodeDirectory const *cd;
3315 off_t blob_start_offset, blob_end_offset;
3316 boolean_t record_mtime;
3317
3318 record_mtime = FALSE;
3319 if (ret_blob)
3320 *ret_blob = NULL;
3321
3322 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3323 * Validates the passed in blob in the process. */
3324 error = cs_blob_create_validated(addr, size, &blob, &cd);
3325
3326 if (error != 0) {
3327 printf("malformed code signature blob: %d\n", error);
3328 return error;
3329 }
3330
3331 blob->csb_cpu_type = cputype;
3332 blob->csb_base_offset = base_offset;
3333
3334 /*
3335 * Let policy module check whether the blob's signature is accepted.
3336 */
3337#if CONFIG_MACF
3338 unsigned int cs_flags = blob->csb_flags;
3339 unsigned int signer_type = blob->csb_signer_type;
3340 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
3341 blob->csb_flags = cs_flags;
3342 blob->csb_signer_type = signer_type;
3343
3344 if (error) {
3345 if (cs_debug)
3346 printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
3347 goto out;
3348 }
3349 if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
3350 if (cs_debug)
3351 printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
3352 error = EPERM;
3353 goto out;
3354 }
3355#endif
3356
3357#if CONFIG_ENFORCE_SIGNED_CODE
3358 /*
3359 * Reconstitute code signature
3360 */
3361 {
3362 vm_address_t new_mem_kaddr = 0;
3363 vm_size_t new_mem_size = 0;
3364
3365 CS_CodeDirectory *new_cd = NULL;
3366 CS_GenericBlob const *new_entitlements = NULL;
3367
3368 error = ubc_cs_reconstitute_code_signature(blob, 0,
3369 &new_mem_kaddr, &new_mem_size,
3370 &new_cd, &new_entitlements);
3371
3372 if (error != 0) {
3373 printf("failed code signature reconstitution: %d\n", error);
3374 goto out;
3375 }
3376
3377 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3378
3379 blob->csb_mem_kaddr = new_mem_kaddr;
3380 blob->csb_mem_size = new_mem_size;
3381 blob->csb_cd = new_cd;
3382 blob->csb_entitlements_blob = new_entitlements;
3383 blob->csb_reconstituted = true;
3384 }
3385
3386#endif
3387
3388
3389 if (blob->csb_flags & CS_PLATFORM_BINARY) {
3390 if (cs_debug > 1)
3391 printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
3392 blob->csb_platform_binary = 1;
3393 blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
3394 } else {
3395 blob->csb_platform_binary = 0;
3396 blob->csb_platform_path = 0;
3397 blob->csb_teamid = csblob_parse_teamid(blob);
3398 if (cs_debug > 1) {
3399 if (blob->csb_teamid)
3400 printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
3401 else
3402 printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
3403 }
3404 }
3405
3406 /*
3407 * Validate the blob's coverage
3408 */
3409 blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
3410 blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
3411
3412 if (blob_start_offset >= blob_end_offset ||
3413 blob_start_offset < 0 ||
3414 blob_end_offset <= 0) {
3415 /* reject empty or backwards blob */
3416 error = EINVAL;
3417 goto out;
3418 }
3419
3420 if (ubc_cs_supports_multilevel_hash(blob)) {
3421 error = ubc_cs_convert_to_multilevel_hash(blob);
3422 if (error != 0) {
3423 printf("failed multilevel hash conversion: %d\n", error);
3424 goto out;
3425 }
3426 blob->csb_reconstituted = true;
3427 }
3428
3429 vnode_lock(vp);
3430 if (! UBCINFOEXISTS(vp)) {
3431 vnode_unlock(vp);
3432 error = ENOENT;
3433 goto out;
3434 }
3435 uip = vp->v_ubcinfo;
3436
3437 /* check if this new blob overlaps with an existing blob */
3438 for (oblob = uip->cs_blobs;
3439 oblob != NULL;
3440 oblob = oblob->csb_next) {
3441 off_t oblob_start_offset, oblob_end_offset;
3442
3443 if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
3444 vnode_unlock(vp);
3445 error = EALREADY;
3446 goto out;
3447 } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
3448 if (!oblob->csb_platform_binary) {
3449 vnode_unlock(vp);
3450 error = EALREADY;
3451 goto out;
3452 }
3453 } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
3454 if (oblob->csb_platform_binary ||
3455 oblob->csb_teamid == NULL ||
3456 strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
3457 vnode_unlock(vp);
3458 error = EALREADY;
3459 goto out;
3460 }
3461 } else { // non teamid binary needs to be the same for app slices
3462 if (oblob->csb_platform_binary ||
3463 oblob->csb_teamid != NULL) {
3464 vnode_unlock(vp);
3465 error = EALREADY;
3466 goto out;
3467 }
3468 }
3469
3470 oblob_start_offset = (oblob->csb_base_offset +
3471 oblob->csb_start_offset);
3472 oblob_end_offset = (oblob->csb_base_offset +
3473 oblob->csb_end_offset);
3474 if (blob_start_offset >= oblob_end_offset ||
3475 blob_end_offset <= oblob_start_offset) {
3476 /* no conflict with this existing blob */
3477 } else {
3478 /* conflict ! */
3479 if (blob_start_offset == oblob_start_offset &&
3480 blob_end_offset == oblob_end_offset &&
3481 blob->csb_mem_size == oblob->csb_mem_size &&
3482 blob->csb_flags == oblob->csb_flags &&
3483 (blob->csb_cpu_type == CPU_TYPE_ANY ||
3484 oblob->csb_cpu_type == CPU_TYPE_ANY ||
3485 blob->csb_cpu_type == oblob->csb_cpu_type) &&
3486 !bcmp(blob->csb_cdhash,
3487 oblob->csb_cdhash,
3488 CS_CDHASH_LEN)) {
3489 /*
3490 * We already have this blob:
3491 * we'll return success but
3492 * throw away the new blob.
3493 */
3494 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
3495 /*
3496 * The old blob matches this one
3497 * but doesn't have any CPU type.
3498 * Update it with whatever the caller
3499 * provided this time.
3500 */
3501 oblob->csb_cpu_type = cputype;
3502 }
3503
3504 /* The signature is still accepted, so update the
3505 * generation count. */
3506 uip->cs_add_gen = cs_blob_generation_count;
3507
3508 vnode_unlock(vp);
3509 if (ret_blob)
3510 *ret_blob = oblob;
3511 error = EAGAIN;
3512 goto out;
3513 } else {
3514 /* different blob: reject the new one */
3515 vnode_unlock(vp);
3516 error = EALREADY;
3517 goto out;
3518 }
3519 }
3520
3521 }
3522
3523
3524 /* mark this vnode's VM object as having "signed pages" */
3525 kr = memory_object_signed(uip->ui_control, TRUE);
3526 if (kr != KERN_SUCCESS) {
3527 vnode_unlock(vp);
3528 error = ENOENT;
3529 goto out;
3530 }
3531
3532 if (uip->cs_blobs == NULL) {
3533 /* loading 1st blob: record the file's current "modify time" */
3534 record_mtime = TRUE;
3535 }
3536
3537 /* set the generation count for cs_blobs */
3538 uip->cs_add_gen = cs_blob_generation_count;
3539
3540 /*
3541 * Add this blob to the list of blobs for this vnode.
3542 * We always add at the front of the list and we never remove a
3543 * blob from the list, so ubc_cs_get_blobs() can return whatever
3544 * the top of the list was and that list will remain valid
3545 * while we validate a page, even after we release the vnode's lock.
3546 */
3547 blob->csb_next = uip->cs_blobs;
3548 uip->cs_blobs = blob;
3549
3550 OSAddAtomic(+1, &cs_blob_count);
3551 if (cs_blob_count > cs_blob_count_peak) {
3552 cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
3553 }
3554 OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
3555 if ((SInt32) cs_blob_size > cs_blob_size_peak) {
3556 cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
3557 }
3558 if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
3559 cs_blob_size_max = (UInt32) blob->csb_mem_size;
3560 }
3561
3562 if (cs_debug > 1) {
3563 proc_t p;
3564 const char *name = vnode_getname_printable(vp);
3565 p = current_proc();
3566 printf("CODE SIGNING: proc %d(%s) "
3567 "loaded %s signatures for file (%s) "
3568 "range 0x%llx:0x%llx flags 0x%x\n",
3569 p->p_pid, p->p_comm,
3570 blob->csb_cpu_type == -1 ? "detached" : "embedded",
3571 name,
3572 blob->csb_base_offset + blob->csb_start_offset,
3573 blob->csb_base_offset + blob->csb_end_offset,
3574 blob->csb_flags);
3575 vnode_putname_printable(name);
3576 }
3577
3578 vnode_unlock(vp);
3579
3580 if (record_mtime) {
3581 vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
3582 }
3583
3584 if (ret_blob)
3585 *ret_blob = blob;
3586
3587 error = 0; /* success ! */
3588
3589out:
3590 if (error) {
3591 if (cs_debug)
3592 printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
3593
3594 cs_blob_free(blob);
3595 }
3596
3597 if (error == EAGAIN) {
3598 /*
3599 * See above: error is EAGAIN if we were asked
3600 * to add an existing blob again. We cleaned up the new
3601 * blob and want to return success.
3602 */
3603 error = 0;
3604 }
3605
3606 return error;
3607}
3608
3609void
3610csvnode_print_debug(struct vnode *vp)
3611{
3612 const char *name = NULL;
3613 struct ubc_info *uip;
3614 struct cs_blob *blob;
3615
3616 name = vnode_getname_printable(vp);
3617 if (name) {
3618 printf("csvnode: name: %s\n", name);
3619 vnode_putname_printable(name);
3620 }
3621
3622 vnode_lock_spin(vp);
3623
3624 if (! UBCINFOEXISTS(vp)) {
3625 blob = NULL;
3626 goto out;
3627 }
3628
3629 uip = vp->v_ubcinfo;
3630 for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
3631 printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
3632 (unsigned long)blob->csb_start_offset,
3633 (unsigned long)blob->csb_end_offset,
3634 blob->csb_flags,
3635 blob->csb_platform_binary ? "yes" : "no",
3636 blob->csb_platform_path ? "yes" : "no",
3637 blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
3638 }
3639
3640out:
3641 vnode_unlock(vp);
3642
3643}
3644
3645struct cs_blob *
3646ubc_cs_blob_get(
3647 struct vnode *vp,
3648 cpu_type_t cputype,
3649 off_t offset)
3650{
3651 struct ubc_info *uip;
3652 struct cs_blob *blob;
3653 off_t offset_in_blob;
3654
3655 vnode_lock_spin(vp);
3656
3657 if (! UBCINFOEXISTS(vp)) {
3658 blob = NULL;
3659 goto out;
3660 }
3661
3662 uip = vp->v_ubcinfo;
3663 for (blob = uip->cs_blobs;
3664 blob != NULL;
3665 blob = blob->csb_next) {
3666 if (cputype != -1 && blob->csb_cpu_type == cputype) {
3667 break;
3668 }
3669 if (offset != -1) {
3670 offset_in_blob = offset - blob->csb_base_offset;
3671 if (offset_in_blob >= blob->csb_start_offset &&
3672 offset_in_blob < blob->csb_end_offset) {
3673 /* our offset is covered by this blob */
3674 break;
3675 }
3676 }
3677 }
3678
3679out:
3680 vnode_unlock(vp);
3681
3682 return blob;
3683}
3684
3685static void
3686ubc_cs_free(
3687 struct ubc_info *uip)
3688{
3689 struct cs_blob *blob, *next_blob;
3690
3691 for (blob = uip->cs_blobs;
3692 blob != NULL;
3693 blob = next_blob) {
3694 next_blob = blob->csb_next;
3695 OSAddAtomic(-1, &cs_blob_count);
3696 OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
3697 cs_blob_free(blob);
3698 }
3699#if CHECK_CS_VALIDATION_BITMAP
3700 ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
3701#endif
3702 uip->cs_blobs = NULL;
3703}
3704
3705/* check cs blob generation on vnode
3706 * returns:
3707 * 0 : Success, the cs_blob attached is current
3708 * ENEEDAUTH : Generation count mismatch. Needs authentication again.
3709 */
3710int
3711ubc_cs_generation_check(
3712 struct vnode *vp)
3713{
3714 int retval = ENEEDAUTH;
3715
3716 vnode_lock_spin(vp);
3717
3718 if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
3719 retval = 0;
3720 }
3721
3722 vnode_unlock(vp);
3723 return retval;
3724}
3725
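/*
 * Schematic pairing of the generation check with revalidation (the loop
 * below is only a sketch; "imgp" and "flags" would come from the caller's
 * exec/validation context): when the check returns ENEEDAUTH, the caller
 * is expected to revalidate the blobs already attached to the vnode.
 *
 *	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
 *		struct cs_blob *b;
 *
 *		for (b = ubc_get_cs_blobs(vp); b != NULL; b = b->csb_next) {
 *			(void) ubc_cs_blob_revalidate(vp, b, imgp, flags);
 *		}
 *	}
 */
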
3726int
3727ubc_cs_blob_revalidate(
3728 struct vnode *vp,
3729 struct cs_blob *blob,
3730 struct image_params *imgp,
3731 int flags
3732 )
3733{
3734 int error = 0;
3735 const CS_CodeDirectory *cd = NULL;
3736 const CS_GenericBlob *entitlements = NULL;
3737 size_t size;
3738 assert(vp != NULL);
3739 assert(blob != NULL);
3740
3741 size = blob->csb_mem_size;
3742 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
3743 size, &cd, &entitlements);
3744 if (error) {
3745 if (cs_debug) {
3746 printf("CODESIGNING: csblob invalid: %d\n", error);
3747 }
3748 goto out;
3749 }
3750
3751 unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
3752 unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
3753
3754 if (blob->csb_reconstituted) {
3755 /*
3756 * Code signatures that have been modified after validation
3757 * cannot be revalidated inline from their in-memory blob.
3758 *
3759 * That's okay, though, because the only path left that relies
3760 * on revalidation of existing in-memory blobs is the legacy
3761 * detached signature database path, which only exists on macOS,
3762 * which does not do reconstitution of any kind.
3763 */
3764 if (cs_debug) {
3765 printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
3766 }
3767
3768 /*
3769 * EAGAIN tells the caller that they may reread the code
3770 * signature and try attaching it again, which is the same
3771 * thing they would do if there was no cs_blob yet in the
3772 * first place.
3773 *
3774 * Conveniently, after ubc_cs_blob_add did a successful
3775 * validation, it will detect that a matching cs_blob (cdhash,
3776 * offset, arch etc.) already exists, and return success
3777 * without re-adding a cs_blob to the vnode.
3778 */
3779 return EAGAIN;
3780 }
3781
3782 /* callout to mac_vnode_check_signature */
3783#if CONFIG_MACF
3784 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
3785 if (cs_debug && error) {
3786 printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
3787 }
3788#else
3789 (void)flags;
3790 (void)signer_type;
3791#endif
3792
3793	/* update the generation count on success */
3794 vnode_lock_spin(vp);
3795 blob->csb_flags = cs_flags;
3796 blob->csb_signer_type = signer_type;
3797 if (UBCINFOEXISTS(vp)) {
3798 if (error == 0)
3799 vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
3800 else
3801 vp->v_ubcinfo->cs_add_gen = 0;
3802 }
3803
3804 vnode_unlock(vp);
3805
3806out:
3807 return error;
3808}
3809
3810void
3811cs_blob_reset_cache(void)
3812{
3813	/* The count is odd; adding 2 keeps it odd, so it can never wrap to 0. */
3814	OSAddAtomic(+2, &cs_blob_generation_count);
3815	printf("Resetting cs_blob cache for all vnodes.\n");
3816}
3817
3818struct cs_blob *
3819ubc_get_cs_blobs(
3820 struct vnode *vp)
3821{
3822 struct ubc_info *uip;
3823 struct cs_blob *blobs;
3824
3825 /*
3826 * No need to take the vnode lock here. The caller must be holding
3827 * a reference on the vnode (via a VM mapping or open file descriptor),
3828 * so the vnode will not go away. The ubc_info stays until the vnode
3829 * goes away. And we only modify "blobs" by adding to the head of the
3830 * list.
3831 * The ubc_info could go away entirely if the vnode gets reclaimed as
3832 * part of a forced unmount. In the case of a code-signature validation
3833 * during a page fault, the "paging_in_progress" reference on the VM
3834	 * object guarantees that the vnode pager (and the ubc_info) won't go
3835 * away during the fault.
3836 * Other callers need to protect against vnode reclaim by holding the
3837 * vnode lock, for example.
3838 */
3839
3840 if (! UBCINFOEXISTS(vp)) {
3841 blobs = NULL;
3842 goto out;
3843 }
3844
3845 uip = vp->v_ubcinfo;
3846 blobs = uip->cs_blobs;
3847
3848out:
3849 return blobs;
3850}
3851
3852void
3853ubc_get_cs_mtime(
3854 struct vnode *vp,
3855 struct timespec *cs_mtime)
3856{
3857 struct ubc_info *uip;
3858
3859 if (! UBCINFOEXISTS(vp)) {
3860 cs_mtime->tv_sec = 0;
3861 cs_mtime->tv_nsec = 0;
3862 return;
3863 }
3864
3865 uip = vp->v_ubcinfo;
3866 cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
3867 cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
3868}
3869
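/*
 * Hypothetical consumer sketch: compare the mtime recorded when the
 * signature was attached with the file's current mtime to detect
 * modification after signing.  vnode_mtime() is the same accessor used
 * when the timestamp is recorded above.
 *
 *	struct timespec cs_mtime, cur_mtime;
 *
 *	ubc_get_cs_mtime(vp, &cs_mtime);
 *	vnode_mtime(vp, &cur_mtime, vfs_context_current());
 *	if (cs_mtime.tv_sec != cur_mtime.tv_sec ||
 *	    cs_mtime.tv_nsec != cur_mtime.tv_nsec) {
 *		... the file may have changed since it was signed ...
 *	}
 */
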
3870unsigned long cs_validate_page_no_hash = 0;
3871unsigned long cs_validate_page_bad_hash = 0;
3872static boolean_t
3873cs_validate_hash(
3874 struct cs_blob *blobs,
3875 memory_object_t pager,
3876 memory_object_offset_t page_offset,
3877 const void *data,
3878 vm_size_t *bytes_processed,
3879 unsigned *tainted)
3880{
3881 union cs_hash_union mdctx;
3882 struct cs_hash const *hashtype = NULL;
3883 unsigned char actual_hash[CS_HASH_MAX_SIZE];
3884 unsigned char expected_hash[CS_HASH_MAX_SIZE];
3885 boolean_t found_hash;
3886 struct cs_blob *blob;
3887 const CS_CodeDirectory *cd;
3888 const unsigned char *hash;
3889 boolean_t validated;
3890 off_t offset; /* page offset in the file */
3891 size_t size;
3892 off_t codeLimit = 0;
3893 const char *lower_bound, *upper_bound;
3894 vm_offset_t kaddr, blob_addr;
3895
3896 /* retrieve the expected hash */
3897 found_hash = FALSE;
3898
3899 for (blob = blobs;
3900 blob != NULL;
3901 blob = blob->csb_next) {
3902 offset = page_offset - blob->csb_base_offset;
3903 if (offset < blob->csb_start_offset ||
3904 offset >= blob->csb_end_offset) {
3905 /* our page is not covered by this blob */
3906 continue;
3907 }
3908
3909 /* blob data has been released */
3910 kaddr = blob->csb_mem_kaddr;
3911 if (kaddr == 0) {
3912 continue;
3913 }
3914
3915 blob_addr = kaddr + blob->csb_mem_offset;
3916 lower_bound = CAST_DOWN(char *, blob_addr);
3917 upper_bound = lower_bound + blob->csb_mem_size;
3918
3919 cd = blob->csb_cd;
3920 if (cd != NULL) {
3921			/* all CDs that have been injected have already been validated */
3922
3923 hashtype = blob->csb_hashtype;
3924 if (hashtype == NULL)
3925 panic("unknown hash type ?");
3926 if (hashtype->cs_digest_size > sizeof(actual_hash))
3927 panic("hash size too large");
3928 if (offset & blob->csb_hash_pagemask)
3929 panic("offset not aligned to cshash boundary");
3930
3931 codeLimit = ntohl(cd->codeLimit);
3932
3933 hash = hashes(cd, (uint32_t)(offset>>blob->csb_hash_pageshift),
3934 hashtype->cs_size,
3935 lower_bound, upper_bound);
3936 if (hash != NULL) {
3937 bcopy(hash, expected_hash, hashtype->cs_size);
3938 found_hash = TRUE;
3939 }
3940
3941 break;
3942 }
3943 }
3944
3945 if (found_hash == FALSE) {
3946 /*
3947 * We can't verify this page because there is no signature
3948 * for it (yet). It's possible that this part of the object
3949 * is not signed, or that signatures for that part have not
3950 * been loaded yet.
3951 * Report that the page has not been validated and let the
3952 * caller decide if it wants to accept it or not.
3953 */
3954 cs_validate_page_no_hash++;
3955 if (cs_debug > 1) {
3956 printf("CODE SIGNING: cs_validate_page: "
3957 "mobj %p off 0x%llx: no hash to validate !?\n",
3958 pager, page_offset);
3959 }
3960 validated = FALSE;
3961 *tainted = 0;
3962 } else {
3963
3964 *tainted = 0;
3965
3966 size = blob->csb_hash_pagesize;
3967 *bytes_processed = size;
3968
3969 const uint32_t *asha1, *esha1;
3970 if ((off_t)(offset + size) > codeLimit) {
3971 /* partial page at end of segment */
3972 assert(offset < codeLimit);
3973 size = (size_t) (codeLimit & blob->csb_hash_pagemask);
3974 *tainted |= CS_VALIDATE_NX;
3975 }
3976
3977 hashtype->cs_init(&mdctx);
3978
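		/*
		 * Multi-level ("first level") hashing, used when the signature's
		 * hash page size is smaller than the page being validated: each
		 * chunk of (up to) csb_hash_firstlevel_pagesize bytes is hashed
		 * on its own, and the page-level digest is the hash of the
		 * concatenated chunk digests.  Schematically, for a page split
		 * into four chunks:
		 *
		 *	page_hash = H( H(chunk0) || H(chunk1) || H(chunk2) || H(chunk3) )
		 */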
3979 if (blob->csb_hash_firstlevel_pagesize) {
3980 const unsigned char *partial_data = (const unsigned char *)data;
3981 size_t i;
3982 for (i=0; i < size;) {
3983 union cs_hash_union partialctx;
3984 unsigned char partial_digest[CS_HASH_MAX_SIZE];
3985 size_t partial_size = MIN(size-i, blob->csb_hash_firstlevel_pagesize);
3986
3987 hashtype->cs_init(&partialctx);
3988 hashtype->cs_update(&partialctx, partial_data, partial_size);
3989 hashtype->cs_final(partial_digest, &partialctx);
3990
3991 /* Update cumulative multi-level hash */
3992 hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
3993 partial_data = partial_data + partial_size;
3994 i += partial_size;
3995 }
3996 } else {
3997 hashtype->cs_update(&mdctx, data, size);
3998 }
3999 hashtype->cs_final(actual_hash, &mdctx);
4000
4001 asha1 = (const uint32_t *) actual_hash;
4002 esha1 = (const uint32_t *) expected_hash;
4003
4004 if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
4005 if (cs_debug) {
4006 printf("CODE SIGNING: cs_validate_page: "
4007 "mobj %p off 0x%llx size 0x%lx: "
4008 "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
4009 "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
4010 pager, page_offset, size,
4011 asha1[0], asha1[1], asha1[2],
4012 asha1[3], asha1[4],
4013 esha1[0], esha1[1], esha1[2],
4014 esha1[3], esha1[4]);
4015 }
4016 cs_validate_page_bad_hash++;
4017 *tainted |= CS_VALIDATE_TAINTED;
4018 } else {
4019 if (cs_debug > 10) {
4020 printf("CODE SIGNING: cs_validate_page: "
4021 "mobj %p off 0x%llx size 0x%lx: "
4022 "SHA1 OK\n",
4023 pager, page_offset, size);
4024 }
4025 }
4026 validated = TRUE;
4027 }
4028
4029 return validated;
4030}
4031
4032boolean_t
4033cs_validate_range(
4034 struct vnode *vp,
4035 memory_object_t pager,
4036 memory_object_offset_t page_offset,
4037 const void *data,
4038 vm_size_t dsize,
4039 unsigned *tainted)
4040{
4041 vm_size_t offset_in_range;
4042	boolean_t all_subranges_validated = TRUE; /* set to FALSE if any subrange fails */
4043
4044 struct cs_blob *blobs = ubc_get_cs_blobs(vp);
4045
4046 *tainted = 0;
4047
4048 for (offset_in_range = 0;
4049 offset_in_range < dsize;
4050 /* offset_in_range updated based on bytes processed */) {
4051 unsigned subrange_tainted = 0;
4052 boolean_t subrange_validated;
4053 vm_size_t bytes_processed = 0;
4054
4055 subrange_validated = cs_validate_hash(blobs,
4056 pager,
4057 page_offset + offset_in_range,
4058 (const void *)((const char *)data + offset_in_range),
4059 &bytes_processed,
4060 &subrange_tainted);
4061
4062 *tainted |= subrange_tainted;
4063
4064 if (bytes_processed == 0) {
4065			/* Cannot make forward progress, so return an error */
4066 all_subranges_validated = FALSE;
4067 break;
4068 } else if (subrange_validated == FALSE) {
4069 all_subranges_validated = FALSE;
4070 /* Keep going to detect other types of failures in subranges */
4071 }
4072
4073 offset_in_range += bytes_processed;
4074 }
4075
4076 return all_subranges_validated;
4077}
4078
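/*
 * Illustrative usage sketch (names such as "kvaddr_of_page" are
 * hypothetical): validate one page worth of freshly paged-in data and
 * inspect the tainted flags.
 *
 *	unsigned tainted = 0;
 *	boolean_t ok;
 *
 *	ok = cs_validate_range(vp, pager, file_offset,
 *	    kvaddr_of_page, PAGE_SIZE, &tainted);
 *	if (!ok || (tainted & CS_VALIDATE_TAINTED)) {
 *		... reject the page or mark the mapping accordingly ...
 *	}
 */
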
4079int
4080ubc_cs_getcdhash(
4081 vnode_t vp,
4082 off_t offset,
4083 unsigned char *cdhash)
4084{
4085 struct cs_blob *blobs, *blob;
4086 off_t rel_offset;
4087 int ret;
4088
4089 vnode_lock(vp);
4090
4091 blobs = ubc_get_cs_blobs(vp);
4092 for (blob = blobs;
4093 blob != NULL;
4094 blob = blob->csb_next) {
4095 /* compute offset relative to this blob */
4096 rel_offset = offset - blob->csb_base_offset;
4097 if (rel_offset >= blob->csb_start_offset &&
4098 rel_offset < blob->csb_end_offset) {
4099			/* this blob covers our "offset" */
4100 break;
4101 }
4102 }
4103
4104 if (blob == NULL) {
4105 /* we didn't find a blob covering "offset" */
4106 ret = EBADEXEC; /* XXX any better error ? */
4107 } else {
4108		/* copy that blob's cdhash */
4109 bcopy(blob->csb_cdhash, cdhash, sizeof (blob->csb_cdhash));
4110 ret = 0;
4111 }
4112
4113 vnode_unlock(vp);
4114
4115 return ret;
4116}
4117
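/*
 * Illustrative caller sketch, assuming CS_CDHASH_LEN (the size of
 * csb_cdhash) is available to the caller:
 *
 *	unsigned char cdhash[CS_CDHASH_LEN];
 *
 *	if (ubc_cs_getcdhash(vp, file_offset, cdhash) == 0) {
 *		... "cdhash" now holds the code directory hash of the
 *		    blob covering "file_offset" ...
 *	}
 */
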
4118boolean_t
4119ubc_cs_is_range_codesigned(
4120 vnode_t vp,
4121 mach_vm_offset_t start,
4122 mach_vm_size_t size)
4123{
4124 struct cs_blob *csblob;
4125 mach_vm_offset_t blob_start;
4126 mach_vm_offset_t blob_end;
4127
4128 if (vp == NULL) {
4129 /* no file: no code signature */
4130 return FALSE;
4131 }
4132 if (size == 0) {
4133 /* no range: no code signature */
4134 return FALSE;
4135 }
4136 if (start + size < start) {
4137 /* overflow */
4138 return FALSE;
4139 }
4140
4141 csblob = ubc_cs_blob_get(vp, -1, start);
4142 if (csblob == NULL) {
4143 return FALSE;
4144 }
4145
4146 /*
4147 * We currently check if the range is covered by a single blob,
4148 * which should always be the case for the dyld shared cache.
4149 * If we ever want to make this routine handle other cases, we
4150 * would have to iterate if the blob does not cover the full range.
4151 */
4152 blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
4153 csblob->csb_start_offset);
4154 blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
4155 csblob->csb_end_offset);
4156 if (blob_start > start || blob_end < (start + size)) {
4157 /* range not fully covered by this code-signing blob */
4158 return FALSE;
4159 }
4160
4161 return TRUE;
4162}
4163
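/*
 * Schematic example (the mapping names are illustrative): before treating
 * a mapped region as signed, ask whether the backing file range is fully
 * covered by a single code-signing blob.
 *
 *	if (!ubc_cs_is_range_codesigned(vp, file_offset, mapping_size)) {
 *		... treat the range as unsigned ...
 *	}
 */
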
4164#if CHECK_CS_VALIDATION_BITMAP
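/*
 * stob(s): number of bytes needed for a validation bitmap with one bit per
 * page of a file of size "s".  atop_64(round_page_64(s)) is the page count;
 * adding 7 (07 octal) and shifting right by 3 rounds that up to whole bytes.
 */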
4165#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
4166extern boolean_t root_fs_upgrade_try;
4167
4168/*
4169 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
4170 * Depends:
4171 * a) Is the target vnode on the root filesystem?
4172 * b) Has someone tried to mount the root filesystem read-write?
4173 * If answers are (a) yes AND (b) no, then we can use the bitmap.
4174 */
4175#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
4176kern_return_t
4177ubc_cs_validation_bitmap_allocate(
4178 vnode_t vp)
4179{
4180 kern_return_t kr = KERN_SUCCESS;
4181 struct ubc_info *uip;
4182 char *target_bitmap;
4183 vm_object_size_t bitmap_size;
4184
4185 if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
4186 kr = KERN_INVALID_ARGUMENT;
4187 } else {
4188 uip = vp->v_ubcinfo;
4189
4190 if ( uip->cs_valid_bitmap == NULL ) {
4191 bitmap_size = stob(uip->ui_size);
4192 target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
4193			if (target_bitmap == NULL) {
4194 kr = KERN_NO_SPACE;
4195 } else {
4196 kr = KERN_SUCCESS;
4197 }
4198 if( kr == KERN_SUCCESS ) {
4199 memset( target_bitmap, 0, (size_t)bitmap_size);
4200 uip->cs_valid_bitmap = (void*)target_bitmap;
4201 uip->cs_valid_bitmap_size = bitmap_size;
4202 }
4203 }
4204 }
4205 return kr;
4206}
4207
4208kern_return_t
4209ubc_cs_check_validation_bitmap (
4210 vnode_t vp,
4211 memory_object_offset_t offset,
4212 int optype)
4213{
4214 kern_return_t kr = KERN_SUCCESS;
4215
4216 if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
4217 kr = KERN_INVALID_ARGUMENT;
4218 } else {
4219 struct ubc_info *uip = vp->v_ubcinfo;
4220 char *target_bitmap = uip->cs_valid_bitmap;
4221
4222 if ( target_bitmap == NULL ) {
4223 kr = KERN_INVALID_ARGUMENT;
4224 } else {
4225 uint64_t bit, byte;
4226 bit = atop_64( offset );
4227 byte = bit >> 3;
4228
4229			if ( byte >= uip->cs_valid_bitmap_size ) {
4230 kr = KERN_INVALID_ARGUMENT;
4231 } else {
4232
4233 if (optype == CS_BITMAP_SET) {
4234 target_bitmap[byte] |= (1 << (bit & 07));
4235 kr = KERN_SUCCESS;
4236 } else if (optype == CS_BITMAP_CLEAR) {
4237 target_bitmap[byte] &= ~(1 << (bit & 07));
4238 kr = KERN_SUCCESS;
4239 } else if (optype == CS_BITMAP_CHECK) {
4240 if ( target_bitmap[byte] & (1 << (bit & 07))) {
4241 kr = KERN_SUCCESS;
4242 } else {
4243 kr = KERN_FAILURE;
4244 }
4245 }
4246 }
4247 }
4248 }
4249 return kr;
4250}
4251
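/*
 * Illustrative use of the bitmap operations (schematic only): skip
 * re-validation of a page that is already marked valid, and record a
 * successful validation.
 *
 *	if (ubc_cs_check_validation_bitmap(vp, offset, CS_BITMAP_CHECK)
 *	    != KERN_SUCCESS) {
 *		... validate the page, and if it passes ...
 *		(void) ubc_cs_check_validation_bitmap(vp, offset, CS_BITMAP_SET);
 *	}
 */
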
4252void
4253ubc_cs_validation_bitmap_deallocate(
4254 vnode_t vp)
4255{
4256 struct ubc_info *uip;
4257 void *target_bitmap;
4258 vm_object_size_t bitmap_size;
4259
4260 if ( UBCINFOEXISTS(vp)) {
4261 uip = vp->v_ubcinfo;
4262
4263 if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
4264 bitmap_size = uip->cs_valid_bitmap_size;
4265 kfree( target_bitmap, (vm_size_t) bitmap_size );
4266 uip->cs_valid_bitmap = NULL;
4267 }
4268 }
4269}
4270#else
4271kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
4272 return KERN_INVALID_ARGUMENT;
4273}
4274
4275kern_return_t ubc_cs_check_validation_bitmap(
4276 __unused struct vnode *vp,
4277 __unused memory_object_offset_t offset,
4278 __unused int optype){
4279
4280 return KERN_INVALID_ARGUMENT;
4281}
4282
4283void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
4284 return;
4285}
4286#endif /* CHECK_CS_VALIDATION_BITMAP */
4287
4288#if PMAP_CS
4289kern_return_t
4290cs_associate_blob_with_mapping(
4291 void *pmap,
4292 vm_map_offset_t start,
4293 vm_map_size_t size,
4294 vm_object_offset_t offset,
4295 void *blobs_p)
4296{
4297 off_t blob_start_offset, blob_end_offset;
4298 kern_return_t kr;
4299 struct cs_blob *blobs, *blob;
4300 vm_offset_t kaddr;
4301 struct pmap_cs_code_directory *cd_entry = NULL;
4302
4303 if (!pmap_cs) {
4304 return KERN_NOT_SUPPORTED;
4305 }
4306
4307 blobs = (struct cs_blob *)blobs_p;
4308
4309 for (blob = blobs;
4310 blob != NULL;
4311 blob = blob->csb_next) {
4312 blob_start_offset = (blob->csb_base_offset +
4313 blob->csb_start_offset);
4314 blob_end_offset = (blob->csb_base_offset +
4315 blob->csb_end_offset);
4316 if ((off_t) offset < blob_start_offset ||
4317 (off_t) offset >= blob_end_offset ||
4318 (off_t) (offset + size) <= blob_start_offset ||
4319 (off_t) (offset + size) > blob_end_offset) {
4320 continue;
4321 }
4322 kaddr = blob->csb_mem_kaddr;
4323 if (kaddr == 0) {
4324 /* blob data has been released */
4325 continue;
4326 }
4327 cd_entry = blob->csb_pmap_cs_entry;
4328 if (cd_entry == NULL) {
4329 continue;
4330 }
4331
4332 break;
4333 }
4334
4335 if (cd_entry != NULL) {
4336 kr = pmap_cs_associate(pmap,
4337 cd_entry,
4338 start,
4339 size);
4340 } else {
4341 kr = KERN_CODESIGN_ERROR;
4342 }
4343#if 00
4344 printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
4345 kr = KERN_SUCCESS;
4346#endif
4347 return kr;
4348}
4349#endif /* PMAP_CS */
4350