/*
 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cprotect.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/filio.h>
#include <sys/content_protection.h>
#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>
// for write protection
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#define PTR_ADD(type, base, offset) (type)((uintptr_t)(base) + (offset))

// -- struct cpx --

/*
 * This structure contains the unwrapped key and is passed to the lower layers.
 * It is private so users must use the accessors declared in sys/cprotect.h
 * to read/write it.
 */

// cpx_flags defined in cprotect.h
enum {
    CPX_SEP_WRAPPEDKEY          = 0x01,
    CPX_IV_AES_CTX_INITIALIZED  = 0x02,
    CPX_USE_OFFSET_FOR_IV       = 0x04,

    // Using AES IV context generated from key
    CPX_IV_AES_CTX_VFS          = 0x08,
    CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10,
    CPX_COMPOSITEKEY            = 0x20,

    // write page protection
    CPX_WRITE_PROTECTABLE       = 0x40
};

/*
 * Variable-length CPX structure. See the fixed-length variant in cprotect.h.
 */
struct cpx {
#if DEBUG
    uint32_t cpx_magic1;
#endif
    aes_encrypt_ctx *cpx_iv_aes_ctx_ptr; // Pointer to context used for generating the IV
    cpx_flags_t cpx_flags;
    uint16_t cpx_max_key_len;
    uint16_t cpx_key_len;
    // fixed length up to here; cpx_cached_key is variable-length
    uint8_t cpx_cached_key[];
};
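
/*
 * Illustrative sketch, compiled out: how a caller is expected to go through
 * the accessors below rather than reaching into struct cpx directly. Not part
 * of the original source; the `raw_key` / `raw_key_len` names are hypothetical.
 */
#if 0
static void
cpx_usage_example(const uint8_t *raw_key, size_t raw_key_len)
{
    // Allocate a cpx large enough for the key and request an IV context.
    cpx_t cpx = cpx_alloc(raw_key_len, true);
    if (cpx == NULL) {
        return;
    }

    // Copy the key material in through the accessors only.
    cpx_set_key_len(cpx, (uint16_t)raw_key_len);
    memcpy(cpx_key(cpx), raw_key, raw_key_len);
    assert(cpx_has_key(cpx));

    // ... hand the cpx to the lower layers ...

    cpx_free(cpx);
}
#endif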

/* Allows us to switch between CPX types */
typedef union cpxunion {
    struct cpx cpx_var;
    fcpx_t cpx_fixed;
} cpxunion_t;

ZONE_DEFINE(cpx_zone, "cpx",
    sizeof(struct fcpx), ZC_ZFREE_CLEARMEM);
ZONE_DEFINE(aes_ctz_zone, "AES ctx",
    sizeof(aes_encrypt_ctx), ZC_ZFREE_CLEARMEM);

// Note: see struct fcpx defined in sys/cprotect.h

// -- cpx_t accessors --

size_t
cpx_size(size_t key_len)
{
    // This should pick up the 'magic' word in DEBUG for free.
    size_t size = sizeof(struct cpx) + key_len;

#if DEBUG
    size += 4; // Extra room for the trailing magic2 word written by cpx_init().
#endif

    return size;
}

size_t
cpx_sizex(const struct cpx *cpx)
{
    return cpx_size(cpx->cpx_max_key_len);
}

cpx_t
cpx_alloc(size_t key_len, bool needs_ctx)
{
    cpx_t cpx = NULL;

#if CONFIG_KEYPAGE_WP
#pragma unused(key_len, needs_ctx)

    /*
     * Macs only use 1 key per volume, so force it into its own page.
     * This way, we can write-protect as needed.
     */

    assert(cpx_size(key_len) <= PAGE_SIZE);
    kmem_alloc(kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE,
        KMA_DATA | KMA_NOFAIL, VM_KERN_MEMORY_FILE);
    // mark the page as protectable, since kmem_alloc succeeded
    cpx->cpx_flags |= CPX_WRITE_PROTECTABLE;
#else
    /* If key page write protection is disabled, just switch to zalloc */

    // error out if you try to request a key that's too big
    if (key_len > VFS_CP_MAX_CACHEBUFLEN) {
        return NULL;
    }

    // the actual key array is fixed-length, but the amount of usable content can vary, via 'key_len'
    cpx = zalloc_flags(cpx_zone, Z_WAITOK | Z_ZERO);

    // if our encryption type needs it, alloc the context
    if (needs_ctx) {
        cpx_alloc_ctx(cpx);
    }

#endif
    cpx_init(cpx, key_len);

    return cpx;
}
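
/*
 * Illustrative sketch, compiled out: the intended CONFIG_KEYPAGE_WP lifecycle.
 * The volume key sits alone in its own page, so once populated the page can be
 * made read-only until cpx_free() unprotects and releases it. Not part of the
 * original source; `volume_key` / `volume_key_len` are hypothetical names.
 */
#if 0
static void
cpx_keypage_wp_example(const uint8_t *volume_key, size_t volume_key_len)
{
    cpx_t cpx = cpx_alloc(volume_key_len, false);
    if (cpx == NULL) {
        return;
    }

    cpx_set_key_len(cpx, (uint16_t)volume_key_len);
    memcpy(cpx_key(cpx), volume_key, volume_key_len);

    // Make the key page read-only; this only has an effect when the page was
    // kmem_alloc'd and marked CPX_WRITE_PROTECTABLE (CONFIG_KEYPAGE_WP builds).
    cpx_writeprotect(cpx);

    // ... use the key ...

    // cpx_free() restores VM_PROT_DEFAULT before zeroing and freeing the page.
    cpx_free(cpx);
}
#endif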

int
cpx_alloc_ctx(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
    (void) cpx;
#else
    if (cpx->cpx_iv_aes_ctx_ptr) {
        // already allocated?
        return 0;
    }

    cpx->cpx_iv_aes_ctx_ptr = zalloc_flags(aes_ctz_zone, Z_WAITOK | Z_ZERO);
#endif // CONFIG_KEYPAGE_WP

    return 0;
}

void
cpx_free_ctx(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
    (void) cpx;
#else
    if (cpx->cpx_iv_aes_ctx_ptr) {
        zfree(aes_ctz_zone, cpx->cpx_iv_aes_ctx_ptr);
    }
#endif // CONFIG_KEYPAGE_WP
}

void
cpx_writeprotect(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
    void *cpxstart = (void *)cpx;
    void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
    if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
        vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_READ), FALSE);
    }
#else
    (void) cpx;
#endif
    return;
}

#if DEBUG
static const uint32_t cpx_magic1 = 0x7b787063; // cpx{
static const uint32_t cpx_magic2 = 0x7870637d; // }cpx
#endif

void
cpx_free(cpx_t cpx)
{
#if DEBUG
    assert(cpx->cpx_magic1 == cpx_magic1);
    assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);
#endif

#if CONFIG_KEYPAGE_WP
    /* unprotect the page before bzeroing */
    void *cpxstart = (void *)cpx;
    void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
    if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
        vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_DEFAULT), FALSE);

        // now zero the memory after un-protecting it
        bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);

        // If we are here, then we used kmem_alloc to get the page. Must use kmem_free to drop it.
        kmem_free(kernel_map, (vm_offset_t)cpx, PAGE_SIZE);
        return;
    }
#else
    // free the context if it wasn't already freed
    cpx_free_ctx(cpx);
    zfree(cpx_zone, cpx);
    return;
#endif
}

void
cpx_init(cpx_t cpx, size_t key_len)
{
#if DEBUG
    cpx->cpx_magic1 = cpx_magic1;
    *PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;
#endif
    cpx->cpx_flags = 0;
    cpx->cpx_key_len = 0;
    assert(key_len <= UINT16_MAX);
    cpx->cpx_max_key_len = (uint16_t)key_len;
}

bool
cpx_is_sep_wrapped_key(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}

void
cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
{
    if (v) {
        SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
    } else {
        CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
    }
}

bool
cpx_is_composite_key(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY);
}

void
cpx_set_is_composite_key(struct cpx *cpx, bool v)
{
    if (v) {
        SET(cpx->cpx_flags, CPX_COMPOSITEKEY);
    } else {
        CLR(cpx->cpx_flags, CPX_COMPOSITEKEY);
    }
}

bool
cpx_use_offset_for_iv(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}

void
cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
{
    if (v) {
        SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
    } else {
        CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
    }
}

bool
cpx_synthetic_offset_for_iv(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}

void
cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v)
{
    if (v) {
        SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
    } else {
        CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
    }
}

uint16_t
cpx_max_key_len(const struct cpx *cpx)
{
    return cpx->cpx_max_key_len;
}

uint16_t
cpx_key_len(const struct cpx *cpx)
{
    return cpx->cpx_key_len;
}

void
cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
{
    cpx->cpx_key_len = key_len;

    if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS)) {
        /*
         * We assume that if the key length is being modified, the key
         * has changed. As a result, un-set any bits related to the
         * AES context, if needed. They should be re-generated
         * on-demand.
         */
        CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS);
    }
}

bool
cpx_has_key(const struct cpx *cpx)
{
    return cpx->cpx_key_len > 0;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
void *
cpx_key(const struct cpx *cpx)
{
    return (void *)cpx->cpx_cached_key;
}
#pragma clang diagnostic pop

void
cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
{
    if (cpx->cpx_iv_aes_ctx_ptr) {
        aes_encrypt_key128(iv_key, cpx->cpx_iv_aes_ctx_ptr);
        SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
        CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);
    }
}

aes_encrypt_ctx *
cpx_iv_aes_ctx(struct cpx *cpx)
{
    if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
        return cpx->cpx_iv_aes_ctx_ptr;
    }

    SHA1_CTX sha1ctxt;
    uint8_t digest[SHA_DIGEST_LENGTH]; /* Kiv */

    /* First init the cp_cache_iv_key[] */
    SHA1Init(&sha1ctxt);

    /*
     * We can only use this when the keys are generated in the AP; as a result
     * we only use the first 32 bytes of key length in the cache key.
     */
    SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
    SHA1Final(digest, &sha1ctxt);

    cpx_set_aes_iv_key(cpx, digest);
    SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);

    return cpx->cpx_iv_aes_ctx_ptr;
}

void
cpx_flush(cpx_t cpx)
{
    bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
    if (cpx->cpx_iv_aes_ctx_ptr) {
        bzero(cpx->cpx_iv_aes_ctx_ptr, sizeof(aes_encrypt_ctx));
    }
    cpx->cpx_flags = 0;
    cpx->cpx_key_len = 0;
}

bool
cpx_can_copy(const struct cpx *src, const struct cpx *dst)
{
    return src->cpx_key_len <= dst->cpx_max_key_len;
}

void
cpx_copy(const struct cpx *src, cpx_t dst)
{
    uint16_t key_len = cpx_key_len(src);
    cpx_set_key_len(dst, key_len);
    memcpy(cpx_key(dst), cpx_key(src), key_len);
    dst->cpx_flags = src->cpx_flags;
    if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
        *(dst->cpx_iv_aes_ctx_ptr) = *(src->cpx_iv_aes_ctx_ptr); // deep copy
    }
}

typedef unsigned char cp_vfs_callback_arg_type_t;
enum {
    CP_TYPE_LOCK_STATE = 0,
    CP_TYPE_EP_STATE = 1,
    CP_TYPE_CX_STATE = 2,
};

typedef struct {
    cp_vfs_callback_arg_type_t type;
    union {
        cp_lock_state_t lock_state;
        cp_ep_state_t ep_state;
        cp_cx_state_t cx_state;
    };
    int valid_uuid;
    uuid_t volume_uuid;
} cp_vfs_callback_arg;

static int
cp_vfs_callback(mount_t mp, void *arg)
{
    cp_vfs_callback_arg *callback_arg = (cp_vfs_callback_arg *)arg;

    if (callback_arg->valid_uuid) {
        struct vfs_attr va;
        VFSATTR_INIT(&va);
        VFSATTR_WANTED(&va, f_uuid);

        if (vfs_getattr(mp, &va, vfs_context_current())) {
            return 0;
        }

        if (!VFSATTR_IS_SUPPORTED(&va, f_uuid)) {
            return 0;
        }

        if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t))) {
            return 0;
        }
    }

    switch (callback_arg->type) {
    case(CP_TYPE_LOCK_STATE):
        VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->lock_state, 0, vfs_context_kernel());
        break;
    case(CP_TYPE_EP_STATE):
        VFS_IOCTL(mp, FIODEVICEEPSTATE, (void *)(uintptr_t)callback_arg->ep_state, 0, vfs_context_kernel());
        break;
    case(CP_TYPE_CX_STATE):
        VFS_IOCTL(mp, FIODEVICECXSTATE, (void *)(uintptr_t)callback_arg->cx_state, 0, vfs_context_kernel());
        break;
    default:
        break;
    }
    return 0;
}

int
cp_key_store_action(cp_key_store_action_t action)
{
    cp_vfs_callback_arg callback_arg;

    memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
    callback_arg.valid_uuid = 0;

    switch (action) {
    case CP_ACTION_LOCKED:
    case CP_ACTION_UNLOCKED:
        callback_arg.type = CP_TYPE_LOCK_STATE;
        callback_arg.lock_state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
        return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
    case CP_ACTION_EP_INVALIDATED:
        callback_arg.type = CP_TYPE_EP_STATE;
        callback_arg.ep_state = CP_EP_INVALIDATED;
        return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
    case CP_ACTION_CX_EXPIRED:
        callback_arg.type = CP_TYPE_CX_STATE;
        callback_arg.cx_state = CP_CX_EXPIRED;
        return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
    default:
        return -1;
    }
}

int
cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action)
{
    cp_vfs_callback_arg callback_arg;

    memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
    callback_arg.valid_uuid = 1;

    switch (action) {
    case CP_ACTION_LOCKED:
    case CP_ACTION_UNLOCKED:
        callback_arg.type = CP_TYPE_LOCK_STATE;
        callback_arg.lock_state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
        return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
    case CP_ACTION_EP_INVALIDATED:
        callback_arg.type = CP_TYPE_EP_STATE;
        callback_arg.ep_state = CP_EP_INVALIDATED;
        return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
    case CP_ACTION_CX_EXPIRED:
        callback_arg.type = CP_TYPE_CX_STATE;
        callback_arg.cx_state = CP_CX_EXPIRED;
        return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
    default:
        return -1;
    }
}

int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
    /*
     * The valid protection classes are from 0 -> N.
     * We use a signed argument to detect unassigned values from
     * directory entry creation time in HFS.
     */
    if (isdir) {
        /* Directories are not allowed to have F, but they can have "NONE" */
        return (protectionclass == PROTECTION_CLASS_CX) ||
               ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
               (protectionclass <= PROTECTION_CLASS_D));
    } else {
        return (protectionclass >= PROTECTION_CLASS_A) &&
               (protectionclass <= PROTECTION_CLASS_CX);
    }
}
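
/*
 * Illustrative examples, not from the original source (assuming the usual
 * ordering of the class constants in sys/content_protection.h, with F falling
 * between D and CX):
 *   cp_is_valid_class(1, PROTECTION_CLASS_DIR_NONE) -> valid   (directory, "NONE")
 *   cp_is_valid_class(1, PROTECTION_CLASS_F)        -> invalid (directories may not be F)
 *   cp_is_valid_class(0, PROTECTION_CLASS_CX)       -> valid   (regular file)
 */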

/*
 * Parses versions of the form 12A316, i.e. <major><minor><revision>, and
 * returns a uint32_t in the form 0xaabbcccc where aa = <major>,
 * bb = <ASCII char>, cccc = <revision>.
 */
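/*
 * Worked example (illustrative): "12A316" parses as a = 12 (0x0C),
 * b = 'A' (0x41), c = 316 (0x013C), yielding 0x0C41013C.
 */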
static cp_key_os_version_t
parse_os_version(const char *vers)
{
    const char *p = vers;

    int a = 0;
    while (*p >= '0' && *p <= '9') {
        a = a * 10 + *p - '0';
        ++p;
    }

    if (!a) {
        return 0;
    }

    int b = *p++;
    if (!b) {
        return 0;
    }

    int c = 0;
    while (*p >= '0' && *p <= '9') {
        c = c * 10 + *p - '0';
        ++p;
    }

    if (!c) {
        return 0;
    }

    return (a & 0xff) << 24 | b << 16 | (c & 0xffff);
}

cp_key_os_version_t
cp_os_version(void)
{
    static cp_key_os_version_t cp_os_version;

    if (cp_os_version) {
        return cp_os_version;
    }

    if (!osversion[0]) {
        return 0;
    }

    cp_os_version = parse_os_version(osversion);
    if (!cp_os_version) {
        printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
        cp_os_version = 1;
    }

    return cp_os_version;
}