/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/kern_cdata.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>
#include <mach/mach_vm.h>

static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
static size_t kcdata_get_memory_size_for_data(uint32_t size);
static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);
static void kcdata_object_no_senders(ipc_port_t port, mach_port_mscount_t mscount);

#ifndef ROUNDUP
#define ROUNDUP(x, y) ((((x)+(y)-1)/(y))*(y))
#endif

/*
 * zlib needs space to store its metadata; this size is independent of the
 * window bits and other zlib internals
 */
#define ZLIB_METADATA_SIZE 1440

/* #define kcdata_debug_printf printf */
#define kcdata_debug_printf(...) ;

#pragma pack(push, 4)

/* Internal structs for convenience */
struct _uint64_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint64_t data;
};

struct _uint32_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint32_t data;
};

#pragma pack(pop)

int _Atomic lw_corpse_obj_cnt = 0;

IPC_KOBJECT_DEFINE(IKOT_KCDATA,
    .iko_op_stable     = true,
    .iko_op_no_senders = kcdata_object_no_senders);

KALLOC_TYPE_DEFINE(KCDATA_OBJECT, struct kcdata_object, KT_DEFAULT);

os_refgrp_decl(static, kcdata_object_refgrp, "kcdata_object", NULL);

/* Grab a throttle slot for rate-limited kcdata object type(s) */
kern_return_t
kcdata_object_throttle_get(
	kcdata_obj_flags_t flags)
{
	int oval, nval;

	/* Currently only lightweight corpse is rate-limited */
	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
			if (oval >= MAX_INFLIGHT_KCOBJECT_LW_CORPSE) {
				printf("Too many lightweight corpses in flight: %d\n", oval);
				os_atomic_rmw_loop_give_up(return KERN_RESOURCE_SHORTAGE);
			}
			nval = oval + 1;
		});
	}

	return KERN_SUCCESS;
}

/* Release a throttle slot for rate-limited kcdata object type(s) */
void
kcdata_object_throttle_release(
	kcdata_obj_flags_t flags)
{
	int oval, nval;

	/* Currently only lightweight corpse is rate-limited */
	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
			nval = oval - 1;
			if (__improbable(nval < 0)) {
				os_atomic_rmw_loop_give_up(panic("Lightweight corpse kcdata object over-released"));
			}
		});
	}
}

/*
 * Create an object representation for the given kcdata.
 *
 * Captures the kcdata descriptor ref in the object. If the object creation
 * should be rate-limited, kcdata_object_throttle_get() must be called
 * manually before invoking kcdata_create_object(), so as to save
 * the work (of creating the enclosed kcdata blob) if a throttled reference
 * cannot be obtained in the first place.
 */
kern_return_t
kcdata_create_object(
	kcdata_descriptor_t data,
	kcdata_obj_flags_t flags,
	uint32_t size,
	kcdata_object_t *objp)
{
	kcdata_object_t obj;

	if (data == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	obj = zalloc_flags(KCDATA_OBJECT,
	    Z_ZERO | Z_WAITOK | Z_NOFAIL | Z_SET_NOTSHARED);

	obj->ko_data = data;
	obj->ko_flags = flags;
	obj->ko_alloc_size = size;
	obj->ko_port = IP_NULL;

	os_ref_init_count(&obj->ko_refs, &kcdata_object_refgrp, 1);

	*objp = obj;

	return KERN_SUCCESS;
}
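
/*
 * Illustrative sketch (not part of this file's build) of the intended call
 * order for a rate-limited object type, per the comment above. `my_kcdata`,
 * `my_size`, and `objp` are hypothetical.
 *
 *	kcdata_obj_flags_t obj_flags = KCDATA_OBJECT_TYPE_LW_CORPSE;
 *	if (kcdata_object_throttle_get(obj_flags) != KERN_SUCCESS) {
 *		return KERN_RESOURCE_SHORTAGE;   // too many in flight; skip building the blob
 *	}
 *	// ... build the kcdata blob only after a slot is secured ...
 *	kern_return_t kr = kcdata_create_object(my_kcdata, obj_flags, my_size, &objp);
 *	if (kr != KERN_SUCCESS) {
 *		kcdata_object_throttle_release(obj_flags);   // creation failed: give the slot back
 *	}
 */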

void
kcdata_object_reference(kcdata_object_t obj)
{
	if (obj == KCDATA_OBJECT_NULL) {
		return;
	}

	os_ref_retain(&obj->ko_refs);
}

static void
kcdata_object_destroy(kcdata_object_t obj)
{
	void *begin_addr;
	ipc_port_t port;
	kcdata_obj_flags_t flags;

	if (obj == KCDATA_OBJECT_NULL) {
		return;
	}

	port = obj->ko_port;
	flags = obj->ko_flags;

	/* Release the port */
	if (IP_VALID(port)) {
		ipc_kobject_dealloc_port(port, 0, IKOT_KCDATA);
	}

	/* Release the ref for rate-limited kcdata object type(s) */
	kcdata_object_throttle_release(flags);

	/* Destroy the kcdata backing captured in the object */
	begin_addr = kcdata_memory_get_begin_addr(obj->ko_data);
	kfree_data(begin_addr, obj->ko_alloc_size);
	kcdata_memory_destroy(obj->ko_data);

	/* Free the object */
	zfree(KCDATA_OBJECT, obj);
}

void
kcdata_object_release(kcdata_object_t obj)
{
	if (obj == KCDATA_OBJECT_NULL) {
		return;
	}

	if (os_ref_release(&obj->ko_refs) > 0) {
		return;
	}
	/* last ref */

	kcdata_object_destroy(obj);
}

/* Produces kcdata object ref */
kcdata_object_t
convert_port_to_kcdata_object(ipc_port_t port)
{
	kcdata_object_t obj = KCDATA_OBJECT_NULL;

	if (IP_VALID(port)) {
		obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
		if (obj != KCDATA_OBJECT_NULL) {
			zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);
			kcdata_object_reference(obj);
		}
	}

	return obj;
}

/* Consumes kcdata object ref */
ipc_port_t
convert_kcdata_object_to_port(kcdata_object_t obj)
{
	if (obj == KCDATA_OBJECT_NULL) {
		return IP_NULL;
	}

	zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);

	if (!ipc_kobject_make_send_lazy_alloc_port(&obj->ko_port,
	    obj, IKOT_KCDATA, IPC_KOBJECT_ALLOC_NONE)) {
		kcdata_object_release(obj);
	}
	/* object ref consumed */

	return obj->ko_port;
}

static void
kcdata_object_no_senders(
	ipc_port_t port,
	__unused mach_port_mscount_t mscount)
{
	kcdata_object_t obj;

	obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
	assert(obj != KCDATA_OBJECT_NULL);

	/* release the ref given by no-senders notification */
	kcdata_object_release(obj);
}

/*
 * Estimates the buffer size that should be allocated to hold num_items items
 * of known types with overall payload length payload_size.
 *
 * NOTE: This function will not give an accurate estimate for buffers that will
 * contain unknown types (those with string descriptions).
 */
uint32_t
kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
{
	/*
	 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
	 */
	uint32_t max_padding_bytes = 0;
	uint32_t max_padding_with_item_description_bytes = 0;
	uint32_t estimated_required_buffer_size = 0;
	const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);

	if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	return estimated_required_buffer_size;
}
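
/*
 * Worked example (illustrative, assuming KCDATA_ALIGNMENT_SIZE == 16 and a
 * 16-byte struct kcdata_item, which are this author's reading of the headers):
 * 3 items with a 100-byte total payload estimate to
 *     3 * 15 (padding) + 3 * 16 (item headers) + 2 * 16 (begin/end markers)
 *     + 100 (payload) = 225 bytes.
 */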

kcdata_descriptor_t
kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
	kcdata_descriptor_t data = NULL;
	mach_vm_address_t user_addr = 0;
	uint16_t clamped_flags = (uint16_t) flags;

	data = kalloc_type(struct kcdata_descriptor, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	data->kcd_addr_begin = buffer_addr_p;
	data->kcd_addr_end = buffer_addr_p;
	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
	data->kcd_length = size;
	data->kcd_endalloced = 0;

	/* Initialize the BEGIN header */
	if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
		kcdata_memory_destroy(data);
		return NULL;
	}

	return data;
}
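
/*
 * Typical lifecycle, as an illustrative sketch (the buffer tag, `buf_size`,
 * and allocation strategy are assumptions, not requirements of this API):
 *
 *	mach_vm_address_t addr = (mach_vm_address_t) kalloc_data(buf_size, Z_WAITOK);
 *	kcdata_descriptor_t kcd = kcdata_memory_alloc_init(addr,
 *	    KCDATA_BUFFER_BEGIN_CRASHINFO, buf_size, KCFLAG_USE_MEMCOPY);
 *	// ... push items with kcdata_push_data() and friends ...
 *	kcdata_write_buffer_end(kcd);
 *	kcdata_memory_destroy(kcd);   // frees the descriptor, not the buffer
 */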

kern_return_t
kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
	mach_vm_address_t user_addr = 0;
	uint16_t clamped_flags = (uint16_t) flags;

	if (data == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	bzero(data, sizeof(struct kcdata_descriptor));
	data->kcd_addr_begin = buffer_addr_p;
	data->kcd_addr_end = buffer_addr_p;
	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
	data->kcd_length = size;
	data->kcd_endalloced = 0;

	/* Initialize the BEGIN header */
	return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
}

void *
kcdata_endalloc(kcdata_descriptor_t data, size_t length)
{
	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
	/* round up the allocation and ensure the return value is uint64-aligned */
	size_t toalloc = ROUNDUP(length, sizeof(uint64_t)) + (curend % sizeof(uint64_t));
	/* an arbitrary limit: make sure we don't allocate more than 1/4 of the remaining buffer */
	if (data->kcd_length / 4 <= toalloc) {
		return NULL;
	}
	data->kcd_length -= toalloc;
	data->kcd_endalloced += toalloc;
	return (void *)(curend - toalloc);
}

/* Zeros and releases data allocated from the end of the buffer */
static void
kcdata_release_endallocs(kcdata_descriptor_t data)
{
	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
	size_t endalloced = data->kcd_endalloced;
	if (endalloced > 0) {
		bzero((void *)curend, endalloced);
		data->kcd_length += endalloced;
		data->kcd_endalloced = 0;
	}
}

void *
kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
{
	if (data == NULL) {
		return NULL;
	}

	return (void *)data->kcd_addr_begin;
}

uint64_t
kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
{
	assert(kcd != NULL);
	return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
}

uint64_t
kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
{
	kern_return_t kr;

	assert(kcd != NULL);
	if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
		uint64_t totalout, totalin;

		kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
		if (kr == KERN_SUCCESS) {
			return totalin;
		} else {
			return 0;
		}
	} else {
		/* If compression wasn't used, get the number of bytes used */
		return kcdata_memory_get_used_bytes(kcd);
	}
}

/*
 * Free up the memory associated with kcdata
 */
kern_return_t
kcdata_memory_destroy(kcdata_descriptor_t data)
{
	if (!data) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * data->kcd_addr_begin points to memory that is not tracked by the
	 * kcdata lib, so it is not cleared or freed here.
	 */
	kfree_type(struct kcdata_descriptor, data);
	return KERN_SUCCESS;
}

/* Used by zlib to allocate space in its metadata section */
static void *
kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
{
	void *result;
	struct kcdata_compress_descriptor *cd = opaque;
	int alloc_size = ~31L & (31 + (items * size));

	result = (void *)((uintptr_t)cd->kcd_cd_base + cd->kcd_cd_offset);
	if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
		result = Z_NULL;
	} else {
		cd->kcd_cd_offset += alloc_size;
	}

	kcdata_debug_printf("%s: %d * %d = %d => %p\n", __func__, items, size, items * size, result);

	return result;
}

/* Used by zlib to free previously allocated space in its metadata section */
static void
kcdata_compress_zfree(void *opaque, void *ptr)
{
	(void) opaque;
	(void) ptr;

	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);

	/*
	 * Since the buffers we are using are temporary, we don't worry about
	 * freeing memory for now. Besides, testing has shown that zlib only calls
	 * this at the end, near deflateEnd() or a Z_FINISH deflate() call.
	 */
}

/* Used to initialize the selected compression algorithm's internal state (if any) */
static kern_return_t
kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr)
{
	kern_return_t ret = KERN_SUCCESS;
	size_t size;
	int wbits = 12, memlevel = 3;
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	cd->kcd_cd_memcpy_f = memcpy_f;
	cd->kcd_cd_compression_type = type;
	cd->kcd_cd_totalout_addr = totalout_addr;
	cd->kcd_cd_totalin_addr = totalin_addr;

	switch (type) {
	case KCDCT_ZLIB:
		/* allocate space for the metadata used by zlib */
		size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel));
		kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length);
		kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length);
		void *buf = kcdata_endalloc(data, size);
		if (buf == NULL) {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}

		cd->kcd_cd_zs.avail_in = 0;
		cd->kcd_cd_zs.next_in = NULL;
		cd->kcd_cd_zs.avail_out = 0;
		cd->kcd_cd_zs.next_out = NULL;
		cd->kcd_cd_zs.opaque = cd;
		cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc;
		cd->kcd_cd_zs.zfree = kcdata_compress_zfree;
		cd->kcd_cd_base = (void *)(data->kcd_addr_begin + data->kcd_length - size);
		data->kcd_length -= size;
		cd->kcd_cd_offset = 0;
		cd->kcd_cd_maxoffset = size;
		cd->kcd_cd_flags = 0;

		kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size);

		if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
			kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
			ret = KERN_INVALID_ARGUMENT;
		}
		break;
	default:
		panic("kcdata_init_compress_state: invalid compression type: %d", (int) type);
	}

	return ret;
}


/*
 * Turn on the compression logic for kcdata
 */
kern_return_t
kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
{
	kern_return_t kr;
	mach_vm_address_t user_addr, totalout_addr, totalin_addr;
	struct _uint64_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);

	assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);

	/* reset the compression descriptor */
	bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));

	/* add the header information */
	kcdata_add_uint64_with_description(data, type, "kcd_c_type");

	/* reserve space to write total out */
	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	memcpy((void *)totalout_addr, &save_data, size_req);

	/* space for total in */
	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	memcpy((void *)totalin_addr, &save_data, size_req);

	/* add the inner buffer */
	kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);

	/* save the flag */
	data->kcd_flags |= KCFLAG_USE_COMPRESSION;

	/* initialize algorithm specific state */
	kr = kcdata_init_compress_state(data, memcpy_f, type, totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data));
	if (kr != KERN_SUCCESS) {
		kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
		return kr;
	}

	return KERN_SUCCESS;
}
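
/*
 * Illustrative sketch of enabling compression right after initializing a
 * descriptor. The header tag and the `my_memcpy` callback are hypothetical;
 * any memcpy-compatible function the caller trusts will do.
 *
 *	kr = kcdata_init_compress(kcd, KCDATA_BUFFER_BEGIN_STACKSHOT,
 *	    my_memcpy, KCDCT_ZLIB);
 *	if (kr != KERN_SUCCESS) {
 *		// fall back to writing uncompressed data
 *	}
 *	// ... kcdata_push_data()/kcdata_push_array() now compress transparently ...
 *	kcd_finalize_compression(kcd);   // mark the final chunk before the last push
 *	kcdata_finish(kcd);              // writes stats and tears down zlib state
 */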

static inline
int
kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
	case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
	case KCDCF_FINISH: return Z_FINISH;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
	}
}

static inline
int
kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH: /* fall through */
	case KCDCF_SYNC_FLUSH: return Z_OK;
	case KCDCF_FINISH: return Z_STREAM_END;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
	}
}

/* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
static kern_return_t
kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
    size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
    enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;
	int expected_ret, ret;

	zs->next_out = outbuffer;
	zs->avail_out = (unsigned int) outsize;
	zs->next_in = inbuffer;
	zs->avail_in = (unsigned int) insize;
	ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
	if (zs->avail_in != 0 || zs->avail_out <= 0) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
	if (ret != expected_ret) {
		/*
		 * Should only fail in catastrophic, unrecoverable cases (i.e.,
		 * corrupted z_stream, or incorrect configuration)
		 */
		panic("zlib kcdata compression ret = %d", ret);
	}

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
	if (wrote) {
		*wrote = outsize - zs->avail_out;
	}
	return KERN_SUCCESS;
}

/*
 * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
 * @outbuffer (of size @outsize). Flush based on the @flush parameter.
 *
 * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
 * @outsize isn't sufficient. Also writes the number of bytes written to
 * @outbuffer into @wrote.
 */
static kern_return_t
kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
    void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush);

	/* don't compress if we are in a window */
	if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
		assert(cd->kcd_cd_memcpy_f);
		if (outsize >= insize) {
			cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
			if (wrote) {
				*wrote = insize;
			}
			return KERN_SUCCESS;
		} else {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
	default:
		panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
	}
}

static size_t
kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	return (size_t) deflateBound(zs, (unsigned long) size);
}


/*
 * returns the worst-case, maximum length of the compressed data when
 * compressing a buffer of size @size using the configured algorithm.
 */
static size_t
kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
{
	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_compression_bound_zlib(data, size);
	case KCDCT_NONE:
		return size;
	default:
		panic("%s: unknown compression method", __func__);
	}
}

/*
 * kcdata_compress_chunk_with_flags:
 * Compress the buffer found at @input_data (length @input_size) into the kcdata
 * buffer described by @data. This method constructs the kcdata_item_t
 * required by parsers using the type information @type and flags @flags.
 *
 * Returns KERN_SUCCESS when successful. Currently, asserts on failure.
 */
kern_return_t
kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
{
	assert(data);
	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));
	assert(input_data);
	struct kcdata_item info;
	char padding_data[16] = {0};
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	size_t wrote = 0;
	kern_return_t kr;

	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
	    __func__, type, input_data, input_size, kcdata_flags);

	/*
	 * first, get memory space. The uncompressed size must fit in the remainder
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
	if (total_uncompressed_size > data->kcd_length ||
	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}
	uint32_t padding = kcdata_calc_padding(input_size);
	assert(padding < sizeof(padding_data));

	void *space_start = (void *) data->kcd_addr_end;
	void *space_ptr = space_start;

	/* create the output stream */
	size_t total_uncompressed_space_remaining = total_uncompressed_size;

	/* create the info data */
	bzero(&info, sizeof(info));
	info.type = type;
	info.size = input_size + padding;
	info.flags = kcdata_flags;

	/*
	 * Up to three separate compress calls are needed because of the
	 * scatter-gather nature of this operation: the kcdata item header (info)
	 * and the padding live on the stack, while the actual data lives
	 * somewhere else.
	 */

	/* create the input stream for info & compress */
	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
	    KCDCF_SYNC_FLUSH;
	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining -= wrote;

	/* If there is input provided, compress that here */
	if (input_size) {
		flush = padding ? KCDCF_NO_FLUSH :
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
		    KCDCF_SYNC_FLUSH;
		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	/* If the item and its data require padding to maintain alignment,
	 * "compress" that into the output buffer. */
	if (padding) {
		/* write the padding */
		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
		if (wrote == 0) {
			return KERN_FAILURE;
		}
		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
		total_uncompressed_space_remaining -= wrote;
	}

	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= total_uncompressed_size);

	/* move the end marker forward */
	data->kcd_addr_end = (mach_vm_address_t) space_start + (total_uncompressed_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}

/*
 * kcdata_compress_chunk:
 * Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata
 * flags, i.e. it records that the struct is padded and the number of padding
 * bytes.
 *
 * Returns are the same as in kcdata_compress_chunk_with_flags()
 */
kern_return_t
kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
{
	/* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
	return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
}

kern_return_t
kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
{
	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		return kcdata_compress_chunk(data, type, input_data, size);
	} else {
		kern_return_t ret;
		mach_vm_address_t uaddr = 0;
		ret = kcdata_get_memory_addr(data, type, size, &uaddr);
		if (ret != KERN_SUCCESS) {
			return ret;
		}

		kcdata_memcpy(data, uaddr, input_data, size);
		return KERN_SUCCESS;
	}
}
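
/*
 * Illustrative usage (the type tag and struct are hypothetical):
 *
 *	struct my_info info = { ... };
 *	kr = kcdata_push_data(kcd, MY_KCTYPE_INFO, sizeof(info), &info);
 *
 * The same call works whether or not the descriptor was set up with
 * compression; the item header, payload, and alignment padding are emitted
 * either way.
 */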

kern_return_t
kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
{
	uint64_t flags = type_of_element;
	flags = (flags << 32) | count;
	uint32_t total_size = count * size_of_element;
	uint32_t pad = kcdata_calc_padding(total_size);

	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
	} else {
		kern_return_t ret;
		mach_vm_address_t uaddr = 0;
		ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
		if (ret != KERN_SUCCESS) {
			return ret;
		}

		kcdata_memcpy(data, uaddr, input_data, total_size);
		return KERN_SUCCESS;
	}
}
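
/*
 * Illustrative usage (MY_ARRAY_ELEM_TYPE is hypothetical): pushing 10
 * uint64_t values as one array item. As above, the element type and count
 * are packed into the item's 64-bit flags word.
 *
 *	uint64_t values[10] = { ... };
 *	kr = kcdata_push_array(kcd, MY_ARRAY_ELEM_TYPE,
 *	    sizeof(values[0]), 10, values);
 */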

/* A few words on how window compression works:
 *
 * This is how the buffer looks when the window is opened:
 *
 * X---------------------------------------------------------------------X
 * |                             |                                       |
 * | Filled with stackshot data  |              Zero bytes               |
 * |                             |                                       |
 * X---------------------------------------------------------------------X
 *                               ^
 *                                \ - kcd_addr_end
 *
 * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
 *
 * Any kcdata_* operation will then push data to the buffer like normal. (If
 * you call any compressing functions they will pass-through, i.e. no
 * compression will be done) Once the window is closed, the following takes
 * place:
 *
 * X---------------------------------------------------------------------X
 * |                 |            |                  |                   |
 * |  Existing data  |  New data  |  Scratch buffer  |                   |
 * |                 |            |                  |                   |
 * X---------------------------------------------------------------------X
 *                   ^            ^                  ^
 *                   |            |                  |
 *                   \ - kcd_cd_mark_begin           |
 *                                |                  |
 *                                \ - kcd_addr_end   |
 *                                                   |
 *                   kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - /
 *
 * (1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
 *     compression algorithm to compress to the scratch buffer.
 * (2) The scratch buffer's contents are copied into the area denoted "New
 *     data" above, effectively overwriting the uncompressed data with the
 *     compressed one.
 * (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
 */
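
/*
 * Illustrative sketch of the window API described above (MY_TYPE and `size`
 * are hypothetical):
 *
 *	kcdata_compression_window_open(kcd);        // pass-through mode begins
 *	kr = kcdata_get_memory_addr(kcd, MY_TYPE, size, &addr);
 *	// ... fill in the item at addr ...
 *	kr = kcdata_compression_window_close(kcd);  // compress [mark, end) in place
 */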

/* Record the state, and restart compression from this later */
void
kcdata_compression_window_open(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);

	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
		cd->kcd_cd_mark_begin = data->kcd_addr_end;
	}
}

/* Compress the region between the mark and the current end */
kern_return_t
kcdata_compression_window_close(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	uint64_t total_size, max_size;
	void *space_start, *space_ptr;
	size_t total_uncompressed_space_remaining, wrote = 0;
	kern_return_t kr;

	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);

	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
		/* clear the window marker and return, this is a no-op */
		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);

	/*
	 * first, get memory space. The uncompressed size must fit in the remainder
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	if (max_size > data->kcd_length ||
	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* clear the window marker */
	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;

	space_start = (void *) data->kcd_addr_end;
	space_ptr = space_start;
	total_uncompressed_space_remaining = (unsigned int) max_size;
	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	if (wrote == 0) {
		return KERN_FAILURE;
	}
	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
	total_uncompressed_space_remaining -= wrote;

	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= max_size);

	/* copy to the original location */
	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));

	/* rewind the end marker */
	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}

static kern_return_t
kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);

	*totalout = (uint64_t) zs->total_out;
	*totalin = (uint64_t) zs->total_in;

	return KERN_SUCCESS;
}

static kern_return_t
kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
{
	kern_return_t kr;

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
		break;
	case KCDCT_NONE:
		*totalout = *totalin = kcdata_memory_get_used_bytes(data);
		kr = KERN_SUCCESS;
		break;
	default:
		panic("invalid compression flag 0x%llx in kcdata_get_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
	}

	return kr;
}

kern_return_t
kcdata_write_compression_stats(kcdata_descriptor_t data)
{
	kern_return_t kr;
	uint64_t totalout, totalin;

	kr = kcdata_get_compression_stats(data, &totalout, &totalin);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;

	return kr;
}

static kern_return_t
kcdata_finish_compression_zlib(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	/*
	 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
	 * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full
	 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
	 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
	 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
	 * This doesn't get the compression metadata; that's zeroed by kcdata_release_endallocs().
	 *
	 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
	 * is done on the same buffer for the before and after calculation so there's nothing functionally
	 * broken. The same buffer cleanup is done here for completeness' sake.
	 * From rdar://problem/64381661
	 */

	void *stackshot_end = (char *)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
	uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
	bzero(stackshot_end, zero_fill_size);

	if (deflateEnd(zs) == Z_OK) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

static kern_return_t
kcdata_finish_compression(kcdata_descriptor_t data)
{
	kcdata_write_compression_stats(data);

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_finish_compression_zlib(data);
	case KCDCT_NONE:
		return KERN_SUCCESS;
	default:
		panic("invalid compression type 0x%llx in kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
	}
}

kern_return_t
kcdata_finish(kcdata_descriptor_t data)
{
	int ret = KERN_SUCCESS;
	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		ret = kcdata_finish_compression(data);
	}
	kcdata_release_endallocs(data);
	return ret;
}

void
kcd_finalize_compression(kcdata_descriptor_t data)
{
	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
	}
}

/*
 * Routine: kcdata_get_memory_addr
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark memory if it has failed populating the
 *       data in middle of operation.
 * params: data - pointer describing the crash info allocation
 *         type - type of data to be put. See corpse.h for defined types
 *         size - size requested. The header describes this size
 * returns: mach_vm_address_t address in user memory for copyout().
 */
kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
{
	/* record number of padding bytes as lower 4 bits of flags */
	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
	return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
}
/*
 * Routine: kcdata_write_buffer_end
 *
 * Desc: Write the buffer end marker. This does not advance the end pointer in
 * the kcdata_descriptor_t, so it may be used conservatively before additional
 * data is added, as long as it is also called after the last time data is added.
 *
 * params: data - pointer describing the crash info allocation
 */

kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)
{
	struct kcdata_item info;
	bzero(&info, sizeof(info));
	info.type = KCDATA_TYPE_BUFFER_END;
	info.size = 0;
	return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
}

/*
 * Routine: kcdata_get_memory_addr_with_flavor
 * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
 */

static kern_return_t
kcdata_get_memory_addr_with_flavor(
	kcdata_descriptor_t data,
	uint32_t type,
	uint32_t size,
	uint64_t flags,
	mach_vm_address_t *user_addr)
{
	kern_return_t kr;
	struct kcdata_item info;

	uint32_t orig_size = size;
	/* make sure 16 byte aligned */
	uint32_t padding = kcdata_calc_padding(size);
	size += padding;
	uint32_t total_size = size + sizeof(info);

	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));

	bzero(&info, sizeof(info));
	info.type = type;
	info.size = size;
	info.flags = flags;

	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
	if (total_size + sizeof(info) > data->kcd_length ||
	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
	if (kr) {
		return kr;
	}

	data->kcd_addr_end += sizeof(info);

	if (padding) {
		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
		if (kr) {
			return kr;
		}
	}

	*user_addr = data->kcd_addr_end;
	data->kcd_addr_end += size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}

/* Routine: kcdata_get_memory_size_for_data
 * Desc: returns the amount of memory that is required to store the information
 *       in kcdata
 */
static size_t
kcdata_get_memory_size_for_data(uint32_t size)
{
	return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
}
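
/*
 * Worked example (assuming 16-byte alignment and a 16-byte item header):
 * a 13-byte payload needs 3 bytes of padding, so
 * kcdata_get_memory_size_for_data(13) = 13 + 3 + 16 = 32 bytes.
 */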

/*
 * Routine: kcdata_get_memory_addr_for_array
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark memory if it has failed populating the
 *       data in the middle of the operation.
 * params: data - pointer describing the crash info allocation
 *         type_of_element - type of data to be put. See kern_cdata.h for defined types
 *         size_of_element - size of element. The header describes this size
 *         count - num of elements in array.
 * returns: mach_vm_address_t address in user memory for copyout().
 */

kern_return_t
kcdata_get_memory_addr_for_array(
	kcdata_descriptor_t data,
	uint32_t type_of_element,
	uint32_t size_of_element,
	uint32_t count,
	mach_vm_address_t *user_addr)
{
	/* for arrays we record the number of padding bytes as the low-order 4 bits
	 * of the type field. KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
	uint64_t flags = type_of_element;
	flags = (flags << 32) | count;
	uint32_t total_size = count * size_of_element;
	uint32_t pad = kcdata_calc_padding(total_size);

	return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
}

/*
 * Routine: kcdata_add_container_marker
 * Desc: Add a container marker in the buffer for type and identifier.
 * params: data - pointer describing the crash info allocation
 *         header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN, KCDATA_TYPE_CONTAINER_END)
 *         container_type - type of data to be put. See kern_cdata.h for defined types
 *         identifier - unique identifier. This is required to match nested containers.
 * returns: return value of kcdata_get_memory_addr()
 */

kern_return_t
kcdata_add_container_marker(
	kcdata_descriptor_t data,
	uint32_t header_type,
	uint32_t container_type,
	uint64_t identifier)
{
	mach_vm_address_t user_addr;
	kern_return_t kr;
	uint32_t data_size;

	assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);

	data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0;

	if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
		kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		if (data_size) {
			kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
		}
	} else {
		kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
	}

	return kr;
}
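
/*
 * Illustrative usage: wrapping per-thread items in a container so parsers
 * can group them. The container type and identifier here are examples.
 *
 *	kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_BEGIN,
 *	    STACKSHOT_KCCONTAINER_THREAD, thread_id);
 *	// ... push items belonging to this thread ...
 *	kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_END,
 *	    STACKSHOT_KCCONTAINER_THREAD, thread_id);
 */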

/*
 * Routine: kcdata_undo_add_container_begin
 * Desc: call this after adding a container begin, but before adding anything
 *       else, to revert it.
 */
kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)
{
	/*
	 * the payload of a container begin is a single uint64_t. It is padded out
	 * to 16 bytes.
	 */
	const mach_vm_address_t padded_payload_size = 16;
	data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}

/*
 * Routine: kcdata_memcpy
 * Desc: a common function to copy data out based on either copyout or memcopy flags
 * params: data - pointer describing the kcdata buffer
 *         dst_addr - destination address
 *         src_addr - source address
 *         size - size in bytes to copy.
 * returns: KERN_NO_ACCESS if copyout fails.
 */

kern_return_t
kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
{
	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(src_addr, dst_addr, size)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)dst_addr, src_addr, size);
	}
	return KERN_SUCCESS;
}

/*
 * Routine: kcdata_bzero
 * Desc: zero out a portion of a kcdata buffer.
 */
kern_return_t
kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
{
	kern_return_t kr = KERN_SUCCESS;
	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
		uint8_t zeros[16] = {};
		while (size) {
			uint32_t block_size = MIN(size, 16);
			kr = copyout(&zeros, dst_addr, block_size);
			if (kr) {
				return KERN_NO_ACCESS;
			}
			/* advance past the block just zeroed (without this, every
			 * iteration would copy out to the same 16 bytes) */
			dst_addr += block_size;
			size -= block_size;
		}
		return KERN_SUCCESS;
	} else {
		bzero((void *)dst_addr, size);
		return KERN_SUCCESS;
	}
}

/*
 * Routine: kcdata_add_type_definition
 * Desc: add type definition to kcdata buffer.
 *       see feature description in documentation above.
 * params: data - pointer describing the kcdata buffer
 *         type_id - unique type identifier for this data
 *         type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
 *         elements_array - address to descriptors for each field in struct
 *         elements_count - count of how many fields are there in struct.
 * returns: return code from kcdata_get_memory_addr in case of failure.
 */

kern_return_t
kcdata_add_type_definition(
	kcdata_descriptor_t data,
	uint32_t type_id,
	char *type_name,
	struct kcdata_subtype_descriptor *elements_array_addr,
	uint32_t elements_count)
{
	kern_return_t kr = KERN_SUCCESS;
	struct kcdata_type_definition kc_type_definition;
	mach_vm_address_t user_addr;
	uint32_t total_size = sizeof(struct kcdata_type_definition);
	bzero(&kc_type_definition, sizeof(kc_type_definition));

	if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}
	strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
	kc_type_definition.kct_num_elements = elements_count;
	kc_type_definition.kct_type_identifier = type_id;

	total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
	/* record number of padding bytes as lower 4 bits of flags */
	if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
	    kcdata_calc_padding(total_size), &user_addr))) {
		return kr;
	}
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
		return kr;
	}
	user_addr += sizeof(struct kcdata_type_definition);
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
		return kr;
	}
	return kr;
}
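
/*
 * Illustrative sketch, assuming the kcdata_subtype_descriptor field names in
 * kcdata.h (kcs_flags, kcs_elem_type, kcs_elem_offset, kcs_elem_size,
 * kcs_name); the type id, type name, and described struct are hypothetical:
 *
 *	struct kcdata_subtype_descriptor subtypes[1] = {{
 *		.kcs_flags       = KCS_SUBTYPE_FLAGS_NONE,
 *		.kcs_elem_type   = KC_ST_UINT64,
 *		.kcs_elem_offset = 0,
 *		.kcs_elem_size   = sizeof(uint64_t),
 *		.kcs_name        = "my_field",
 *	}};
 *	kr = kcdata_add_type_definition(kcd, MY_CUSTOM_TYPE_ID, "my_type",
 *	    subtypes, 1);
 */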

kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
{
	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}

	kern_return_t kr = 0;
	mach_vm_address_t user_addr;
	struct _uint64_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);
	bzero(&save_data, size_req);

	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
	save_data.data = data;

	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
		/* allocate space for the output */
		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
	}

	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(&save_data, user_addr, size_req)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)user_addr, &save_data, size_req);
	}
	return KERN_SUCCESS;
}

kern_return_t
kcdata_add_uint32_with_description(
	kcdata_descriptor_t data_desc,
	uint32_t data,
	const char *description)
{
	assert(strlen(description) < KCDATA_DESC_MAXLEN);
	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}
	kern_return_t kr = 0;
	mach_vm_address_t user_addr;
	struct _uint32_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);

	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
	save_data.data = data;

	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
		/* allocate space for the output */
		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
	}

	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(&save_data, user_addr, size_req)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)user_addr, &save_data, size_req);
	}

	return KERN_SUCCESS;
}


/* end buffer management api */