1/*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
#include <stdint.h>

#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/kern_cdata.h>
#include <kern/kalloc.h>
#include <mach/mach_vm.h>
40
41static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
42
43/*
44 * Estimates how large of a buffer that should be allocated for a buffer that will contain
45 * num_items items of known types with overall length payload_size.
46 *
47 * NOTE: This function will not give an accurate estimate for buffers that will
48 * contain unknown types (those with string descriptions).
49 */
50uint32_t kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
51{
52 /*
53 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
54 */
55 uint32_t max_padding_bytes = num_items * (KCDATA_ALIGNMENT_SIZE - 1);
56 uint32_t item_description_bytes = num_items * sizeof(struct kcdata_item);
57 uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);
58
59 return max_padding_bytes + item_description_bytes + begin_and_end_marker_bytes + payload_size;
60}
61
62kcdata_descriptor_t kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
63{
64 kcdata_descriptor_t data = NULL;
65 mach_vm_address_t user_addr = 0;
66
67 data = kalloc(sizeof(struct kcdata_descriptor));
68 if (data == NULL) {
69 return NULL;
70 }
71 bzero(data, sizeof(struct kcdata_descriptor));
72 data->kcd_addr_begin = buffer_addr_p;
73 data->kcd_addr_end = buffer_addr_p;
74 data->kcd_flags = (flags & KCFLAG_USE_COPYOUT)? KCFLAG_USE_COPYOUT : KCFLAG_USE_MEMCOPY;
75 data->kcd_length = size;
76
77 /* Initialize the BEGIN header */
78 if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)){
79 kcdata_memory_destroy(data);
80 return NULL;
81 }
82
83 return data;
84}
85
86kern_return_t kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
87{
88 mach_vm_address_t user_addr = 0;
89
90 if (data == NULL) {
91 return KERN_INVALID_ARGUMENT;
92 }
93 bzero(data, sizeof(struct kcdata_descriptor));
94 data->kcd_addr_begin = buffer_addr_p;
95 data->kcd_addr_end = buffer_addr_p;
96 data->kcd_flags = (flags & KCFLAG_USE_COPYOUT)? KCFLAG_USE_COPYOUT : KCFLAG_USE_MEMCOPY;
97 data->kcd_length = size;
98
99 /* Initialize the BEGIN header */
100 return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
101}
102
103void *kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
104{
105 if (data == NULL) {
106 return NULL;
107 }
108
109 return (void *)data->kcd_addr_begin;
110}
111
112uint64_t kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
113{
114 assert(kcd != NULL);
115 return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
116}
117
118/*
119 * Free up the memory associated with kcdata
120 */
121kern_return_t kcdata_memory_destroy(kcdata_descriptor_t data)
122{
123 if (!data) {
124 return KERN_INVALID_ARGUMENT;
125 }
126
127 /*
128 * data->kcd_addr_begin points to memory in not tracked by
129 * kcdata lib. So not clearing that here.
130 */
131 kfree(data, sizeof(struct kcdata_descriptor));
132 return KERN_SUCCESS;
133}
134
135
136
137/*
138 * Routine: kcdata_get_memory_addr
139 * Desc: get memory address in the userspace memory for corpse info
140 * NOTE: The caller is responsible for zeroing the resulting memory or
141 * using other means to mark memory if it has failed populating the
142 * data in middle of operation.
143 * params: data - pointer describing the crash info allocation
144 * type - type of data to be put. See corpse.h for defined types
145 * size - size requested. The header describes this size
146 * returns: mach_vm_address_t address in user memory for copyout().
147 */
148kern_return_t
149kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
150{
151 /* record number of padding bytes as lower 4 bits of flags */
152 uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
153 return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
154}
155
156/*
157 * Routine: kcdata_add_buffer_end
158 *
159 * Desc: Write buffer end marker. This does not advance the end pointer in the
160 * kcdata_descriptor_t, so it may be used conservatively before additional data
161 * is added, as long as it is at least called after the last time data is added.
162 *
163 * params: data - pointer describing the crash info allocation
164 */
165
166kern_return_t
167kcdata_write_buffer_end(kcdata_descriptor_t data)
168{
169 struct kcdata_item info;
170 bzero(&info, sizeof(info));
171 info.type = KCDATA_TYPE_BUFFER_END;
172 info.size = 0;
173 return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
174}
175
176/*
177 * Routine: kcdata_get_memory_addr_with_flavor
178 * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
179 */
180
static kern_return_t kcdata_get_memory_addr_with_flavor(
	kcdata_descriptor_t data,
	uint32_t type,
	uint32_t size,
	uint64_t flags,
	mach_vm_address_t *user_addr)
{
	kern_return_t kr;
	struct kcdata_item info;

	uint32_t orig_size = size;
	/* make sure 16 byte aligned */
	uint32_t padding = kcdata_calc_padding(size);
	size += padding;
	uint32_t total_size = size + sizeof(info);

	/* the "< orig_size" comparison detects 32-bit wraparound in the padded
	 * size arithmetic above, since the padded total can only have grown */
	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
		return KERN_INVALID_ARGUMENT;
	}

	/* zero the whole header so padding bytes never leak stack contents */
	bzero(&info, sizeof(info));
	info.type = type;
	info.size = size;
	info.flags = flags;

	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
	if (total_size + sizeof(info) > data->kcd_length ||
	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/* write the item header at the current end of the buffer */
	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
	if (kr)
		return kr;

	data->kcd_addr_end += sizeof(info);

	if (padding) {
		/* zero only the pad bytes at the tail of the payload area */
		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
		if (kr)
			return kr;
	}

	/* hand the payload area to the caller and advance past it */
	*user_addr = data->kcd_addr_end;
	data->kcd_addr_end += size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}
234
235/*
236 * Routine: kcdata_get_memory_addr_for_array
237 * Desc: get memory address in the userspace memory for corpse info
238 * NOTE: The caller is responsible to zero the resulting memory or
239 * user other means to mark memory if it has failed populating the
240 * data in middle of operation.
241 * params: data - pointer describing the crash info allocation
242 * type_of_element - type of data to be put. See kern_cdata.h for defined types
243 * size_of_element - size of element. The header describes this size
244 * count - num of elements in array.
245 * returns: mach_vm_address_t address in user memory for copyout().
246 */
247
248kern_return_t kcdata_get_memory_addr_for_array(
249 kcdata_descriptor_t data,
250 uint32_t type_of_element,
251 uint32_t size_of_element,
252 uint32_t count,
253 mach_vm_address_t *user_addr)
254{
255 /* for arrays we record the number of padding bytes as the low-order 4 bits
256 * of the type field. KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
257 uint64_t flags = type_of_element;
258 flags = (flags << 32) | count;
259 uint32_t total_size = count * size_of_element;
260 uint32_t pad = kcdata_calc_padding(total_size);
261
262 return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
263}
264
265/*
266 * Routine: kcdata_add_container_marker
267 * Desc: Add a container marker in the buffer for type and identifier.
268 * params: data - pointer describing the crash info allocation
269 * header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN ,KCDATA_TYPE_CONTAINER_END)
270 * container_type - type of data to be put. See kern_cdata.h for defined types
271 * identifier - unique identifier. This is required to match nested containers.
272 * returns: return value of kcdata_get_memory_addr()
273 */
274
275kern_return_t kcdata_add_container_marker(
276 kcdata_descriptor_t data,
277 uint32_t header_type,
278 uint32_t container_type,
279 uint64_t identifier)
280{
281 mach_vm_address_t user_addr;
282 kern_return_t kr;
283 assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);
284 uint32_t data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0;
285 kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
286 if (kr != KERN_SUCCESS)
287 return kr;
288
289 if (data_size)
290 kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
291 return kr;
292}
293
294/*
295 * Routine: kcdata_undo_addcontainer_begin
296 * Desc: call this after adding a container begin but before adding anything else to revert.
297 */
298kern_return_t
299kcdata_undo_add_container_begin(kcdata_descriptor_t data)
300{
301 /*
302 * the payload of a container begin is a single uint64_t. It is padded out
303 * to 16 bytes.
304 */
305 const mach_vm_address_t padded_payload_size = 16;
306 data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;
307
308 if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
309 /* setup the end header as well */
310 return kcdata_write_buffer_end(data);
311 } else {
312 return KERN_SUCCESS;
313 }
314}
315
316/*
317 * Routine: kcdata_memcpy
318 * Desc: a common function to copy data out based on either copyout or memcopy flags
319 * params: data - pointer describing the kcdata buffer
320 * dst_addr - destination address
321 * src_addr - source address
322 * size - size in bytes to copy.
323 * returns: KERN_NO_ACCESS if copyout fails.
324 */
325
326kern_return_t kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
327{
328 if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
329 if (copyout(src_addr, dst_addr, size))
330 return KERN_NO_ACCESS;
331 } else {
332 memcpy((void *)dst_addr, src_addr, size);
333 }
334 return KERN_SUCCESS;
335}
336
337/*
338 * Routine: kcdata_bzero
339 * Desc: zero out a portion of a kcdata buffer.
340 */
341kern_return_t
342kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
343{
344 kern_return_t kr = KERN_SUCCESS;
345 if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
346 uint8_t zeros[16] = {};
347 while (size) {
348 uint32_t block_size = MIN(size, 16);
349 kr = copyout(&zeros, dst_addr, block_size);
350 if (kr)
351 return KERN_NO_ACCESS;
352 size -= block_size;
353 }
354 return KERN_SUCCESS;
355 } else {
356 bzero((void*)dst_addr, size);
357 return KERN_SUCCESS;
358 }
359}
360
361/*
362 * Routine: kcdata_add_type_definition
363 * Desc: add type definition to kcdata buffer.
364 * see feature description in documentation above.
365 * params: data - pointer describing the kcdata buffer
366 * type_id - unique type identifier for this data
367 * type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
368 * elements_array - address to descriptors for each field in struct
369 * elements_count - count of how many fields are there in struct.
370 * returns: return code from kcdata_get_memory_addr in case of failure.
371 */
372
373kern_return_t kcdata_add_type_definition(
374 kcdata_descriptor_t data,
375 uint32_t type_id,
376 char *type_name,
377 struct kcdata_subtype_descriptor *elements_array_addr,
378 uint32_t elements_count)
379{
380 kern_return_t kr = KERN_SUCCESS;
381 struct kcdata_type_definition kc_type_definition;
382 mach_vm_address_t user_addr;
383 uint32_t total_size = sizeof(struct kcdata_type_definition);
384 bzero(&kc_type_definition, sizeof(kc_type_definition));
385
386 if (strlen(type_name) >= KCDATA_DESC_MAXLEN)
387 return KERN_INVALID_ARGUMENT;
388 strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
389 kc_type_definition.kct_num_elements = elements_count;
390 kc_type_definition.kct_type_identifier = type_id;
391
392 total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
393 /* record number of padding bytes as lower 4 bits of flags */
394 if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
395 kcdata_calc_padding(total_size), &user_addr)))
396 return kr;
397 if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition))))
398 return kr;
399 user_addr += sizeof(struct kcdata_type_definition);
400 if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor))))
401 return kr;
402 return kr;
403}
404
#pragma pack(4)

/* Internal structs for convenience */
/* Payload layout for KCDATA_TYPE_UINT64_DESC items: a fixed-size description
 * string followed by the value. Packed so the on-buffer layout is stable. */
struct _uint64_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint64_t data;
};

/* Payload layout for KCDATA_TYPE_UINT32_DESC items. */
struct _uint32_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint32_t data;
};

#pragma pack()
419
420kern_return_t
421kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
422{
423 if (strlen(description) >= KCDATA_DESC_MAXLEN)
424 return KERN_INVALID_ARGUMENT;
425
426 kern_return_t kr = 0;
427 mach_vm_address_t user_addr;
428 struct _uint64_with_description_data save_data;
429 const uint64_t size_req = sizeof(save_data);
430 bzero(&save_data, size_req);
431
432 strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
433 save_data.data = data;
434
435 kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
436 if (kr != KERN_SUCCESS)
437 return kr;
438
439 if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
440 if (copyout(&save_data, user_addr, size_req))
441 return KERN_NO_ACCESS;
442 } else {
443 memcpy((void *)user_addr, &save_data, size_req);
444 }
445 return KERN_SUCCESS;
446}
447
448kern_return_t kcdata_add_uint32_with_description(
449 kcdata_descriptor_t data_desc,
450 uint32_t data,
451 const char *description)
452{
453 assert(strlen(description) < KCDATA_DESC_MAXLEN);
454 if (strlen(description) >= KCDATA_DESC_MAXLEN)
455 return KERN_INVALID_ARGUMENT;
456 kern_return_t kr = 0;
457 mach_vm_address_t user_addr;
458 struct _uint32_with_description_data save_data;
459 const uint64_t size_req = sizeof(save_data);
460
461 bzero(&save_data, size_req);
462 strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
463 save_data.data = data;
464
465 kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
466 if (kr != KERN_SUCCESS)
467 return kr;
468 if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
469 if (copyout(&save_data, user_addr, size_req))
470 return KERN_NO_ACCESS;
471 } else {
472 memcpy((void *)user_addr, &save_data, size_req);
473 }
474 return KERN_SUCCESS;
475}
476
477
478/* end buffer management api */
479