1/*
2 * IDENTIFICATION:
3 * stub generated by bootstrap_cmds-133
4 * OPTIONS:
5 * KernelServer
6 */
7
8/* Module mach_vm */
9
10#define __MIG_check__Request__mach_vm_subsystem__ 1
11
12#include "mach_vm_server.h"
13
14#ifndef mig_internal
15#define mig_internal static __inline__
16#endif /* mig_internal */
17
18#ifndef mig_external
19#define mig_external
20#endif /* mig_external */
21
22#if !defined(__MigTypeCheck) && defined(TypeCheck)
23#define __MigTypeCheck TypeCheck /* Legacy setting */
24#endif /* !defined(__MigTypeCheck) */
25
26#if !defined(__MigKernelSpecificCode) && defined(_MIG_KERNEL_SPECIFIC_CODE_)
27#define __MigKernelSpecificCode _MIG_KERNEL_SPECIFIC_CODE_ /* Legacy setting */
28#endif /* !defined(__MigKernelSpecificCode) */
29
30#ifndef LimitCheck
31#define LimitCheck 0
32#endif /* LimitCheck */
33
34#ifndef min
35#define min(a,b) ( ((a) < (b))? (a): (b) )
36#endif /* min */
37
38#if !defined(_WALIGN_)
39#define _WALIGN_(x) (((x) + 3) & ~3)
40#endif /* !defined(_WALIGN_) */
41
42#if !defined(_WALIGNSZ_)
43#define _WALIGNSZ_(x) _WALIGN_(sizeof(x))
44#endif /* !defined(_WALIGNSZ_) */
45
46#ifndef UseStaticTemplates
47#define UseStaticTemplates 0
48#endif /* UseStaticTemplates */
49
50#ifndef MIG_SERVER_ROUTINE
51#define MIG_SERVER_ROUTINE
52#endif
53
54#ifndef __DeclareRcvRpc
55#define __DeclareRcvRpc(_NUM_, _NAME_)
56#endif /* __DeclareRcvRpc */
57
58#ifndef __BeforeRcvRpc
59#define __BeforeRcvRpc(_NUM_, _NAME_)
60#endif /* __BeforeRcvRpc */
61
62#ifndef __AfterRcvRpc
63#define __AfterRcvRpc(_NUM_, _NAME_)
64#endif /* __AfterRcvRpc */
65
66#ifndef __DeclareRcvSimple
67#define __DeclareRcvSimple(_NUM_, _NAME_)
68#endif /* __DeclareRcvSimple */
69
70#ifndef __BeforeRcvSimple
71#define __BeforeRcvSimple(_NUM_, _NAME_)
72#endif /* __BeforeRcvSimple */
73
74#ifndef __AfterRcvSimple
75#define __AfterRcvSimple(_NUM_, _NAME_)
76#endif /* __AfterRcvSimple */
77
78#define novalue void
79
80#if __MigKernelSpecificCode
81#define msgh_request_port msgh_remote_port
82#define MACH_MSGH_BITS_REQUEST(bits) MACH_MSGH_BITS_REMOTE(bits)
83#define msgh_reply_port msgh_local_port
84#define MACH_MSGH_BITS_REPLY(bits) MACH_MSGH_BITS_LOCAL(bits)
85#else
86#define msgh_request_port msgh_local_port
87#define MACH_MSGH_BITS_REQUEST(bits) MACH_MSGH_BITS_LOCAL(bits)
88#define msgh_reply_port msgh_remote_port
89#define MACH_MSGH_BITS_REPLY(bits) MACH_MSGH_BITS_REMOTE(bits)
90#endif /* __MigKernelSpecificCode */
91
92#define MIG_RETURN_ERROR(X, code) {\
93 ((mig_reply_error_t *)X)->RetCode = code;\
94 ((mig_reply_error_t *)X)->NDR = NDR_record;\
95 return;\
96 }
97
98/* Forward Declarations */
99
100
101mig_internal novalue _Xmach_vm_allocate_external
102 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
103
104mig_internal novalue _Xmach_vm_deallocate
105 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
106
107mig_internal novalue _Xmach_vm_protect
108 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
109
110mig_internal novalue _Xmach_vm_inherit
111 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
112
113mig_internal novalue _Xmach_vm_read
114 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
115
116mig_internal novalue _Xmach_vm_read_list
117 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
118
119mig_internal novalue _Xmach_vm_write
120 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
121
122mig_internal novalue _Xmach_vm_copy
123 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
124
125mig_internal novalue _Xmach_vm_read_overwrite
126 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
127
128mig_internal novalue _Xmach_vm_msync
129 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
130
131mig_internal novalue _Xmach_vm_behavior_set
132 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
133
134mig_internal novalue _Xmach_vm_map_external
135 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
136
137mig_internal novalue _Xmach_vm_machine_attribute
138 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
139
140mig_internal novalue _Xmach_vm_remap_external
141 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
142
143mig_internal novalue _Xmach_vm_page_query
144 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
145
146mig_internal novalue _Xmach_vm_region_recurse
147 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
148
149mig_internal novalue _Xmach_vm_region
150 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
151
152mig_internal novalue _X_mach_make_memory_entry
153 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
154
155mig_internal novalue _Xmach_vm_purgable_control_external
156 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
157
158mig_internal novalue _Xmach_vm_page_info
159 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
160
161mig_internal novalue _Xmach_vm_page_range_query
162 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
163
164mig_internal novalue _Xmach_vm_remap_new_external
165 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
166
167mig_internal novalue _Xmach_vm_deferred_reclamation_buffer_init
168 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
169
170mig_internal novalue _Xmach_vm_deferred_reclamation_buffer_synchronize
171 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
172
173mig_internal novalue _Xmach_vm_deferred_reclamation_buffer_update_reclaimable_bytes
174 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
175
176mig_internal novalue _Xmach_vm_range_create
177 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);
178
179
180#if ( __MigTypeCheck )
181#if __MIG_check__Request__mach_vm_subsystem__
182#if !defined(__MIG_check__Request__mach_vm_allocate_external_t__defined)
183#define __MIG_check__Request__mach_vm_allocate_external_t__defined
184
185mig_internal kern_return_t __MIG_check__Request__mach_vm_allocate_external_t(
186 __attribute__((__unused__)) __RequestKData__mach_vm_allocate_external_t *InKP,
187 __attribute__((__unused__)) __RequestUData__mach_vm_allocate_external_t *In0UP,
188 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
189{
190
191 typedef __Request__mach_vm_allocate_external_t __Request;
192 typedef __RequestUData__mach_vm_allocate_external_t __RequestU __attribute__((unused));
193#if __MigTypeCheck
194 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
195 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
196 return MIG_BAD_ARGUMENTS;
197#endif /* __MigTypeCheck */
198
199 return MACH_MSG_SUCCESS;
200}
201#endif /* !defined(__MIG_check__Request__mach_vm_allocate_external_t__defined) */
202#endif /* __MIG_check__Request__mach_vm_subsystem__ */
203#endif /* ( __MigTypeCheck ) */
204
205
206/* Routine mach_vm_allocate_external */
207mig_internal novalue _Xmach_vm_allocate_external
208 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
209 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
210 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
211{
212
213#ifdef __MigPackStructs
214#pragma pack(push, 4)
215#endif
216 typedef struct {
217 NDR_record_t NDR;
218 mach_vm_address_t address;
219 mach_vm_size_t size;
220 int flags;
221 mach_msg_trailer_t trailer;
222 char padding[0]; /* Avoid generating empty UData structs */
223 } RequestU __attribute__((unused));
224#ifdef __MigPackStructs
225#pragma pack(pop)
226#endif
227 typedef __RequestKData__mach_vm_allocate_external_t RequestK;
228 typedef __RequestUData__mach_vm_allocate_external_t __RequestU;
229 typedef __ReplyKData__mach_vm_allocate_external_t ReplyK __attribute__((unused));
230 typedef __ReplyUData__mach_vm_allocate_external_t ReplyU __attribute__((unused));
231 typedef __Reply__mach_vm_allocate_external_t Reply __attribute__((unused));
232 typedef __Request__mach_vm_allocate_external_t __Request __attribute__((unused));
233
234 /*
235 * typedef struct {
236 * mach_msg_header_t Head;
237 * NDR_record_t NDR;
238 * kern_return_t RetCode;
239 * } mig_reply_error_t;
240 */
241
242 RequestK *InKP = (RequestK *) InHeadP;
243 RequestU *In0UP = (RequestU *) InDataP;
244 ReplyK *OutKP = (ReplyK *) OutHeadP;
245 ReplyU *OutUP = (ReplyU *) OutDataP;
246 (void)OutUP;
247#ifdef __MIG_check__Request__mach_vm_allocate_external_t__defined
248 kern_return_t check_result;
249#endif /* __MIG_check__Request__mach_vm_allocate_external_t__defined */
250
251#if __MigKernelSpecificCode
252#else
253#endif /* __MigKernelSpecificCode */
254 vm_map_t target;
255
256 __DeclareRcvRpc(4800, "mach_vm_allocate_external")
257 __BeforeRcvRpc(4800, "mach_vm_allocate_external")
258
259#if defined(__MIG_check__Request__mach_vm_allocate_external_t__defined)
260 check_result = __MIG_check__Request__mach_vm_allocate_external_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
261 if (check_result != MACH_MSG_SUCCESS)
262 { MIG_RETURN_ERROR(OutKP, check_result); }
263#endif /* defined(__MIG_check__Request__mach_vm_allocate_external_t__defined) */
264
265 target = convert_port_entry_to_map(port: InKP->Head.msgh_request_port);
266
267 OutUP->RetCode = mach_vm_allocate_external(target, address: &In0UP->address, size: In0UP->size, flags: In0UP->flags);
268 vm_map_deallocate(map: target);
269 if (OutUP->RetCode != KERN_SUCCESS) {
270 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
271 }
272#if __MigKernelSpecificCode
273#endif /* __MigKernelSpecificCode */
274
275 OutUP->NDR = NDR_record;
276
277
278 OutUP->address = In0UP->address;
279
280 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
281 __AfterRcvRpc(4800, "mach_vm_allocate_external")
282}
283
284#if ( __MigTypeCheck )
285#if __MIG_check__Request__mach_vm_subsystem__
286#if !defined(__MIG_check__Request__mach_vm_deallocate_t__defined)
287#define __MIG_check__Request__mach_vm_deallocate_t__defined
288
289mig_internal kern_return_t __MIG_check__Request__mach_vm_deallocate_t(
290 __attribute__((__unused__)) __RequestKData__mach_vm_deallocate_t *InKP,
291 __attribute__((__unused__)) __RequestUData__mach_vm_deallocate_t *In0UP,
292 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
293{
294
295 typedef __Request__mach_vm_deallocate_t __Request;
296 typedef __RequestUData__mach_vm_deallocate_t __RequestU __attribute__((unused));
297#if __MigTypeCheck
298 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
299 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
300 return MIG_BAD_ARGUMENTS;
301#endif /* __MigTypeCheck */
302
303 return MACH_MSG_SUCCESS;
304}
305#endif /* !defined(__MIG_check__Request__mach_vm_deallocate_t__defined) */
306#endif /* __MIG_check__Request__mach_vm_subsystem__ */
307#endif /* ( __MigTypeCheck ) */
308
309
310/* Routine mach_vm_deallocate */
311mig_internal novalue _Xmach_vm_deallocate
312 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
313 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
314 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
315{
316
317#ifdef __MigPackStructs
318#pragma pack(push, 4)
319#endif
320 typedef struct {
321 NDR_record_t NDR;
322 mach_vm_address_t address;
323 mach_vm_size_t size;
324 mach_msg_trailer_t trailer;
325 char padding[0]; /* Avoid generating empty UData structs */
326 } RequestU __attribute__((unused));
327#ifdef __MigPackStructs
328#pragma pack(pop)
329#endif
330 typedef __RequestKData__mach_vm_deallocate_t RequestK;
331 typedef __RequestUData__mach_vm_deallocate_t __RequestU;
332 typedef __ReplyKData__mach_vm_deallocate_t ReplyK __attribute__((unused));
333 typedef __ReplyUData__mach_vm_deallocate_t ReplyU __attribute__((unused));
334 typedef __Reply__mach_vm_deallocate_t Reply __attribute__((unused));
335 typedef __Request__mach_vm_deallocate_t __Request __attribute__((unused));
336
337 /*
338 * typedef struct {
339 * mach_msg_header_t Head;
340 * NDR_record_t NDR;
341 * kern_return_t RetCode;
342 * } mig_reply_error_t;
343 */
344
345 RequestK *InKP = (RequestK *) InHeadP;
346 RequestU *In0UP = (RequestU *) InDataP;
347 ReplyK *OutKP = (ReplyK *) OutHeadP;
348 ReplyU *OutUP = (ReplyU *) OutDataP;
349 (void)OutUP;
350#ifdef __MIG_check__Request__mach_vm_deallocate_t__defined
351 kern_return_t check_result;
352#endif /* __MIG_check__Request__mach_vm_deallocate_t__defined */
353
354#if __MigKernelSpecificCode
355#else
356#endif /* __MigKernelSpecificCode */
357 vm_map_t target;
358
359 __DeclareRcvRpc(4801, "mach_vm_deallocate")
360 __BeforeRcvRpc(4801, "mach_vm_deallocate")
361
362#if defined(__MIG_check__Request__mach_vm_deallocate_t__defined)
363 check_result = __MIG_check__Request__mach_vm_deallocate_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
364 if (check_result != MACH_MSG_SUCCESS)
365 { MIG_RETURN_ERROR(OutKP, check_result); }
366#endif /* defined(__MIG_check__Request__mach_vm_deallocate_t__defined) */
367
368 target = convert_port_entry_to_map(port: InKP->Head.msgh_request_port);
369
370 OutUP->RetCode = mach_vm_deallocate(target, address: In0UP->address, size: In0UP->size);
371 vm_map_deallocate(map: target);
372#if __MigKernelSpecificCode
373#endif /* __MigKernelSpecificCode */
374
375 OutUP->NDR = NDR_record;
376
377
378 __AfterRcvRpc(4801, "mach_vm_deallocate")
379}
380
381#if ( __MigTypeCheck )
382#if __MIG_check__Request__mach_vm_subsystem__
383#if !defined(__MIG_check__Request__mach_vm_protect_t__defined)
384#define __MIG_check__Request__mach_vm_protect_t__defined
385
386mig_internal kern_return_t __MIG_check__Request__mach_vm_protect_t(
387 __attribute__((__unused__)) __RequestKData__mach_vm_protect_t *InKP,
388 __attribute__((__unused__)) __RequestUData__mach_vm_protect_t *In0UP,
389 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
390{
391
392 typedef __Request__mach_vm_protect_t __Request;
393 typedef __RequestUData__mach_vm_protect_t __RequestU __attribute__((unused));
394#if __MigTypeCheck
395 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
396 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
397 return MIG_BAD_ARGUMENTS;
398#endif /* __MigTypeCheck */
399
400 return MACH_MSG_SUCCESS;
401}
402#endif /* !defined(__MIG_check__Request__mach_vm_protect_t__defined) */
403#endif /* __MIG_check__Request__mach_vm_subsystem__ */
404#endif /* ( __MigTypeCheck ) */
405
406
407/* Routine mach_vm_protect */
408mig_internal novalue _Xmach_vm_protect
409 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
410 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
411 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
412{
413
414#ifdef __MigPackStructs
415#pragma pack(push, 4)
416#endif
417 typedef struct {
418 NDR_record_t NDR;
419 mach_vm_address_t address;
420 mach_vm_size_t size;
421 boolean_t set_maximum;
422 vm_prot_t new_protection;
423 mach_msg_trailer_t trailer;
424 char padding[0]; /* Avoid generating empty UData structs */
425 } RequestU __attribute__((unused));
426#ifdef __MigPackStructs
427#pragma pack(pop)
428#endif
429 typedef __RequestKData__mach_vm_protect_t RequestK;
430 typedef __RequestUData__mach_vm_protect_t __RequestU;
431 typedef __ReplyKData__mach_vm_protect_t ReplyK __attribute__((unused));
432 typedef __ReplyUData__mach_vm_protect_t ReplyU __attribute__((unused));
433 typedef __Reply__mach_vm_protect_t Reply __attribute__((unused));
434 typedef __Request__mach_vm_protect_t __Request __attribute__((unused));
435
436 /*
437 * typedef struct {
438 * mach_msg_header_t Head;
439 * NDR_record_t NDR;
440 * kern_return_t RetCode;
441 * } mig_reply_error_t;
442 */
443
444 RequestK *InKP = (RequestK *) InHeadP;
445 RequestU *In0UP = (RequestU *) InDataP;
446 ReplyK *OutKP = (ReplyK *) OutHeadP;
447 ReplyU *OutUP = (ReplyU *) OutDataP;
448 (void)OutUP;
449#ifdef __MIG_check__Request__mach_vm_protect_t__defined
450 kern_return_t check_result;
451#endif /* __MIG_check__Request__mach_vm_protect_t__defined */
452
453#if __MigKernelSpecificCode
454#else
455#endif /* __MigKernelSpecificCode */
456 vm_map_t target_task;
457
458 __DeclareRcvRpc(4802, "mach_vm_protect")
459 __BeforeRcvRpc(4802, "mach_vm_protect")
460
461#if defined(__MIG_check__Request__mach_vm_protect_t__defined)
462 check_result = __MIG_check__Request__mach_vm_protect_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
463 if (check_result != MACH_MSG_SUCCESS)
464 { MIG_RETURN_ERROR(OutKP, check_result); }
465#endif /* defined(__MIG_check__Request__mach_vm_protect_t__defined) */
466
467 target_task = convert_port_entry_to_map(port: InKP->Head.msgh_request_port);
468
469 OutUP->RetCode = mach_vm_protect(target_task, address: In0UP->address, size: In0UP->size, set_maximum: In0UP->set_maximum, new_protection: In0UP->new_protection);
470 vm_map_deallocate(map: target_task);
471#if __MigKernelSpecificCode
472#endif /* __MigKernelSpecificCode */
473
474 OutUP->NDR = NDR_record;
475
476
477 __AfterRcvRpc(4802, "mach_vm_protect")
478}
479
480#if ( __MigTypeCheck )
481#if __MIG_check__Request__mach_vm_subsystem__
482#if !defined(__MIG_check__Request__mach_vm_inherit_t__defined)
483#define __MIG_check__Request__mach_vm_inherit_t__defined
484
485mig_internal kern_return_t __MIG_check__Request__mach_vm_inherit_t(
486 __attribute__((__unused__)) __RequestKData__mach_vm_inherit_t *InKP,
487 __attribute__((__unused__)) __RequestUData__mach_vm_inherit_t *In0UP,
488 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
489{
490
491 typedef __Request__mach_vm_inherit_t __Request;
492 typedef __RequestUData__mach_vm_inherit_t __RequestU __attribute__((unused));
493#if __MigTypeCheck
494 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
495 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
496 return MIG_BAD_ARGUMENTS;
497#endif /* __MigTypeCheck */
498
499 return MACH_MSG_SUCCESS;
500}
501#endif /* !defined(__MIG_check__Request__mach_vm_inherit_t__defined) */
502#endif /* __MIG_check__Request__mach_vm_subsystem__ */
503#endif /* ( __MigTypeCheck ) */
504
505
506/* Routine mach_vm_inherit */
507mig_internal novalue _Xmach_vm_inherit
508 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
509 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
510 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
511{
512
513#ifdef __MigPackStructs
514#pragma pack(push, 4)
515#endif
516 typedef struct {
517 NDR_record_t NDR;
518 mach_vm_address_t address;
519 mach_vm_size_t size;
520 vm_inherit_t new_inheritance;
521 mach_msg_trailer_t trailer;
522 char padding[0]; /* Avoid generating empty UData structs */
523 } RequestU __attribute__((unused));
524#ifdef __MigPackStructs
525#pragma pack(pop)
526#endif
527 typedef __RequestKData__mach_vm_inherit_t RequestK;
528 typedef __RequestUData__mach_vm_inherit_t __RequestU;
529 typedef __ReplyKData__mach_vm_inherit_t ReplyK __attribute__((unused));
530 typedef __ReplyUData__mach_vm_inherit_t ReplyU __attribute__((unused));
531 typedef __Reply__mach_vm_inherit_t Reply __attribute__((unused));
532 typedef __Request__mach_vm_inherit_t __Request __attribute__((unused));
533
534 /*
535 * typedef struct {
536 * mach_msg_header_t Head;
537 * NDR_record_t NDR;
538 * kern_return_t RetCode;
539 * } mig_reply_error_t;
540 */
541
542 RequestK *InKP = (RequestK *) InHeadP;
543 RequestU *In0UP = (RequestU *) InDataP;
544 ReplyK *OutKP = (ReplyK *) OutHeadP;
545 ReplyU *OutUP = (ReplyU *) OutDataP;
546 (void)OutUP;
547#ifdef __MIG_check__Request__mach_vm_inherit_t__defined
548 kern_return_t check_result;
549#endif /* __MIG_check__Request__mach_vm_inherit_t__defined */
550
551#if __MigKernelSpecificCode
552#else
553#endif /* __MigKernelSpecificCode */
554 vm_map_t target_task;
555
556 __DeclareRcvRpc(4803, "mach_vm_inherit")
557 __BeforeRcvRpc(4803, "mach_vm_inherit")
558
559#if defined(__MIG_check__Request__mach_vm_inherit_t__defined)
560 check_result = __MIG_check__Request__mach_vm_inherit_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
561 if (check_result != MACH_MSG_SUCCESS)
562 { MIG_RETURN_ERROR(OutKP, check_result); }
563#endif /* defined(__MIG_check__Request__mach_vm_inherit_t__defined) */
564
565 target_task = convert_port_entry_to_map(port: InKP->Head.msgh_request_port);
566
567 OutUP->RetCode = mach_vm_inherit(target_task, address: In0UP->address, size: In0UP->size, new_inheritance: In0UP->new_inheritance);
568 vm_map_deallocate(map: target_task);
569#if __MigKernelSpecificCode
570#endif /* __MigKernelSpecificCode */
571
572 OutUP->NDR = NDR_record;
573
574
575 __AfterRcvRpc(4803, "mach_vm_inherit")
576}
577
578#if ( __MigTypeCheck )
579#if __MIG_check__Request__mach_vm_subsystem__
580#if !defined(__MIG_check__Request__mach_vm_read_t__defined)
581#define __MIG_check__Request__mach_vm_read_t__defined
582
583mig_internal kern_return_t __MIG_check__Request__mach_vm_read_t(
584 __attribute__((__unused__)) __RequestKData__mach_vm_read_t *InKP,
585 __attribute__((__unused__)) __RequestUData__mach_vm_read_t *In0UP,
586 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
587{
588
589 typedef __Request__mach_vm_read_t __Request;
590 typedef __RequestUData__mach_vm_read_t __RequestU __attribute__((unused));
591#if __MigTypeCheck
592 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
593 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
594 return MIG_BAD_ARGUMENTS;
595#endif /* __MigTypeCheck */
596
597 return MACH_MSG_SUCCESS;
598}
599#endif /* !defined(__MIG_check__Request__mach_vm_read_t__defined) */
600#endif /* __MIG_check__Request__mach_vm_subsystem__ */
601#endif /* ( __MigTypeCheck ) */
602
603
604/* Routine mach_vm_read */
605mig_internal novalue _Xmach_vm_read
606 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
607 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
608 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
609{
610
611#ifdef __MigPackStructs
612#pragma pack(push, 4)
613#endif
614 typedef struct {
615 NDR_record_t NDR;
616 mach_vm_address_t address;
617 mach_vm_size_t size;
618 mach_msg_trailer_t trailer;
619 char padding[0]; /* Avoid generating empty UData structs */
620 } RequestU __attribute__((unused));
621#ifdef __MigPackStructs
622#pragma pack(pop)
623#endif
624 typedef __RequestKData__mach_vm_read_t RequestK;
625 typedef __RequestUData__mach_vm_read_t __RequestU;
626 typedef __ReplyKData__mach_vm_read_t ReplyK __attribute__((unused));
627 typedef __ReplyUData__mach_vm_read_t ReplyU __attribute__((unused));
628 typedef __Reply__mach_vm_read_t Reply __attribute__((unused));
629 typedef __Request__mach_vm_read_t __Request __attribute__((unused));
630
631 /*
632 * typedef struct {
633 * mach_msg_header_t Head;
634 * NDR_record_t NDR;
635 * kern_return_t RetCode;
636 * } mig_reply_error_t;
637 */
638
639 RequestK *InKP = (RequestK *) InHeadP;
640 RequestU *In0UP = (RequestU *) InDataP;
641 ReplyK *OutKP = (ReplyK *) OutHeadP;
642 ReplyU *OutUP = (ReplyU *) OutDataP;
643 (void)OutUP;
644#ifdef __MIG_check__Request__mach_vm_read_t__defined
645 kern_return_t check_result;
646#endif /* __MIG_check__Request__mach_vm_read_t__defined */
647
648#if __MigKernelSpecificCode
649#if UseStaticTemplates
650 const static mach_msg_ool_descriptor_t dataTemplate = {
651 /* addr = */ (void *)0,
652 /* size = */ 0,
653 /* deal = */ FALSE,
654 /* copy = */ MACH_MSG_VIRTUAL_COPY,
655 /* pad2 = */ 0,
656 /* type = */ MACH_MSG_OOL_DESCRIPTOR,
657 };
658#endif /* UseStaticTemplates */
659
660#else
661#if UseStaticTemplates
662 const static mach_msg_ool_descriptor_t dataTemplate = {
663 /* addr = */ (void *)0,
664 /* size = */ 0,
665 /* deal = */ FALSE,
666 /* copy = */ MACH_MSG_VIRTUAL_COPY,
667 /* pad2 = */ 0,
668 /* type = */ MACH_MSG_OOL_DESCRIPTOR,
669 };
670#endif /* UseStaticTemplates */
671
672#endif /* __MigKernelSpecificCode */
673 kern_return_t RetCode;
674 vm_map_read_t target_task;
675
676 __DeclareRcvRpc(4804, "mach_vm_read")
677 __BeforeRcvRpc(4804, "mach_vm_read")
678
679#if defined(__MIG_check__Request__mach_vm_read_t__defined)
680 check_result = __MIG_check__Request__mach_vm_read_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
681 if (check_result != MACH_MSG_SUCCESS)
682 { MIG_RETURN_ERROR(OutKP, check_result); }
683#endif /* defined(__MIG_check__Request__mach_vm_read_t__defined) */
684
685#if UseStaticTemplates
686 OutKP->data = dataTemplate;
687#else /* UseStaticTemplates */
688 OutKP->data.deallocate = FALSE;
689 OutKP->data.copy = MACH_MSG_VIRTUAL_COPY;
690 OutKP->data.pad1 = 0;
691 OutKP->data.type = MACH_MSG_OOL_DESCRIPTOR;
692#if defined(KERNEL) && !defined(__LP64__)
693 OutKP->data.pad_end = 0;
694#endif
695#endif /* UseStaticTemplates */
696
697
698 target_task = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
699
700 RetCode = mach_vm_read(target_task, address: In0UP->address, size: In0UP->size, data: (vm_offset_t *)&(OutKP->data.address), dataCnt: &OutUP->dataCnt);
701 vm_map_read_deallocate(map: target_task);
702 if (RetCode != KERN_SUCCESS) {
703 MIG_RETURN_ERROR(OutKP, RetCode);
704 }
705#if __MigKernelSpecificCode
706#endif /* __MigKernelSpecificCode */
707 OutKP->data.size = OutUP->dataCnt;
708
709
710 OutUP->NDR = NDR_record;
711
712
713 OutKP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
714 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
715 OutKP->msgh_body.msgh_descriptor_count = 1;
716 __AfterRcvRpc(4804, "mach_vm_read")
717}
718
719#if ( __MigTypeCheck )
720#if __MIG_check__Request__mach_vm_subsystem__
721#if !defined(__MIG_check__Request__mach_vm_read_list_t__defined)
722#define __MIG_check__Request__mach_vm_read_list_t__defined
723
724mig_internal kern_return_t __MIG_check__Request__mach_vm_read_list_t(
725 __attribute__((__unused__)) __RequestKData__mach_vm_read_list_t *InKP,
726 __attribute__((__unused__)) __RequestUData__mach_vm_read_list_t *In0UP,
727 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
728{
729
730 typedef __Request__mach_vm_read_list_t __Request;
731 typedef __RequestUData__mach_vm_read_list_t __RequestU __attribute__((unused));
732#if __MigTypeCheck
733 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
734 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
735 return MIG_BAD_ARGUMENTS;
736#endif /* __MigTypeCheck */
737
738 return MACH_MSG_SUCCESS;
739}
740#endif /* !defined(__MIG_check__Request__mach_vm_read_list_t__defined) */
741#endif /* __MIG_check__Request__mach_vm_subsystem__ */
742#endif /* ( __MigTypeCheck ) */
743
744
745/* Routine mach_vm_read_list */
746mig_internal novalue _Xmach_vm_read_list
747 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
748 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
749 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
750{
751
752#ifdef __MigPackStructs
753#pragma pack(push, 4)
754#endif
755 typedef struct {
756 NDR_record_t NDR;
757 mach_vm_read_entry_t data_list;
758 natural_t count;
759 mach_msg_trailer_t trailer;
760 char padding[0]; /* Avoid generating empty UData structs */
761 } RequestU __attribute__((unused));
762#ifdef __MigPackStructs
763#pragma pack(pop)
764#endif
765 typedef __RequestKData__mach_vm_read_list_t RequestK;
766 typedef __RequestUData__mach_vm_read_list_t __RequestU;
767 typedef __ReplyKData__mach_vm_read_list_t ReplyK __attribute__((unused));
768 typedef __ReplyUData__mach_vm_read_list_t ReplyU __attribute__((unused));
769 typedef __Reply__mach_vm_read_list_t Reply __attribute__((unused));
770 typedef __Request__mach_vm_read_list_t __Request __attribute__((unused));
771
772 /*
773 * typedef struct {
774 * mach_msg_header_t Head;
775 * NDR_record_t NDR;
776 * kern_return_t RetCode;
777 * } mig_reply_error_t;
778 */
779
780 RequestK *InKP = (RequestK *) InHeadP;
781 RequestU *In0UP = (RequestU *) InDataP;
782 ReplyK *OutKP = (ReplyK *) OutHeadP;
783 ReplyU *OutUP = (ReplyU *) OutDataP;
784 (void)OutUP;
785#ifdef __MIG_check__Request__mach_vm_read_list_t__defined
786 kern_return_t check_result;
787#endif /* __MIG_check__Request__mach_vm_read_list_t__defined */
788
789#if __MigKernelSpecificCode
790#else
791#endif /* __MigKernelSpecificCode */
792 vm_map_read_t target_task;
793
794 __DeclareRcvRpc(4805, "mach_vm_read_list")
795 __BeforeRcvRpc(4805, "mach_vm_read_list")
796
797#if defined(__MIG_check__Request__mach_vm_read_list_t__defined)
798 check_result = __MIG_check__Request__mach_vm_read_list_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
799 if (check_result != MACH_MSG_SUCCESS)
800 { MIG_RETURN_ERROR(OutKP, check_result); }
801#endif /* defined(__MIG_check__Request__mach_vm_read_list_t__defined) */
802
803 target_task = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
804
805 OutUP->RetCode = mach_vm_read_list(target_task, data_list: In0UP->data_list, count: In0UP->count);
806 vm_map_read_deallocate(map: target_task);
807 if (OutUP->RetCode != KERN_SUCCESS) {
808 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
809 }
810#if __MigKernelSpecificCode
811#endif /* __MigKernelSpecificCode */
812
813 OutUP->NDR = NDR_record;
814
815
816 { typedef struct { char data[4096]; } *sp;
817 * (sp) OutUP->data_list = * (sp) In0UP->data_list;
818 }
819
820 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
821 __AfterRcvRpc(4805, "mach_vm_read_list")
822}
823
824#if ( __MigTypeCheck )
825#if __MIG_check__Request__mach_vm_subsystem__
826#if !defined(__MIG_check__Request__mach_vm_write_t__defined)
827#define __MIG_check__Request__mach_vm_write_t__defined
828
829mig_internal kern_return_t __MIG_check__Request__mach_vm_write_t(
830 __attribute__((__unused__)) __RequestKData__mach_vm_write_t *InKP,
831 __attribute__((__unused__)) __RequestUData__mach_vm_write_t *In0UP,
832 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
833{
834
835 typedef __Request__mach_vm_write_t __Request;
836 typedef __RequestUData__mach_vm_write_t __RequestU __attribute__((unused));
837#if __MigTypeCheck
838 if (!(InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
839 (InKP->msgh_body.msgh_descriptor_count != 1) ||
840 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
841 return MIG_BAD_ARGUMENTS;
842#endif /* __MigTypeCheck */
843
844#if __MigTypeCheck
845 if (InKP->data.type != MACH_MSG_OOL_DESCRIPTOR)
846 return MIG_TYPE_ERROR;
847#endif /* __MigTypeCheck */
848
849#if __MigTypeCheck
850 if (InKP->data.size != In0UP->dataCnt)
851 return MIG_TYPE_ERROR;
852#endif /* __MigTypeCheck */
853
854 return MACH_MSG_SUCCESS;
855}
856#endif /* !defined(__MIG_check__Request__mach_vm_write_t__defined) */
857#endif /* __MIG_check__Request__mach_vm_subsystem__ */
858#endif /* ( __MigTypeCheck ) */
859
860
861/* Routine mach_vm_write */
862mig_internal novalue _Xmach_vm_write
863 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
864 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
865 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
866{
867
868#ifdef __MigPackStructs
869#pragma pack(push, 4)
870#endif
871 typedef struct {
872 NDR_record_t NDR;
873 mach_vm_address_t address;
874 mach_msg_type_number_t dataCnt;
875 mach_msg_trailer_t trailer;
876 char padding[0]; /* Avoid generating empty UData structs */
877 } RequestU __attribute__((unused));
878#ifdef __MigPackStructs
879#pragma pack(pop)
880#endif
881 typedef __RequestKData__mach_vm_write_t RequestK;
882 typedef __RequestUData__mach_vm_write_t __RequestU;
883 typedef __ReplyKData__mach_vm_write_t ReplyK __attribute__((unused));
884 typedef __ReplyUData__mach_vm_write_t ReplyU __attribute__((unused));
885 typedef __Reply__mach_vm_write_t Reply __attribute__((unused));
886 typedef __Request__mach_vm_write_t __Request __attribute__((unused));
887
888 /*
889 * typedef struct {
890 * mach_msg_header_t Head;
891 * NDR_record_t NDR;
892 * kern_return_t RetCode;
893 * } mig_reply_error_t;
894 */
895
896 RequestK *InKP = (RequestK *) InHeadP;
897 RequestU *In0UP = (RequestU *) InDataP;
898 ReplyK *OutKP = (ReplyK *) OutHeadP;
899 ReplyU *OutUP = (ReplyU *) OutDataP;
900 (void)OutUP;
901#ifdef __MIG_check__Request__mach_vm_write_t__defined
902 kern_return_t check_result;
903#endif /* __MIG_check__Request__mach_vm_write_t__defined */
904
905#if __MigKernelSpecificCode
906#else
907#endif /* __MigKernelSpecificCode */
908 vm_map_t target_task;
909
910 __DeclareRcvRpc(4806, "mach_vm_write")
911 __BeforeRcvRpc(4806, "mach_vm_write")
912
913#if defined(__MIG_check__Request__mach_vm_write_t__defined)
914 check_result = __MIG_check__Request__mach_vm_write_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
915 if (check_result != MACH_MSG_SUCCESS)
916 { MIG_RETURN_ERROR(OutKP, check_result); }
917#endif /* defined(__MIG_check__Request__mach_vm_write_t__defined) */
918
919 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
920
921 OutUP->RetCode = mach_vm_write(target_task, address: In0UP->address, data: (vm_offset_t)(InKP->data.address), dataCnt: InKP->data.size);
922 vm_map_deallocate(map: target_task);
923#if __MigKernelSpecificCode
924#endif /* __MigKernelSpecificCode */
925
926 OutUP->NDR = NDR_record;
927
928
929 __AfterRcvRpc(4806, "mach_vm_write")
930}
931
932#if ( __MigTypeCheck )
933#if __MIG_check__Request__mach_vm_subsystem__
934#if !defined(__MIG_check__Request__mach_vm_copy_t__defined)
935#define __MIG_check__Request__mach_vm_copy_t__defined
936
937mig_internal kern_return_t __MIG_check__Request__mach_vm_copy_t(
938 __attribute__((__unused__)) __RequestKData__mach_vm_copy_t *InKP,
939 __attribute__((__unused__)) __RequestUData__mach_vm_copy_t *In0UP,
940 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
941{
942
943 typedef __Request__mach_vm_copy_t __Request;
944 typedef __RequestUData__mach_vm_copy_t __RequestU __attribute__((unused));
945#if __MigTypeCheck
946 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
947 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
948 return MIG_BAD_ARGUMENTS;
949#endif /* __MigTypeCheck */
950
951 return MACH_MSG_SUCCESS;
952}
953#endif /* !defined(__MIG_check__Request__mach_vm_copy_t__defined) */
954#endif /* __MIG_check__Request__mach_vm_subsystem__ */
955#endif /* ( __MigTypeCheck ) */
956
957
958/* Routine mach_vm_copy */
959mig_internal novalue _Xmach_vm_copy
960 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
961 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
962 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
963{
964
965#ifdef __MigPackStructs
966#pragma pack(push, 4)
967#endif
968 typedef struct {
969 NDR_record_t NDR;
970 mach_vm_address_t source_address;
971 mach_vm_size_t size;
972 mach_vm_address_t dest_address;
973 mach_msg_trailer_t trailer;
974 char padding[0]; /* Avoid generating empty UData structs */
975 } RequestU __attribute__((unused));
976#ifdef __MigPackStructs
977#pragma pack(pop)
978#endif
979 typedef __RequestKData__mach_vm_copy_t RequestK;
980 typedef __RequestUData__mach_vm_copy_t __RequestU;
981 typedef __ReplyKData__mach_vm_copy_t ReplyK __attribute__((unused));
982 typedef __ReplyUData__mach_vm_copy_t ReplyU __attribute__((unused));
983 typedef __Reply__mach_vm_copy_t Reply __attribute__((unused));
984 typedef __Request__mach_vm_copy_t __Request __attribute__((unused));
985
986 /*
987 * typedef struct {
988 * mach_msg_header_t Head;
989 * NDR_record_t NDR;
990 * kern_return_t RetCode;
991 * } mig_reply_error_t;
992 */
993
994 RequestK *InKP = (RequestK *) InHeadP;
995 RequestU *In0UP = (RequestU *) InDataP;
996 ReplyK *OutKP = (ReplyK *) OutHeadP;
997 ReplyU *OutUP = (ReplyU *) OutDataP;
998 (void)OutUP;
999#ifdef __MIG_check__Request__mach_vm_copy_t__defined
1000 kern_return_t check_result;
1001#endif /* __MIG_check__Request__mach_vm_copy_t__defined */
1002
1003#if __MigKernelSpecificCode
1004#else
1005#endif /* __MigKernelSpecificCode */
1006 vm_map_t target_task;
1007
1008 __DeclareRcvRpc(4807, "mach_vm_copy")
1009 __BeforeRcvRpc(4807, "mach_vm_copy")
1010
1011#if defined(__MIG_check__Request__mach_vm_copy_t__defined)
1012 check_result = __MIG_check__Request__mach_vm_copy_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1013 if (check_result != MACH_MSG_SUCCESS)
1014 { MIG_RETURN_ERROR(OutKP, check_result); }
1015#endif /* defined(__MIG_check__Request__mach_vm_copy_t__defined) */
1016
1017 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
1018
1019 OutUP->RetCode = mach_vm_copy(target_task, source_address: In0UP->source_address, size: In0UP->size, dest_address: In0UP->dest_address);
1020 vm_map_deallocate(map: target_task);
1021#if __MigKernelSpecificCode
1022#endif /* __MigKernelSpecificCode */
1023
1024 OutUP->NDR = NDR_record;
1025
1026
1027 __AfterRcvRpc(4807, "mach_vm_copy")
1028}
1029
1030#if ( __MigTypeCheck )
1031#if __MIG_check__Request__mach_vm_subsystem__
1032#if !defined(__MIG_check__Request__mach_vm_read_overwrite_t__defined)
1033#define __MIG_check__Request__mach_vm_read_overwrite_t__defined
1034
1035mig_internal kern_return_t __MIG_check__Request__mach_vm_read_overwrite_t(
1036 __attribute__((__unused__)) __RequestKData__mach_vm_read_overwrite_t *InKP,
1037 __attribute__((__unused__)) __RequestUData__mach_vm_read_overwrite_t *In0UP,
1038 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1039{
1040
1041 typedef __Request__mach_vm_read_overwrite_t __Request;
1042 typedef __RequestUData__mach_vm_read_overwrite_t __RequestU __attribute__((unused));
1043#if __MigTypeCheck
1044 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1045 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1046 return MIG_BAD_ARGUMENTS;
1047#endif /* __MigTypeCheck */
1048
1049 return MACH_MSG_SUCCESS;
1050}
1051#endif /* !defined(__MIG_check__Request__mach_vm_read_overwrite_t__defined) */
1052#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1053#endif /* ( __MigTypeCheck ) */
1054
1055
1056/* Routine mach_vm_read_overwrite */
1057mig_internal novalue _Xmach_vm_read_overwrite
1058 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1059 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1060 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1061{
1062
1063#ifdef __MigPackStructs
1064#pragma pack(push, 4)
1065#endif
1066 typedef struct {
1067 NDR_record_t NDR;
1068 mach_vm_address_t address;
1069 mach_vm_size_t size;
1070 mach_vm_address_t data;
1071 mach_msg_trailer_t trailer;
1072 char padding[0]; /* Avoid generating empty UData structs */
1073 } RequestU __attribute__((unused));
1074#ifdef __MigPackStructs
1075#pragma pack(pop)
1076#endif
1077 typedef __RequestKData__mach_vm_read_overwrite_t RequestK;
1078 typedef __RequestUData__mach_vm_read_overwrite_t __RequestU;
1079 typedef __ReplyKData__mach_vm_read_overwrite_t ReplyK __attribute__((unused));
1080 typedef __ReplyUData__mach_vm_read_overwrite_t ReplyU __attribute__((unused));
1081 typedef __Reply__mach_vm_read_overwrite_t Reply __attribute__((unused));
1082 typedef __Request__mach_vm_read_overwrite_t __Request __attribute__((unused));
1083
1084 /*
1085 * typedef struct {
1086 * mach_msg_header_t Head;
1087 * NDR_record_t NDR;
1088 * kern_return_t RetCode;
1089 * } mig_reply_error_t;
1090 */
1091
1092 RequestK *InKP = (RequestK *) InHeadP;
1093 RequestU *In0UP = (RequestU *) InDataP;
1094 ReplyK *OutKP = (ReplyK *) OutHeadP;
1095 ReplyU *OutUP = (ReplyU *) OutDataP;
1096 (void)OutUP;
1097#ifdef __MIG_check__Request__mach_vm_read_overwrite_t__defined
1098 kern_return_t check_result;
1099#endif /* __MIG_check__Request__mach_vm_read_overwrite_t__defined */
1100
1101#if __MigKernelSpecificCode
1102#else
1103#endif /* __MigKernelSpecificCode */
1104 vm_map_read_t target_task;
1105
1106 __DeclareRcvRpc(4808, "mach_vm_read_overwrite")
1107 __BeforeRcvRpc(4808, "mach_vm_read_overwrite")
1108
1109#if defined(__MIG_check__Request__mach_vm_read_overwrite_t__defined)
1110 check_result = __MIG_check__Request__mach_vm_read_overwrite_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1111 if (check_result != MACH_MSG_SUCCESS)
1112 { MIG_RETURN_ERROR(OutKP, check_result); }
1113#endif /* defined(__MIG_check__Request__mach_vm_read_overwrite_t__defined) */
1114
1115 target_task = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
1116
1117 OutUP->RetCode = mach_vm_read_overwrite(target_task, address: In0UP->address, size: In0UP->size, data: In0UP->data, outsize: &OutUP->outsize);
1118 vm_map_read_deallocate(map: target_task);
1119 if (OutUP->RetCode != KERN_SUCCESS) {
1120 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
1121 }
1122#if __MigKernelSpecificCode
1123#endif /* __MigKernelSpecificCode */
1124
1125 OutUP->NDR = NDR_record;
1126
1127
1128 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
1129 __AfterRcvRpc(4808, "mach_vm_read_overwrite")
1130}
1131
1132#if ( __MigTypeCheck )
1133#if __MIG_check__Request__mach_vm_subsystem__
1134#if !defined(__MIG_check__Request__mach_vm_msync_t__defined)
1135#define __MIG_check__Request__mach_vm_msync_t__defined
1136
1137mig_internal kern_return_t __MIG_check__Request__mach_vm_msync_t(
1138 __attribute__((__unused__)) __RequestKData__mach_vm_msync_t *InKP,
1139 __attribute__((__unused__)) __RequestUData__mach_vm_msync_t *In0UP,
1140 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1141{
1142
1143 typedef __Request__mach_vm_msync_t __Request;
1144 typedef __RequestUData__mach_vm_msync_t __RequestU __attribute__((unused));
1145#if __MigTypeCheck
1146 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1147 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1148 return MIG_BAD_ARGUMENTS;
1149#endif /* __MigTypeCheck */
1150
1151 return MACH_MSG_SUCCESS;
1152}
1153#endif /* !defined(__MIG_check__Request__mach_vm_msync_t__defined) */
1154#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1155#endif /* ( __MigTypeCheck ) */
1156
1157
1158/* Routine mach_vm_msync */
1159mig_internal novalue _Xmach_vm_msync
1160 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1161 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1162 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1163{
1164
1165#ifdef __MigPackStructs
1166#pragma pack(push, 4)
1167#endif
1168 typedef struct {
1169 NDR_record_t NDR;
1170 mach_vm_address_t address;
1171 mach_vm_size_t size;
1172 vm_sync_t sync_flags;
1173 mach_msg_trailer_t trailer;
1174 char padding[0]; /* Avoid generating empty UData structs */
1175 } RequestU __attribute__((unused));
1176#ifdef __MigPackStructs
1177#pragma pack(pop)
1178#endif
1179 typedef __RequestKData__mach_vm_msync_t RequestK;
1180 typedef __RequestUData__mach_vm_msync_t __RequestU;
1181 typedef __ReplyKData__mach_vm_msync_t ReplyK __attribute__((unused));
1182 typedef __ReplyUData__mach_vm_msync_t ReplyU __attribute__((unused));
1183 typedef __Reply__mach_vm_msync_t Reply __attribute__((unused));
1184 typedef __Request__mach_vm_msync_t __Request __attribute__((unused));
1185
1186 /*
1187 * typedef struct {
1188 * mach_msg_header_t Head;
1189 * NDR_record_t NDR;
1190 * kern_return_t RetCode;
1191 * } mig_reply_error_t;
1192 */
1193
1194 RequestK *InKP = (RequestK *) InHeadP;
1195 RequestU *In0UP = (RequestU *) InDataP;
1196 ReplyK *OutKP = (ReplyK *) OutHeadP;
1197 ReplyU *OutUP = (ReplyU *) OutDataP;
1198 (void)OutUP;
1199#ifdef __MIG_check__Request__mach_vm_msync_t__defined
1200 kern_return_t check_result;
1201#endif /* __MIG_check__Request__mach_vm_msync_t__defined */
1202
1203#if __MigKernelSpecificCode
1204#else
1205#endif /* __MigKernelSpecificCode */
1206 vm_map_t target_task;
1207
1208 __DeclareRcvRpc(4809, "mach_vm_msync")
1209 __BeforeRcvRpc(4809, "mach_vm_msync")
1210
1211#if defined(__MIG_check__Request__mach_vm_msync_t__defined)
1212 check_result = __MIG_check__Request__mach_vm_msync_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1213 if (check_result != MACH_MSG_SUCCESS)
1214 { MIG_RETURN_ERROR(OutKP, check_result); }
1215#endif /* defined(__MIG_check__Request__mach_vm_msync_t__defined) */
1216
1217 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
1218
1219 OutUP->RetCode = mach_vm_msync(target_task, address: In0UP->address, size: In0UP->size, sync_flags: In0UP->sync_flags);
1220 vm_map_deallocate(map: target_task);
1221#if __MigKernelSpecificCode
1222#endif /* __MigKernelSpecificCode */
1223
1224 OutUP->NDR = NDR_record;
1225
1226
1227 __AfterRcvRpc(4809, "mach_vm_msync")
1228}
1229
1230#if ( __MigTypeCheck )
1231#if __MIG_check__Request__mach_vm_subsystem__
1232#if !defined(__MIG_check__Request__mach_vm_behavior_set_t__defined)
1233#define __MIG_check__Request__mach_vm_behavior_set_t__defined
1234
1235mig_internal kern_return_t __MIG_check__Request__mach_vm_behavior_set_t(
1236 __attribute__((__unused__)) __RequestKData__mach_vm_behavior_set_t *InKP,
1237 __attribute__((__unused__)) __RequestUData__mach_vm_behavior_set_t *In0UP,
1238 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1239{
1240
1241 typedef __Request__mach_vm_behavior_set_t __Request;
1242 typedef __RequestUData__mach_vm_behavior_set_t __RequestU __attribute__((unused));
1243#if __MigTypeCheck
1244 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1245 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1246 return MIG_BAD_ARGUMENTS;
1247#endif /* __MigTypeCheck */
1248
1249 return MACH_MSG_SUCCESS;
1250}
1251#endif /* !defined(__MIG_check__Request__mach_vm_behavior_set_t__defined) */
1252#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1253#endif /* ( __MigTypeCheck ) */
1254
1255
1256/* Routine mach_vm_behavior_set */
1257mig_internal novalue _Xmach_vm_behavior_set
1258 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1259 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1260 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1261{
1262
1263#ifdef __MigPackStructs
1264#pragma pack(push, 4)
1265#endif
1266 typedef struct {
1267 NDR_record_t NDR;
1268 mach_vm_address_t address;
1269 mach_vm_size_t size;
1270 vm_behavior_t new_behavior;
1271 mach_msg_trailer_t trailer;
1272 char padding[0]; /* Avoid generating empty UData structs */
1273 } RequestU __attribute__((unused));
1274#ifdef __MigPackStructs
1275#pragma pack(pop)
1276#endif
1277 typedef __RequestKData__mach_vm_behavior_set_t RequestK;
1278 typedef __RequestUData__mach_vm_behavior_set_t __RequestU;
1279 typedef __ReplyKData__mach_vm_behavior_set_t ReplyK __attribute__((unused));
1280 typedef __ReplyUData__mach_vm_behavior_set_t ReplyU __attribute__((unused));
1281 typedef __Reply__mach_vm_behavior_set_t Reply __attribute__((unused));
1282 typedef __Request__mach_vm_behavior_set_t __Request __attribute__((unused));
1283
1284 /*
1285 * typedef struct {
1286 * mach_msg_header_t Head;
1287 * NDR_record_t NDR;
1288 * kern_return_t RetCode;
1289 * } mig_reply_error_t;
1290 */
1291
1292 RequestK *InKP = (RequestK *) InHeadP;
1293 RequestU *In0UP = (RequestU *) InDataP;
1294 ReplyK *OutKP = (ReplyK *) OutHeadP;
1295 ReplyU *OutUP = (ReplyU *) OutDataP;
1296 (void)OutUP;
1297#ifdef __MIG_check__Request__mach_vm_behavior_set_t__defined
1298 kern_return_t check_result;
1299#endif /* __MIG_check__Request__mach_vm_behavior_set_t__defined */
1300
1301#if __MigKernelSpecificCode
1302#else
1303#endif /* __MigKernelSpecificCode */
1304 vm_map_t target_task;
1305
1306 __DeclareRcvRpc(4810, "mach_vm_behavior_set")
1307 __BeforeRcvRpc(4810, "mach_vm_behavior_set")
1308
1309#if defined(__MIG_check__Request__mach_vm_behavior_set_t__defined)
1310 check_result = __MIG_check__Request__mach_vm_behavior_set_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1311 if (check_result != MACH_MSG_SUCCESS)
1312 { MIG_RETURN_ERROR(OutKP, check_result); }
1313#endif /* defined(__MIG_check__Request__mach_vm_behavior_set_t__defined) */
1314
1315 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
1316
1317 OutUP->RetCode = mach_vm_behavior_set(target_task, address: In0UP->address, size: In0UP->size, new_behavior: In0UP->new_behavior);
1318 vm_map_deallocate(map: target_task);
1319#if __MigKernelSpecificCode
1320#endif /* __MigKernelSpecificCode */
1321
1322 OutUP->NDR = NDR_record;
1323
1324
1325 __AfterRcvRpc(4810, "mach_vm_behavior_set")
1326}
1327
1328#if ( __MigTypeCheck )
1329#if __MIG_check__Request__mach_vm_subsystem__
1330#if !defined(__MIG_check__Request__mach_vm_map_external_t__defined)
1331#define __MIG_check__Request__mach_vm_map_external_t__defined
1332
1333mig_internal kern_return_t __MIG_check__Request__mach_vm_map_external_t(
1334 __attribute__((__unused__)) __RequestKData__mach_vm_map_external_t *InKP,
1335 __attribute__((__unused__)) __RequestUData__mach_vm_map_external_t *In0UP,
1336 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1337{
1338
1339 typedef __Request__mach_vm_map_external_t __Request;
1340 typedef __RequestUData__mach_vm_map_external_t __RequestU __attribute__((unused));
1341#if __MigTypeCheck
1342 if (!(InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1343 (InKP->msgh_body.msgh_descriptor_count != 1) ||
1344 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1345 return MIG_BAD_ARGUMENTS;
1346#endif /* __MigTypeCheck */
1347
1348#if __MigTypeCheck
1349 if (InKP->object.type != MACH_MSG_PORT_DESCRIPTOR ||
1350 InKP->object.disposition != 17)
1351 return MIG_TYPE_ERROR;
1352#endif /* __MigTypeCheck */
1353
1354 return MACH_MSG_SUCCESS;
1355}
1356#endif /* !defined(__MIG_check__Request__mach_vm_map_external_t__defined) */
1357#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1358#endif /* ( __MigTypeCheck ) */
1359
1360
1361/* Routine mach_vm_map_external */
1362mig_internal novalue _Xmach_vm_map_external
1363 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1364 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1365 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1366{
1367
1368#ifdef __MigPackStructs
1369#pragma pack(push, 4)
1370#endif
1371 typedef struct {
1372 NDR_record_t NDR;
1373 mach_vm_address_t address;
1374 mach_vm_size_t size;
1375 mach_vm_offset_t mask;
1376 int flags;
1377 memory_object_offset_t offset;
1378 boolean_t copy;
1379 vm_prot_t cur_protection;
1380 vm_prot_t max_protection;
1381 vm_inherit_t inheritance;
1382 mach_msg_trailer_t trailer;
1383 char padding[0]; /* Avoid generating empty UData structs */
1384 } RequestU __attribute__((unused));
1385#ifdef __MigPackStructs
1386#pragma pack(pop)
1387#endif
1388 typedef __RequestKData__mach_vm_map_external_t RequestK;
1389 typedef __RequestUData__mach_vm_map_external_t __RequestU;
1390 typedef __ReplyKData__mach_vm_map_external_t ReplyK __attribute__((unused));
1391 typedef __ReplyUData__mach_vm_map_external_t ReplyU __attribute__((unused));
1392 typedef __Reply__mach_vm_map_external_t Reply __attribute__((unused));
1393 typedef __Request__mach_vm_map_external_t __Request __attribute__((unused));
1394
1395 /*
1396 * typedef struct {
1397 * mach_msg_header_t Head;
1398 * NDR_record_t NDR;
1399 * kern_return_t RetCode;
1400 * } mig_reply_error_t;
1401 */
1402
1403 RequestK *InKP = (RequestK *) InHeadP;
1404 RequestU *In0UP = (RequestU *) InDataP;
1405 ReplyK *OutKP = (ReplyK *) OutHeadP;
1406 ReplyU *OutUP = (ReplyU *) OutDataP;
1407 (void)OutUP;
1408#ifdef __MIG_check__Request__mach_vm_map_external_t__defined
1409 kern_return_t check_result;
1410#endif /* __MIG_check__Request__mach_vm_map_external_t__defined */
1411
1412#if __MigKernelSpecificCode
1413#else
1414#endif /* __MigKernelSpecificCode */
1415 vm_map_t target_task;
1416
1417 __DeclareRcvRpc(4811, "mach_vm_map_external")
1418 __BeforeRcvRpc(4811, "mach_vm_map_external")
1419
1420#if defined(__MIG_check__Request__mach_vm_map_external_t__defined)
1421 check_result = __MIG_check__Request__mach_vm_map_external_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1422 if (check_result != MACH_MSG_SUCCESS)
1423 { MIG_RETURN_ERROR(OutKP, check_result); }
1424#endif /* defined(__MIG_check__Request__mach_vm_map_external_t__defined) */
1425
1426 target_task = convert_port_entry_to_map(port: InKP->Head.msgh_request_port);
1427
1428 OutUP->RetCode = mach_vm_map_external(target_task, address: &In0UP->address, size: In0UP->size, mask: In0UP->mask, flags: In0UP->flags, null_conversion(InKP->object.name), offset: In0UP->offset, copy: In0UP->copy, cur_protection: In0UP->cur_protection, max_protection: In0UP->max_protection, inheritance: In0UP->inheritance);
1429 vm_map_deallocate(map: target_task);
1430 if (OutUP->RetCode != KERN_SUCCESS) {
1431 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
1432 }
1433#if __MigKernelSpecificCode
1434 ipc_port_release_send(port: (ipc_port_t)InKP->object.name);
1435#endif /* __MigKernelSpecificCode */
1436
1437 OutUP->NDR = NDR_record;
1438
1439
1440 OutUP->address = In0UP->address;
1441
1442 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
1443 __AfterRcvRpc(4811, "mach_vm_map_external")
1444}
1445
1446#if ( __MigTypeCheck )
1447#if __MIG_check__Request__mach_vm_subsystem__
1448#if !defined(__MIG_check__Request__mach_vm_machine_attribute_t__defined)
1449#define __MIG_check__Request__mach_vm_machine_attribute_t__defined
1450
1451mig_internal kern_return_t __MIG_check__Request__mach_vm_machine_attribute_t(
1452 __attribute__((__unused__)) __RequestKData__mach_vm_machine_attribute_t *InKP,
1453 __attribute__((__unused__)) __RequestUData__mach_vm_machine_attribute_t *In0UP,
1454 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1455{
1456
1457 typedef __Request__mach_vm_machine_attribute_t __Request;
1458 typedef __RequestUData__mach_vm_machine_attribute_t __RequestU __attribute__((unused));
1459#if __MigTypeCheck
1460 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1461 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1462 return MIG_BAD_ARGUMENTS;
1463#endif /* __MigTypeCheck */
1464
1465 return MACH_MSG_SUCCESS;
1466}
1467#endif /* !defined(__MIG_check__Request__mach_vm_machine_attribute_t__defined) */
1468#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1469#endif /* ( __MigTypeCheck ) */
1470
1471
1472/* Routine mach_vm_machine_attribute */
1473mig_internal novalue _Xmach_vm_machine_attribute
1474 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1475 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1476 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1477{
1478
1479#ifdef __MigPackStructs
1480#pragma pack(push, 4)
1481#endif
1482 typedef struct {
1483 NDR_record_t NDR;
1484 mach_vm_address_t address;
1485 mach_vm_size_t size;
1486 vm_machine_attribute_t attribute;
1487 vm_machine_attribute_val_t value;
1488 mach_msg_trailer_t trailer;
1489 char padding[0]; /* Avoid generating empty UData structs */
1490 } RequestU __attribute__((unused));
1491#ifdef __MigPackStructs
1492#pragma pack(pop)
1493#endif
1494 typedef __RequestKData__mach_vm_machine_attribute_t RequestK;
1495 typedef __RequestUData__mach_vm_machine_attribute_t __RequestU;
1496 typedef __ReplyKData__mach_vm_machine_attribute_t ReplyK __attribute__((unused));
1497 typedef __ReplyUData__mach_vm_machine_attribute_t ReplyU __attribute__((unused));
1498 typedef __Reply__mach_vm_machine_attribute_t Reply __attribute__((unused));
1499 typedef __Request__mach_vm_machine_attribute_t __Request __attribute__((unused));
1500
1501 /*
1502 * typedef struct {
1503 * mach_msg_header_t Head;
1504 * NDR_record_t NDR;
1505 * kern_return_t RetCode;
1506 * } mig_reply_error_t;
1507 */
1508
1509 RequestK *InKP = (RequestK *) InHeadP;
1510 RequestU *In0UP = (RequestU *) InDataP;
1511 ReplyK *OutKP = (ReplyK *) OutHeadP;
1512 ReplyU *OutUP = (ReplyU *) OutDataP;
1513 (void)OutUP;
1514#ifdef __MIG_check__Request__mach_vm_machine_attribute_t__defined
1515 kern_return_t check_result;
1516#endif /* __MIG_check__Request__mach_vm_machine_attribute_t__defined */
1517
1518#if __MigKernelSpecificCode
1519#else
1520#endif /* __MigKernelSpecificCode */
1521 vm_map_t target_task;
1522
1523 __DeclareRcvRpc(4812, "mach_vm_machine_attribute")
1524 __BeforeRcvRpc(4812, "mach_vm_machine_attribute")
1525
1526#if defined(__MIG_check__Request__mach_vm_machine_attribute_t__defined)
1527 check_result = __MIG_check__Request__mach_vm_machine_attribute_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1528 if (check_result != MACH_MSG_SUCCESS)
1529 { MIG_RETURN_ERROR(OutKP, check_result); }
1530#endif /* defined(__MIG_check__Request__mach_vm_machine_attribute_t__defined) */
1531
1532 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
1533
1534 OutUP->RetCode = mach_vm_machine_attribute(target_task, address: In0UP->address, size: In0UP->size, attribute: In0UP->attribute, value: &In0UP->value);
1535 vm_map_deallocate(map: target_task);
1536 if (OutUP->RetCode != KERN_SUCCESS) {
1537 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
1538 }
1539#if __MigKernelSpecificCode
1540#endif /* __MigKernelSpecificCode */
1541
1542 OutUP->NDR = NDR_record;
1543
1544
1545 OutUP->value = In0UP->value;
1546
1547 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
1548 __AfterRcvRpc(4812, "mach_vm_machine_attribute")
1549}
1550
1551#if ( __MigTypeCheck )
1552#if __MIG_check__Request__mach_vm_subsystem__
1553#if !defined(__MIG_check__Request__mach_vm_remap_external_t__defined)
1554#define __MIG_check__Request__mach_vm_remap_external_t__defined
1555
1556mig_internal kern_return_t __MIG_check__Request__mach_vm_remap_external_t(
1557 __attribute__((__unused__)) __RequestKData__mach_vm_remap_external_t *InKP,
1558 __attribute__((__unused__)) __RequestUData__mach_vm_remap_external_t *In0UP,
1559 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1560{
1561
1562 typedef __Request__mach_vm_remap_external_t __Request;
1563 typedef __RequestUData__mach_vm_remap_external_t __RequestU __attribute__((unused));
1564#if __MigTypeCheck
1565 if (!(InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1566 (InKP->msgh_body.msgh_descriptor_count != 1) ||
1567 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1568 return MIG_BAD_ARGUMENTS;
1569#endif /* __MigTypeCheck */
1570
1571#if __MigTypeCheck
1572 if (InKP->src_task.type != MACH_MSG_PORT_DESCRIPTOR ||
1573 InKP->src_task.disposition != 17)
1574 return MIG_TYPE_ERROR;
1575#endif /* __MigTypeCheck */
1576
1577 return MACH_MSG_SUCCESS;
1578}
1579#endif /* !defined(__MIG_check__Request__mach_vm_remap_external_t__defined) */
1580#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1581#endif /* ( __MigTypeCheck ) */
1582
1583
1584/* Routine mach_vm_remap_external */
1585mig_internal novalue _Xmach_vm_remap_external
1586 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1587 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1588 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1589{
1590
1591#ifdef __MigPackStructs
1592#pragma pack(push, 4)
1593#endif
1594 typedef struct {
1595 NDR_record_t NDR;
1596 mach_vm_address_t target_address;
1597 mach_vm_size_t size;
1598 mach_vm_offset_t mask;
1599 int flags;
1600 mach_vm_address_t src_address;
1601 boolean_t copy;
1602 vm_inherit_t inheritance;
1603 mach_msg_trailer_t trailer;
1604 char padding[0]; /* Avoid generating empty UData structs */
1605 } RequestU __attribute__((unused));
1606#ifdef __MigPackStructs
1607#pragma pack(pop)
1608#endif
1609 typedef __RequestKData__mach_vm_remap_external_t RequestK;
1610 typedef __RequestUData__mach_vm_remap_external_t __RequestU;
1611 typedef __ReplyKData__mach_vm_remap_external_t ReplyK __attribute__((unused));
1612 typedef __ReplyUData__mach_vm_remap_external_t ReplyU __attribute__((unused));
1613 typedef __Reply__mach_vm_remap_external_t Reply __attribute__((unused));
1614 typedef __Request__mach_vm_remap_external_t __Request __attribute__((unused));
1615
1616 /*
1617 * typedef struct {
1618 * mach_msg_header_t Head;
1619 * NDR_record_t NDR;
1620 * kern_return_t RetCode;
1621 * } mig_reply_error_t;
1622 */
1623
1624 RequestK *InKP = (RequestK *) InHeadP;
1625 RequestU *In0UP = (RequestU *) InDataP;
1626 ReplyK *OutKP = (ReplyK *) OutHeadP;
1627 ReplyU *OutUP = (ReplyU *) OutDataP;
1628 (void)OutUP;
1629#ifdef __MIG_check__Request__mach_vm_remap_external_t__defined
1630 kern_return_t check_result;
1631#endif /* __MIG_check__Request__mach_vm_remap_external_t__defined */
1632
1633#if __MigKernelSpecificCode
1634#else
1635#endif /* __MigKernelSpecificCode */
1636 vm_map_t target_task;
1637 vm_map_t src_task;
1638
1639 __DeclareRcvRpc(4813, "mach_vm_remap_external")
1640 __BeforeRcvRpc(4813, "mach_vm_remap_external")
1641
1642#if defined(__MIG_check__Request__mach_vm_remap_external_t__defined)
1643 check_result = __MIG_check__Request__mach_vm_remap_external_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1644 if (check_result != MACH_MSG_SUCCESS)
1645 { MIG_RETURN_ERROR(OutKP, check_result); }
1646#endif /* defined(__MIG_check__Request__mach_vm_remap_external_t__defined) */
1647
1648 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
1649
1650 src_task = convert_port_to_map(port: InKP->src_task.name);
1651
1652 OutUP->RetCode = mach_vm_remap_external(target_task, target_address: &In0UP->target_address, size: In0UP->size, mask: In0UP->mask, flags: In0UP->flags, src_task, src_address: In0UP->src_address, copy: In0UP->copy, cur_protection: &OutUP->cur_protection, max_protection: &OutUP->max_protection, inheritance: In0UP->inheritance);
1653 vm_map_deallocate(map: src_task);
1654 vm_map_deallocate(map: target_task);
1655 if (OutUP->RetCode != KERN_SUCCESS) {
1656 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
1657 }
1658#if __MigKernelSpecificCode
1659 ipc_port_release_send(port: (ipc_port_t)InKP->src_task.name);
1660#endif /* __MigKernelSpecificCode */
1661
1662 OutUP->NDR = NDR_record;
1663
1664
1665 OutUP->target_address = In0UP->target_address;
1666
1667 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
1668 __AfterRcvRpc(4813, "mach_vm_remap_external")
1669}
1670
1671#if ( __MigTypeCheck )
1672#if __MIG_check__Request__mach_vm_subsystem__
1673#if !defined(__MIG_check__Request__mach_vm_page_query_t__defined)
1674#define __MIG_check__Request__mach_vm_page_query_t__defined
1675
1676mig_internal kern_return_t __MIG_check__Request__mach_vm_page_query_t(
1677 __attribute__((__unused__)) __RequestKData__mach_vm_page_query_t *InKP,
1678 __attribute__((__unused__)) __RequestUData__mach_vm_page_query_t *In0UP,
1679 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1680{
1681
1682 typedef __Request__mach_vm_page_query_t __Request;
1683 typedef __RequestUData__mach_vm_page_query_t __RequestU __attribute__((unused));
1684#if __MigTypeCheck
1685 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1686 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1687 return MIG_BAD_ARGUMENTS;
1688#endif /* __MigTypeCheck */
1689
1690 return MACH_MSG_SUCCESS;
1691}
1692#endif /* !defined(__MIG_check__Request__mach_vm_page_query_t__defined) */
1693#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1694#endif /* ( __MigTypeCheck ) */
1695
1696
1697/* Routine mach_vm_page_query */
1698mig_internal novalue _Xmach_vm_page_query
1699 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1700 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1701 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1702{
1703
1704#ifdef __MigPackStructs
1705#pragma pack(push, 4)
1706#endif
1707 typedef struct {
1708 NDR_record_t NDR;
1709 mach_vm_offset_t offset;
1710 mach_msg_trailer_t trailer;
1711 char padding[0]; /* Avoid generating empty UData structs */
1712 } RequestU __attribute__((unused));
1713#ifdef __MigPackStructs
1714#pragma pack(pop)
1715#endif
1716 typedef __RequestKData__mach_vm_page_query_t RequestK;
1717 typedef __RequestUData__mach_vm_page_query_t __RequestU;
1718 typedef __ReplyKData__mach_vm_page_query_t ReplyK __attribute__((unused));
1719 typedef __ReplyUData__mach_vm_page_query_t ReplyU __attribute__((unused));
1720 typedef __Reply__mach_vm_page_query_t Reply __attribute__((unused));
1721 typedef __Request__mach_vm_page_query_t __Request __attribute__((unused));
1722
1723 /*
1724 * typedef struct {
1725 * mach_msg_header_t Head;
1726 * NDR_record_t NDR;
1727 * kern_return_t RetCode;
1728 * } mig_reply_error_t;
1729 */
1730
1731 RequestK *InKP = (RequestK *) InHeadP;
1732 RequestU *In0UP = (RequestU *) InDataP;
1733 ReplyK *OutKP = (ReplyK *) OutHeadP;
1734 ReplyU *OutUP = (ReplyU *) OutDataP;
1735 (void)OutUP;
1736#ifdef __MIG_check__Request__mach_vm_page_query_t__defined
1737 kern_return_t check_result;
1738#endif /* __MIG_check__Request__mach_vm_page_query_t__defined */
1739
1740#if __MigKernelSpecificCode
1741#else
1742#endif /* __MigKernelSpecificCode */
1743 vm_map_read_t target_map;
1744
1745 __DeclareRcvRpc(4814, "mach_vm_page_query")
1746 __BeforeRcvRpc(4814, "mach_vm_page_query")
1747
1748#if defined(__MIG_check__Request__mach_vm_page_query_t__defined)
1749 check_result = __MIG_check__Request__mach_vm_page_query_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1750 if (check_result != MACH_MSG_SUCCESS)
1751 { MIG_RETURN_ERROR(OutKP, check_result); }
1752#endif /* defined(__MIG_check__Request__mach_vm_page_query_t__defined) */
1753
1754 target_map = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
1755
1756 OutUP->RetCode = mach_vm_page_query(target_map, offset: In0UP->offset, disposition: &OutUP->disposition, ref_count: &OutUP->ref_count);
1757 vm_map_read_deallocate(map: target_map);
1758 if (OutUP->RetCode != KERN_SUCCESS) {
1759 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
1760 }
1761#if __MigKernelSpecificCode
1762#endif /* __MigKernelSpecificCode */
1763
1764 OutUP->NDR = NDR_record;
1765
1766
1767 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
1768 __AfterRcvRpc(4814, "mach_vm_page_query")
1769}
1770
1771#if ( __MigTypeCheck )
1772#if __MIG_check__Request__mach_vm_subsystem__
1773#if !defined(__MIG_check__Request__mach_vm_region_recurse_t__defined)
1774#define __MIG_check__Request__mach_vm_region_recurse_t__defined
1775
1776mig_internal kern_return_t __MIG_check__Request__mach_vm_region_recurse_t(
1777 __attribute__((__unused__)) __RequestKData__mach_vm_region_recurse_t *InKP,
1778 __attribute__((__unused__)) __RequestUData__mach_vm_region_recurse_t *In0UP,
1779 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1780{
1781
1782 typedef __Request__mach_vm_region_recurse_t __Request;
1783 typedef __RequestUData__mach_vm_region_recurse_t __RequestU __attribute__((unused));
1784#if __MigTypeCheck
1785 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1786 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1787 return MIG_BAD_ARGUMENTS;
1788#endif /* __MigTypeCheck */
1789
1790 return MACH_MSG_SUCCESS;
1791}
1792#endif /* !defined(__MIG_check__Request__mach_vm_region_recurse_t__defined) */
1793#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1794#endif /* ( __MigTypeCheck ) */
1795
1796
1797/* Routine mach_vm_region_recurse */
1798mig_internal novalue _Xmach_vm_region_recurse
1799 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1800 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1801 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1802{
1803
1804#ifdef __MigPackStructs
1805#pragma pack(push, 4)
1806#endif
1807 typedef struct {
1808 NDR_record_t NDR;
1809 mach_vm_address_t address;
1810 natural_t nesting_depth;
1811 mach_msg_type_number_t infoCnt;
1812 mach_msg_trailer_t trailer;
1813 char padding[0]; /* Avoid generating empty UData structs */
1814 } RequestU __attribute__((unused));
1815#ifdef __MigPackStructs
1816#pragma pack(pop)
1817#endif
1818 typedef __RequestKData__mach_vm_region_recurse_t RequestK;
1819 typedef __RequestUData__mach_vm_region_recurse_t __RequestU;
1820 typedef __ReplyKData__mach_vm_region_recurse_t ReplyK __attribute__((unused));
1821 typedef __ReplyUData__mach_vm_region_recurse_t ReplyU __attribute__((unused));
1822 typedef __Reply__mach_vm_region_recurse_t Reply __attribute__((unused));
1823 typedef __Request__mach_vm_region_recurse_t __Request __attribute__((unused));
1824
1825 /*
1826 * typedef struct {
1827 * mach_msg_header_t Head;
1828 * NDR_record_t NDR;
1829 * kern_return_t RetCode;
1830 * } mig_reply_error_t;
1831 */
1832
1833 RequestK *InKP = (RequestK *) InHeadP;
1834 RequestU *In0UP = (RequestU *) InDataP;
1835 ReplyK *OutKP = (ReplyK *) OutHeadP;
1836 ReplyU *OutUP = (ReplyU *) OutDataP;
1837 (void)OutUP;
1838#ifdef __MIG_check__Request__mach_vm_region_recurse_t__defined
1839 kern_return_t check_result;
1840#endif /* __MIG_check__Request__mach_vm_region_recurse_t__defined */
1841
1842#if __MigKernelSpecificCode
1843#else
1844#endif /* __MigKernelSpecificCode */
1845 vm_map_read_t target_task;
1846
1847 __DeclareRcvRpc(4815, "mach_vm_region_recurse")
1848 __BeforeRcvRpc(4815, "mach_vm_region_recurse")
1849
1850#if defined(__MIG_check__Request__mach_vm_region_recurse_t__defined)
1851 check_result = __MIG_check__Request__mach_vm_region_recurse_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1852 if (check_result != MACH_MSG_SUCCESS)
1853 { MIG_RETURN_ERROR(OutKP, check_result); }
1854#endif /* defined(__MIG_check__Request__mach_vm_region_recurse_t__defined) */
1855
1856 target_task = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
1857
1858 OutUP->infoCnt = 19;
1859 if (In0UP->infoCnt < OutUP->infoCnt)
1860 OutUP->infoCnt = In0UP->infoCnt;
1861
1862 OutUP->RetCode = mach_vm_region_recurse(target_task, address: &In0UP->address, size: &OutUP->size, nesting_depth: &In0UP->nesting_depth, info: OutUP->info, infoCnt: &OutUP->infoCnt);
1863 vm_map_read_deallocate(map: target_task);
1864 if (OutUP->RetCode != KERN_SUCCESS) {
1865 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
1866 }
1867#if __MigKernelSpecificCode
1868#endif /* __MigKernelSpecificCode */
1869
1870 OutUP->NDR = NDR_record;
1871
1872
1873 OutUP->address = In0UP->address;
1874
1875 OutUP->nesting_depth = In0UP->nesting_depth;
1876 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply) - 76) + (((4 * OutUP->infoCnt)));
1877
1878 __AfterRcvRpc(4815, "mach_vm_region_recurse")
1879}
1880
1881#if ( __MigTypeCheck )
1882#if __MIG_check__Request__mach_vm_subsystem__
1883#if !defined(__MIG_check__Request__mach_vm_region_t__defined)
1884#define __MIG_check__Request__mach_vm_region_t__defined
1885
1886mig_internal kern_return_t __MIG_check__Request__mach_vm_region_t(
1887 __attribute__((__unused__)) __RequestKData__mach_vm_region_t *InKP,
1888 __attribute__((__unused__)) __RequestUData__mach_vm_region_t *In0UP,
1889 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
1890{
1891
1892 typedef __Request__mach_vm_region_t __Request;
1893 typedef __RequestUData__mach_vm_region_t __RequestU __attribute__((unused));
1894#if __MigTypeCheck
1895 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1896 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1897 return MIG_BAD_ARGUMENTS;
1898#endif /* __MigTypeCheck */
1899
1900 return MACH_MSG_SUCCESS;
1901}
1902#endif /* !defined(__MIG_check__Request__mach_vm_region_t__defined) */
1903#endif /* __MIG_check__Request__mach_vm_subsystem__ */
1904#endif /* ( __MigTypeCheck ) */
1905
1906
1907/* Routine mach_vm_region */
1908mig_internal novalue _Xmach_vm_region
1909 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
1910 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
1911 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
1912{
1913
1914#ifdef __MigPackStructs
1915#pragma pack(push, 4)
1916#endif
1917 typedef struct {
1918 NDR_record_t NDR;
1919 mach_vm_address_t address;
1920 vm_region_flavor_t flavor;
1921 mach_msg_type_number_t infoCnt;
1922 mach_msg_trailer_t trailer;
1923 char padding[0]; /* Avoid generating empty UData structs */
1924 } RequestU __attribute__((unused));
1925#ifdef __MigPackStructs
1926#pragma pack(pop)
1927#endif
1928 typedef __RequestKData__mach_vm_region_t RequestK;
1929 typedef __RequestUData__mach_vm_region_t __RequestU;
1930 typedef __ReplyKData__mach_vm_region_t ReplyK __attribute__((unused));
1931 typedef __ReplyUData__mach_vm_region_t ReplyU __attribute__((unused));
1932 typedef __Reply__mach_vm_region_t Reply __attribute__((unused));
1933 typedef __Request__mach_vm_region_t __Request __attribute__((unused));
1934
1935 /*
1936 * typedef struct {
1937 * mach_msg_header_t Head;
1938 * NDR_record_t NDR;
1939 * kern_return_t RetCode;
1940 * } mig_reply_error_t;
1941 */
1942
1943 RequestK *InKP = (RequestK *) InHeadP;
1944 RequestU *In0UP = (RequestU *) InDataP;
1945 ReplyK *OutKP = (ReplyK *) OutHeadP;
1946 ReplyU *OutUP = (ReplyU *) OutDataP;
1947 (void)OutUP;
1948#ifdef __MIG_check__Request__mach_vm_region_t__defined
1949 kern_return_t check_result;
1950#endif /* __MIG_check__Request__mach_vm_region_t__defined */
1951
1952#if __MigKernelSpecificCode
1953#if UseStaticTemplates
1954 const static mach_msg_port_descriptor_t object_nameTemplate = {
1955 /* name = */ MACH_PORT_NULL,
1956 /* pad1 = */ 0,
1957 /* pad2 = */ 0,
1958 /* disp = */ 17,
1959 /* type = */ MACH_MSG_PORT_DESCRIPTOR,
1960 };
1961#endif /* UseStaticTemplates */
1962
1963#else
1964#if UseStaticTemplates
1965 const static mach_msg_port_descriptor_t object_nameTemplate = {
1966 /* name = */ MACH_PORT_NULL,
1967 /* pad1 = */ 0,
1968 /* pad2 = */ 0,
1969 /* disp = */ 17,
1970 /* type = */ MACH_MSG_PORT_DESCRIPTOR,
1971 };
1972#endif /* UseStaticTemplates */
1973
1974#endif /* __MigKernelSpecificCode */
1975 kern_return_t RetCode;
1976 vm_map_read_t target_task;
1977
1978 __DeclareRcvRpc(4816, "mach_vm_region")
1979 __BeforeRcvRpc(4816, "mach_vm_region")
1980
1981#if defined(__MIG_check__Request__mach_vm_region_t__defined)
1982 check_result = __MIG_check__Request__mach_vm_region_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
1983 if (check_result != MACH_MSG_SUCCESS)
1984 { MIG_RETURN_ERROR(OutKP, check_result); }
1985#endif /* defined(__MIG_check__Request__mach_vm_region_t__defined) */
1986
1987#if UseStaticTemplates
1988 OutKP->object_name = object_nameTemplate;
1989#else /* UseStaticTemplates */
1990#if __MigKernelSpecificCode
1991 OutKP->object_name.disposition = 17;
1992#else
1993 OutKP->object_name.disposition = 17;
1994#endif /* __MigKernelSpecificCode */
1995#if !(defined(KERNEL) && defined(__LP64__))
1996 OutKP->object_name.pad1 = 0;
1997#endif
1998 OutKP->object_name.pad2 = 0;
1999 OutKP->object_name.type = MACH_MSG_PORT_DESCRIPTOR;
2000#if defined(KERNEL)
2001 OutKP->object_name.pad_end = 0;
2002#endif
2003#endif /* UseStaticTemplates */
2004
2005
2006 target_task = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
2007
2008 OutUP->infoCnt = 10;
2009 if (In0UP->infoCnt < OutUP->infoCnt)
2010 OutUP->infoCnt = In0UP->infoCnt;
2011
2012 RetCode = mach_vm_region(target_task, address: &In0UP->address, size: &OutUP->size, flavor: In0UP->flavor, info: OutUP->info, infoCnt: &OutUP->infoCnt, object_name: &OutKP->object_name.name);
2013 vm_map_read_deallocate(map: target_task);
2014 if (RetCode != KERN_SUCCESS) {
2015 MIG_RETURN_ERROR(OutKP, RetCode);
2016 }
2017#if __MigKernelSpecificCode
2018#endif /* __MigKernelSpecificCode */
2019
2020 OutUP->NDR = NDR_record;
2021
2022
2023 OutUP->address = In0UP->address;
2024 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply) - 40) + (((4 * OutUP->infoCnt)));
2025
2026 OutKP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
2027 OutKP->msgh_body.msgh_descriptor_count = 1;
2028 __AfterRcvRpc(4816, "mach_vm_region")
2029}
2030
2031#if ( __MigTypeCheck )
2032#if __MIG_check__Request__mach_vm_subsystem__
2033#if !defined(__MIG_check__Request___mach_make_memory_entry_t__defined)
2034#define __MIG_check__Request___mach_make_memory_entry_t__defined
2035
2036mig_internal kern_return_t __MIG_check__Request___mach_make_memory_entry_t(
2037 __attribute__((__unused__)) __RequestKData___mach_make_memory_entry_t *InKP,
2038 __attribute__((__unused__)) __RequestUData___mach_make_memory_entry_t *In0UP,
2039 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2040{
2041
2042 typedef __Request___mach_make_memory_entry_t __Request;
2043 typedef __RequestUData___mach_make_memory_entry_t __RequestU __attribute__((unused));
2044#if __MigTypeCheck
2045 if (!(InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2046 (InKP->msgh_body.msgh_descriptor_count != 1) ||
2047 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2048 return MIG_BAD_ARGUMENTS;
2049#endif /* __MigTypeCheck */
2050
2051#if __MigTypeCheck
2052 if (InKP->parent_handle.type != MACH_MSG_PORT_DESCRIPTOR ||
2053 InKP->parent_handle.disposition != 17)
2054 return MIG_TYPE_ERROR;
2055#endif /* __MigTypeCheck */
2056
2057 return MACH_MSG_SUCCESS;
2058}
2059#endif /* !defined(__MIG_check__Request___mach_make_memory_entry_t__defined) */
2060#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2061#endif /* ( __MigTypeCheck ) */
2062
2063
2064/* Routine _mach_make_memory_entry */
2065mig_internal novalue _X_mach_make_memory_entry
2066 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2067 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2068 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2069{
2070
2071#ifdef __MigPackStructs
2072#pragma pack(push, 4)
2073#endif
2074 typedef struct {
2075 NDR_record_t NDR;
2076 memory_object_size_t size;
2077 memory_object_offset_t offset;
2078 vm_prot_t permission;
2079 mach_msg_trailer_t trailer;
2080 char padding[0]; /* Avoid generating empty UData structs */
2081 } RequestU __attribute__((unused));
2082#ifdef __MigPackStructs
2083#pragma pack(pop)
2084#endif
2085 typedef __RequestKData___mach_make_memory_entry_t RequestK;
2086 typedef __RequestUData___mach_make_memory_entry_t __RequestU;
2087 typedef __ReplyKData___mach_make_memory_entry_t ReplyK __attribute__((unused));
2088 typedef __ReplyUData___mach_make_memory_entry_t ReplyU __attribute__((unused));
2089 typedef __Reply___mach_make_memory_entry_t Reply __attribute__((unused));
2090 typedef __Request___mach_make_memory_entry_t __Request __attribute__((unused));
2091
2092 /*
2093 * typedef struct {
2094 * mach_msg_header_t Head;
2095 * NDR_record_t NDR;
2096 * kern_return_t RetCode;
2097 * } mig_reply_error_t;
2098 */
2099
2100 RequestK *InKP = (RequestK *) InHeadP;
2101 RequestU *In0UP = (RequestU *) InDataP;
2102 ReplyK *OutKP = (ReplyK *) OutHeadP;
2103 ReplyU *OutUP = (ReplyU *) OutDataP;
2104 (void)OutUP;
2105#ifdef __MIG_check__Request___mach_make_memory_entry_t__defined
2106 kern_return_t check_result;
2107#endif /* __MIG_check__Request___mach_make_memory_entry_t__defined */
2108
2109#if __MigKernelSpecificCode
2110#if UseStaticTemplates
2111 const static mach_msg_port_descriptor_t object_handleTemplate = {
2112 /* name = */ MACH_PORT_NULL,
2113 /* pad1 = */ 0,
2114 /* pad2 = */ 0,
2115 /* disp = */ 17,
2116 /* type = */ MACH_MSG_PORT_DESCRIPTOR,
2117 };
2118#endif /* UseStaticTemplates */
2119
2120#else
2121#if UseStaticTemplates
2122 const static mach_msg_port_descriptor_t object_handleTemplate = {
2123 /* name = */ MACH_PORT_NULL,
2124 /* pad1 = */ 0,
2125 /* pad2 = */ 0,
2126 /* disp = */ 17,
2127 /* type = */ MACH_MSG_PORT_DESCRIPTOR,
2128 };
2129#endif /* UseStaticTemplates */
2130
2131#endif /* __MigKernelSpecificCode */
2132 kern_return_t RetCode;
2133 vm_map_t target_task;
2134 mem_entry_name_port_t object_handle;
2135
2136 __DeclareRcvRpc(4817, "_mach_make_memory_entry")
2137 __BeforeRcvRpc(4817, "_mach_make_memory_entry")
2138
2139#if defined(__MIG_check__Request___mach_make_memory_entry_t__defined)
2140 check_result = __MIG_check__Request___mach_make_memory_entry_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2141 if (check_result != MACH_MSG_SUCCESS)
2142 { MIG_RETURN_ERROR(OutKP, check_result); }
2143#endif /* defined(__MIG_check__Request___mach_make_memory_entry_t__defined) */
2144
2145#if UseStaticTemplates
2146 OutKP->object_handle = object_handleTemplate;
2147#else /* UseStaticTemplates */
2148#if __MigKernelSpecificCode
2149 OutKP->object_handle.disposition = 17;
2150#else
2151 OutKP->object_handle.disposition = 17;
2152#endif /* __MigKernelSpecificCode */
2153#if !(defined(KERNEL) && defined(__LP64__))
2154 OutKP->object_handle.pad1 = 0;
2155#endif
2156 OutKP->object_handle.pad2 = 0;
2157 OutKP->object_handle.type = MACH_MSG_PORT_DESCRIPTOR;
2158#if defined(KERNEL)
2159 OutKP->object_handle.pad_end = 0;
2160#endif
2161#endif /* UseStaticTemplates */
2162
2163
2164 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
2165
2166 RetCode = _mach_make_memory_entry(target_task, size: &In0UP->size, offset: In0UP->offset, permission: In0UP->permission, object_handle: &object_handle, null_conversion(InKP->parent_handle.name));
2167 vm_map_deallocate(map: target_task);
2168 if (RetCode != KERN_SUCCESS) {
2169 MIG_RETURN_ERROR(OutKP, RetCode);
2170 }
2171#if __MigKernelSpecificCode
2172 ipc_port_release_send(port: (ipc_port_t)InKP->parent_handle.name);
2173#endif /* __MigKernelSpecificCode */
2174 OutKP->object_handle.name = (mach_port_t)null_conversion(object_handle);
2175
2176
2177 OutUP->NDR = NDR_record;
2178
2179
2180 OutUP->size = In0UP->size;
2181
2182 OutKP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
2183 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
2184 OutKP->msgh_body.msgh_descriptor_count = 1;
2185 __AfterRcvRpc(4817, "_mach_make_memory_entry")
2186}
2187
2188#if ( __MigTypeCheck )
2189#if __MIG_check__Request__mach_vm_subsystem__
2190#if !defined(__MIG_check__Request__mach_vm_purgable_control_external_t__defined)
2191#define __MIG_check__Request__mach_vm_purgable_control_external_t__defined
2192
2193mig_internal kern_return_t __MIG_check__Request__mach_vm_purgable_control_external_t(
2194 __attribute__((__unused__)) __RequestKData__mach_vm_purgable_control_external_t *InKP,
2195 __attribute__((__unused__)) __RequestUData__mach_vm_purgable_control_external_t *In0UP,
2196 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2197{
2198
2199 typedef __Request__mach_vm_purgable_control_external_t __Request;
2200 typedef __RequestUData__mach_vm_purgable_control_external_t __RequestU __attribute__((unused));
2201#if __MigTypeCheck
2202 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2203 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2204 return MIG_BAD_ARGUMENTS;
2205#endif /* __MigTypeCheck */
2206
2207 return MACH_MSG_SUCCESS;
2208}
2209#endif /* !defined(__MIG_check__Request__mach_vm_purgable_control_external_t__defined) */
2210#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2211#endif /* ( __MigTypeCheck ) */
2212
2213
2214/* Routine mach_vm_purgable_control_external */
2215mig_internal novalue _Xmach_vm_purgable_control_external
2216 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2217 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2218 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2219{
2220
2221#ifdef __MigPackStructs
2222#pragma pack(push, 4)
2223#endif
2224 typedef struct {
2225 NDR_record_t NDR;
2226 mach_vm_address_t address;
2227 vm_purgable_t control;
2228 int state;
2229 mach_msg_trailer_t trailer;
2230 char padding[0]; /* Avoid generating empty UData structs */
2231 } RequestU __attribute__((unused));
2232#ifdef __MigPackStructs
2233#pragma pack(pop)
2234#endif
2235 typedef __RequestKData__mach_vm_purgable_control_external_t RequestK;
2236 typedef __RequestUData__mach_vm_purgable_control_external_t __RequestU;
2237 typedef __ReplyKData__mach_vm_purgable_control_external_t ReplyK __attribute__((unused));
2238 typedef __ReplyUData__mach_vm_purgable_control_external_t ReplyU __attribute__((unused));
2239 typedef __Reply__mach_vm_purgable_control_external_t Reply __attribute__((unused));
2240 typedef __Request__mach_vm_purgable_control_external_t __Request __attribute__((unused));
2241
2242 /*
2243 * typedef struct {
2244 * mach_msg_header_t Head;
2245 * NDR_record_t NDR;
2246 * kern_return_t RetCode;
2247 * } mig_reply_error_t;
2248 */
2249
2250 RequestK *InKP = (RequestK *) InHeadP;
2251 RequestU *In0UP = (RequestU *) InDataP;
2252 ReplyK *OutKP = (ReplyK *) OutHeadP;
2253 ReplyU *OutUP = (ReplyU *) OutDataP;
2254 (void)OutUP;
2255#ifdef __MIG_check__Request__mach_vm_purgable_control_external_t__defined
2256 kern_return_t check_result;
2257#endif /* __MIG_check__Request__mach_vm_purgable_control_external_t__defined */
2258
2259#if __MigKernelSpecificCode
2260#else
2261#endif /* __MigKernelSpecificCode */
2262 __DeclareRcvRpc(4818, "mach_vm_purgable_control_external")
2263 __BeforeRcvRpc(4818, "mach_vm_purgable_control_external")
2264
2265#if defined(__MIG_check__Request__mach_vm_purgable_control_external_t__defined)
2266 check_result = __MIG_check__Request__mach_vm_purgable_control_external_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2267 if (check_result != MACH_MSG_SUCCESS)
2268 { MIG_RETURN_ERROR(OutKP, check_result); }
2269#endif /* defined(__MIG_check__Request__mach_vm_purgable_control_external_t__defined) */
2270
2271 OutUP->RetCode = mach_vm_purgable_control_external(target_tport: InKP->Head.msgh_request_port, address: In0UP->address, control: In0UP->control, state: &In0UP->state);
2272 if (OutUP->RetCode != KERN_SUCCESS) {
2273 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
2274 }
2275#if __MigKernelSpecificCode
2276#endif /* __MigKernelSpecificCode */
2277
2278 OutUP->NDR = NDR_record;
2279
2280
2281 OutUP->state = In0UP->state;
2282
2283 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
2284 __AfterRcvRpc(4818, "mach_vm_purgable_control_external")
2285}
2286
2287#if ( __MigTypeCheck )
2288#if __MIG_check__Request__mach_vm_subsystem__
2289#if !defined(__MIG_check__Request__mach_vm_page_info_t__defined)
2290#define __MIG_check__Request__mach_vm_page_info_t__defined
2291
2292mig_internal kern_return_t __MIG_check__Request__mach_vm_page_info_t(
2293 __attribute__((__unused__)) __RequestKData__mach_vm_page_info_t *InKP,
2294 __attribute__((__unused__)) __RequestUData__mach_vm_page_info_t *In0UP,
2295 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2296{
2297
2298 typedef __Request__mach_vm_page_info_t __Request;
2299 typedef __RequestUData__mach_vm_page_info_t __RequestU __attribute__((unused));
2300#if __MigTypeCheck
2301 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2302 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2303 return MIG_BAD_ARGUMENTS;
2304#endif /* __MigTypeCheck */
2305
2306 return MACH_MSG_SUCCESS;
2307}
2308#endif /* !defined(__MIG_check__Request__mach_vm_page_info_t__defined) */
2309#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2310#endif /* ( __MigTypeCheck ) */
2311
2312
2313/* Routine mach_vm_page_info */
2314mig_internal novalue _Xmach_vm_page_info
2315 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2316 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2317 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2318{
2319
2320#ifdef __MigPackStructs
2321#pragma pack(push, 4)
2322#endif
2323 typedef struct {
2324 NDR_record_t NDR;
2325 mach_vm_address_t address;
2326 vm_page_info_flavor_t flavor;
2327 mach_msg_type_number_t infoCnt;
2328 mach_msg_trailer_t trailer;
2329 char padding[0]; /* Avoid generating empty UData structs */
2330 } RequestU __attribute__((unused));
2331#ifdef __MigPackStructs
2332#pragma pack(pop)
2333#endif
2334 typedef __RequestKData__mach_vm_page_info_t RequestK;
2335 typedef __RequestUData__mach_vm_page_info_t __RequestU;
2336 typedef __ReplyKData__mach_vm_page_info_t ReplyK __attribute__((unused));
2337 typedef __ReplyUData__mach_vm_page_info_t ReplyU __attribute__((unused));
2338 typedef __Reply__mach_vm_page_info_t Reply __attribute__((unused));
2339 typedef __Request__mach_vm_page_info_t __Request __attribute__((unused));
2340
2341 /*
2342 * typedef struct {
2343 * mach_msg_header_t Head;
2344 * NDR_record_t NDR;
2345 * kern_return_t RetCode;
2346 * } mig_reply_error_t;
2347 */
2348
2349 RequestK *InKP = (RequestK *) InHeadP;
2350 RequestU *In0UP = (RequestU *) InDataP;
2351 ReplyK *OutKP = (ReplyK *) OutHeadP;
2352 ReplyU *OutUP = (ReplyU *) OutDataP;
2353 (void)OutUP;
2354#ifdef __MIG_check__Request__mach_vm_page_info_t__defined
2355 kern_return_t check_result;
2356#endif /* __MIG_check__Request__mach_vm_page_info_t__defined */
2357
2358#if __MigKernelSpecificCode
2359#else
2360#endif /* __MigKernelSpecificCode */
2361 vm_map_read_t target_task;
2362
2363 __DeclareRcvRpc(4819, "mach_vm_page_info")
2364 __BeforeRcvRpc(4819, "mach_vm_page_info")
2365
2366#if defined(__MIG_check__Request__mach_vm_page_info_t__defined)
2367 check_result = __MIG_check__Request__mach_vm_page_info_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2368 if (check_result != MACH_MSG_SUCCESS)
2369 { MIG_RETURN_ERROR(OutKP, check_result); }
2370#endif /* defined(__MIG_check__Request__mach_vm_page_info_t__defined) */
2371
2372 target_task = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
2373
2374 OutUP->infoCnt = 32;
2375 if (In0UP->infoCnt < OutUP->infoCnt)
2376 OutUP->infoCnt = In0UP->infoCnt;
2377
2378 OutUP->RetCode = mach_vm_page_info(target_task, address: In0UP->address, flavor: In0UP->flavor, info: OutUP->info, infoCnt: &OutUP->infoCnt);
2379 vm_map_read_deallocate(map: target_task);
2380 if (OutUP->RetCode != KERN_SUCCESS) {
2381 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
2382 }
2383#if __MigKernelSpecificCode
2384#endif /* __MigKernelSpecificCode */
2385
2386 OutUP->NDR = NDR_record;
2387
2388 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply) - 128) + (((4 * OutUP->infoCnt)));
2389
2390 __AfterRcvRpc(4819, "mach_vm_page_info")
2391}
2392
2393#if ( __MigTypeCheck )
2394#if __MIG_check__Request__mach_vm_subsystem__
2395#if !defined(__MIG_check__Request__mach_vm_page_range_query_t__defined)
2396#define __MIG_check__Request__mach_vm_page_range_query_t__defined
2397
2398mig_internal kern_return_t __MIG_check__Request__mach_vm_page_range_query_t(
2399 __attribute__((__unused__)) __RequestKData__mach_vm_page_range_query_t *InKP,
2400 __attribute__((__unused__)) __RequestUData__mach_vm_page_range_query_t *In0UP,
2401 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2402{
2403
2404 typedef __Request__mach_vm_page_range_query_t __Request;
2405 typedef __RequestUData__mach_vm_page_range_query_t __RequestU __attribute__((unused));
2406#if __MigTypeCheck
2407 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2408 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2409 return MIG_BAD_ARGUMENTS;
2410#endif /* __MigTypeCheck */
2411
2412 return MACH_MSG_SUCCESS;
2413}
2414#endif /* !defined(__MIG_check__Request__mach_vm_page_range_query_t__defined) */
2415#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2416#endif /* ( __MigTypeCheck ) */
2417
2418
2419/* Routine mach_vm_page_range_query */
2420mig_internal novalue _Xmach_vm_page_range_query
2421 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2422 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2423 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2424{
2425
2426#ifdef __MigPackStructs
2427#pragma pack(push, 4)
2428#endif
2429 typedef struct {
2430 NDR_record_t NDR;
2431 mach_vm_offset_t address;
2432 mach_vm_size_t size;
2433 mach_vm_address_t dispositions;
2434 mach_vm_size_t dispositions_count;
2435 mach_msg_trailer_t trailer;
2436 char padding[0]; /* Avoid generating empty UData structs */
2437 } RequestU __attribute__((unused));
2438#ifdef __MigPackStructs
2439#pragma pack(pop)
2440#endif
2441 typedef __RequestKData__mach_vm_page_range_query_t RequestK;
2442 typedef __RequestUData__mach_vm_page_range_query_t __RequestU;
2443 typedef __ReplyKData__mach_vm_page_range_query_t ReplyK __attribute__((unused));
2444 typedef __ReplyUData__mach_vm_page_range_query_t ReplyU __attribute__((unused));
2445 typedef __Reply__mach_vm_page_range_query_t Reply __attribute__((unused));
2446 typedef __Request__mach_vm_page_range_query_t __Request __attribute__((unused));
2447
2448 /*
2449 * typedef struct {
2450 * mach_msg_header_t Head;
2451 * NDR_record_t NDR;
2452 * kern_return_t RetCode;
2453 * } mig_reply_error_t;
2454 */
2455
2456 RequestK *InKP = (RequestK *) InHeadP;
2457 RequestU *In0UP = (RequestU *) InDataP;
2458 ReplyK *OutKP = (ReplyK *) OutHeadP;
2459 ReplyU *OutUP = (ReplyU *) OutDataP;
2460 (void)OutUP;
2461#ifdef __MIG_check__Request__mach_vm_page_range_query_t__defined
2462 kern_return_t check_result;
2463#endif /* __MIG_check__Request__mach_vm_page_range_query_t__defined */
2464
2465#if __MigKernelSpecificCode
2466#else
2467#endif /* __MigKernelSpecificCode */
2468 vm_map_read_t target_map;
2469
2470 __DeclareRcvRpc(4820, "mach_vm_page_range_query")
2471 __BeforeRcvRpc(4820, "mach_vm_page_range_query")
2472
2473#if defined(__MIG_check__Request__mach_vm_page_range_query_t__defined)
2474 check_result = __MIG_check__Request__mach_vm_page_range_query_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2475 if (check_result != MACH_MSG_SUCCESS)
2476 { MIG_RETURN_ERROR(OutKP, check_result); }
2477#endif /* defined(__MIG_check__Request__mach_vm_page_range_query_t__defined) */
2478
2479 target_map = convert_port_to_map_read(port: InKP->Head.msgh_request_port);
2480
2481 OutUP->RetCode = mach_vm_page_range_query(target_map, address: In0UP->address, size: In0UP->size, dispositions: In0UP->dispositions, dispositions_count: &In0UP->dispositions_count);
2482 vm_map_read_deallocate(map: target_map);
2483 if (OutUP->RetCode != KERN_SUCCESS) {
2484 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
2485 }
2486#if __MigKernelSpecificCode
2487#endif /* __MigKernelSpecificCode */
2488
2489 OutUP->NDR = NDR_record;
2490
2491
2492 OutUP->dispositions_count = In0UP->dispositions_count;
2493
2494 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
2495 __AfterRcvRpc(4820, "mach_vm_page_range_query")
2496}
2497
2498#if ( __MigTypeCheck )
2499#if __MIG_check__Request__mach_vm_subsystem__
2500#if !defined(__MIG_check__Request__mach_vm_remap_new_external_t__defined)
2501#define __MIG_check__Request__mach_vm_remap_new_external_t__defined
2502
2503mig_internal kern_return_t __MIG_check__Request__mach_vm_remap_new_external_t(
2504 __attribute__((__unused__)) __RequestKData__mach_vm_remap_new_external_t *InKP,
2505 __attribute__((__unused__)) __RequestUData__mach_vm_remap_new_external_t *In0UP,
2506 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2507{
2508
2509 typedef __Request__mach_vm_remap_new_external_t __Request;
2510 typedef __RequestUData__mach_vm_remap_new_external_t __RequestU __attribute__((unused));
2511#if __MigTypeCheck
2512 if (!(InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2513 (InKP->msgh_body.msgh_descriptor_count != 1) ||
2514 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2515 return MIG_BAD_ARGUMENTS;
2516#endif /* __MigTypeCheck */
2517
2518#if __MigTypeCheck
2519 if (InKP->src_tport.type != MACH_MSG_PORT_DESCRIPTOR ||
2520 InKP->src_tport.disposition != 17)
2521 return MIG_TYPE_ERROR;
2522#endif /* __MigTypeCheck */
2523
2524 return MACH_MSG_SUCCESS;
2525}
2526#endif /* !defined(__MIG_check__Request__mach_vm_remap_new_external_t__defined) */
2527#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2528#endif /* ( __MigTypeCheck ) */
2529
2530
2531/* Routine mach_vm_remap_new_external */
2532mig_internal novalue _Xmach_vm_remap_new_external
2533 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2534 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2535 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2536{
2537
2538#ifdef __MigPackStructs
2539#pragma pack(push, 4)
2540#endif
2541 typedef struct {
2542 NDR_record_t NDR;
2543 mach_vm_address_t target_address;
2544 mach_vm_size_t size;
2545 mach_vm_offset_t mask;
2546 int flags;
2547 mach_vm_address_t src_address;
2548 boolean_t copy;
2549 vm_prot_t cur_protection;
2550 vm_prot_t max_protection;
2551 vm_inherit_t inheritance;
2552 mach_msg_trailer_t trailer;
2553 char padding[0]; /* Avoid generating empty UData structs */
2554 } RequestU __attribute__((unused));
2555#ifdef __MigPackStructs
2556#pragma pack(pop)
2557#endif
2558 typedef __RequestKData__mach_vm_remap_new_external_t RequestK;
2559 typedef __RequestUData__mach_vm_remap_new_external_t __RequestU;
2560 typedef __ReplyKData__mach_vm_remap_new_external_t ReplyK __attribute__((unused));
2561 typedef __ReplyUData__mach_vm_remap_new_external_t ReplyU __attribute__((unused));
2562 typedef __Reply__mach_vm_remap_new_external_t Reply __attribute__((unused));
2563 typedef __Request__mach_vm_remap_new_external_t __Request __attribute__((unused));
2564
2565 /*
2566 * typedef struct {
2567 * mach_msg_header_t Head;
2568 * NDR_record_t NDR;
2569 * kern_return_t RetCode;
2570 * } mig_reply_error_t;
2571 */
2572
2573 RequestK *InKP = (RequestK *) InHeadP;
2574 RequestU *In0UP = (RequestU *) InDataP;
2575 ReplyK *OutKP = (ReplyK *) OutHeadP;
2576 ReplyU *OutUP = (ReplyU *) OutDataP;
2577 (void)OutUP;
2578#ifdef __MIG_check__Request__mach_vm_remap_new_external_t__defined
2579 kern_return_t check_result;
2580#endif /* __MIG_check__Request__mach_vm_remap_new_external_t__defined */
2581
2582#if __MigKernelSpecificCode
2583#else
2584#endif /* __MigKernelSpecificCode */
2585 vm_map_t target_task;
2586
2587 __DeclareRcvRpc(4821, "mach_vm_remap_new_external")
2588 __BeforeRcvRpc(4821, "mach_vm_remap_new_external")
2589
2590#if defined(__MIG_check__Request__mach_vm_remap_new_external_t__defined)
2591 check_result = __MIG_check__Request__mach_vm_remap_new_external_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2592 if (check_result != MACH_MSG_SUCCESS)
2593 { MIG_RETURN_ERROR(OutKP, check_result); }
2594#endif /* defined(__MIG_check__Request__mach_vm_remap_new_external_t__defined) */
2595
2596 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
2597
2598 OutUP->RetCode = mach_vm_remap_new_external(target_task, target_address: &In0UP->target_address, size: In0UP->size, mask: In0UP->mask, flags: In0UP->flags, src_tport: InKP->src_tport.name, src_address: In0UP->src_address, copy: In0UP->copy, cur_protection: &In0UP->cur_protection, max_protection: &In0UP->max_protection, inheritance: In0UP->inheritance);
2599 vm_map_deallocate(map: target_task);
2600 if (OutUP->RetCode != KERN_SUCCESS) {
2601 MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
2602 }
2603#if __MigKernelSpecificCode
2604#endif /* __MigKernelSpecificCode */
2605
2606 OutUP->NDR = NDR_record;
2607
2608
2609 OutUP->target_address = In0UP->target_address;
2610
2611 OutUP->cur_protection = In0UP->cur_protection;
2612
2613 OutUP->max_protection = In0UP->max_protection;
2614
2615 OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
2616 __AfterRcvRpc(4821, "mach_vm_remap_new_external")
2617}
2618
2619#if ( __MigTypeCheck )
2620#if __MIG_check__Request__mach_vm_subsystem__
2621#if !defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t__defined)
2622#define __MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t__defined
2623
2624mig_internal kern_return_t __MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t(
2625 __attribute__((__unused__)) __RequestKData__mach_vm_deferred_reclamation_buffer_init_t *InKP,
2626 __attribute__((__unused__)) __RequestUData__mach_vm_deferred_reclamation_buffer_init_t *In0UP,
2627 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2628{
2629
2630 typedef __Request__mach_vm_deferred_reclamation_buffer_init_t __Request;
2631 typedef __RequestUData__mach_vm_deferred_reclamation_buffer_init_t __RequestU __attribute__((unused));
2632#if __MigTypeCheck
2633 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2634 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2635 return MIG_BAD_ARGUMENTS;
2636#endif /* __MigTypeCheck */
2637
2638 return MACH_MSG_SUCCESS;
2639}
2640#endif /* !defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t__defined) */
2641#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2642#endif /* ( __MigTypeCheck ) */
2643
2644
2645/* Routine mach_vm_deferred_reclamation_buffer_init */
2646mig_internal novalue _Xmach_vm_deferred_reclamation_buffer_init
2647 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2648 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2649 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2650{
2651
2652#ifdef __MigPackStructs
2653#pragma pack(push, 4)
2654#endif
2655 typedef struct {
2656 NDR_record_t NDR;
2657 mach_vm_offset_t address;
2658 mach_vm_size_t size;
2659 mach_msg_trailer_t trailer;
2660 char padding[0]; /* Avoid generating empty UData structs */
2661 } RequestU __attribute__((unused));
2662#ifdef __MigPackStructs
2663#pragma pack(pop)
2664#endif
2665 typedef __RequestKData__mach_vm_deferred_reclamation_buffer_init_t RequestK;
2666 typedef __RequestUData__mach_vm_deferred_reclamation_buffer_init_t __RequestU;
2667 typedef __ReplyKData__mach_vm_deferred_reclamation_buffer_init_t ReplyK __attribute__((unused));
2668 typedef __ReplyUData__mach_vm_deferred_reclamation_buffer_init_t ReplyU __attribute__((unused));
2669 typedef __Reply__mach_vm_deferred_reclamation_buffer_init_t Reply __attribute__((unused));
2670 typedef __Request__mach_vm_deferred_reclamation_buffer_init_t __Request __attribute__((unused));
2671
2672 /*
2673 * typedef struct {
2674 * mach_msg_header_t Head;
2675 * NDR_record_t NDR;
2676 * kern_return_t RetCode;
2677 * } mig_reply_error_t;
2678 */
2679
2680 RequestK *InKP = (RequestK *) InHeadP;
2681 RequestU *In0UP = (RequestU *) InDataP;
2682 ReplyK *OutKP = (ReplyK *) OutHeadP;
2683 ReplyU *OutUP = (ReplyU *) OutDataP;
2684 (void)OutUP;
2685#ifdef __MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t__defined
2686 kern_return_t check_result;
2687#endif /* __MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t__defined */
2688
2689#if __MigKernelSpecificCode
2690#else
2691#endif /* __MigKernelSpecificCode */
2692 task_t target_task;
2693
2694 __DeclareRcvRpc(4822, "mach_vm_deferred_reclamation_buffer_init")
2695 __BeforeRcvRpc(4822, "mach_vm_deferred_reclamation_buffer_init")
2696
2697#if defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t__defined)
2698 check_result = __MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2699 if (check_result != MACH_MSG_SUCCESS)
2700 { MIG_RETURN_ERROR(OutKP, check_result); }
2701#endif /* defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_init_t__defined) */
2702
2703 target_task = convert_port_to_task_mig(port: InKP->Head.msgh_request_port);
2704
2705 OutUP->RetCode = mach_vm_deferred_reclamation_buffer_init(target_task, address: In0UP->address, size: In0UP->size);
2706 task_deallocate_mig(target_task);
2707#if __MigKernelSpecificCode
2708#endif /* __MigKernelSpecificCode */
2709
2710 OutUP->NDR = NDR_record;
2711
2712
2713 __AfterRcvRpc(4822, "mach_vm_deferred_reclamation_buffer_init")
2714}
2715
2716#if ( __MigTypeCheck )
2717#if __MIG_check__Request__mach_vm_subsystem__
2718#if !defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t__defined)
2719#define __MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t__defined
2720
2721mig_internal kern_return_t __MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t(
2722 __attribute__((__unused__)) __RequestKData__mach_vm_deferred_reclamation_buffer_synchronize_t *InKP,
2723 __attribute__((__unused__)) __RequestUData__mach_vm_deferred_reclamation_buffer_synchronize_t *In0UP,
2724 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2725{
2726
2727 typedef __Request__mach_vm_deferred_reclamation_buffer_synchronize_t __Request;
2728 typedef __RequestUData__mach_vm_deferred_reclamation_buffer_synchronize_t __RequestU __attribute__((unused));
2729#if __MigTypeCheck
2730 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2731 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2732 return MIG_BAD_ARGUMENTS;
2733#endif /* __MigTypeCheck */
2734
2735 return MACH_MSG_SUCCESS;
2736}
2737#endif /* !defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t__defined) */
2738#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2739#endif /* ( __MigTypeCheck ) */
2740
2741
2742/* Routine mach_vm_deferred_reclamation_buffer_synchronize */
2743mig_internal novalue _Xmach_vm_deferred_reclamation_buffer_synchronize
2744 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2745 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2746 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2747{
2748
2749#ifdef __MigPackStructs
2750#pragma pack(push, 4)
2751#endif
2752 typedef struct {
2753 NDR_record_t NDR;
2754 mach_vm_size_t num_entries_to_reclaim;
2755 mach_msg_trailer_t trailer;
2756 char padding[0]; /* Avoid generating empty UData structs */
2757 } RequestU __attribute__((unused));
2758#ifdef __MigPackStructs
2759#pragma pack(pop)
2760#endif
2761 typedef __RequestKData__mach_vm_deferred_reclamation_buffer_synchronize_t RequestK;
2762 typedef __RequestUData__mach_vm_deferred_reclamation_buffer_synchronize_t __RequestU;
2763 typedef __ReplyKData__mach_vm_deferred_reclamation_buffer_synchronize_t ReplyK __attribute__((unused));
2764 typedef __ReplyUData__mach_vm_deferred_reclamation_buffer_synchronize_t ReplyU __attribute__((unused));
2765 typedef __Reply__mach_vm_deferred_reclamation_buffer_synchronize_t Reply __attribute__((unused));
2766 typedef __Request__mach_vm_deferred_reclamation_buffer_synchronize_t __Request __attribute__((unused));
2767
2768 /*
2769 * typedef struct {
2770 * mach_msg_header_t Head;
2771 * NDR_record_t NDR;
2772 * kern_return_t RetCode;
2773 * } mig_reply_error_t;
2774 */
2775
2776 RequestK *InKP = (RequestK *) InHeadP;
2777 RequestU *In0UP = (RequestU *) InDataP;
2778 ReplyK *OutKP = (ReplyK *) OutHeadP;
2779 ReplyU *OutUP = (ReplyU *) OutDataP;
2780 (void)OutUP;
2781#ifdef __MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t__defined
2782 kern_return_t check_result;
2783#endif /* __MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t__defined */
2784
2785#if __MigKernelSpecificCode
2786#else
2787#endif /* __MigKernelSpecificCode */
2788 task_t target_task;
2789
2790 __DeclareRcvRpc(4823, "mach_vm_deferred_reclamation_buffer_synchronize")
2791 __BeforeRcvRpc(4823, "mach_vm_deferred_reclamation_buffer_synchronize")
2792
2793#if defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t__defined)
2794 check_result = __MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2795 if (check_result != MACH_MSG_SUCCESS)
2796 { MIG_RETURN_ERROR(OutKP, check_result); }
2797#endif /* defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_synchronize_t__defined) */
2798
2799 target_task = convert_port_to_task_mig(port: InKP->Head.msgh_request_port);
2800
2801 OutUP->RetCode = mach_vm_deferred_reclamation_buffer_synchronize(target_task, num_entries_to_reclaim: In0UP->num_entries_to_reclaim);
2802 task_deallocate_mig(target_task);
2803#if __MigKernelSpecificCode
2804#endif /* __MigKernelSpecificCode */
2805
2806 OutUP->NDR = NDR_record;
2807
2808
2809 __AfterRcvRpc(4823, "mach_vm_deferred_reclamation_buffer_synchronize")
2810}
2811
2812#if ( __MigTypeCheck )
2813#if __MIG_check__Request__mach_vm_subsystem__
2814#if !defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t__defined)
2815#define __MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t__defined
2816
2817mig_internal kern_return_t __MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t(
2818 __attribute__((__unused__)) __RequestKData__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t *InKP,
2819 __attribute__((__unused__)) __RequestUData__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t *In0UP,
2820 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2821{
2822
2823 typedef __Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t __Request;
2824 typedef __RequestUData__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t __RequestU __attribute__((unused));
2825#if __MigTypeCheck
2826 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2827 (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
2828 return MIG_BAD_ARGUMENTS;
2829#endif /* __MigTypeCheck */
2830
2831 return MACH_MSG_SUCCESS;
2832}
2833#endif /* !defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t__defined) */
2834#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2835#endif /* ( __MigTypeCheck ) */
2836
2837
2838/* Routine mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes */
2839mig_internal novalue _Xmach_vm_deferred_reclamation_buffer_update_reclaimable_bytes
2840 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2841 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2842 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2843{
2844
2845#ifdef __MigPackStructs
2846#pragma pack(push, 4)
2847#endif
2848 typedef struct {
2849 NDR_record_t NDR;
2850 mach_vm_size_t reclaimable_bytes;
2851 mach_msg_trailer_t trailer;
2852 char padding[0]; /* Avoid generating empty UData structs */
2853 } RequestU __attribute__((unused));
2854#ifdef __MigPackStructs
2855#pragma pack(pop)
2856#endif
2857 typedef __RequestKData__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t RequestK;
2858 typedef __RequestUData__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t __RequestU;
2859 typedef __ReplyKData__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t ReplyK __attribute__((unused));
2860 typedef __ReplyUData__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t ReplyU __attribute__((unused));
2861 typedef __Reply__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t Reply __attribute__((unused));
2862 typedef __Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t __Request __attribute__((unused));
2863
2864 /*
2865 * typedef struct {
2866 * mach_msg_header_t Head;
2867 * NDR_record_t NDR;
2868 * kern_return_t RetCode;
2869 * } mig_reply_error_t;
2870 */
2871
2872 RequestK *InKP = (RequestK *) InHeadP;
2873 RequestU *In0UP = (RequestU *) InDataP;
2874 ReplyK *OutKP = (ReplyK *) OutHeadP;
2875 ReplyU *OutUP = (ReplyU *) OutDataP;
2876 (void)OutUP;
2877#ifdef __MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t__defined
2878 kern_return_t check_result;
2879#endif /* __MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t__defined */
2880
2881#if __MigKernelSpecificCode
2882#else
2883#endif /* __MigKernelSpecificCode */
2884 task_t target_task;
2885
2886 __DeclareRcvRpc(4824, "mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes")
2887 __BeforeRcvRpc(4824, "mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes")
2888
2889#if defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t__defined)
2890 check_result = __MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
2891 if (check_result != MACH_MSG_SUCCESS)
2892 { MIG_RETURN_ERROR(OutKP, check_result); }
2893#endif /* defined(__MIG_check__Request__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t__defined) */
2894
2895 target_task = convert_port_to_task_mig(port: InKP->Head.msgh_request_port);
2896
2897 OutUP->RetCode = mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes(target_task, reclaimable_bytes: In0UP->reclaimable_bytes);
2898 task_deallocate_mig(target_task);
2899#if __MigKernelSpecificCode
2900#endif /* __MigKernelSpecificCode */
2901
2902 OutUP->NDR = NDR_record;
2903
2904
2905 __AfterRcvRpc(4824, "mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes")
2906}
2907
2908#if ( __MigTypeCheck )
2909#if __MIG_check__Request__mach_vm_subsystem__
2910#if !defined(__MIG_check__Request__mach_vm_range_create_t__defined)
2911#define __MIG_check__Request__mach_vm_range_create_t__defined
2912
2913mig_internal kern_return_t __MIG_check__Request__mach_vm_range_create_t(
2914 __attribute__((__unused__)) __RequestKData__mach_vm_range_create_t *InKP,
2915 __attribute__((__unused__)) __RequestUData__mach_vm_range_create_t *In0UP,
2916 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
2917{
2918
2919 typedef __Request__mach_vm_range_create_t __Request;
2920 typedef __RequestUData__mach_vm_range_create_t __RequestU __attribute__((unused));
2921#if __MigTypeCheck
2922 unsigned int msgh_size;
2923#endif /* __MigTypeCheck */
2924
2925#if __MigTypeCheck
2926 msgh_size = InKP->Head.msgh_size;
2927 if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
2928 (msgh_size < (mach_msg_size_t)(sizeof(__Request) - 1024)) || (msgh_size > (mach_msg_size_t)sizeof(__Request)))
2929 return MIG_BAD_ARGUMENTS;
2930#endif /* __MigTypeCheck */
2931
2932#if defined(__NDR_convert__int_rep__Request__mach_vm_range_create_t__recipesCnt__defined)
2933 if (In0UP->NDR.int_rep != NDR_record.int_rep)
2934 __NDR_convert__int_rep__Request__mach_vm_range_create_t__recipesCnt(&In0UP->recipesCnt, In0UP->NDR.int_rep);
2935#endif /* __NDR_convert__int_rep__Request__mach_vm_range_create_t__recipesCnt__defined */
2936#if __MigTypeCheck
2937 if (In0UP->recipesCnt > 1024)
2938 return MIG_BAD_ARGUMENTS;
2939 if (((msgh_size - (mach_msg_size_t)(sizeof(__Request) - 1024)) < In0UP->recipesCnt) ||
2940 (msgh_size != (mach_msg_size_t)(sizeof(__Request) - 1024) + _WALIGN_(In0UP->recipesCnt)))
2941 return MIG_BAD_ARGUMENTS;
2942#endif /* __MigTypeCheck */
2943
2944 return MACH_MSG_SUCCESS;
2945}
2946#endif /* !defined(__MIG_check__Request__mach_vm_range_create_t__defined) */
2947#endif /* __MIG_check__Request__mach_vm_subsystem__ */
2948#endif /* ( __MigTypeCheck ) */
2949
2950
2951/* Routine mach_vm_range_create */
2952mig_internal novalue _Xmach_vm_range_create
2953 (mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
2954 __attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
2955 mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
2956{
2957
2958#ifdef __MigPackStructs
2959#pragma pack(push, 4)
2960#endif
2961 typedef struct {
2962 NDR_record_t NDR;
2963 mach_vm_range_flavor_t flavor;
2964 mach_msg_type_number_t recipesCnt;
2965 uint8_t recipes[1024];
2966 mach_msg_trailer_t trailer;
2967 char padding[0]; /* Avoid generating empty UData structs */
2968 } RequestU __attribute__((unused));
2969#ifdef __MigPackStructs
2970#pragma pack(pop)
2971#endif
2972 typedef __RequestKData__mach_vm_range_create_t RequestK;
2973 typedef __RequestUData__mach_vm_range_create_t __RequestU;
2974 typedef __ReplyKData__mach_vm_range_create_t ReplyK __attribute__((unused));
2975 typedef __ReplyUData__mach_vm_range_create_t ReplyU __attribute__((unused));
2976 typedef __Reply__mach_vm_range_create_t Reply __attribute__((unused));
2977 typedef __Request__mach_vm_range_create_t __Request __attribute__((unused));
2978
2979 /*
2980 * typedef struct {
2981 * mach_msg_header_t Head;
2982 * NDR_record_t NDR;
2983 * kern_return_t RetCode;
2984 * } mig_reply_error_t;
2985 */
2986
2987 RequestK *InKP = (RequestK *) InHeadP;
2988 RequestU *In0UP = (RequestU *) InDataP;
2989 ReplyK *OutKP = (ReplyK *) OutHeadP;
2990 ReplyU *OutUP = (ReplyU *) OutDataP;
2991 (void)OutUP;
2992#ifdef __MIG_check__Request__mach_vm_range_create_t__defined
2993 kern_return_t check_result;
2994#endif /* __MIG_check__Request__mach_vm_range_create_t__defined */
2995
2996#if __MigKernelSpecificCode
2997#else
2998#endif /* __MigKernelSpecificCode */
2999 vm_map_t target_task;
3000
3001 __DeclareRcvRpc(4825, "mach_vm_range_create")
3002 __BeforeRcvRpc(4825, "mach_vm_range_create")
3003
3004#if defined(__MIG_check__Request__mach_vm_range_create_t__defined)
3005 check_result = __MIG_check__Request__mach_vm_range_create_t(InKP: (RequestK *)InKP, In0UP: (__RequestU *)In0UP, InTrailerP);
3006 if (check_result != MACH_MSG_SUCCESS)
3007 { MIG_RETURN_ERROR(OutKP, check_result); }
3008#endif /* defined(__MIG_check__Request__mach_vm_range_create_t__defined) */
3009
3010 target_task = convert_port_to_map(port: InKP->Head.msgh_request_port);
3011
3012 OutUP->RetCode = mach_vm_range_create(target_task, flavor: In0UP->flavor, recipes: In0UP->recipes, recipesCnt: In0UP->recipesCnt);
3013 vm_map_deallocate(map: target_task);
3014#if __MigKernelSpecificCode
3015#endif /* __MigKernelSpecificCode */
3016
3017 OutUP->NDR = NDR_record;
3018
3019
3020 __AfterRcvRpc(4825, "mach_vm_range_create")
3021}
3022
3023
3024
3025/* Description of this kernel subsystem, for use in direct RPC */
3026const struct mach_vm_subsystem mach_vm_subsystem = {
3027 mach_vm_server_routine,
3028 4800,
3029 4826,
3030 (mach_msg_size_t)sizeof(union __ReplyUnion__mach_vm_subsystem),
3031 (vm_address_t)0,
3032 {
3033 { (mig_impl_routine_t) 0,
3034 (mig_stub_kern_routine_t) _Xmach_vm_allocate_external, 5, 0, 0, (mach_msg_size_t)sizeof(__Reply__mach_vm_allocate_external_t)},
3035 { .impl_routine: (mig_impl_routine_t) 0,
3036 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_deallocate, .argc: 5, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_deallocate_t)},
3037 { .impl_routine: (mig_impl_routine_t) 0,
3038 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_protect, .argc: 7, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_protect_t)},
3039 { .impl_routine: (mig_impl_routine_t) 0,
3040 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_inherit, .argc: 6, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_inherit_t)},
3041 { .impl_routine: (mig_impl_routine_t) 0,
3042 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_read, .argc: 7, .descr_count: 0, .reply_descr_count: 1, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_read_t)},
3043 { .impl_routine: (mig_impl_routine_t) 0,
3044 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_read_list, .argc: 3, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_read_list_t)},
3045 { .impl_routine: (mig_impl_routine_t) 0,
3046 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_write, .argc: 5, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_write_t)},
3047 { .impl_routine: (mig_impl_routine_t) 0,
3048 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_copy, .argc: 7, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_copy_t)},
3049 { .impl_routine: (mig_impl_routine_t) 0,
3050 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_read_overwrite, .argc: 8, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_read_overwrite_t)},
3051 { .impl_routine: (mig_impl_routine_t) 0,
3052 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_msync, .argc: 6, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_msync_t)},
3053 { .impl_routine: (mig_impl_routine_t) 0,
3054 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_behavior_set, .argc: 6, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_behavior_set_t)},
3055 { .impl_routine: (mig_impl_routine_t) 0,
3056 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_map_external, .argc: 14, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_map_external_t)},
3057 { .impl_routine: (mig_impl_routine_t) 0,
3058 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_machine_attribute, .argc: 7, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_machine_attribute_t)},
3059 { .impl_routine: (mig_impl_routine_t) 0,
3060 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_remap_external, .argc: 14, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_remap_external_t)},
3061 { .impl_routine: (mig_impl_routine_t) 0,
3062 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_page_query, .argc: 5, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_page_query_t)},
3063 { .impl_routine: (mig_impl_routine_t) 0,
3064 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_region_recurse, .argc: 6, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_region_recurse_t)},
3065 { .impl_routine: (mig_impl_routine_t) 0,
3066 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_region, .argc: 7, .descr_count: 0, .reply_descr_count: 1, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_region_t)},
3067 { .impl_routine: (mig_impl_routine_t) 0,
3068 .kstub_routine: (mig_stub_kern_routine_t) _X_mach_make_memory_entry, .argc: 7, .descr_count: 0, .reply_descr_count: 1, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply___mach_make_memory_entry_t)},
3069 { .impl_routine: (mig_impl_routine_t) 0,
3070 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_purgable_control_external, .argc: 5, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_purgable_control_external_t)},
3071 { .impl_routine: (mig_impl_routine_t) 0,
3072 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_page_info, .argc: 6, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_page_info_t)},
3073 { .impl_routine: (mig_impl_routine_t) 0,
3074 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_page_range_query, .argc: 8, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_page_range_query_t)},
3075 { .impl_routine: (mig_impl_routine_t) 0,
3076 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_remap_new_external, .argc: 14, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_remap_new_external_t)},
3077 { .impl_routine: (mig_impl_routine_t) 0,
3078 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_deferred_reclamation_buffer_init, .argc: 5, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_deferred_reclamation_buffer_init_t)},
3079 { .impl_routine: (mig_impl_routine_t) 0,
3080 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_deferred_reclamation_buffer_synchronize, .argc: 3, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_deferred_reclamation_buffer_synchronize_t)},
3081 { .impl_routine: (mig_impl_routine_t) 0,
3082 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_deferred_reclamation_buffer_update_reclaimable_bytes, .argc: 3, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes_t)},
3083 { .impl_routine: (mig_impl_routine_t) 0,
3084 .kstub_routine: (mig_stub_kern_routine_t) _Xmach_vm_range_create, .argc: 4, .descr_count: 0, .reply_descr_count: 0, .max_reply_msg: (mach_msg_size_t)sizeof(__Reply__mach_vm_range_create_t)},
3085 }
3086};
3087
3088mig_external boolean_t mach_vm_server
3089 (mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP)
3090{
3091 /*
3092 * typedef struct {
3093 * mach_msg_header_t Head;
3094 * NDR_record_t NDR;
3095 * kern_return_t RetCode;
3096 * } mig_reply_error_t;
3097 */
3098
3099 mig_kern_routine_t routine;
3100
3101 OutHeadP->msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REPLY(InHeadP->msgh_bits), 0);
3102 OutHeadP->msgh_remote_port = InHeadP->msgh_reply_port;
3103 /* Minimal size: routine() will update it if different */
3104 OutHeadP->msgh_size = (mach_msg_size_t)sizeof(mig_reply_error_t);
3105 OutHeadP->msgh_local_port = MACH_PORT_NULL;
3106 OutHeadP->msgh_id = InHeadP->msgh_id + 100;
3107 OutHeadP->msgh_reserved = 0;
3108
3109 if ((InHeadP->msgh_id > 4825) || (InHeadP->msgh_id < 4800) ||
3110 ((routine = mach_vm_subsystem.kroutine[InHeadP->msgh_id - 4800].kstub_routine) == 0)) {
3111 ((mig_reply_error_t *)OutHeadP)->NDR = NDR_record;
3112 ((mig_reply_error_t *)OutHeadP)->RetCode = MIG_BAD_ID;
3113 return FALSE;
3114 }
3115 (*routine) (InHeadP, InDataP, InTrailerP, OutHeadP, OutDataP);
3116 return TRUE;
3117}
3118
3119mig_external mig_kern_routine_t mach_vm_server_routine
3120 (mach_msg_header_t *InHeadP)
3121{
3122 int msgh_id;
3123
3124 msgh_id = InHeadP->msgh_id - 4800;
3125
3126 if ((msgh_id > 25) || (msgh_id < 0))
3127 return 0;
3128
3129 return mach_vm_subsystem.kroutine[msgh_id].kstub_routine;
3130}
3131