/*
 * IDENTIFICATION:
 * stub generated by bootstrap_cmds-133
 * OPTIONS:
 * KernelServer
 */

/* Module memory_entry */

#define __MIG_check__Request__memory_entry_subsystem__ 1

#include "memory_entry_server.h"

#ifndef mig_internal
#define mig_internal static __inline__
#endif /* mig_internal */

#ifndef mig_external
#define mig_external
#endif /* mig_external */

#if !defined(__MigTypeCheck) && defined(TypeCheck)
#define __MigTypeCheck TypeCheck /* Legacy setting */
#endif /* !defined(__MigTypeCheck) */

#if !defined(__MigKernelSpecificCode) && defined(_MIG_KERNEL_SPECIFIC_CODE_)
#define __MigKernelSpecificCode _MIG_KERNEL_SPECIFIC_CODE_ /* Legacy setting */
#endif /* !defined(__MigKernelSpecificCode) */

#ifndef LimitCheck
#define LimitCheck 0
#endif /* LimitCheck */

#ifndef min
#define min(a,b) ( ((a) < (b))? (a): (b) )
#endif /* min */

#if !defined(_WALIGN_)
#define _WALIGN_(x) (((x) + 3) & ~3)
#endif /* !defined(_WALIGN_) */

#if !defined(_WALIGNSZ_)
#define _WALIGNSZ_(x) _WALIGN_(sizeof(x))
#endif /* !defined(_WALIGNSZ_) */

#ifndef UseStaticTemplates
#define UseStaticTemplates 0
#endif /* UseStaticTemplates */

#ifndef MIG_SERVER_ROUTINE
#define MIG_SERVER_ROUTINE
#endif

#ifndef __DeclareRcvRpc
#define __DeclareRcvRpc(_NUM_, _NAME_)
#endif /* __DeclareRcvRpc */

#ifndef __BeforeRcvRpc
#define __BeforeRcvRpc(_NUM_, _NAME_)
#endif /* __BeforeRcvRpc */

#ifndef __AfterRcvRpc
#define __AfterRcvRpc(_NUM_, _NAME_)
#endif /* __AfterRcvRpc */

#ifndef __DeclareRcvSimple
#define __DeclareRcvSimple(_NUM_, _NAME_)
#endif /* __DeclareRcvSimple */

#ifndef __BeforeRcvSimple
#define __BeforeRcvSimple(_NUM_, _NAME_)
#endif /* __BeforeRcvSimple */

#ifndef __AfterRcvSimple
#define __AfterRcvSimple(_NUM_, _NAME_)
#endif /* __AfterRcvSimple */

#define novalue void

#if __MigKernelSpecificCode
#define msgh_request_port msgh_remote_port
#define MACH_MSGH_BITS_REQUEST(bits) MACH_MSGH_BITS_REMOTE(bits)
#define msgh_reply_port msgh_local_port
#define MACH_MSGH_BITS_REPLY(bits) MACH_MSGH_BITS_LOCAL(bits)
#else
#define msgh_request_port msgh_local_port
#define MACH_MSGH_BITS_REQUEST(bits) MACH_MSGH_BITS_LOCAL(bits)
#define msgh_reply_port msgh_remote_port
#define MACH_MSGH_BITS_REPLY(bits) MACH_MSGH_BITS_REMOTE(bits)
#endif /* __MigKernelSpecificCode */

#define MIG_RETURN_ERROR(X, code) {\
	((mig_reply_error_t *)X)->RetCode = code;\
	((mig_reply_error_t *)X)->NDR = NDR_record;\
	return;\
	}
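
/*
 * MIG_RETURN_ERROR rewrites the reply buffer as a minimal mig_reply_error_t
 * (error RetCode plus NDR record) and returns from the dispatch stub early;
 * it is used below whenever a type check or the implementation routine fails.
 */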

/* Forward Declarations */


mig_internal novalue _Xmach_memory_entry_purgable_control
	(mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);

mig_internal novalue _Xmach_memory_entry_access_tracking
	(mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);

mig_internal novalue _Xmach_memory_entry_ownership_from_user
	(mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP);


#if ( __MigTypeCheck )
#if __MIG_check__Request__memory_entry_subsystem__
#if !defined(__MIG_check__Request__mach_memory_entry_purgable_control_t__defined)
#define __MIG_check__Request__mach_memory_entry_purgable_control_t__defined

mig_internal kern_return_t __MIG_check__Request__mach_memory_entry_purgable_control_t(
	__attribute__((__unused__)) __RequestKData__mach_memory_entry_purgable_control_t *InKP,
	__attribute__((__unused__)) __RequestUData__mach_memory_entry_purgable_control_t *In0UP,
	__attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
{

	typedef __Request__mach_memory_entry_purgable_control_t __Request;
	typedef __RequestUData__mach_memory_entry_purgable_control_t __RequestU __attribute__((unused));
#if __MigTypeCheck
	if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
	    (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__mach_memory_entry_purgable_control_t__defined) */
#endif /* __MIG_check__Request__memory_entry_subsystem__ */
#endif /* ( __MigTypeCheck ) */
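
/*
 * Each __MIG_check__Request__* helper validates the incoming request header:
 * a simple (non-complex) message of exactly the expected size for the two
 * inline routines, and a complex message carrying exactly one port descriptor
 * for mach_memory_entry_ownership_from_user. Anything else is rejected with
 * MIG_BAD_ARGUMENTS (or MIG_TYPE_ERROR) before the implementation is called.
 */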


/* Routine mach_memory_entry_purgable_control */
mig_internal novalue _Xmach_memory_entry_purgable_control
	(mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
	__attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
	mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
{

#ifdef __MigPackStructs
#pragma pack(push, 4)
#endif
	typedef struct {
		NDR_record_t NDR;
		vm_purgable_t control;
		int state;
		mach_msg_trailer_t trailer;
		char padding[0]; /* Avoid generating empty UData structs */
	} RequestU __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack(pop)
#endif
	typedef __RequestKData__mach_memory_entry_purgable_control_t RequestK;
	typedef __RequestUData__mach_memory_entry_purgable_control_t __RequestU;
	typedef __ReplyKData__mach_memory_entry_purgable_control_t ReplyK __attribute__((unused));
	typedef __ReplyUData__mach_memory_entry_purgable_control_t ReplyU __attribute__((unused));
	typedef __Reply__mach_memory_entry_purgable_control_t Reply __attribute__((unused));
	typedef __Request__mach_memory_entry_purgable_control_t __Request __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	RequestK *InKP = (RequestK *) InHeadP;
	RequestU *In0UP = (RequestU *) InDataP;
	ReplyK *OutKP = (ReplyK *) OutHeadP;
	ReplyU *OutUP = (ReplyU *) OutDataP;
	(void)OutUP;
#ifdef	__MIG_check__Request__mach_memory_entry_purgable_control_t__defined
	kern_return_t check_result;
#endif	/* __MIG_check__Request__mach_memory_entry_purgable_control_t__defined */

#if	__MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	__DeclareRcvRpc(4900, "mach_memory_entry_purgable_control")
	__BeforeRcvRpc(4900, "mach_memory_entry_purgable_control")

#if	defined(__MIG_check__Request__mach_memory_entry_purgable_control_t__defined)
	check_result = __MIG_check__Request__mach_memory_entry_purgable_control_t((RequestK *)InKP, (__RequestU *)In0UP, InTrailerP);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutKP, check_result); }
#endif	/* defined(__MIG_check__Request__mach_memory_entry_purgable_control_t__defined) */

	OutUP->RetCode = mach_memory_entry_purgable_control(null_conversion(InKP->Head.msgh_request_port), In0UP->control, &In0UP->state);
	if (OutUP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
	}
#if	__MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutUP->NDR = NDR_record;


	OutUP->state = In0UP->state;

	OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(4900, "mach_memory_entry_purgable_control")
}
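
/*
 * In this handler the purgeable "state" argument is in-out: the request value
 * is passed by address to the kernel routine and the possibly updated value is
 * copied back into the reply. A user-space caller would reach this stub via the
 * corresponding MIG user routine, roughly (illustrative sketch, not part of
 * this file; assumes the usual mach/memory_entry.h prototype):
 *
 *	int state = 0;
 *	kern_return_t kr = mach_memory_entry_purgable_control(mem_entry_port,
 *	    VM_PURGABLE_GET_STATE, &state);
 */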

#if ( __MigTypeCheck )
#if __MIG_check__Request__memory_entry_subsystem__
#if !defined(__MIG_check__Request__mach_memory_entry_access_tracking_t__defined)
#define __MIG_check__Request__mach_memory_entry_access_tracking_t__defined

mig_internal kern_return_t __MIG_check__Request__mach_memory_entry_access_tracking_t(
	__attribute__((__unused__)) __RequestKData__mach_memory_entry_access_tracking_t *InKP,
	__attribute__((__unused__)) __RequestUData__mach_memory_entry_access_tracking_t *In0UP,
	__attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
{

	typedef __Request__mach_memory_entry_access_tracking_t __Request;
	typedef __RequestUData__mach_memory_entry_access_tracking_t __RequestU __attribute__((unused));
#if __MigTypeCheck
	if ((InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
	    (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__mach_memory_entry_access_tracking_t__defined) */
#endif /* __MIG_check__Request__memory_entry_subsystem__ */
#endif /* ( __MigTypeCheck ) */


/* Routine mach_memory_entry_access_tracking */
mig_internal novalue _Xmach_memory_entry_access_tracking
	(mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
	__attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
	mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
{

#ifdef __MigPackStructs
#pragma pack(push, 4)
#endif
	typedef struct {
		NDR_record_t NDR;
		int access_tracking;
		mach_msg_trailer_t trailer;
		char padding[0]; /* Avoid generating empty UData structs */
	} RequestU __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack(pop)
#endif
	typedef __RequestKData__mach_memory_entry_access_tracking_t RequestK;
	typedef __RequestUData__mach_memory_entry_access_tracking_t __RequestU;
	typedef __ReplyKData__mach_memory_entry_access_tracking_t ReplyK __attribute__((unused));
	typedef __ReplyUData__mach_memory_entry_access_tracking_t ReplyU __attribute__((unused));
	typedef __Reply__mach_memory_entry_access_tracking_t Reply __attribute__((unused));
	typedef __Request__mach_memory_entry_access_tracking_t __Request __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	RequestK *InKP = (RequestK *) InHeadP;
	RequestU *In0UP = (RequestU *) InDataP;
	ReplyK *OutKP = (ReplyK *) OutHeadP;
	ReplyU *OutUP = (ReplyU *) OutDataP;
	(void)OutUP;
#ifdef	__MIG_check__Request__mach_memory_entry_access_tracking_t__defined
	kern_return_t check_result;
#endif	/* __MIG_check__Request__mach_memory_entry_access_tracking_t__defined */

#if	__MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	__DeclareRcvRpc(4901, "mach_memory_entry_access_tracking")
	__BeforeRcvRpc(4901, "mach_memory_entry_access_tracking")

#if	defined(__MIG_check__Request__mach_memory_entry_access_tracking_t__defined)
	check_result = __MIG_check__Request__mach_memory_entry_access_tracking_t((RequestK *)InKP, (__RequestU *)In0UP, InTrailerP);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutKP, check_result); }
#endif	/* defined(__MIG_check__Request__mach_memory_entry_access_tracking_t__defined) */

	OutUP->RetCode = mach_memory_entry_access_tracking(null_conversion(InKP->Head.msgh_request_port), &In0UP->access_tracking, &OutUP->access_tracking_reads, &OutUP->access_tracking_writes);
	if (OutUP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutKP, OutUP->RetCode);
	}
#if	__MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutUP->NDR = NDR_record;


	OutUP->access_tracking = In0UP->access_tracking;

	OutKP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(4901, "mach_memory_entry_access_tracking")
}

#if ( __MigTypeCheck )
#if __MIG_check__Request__memory_entry_subsystem__
#if !defined(__MIG_check__Request__mach_memory_entry_ownership_from_user_t__defined)
#define __MIG_check__Request__mach_memory_entry_ownership_from_user_t__defined

mig_internal kern_return_t __MIG_check__Request__mach_memory_entry_ownership_from_user_t(
	__attribute__((__unused__)) __RequestKData__mach_memory_entry_ownership_from_user_t *InKP,
	__attribute__((__unused__)) __RequestUData__mach_memory_entry_ownership_from_user_t *In0UP,
	__attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP)
{

	typedef __Request__mach_memory_entry_ownership_from_user_t __Request;
	typedef __RequestUData__mach_memory_entry_ownership_from_user_t __RequestU __attribute__((unused));
#if __MigTypeCheck
	if (!(InKP->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
	    (InKP->msgh_body.msgh_descriptor_count != 1) ||
	    (InKP->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

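	/*
	 * The "owner" argument arrives as a port descriptor; on the receive
	 * side a send right carries disposition 17 (MACH_MSG_TYPE_PORT_SEND),
	 * which is what the literal in the check below expects.
	 */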
#if __MigTypeCheck
	if (InKP->owner.type != MACH_MSG_PORT_DESCRIPTOR ||
	    InKP->owner.disposition != 17)
		return MIG_TYPE_ERROR;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__mach_memory_entry_ownership_from_user_t__defined) */
#endif /* __MIG_check__Request__memory_entry_subsystem__ */
#endif /* ( __MigTypeCheck ) */


/* Routine mach_memory_entry_ownership_from_user */
mig_internal novalue _Xmach_memory_entry_ownership_from_user
	(mach_msg_header_t *InHeadP, __attribute__((__unused__)) void *InDataP,
	__attribute__((__unused__)) mach_msg_max_trailer_t *InTrailerP,
	mach_msg_header_t *OutHeadP, __attribute__((__unused__)) void *OutDataP)
{

#ifdef __MigPackStructs
#pragma pack(push, 4)
#endif
	typedef struct {
		NDR_record_t NDR;
		int ledger_tag;
		int ledger_flags;
		mach_msg_trailer_t trailer;
		char padding[0]; /* Avoid generating empty UData structs */
	} RequestU __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack(pop)
#endif
	typedef __RequestKData__mach_memory_entry_ownership_from_user_t RequestK;
	typedef __RequestUData__mach_memory_entry_ownership_from_user_t __RequestU;
	typedef __ReplyKData__mach_memory_entry_ownership_from_user_t ReplyK __attribute__((unused));
	typedef __ReplyUData__mach_memory_entry_ownership_from_user_t ReplyU __attribute__((unused));
	typedef __Reply__mach_memory_entry_ownership_from_user_t Reply __attribute__((unused));
	typedef __Request__mach_memory_entry_ownership_from_user_t __Request __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	RequestK *InKP = (RequestK *) InHeadP;
	RequestU *In0UP = (RequestU *) InDataP;
	ReplyK *OutKP = (ReplyK *) OutHeadP;
	ReplyU *OutUP = (ReplyU *) OutDataP;
	(void)OutUP;
#ifdef	__MIG_check__Request__mach_memory_entry_ownership_from_user_t__defined
	kern_return_t check_result;
#endif	/* __MIG_check__Request__mach_memory_entry_ownership_from_user_t__defined */

#if	__MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	__DeclareRcvRpc(4902, "mach_memory_entry_ownership_from_user")
	__BeforeRcvRpc(4902, "mach_memory_entry_ownership_from_user")

#if	defined(__MIG_check__Request__mach_memory_entry_ownership_from_user_t__defined)
	check_result = __MIG_check__Request__mach_memory_entry_ownership_from_user_t((RequestK *)InKP, (__RequestU *)In0UP, InTrailerP);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutKP, check_result); }
#endif	/* defined(__MIG_check__Request__mach_memory_entry_ownership_from_user_t__defined) */

	OutUP->RetCode = mach_memory_entry_ownership_from_user(null_conversion(InKP->Head.msgh_request_port), InKP->owner.name, In0UP->ledger_tag, In0UP->ledger_flags);
#if	__MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutUP->NDR = NDR_record;


	__AfterRcvRpc(4902, "mach_memory_entry_ownership_from_user")
}



/* Description of this kernel subsystem, for use in direct RPC */
const struct memory_entry_subsystem memory_entry_subsystem = {
	memory_entry_server_routine,
	4900,
	4903,
	(mach_msg_size_t)sizeof(union __ReplyUnion__memory_entry_subsystem),
	(vm_address_t)0,
	{
	  { (mig_impl_routine_t) 0,
	    (mig_stub_kern_routine_t) _Xmach_memory_entry_purgable_control, 3, 0, 0, (mach_msg_size_t)sizeof(__Reply__mach_memory_entry_purgable_control_t)},
	  { (mig_impl_routine_t) 0,
	    (mig_stub_kern_routine_t) _Xmach_memory_entry_access_tracking, 4, 0, 0, (mach_msg_size_t)sizeof(__Reply__mach_memory_entry_access_tracking_t)},
	  { (mig_impl_routine_t) 0,
	    (mig_stub_kern_routine_t) _Xmach_memory_entry_ownership_from_user, 4, 0, 0, (mach_msg_size_t)sizeof(__Reply__mach_memory_entry_ownership_from_user_t)},
	}
};
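
/*
 * The descriptor above covers msgh_ids [4900, 4903), mapping each id to the
 * corresponding stub in order; both memory_entry_server_routine() and
 * memory_entry_server() below index the kroutine array with (msgh_id - 4900).
 * The sizeof(__ReplyUnion__memory_entry_subsystem) entry is the largest reply
 * this subsystem can generate.
 */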

mig_external boolean_t memory_entry_server
	(mach_msg_header_t *InHeadP, void *InDataP, mach_msg_max_trailer_t *InTrailerP, mach_msg_header_t *OutHeadP, void *OutDataP)
{
	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	mig_kern_routine_t routine;

	OutHeadP->msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REPLY(InHeadP->msgh_bits), 0);
	OutHeadP->msgh_remote_port = InHeadP->msgh_reply_port;
	/* Minimal size: routine() will update it if different */
	OutHeadP->msgh_size = (mach_msg_size_t)sizeof(mig_reply_error_t);
	OutHeadP->msgh_local_port = MACH_PORT_NULL;
	OutHeadP->msgh_id = InHeadP->msgh_id + 100;
	OutHeadP->msgh_reserved = 0;

	if ((InHeadP->msgh_id > 4902) || (InHeadP->msgh_id < 4900) ||
	    ((routine = memory_entry_subsystem.kroutine[InHeadP->msgh_id - 4900].kstub_routine) == 0)) {
		((mig_reply_error_t *)OutHeadP)->NDR = NDR_record;
		((mig_reply_error_t *)OutHeadP)->RetCode = MIG_BAD_ID;
		return FALSE;
	}
	(*routine) (InHeadP, InDataP, InTrailerP, OutHeadP, OutDataP);
	return TRUE;
}
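
/*
 * memory_entry_server() is the demux entry point: it pre-fills the reply
 * header, looks up the stub for msgh_id 4900..4902, and either runs it
 * (returning TRUE) or leaves a MIG_BAD_ID mig_reply_error_t in OutHeadP
 * (returning FALSE). Illustrative call, with hypothetical buffer names:
 *
 *	boolean_t handled = memory_entry_server(&req_hdr, req_data,
 *	    req_trailer, &reply_hdr, reply_data);
 */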

mig_external mig_kern_routine_t memory_entry_server_routine
	(mach_msg_header_t *InHeadP)
{
	int msgh_id;

	msgh_id = InHeadP->msgh_id - 4900;

	if ((msgh_id > 2) || (msgh_id < 0))
		return 0;

	return memory_entry_subsystem.kroutine[msgh_id].kstub_routine;
}