1 | /* |
2 | * Copyright (c) 2015-2016 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* File: kern/mach_node.h |
29 | * Author: Dean Reece |
30 | * Date: 2016 |
31 | * |
32 | * Implementation of mach node support. |
33 | * This is the basis for flipc, which provides inter-node communication. |
34 | */ |
35 | |
36 | |
37 | #include <mach/mach_types.h> |
38 | #include <mach/boolean.h> |
39 | #include <mach/kern_return.h> |
40 | |
41 | #include <kern/kern_types.h> |
42 | #include <kern/assert.h> |
43 | |
44 | #include <kern/host.h> |
45 | #include <kern/kalloc.h> |
46 | #include <kern/mach_node_link.h> |
47 | #include <kern/mach_node.h> |
48 | #include <kern/ipc_mig.h> // mach_msg_send_from_kernel_proper() |
49 | |
50 | #include <ipc/port.h> |
51 | #include <ipc/ipc_types.h> |
52 | #include <ipc/ipc_init.h> |
53 | #include <ipc/ipc_kmsg.h> |
54 | #include <ipc/ipc_port.h> |
55 | #include <ipc/ipc_pset.h> |
56 | #include <ipc/ipc_table.h> |
57 | #include <ipc/ipc_entry.h> |
58 | |
59 | #include <ipc/flipc.h> |
60 | |
61 | #include <libkern/OSAtomic.h> // OSAddAtomic64(), OSCompareAndSwap() |
62 | #include <libkern/OSByteOrder.h> // OSHostByteOrder() |
63 | |
64 | #pragma pack(4) |
65 | |
#define MNL_NAME_TABLE_SIZE (256) // Hash is evenly distributed, so ^2 is ok
/* Map an mnl_name_t onto a bucket index. The argument is parenthesized so
 * that expression arguments (e.g. MNL_NAME_HASH(base + off)) hash the whole
 * value; the unparenthesized form bound only the right operand to `%`.
 */
#define MNL_NAME_HASH(name) ((name) % MNL_NAME_TABLE_SIZE)
68 | |
/*** Visible outside mach_node layer ***/
mach_node_id_t localnode_id = -1; // This node's FLIPC id.
#if MACH_FLIPC
mach_node_t localnode; // This node's mach_node_t struct


/*** Private to mach_node layer ***/
// Count of nodes still awaiting publication to the bootstrap server;
// maintained under MACH_NODE_TABLE_LOCK (see mach_node_publish()).
static int mach_nodes_to_publish;
// Table of all known nodes, indexed by node id; entries are replaced (with
// the old struct chained via ->antecedent) rather than freed.
static mach_node_t mach_node_table[MACH_NODES_MAX];
// Spinlock guarding mach_node_table and mach_nodes_to_publish.
static lck_spin_t mach_node_table_lock_data;
#define MACH_NODE_TABLE_LOCK() lck_spin_lock(&mach_node_table_lock_data)
#define MACH_NODE_TABLE_UNLOCK() lck_spin_unlock(&mach_node_table_lock_data)
#define MACH_NODE_TABLE_LOCK_INIT() lck_spin_init(&mach_node_table_lock_data, \
        &ipc_lck_grp, &ipc_lck_attr)

// Next mnl name to hand out; advanced atomically by MACH_NODES_MAX per
// allocation (see mnl_name_alloc()).
static volatile SInt64 mnl_name_next;
// Hash table of mnl objects, bucketed by MNL_NAME_HASH(name).
static queue_head_t mnl_name_table[MNL_NAME_TABLE_SIZE];
// Spinlock guarding mnl_name_table (see mnl_obj_insert()).
static lck_spin_t mnl_name_table_lock_data;
#define MNL_NAME_TABLE_LOCK() lck_spin_lock(&mnl_name_table_lock_data)
#define MNL_NAME_TABLE_UNLOCK() lck_spin_unlock(&mnl_name_table_lock_data)
#define MNL_NAME_TABLE_LOCK_INIT() lck_spin_init(&mnl_name_table_lock_data, \
        &ipc_lck_grp, &ipc_lck_attr)

static void mach_node_init(void);
static void mnl_name_table_init(void);
static void mach_node_table_init(void);
static void mach_node_publish(mach_node_t node);

static mach_node_t mach_node_alloc_init(mach_node_id_t node_id);
static kern_return_t mach_node_register(mach_node_t node);
99 | |
100 | |
/* mach_node_init() is run lazily when a node link driver registers
 * or the node special port is set.
 * The variable localnode_id is used to determine if init has already run.
 */
void
mach_node_init(void)
{
    mach_node_id_t node_id = 0; // TODO: Read from device tree?
    /* One-shot init gate: atomically swing localnode_id from its
     * initializer -1 (HOST_LOCAL_NODE) to this node's id. Only the thread
     * that wins the CAS performs the table/name/flipc initialization.
     * NOTE(review): losing threads return immediately, possibly before the
     * winner has finished initializing — see the TODO below.
     */
    if (OSCompareAndSwap((UInt32)(HOST_LOCAL_NODE),
        (UInt32)node_id,
        &localnode_id)) {
        printf("mach_node_init(): localnode_id=%d of %d\n" ,
            localnode_id, MACH_NODES_MAX);
        mach_node_table_init();
        mnl_name_table_init();
        flipc_init();
    } // TODO: else block until init is finished (init completion race)
}
119 | |
120 | void |
121 | mach_node_table_init(void) |
122 | { |
123 | MACH_NODE_TABLE_LOCK_INIT(); |
124 | MACH_NODE_TABLE_LOCK(); |
125 | |
126 | /* Start with an enpty node table. */ |
127 | bzero(mach_node_table, sizeof(mach_node_t) * MACH_NODES_MAX); |
128 | mach_nodes_to_publish = 0; |
129 | |
130 | /* Allocate localnode's struct */ |
131 | localnode = mach_node_for_id_locked(localnode_id, 1, 1); |
132 | assert(MACH_NODE_VALID(localnode)); |
133 | |
134 | MACH_NODE_TABLE_UNLOCK(); |
135 | |
136 | /* Set up localnode's struct */ |
137 | bzero(localnode, sizeof(localnode)); |
138 | localnode->info.datamodel = LOCAL_DATA_MODEL; |
139 | localnode->info.byteorder = OSHostByteOrder(); |
140 | localnode->info.proto_vers_min = MNL_PROTOCOL_V1; |
141 | localnode->info.proto_vers_max = MNL_PROTOCOL_V1; |
142 | localnode->proto_vers = MNL_PROTOCOL_V1; |
143 | localnode->published = 0; |
144 | localnode->active = 1; |
145 | |
146 | MACH_NODE_UNLOCK(localnode); |
147 | } |
148 | |
/* Sends a publication message to the local node's bootstrap server.
 * This function is smart and will only send a notification if one is really
 * needed - it can be called speculatively on any node at any time.
 *
 * Note: MUST be called with the node table lock held.
 */
155 | |
void
mach_node_publish(mach_node_t node)
{
    kern_return_t kr;

    // Only publish nodes that are active and not yet published.
    if (!MACH_NODE_VALID(node) || (!node->active) || (node->published))
        return; // node is invalid or not suitable for publication

    ipc_port_t bs_port = localnode->bootstrap_port;
    if (!IP_VALID(bs_port))
        return; // No bootstrap server to notify!

    /* Node is suitable and server is present, so make registration message */
    struct mach_node_server_register_msg msg;

    msg.node_header.header.msgh_remote_port = bs_port;
    msg.node_header.header.msgh_size = sizeof(msg);
    msg.node_header.header.msgh_local_port = MACH_PORT_NULL;
    msg.node_header.header.msgh_voucher_port = MACH_PORT_NULL;
    msg.node_header.header.msgh_id = MACH_NODE_SERVER_MSG_ID;
    msg.node_header.node_id = node->info.node_id;
    msg.node_header.options = 0;
    msg.datamodel = node->info.datamodel;
    msg.byteorder = node->info.byteorder;

    if (node == localnode) {
        // Local node: no reply port is supplied.
        msg.node_header.identifier = MACH_NODE_SM_REG_LOCAL;
        msg.node_header.header.msgh_bits =
            MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0);
    } else {
        // Remote node: pass the node's bootstrap proxy port as the local
        // (reply) port so the server can reach the remote node.
        msg.node_header.identifier = MACH_NODE_SM_REG_REMOTE;
        msg.node_header.header.msgh_local_port = node->bootstrap_port;
        msg.node_header.header.msgh_bits = MACH_MSGH_BITS_SET
                (MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND, 0, 0);
    }

    kr = mach_msg_send_from_kernel_proper(&msg.node_header.header,
        sizeof (msg));
    // Only mark published (and decrement the outstanding count) on success;
    // a failed send leaves the node eligible for a later retry.
    if (kr == KERN_SUCCESS) {
        node->published = 1;
        mach_nodes_to_publish--;
    }
    printf("mach_node_publish(%d)=%d\n" , node->info.node_id, kr);
}
200 | |
/* Called whenever the node special port changes.
 * Re-fetches the bootstrap port from the host special port slot, then
 * resets publication state for every node in the table and republishes
 * them all to the (new) bootstrap server.
 */
void
mach_node_port_changed(void)
{
    ipc_port_t bs_port;

    mach_node_init(); // Lazy init of mach_node layer

    /* Cleanup previous bootstrap port if necessary */
    MACH_NODE_LOCK(localnode);
    flipc_node_retire(localnode);
    bs_port = localnode->bootstrap_port;
    if (IP_VALID(bs_port)) {
        localnode->bootstrap_port = IP_NULL;
        // TODO: destroy send right to outgoing bs_port
    }

    // Fetch the newly-set bootstrap port from the host special port slot.
    kernel_get_special_port(host_priv_self(), HOST_NODE_PORT, &bs_port);
    assert(IP_VALID(bs_port));
    localnode->bootstrap_port = bs_port;
    flipc_node_prepare(localnode);
    MACH_NODE_UNLOCK(localnode);

    /* Cleanup the publication state of all nodes in the table */
    MACH_NODE_TABLE_LOCK();
    // TODO: Signup for bootstrap port death notifications
    localnode->active = 1;

    mach_nodes_to_publish = 0;

    // First pass: mark every valid node unpublished and count the active
    // ones so mach_node_publish() can track the remaining notifications.
    int n;
    for (n=0; n<MACH_NODES_MAX; n++) {
        mach_node_t np = mach_node_table[n];
        // Publish all active nodes (except the local node)
        if (!MACH_NODE_VALID(np))
            continue;
        np->published = 0;
        if (np->active == 1)
            mach_nodes_to_publish++;
    }

    mach_node_publish(localnode); // Always publish local node first

    // Second pass: publish the rest (mach_node_publish() skips invalid,
    // inactive, and already-published entries, including localnode).
    for (n=0; n<MACH_NODES_MAX; n++)
        mach_node_publish(mach_node_table[n]);

    MACH_NODE_TABLE_UNLOCK();

    // TODO: notify all active nodes we are bootstrapped
}
251 | |
252 | /* Allocate/init a mach_node struct and fill in the node_id field. |
253 | * This does NOT insert the node struct into the node table. |
254 | */ |
255 | mach_node_t |
256 | mach_node_alloc_init(mach_node_id_t node_id) |
257 | { |
258 | mach_node_t node = MACH_NODE_ALLOC(); |
259 | if (MACH_NODE_VALID(node)) { |
260 | bzero(node, sizeof(struct mach_node)); |
261 | MACH_NODE_LOCK_INIT(node); |
262 | node->info.node_id = node_id; |
263 | } |
264 | return node; |
265 | } |
266 | |
267 | |
/* This function takes a mach_node struct with a completed info field and
 * registers it with the mach_node and flipc (if flipc is enabled) layers.
 *
 * On success the node owns a new proxy IPC space, a bootstrap proxy port,
 * a control (ack) port, and a proxy port set, and has been placed in the
 * node table and published. On any failure, everything allocated here is
 * torn down before returning.
 */
kern_return_t
mach_node_register(mach_node_t node)
{
    assert(MACH_NODE_VALID(node));
    mach_node_id_t nid = node->info.node_id;
    assert(MACH_NODE_ID_VALID(nid));

    kern_return_t kr;
    ipc_space_t proxy_space = IS_NULL;
    ipc_pset_t pp_set = IPS_NULL; // pset for proxy ports
    ipc_port_t bs_port = MACH_PORT_NULL;
    ipc_port_t ack_port = MACH_PORT_NULL;

    printf("mach_node_register(%d)\n" , nid);

    /* TODO: Support non-native byte order and data models */
    if ((node->info.byteorder != OSHostByteOrder()) ||
        (node->info.datamodel != LOCAL_DATA_MODEL)) {
        printf("mach_node_register: unsupported byte order (%d) or width (%d)" ,
            node->info.byteorder, node->info.datamodel);
        return KERN_INVALID_ARGUMENT;
    }

    /* Create the space that holds all local rights assigned to <nid> */
    kr = ipc_space_create_special(&proxy_space);
    if (kr != KERN_SUCCESS)
        goto out;
    proxy_space->is_node_id = nid;

    /* Create the bootstrap proxy port for this remote node */
    bs_port = ipc_port_alloc_special(proxy_space);
    if (bs_port == MACH_PORT_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto out;
    }

    /* Create the control (ack) port for this remote node */
    ack_port = ipc_port_alloc_special(proxy_space);
    if (ack_port == MACH_PORT_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto out;
    }

    /* Create the set that holds all proxy ports for this remote node */
    pp_set = ipc_pset_alloc_special(proxy_space);
    if (pp_set == IPS_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto out;
    }

    waitq_set_lazy_init_link(pp_set);
    /* Add the bootstrap port to the proxy port set.
     * The waitq link and prepost reservations are made before taking the
     * pset/port locks; ipc_pset_add() consumes what it needs and the
     * leftovers are released below.
     */
    uint64_t wq_link_id = waitq_link_reserve(NULL);
    uint64_t wq_reserved_prepost = waitq_prepost_reserve(NULL, 10,
        WAITQ_DONT_LOCK);
    ips_lock(pp_set);
    ip_lock(bs_port);
    ipc_pset_add(pp_set,
        bs_port,
        &wq_link_id,
        &wq_reserved_prepost);
    ip_unlock(bs_port);
    ips_unlock(pp_set);

    waitq_link_release(wq_link_id);
    waitq_prepost_release_reserve(wq_reserved_prepost);

    /* Add the control port to the proxy port set */
    wq_link_id = waitq_link_reserve(NULL);
    wq_reserved_prepost = waitq_prepost_reserve(NULL, 10,
        WAITQ_DONT_LOCK);
    ips_lock(pp_set);
    ip_lock(ack_port);
    ipc_pset_add(pp_set,
        ack_port,
        &wq_link_id,
        &wq_reserved_prepost);
    ip_unlock(ack_port);
    ips_unlock(pp_set);

    waitq_link_release(wq_link_id);
    waitq_prepost_release_reserve(wq_reserved_prepost);

    // Setup mach_node struct
    node->published = 0;
    node->active = 1;
    node->proxy_space = proxy_space;
    node->proxy_port_set = pp_set;
    node->bootstrap_port = bs_port;
    node->proto_vers = node->info.proto_vers_max;
    node->control_port = ack_port;

    // Place new mach_node struct into node table
    MACH_NODE_TABLE_LOCK();

    mach_node_t old_node = mach_node_table[nid];
    if (!MACH_NODE_VALID(old_node) || (old_node->dead)) {
        // Slot is empty or holds a dead node: chain the old struct as our
        // antecedent and take over the slot.
        node->antecedent = old_node;
        flipc_node_prepare(node);
        mach_node_table[nid] = node;
        mach_nodes_to_publish++;
        mach_node_publish(node); // requires the table lock, which we hold
        kr = KERN_SUCCESS;
    } else {
        printf("mach_node_register: id %d already active!" , nid);
        kr = KERN_FAILURE;
    }
    MACH_NODE_TABLE_UNLOCK();

out:
    if (kr != KERN_SUCCESS) { // Dispose of whatever we allocated
        if (pp_set) {
            // NOTE(review): ipc_pset_destroy() appears to consume the pset
            // lock taken here — confirm against ipc_pset.c.
            ips_lock(pp_set);
            ipc_pset_destroy(pp_set);
        }

        if (bs_port)
            ipc_port_dealloc_special(bs_port, proxy_space);

        if (ack_port)
            ipc_port_dealloc_special(ack_port, proxy_space);

        if (proxy_space)
            ipc_space_terminate(proxy_space);
    }

    return kr;
}
399 | |
400 | |
401 | /* Gets or allocates a locked mach_node struct for the specified <node_id>. |
402 | * The current node is locked and returned if it is not dead, or if it is dead |
403 | * and <alloc_if_dead> is false. A new node struct is allocated, locked and |
404 | * returned if the node is dead and <alloc_if_dead> is true, or if the node |
405 | * is absent and <alloc_if_absent> is true. MACH_NODE_NULL is returned if |
406 | * the node is absent and <alloc_if_absent> is false. MACH_NODE_NULL is also |
407 | * returned if a new node structure was not able to be allocated. |
408 | * |
409 | * Note: This function must be called with the node table lock held! |
410 | */ |
411 | mach_node_t |
412 | mach_node_for_id_locked(mach_node_id_t node_id, |
413 | boolean_t alloc_if_dead, |
414 | boolean_t alloc_if_absent) |
415 | { |
416 | if ((node_id < 0) || (node_id >= MACH_NODES_MAX)) |
417 | return MACH_NODE_NULL; |
418 | |
419 | mach_node_t node = mach_node_table[node_id]; |
420 | |
421 | if ( (!MACH_NODE_VALID(node) && alloc_if_absent) || |
422 | (MACH_NODE_VALID(node) && node->dead && alloc_if_dead) ) { |
423 | node = mach_node_alloc_init(node_id); |
424 | if (MACH_NODE_VALID(node)) { |
425 | node->antecedent = mach_node_table[node_id]; |
426 | mach_node_table[node_id] = node; |
427 | } |
428 | } |
429 | |
430 | if (MACH_NODE_VALID(node)) |
431 | MACH_NODE_LOCK(node); |
432 | |
433 | return node; |
434 | } |
435 | |
436 | |
437 | |
438 | /*** Mach Node Link Name and Hash Table Implementation ***/ |
439 | |
/* Allocate a new unique name and return it.
 * Dispose of this with mnl_name_free().
 * Returns MNL_NAME_NULL on failure.
 */
mnl_name_t
mnl_name_alloc(void)
{
    // OSAddAtomic64 returns the value *before* the add. Names step by
    // MACH_NODES_MAX, so every name stays congruent to localnode_id
    // (mod MACH_NODES_MAX) — see mnl_name_table_init(), which seeds
    // mnl_name_next with localnode_id + MACH_NODES_MAX. This keeps names
    // allocated by different nodes disjoint.
    return (mnl_name_t)OSAddAtomic64(MACH_NODES_MAX, &mnl_name_next);
}
449 | |
450 | |
451 | /* Deallocate a unique name that was allocated via mnl_name_alloc(). |
452 | */ |
453 | void |
454 | mnl_name_free(mnl_name_t name __unused) |
455 | { |
456 | ; // Nothing to do for now since we don't recycle mnl names. |
457 | } |
458 | |
459 | |
/* Called once from mach_node_init(), this sets up the hash table structures.
 */
void
mnl_name_table_init(void)
{
    MNL_NAME_TABLE_LOCK_INIT();
    MNL_NAME_TABLE_LOCK();

    // Set the first name to this node's bootstrap name.
    // Seeding with localnode_id + MACH_NODES_MAX keeps all names this node
    // allocates congruent to localnode_id (mod MACH_NODES_MAX); see
    // mnl_name_alloc().
    mnl_name_next = localnode_id + MACH_NODES_MAX;

    // Every bucket starts as an empty queue.
    for (int i=0; i<MNL_NAME_TABLE_SIZE; i++)
        queue_head_init(mnl_name_table[i]);

    MNL_NAME_TABLE_UNLOCK();
}
476 | |
477 | |
/* Initialize the data structures in the mnl_obj structure at the head of the
 * provided object. This should be called on an object before it is passed to
 * any other mnl_obj* routine.
 */
void
mnl_obj_init(mnl_obj_t obj)
{
    // Self-linked chain marks the object as not yet in any hash bucket.
    queue_chain_init(obj->links);
    // MNL_NAME_NULL lets mnl_obj_insert() know to allocate a fresh name.
    obj->name = MNL_NAME_NULL;
}
488 | |
489 | |
490 | /* Search the local node's hash table for the object associated with a |
491 | * mnl_name_t and return it. Returns MNL_NAME_NULL on failure. |
492 | */ |
493 | mnl_obj_t |
494 | mnl_obj_lookup(mnl_name_t name) |
495 | { |
496 | mnl_obj_t obj = MNL_OBJ_NULL; |
497 | |
498 | if (name != MNL_NAME_NULL) { |
499 | qe_foreach_element(obj, &mnl_name_table[MNL_NAME_HASH(name)], links) { |
500 | if (obj->name == name) |
501 | break; |
502 | } |
503 | } |
504 | return obj; |
505 | } |
506 | |
507 | |
508 | /* Search the local node's hash table for the object associated with a |
509 | * mnl_name_t and remove it. The pointer to the removed object is returned so |
510 | * that the caller can appropriately dispose of the object. |
511 | * Returns MNL_NAME_NULL on failure. |
512 | */ |
513 | mnl_obj_t |
514 | mnl_obj_remove(mnl_name_t name) |
515 | { |
516 | mnl_obj_t obj = MNL_OBJ_NULL; |
517 | |
518 | if (name != MNL_NAME_NULL) { |
519 | qe_foreach_element_safe(obj, &mnl_name_table[MNL_NAME_HASH(name)], links) { |
520 | if (obj->name == name) |
521 | remqueue(&obj->links); |
522 | } |
523 | } |
524 | return obj; |
525 | } |
526 | |
527 | |
/* Insert an object into the local node's hash table. If the name of the
 * provided object is MNL_NAME_NULL then a new mnl_name is allocated and
 * assigned to the object.
 * Returns KERN_SUCCESS if obj was added to hash table
 * Returns KERN_INVALID_ARGUMENT if obj is invalid
 * Returns KERN_NAME_EXISTS if obj's name already exists in hash table
 *
 * NOTE(review): despite the comment above, no duplicate check is performed
 * and KERN_NAME_EXISTS is never returned — confirm whether callers rely on
 * duplicate detection.
 */
kern_return_t
mnl_obj_insert(mnl_obj_t obj)
{
    if (!MNL_OBJ_VALID(obj))
        return KERN_INVALID_ARGUMENT;

    MNL_NAME_TABLE_LOCK();

    if (!MNL_NAME_VALID(obj->name)) {
        // obj is unnammed, so lets allocate a fresh one
        obj->name = mnl_name_alloc();
    }

    enqueue(&mnl_name_table[MNL_NAME_HASH(obj->name)], &obj->links);
    MNL_NAME_TABLE_UNLOCK();

    // NOTE(review): mnl_name_alloc() advances names by MACH_NODES_MAX per
    // call, so this bound (2*MACH_NODES_MAX) trips after only a couple of
    // allocations — looks like a debug assertion left enabled; confirm
    // intent before relying on insertions beyond the first few names.
    if(obj->name >= (MACH_NODES_MAX<<1))
        panic("Unexpected MNL_NAME %lld in obj %p" , obj->name, obj);

    return KERN_SUCCESS;
}
556 | |
557 | |
558 | /*** Mach Node Link Driver Interface Implementation ***/ |
559 | |
560 | /* Allocate a mnl_msg struct plus additional payload. Link drivers are not |
561 | * required to use this to allocate messages; any wired and mapped kernel |
562 | * memory is acceptable. |
563 | * |
564 | * Arguments: |
565 | * payload Number of additional bytes to allocate for message payload |
566 | * flags Currently unused; 0 should be passed |
567 | * |
568 | * Return values: |
569 | * MNL_MSG_NULL: Allocation failed |
570 | * *: Pointer to new mnl_msg struct of requested size |
571 | */ |
572 | mnl_msg_t |
573 | mnl_msg_alloc(int payload, |
574 | uint32_t flags __unused) |
575 | { |
576 | mnl_msg_t msg = kalloc(MNL_MSG_SIZE + payload); |
577 | |
578 | if (MNL_MSG_VALID(msg)) { |
579 | bzero(msg, MNL_MSG_SIZE); // Only zero the header |
580 | msg->size = payload; |
581 | } |
582 | |
583 | return msg; |
584 | } |
585 | |
586 | |
587 | /* Free a mnl_msg struct allocated by mnl_msg_alloc(). |
588 | * |
589 | * Arguments: |
590 | * msg Pointer to the message buffer to be freed |
591 | * flags Currently unused; 0 should be passed |
592 | */ |
593 | void |
594 | mnl_msg_free(mnl_msg_t msg, |
595 | uint32_t flags __unused) |
596 | { |
597 | if (MNL_MSG_VALID(msg)) |
598 | kfree(msg, MNL_MSG_SIZE + msg->size); |
599 | } |
600 | |
601 | |
602 | /* The link driver calls this to setup a new (or restarted) node, and to get |
603 | * an mnl_node_info struct for use as a parameter to other mnl functions. |
604 | * If MNL_NODE_NULL is returned, the operation failed. Otherwise, a pointer |
605 | * to a new mnl_node struct is returned. The caller should set all fields |
606 | * in the structure, then call mnl_register() to complete node registration. |
607 | * |
608 | * Arguments: |
609 | * nid The id of the node to be instantiated |
610 | * flags Currently unused; 0 should be passed |
611 | * |
612 | * Return values: |
613 | * MNL_NODE_NULL: Operation failed |
614 | * *: Pointer to a new mnl_node struct |
615 | */ |
616 | mnl_node_info_t |
617 | mnl_instantiate(mach_node_id_t nid, |
618 | uint32_t flags __unused) |
619 | { |
620 | mach_node_init(); // Lazy init of mach_node layer |
621 | |
622 | if ((nid==localnode_id) || !MACH_NODE_ID_VALID(nid)) |
623 | return MNL_NODE_NULL; |
624 | |
625 | return (mnl_node_info_t)mach_node_alloc_init(nid); |
626 | } |
627 | |
628 | /* The link driver calls mnl_register() to complete the node registration |
629 | * process. KERN_SUCCESS is returned if registration succeeded, otherwise |
630 | * an error is returned. |
631 | * |
632 | * Arguments: |
633 | * node Pointer to the node's mnl_node structure |
634 | * flags Currently unused; 0 should be passed |
635 | * |
636 | * Return values: |
637 | * KERN_SUCCESS: Registration succeeded |
638 | * KERN_INVALID_ARGUMENT: Field(s) in <node> contained unacceptable values |
639 | * KERN_*: Values returned from underlying functions |
640 | */ |
641 | kern_return_t |
642 | mnl_register(mnl_node_info_t node, |
643 | uint32_t flags __unused) |
644 | { |
645 | if (MNL_NODE_VALID(node) && (node->node_id != localnode_id)) |
646 | return mach_node_register((mach_node_t)node); |
647 | |
648 | return KERN_INVALID_ARGUMENT; |
649 | } |
650 | |
651 | |
652 | /* The link driver calls this to report that the link has been raised in one |
653 | * or both directions. If the link is two uni-directional channels, each link |
654 | * driver will independently call this function, each only raising the link |
655 | * they are responsible for. The mach_node layer will not communicate with |
656 | * the remote node until both rx and tx links are up. |
657 | * |
658 | * Arguments: |
659 | * node Pointer to the node's mnl_node structure |
660 | * link Indicates which link(s) are up (see MNL_LINK_* defines) |
661 | * flags Currently unused; 0 should be passed |
662 | * |
663 | * Return values: |
664 | * KERN_SUCCESS: Link state changed successfully. |
665 | * KERN_INVALID_ARGUMENT: An argument value was not allowed. |
666 | * KERN_*: Values returned from underlying functions. |
667 | */ |
668 | kern_return_t |
669 | mnl_set_link_state(mnl_node_info_t node, |
670 | int link, |
671 | uint32_t flags __unused) |
672 | { |
673 | kern_return_t kr; |
674 | mach_node_t mnode = (mach_node_t)node; |
675 | |
676 | if (!MACH_NODE_VALID(mnode) || !(link & MNL_LINK_UP) || (link & mnode->link)) |
677 | return KERN_INVALID_ARGUMENT; // bad node, or bad link argument |
678 | |
679 | MACH_NODE_LOCK(mnode); |
680 | |
681 | if (mnode->dead) { |
682 | kr = KERN_NODE_DOWN; |
683 | } else { |
684 | mnode->link |= link; |
685 | kr = KERN_SUCCESS; |
686 | } |
687 | |
688 | MACH_NODE_UNLOCK(mnode); |
689 | |
690 | return kr; |
691 | } |
692 | |
/* The link driver calls this to indicate a node has terminated and is no
 * longer available for messaging. This may be due to a crash or an orderly
 * shutdown, but either way the remote node no longer retains any state about
 * the remaining nodes. References held on behalf of the terminated node
 * will be cleaned up. After this is called, both the rx and tx links are
 * marked as down. If the remote node restarts, the link driver can bring
 * up the link using mnl_instantiate() again.
 *
 * Arguments:
 *   node       Pointer to the node's mnl_node structure
 *   flags      Currently unused; 0 should be passed
 *
 * Return values:
 *   KERN_SUCCESS:          Node was terminated.
 *   KERN_INVALID_ARGUMENT: Node id was invalid or non-existant.
 *   KERN_NODE_DOWN:        Node was already terminated.
 *   KERN_*:                Values returned from underlying functions.
 */
kern_return_t
mnl_terminate(mnl_node_info_t node,
    uint32_t flags __unused)
{
    kern_return_t kr = KERN_SUCCESS;
    mach_node_t mnode = (mach_node_t)node;

    if (!MACH_NODE_VALID(mnode))
        return KERN_INVALID_ARGUMENT; // bad node

    MACH_NODE_LOCK(mnode);
    if (mnode->dead) {
        kr = KERN_NODE_DOWN; // node is already terminated
        goto unlock;
    }

    // Mark the node fully down before tearing down its resources.
    mnode->link = MNL_LINK_DOWN;
    mnode->active = 0;
    mnode->suspended = 0;
    mnode->dead = 1;

    flipc_node_retire(mnode);

    // Wake any threads sleeping on the proxy port set
    // NOTE(review): ipc_pset_destroy() appears to consume the pset lock
    // taken here — confirm against ipc_pset.c.
    if (mnode->proxy_port_set != IPS_NULL) {
        ips_lock(mnode->proxy_port_set);
        ipc_pset_destroy(mnode->proxy_port_set);
        mnode->proxy_port_set = IPS_NULL;
    }

    // TODO: Inform node name server (if registered) of termination

unlock:
    MACH_NODE_UNLOCK(mnode);
    return kr;
}
746 | |
747 | |
748 | /* The link driver calls this to deliver an incoming message. Note that the |
749 | * link driver must dispose of the memory pointed to by <msg> after the |
750 | * function call returns. |
751 | * |
752 | * Arguments: |
753 | * node Pointer to the node's mnl_node structure |
754 | * msg Pointer to the message buffer |
755 | * flags Currently unused; 0 should be passed |
756 | */ |
757 | void |
758 | mnl_msg_from_node(mnl_node_info_t node __unused, |
759 | mnl_msg_t msg, |
760 | uint32_t flags __unused) |
761 | { |
762 | assert(MNL_MSG_VALID(msg)); |
763 | assert(MACH_NODE_ID_VALID(msg->node_id)); |
764 | assert(MNL_NODE_VALID(node)); |
765 | |
766 | /* If node message forwarding is supported, the from_node_id arg may not |
767 | * match fmsg->info.node_id. The former is the node from which we received |
768 | * the message; the latter is the node that generated the message originally. |
769 | * We always use fmsg->info.node_id, which is where the ack needs to go. |
770 | */ |
771 | |
772 | switch (msg->sub) { |
773 | |
774 | case MACH_NODE_SUB_FLIPC: |
775 | flipc_msg_from_node((mach_node_t)node, msg, flags); |
776 | break; |
777 | |
778 | default: |
779 | #if DEBUG |
780 | PE_enter_debugger("mnl_msg_from_node(): Invalid subsystem" ); |
781 | #endif |
782 | break; |
783 | } |
784 | } |
785 | |
786 | |
787 | /* The link driver calls this to fetch the next message to transmit. |
788 | * This function will block until a message is available, or will return |
789 | * FLIPC_MSG_NULL if the link is to be terminated. After the caller has |
790 | * completed the transmission and no longer needs the msg buffer, it should |
791 | * call mnl_msg_complete(). |
792 | * |
793 | * Arguments: |
794 | * node Pointer to the node's mnl_node structure |
795 | * flags Currently unused; 0 should be passed |
796 | */ |
797 | mnl_msg_t |
798 | mnl_msg_to_node(mnl_node_info_t node __unused, |
799 | uint32_t flags __unused) |
800 | { |
801 | assert(MNL_NODE_VALID(node)); |
802 | |
803 | #if DEBUG |
804 | thread_set_thread_name(current_thread(), "MNL_Link" ); |
805 | #endif |
806 | |
807 | return flipc_msg_to_remote_node((mach_node_t)node, 0); |
808 | } |
809 | |
810 | |
811 | /* The link driver calls this to indicate that the specified msg buffer has |
812 | * been sent over the link and can be deallocated. |
813 | * |
814 | * Arguments: |
815 | * node Pointer to the node's mnl_node structure |
816 | * msg Pointer to the message buffer |
817 | * flags Currently unused; 0 should be passed |
818 | */ |
819 | void |
820 | mnl_msg_complete(mnl_node_info_t node __unused, |
821 | mnl_msg_t msg, |
822 | uint32_t flags) |
823 | { |
824 | switch (msg->sub) { |
825 | case MACH_NODE_SUB_NODE: |
826 | mnl_msg_free(msg, flags); |
827 | break; |
828 | |
829 | case MACH_NODE_SUB_FLIPC: |
830 | flipc_msg_free(msg, flags); |
831 | break; |
832 | |
833 | default: |
834 | #if DEBUG |
835 | PE_enter_debugger("mnl_msg_complete(): Invalid subsystem" ); |
836 | #endif |
837 | break; |
838 | } |
839 | } |
840 | |
841 | #else // MACH_FLIPC not configured, so provide KPI stubs |
842 | |
843 | mnl_msg_t |
844 | mnl_msg_alloc(int payload __unused, uint32_t flags __unused) |
845 | { |
846 | return MNL_MSG_NULL; |
847 | } |
848 | |
849 | void |
850 | mnl_msg_free(mnl_msg_t msg __unused, uint32_t flags __unused) |
851 | { |
852 | return; |
853 | } |
854 | |
855 | mnl_node_info_t |
856 | mnl_instantiate(mach_node_id_t nid __unused, uint32_t flags __unused) |
857 | { |
858 | return MNL_NODE_NULL; |
859 | } |
860 | |
861 | kern_return_t |
862 | mnl_register(mnl_node_info_t node __unused, uint32_t flags __unused) |
863 | { |
864 | return KERN_FAILURE; |
865 | } |
866 | |
867 | kern_return_t |
868 | mnl_set_link_state(mnl_node_info_t node __unused, |
869 | int link __unused, |
870 | uint32_t flags __unused) |
871 | { |
872 | return KERN_FAILURE; |
873 | } |
874 | |
875 | kern_return_t |
876 | mnl_terminate(mnl_node_info_t node __unused, uint32_t flags __unused) |
877 | { |
878 | return KERN_FAILURE; |
879 | } |
880 | |
881 | void |
882 | mnl_msg_from_node(mnl_node_info_t node __unused, |
883 | mnl_msg_t msg __unused, |
884 | uint32_t flags __unused) |
885 | { |
886 | return; |
887 | } |
888 | |
889 | mnl_msg_t |
890 | mnl_msg_to_node(mnl_node_info_t node __unused, uint32_t flags __unused) |
891 | { |
892 | return MNL_MSG_NULL; |
893 | } |
894 | |
895 | void |
896 | mnl_msg_complete(mnl_node_info_t node __unused, |
897 | mnl_msg_t msg __unused, |
898 | uint32_t flags __unused) |
899 | { |
900 | return; |
901 | } |
902 | |
903 | #endif // MACH_FLIPC |
904 | |