/*
 * Copyright (c) 2015-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* File: kern/mach_node.c
 * Author: Dean Reece
 * Date: 2016
 *
 * Implementation of mach node support.
 * This is the basis for flipc, which provides inter-node communication.
 */


#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/assert.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/mach_node_link.h>
#include <kern/mach_node.h>
#include <kern/ipc_mig.h>           // mach_msg_send_from_kernel_proper()

#include <ipc/port.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_init.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_entry.h>

#include <ipc/flipc.h>

#include <libkern/OSAtomic.h>       // OSAddAtomic64(), OSCompareAndSwap()
#include <libkern/OSByteOrder.h>    // OSHostByteOrder()

#pragma pack(4)

#define MNL_NAME_TABLE_SIZE (256)   // Hash is evenly distributed, so a power-of-2 size is fine
#define MNL_NAME_HASH(name) (name % MNL_NAME_TABLE_SIZE)

/*** Visible outside mach_node layer ***/
mach_node_id_t localnode_id = -1;       // This node's FLIPC id.
#if MACH_FLIPC
mach_node_t localnode;                  // This node's mach_node_t struct


/*** Private to mach_node layer ***/
static int mach_nodes_to_publish;
static mach_node_t mach_node_table[MACH_NODES_MAX];
static LCK_SPIN_DECLARE_ATTR(mach_node_table_lock_data,
    &ipc_lck_grp, &ipc_lck_attr);
#define MACH_NODE_TABLE_LOCK()     lck_spin_lock(&mach_node_table_lock_data)
#define MACH_NODE_TABLE_UNLOCK()   lck_spin_unlock(&mach_node_table_lock_data)

static volatile SInt64 mnl_name_next;
static queue_head_t mnl_name_table[MNL_NAME_TABLE_SIZE];
static LCK_SPIN_DECLARE_ATTR(mnl_name_table_lock_data,
    &ipc_lck_grp, &ipc_lck_attr);
#define MNL_NAME_TABLE_LOCK()      lck_spin_lock(&mnl_name_table_lock_data)
#define MNL_NAME_TABLE_UNLOCK()    lck_spin_unlock(&mnl_name_table_lock_data)

static void mach_node_init(void);
static void mnl_name_table_init(void);
static void mach_node_table_init(void);
static void mach_node_publish(mach_node_t node);

static mach_node_t mach_node_alloc_init(mach_node_id_t node_id);
static kern_return_t mach_node_register(mach_node_t node);


/* mach_node_init() is run lazily when a node link driver registers
 * or the node special port is set.
 * The variable localnode_id is used to determine if init has already run.
 */
void
mach_node_init(void)
{
    mach_node_id_t node_id = 0;         // TODO: Read from device tree?
    if (OSCompareAndSwap((UInt32)(HOST_LOCAL_NODE),
        (UInt32)node_id,
        &localnode_id)) {
        printf("mach_node_init(): localnode_id=%d of %d\n",
            localnode_id, MACH_NODES_MAX);
        mach_node_table_init();
        mnl_name_table_init();
    }   // TODO: else block until init is finished (init completion race)
}

void
mach_node_table_init(void)
{
    MACH_NODE_TABLE_LOCK();

    /* Start with an empty node table. */
    bzero(mach_node_table, sizeof(mach_node_t) * MACH_NODES_MAX);
    mach_nodes_to_publish = 0;

    /* Allocate localnode's struct */
    localnode = mach_node_for_id_locked(localnode_id, 1, 1);
    assert(MACH_NODE_VALID(localnode));

    MACH_NODE_TABLE_UNLOCK();

    /* Set up localnode's struct */
    bzero(localnode, sizeof(*localnode));
    localnode->info.datamodel = LOCAL_DATA_MODEL;
    localnode->info.byteorder = OSHostByteOrder();
    localnode->info.proto_vers_min = MNL_PROTOCOL_V1;
    localnode->info.proto_vers_max = MNL_PROTOCOL_V1;
    localnode->proto_vers = MNL_PROTOCOL_V1;
    localnode->published = 0;
    localnode->active = 1;

    MACH_NODE_UNLOCK(localnode);
}

/* Sends a publication message to the local node's bootstrap server.
 * This function is smart and will only send a notification if one is really
 * needed - it can be called speculatively on any node at any time.
 *
 * Note: MUST be called with the node table lock held.
 */

void
mach_node_publish(mach_node_t node)
{
    kern_return_t kr;

    if (!MACH_NODE_VALID(node) || (!node->active) || (node->published)) {
        return;             // node is invalid or not suitable for publication
    }
    ipc_port_t bs_port = localnode->bootstrap_port;
    if (!IP_VALID(bs_port)) {
        return;             // No bootstrap server to notify!
    }
    /* Node is suitable and server is present, so make registration message */
    struct mach_node_server_register_msg msg;

    msg.node_header.header.msgh_remote_port = bs_port;
    msg.node_header.header.msgh_size = sizeof(msg);
    msg.node_header.header.msgh_local_port = MACH_PORT_NULL;
    msg.node_header.header.msgh_voucher_port = MACH_PORT_NULL;
    msg.node_header.header.msgh_id = MACH_NODE_SERVER_MSG_ID;
    msg.node_header.node_id = node->info.node_id;
    msg.node_header.options = 0;
    msg.datamodel = node->info.datamodel;
    msg.byteorder = node->info.byteorder;

    if (node == localnode) {
        msg.node_header.identifier = MACH_NODE_SM_REG_LOCAL;
        msg.node_header.header.msgh_bits =
            MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0);
    } else {
        msg.node_header.identifier = MACH_NODE_SM_REG_REMOTE;
        msg.node_header.header.msgh_local_port = node->bootstrap_port;
        msg.node_header.header.msgh_bits = MACH_MSGH_BITS_SET
            (MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND, 0, 0);
    }

    kr = mach_msg_send_from_kernel_proper(&msg.node_header.header,
        sizeof(msg));
    if (kr == KERN_SUCCESS) {
        node->published = 1;
        mach_nodes_to_publish--;
    }
    printf("mach_node_publish(%d)=%d\n", node->info.node_id, kr);
}

/* Called whenever the node special port changes */
void
mach_node_port_changed(void)
{
    ipc_port_t bs_port;

    mach_node_init();           // Lazy init of mach_node layer

    /* Cleanup previous bootstrap port if necessary */
    MACH_NODE_LOCK(localnode);
    flipc_node_retire(localnode);
    bs_port = localnode->bootstrap_port;
    if (IP_VALID(bs_port)) {
        localnode->bootstrap_port = IP_NULL;
        // TODO: destroy send right to outgoing bs_port
    }

    kernel_get_special_port(host_priv_self(), HOST_NODE_PORT, &bs_port);
    assert(IP_VALID(bs_port));
    localnode->bootstrap_port = bs_port;
    flipc_node_prepare(localnode);
    MACH_NODE_UNLOCK(localnode);

    /* Cleanup the publication state of all nodes in the table */
    MACH_NODE_TABLE_LOCK();
    // TODO: Sign up for bootstrap port death notifications
    localnode->active = 1;

    mach_nodes_to_publish = 0;

    int n;
    for (n = 0; n < MACH_NODES_MAX; n++) {
        mach_node_t np = mach_node_table[n];
        // Publish all active nodes (except the local node)
        if (!MACH_NODE_VALID(np)) {
            continue;
        }
        np->published = 0;
        if (np->active == 1) {
            mach_nodes_to_publish++;
        }
    }

    mach_node_publish(localnode);       // Always publish local node first

    for (n = 0; n < MACH_NODES_MAX; n++) {
        mach_node_publish(mach_node_table[n]);
    }

    MACH_NODE_TABLE_UNLOCK();

    // TODO: notify all active nodes we are bootstrapped
}

/* Allocate/init a mach_node struct and fill in the node_id field.
 * This does NOT insert the node struct into the node table.
 */
mach_node_t
mach_node_alloc_init(mach_node_id_t node_id)
{
    mach_node_t node = MACH_NODE_ALLOC();
    if (MACH_NODE_VALID(node)) {
        bzero(node, sizeof(struct mach_node));
        MACH_NODE_LOCK_INIT(node);
        node->info.node_id = node_id;
    }
    return node;
}


/* This function takes a mach_node struct with a completed info field and
 * registers it with the mach_node and flipc (if flipc is enabled) layers.
 */
kern_return_t
mach_node_register(mach_node_t node)
{
    assert(MACH_NODE_VALID(node));
    mach_node_id_t nid = node->info.node_id;
    assert(MACH_NODE_ID_VALID(nid));

    kern_return_t kr;
    ipc_space_t proxy_space = IS_NULL;
    ipc_pset_t pp_set = IPS_NULL;       // pset for proxy ports
    ipc_port_t bs_port = MACH_PORT_NULL;
    ipc_port_t ack_port = MACH_PORT_NULL;

    printf("mach_node_register(%d)\n", nid);

    /* TODO: Support non-native byte order and data models */
    if ((node->info.byteorder != OSHostByteOrder()) ||
        (node->info.datamodel != LOCAL_DATA_MODEL)) {
        printf("mach_node_register: unsupported byte order (%d) or width (%d)",
            node->info.byteorder, node->info.datamodel);
        return KERN_INVALID_ARGUMENT;
    }

    /* Create the space that holds all local rights assigned to <nid> */
    kr = ipc_space_create_special(&proxy_space);
    if (kr != KERN_SUCCESS) {
        goto out;
    }
    proxy_space->is_node_id = nid;

    /* Create the bootstrap proxy port for this remote node */
    bs_port = ipc_port_alloc_special(proxy_space, IPC_PORT_INIT_MESSAGE_QUEUE);
    if (bs_port == MACH_PORT_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto out;
    }

    /* Create the control (ack) port for this remote node */
    ack_port = ipc_port_alloc_special(proxy_space, IPC_PORT_INIT_MESSAGE_QUEUE);
    if (ack_port == MACH_PORT_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto out;
    }

    /* Create the set that holds all proxy ports for this remote node */
    pp_set = ipc_pset_alloc_special(proxy_space);
    if (pp_set == IPS_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto out;
    }

    waitq_set_lazy_init_link(&pp_set->ips_wqset);
    /* Add the bootstrap port to the proxy port set */
    waitq_link_t link = waitq_link_alloc(WQT_PORT_SET);
    ip_mq_lock(bs_port);
    ips_mq_lock(pp_set);        // Revisit the lock when enabling flipc
    ipc_mqueue_add_locked(bs_port, pp_set, &link);
    ips_mq_unlock(pp_set);
    ip_mq_unlock(bs_port);

    /* Add the control port to the proxy port set */
    if (link.wqlh == NULL) {
        link = waitq_link_alloc(WQT_PORT_SET);
    }
    ip_mq_lock(ack_port);
    ips_mq_lock(pp_set);        // Revisit the lock when enabling flipc
    ipc_mqueue_add_locked(ack_port, pp_set, &link);
    ips_mq_unlock(pp_set);
    ip_mq_unlock(ack_port);

    if (link.wqlh) {
        waitq_link_free(WQT_PORT_SET, link);
    }

    // Setup mach_node struct
    node->published = 0;
    node->active = 1;
    node->proxy_space = proxy_space;
    node->proxy_port_set = pp_set;
    node->bootstrap_port = bs_port;
    node->proto_vers = node->info.proto_vers_max;
    node->control_port = ack_port;

    // Place new mach_node struct into node table
    MACH_NODE_TABLE_LOCK();

    mach_node_t old_node = mach_node_table[nid];
    if (!MACH_NODE_VALID(old_node) || (old_node->dead)) {
        node->antecedent = old_node;
        flipc_node_prepare(node);
        mach_node_table[nid] = node;
        mach_nodes_to_publish++;
        mach_node_publish(node);
        kr = KERN_SUCCESS;
    } else {
        printf("mach_node_register: id %d already active!", nid);
        kr = KERN_FAILURE;
    }
    MACH_NODE_TABLE_UNLOCK();

out:
    if (kr != KERN_SUCCESS) {   // Dispose of whatever we allocated
        if (pp_set) {
            ips_mq_lock(pp_set);
            ipc_pset_destroy(proxy_space, pp_set);
        }

        if (bs_port) {
            ipc_port_dealloc_special(bs_port, proxy_space);
        }

        if (ack_port) {
            ipc_port_dealloc_special(ack_port, proxy_space);
        }

        if (proxy_space) {
            ipc_space_terminate(proxy_space);
        }
    }

    return kr;
}


/* Gets or allocates a locked mach_node struct for the specified <node_id>.
 * The current node is locked and returned if it is not dead, or if it is dead
 * and <alloc_if_dead> is false. A new node struct is allocated, locked and
 * returned if the node is dead and <alloc_if_dead> is true, or if the node
 * is absent and <alloc_if_absent> is true. MACH_NODE_NULL is returned if
 * the node is absent and <alloc_if_absent> is false. MACH_NODE_NULL is also
 * returned if a new node structure could not be allocated.
 *
 * Note: This function must be called with the node table lock held!
 */
mach_node_t
mach_node_for_id_locked(mach_node_id_t node_id,
    boolean_t alloc_if_dead,
    boolean_t alloc_if_absent)
{
    if ((node_id < 0) || (node_id >= MACH_NODES_MAX)) {
        return MACH_NODE_NULL;
    }

    mach_node_t node = mach_node_table[node_id];

    if ((!MACH_NODE_VALID(node) && alloc_if_absent) ||
        (MACH_NODE_VALID(node) && node->dead && alloc_if_dead)) {
        node = mach_node_alloc_init(node_id);
        if (MACH_NODE_VALID(node)) {
            node->antecedent = mach_node_table[node_id];
            mach_node_table[node_id] = node;
        }
    }

    if (MACH_NODE_VALID(node)) {
        MACH_NODE_LOCK(node);
    }

    return node;
}



/*** Mach Node Link Name and Hash Table Implementation ***/

/* Allocate a new unique name and return it.
 * Dispose of this with mnl_name_free().
 * Returns MNL_NAME_NULL on failure.
 */
mnl_name_t
mnl_name_alloc(void)
{
    return (mnl_name_t)OSAddAtomic64(MACH_NODES_MAX, &mnl_name_next);
}
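
/* Example of the naming scheme (assuming MACH_NODES_MAX == 256 purely for
 * concreteness): mnl_name_next is seeded with (localnode_id + MACH_NODES_MAX)
 * and each allocation advances it by MACH_NODES_MAX, so every name returned
 * here satisfies (name % MACH_NODES_MAX) == localnode_id. Node 0 would hand
 * out 256, 512, 768, ... while node 3 would hand out 259, 515, 771, ...,
 * which keeps names minted by different nodes from colliding.
 */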


/* Deallocate a unique name that was allocated via mnl_name_alloc().
 */
void
mnl_name_free(mnl_name_t name __unused)
{
    ;           // Nothing to do for now since we don't recycle mnl names.
}


/* Called once from mach_node_init(), this sets up the hash table structures.
 */
void
mnl_name_table_init(void)
{
    MNL_NAME_TABLE_LOCK();

    // Set the first name to this node's bootstrap name
    mnl_name_next = localnode_id + MACH_NODES_MAX;

    for (int i = 0; i < MNL_NAME_TABLE_SIZE; i++) {
        queue_head_init(mnl_name_table[i]);
    }

    MNL_NAME_TABLE_UNLOCK();
}


/* Initialize the data structures in the mnl_obj structure at the head of the
 * provided object. This should be called on an object before it is passed to
 * any other mnl_obj* routine.
 */
void
mnl_obj_init(mnl_obj_t obj)
{
    queue_chain_init(obj->links);
    obj->name = MNL_NAME_NULL;
}


/* Search the local node's hash table for the object associated with a
 * mnl_name_t and return it. Returns MNL_OBJ_NULL on failure.
 */
mnl_obj_t
mnl_obj_lookup(mnl_name_t name)
{
    mnl_obj_t obj = MNL_OBJ_NULL;

    if (name != MNL_NAME_NULL) {
        qe_foreach_element(obj, &mnl_name_table[MNL_NAME_HASH(name)], links) {
            if (obj->name == name) {
                break;
            }
        }
    }
    return obj;
}


/* Search the local node's hash table for the object associated with a
 * mnl_name_t and remove it. The pointer to the removed object is returned so
 * that the caller can appropriately dispose of the object.
 * Returns MNL_OBJ_NULL on failure.
 */
mnl_obj_t
mnl_obj_remove(mnl_name_t name)
{
    mnl_obj_t obj = MNL_OBJ_NULL;

    if (name != MNL_NAME_NULL) {
        qe_foreach_element_safe(obj, &mnl_name_table[MNL_NAME_HASH(name)], links) {
            if (obj->name == name) {
                remqueue(&obj->links);
            }
        }
    }
    return obj;
}


/* Insert an object into the local node's hash table. If the name of the
 * provided object is MNL_NAME_NULL then a new mnl_name is allocated and
 * assigned to the object.
 * Returns KERN_SUCCESS if obj was added to hash table
 * Returns KERN_INVALID_ARGUMENT if obj is invalid
 * Returns KERN_NAME_EXISTS if obj's name already exists in hash table
 */
kern_return_t
mnl_obj_insert(mnl_obj_t obj)
{
    if (!MNL_OBJ_VALID(obj)) {
        return KERN_INVALID_ARGUMENT;
    }

    MNL_NAME_TABLE_LOCK();

    if (!MNL_NAME_VALID(obj->name)) {
        // obj is unnamed, so let's allocate a fresh one
        obj->name = mnl_name_alloc();
    }

    enqueue(&mnl_name_table[MNL_NAME_HASH(obj->name)], &obj->links);
    MNL_NAME_TABLE_UNLOCK();

    if (obj->name >= (MACH_NODES_MAX << 1)) {
        panic("Unexpected MNL_NAME %lld in obj %p", obj->name, obj);
    }

    return KERN_SUCCESS;
}
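
/* Usage sketch (illustrative only; struct my_obj, its mnl/payload fields, and
 * the struct mnl_obj spelling are hypothetical, not defined here). A client
 * embeds an mnl_obj at the head of its own structure, initializes it, and
 * then inserts, looks up, and removes it by name:
 *
 *     struct my_obj {
 *         struct mnl_obj mnl;     // must be first, per mnl_obj_init() above
 *         int payload;
 *     } *mo;
 *
 *     mnl_obj_init(&mo->mnl);             // name starts as MNL_NAME_NULL
 *     mnl_obj_insert(&mo->mnl);           // assigns a fresh mnl_name
 *     mnl_obj_t found = mnl_obj_lookup(mo->mnl.name);
 *     mnl_obj_t gone  = mnl_obj_remove(mo->mnl.name);
 */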


/*** Mach Node Link Driver Interface Implementation ***/

/* Allocate a mnl_msg struct plus additional payload. Link drivers are not
 * required to use this to allocate messages; any wired and mapped kernel
 * memory is acceptable.
 *
 * Arguments:
 *   payload    Number of additional bytes to allocate for message payload
 *   flags      Currently unused; 0 should be passed
 *
 * Return values:
 *   MNL_MSG_NULL:  Allocation failed
 *   *:             Pointer to new mnl_msg struct of requested size
 */
mnl_msg_t
mnl_msg_alloc(int payload,
    uint32_t flags __unused)
{
    mnl_msg_t msg = kalloc(MNL_MSG_SIZE + payload);

    if (MNL_MSG_VALID(msg)) {
        bzero(msg, MNL_MSG_SIZE);       // Only zero the header
        msg->size = payload;
    }

    return msg;
}


/* Free a mnl_msg struct allocated by mnl_msg_alloc().
 *
 * Arguments:
 *   msg        Pointer to the message buffer to be freed
 *   flags      Currently unused; 0 should be passed
 */
void
mnl_msg_free(mnl_msg_t msg,
    uint32_t flags __unused)
{
    if (MNL_MSG_VALID(msg)) {
        kfree(msg, MNL_MSG_SIZE + msg->size);
    }
}
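
/* Usage sketch (illustrative only; the 64-byte payload is arbitrary). Since
 * mnl_msg_alloc() sizes the buffer as MNL_MSG_SIZE + payload and only zeroes
 * the header, a driver can reach the payload area with pointer arithmetic
 * past MNL_MSG_SIZE:
 *
 *     mnl_msg_t m = mnl_msg_alloc(64, 0);
 *     if (MNL_MSG_VALID(m)) {
 *         uint8_t *body = (uint8_t *)m + MNL_MSG_SIZE;
 *         // ... fill in the header fields and up to m->size payload bytes ...
 *         mnl_msg_free(m, 0);
 *     }
 */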


/* The link driver calls this to set up a new (or restarted) node, and to get
 * an mnl_node_info struct for use as a parameter to other mnl functions.
 * If MNL_NODE_NULL is returned, the operation failed. Otherwise, a pointer
 * to a new mnl_node struct is returned. The caller should set all fields
 * in the structure, then call mnl_register() to complete node registration.
 *
 * Arguments:
 *   nid        The id of the node to be instantiated
 *   flags      Currently unused; 0 should be passed
 *
 * Return values:
 *   MNL_NODE_NULL:  Operation failed
 *   *:              Pointer to a new mnl_node struct
 */
mnl_node_info_t
mnl_instantiate(mach_node_id_t nid,
    uint32_t flags __unused)
{
    mach_node_init();           // Lazy init of mach_node layer

    if ((nid == localnode_id) || !MACH_NODE_ID_VALID(nid)) {
        return MNL_NODE_NULL;
    }

    return (mnl_node_info_t)mach_node_alloc_init(nid);
}

/* The link driver calls mnl_register() to complete the node registration
 * process. KERN_SUCCESS is returned if registration succeeded, otherwise
 * an error is returned.
 *
 * Arguments:
 *   node       Pointer to the node's mnl_node structure
 *   flags      Currently unused; 0 should be passed
 *
 * Return values:
 *   KERN_SUCCESS:           Registration succeeded
 *   KERN_INVALID_ARGUMENT:  Field(s) in <node> contained unacceptable values
 *   KERN_*:                 Values returned from underlying functions
 */
kern_return_t
mnl_register(mnl_node_info_t node,
    uint32_t flags __unused)
{
    if (MNL_NODE_VALID(node) && (node->node_id != localnode_id)) {
        return mach_node_register((mach_node_t)node);
    }

    return KERN_INVALID_ARGUMENT;
}
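
/* Registration flow sketch (illustrative only; the node id 2 is arbitrary,
 * and the field names assume mnl_node_info mirrors the info fields accessed
 * via node->info elsewhere in this file):
 *
 *     mnl_node_info_t ni = mnl_instantiate(2, 0);
 *     if (MNL_NODE_VALID(ni)) {
 *         ni->datamodel      = LOCAL_DATA_MODEL;
 *         ni->byteorder      = OSHostByteOrder();
 *         ni->proto_vers_min = MNL_PROTOCOL_V1;
 *         ni->proto_vers_max = MNL_PROTOCOL_V1;
 *         if (mnl_register(ni, 0) != KERN_SUCCESS) {
 *             // registration failed; the driver should dispose of ni
 *         }
 *     }
 */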


/* The link driver calls this to report that the link has been raised in one
 * or both directions. If the link is two uni-directional channels, each link
 * driver will independently call this function, each only raising the link
 * they are responsible for. The mach_node layer will not communicate with
 * the remote node until both rx and tx links are up.
 *
 * Arguments:
 *   node       Pointer to the node's mnl_node structure
 *   link       Indicates which link(s) are up (see MNL_LINK_* defines)
 *   flags      Currently unused; 0 should be passed
 *
 * Return values:
 *   KERN_SUCCESS:           Link state changed successfully.
 *   KERN_INVALID_ARGUMENT:  An argument value was not allowed.
 *   KERN_*:                 Values returned from underlying functions.
 */
kern_return_t
mnl_set_link_state(mnl_node_info_t node,
    int link,
    uint32_t flags __unused)
{
    kern_return_t kr;
    mach_node_t mnode = (mach_node_t)node;

    if (!MACH_NODE_VALID(mnode) || !(link & MNL_LINK_UP) || (link & mnode->link)) {
        return KERN_INVALID_ARGUMENT;   // bad node, or bad link argument
    }
    MACH_NODE_LOCK(mnode);

    if (mnode->dead) {
        kr = KERN_NODE_DOWN;
    } else {
        mnode->link |= link;
        kr = KERN_SUCCESS;
    }

    MACH_NODE_UNLOCK(mnode);

    return kr;
}
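
/* Usage sketch (illustrative only; MNL_LINK_RX_UP is assumed to be among the
 * MNL_LINK_* defines referenced above). A driver that owns only the receive
 * direction raises just that half; the mach_node layer starts messaging once
 * both halves are up:
 *
 *     kern_return_t kr = mnl_set_link_state(ni, MNL_LINK_RX_UP, 0);
 *     if (kr != KERN_SUCCESS) {
 *         // node invalid, node dead, or that direction was already up
 *     }
 */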

/* The link driver calls this to indicate a node has terminated and is no
 * longer available for messaging. This may be due to a crash or an orderly
 * shutdown, but either way the remote node no longer retains any state about
 * the remaining nodes. References held on behalf of the terminated node
 * will be cleaned up. After this is called, both the rx and tx links are
 * marked as down. If the remote node restarts, the link driver can bring
 * up the link using mnl_instantiate() again.
 *
 * Arguments:
 *   node       Pointer to the node's mnl_node structure
 *   flags      Currently unused; 0 should be passed
 *
 * Return values:
 *   KERN_SUCCESS:           Node was terminated.
 *   KERN_INVALID_ARGUMENT:  Node id was invalid or non-existent.
 *   KERN_*:                 Values returned from underlying functions.
 */
kern_return_t
mnl_terminate(mnl_node_info_t node,
    uint32_t flags __unused)
{
    kern_return_t kr = KERN_SUCCESS;
    mach_node_t mnode = (mach_node_t)node;

    if (!MACH_NODE_VALID(mnode)) {
        return KERN_INVALID_ARGUMENT;   // bad node
    }
    MACH_NODE_LOCK(mnode);
    if (mnode->dead) {
        kr = KERN_NODE_DOWN;            // node is already terminated
        goto unlock;
    }

    mnode->link = MNL_LINK_DOWN;
    mnode->active = 0;
    mnode->suspended = 0;
    mnode->dead = 1;

    flipc_node_retire(mnode);

    // Wake any threads sleeping on the proxy port set
    if (mnode->proxy_port_set != IPS_NULL) {
        ips_mq_lock(mnode->proxy_port_set);
        ipc_pset_destroy(mnode->proxy_space, mnode->proxy_port_set);
        mnode->proxy_port_set = IPS_NULL;
    }

    // TODO: Inform node name server (if registered) of termination

unlock:
    MACH_NODE_UNLOCK(mnode);
    return kr;
}


/* The link driver calls this to deliver an incoming message. Note that the
 * link driver must dispose of the memory pointed to by <msg> after the
 * function call returns.
 *
 * Arguments:
 *   node       Pointer to the node's mnl_node structure
 *   msg        Pointer to the message buffer
 *   flags      Currently unused; 0 should be passed
 */
void
mnl_msg_from_node(mnl_node_info_t node __unused,
    mnl_msg_t msg,
    uint32_t flags __unused)
{
    assert(MNL_MSG_VALID(msg));
    assert(MACH_NODE_ID_VALID(msg->node_id));
    assert(MNL_NODE_VALID(node));

    /* If node message forwarding is supported, the <node> argument may not
     * match msg->node_id. The former identifies the node from which we
     * received the message; the latter is the node that originally generated
     * the message. We always use msg->node_id, which is where the ack needs
     * to go.
     */

    switch (msg->sub) {
    case MACH_NODE_SUB_FLIPC:
        flipc_msg_from_node((mach_node_t)node, msg, flags);
        break;

    default:
#if DEBUG
        PE_enter_debugger("mnl_msg_from_node(): Invalid subsystem");
#endif
        break;
    }
}
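
/* Receive-path sketch (illustrative only; rx_buf is a hypothetical wired
 * buffer the driver filled from the link). The driver retains ownership of
 * the buffer and must dispose of it after the call returns:
 *
 *     mnl_msg_from_node(ni, (mnl_msg_t)rx_buf, 0);
 *     // free or recycle rx_buf here; this layer does not keep a reference
 */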


/* The link driver calls this to fetch the next message to transmit.
 * This function will block until a message is available, or will return
 * FLIPC_MSG_NULL if the link is to be terminated. After the caller has
 * completed the transmission and no longer needs the msg buffer, it should
 * call mnl_msg_complete().
 *
 * Arguments:
 *   node       Pointer to the node's mnl_node structure
 *   flags      Currently unused; 0 should be passed
 */
mnl_msg_t
mnl_msg_to_node(mnl_node_info_t node __unused,
    uint32_t flags __unused)
{
    assert(MNL_NODE_VALID(node));

#if DEBUG
    thread_set_thread_name(current_thread(), "MNL_Link");
#endif

    return flipc_msg_to_remote_node((mach_node_t)node, 0);
}
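
/* Transmit-loop sketch (illustrative only; transmit_over_link() is a
 * hypothetical driver routine, and the MNL_MSG_NULL check assumes it matches
 * the FLIPC_MSG_NULL sentinel mentioned above). A sender thread typically
 * blocks here and hands each buffer back with mnl_msg_complete() once the
 * hardware is done with it:
 *
 *     mnl_msg_t m;
 *     while ((m = mnl_msg_to_node(ni, 0)) != MNL_MSG_NULL) {
 *         transmit_over_link(m, MNL_MSG_SIZE + m->size);
 *         mnl_msg_complete(ni, m, 0);
 *     }
 *     // a NULL return means the link is being terminated
 */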


/* The link driver calls this to indicate that the specified msg buffer has
 * been sent over the link and can be deallocated.
 *
 * Arguments:
 *   node       Pointer to the node's mnl_node structure
 *   msg        Pointer to the message buffer
 *   flags      Currently unused; 0 should be passed
 */
void
mnl_msg_complete(mnl_node_info_t node __unused,
    mnl_msg_t msg,
    uint32_t flags)
{
    switch (msg->sub) {
    case MACH_NODE_SUB_NODE:
        mnl_msg_free(msg, flags);
        break;

    case MACH_NODE_SUB_FLIPC:
        flipc_msg_free(msg, flags);
        break;

    default:
#if DEBUG
        PE_enter_debugger("mnl_msg_complete(): Invalid subsystem");
#endif
        break;
    }
}

#else // MACH_FLIPC not configured, so provide KPI stubs

mnl_msg_t
mnl_msg_alloc(int payload __unused, uint32_t flags __unused)
{
    return MNL_MSG_NULL;
}

void
mnl_msg_free(mnl_msg_t msg __unused, uint32_t flags __unused)
{
    return;
}

mnl_node_info_t
mnl_instantiate(mach_node_id_t nid __unused, uint32_t flags __unused)
{
    return MNL_NODE_NULL;
}

kern_return_t
mnl_register(mnl_node_info_t node __unused, uint32_t flags __unused)
{
    return KERN_FAILURE;
}

kern_return_t
mnl_set_link_state(mnl_node_info_t node __unused,
    int link __unused,
    uint32_t flags __unused)
{
    return KERN_FAILURE;
}

kern_return_t
mnl_terminate(mnl_node_info_t node __unused, uint32_t flags __unused)
{
    return KERN_FAILURE;
}

void
mnl_msg_from_node(mnl_node_info_t node __unused,
    mnl_msg_t msg __unused,
    uint32_t flags __unused)
{
    return;
}

mnl_msg_t
mnl_msg_to_node(mnl_node_info_t node __unused, uint32_t flags __unused)
{
    return MNL_MSG_NULL;
}

void
mnl_msg_complete(mnl_node_info_t node __unused,
    mnl_msg_t msg __unused,
    uint32_t flags __unused)
{
    return;
}

#endif // MACH_FLIPC