/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>          /* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/spl.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#if defined(__arm64__)
#include <arm/pmap.h>
#endif
#include <IOKit/IOKitServer.h>

#define EXTERN
#define MIGEXTERN

static void
iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount );
/*
 * Lifetime:
 * - non-lazy port with a no-more-senders notification
 * - can be destroyed by iokit_destroy_object_port
 *
 */
IPC_KOBJECT_DEFINE(IKOT_IOKIT_IDENT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_CONNECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_UEXT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
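
/*
 * Note (descriptive sketch): the iko_op_no_senders hooks registered above
 * pair with the IPC_KOBJECT_ALLOC_NSREQUEST option used in
 * iokit_alloc_object_port() below, which arms the no-senders notification
 * when the object's port is allocated.
 */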

/*
 * Look up a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
	io_object_t  obj = NULL;
	io_kobject_t kobj = NULL;

	if (!IP_VALID(port)) {
		return NULL;
	}

	ip_mq_lock(port);
	if (ip_active(port)) {
		kobj = ipc_kobject_get_locked(port, type);
		if (kobj) {
			iokit_kobject_retain(kobj);
		}
	}
	ip_mq_unlock(port);
	if (kobj) {
		obj = iokit_copy_object_for_consumed_kobject(kobj, type);
	}

	return obj;
}
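
/*
 * Example usage (illustrative sketch, not code from this file): the reference
 * produced by the lookup routines below must be dropped with
 * iokit_remove_reference() once the caller is done with it.
 *
 *	io_object_t obj = iokit_lookup_object_port(port);
 *	if (obj != NULL) {
 *		// ... use obj ...
 *		iokit_remove_reference(obj);
 *	}
 */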

MIGEXTERN io_object_t
iokit_lookup_object_port(
	ipc_port_t      port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
}

MIGEXTERN io_object_t
iokit_lookup_connect_port(
	ipc_port_t      port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
}

MIGEXTERN io_object_t
iokit_lookup_ident_port(
	ipc_port_t      port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_IDENT);
}

MIGEXTERN io_object_t
iokit_lookup_uext_object_port(
	ipc_port_t      port)
{
	return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT);
}

static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
	io_object_t obj = NULL;
	io_kobject_t kobj;

	if (name && MACH_PORT_VALID(name)) {
		ipc_port_t port;
		kern_return_t kr;

		kr = ipc_port_translate_send(space, name, &port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			assert(ip_active(port));
			kobj = ipc_kobject_get_locked(port, type);
			if (kobj) {
				iokit_kobject_retain(kobj);
			}
			ip_mq_unlock(port);
			if (kobj) {
				obj = iokit_copy_object_for_consumed_kobject(kobj, type);
			}
		}
	}

	return obj;
}

EXTERN io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
{
	return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
}

EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)
{
	return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
}

EXTERN io_object_t
iokit_lookup_uext_ref_current_task(mach_port_name_t name)
{
	return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space());
}

/*
 * Look up a port given a port name.
 * This returns the port unlocked with a +1 send right.
 * Release with iokit_release_port_send().
 */
EXTERN ipc_port_t
iokit_lookup_raw_current_task(mach_port_name_t name, ipc_kobject_type_t * type)
{
	ipc_port_t port = NULL;
	if (name && MACH_PORT_VALID(name)) {
		kern_return_t kr = ipc_object_copyin(current_space(), name, MACH_MSG_TYPE_COPY_SEND, (ipc_object_t *)&port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_NONE);
		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			assert(ip_active(port));
			if (type != NULL) {
				*type = ip_kotype(port);
			}
		}
	}

	return port;
}
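
/*
 * Example (illustrative sketch): per the comment above, the send right
 * returned by iokit_lookup_raw_current_task() is released with
 * iokit_release_port_send().
 *
 *	ipc_kobject_type_t type;
 *	ipc_port_t port = iokit_lookup_raw_current_task(name, &type);
 *	if (IP_VALID(port)) {
 *		// ... inspect type, use the port ...
 *		iokit_release_port_send(port);
 *	}
 */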

EXTERN void
iokit_retain_port( ipc_port_t port )
{
	ipc_port_reference( port );
}

EXTERN void
iokit_release_port( ipc_port_t port )
{
	ipc_port_release( port );
}

EXTERN void
iokit_release_port_send( ipc_port_t port )
{
	ipc_port_release_send( port );
}

/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */

static ipc_port_t
iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
{
	ipc_port_t  port;
	ipc_port_t  sendPort;
	ipc_kobject_t kobj;

	if (obj == NULL) {
		return IP_NULL;
	}

	port = iokit_port_for_object(obj, type, &kobj);
	if (port) {
		sendPort = ipc_kobject_make_send( port, kobj, type );
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	iokit_remove_reference( obj );

	return sendPort;
}

MIGEXTERN ipc_port_t
iokit_make_object_port(
	io_object_t     obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
}

MIGEXTERN ipc_port_t
iokit_make_connect_port(
	io_object_t     obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
}

MIGEXTERN ipc_port_t
iokit_make_ident_port(
	io_object_t     obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_IDENT);
}

EXTERN ipc_port_t
iokit_alloc_object_port( io_kobject_t obj, ipc_kobject_type_t type )
{
	/* Allocate port, keeping a reference for it. */
	ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NSREQUEST;
	if (type == IKOT_IOKIT_CONNECT) {
		options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
	}
	if (type == IKOT_UEXT_OBJECT) {
		ipc_label_t label = IPC_LABEL_DEXT;
		return ipc_kobject_alloc_labeled_port((ipc_kobject_t) obj, type, label, options);
	} else {
		return ipc_kobject_alloc_port((ipc_kobject_t) obj, type, options);
	}
}
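
/*
 * Lifetime sketch (illustrative, not code from this file): per the "Lifetime"
 * comment above, a port allocated here is eventually torn down with
 * iokit_destroy_object_port().
 *
 *	ipc_port_t port = iokit_alloc_object_port(kobj, IKOT_IOKIT_OBJECT);
 *	// ... hand out send rights; no-senders callbacks may fire ...
 *	iokit_destroy_object_port(port, IKOT_IOKIT_OBJECT);
 */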

EXTERN void
iokit_remove_object_port( ipc_port_t port, ipc_kobject_type_t type )
{
	ipc_kobject_disable(port, type);
}

EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port, ipc_kobject_type_t type )
{
	ipc_kobject_dealloc_port(port, 0, type);
	return KERN_SUCCESS;
}

EXTERN ipc_kobject_type_t
iokit_port_type(ipc_port_t port)
{
	return ip_kotype(port);
}

EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t          port;
	ipc_port_t          sendPort;
	mach_port_name_t    name = 0;
	ipc_kobject_t       kobj;

	if (obj == NULL) {
		return MACH_PORT_NULL;
	}

	port = iokit_port_for_object( obj, type, &kobj );
	if (port) {
		sendPort = ipc_kobject_make_send( port, kobj, type );
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	if (IP_VALID( sendPort )) {
		kern_return_t   kr;
		// Remove once <rdar://problem/45522961> is fixed.
		// We need to make ith_knote NULL as ipc_object_copyout() uses
		// thread-argument-passing and its value should not be garbage.
		current_thread()->ith_knote = ITH_KNOTE_NULL;
		kr = ipc_object_copyout( task->itk_space, ip_to_object(sendPort),
		    MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &name);
		if (kr != KERN_SUCCESS) {
			name = MACH_PORT_NULL;
		}
	} else if (sendPort == IP_NULL) {
		name = MACH_PORT_NULL;
	} else if (sendPort == IP_DEAD) {
		name = MACH_PORT_DEAD;
	}

	return name;
}

EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
	return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
}
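
/*
 * Example (illustrative sketch): a send right copied out to a task with
 * iokit_make_send_right() can later be dropped from that task's space by
 * passing a negative delta to iokit_mod_send_right().
 *
 *	mach_port_name_t name = iokit_make_send_right(task, obj, IKOT_IOKIT_OBJECT);
 *	if (MACH_PORT_VALID(name)) {
 *		// ...
 *		iokit_mod_send_right(task, name, -1);
 *	}
 */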

/*
 * Handle the no-more-senders notification generated from a device port destroy.
 * Since there are no longer any tasks which hold a send right to this device
 * port, an NMS notification has been generated.
 */

static void
iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
{
	io_object_t         obj = NULL;
	io_kobject_t        kobj = NULL;
	ipc_kobject_type_t  type = IKOT_NONE;

	// convert the port to an io_object_t.
	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			type = ip_kotype( port );
			assert((IKOT_IOKIT_OBJECT == type)
			    || (IKOT_IOKIT_CONNECT == type)
			    || (IKOT_IOKIT_IDENT == type)
			    || (IKOT_UEXT_OBJECT == type));
			kobj = ipc_kobject_get_locked(port, type);
			if (kobj) {
				iokit_kobject_retain(kobj);
			}
		}
		ip_mq_unlock(port);
		if (kobj) {
			// IKOT_IOKIT_OBJECT since iokit_remove_reference() follows
			obj = iokit_copy_object_for_consumed_kobject(kobj, IKOT_IOKIT_OBJECT);
		}
	}

	if (obj) {
		while (iokit_client_died( obj, port, type, &mscount ) != KERN_SUCCESS) {
			kern_return_t kr;

			/* Re-request no-senders notifications on the port (if still active) */
			kr = ipc_kobject_nsrequest(port, mscount + 1, &mscount);
			if (kr != KERN_FAILURE) {
				break;
			}
			/*
			 * The port has no outstanding rights or pending make-sends,
			 * and the notification would fire recursively; try again.
			 */
		}

		iokit_remove_reference( obj );
	}
}

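/*
 * Label a driver extension task's IPC space with IPC_LABEL_DEXT. This pairs
 * with the IPC_LABEL_DEXT-labeled ports allocated for IKOT_UEXT_OBJECT
 * objects in iokit_alloc_object_port() above.
 */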
kern_return_t
iokit_label_dext_task(task_t task)
{
	return ipc_space_add_label(task->itk_space, IPC_LABEL_DEXT);
}

/*
 *	Routine:	iokit_clear_registered_ports
 *	Purpose:
 *		Clean up a task's registered IOKit kobject ports.
 *	Conditions:
 *		Nothing locked.
 */
void
iokit_clear_registered_ports(
	task_t task)
{
	mach_port_t port;
	ipc_kobject_type_t type;

	itk_lock(task);
	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		port = task->itk_registered[i];
		if (!IP_VALID(port)) {
			continue;
		}
		type = ip_kotype( port );
		if ((IKOT_IOKIT_OBJECT == type)
		    || (IKOT_IOKIT_CONNECT == type)
		    || (IKOT_IOKIT_IDENT == type)
		    || (IKOT_UEXT_OBJECT == type)) {
			ipc_port_release_send(port);
			task->itk_registered[i] = IP_NULL;
		}
	}
	itk_unlock(task);
}

/* need to create a pmap function to generalize */
unsigned int
IODefaultCacheBits(addr64_t pa)
{
	return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
}

kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
    mach_vm_size_t length, unsigned int options)
{
	vm_prot_t    prot;
	unsigned int flags;
	ppnum_t      pagenum;
	pmap_t       pmap = map->pmap;

	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	pagenum = (ppnum_t)atop_64(pa);

	switch (options & kIOMapCacheMask) {                    /* What cache mode do we need? */
	case kIOMapDefaultCache:
	default:
		flags = IODefaultCacheBits(pa);
		break;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;

	case kIOMapRealTimeCache:
		flags = VM_WIMG_RT;
		break;
	}

	pmap_set_cache_attributes(pagenum, flags);

	vm_map_set_cache_attr(map, (vm_map_offset_t)va);

	// Set up a block mapped area
	return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
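
/*
 * Example (illustrative sketch): map one page of device registers read-only
 * and uncached at a virtual address the caller has already reserved in `map`.
 *
 *	kern_return_t kr = IOMapPages(map, va, pa, PAGE_SIZE,
 *	    kIOMapInhibitCache | kIOMapReadOnly);
 */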

kern_return_t
IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
	pmap_t      pmap = map->pmap;

	pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

	return KERN_SUCCESS;
}

kern_return_t
IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
    mach_vm_size_t __unused length, unsigned int __unused options)
{
	mach_vm_size_t off;
	vm_prot_t      prot;
	unsigned int   flags;
	pmap_t         pmap = map->pmap;
	pmap_flush_context  pmap_flush_context_storage;
	boolean_t           delayed_pmap_flush = FALSE;

	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	switch (options & kIOMapCacheMask) {
	// what cache mode do we need?
	case kIOMapDefaultCache:
	default:
		return KERN_INVALID_ARGUMENT;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;

	case kIOMapRealTimeCache:
		flags = VM_WIMG_RT;
		break;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	//  enter each page's physical address in the target map
	for (off = 0; off < length; off += page_size) {
		ppnum_t ppnum = pmap_find_phys(pmap, va + off);
		if (ppnum) {
			pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
			    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage, PMAP_MAPPING_TYPE_INFER);
			delayed_pmap_flush = TRUE;
		}
	}
	if (delayed_pmap_flush == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	return KERN_SUCCESS;
}
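
/*
 * Example (illustrative sketch): re-enter an existing mapping's pages as
 * write-combined; the mapping becomes read-write unless kIOMapReadOnly is
 * also passed in the options.
 *
 *	IOProtectCacheMode(map, va, length, kIOMapWriteCombineCache);
 */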

ppnum_t
IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
	ppnum_t  lastPage, highest = 0;
	unsigned int idx;

	for (idx = 0; idx < pmap_memory_region_count; idx++) {
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest) {
			highest = lastPage;
		}
	}
	return highest;
#elif __arm64__
	return 0;
#else
#error unknown arch
#endif
}


void IOGetTime( mach_timespec_t * clock_time);
void
IOGetTime( mach_timespec_t * clock_time)
{
	clock_sec_t sec;
	clock_nsec_t nsec;
	clock_get_system_nanotime(&sec, &nsec);
	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
	clock_time->tv_nsec = nsec;
}