1 | /* |
2 | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <sys/errno.h> |
30 | |
31 | #include <mach/mach_types.h> |
32 | #include <mach/kern_return.h> |
33 | #include <mach/memory_object_control.h> |
34 | #include <mach/memory_object_types.h> |
35 | #include <mach/port.h> |
36 | #include <mach/policy.h> |
37 | #include <mach/upl.h> |
38 | #include <kern/kern_types.h> |
39 | #include <kern/ipc_kobject.h> |
40 | #include <kern/host.h> |
41 | #include <kern/thread.h> |
42 | #include <ipc/ipc_port.h> |
43 | #include <ipc/ipc_space.h> |
44 | #include <device/device_port.h> |
45 | #include <vm/memory_object.h> |
46 | #include <vm/vm_pageout.h> |
47 | #include <vm/vm_map.h> |
48 | #include <vm/vm_kern.h> |
50 | #include <vm/vm_protos.h> |
51 | #include <mach/sdt.h> |
52 | #include <os/refcnt.h> |
53 | |
54 | |
55 | /* Device VM COMPONENT INTERFACES */ |
56 | |
57 | |
58 | /* |
59 | * Device PAGER |
60 | */ |
61 | |
62 | |
/* until component support available */
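/*
 * Vector of memory_object operations exported by the device pager;
 * the VM layer dispatches through these entry points.
 */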
const struct memory_object_pager_ops device_pager_ops = {
69 | .memory_object_reference = device_pager_reference, |
70 | .memory_object_deallocate = device_pager_deallocate, |
71 | .memory_object_init = device_pager_init, |
72 | .memory_object_terminate = device_pager_terminate, |
73 | .memory_object_data_request = device_pager_data_request, |
74 | .memory_object_data_return = device_pager_data_return, |
75 | .memory_object_data_initialize = device_pager_data_initialize, |
76 | .memory_object_map = device_pager_map, |
77 | .memory_object_last_unmap = device_pager_last_unmap, |
78 | .memory_object_backing_object = NULL, |
79 | .memory_object_pager_name = "device pager" |
80 | }; |
81 | |
82 | typedef uintptr_t device_port_t; |
83 | |
84 | /* |
85 | * The start of "struct device_pager" MUST match a "struct memory_object". |
86 | */ |
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t       lock;
	device_port_t   device_handle;  /* opaque handle passed to device_data_action() */
	vm_size_t       size;
#if MEMORY_OBJECT_HAS_REFCOUNT
#define dev_pgr_hdr_ref dev_pgr_hdr.mo_ref
#else
	os_ref_atomic_t dev_pgr_hdr_ref;
#endif
	int             flags;
	boolean_t       is_mapped;
} *device_pager_t;
103 | |
104 | __header_always_inline os_ref_count_t |
device_pager_get_refcount(device_pager_t device_object)
{
	return os_ref_get_count_raw(&device_object->dev_pgr_hdr_ref);
108 | } |
109 | |
LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");
111 | |
KALLOC_TYPE_DEFINE(device_pager_zone, struct device_pager, KT_DEFAULT);
113 | |
#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock)
120 | |
121 | device_pager_t |
122 | device_pager_lookup( /* forward */ |
123 | memory_object_t); |
124 | |
125 | device_pager_t |
126 | device_object_create(void); /* forward */ |
127 | |
#define DEVICE_PAGER_NULL ((device_pager_t) 0)
129 | |
130 | #define MAX_DNODE 10000 |
131 | |
132 | |
133 | /* |
134 | * |
135 | */ |
136 | memory_object_t |
137 | ( |
138 | __unused memory_object_t device, |
139 | uintptr_t device_handle, |
140 | vm_size_t size, |
141 | int flags) |
142 | { |
143 | device_pager_t device_object; |
144 | memory_object_control_t control; |
145 | vm_object_t object; |
146 | |
147 | device_object = device_object_create(); |
148 | if (device_object == DEVICE_PAGER_NULL) { |
149 | panic("device_pager_setup: device_object_create() failed" ); |
150 | } |
151 | |
152 | device_object->device_handle = device_handle; |
153 | device_object->size = size; |
154 | device_object->flags = flags; |
155 | |
	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
159 | object = memory_object_control_to_vm_object(control); |
160 | |
161 | memory_object_mark_trusted(control); |
162 | |
163 | assert(object != VM_OBJECT_NULL); |
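	/*
	 * Device memory is shared directly by everyone mapping it:
	 * mark the object true_share and use delayed copy so it is
	 * never subject to symmetric copy-on-write.
	 */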
164 | vm_object_lock(object); |
165 | VM_OBJECT_SET_TRUE_SHARE(object, TRUE); |
166 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { |
167 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; |
168 | } |
169 | vm_object_unlock(object); |
170 | |
171 | return (memory_object_t)device_object; |
172 | } |
173 | |
174 | /* |
175 | * |
176 | */ |
177 | kern_return_t |
178 | ( |
179 | memory_object_t device, |
180 | memory_object_offset_t offset, |
181 | ppnum_t page_num, |
182 | vm_size_t size) |
183 | { |
184 | device_pager_t device_object; |
185 | vm_object_t vm_object; |
186 | kern_return_t kr; |
187 | upl_t upl; |
188 | |
189 | device_object = device_pager_lookup(device); |
190 | if (device_object == DEVICE_PAGER_NULL) { |
191 | return KERN_FAILURE; |
192 | } |
193 | |
	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
196 | if (vm_object == NULL) { |
197 | return KERN_FAILURE; |
198 | } |
199 | |
	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
202 | if (kr != KERN_SUCCESS) { |
203 | return kr; |
204 | } |
205 | |
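	/*
	 * For a non-contiguous object, run the range through a
	 * UPL request/commit cycle so the newly entered pages are
	 * made valid in the object.
	 */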
206 | if (!vm_object->phys_contiguous) { |
207 | unsigned int null_size = 0; |
208 | assert((upl_size_t) size == size); |
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
215 | if (kr != KERN_SUCCESS) { |
216 | panic("device_pager_populate_object: list_req failed" ); |
217 | } |
218 | |
		upl_commit(upl, NULL, 0);
220 | upl_deallocate(upl); |
221 | } |
222 | |
223 | |
224 | return kr; |
225 | } |
226 | |
227 | /* |
228 | * |
229 | */ |
230 | device_pager_t |
231 | ( |
232 | memory_object_t mem_obj) |
233 | { |
234 | device_pager_t device_object; |
235 | |
236 | assert(mem_obj->mo_pager_ops == &device_pager_ops); |
237 | device_object = (device_pager_t)mem_obj; |
238 | assert(device_pager_get_refcount(device_object) > 0); |
239 | return device_object; |
240 | } |
241 | |
242 | /* |
243 | * |
244 | */ |
245 | kern_return_t |
246 | ( |
247 | memory_object_t mem_obj, |
248 | memory_object_control_t control, |
249 | __unused memory_object_cluster_size_t pg_size) |
250 | { |
251 | device_pager_t device_object; |
252 | kern_return_t kr; |
253 | memory_object_attr_info_data_t attributes; |
254 | |
255 | vm_object_t vm_object; |
256 | |
257 | |
258 | if (control == MEMORY_OBJECT_CONTROL_NULL) { |
259 | return KERN_INVALID_ARGUMENT; |
260 | } |
261 | |
262 | device_object = device_pager_lookup(mem_obj); |
263 | |
264 | memory_object_control_reference(control); |
265 | device_object->dev_pgr_hdr.mo_control = control; |
266 | |
267 | |
	/*
	 * The following settings should be done through an expanded
	 * change-attributes call.
	 */
270 | |
271 | vm_object = (vm_object_t)memory_object_control_to_vm_object(control); |
272 | vm_object_lock(vm_object); |
	VM_OBJECT_SET_PRIVATE(vm_object, TRUE);
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		VM_OBJECT_SET_PHYS_CONTIGUOUS(vm_object, TRUE);
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		VM_OBJECT_SET_NOPHYSCACHE(vm_object, TRUE);
279 | } |
280 | |
281 | vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK; |
282 | vm_object_unlock(vm_object); |
283 | |
284 | |
285 | attributes.copy_strategy = MEMORY_OBJECT_COPY_NONE; |
286 | /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/ |
287 | attributes.cluster_size = (1 << (PAGE_SHIFT)); |
288 | attributes.may_cache_object = FALSE; |
289 | attributes.temporary = TRUE; |
290 | |
	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
296 | if (kr != KERN_SUCCESS) { |
297 | panic("device_pager_init: memory_object_change_attributes() failed" ); |
298 | } |
299 | |
300 | return KERN_SUCCESS; |
301 | } |
302 | |
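/*
 * device_pager_data_action()
 *
 * Common path for page-in and page-out requests: bounds-check the range
 * against the pager's size, then forward it to the device through
 * device_data_action().
 */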
303 | static kern_return_t |
304 | ( |
305 | memory_object_t mem_obj, |
306 | memory_object_offset_t offset, |
307 | memory_object_cluster_size_t length, |
308 | vm_prot_t protection) |
309 | { |
310 | device_pager_t device_object; |
311 | memory_object_offset_t end_offset; |
312 | kern_return_t kr; |
313 | |
314 | device_object = device_pager_lookup(mem_obj); |
315 | |
316 | if (device_object == DEVICE_PAGER_NULL) { |
317 | panic("%s: lookup failed" , __func__); |
318 | } |
319 | |
320 | if (offset >= device_object->size || |
321 | os_add_overflow(offset, length, &end_offset) || |
322 | end_offset > device_object->size) { |
323 | return KERN_INVALID_VALUE; |
324 | } |
325 | |
326 | __IGNORE_WCASTALIGN(kr = device_data_action(device_object->device_handle, |
327 | (ipc_port_t) device_object, protection, offset, length)); |
328 | |
329 | return kr; |
330 | } |
331 | |
332 | /* |
333 | * |
334 | */ |
335 | /*ARGSUSED6*/ |
336 | kern_return_t |
337 | ( |
338 | memory_object_t mem_obj, |
339 | memory_object_offset_t offset, |
340 | memory_object_cluster_size_t data_cnt, |
341 | __unused memory_object_offset_t *resid_offset, |
342 | __unused int *io_error, |
343 | __unused boolean_t dirty, |
344 | __unused boolean_t kernel_copy, |
345 | __unused int upl_flags) |
346 | { |
	return device_pager_data_action(mem_obj, offset, data_cnt,
348 | VM_PROT_READ | VM_PROT_WRITE); |
349 | } |
350 | |
351 | /* |
352 | * |
353 | */ |
354 | kern_return_t |
355 | ( |
356 | memory_object_t mem_obj, |
357 | memory_object_offset_t offset, |
358 | memory_object_cluster_size_t length, |
359 | __unused vm_prot_t protection_required, |
360 | __unused memory_object_fault_info_t fault_info) |
361 | { |
362 | return device_pager_data_action(mem_obj, offset, length, VM_PROT_READ); |
363 | } |
364 | |
365 | /* |
366 | * |
367 | */ |
368 | void |
369 | ( |
370 | memory_object_t mem_obj) |
371 | { |
372 | device_pager_t device_object; |
373 | |
374 | device_object = device_pager_lookup(mem_obj); |
375 | os_ref_retain_raw(&device_object->dev_pgr_hdr_ref, NULL); |
376 | DTRACE_VM2(device_pager_reference, |
377 | device_pager_t, device_object, |
378 | unsigned int, device_pager_get_refcount(device_object)); |
379 | } |
380 | |
381 | /* |
382 | * |
383 | */ |
384 | void |
385 | ( |
386 | memory_object_t mem_obj) |
387 | { |
388 | device_pager_t device_object; |
389 | memory_object_control_t device_control; |
390 | os_ref_count_t ref_count; |
391 | |
392 | device_object = device_pager_lookup(mem_obj); |
393 | |
394 | DTRACE_VM2(device_pager_deallocate, |
395 | device_pager_t, device_object, |
396 | unsigned int, device_pager_get_refcount(device_object)); |
397 | |
398 | ref_count = os_ref_release_raw(&device_object->dev_pgr_hdr_ref, NULL); |
399 | |
400 | if (ref_count == 1) { |
401 | /* |
402 | * The last reference is our "named" reference. |
403 | * Close the device and "destroy" the VM object. |
404 | */ |
405 | |
406 | DTRACE_VM2(device_pager_destroy, |
407 | device_pager_t, device_object, |
408 | unsigned int, device_pager_get_refcount(device_object)); |
409 | |
410 | assert(device_object->is_mapped == FALSE); |
411 | if (device_object->device_handle != (device_port_t) NULL) { |
			device_close(device_object->device_handle);
413 | device_object->device_handle = (device_port_t) NULL; |
414 | } |
415 | device_control = device_object->dev_pgr_hdr.mo_control; |
		memory_object_destroy(device_control, VM_OBJECT_DESTROY_UNKNOWN_REASON);
417 | } else if (ref_count == 0) { |
418 | /* |
419 | * No more references: free the pager. |
420 | */ |
421 | DTRACE_VM2(device_pager_free, |
422 | device_pager_t, device_object, |
423 | unsigned int, device_pager_get_refcount(device_object)); |
424 | |
425 | device_control = device_object->dev_pgr_hdr.mo_control; |
426 | |
427 | if (device_control != MEMORY_OBJECT_CONTROL_NULL) { |
			memory_object_control_deallocate(device_control);
429 | device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
430 | } |
431 | device_pager_lock_destroy(device_object); |
432 | |
433 | zfree(device_pager_zone, device_object); |
434 | } |
435 | return; |
436 | } |
437 | |
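/*
 * device_pager_data_initialize()
 *
 * Not supported for device pagers.
 */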
438 | kern_return_t |
439 | ( |
440 | __unused memory_object_t mem_obj, |
441 | __unused memory_object_offset_t offset, |
442 | __unused memory_object_cluster_size_t data_cnt) |
443 | { |
444 | panic("device_pager_data_initialize" ); |
445 | return KERN_FAILURE; |
446 | } |
447 | |
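/*
 * device_pager_terminate()
 *
 * Nothing to do here: teardown happens in device_pager_deallocate().
 */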
448 | kern_return_t |
449 | ( |
450 | __unused memory_object_t mem_obj) |
451 | { |
452 | return KERN_SUCCESS; |
453 | } |
454 | |
455 | |
456 | /* |
457 | * |
458 | */ |
459 | kern_return_t |
460 | ( |
461 | memory_object_t mem_obj, |
462 | __unused vm_prot_t prot) |
463 | { |
464 | device_pager_t device_object; |
465 | |
466 | device_object = device_pager_lookup(mem_obj); |
467 | |
468 | device_pager_lock(device_object); |
469 | assert(device_pager_get_refcount(device_object) > 0); |
470 | if (device_object->is_mapped == FALSE) { |
471 | /* |
472 | * First mapping of this pager: take an extra reference |
473 | * that will remain until all the mappings of this pager |
474 | * are removed. |
475 | */ |
476 | device_object->is_mapped = TRUE; |
477 | device_pager_reference(mem_obj); |
478 | } |
479 | device_pager_unlock(device_object); |
480 | |
481 | return KERN_SUCCESS; |
482 | } |
483 | |
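/*
 * device_pager_last_unmap()
 *
 * The last mapping of this pager is gone: drop the extra reference
 * taken by device_pager_map().
 */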
484 | kern_return_t |
485 | ( |
486 | memory_object_t mem_obj) |
487 | { |
488 | device_pager_t device_object; |
489 | boolean_t drop_ref; |
490 | |
491 | device_object = device_pager_lookup(mem_obj); |
492 | |
493 | device_pager_lock(device_object); |
494 | assert(device_pager_get_refcount(device_object) > 0); |
495 | if (device_object->is_mapped) { |
496 | device_object->is_mapped = FALSE; |
497 | drop_ref = TRUE; |
498 | } else { |
499 | drop_ref = FALSE; |
500 | } |
501 | device_pager_unlock(device_object); |
502 | |
503 | if (drop_ref) { |
504 | device_pager_deallocate(mem_obj); |
505 | } |
506 | |
507 | return KERN_SUCCESS; |
508 | } |
509 | |
510 | |
511 | |
512 | /* |
513 | * |
514 | */ |
515 | device_pager_t |
516 | device_object_create(void) |
517 | { |
518 | device_pager_t device_object; |
519 | |
520 | device_object = zalloc_flags(device_pager_zone, |
521 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
522 | |
523 | device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT; |
524 | device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops; |
525 | device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
526 | |
527 | device_pager_lock_init(device_object); |
528 | os_ref_init_raw(&device_object->dev_pgr_hdr_ref, NULL); |
529 | device_object->is_mapped = FALSE; |
530 | |
531 | DTRACE_VM2(device_pager_create, |
532 | device_pager_t, device_object, |
533 | unsigned int, device_pager_get_refcount(device_object)); |
534 | |
535 | return device_object; |
536 | } |
537 | |
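/*
 * is_device_pager_ops()
 *
 * TRUE if the given pager ops vector belongs to the device pager.
 */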
538 | boolean_t |
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
540 | { |
541 | if (pager_ops == &device_pager_ops) { |
542 | return TRUE; |
543 | } |
544 | return FALSE; |
545 | } |
546 | |