/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <mach/mach_host.h>
#include <vm/vm_kern.h>


#if defined(__i386__) || defined(__x86_64__)
#include <i386/mp.h>
#endif

#if defined (__arm__) || defined (__arm64__)
#include <arm/cpu_data_internal.h>
#endif

#define DEFAULT_MAGAZINE_SIZE 8		/* Default number of elements for all magazines allocated from the magazine_zone */
#define DEFAULT_DEPOT_SIZE 8		/* Default number of elements for the array zcc_depot_list */
#define ZCC_MAX_CPU_CACHE_LINE_SIZE 64	/* We should use a platform-specific macro for this in the future; right now this is the max cache line size across all platforms */

lck_grp_t zcache_locks_grp;		/* lock group for depot_lock */
zone_t magazine_zone;			/* zone to allocate zcc_magazine structs from */
uint16_t magazine_element_count = 0;	/* Size of array in magazine determined by boot-arg or default */
uint16_t depot_element_count = 0;	/* Size of depot lists determined by boot-arg or default */
bool zone_cache_ready = FALSE;		/* Flag to check if zone caching has been set up by zcache_bootstrap */
uintptr_t zcache_canary = 0;		/* Canary used by the caching layer to detect use-after-free of cached elements */

/* The zcc_magazine is used as a stack to store cached zone elements. These
 * sets of elements can be moved around to perform bulk operations.
 */
struct zcc_magazine {
	uint32_t zcc_magazine_index;		/* Used as a stack pointer to access elements in the array */
	uint32_t zcc_magazine_capacity;		/* Number of pointers able to be stored in the zcc_elements array */
	void *zcc_elements[0];			/* Array of pointers to objects */
};
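
/*
 * Illustrative sketch (not part of the build): a magazine is allocated with
 * its element array inline, so its total size depends on the configured
 * capacity; zcache_bootstrap() below sizes magazine_zone the same way:
 *
 *	size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *);
 *
 * zcc_magazine_index counts the elements currently stored, so it is also the
 * index of the next free slot in zcc_elements.
 */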


/* Each CPU will use one of these to store its elements
 */
struct zcc_per_cpu_cache {
	struct zcc_magazine *current;		/* Magazine we always try to allocate from and free to first */
	struct zcc_magazine *previous;		/* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
} __attribute__((aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE)));	/* Aligned to a cache line so the per-cpu caches do not thrash when multiple cpus access them in parallel */


/*
 * The depot layer can be invalid while zone_gc() is draining it out.
 * During that time, the CPU caches are active. For CPU magazine allocs and
 * frees, the caching layer reaches directly into the zone allocator.
 */
#define ZCACHE_DEPOT_INVALID			-1
#define zcache_depot_available(zcache)		((zcache)->zcc_depot_index != ZCACHE_DEPOT_INVALID)

/* This is the basic struct to take care of caching and is included within
 * the zone.
 */
struct zone_cache {
	lck_mtx_t zcc_depot_lock;				/* Lock for the depot layer of caching */
	struct zcc_per_cpu_cache zcc_per_cpu_caches[MAX_CPUS];	/* An array of caches, one for each CPU */
	int zcc_depot_index;					/* Marks the point in the array where empty magazines begin */
	struct zcc_magazine *zcc_depot_list[0];			/* Stores full and empty magazines in the depot layer */
};
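
/*
 * Depot layout sketch (inferred from the swap routines below): the magazines
 * in zcc_depot_list[0 .. zcc_depot_index - 1] are full and the magazines in
 * zcc_depot_list[zcc_depot_index .. depot_element_count - 1] are empty, so a
 * single index is enough to hand out either kind. For example, with
 * depot_element_count == 4:
 *
 *	zcc_depot_list: [ full | full | empty | empty ]
 *	                               ^ zcc_depot_index == 2
 */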


void zcache_init_marked_zones(void);
bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_init(struct zcc_magazine *mag, int count);
void *zcache_mag_pop(struct zcc_magazine *mag);
void zcache_mag_push(struct zcc_magazine *mag, void *elem);
bool zcache_mag_has_space(struct zcc_magazine *mag);
bool zcache_mag_has_elements(struct zcc_magazine *mag);
void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
void zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache);
void zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache);
void zcache_mag_depot_swap(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache, boolean_t load_full);
void zcache_canary_add(zone_t zone, void *addr);
void zcache_canary_validate(zone_t zone, void *addr);
105
106/*
107 * zcache_ready
108 *
109 * Description: returns whether or not the zone caches are ready to use
110 *
111 */
112bool zcache_ready(void){
113 return zone_cache_ready;
114}
115
116/*
117 * zcache_init_marked_zones
118 *
119 * Description: Initializes all parts of the per-cpu caches for the list of
120 * marked zones once we are able to initalize caches. This should
121 * only be called once, and will be called during the time that the
122 * system is single threaded so we don't have to take the lock.
123 *
124 */
125void zcache_init_marked_zones(void){
126 unsigned int i;
127 for(i = 0; i < num_zones; i ++){
128 if(zone_array[i].cpu_cache_enable_when_ready){
129 zcache_init(&zone_array[i]);
130 zone_array[i].cpu_cache_enable_when_ready = FALSE;
131 }
132 }
133}
134
135/*
136 * zcache_bootstrap
137 *
138 * Description: initializes zone to allocate magazines from and sets
139 * magazine_element_count and depot_element_count from
140 * boot-args or default values
141 *
142 */
143void zcache_bootstrap(void)
144{
145 /* use boot-arg for custom magazine size*/
146 if (! PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count, sizeof (uint16_t)))
147 magazine_element_count = DEFAULT_MAGAZINE_SIZE;
148
149 int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *);
150
151 magazine_zone = zinit(magazine_size, 100000 * magazine_size , magazine_size, "zcc_magazine_zone");
152
153 assert(magazine_zone != NULL);
154
155 /* use boot-arg for custom depot size*/
156 if (! PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count, sizeof (uint16_t)))
157 depot_element_count = DEFAULT_DEPOT_SIZE;
158
159 lck_grp_init(&zcache_locks_grp, "zcc_depot_lock", LCK_GRP_ATTR_NULL);
160
161 /* Generate the canary value for zone caches */
162 zcache_canary = (uintptr_t) early_random();
163
164 zone_cache_ready = TRUE;
165
166 zcache_init_marked_zones();
167}
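
/*
 * Usage sketch: both sizes can be tuned from the boot command line via the
 * boot-args parsed above (the values here are illustrative, not tuned
 * recommendations):
 *
 *	zcc_magazine_element_count=16 zcc_depot_element_count=8
 */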


/*
 * zcache_init
 *
 * Description: Initializes all parts of the per-cpu caches for a given zone
 *
 * Parameters:	zone	pointer to zone on which to initialize caching
 *
 */
void zcache_init(zone_t zone)
{
	int i;				/* used as index in for loops */
	vm_size_t total_size;		/* Used for allocating the zone_cache struct with the proper size of depot list */
	struct zone_cache *temp_cache;	/* Temporary variable to initialize a zone_cache before assigning to the specified zone */

	/* Allocate chunk of memory for all structs */
	total_size = sizeof(struct zone_cache) + (depot_element_count * sizeof(void *));

	temp_cache = (struct zone_cache *) kalloc(total_size);

	assert(temp_cache != NULL);

	/* Initialize a cache for every CPU */
	for (i = 0; i < MAX_CPUS; i++) {
		temp_cache->zcc_per_cpu_caches[i].current = (struct zcc_magazine *)zalloc(magazine_zone);
		temp_cache->zcc_per_cpu_caches[i].previous = (struct zcc_magazine *)zalloc(magazine_zone);

		assert(temp_cache->zcc_per_cpu_caches[i].current != NULL && temp_cache->zcc_per_cpu_caches[i].previous != NULL);

		zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].current, magazine_element_count);
		zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].previous, magazine_element_count);
	}

	/* Initialize the lock on the depot layer */
	lck_mtx_init(&(temp_cache->zcc_depot_lock), &zcache_locks_grp, LCK_ATTR_NULL);

	/* Initialize empty magazines in the depot list */
	for (i = 0; i < depot_element_count; i++) {
		temp_cache->zcc_depot_list[i] = (struct zcc_magazine *)zalloc(magazine_zone);

		assert(temp_cache->zcc_depot_list[i] != NULL);

		zcache_mag_init(temp_cache->zcc_depot_list[i], magazine_element_count);
	}

	temp_cache->zcc_depot_index = 0;

	lock_zone(zone);
	zone->zcache = temp_cache;
	/* Set flag to know caching is enabled */
	zone->cpu_cache_enabled = TRUE;
	unlock_zone(zone);
	return;
}

/*
 * zcache_drain_depot
 *
 * Description: Frees all the full magazines from the depot layer to the zone allocator as part
 *		of zone_gc(). The routine assumes that only one zone_gc() is in progress (the
 *		zone_gc_lock ensures that).
 *
 * Parameters:	zone	pointer to zone for which the depot layer needs to be drained
 *
 * Returns: None
 *
 */
void zcache_drain_depot(zone_t zone)
{
	struct zone_cache *zcache = zone->zcache;
	int drain_depot_index = 0;

	/*
	 * Grab the current depot list from the zone cache. If it has full magazines,
	 * mark the depot as invalid and drain it.
	 */
	lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
	if (!zcache_depot_available(zcache) || (zcache->zcc_depot_index == 0)) {
		/* no full magazines in the depot or depot unavailable; nothing to drain here */
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		return;
	}
	drain_depot_index = zcache->zcc_depot_index;
	/* Mark the depot as unavailable */
	zcache->zcc_depot_index = ZCACHE_DEPOT_INVALID;
	lck_mtx_unlock(&(zcache->zcc_depot_lock));

	/* Now drain the full magazines in the depot */
	for (int i = 0; i < drain_depot_index; i++)
		zcache_mag_drain(zone, zcache->zcc_depot_list[i]);

	lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
	/* Mark the depot as available again */
	zcache->zcc_depot_index = 0;
	lck_mtx_unlock(&(zcache->zcc_depot_lock));
}
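
/*
 * Timeline sketch of the drain protocol above (the interleaving is
 * illustrative): while zcc_depot_index is ZCACHE_DEPOT_INVALID, concurrent
 * callers see zcache_depot_available() fail and bypass the depot, so the
 * drain does not need to hold the depot lock across the zfree calls.
 *
 *	zone_gc() thread			alloc/free fast path
 *	----------------			--------------------
 *	lock depot; index -> INVALID
 *	unlock depot
 *	zcache_mag_drain() each full mag	lock depot
 *						zcache_depot_available() == false
 *						unlock depot; fall through to zone
 *	lock depot; index -> 0; unlock
 */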


/*
 * zcache_free_to_cpu_cache
 *
 * Description: Checks the per-cpu caches and frees the element there if possible
 *
 * Parameters:	zone	pointer to the zone the element comes from
 *		addr	pointer to the element to free
 *
 * Returns: TRUE if successful, FALSE otherwise
 *
 * Precondition: check that caching is enabled for zone
 */
bool zcache_free_to_cpu_cache(zone_t zone, void *addr)
{
	int curcpu;					/* Current cpu is used to index into array of zcc_per_cpu_cache structs */
	struct zone_cache *zcache;			/* local storage of the zone's cache */
	struct zcc_per_cpu_cache *per_cpu_cache;	/* locally store the current per_cpu_cache */

	disable_preemption();
	curcpu = current_processor()->cpu_id;
	zcache = zone->zcache;
	per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];

	if (zcache_mag_has_space(per_cpu_cache->current)) {
		/* If able, free into current magazine */
		goto free_to_current;
	} else if (zcache_mag_has_space(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
		goto free_to_current;
	} else {
		lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
		if (zcache_depot_available(zcache) && (zcache->zcc_depot_index < depot_element_count)) {
			/* If able, rotate in a new empty magazine from the depot and retry */
			zcache_mag_depot_swap_for_free(zcache, per_cpu_cache);
			lck_mtx_unlock(&(zcache->zcc_depot_lock));
			goto free_to_current;
		}
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		/* Attempt to free an entire magazine of elements */
		zcache_mag_drain(zone, per_cpu_cache->current);
		if (zcache_mag_has_space(per_cpu_cache->current)) {
			goto free_to_current;
		}
	}

	/* If not able to use the cache, return FALSE and fall through to zfree */
	enable_preemption();
	return FALSE;

free_to_current:
	assert(zcache_mag_has_space(per_cpu_cache->current));
	zcache_canary_add(zone, addr);
	zcache_mag_push(per_cpu_cache->current, addr);

#if KASAN_ZALLOC
	kasan_poison_range((vm_offset_t)addr, zone->elem_size, ASAN_HEAP_FREED);
#endif

	enable_preemption();
	return TRUE;
}
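
/*
 * Free-path cascade sketch (a summary of the function above):
 *
 *	current has space?	-> push there
 *	previous has space?	-> swap current/previous, push
 *	depot has empty mag?	-> rotate it in, push
 *	otherwise		-> drain current into the zone, then push
 *	still no space?		-> return FALSE; caller falls through to zfree
 */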


/*
 * zcache_alloc_from_cpu_cache
 *
 * Description: Checks the per-cpu caches and allocates an element from there if possible
 *
 * Parameters:	zone	pointer to the zone the element will come from
 *
 * Returns: pointer to a usable element
 *
 * Precondition: check that caching is enabled for zone
 */
vm_offset_t zcache_alloc_from_cpu_cache(zone_t zone)
{
	int curcpu;					/* Current cpu is used to index into array of zcc_per_cpu_cache structs */
	void *ret = NULL;				/* Points to the element which will be returned */
	struct zone_cache *zcache;			/* local storage of the zone's cache */
	struct zcc_per_cpu_cache *per_cpu_cache;	/* locally store the current per_cpu_cache */

	disable_preemption();
	curcpu = current_processor()->cpu_id;
	zcache = zone->zcache;
	per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];

	if (zcache_mag_has_elements(per_cpu_cache->current)) {
		/* If able, allocate from current magazine */
		goto allocate_from_current;
	} else if (zcache_mag_has_elements(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
		goto allocate_from_current;
	} else {
		lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
		if (zcache_depot_available(zcache) && (zcache->zcc_depot_index > 0)) {
			/* If able, rotate in a full magazine from the depot */
			zcache_mag_depot_swap_for_alloc(zcache, per_cpu_cache);
			lck_mtx_unlock(&(zcache->zcc_depot_lock));
			goto allocate_from_current;
		}
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		/* Attempt to allocate an entire magazine of elements */
		if (zcache_mag_fill(zone, per_cpu_cache->current)) {
			goto allocate_from_current;
		}
	}

	/* If unable to allocate from the cache, return NULL and fall through to zalloc */
	enable_preemption();
	return (vm_offset_t) NULL;

allocate_from_current:
	ret = zcache_mag_pop(per_cpu_cache->current);
	assert(ret != NULL);
	zcache_canary_validate(zone, ret);

#if KASAN_ZALLOC
	kasan_poison_range((vm_offset_t)ret, zone->elem_size, ASAN_VALID);
#endif

	enable_preemption();
	return (vm_offset_t) ret;
}
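
/*
 * Allocation-path cascade sketch (the mirror image of the free path above):
 *
 *	current has elements?	-> pop from it
 *	previous has elements?	-> swap current/previous, pop
 *	depot has full mag?	-> rotate it in, pop
 *	otherwise		-> refill current via zcache_mag_fill(), pop
 *	zone had nothing?	-> return NULL; caller falls through to zalloc
 */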


/*
 * zcache_mag_init
 *
 * Description: initializes fields in a zcc_magazine struct
 *
 * Parameters:	mag	pointer to magazine to initialize
 *		count	capacity of the magazine's element array
 *
 */
void zcache_mag_init(struct zcc_magazine *mag, int count)
{
	mag->zcc_magazine_index = 0;
	mag->zcc_magazine_capacity = count;
}


/*
 * zcache_mag_fill
 *
 * Description: fills a magazine with as many elements as the zone can give
 *		without blocking to carve out more memory
 *
 * Parameters:	zone	zone from which to allocate
 *		mag	pointer to magazine to fill
 *
 * Return: true if able to allocate elements, false if the magazine is still empty
 */
bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag)
{
	assert(mag->zcc_magazine_index == 0);
	void *elem = NULL;
	uint32_t i;
	lock_zone(zone);
	for (i = mag->zcc_magazine_index; i < mag->zcc_magazine_capacity; i++) {
		elem = zalloc_attempt(zone);
		if (elem) {
			zcache_canary_add(zone, elem);
			zcache_mag_push(mag, elem);
#if KASAN_ZALLOC
			kasan_poison_range((vm_offset_t)elem, zone->elem_size, ASAN_HEAP_FREED);
#endif
		} else {
			break;
		}
	}
	unlock_zone(zone);
	if (i == 0) {
		return FALSE;
	}
	return TRUE;
}

/*
 * zcache_mag_drain
 *
 * Description: frees all elements in a magazine
 *
 * Parameters:	zone	zone to which elements will be freed
 *		mag	pointer to magazine to empty
 *
 */
void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag)
{
	assert(mag->zcc_magazine_index == mag->zcc_magazine_capacity);
	lock_zone(zone);
	while (mag->zcc_magazine_index > 0) {
		uint32_t index = --mag->zcc_magazine_index;
		zcache_canary_validate(zone, mag->zcc_elements[index]);
		zfree_direct(zone, (vm_offset_t)mag->zcc_elements[index]);
		mag->zcc_elements[index] = NULL;
	}
	unlock_zone(zone);
}

/*
 * zcache_mag_pop
 *
 * Description: removes the last element from the magazine in a stack-pop fashion.
 *		zcc_magazine_index represents the number of elements on the
 *		stack, so it is the index at which the next element will be
 *		saved; when the magazine is full, it is 1 past the last index
 *		of the array
 *
 * Parameters:	mag	pointer to magazine from which to remove element
 *
 * Returns: pointer to element removed from magazine
 *
 * Precondition: must check that magazine is not empty before calling
 */
void *zcache_mag_pop(struct zcc_magazine *mag)
{
	void *elem;
	assert(zcache_mag_has_elements(mag));
	elem = mag->zcc_elements[--mag->zcc_magazine_index];
	/* Ensure pointer to element cannot be accessed after we pop it */
	mag->zcc_elements[mag->zcc_magazine_index] = NULL;
	assert(elem != NULL);
	return elem;
}


/*
 * zcache_mag_push
 *
 * Description: adds an element to the magazine and increments zcc_magazine_index.
 *		zcc_magazine_index represents the number of elements on the
 *		stack, so it is the index at which the next element will be
 *		saved; when the magazine is full, it is 1 past the last index
 *		of the array
 *
 * Parameters:	mag	pointer to magazine to which to add the element
 *		elem	pointer to element to add
 *
 * Precondition: must check that magazine is not full before calling
 */
void zcache_mag_push(struct zcc_magazine *mag, void *elem)
{
	assert(zcache_mag_has_space(mag));
	mag->zcc_elements[mag->zcc_magazine_index++] = elem;
}
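
/*
 * Worked example of the stack discipline shared by zcache_mag_pop() and
 * zcache_mag_push() (capacity 4, element names illustrative):
 *
 *	push(a): index 0 -> 1		elements: [ a | - | - | - ]
 *	push(b): index 1 -> 2		elements: [ a | b | - | - ]
 *	pop() == b: index 2 -> 1	elements: [ a | NULL | - | - ]
 *	pop() == a: index 1 -> 0	magazine is empty again
 */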


/*
 * zcache_mag_has_space
 *
 * Description: checks if magazine still has capacity
 *
 * Parameters:	mag	pointer to magazine to check
 *
 * Returns: true if the magazine has space for another element, false if it is full
 *
 */
bool zcache_mag_has_space(struct zcc_magazine *mag)
{
	return (mag->zcc_magazine_index < mag->zcc_magazine_capacity);
}


/*
 * zcache_mag_has_elements
 *
 * Description: checks if the magazine contains any elements
 *
 * Parameters:	mag	pointer to magazine to check
 *
 * Returns: true if the magazine has elements, false if it is empty
 *
 */
bool zcache_mag_has_elements(struct zcc_magazine *mag)
{
	return (mag->zcc_magazine_index > 0);
}


/*
 * zcache_swap_magazines
 *
 * Description: swaps the two magazine pointers passed in
 *
 * Parameters:	a	pointer to first magazine pointer
 *		b	pointer to second magazine pointer
 */
void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
	struct zcc_magazine *temp = *a;
	*a = *b;
	*b = temp;
}


/*
 * zcache_mag_depot_swap_for_alloc
 *
 * Description: swaps a full magazine into the current position
 *
 * Parameters:	zcache	pointer to the zone_cache to access the depot
 *		cache	pointer to the current per-cpu cache
 *
 * Precondition: check that the depot list has at least one full magazine
 */
void zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
{
	/* Loads a full magazine from which we can allocate */
	assert(zcache_depot_available(zcache));
	assert(zcache->zcc_depot_index > 0);
	zcache->zcc_depot_index--;
	zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
}


/*
 * zcache_mag_depot_swap_for_free
 *
 * Description: swaps an empty magazine into the current position
 *
 * Parameters:	zcache	pointer to the zone_cache to access the depot
 *		cache	pointer to the current per-cpu cache
 *
 * Precondition: check that the depot list has at least one empty magazine
 */
void zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
{
	/* Loads an empty magazine into which we can free */
	assert(zcache_depot_available(zcache));
	assert(zcache->zcc_depot_index < depot_element_count);
	zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
	zcache->zcc_depot_index++;
}
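
/*
 * Worked example of the two depot swaps (depot_element_count == 4,
 * illustrative), showing how the full/empty split is preserved:
 *
 *	start:           [ full | full | empty | empty ]	index == 2
 *	swap_for_alloc:  take list[1] (full), leave the empty
 *	                 current magazine there			index == 1
 *	swap_for_free:   place the full current at list[1],
 *	                 take the empty one back		index == 2
 */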

/*
 * zcache_canary_add
 *
 * Description: adds a canary to an element by putting zcache_canary at the first
 *		and last location of the element
 *
 * Parameters:	zone	zone for the element
 *		element	element address to add canary to
 *
 */
void zcache_canary_add(zone_t zone, void *element)
{
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));
	*primary = *backup = (zcache_canary ^ (uintptr_t)element);
}
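
/*
 * Layout sketch after zcache_canary_add() on LP64 (a hypothetical 32-byte
 * element): the first and last word both hold (zcache_canary ^ element), so
 * a use-after-free that scribbles on either end of the element is caught by
 * zcache_canary_validate() when the element is handed out again.
 *
 *	+--------------------+------------------+--------------------+
 *	| canary ^ elem (8B) | 16 middle bytes  | canary ^ elem (8B) |
 *	+--------------------+------------------+--------------------+
 */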

/*
 * zcache_canary_validate
 *
 * Description: validates an element of the zone cache to make sure it still contains the zone
 *		caching canary
 *
 * Parameters:	zone	zone for the element
 *		element	element address to validate
 *
 */
void zcache_canary_validate(zone_t zone, void *element)
{
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));

	vm_offset_t primary_value = (*primary ^ (uintptr_t)element);
	if (primary_value != zcache_canary) {
		panic("Zone cache element was used after free! Element %p was corrupted at beginning; Expected %p but found %p; canary %p",
		      element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*primary), (void *)zcache_canary);
	}

	vm_offset_t backup_value = (*backup ^ (uintptr_t)element);
	if (backup_value != zcache_canary) {
		panic("Zone cache element was used after free! Element %p was corrupted at end; Expected %p but found %p; canary %p",
		      element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*backup), (void *)zcache_canary);
	}
}