/*
 * runtime.c
 * libclosure
 *
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */


#ifndef KERNEL

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <os/assumes.h>
#include <TargetConditionals.h>

// Userspace builds allocate Blocks with plain malloc/free; the kernel
// build below uses typed kalloc instead.
#define block_layout_alloc(size)     ((struct Block_layout *)malloc(size))
#define block_layout_free(ptr, size) free(ptr)
#define block_byref_alloc(size)      ((struct Block_byref *)malloc(size))
#define block_byref_free(ptr, size)  free(ptr)

#else /* !KERNEL */
#define TARGET_OS_WIN32 0

#include <libkern/Block_private.h>
__BEGIN_DECLS
#include <kern/kalloc.h>
__END_DECLS

/* void * is a bit of a lie, but that will have to do */
KALLOC_TYPE_VAR_DEFINE(KT_BLOCK_LAYOUT, struct Block_layout, void *, KT_DEFAULT);
KALLOC_TYPE_VAR_DEFINE(KT_BLOCK_BYREF, struct Block_byref, void *, KT_DEFAULT);

static inline struct Block_layout *
block_layout_alloc(size_t size)
{
	return (struct Block_layout *)kalloc_type_var_impl(KT_BLOCK_LAYOUT,
	           size, Z_WAITOK_ZERO_NOFAIL, NULL);
}

static inline void
block_layout_free(struct Block_layout *ptr, size_t size)
{
	kfree_type_var_impl(KT_BLOCK_LAYOUT, ptr, size);
}

static inline struct Block_byref *
block_byref_alloc(size_t size)
{
	return (struct Block_byref *)kalloc_type_var_impl(KT_BLOCK_BYREF,
	           size, Z_WAITOK_ZERO_NOFAIL, NULL);
}

static inline void
block_byref_free(struct Block_byref *ptr, size_t size)
{
	kfree_type_var_impl(KT_BLOCK_BYREF, ptr, size);
}

#endif /* KERNEL */


#include <machine/atomic.h>
#include <string.h>
#include <stdint.h>
#ifndef os_assumes
#define os_assumes(_x) (_x)
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool
OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	long original = InterlockedCompareExchange(dst, newl, oldl);
	return original == oldl;
}

static __inline bool
OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	int original = InterlockedCompareExchange(dst, newi, oldi);
	return original == oldi;
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#endif


/*******************************************************************************
 * Internal Utilities
 ********************************************************************************/

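// The reference count lives in the low bits of Block_layout->flags,
// masked by BLOCK_REFCOUNT_MASK, with the least significant bit of that
// field reserved for BLOCK_DEALLOCATING. Logical retains and releases
// therefore move the count in steps of 2. A sketch of the transitions
// implemented below (assuming BLOCK_DEALLOCATING is bit 0):
//   flags & mask == 2     one reference, not deallocating
//   flags & mask == 4     two references
//   2 -> 1                last release: count 0 with BLOCK_DEALLOCATING set
//   flags & mask == mask  saturated ("latched"): the count pins here and
//                         the block is intentionally leaked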
static int32_t
latching_incr_int(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return BLOCK_REFCOUNT_MASK;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			return old_value + 2;
		}
	}
}

static bool
latching_incr_int_not_deallocating(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if (old_value & BLOCK_DEALLOCATING) {
			// if deallocating we can't do this
			return false;
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			// if latched, we're leaking this block, and we succeed
			return true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			// otherwise, we must store a new retained value without the deallocating bit set
			return true;
		}
	}
}


// Decrement the refcount and return whether the caller should deallocate.
// The 2 -> 1 transition drops the count to zero and sets BLOCK_DEALLOCATING.
static bool
latching_decr_int_should_deallocate(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return false; // latched high
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
			return false; // underflow, latch low
		}
		int32_t new_value = old_value - 2;
		bool result = false;
		if ((old_value & (BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING)) == 2) {
			new_value = old_value - 1;
			result = true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
			return result;
		}
	}
}


/**************************************************************************
 * Framework callback functions and their default implementations.
 ***************************************************************************/
#if !TARGET_OS_WIN32
#pragma mark Framework Callback Routines
#endif
#if KERNEL
static inline void
_Block_retain_object(const void *ptr __unused)
{
}

static inline void
_Block_release_object(const void *ptr __unused)
{
}

static inline void
_Block_destructInstance(const void *aBlock __unused)
{
}

#else

static void
_Block_retain_object_default(const void *ptr __unused)
{
}

static void
_Block_release_object_default(const void *ptr __unused)
{
}

static void
_Block_destructInstance_default(const void *aBlock __unused)
{
}

static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_destructInstance)(const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
 * Callback registration from ObjC runtime and CoreFoundation
 ***************************************************************************/

void
_Block_use_RR2(const Block_callbacks_RR *callbacks)
{
	_Block_retain_object = callbacks->retain;
	_Block_release_object = callbacks->release;
	_Block_destructInstance = callbacks->destructInstance;
}
#endif // !KERNEL

/****************************************************************************
 * Accessors for block descriptor fields
 *****************************************************************************/

#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
template <class T>
static T *
unwrap_relative_pointer(int32_t &offset)
{
	if (offset == 0) {
		return nullptr;
	}

	uintptr_t base = (uintptr_t)&offset;
	uintptr_t extendedOffset = (uintptr_t)(intptr_t)offset;
	uintptr_t pointer = base + extendedOffset;
	return (T *)pointer;
}
#endif
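
// For example (illustrative arithmetic, not code from this file): if a
// small descriptor's 32-bit `signature` field lives at address
// 0x100000020 and holds the value -0x20, the signature string it refers
// to is at 0x100000020 - 0x20 = 0x100000000. Storing self-relative
// offsets keeps each small-descriptor field 32 bits wide and free of
// pointers that would need rebasing.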

#if 0
static struct Block_descriptor_2 *
_Block_descriptor_2(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc);
}
#endif

static struct Block_descriptor_3 *
_Block_descriptor_3(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
		desc += sizeof(struct Block_descriptor_2);
	}
	return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc);
}

static void
_Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_copy_function(aBlock)) {
		pFn(result, aBlock);
	}
}

static void
_Block_call_dispose_helper(struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_dispose_function(aBlock)) {
		pFn(aBlock);
	}
}

/*******************************************************************************
 * Internal Support routines for copying
 ********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump the refcount of, a block. If really copying, call the copy helper if present.
void *
_Block_copy(const void *arg)
{
	struct Block_layout *aBlock;

	if (!arg) {
		return NULL;
	}

	// The following would be better done as a switch statement
	aBlock = (struct Block_layout *)arg;
	if (aBlock->flags & BLOCK_NEEDS_FREE) {
		// latches on high
		latching_incr_int(&aBlock->flags);
		return aBlock;
	} else if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return aBlock;
	} else {
		// It's a stack block. Make a copy.
		size_t size = Block_size(aBlock);
		struct Block_layout *result = block_layout_alloc(size);
		memmove(result, aBlock, size); // bitcopy first
#if __has_feature(ptrauth_calls)
		// Resign the invoke pointer as it uses address authentication.
		result->invoke = aBlock->invoke;

#if __has_feature(ptrauth_signed_block_descriptors)
		uintptr_t oldDesc = ptrauth_blend_discriminator(
			&aBlock->descriptor, _Block_descriptor_ptrauth_discriminator);
		uintptr_t newDesc = ptrauth_blend_discriminator(
			&result->descriptor, _Block_descriptor_ptrauth_discriminator);

		result->descriptor =
		    ptrauth_auth_and_resign(aBlock->descriptor, ptrauth_key_asda, oldDesc,
		        ptrauth_key_asda, newDesc);
#endif
#endif

		// reset refcount
		result->flags &= ~(BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING); // XXX not needed
		result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
		_Block_call_copy_helper(result, aBlock);
		// Set isa last so memory analysis tools see a fully-initialized object.
		result->isa = _NSConcreteMallocBlock;
		return result;
	}
}
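
/*
 * Usage sketch (caller's side, not part of this file): copying a stack
 * block promotes it to the heap exactly once; further copies just retain.
 *
 *  int x = 10;
 *  void (^stackBlock)(void) = ^{ printf("%d\n", x); }; // lives on the stack
 *  void (^heapBlock)(void) = Block_copy(stackBlock);   // heap copy, refcount 1
 *  void (^again)(void)     = Block_copy(heapBlock);    // same object, refcount 2
 *  Block_release(again);
 *  Block_release(heapBlock);                           // refcount 0: dispose + free
 */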


// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding pointer.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment and return it.
// Otherwise we need to copy it and update the stack forwarding pointer.
static struct Block_byref *
_Block_byref_copy(const void *arg)
{
	struct Block_byref *src = (struct Block_byref *)arg;

	if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
		// src points to stack
		struct Block_byref *copy = block_byref_alloc(src->size);
		copy->isa = NULL;
		// byref value 4 is logical refcount of 2: one for caller, one for stack
		copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4;
		copy->forwarding = copy; // patch heap copy to point to itself
		src->forwarding = copy; // patch stack to point to heap copy
		copy->size = src->size;

		if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
			// Trust copy helper to copy everything of interest.
			// If more than one field shows up in a byref block this is wrong XXX
			struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src + 1);
			struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy + 1);
			copy2->byref_keep = src2->byref_keep;
			copy2->byref_destroy = src2->byref_destroy;

			if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
				struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2 + 1);
				struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2 + 1);
				copy3->layout = src3->layout;
			}

			(*src2->byref_keep)(copy, src);
		} else {
			// Bitwise copy.
			// This copy includes Block_byref_3, if any.
			memmove(copy + 1, src + 1, src->size - sizeof(*src));
		}
	}
	// already copied to heap
	else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
		latching_incr_int(&src->forwarding->flags);
	}

	return src->forwarding;
}
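
/*
 * Illustrative source-level view (not code from this file): for
 *
 *  __block int counter = 0;
 *  void (^incr)(void) = ^{ counter++; };
 *
 * the compiler rewrites `counter` into a Block_byref on the stack and
 * compiles every access as `byref->forwarding->counter`. Before
 * Block_copy(incr), the forwarding pointer points at the stack copy;
 * _Block_byref_copy then moves the variable to the heap and repoints
 * both forwarding pointers there, so the stack frame and all heap
 * copies keep sharing the one `counter`.
 */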

static void
_Block_byref_release(const void *arg)
{
	struct Block_byref *byref = (struct Block_byref *)arg;

	// dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
	byref = byref->forwarding;

	if (byref->flags & BLOCK_BYREF_NEEDS_FREE) {
		__assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK;
		os_assert(refcount);
		if (latching_decr_int_should_deallocate(&byref->flags)) {
			if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
				struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref + 1);
				(*byref2->byref_destroy)(byref);
			}
			block_byref_free(byref, byref->size);
		}
	}
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif


// API entry point to release a copied Block
void
_Block_release(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	if (!aBlock) {
		return;
	}
	if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return;
	}
	if (!(aBlock->flags & BLOCK_NEEDS_FREE)) {
		return;
	}

	if (latching_decr_int_should_deallocate(&aBlock->flags)) {
		_Block_call_dispose_helper(aBlock);
		_Block_destructInstance(aBlock);
		block_layout_free(aBlock, Block_size(aBlock));
	}
}

bool
_Block_tryRetain(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool
_Block_isDeallocating(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}


/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

size_t
Block_size(void *aBlock)
{
	auto *layout = (Block_layout *)aBlock;
	void *desc = _Block_get_descriptor(layout);
#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		return ((Block_descriptor_small *)desc)->size;
	}
#endif
	return ((Block_descriptor_1 *)desc)->size;
}

bool
_Block_use_stret(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;

	int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
	return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool
_Block_has_signature(void *aBlock)
{
	return _Block_signature(aBlock) ? true : false;
}

const char *
_Block_signature(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->signature);
	}
#endif

	struct Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
	return desc3->signature;
}

const char *
_Block_layout(void *aBlock)
{
	// Don't return extended layout to callers expecting old GC layout
	Block_layout *layout = (Block_layout *)aBlock;
	if ((layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->layout);
	}
#endif

	Block_descriptor_3 *desc = _Block_descriptor_3(layout);
	return desc->layout;
}

const char *
_Block_extended_layout(void *aBlock)
{
	// Don't return old GC layout to callers expecting extended layout
	Block_layout *layout = (Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	const char *extLayout;
#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		if (layout->flags & BLOCK_INLINE_LAYOUT_STRING) {
			extLayout = (const char *)(uintptr_t)bds->layout;
		} else {
			extLayout = unwrap_relative_pointer<const char>(bds->layout);
		}
	} else
#endif
	{
		Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
		extLayout = desc3->layout;
	}

	// Return empty string (all non-object bytes) instead of NULL
	// so callers can distinguish "empty layout" from "no layout".
	if (!extLayout) {
		extLayout = "";
	}
	return extLayout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************
 *
 * Entry points used by the compiler - the real API!
 *
 *
 * A Block can reference four different kinds of things that require help when the Block is copied to the heap.
 * 1) C++ stack based objects
 * 2) References to Objective-C objects
 * 3) Other Blocks
 * 4) __block variables
 *
 * In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
 *
 * The flags parameter of _Block_object_assign and _Block_object_dispose is set to
 * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C object,
 * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
 * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
 * If the __block variable is marked weak, the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).
 *
 * So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
 *
 * When a __block variable is either a C++ object, an Objective-C object, or another Block, the compiler also generates copy/dispose helper functions for it. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. the "byref" copy helper) will run the C++ copy constructor (not a const one though!) and the dispose helper will run the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
 *
 * So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and 128 always ORed in, for the following set of possibilities:
 *   __block id                   128+3       (0x83)
 *   __block (^Block)             128+7       (0x87)
 *   __weak __block id            128+3+16    (0x93)
 *   __weak __block (^Block)      128+7+16    (0x97)
 *
 *
 ********************************************************/
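
/*
 * Illustrative sketch of what the compiler synthesizes (names are made
 * up; the real helpers are anonymous): for a block capturing one
 * Objective-C object, the copy/dispose helpers reduce to single calls
 * into the two entry points below, with flags == BLOCK_FIELD_IS_OBJECT (3).
 *
 *  struct my_block_literal {
 *      void *isa;
 *      int flags;
 *      int reserved;
 *      void (*invoke)(struct my_block_literal *);
 *      struct Block_descriptor_1 *descriptor;
 *      id captured;  // the captured object, retained on copy
 *  };
 *
 *  static void my_block_copy_helper(void *dst, void *src)
 *  {
 *      struct my_block_literal *d = dst, *s = src;
 *      _Block_object_assign(&d->captured, s->captured, BLOCK_FIELD_IS_OBJECT);
 *  }
 *
 *  static void my_block_dispose_helper(void *src)
 *  {
 *      struct my_block_literal *s = src;
 *      _Block_object_dispose(s->captured, BLOCK_FIELD_IS_OBJECT);
 *  }
 */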

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void
_Block_object_assign(void *destArg, const void *object, const int flags)
{
	const void **dest = (const void **)destArg;
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_OBJECT:
		/*******
		 * id object = ...;
		 * [^{ object; } copy];
		 ********/

		_Block_retain_object(object);
		*dest = object;
		break;

	case BLOCK_FIELD_IS_BLOCK:
		/*******
		 * void (^object)(void) = ...;
		 * [^{ object; } copy];
		 ********/

		*dest = _Block_copy(object);
		break;

	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		/*******
		 * // copy the onstack __block container to the heap
		 * // Note this __weak is old GC-weak/MRC-unretained.
		 * // ARC-style __weak is handled by the copy helper directly.
		 * __block ... x;
		 * __weak __block ... x;
		 * [^{ x; } copy];
		 ********/

		*dest = _Block_byref_copy(object);
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
		/*******
		 * // copy the actual field held in the __block container
		 * // Note this is MRC unretained __block only.
		 * // ARC retained __block is handled by the copy helper directly.
		 * __block id object;
		 * __block void (^object)(void);
		 * [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
		/*******
		 * // copy the actual field held in the __block container
		 * // Note this __weak is old GC-weak/MRC-unretained.
		 * // ARC-style __weak is handled by the copy helper directly.
		 * __weak __block id object;
		 * __weak __block void (^object)(void);
		 * [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	default:
		break;
	}
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents.
void
_Block_object_dispose(const void *object, const int flags)
{
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		// get rid of the __block data structure held in a Block
		_Block_byref_release(object);
		break;
	case BLOCK_FIELD_IS_BLOCK:
		_Block_release(object);
		break;
	case BLOCK_FIELD_IS_OBJECT:
		_Block_release_object(object);
		break;
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
		break;
	default:
		break;
	}
}


// Workaround for <rdar://26015603> dylib with no __DATA segment fails to rebase
__attribute__((used))
static int let_there_be_data = 42;