/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __FIREHOSE_CHUNK_PRIVATE__
#define __FIREHOSE_CHUNK_PRIVATE__

#include <sys/param.h>
#include "firehose_types_private.h"
#include "tracepoint_private.h"

__BEGIN_DECLS

#define FIREHOSE_CHUNK_SIZE 4096ul

#define FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC   (1ULL << 0)
#define FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC (1ULL << 16)
#define FIREHOSE_CHUNK_POS_REFCNT_INC       (1ULL << 32)
#define FIREHOSE_CHUNK_POS_FULL_BIT         (1ULL << 56)
#define FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(pos, stream) \
		((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream)
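
/*
 * Layout sketch (added commentary, derived from the union below; assumes the
 * little-endian targets this header is built for): the packed 64-bit position
 * word holds
 *
 *	bits  0-15  fcp_next_entry_offs  (bumped via FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC)
 *	bits 16-31  fcp_private_offs     (bumped via FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC)
 *	bits 32-39  fcp_refcnt           (bumped via FIREHOSE_CHUNK_POS_REFCNT_INC)
 *	bits 40-47  fcp_qos
 *	bits 48-55  fcp_stream
 *	bit  56     fcp_flag_full        (FIREHOSE_CHUNK_POS_FULL_BIT)
 *	bits 57-63  fcp_flag_io, fcp_quarantined, unused
 *
 * FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM masks 9 bits (0x1ff) starting at bit 48
 * so that a set full bit makes the comparison fail even when the stream byte
 * itself matches.
 */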

typedef union {
	os_atomic(uint64_t) fcp_atomic_pos;
	uint64_t fcp_pos;
	struct {
		uint16_t fcp_next_entry_offs;
		uint16_t fcp_private_offs;
		uint8_t fcp_refcnt;
		uint8_t fcp_qos;
		uint8_t fcp_stream;
		uint8_t fcp_flag_full : 1;
		uint8_t fcp_flag_io : 1;
		uint8_t fcp_quarantined : 1;
		uint8_t _fcp_flag_unused : 5;
	};
} firehose_chunk_pos_u;
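
/*
 * Consistency check (an illustrative sketch, not part of the original header):
 * the position word must pack into a single 64-bit value, otherwise the
 * FIREHOSE_CHUNK_POS_*_INC arithmetic above would be wrong.
 */
#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
_Static_assert(sizeof(firehose_chunk_pos_u) == sizeof(uint64_t),
		"firehose_chunk_pos_u must pack into a single 64-bit word");
#endif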

typedef struct firehose_chunk_s {
	union {
		uint8_t fc_start[FIREHOSE_CHUNK_SIZE];
		struct {
			firehose_chunk_pos_u fc_pos;
			uint64_t fc_timestamp;
			uint8_t fc_data[FIREHOSE_CHUNK_SIZE - 8 - 8];
		};
	};
} *firehose_chunk_t;
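
/*
 * Similar illustrative check: a chunk is exactly FIREHOSE_CHUNK_SIZE bytes,
 * with the 16-byte header (fc_pos + fc_timestamp) carved out ahead of fc_data.
 */
#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
_Static_assert(sizeof(struct firehose_chunk_s) == FIREHOSE_CHUNK_SIZE,
		"struct firehose_chunk_s must be exactly one chunk in size");
#endif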

typedef struct firehose_chunk_range_s {
	uint16_t fcr_offset; // offset from the start of the chunk
	uint16_t fcr_length;
} *firehose_chunk_range_t;

#if __has_include(<os/atomic_private.h>)
#if defined(KERNEL) || defined(OS_FIREHOSE_SPI)

OS_ALWAYS_INLINE
static inline bool
firehose_chunk_pos_fits(firehose_chunk_pos_u *pos, uint16_t size)
{
	return pos->fcp_next_entry_offs + size <= pos->fcp_private_offs;
}
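
/*
 * Note (added commentary): tracepoint entries grow up from the start of the
 * chunk while the private data offset grows down from its end, so a
 * reservation "fits" as long as the two regions have not crossed; see the
 * reservation logic below, where both offsets are adjusted.
 */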

OS_ALWAYS_INLINE
static inline firehose_chunk_t
firehose_chunk_for_address(void *addr)
{
	uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1);
	return (firehose_chunk_t)chunk_addr;
}
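
/*
 * Illustrative use (not from the original header): chunks are assumed to be
 * FIREHOSE_CHUNK_SIZE-aligned, so any pointer into a chunk maps back to its
 * header by masking the low bits, e.g.
 *
 *	firehose_tracepoint_t ft = ...; // some tracepoint inside a chunk
 *	firehose_chunk_t fc = firehose_chunk_for_address(ft);
 *	uint64_t base = fc->fc_timestamp; // timestamp base for ft's stamp delta
 */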

#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE (-1)
#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL         ( 0)

OS_ALWAYS_INLINE
static inline long
firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp,
		firehose_stream_t stream, uint8_t qos, uint16_t pubsize,
		uint16_t privsize, uint8_t **privptr)
{
	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
	firehose_chunk_pos_u orig, pos;
	bool reservation_failed, stamp_delta_fits;

	stamp_delta_fits = ((stamp - fc->fc_timestamp) >> 48) == 0;

	// no acquire barrier needed because the returned space is only written to
	os_atomic_rmw_loop(&fc->fc_pos.fcp_atomic_pos,
			orig.fcp_pos, pos.fcp_pos, relaxed, {
		if (orig.fcp_pos == 0) {
			// we acquired a very old reference, and probably just faulted in
			// a new page
			os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
		}
		if (!FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(orig, stream)) {
			// nothing to do if the chunk is full or the stream doesn't match;
			// in that case the thread probably:
			// - loaded the chunk ref,
			// - was suspended for a long while,
			// - then read the chunk only to find something very old
			os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
		}
		pos.fcp_pos = orig.fcp_pos;
		if (!firehose_chunk_pos_fits(&orig,
				ft_size + pubsize + privsize) || !stamp_delta_fits) {
			pos.fcp_flag_full = true;
			reservation_failed = true;
		} else {
			if (qos > pos.fcp_qos) {
				pos.fcp_qos = qos;
			}
			// the *_INC macros are used so that the compiler generates better
			// assembly: updating the individual struct fields would force it
			// to handle carry propagation, which we know cannot happen here
			pos.fcp_pos += roundup(ft_size + pubsize, 8) *
					FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC;
			pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC;
			pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC;
			const uint16_t minimum_payload_size = 16;
			if (!firehose_chunk_pos_fits(&pos,
					roundup(ft_size + minimum_payload_size, 8))) {
				// if we can't even fit minimum_payload_size bytes of payload
				// for the next tracepoint, just flush right away
				pos.fcp_flag_full = true;
			}
			reservation_failed = false;
		}
	});

	if (reservation_failed) {
		if (pos.fcp_refcnt) {
			// nothing to do: a thread is still writing and will pick up the
			// "FULL" flag when it flushes, and push the chunk as a consequence
			return FIREHOSE_CHUNK_TRY_RESERVE_FAIL;
		}
		// caller must enqueue chunk
		return FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE;
	}
	if (privptr) {
		*privptr = fc->fc_start + pos.fcp_private_offs;
	}
	return orig.fcp_next_entry_offs;
}

OS_ALWAYS_INLINE
static inline firehose_tracepoint_t
firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp,
		uint16_t pubsize, uint64_t thread_id, long offset)
{
	firehose_tracepoint_t ft = (firehose_tracepoint_t)
			__builtin_assume_aligned(fc->fc_start + offset, 8);
	stamp -= fc->fc_timestamp;
	stamp |= (uint64_t)pubsize << 48;
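	// (added commentary) the stored word packs the 48-bit stamp delta from
	// fc_timestamp in its low bits and pubsize in its top 16 bits; the
	// stamp_delta_fits check in try_reserve ensures the delta cannot spill
	// into the length field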
	// The compiler barrier is needed for userland process death handling, see
	// (tracepoint-begin) in libdispatch's firehose_buffer_stream_chunk_install.
	os_atomic_std(atomic_store_explicit)(&ft->ft_atomic_stamp_and_length, stamp,
			os_atomic_std(memory_order_relaxed));
	__asm__ __volatile__ ("" ::: "memory");
	ft->ft_thread = thread_id;
	return ft;
}

OS_ALWAYS_INLINE
static inline bool
firehose_chunk_tracepoint_end(firehose_chunk_t fc,
		firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
{
	firehose_chunk_pos_u pos;

	os_atomic_std(atomic_store_explicit)(&ft->ft_id.ftid_atomic_value,
			ftid.ftid_value, os_atomic_std(memory_order_release));
	pos.fcp_pos = os_atomic_std(atomic_fetch_sub_explicit)(&fc->fc_pos.fcp_atomic_pos,
			FIREHOSE_CHUNK_POS_REFCNT_INC, os_atomic_std(memory_order_relaxed));
	return pos.fcp_refcnt == 1 && pos.fcp_flag_full;
}
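
/*
 * Illustrative calling sequence (a simplified sketch, not the actual
 * libdispatch code path; `payload`, `ftid`, and the other locals stand in for
 * caller-provided values):
 *
 *	uint8_t *privptr;
 *	long offs = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
 *			qos, pubsize, privsize, &privptr);
 *	if (offs == FIREHOSE_CHUNK_TRY_RESERVE_FAIL) {
 *		// nothing to do: stale/mismatched chunk, or a remaining writer
 *		// will flush it
 *	} else if (offs == FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE) {
 *		// the chunk is full with no writers left: the caller must
 *		// enqueue it
 *	} else {
 *		firehose_tracepoint_t ft = firehose_chunk_tracepoint_begin(fc,
 *				stamp, pubsize, thread_id, offs);
 *		memcpy(ft->ft_data, payload, pubsize);
 *		if (firehose_chunk_tracepoint_end(fc, ft, ftid)) {
 *			// last writer of a chunk marked full: flush it
 *		}
 *	}
 */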

#endif // defined(KERNEL) || defined(OS_FIREHOSE_SPI)
#endif // __has_include(<os/atomic_private.h>)

__END_DECLS

#endif // __FIREHOSE_CHUNK_PRIVATE__