| 1 | /* |
| 2 | * Copyright (c) 2015-2021 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | |
| 29 | #ifndef _SKYWALK_OS_STATS_H_ |
| 30 | #define _SKYWALK_OS_STATS_H_ |
| 31 | |
| 32 | #if defined(PRIVATE) || defined(BSD_KERNEL_PRIVATE) |
| 33 | #include <skywalk/os_channel.h> |
| 34 | #include <skywalk/os_nexus_private.h> |
| 35 | #include <skywalk/os_packet.h> |
| 36 | #include <netinet/in.h> |
| 37 | #include <netinet/in_private.h> |
| 38 | #include <netinet/in_stat.h> |
| 39 | #include <net/if.h> |
| 40 | #include <net/if_var_private.h> |
| 41 | #include <net/ethernet.h> |
| 42 | |
| 43 | /* |
| 44 | * [Intro] |
 * This file defines the new statistics infrastructure for Skywalk, based on
 * enums and uint64_t arrays. The BSD network stack statistics definitions are
 * kept for backward compatibility.
 *
 * This new scheme has several advantages over the legacy statistics
 * definitions:
 * 1. The array-based data structure allows efficient folding with vector
 * instructions.
 * 2. It allows simple indexing of a statistics field using an enum value
 * rather than a struct field name.
 * 3. Coupled with a descriptive string per statistics enum, it allows
 * convenient printing.
 *
 * The new statistics enums and data structures follow these naming
 * conventions:
 * 1. They should resemble the legacy names to make the transition easy.
 * 2. They should be easily distinguishable from the legacy data structures.
| 60 | * |
| 61 | * |
| 62 | * [Naming] |
 * The new data structures are named with the suffix _stats (plural), as
 * opposed to the legacy stat (singular), and are always in snake case, e.g.
| 65 | * New | Legacy |
| 66 | * -------------------------+-------------------------- |
| 67 | * ip_stats | ipstat |
| 68 | * ip6_stats | ip6stat |
| 69 | * tcp_stats | tcpstat |
| 70 | * if_tcp_ecn_stats | if_tcp_ecn_stat |
| 71 | * if_tcp_ecn_perf_stats | if_tcp_ecn_perf_stat |
| 72 | * udp_stats | udpstat |
| 73 | * |
| 74 | * |
| 75 | * Enums are named as <stats_family>_<type>, e.g. |
| 76 | * New | Legacy |
| 77 | * -------------------------+-------------------------- |
| 78 | * IP_STATS_TOTAL | ipstat.ips_total |
| 79 | * TCP_STATS_CONNATTEMPT | tcpstat.tcps_connattempt |
| 80 | * |
| 81 | * |
| 82 | * [Usage] |
| 83 | * Declare a statistics data structure: |
| 84 | * |
| 85 | * struct ip_stats sample_ip_stats; |
| 86 | * |
| 87 | * To increment a statistics value by one: |
| 88 | * |
| 89 | * STATS_INC(&sample_ip_stats, IP_STATS_TOTAL); |
| 90 | * |
 * To add a certain value to a statistics counter:
| 92 | * |
| 93 | * STATS_ADD(&sample_ip_stats, IP_STATS_TOTAL, n_pkts); |
| 94 | * |
| 95 | * To fold one statistics data structure into another one: |
| 96 | * |
| 97 | * ip_stats_fold(&sample_ip_stats, &another_ip_stats); |
| 98 | * |
| 99 | * To reset statistics: |
| 100 | * |
| 101 | * ip_stats_reset(&sample_ip_stats); |
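 *
 * Put together (an illustrative sketch only; flow_ip_stats and ifp_ip_stats
 * are placeholder names, not existing variables), a per-flow instance can be
 * folded into an interface-wide one and then cleared; note that the
 * destination of a fold is the first argument:
 *
 *      struct ip_stats flow_ip_stats, ifp_ip_stats;
 *
 *      STATS_INC(&flow_ip_stats, IP_STATS_TOTAL);
 *      STATS_ADD(&flow_ip_stats, IP_STATS_TOTAL, n_pkts);
 *      ip_stats_fold(&ifp_ip_stats, &flow_ip_stats);
 *      ip_stats_reset(&flow_ip_stats);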
| 102 | * |
| 103 | * |
| 104 | * [Note] |
 * Some legacy stats members have been removed for convenience reasons, e.g.
| 106 | * |
| 107 | * ip6stat.ip6s_nxthist[256] |
| 108 | * ip6stat.ip6s_sources_sameif[SCOPE6_ID_MAX]; |
| 109 | * |
 * are removed, as they don't map easily onto the array-based statistics
 * scheme and have no explicit usage within the kernel.
 * That said, those are treated as exceptions and could be reconsidered
 * if they are needed in the future.
| 114 | * |
| 115 | * |
| 116 | * [Adding Stats] |
 * To add a new stats definition, an X-Macro table and a STATS_REGISTER
 * invocation are needed.
| 118 | * |
| 119 | * #define NEW_STATS_TABLE(X)\ |
| 120 | * X(sub_type_1, "sub_type_1_short_name", "sub_type_1_fmt_string")\ |
| 121 | * X(sub_type_2, "sub_type_2_short_name", "sub_type_2_fmt_string")\ |
| 122 | * X(__NEW_STATS_MAX, "", "") |
| 123 | * |
| 124 | * STATS_REGISTER(new_stats, NEW_STATS); |
| 125 | * |
 * where new_stats is the lower case name and NEW_STATS is its upper case
 * counterpart. The following would be generated by the STATS_REGISTER macro:
| 128 | * |
| 129 | * enum _new_stats { |
| 130 | * sub_type_1, |
| 131 | * sub_type_2, |
| 132 | * __NEW_STATS_MAX, |
| 133 | * }; |
| 134 | * |
| 135 | * struct new_stats { |
 *         uint64_t _arr[__NEW_STATS_MAX];
 * };
| 138 | * |
| 139 | * static inline const char* new_stats_str(enum _new_stats); |
| 140 | * static inline const char* new_stats_fmt(enum _new_stats); |
| 141 | * |
| 142 | * static inline void __attribute__((always_inline)) |
| 143 | * new_stats_fold(struct new_stats *dst, struct new_stats *src); |
| 144 | * |
| 145 | * static inline void __attribute__((always_inline)) |
| 146 | * new_stats_reset(struct new_stats *s); |
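 *
 * One common way to implement such a macro is to expand the same table with
 * a different per-entry expander for each generated item. The following is
 * only a sketch of the technique (the actual STATS_REGISTER definition used
 * by Skywalk may differ):
 *
 *      #define STATS_ENUM_ENTRY(name, sname, fmt)      name,
 *      #define STATS_STR_ENTRY(name, sname, fmt)       case name: return (sname);
 *
 *      enum _new_stats {
 *              NEW_STATS_TABLE(STATS_ENUM_ENTRY)
 *      };
 *
 *      static inline const char *
 *      new_stats_str(enum _new_stats e)
 *      {
 *              switch (e) {
 *              NEW_STATS_TABLE(STATS_STR_ENTRY)
 *              }
 *              return ("");
 *      }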
| 147 | * |
 * The fmt_string should conform to printf conventions. Note that since
 * uint64_t is used to store the stats values, `%llu` is used to format the
 * printed value (PRIu64 could be used, but since long long is guaranteed to
 * be at least 64 bits wide, casting to unsigned long long and using `%llu`
 * works without pulling in definitions from `inttypes.h`).
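 *
 * As an illustration (a sketch only; the actual consumers of these strings
 * live outside this header), the generated helpers can be used to dump every
 * counter in a stats structure:
 *
 *      void
 *      new_stats_dump(const struct new_stats *s)
 *      {
 *              enum _new_stats i;
 *
 *              for (i = 0; i < __NEW_STATS_MAX; i++) {
 *                      printf(new_stats_fmt(i), (unsigned long long)s->_arr[i]);
 *              }
 *      }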
| 152 | */ |
| 153 | |
| 154 | /* BEGIN CSTYLED */ |
| 155 | |
| 156 | /* ip stats definitions */ |
| 157 | #define IP_STATS_TABLE(X) \ |
| 158 | /* Input stats */ \ |
| 159 | X(IP_STATS_TOTAL, "TotalRcvd", "\t%llu total packet received\n") \ |
| 160 | X(IP_STATS_BADSUM, "BadCsum", "\t\t%llu bad header checksum\n") \ |
| 161 | X(IP_STATS_RCV_SWCSUM, "RcvSWCsumPkt", "\t\t%llu header checksummed in software") \ |
| 162 | X(IP_STATS_RCV_SWCSUM_BYTES, "RcvSWCsumByte"," (%llu byte)\n") \ |
| 163 | X(IP_STATS_TOOSMALL, "DataTooSmall", "\t\t%llu with size smaller than minimum\n")\ |
| 164 | X(IP_STATS_TOOSHORT, "PktTooShort", "\t\t%llu with data size < data length\n") \ |
| 165 | X(IP_STATS_ADJ, "TotalAdj", "\t\t%llu with data size > data length\n") \ |
| 166 | X(IP_STATS_ADJ_HWCSUM_CLR, "HWCsumDisc", "\t\t\t%llu packet forced to software checksum\n") \ |
| 167 | X(IP_STATS_TOOLONG, "TooLong", "\t\t%llu with ip length > max ip packet size\n") \ |
| 168 | X(IP_STATS_BADHLEN, "BadHdrLen", "\t\t%llu with header length < data size\n") \ |
| 169 | X(IP_STATS_BADLEN, "BadLen", "\t\t%llu with data length < header length\n") \ |
| 170 | X(IP_STATS_BADOPTIONS, "BadOptions", "\t\t%llu with bad options\n") \ |
| 171 | X(IP_STATS_BADVERS, "BadVer", "\t\t%llu with incorrect version number\n") \ |
| 172 | X(IP_STATS_FRAGMENTS, "FragRcvd", "\t\t%llu fragment received\n") \ |
| 173 | X(IP_STATS_FRAGDROPPED, "FragDrop", "\t\t\t%llu dropped (dup or out of space)\n") \ |
| 174 | X(IP_STATS_FRAGTIMEOUT, "FragTimeO", "\t\t\t%llu dropped after timeout\n") \ |
| 175 | X(IP_STATS_REASSEMBLED, "Reassembled", "\t\t\t%llu reassembled ok\n") \ |
| 176 | X(IP_STATS_DELIVERED, "Delivered", "\t\t%llu packet for this host\n") \ |
| 177 | X(IP_STATS_NOPROTO, "UnkwnProto", "\t\t%llu packet for unknown/unsupported protocol\n") \ |
| 178 | X(IP_STATS_FORWARD, "Fwd", "\t\t%llu packet forwarded") \ |
| 179 | X(IP_STATS_FASTFORWARD, "FastFwd", " (%llu packet fast forwarded)\n") \ |
| 180 | X(IP_STATS_CANTFORWARD, "CantFwd", "\t\t%llu packet not forwardable\n") \ |
| 181 | X(IP_STATS_NOTMEMBER, "UnRegGroup", "\t\t%llu packet received for unknown multicast group\n") \ |
| 182 | X(IP_STATS_REDIRECTSENT, "FwdSameNet", "\t\t%llu redirect sent\n") \ |
| 183 | X(IP_STATS_RXC_COLLISIONS, "RxChnColl", "\t\t%llu input packet not chained due to collision\n") \ |
| 184 | X(IP_STATS_RXC_CHAINED, "RxChn", "\t\t%llu input packet processed in a chain\n") \ |
| 185 | X(IP_STATS_RXC_NOTCHAIN, "RxBypChn", "\t\t%llu input packet unable to chain\n") \ |
| 186 | X(IP_STATS_RXC_CHAINSZ_GT2, "RxChnGT2", "\t\t%llu input packet chain processed with length greater than 2\n") \ |
| 187 | X(IP_STATS_RXC_CHAINSZ_GT4, "RxChnGT4", "\t\t%llu input packet chain processed with length greater than 4\n") \ |
| 188 | \ |
| 189 | /* Output stats */ \ |
| 190 | X(IP_STATS_LOCALOUT, "LocalOut", "\t%llu packet sent from this host\n") \ |
| 191 | X(IP_STATS_RAWOUT, "RawPktOut", "\t\t%llu packet sent with fabricated ip header\n") \ |
| 192 | X(IP_STATS_ODROPPED, "DropNoBuf", "\t\t%llu output packet dropped due to no bufs, etc.\n") \ |
| 193 | X(IP_STATS_NOROUTE, "NoRoute", "\t\t%llu output packet discarded due to no route\n") \ |
| 194 | X(IP_STATS_FRAGMENTED, "Fragmented", "\t\t%llu output datagram fragmented\n") \ |
| 195 | X(IP_STATS_OFRAGMENTS, "OutFraged", "\t\t%llu fragment created\n") \ |
| 196 | X(IP_STATS_CANTFRAG, "CantFrag", "\t\t%llu datagram that can't be fragmented\n") \ |
| 197 | X(IP_STATS_NOGIF, "NoGif", "\t\t%llu tunneling packet that can't find gif\n") \ |
| 198 | X(IP_STATS_BADADDR, "BadAddr", "\t\t%llu datagram with bad address in header\n") \ |
| 199 | X(IP_STATS_PKTDROPCNTRL, "DropNoCtl", "\t\t%llu packet dropped due to no bufs for control data\n") \ |
| 200 | X(IP_STATS_SND_SWCSUM, "SndSWCsumPkt", "\t\t%llu header checksummed in software") \ |
| 201 | X(IP_STATS_SND_SWCSUM_BYTES, "SndSWCsumByte"," (%llu byte)\n") \ |
| 202 | X(IP_STATS_RXC_NOTLIST, "IPInPkt", "\t\t%llu input packet did not go through list processing path\n") \ |
| 203 | X(__IP_STATS_MAX, "", "end of ip stats") |
| 204 | |
| 205 | /* ipv6 stats definitions */ |
| 206 | #define IP6_STATS_TABLE(X) \ |
| 207 | /* Input Stats */ \ |
| 208 | X(IP6_STATS_TOTAL, "TotalRcvd", "\t%llu total packet received\n") \ |
| 209 | X(IP6_STATS_TOOSMALL, "DataTooSmall", "\t\t%llu with size smaller than minimum\n") \ |
| 210 | X(IP6_STATS_TOOSHORT, "PktTooShort", "\t\t%llu with data size < data length\n") \ |
| 211 | X(IP6_STATS_ADJ, "TotalAdj", "\t\t%llu with data size > data length\n") \ |
| 212 | X(IP6_STATS_ADJ_HWCSUM_CLR, "HWCsumDisc", "\t\t\t%llu packet forced to software checksum\n") \ |
| 213 | X(IP6_STATS_BADOPTIONS, "BadOptions", "\t\t%llu with bad options\n") \ |
| 214 | X(IP6_STATS_BADVERS, "BadVer", "\t\t%llu with incorrect version number\n") \ |
X(IP6_STATS_FRAGMENTS, "FragRcvd", "\t\t%llu fragment received\n") \
| 216 | X(IP6_STATS_FRAGDROPPED, "FragDrop", "\t\t\t%llu dropped (dup or out of space)\n") \ |
| 217 | X(IP6_STATS_FRAGTIMEOUT, "FragTimeO", "\t\t\t%llu dropped after timeout\n") \ |
| 218 | X(IP6_STATS_FRAGOVERFLOW, "FragOverFlow", "\t\t\t%llu exceeded limit\n") \ |
| 219 | X(IP6_STATS_REASSEMBLED, "FragReassembled","\t\t\t%llu reassembled ok\n") \ |
| 220 | X(IP6_STATS_ATMFRAG_RCVD, "RAtomicFrag", "\t\t\t%llu atomic fragments received\n") \ |
| 221 | X(IP6_STATS_DELIVERED, "Delivered", "\t\t%llu packet for this host\n") \ |
| 222 | X(IP6_STATS_FORWARD, "Fwd", "\t\t%llu packet forwarded\n") \ |
| 223 | X(IP6_STATS_CANTFORWARD, "CantFwd", "\t\t%llu packet not forwardable\n") \ |
| 224 | X(IP6_STATS_REDIRECTSENT, "RedirectSent", "\t\t%llu redirect sent\n") \ |
| 225 | X(IP6_STATS_NOTMEMBER, "NoMCGrp", "\t\t%llu multicast packet which we don't join\n") \ |
| 226 | X(IP6_STATS_EXTHDRTOOLONG, "ExtHdrNotCont","\t\t\t%llu packet whose headers are not continuous\n") \ |
| 227 | X(IP6_STATS_NOGIF, "NoGif", "\t\t%llu tunneling packet that can't find gif\n") \ |
X(IP6_STATS_TOOMANYHDR, "TooManyHdr", "\t\t%llu packet discarded due to too many headers\n") \
| 229 | X(IP6_STATS_FORWARD_CACHEHIT, "FwdCacheHit", "\t\t%llu forward cache hit\n") \ |
| 230 | X(IP6_STATS_FORWARD_CACHEMISS, "FwdCacheMiss", "\t\t%llu forward cache miss\n") \ |
| 231 | X(IP6_STATS_PKTDROPCNTRL, "DropNoCtl", "\t\t%llu packet dropped due to no bufs for control data\n") \ |
| 232 | /* Output stats */ \ |
| 233 | X(IP6_STATS_LOCALOUT, "LocalOut", "\t%llu packet sent from this host\n") \ |
| 234 | X(IP6_STATS_RAWOUT, "RawPktOut", "\t\t%llu packet sent with fabricated ip header\n") \ |
| 235 | X(IP6_STATS_ODROPPED, "DropNoBuf", "\t\t%llu output packet dropped due to no bufs, etc.\n") \ |
| 236 | X(IP6_STATS_NOROUTE, "NoRoute", "\t\t%llu output packet discarded due to no route\n") \ |
| 237 | X(IP6_STATS_FRAGMENTED, "Fragmented", "\t\t%llu output datagram fragmented\n") \ |
| 238 | X(IP6_STATS_OFRAGMENTS, "OutFraged", "\t\t%llu fragment created\n") \ |
| 239 | X(IP6_STATS_CANTFRAG, "CantFrag", "\t\t%llu datagram that can't be fragmented\n")\ |
| 240 | X(IP6_STATS_BADSCOPE, "BadScope", "\t\t%llu packet that violated scope rules\n") \ |
| 241 | X(IP6_STATS_SOURCES_NONE, "AddrSelFail", "\t\t%llu failure of source address selection\n") \ |
| 242 | X(IP6_STATS_DAD_COLLIDE, "DADColl", "\t\t%llu duplicate address detection collision\n") \ |
| 243 | X(IP6_STATS_DAD_LOOPCOUNT, "DADLoop", "\t\t%llu duplicate address detection NS loop\n") \ |
| 244 | X(IP6_STATS_SOURCES_SKIP6_EXPENSIVE_SECONDARY_IF,"Ign2ndIf", "\t\t%llu time ignored source on secondary expensive I/F\n") \ |
| 245 | X(__IP6_STATS_MAX, "", "end of ipv6 stats") |
| 246 | |
| 247 | /* tcp stats definitions */ |
| 248 | #define TCP_STATS_TABLE(X) \ |
| 249 | /* Output stats */ \ |
| 250 | X(TCP_STATS_SNDTOTAL, "SndTotalPkt", "\t%llu packet sent\n") \ |
| 251 | X(TCP_STATS_SNDPACK, "SndTotalDP", "\t\t%llu data packet") \ |
| 252 | X(TCP_STATS_SNDBYTE, "SndDataByte", " (%llu byte)\n") \ |
| 253 | X(TCP_STATS_SNDREXMITPACK, "SndDPktReXmt", "\t\t%llu data packet retransmitted") \ |
| 254 | X(TCP_STATS_SNDREXMITBYTE, "SndDByteReXmt"," (%llu byte)\n") \ |
| 255 | X(TCP_STATS_MTURESENT, "MTUReSnd", "\t\t%llu resend initiated by MTU discovery\n") \ |
| 256 | X(TCP_STATS_SNDACKS, "SndAck", "\t\t%llu ack-only packet") \ |
| 257 | X(TCP_STATS_DELACK, "DelayAck", " (%llu delayed)\n") \ |
| 258 | X(TCP_STATS_SNDURG, "SndURG", "\t\t%llu URG only packet\n") \ |
| 259 | X(TCP_STATS_SNDPROBE, "SndWinProb", "\t\t%llu window probe packet\n") \ |
| 260 | X(TCP_STATS_SNDWINUP, "SndWinUpd", "\t\t%llu window update packet\n") \ |
| 261 | X(TCP_STATS_SNDCTRL, "SndCtlPkt", "\t\t%llu control packet\n") \ |
| 262 | X(TCP_STATS_FCHOLDPACKET, "FlowCtlWh", "\t\t%llu data packet sent after flow control\n") \ |
| 263 | X(TCP_STATS_SYNCHALLENGE, "SYNChallenge", "\t\t%llu challenge ACK sent due to unexpected SYN\n") \ |
| 264 | X(TCP_STATS_RSTCHALLENGE, "RSTChallenge", "\t\t%llu challenge ACK sent due to unexpected RST\n") \ |
| 265 | X(TCP_STATS_SND_SWCSUM, "SndSWCsumPkt", "\t\t%llu checksummed in software") \ |
| 266 | X(TCP_STATS_SND_SWCSUM_BYTES, "SndSWCsumByte"," (%llu byte) over IPv4\n") \ |
| 267 | X(TCP_STATS_SND6_SWCSUM, "SndSWCsumPkt6","\t\t%llu checksummed in software") \ |
| 268 | X(TCP_STATS_SND6_SWCSUM_BYTES, "SndSWCsumByte6"," (%llu byte) over IPv6\n") \ |
| 269 | \ |
| 270 | /* Input stats */ \ |
| 271 | X(TCP_STATS_RCVTOTAL, "RcvTotalPkt", "\t%llu packet received\n") \ |
| 272 | X(TCP_STATS_RCVACKPACK, "RcvAckPkt", "\t\t%llu ack") \ |
| 273 | X(TCP_STATS_RCVACKBYTE, "RcvAckByte", " (for %llu byte)\n") \ |
| 274 | X(TCP_STATS_RCVDUPACK, "RcvDupAck", "\t\t%llu duplicate ack\n") \ |
| 275 | X(TCP_STATS_RCVACKTOOMUCH, "RcvAckUnSnd", "\t\t%llu ack for unsent data\n") \ |
| 276 | X(TCP_STATS_RCVPACK, "RcvPktInSeq", "\t\t%llu packet received in-sequence") \ |
| 277 | X(TCP_STATS_RCVBYTE, "RcvBInSeq", " (%llu byte)\n") \ |
| 278 | X(TCP_STATS_RCVDUPPACK, "RcvDupPkt", "\t\t%llu completely duplicate packet") \ |
| 279 | X(TCP_STATS_RCVDUPBYTE, "RcvDupByte", " (%llu byte)\n") \ |
| 280 | X(TCP_STATS_PAWSDROP, "PAWSDrop", "\t\t%llu old duplicate packet\n") \ |
| 281 | X(TCP_STATS_RCVMEMDROP, "RcvMemDrop", "\t\t%llu received packet dropped due to low memory\n") \ |
| 282 | X(TCP_STATS_RCVPARTDUPPACK, "RcvDupData", "\t\t%llu packet with some dup. data") \ |
| 283 | X(TCP_STATS_RCVPARTDUPBYTE, "RcvPDupByte", " (%llu byte duped)\n") \ |
| 284 | X(TCP_STATS_RCVOOPACK, "RcvOOPkt", "\t\t%llu out-of-order packet") \ |
| 285 | X(TCP_STATS_RCVOOBYTE, "RcvOOByte", " (%llu byte)\n") \ |
| 286 | X(TCP_STATS_RCVPACKAFTERWIN, "RcvAftWinPkt", "\t\t%llu packet of data after window") \ |
| 287 | X(TCP_STATS_RCVBYTEAFTERWIN, "RcvAftWinByte"," (%llu byte)\n") \ |
| 288 | X(TCP_STATS_RCVWINPROBE, "RcvWinProbPkt","\t\t%llu window probe\n") \ |
| 289 | X(TCP_STATS_RCVWINUPD, "RcvWinUpdPkt", "\t\t%llu window update packet\n") \ |
| 290 | X(TCP_STATS_RCVAFTERCLOSE, "RcvAftCloPkt", "\t\t%llu packet received after close\n") \ |
| 291 | X(TCP_STATS_BADRST, "BadRST", "\t\t%llu bad reset\n") \ |
| 292 | X(TCP_STATS_RCVBADSUM, "RcvBadCsum", "\t\t%llu discarded for bad checksum\n") \ |
| 293 | X(TCP_STATS_RCV_SWCSUM, "RcvSWCsumPkt", "\t\t%llu checksummed in software") \ |
| 294 | X(TCP_STATS_RCV_SWCSUM_BYTES, "RcvSWCsumByte"," (%llu byte) over IPv4\n") \ |
| 295 | X(TCP_STATS_RCV6_SWCSUM, "RcvSWCsumPkt6","\t\t%llu checksummed in software") \ |
| 296 | X(TCP_STATS_RCV6_SWCSUM_BYTES, "RcvSWCsumByte6"," (%llu byte) over IPv6\n") \ |
| 297 | X(TCP_STATS_RCVBADOFF, "RcvBadOff", "\t\t%llu discarded for bad header offset field\n") \ |
| 298 | X(TCP_STATS_RCVSHORT, "RcvTooShort", "\t\t%llu discarded because packet too short\n") \ |
X(TCP_STATS_CONNATTEMPT, "ConnInit", "\t%llu connection attempt\n") \
| 300 | \ |
| 301 | /* Connection stats */ \ |
| 302 | X(TCP_STATS_ACCEPTS, "ConnAcpt", "\t%llu connection accept\n") \ |
| 303 | X(TCP_STATS_BADSYN, "BadSYN", "\t%llu bad connection attempt\n") \ |
| 304 | X(TCP_STATS_LISTENDROP, "ListenDrop", "\t%llu listen queue overflow\n") \ |
| 305 | X(TCP_STATS_CONNECTS, "ConnEst", "\t%llu connection established (including accepts)\n") \ |
| 306 | X(TCP_STATS_CLOSED, "ConnClosed", "\t%llu connection closed") \ |
| 307 | X(TCP_STATS_DROPS, "ConnDrop", " (including %llu drop)\n") \ |
| 308 | X(TCP_STATS_CACHEDRTT, "RTTCacheUpd", "\t\t%llu connection updated cached RTT on close\n") \ |
| 309 | X(TCP_STATS_CACHEDRTTVAR, "RTTVarCacheUpd","\t\t%llu connection updated cached RTT variance on close\n") \ |
| 310 | X(TCP_STATS_CACHEDSSTHRESH, "SSTholdCacheUpd","\t\t%llu connection updated cached ssthresh on close\n") \ |
| 311 | X(TCP_STATS_CONNDROPS, "EConnDrop", "\t%llu embryonic connection dropped\n") \ |
| 312 | X(TCP_STATS_RTTUPDATED, "RTTUpdated", "\t%llu segment updated rtt") \ |
| 313 | X(TCP_STATS_SEGSTIMED, "RTTTimed", " (of %llu attempt)\n") \ |
| 314 | X(TCP_STATS_REXMTTIMEO, "ReXmtTO", "\t%llu retransmit timeout\n") \ |
| 315 | X(TCP_STATS_TIMEOUTDROP, "DropTO", "\t\t%llu connection dropped by rexmit timeout\n") \ |
| 316 | X(TCP_STATS_RXTFINDROP, "ReXmtFINDrop", "\t\t%llu connection dropped after retransmitting FIN\n") \ |
| 317 | X(TCP_STATS_PERSISTTIMEO, "PersistTO", "\t%llu persist timeout\n") \ |
| 318 | X(TCP_STATS_PERSISTDROP, "PersisStateTO","\t\t%llu connection dropped by persist timeout\n") \ |
| 319 | X(TCP_STATS_KEEPTIMEO, "KATO", "\t%llu keepalive timeout\n") \ |
| 320 | X(TCP_STATS_KEEPPROBE, "KAProbe", "\t\t%llu keepalive probe sent\n") \ |
| 321 | X(TCP_STATS_KEEPDROPS, "KADrop", "\t\t%llu connection dropped by keepalive\n") \ |
| 322 | X(TCP_STATS_PREDACK, "PredAck", "\t%llu correct ACK header prediction\n") \ |
| 323 | X(TCP_STATS_PREDDAT, "PredData", "\t%llu correct data packet header prediction\n") \ |
X(TCP_STATS_PCBCACHEMISS, "Pcb$Miss", "\t%llu times pcb cache miss\n") \
| 325 | \ |
| 326 | /* SACK related stats */ \ |
| 327 | X(TCP_STATS_SACK_RECOVERY_EPISODE, "SACKRecEpi", "\t%llu SACK recovery episode\n") \ |
| 328 | X(TCP_STATS_SACK_REXMITS, "SACKReXmt", "\t%llu segment rexmit in SACK recovery episodes\n") \ |
| 329 | X(TCP_STATS_SACK_REXMIT_BYTES, "SACKReXmtB", "\t%llu byte rexmit in SACK recovery episodes\n") \ |
| 330 | X(TCP_STATS_SACK_RCV_BLOCKS, "SACKRcvBlk", "\t%llu SACK option (SACK blocks) received\n") \ |
| 331 | X(TCP_STATS_SACK_SEND_BLOCKS, "SACKSntBlk", "\t%llu SACK option (SACK blocks) sent\n") \ |
| 332 | X(TCP_STATS_SACK_SBOVERFLOW, "SACKSndBlkOF", "\t%llu SACK scoreboard overflow\n") \ |
| 333 | \ |
| 334 | /* LRO related stats */ \ |
| 335 | X(TCP_STATS_COALESCED_PACK, "CoalPkt", "\t%llu LRO coalesced packet\n") \ |
| 336 | X(TCP_STATS_FLOWTBL_FULL, "FlowTblFull", "\t\t%llu time LRO flow table was full\n") \ |
| 337 | X(TCP_STATS_FLOWTBL_COLLISION, "FlowTblColl", "\t\t%llu collision in LRO flow table\n") \ |
| 338 | X(TCP_STATS_LRO_TWOPACK, "LRO2Pkt", "\t\t%llu time LRO coalesced 2 packets\n") \ |
| 339 | X(TCP_STATS_LRO_MULTPACK, "LROMultiPkt", "\t\t%llu time LRO coalesced 3 or 4 packets\n") \ |
| 340 | X(TCP_STATS_LRO_LARGEPACK, "LROLargePkt", "\t\t%llu time LRO coalesced 5 or more packets\n") \ |
| 341 | \ |
| 342 | X(TCP_STATS_LIMITED_TXT, "LimitedXmt", "\t%llu limited transmit done\n") \ |
| 343 | X(TCP_STATS_EARLY_REXMT, "EarlyReXmt", "\t%llu early retransmit done\n") \ |
| 344 | X(TCP_STATS_SACK_ACKADV, "SACKAdvAck", "\t%llu time cumulative ack advanced along with SACK\n") \ |
| 345 | X(TCP_STATS_PTO, "ProbTO", "\t%llu probe timeout\n") \ |
| 346 | X(TCP_STATS_RTO_AFTER_PTO, "RTOAfProb", "\t\t%llu time retransmit timeout triggered after probe\n") \ |
| 347 | X(TCP_STATS_PROBE_IF, "ProbeIF", "\t\t%llu time probe packets were sent for an interface\n") \ |
| 348 | X(TCP_STATS_PROBE_IF_CONFLICT, "ProbeIFConfl", "\t\t%llu time couldn't send probe packets for an interface\n") \ |
| 349 | X(TCP_STATS_TLP_RECOVERY, "TLPFastRecvr", "\t\t%llu time fast recovery after tail loss\n") \ |
| 350 | X(TCP_STATS_TLP_RECOVERLASTPKT, "TLPRecvrLPkt", "\t\t%llu time recovered last packet \n") \ |
| 351 | X(TCP_STATS_PTO_IN_RECOVERY, "PTOInRecvr", "\t\t%llu SACK based rescue retransmit\n") \ |
| 352 | \ |
| 353 | /* ECN related stats */ \ |
| 354 | X(TCP_STATS_ECN_CLIENT_SETUP, "ECNCliSetup", "\t%llu client connection attempted to negotiate ECN\n") \ |
| 355 | X(TCP_STATS_ECN_CLIENT_SUCCESS, "ECNNegoSucc", "\t\t%llu client connection successfully negotiated ECN\n") \ |
| 356 | X(TCP_STATS_ECN_NOT_SUPPORTED, "ECNSvrNoSupt", "\t\t%llu time graceful fallback to Non-ECN connection\n") \ |
X(TCP_STATS_ECN_LOST_SYN, "ECNLossSYN", "\t\t%llu time lost ECN negotiating SYN, followed by retransmission\n") \
| 358 | X(TCP_STATS_ECN_SERVER_SETUP, "ECNSvrSetup", "\t\t%llu server connection attempted to negotiate ECN\n") \ |
X(TCP_STATS_ECN_SERVER_SUCCESS, "ECNSvrSucc", "\t\t%llu server connection successfully negotiated ECN\n") \
| 360 | X(TCP_STATS_ECN_ACE_SYN_NOT_ECT, "ACESynNotECT", "\t\t%llu received AccECN SYN packet with Not-ECT\n") \ |
| 361 | X(TCP_STATS_ECN_ACE_SYN_ECT1, "ACESynECT1", "\t\t%llu received AccECN SYN packet with ECT1\n") \ |
| 362 | X(TCP_STATS_ECN_ACE_SYN_ECT0, "ACESynECT0", "\t\t%llu received AccECN SYN packet with ECT0\n") \ |
| 363 | X(TCP_STATS_ECN_ACE_SYN_CE, "ACESynCE", "\t\t%llu received AccECN SYN packet with CE\n") \ |
| 364 | X(TCP_STATS_ECN_LOST_SYNACK, "ECNLossSYNACK","\t\t%llu time lost ECN negotiating SYN-ACK, followed by retransmission\n") \ |
| 365 | X(TCP_STATS_ECN_RECV_CE, "ECNRcv", "\t\t%llu time received congestion experienced (CE) notification\n") \ |
| 366 | X(TCP_STATS_ECN_RECV_ECE, "ECNRcvECE", "\t\t%llu time CWR was sent in response to ECE\n") \ |
| 367 | X(TCP_STATS_ECN_SENT_ECE, "ECNSndECE", "\t\t%llu time sent ECE notification\n") \ |
| 368 | X(TCP_STATS_ECN_ACE_RECV_CE, "ACERcvECE", "\t\t%llu CE count received in ACE field\n") \ |
X(TCP_STATS_ECN_CONN_RECV_CE, "ECNConnRcvCE", "\t\t%llu connection received CE at least once\n") \
X(TCP_STATS_ECN_CONN_RECV_ECE, "ECNConnRcvECE","\t\t%llu connection received ECE at least once\n") \
| 371 | X(TCP_STATS_ECN_CONN_PLNOCE, "ECNConnPLNoCE","\t\t%llu connection using ECN have seen packet loss but no CE\n") \ |
| 372 | X(TCP_STATS_ECN_CONN_PL_CE, "ECNConnPLCE", "\t\t%llu connection using ECN have seen packet loss and CE\n") \ |
| 373 | X(TCP_STATS_ECN_CONN_NOPL_CE, "ECNConnNoPLCE","\t\t%llu connection using ECN received CE but no packet loss\n") \ |
| 374 | X(TCP_STATS_ECN_FALLBACK_SYNLOSS, "ECNFbSYNLoss", "\t\t%llu connection fell back to non-ECN due to SYN-loss\n") \ |
| 375 | X(TCP_STATS_ECN_FALLBACK_REORDER, "ECNFbReOrd", "\t\t%llu connection fell back to non-ECN due to reordering\n") \ |
| 376 | X(TCP_STATS_ECN_FALLBACK_CE, "ECNFbCE", "\t\t%llu connection fell back to non-ECN due to excessive CE-markings\n") \ |
| 377 | X(TCP_STATS_ECN_FALLBACK_DROPRST, "ECNFbDrpRST", "\t\t%llu ECN fallback caused by connection drop due to RST\n") \ |
| 378 | X(TCP_STATS_ECN_FALLBACK_DROPRXMT, "ECNFbDrpReXmt","\t\t%llu ECN fallback due to drop after multiple retransmits\n") \ |
| 379 | X(TCP_STATS_DETECT_REORDERING, "ReOrdDetect", "\t%llu time packet reordering was detected on a connection\n") \ |
| 380 | X(TCP_STATS_REORDERED_PKTS, "ReOrdPkt", "\t\t%llu time transmitted packets were reordered\n") \ |
| 381 | X(TCP_STATS_DELAY_RECOVERY, "DlyFastRecvr", "\t\t%llu time fast recovery was delayed to handle reordering\n") \ |
| 382 | X(TCP_STATS_AVOID_RXMT, "AvoidReXmt", "\t\t%llu time retransmission was avoided by delaying recovery\n") \ |
| 383 | X(TCP_STATS_UNNECESSARY_RXMT, "UnNeedReXmt", "\t\t%llu retransmission not needed\n")\ |
| 384 | \ |
| 385 | /* DSACK related statistics */ \ |
| 386 | X(TCP_STATS_DSACK_SENT, "DSACKSnd", "\t%llu time DSACK option was sent\n") \ |
| 387 | X(TCP_STATS_DSACK_RECVD, "DSACKRcv", "\t\t%llu time DSACK option was received\n") \ |
| 388 | X(TCP_STATS_DSACK_DISABLE, "DSACKDisable", "\t\t%llu time DSACK was disabled on a connection\n") \ |
| 389 | X(TCP_STATS_DSACK_BADREXMT, "DSACKBadReXmt","\t\t%llu time recovered from bad retransmission using DSACK\n") \ |
| 390 | X(TCP_STATS_DSACK_ACKLOSS, "DSACKAckLoss", "\t\t%llu time ignored DSACK due to ack loss\n") \ |
| 391 | X(TCP_STATS_DSACK_RECVD_OLD, "DSACKRcvOld", "\t\t%llu time ignored old DSACK options\n") \ |
| 392 | X(TCP_STATS_PMTUDBH_REVERTED, "PMTUDBHRevert","\t%llu time PMTU Blackhole detection, size reverted\n") \ |
| 393 | X(TCP_STATS_DROP_AFTER_SLEEP, "DropAPSleep", "\t%llu connection were dropped after long sleep\n") \ |
| 394 | \ |
| 395 | /* TFO-related statistics */ \ |
| 396 | X(TCP_STATS_TFO_COOKIE_SENT, "TFOCkSnd", "\t%llu time a TFO-cookie has been announced\n") \ |
| 397 | X(TCP_STATS_TFO_SYN_DATA_RCV, "TFOSYNDataRcv","\t%llu SYN with data and a valid TFO-cookie have been received\n") \ |
| 398 | X(TCP_STATS_TFO_COOKIE_REQ_RCV, "TFOCkReqRcv", "\t%llu SYN with TFO-cookie-request received\n") \ |
| 399 | X(TCP_STATS_TFO_COOKIE_INVALID, "TFOCkInv", "\t%llu time an invalid TFO-cookie has been received\n") \ |
| 400 | X(TCP_STATS_TFO_COOKIE_REQ, "TFOCkReq", "\t%llu time we requested a TFO-cookie\n") \ |
| 401 | X(TCP_STATS_TFO_COOKIE_RCV, "TFOCkRcv", "\t\t%llu time the peer announced a TFO-cookie\n") \ |
| 402 | X(TCP_STATS_TFO_SYN_DATA_SENT, "TFOSYNDataSnd","\t%llu time we combined SYN with data and a TFO-cookie\n") \ |
X(TCP_STATS_TFO_SYN_DATA_ACKED, "TFOSYNDataAck","\t\t%llu time our SYN with data has been acknowledged\n") \
| 404 | X(TCP_STATS_TFO_SYN_LOSS, "TFOSYNLoss", "\t%llu time a connection-attempt with TFO fell back to regular TCP\n") \ |
| 405 | X(TCP_STATS_TFO_BLACKHOLE, "TFOBlackhole", "\t%llu time a TFO-connection blackhole'd\n") \ |
| 406 | X(TCP_STATS_TFO_COOKIE_WRONG, "TFOCkWrong", "\t%llu time TFO-cookie we sent was wrong\n") \ |
X(TCP_STATS_TFO_NO_COOKIE_RCV, "TFONoCkRcv", "\t%llu time we asked for a cookie but didn't get one\n") \
| 408 | X(TCP_STATS_TFO_HEURISTICS_DISABLE, "TFOHeuDisable","\t%llu time TFO got disabled due to heuristics\n") \ |
| 409 | X(TCP_STATS_TFO_SNDBLACKHOLE, "TFOSndBH", "\t%llu time TFO got blackholed in the sending direction\n") \ |
| 410 | X(TCP_STATS_MSS_TO_DEFAULT, "MSSToDefault", "\t%llu time maximum segment size was changed to default\n") \ |
| 411 | X(TCP_STATS_MSS_TO_MEDIUM, "MSSToMedium", "\t%llu time maximum segment size was changed to medium\n") \ |
| 412 | X(TCP_STATS_MSS_TO_LOW, "MSSToLow", "\t%llu time maximum segment size was changed to low\n") \ |
| 413 | \ |
| 414 | /* TCP timer statistics */ \ |
| 415 | X(TCP_STATS_TIMER_DRIFT_LE_1_MS, "TmrDriftLE1Ms", "\t%llu timer drift less or equal to 1 ms\n") \ |
| 416 | X(TCP_STATS_TIMER_DRIFT_LE_10_MS, "TmrDriftLE10Ms", "\t%llu timer drift less or equal to 10 ms\n") \ |
| 417 | X(TCP_STATS_TIMER_DRIFT_LE_20_MS, "TmrDriftLE20Ms", "\t%llu timer drift less or equal to 20 ms\n") \ |
| 418 | X(TCP_STATS_TIMER_DRIFT_LE_50_MS, "TmrDriftLE50Ms", "\t%llu timer drift less or equal to 50 ms\n") \ |
| 419 | X(TCP_STATS_TIMER_DRIFT_LE_100_MS, "TmrDriftLE100Ms", "\t%llu timer drift less or equal to 100 ms\n") \ |
| 420 | X(TCP_STATS_TIMER_DRIFT_LE_200_MS, "TmrDriftLE200Ms", "\t%llu timer drift less or equal to 200 ms\n") \ |
| 421 | X(TCP_STATS_TIMER_DRIFT_LE_500_MS, "TmrDriftLE500Ms", "\t%llu timer drift less or equal to 500 ms\n") \ |
| 422 | X(TCP_STATS_TIMER_DRIFT_LE_1000_MS, "TmrDriftLE1000Ms", "\t%llu timer drift less or equal to 1000 ms\n") \ |
| 423 | X(TCP_STATS_TIMER_DRIFT_GT_1000_MS, "TmrDriftGT1000Ms", "\t%llu timer drift greater than 1000 ms\n") \ |
| 424 | X(TCP_STATS_USEDRTT, "RTTUsed", "\t%llu times RTT initialized from route\n") \ |
| 425 | X(TCP_STATS_USEDRTTVAR, "RTTVarUsed", "\t%llu times RTTVAR initialized from rt\n") \ |
| 426 | X(TCP_STATS_USEDSSTHRESH, "SSTholdUsed", "\t%llu times ssthresh initialized from rt\n") \ |
| 427 | X(TCP_STATS_MINMSSDROPS, "MinMssDrop", "\t%llu average minmss too low drops\n") \ |
| 428 | X(TCP_STATS_SNDREXMITBAD, "BadReXmt", "\t%llu unnecessary packet retransmissions\n") \ |
| 429 | \ |
/* SYN Cache related stats */ \
| 431 | X(TCP_STATS_SC_ADDED, "SCAdded", "\t%llu entry added to syncache\n") \ |
| 432 | X(TCP_STATS_SC_RETRANSMITTED, "SCReXmt", "\t%llu syncache entry was retransmitted\n") \ |
| 433 | X(TCP_STATS_SC_DUPSYN, "SCDupSYN", "\t%llu duplicate SYN packet\n") \ |
| 434 | X(TCP_STATS_SC_DROPPED, "SCDrop", "\t%llu could not reply to packet\n") \ |
| 435 | X(TCP_STATS_SC_COMPLETED, "SCCompl", "\t%llu successful extraction of entry\n") \ |
| 436 | X(TCP_STATS_SC_BUCKETOVERFLOW, "SCBktOF", "\t%llu syncache per-bucket limit hit\n") \ |
| 437 | X(TCP_STATS_SC_CACHEOVERFLOW, "SCOF", "\t%llu syncache cache limit hit\n") \ |
| 438 | X(TCP_STATS_SC_RESET, "SCReset", "\t%llu RST removed entry from syncache\n") \ |
| 439 | X(TCP_STATS_SC_STALE, "SCStale", "\t%llu timed out or listen socket gone\n") \ |
| 440 | X(TCP_STATS_SC_ABORTED, "SCAbrt", "\t%llu syncache entry aborted\n") \ |
| 441 | X(TCP_STATS_SC_BADACK, "SCBadAck", "\t%llu removed due to bad ACK\n") \ |
| 442 | X(TCP_STATS_SC_UNREACH, "SCUnReach", "\t%llu ICMP unreachable received\n") \ |
| 443 | X(TCP_STATS_SC_ZONEFAIL, "SCZoneFail", "\t%llu zalloc() failed\n") \ |
| 444 | X(TCP_STATS_SC_SENDCOOKIE, "SCSndCookie", "\t%llu SYN cookie sent\n") \ |
X(TCP_STATS_SC_RECVCOOKIE, "SCRcvCookie", "\t%llu SYN cookie received\n") \
| 446 | \ |
| 447 | /* Host Cache related stats */ \ |
| 448 | X(TCP_STATS_HC_ADDED, "HCAdd", "\t%llu entry added to hostcache\n") \ |
| 449 | X(TCP_STATS_HC_BUCKETOVERFLOW, "HCBktOF", "\t%llu hostcache per bucket limit hit\n") \ |
| 450 | \ |
| 451 | /* Misc. */ \ |
| 452 | X(TCP_STATS_BG_RCVTOTAL, "RcvBkgrdPkt", "\t%llu total background packets received\n") \ |
| 453 | X(TCP_STATS_MSG_UNOPKTS, "MsgUnOrdPkt", "\t%llu unordered packet on TCP msg stream\n") \ |
| 454 | X(TCP_STATS_MSG_UNOAPPENDFAIL, "MsgUnOrdFail", "\t%llu failed to append unordered pkt\n") \ |
| 455 | X(TCP_STATS_MSG_SNDWAITHIPRI, "MsgSndW8HPrio","\t%llu send waiting for high priority data\n") \ |
| 456 | \ |
| 457 | /* MPTCP Related stats */ \ |
| 458 | X(TCP_STATS_MP_SNDPACKS, "MPSndPkt", "\t%llu data packet sent\n") \ |
| 459 | X(TCP_STATS_MP_SNDBYTES, "MPSndByte", "\t%llu data byte sent\n") \ |
| 460 | X(TCP_STATS_MP_RCVTOTAL, "MPRcvTotal", "\t%llu data packet received\n") \ |
| 461 | X(TCP_STATS_MP_RCVBYTES, "MPRcvByte", "\t%llu data byte received\n") \ |
| 462 | X(TCP_STATS_INVALID_MPCAP, "InvMPCap", "\t%llu packet with an invalid MPCAP option\n") \ |
| 463 | X(TCP_STATS_INVALID_JOINS, "InvMPJoin", "\t%llu packet with an invalid MPJOIN option\n") \ |
| 464 | X(TCP_STATS_MPCAP_FALLBACK, "MPCapFail", "\t%llu time primary subflow fell back to TCP\n") \ |
| 465 | X(TCP_STATS_JOIN_FALLBACK, "MPJoinFallBk", "\t%llu time secondary subflow fell back to TCP\n") \ |
| 466 | X(TCP_STATS_ESTAB_FALLBACK, "EstFallBk", "\t%llu DSS option drop\n") \ |
| 467 | X(TCP_STATS_INVALID_OPT, "InvOpt", "\t%llu other invalid MPTCP option\n") \ |
| 468 | X(TCP_STATS_MP_REDUCEDWIN, "MPReducedWin", "\t%llu time the MPTCP subflow window was reduced\n") \ |
| 469 | X(TCP_STATS_MP_BADCSUM, "MPBadCSum", "\t%llu bad DSS checksum\n") \ |
| 470 | X(TCP_STATS_MP_OODATA, "MPOOData", "\t%llu time received out of order data\n") \ |
| 471 | X(TCP_STATS_MP_OUTOFWIN, "MPOutOfWin", "\t%llu Packet lies outside the shared recv window\n") \ |
| 472 | X(TCP_STATS_JOIN_RXMTS, "JoinAckReXmt", "\t%llu join ack retransmits\n") \ |
| 473 | X(TCP_STATS_TAILLOSS_RTO, "TailLossTRO", "\t%llu RTO due to tail loss\n") \ |
| 474 | X(TCP_STATS_RECOVERED_PKTS, "RecoveryPkt", "\t%llu recovered after loss\n") \ |
X(TCP_STATS_NOSTRETCHACK, "NoStretchAck", "\t%llu disabled stretch ack algorithm on a connection\n") \
| 476 | X(TCP_STATS_RESCUE_RXMT, "SACKRsqReXmt", "\t%llu SACK rescue retransmit\n") \ |
| 477 | \ |
| 478 | /* MPTCP Subflow selection stats */ \ |
| 479 | X(TCP_STATS_MP_SWITCHES, "MPSwitches", "\t%llu subflow switch\n") \ |
| 480 | X(TCP_STATS_MP_SEL_SYMTOMSD, "MPSelSymp", "\t%llu subflow switch due to advisory\n") \ |
| 481 | X(TCP_STATS_MP_SEL_RTT, "MPSelRTT", "\t%llu subflow switch due to rtt\n") \ |
| 482 | X(TCP_STATS_MP_SEL_RTO, "MPSelRTO", "\t%llu subflow switch due to rto\n") \ |
| 483 | X(TCP_STATS_MP_SEL_PEER, "MPSelPeer", "\t%llu subflow switch due to peer\n") \ |
| 484 | X(TCP_STATS_MP_NUM_PROBES, "MPProbe", "\t%llu number of subflow probe\n") \ |
| 485 | X(TCP_STATS_MP_VERDOWNGRADE, "MPVerDowngrd", "\t%llu times MPTCP version downgrade\n") \ |
| 486 | \ |
| 487 | /* Miscellaneous statistics */ \ |
| 488 | X(TCP_STATS_TW_PCBCOUNT, "TWPcbCount", "\t%llu pcbs in time-wait state\n") \ |
| 489 | \ |
| 490 | X(__TCP_STATS_MAX, "", "end of tcp stats") |
| 491 | |
| 492 | #define UDP_STATS_TABLE(X) \ |
| 493 | /* Input stats */ \ |
| 494 | X(UDP_STATS_IPACKETS, "RcvPkt", "\t%llu datagram received\n") \ |
| 495 | X(UDP_STATS_HDROPS, "HdrDrop", "\t\t%llu with incomplete header\n") \ |
X(UDP_STATS_BADSUM, "BadCsum", "\t\t%llu with bad checksum\n") \
X(UDP_STATS_BADLEN, "BadLen", "\t\t%llu with bad data length field\n") \
| 498 | X(UDP_STATS_NOSUM, "NoCsum", "\t\t%llu with no checksum\n") \ |
| 499 | X(UDP_STATS_RCV_SWCSUM, "RcvSWCsum", "\t\t%llu checksummed in software") \ |
| 500 | X(UDP_STATS_RCV_SWCSUM_BYTES, "RcvSWCsumBytes", " (%llu bytes) over IPv4\n") \ |
X(UDP_STATS_RCV6_SWCSUM, "RcvSWCsum6", "\t\t%llu checksummed in software") \
X(UDP_STATS_RCV6_SWCSUM_BYTES, "RcvSWCsumBytes6", " (%llu bytes) over IPv6\n") \
| 503 | X(UDP_STATS_NOPORT, "NoPort", "\t\t%llu dropped due to no socket\n") \ |
| 504 | X(UDP_STATS_NOPORTBCAST, "NoPortBCast", "\t\t%llu broadcast/multicast datagram undelivered\n") \ |
| 505 | X(UDP_STATS_FILTERMCAST, "FilterMCast", "\t\t%llu time multicast source filter matched\n") \ |
| 506 | X(UDP_STATS_FULLSOCK, "FullSock", "\t\t%llu dropped due to full socket buffers\n") \ |
| 507 | X(UDP_STATS_PCBCACHEMISS, "PCBCacheMiss", "\t\t%llu not for hashed pcb\n") \ |
X(UDP_STATS_PCBHASHMISS, "PCBHashMiss", "\t\t%llu input packets not for hashed pcb\n") \
| 509 | \ |
| 510 | /* Output stats */ \ |
| 511 | X(UDP_STATS_OPACKETS, "SndPkt", "\t%llu datagram output\n") \ |
| 512 | X(UDP_STATS_SND_SWCSUM, "SndSWCsum", "\t\t%llu checksummed in software") \ |
| 513 | X(UDP_STATS_SND_SWCSUM_BYTES, "SndSWCsumBytes", " (%llu bytes) over IPv4\n") \ |
| 514 | X(UDP_STATS_SND6_SWCSUM, "SndSWCsum6", "\t\t%llu checksummed in software") \ |
| 515 | X(UDP_STATS_SND6_SWCSUM_BYTES, "SndSWCsumBytes6", " (%llu bytes) over IPv6\n") \ |
| 516 | X(UDP_STATS_FASTOUT, "SndFastPath", "\t\t%llu output packets on fast path\n") \ |
| 517 | X(UDP_STATS_NOPORTMCAST, "SndNoPortMCast", "\t\t%llu output no socket on port, multicast\n") \ |
| 518 | \ |
| 519 | X(__UDP_STATS_MAX, "", "end of UDP stats") |
| 520 | |
| 521 | #define QUIC_STATS_TABLE(X) \ |
| 522 | /* Tx */ \ |
| 523 | X(QUIC_STATS_SNDPKT, "SndTotalPkt", "\t%llu packets sent") \ |
| 524 | X(QUIC_STATS_SNDBYTE, "SndTotalByte", " (%llu bytes)\n") \ |
| 525 | /* Tx Stream */ \ |
| 526 | X(QUIC_STATS_SNDSTREAMFRAME, "SndStreamFrame", "\t\tSTREAM\n\t\t\t%llu stream frames sent") \ |
| 527 | X(QUIC_STATS_SNDSTREAMBYTE, "SndStreamByte", " (%llu bytes)\n") \ |
| 528 | X(QUIC_STATS_SNDSTREAMRESET, "SndStreamReset", "\t\t\t%llu RESET_STREAM frames sent\n") \ |
| 529 | X(QUIC_STATS_SNDSTOPSENDING, "SndStopSending", "\t\t\t%llu STOP_SENDING frames sent\n") \ |
| 530 | X(QUIC_STATS_SNDSTREAMBLKFRAME, "SndStreamBlockedFrame","\t\t\t%llu STREAM_BLOCKED frames sent\n") \ |
| 531 | X(QUIC_STATS_SNDSTMDATABLKFRAME, "SndStreamDataBlocked", "\t\t\t%llu STREAM_DATA_BLOCKED frames sent\n") \ |
| 532 | /* Tx CRYPTO */ \ |
| 533 | X(QUIC_STATS_SNDINITCRYPTOFRAME, "SndInitCryptoFrame", "\t\tCRYPTO\n\t\t\t%llu initial CRYPTO frames sent") \ |
| 534 | X(QUIC_STATS_SNDINITCRYPTOBYTE, "SndInitCryptoByte", " (%llu bytes)\n") \ |
| 535 | X(QUIC_STATS_SNDHDSHKCRYPTOFRAME, "SndHdShkCryptoFrame", "\t\t\t%llu handshake CRYPTO frames sent") \ |
| 536 | X(QUIC_STATS_SNDHDSHKCRYPTOBYTE, "SndHdShkCryptoByte", " (%llu bytes)\n") \ |
| 537 | X(QUIC_STATS_SND1RTTCRYPTOFRAME, "Snd1RttCryptoFrame", "\t\t\t%llu 1-RTT CRYPTO frames sent") \ |
| 538 | X(QUIC_STATS_SND1RTTCRYPTOBYTE, "Snd1RttCryptoByte", " (%llu bytes)\n") \ |
X(QUIC_STATS_SND0RTTCRYPTOFRAME, "Snd0RttCryptoFrame", "\t\t\t%llu 0-RTT CRYPTO frames sent") \
X(QUIC_STATS_SND0RTTCRYPTOBYTE, "Snd0RttCryptoByte", " (%llu bytes)\n") \
| 541 | X(QUIC_STATS_SNDCRYPTOREXMTFRAME, "SndReXmtCryptoFrame", "\t\t\t%llu CRYPTO frames retransmitted") \ |
| 542 | X(QUIC_STATS_SNDCRYPTOREXMTBYTE, "SndReXmtCryptoByte", " (%llu bytes)\n") \ |
| 543 | X(QUIC_STATS_SNDDATABLKFRAME, "SndDataBlockedFrame", "\t\t%llu DATA_BLOCKED frames sent\n") \ |
| 544 | X(QUIC_STATS_SNDREXMTPKT, "SndReXmtPkt", "\t\t%llu packets retransmitted") \ |
| 545 | X(QUIC_STATS_SNDREXMTBYTE, "SndReXmtByte", " (%llu bytes)\n") \ |
| 546 | X(QUIC_STATS_SNDLOSTPKT, "SndLostPkt", "\t\t%llu packets lost") \ |
| 547 | X(QUIC_STATS_SNDLOSTBYTE, "SndLostByte", " (%llu bytes)\n") \ |
| 548 | /* End of Tx */ \ |
| 549 | /* Rx */ \ |
| 550 | X(QUIC_STATS_RCVPKT, "RcvTotalPkt", "\t%llu packets received") \ |
| 551 | X(QUIC_STATS_RECVBYTE, "RcvTotalByte", " (%llu bytes)\n") \ |
| 552 | /* Rx Stream */ \ |
| 553 | X(QUIC_STATS_RCVSTREAMFRAME, "RcvStreamFrame", "\t\tSTREAM\n\t\t\t%llu stream frames received") \ |
| 554 | X(QUIC_STATS_RCVSTREAMBYTE, "RcvStreamByte", " (%llu bytes)\n") \ |
| 555 | X(QUIC_STATS_RCVSTREAMRESET, "RcvStreamReset", "\t\t\t%llu RESET_STREAM frames received\n") \ |
| 556 | X(QUIC_STATS_RCVSTOPSENDING, "RcvStopSending", "\t\t\t%llu STOP_SENDING frames received\n") \ |
| 557 | X(QUIC_STATS_RCVSTREAMBLKFRAME, "RcvStreamBlockedFrame","\t\t\t%llu STREAM_BLOCKED frames received\n") \ |
| 558 | X(QUIC_STATS_RCVSTMDATABLKFRAME, "RcvStreamDataBlocked", "\t\t\t%llu STREAM_DATA_BLOCKED frames received\n") \ |
| 559 | /* Rx CRYPTO */ \ |
| 560 | X(QUIC_STATS_RCVINITCRYPTOFRAME, "RcvInitCryptoFrame", "\t\tCRYPTO\n\t\t\t%llu initial CRYPTO frames received") \ |
| 561 | X(QUIC_STATS_RCVINITCRYPTOBYTE, "RcvInitCryptoByte", " (%llu bytes)\n") \ |
| 562 | X(QUIC_STATS_RCVHDSHKCRYPTOFRAME, "RcvHdShkCryptoFrame", "\t\t\t%llu handshake CRYPTO frames received") \ |
| 563 | X(QUIC_STATS_RCVHDSHKCRYPTOBYTE, "RcvHdShkCryptoByte", " (%llu bytes)\n") \ |
| 564 | X(QUIC_STATS_RCV1RTTCRYPTOFRAME, "Rcv1RttCryptoFrame", "\t\t\t%llu 1-RTT CRYPTO frames received") \ |
| 565 | X(QUIC_STATS_RCV1RTTCRYPTOBYTE, "Rcv1RttCryptoByte", " (%llu bytes)\n") \ |
| 566 | X(QUIC_STATS_RCV0RTTCRYPTOFRAME, "Rcv0RttCryptoFrame", "\t\t\t%llu 0-RTT CRYPTO frames received") \ |
| 567 | X(QUIC_STATS_RCV0RTTCRYPTOBYTE, "Rcv0RttCryptoByte", " (%llu bytes)\n") \ |
| 568 | X(QUIC_STATS_RCVDATABLKFRAME, "RcvDataBlockedFrame", "\t\t%llu DATA_BLOCKED frames received\n") \ |
| 569 | X(QUIC_STATS_RCVREORDERPKT, "RcvReorderedPkt", "\t\t%llu packets received reordered") \ |
| 570 | X(QUIC_STATS_RCVREORDERBYTE, "RcvReorderedByte", " (%llu bytes)\n") \ |
| 571 | /* Connections */ \ |
| 572 | X(QUIC_STATS_CONNECTS, "ConnEst", "\t%llu connections established\n") \ |
| 573 | X(QUIC_STATS_SNDCCR, "SndCCR", "\t\t%llu connection close reasons sent\n") \ |
| 574 | X(QUIC_STATS_SNDCCRINTERROR, "SndCCRInternalError", "\t\t\t%llu INTERNAL_ERROR sent\n") \ |
X(QUIC_STATS_SNDCCRSVRBUSY, "SndCCRServerBusy", "\t\t\t%llu SERVER_BUSY sent\n") \
| 576 | X(QUIC_STATS_SNDCCRFLOWCTLERR, "SndCCRFlowCtl", "\t\t\t%llu FLOW_CONTROL_ERROR sent\n") \ |
| 577 | X(QUIC_STATS_SNDCCRSTREAMLIMIT, "SndCCRStreamLimit", "\t\t\t%llu STREAM_LIMIT_ERROR sent\n") \ |
| 578 | X(QUIC_STATS_SNDCCRSTREAMSTATE, "SndCCRStreamState", "\t\t\t%llu STREAM_STATE_ERROR sent\n") \ |
| 579 | X(QUIC_STATS_SNDCCRFINALOFFSET, "SndCCRFinalOffset", "\t\t\t%llu FINAL_SIZE_ERROR sent\n") \ |
| 580 | X(QUIC_STATS_SNDCCRFRAMEENCODING, "SndCCRFrameEncoding", "\t\t\t%llu FRAME_ENCODING_ERROR sent\n") \ |
| 581 | X(QUIC_STATS_SNDCCRTRNASPARAMS, "SndCCRTransportParams","\t\t\t%llu TRANSPORT_PARAMETER_ERROR sent\n") \ |
| 582 | X(QUIC_STATS_SNDCCRVERSIONNEGO, "SndCCRVersionNego", "\t\t\t%llu VERSION_NEGOTIATION_ERROR sent\n") \ |
| 583 | X(QUIC_STATS_SNDCCRPROTOVIOLATION, "SndCCRProtoViolation", "\t\t\t%llu PROTOCOL_VIOLATION sent\n") \ |
| 584 | X(QUIC_STATS_SNDCCRINVALMIGRATION, "SndCCRInvalMigration", "\t\t\t%llu INVALID_MIGRATION sent\n") \ |
| 585 | X(QUIC_STATS_SNDCCRCRYPTO, "SndCCRCryptoError", "\t\t\t%llu CRYPTO_ERROR sent\n") \ |
| 586 | X(QUIC_STATS_RCVCCR, "RcvCCR", "\t\t%llu connection close reasons received\n") \ |
| 587 | X(QUIC_STATS_RCVCCRINTERROR, "RcvCCRInternalError", "\t\t\t%llu INTERNAL_ERROR received\n") \ |
X(QUIC_STATS_RCVCCRSVRBUSY, "RcvCCRServerBusy", "\t\t\t%llu SERVER_BUSY received\n") \
| 589 | X(QUIC_STATS_RCVCCRFLOWCTLERR, "RcvCCRFlowCtl", "\t\t\t%llu FLOW_CONTROL_ERROR received\n") \ |
| 590 | X(QUIC_STATS_RCVCCRSTREAMLIMIT, "RcvCCRStreamLimit", "\t\t\t%llu STREAM_LIMIT_ERROR received\n") \ |
| 591 | X(QUIC_STATS_RCVCCRSTREAMSTATE, "RcvCCRStreamState", "\t\t\t%llu STREAM_STATE_ERROR received\n") \ |
| 592 | X(QUIC_STATS_RCVCCRFINALOFFSET, "RcvCCRFinalOffset", "\t\t\t%llu FINAL_SIZE_ERROR received\n") \ |
| 593 | X(QUIC_STATS_RCVCCRFRAMEENCODING, "RcvCCRFrameEncoding", "\t\t\t%llu FRAME_ENCODING_ERROR received\n") \ |
| 594 | X(QUIC_STATS_RCVCCRTRNASPARAMS, "RcvCCRTransportParams","\t\t\t%llu TRANSPORT_PARAMETER_ERROR received\n") \ |
| 595 | X(QUIC_STATS_RCVCCRVERSIONNEGO, "RcvCCRVersionNego", "\t\t\t%llu VERSION_NEGOTIATION_ERROR received\n") \ |
| 596 | X(QUIC_STATS_RCVCCRPROTOVIOLATION, "RcvCCRProtoViolation", "\t\t\t%llu PROTOCOL_VIOLATION received\n") \ |
| 597 | X(QUIC_STATS_RCVCCRINVALMIGRATION, "RcvCCRInvalMigration", "\t\t\t%llu INVALID_MIGRATION received\n") \ |
| 598 | X(QUIC_STATS_RCVCCRCRYPTO, "RcvCCRCryptoError", "\t\t\t%llu CRYPTO_ERROR received\n") \ |
| 599 | X(QUIC_STATS_SNDECT0, "SndECT0", "\t%llu ECT0 sent\n") \ |
| 600 | X(QUIC_STATS_RCVECT0, "RcvECT0", "\t%llu ECT0 received\n") \ |
| 601 | X(QUIC_STATS_SNDECT1, "SndECT1", "\t%llu ECT1 sent\n") \ |
| 602 | X(QUIC_STATS_RCVECT1, "RcvECT1", "\t%llu ECT1 received\n") \ |
| 603 | X(QUIC_STATS_SNDECTCE, "SndECTCE", "\t%llu ECT-CE sent\n") \ |
| 604 | X(QUIC_STATS_RCVECTCE, "RcvECTCE", "\t%llu ECT-CE received\n") \ |
| 605 | X(QUIC_STATS_REXMTTIMEOUT, "ReXmtTimeOut", "\t%llu retransmit timeout\n") \ |
| 606 | X(QUIC_STATS_KEEPALIVETTIMEOUT, "KeepAliveTimeOut", "\t%llu keepalive timeout\n") \ |
| 607 | X(QUIC_STATS_PTO, "ProbeTimeOut", "\t%llu probe timeout\n") \ |
| 608 | \ |
| 609 | X(__QUIC_STATS_MAX, "", "end of quic stats\n") |
| 610 | |
| 611 | #define NETIF_STATS_TABLE(X) \ |
| 612 | /* Rx stats */ \ |
| 613 | X(NETIF_STATS_RX_PACKETS, "RxPackets", "\t%llu total Rx packets\n") \ |
| 614 | X(NETIF_STATS_RX_IRQ, "RxIRQ", "\t\t%llu interrupts\n") \ |
| 615 | X(NETIF_STATS_RX_IRQ_MIT, "RxIRQMIT", "\t\t\t%llu interrupt mitigation thread wakeup\n") \ |
| 616 | X(NETIF_STATS_RX_IRQ_BUSY, "RxIRQBusy", "\t\t\t%llu interrupt notify return busy\n") \ |
| 617 | X(NETIF_STATS_RX_IRQ_AGAIN, "RxIRQAgain", "\t\t\t%llu interrupt notify return retry again\n") \ |
| 618 | X(NETIF_STATS_RX_IRQ_ERR, "RxIRQErr", "\t\t\t%llu interrupt notify return error\n") \ |
X(NETIF_STATS_RX_COPY_SUM, "RxCopySum", "\t\t%llu copy+checksummed\n") \
| 620 | X(NETIF_STATS_RX_COPY_DIRECT, "RxCopyDirect", "\t\t%llu copy from pkt\n") \ |
| 621 | X(NETIF_STATS_RX_COPY_MBUF, "RxCopyMbuf", "\t\t%llu copy from mbuf\n") \ |
| 622 | X(NETIF_STATS_RX_COPY_ATTACH, "RxCopyAttach", "\t\t%llu copy by attaching mbuf under pkt\n") \ |
| 623 | X(NETIF_STATS_RX_SYNC, "RxSYNC", "\t\t%llu sync\n") \ |
| 624 | \ |
| 625 | /* Tx stats */ \ |
| 626 | X(NETIF_STATS_TX_PACKETS, "TxPkt", "\t%llu total Tx packets\n") \ |
X(NETIF_STATS_TX_IRQ, "TxIRQ", "\t\t%llu interrupts\n") \
| 628 | X(NETIF_STATS_TX_IRQ_MIT, "TxIRQMIT", "\t\t\t%llu interrupt mitigation thread wakeup\n") \ |
| 629 | X(NETIF_STATS_TX_IRQ_BUSY, "TxIRQBusy", "\t\t\t%llu interrupt notify return busy\n") \ |
| 630 | X(NETIF_STATS_TX_IRQ_AGAIN, "TxIRQAgain", "\t\t\t%llu interrupt notify return retry again\n") \ |
| 631 | X(NETIF_STATS_TX_IRQ_ERR, "TxIRQErr", "\t\t\t%llu interrupt notify return error\n") \ |
X(NETIF_STATS_TX_COPY_SUM, "TxCopySum", "\t\t%llu copy+checksummed\n") \
| 633 | X(NETIF_STATS_TX_COPY_DIRECT, "TxCopyDirect", "\t\t%llu copy from pkt\n") \ |
| 634 | X(NETIF_STATS_TX_COPY_MBUF, "TxCopyMbuf", "\t\t%llu copy from mbuf\n") \ |
| 635 | X(NETIF_STATS_TX_SYNC, "TxSYNC", "\t\t%llu sync\n") \ |
| 636 | X(NETIF_STATS_TX_REPL, "TxRepl", "\t\t%llu pool replenished\n") \ |
| 637 | X(NETIF_STATS_TX_DROP_ENQ_AQM, "TxDropEnqueueAQM", "\t\t%llu dropped due to AQM enqueue failure\n") \ |
| 638 | X(NETIF_STATS_GSO_SEG, "GSOSegments", "\t\t%llu GSO segments created\n") \ |
| 639 | X(NETIF_STATS_GSO_PKT, "GSOPackets", "\t\t%llu GSO packets \n") \ |
| 640 | X(NETIF_STATS_GSO_PKT_DROP_NOMEM, "GSODropNoMem", "\t\t%llu GSO packet dropped due to allocation failure\n") \ |
| 641 | X(NETIF_STATS_GSO_PKT_DROP_NA_INACTIVE, "GSODropNaInactive", "\t\t%llu GSO packet dropped due to inactive netif\n") \ |
| 642 | X(NETIF_STATS_GSO_PKT_DROP_BADLEN, "GSODropBadLen", "\t\t%llu GSO packet dropped due to bad packet length\n") \ |
| 643 | X(NETIF_STATS_GSO_PKT_DROP_NONTCP, "GSODropNonTcp", "\t\t%llu GSO packet dropped as it is not a TCP packet\n") \ |
| 644 | \ |
| 645 | X(NETIF_STATS_DROP, "Drop", "\t%llu dropped\n") \ |
X(NETIF_STATS_DROP_NOMEM_BUF, "DropNoMemBuf", "\t\t%llu dropped due to buflet alloc failure\n") \
X(NETIF_STATS_DROP_NOMEM_PKT, "DropNoMemPkt", "\t\t%llu dropped due to packet alloc failure\n") \
| 648 | X(NETIF_STATS_DROP_NOMEM_MBUF, "DropNoMemMbuf", "\t\t%llu dropped due to mbuf alloc failure\n") \ |
| 649 | X(NETIF_STATS_DROP_BADLEN, "DropBadLen", "\t\t%llu dropped due to bad packet length\n") \ |
| 650 | X(NETIF_STATS_DROP_NA_INACTIVE, "DropNaInactive", "\t\t%llu dropped due to dst na inactive\n") \ |
| 651 | X(NETIF_STATS_DROP_KRDROP_MODE, "DropKrDropMode", "\t\t%llu dropped due to dst kring in drop mode\n") \ |
| 652 | X(NETIF_STATS_DROP_RXQ_OVFL, "DropRxqOverflow", "\t\t%llu dropped due to RX Queue overflow\n") \ |
| 653 | X(NETIF_STATS_DROP_NO_RX_CB, "DropNoRxCallback", "\t\t%llu dropped due to missing RX callback\n") \ |
| 654 | X(NETIF_STATS_DROP_NO_DELEGATE, "DropNoDelegate", "\t\t%llu dropped due to missing delegate interface\n") \ |
| 655 | \ |
| 656 | /* Channel event stats */ \ |
| 657 | X(NETIF_STATS_EV_RECV, "EvRecv", "\t%llu channel event received\n") \ |
| 658 | X(NETIF_STATS_EV_RECV_TX_STATUS, "EvRecvTxStatus", "\t\t%llu channel event received, TX status\n") \ |
| 659 | X(NETIF_STATS_EV_RECV_TX_EXPIRED, "EvRecvTxExpired", "\t\t%llu channel event received, TX expired\n") \ |
| 660 | X(NETIF_STATS_EV_SENT, "EvSent", "\t%llu channel event delivered\n") \ |
| 661 | X(NETIF_STATS_EV_DROP, "EvDrop", "\t%llu channel event dropped\n") \ |
| 662 | X(NETIF_STATS_EV_DROP_NOMEM_PKT, "EvDropNoMemPkt", "\t%llu channel event dropped due to packet alloc failure\n") \ |
| 663 | X(NETIF_STATS_EV_DROP_NA_INACTIVE, "EvDropNaInactive", "\t%llu channel event dropped due to na inactive\n") \ |
| 664 | X(NETIF_STATS_EV_DROP_NA_DEFUNCT, "EvDropNaDefunct", "\t%llu channel event dropped due to na defunct\n") \ |
| 665 | X(NETIF_STATS_EV_DROP_KRDROP_MODE, "EvDropKrDropMode", "\t%llu channel event dropped due to dst kring in drop mode\n") \ |
| 666 | X(NETIF_STATS_EV_DROP_KEVENT_INACTIVE, "EvDropKevInactive", "\t%llu channel event dropped due to kevent not registered on channel\n") \ |
| 667 | X(NETIF_STATS_EV_DROP_KRSPACE, "EvDropKrSpaceDrop", "\t%llu channel event dropped due to lack of space in user channel ring\n") \ |
| 668 | X(NETIF_STATS_EV_DROP_DEMUX_ERR, "EvDropDemuxErr", "\t%llu channel event dropped due to demux error\n") \ |
| 669 | X(NETIF_STATS_EV_DROP_EV_VPNA_NOTSUP, "EvDropVpnaEvNotSup", "\t%llu channel event dropped due to vpna not having event ring\n") \ |
| 670 | X(NETIF_STATS_EV_DROP_NO_VPNA, "EvDropNoVpna", "\t%llu channel event dropped due to no vpna ports\n") \ |
| 671 | \ |
| 672 | /* Interface advisory update stats */ \ |
| 673 | X(NETIF_STATS_IF_ADV_UPD_RECV, "IfAdvUpdRecv", "\t%llu interface advisory update received\n") \ |
| 674 | X(NETIF_STATS_IF_ADV_UPD_SENT, "IfAdvUpdSent", "\t%llu interface advisory update event sent\n") \ |
| 675 | X(NETIF_STATS_IF_ADV_UPD_DROP, "IfAdvUpdDrop", "\t%llu interface advisory update event dropped\n") \ |
| 676 | \ |
| 677 | /* Interface filter stats */ \ |
| 678 | X(NETIF_STATS_FILTER_DROP_NO_RX_CB, "FilterDropNoRxCB", "\t%llu dropped due to missing RX callback\n") \ |
| 679 | X(NETIF_STATS_FILTER_DROP_DISABLED, "FilterDropDisabled", "\t%llu dropped due to disabled filter\n") \ |
| 680 | X(NETIF_STATS_FILTER_DROP_REMOVED, "FilterDropRemoved", "\t%llu dropped due to removed filter\n") \ |
| 681 | X(NETIF_STATS_FILTER_DROP_PKTQ_FULL, "FilterDropPktqFull", "\t%llu dropped due to packet queue full\n") \ |
| 682 | X(NETIF_STATS_FILTER_DROP_MBQ_FULL, "FilterDropMbqFull", "\t%llu dropped due to mbuf queue full\n") \ |
| 683 | X(NETIF_STATS_FILTER_DROP_DISABLED_RING,"FilterDropDisabledRing","\t%llu dropped due to disabled ring\n") \ |
| 684 | X(NETIF_STATS_FILTER_DROP_NO_SPACE, "FilterDropNoSpace", "\t%llu dropped due to lack of space in RX ring\n") \ |
| 685 | X(NETIF_STATS_FILTER_DROP_INTERNALIZE, "FilterDropInternalize","\t%llu dropped due to internalize failure\n") \ |
| 686 | X(NETIF_STATS_FILTER_DROP_PKT_ALLOC_FAIL,"FilterDropPktAllocFail","\t%llu dropped due to packet allocation failure\n") \ |
| 687 | X(NETIF_STATS_FILTER_DROP_DEFAULT, "FilterDropDefault", "\t%llu dropped due to default drop policy\n") \ |
| 688 | X(NETIF_STATS_FILTER_PKT_TRUNCATED, "FilterPktTruncated", "\t%llu inbound packets truncated\n") \ |
| 689 | X(NETIF_STATS_FILTER_TX_DELIVER, "FilterTxDeliver", "\t%llu outbound packets delivered to filter\n") \ |
| 690 | X(NETIF_STATS_FILTER_RX_DELIVER, "FilterRxDeliver", "\t%llu inbound packets delivered to filter\n") \ |
| 691 | X(NETIF_STATS_FILTER_TX_INJECT, "FilterTxInject", "\t%llu outbound packets injected by filter\n") \ |
| 692 | X(NETIF_STATS_FILTER_RX_INJECT, "FilterRxInject", "\t%llu inbound packets injected by filter\n") \ |
| 693 | X(NETIF_STATS_FILTER_TX_ENTER, "FilterTxEnter", "\t%llu outbound packets entered the filter chain\n") \ |
| 694 | X(NETIF_STATS_FILTER_RX_ENTER, "FilterRxEnter", "\t%llu inbound packets entered the filter chain\n") \ |
| 695 | X(NETIF_STATS_FILTER_TX_EXIT, "FilterTxExit", "\t%llu outbound packets exited the filter chain\n") \ |
| 696 | X(NETIF_STATS_FILTER_RX_EXIT, "FilterRxExit", "\t%llu inbound packets exited the filter chain\n") \ |
| 697 | X(NETIF_STATS_FILTER_SYNC_NO_PKTS, "FilterSyncNoPkts", "\t%llu filter syncs called with an empty ring\n") \ |
| 698 | X(NETIF_STATS_FILTER_ADD, "FilterAdd", "\t%llu filters added\n") \ |
| 699 | X(NETIF_STATS_FILTER_REMOVE, "FilterRemove", "\t%llu filters removed\n") \ |
| 700 | X(NETIF_STATS_FILTER_TX_FLUSH, "FilterTxFlush", "\t%llu TX packets flushed due to closing filters\n") \ |
| 701 | X(NETIF_STATS_FILTER_RX_NOT_FILTERABLE, "FilterRxNotFilterable","\t%llu RX packets not filterable\n") \ |
| 702 | X(NETIF_STATS_FILTER_BAD_PKT_LEN, "FilterBadPktLen", "\t%llu invalid packet length\n") \ |
| 703 | \ |
| 704 | /* Custom ether and sidecar stats */ \ |
| 705 | X(NETIF_STATS_VP_DROP_USER_RING_DISABLED,"VPDropUserRingDisabled","\t%llu dropped due to disabled user ring\n") \ |
| 706 | X(NETIF_STATS_VP_DROP_DEV_RING_DISABLED,"VPDropDevRingDisabled","\t%llu dropped due to disabled device ring\n") \ |
| 707 | X(NETIF_STATS_VP_DROP_USER_RING_NO_SPACE,"VPDropUserRingNoSpace","\t%llu dropped due to lack of user ring space\n") \ |
| 708 | X(NETIF_STATS_VP_DROP_DEV_RING_NO_SPACE,"VPDropDevRingNoSpace", "\t%llu dropped due to lack of device ring space\n") \ |
| 709 | X(NETIF_STATS_VP_DROP_RX_ALLOC_FAIL, "VPDropRxAllocFail", "\t%llu dropped due to RX allocation failure\n") \ |
| 710 | X(NETIF_STATS_VP_DROP_TX_ALLOC_FAIL, "VPDropTxAllocFail", "\t%llu dropped due to TX allocation failure\n") \ |
| 711 | X(NETIF_STATS_VP_DROP_PKT_TOO_BIG, "VPDropPktTooBig", "\t%llu dropped due to packet being too big\n") \ |
| 712 | X(NETIF_STATS_VP_DROP_INTERNALIZE_FAIL, "VPDropInternalizeFail","\t%llu dropped due to internalize failure\n") \ |
| 713 | X(NETIF_STATS_VP_DROP_UNEXPECTED_ERR, "VPDropUnexpectedErr", "\t%llu dropped due to unexpected TX sync error\n") \ |
| 714 | X(NETIF_STATS_VP_BAD_MADDR_LEN, "VPBadMaddrLen", "\t%llu packets with invalid mac address length\n") \ |
| 715 | X(NETIF_STATS_VP_BAD_MADDR, "VPBadMaddr", "\t%llu packets with invalid mac address\n") \ |
X(NETIF_STATS_VP_BAD_PKT_LEN, "VPBadPktLen", "\t%llu packets with invalid packet length\n") \
| 717 | X(NETIF_STATS_VP_FLOW_INFO_ERR, "VPFlowInfoErr", "\t%llu packets cannot be classified due to flow info error\n") \ |
| 718 | X(NETIF_STATS_VP_FLOW_NOT_MATCH, "VPFlowNotMatch", "\t%llu packets not matching flow description\n") \ |
| 719 | X(NETIF_STATS_VP_KR_ENTER_FAIL, "VPKrEnterFail", "\t%llu failed attempts to acquire RX ring lock\n") \ |
| 720 | X(NETIF_STATS_VP_DEV_RING_DISABLED, "VPDevRingDisabled", "\t%llu failed attempts to get packets due to disabled dev ring\n") \ |
| 721 | X(NETIF_STATS_VP_SYNC_UNKNOWN_ERR, "VPSyncUnknownErr", "\t%llu unknown errors returned by RX sync\n") \ |
| 722 | X(NETIF_STATS_VP_SYNC_NO_PKTS, "VPSyncNoPkts", "\t%llu syncs called with an empty ring\n") \ |
| 723 | X(NETIF_STATS_VP_SPURIOUS_NOTIFY, "VPSpuriousNotify", "\t%llu spurious notifies delivered\n") \ |
| 724 | X(NETIF_STATS_VP_ENQUEUE_FAILED, "VPEnqueueFailed", "\t%llu packets failed to enqueue\n") \ |
| 725 | X(NETIF_STATS_VP_ENQUEUED, "VPEnqueued", "\t%llu packets enqueued\n") \ |
| 726 | X(NETIF_STATS_VP_LL_ENQUEUED, "VPLLEnqueued", "\t%llu low latency packets enqueued\n") \ |
| 727 | X(NETIF_STATS_VP_LL_SENT, "VPLLSent", "\t%llu low latency packets sent\n") \ |
| 728 | X(NETIF_STATS_VP_LL_DELIVERED, "VPLLDelivered", "\t%llu low latency packets delivered\n") \ |
| 729 | X(NETIF_STATS_VP_DELIVERED, "VPDelivered", "\t%llu packets delivered\n") \ |
| 730 | X(NETIF_STATS_VP_FLOW_FOUND, "VPFlowFound", "\t%llu packets found a matching flow\n") \ |
| 731 | X(NETIF_STATS_VP_FLOW_NOT_FOUND, "VPFlowNotFound", "\t%llu packets found no matching flow\n") \ |
| 732 | X(NETIF_STATS_VP_FLOW_DISABLED, "VPFlowDisabled", "\t%llu lookup failures due to disabled flow\n") \ |
| 733 | X(NETIF_STATS_VP_FLOW_EMPTY_TABLE, "VPFlowEmptyTable", "\t%llu lookup failures due to empty flow table\n") \ |
| 734 | X(NETIF_STATS_VP_FLOW_TABLE_INIT_FAIL, "VPFlowTableInitFail", "\t%llu failed attempts to initialize flow table\n") \ |
| 735 | X(NETIF_STATS_VP_FLOW_INSERT_FAIL, "VPFlowInsertFail", "\t%llu failed attempts to insert flow\n") \ |
| 736 | X(NETIF_STATS_VP_FLOW_ADD, "VPFlowAdd", "\t%llu flows added\n") \ |
| 737 | X(NETIF_STATS_VP_FLOW_REMOVE, "VPFlowRemove", "\t%llu flows removed\n") \ |
| 738 | \ |
| 739 | /* Netif agent stats */ \ |
| 740 | X(NETIF_STATS_AGENT_BAD_ETHERTYPE, "AgentBadEthertype", "\t%llu flow add failures due to invalid ethertype\n") \ |
| 741 | X(NETIF_STATS_AGENT_BAD_IPV6_ADDR, "AgentBadIPv6Addr", "\t%llu flow add failures due to invalid IPv6 address\n") \ |
| 742 | X(NETIF_STATS_AGENT_DUP_FLOW, "AgentDupFlow", "\t%llu duplicate flows added\n") \ |
| 743 | \ |
| 744 | /* Netif llink stats */ \ |
| 745 | X(NETIF_STATS_LLINK_ADD, "LLinkAdd", "\t%llu logical links added\n") \ |
| 746 | X(NETIF_STATS_LLINK_REMOVE, "LLinkRemove", "\t%llu logical links removed\n") \ |
| 747 | X(NETIF_STATS_LLINK_DEF_QSET_USED, "LLinkDefQSetUsed", "\t%llu uses of the default qset\n") \ |
| 748 | X(NETIF_STATS_LLINK_NONDEF_QSET_USED, "LLinkNonDefQSetUsed", "\t%llu uses of a non-default qset\n") \ |
| 749 | X(NETIF_STATS_LLINK_HINT_NOT_USEFUL, "LLinkHintNotUseful", "\t%llu hints specified but qset not found\n") \ |
| 750 | X(NETIF_STATS_LLINK_DUP_INT_ID_GENERATED, "LLinkDupIntIDGenerated", "\t%llu duplicate internal llink IDs generated\n") \ |
| 751 | X(NETIF_STATS_LLINK_DUP_ID_GIVEN, "LLinkDupIDGiven", "\t%llu duplicate llink IDs given by the provider\n") \ |
| 752 | X(NETIF_STATS_LLINK_QSET_INIT_FAIL, "LLinkQSetInitFail", "\t%llu queue set initialization failures\n") \ |
| 753 | X(NETIF_STATS_LLINK_RXQ_INIT_FAIL, "LLinkRXQInitFail", "\t%llu RX queue initialization failures\n") \ |
| 754 | X(NETIF_STATS_LLINK_TXQ_INIT_FAIL, "LLinkTXQInitFail", "\t%llu TX queue initialization failures\n") \ |
| 755 | X(NETIF_STATS_LLINK_NOT_FOUND_REMOVE, "LLinkNotFoundRemove", "\t%llu not found during remove\n") \ |
| 756 | X(NETIF_STATS_LLINK_TX_DROP_BAD_STATE, "LLinkTxDroppedBadState", "\t%llu TX packets dropped due to bad llink state\n") \ |
| 757 | X(NETIF_STATS_LLINK_RX_DROP_BAD_STATE, "LLinkRxDroppedBadState", "\t%llu RX packets dropped due to bad llink state\n") \ |
X(NETIF_STATS_LLINK_AQM_QFULL, "LLinkAQMQFull", "\t%llu occurrences of the queue full condition\n") \
| 759 | X(NETIF_STATS_LLINK_AQM_DROPPED, "LLinkAQMDropped", "\t%llu packets dropped due to AQM\n") \ |
| 760 | X(NETIF_STATS_LLINK_AQM_DEQ_BAD_STATE, "LLinkAQMDeqBadState", "\t%llu dequeues occurred while llink is in a bad state\n") \ |
| 761 | X(NETIF_STATS_LLINK_QSET_BAD_STATE, "LLinkQSetAccessBadState", "\t%llu attempts to access a queue set while in bad llink state\n") \ |
| 762 | X(NETIF_STATS_LLINK_ADD_BAD_PARAMS, "LLinkAddBadParams", "\t%llu attempts to add an llink with bad parameters\n") \ |
| 763 | \ |
| 764 | X(__NETIF_STATS_MAX, "", "end of netif stats") |
| 765 | |
| 766 | #define FSW_FPD_STATS(X) \ |
| 767 | X(FSW_STATS_FPD_0, "", "\t%llu") \ |
| 768 | X(FSW_STATS_FPD_1, "", "\t%llu") \ |
| 769 | X(FSW_STATS_FPD_2, "", "\t%llu") \ |
| 770 | X(FSW_STATS_FPD_3, "", "\t%llu") \ |
| 771 | X(FSW_STATS_FPD_4, "", "\t%llu") \ |
| 772 | X(FSW_STATS_FPD_5, "", "\t%llu") \ |
| 773 | X(FSW_STATS_FPD_6, "", "\t%llu") \ |
| 774 | X(FSW_STATS_FPD_7, "", "\t%llu") \ |
| 775 | X(FSW_STATS_FPD_8, "", "\t%llu") \ |
| 776 | X(FSW_STATS_FPD_9, "", "\t%llu") \ |
| 777 | X(FSW_STATS_FPD_10, "", "\t%llu") \ |
| 778 | X(FSW_STATS_FPD_11, "", "\t%llu") |
| 779 | |
| 780 | #define FSW_STATS_TABLE(X) \ |
| 781 | /* Rx stats */ \ |
X(FSW_STATS_RX_PACKETS, "RxPackets", "\t%llu total Rx packets\n") \
| 783 | X(FSW_STATS_RX_DEMUX_ERR, "RxDemuxErr", "\t\t%llu demux error (passed to BSD)\n") \ |
X(FSW_STATS_RX_DEMUX_UNSPEC, "RxDemuxUnspec", "\t\t%llu demux AF unknown (passed to BSD)\n") \
| 785 | X(FSW_STATS_RX_DEMUX_PROMISC, "RxDemuxPromisc", "\t\t%llu promiscuous packets (passed to BSD)\n") \ |
| 786 | X(, "RxFlowExtractErr", "\t\t%llu flow extract error (passed to BSD)\n") \ |
| 787 | X(FSW_STATS_RX_PKT_NOT_FINALIZED, "RxPktNotFinalized", "\t\t%llu packet not finalized\n") \ |
| 788 | X(FSW_STATS_RX_FLOW_NOT_FOUND, "RxFlowNotFound", "\t\t%llu dropped, flow lookup failure\n") \ |
| 789 | X(FSW_STATS_RX_FLOW_TRACK_ERR, "RxFlowTrackErr", "\t\t%llu dropped, flow tracker error\n") \ |
| 790 | X(FSW_STATS_RX_FLOW_NONVIABLE, "RxFlowNonviable", "\t\t%llu dropped, flow already nonviable\n") \ |
X(FSW_STATS_RX_FLOW_TORNDOWN, "RxFlowTornDown", "\t\t%llu dropped, flow already torn down\n") \
| 792 | X(FSW_STATS_RX_DST_RING_FULL, "RxDstRingFull", "\t\t%llu dropped, destination ring full\n") \ |
| 793 | X(FSW_STATS_RX_COPY_PKT2PKT, "RxCopyPktToPkt", "\t\t%llu copied pkt -> pkt\n") \ |
| 794 | X(FSW_STATS_RX_COPY_PKT2MBUF, "RxCopyPktToMbuf", "\t\t%llu copied pkt -> mbuf\n") \ |
| 795 | X(FSW_STATS_RX_COPY_MBUF2PKT, "RxCopyMbufToPkt", "\t\t%llu copied mbuf -> pkt\n") \ |
X(FSW_STATS_RX_COPY_SUM, "RxCopySum", "\t\t%llu copy+checksummed\n") \
| 797 | X(FSW_STATS_RX_COPY_BAD_LEN, "RxCopyBadLen", "\t\t%llu dropped, bad packet length\n") \ |
| 798 | X(FSW_STATS_RX_DROP_NOMEM_BUF, "RxDropNoMemBuf", "\t\t%llu dropped due to mbuf alloc failure\n") \ |
| 799 | X(FSW_STATS_RX_DEMUX_SHORT_ERR, "RxDemuxShortErr", "\t\t%llu demux failed, classify length short\n") \ |
| 800 | X(FSW_STATS_RX_WASTED_16KMBUF, "RxWasted16KMbuf", "\t\t%llu wasted an entire pre-allocated 16K mbuf\n") \ |
| 801 | X(FSW_STATS_RX_PKT_NOT_LISTENER, "RxPktNotListener", "\t\t%llu packet not for listener\n") \ |
| 802 | /* Rx frag stats (fsw doesn't manage fragments on Tx) */ \ |
| 803 | X(FSW_STATS_RX_FRAG_V4, "RxFragV4", "\t\t%llu total received ipv4 fragments\n") \ |
| 804 | X(FSW_STATS_RX_FRAG_V6, "RxFragV6", "\t\t%llu total received ipv6 fragments\n") \ |
| 805 | X(FSW_STATS_RX_FRAG_REASSED, "RxFragReassed", "\t\t\t%llu frag successfully reassembled\n") \ |
X(FSW_STATS_RX_FRAG_DROP_NOSLOT, "RxFragDropNoSlot", "\t\t\t%llu dropped, no slot in dring\n") \
| 807 | X(FSW_STATS_RX_FRAG_BAD, "RxFragBad", "\t\t\t%llu dropped due to bad fragments\n") \ |
| 808 | X(FSW_STATS_RX_FRAG_DROP_BAD_LEN, "RxFragBadLen", "\t\t\t%llu dropped due to bad fragment length\n") \ |
| 809 | X(FSW_STATS_RX_FRAG_DROP_NOMEM, "RxFragNoMem", "\t\t\t%llu dropped due to no memory\n") \ |
| 810 | X(FSW_STATS_RX_FRAG_DROP_TIMEOUT, "RxFragTimeOut", "\t\t\t%llu dropped due to time out\n") \ |
| 811 | X(FSW_STATS_RX_FRAG_DROP_FRAG_LIMIT, "RxFragHitFragLimit", "\t\t\t%llu dropped due to ipf max limit\n") \ |
| 812 | X(FSW_STATS_RX_FRAG_DROP_REAPED, "RxFragDrained", "\t\t\t%llu dropped due to draining\n") \ |
| 813 | X(FSW_STATS_RX_FRAG_DROP_PER_QUEUE_LIMIT,"RxFragHitPerQueueLimit","\t\t\t%llu dropped due to ipf max per queue limit\n") \ |
| 814 | /* Rx aggregation stats */ \ |
| 815 | X(FSW_STATS_RX_AGG_PKT2PKT, "RxAggPktToPkt", "\t\t%llu aggregated pkt -> super pkt\n") \ |
| 816 | X(FSW_STATS_RX_AGG_PKT2MBUF, "RxAggPktToMbuf", "\t\t%llu aggregated pkt -> super mbuf\n") \ |
| 817 | X(FSW_STATS_RX_AGG_MBUF2PKT, "RxAggMbufToPkt", "\t\t%llu aggregated mbuf -> super pkt\n") \ |
| 818 | X(FSW_STATS_RX_AGG_MBUF2MBUF, "RxAggMbufToMbuf", "\t\t%llu aggregated mbuf -> super mbuf\n") \ |
| 819 | X(FSW_STATS_RX_AGG_LIMIT, "RxAggHitAggLimit", "\t\t%llu reached aggregation limit\n") \ |
| 820 | X(FSW_STATS_RX_AGG_NO_HLEN_IP, "RxAggNoHdrLenIP", "\t\t%llu IP header length compare mismatch\n") \ |
| 821 | X(FSW_STATS_RX_AGG_NO_TTL_IP, "RxAggNoTTLIP", "\t\t%llu IP TTL compare mismatch\n") \ |
| 822 | X(FSW_STATS_RX_AGG_NO_TOS_IP, "RxAggNoTOSIP", "\t\t%llu IP TOS compare mismatch\n") \ |
| 823 | X(FSW_STATS_RX_AGG_NO_OFF_IP, "RxAggNoOffsetIP", "\t\t%llu IP offset compare mismatch\n") \ |
| 824 | X(FSW_STATS_RX_AGG_NO_OPT_IP, "RxAggNoOptionIP", "\t\t%llu IP option compare mismatch\n") \ |
| 825 | X(FSW_STATS_RX_AGG_MERGE_FASTPATH_IP, "RxAggMergeFastpathIP", "\t\t%llu IP header merge via fastpath\n") \ |
| 826 | X(FSW_STATS_RX_AGG_MERGE_SLOWPATH_IP, "RxAggMergeSlowpathIP", "\t\t%llu IP header merge via slowpath\n") \ |
| 827 | X(FSW_STATS_RX_AGG_MERGE_FASTPATH_TCP, "RxAggMergeFastpathTCP", "\t\t%llu TCP header merge via fastpath\n") \ |
| 828 | X(FSW_STATS_RX_AGG_MERGE_SLOWPATH_TCP, "RxAggMergeSlowpathTCP", "\t\t%llu TCP header merge via slowpath\n") \ |
| 829 | X(FSW_STATS_RX_AGG_OK_FASTPATH_TCP, "RxAggFastpathTCP", "\t\t%llu TCP aggregation via fastpath\n") \ |
| 830 | X(FSW_STATS_RX_AGG_OK_SLOWPATH_TCP, "RxAggSlowpathTCP", "\t\t%llu TCP aggregation via slowpath\n") \ |
| 831 | X(FSW_STATS_RX_AGG_NO_SHORT_TCP, "RxAggNoShortTCP", "\t\t%llu TCP packet too short for mask compare\n") \ |
| 832 | X(FSW_STATS_RX_AGG_NO_MASK_TCP, "RxAggNoMaskTCP", "\t\t%llu TCP mask compare mismatch\n") \ |
| 833 | X(FSW_STATS_RX_AGG_NO_HLEN_TCP, "RxAggNoHdrLenTCP", "\t\t%llu TCP header length compare mismatch\n") \ |
| 834 | X(FSW_STATS_RX_AGG_NO_ULEN_TCP, "RxAggNoULenTCP", "\t\t%llu TCP ulength compare mismatch\n") \ |
| 835 | X(FSW_STATS_RX_AGG_NO_SEQN_TCP, "RxAggNoSeqNTCP", "\t\t%llu TCP sequence number compare mismatch\n") \ |
| 836 | X(FSW_STATS_RX_AGG_NO_ACKWIN_TCP, "RxAggNoAckWinTCP", "\t\t%llu TCP ACK or window compare mismatch\n") \ |
| 837 | X(FSW_STATS_RX_AGG_NO_FLAGS_TCP, "RxAggNoFlagsTCP", "\t\t%llu TCP flags compare mismatch\n") \ |
| 838 | X(FSW_STATS_RX_AGG_NO_EXOPT_TCP, "RxAggNoExtraOptionTCP", "\t\t%llu TCP extra option compare mismatch\n") \ |
| 839 | X(FSW_STATS_RX_AGG_NO_OPTTS_TCP, "RxAggNoOptionTStampTCP", "\t\t%llu TCP timestamp option compare mismatch\n") \ |
| 840 | X(FSW_STATS_RX_AGG_BAD_CSUM, "RxAggIncorrectChecksum", "\t\t%llu Incorrect TCP/IP checksum\n") \ |
| 841 | X(FSW_STATS_RX_AGG_NO_SHORT_MBUF, "RxAggNoShortMbuf", "\t\t%llu mbuf too short for mask compare\n") \ |
X(FSW_STATS_RX_WASTED_MBUF, "RxAggWastedMbuf", "\t\t%llu wasted pre-allocated mbufs\n") \
X(FSW_STATS_RX_WASTED_BFLT, "RxAggWastedBflt", "\t\t%llu wasted pre-allocated buflets\n") \
| 844 | \ |
| 845 | /* Tx stats */ \ |
| 846 | X(FSW_STATS_TX_PACKETS, "TXPackets", "\t%llu total Tx packets\n") \ |
| 847 | X(FSW_STATS_TX_DEMUX_ERR, "TxDemuxErr", "\t\t%llu dropped, demux error\n") \ |
| 848 | X(, "TxFlowExtractErr", "\t\t%llu dropped, flow extract error\n") \ |
| 849 | X(FSW_STATS_TX_FRAG_BAD_ID, "TxFragID", "\t\t%llu dropped, invalid fragment ID\n") \ |
| 850 | X(FSW_STATS_TX_FRAG_BAD_CONT, "TxContFrag", "\t\t%llu dropped, invalid continuation fragment\n") \ |
| 851 | X(FSW_STATS_TX_FLOW_NOT_FOUND, "TxFlowNotFound", "\t\t%llu dropped, flow lookup failure\n") \ |
| 852 | X(FSW_STATS_TX_FLOW_TRACK_ERR, "TxFlowTrackErr", "\t\t%llu dropped, flow tracker error\n") \ |
X(FSW_STATS_TX_FLOW_BAD_ID, "TxFlowBadID", "\t\t%llu dropped, flow id invalid\n") \
| 854 | X(FSW_STATS_TX_FLOW_NONVIABLE, "TxFlowNonviable", "\t\t%llu dropped, flow already nonviable\n") \ |
X(FSW_STATS_TX_FLOW_TORNDOWN, "TxFlowTornDown", "\t\t%llu dropped, flow already torn down\n") \
X(FSW_STATS_TX_BAD_LISTENER, "TxBadListener", "\t\t%llu dropped, flow is listener only\n") \
| 857 | X(FSW_STATS_TX_AQM_DROP, "TxAQMDrop", "\t\t%llu dropped, AQM enqueue failure\n") \ |
| 858 | X(FSW_STATS_TX_RESOLV_PENDING, "TxResolvePending", "\t\t%llu pending resolve\n") \ |
| 859 | X(FSW_STATS_TX_RESOLV_FAIL, "TxResolveFail", "\t\t%llu dropped due to resolution failure\n") \ |
| 860 | X(FSW_STATS_TX_RESOLV_STALE, "TxResolveStale", "\t\t%llu resolved using existing info\n") \ |
| 861 | X(FSW_STATS_TX_COPY_PKT2PKT, "TxCopyPktToPkt", "\t\t%llu copied pkt -> pkt\n") \ |
| 862 | X(FSW_STATS_TX_COPY_PKT2MBUF, "TxCopyPktToMbuf", "\t\t%llu copied pkt -> mbuf\n") \ |
X(FSW_STATS_TX_COPY_SUM, "TxCopySum", "\t\t%llu copy+checksummed\n") \
| 864 | X(FSW_STATS_TX_COPY_BAD_LEN, "TxCopyBadLen", "\t\t%llu dropped, bad packet length\n") \ |
| 865 | \ |
| 866 | /* Drop stats (generic bidirectional) */ \ |
| 867 | X(FSW_STATS_DROP, "Drop", "\t%llu total dropped\n") \ |
| 868 | X(FSW_STATS_DROP_NOMEM_PKT, "DropNoMemPkt", "\t\t%llu dropped, packet alloc failure\n") \ |
| 869 | X(FSW_STATS_DROP_NOMEM_MBUF, "DropNoMemMbuf", "\t\t%llu dropped, mbuf alloc failure\n") \ |
| 870 | \ |
| 871 | /* Channel event stats */ \ |
| 872 | X(FSW_STATS_EV_RECV, "EvRecv", "\t%llu channel event received\n") \ |
| 873 | X(FSW_STATS_EV_RECV_TX_STATUS, "EvRecvTxStatus", "\t\t%llu channel event received, TX status\n") \ |
| 874 | X(FSW_STATS_EV_RECV_TX_EXPIRED, "EvRecvTxExpired", "\t\t%llu channel event received, TX expired\n") \ |
| 875 | X(FSW_STATS_EV_SENT, "EvSent", "\t%llu channel event delivered\n") \ |
| 876 | X(FSW_STATS_EV_DROP, "EvDrop", "\t%llu channel event dropped\n") \ |
| 877 | X(FSW_STATS_EV_DROP_NOMEM_PKT, "EvDropNoMemPkt", "\t%llu channel event dropped due to packet alloc failure\n") \ |
| 878 | X(FSW_STATS_EV_DROP_NA_INACTIVE, "EvDropNaInactive", "\t%llu channel event dropped due to na inactive\n") \ |
| 879 | X(FSW_STATS_EV_DROP_NA_DEFUNCT, "EvDropNaDefunct", "\t%llu channel event dropped due to na defunct\n") \ |
| 880 | X(FSW_STATS_EV_DROP_KRDROP_MODE, "EvDropKrDropMode", "\t%llu channel event dropped due to dst kring in drop mode\n") \ |
| 881 | X(FSW_STATS_EV_DROP_KEVENT_INACTIVE, "EvDropKevInactive", "\t%llu channel event dropped due to kevent not registered on channel\n") \ |
| 882 | X(FSW_STATS_EV_DROP_KRSPACE, "EvDropKrSpaceDrop", "\t%llu channel event dropped due to lack of space in user channel ring\n") \ |
| 883 | X(FSW_STATS_EV_DROP_DEMUX_ERR, "EvDropDemuxErr", "\t%llu channel event dropped due to demux error\n") \ |
| 884 | X(FSW_STATS_EV_DROP_EV_VPNA_NOTSUP, "EvDropVpnaEvNotSup", "\t%llu channel event dropped due to vpna not having event ring\n") \ |
| 885 | /* Misc. stats */ \ |
| 886 | X(FSW_STATS_FLOWS_ABORTED, "FlowsAborted", "\t%llu flow aborted\n") \ |
| 887 | X(FSW_STATS_DST_NXPORT_INVALID, "DestNexusPortInvalid", "\t%llu times dst nexus port invalid\n") \ |
| 888 | X(FSW_STATS_DST_NXPORT_INACTIVE, "DestNexusPortInactive","\t%llu times dst nexus port inactive\n") \ |
| 889 | X(FSW_STATS_DST_NXPORT_DEFUNCT, "DestNexusPortDefunct", "\t%llu times dst nexus port defunct\n") \ |
| 890 | X(FSW_STATS_DST_RING_DROPMODE, "DestRingDropMode", "\t\t%llu dropped, dst kring in drop mode\n") \ |
| 891 | X(FSW_STATS_CHAN_ERR_UPP_ALLOC, "ChanErrUppAlloc", "\t%llu user packet pool alloc failure\n") \ |
| 892 | X(FSW_STATS_CHAN_DEFUNCT_SKIP, "ChanDefunctSkipped", "\t%llu defunct skipped due to outstanding packets\n") \ |
| 893 | \ |
| 894 | X(_FSW_STATS_ERROR_INJECTIONS, "_ErrorInjections", "(\t%llu errors injected)\n") \ |
| 895 | X(FSW_STATS_IF_ADV_UPD_SENT, "IfAdvUpdSent", "\t%llu interface advisory update event sent\n") \ |
| 896 | \ |
| 897 | X(FSW_STATS_IF_ADV_UPD_DROP, "IfAdvUpdDrop", "\t%llu interface advisory update event dropped\n") \ |
| 898 | \ |
| 899 | /* FPD stats */ \ |
| 900 | FSW_FPD_STATS(X) \ |
| 901 | \ |
| 902 | X(__FSW_STATS_MAX, "", "end of flowswitch stats") |
| 903 | |
| 904 | /* END CSTYLED */ |
| 905 | |
| 906 | /* |
| 907 | * Common stats operation and macro |
| 908 | */ |
| 909 | #define EXPAND_TO_ENUMERATION(a, b, c) a, |
| 910 | #define EXPAND_TO_STRING(a, b, c) b, |
| 911 | #define EXPAND_TO_FORMAT(a, b, c) c, |
| 912 | |
| 913 | #define DEFINE_STATS_STR_FUNC(type, table) \ |
| 914 | __attribute__((always_inline)) \ |
| 915 | static inline const char * \ |
| 916 | type##_str(enum _##type value) \ |
| 917 | { \ |
static const char *str_table[] = { \
table(EXPAND_TO_STRING) \
}; \
return (str_table[value]); \
| 922 | } |
| 923 | |
| 924 | #define DEFINE_STATS_FMT_FUNC(type, table) \ |
| 925 | __attribute__((always_inline)) \ |
| 926 | static inline const char * \ |
| 927 | type##_fmt(enum _##type value) \ |
| 928 | { \ |
static const char *fmt_table[] = { \
table(EXPAND_TO_FORMAT) \
}; \
return (fmt_table[value]); \
| 933 | } |
| 934 | |
| 935 | #define STATS_VAL(s_ptr, t) ((s_ptr)->_arr[(t)]) |
| 936 | #define STATS_INC(s_ptr, t) ((s_ptr)->_arr[(t)]++) |
| 937 | #define STATS_DEC(s_ptr, t) ((s_ptr)->_arr[(t)]--) |
| 938 | #define STATS_ADD(s_ptr, t, v) ((s_ptr)->_arr[(t)] += (v)) |
| 939 | |
| 940 | static inline void __attribute__((always_inline)) |
| 941 | __stats_fold(uint64_t *__counted_by(len)dst, uint64_t *__counted_by(len)src, size_t len) |
| 942 | { |
| 943 | // TODO replace with vector instruction once veclib is ready for xnu |
| 944 | size_t i; |
| 945 | for (i = 0; i < len; i++) { |
| 946 | dst[i] += src[i]; |
| 947 | } |
| 948 | } |
| 949 | |
| 950 | #define DEFINE_STATS_FOLD_FUNC(type, len) \ |
| 951 | static inline void __attribute__((always_inline)) \ |
| 952 | type##_fold(struct type *dst, struct type *src) \ |
| 953 | { \ |
| 954 | __stats_fold(dst->_arr, src->_arr, len); \ |
| 955 | } |
| 956 | |
| 957 | static inline void __attribute__((always_inline)) |
| 958 | __stats_reset(uint64_t *__counted_by(len)_arr, size_t len) |
| 959 | { |
| 960 | // TODO replace with vector instruction once veclib is ready for xnu |
| 961 | size_t i; |
| 962 | for (i = 0; i < len; i++) { |
| 963 | _arr[i] = 0; |
| 964 | } |
| 965 | } |
| 966 | |
| 967 | #define DEFINE_STATS_RESET_FUNC(type, len) \ |
| 968 | static inline void __attribute__((always_inline)) \ |
| 969 | type##_reset(struct type *s) \ |
| 970 | { \ |
| 971 | __stats_reset(s->_arr, len); \ |
| 972 | } |
| 973 | |
| 974 | #define STATS_ALIGN 16 /* align for vector instruction */ |
| 975 | |
| 976 | #define STATS_REGISTER(name, NAME) \ |
| 977 | enum _##name { NAME##_TABLE(EXPAND_TO_ENUMERATION) }; \ |
| 978 | struct name { \ |
| 979 | uint64_t _arr[__##NAME##_MAX]; \ |
| 980 | } __attribute__((aligned(STATS_ALIGN))); \ |
| 981 | DEFINE_STATS_STR_FUNC(name, NAME##_TABLE) \ |
| 982 | DEFINE_STATS_FMT_FUNC(name, NAME##_TABLE) \ |
| 983 | DEFINE_STATS_FOLD_FUNC(name, __##NAME##_MAX) \ |
| 984 | DEFINE_STATS_RESET_FUNC(name, __##NAME##_MAX) |
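/*
 * Illustrative sketch (not part of this header): STATS_REGISTER(netif_stats,
 * NETIF_STATS) below expands roughly to the following, with one enum value
 * per X() row of NETIF_STATS_TABLE:
 *
 *	enum _netif_stats { ... , __NETIF_STATS_MAX };
 *	struct netif_stats {
 *		uint64_t _arr[__NETIF_STATS_MAX];
 *	} __attribute__((aligned(STATS_ALIGN)));
 *	static inline const char *netif_stats_str(enum _netif_stats value);
 *	static inline const char *netif_stats_fmt(enum _netif_stats value);
 *	static inline void netif_stats_fold(struct netif_stats *, struct netif_stats *);
 *	static inline void netif_stats_reset(struct netif_stats *);
 */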
| 985 | |
| 986 | /* Stats registration stub */ |
| 987 | STATS_REGISTER(ip_stats, IP_STATS); |
| 988 | STATS_REGISTER(ip6_stats, IP6_STATS); |
| 989 | STATS_REGISTER(tcp_stats, TCP_STATS); |
| 990 | STATS_REGISTER(udp_stats, UDP_STATS); |
| 991 | STATS_REGISTER(quic_stats, QUIC_STATS); |
| 992 | |
| 993 | STATS_REGISTER(fsw_stats, FSW_STATS); |
| 994 | STATS_REGISTER(netif_stats, NETIF_STATS); |
| 995 | |
| 996 | #undef STATS_REGISTER |
#undef DEFINE_STATS_RESET_FUNC
#undef DEFINE_STATS_FOLD_FUNC
#undef DEFINE_STATS_FMT_FUNC
#undef DEFINE_STATS_STR_FUNC
#undef EXPAND_TO_FORMAT
#undef EXPAND_TO_STRING
#undef EXPAND_TO_ENUMERATION
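/*
 * Usage sketch (illustrative only): counters are bumped with the STATS_*
 * accessors above, then folded and pretty-printed with the generated
 * per-type helpers:
 *
 *	struct netif_stats scratch, total;
 *
 *	netif_stats_reset(&scratch);
 *	netif_stats_reset(&total);
 *	STATS_INC(&scratch, NETIF_STATS_VP_ENQUEUED);
 *	STATS_ADD(&scratch, NETIF_STATS_VP_DELIVERED, 4);
 *	netif_stats_fold(&total, &scratch);	// total += scratch
 *	printf(netif_stats_fmt(NETIF_STATS_VP_ENQUEUED),
 *	    STATS_VAL(&total, NETIF_STATS_VP_ENQUEUED));
 */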
| 1005 | |
| 1006 | /* |
| 1007 | * Channel/Ring stats |
| 1008 | */ |
| 1009 | typedef struct { |
| 1010 | uint32_t cres_pkt_alloc_failures; |
| 1011 | uint32_t __cres_reserved[1]; |
| 1012 | } channel_ring_error_stats, *channel_ring_error_stats_t; |
| 1013 | |
| 1014 | typedef struct { |
| 1015 | uint64_t crsu_total_slots_transferred; |
| 1016 | uint64_t crsu_total_bytes_transferred; |
| 1017 | uint64_t crsu_number_of_syncs; |
| 1018 | uint32_t crsu_min_slots_transferred; |
| 1019 | uint32_t crsu_max_slots_transferred; |
| 1020 | uint32_t crsu_slots_per_sync; |
| 1021 | uint32_t crsu_slots_per_sync_ma; |
| 1022 | uint64_t crsu_bytes_per_sync; |
| 1023 | uint64_t crsu_bytes_per_sync_ma; |
| 1024 | uint32_t __crsu_reserved[2]; |
| 1025 | } channel_ring_user_stats, *channel_ring_user_stats_t; |
| 1026 | |
| 1027 | typedef struct { |
| 1028 | uint64_t crs_total_slots_transferred; |
| 1029 | uint64_t crs_total_bytes_transferred; |
| 1030 | uint64_t crs_number_of_transfers; |
| 1031 | uint32_t crs_min_slots_transferred; |
| 1032 | uint32_t crs_max_slots_transferred; |
| 1033 | uint32_t crs_slots_per_second; |
| 1034 | uint32_t crs_slots_per_second_ma; |
| 1035 | uint64_t crs_bytes_per_second; |
| 1036 | uint64_t crs_bytes_per_second_ma; |
| 1037 | uint32_t __crs_reserved[2]; |
| 1038 | } channel_ring_stats, *channel_ring_stats_t; |
| 1039 | |
| 1040 | struct netif_qstats { |
| 1041 | uint64_t nq_total_pkts; /* total pkts transferred */ |
| 1042 | uint64_t nq_total_bytes; /* total bytes transferred */ |
| 1043 | uint64_t nq_num_xfers; /* number of transfers */ |
| 1044 | uint32_t nq_min_pkts; /* min pkts transferred */ |
| 1045 | uint32_t nq_max_pkts; /* max pkts transferred */ |
| 1046 | uint32_t nq_pkts_ps; /* pkts transferred per second */ |
| 1047 | uint32_t nq_pkts_ps_ma; /* moving avg of pkts transferred per second */ |
| 1048 | uint64_t nq_bytes_ps; /* bytes transferred per second */ |
| 1049 | uint64_t nq_bytes_ps_ma; /* moving avg of bytes transferred per second */ |
| 1050 | }; |
| 1051 | |
| 1052 | /* |
| 1053 | * Netif queue set queue stats |
| 1054 | * Output: An array of netif_qstats_info struct |
| 1055 | */ |
| 1056 | #define SK_STATS_NETIF_QUEUE_SYSCTL "kern.skywalk.stats.netif_queue" |
| 1057 | |
| 1058 | /* Valid value for nqi_queue_flag */ |
| 1059 | #define NQI_QUEUE_FLAG_IS_RX 0x00000001 |
| 1060 | struct netif_qstats_info { |
| 1061 | uint64_t nqi_qset_id; |
| 1062 | uint16_t nqi_queue_flag; |
| 1063 | uint16_t nqi_queue_idx; |
| 1064 | packet_svc_class_t nqi_svc; |
| 1065 | struct netif_qstats nqi_stats; |
| 1066 | }; |
| 1067 | |
| 1068 | /* |
| 1069 | * Nexus provider information. |
| 1070 | * Provides information about the provider including any registered instances. |
| 1071 | * Used with "kern.skywalk.nexus_provider_list" sysctl. |
| 1072 | */ |
| 1073 | #define NEXUS_PROVIDER_LIST_SYSCTL "kern.skywalk.nexus_provider_list" |
| 1074 | |
| 1075 | typedef struct { |
| 1076 | uuid_t npi_prov_uuid; /* nexus provider UUID */ |
| 1077 | struct nxprov_params npi_prov_params; /* nexus provider parameters */ |
| 1078 | uint32_t npi_instance_uuids_count; |
| 1079 | uint32_t __npi_align_reserved; |
| 1080 | uuid_t npi_instance_uuids[0]; /* nexus instance UUIDs */ |
| 1081 | } nexus_provider_info, *nexus_provider_info_t; |
| 1082 | |
| 1083 | #define NEXUS_PROVIDER_INFO_SIZE(a) \ |
| 1084 | offsetof(nexus_provider_info, npi_instance_uuids[a]) |
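/*
 * Consumer sketch (illustrative; assumes the sysctl output is a packed
 * sequence of variable-length records, as NEXUS_PROVIDER_INFO_SIZE()
 * suggests; "buf"/"len" stand for the bytes returned by sysctlbyname()):
 *
 *	uint8_t *buf;
 *	size_t len, off = 0;
 *
 *	while (off < len) {
 *		nexus_provider_info_t npi =
 *		    (nexus_provider_info_t)(void *)(buf + off);
 *		// npi->npi_instance_uuids[0 .. npi_instance_uuids_count - 1]
 *		off += NEXUS_PROVIDER_INFO_SIZE(npi->npi_instance_uuids_count);
 *	}
 */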
| 1085 | |
| 1086 | /* |
| 1087 | * Nexus channel information. |
| 1088 | * Provides information about every channel in the system. |
| 1089 | * Used with "kern.skywalk.nexus_channel_list" sysctl. |
| 1090 | */ |
| 1091 | #define NEXUS_CHANNEL_LIST_SYSCTL "kern.skywalk.nexus_channel_list" |
| 1092 | |
| 1093 | /* Enable/Disable channel ring stats collection */ |
| 1094 | #define NEXUS_CHANNEL_RING_STAT_ENABLE_SYSCTL "kern.skywalk.ring_stat_enable" |
| 1095 | |
| 1096 | typedef struct { |
| 1097 | ring_id_t ncre_ring_id; |
| 1098 | uint32_t __ncre_align_reserved; |
| 1099 | channel_ring_stats ncre_stats; |
| 1100 | channel_ring_user_stats ncre_user_stats; |
| 1101 | channel_ring_error_stats ncre_error_stats; |
| 1102 | } nexus_channel_ring_entry, *nexus_channel_ring_entry_t; |
| 1103 | |
| 1104 | typedef struct { |
| 1105 | uuid_t nce_uuid; /* channel uuid */ |
| 1106 | uint32_t nce_flags; /* SCHF_* */ |
| 1107 | pid_t nce_pid; /* channel owner pid */ |
int nce_fd; /* channel file descriptor */
| 1109 | nexus_port_t nce_port; /* connected nexus port */ |
| 1110 | uint32_t nce_tx_rings; /* num of tx rings */ |
| 1111 | uint32_t nce_rx_rings; /* num of rx rings */ |
| 1112 | uint32_t __nce_align_reserved; |
| 1113 | nexus_channel_ring_entry nce_ring_entries[0]; /* tx followed by rx */ |
| 1114 | } nexus_channel_entry, *nexus_channel_entry_t; |
| 1115 | |
| 1116 | #define SCHF_MONITOR_TX 0x00000001 |
| 1117 | #define SCHF_MONITOR_RX 0x00000002 |
| 1118 | #define SCHF_MONITOR_NO_COPY 0x00000004 |
| 1119 | #define SCHF_USER_PACKET_POOL 0x00000008 |
| 1120 | #define SCHF_DEFUNCT_OK 0x00000010 |
| 1121 | #define SCHF_EXCLUSIVE 0x00000020 |
| 1122 | #define SCHF_FILTER 0x00000040 |
| 1123 | #define SCHF_EVENT_RING 0x00000080 |
| 1124 | #define SCHF_IF_ADV 0x00000100 |
| 1125 | #define SCHF_DEFUNCT_SKIP 0x00000200 |
| 1126 | #define SCHF_LOW_LATENCY 0x00000400 |
| 1127 | #define SCHF_CLOSING 0x40000000 |
| 1128 | #define SCHF_DEFUNCT 0x80000000 |
| 1129 | |
| 1130 | #define SCHF_BITS \ |
| 1131 | "\020\01MON_TX\02MON_RX\03NO_COPY\04USER_PACKET_POOL\05DEFUNCT_OK" \ |
| 1132 | "\06EXCLUSIVE\07FILTER\010EVENT_RING\011IF_ADV\012DEFUNCT_SKIP" \ |
| 1133 | "\013LOW_LATENCY\037CLOSING\040DEFUNCT" |
| 1134 | |
| 1135 | #define NEXUS_CHANNEL_ENTRY_SIZE(n_rings) \ |
| 1136 | offsetof(nexus_channel_entry, nce_ring_entries[n_rings]) |
| 1137 | |
| 1138 | typedef struct { |
| 1139 | uuid_t nci_instance_uuid; /* nexus instance UUID */ |
| 1140 | uint32_t nci_channel_entries_count; |
| 1141 | uint32_t __nci_align_reserved; |
| 1142 | nexus_channel_entry nci_channel_entries[0]; /* variable length */ |
| 1143 | } nexus_channel_info, *nexus_channel_info_t; |
| 1144 | |
| 1145 | /* |
| 1146 | * Nexus statistics types. |
| 1147 | */ |
| 1148 | typedef enum { |
| 1149 | NEXUS_STATS_TYPE_INVALID = 0, /* invalid type */ |
| 1150 | NEXUS_STATS_TYPE_FSW, /* flowswitch */ |
| 1151 | NEXUS_STATS_TYPE_CHAN_ERRORS, /* Channel error stats */ |
| 1152 | } nexus_stats_type_t; |
| 1153 | |
| 1154 | /* |
| 1155 | * Flowswitch statistics (NEXUS_STATS_TYPE_FSW). |
| 1156 | */ |
| 1157 | struct __nx_stats_fsw { |
| 1158 | struct ip_stats nxs_ipstat; |
| 1159 | struct ip6_stats nxs_ip6stat; |
| 1160 | struct tcp_stats nxs_tcpstat; |
| 1161 | struct udp_stats nxs_udpstat; |
| 1162 | struct quic_stats nxs_quicstat; |
| 1163 | }; |
| 1164 | |
| 1165 | /* |
| 1166 | * Channel error statistics |
| 1167 | */ |
| 1168 | struct __nx_stats_channel_errors { |
| 1169 | channel_ring_error_stats_t nxs_cres; |
| 1170 | }; |
| 1171 | |
| 1172 | /* |
| 1173 | * Nexus advisories. |
| 1174 | */ |
| 1175 | #define NX_INTF_ADV_SIZE (sizeof(uint64_t) + sizeof(struct ifnet_interface_advisory)) |
| 1176 | |
| 1177 | #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE) |
| 1178 | /* |
| 1179 | * Nexus advisory region types. |
| 1180 | */ |
| 1181 | typedef enum { |
| 1182 | #if defined(BSD_KERNEL_PRIVATE) |
| 1183 | NEXUS_ADVISORY_TYPE_INVALID = 0, |
| 1184 | #endif /* BSD_KERNEL_PRIVATE */ |
| 1185 | NEXUS_ADVISORY_TYPE_FLOWSWITCH = 1, /* struct sk_nexusadv */ |
| 1186 | NEXUS_ADVISORY_TYPE_NETIF, /* struct netif_nexus_advisory */ |
| 1187 | } nexus_advisory_type_t; |
| 1188 | |
| 1189 | /* |
* The metadata object is placed at the beginning of the nexus advisory region.
| 1191 | */ |
| 1192 | struct __kern_nexus_adv_metadata { |
| 1193 | uint16_t knam_version; |
| 1194 | uint16_t __reserved; |
| 1195 | nexus_advisory_type_t knam_type; |
| 1196 | }; |
| 1197 | #define NX_ADVISORY_MD_VERSION 1 |
| 1198 | #define NX_ADVISORY_MD_CURRENT_VERSION NX_ADVISORY_MD_VERSION |
| 1199 | |
| 1200 | struct __kern_netif_intf_advisory { |
| 1201 | uint32_t cksum; |
| 1202 | uint32_t _reserved; |
| 1203 | struct ifnet_interface_advisory adv; |
| 1204 | } __attribute__((aligned(sizeof(uint64_t)))); |
| 1205 | |
| 1206 | /* |
| 1207 | * Netif nexus advisory. |
* Currently this structure is not exposed to the user, but in the future it
* can be exposed via the os_channel_get_advisory_region() API.
| 1210 | */ |
| 1211 | struct netif_nexus_advisory { |
| 1212 | uint64_t nna_version; |
| 1213 | /* |
| 1214 | * __nna_intf_adv has been defined as an opaque blob here as the |
| 1215 | * atomicity of the data can be guaranteed only if accessed using |
| 1216 | * the os_channel_get_interface_advisory() API. |
| 1217 | */ |
| 1218 | union { |
| 1219 | #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE) |
| 1220 | struct __kern_netif_intf_advisory __kern_intf_adv; |
| 1221 | #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */ |
| 1222 | uint8_t __nna_intf_adv[NX_INTF_ADV_SIZE]; |
| 1223 | }; |
| 1224 | } __attribute__((aligned(sizeof(uint64_t)))); |
| 1225 | |
| 1226 | /* Netif nexus advisory version */ |
| 1227 | #define NX_NETIF_ADVISORY_VERSION 1 |
| 1228 | #define NX_NETIF_ADVISORY_CURRENT_VERSION NX_NETIF_ADVISORY_VERSION |
| 1229 | #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */ |
| 1230 | |
| 1231 | /* |
| 1232 | * Flowswitch nexus advisory. |
| 1233 | * |
| 1234 | * Note: add new field members at the bottom; if layout changes in the |
| 1235 | * middle, bump up NX_ADVISORY_VERSION and recompile userland clients |
| 1236 | * accessing this structure. |
| 1237 | * |
| 1238 | * Timestamps are stored in nanoseconds, based on microuptime(). |
| 1239 | * Use mach_timebase_info() to acquire numerator and denominator, |
| 1240 | * and then compute current time using the following: |
| 1241 | * |
| 1242 | * uint64_t now = (mach_absolute_time() * tb_info.numer) / tb_info.denom; |
| 1243 | * |
| 1244 | * Comparisons can then be done against that current time. |
| 1245 | */ |
| 1246 | struct sk_nexusadv { |
| 1247 | uint64_t nxadv_ver; /* see NX_ADVISORY_VERSION */ |
| 1248 | uint64_t nxadv_fg_sendts; /* foreground traffic timestamp */ |
| 1249 | uint64_t nxadv_rt_sendts; /* realtime traffic timestamp */ |
| 1250 | union { |
| 1251 | #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE) |
| 1252 | struct __kern_netif_intf_advisory _nxadv_intf_adv; |
| 1253 | #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */ |
| 1254 | uint8_t _nxadv_reserved[NX_INTF_ADV_SIZE]; |
| 1255 | }; |
| 1256 | } __attribute__((aligned(sizeof(uint64_t)))); |
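/*
 * Userland sketch of the timestamp comparison described above ("adv" is
 * assumed to point at the mapped flowswitch advisory region):
 *
 *	#include <mach/mach_time.h>
 *
 *	const struct sk_nexusadv *adv;		// mapped advisory region
 *	mach_timebase_info_data_t tb_info;
 *
 *	mach_timebase_info(&tb_info);
 *	uint64_t now = (mach_absolute_time() * tb_info.numer) / tb_info.denom;
 *	uint64_t fg_idle_ns = now - adv->nxadv_fg_sendts;
 */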
| 1257 | |
| 1258 | /* Flowswitch nexus advisory version */ |
| 1259 | #define NX_ADVISORY_VERSION 1 |
| 1260 | #define NX_ADVISORY_VERSION_2 2 |
| 1261 | #define NX_ADVISORY_CURRENT_VERSION NX_ADVISORY_VERSION_2 |
| 1262 | #define NX_FLOWSWITCH_ADVISORY_CURRENT_VERSION NX_ADVISORY_CURRENT_VERSION |
| 1263 | |
| 1264 | typedef enum { |
| 1265 | /* |
| 1266 | * TCP states. |
| 1267 | */ |
| 1268 | SFT_STATE_CLOSED = 0, /* closed */ |
| 1269 | SFT_STATE_LISTEN, /* listening for connection */ |
| 1270 | SFT_STATE_SYN_SENT, /* active, have sent SYN */ |
| 1271 | SFT_STATE_SYN_RECEIVED, /* have sent and rcvd SYN */ |
| 1272 | SFT_STATE_ESTABLISHED, /* established */ |
| 1273 | SFT_STATE_CLOSE_WAIT, /* rcvd FIN, waiting close */ |
| 1274 | SFT_STATE_FIN_WAIT_1, /* have sent FIN */ |
| 1275 | SFT_STATE_CLOSING, /* exchanged FINs, waiting FIN|ACK */ |
| 1276 | SFT_STATE_LAST_ACK, /* rcvd FIN, closed, waiting FIN|ACK */ |
| 1277 | SFT_STATE_FIN_WAIT_2, /* closed, FIN is ACK'd */ |
| 1278 | SFT_STATE_TIME_WAIT, /* quiet wait after close */ |
| 1279 | |
| 1280 | /* |
| 1281 | * UDP states. |
| 1282 | */ |
| 1283 | SFT_STATE_NO_TRAFFIC = 20, /* no packet observed */ |
| 1284 | SFT_STATE_SINGLE, /* single packet */ |
| 1285 | SFT_STATE_MULTIPLE, /* multiple packets */ |
| 1286 | |
| 1287 | SFT_STATE_MAX = 255 |
| 1288 | } sk_stats_flow_track_state_t; |
| 1289 | |
| 1290 | struct sk_stats_flow_track { |
| 1291 | uint64_t sft_bytes; /* bytes */ |
| 1292 | uint64_t sft_packets; /* packets */ |
| 1293 | uint64_t sft_spackets; /* super packets */ |
| 1294 | sk_stats_flow_track_state_t sft_state; /* SFT_STATE_* */ |
uint32_t sft_rtt; /* avg ack rtt at flowswitch */
| 1296 | uint32_t sft_seq; /* max sequence number sent */ |
| 1297 | uint16_t sft_max_win; /* largest window (pre scaling) */ |
| 1298 | uint8_t sft_wscale; /* window scaling factor */ |
| 1299 | }; |
| 1300 | |
| 1301 | #define FLOW_STATS_IN_ADD(fe, stat, cnt) { \ |
| 1302 | volatile struct sk_stats_flow_track *fst; \ |
| 1303 | fst = &(fe)->fe_stats->fs_rtrack; \ |
| 1304 | fst->sft_##stat += (cnt); \ |
| 1305 | } |
| 1306 | |
| 1307 | #define FLOW_STATS_OUT_ADD(fe, stat, cnt) { \ |
| 1308 | volatile struct sk_stats_flow_track *fst; \ |
| 1309 | fst = &(fe)->fe_stats->fs_ltrack; \ |
| 1310 | fst->sft_##stat += (cnt); \ |
| 1311 | } |
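/*
 * Usage sketch: "stat" names an sk_stats_flow_track member without its sft_
 * prefix (bytes, packets, spackets, ...); "fe" is assumed to be a flowswitch
 * flow entry carrying an fe_stats pointer, and "plen" a packet length, e.g.:
 *
 *	FLOW_STATS_IN_ADD(fe, packets, 1);
 *	FLOW_STATS_IN_ADD(fe, bytes, plen);
 */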
| 1312 | |
| 1313 | /* |
| 1314 | * Skywalk flow (in kernel), equivalent of "net.inet.*.pcblist_n". |
| 1315 | * Output: Array of struct sk_stats_flow (per flow). |
| 1316 | */ |
| 1317 | #define SK_STATS_FLOW "kern.skywalk.stats.flow" |
| 1318 | struct sk_stats_flow { |
| 1319 | uuid_t sf_nx_uuid; /* nexus instance uuid */ |
| 1320 | uuid_t sf_uuid; /* flow uuid */ |
| 1321 | char sf_if_name[IFNAMSIZ]; /* interface name */ |
| 1322 | uint32_t sf_if_index; /* interface index */ |
| 1323 | uint32_t sf_bucket_idx; /* flow bucket index */ |
| 1324 | |
| 1325 | pid_t sf_pid; /* flow pid */ |
| 1326 | pid_t sf_epid; /* flow effective pid */ |
| 1327 | char sf_proc_name[32]; /* flow proc name */ |
char sf_eproc_name[32]; /* flow effective proc name */
| 1329 | |
| 1330 | uint32_t sf_flags; /* SFLOWF_* */ |
| 1331 | nexus_port_t sf_nx_port; /* nexus port */ |
| 1332 | |
| 1333 | uint8_t sf_protocol; /* effective protocol */ |
| 1334 | packet_svc_class_t sf_svc_class; /* service class */ |
flowadv_idx_t sf_adv_idx; /* flow advisory table index */
| 1336 | struct flow_key sf_key __attribute__((__aligned__(16))); |
| 1337 | |
| 1338 | volatile struct sk_stats_flow_track sf_ltrack; /* local states */ |
| 1339 | volatile struct sk_stats_flow_track sf_rtrack; /* remote states */ |
| 1340 | #define sf_obytes sf_ltrack.sft_bytes |
| 1341 | #define sf_opackets sf_ltrack.sft_packets |
| 1342 | #define sf_ospackets sf_ltrack.sft_spackets |
| 1343 | #define sf_ibytes sf_rtrack.sft_bytes |
| 1344 | #define sf_ipackets sf_rtrack.sft_packets |
| 1345 | #define sf_ispackets sf_rtrack.sft_spackets |
| 1346 | #define sf_lrtt sf_ltrack.sft_rtt |
| 1347 | #define sf_rrtt sf_rtrack.sft_rtt |
| 1348 | #define sf_lseq sf_ltrack.sft_seq |
| 1349 | #define sf_rseq sf_rtrack.sft_seq |
| 1350 | #define sf_lmax_win sf_ltrack.sft_max_win |
| 1351 | #define sf_rmax_win sf_rtrack.sft_max_win |
| 1352 | #define sf_lwscale sf_ltrack.sft_wscale |
| 1353 | #define sf_rwscale sf_rtrack.sft_wscale |
| 1354 | |
| 1355 | activity_bitmap_t sf_activity; /* flow activity bitmap */ |
| 1356 | }; |
| 1357 | |
| 1358 | /* valid values for sf_flags */ |
| 1359 | #define SFLOWF_TRACK 0x00000010 /* flow is tracked */ |
| 1360 | #define SFLOWF_CONNECTED 0x00000020 /* connected mode */ |
| 1361 | #define SFLOWF_LISTENER 0x00000040 /* listener mode */ |
| 1362 | #define SFLOWF_QOS_MARKING 0x00000100 /* flow can have qos marking */ |
#define SFLOWF_BOUND_IP 0x00000200 /* src addr explicitly bound */
| 1364 | #define SFLOWF_ONLINK 0x00000400 /* dst directly on the link */ |
| 1365 | #define SFLOWF_LOW_LATENCY 0x00000800 /* low latency flow */ |
| 1366 | #define SFLOWF_WAIT_CLOSE 0x00001000 /* defer free after close */ |
| 1367 | #define SFLOWF_CLOSE_NOTIFY 0x00002000 /* notify NECP upon tear down */ |
| 1368 | #define SFLOWF_NOWAKEFROMSLEEP 0x00004000 /* don't wake for this flow */ |
| 1369 | #define SFLOWF_ABORTED 0x01000000 /* has sent RST to peer */ |
| 1370 | #define SFLOWF_NONVIABLE 0x02000000 /* disabled; to be torn down */ |
| 1371 | #define SFLOWF_WITHDRAWN 0x04000000 /* flow has been withdrawn */ |
| 1372 | #define SFLOWF_TORN_DOWN 0x08000000 /* torn down, to be destroyed */ |
| 1373 | #define SFLOWF_PARENT 0x10000000 /* parent flow */ |
| 1374 | #define SFLOWF_CHILD 0x20000000 /* child flow */ |
| 1375 | #define SFLOWF_DESTROYED 0x40000000 /* not in RB trees anymore */ |
| 1376 | #define SFLOWF_LINGERING 0x80000000 /* destroyed and lingering */ |
| 1377 | |
| 1378 | #define SFLOW_BUCKET_NONE ((uint32_t)-1) |
| 1379 | |
| 1380 | #if defined(BSD_KERNEL_PRIVATE) |
| 1381 | #include <os/refcnt.h> |
| 1382 | |
| 1383 | /* |
| 1384 | * flow_stats is the kernel stats object that serves as efficient conduit |
| 1385 | * between stats producer (the Skywalk flowswitch) and consumers |
| 1386 | * (e.g. necp_client/ntstat). It embeds the sk_stats_flow along with a |
* reference count. The flow_stats object is freed when it is released and its
* refcnt reaches 0. There is only one producer, but there can be multiple consumers.
| 1389 | * There is no lock protecting the stats object as inconsistent intermediate |
| 1390 | * state data is tolerable for stats consumers and most fields, e.g. integer |
| 1391 | * counters, are updated atomically. Synchronization of producer/consumer |
| 1392 | * should be done via other means, e.g. necp/ntstat events, rather than on the |
| 1393 | * flow_stats itself. |
| 1394 | * |
| 1395 | * Fields in flow_stats.fs_stats are published in different phases: |
| 1396 | * - Descriptor fields |
| 1397 | * ID, names, address, etc. which are immutable during flow lifetime, |
| 1398 | * which are initialized during creation time. |
| 1399 | * - State fields |
| 1400 | * Flow state, track state, etc. which are mutable. They are |
| 1401 | * initialized during flow creation time, but lazily updated during |
| 1402 | * runtime and upon synchronous retrieval. |
| 1403 | * - Runtime fields |
| 1404 | * Counters (packets in/out, bytes in/out, rtt, etc.), which are |
| 1405 | * mutable and updated in real-time. |
| 1406 | * |
| 1407 | * Note the reduced alignment for sk_stats_flow and sk_stats_flow_track to |
| 1408 | * reduce the allocation size. |
| 1409 | */ |
| 1410 | struct flow_stats { |
| 1411 | struct sk_stats_flow fs_stats; |
| 1412 | #define fs_ltrack fs_stats.sf_ltrack |
| 1413 | #define fs_rtrack fs_stats.sf_rtrack |
| 1414 | #define fs_activity fs_stats.sf_activity |
| 1415 | #define fs_lrtt fs_stats.sf_lrtt |
| 1416 | #define fs_rrtt fs_stats.sf_rrtt |
| 1417 | |
| 1418 | os_refcnt_t fs_refcnt; |
| 1419 | }; |
| 1420 | |
| 1421 | extern void flow_stats_free(struct flow_stats *fs); |
| 1422 | |
| 1423 | __attribute__((always_inline)) |
| 1424 | static inline void |
| 1425 | flow_stats_retain(struct flow_stats *fs) |
| 1426 | { |
| 1427 | os_ref_retain(rc: &fs->fs_refcnt); |
| 1428 | } |
| 1429 | |
| 1430 | __attribute__((always_inline)) |
| 1431 | static inline void |
| 1432 | flow_stats_release(struct flow_stats *fs) |
| 1433 | { |
| 1434 | if (__improbable(os_ref_release(&fs->fs_refcnt) == 0)) { |
| 1435 | flow_stats_free(fs); |
| 1436 | } |
| 1437 | } |
| 1438 | |
| 1439 | __attribute__((always_inline)) |
| 1440 | static inline os_ref_count_t |
| 1441 | flow_stats_refcnt(struct flow_stats *fs) |
| 1442 | { |
| 1443 | return os_ref_get_count(rc: &fs->fs_refcnt); |
| 1444 | } |
| 1445 | #endif /* BSD_KERNEL_PRIVATE */ |
| 1446 | |
| 1447 | #define SK_STATS_FLOW_OWNER "kern.skywalk.stats.flow_owner" |
| 1448 | struct sk_stats_flow_owner { |
| 1449 | uuid_t sfo_nx_uuid; /* nexus instance uuid */ |
| 1450 | char sfo_if_name[IFNAMSIZ]; /* attached interface name */ |
| 1451 | uint32_t sfo_bucket_idx; /* flow owner bucket index */ |
| 1452 | |
| 1453 | char sfo_name[32]; /* flow owner name */ |
| 1454 | pid_t sfo_pid; /* flow owner pid */ |
| 1455 | |
| 1456 | nexus_port_t sfo_nx_port; /* flow owner nexus port */ |
| 1457 | boolean_t sfo_nx_port_pid_bound; /* flow owner port pid bound */ |
| 1458 | boolean_t sfo_nx_port_destroyed; /* flow owner port destroyed */ |
| 1459 | } __attribute__((aligned(64))); |
| 1460 | |
| 1461 | #define SK_STATS_FLOW_ROUTE "kern.skywalk.stats.flow_route" |
| 1462 | struct sk_stats_flow_route { |
| 1463 | uuid_t sfr_nx_uuid; /* nexus instance UUID */ |
| 1464 | uuid_t sfr_uuid; /* flow route UUID */ |
| 1465 | char sfr_if_name[IFNAMSIZ]; /* interface name */ |
| 1466 | |
| 1467 | uint32_t sfr_bucket_idx; /* flow route bucket index */ |
| 1468 | uint32_t sfr_id_bucket_idx; /* flow route id bucket index */ |
| 1469 | |
| 1470 | uint32_t sfr_flags; /* SFLOWRTF_* */ |
| 1471 | uint32_t sfr_usecnt; /* flow route usecnt */ |
| 1472 | int64_t sfr_expire; /* seconds left to expire */ |
| 1473 | |
| 1474 | union sockaddr_in_4_6 sfr_laddr; /* local address */ |
| 1475 | union sockaddr_in_4_6 sfr_faddr; /* foreign address */ |
| 1476 | union sockaddr_in_4_6 sfr_gaddr; /* gateway address */ |
| 1477 | |
| 1478 | uint8_t sfr_ether_dhost[ETHER_ADDR_LEN] |
| 1479 | __attribute__((aligned(64))); |
| 1480 | }; |
| 1481 | |
| 1482 | /* valid values for sfr_flags */ |
| 1483 | #define SFLOWRTF_ATTACHED 0x00000001 /* attached to RB trees */ |
| 1484 | #define SFLOWRTF_ONLINK 0x00000010 /* dst directly on the link */ |
| 1485 | #define SFLOWRTF_GATEWAY 0x00000020 /* gw IP address is valid */ |
| 1486 | #define SFLOWRTF_RESOLVED 0x00000040 /* flow route is resolved */ |
| 1487 | #define SFLOWRTF_HAS_LLINFO 0x00000080 /* has dst link-layer address */ |
| 1488 | #define SFLOWRTF_DELETED 0x00000100 /* route has been deleted */ |
| 1489 | #define SFLOWRTF_DST_LL_MCAST 0x00000200 /* dst is link layer multicast */ |
| 1490 | #define SFLOWRTF_DST_LL_BCAST 0x00000400 /* dst is link layer broadcast */ |
| 1491 | |
| 1492 | /* |
| 1493 | * Skywalk netif stats |
| 1494 | * Output: Array of struct sk_stats_net_if entry (per netif nexus instance). |
| 1495 | */ |
| 1496 | #define SK_STATS_NET_IF "kern.skywalk.stats.net_if" |
| 1497 | struct sk_stats_net_if { |
| 1498 | uuid_t sns_nx_uuid; /* nexus netif instance uuid */ |
| 1499 | char sns_if_name[IFNAMSIZ]; /* attached interface name */ |
| 1500 | |
| 1501 | struct netif_stats sns_nifs; /* netif stats */ |
| 1502 | } __attribute__((aligned(64))); |
| 1503 | |
| 1504 | /* |
| 1505 | * Skywalk flowswitch stats |
| 1506 | * Output: Array of struct sk_stats_flow_switch entry (per fsw nexus instance). |
| 1507 | */ |
| 1508 | #define SK_STATS_FLOW_SWITCH "kern.skywalk.stats.flow_switch" |
| 1509 | struct sk_stats_flow_switch { |
| 1510 | uuid_t sfs_nx_uuid; /* nexus fsw instance uuid */ |
| 1511 | char sfs_if_name[IFNAMSIZ]; /* attached interface name */ |
| 1512 | |
| 1513 | struct fsw_stats sfs_fsws; /* flowswitch stats */ |
| 1514 | } __attribute__((aligned(64))); |
| 1515 | |
| 1516 | /* |
* Skywalk userstack stats
*
* With Skywalk, traditional kernel-space stacks like tcp/udp/ip/ip6 are moved
* into userspace processes. Along with this, stack statistics are kept per
* process in shared memory described by a Skywalk channel, i.e. a memory
* region mapped from a kernel nexus into the userspace process.
| 1523 | * |
| 1524 | * Output: Array of struct sk_stats_userstack (per process, per nexus |
* instance). Entries with sus_owner_pid == 0 are reserved for nexus
| 1526 | * stats collected from closed nexus ports. |
| 1527 | */ |
| 1528 | #define SK_STATS_USERSTACK "kern.skywalk.stats.userstack" |
| 1529 | struct sk_stats_userstack { |
| 1530 | uuid_t sus_nx_uuid; /* nexus instance uuid */ |
| 1531 | char sus_if_name[IFNAMSIZ]; /* attached interface name */ |
| 1532 | pid_t sus_owner_pid; /* owner process */ |
| 1533 | |
| 1534 | struct ip_stats sus_ip; /* process ip stats */ |
| 1535 | struct ip6_stats sus_ip6; /* process ip6 stats */ |
| 1536 | struct tcp_stats sus_tcp; /* process tcp stats */ |
| 1537 | struct udp_stats sus_udp; /* process udp stats */ |
| 1538 | struct quic_stats sus_quic; /* process quic stats */ |
| 1539 | } __attribute__((aligned(64))); |
| 1540 | |
| 1541 | /* |
| 1542 | * Skywalk flow advisory table dump |
| 1543 | */ |
| 1544 | struct sk_stats_flow_adv_ent { |
| 1545 | uuid_t sfae_flow_id; /* flow ID */ |
| 1546 | uint32_t sfae_flags; /* flags, FLOWADVF_* */ |
| 1547 | }; |
| 1548 | |
| 1549 | #define SK_STATS_FLOW_ADV "kern.skywalk.stats.flow_adv" |
| 1550 | struct sk_stats_flow_adv { |
| 1551 | uuid_t sfa_nx_uuid; /* nexus instance uuid */ |
| 1552 | char sfa_if_name[IFNAMSIZ]; /* attached interface name */ |
| 1553 | pid_t sfa_owner_pid; /* owner process */ |
| 1554 | |
| 1555 | uint32_t sfa_entries_count; /* number of flow adv entries */ |
| 1556 | struct sk_stats_flow_adv_ent sfa_entries[0]; /* flow adv entries */ |
| 1557 | }; |
| 1558 | |
| 1559 | /* |
| 1560 | * struct netns_ctl_dump_header is used for sysctl 'kern.skywalk.stats.netns' |
| 1561 | * which returns a buffer containing the contents of every netns namespace. |
* The buffer is formatted as a series of headers, each immediately followed by
| 1563 | * some number of records: |
| 1564 | * { struct netns_ctl_dump_header h, |
| 1565 | * struct netns_ctl_dump_record r[h->ncdh_n_records] } * total_namespaces |
| 1566 | */ |
| 1567 | #define SK_STATS_NETNS "kern.skywalk.stats.netns" |
struct netns_ctl_dump_header {
	union {
		uint32_t ncdh_addr[4];
		struct in_addr ncdh_inaddr;
		struct in6_addr ncdh_in6addr;
	};
	uint8_t ncdh_family;
	uint8_t ncdh_proto;
	uint32_t ncdh_n_records;
	/*
	 * In a 'kern.skywalk.stats.netns' response, followed by
	 * {ncdh_n_records} struct netns_ctl_dump_records
	 */
} __attribute__((aligned(32)));
| 1582 | |
| 1583 | struct netns_ctl_dump_record { |
| 1584 | in_port_t ncdr_port; |
| 1585 | in_port_t ncdr_port_end; |
| 1586 | uint32_t ncdr_skywalk_refs; |
| 1587 | uint32_t ncdr_bsd_refs; |
| 1588 | uint32_t ncdr_pf_refs; |
| 1589 | uint32_t ncdr_listener_refs; |
| 1590 | } __attribute__((aligned(32))); |
| 1591 | |
| 1592 | #define NS_DUMP_SIZE(records) (sizeof (struct netns_ctl_dump_header) + \ |
| 1593 | records * sizeof (struct netns_ctl_dump_record)) |
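/*
 * Consumer sketch for the layout described above (illustrative only;
 * "buf"/"len" stand for the bytes returned for SK_STATS_NETNS):
 *
 *	uint8_t *buf;
 *	size_t len, off = 0;
 *
 *	while (off < len) {
 *		struct netns_ctl_dump_header *h =
 *		    (struct netns_ctl_dump_header *)(void *)(buf + off);
 *		struct netns_ctl_dump_record *r =
 *		    (struct netns_ctl_dump_record *)(void *)(h + 1);
 *		for (uint32_t i = 0; i < h->ncdh_n_records; i++) {
 *			// r[i].ncdr_port .. r[i].ncdr_port_end, refcounts
 *		}
 *		off += NS_DUMP_SIZE(h->ncdh_n_records);
 *	}
 */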
| 1594 | |
| 1595 | #define SK_STATS_PROTONS "kern.skywalk.stats.protons" |
| 1596 | struct sk_stats_protons_token { |
| 1597 | uint8_t spt_protocol; |
| 1598 | uint32_t spt_refcnt; |
| 1599 | pid_t spt_pid; |
| 1600 | pid_t spt_epid; |
| 1601 | }; |
| 1602 | |
| 1603 | /* |
| 1604 | * struct sk_stats_flowidns_header is used for |
* sysctl 'kern.skywalk.stats.flowidns', which returns a buffer containing
* the contents of every flowid namespace.
* The buffer is formatted as a series of headers, each immediately followed by
| 1608 | * some number of records: |
| 1609 | * { struct sk_stats_flowidns_header h, |
| 1610 | * struct sk_stats_flowidns_record r[h->sfh_nrecords] } * num_flow_domains |
| 1611 | */ |
| 1612 | #define SK_STATS_FLOWIDNS "kern.skywalk.stats.flowidns" |
struct sk_stats_flowidns_header {
	uint64_t sfh_nallocs;
	uint64_t sfh_nreleases;
	uint64_t sfh_ncollisions;
	uint32_t sfh_domain;
	uint32_t sfh_nrecords;
	/*
	 * In a 'kern.skywalk.stats.flowidns' response, followed by
	 * {sfh_nrecords} struct sk_stats_flowidns_record
	 */
} __attribute__((aligned(32)));
| 1624 | |
| 1625 | /* valid values for sfh_domain */ |
| 1626 | #define SFH_DOMAIN_IPSEC 0 |
| 1627 | #define SFH_DOMAIN_FLOWSWITCH 1 |
| 1628 | #define SFH_DOMAIN_INPCB 2 |
| 1629 | #define SFH_DOMAIN_PF 3 |
| 1630 | |
| 1631 | |
| 1632 | struct sk_stats_flowidns_record { |
| 1633 | union { |
| 1634 | uint32_t _addr[4]; |
| 1635 | struct in_addr _v4; |
| 1636 | struct in6_addr _v6; |
| 1637 | } sfr_laddr; |
| 1638 | union { |
| 1639 | uint32_t _addr[4]; |
| 1640 | struct in_addr _v4; |
| 1641 | struct in6_addr _v6; |
| 1642 | } sfr_raddr; |
| 1643 | union { |
| 1644 | struct { |
| 1645 | uint16_t _lport; |
| 1646 | uint16_t _rport; |
| 1647 | } sfr_ports; |
| 1648 | uint32_t sfr_spi; |
| 1649 | uint32_t sfr_protoid; |
| 1650 | }; |
| 1651 | uint32_t sfr_flowid; |
| 1652 | uint8_t sfr_ipproto; |
| 1653 | uint8_t sfr_af; |
| 1654 | } __attribute__((aligned(32))); |
| 1655 | |
| 1656 | #define sfr_laddr_v4 sfr_laddr._v4 |
| 1657 | #define sfr_laddr_v6 sfr_laddr._v6 |
| 1658 | #define sfr_raddr_v4 sfr_raddr._v4 |
| 1659 | #define sfr_raddr_v6 sfr_raddr._v6 |
| 1660 | #define sfr_lport sfr_ports._lport |
| 1661 | #define sfr_rport sfr_ports._rport |
| 1662 | |
| 1663 | |
| 1664 | #define FLOWIDNS_BUFFER_SIZE(_records) \ |
| 1665 | (sizeof (struct sk_stats_flowidns_header) + \ |
| 1666 | _records * sizeof (struct sk_stats_flowidns_record)) |
| 1667 | |
| 1668 | typedef enum { |
| 1669 | /* |
| 1670 | * The following are user task mappable. |
| 1671 | */ |
| 1672 | SREG_GUARD_HEAD = 0, /* leading guard page(s) */ |
| 1673 | SREG_SCHEMA, /* channel layout */ |
| 1674 | SREG_RING, /* rings */ |
| 1675 | SREG_BUF_DEF, /* Default rx/tx buffers */ |
| 1676 | SREG_BUF_LARGE, /* Large rx/tx buffers */ |
| 1677 | SREG_RXBUF_DEF, /* Default rx only buffers */ |
| 1678 | SREG_RXBUF_LARGE, /* Large rx only buffers */ |
| 1679 | SREG_TXBUF_DEF, /* Default tx only buffers */ |
| 1680 | SREG_TXBUF_LARGE, /* Large tx only buffers */ |
| 1681 | SREG_UMD, /* userland metadata */ |
| 1682 | SREG_TXAUSD, /* tx/alloc user slot descriptors */ |
| 1683 | SREG_RXFUSD, /* rx/free user slot descriptors */ |
| 1684 | SREG_UBFT, /* userland buflet metadata */ |
| 1685 | SREG_USTATS, /* statistics */ |
| 1686 | SREG_FLOWADV, /* flow advisories */ |
| 1687 | SREG_NEXUSADV, /* nexus advisories */ |
| 1688 | SREG_SYSCTLS, /* sysctl */ |
| 1689 | SREG_GUARD_TAIL, /* trailing guard page(s) */ |
| 1690 | |
| 1691 | /* |
| 1692 | * The following are NOT user task mappable. |
| 1693 | */ |
| 1694 | SREG_KMD, /* rx/tx kernel metadata */ |
| 1695 | SREG_RXKMD, /* rx only kernel metadata */ |
| 1696 | SREG_TXKMD, /* tx only kernel metadata */ |
| 1697 | SREG_KBFT, /* rx/tx kernel buflet metadata */ |
| 1698 | SREG_RXKBFT, /* rx only kernel buflet metadata */ |
| 1699 | SREG_TXKBFT, /* tx only kernel buflet metadata */ |
| 1700 | SREG_TXAKSD, /* tx/alloc kernel slot descriptors */ |
| 1701 | SREG_RXFKSD, /* rx/free kernel slot descriptors */ |
| 1702 | SREG_KSTATS, /* kernel statistics snapshot */ |
SREG_INTRINSIC, /* intrinsic objects */
| 1704 | |
| 1705 | SREG_MAX /* max */ |
| 1706 | } sk_stats_region_id_t; |
| 1707 | |
| 1708 | #define SK_STATS_REGION "kern.skywalk.stats.region" |
| 1709 | struct sk_stats_region { |
| 1710 | /* |
| 1711 | * Region properties. |
| 1712 | */ |
| 1713 | char sreg_name[64]; /* region name */ |
| 1714 | uuid_t sreg_uuid; /* region uuid */ |
| 1715 | sk_stats_region_id_t sreg_id; /* region ID */ |
| 1716 | uint32_t sreg_mode; /* region mode flags */ |
| 1717 | |
| 1718 | /* |
| 1719 | * Region parameters. |
| 1720 | */ |
| 1721 | uint64_t sreg_r_seg_size; /* requested seg size */ |
| 1722 | uint64_t sreg_c_seg_size; /* configured seg size */ |
| 1723 | uint64_t sreg_seg_cnt; /* number of segments */ |
| 1724 | uint64_t sreg_seg_objs; /* # of objects per segment */ |
| 1725 | uint64_t sreg_r_obj_size; /* requested obj size */ |
| 1726 | uint64_t sreg_r_obj_cnt; /* requested obj count */ |
| 1727 | uint64_t sreg_c_obj_size; /* configured obj size */ |
| 1728 | uint64_t sreg_c_obj_cnt; /* configured obj count */ |
| 1729 | uint64_t sreg_align; /* object alignment */ |
| 1730 | uint64_t sreg_max_frags; /* max number of buflets */ |
| 1731 | |
| 1732 | /* |
| 1733 | * Region statistics. |
| 1734 | */ |
| 1735 | uint64_t sreg_meminuse; /* memory in use */ |
| 1736 | uint64_t sreg_w_meminuse; /* wired memory in use */ |
| 1737 | uint64_t sreg_memtotal; /* total memory in region */ |
| 1738 | uint64_t sreg_seginuse; /* total unfreed segments */ |
| 1739 | uint64_t sreg_rescale; /* # of hash table rescales */ |
| 1740 | uint64_t sreg_hash_size; /* size of hash table */ |
| 1741 | uint64_t sreg_alloc; /* number of allocations */ |
| 1742 | uint64_t sreg_free; /* number of frees */ |
| 1743 | }; |
| 1744 | |
| 1745 | /* valid values for sreg_mode */ |
| 1746 | #define SREG_MODE_NOREDIRECT 0x1 /* unaffected by defunct */ |
| 1747 | #define SREG_MODE_MMAPOK 0x2 /* can be mapped to user task */ |
| 1748 | #define SREG_MODE_KREADONLY 0x4 /* kernel read-only */ |
| 1749 | #define SREG_MODE_UREADONLY 0x8 /* if user map, map it read-only */ |
| 1750 | #define SREG_MODE_PERSISTENT 0x10 /* memory stays non-volatile */ |
| 1751 | #define SREG_MODE_MONOLITHIC 0x20 /* monolithic region */ |
| 1752 | #define SREG_MODE_NOMAGAZINES 0x40 /* disable magazines layer */ |
| 1753 | #define SREG_MODE_NOCACHE 0x80 /* caching-inhibited */ |
| 1754 | #define SREG_MODE_SEGPHYSCONTIG 0x100 /* phys. contiguous segment */ |
| 1755 | #define SREG_MODE_SHAREOK 0x200 /* allow object sharing */ |
| 1756 | #define SREG_MODE_IODIR_IN 0x400 /* I/O direction In */ |
| 1757 | #define SREG_MODE_IODIR_OUT 0x800 /* I/O direction Out */ |
| 1758 | #define SREG_MODE_GUARD 0x1000 /* guard pages region */ |
| 1759 | #define SREG_MODE_PUREDATA 0x2000 /* purely data; no pointers */ |
| 1760 | #define SREG_MODE_PSEUDO 0x4000 /* external backing store */ |
#define SREG_MODE_THREADSAFE 0x8000 /* thread-safe backing store */
| 1762 | #define SREG_MODE_SLAB (1U << 30) /* backend for slab layer */ |
| 1763 | #define SREG_MODE_MIRRORED (1U << 31) /* controlled by another region */ |
| 1764 | |
| 1765 | #define SREG_MODE_BITS \ |
| 1766 | "\020\01NOREDIRECT\02MMAPOK\03KREADONLY\04UREADONLY" \ |
| 1767 | "\05PERSISTENT\06MONOLITHIC\07NOMAGAZINES\10NOCACHE" \ |
| 1768 | "\11SEGPHYSCONTIG\012SHAREOK\013IODIR_IN\014IODIR_OUT" \ |
| 1769 | "\015GUARD\016PUREDATA\017PSEUDO\020THREADSAFE\037SLAB" \ |
| 1770 | "\040MIRRORED" |
| 1771 | |
| 1772 | typedef enum { |
| 1773 | SAR_TYPE_NEXUS, |
| 1774 | SAR_TYPE_NECP, |
| 1775 | SAR_TYPE_SYSTEM, |
| 1776 | } sk_stats_arena_type_t; |
| 1777 | |
| 1778 | #define SK_STATS_ARENA "kern.skywalk.stats.arena" |
| 1779 | struct sk_stats_arena { |
| 1780 | char sar_name[64]; |
| 1781 | sk_stats_arena_type_t sar_type; |
| 1782 | uint64_t sar_mapsize; |
| 1783 | uuid_t sar_regions_uuid[SREG_MAX]; |
| 1784 | #define SK_STATS_ARENA_MAPPED_PID_MAX 8 |
| 1785 | pid_t sar_mapped_pids[SK_STATS_ARENA_MAPPED_PID_MAX]; |
| 1786 | }; |
| 1787 | |
| 1788 | #define SK_STATS_CACHE "kern.skywalk.stats.cache" |
| 1789 | struct sk_stats_cache { |
| 1790 | /* |
| 1791 | * Cache parameters. |
| 1792 | */ |
| 1793 | char sca_name[64]; /* cache name */ |
| 1794 | uuid_t sca_uuid; /* cache uuid */ |
| 1795 | uuid_t sca_ruuid; /* backing region uuid */ |
| 1796 | uint32_t sca_mode; /* cache mode flags */ |
| 1797 | uint64_t sca_bufsize; /* object size */ |
| 1798 | uint64_t sca_objsize; /* actual obj size in slab */ |
| 1799 | uint64_t sca_chunksize; /* bufsize + alignment */ |
| 1800 | uint64_t sca_slabsize; /* size of a slab */ |
| 1801 | uint64_t sca_bufalign; /* buffer alignment */ |
| 1802 | uint64_t sca_objalign; /* object alignment */ |
| 1803 | |
| 1804 | /* |
| 1805 | * Per-CPU caches statistics. |
| 1806 | */ |
| 1807 | uint64_t sca_cpu_mag_size; /* current magazine size */ |
| 1808 | uint64_t sca_cpu_mag_resize; /* # of magazine resizes */ |
| 1809 | uint64_t sca_cpu_mag_purge; /* # of magazine purges */ |
| 1810 | uint64_t sca_cpu_mag_reap; /* # of magazine reaps */ |
| 1811 | uint64_t sca_depot_full; /* # of full magazines */ |
| 1812 | uint64_t sca_depot_empty; /* # of empty magazines */ |
| 1813 | uint64_t sca_depot_ws_zero; /* # of working set flushes */ |
| 1814 | uint64_t sca_depot_contention_factor; /* contention factor */ |
| 1815 | |
| 1816 | uint64_t sca_cpu_rounds; /* current rounds in all cpu */ |
uint64_t sca_cpu_prounds; /* rounds in all cpu previously loaded magazines */
| 1818 | |
| 1819 | /* |
| 1820 | * Slab statistics. |
| 1821 | */ |
| 1822 | uint64_t sca_sl_create; /* slab creates */ |
| 1823 | uint64_t sca_sl_destroy; /* slab destroys */ |
| 1824 | uint64_t sca_sl_alloc; /* slab layer allocations */ |
| 1825 | uint64_t sca_sl_free; /* slab layer frees */ |
| 1826 | uint64_t sca_sl_alloc_fail; /* total failed allocations */ |
| 1827 | uint64_t sca_sl_partial; /* # of partial slabs */ |
| 1828 | uint64_t sca_sl_empty; /* # of empty slabs */ |
| 1829 | uint64_t sca_sl_bufinuse; /* total unfreed buffers */ |
| 1830 | uint64_t sca_sl_rescale; /* # of hash table rescales */ |
| 1831 | uint64_t sca_sl_hash_size; /* size of hash table */ |
| 1832 | }; |
| 1833 | |
| 1834 | /* valid values for sca_mode */ |
| 1835 | #define SCA_MODE_NOMAGAZINES 0x00000001 /* disable magazines layer */ |
| 1836 | #define SCA_MODE_AUDIT 0x00000002 /* audit transactions */ |
| 1837 | #define SCA_MODE_NOREDIRECT 0x00000004 /* unaffected by defunct */ |
| 1838 | #define SCA_MODE_BATCH 0x00000008 /* supports batch alloc/free */ |
| 1839 | #define SCA_MODE_DYNAMIC 0x00000010 /* enable magazine resizing */ |
| 1840 | #define SCA_MODE_CLEARONFREE 0x00000020 /* zero-out upon slab free */ |
| 1841 | #define SCA_MODE_PSEUDO 0x00000040 /* external backing store */ |
| 1842 | #define SCA_MODE_RECLAIM 0x00000080 /* aggressive memory reclaim */ |
| 1843 | |
| 1844 | #define SCA_MODE_BITS \ |
| 1845 | "\020\01NOMAGAZINES\02AUDIT\03NOREDIRECT\04BATCH\05DYNAMIC" \ |
| 1846 | "\06CLEARONFREE\07PSEUDO\10RECLAIM" |
| 1847 | |
| 1848 | #endif /* PRIVATE || BSD_KERNEL_PRIVATE */ |
| 1849 | #endif /* !_SKYWALK_OS_STATS_H_ */ |
| 1850 | |