diff --git a/.github/workflows/schema_compat.yml b/.github/workflows/schema_compat.yml new file mode 100644 index 0000000000..a7117230fc --- /dev/null +++ b/.github/workflows/schema_compat.yml @@ -0,0 +1,21 @@ +name: On Pull Request Schemas +on: + merge_group: + pull_request: + types: [synchronize, opened, reopened, ready_for_review] + paths: + - 'src/disco/metrics/schema/**' +concurrency: + group: on-pull-request_${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + schema_compat: + timeout-minutes: 1 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: 'Check schemas are compatible' + run: python3 check_schemas.py + working-directory: src/disco/metrics diff --git a/src/disco/metrics/Local.mk b/src/disco/metrics/Local.mk index 7c2db34448..8acdd4bfda 100644 --- a/src/disco/metrics/Local.mk +++ b/src/disco/metrics/Local.mk @@ -1,2 +1,2 @@ -$(call add-hdrs,fd_prometheus.h fd_metrics.h) -$(call add-objs,fd_prometheus fd_metrics,fd_disco) +$(call add-hdrs,fd_prometheus.h fd_metrics.h generated/fd_event.h generated/fd_event_metrics.h generated/fd_metric_event_snap.h) +$(call add-objs,fd_prometheus fd_metrics generated/fd_event generated/fd_metric_event_snap,fd_disco) diff --git a/src/disco/metrics/check_schemas.py b/src/disco/metrics/check_schemas.py new file mode 100644 index 0000000000..14d46e9b02 --- /dev/null +++ b/src/disco/metrics/check_schemas.py @@ -0,0 +1,60 @@ +import json +import os +import subprocess +from pathlib import Path +from typing import Dict +from generate.event_types import Event, check_schema, validate_compatability + +def get_files_from_commit(commit: str, directory: Path): + result = subprocess.run( + ['git', 'ls-tree', '--name-only', commit, directory], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True + ) + return result.stdout.decode('utf-8').splitlines() + +def load_file_content(commit: str, file_path: str): + result = 
subprocess.run( + ['git', 'show', f'{commit}:{file_path}'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True + ).stdout.decode('utf-8') + return json.loads(result) + +def load_current_file_content(file_path: str): + with open(file_path, 'r') as file: + return json.load(file) + +def check_schema_compatability(ref: str): + directory = Path(__file__).parent / 'schema' + + parent_commit = subprocess.run( + ['git', 'rev-parse', f'origin/{ref}'], + stdout=subprocess.PIPE, + check=True + ).stdout.strip().decode('utf-8') + + parent_schemas: Dict[str, Event] = {} + parent_files = get_files_from_commit(parent_commit, directory) + for file in parent_files: + if file.endswith('.json'): + parent_schemas[file[:-5]] = load_file_content(parent_commit, os.path.join(directory, file)) + + current_schemas: Dict[str, Event] = {} + working_directory_files = [ + os.path.relpath(os.path.join(root, file), start=directory) + for root, _, files in os.walk(directory) + for file in files + ] + for file in working_directory_files: + if file.endswith('.json'): + current_schemas[file[:-5]] = load_current_file_content(os.path.join(directory, file)) + + check_schema(parent_schemas) + check_schema(current_schemas) + validate_compatability(parent_schemas, current_schemas) + +if __name__ == "__main__": + check_schema_compatability(os.environ['GITHUB_BASE_REF']) diff --git a/src/disco/metrics/gen_metrics.py b/src/disco/metrics/gen_metrics.py index 0c9e889983..30912db217 100644 --- a/src/disco/metrics/gen_metrics.py +++ b/src/disco/metrics/gen_metrics.py @@ -1,14 +1,53 @@ -from generate.types import * -from generate.write_codegen import write_codegen +import copy +import io +import os + +from generate.metric_types import * +from generate.event_types import * +from generate.write_codegen import write_codegen, write_event_snap_codegen from generate.write_docs import write_docs +from generate.write_metric_event_schema import write_metrics_sample_schema +from generate.write_events_codegen 
import write_event_formatter from pathlib import Path def main(): metrics = parse_metrics(Path('metrics.xml').read_text()) metrics.layout() + schema_before: Dict[str, Any] = {} + for file in os.listdir(Path(__file__).parent / 'schema'): + if file.endswith('.json'): + with open(Path(__file__).parent / 'schema' / file, 'r') as f: + data = json.load(f) + + schema_before[file[:-5]] = Event(data) + + # Check that metrics event schema which goes up to clickhouse is + # still backwards compatible. + event_new = io.StringIO() + write_metrics_sample_schema(metrics, event_new) + schema_after = copy.deepcopy(schema_before) + schema_after['metrics_sample'] = Event(json.loads(event_new.getvalue())) + + check_schema(schema_before) + check_schema(schema_after) + validate_compatability(schema_before, schema_after) + + with open(Path(__file__).parent / 'schema/metrics_sample.json', 'w') as f: + f.write(event_new.getvalue()) + + # Now code generate the metrics structs and accessors. write_codegen(metrics) + + # Now code generate documentation of the metrics. write_docs(metrics) + # Now code generate the transformer that turns the metrics structs + # into a metrics event for remote reporting. + write_event_snap_codegen(metrics) + + # Now code generate a JSON formatter for generic event types. 
+ write_event_formatter(schema_after) + if __name__ == '__main__': main() diff --git a/src/disco/metrics/generate/event_types.py b/src/disco/metrics/generate/event_types.py new file mode 100644 index 0000000000..2494f0c174 --- /dev/null +++ b/src/disco/metrics/generate/event_types.py @@ -0,0 +1,242 @@ +import re +import os +import subprocess +import json +from pathlib import Path +from enum import Enum +from typing import Dict, List, Any + +class ClickHouseType(Enum): + DATETIME_64_9 = 0 + STRING = 1 + ENUM_8 = 2 + LOW_CARDINALITY_STRING = 3 + UINT16 = 4 + UINT32 = 5 + UINT64 = 6 + TUPLE = 7 + NESTED = 8 + IPV6 = 9 + +class Field: + def __init__(self, name: str, field: Any): + self.name = name + + if not 'description' in field: + raise ValueError(f"Field `{name}` is missing description") + + self.description: str = field['description'] + + if self.name.strip() == '': + raise ValueError("Field name is empty") + + if self.description.strip() == '': + raise ValueError(f"Field `{name}` has empty description") + + self.deprecated: bool = False + if 'deprecated' in field: + self.deprecated = field['deprecated'] + + self.server_only = False + if 'server_only' in field: + self.server_only = field['server_only'] + + if field['type'] == "DateTime64(9)": + self.type = ClickHouseType.DATETIME_64_9 + elif field['type'] == "String" or field['type'] == "LowCardinality(String)": + if field['type'] == "String": + self.type = ClickHouseType.STRING + else: + self.type = ClickHouseType.LOW_CARDINALITY_STRING + + self.max_length = None + if 'max_length' in field: + self.max_length = int(field['max_length']) + if field['max_length'] < 1: + raise ValueError(f"String field `{name}` has max_length less than 1") + elif field['type'] == "Enum8": + self.type = ClickHouseType.ENUM_8 + + self.variants: Dict[str, int] = {} + for (variant, value) in field['variants'].items(): + if variant in self.variants: + raise ValueError(f"Duplicate variant {variant}") + + if not 
re.match(r'^[a-z][a-z0-9]*(_[a-z0-9]+)*$', variant): + raise ValueError(f"Enum `{name}` variant `{variant}` must contain only lowercase characters and underscores") + + if value < -128: + raise ValueError(f"Enum `{name}` variant `{variant}` has value less than -128") + if value > 127: + raise ValueError(f"Enum `{name}` variant `{variant}` has value greater than 127") + + self.variants[variant] = value + if len(self.variants) == 0: + raise ValueError(f"Enum `{name}` has no variants") + elif field['type'] == "UInt16": + self.type = ClickHouseType.UINT16 + elif field['type'] == "UInt32": + self.type = ClickHouseType.UINT32 + elif field['type'] == "UInt64": + self.type = ClickHouseType.UINT64 + elif field['type'] == "IPv6": + self.type = ClickHouseType.IPV6 + elif field['type'] == 'Tuple': + self.type = ClickHouseType.TUPLE + + self.sub_fields: Dict[str, Field] = {} + for sub_field in field['fields']: + if sub_field in self.sub_fields: + raise ValueError(f"Duplicate sub-field {sub_field}") + + self.sub_fields[sub_field] = Field(sub_field, field['fields'][sub_field]) + elif field['type'] == "Nested": + self.type = ClickHouseType.NESTED + + self.sub_fields: Dict[str, Field] = {} + for sub_field in field['fields']: + if sub_field in self.sub_fields: + raise ValueError(f"Duplicate sub-field {sub_field}") + + self.sub_fields[sub_field] = Field(sub_field, field['fields'][sub_field]) + else: + raise ValueError(f"Unknown field type {field['type']}") + +class Event: + def __init__(self, json: Any): + self.name: str = json['name'] + self.id: int = json['id'] + self.description: str = json['description'] + self.deprecated: bool = False + if 'deprecated' in json: + self.deprecated = json['deprecated'] + + if not re.match(r'^[a-z][a-z0-9]*(_[a-z0-9]+)*$', self.name): + raise ValueError(f"Event `{self.name}` must contain only lowercase characters and underscores") + + if self.name.strip() == '': + raise ValueError("Event name is empty") + + if self.description.strip() == '': + raise 
ValueError(f"Event `{self.name}` has empty description") + + self.fields: Dict[str, Field] = {} + for (name, field) in json['fields'].items(): + if name in self.fields: + raise ValueError(f"Duplicate field {name}") + + self.fields[name] = Field(name, field) + +def validate_field_compatiblity(before: Field, after: Field): + if before.deprecated and not after.deprecated: + raise ValueError(f"Field `{before.name}` is no longer deprecated") + + if before.type != after.type: + raise ValueError(f"Field `{before.name}` has changed type") + + if before.type == ClickHouseType.ENUM_8: + for variant in before.variants: + if variant not in after.variants: + raise ValueError(f"Field `{before.name}` has missing variant `{variant}`") + + if before.variants[variant] != after.variants[variant]: + raise ValueError(f"Field `{before.name}` has changed value for variant `{variant}`") + elif before.type == ClickHouseType.TUPLE: + for sub_field in before.sub_fields: + if sub_field not in after.sub_fields: + raise ValueError(f"Sub-field `{sub_field}` in field `{before.name}` is missing in new schema") + + validate_field_compatiblity(before.sub_fields[sub_field], after.sub_fields[sub_field]) + elif before.type == ClickHouseType.NESTED: + for sub_field in before.sub_fields: + if sub_field not in after.sub_fields: + raise ValueError(f"Sub-field `{sub_field}` in field `{before.name}` is missing in new schema") + + validate_field_compatiblity(before.sub_fields[sub_field], after.sub_fields[sub_field]) + +def validate_compatability(before: Dict[str, Event], after: Dict[str, Event]): + for event in before: + if event not in after: + raise ValueError(f"Event `{event}` is missing in new schema") + + # id changes don't matter, as they don't make it up to + # clickhouse + + if before[event].deprecated and not after[event].deprecated: + raise ValueError(f"Event `{event}` is no longer deprecated") + + for field in before[event].fields: + if field not in after[event].fields: + raise ValueError(f"Field 
`{field}` in event `{event}` is missing in new schema") + + validate_field_compatiblity(before[event].fields[field], after[event].fields[field]) + +def check_field(is_nested: bool, field: Field): + if field.name.strip() == '': + raise ValueError("Field name is empty") + + if field.description.strip() == '': + raise ValueError(f"Field `{field.name}` has empty description") + + if field.type == ClickHouseType.ENUM_8: + if len(field.variants) == 0: + raise ValueError(f"Enum `{field.name}` has no variants") + + for variant in field.variants: + if not re.match(r'^[a-z][a-z0-9]*(_[a-z0-9]+)*$', variant): + raise ValueError(f"Enum `{field.name}` variant `{variant}` must contain only lowercase characters and underscores") + + if field.variants[variant] < -128: + raise ValueError(f"Enum `{field.name}` variant `{variant}` has value less than -128") + if field.variants[variant] > 127: + raise ValueError(f"Enum `{field.name}` variant `{variant}` has value greater than 127") + elif field.type == ClickHouseType.TUPLE: + for sub_field in field.sub_fields: + check_field(is_nested, field.sub_fields[sub_field]) + elif field.type == ClickHouseType.NESTED: + if is_nested: + raise ValueError(f"Nested fields are not allowed in nested fields") + + for sub_field in field.sub_fields: + check_field(True, field.sub_fields[sub_field]) + +def check_schema(schema: Dict[str, Event]): + for (name, event) in schema.items(): + if event.name != name: + raise ValueError(f"Event name `{event.name}` does not match the key `{name}`") + + if event.name != 'common' and not re.match(r'^[a-z]+_[a-z]+$', event.name): + raise ValueError(f"Event name `{event.name}` must contain only lowercase characters, and be in the format `{{category}}_{{name}}`") + + if not 'common' in schema: + raise ValueError("Missing `common` event") + + for event in schema: + for other in schema: + if event == other: + continue + + if schema[event].id == schema[other].id: + raise ValueError(f"Event `{event}` and `{other}` have the 
same id") + + ids: List[int] = [] + for event in schema: + if event == 'common': + continue + + ids.append(schema[event].id) + + ids.sort() + for i in range(1, len(ids)): + if ids[i] - ids[i - 1] != 1: + raise ValueError(f"Missing id between {ids[i - 1]} and {ids[i]}") + + for event in schema: + if event == 'common': + continue + + for field in schema[event].fields.values(): + if field.name in schema['common'].fields: + raise ValueError(f"Field `{field.name}` in event `{event}` is also present in `common` event") + + check_field(False, field) diff --git a/src/disco/metrics/generate/generated/fd_event.c b/src/disco/metrics/generate/generated/fd_event.c new file mode 100644 index 0000000000..8366d3da7c --- /dev/null +++ b/src/disco/metrics/generate/generated/fd_event.c @@ -0,0 +1,919 @@ +#include "fd_event.h" + +#pragma GCC diagnostic ignored "-Woverlength-strings" + +static long +format_common( fd_event_common_t const * event, + char * buffer, + ulong buffer_len ) { + + ulong off = 0UL; + ulong printed; + int success; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"timestamp\":%ld," + "\"identity\":\"%.44s\"," + "\"cluster\":\"%s\"," + "\"version\":\"%.11s\"," + "\"client\":\"%s\"," + "\"os\":\"%s\"," + "\"instance_id\":%lu," + "\"machine_id\":%lu," + "\"boot_id\":%lu," + "}", + event->timestamp, + event->identity, + fd_event_common_cluster_str( event->cluster ), + event->version, + fd_event_common_client_str( event->client ), + fd_event_common_os_str( event->os ), + event->instance_id, + event->machine_id, + event->boot_id ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + return (long)off; +} + +static long +format_general_boot( fd_event_general_boot_t const * event, + ulong event_len, + char * buffer, + ulong buffer_len ) { + if( FD_UNLIKELY( event->topology_off+event->topology_len>event_len ) ) return FD_EVENT_FORMAT_INVALID; + if( FD_UNLIKELY( 
event->configuration_off+event->configuration_len>event_len ) ) return FD_EVENT_FORMAT_INVALID; + if( FD_UNLIKELY( event->meminfo_off+event->meminfo_len>event_len ) ) return FD_EVENT_FORMAT_INVALID; + if( FD_UNLIKELY( event->cpuinfo_off+event->cpuinfo_len>event_len ) ) return FD_EVENT_FORMAT_INVALID; + if( FD_UNLIKELY( event->osversion_off+event->osversion_len>event_len ) ) return FD_EVENT_FORMAT_INVALID; + + ulong off = 0UL; + ulong printed; + int success; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"vote_account\":\"%.44s\"," + "\"genesis_hash\":\"%.44s\"," + "\"commit_hash\":\"%.40s\"," + "\"feature_set\":%u," + "\"topology\":\"%.*s\"," + "\"configuration\":\"%.*s\"," + "\"meminfo\":\"%.*s\"," + "\"cpuinfo\":\"%.*s\"," + "\"osversion\":\"%.*s\"" + "}", + event->vote_account, + event->genesis_hash, + event->commit_hash, + event->feature_set, + (int)event->topology_len, ((char*)event)+event->topology_off, + (int)event->configuration_len, ((char*)event)+event->configuration_off, + (int)event->meminfo_len, ((char*)event)+event->meminfo_off, + (int)event->cpuinfo_len, ((char*)event)+event->cpuinfo_off, + (int)event->osversion_len, ((char*)event)+event->osversion_off ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + return (long)off; +} + +static long +format_metrics_sample( fd_event_metrics_sample_t const * event, + ulong event_len, + char * buffer, + ulong buffer_len ) { + + ulong off = 0UL; + ulong printed; + int success; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"reason\":\"%s\",", fd_event_metrics_sample_reason_str( event->reason ) ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"slot\":%lu,", event->slot ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, 
buffer_len-off, &printed, "\"tile\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->tile_off+event->tile_len*sizeof(fd_event_metrics_sample_tile_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; itile_len; i++ ) { + fd_event_metrics_sample_tile_t const * tile = ((fd_event_metrics_sample_tile_t const *)(((char*)event)+event->tile_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"kind\":\"%.20s\"," + "\"kind_id\":%hu," + "\"context_switch_involuntary_count\":%lu," + "\"context_switch_voluntary_count\":%lu," + "\"status\":%lu," + "\"heartbeat\":%lu," + "\"in_backpressure\":%lu," + "\"backpressure_count\":%lu," + "\"regime_duration_nanos\":{" + "\"caught_up_housekeeping\":%lu," + "\"processing_housekeeping\":%lu," + "\"backpressure_housekeeping\":%lu," + "\"caught_up_prefrag\":%lu," + "\"processing_prefrag\":%lu," + "\"backpressure_prefrag\":%lu," + "\"caught_up_postfrag\":%lu," + "\"processing_postfrag\":%lu" + "}" + "}", + tile->kind, + tile->kind_id, + tile->context_switch_involuntary_count, + tile->context_switch_voluntary_count, + tile->status, + tile->heartbeat, + tile->in_backpressure, + tile->backpressure_count, + tile->regime_duration_nanos.caught_up_housekeeping, + tile->regime_duration_nanos.processing_housekeeping, + tile->regime_duration_nanos.backpressure_housekeeping, + tile->regime_duration_nanos.caught_up_prefrag, + tile->regime_duration_nanos.processing_prefrag, + tile->regime_duration_nanos.backpressure_prefrag, + tile->regime_duration_nanos.caught_up_postfrag, + tile->regime_duration_nanos.processing_postfrag ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->tile_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success 
= fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"link\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->link_off+event->link_len*sizeof(fd_event_metrics_sample_link_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ilink_len; i++ ) { + fd_event_metrics_sample_link_t const * link = ((fd_event_metrics_sample_link_t const *)(((char*)event)+event->link_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"kind\":\"%.20s\"," + "\"kind_id\":%hu," + "\"link_kind\":\"%.20s\"," + "\"link_kind_id\":%hu," + "\"consumed_count\":%lu," + "\"consumed_size_bytes\":%lu," + "\"filtered_count\":%lu," + "\"filtered_size_bytes\":%lu," + "\"overrun_polling_count\":%lu," + "\"overrun_polling_frag_count\":%lu," + "\"overrun_reading_count\":%lu," + "\"overrun_reading_frag_count\":%lu," + "\"slow_count\":%lu" + "}", + link->kind, + link->kind_id, + link->link_kind, + link->link_kind_id, + link->consumed_count, + link->consumed_size_bytes, + link->filtered_count, + link->filtered_size_bytes, + link->overrun_polling_count, + link->overrun_polling_frag_count, + link->overrun_reading_count, + link->overrun_reading_frag_count, + link->slow_count ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->link_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"net\":[" ); 
+ if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->net_off+event->net_len*sizeof(fd_event_metrics_sample_net_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; inet_len; i++ ) { + fd_event_metrics_sample_net_t const * net = ((fd_event_metrics_sample_net_t const *)(((char*)event)+event->net_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"received_packets\":%lu," + "\"received_bytes\":%lu," + "\"sent_packets\":%lu," + "\"sent_bytes\":%lu," + "\"xdp_rx_dropped_ring_full\":%lu," + "\"xdp_rx_dropped_other\":%lu," + "\"tx_dropped\":%lu" + "}", + net->received_packets, + net->received_bytes, + net->sent_packets, + net->sent_bytes, + net->xdp_rx_dropped_ring_full, + net->xdp_rx_dropped_other, + net->tx_dropped ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->net_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"quic\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->quic_off+event->quic_len*sizeof(fd_event_metrics_sample_quic_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; iquic_len; i++ ) { + fd_event_metrics_sample_quic_t const * quic = ((fd_event_metrics_sample_quic_t const *)(((char*)event)+event->quic_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"txns_overrun\":%lu," + "\"txn_reasms_started\":%lu," + "\"txn_reasms_active\":%lu," + "\"frags_ok\":%lu," + "\"frags_gap\":%lu," + 
"\"frags_dup\":%lu," + "\"txns_received\":{" + "\"udp\":%lu," + "\"quic_fast\":%lu," + "\"quic_frag\":%lu" + "}," + "\"txns_abandoned\":%lu," + "\"quic_packet_too_small\":%lu," + "\"quic_txn_too_small\":%lu," + "\"quic_txn_too_large\":%lu," + "\"non_quic_packet_too_small\":%lu," + "\"non_quic_packet_too_large\":%lu," + "\"received_packets\":%lu," + "\"received_bytes\":%lu," + "\"sent_packets\":%lu," + "\"sent_bytes\":%lu," + "\"connections_active\":%lu," + "\"connections_created\":%lu," + "\"connections_closed\":%lu," + "\"connections_aborted\":%lu," + "\"connections_timed_out\":%lu," + "\"connections_retried\":%lu," + "\"connection_error_no_slots\":%lu," + "\"connection_error_retry_fail\":%lu," + "\"pkt_crypto_failed\":%lu," + "\"pkt_no_conn\":%lu," + "\"pkt_tx_alloc_fail\":%lu," + "\"handshakes_created\":%lu," + "\"handshake_error_alloc_fail\":%lu," + "\"stream_received_events\":%lu," + "\"stream_received_bytes\":%lu," + "\"received_frames\":{" + "\"unknown\":%lu," + "\"ack\":%lu," + "\"reset_stream\":%lu," + "\"stop_sending\":%lu," + "\"crypto\":%lu," + "\"new_token\":%lu," + "\"stream\":%lu," + "\"max_data\":%lu," + "\"max_stream_data\":%lu," + "\"max_streams\":%lu," + "\"data_blocked\":%lu," + "\"stream_data_blocked\":%lu," + "\"streams_blocked\":%lu," + "\"new_conn_id\":%lu," + "\"retire_conn_id\":%lu," + "\"path_challenge\":%lu," + "\"path_response\":%lu," + "\"conn_close_quic\":%lu," + "\"conn_close_app\":%lu," + "\"handshake_done\":%lu," + "\"ping\":%lu," + "\"padding\":%lu" + "}," + "\"ack_tx\":{" + "\"noop\":%lu," + "\"new\":%lu," + "\"merged\":%lu," + "\"drop\":%lu," + "\"cancel\":%lu" + "}," + "\"frame_fail_parse\":%lu" + "}", + quic->txns_overrun, + quic->txn_reasms_started, + quic->txn_reasms_active, + quic->frags_ok, + quic->frags_gap, + quic->frags_dup, + quic->txns_received.udp, + quic->txns_received.quic_fast, + quic->txns_received.quic_frag, + quic->txns_abandoned, + quic->quic_packet_too_small, + quic->quic_txn_too_small, + 
quic->quic_txn_too_large, + quic->non_quic_packet_too_small, + quic->non_quic_packet_too_large, + quic->received_packets, + quic->received_bytes, + quic->sent_packets, + quic->sent_bytes, + quic->connections_active, + quic->connections_created, + quic->connections_closed, + quic->connections_aborted, + quic->connections_timed_out, + quic->connections_retried, + quic->connection_error_no_slots, + quic->connection_error_retry_fail, + quic->pkt_crypto_failed, + quic->pkt_no_conn, + quic->pkt_tx_alloc_fail, + quic->handshakes_created, + quic->handshake_error_alloc_fail, + quic->stream_received_events, + quic->stream_received_bytes, + quic->received_frames.unknown, + quic->received_frames.ack, + quic->received_frames.reset_stream, + quic->received_frames.stop_sending, + quic->received_frames.crypto, + quic->received_frames.new_token, + quic->received_frames.stream, + quic->received_frames.max_data, + quic->received_frames.max_stream_data, + quic->received_frames.max_streams, + quic->received_frames.data_blocked, + quic->received_frames.stream_data_blocked, + quic->received_frames.streams_blocked, + quic->received_frames.new_conn_id, + quic->received_frames.retire_conn_id, + quic->received_frames.path_challenge, + quic->received_frames.path_response, + quic->received_frames.conn_close_quic, + quic->received_frames.conn_close_app, + quic->received_frames.handshake_done, + quic->received_frames.ping, + quic->received_frames.padding, + quic->ack_tx.noop, + quic->ack_tx.new, + quic->ack_tx.merged, + quic->ack_tx.drop, + quic->ack_tx.cancel, + quic->frame_fail_parse ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->quic_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) 
return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"verify\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->verify_off+event->verify_len*sizeof(fd_event_metrics_sample_verify_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; iverify_len; i++ ) { + fd_event_metrics_sample_verify_t const * verify = ((fd_event_metrics_sample_verify_t const *)(((char*)event)+event->verify_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transaction_parse_failure\":%lu," + "\"transaction_dedup_failure\":%lu," + "\"transaction_verify_failure\":%lu" + "}", + verify->transaction_parse_failure, + verify->transaction_dedup_failure, + verify->transaction_verify_failure ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->verify_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"dedup\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->dedup_off+event->dedup_len*sizeof(fd_event_metrics_sample_dedup_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; idedup_len; i++ ) { + fd_event_metrics_sample_dedup_t const * dedup = ((fd_event_metrics_sample_dedup_t const *)(((char*)event)+event->dedup_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transaction_dedup_failure\":%lu," + "\"gossiped_votes_received\":%lu" + "}", + 
dedup->transaction_dedup_failure, + dedup->gossiped_votes_received ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->dedup_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"resolv\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->resolv_off+event->resolv_len*sizeof(fd_event_metrics_sample_resolv_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; iresolv_len; i++ ) { + fd_event_metrics_sample_resolv_t const * resolv = ((fd_event_metrics_sample_resolv_t const *)(((char*)event)+event->resolv_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"no_bank_drop\":%lu," + "\"lut_resolved\":{" + "\"invalid_lookup_index\":%lu," + "\"account_uninitialized\":%lu," + "\"invalid_account_data\":%lu," + "\"invalid_account_owner\":%lu," + "\"account_not_found\":%lu," + "\"success\":%lu" + "}," + "\"blockhash_expired\":%lu," + "\"blockhash_unknown\":%lu" + "}", + resolv->no_bank_drop, + resolv->lut_resolved.invalid_lookup_index, + resolv->lut_resolved.account_uninitialized, + resolv->lut_resolved.invalid_account_data, + resolv->lut_resolved.invalid_account_owner, + resolv->lut_resolved.account_not_found, + resolv->lut_resolved.success, + resolv->blockhash_expired, + resolv->blockhash_unknown ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->resolv_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( 
!success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"pack\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->pack_off+event->pack_len*sizeof(fd_event_metrics_sample_pack_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ipack_len; i++ ) { + fd_event_metrics_sample_pack_t const * pack = ((fd_event_metrics_sample_pack_t const *)(((char*)event)+event->pack_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"normal_transaction_received\":%lu," + "\"transaction_inserted\":{" + "\"bundle_blacklist\":%lu," + "\"write_sysvar\":%lu," + "\"estimation_fail\":%lu," + "\"duplicate_account\":%lu," + "\"too_many_accounts\":%lu," + "\"too_large\":%lu," + "\"expired\":%lu," + "\"addr_lut\":%lu," + "\"unaffordable\":%lu," + "\"duplicate\":%lu," + "\"priority\":%lu," + "\"nonvote_add\":%lu," + "\"vote_add\":%lu," + "\"nonvote_replace\":%lu," + "\"vote_replace\":%lu" + "}," + "\"metric_timing\":{" + "\"no_txn_no_bank_no_leader_no_microblock\":%lu," + "\"txn_no_bank_no_leader_no_microblock\":%lu," + "\"no_txn_bank_no_leader_no_microblock\":%lu," + "\"txn_bank_no_leader_no_microblock\":%lu," + "\"no_txn_no_bank_leader_no_microblock\":%lu," + "\"txn_no_bank_leader_no_microblock\":%lu," + "\"no_txn_bank_leader_no_microblock\":%lu," + "\"txn_bank_leader_no_microblock\":%lu," + "\"no_txn_no_bank_no_leader_microblock\":%lu," + "\"txn_no_bank_no_leader_microblock\":%lu," + "\"no_txn_bank_no_leader_microblock\":%lu," + "\"txn_bank_no_leader_microblock\":%lu," + "\"no_txn_no_bank_leader_microblock\":%lu," + "\"txn_no_bank_leader_microblock\":%lu," + "\"no_txn_bank_leader_microblock\":%lu," + 
"\"txn_bank_leader_microblock\":%lu" + "}," + "\"transaction_dropped_from_extra\":%lu," + "\"transaction_inserted_to_extra\":%lu," + "\"transaction_inserted_from_extra\":%lu," + "\"transaction_expired\":%lu," + "\"available_transactions\":%lu," + "\"available_vote_transactions\":%lu," + "\"pending_transactions_heap_size\":%lu," + "\"conflicting_transactions\":%lu," + "\"smallest_pending_transaction\":%lu," + "\"microblock_per_block_limit\":%lu," + "\"data_per_block_limit\":%lu," + "\"transaction_schedule\":{" + "\"taken\":%lu," + "\"cu_limit\":%lu," + "\"fast_path\":%lu," + "\"byte_limit\":%lu," + "\"write_cost\":%lu," + "\"slow_path\":%lu" + "}," + "\"cus_consumed_in_block\":%lu," + "\"delete_missed\":%lu," + "\"delete_hit\":%lu" + "}", + pack->normal_transaction_received, + pack->transaction_inserted.bundle_blacklist, + pack->transaction_inserted.write_sysvar, + pack->transaction_inserted.estimation_fail, + pack->transaction_inserted.duplicate_account, + pack->transaction_inserted.too_many_accounts, + pack->transaction_inserted.too_large, + pack->transaction_inserted.expired, + pack->transaction_inserted.addr_lut, + pack->transaction_inserted.unaffordable, + pack->transaction_inserted.duplicate, + pack->transaction_inserted.priority, + pack->transaction_inserted.nonvote_add, + pack->transaction_inserted.vote_add, + pack->transaction_inserted.nonvote_replace, + pack->transaction_inserted.vote_replace, + pack->metric_timing.no_txn_no_bank_no_leader_no_microblock, + pack->metric_timing.txn_no_bank_no_leader_no_microblock, + pack->metric_timing.no_txn_bank_no_leader_no_microblock, + pack->metric_timing.txn_bank_no_leader_no_microblock, + pack->metric_timing.no_txn_no_bank_leader_no_microblock, + pack->metric_timing.txn_no_bank_leader_no_microblock, + pack->metric_timing.no_txn_bank_leader_no_microblock, + pack->metric_timing.txn_bank_leader_no_microblock, + pack->metric_timing.no_txn_no_bank_no_leader_microblock, + 
pack->metric_timing.txn_no_bank_no_leader_microblock, + pack->metric_timing.no_txn_bank_no_leader_microblock, + pack->metric_timing.txn_bank_no_leader_microblock, + pack->metric_timing.no_txn_no_bank_leader_microblock, + pack->metric_timing.txn_no_bank_leader_microblock, + pack->metric_timing.no_txn_bank_leader_microblock, + pack->metric_timing.txn_bank_leader_microblock, + pack->transaction_dropped_from_extra, + pack->transaction_inserted_to_extra, + pack->transaction_inserted_from_extra, + pack->transaction_expired, + pack->available_transactions, + pack->available_vote_transactions, + pack->pending_transactions_heap_size, + pack->conflicting_transactions, + pack->smallest_pending_transaction, + pack->microblock_per_block_limit, + pack->data_per_block_limit, + pack->transaction_schedule.taken, + pack->transaction_schedule.cu_limit, + pack->transaction_schedule.fast_path, + pack->transaction_schedule.byte_limit, + pack->transaction_schedule.write_cost, + pack->transaction_schedule.slow_path, + pack->cus_consumed_in_block, + pack->delete_missed, + pack->delete_hit ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->pack_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"bank\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->bank_off+event->bank_len*sizeof(fd_event_metrics_sample_bank_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ibank_len; i++ ) { + fd_event_metrics_sample_bank_t const * bank = ((fd_event_metrics_sample_bank_t const 
*)(((char*)event)+event->bank_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transaction_sanitize_failure\":%lu," + "\"transaction_not_executed_failure\":%lu," + "\"precompile_verify_failure\":%lu," + "\"slot_acquire\":{" + "\"success\":%lu," + "\"too_high\":%lu," + "\"too_low\":%lu" + "}," + "\"transaction_load_address_tables\":{" + "\"success\":%lu," + "\"slot_hashes_sysvar_not_found\":%lu," + "\"account_not_found\":%lu," + "\"invalid_account_owner\":%lu," + "\"invalid_account_data\":%lu," + "\"invalid_index\":%lu" + "}," + "\"transaction_result\":{" + "\"success\":%lu," + "\"account_in_use\":%lu," + "\"account_loaded_twice\":%lu," + "\"account_not_found\":%lu," + "\"program_account_not_found\":%lu," + "\"insufficient_funds_for_fee\":%lu," + "\"invalid_account_for_fee\":%lu," + "\"already_processed\":%lu," + "\"blockhash_not_found\":%lu," + "\"instruction_error\":%lu," + "\"call_chain_too_deep\":%lu," + "\"missing_signature_for_fee\":%lu," + "\"invalid_account_index\":%lu," + "\"signature_failure\":%lu," + "\"invalid_program_for_execution\":%lu," + "\"sanitize_failure\":%lu," + "\"cluster_maintenance\":%lu," + "\"account_borrow_outstanding\":%lu," + "\"would_exceed_max_block_cost_limit\":%lu," + "\"unsupported_version\":%lu," + "\"invalid_writable_account\":%lu," + "\"would_exceed_max_account_cost_limit\":%lu," + "\"would_exceed_account_data_block_limit\":%lu," + "\"too_many_account_locks\":%lu," + "\"address_lookup_table_not_found\":%lu," + "\"invalid_address_lookup_table_owner\":%lu," + "\"invalid_address_lookup_table_data\":%lu," + "\"invalid_address_lookup_table_index\":%lu," + "\"invalid_rent_paying_account\":%lu," + "\"would_exceed_max_vote_cost_limit\":%lu," + "\"would_exceed_account_data_total_limit\":%lu," + "\"duplicate_instruction\":%lu," + "\"insufficient_funds_for_rent\":%lu," + "\"max_loaded_accounts_data_size_exceeded\":%lu," + "\"invalid_loaded_accounts_data_size_limit\":%lu," + 
"\"resanitization_needed\":%lu," + "\"program_execution_temporarily_restricted\":%lu," + "\"unbalanced_transaction\":%lu," + "\"program_cache_hit_max_limit\":%lu" + "}," + "\"processing_failed\":%lu," + "\"fee_only_transactions\":%lu," + "\"executed_failed_transactions\":%lu," + "\"successful_transactions\":%lu," + "\"cost_model_undercount\":%lu" + "}", + bank->transaction_sanitize_failure, + bank->transaction_not_executed_failure, + bank->precompile_verify_failure, + bank->slot_acquire.success, + bank->slot_acquire.too_high, + bank->slot_acquire.too_low, + bank->transaction_load_address_tables.success, + bank->transaction_load_address_tables.slot_hashes_sysvar_not_found, + bank->transaction_load_address_tables.account_not_found, + bank->transaction_load_address_tables.invalid_account_owner, + bank->transaction_load_address_tables.invalid_account_data, + bank->transaction_load_address_tables.invalid_index, + bank->transaction_result.success, + bank->transaction_result.account_in_use, + bank->transaction_result.account_loaded_twice, + bank->transaction_result.account_not_found, + bank->transaction_result.program_account_not_found, + bank->transaction_result.insufficient_funds_for_fee, + bank->transaction_result.invalid_account_for_fee, + bank->transaction_result.already_processed, + bank->transaction_result.blockhash_not_found, + bank->transaction_result.instruction_error, + bank->transaction_result.call_chain_too_deep, + bank->transaction_result.missing_signature_for_fee, + bank->transaction_result.invalid_account_index, + bank->transaction_result.signature_failure, + bank->transaction_result.invalid_program_for_execution, + bank->transaction_result.sanitize_failure, + bank->transaction_result.cluster_maintenance, + bank->transaction_result.account_borrow_outstanding, + bank->transaction_result.would_exceed_max_block_cost_limit, + bank->transaction_result.unsupported_version, + bank->transaction_result.invalid_writable_account, + 
bank->transaction_result.would_exceed_max_account_cost_limit, + bank->transaction_result.would_exceed_account_data_block_limit, + bank->transaction_result.too_many_account_locks, + bank->transaction_result.address_lookup_table_not_found, + bank->transaction_result.invalid_address_lookup_table_owner, + bank->transaction_result.invalid_address_lookup_table_data, + bank->transaction_result.invalid_address_lookup_table_index, + bank->transaction_result.invalid_rent_paying_account, + bank->transaction_result.would_exceed_max_vote_cost_limit, + bank->transaction_result.would_exceed_account_data_total_limit, + bank->transaction_result.duplicate_instruction, + bank->transaction_result.insufficient_funds_for_rent, + bank->transaction_result.max_loaded_accounts_data_size_exceeded, + bank->transaction_result.invalid_loaded_accounts_data_size_limit, + bank->transaction_result.resanitization_needed, + bank->transaction_result.program_execution_temporarily_restricted, + bank->transaction_result.unbalanced_transaction, + bank->transaction_result.program_cache_hit_max_limit, + bank->processing_failed, + bank->fee_only_transactions, + bank->executed_failed_transactions, + bank->successful_transactions, + bank->cost_model_undercount ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->bank_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"shred\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->shred_off+event->shred_len*sizeof(fd_event_metrics_sample_shred_t)>event_len ) ) return 
FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ishred_len; i++ ) { + fd_event_metrics_sample_shred_t const * shred = ((fd_event_metrics_sample_shred_t const *)(((char*)event)+event->shred_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"microblocks_abandoned\":%lu," + "\"shred_processed\":{" + "\"bad_slot\":%lu," + "\"parse_failed\":%lu," + "\"rejected\":%lu," + "\"ignored\":%lu," + "\"okay\":%lu," + "\"completes\":%lu" + "}," + "\"fec_set_spilled\":%lu," + "\"shred_rejected_initial\":%lu," + "\"fec_rejected_fatal\":%lu" + "}", + shred->microblocks_abandoned, + shred->shred_processed.bad_slot, + shred->shred_processed.parse_failed, + shred->shred_processed.rejected, + shred->shred_processed.ignored, + shred->shred_processed.okay, + shred->shred_processed.completes, + shred->fec_set_spilled, + shred->shred_rejected_initial, + shred->fec_rejected_fatal ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->shred_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"store\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->store_off+event->store_len*sizeof(fd_event_metrics_sample_store_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; istore_len; i++ ) { + fd_event_metrics_sample_store_t const * store = ((fd_event_metrics_sample_store_t const *)(((char*)event)+event->store_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transactions_inserted\":%lu" + "}", + 
store->transactions_inserted ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->store_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "]"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + + return (long)off; +} + +long +fd_event_format( fd_event_common_t const * common, + ulong event_type, + fd_event_t const * event, + ulong event_len, + char * buffer, + ulong buffer_len ) { + ulong off = 0UL; + ulong printed; + int success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "{\"kind\":\"%s\",\"common\":", fd_event_type_str( event_type ) ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + long printed2 = format_common( common, buffer+off, buffer_len-off ); + if( FD_UNLIKELY( printed2<0 ) ) return printed2; + off += (ulong)printed2; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ",\"event\":{" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + switch( event_type ) { + case FD_EVENT_GENERAL_BOOT: + printed2 = format_general_boot( &event->general_boot, event_len, buffer+off, buffer_len-off ); + break; + case FD_EVENT_METRICS_SAMPLE: + printed2 = format_metrics_sample( &event->metrics_sample, event_len, buffer+off, buffer_len-off ); + break; + default: + return FD_EVENT_FORMAT_INVALID; + } + + if( FD_UNLIKELY( printed2<0 ) ) return printed2; + off += (ulong)printed2; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "}}" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + return (long)off; +} diff --git a/src/disco/metrics/generate/generated/fd_event.h 
b/src/disco/metrics/generate/generated/fd_event.h new file mode 100644 index 0000000000..95e876add6 --- /dev/null +++ b/src/disco/metrics/generate/generated/fd_event.h @@ -0,0 +1,827 @@ +#ifndef HEADER_fd_src_disco_events_generated_fd_event_h +#define HEADER_fd_src_disco_events_generated_fd_event_h + +#include "../../fd_disco_base.h" + +#define FD_EVENT_GENERAL_BOOT (1UL) +#define FD_EVENT_METRICS_SAMPLE (2UL) + +static inline char const * +fd_event_type_str( ulong event_type ) { + switch( event_type ) { + case FD_EVENT_GENERAL_BOOT : return "general_boot"; + case FD_EVENT_METRICS_SAMPLE: return "metrics_sample"; + default: return "unknown"; + } +} + +#define FD_EVENT_COMMON_CLUSTER_MAINNET (1) +#define FD_EVENT_COMMON_CLUSTER_DEVNET (2) +#define FD_EVENT_COMMON_CLUSTER_TESTNET (3) +#define FD_EVENT_COMMON_CLUSTER_DEVELOPMENT (4) +#define FD_EVENT_COMMON_CLUSTER_PYTHNET (5) +#define FD_EVENT_COMMON_CLUSTER_PYTHTEST (6) + +static inline char const * +fd_event_common_cluster_str( uchar value ) { + switch( value ) { + case FD_EVENT_COMMON_CLUSTER_MAINNET: return "mainnet"; + case FD_EVENT_COMMON_CLUSTER_DEVNET: return "devnet"; + case FD_EVENT_COMMON_CLUSTER_TESTNET: return "testnet"; + case FD_EVENT_COMMON_CLUSTER_DEVELOPMENT: return "development"; + case FD_EVENT_COMMON_CLUSTER_PYTHNET: return "pythnet"; + case FD_EVENT_COMMON_CLUSTER_PYTHTEST: return "pythtest"; + default: return "unknown"; + } +} + +#define FD_EVENT_COMMON_CLIENT_AGAVE (1) +#define FD_EVENT_COMMON_CLIENT_FRANKENDANCER (2) +#define FD_EVENT_COMMON_CLIENT_FIREDANCER (3) + +static inline char const * +fd_event_common_client_str( uchar value ) { + switch( value ) { + case FD_EVENT_COMMON_CLIENT_AGAVE: return "agave"; + case FD_EVENT_COMMON_CLIENT_FRANKENDANCER: return "frankendancer"; + case FD_EVENT_COMMON_CLIENT_FIREDANCER: return "firedancer"; + default: return "unknown"; + } +} + +#define FD_EVENT_COMMON_OS_LINUX (1) + +static inline char const * +fd_event_common_os_str( uchar value ) { + switch( 
value ) { + case FD_EVENT_COMMON_OS_LINUX: return "linux"; + default: return "unknown"; + } +} + +#define FD_EVENT_METRICS_SAMPLE_REASON_PERIODIC (1) +#define FD_EVENT_METRICS_SAMPLE_REASON_LEADER_START (2) +#define FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END_START (3) +#define FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END (4) + +static inline char const * +fd_event_metrics_sample_reason_str( uchar value ) { + switch( value ) { + case FD_EVENT_METRICS_SAMPLE_REASON_PERIODIC: return "periodic"; + case FD_EVENT_METRICS_SAMPLE_REASON_LEADER_START: return "leader_start"; + case FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END_START: return "leader_end_start"; + case FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END: return "leader_end"; + default: return "unknown"; + } +} + +/* Fields that are common to and reported by all events. */ +struct fd_event_common { + /* The timestamp that the event was generated on the validator client, with + nanosecond precision. Timestamp is provided by the validator, and + might be skewed. */ + long timestamp; + + /* The base58 encoded validator identity public key. The validator must + prove posession of the identity and metrics reported by identity + are authenticated. */ + char identity[45]; + + /* The cluster that the validator is running on. One of "mainnet", + "devnet", "testnet", "development", or "unknown". Must be one of + FD_EVENT_COMMON_CLUSTER_* */ + uchar cluster; + + /* The version of the validator software that is running. */ + char version[12]; + + /* The client that the validator is running. Currently always + "frankendancer". Must be one of FD_EVENT_COMMON_CLIENT_* */ + uchar client; + + /* The operating system that the validator is running. Currently always + "linux". Must be one of FD_EVENT_COMMON_OS_* */ + uchar os; + + /* A unique identifier for this instance of the validator. Randomly + generated when the validator is booted. */ + ulong instance_id; + + /* A unique identifier for the host running this validator. 
Will remain the + same between validator and machine restarts. */ + ulong machine_id; + + /* A unique identifier for the boot identifier of the host running this + validator. Will remain the same between validator restarts, but + reset on machine restarts. */ + ulong boot_id; + +}; + +typedef struct fd_event_common fd_event_common_t; + +/* General boot event, reported when the validator is started. */ +struct fd_event_general_boot { + /* The base58 encoding of the validator vote public key. */ + char vote_account[45]; + + /* The genesis hash of the cluster that the validator is running on. */ + char genesis_hash[45]; + + /* The commit hash of the validator software that is running. */ + char commit_hash[41]; + + /* The feature set of the validator software that is running. */ + uint feature_set; + + /* The tile topology of the validator, formatted as JSON. Fields of this + type are arbitrary length strings and are not guaranteed to be + null-terminated. topology_off is an offset from the beginning of + the event to the start of the string, and topology_len is the + length of the string in bytes. */ + ulong topology_off; + ulong topology_len; + + /* The full configuration of the validator, formatted as JSON. Fields of + this type are arbitrary length strings and are not guaranteed to be + null-terminated. configuration_off is an offset from the beginning + of the event to the start of the string, and configuration_len is + the length of the string in bytes. */ + ulong configuration_off; + ulong configuration_len; + + /* A raw dumpout of /proc/meminfo. Fields of this type are arbitrary length + strings and are not guaranteed to be null-terminated. meminfo_off + is an offset from the beginning of the event to the start of the + string, and meminfo_len is the length of the string in bytes. */ + ulong meminfo_off; + ulong meminfo_len; + + /* A raw dumpout of /proc/cpuinfo. Fields of this type are arbitrary length + strings and are not guaranteed to be null-terminated. 
cpuinfo_off + is an offset from the beginning of the event to the start of the + string, and cpuinfo_len is the length of the string in bytes. */ + ulong cpuinfo_off; + ulong cpuinfo_len; + + /* A raw dumpout of /proc/version. TODO USE uname(2) ?? Fields of this type + are arbitrary length strings and are not guaranteed to be null- + terminated. osversion_off is an offset from the beginning of the + event to the start of the string, and osversion_len is the length + of the string in bytes. */ + ulong osversion_off; + ulong osversion_len; + +}; + +typedef struct fd_event_general_boot fd_event_general_boot_t; + +struct fd_event_metrics_sample_tile { + /* Tile type. */ + char kind[21]; + + /* ID of the tile within the type. */ + ushort kind_id; + + /* The number of involuntary context switches. */ + ulong context_switch_involuntary_count; + + /* The number of voluntary context switches. */ + ulong context_switch_voluntary_count; + + /* The current status of the tile. 0 is booting, 1 is running. */ + ulong status; + + /* The last UNIX timestamp in nanoseconds that the tile heartbeated. */ + ulong heartbeat; + + /* Whether the tile is currently backpressured or not, either 1 or 0. */ + ulong in_backpressure; + + /* Number of times the times the tile has had to wait for one of more + consumers to catch up to resume publishing. */ + ulong backpressure_count; + + /* Mutually exclusive and exhaustive duration of time the tile spent in + each of the regimes. 
*/ + struct { + ulong caught_up_housekeeping; /* Caught up + Housekeeping */ + ulong processing_housekeeping; /* Processing + Housekeeping */ + ulong backpressure_housekeeping; /* Backpressure + Housekeeping */ + ulong caught_up_prefrag; /* Caught up + Prefrag */ + ulong processing_prefrag; /* Processing + Prefrag */ + ulong backpressure_prefrag; /* Backpressure + Prefrag */ + ulong caught_up_postfrag; /* Caught up + Postfrag */ + ulong processing_postfrag; /* Processing + Postfrag */ + } regime_duration_nanos; + +}; + +typedef struct fd_event_metrics_sample_tile fd_event_metrics_sample_tile_t; + +struct fd_event_metrics_sample_link { + /* Tile type. */ + char kind[21]; + + /* ID of the tile within the type. */ + ushort kind_id; + + /* Link type. */ + char link_kind[21]; + + /* ID of the link within the link kind. */ + ushort link_kind_id; + + /* The number of times the link reader has consumed a fragment. */ + ulong consumed_count; + + /* The total number of bytes read by the link consumer. */ + ulong consumed_size_bytes; + + /* The number of fragments that were filtered and not consumed. */ + ulong filtered_count; + + /* The total number of bytes read by the link consumer that were filtered. */ + ulong filtered_size_bytes; + + /* The number of times the link has been overrun while polling. */ + ulong overrun_polling_count; + + /* The number of fragments the link has not processed because it was + overrun while polling. */ + ulong overrun_polling_frag_count; + + /* The number of input overruns detected while reading metadata by the + consumer. */ + ulong overrun_reading_count; + + /* The number of fragments the link has not processed because it was + overrun while reading. */ + ulong overrun_reading_frag_count; + + /* The number of times the consumer was detected as rate limiting consumer + by the producer. 
*/ + ulong slow_count; + +}; + +typedef struct fd_event_metrics_sample_link fd_event_metrics_sample_link_t; + +struct fd_event_metrics_sample_net { + /* Number of IP packets received. */ + ulong received_packets; + + /* Total bytes received (including IP, UDP headers). */ + ulong received_bytes; + + /* Number of IP packets sent. */ + ulong sent_packets; + + /* Total bytes sent (including IP, UDP headers). */ + ulong sent_bytes; + + /* Number of packets dropped because the RX completion queue was empty. + This is only reported for net tile 0, since the measurement is + across all RX queues. */ + ulong xdp_rx_dropped_ring_full; + + /* Number of packets dropped for other reasons. This is only reported for + net tile 0, since the measurement is across all RX queues. */ + ulong xdp_rx_dropped_other; + + /* Number of packets dropped because the TX submission queue was empty. + This is reported for all net tiles. */ + ulong tx_dropped; + +}; + +typedef struct fd_event_metrics_sample_net fd_event_metrics_sample_net_t; + +struct fd_event_metrics_sample_quic { + /* Count of txns overrun before reassembled (too small + txn_reassembly_count). */ + ulong txns_overrun; + + /* Count of fragmented txn receive ops started. */ + ulong txn_reasms_started; + + /* Number of fragmented txn receive ops currently active. */ + ulong txn_reasms_active; + + /* Count of txn frags received */ + ulong frags_ok; + + /* Count of txn frags dropped due to data gap */ + ulong frags_gap; + + /* Count of txn frags dropped due to dup (stream already completed) */ + ulong frags_dup; + + /* Count of txns received via TPU. */ + struct { + ulong udp; /* TPU/UDP */ + ulong quic_fast; /* TPU/QUIC unfragmented */ + ulong quic_frag; /* TPU/QUIC fragmented */ + } txns_received; + + /* Count of txns abandoned because a conn was lost. */ + ulong txns_abandoned; + + /* Count of packets received on the QUIC port that were too small to be a + valid IP packet. 
*/ + ulong quic_packet_too_small; + + /* Count of txns received via QUIC dropped because they were too small. */ + ulong quic_txn_too_small; + + /* Count of txns received via QUIC dropped because they were too large. */ + ulong quic_txn_too_large; + + /* Count of packets received on the non-QUIC port that were too small to be + a valid IP packet. */ + ulong non_quic_packet_too_small; + + /* Count of packets received on the non-QUIC port that were too large to be + a valid transaction. */ + ulong non_quic_packet_too_large; + + /* Number of IP packets received. */ + ulong received_packets; + + /* Total bytes received (including IP, UDP, QUIC headers). */ + ulong received_bytes; + + /* Number of IP packets sent. */ + ulong sent_packets; + + /* Total bytes sent (including IP, UDP, QUIC headers). */ + ulong sent_bytes; + + /* The number of currently active QUIC connections. */ + ulong connections_active; + + /* The total number of connections that have been created. */ + ulong connections_created; + + /* Number of connections gracefully closed. */ + ulong connections_closed; + + /* Number of connections aborted. */ + ulong connections_aborted; + + /* Number of connections timed out. */ + ulong connections_timed_out; + + /* Number of connections established with retry. */ + ulong connections_retried; + + /* Number of connections that failed to create due to lack of slots. */ + ulong connection_error_no_slots; + + /* Number of connections that failed during retry (e.g. invalid token). */ + ulong connection_error_retry_fail; + + /* Number of packets that failed decryption. */ + ulong pkt_crypto_failed; + + /* Number of packets with an unknown connection ID. */ + ulong pkt_no_conn; + + /* Number of packets failed to send because of metadata alloc fail. */ + ulong pkt_tx_alloc_fail; + + /* Number of handshake flows created. */ + ulong handshakes_created; + + /* Number of handshakes dropped due to alloc fail. 
*/ + ulong handshake_error_alloc_fail; + + /* Number of stream RX events. */ + ulong stream_received_events; + + /* Total stream payload bytes received. */ + ulong stream_received_bytes; + + /* Number of QUIC frames received. */ + struct { + ulong unknown; /* Unknown frame type */ + ulong ack; /* ACK frame */ + ulong reset_stream; /* RESET_STREAM frame */ + ulong stop_sending; /* STOP_SENDING frame */ + ulong crypto; /* CRYPTO frame */ + ulong new_token; /* NEW_TOKEN frame */ + ulong stream; /* STREAM frame */ + ulong max_data; /* MAX_DATA frame */ + ulong max_stream_data; /* MAX_STREAM_DATA frame */ + ulong max_streams; /* MAX_STREAMS frame */ + ulong data_blocked; /* DATA_BLOCKED frame */ + ulong stream_data_blocked; /* STREAM_DATA_BLOCKED frame */ + ulong streams_blocked; /* STREAMS_BLOCKED(bidi) frame */ + ulong new_conn_id; /* NEW_CONN_ID frame */ + ulong retire_conn_id; /* RETIRE_CONN_ID frame */ + ulong path_challenge; /* PATH_CHALLENGE frame */ + ulong path_response; /* PATH_RESPONSE frame */ + ulong conn_close_quic; /* CONN_CLOSE(transport) frame */ + ulong conn_close_app; /* CONN_CLOSE(app) frame */ + ulong handshake_done; /* HANDSHAKE_DONE frame */ + ulong ping; /* PING frame */ + ulong padding; /* PADDING frame */ + } received_frames; + + /* ACK events */ + struct { + ulong noop; /* non-ACK-eliciting packet */ + ulong new; /* new ACK range */ + ulong merged; /* merged into existing ACK range */ + ulong drop; /* out of buffers */ + ulong cancel; /* ACK suppressed by handler */ + } ack_tx; + + /* Number of QUIC frames failed to parse. 
*/ + ulong frame_fail_parse; + +}; + +typedef struct fd_event_metrics_sample_quic fd_event_metrics_sample_quic_t; + +struct fd_event_metrics_sample_verify { + /* Count of transactions that failed to parse */ + ulong transaction_parse_failure; + + /* Count of transactions that failed to deduplicate in the verify stage */ + ulong transaction_dedup_failure; + + /* Count of transactions that failed to deduplicate in the verify stage */ + ulong transaction_verify_failure; + +}; + +typedef struct fd_event_metrics_sample_verify fd_event_metrics_sample_verify_t; + +struct fd_event_metrics_sample_dedup { + /* Count of transactions that failed to deduplicate in the dedup stage */ + ulong transaction_dedup_failure; + + /* Count of simple vote transactions received over gossip instead of via + the normal TPU path */ + ulong gossiped_votes_received; + +}; + +typedef struct fd_event_metrics_sample_dedup fd_event_metrics_sample_dedup_t; + +struct fd_event_metrics_sample_resolv { + /* Count of transactions dropped because the bank was not available */ + ulong no_bank_drop; + + /* Count of address lookup tables resolved */ + struct { + ulong invalid_lookup_index; /* The transaction referenced an index in a LUT that didn't exist */ + ulong account_uninitialized; /* The account referenced as a LUT hasn't been initialized */ + ulong invalid_account_data; /* The account referenced as a LUT couldn't be parsed */ + ulong invalid_account_owner; /* The account referenced as a LUT wasn't owned by the ALUT program ID */ + ulong account_not_found; /* The account referenced as a LUT couldn't be found */ + ulong success; /* Resolved successfully */ + } lut_resolved; + + /* Count of transactions that failed to resolve because the blockhash was + expired */ + ulong blockhash_expired; + + /* Count of transactions with an unknown blockhash. These may be very + recent, very old, nonces, or bogus. 
*/ + ulong blockhash_unknown; + +}; + +typedef struct fd_event_metrics_sample_resolv fd_event_metrics_sample_resolv_t; + +struct fd_event_metrics_sample_pack { + /* Count of transactions received via the normal TPU path */ + ulong normal_transaction_received; + + /* Result of inserting a transaction into the pack object */ + struct { + ulong bundle_blacklist; /* Transaction uses an account on the bundle blacklist */ + ulong write_sysvar; /* Transaction tries to write to a sysvar */ + ulong estimation_fail; /* Estimating compute cost and/or fee failed */ + ulong duplicate_account; /* Transaction included an account address twice */ + ulong too_many_accounts; /* Transaction tried to load too many accounts */ + ulong too_large; /* Transaction requests too many CUs */ + ulong expired; /* Transaction already expired */ + ulong addr_lut; /* Transaction loaded accounts from a lookup table */ + ulong unaffordable; /* Fee payer's balance below transaction fee */ + ulong duplicate; /* Pack aware of transaction with same signature */ + ulong priority; /* Transaction's fee was too low given its compute unit requirement and other competing transactions */ + ulong nonvote_add; /* Transaction that was not a simple vote added to pending transactions */ + ulong vote_add; /* Simple vote transaction was added to pending transactions */ + ulong nonvote_replace; /* Transaction that was not a simple vote replaced a lower priority transaction */ + ulong vote_replace; /* Simple vote transaction replaced a lower priority transaction */ + } transaction_inserted; + + /* Time in nanos spent in each state */ + struct { + ulong no_txn_no_bank_no_leader_no_microblock; /* Pack had no transactions available, and wasn't leader */ + ulong txn_no_bank_no_leader_no_microblock; /* Pack had transactions available, but wasn't leader or had hit a limit */ + ulong no_txn_bank_no_leader_no_microblock; /* Pack had no transactions available, had banks but wasn't leader */ + ulong 
txn_bank_no_leader_no_microblock; /* Pack had transactions available, had banks but wasn't leader */ + ulong no_txn_no_bank_leader_no_microblock; /* Pack had no transactions available, and was leader but had no available banks */ + ulong txn_no_bank_leader_no_microblock; /* Pack had transactions available, was leader, but had no available banks */ + ulong no_txn_bank_leader_no_microblock; /* Pack had available banks but no transactions */ + ulong txn_bank_leader_no_microblock; /* Pack had banks and transactions available but couldn't schedule anything non-conflicting */ + ulong no_txn_no_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong txn_no_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong no_txn_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong txn_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong no_txn_no_bank_leader_microblock; /* Pack scheduled a non-empty microblock but all banks were busy */ + ulong txn_no_bank_leader_microblock; /* Pack scheduled a non-empty microblock but all banks were busy */ + ulong no_txn_bank_leader_microblock; /* Pack scheduled a non-empty microblock and now has no transactions */ + ulong txn_bank_leader_microblock; /* Pack scheduled a non-empty microblock */ + } metric_timing; + + /* Transactions dropped from the extra transaction storage because it was + full */ + ulong transaction_dropped_from_extra; + + /* Transactions inserted into the extra transaction storage because pack's + primary storage was full */ + ulong transaction_inserted_to_extra; + + /* Transactions pulled from the extra transaction storage and inserted into + pack's primary storage */ + ulong transaction_inserted_from_extra; + + /* Transactions deleted from pack because their TTL expired */ + ulong transaction_expired; + + /* The total number of pending transactions in pack's pool 
that are + available to be scheduled */ + ulong available_transactions; + + /* The number of pending simple vote transactions in pack's pool that are + available to be scheduled */ + ulong available_vote_transactions; + + /* The maximum number of pending transactions that pack can consider. This + value is fixed at Firedancer startup but is a useful reference for + AvailableTransactions and AvailableVoteTransactions. */ + ulong pending_transactions_heap_size; + + /* The number of available transactions that are temporarily not being + considered due to account lock conflicts with many higher paying + transactions */ + ulong conflicting_transactions; + + /* A lower bound on the smallest non-vote transaction (in cost units) that + is immediately available for scheduling */ + ulong smallest_pending_transaction; + + /* The number of times pack did not pack a microblock because the limit on + microblocks/block had been reached */ + ulong microblock_per_block_limit; + + /* The number of times pack did not pack a microblock because it reached + reached the data per block limit at the start of trying to schedule + a microblock */ + ulong data_per_block_limit; + + /* Result of trying to consider a transaction for scheduling */ + struct { + ulong taken; /* Pack included the transaction in the microblock */ + ulong cu_limit; /* Pack skipped the transaction because it would have exceeded the block CU limit */ + ulong fast_path; /* Pack skipped the transaction because of account conflicts using the fast bitvector check */ + ulong byte_limit; /* Pack skipped the transaction because it would have exceeded the block data size limit */ + ulong write_cost; /* Pack skipped the transaction because it would have caused a writable account to exceed the per-account block write cost limit */ + ulong slow_path; /* Pack skipped the transaction because of account conflicts using the full slow check */ + } transaction_schedule; + + /* The number of cost units consumed in the current block, or 
0 if pack is + not currently packing a block */ + ulong cus_consumed_in_block; + + /* Count of attempts to delete a transaction that wasn't found */ + ulong delete_missed; + + /* Count of attempts to delete a transaction that was found and deleted */ + ulong delete_hit; + +}; + +typedef struct fd_event_metrics_sample_pack fd_event_metrics_sample_pack_t; + +struct fd_event_metrics_sample_bank { + /* Number of transactions that failed to sanitize. */ + ulong transaction_sanitize_failure; + + /* Number of transactions that did not execute. This is different than + transactions which fail to execute, which make it onto the chain. */ + ulong transaction_not_executed_failure; + + /* Number of transactions that failed precompile verification and thus will + not execute. */ + ulong precompile_verify_failure; + + /* Result of acquiring a slot. */ + struct { + ulong success; /* Success */ + ulong too_high; /* Too high */ + ulong too_low; /* Too low */ + } slot_acquire; + + /* Result of loading address lookup tables for a transaction. If there are + multiple errors for the transaction, only the first one is + reported. */ + struct { + ulong success; /* Success */ + ulong slot_hashes_sysvar_not_found; /* The slot hashes syvar could not be found. */ + ulong account_not_found; /* The account storing the address lookup table was deactivated or could not be found. */ + ulong invalid_account_owner; /* The account that owns the referenced lookup table is not the address lookup table program. */ + ulong invalid_account_data; /* The data for the referenced address lookup table is malformed. */ + ulong invalid_index; /* The referenced index in the address lookup table does not exist. */ + } transaction_load_address_tables; + + /* Result of loading and executing a transaction. */ + struct { + ulong success; /* Success */ + ulong account_in_use; /* An account is already being processed in another transaction in a way that does not support parallelism. 
*/ + ulong account_loaded_twice; /* A `Pubkey` appears twice in the transaction's `account_keys`. Instructions can reference `Pubkey`s more than once but the message must contain a list with no duplicate keys. */ + ulong account_not_found; /* Attempt to debit an account but found no record of a prior credit. */ + ulong program_account_not_found; /* Attempt to load a program that does not exist. */ + ulong insufficient_funds_for_fee; /* The fee payer `Pubkey` does not have sufficient balance to pay the fee to schedule the transaction. */ + ulong invalid_account_for_fee; /* This account may not be used to pay transaction fees. */ + ulong already_processed; /* The bank has seen this transaction before. This can occur under normal operation when a UDP packet is duplicated, as a user error from a client not updating its `recent_blockhash`, or as a double-spend attack. */ + ulong blockhash_not_found; /* The bank has not seen the given `recent_blockhash` or the transaction is too old and the `recent_blockhash` has been discarded. */ + ulong instruction_error; /* An error occurred while processing an instruction. */ + ulong call_chain_too_deep; /* Loader call chain is too deep. */ + ulong missing_signature_for_fee; /* Transaction requires a fee but has no signature present. */ + ulong invalid_account_index; /* Transaction contains an invalid account reference. */ + ulong signature_failure; /* Transaction did not pass signature verification. */ + ulong invalid_program_for_execution; /* This program may not be used for executing instructions. */ + ulong sanitize_failure; /* Transaction failed to sanitize accounts offsets correctly implies that account locks are not taken for this TX, and should not be unlocked. */ + ulong cluster_maintenance; /* Transactions are currently disabled due to cluster maintenance. */ + ulong account_borrow_outstanding; /* Transaction processing left an account with an outstanding borrowed reference. 
*/ + ulong would_exceed_max_block_cost_limit; /* Transaction would exceed max Block Cost Limit. */ + ulong unsupported_version; /* Transaction version is unsupported. */ + ulong invalid_writable_account; /* Transaction loads a writable account that cannot be written. */ + ulong would_exceed_max_account_cost_limit; /* Transaction would exceed max account limit within the block. */ + ulong would_exceed_account_data_block_limit; /* Transaction would exceed account data limit within the block. */ + ulong too_many_account_locks; /* Transaction locked too many accounts. */ + ulong address_lookup_table_not_found; /* Address lookup table not found. */ + ulong invalid_address_lookup_table_owner; /* Attempted to lookup addresses from an account owned by the wrong program. */ + ulong invalid_address_lookup_table_data; /* Attempted to lookup addresses from an invalid account. */ + ulong invalid_address_lookup_table_index; /* Address table lookup uses an invalid index. */ + ulong invalid_rent_paying_account; /* Transaction leaves an account with a lower balance than rent-exempt minimum. */ + ulong would_exceed_max_vote_cost_limit; /* Transaction would exceed max Vote Cost Limit. */ + ulong would_exceed_account_data_total_limit; /* Transaction would exceed total account data limit. */ + ulong duplicate_instruction; /* Transaction contains a duplicate instruction that is not allowed. */ + ulong insufficient_funds_for_rent; /* Transaction results in an account with insufficient funds for rent. */ + ulong max_loaded_accounts_data_size_exceeded; /* Transaction exceeded max loaded accounts data size cap. */ + ulong invalid_loaded_accounts_data_size_limit; /* LoadedAccountsDataSizeLimit set for transaction must be greater than 0. */ + ulong resanitization_needed; /* Sanitized transaction differed before/after feature activiation. Needs to be resanitized. */ + ulong program_execution_temporarily_restricted; /* Program execution is temporarily restricted on an account. 
*/ + ulong unbalanced_transaction; /* The total balance before the transaction does not equal the total balance after the transaction. */ + ulong program_cache_hit_max_limit; /* The total program cache size hit the maximum allowed limit. */ + } transaction_result; + + /* Count of transactions for which the processing stage failed and won't + land on chain */ + ulong processing_failed; + + /* Count of transactions that will land on chain but without executing */ + ulong fee_only_transactions; + + /* Count of transactions that execute on chain but failed */ + ulong executed_failed_transactions; + + /* Count of transactions that execute on chain and succeed */ + ulong successful_transactions; + + /* Count of transactions that used more CUs than the cost model should have + permitted them to */ + ulong cost_model_undercount; + +}; + +typedef struct fd_event_metrics_sample_bank fd_event_metrics_sample_bank_t; + +struct fd_event_metrics_sample_shred { + /* The number of microblocks that were abandoned because we switched slots + without finishing the current slot */ + ulong microblocks_abandoned; + + /* The result of processing a thread from the network */ + struct { + ulong bad_slot; /* Shred was for a slot for which we don't know the leader */ + ulong parse_failed; /* Shred parsing failed */ + ulong rejected; /* Shred was invalid for one of many reasons */ + ulong ignored; /* Shred was ignored because we had already received or reconstructed it */ + ulong okay; /* Shred accepted to an incomplete FEC set */ + ulong completes; /* Shred accepted and resulted in a valid, complete FEC set */ + } shred_processed; + + /* The number of FEC sets that were spilled because they didn't complete in + time and we needed space */ + ulong fec_set_spilled; + + /* The number shreds that were rejected before any resources were allocated + for the FEC set */ + ulong shred_rejected_initial; + + /* The number of FEC sets that were rejected for reasons that cause the + whole FEC set to 
become invalid */ + ulong fec_rejected_fatal; + +}; + +typedef struct fd_event_metrics_sample_shred fd_event_metrics_sample_shred_t; + +struct fd_event_metrics_sample_store { + /* Count of transactions produced while we were leader in the shreds that + have been inserted so far */ + ulong transactions_inserted; + +}; + +typedef struct fd_event_metrics_sample_store fd_event_metrics_sample_store_t; + +/* Metric data periodically sampled by the application. */ +struct fd_event_metrics_sample { + /* Reason the metrics snapshot was sampled. Must be one of + FD_EVENT_METRICS_SAMPLE_REASON_* */ + uchar reason; + + /* If the reason the sample was taken is because a leader was starting or + ending, this is the slot that was starting (or ending). If a leader + slot is both ending and starting (leader_end_start), this is the + slot which is starting. */ + ulong slot; + + /* Common metrics shared by all tiles */ + ulong tile_off; + ulong tile_len; + + /* Metrics for links between tiles. */ + ulong link_off; + ulong link_len; + + /* Metrics for net tiles. */ + ulong net_off; + ulong net_len; + + /* Metrics for quic tiles. */ + ulong quic_off; + ulong quic_len; + + /* Metrics for verify tiles. */ + ulong verify_off; + ulong verify_len; + + /* Metrics for dedup tiles. */ + ulong dedup_off; + ulong dedup_len; + + /* Metrics for resolv tiles. */ + ulong resolv_off; + ulong resolv_len; + + /* Metrics for pack tiles. */ + ulong pack_off; + ulong pack_len; + + /* Metrics for bank tiles. */ + ulong bank_off; + ulong bank_len; + + /* Metrics for shred tiles. */ + ulong shred_off; + ulong shred_len; + + /* Metrics for store tiles. 
*/ + ulong store_off; + ulong store_len; + +}; + +typedef struct fd_event_metrics_sample fd_event_metrics_sample_t; + +struct fd_event { + union { + fd_event_general_boot_t general_boot; + fd_event_metrics_sample_t metrics_sample; + }; +}; + +typedef struct fd_event fd_event_t; + +#define FD_EVENT_FORMAT_OVERFLOW (-1) +#define FD_EVENT_FORMAT_INVALID (-2) + +long +fd_event_format( fd_event_common_t const * common, + ulong event_type, + fd_event_t const * event, + ulong event_len, + char * buffer, + ulong buffer_len ); + +#endif /* HEADER_fd_src_disco_events_generated_fd_event_h */ diff --git a/src/disco/metrics/generate/generated/fd_event_metrics.h b/src/disco/metrics/generate/generated/fd_event_metrics.h new file mode 100644 index 0000000000..8262efa593 --- /dev/null +++ b/src/disco/metrics/generate/generated/fd_event_metrics.h @@ -0,0 +1,108 @@ +#ifndef HEADER_fd_src_disco_events_generated_fd_event_metrics_h +#define HEADER_fd_src_disco_events_generated_fd_event_metrics_h + +#include "fd_event.h" +#include "../../metrics/fd_metrics.h" + +#include "../../topo/fd_topo.h" + +ulong +fd_event_metrics_footprint( fd_topo_t const * topo ) { + ulong l = FD_LAYOUT_INIT; l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_t ), sizeof( fd_event_metrics_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_tile_t ), topo->tile_cnt*sizeof( fd_event_metrics_tile_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_link_t ), fd_topo_polled_in_cnt( topo )*sizeof( fd_event_metrics_link_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_tile_t ), fd_topo_tile_name_cnt( topo, "tile" )*sizeof( fd_event_metrics_tile_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_net_t ), fd_topo_tile_name_cnt( topo, "net" )*sizeof( fd_event_metrics_net_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_quic_t ), fd_topo_tile_name_cnt( topo, "quic" )*sizeof( fd_event_metrics_quic_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_verify_t ), 
fd_topo_tile_name_cnt( topo, "verify" )*sizeof( fd_event_metrics_verify_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_dedup_t ), fd_topo_tile_name_cnt( topo, "dedup" )*sizeof( fd_event_metrics_dedup_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_resolv_t ), fd_topo_tile_name_cnt( topo, "resolv" )*sizeof( fd_event_metrics_resolv_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_pack_t ), fd_topo_tile_name_cnt( topo, "pack" )*sizeof( fd_event_metrics_pack_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_bank_t ), fd_topo_tile_name_cnt( topo, "bank" )*sizeof( fd_event_metrics_bank_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_shred_t ), fd_topo_tile_name_cnt( topo, "shred" )*sizeof( fd_event_metrics_shred_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_store_t ), fd_topo_tile_name_cnt( topo, "store" )*sizeof( fd_event_metrics_store_t ) ); + return l; +} + +void +fd_event_metrics_layout( fd_topo_t const * topo, + uchar * buffer ) { + ulong off = 0UL; + + fd_event_metrics_t * metrics = (fd_event_metrics_t *)(buffer+off); + off += sizeof( fd_event_metrics_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_tile_t ) ); + metrics->tile_off = off; + metrics->tile_len = fd_topo_tile_name_cnt( topo, "tile" ); + off += fd_topo_tile_name_cnt( topo, "tile" )*sizeof( fd_event_metrics_tile_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_link_t ) ); + metrics->link_off = off; + metrics->link_len = fd_topo_polled_in_cnt( topo ); + off += fd_topo_polled_in_cnt( topo )*sizeof( fd_event_metrics_link_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_net_t ) ); + metrics->net_off = off; + metrics->net_len = fd_topo_tile_name_cnt( topo, "net" ); + off += fd_topo_tile_name_cnt( topo, "net" )*sizeof( fd_event_metrics_net_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_quic_t ) ); + metrics->quic_off = off; + metrics->quic_len = fd_topo_tile_name_cnt( topo, 
"quic" ); + off += fd_topo_tile_name_cnt( topo, "quic" )*sizeof( fd_event_metrics_quic_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_verify_t ) ); + metrics->verify_off = off; + metrics->verify_len = fd_topo_tile_name_cnt( topo, "verify" ); + off += fd_topo_tile_name_cnt( topo, "verify" )*sizeof( fd_event_metrics_verify_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_dedup_t ) ); + metrics->dedup_off = off; + metrics->dedup_len = fd_topo_tile_name_cnt( topo, "dedup" ); + off += fd_topo_tile_name_cnt( topo, "dedup" )*sizeof( fd_event_metrics_dedup_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_resolv_t ) ); + metrics->resolv_off = off; + metrics->resolv_len = fd_topo_tile_name_cnt( topo, "resolv" ); + off += fd_topo_tile_name_cnt( topo, "resolv" )*sizeof( fd_event_metrics_resolv_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_pack_t ) ); + metrics->pack_off = off; + metrics->pack_len = fd_topo_tile_name_cnt( topo, "pack" ); + off += fd_topo_tile_name_cnt( topo, "pack" )*sizeof( fd_event_metrics_pack_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_bank_t ) ); + metrics->bank_off = off; + metrics->bank_len = fd_topo_tile_name_cnt( topo, "bank" ); + off += fd_topo_tile_name_cnt( topo, "bank" )*sizeof( fd_event_metrics_bank_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_shred_t ) ); + metrics->shred_off = off; + metrics->shred_len = fd_topo_tile_name_cnt( topo, "shred" ); + off += fd_topo_tile_name_cnt( topo, "shred" )*sizeof( fd_event_metrics_shred_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_store_t ) ); + metrics->store_off = off; + metrics->store_len = fd_topo_tile_name_cnt( topo, "store" ); + off += fd_topo_tile_name_cnt( topo, "store" )*sizeof( fd_event_metrics_store_t ); + + ulong link_idx = 0UL; + for( ulong i=0UL; itile_cnt; i++ ) { + fd_event_metrics_tile_t * tile = (fd_event_metrics_tile_t 
*)(buffer+((fd_event_metrics_t*)buffer)->tile_off)+i; + strncpy( tile->kind, topo->tiles[ i ].name, sizeof( tile->kind ) ); + tile->kind_id = (ushort)topo->tiles[ i ].kind_id; + + for( ulong j=0UL; jtiles[ i ].in_cnt; j++ ) { + if( FD_UNLIKELY( !topo->tiles[ i ].in_link_poll[ j ] ) ) continue; + fd_event_metrics_link_t * link = (fd_event_metrics_link_t *)(buffer+((fd_event_metrics_t*)buffer)->link_off)+link_idx; + strncpy( link->kind, topo->tiles[ i ].name, sizeof( link->kind ) ); + link->kind_id = (ushort)topo->tiles[ i ].kind_id; + strncpy( link->link_kind, topo->links[ topo->tiles[ i ].in_link_id[ j ] ].name, sizeof( link->link_kind ) ); + link->link_kind_id = (ushort)topo->links[ topo->tiles[ i ].in_link_id[ j ] ].kind_id; + link_idx++; + } + } +} + +#endif /* HEADER_fd_src_disco_events_generated_fd_event_metrics_h */ diff --git a/src/disco/metrics/generate/types.py b/src/disco/metrics/generate/metric_types.py similarity index 100% rename from src/disco/metrics/generate/types.py rename to src/disco/metrics/generate/metric_types.py diff --git a/src/disco/metrics/generate/write_codegen.py b/src/disco/metrics/generate/write_codegen.py index 115f2662d2..7ce71a8e2a 100644 --- a/src/disco/metrics/generate/write_codegen.py +++ b/src/disco/metrics/generate/write_codegen.py @@ -1,4 +1,4 @@ -from .types import * +from .metric_types import * from pathlib import Path from typing import TextIO import os @@ -234,3 +234,199 @@ def write_codegen(metrics: Metrics): print(f'Generated {metrics.count()} metrics for {len(metrics.tiles)} tiles') + +def write_event_snap_codegen(metrics: Metrics): + os.makedirs(Path(__file__).parent / '../generated', exist_ok=True) + + with open(Path(__file__).parent / '../generated' / 'fd_metric_event_snap.h', 'w') as f: + f.write('/* THIS FILE IS GENERATED BY gen_metrics.py. DO NOT HAND EDIT. 
*/\n\n') + f.write('#ifndef HEADER_fd_src_disco_events_generated_fd_metric_event_snap_h\n') + f.write('#define HEADER_fd_src_disco_events_generated_fd_metric_event_snap_h\n\n') + f.write('#include "../../topo/fd_topo.h"\n') + f.write('#include "fd_event.h"\n\n') + f.write('void\n') + f.write('fd_metric_event_snap( fd_topo_t * topo,\n') + f.write(' fd_event_metrics_sample_t * metrics );\n\n') + f.write('#endif /* HEADER_fd_src_disco_events_generated_fd_metric_event_snap_h */\n') + + with open(Path(__file__).parent / '../generated' / 'fd_metric_event_snap.c', 'w') as f: + f.write('/* THIS FILE IS GENERATED BY gen_metrics.py. DO NOT HAND EDIT. */\n') + f.write('#include "fd_metric_event_snap.h"\n\n') + f.write('#include "../fd_metrics.h"\n') + + f.write('static inline ulong\n') + f.write('find_producer_out_idx( fd_topo_t * topo,\n') + f.write(' fd_topo_tile_t * producer,\n') + f.write(' fd_topo_tile_t * consumer,\n') + f.write(' ulong consumer_in_idx ) {\n') + f.write(' ulong reliable_cons_cnt = 0UL;\n') + f.write(' for( ulong i=0UL; itile_cnt; i++ ) {\n') + f.write(' fd_topo_tile_t * consumer_tile = &topo->tiles[ i ];\n') + f.write(' for( ulong j=0UL; jin_cnt; j++ ) {\n') + f.write(' for( ulong k=0UL; kout_cnt; k++ ) {\n') + f.write(' if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==producer->out_link_id[ k ] && consumer_tile->in_link_reliable[ j ] ) ) {\n') + f.write(' if( FD_UNLIKELY( consumer==consumer_tile && consumer_in_idx==j ) ) return reliable_cons_cnt;\n') + f.write(' reliable_cons_cnt++;\n') + f.write(' }\n') + f.write(' }\n') + f.write(' }\n') + f.write(' }\n') + f.write('\n') + f.write(' return ULONG_MAX;\n') + f.write('}\n') + + f.write(f'void\n') + f.write(f'fd_metric_event_snap( fd_topo_t * topo,\n') + f.write(f' fd_event_metrics_sample_t * event ) {{\n') + f.write(f' uchar * buffer = (uchar *)event;\n\n') + + f.write(f' for( ulong i=0UL; itile_len; i++ ) {{\n') + f.write(f' fd_event_metrics_sample_tile_t * event_tile = ((fd_event_metrics_sample_tile_t 
*)(buffer+event->tile_off))+i;\n') + f.write(f' fd_topo_tile_t const * tile = &topo->tiles[ i ];\n') + f.write(f' volatile ulong const * metrics = fd_metrics_tile( tile->metrics );\n') + + for metric in metrics.common: + if metric.clickhouse_exclude or isinstance(metric, HistogramMetric): + continue + + metric_name = re.sub(r'(?{metric_name}.{value_name} = metrics[ MIDX( COUNTER, TILE, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == MetricType.GAUGE: + f.write(f' event_tile->{metric_name}.{value_name} = metrics[ MIDX( GAUGE, TILE, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + else: + if metric.type == MetricType.COUNTER: + f.write(f' event_tile->{metric_name} = metrics[ MIDX( COUNTER, TILE, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.GAUGE: + f.write(f' event_tile->{metric_name} = metrics[ MIDX( GAUGE, TILE, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + + f.write(f' }}\n\n') + + f.write(f' ulong link_idx = 0UL;\n') + f.write(f' for( ulong i=0UL; itile_cnt; i++ ) {{\n') + f.write(f' fd_topo_tile_t * tile = &topo->tiles[ i ];\n') + f.write(f' ulong in_idx = 0UL;\n') + f.write(f' for( ulong j=0UL; jin_cnt; j++ ) {{\n') + f.write(f' if( FD_UNLIKELY( !tile->in_link_poll[ j ] ) ) continue;\n\n') + f.write(f' fd_event_metrics_sample_link_t * link = ((fd_event_metrics_sample_link_t *)(buffer+event->link_off))+link_idx;\n') + f.write(f' volatile ulong const * metrics = fd_metrics_link_in( tile->metrics, in_idx );\n\n') + + for metric in metrics.link_in: + if metric.clickhouse_exclude or isinstance(metric, HistogramMetric): + continue + + metric_name = re.sub(r'(?{metric_name}.{value_name} = metrics[ MIDX( COUNTER, LINK, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == 
MetricType.GAUGE: + f.write(f' link->{metric_name}.{value_name} = metrics[ MIDX( GAUGE, LINK, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + else: + if metric.type == MetricType.COUNTER: + f.write(f' link->{metric_name} = metrics[ MIDX( COUNTER, LINK, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.GAUGE: + f.write(f' link->{metric_name} = metrics[ MIDX( GAUGE, LINK, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + + f.write(f'\n fd_topo_tile_t * producer = &topo->tiles[ fd_topo_find_link_producer( topo, &topo->links[ tile->in_link_id[ j ] ] ) ];\n') + f.write(f' ulong producer_out_idx = find_producer_out_idx( topo, producer, tile, j );\n') + f.write(f' metrics = fd_metrics_link_out( producer->metrics, producer_out_idx );\n') + + for metric in metrics.link_out: + if metric.clickhouse_exclude or isinstance(metric, HistogramMetric): + continue + + metric_name = re.sub(r'(?{metric_name}.{value_name} = metrics[ MIDX( COUNTER, LINK, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == MetricType.GAUGE: + f.write(f' link->{metric_name}.{value_name} = metrics[ MIDX( GAUGE, LINK, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + else: + if metric.type == MetricType.COUNTER: + f.write(f' link->{metric_name} = metrics[ MIDX( COUNTER, LINK, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.GAUGE: + f.write(f' link->{metric_name} = metrics[ MIDX( GAUGE, LINK, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + + f.write(f'\n in_idx++;\n') + f.write(f' link_idx++;\n') + + f.write(f' }}\n') + 
f.write(f' }}\n\n') + + for tile in Tile: + if tile not in metrics.tiles: + continue + + tile_metrics = [metric for metric in metrics.tiles[tile] if not metric.clickhouse_exclude and not isinstance(metric, HistogramMetric)] + if len(tile_metrics) == 0: + continue + + f.write(f' for( ulong i=0UL; i{tile.name.lower()}_len; i++ ) {{\n') + f.write(f' fd_event_metrics_sample_{tile.name.lower()}_t * {tile.name.lower()} = ((fd_event_metrics_sample_{tile.name.lower()}_t *)(buffer+event->{tile.name.lower()}_off))+i;\n\n') + f.write(f' fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "{tile.name.lower()}", i ) ];\n') + f.write(f' volatile ulong const * metrics = fd_metrics_tile( tile->metrics );\n') + f.write(f' (void){tile.name.lower()}; (void)metrics;\n\n') + + for metric in metrics.tiles[tile]: + if metric.clickhouse_exclude: + continue + + metric_name = re.sub(r'(?{metric_name}.{value_name} = metrics[ MIDX( COUNTER, {tile.name.upper()}, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == MetricType.GAUGE: + f.write(f' {tile.name.lower()}->{metric_name}.{value_name} = metrics[ MIDX( GAUGE, {tile.name.upper()}, {metric_name.upper()}_{value_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + else: + if metric.type == MetricType.COUNTER: + f.write(f' {tile.name.lower()}->{metric_name} = metrics[ MIDX( COUNTER, {tile.name.upper()}, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.GAUGE: + f.write(f' {tile.name.lower()}->{metric_name} = metrics[ MIDX( GAUGE, {tile.name.upper()}, {metric_name.upper()} ) ];\n') + elif metric.type == MetricType.HISTOGRAM: + pass + else: + raise ValueError(f'Unknown metric type {metric.type}') + + f.write(f' }}\n\n') + f.write(f'}}\n\n') diff --git a/src/disco/metrics/generate/write_docs.py b/src/disco/metrics/generate/write_docs.py index 3c707303fc..16c5c016d5 100644 --- 
a/src/disco/metrics/generate/write_docs.py +++ b/src/disco/metrics/generate/write_docs.py @@ -1,4 +1,4 @@ -from .types import * +from .metric_types import * from typing import TextIO import re diff --git a/src/disco/metrics/generate/write_events_codegen.py b/src/disco/metrics/generate/write_events_codegen.py new file mode 100644 index 0000000000..43dd3c2690 --- /dev/null +++ b/src/disco/metrics/generate/write_events_codegen.py @@ -0,0 +1,540 @@ +import os +from pathlib import Path +import textwrap +from typing import Dict, List, TextIO +from .event_types import ClickHouseType, Field, Event + + +def generate_header(events: Dict[str, Event]): + os.makedirs(Path(__file__).parent.parent / 'generated', exist_ok=True) + + with open(Path(__file__).parent.parent / 'generated' / 'fd_event.h', 'w') as f: + f.write('#ifndef HEADER_fd_src_disco_events_generated_fd_event_h\n') + f.write('#define HEADER_fd_src_disco_events_generated_fd_event_h\n\n') + + f.write('#include "../../fd_disco_base.h"\n\n') + + max_name_length = max([len(f'FD_EVENT_{event.name}') for event in events.values()]) + for event in events.values(): + if event.name == 'common': + continue + + name = f'FD_EVENT_{event.name.upper()}'.ljust(max_name_length) + f.write(f'#define {name} ({event.id}UL)\n') + f.write('\n') + + f.write('static inline char const *\n') + f.write('fd_event_type_str( ulong event_type ) {\n') + f.write(' switch( event_type ) {\n') + + for event in events.values(): + if event.name == 'common': + continue + + name = f'FD_EVENT_{event.name.upper()}'.ljust(max_name_length) + f.write(f' case {name}: return "{event.name}";\n') + f.write(' default: return "unknown";\n') + f.write(' }\n') + f.write('}\n\n') + + for event in events.values(): + for field in event.fields.values(): + if field.deprecated or field.server_only: + continue + + if field.type == ClickHouseType.ENUM_8: + max_variant_length = max([len(f'FD_EVENT_{event.name}_{field.name}_{variant}') for variant in field.variants]) + for 
(variant, value) in field.variants.items(): + name = f'FD_EVENT_{event.name.upper()}_{field.name.upper()}_{variant.upper()}'.ljust(max_variant_length) + f.write(f'#define {name} ({value})\n') + f.write('\n') + + f.write('static inline char const *\n') + f.write(f'fd_event_{event.name}_{field.name}_str( uchar value ) {{\n') + f.write(f' switch( value ) {{\n') + for (variant, value) in field.variants.items(): + f.write(f' case FD_EVENT_{event.name.upper()}_{field.name.upper()}_{variant.upper()}: return "{variant}";\n') + f.write(' default: return "unknown";\n') + f.write(' }\n') + f.write('}\n\n') + + elif field.type == ClickHouseType.NESTED: + for sub_field in field.sub_fields.values(): + if sub_field.type == ClickHouseType.ENUM_8: + max_variant_length = max([len(f'FD_EVENT_{event.name}_{field.name}_{sub_field.name}_{variant}') for variant in sub_field.variants]) + for (variant, value) in sub_field.variants.items(): + name = f'FD_EVENT_{event.name.upper()}_{field.name.upper()}_{sub_field.name.upper()}_{variant.upper()}'.ljust(max_variant_length) + f.write(f'#define {name} ({value})\n') + f.write('\n') + + f.write('static inline char const *\n') + f.write(f'fd_event_{event.name}_{field.name}_{sub_field.name}_str( uchar value ) {{\n') + f.write(f' switch( value ) {{\n') + for (variant, value) in sub_field.variants.items(): + f.write(f' case FD_EVENT_{event.name.upper()}_{field.name.upper()}_{sub_field.name.upper()}_{variant.upper()}: return "{variant}";\n') + f.write(' default: return "unknown";\n') + f.write(' }\n') + f.write('}\n\n') + + for event in events.values(): + for field in event.fields.values(): + if field.type == ClickHouseType.NESTED: + f.write(f'struct fd_event_{event.name}_{field.name} {{\n') + for sub_field in field.sub_fields.values(): + if sub_field.type == ClickHouseType.DATETIME_64_9: + f.write(f' /* {textwrap.fill(sub_field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' long {sub_field.name};\n\n') + elif sub_field.type == 
ClickHouseType.ENUM_8: + description = sub_field.description + " Must be one of FD_EVENT_" + event.name.upper() + "_" + field.name.upper() + "_" + sub_field.name.upper() + "_*" + f.write(f' /* {textwrap.fill(description, width=72, subsequent_indent=" ")} */\n') + f.write(f' uchar {sub_field.name};\n\n') + elif sub_field.type == ClickHouseType.LOW_CARDINALITY_STRING or sub_field.type == ClickHouseType.STRING: + if sub_field.max_length is None: + description = f'{sub_field.description} Fields of this type are arbitrary length strings ' + \ + f'and are not guaranteed to be null-terminated. {sub_field.name}_off is an offset from ' + \ + f'the beginning of the event to the start of the string, and {sub_field.name}_len is the ' + \ + 'length of the string in bytes.' + f.write(f' /* {textwrap.fill(description, width=72, subsequent_indent=" ")} */\n') + f.write(f' ulong {sub_field.name}_off;\n') + f.write(f' ulong {sub_field.name}_len;\n\n') + continue + else: + f.write(f' /* {textwrap.fill(sub_field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' char {sub_field.name}[{sub_field.max_length + 1}];\n\n') + elif sub_field.type == ClickHouseType.UINT16: + f.write(f' /* {textwrap.fill(sub_field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' ushort {sub_field.name};\n\n') + elif sub_field.type == ClickHouseType.UINT32: + f.write(f' /* {textwrap.fill(sub_field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' uint {sub_field.name};\n\n') + elif sub_field.type == ClickHouseType.UINT64: + f.write(f' /* {textwrap.fill(sub_field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' ulong {sub_field.name};\n\n') + elif sub_field.type == ClickHouseType.TUPLE: + f.write(f' /* {textwrap.fill(sub_field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' struct {{\n') + for tuple_field in sub_field.sub_fields.values(): + if tuple_field.type == ClickHouseType.DATETIME_64_9: + f.write(f' long 
{tuple_field.name}; /* {tuple_field.description} */ \n') + elif tuple_field.type == ClickHouseType.ENUM_8: + description = tuple_field.description + " Must be one of FD_EVENT_" + event.name.upper() + "_" + field.name.upper() + "_" + sub_field.name.upper() + "_" + tuple_field.name.upper() + "_*" + f.write(f' uchar {tuple_field.name}; /* {tuple_field.description} */ \n') + elif tuple_field.type == ClickHouseType.LOW_CARDINALITY_STRING or tuple_field.type == ClickHouseType.STRING: + if tuple_field.max_length is None: + description = f'{tuple_field.description} Fields of this type are arbitrary length strings ' + \ + f'and are not guaranteed to be null-terminated. {tuple_field.name}_off is an offset from ' + \ + f'the beginning of the event to the start of the string, and {tuple_field.name}_len is the ' + \ + 'length of the string in bytes.' + f.write(f' ulong {tuple_field.name}_off;\n') + f.write(f' ulong {tuple_field.name}_len;\n') + continue + else: + f.write(f' char {tuple_field.name}[{tuple_field.max_length + 1}];\n') + elif tuple_field.type == ClickHouseType.UINT16: + f.write(f' ushort {tuple_field.name}; /* {tuple_field.description} */ \n') + elif tuple_field.type == ClickHouseType.UINT32: + f.write(f' uint {tuple_field.name}; /* {tuple_field.description} */ \n') + elif tuple_field.type == ClickHouseType.UINT64: + f.write(f' ulong {tuple_field.name}; /* {tuple_field.description} */ \n') + else: + raise ValueError(f"Unknown field type {tuple_field.type}") + f.write(f' }} {sub_field.name};\n\n') + else: + raise ValueError(f"Unknown field type {sub_field.type}") + f.write('};\n\n') + + f.write(f'typedef struct fd_event_{event.name}_{field.name} fd_event_{event.name}_{field.name}_t;\n\n') + + f.write(f'/* {textwrap.fill(event.description, width=72, subsequent_indent=" ")} */\n') + f.write(f'struct fd_event_{event.name} {{\n') + for field in event.fields.values(): + if field.deprecated or field.server_only: + continue + + if field.type == 
ClickHouseType.DATETIME_64_9: + f.write(f' /* {textwrap.fill(field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' long {field.name};\n\n') + elif field.type == ClickHouseType.ENUM_8: + description = field.description + " Must be one of FD_EVENT_" + event.name.upper() + "_" + field.name.upper() + "_*" + f.write(f' /* {textwrap.fill(description, width=72, subsequent_indent=" ")} */\n') + f.write(f' uchar {field.name};\n\n') + elif field.type == ClickHouseType.LOW_CARDINALITY_STRING or field.type == ClickHouseType.STRING: + if field.max_length is None: + description = f'{field.description} Fields of this type are arbitrary length strings ' + \ + f'and are not guaranteed to be null-terminated. {field.name}_off is an offset from ' + \ + f'the beginning of the event to the start of the string, and {field.name}_len is the ' + \ + 'length of the string in bytes.' + f.write(f' /* {textwrap.fill(description, width=72, subsequent_indent=" ")} */\n') + f.write(f' ulong {field.name}_off;\n') + f.write(f' ulong {field.name}_len;\n\n') + continue + else: + f.write(f' /* {textwrap.fill(field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' char {field.name}[{field.max_length + 1}];\n\n') + elif field.type == ClickHouseType.UINT16: + f.write(f' /* {textwrap.fill(field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' ushort {field.name};\n\n') + elif field.type == ClickHouseType.UINT32: + f.write(f' /* {textwrap.fill(field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' uint {field.name};\n\n') + elif field.type == ClickHouseType.UINT64: + f.write(f' /* {textwrap.fill(field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' ulong {field.name};\n\n') + elif field.type == ClickHouseType.NESTED: + f.write(f' /* {textwrap.fill(field.description, width=72, subsequent_indent=" ")} */\n') + f.write(f' ulong {field.name}_off;\n') + f.write(f' ulong {field.name}_len;\n\n') + else: + raise 
ValueError(f"Unknown field type {field.type}") + f.write('};\n\n') + + f.write(f'typedef struct fd_event_{event.name} fd_event_{event.name}_t;\n\n') + + f.write('struct fd_event {\n') + f.write(' union {\n') + for event in events.values(): + if event.name == 'common': + continue + + f.write(f' fd_event_{event.name}_t {event.name};\n') + f.write(' };\n') + f.write('};\n\n') + + f.write('typedef struct fd_event fd_event_t;\n\n') + + f.write('#define FD_EVENT_FORMAT_OVERFLOW (-1)\n') + f.write('#define FD_EVENT_FORMAT_INVALID (-2)\n\n') + + f.write('long\n') + f.write('fd_event_format( fd_event_common_t const * common,\n') + f.write(' ulong event_type,\n') + f.write(' fd_event_t const * event,\n') + f.write(' ulong event_len,\n') + f.write(' char * buffer,\n') + f.write(' ulong buffer_len );\n\n') + + f.write('#endif /* HEADER_fd_src_disco_events_generated_fd_event_h */\n') + + with open(Path(__file__).parent.parent / 'generated' / 'fd_event_metrics.h', 'w') as f: + f.write('#ifndef HEADER_fd_src_disco_events_generated_fd_event_metrics_h\n') + f.write('#define HEADER_fd_src_disco_events_generated_fd_event_metrics_h\n\n') + + f.write('#include "fd_event.h"\n') + f.write('#include "../../metrics/fd_metrics.h"\n\n') + f.write('#include "../../topo/fd_topo.h"\n\n') + + f.write('ulong\n') + f.write('fd_event_metrics_footprint( fd_topo_t const * topo ) {\n') + f.write(' ulong l = FD_LAYOUT_INIT;') + f.write(' l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_t ), sizeof( fd_event_metrics_sample_t ) );\n') + f.write(' l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_tile_t ), topo->tile_cnt*sizeof( fd_event_metrics_sample_tile_t ) );\n') + f.write(' l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_link_t ), fd_topo_polled_in_cnt( topo )*sizeof( fd_event_metrics_sample_link_t ) );\n') + for field in events['metrics_sample'].fields.values(): + if field.deprecated or field.server_only: + continue + + if field.type != ClickHouseType.NESTED or field.name 
== 'common' or field.name == 'link': + continue + + f.write(f' l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_{field.name}_t ), fd_topo_tile_name_cnt( topo, "{field.name}" )*sizeof( fd_event_metrics_sample_{field.name}_t ) );\n') + + f.write(' return l;\n') + f.write('}\n\n') + + f.write('void\n') + f.write('fd_event_metrics_layout( fd_topo_t const * topo,\n') + f.write(' uchar * buffer ) {\n') + f.write(' ulong off = 0UL;\n\n') + f.write(' fd_event_metrics_sample_t * metrics = (fd_event_metrics_sample_t *)(buffer+off);\n') + f.write(' off += sizeof( fd_event_metrics_sample_t );\n\n') + + for field in events['metrics_sample'].fields.values(): + if field.deprecated or field.server_only: + continue + + if field.type != ClickHouseType.NESTED: + continue + + f.write(f' off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_{field.name}_t ) );\n') + f.write(f' metrics->{field.name}_off = off;\n') + if field.name == 'common': + cnt = 'topo->tile_cnt' + elif field.name == 'link': + cnt = 'fd_topo_polled_in_cnt( topo )' + else: + cnt = f'fd_topo_tile_name_cnt( topo, "{field.name}" )' + f.write(f' metrics->{field.name}_len = {cnt};\n') + f.write(f' off += {cnt}*sizeof( fd_event_metrics_sample_{field.name}_t );\n\n') + + f.write(' ulong link_idx = 0UL;\n') + f.write(' for( ulong i=0UL; itile_cnt; i++ ) {\n') + f.write(' fd_event_metrics_sample_tile_t * tile = (fd_event_metrics_sample_tile_t *)(buffer+((fd_event_metrics_sample_t*)buffer)->tile_off)+i;\n') + f.write(' strncpy( tile->kind, topo->tiles[ i ].name, sizeof( tile->kind ) );\n') + f.write(' tile->kind_id = (ushort)topo->tiles[ i ].kind_id;\n\n') + f.write(' for( ulong j=0UL; jtiles[ i ].in_cnt; j++ ) {\n') + f.write(' if( FD_UNLIKELY( !topo->tiles[ i ].in_link_poll[ j ] ) ) continue;\n') + f.write(' fd_event_metrics_sample_link_t * link = (fd_event_metrics_sample_link_t *)(buffer+((fd_event_metrics_sample_t*)buffer)->link_off)+link_idx;\n') + f.write(' strncpy( link->kind, topo->tiles[ i ].name, 
def write_fields(f: TextIO, indent: int, name: str, prefix: str, fields: Dict[str, Field]):
    """Emit one fd_cstr_printf_check( ... ) call that serializes `fields` as a JSON object.

    The call is written in two halves: first the C format-string literal (one
    fragment per field), then the matching argument list.  `name` is the C
    expression holding the struct being serialized, `prefix` is the
    fd_event_* prefix used to form enum-to-string helper names, and `indent`
    is the leading indentation (spaces) of the generated C statements.

    Fixes over the previous revision:
      * Comma placement in the format string was driven by the index into the
        UNFILTERED field dict while deprecated/server-only fields were
        skipped, so a trailing filtered field left a dangling comma (invalid
        JSON) and desynchronized the format string from the argument list.
        Both halves now iterate the same filtered list.
      * Deprecated/server-only tuple sub-fields were kept in the format
        string but dropped from the argument list (printf format/argument
        mismatch); they are now filtered consistently in both halves.
      * An empty / all-filtered field set previously emitted an unterminated
        C call; it now emits a complete call printing an empty JSON object.
    """
    pad = ''.ljust(indent)

    # Single source of truth for which fields are serialized; used by both
    # the format-string half and the argument half so they cannot diverge.
    active_fields: List[Field] = [field for field in fields.values() if not field.deprecated and not field.server_only]

    if not active_fields:
        # Nothing to serialize: still emit a complete, compilable call.
        f.write(pad + 'success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "{}" );\n')
        f.write('\n')
        f.write(pad + 'if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
        f.write(pad + 'off += printed;\n')
        return

    f.write(pad + 'success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed,\n')
    f.write(pad + '  "{"\n')

    # -------- format-string half --------
    for (i, field) in enumerate(active_fields):
        # `end` closes this fragment's C string literal, with a JSON comma
        # for every field except the last active one.
        end = '"\n' if i == len(active_fields) - 1 else ',"\n'

        if field.type == ClickHouseType.DATETIME_64_9:
            f.write(pad + f'    "\\"{field.name}\\":%ld' + end)
        elif field.type == ClickHouseType.ENUM_8:
            f.write(pad + f'    "\\"{field.name}\\":\\"%s\\"' + end)
        elif field.type == ClickHouseType.LOW_CARDINALITY_STRING or field.type == ClickHouseType.STRING:
            if field.max_length is None:
                # Unbounded strings are stored out-of-band as (off,len) pairs.
                f.write(pad + f'    "\\"{field.name}\\":\\"%.*s\\"' + end)
            else:
                f.write(pad + f'    "\\"{field.name}\\":\\"%.{field.max_length}s\\"' + end)
        elif field.type == ClickHouseType.UINT16:
            f.write(pad + f'    "\\"{field.name}\\":%hu' + end)
        elif field.type == ClickHouseType.UINT32:
            f.write(pad + f'    "\\"{field.name}\\":%u' + end)
        elif field.type == ClickHouseType.UINT64:
            f.write(pad + f'    "\\"{field.name}\\":%lu' + end)
        elif field.type == ClickHouseType.TUPLE:
            f.write(pad + f'    "\\"{field.name}\\":{{"\n')
            sub_active = [sub for sub in field.sub_fields.values() if not sub.deprecated and not sub.server_only]
            for (j, sub) in enumerate(sub_active):
                sub_end = '"\n' if j == len(sub_active) - 1 else ',"\n'
                if sub.type == ClickHouseType.DATETIME_64_9:
                    # NOTE: tuple datetimes are emitted quoted, unlike
                    # top-level ones; preserved from the original generator.
                    f.write(pad + f'      "\\"{sub.name}\\":\\"%ld\\"' + sub_end)
                elif sub.type == ClickHouseType.ENUM_8:
                    f.write(pad + f'      "\\"{sub.name}\\":\\"%s\\"' + sub_end)
                elif sub.type == ClickHouseType.LOW_CARDINALITY_STRING or sub.type == ClickHouseType.STRING:
                    if sub.max_length is None:
                        f.write(pad + f'      "\\"{sub.name}\\":\\"%.*s\\"' + sub_end)
                    else:
                        f.write(pad + f'      "\\"{sub.name}\\":\\"%.{sub.max_length}s\\"' + sub_end)
                elif sub.type == ClickHouseType.UINT16:
                    f.write(pad + f'      "\\"{sub.name}\\":%hu' + sub_end)
                elif sub.type == ClickHouseType.UINT32:
                    f.write(pad + f'      "\\"{sub.name}\\":%u' + sub_end)
                elif sub.type == ClickHouseType.UINT64:
                    f.write(pad + f'      "\\"{sub.name}\\":%lu' + sub_end)
                else:
                    raise ValueError(f"Unknown field type {sub.type}")
            f.write(pad + '    "}' + end)
        elif field.type == ClickHouseType.NESTED:
            # Repeated fields cannot be serialized inline; callers must route
            # them through write_event.  Fail fast instead of emitting a
            # format string with no matching argument (which previously blew
            # up later in the argument half anyway).
            raise ValueError(f"Nested field {field.name} cannot be serialized by write_fields")
        else:
            raise ValueError(f"Unknown field type {field.type}")

    f.write(pad + '  "}",\n')

    # -------- argument half (must mirror the fragments above 1:1) --------
    for (i, field) in enumerate(active_fields):
        # `end` terminates this argument: a comma between arguments, and the
        # closing parenthesis of the fd_cstr_printf_check call after the last.
        end = ' );\n' if i == len(active_fields) - 1 else ',\n'

        if field.type == ClickHouseType.DATETIME_64_9:
            f.write(pad + f'  {name}->{field.name}' + end)
        elif field.type == ClickHouseType.ENUM_8:
            f.write(pad + f'  fd_event_{prefix}_{field.name}_str( {name}->{field.name} )' + end)
        elif field.type == ClickHouseType.LOW_CARDINALITY_STRING or field.type == ClickHouseType.STRING:
            if field.max_length is None:
                # %.*s consumes two arguments: the length and the pointer.
                f.write(pad + f'  (int){name}->{field.name}_len, ((char*){name})+{name}->{field.name}_off' + end)
            else:
                f.write(pad + f'  {name}->{field.name}' + end)
        elif field.type == ClickHouseType.UINT16 or field.type == ClickHouseType.UINT32 or field.type == ClickHouseType.UINT64:
            f.write(pad + f'  {name}->{field.name}' + end)
        elif field.type == ClickHouseType.TUPLE:
            sub_active = [sub for sub in field.sub_fields.values() if not sub.deprecated and not sub.server_only]
            for (j, sub) in enumerate(sub_active):
                # The last tuple argument inherits the field-level terminator.
                sub_end = end if j == len(sub_active) - 1 else ',\n'
                if sub.type == ClickHouseType.DATETIME_64_9:
                    f.write(pad + f'  {name}->{field.name}.{sub.name}' + sub_end)
                elif sub.type == ClickHouseType.ENUM_8:
                    f.write(pad + f'  fd_event_{prefix}_{field.name}_{sub.name}_str( {name}->{field.name}.{sub.name} )' + sub_end)
                elif sub.type == ClickHouseType.LOW_CARDINALITY_STRING or sub.type == ClickHouseType.STRING:
                    if sub.max_length is None:
                        f.write(pad + f'  (int){name}->{field.name}.{sub.name}_len, ((char*){name})+{name}->{field.name}.{sub.name}_off' + sub_end)
                    else:
                        f.write(pad + f'  {name}->{field.name}.{sub.name}' + sub_end)
                elif sub.type == ClickHouseType.UINT16 or sub.type == ClickHouseType.UINT32 or sub.type == ClickHouseType.UINT64:
                    f.write(pad + f'  {name}->{field.name}.{sub.name}' + sub_end)
                else:
                    raise ValueError(f"Unknown field type {sub.type}")
        else:
            raise ValueError(f"Unknown field type {field.type}")

    f.write('\n')
    f.write(pad + 'if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
    f.write(pad + 'off += printed;\n')
def write_event(f: TextIO, name: str, event: Event):
    """Emit the C statements that serialize `event` into the JSON output buffer.

    Events without repeated (NESTED) fields are serialized with a single
    fd_cstr_printf_check call via write_fields.  Events with repeated fields
    are serialized field by field, each NESTED field expanding into a
    bounds-checked loop over its out-of-band array.

    Fixes over the previous revision:
      * Comma placement indexed the UNFILTERED field dict while
        deprecated/server-only fields were skipped, so a trailing filtered
        field produced a dangling comma in the generated JSON; the separator
        is now computed over the filtered list.
      * DATETIME64(9) and Enum8 fields hardcoded a trailing comma even in
        last position; they now honor the computed separator like every
        other field type.
      * Top-level string fields were emitted without surrounding JSON quote
        marks (unlike write_fields); they are now quoted.
      * Unknown field types were silently skipped; they now raise ValueError.
    """
    has_complex = any(field.type == ClickHouseType.NESTED for field in event.fields.values())

    if not has_complex:
        # Simple event: one printf call covers the whole object.
        write_fields(f, 2, name, event.name, event.fields)
        return

    active_fields = [field for field in event.fields.values() if not field.deprecated and not field.server_only]
    for (i, field) in enumerate(active_fields):
        # JSON separator after this field; empty for the last active field.
        comma = ',' if i < len(active_fields) - 1 else ''

        if field.type == ClickHouseType.DATETIME_64_9:
            f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":%ld{comma}", {name}->{field.name} );\n')
            f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
            f.write('  off += printed;\n\n')
        elif field.type == ClickHouseType.ENUM_8:
            f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":\\"%s\\"{comma}", fd_event_{event.name}_{field.name}_str( {name}->{field.name} ) );\n')
            f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
            f.write('  off += printed;\n\n')
        elif field.type == ClickHouseType.LOW_CARDINALITY_STRING or field.type == ClickHouseType.STRING:
            if field.max_length is None:
                # Unbounded string: length/pointer live out-of-band after the struct.
                f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":\\"%.*s\\"{comma}", (int){name}->{field.name}_len, ((char*)event)+event->{field.name}_off );\n')
            else:
                f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":\\"%.*s\\"{comma}", {field.max_length}, {name}->{field.name} );\n')
            f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
            f.write('  off += printed;\n\n')
        elif field.type == ClickHouseType.UINT16:
            f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":%hu{comma}", {name}->{field.name} );\n')
            f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
            f.write('  off += printed;\n\n')
        elif field.type == ClickHouseType.UINT32:
            f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":%u{comma}", {name}->{field.name} );\n')
            f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
            f.write('  off += printed;\n\n')
        elif field.type == ClickHouseType.UINT64:
            f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":%lu{comma}", {name}->{field.name} );\n')
            f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
            f.write('  off += printed;\n\n')
        elif field.type == ClickHouseType.NESTED:
            _write_nested_field(f, name, event, field, comma)
        else:
            raise ValueError(f"Unknown field type {field.type}")


def _write_nested_field(f: TextIO, name: str, event: Event, field: Field, comma: str):
    """Emit the bounds-checked loop serializing one NESTED field as a JSON array."""
    f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\\"{field.name}\\":[" );\n')
    f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
    f.write('  off += printed;\n\n')

    # Reject events whose out-of-band array would read past the event buffer.
    f.write(f'  if( FD_UNLIKELY( event->{field.name}_off+event->{field.name}_len*sizeof(fd_event_{event.name}_{field.name}_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID;\n')
    f.write(f'  for( ulong i=0UL; i<event->{field.name}_len; i++ ) {{\n')
    f.write(f'    fd_event_{event.name}_{field.name}_t const * {field.name} = ((fd_event_{event.name}_{field.name}_t const *)(((char*)event)+{name}->{field.name}_off))+i;\n\n')

    write_fields(f, 4, field.name, f'{event.name}_{field.name}', field.sub_fields)

    # Array-element separator (skipped after the last element).
    f.write(f'\n    if( FD_LIKELY( i!=event->{field.name}_len-1UL ) ) {{\n')
    f.write('      success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ",");\n')
    f.write('      if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
    f.write('      off += printed;\n')
    f.write('    }\n')
    f.write('  }\n\n')

    f.write(f'  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "]{comma}");\n')
    f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
    f.write('  off += printed;\n\n')
def generate_impl(events: Dict[str, Event]):
    """Write generated/fd_event.c: one static format_* helper per event plus
    the public fd_event_format dispatcher.

    Fix over the previous revision: the unbounded-string bounds check
    references the C `event_len` parameter, but the generated format_common
    takes no `event_len` (the common header is serialized separately and has
    no out-of-band payload).  An unbounded string on the common schema would
    therefore have generated uncompilable C; generation now fails fast with
    a Python-time error instead.
    """
    os.makedirs(Path(__file__).parent.parent / 'generated', exist_ok=True)

    with open(Path(__file__).parent.parent / 'generated' / 'fd_event.c', 'w') as f:
        f.write('#include "fd_event.h"\n\n')

        # The generated JSON format strings are very long C string constants.
        f.write('#pragma GCC diagnostic ignored "-Woverlength-strings"\n\n')

        for event in events.values():
            f.write('static long\n')
            if event.name != 'common':
                f.write(f'format_{event.name}( fd_event_{event.name}_t const * event,\n')
                f.write('        ulong event_len,\n')
            else:
                f.write('format_common( fd_event_common_t const * event,\n')
            f.write('        char * buffer,\n')
            f.write('        ulong buffer_len ) {\n')

            # Validate unbounded-string (off,len) pairs up front.  Only
            # possible for events that receive event_len, i.e. everything
            # except common.
            for field in event.fields.values():
                if field.deprecated or field.server_only:
                    continue
                if field.type == ClickHouseType.LOW_CARDINALITY_STRING or field.type == ClickHouseType.STRING:
                    if field.max_length is None:
                        if event.name == 'common':
                            raise ValueError(f"common event field {field.name} cannot be an unbounded string: format_common has no event_len to validate against")
                        f.write(f'  if( FD_UNLIKELY( event->{field.name}_off+event->{field.name}_len>event_len ) ) return FD_EVENT_FORMAT_INVALID;\n')

            f.write('\n')

            f.write('  ulong off = 0UL;\n')
            f.write('  ulong printed;\n')
            f.write('  int success;\n\n')

            write_event(f, 'event', event)

            f.write('\n  return (long)off;\n')
            f.write('}\n\n')

        # Dispatcher: {"kind":...,"common":{...},"event":{...}}
        f.write('long\n')
        f.write('fd_event_format( fd_event_common_t const * common,\n')
        f.write('                 ulong event_type,\n')
        f.write('                 fd_event_t const * event,\n')
        f.write('                 ulong event_len,\n')
        f.write('                 char * buffer,\n')
        f.write('                 ulong buffer_len ) {\n')
        f.write('  ulong off = 0UL;\n')
        f.write('  ulong printed;\n')
        f.write('  int success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "{\\"kind\\":\\"%s\\",\\"common\\":", fd_event_type_str( event_type ) );\n')
        f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
        f.write('  off += printed;\n\n')

        f.write('  long printed2 = format_common( common, buffer+off, buffer_len-off );\n')
        f.write('  if( FD_UNLIKELY( printed2<0 ) ) return printed2;\n')
        f.write('  off += (ulong)printed2;\n\n')

        f.write('  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ",\\"event\\":{" );\n')
        f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
        f.write('  off += printed;\n\n')

        f.write('  switch( event_type ) {\n')
        for event in events.values():
            if event.name == 'common':
                continue
            f.write(f'    case FD_EVENT_{event.name.upper()}:\n')
            f.write(f'      printed2 = format_{event.name}( &event->{event.name}, event_len, buffer+off, buffer_len-off );\n')
            f.write('      break;\n')
        f.write('    default:\n')
        f.write('      return FD_EVENT_FORMAT_INVALID;\n')
        f.write('  }\n\n')
        f.write('  if( FD_UNLIKELY( printed2<0 ) ) return printed2;\n')
        f.write('  off += (ulong)printed2;\n\n')
        # Closes both the "event" object and the root object.
        f.write('  success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "}}" );\n')
        f.write('  if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW;\n')
        f.write('  off += printed;\n\n')
        f.write('  return (long)off;\n')
        f.write('}\n')
def write_event_formatter(events: Dict[str, Event]):
    """Generate the C event formatter: headers (generate_header) and
    implementation (generate_impl), then report what was written."""
    generate_header(events)
    generate_impl(events)

    event_cnt = len(events)
    print(f'Wrote {event_cnt} events to src/disco/metrics/generated')
off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"slot\":%lu,", event->slot ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"tile\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->tile_off+event->tile_len*sizeof(fd_event_metrics_sample_tile_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; itile_len; i++ ) { + fd_event_metrics_sample_tile_t const * tile = ((fd_event_metrics_sample_tile_t const *)(((char*)event)+event->tile_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"kind\":\"%.20s\"," + "\"kind_id\":%hu," + "\"context_switch_involuntary_count\":%lu," + "\"context_switch_voluntary_count\":%lu," + "\"status\":%lu," + "\"heartbeat\":%lu," + "\"in_backpressure\":%lu," + "\"backpressure_count\":%lu," + "\"regime_duration_nanos\":{" + "\"caught_up_housekeeping\":%lu," + "\"processing_housekeeping\":%lu," + "\"backpressure_housekeeping\":%lu," + "\"caught_up_prefrag\":%lu," + "\"processing_prefrag\":%lu," + "\"backpressure_prefrag\":%lu," + "\"caught_up_postfrag\":%lu," + "\"processing_postfrag\":%lu" + "}" + "}", + tile->kind, + tile->kind_id, + tile->context_switch_involuntary_count, + tile->context_switch_voluntary_count, + tile->status, + tile->heartbeat, + tile->in_backpressure, + tile->backpressure_count, + tile->regime_duration_nanos.caught_up_housekeeping, + tile->regime_duration_nanos.processing_housekeeping, + tile->regime_duration_nanos.backpressure_housekeeping, + tile->regime_duration_nanos.caught_up_prefrag, + tile->regime_duration_nanos.processing_prefrag, + tile->regime_duration_nanos.backpressure_prefrag, + tile->regime_duration_nanos.caught_up_postfrag, + tile->regime_duration_nanos.processing_postfrag ); + + if( FD_UNLIKELY( !success ) ) return 
FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->tile_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"link\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->link_off+event->link_len*sizeof(fd_event_metrics_sample_link_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ilink_len; i++ ) { + fd_event_metrics_sample_link_t const * link = ((fd_event_metrics_sample_link_t const *)(((char*)event)+event->link_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"kind\":\"%.20s\"," + "\"kind_id\":%hu," + "\"link_kind\":\"%.20s\"," + "\"link_kind_id\":%hu," + "\"consumed_count\":%lu," + "\"consumed_size_bytes\":%lu," + "\"filtered_count\":%lu," + "\"filtered_size_bytes\":%lu," + "\"overrun_polling_count\":%lu," + "\"overrun_polling_frag_count\":%lu," + "\"overrun_reading_count\":%lu," + "\"overrun_reading_frag_count\":%lu," + "\"slow_count\":%lu" + "}", + link->kind, + link->kind_id, + link->link_kind, + link->link_kind_id, + link->consumed_count, + link->consumed_size_bytes, + link->filtered_count, + link->filtered_size_bytes, + link->overrun_polling_count, + link->overrun_polling_frag_count, + link->overrun_reading_count, + link->overrun_reading_frag_count, + link->slow_count ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->link_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off 
+= printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"net\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->net_off+event->net_len*sizeof(fd_event_metrics_sample_net_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; inet_len; i++ ) { + fd_event_metrics_sample_net_t const * net = ((fd_event_metrics_sample_net_t const *)(((char*)event)+event->net_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"received_packets\":%lu," + "\"received_bytes\":%lu," + "\"sent_packets\":%lu," + "\"sent_bytes\":%lu," + "\"xdp_rx_dropped_ring_full\":%lu," + "\"xdp_rx_dropped_other\":%lu," + "\"tx_dropped\":%lu" + "}", + net->received_packets, + net->received_bytes, + net->sent_packets, + net->sent_bytes, + net->xdp_rx_dropped_ring_full, + net->xdp_rx_dropped_other, + net->tx_dropped ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->net_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"quic\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->quic_off+event->quic_len*sizeof(fd_event_metrics_sample_quic_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; iquic_len; i++ ) { + fd_event_metrics_sample_quic_t const * quic = 
((fd_event_metrics_sample_quic_t const *)(((char*)event)+event->quic_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"txns_overrun\":%lu," + "\"txn_reasms_started\":%lu," + "\"txn_reasms_active\":%lu," + "\"frags_ok\":%lu," + "\"frags_gap\":%lu," + "\"frags_dup\":%lu," + "\"txns_received\":{" + "\"udp\":%lu," + "\"quic_fast\":%lu," + "\"quic_frag\":%lu" + "}," + "\"txns_abandoned\":%lu," + "\"quic_packet_too_small\":%lu," + "\"quic_txn_too_small\":%lu," + "\"quic_txn_too_large\":%lu," + "\"non_quic_packet_too_small\":%lu," + "\"non_quic_packet_too_large\":%lu," + "\"received_packets\":%lu," + "\"received_bytes\":%lu," + "\"sent_packets\":%lu," + "\"sent_bytes\":%lu," + "\"connections_active\":%lu," + "\"connections_created\":%lu," + "\"connections_closed\":%lu," + "\"connections_aborted\":%lu," + "\"connections_timed_out\":%lu," + "\"connections_retried\":%lu," + "\"connection_error_no_slots\":%lu," + "\"connection_error_retry_fail\":%lu," + "\"pkt_crypto_failed\":%lu," + "\"pkt_no_conn\":%lu," + "\"pkt_tx_alloc_fail\":%lu," + "\"handshakes_created\":%lu," + "\"handshake_error_alloc_fail\":%lu," + "\"stream_received_events\":%lu," + "\"stream_received_bytes\":%lu," + "\"received_frames\":{" + "\"unknown\":%lu," + "\"ack\":%lu," + "\"reset_stream\":%lu," + "\"stop_sending\":%lu," + "\"crypto\":%lu," + "\"new_token\":%lu," + "\"stream\":%lu," + "\"max_data\":%lu," + "\"max_stream_data\":%lu," + "\"max_streams\":%lu," + "\"data_blocked\":%lu," + "\"stream_data_blocked\":%lu," + "\"streams_blocked\":%lu," + "\"new_conn_id\":%lu," + "\"retire_conn_id\":%lu," + "\"path_challenge\":%lu," + "\"path_response\":%lu," + "\"conn_close_quic\":%lu," + "\"conn_close_app\":%lu," + "\"handshake_done\":%lu," + "\"ping\":%lu," + "\"padding\":%lu" + "}," + "\"ack_tx\":{" + "\"noop\":%lu," + "\"new\":%lu," + "\"merged\":%lu," + "\"drop\":%lu," + "\"cancel\":%lu" + "}," + "\"frame_fail_parse\":%lu" + "}", + quic->txns_overrun, + 
quic->txn_reasms_started, + quic->txn_reasms_active, + quic->frags_ok, + quic->frags_gap, + quic->frags_dup, + quic->txns_received.udp, + quic->txns_received.quic_fast, + quic->txns_received.quic_frag, + quic->txns_abandoned, + quic->quic_packet_too_small, + quic->quic_txn_too_small, + quic->quic_txn_too_large, + quic->non_quic_packet_too_small, + quic->non_quic_packet_too_large, + quic->received_packets, + quic->received_bytes, + quic->sent_packets, + quic->sent_bytes, + quic->connections_active, + quic->connections_created, + quic->connections_closed, + quic->connections_aborted, + quic->connections_timed_out, + quic->connections_retried, + quic->connection_error_no_slots, + quic->connection_error_retry_fail, + quic->pkt_crypto_failed, + quic->pkt_no_conn, + quic->pkt_tx_alloc_fail, + quic->handshakes_created, + quic->handshake_error_alloc_fail, + quic->stream_received_events, + quic->stream_received_bytes, + quic->received_frames.unknown, + quic->received_frames.ack, + quic->received_frames.reset_stream, + quic->received_frames.stop_sending, + quic->received_frames.crypto, + quic->received_frames.new_token, + quic->received_frames.stream, + quic->received_frames.max_data, + quic->received_frames.max_stream_data, + quic->received_frames.max_streams, + quic->received_frames.data_blocked, + quic->received_frames.stream_data_blocked, + quic->received_frames.streams_blocked, + quic->received_frames.new_conn_id, + quic->received_frames.retire_conn_id, + quic->received_frames.path_challenge, + quic->received_frames.path_response, + quic->received_frames.conn_close_quic, + quic->received_frames.conn_close_app, + quic->received_frames.handshake_done, + quic->received_frames.ping, + quic->received_frames.padding, + quic->ack_tx.noop, + quic->ack_tx.new, + quic->ack_tx.merged, + quic->ack_tx.drop, + quic->ack_tx.cancel, + quic->frame_fail_parse ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->quic_len-1UL 
) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"verify\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->verify_off+event->verify_len*sizeof(fd_event_metrics_sample_verify_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; iverify_len; i++ ) { + fd_event_metrics_sample_verify_t const * verify = ((fd_event_metrics_sample_verify_t const *)(((char*)event)+event->verify_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transaction_parse_failure\":%lu," + "\"transaction_dedup_failure\":%lu," + "\"transaction_verify_failure\":%lu" + "}", + verify->transaction_parse_failure, + verify->transaction_dedup_failure, + verify->transaction_verify_failure ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->verify_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"dedup\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->dedup_off+event->dedup_len*sizeof(fd_event_metrics_sample_dedup_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; idedup_len; i++ ) { + 
fd_event_metrics_sample_dedup_t const * dedup = ((fd_event_metrics_sample_dedup_t const *)(((char*)event)+event->dedup_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transaction_dedup_failure\":%lu," + "\"gossiped_votes_received\":%lu" + "}", + dedup->transaction_dedup_failure, + dedup->gossiped_votes_received ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->dedup_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"resolv\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->resolv_off+event->resolv_len*sizeof(fd_event_metrics_sample_resolv_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; iresolv_len; i++ ) { + fd_event_metrics_sample_resolv_t const * resolv = ((fd_event_metrics_sample_resolv_t const *)(((char*)event)+event->resolv_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"no_bank_drop\":%lu," + "\"lut_resolved\":{" + "\"invalid_lookup_index\":%lu," + "\"account_uninitialized\":%lu," + "\"invalid_account_data\":%lu," + "\"invalid_account_owner\":%lu," + "\"account_not_found\":%lu," + "\"success\":%lu" + "}," + "\"blockhash_expired\":%lu," + "\"blockhash_unknown\":%lu" + "}", + resolv->no_bank_drop, + resolv->lut_resolved.invalid_lookup_index, + resolv->lut_resolved.account_uninitialized, + resolv->lut_resolved.invalid_account_data, + resolv->lut_resolved.invalid_account_owner, + resolv->lut_resolved.account_not_found, + resolv->lut_resolved.success, + 
resolv->blockhash_expired, + resolv->blockhash_unknown ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->resolv_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"pack\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->pack_off+event->pack_len*sizeof(fd_event_metrics_sample_pack_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ipack_len; i++ ) { + fd_event_metrics_sample_pack_t const * pack = ((fd_event_metrics_sample_pack_t const *)(((char*)event)+event->pack_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"normal_transaction_received\":%lu," + "\"transaction_inserted\":{" + "\"bundle_blacklist\":%lu," + "\"write_sysvar\":%lu," + "\"estimation_fail\":%lu," + "\"duplicate_account\":%lu," + "\"too_many_accounts\":%lu," + "\"too_large\":%lu," + "\"expired\":%lu," + "\"addr_lut\":%lu," + "\"unaffordable\":%lu," + "\"duplicate\":%lu," + "\"priority\":%lu," + "\"nonvote_add\":%lu," + "\"vote_add\":%lu," + "\"nonvote_replace\":%lu," + "\"vote_replace\":%lu" + "}," + "\"metric_timing\":{" + "\"no_txn_no_bank_no_leader_no_microblock\":%lu," + "\"txn_no_bank_no_leader_no_microblock\":%lu," + "\"no_txn_bank_no_leader_no_microblock\":%lu," + "\"txn_bank_no_leader_no_microblock\":%lu," + "\"no_txn_no_bank_leader_no_microblock\":%lu," + "\"txn_no_bank_leader_no_microblock\":%lu," + "\"no_txn_bank_leader_no_microblock\":%lu," + "\"txn_bank_leader_no_microblock\":%lu," + "\"no_txn_no_bank_no_leader_microblock\":%lu," 
+ "\"txn_no_bank_no_leader_microblock\":%lu," + "\"no_txn_bank_no_leader_microblock\":%lu," + "\"txn_bank_no_leader_microblock\":%lu," + "\"no_txn_no_bank_leader_microblock\":%lu," + "\"txn_no_bank_leader_microblock\":%lu," + "\"no_txn_bank_leader_microblock\":%lu," + "\"txn_bank_leader_microblock\":%lu" + "}," + "\"transaction_dropped_from_extra\":%lu," + "\"transaction_inserted_to_extra\":%lu," + "\"transaction_inserted_from_extra\":%lu," + "\"transaction_expired\":%lu," + "\"available_transactions\":%lu," + "\"available_vote_transactions\":%lu," + "\"pending_transactions_heap_size\":%lu," + "\"conflicting_transactions\":%lu," + "\"smallest_pending_transaction\":%lu," + "\"microblock_per_block_limit\":%lu," + "\"data_per_block_limit\":%lu," + "\"transaction_schedule\":{" + "\"taken\":%lu," + "\"cu_limit\":%lu," + "\"fast_path\":%lu," + "\"byte_limit\":%lu," + "\"write_cost\":%lu," + "\"slow_path\":%lu" + "}," + "\"cus_consumed_in_block\":%lu," + "\"delete_missed\":%lu," + "\"delete_hit\":%lu" + "}", + pack->normal_transaction_received, + pack->transaction_inserted.bundle_blacklist, + pack->transaction_inserted.write_sysvar, + pack->transaction_inserted.estimation_fail, + pack->transaction_inserted.duplicate_account, + pack->transaction_inserted.too_many_accounts, + pack->transaction_inserted.too_large, + pack->transaction_inserted.expired, + pack->transaction_inserted.addr_lut, + pack->transaction_inserted.unaffordable, + pack->transaction_inserted.duplicate, + pack->transaction_inserted.priority, + pack->transaction_inserted.nonvote_add, + pack->transaction_inserted.vote_add, + pack->transaction_inserted.nonvote_replace, + pack->transaction_inserted.vote_replace, + pack->metric_timing.no_txn_no_bank_no_leader_no_microblock, + pack->metric_timing.txn_no_bank_no_leader_no_microblock, + pack->metric_timing.no_txn_bank_no_leader_no_microblock, + pack->metric_timing.txn_bank_no_leader_no_microblock, + pack->metric_timing.no_txn_no_bank_leader_no_microblock, + 
pack->metric_timing.txn_no_bank_leader_no_microblock, + pack->metric_timing.no_txn_bank_leader_no_microblock, + pack->metric_timing.txn_bank_leader_no_microblock, + pack->metric_timing.no_txn_no_bank_no_leader_microblock, + pack->metric_timing.txn_no_bank_no_leader_microblock, + pack->metric_timing.no_txn_bank_no_leader_microblock, + pack->metric_timing.txn_bank_no_leader_microblock, + pack->metric_timing.no_txn_no_bank_leader_microblock, + pack->metric_timing.txn_no_bank_leader_microblock, + pack->metric_timing.no_txn_bank_leader_microblock, + pack->metric_timing.txn_bank_leader_microblock, + pack->transaction_dropped_from_extra, + pack->transaction_inserted_to_extra, + pack->transaction_inserted_from_extra, + pack->transaction_expired, + pack->available_transactions, + pack->available_vote_transactions, + pack->pending_transactions_heap_size, + pack->conflicting_transactions, + pack->smallest_pending_transaction, + pack->microblock_per_block_limit, + pack->data_per_block_limit, + pack->transaction_schedule.taken, + pack->transaction_schedule.cu_limit, + pack->transaction_schedule.fast_path, + pack->transaction_schedule.byte_limit, + pack->transaction_schedule.write_cost, + pack->transaction_schedule.slow_path, + pack->cus_consumed_in_block, + pack->delete_missed, + pack->delete_hit ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->pack_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"bank\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( 
event->bank_off+event->bank_len*sizeof(fd_event_metrics_sample_bank_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ibank_len; i++ ) { + fd_event_metrics_sample_bank_t const * bank = ((fd_event_metrics_sample_bank_t const *)(((char*)event)+event->bank_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transaction_sanitize_failure\":%lu," + "\"transaction_not_executed_failure\":%lu," + "\"precompile_verify_failure\":%lu," + "\"slot_acquire\":{" + "\"success\":%lu," + "\"too_high\":%lu," + "\"too_low\":%lu" + "}," + "\"transaction_load_address_tables\":{" + "\"success\":%lu," + "\"slot_hashes_sysvar_not_found\":%lu," + "\"account_not_found\":%lu," + "\"invalid_account_owner\":%lu," + "\"invalid_account_data\":%lu," + "\"invalid_index\":%lu" + "}," + "\"transaction_result\":{" + "\"success\":%lu," + "\"account_in_use\":%lu," + "\"account_loaded_twice\":%lu," + "\"account_not_found\":%lu," + "\"program_account_not_found\":%lu," + "\"insufficient_funds_for_fee\":%lu," + "\"invalid_account_for_fee\":%lu," + "\"already_processed\":%lu," + "\"blockhash_not_found\":%lu," + "\"instruction_error\":%lu," + "\"call_chain_too_deep\":%lu," + "\"missing_signature_for_fee\":%lu," + "\"invalid_account_index\":%lu," + "\"signature_failure\":%lu," + "\"invalid_program_for_execution\":%lu," + "\"sanitize_failure\":%lu," + "\"cluster_maintenance\":%lu," + "\"account_borrow_outstanding\":%lu," + "\"would_exceed_max_block_cost_limit\":%lu," + "\"unsupported_version\":%lu," + "\"invalid_writable_account\":%lu," + "\"would_exceed_max_account_cost_limit\":%lu," + "\"would_exceed_account_data_block_limit\":%lu," + "\"too_many_account_locks\":%lu," + "\"address_lookup_table_not_found\":%lu," + "\"invalid_address_lookup_table_owner\":%lu," + "\"invalid_address_lookup_table_data\":%lu," + "\"invalid_address_lookup_table_index\":%lu," + "\"invalid_rent_paying_account\":%lu," + "\"would_exceed_max_vote_cost_limit\":%lu," + 
"\"would_exceed_account_data_total_limit\":%lu," + "\"duplicate_instruction\":%lu," + "\"insufficient_funds_for_rent\":%lu," + "\"max_loaded_accounts_data_size_exceeded\":%lu," + "\"invalid_loaded_accounts_data_size_limit\":%lu," + "\"resanitization_needed\":%lu," + "\"program_execution_temporarily_restricted\":%lu," + "\"unbalanced_transaction\":%lu," + "\"program_cache_hit_max_limit\":%lu" + "}," + "\"processing_failed\":%lu," + "\"fee_only_transactions\":%lu," + "\"executed_failed_transactions\":%lu," + "\"successful_transactions\":%lu," + "\"cost_model_undercount\":%lu" + "}", + bank->transaction_sanitize_failure, + bank->transaction_not_executed_failure, + bank->precompile_verify_failure, + bank->slot_acquire.success, + bank->slot_acquire.too_high, + bank->slot_acquire.too_low, + bank->transaction_load_address_tables.success, + bank->transaction_load_address_tables.slot_hashes_sysvar_not_found, + bank->transaction_load_address_tables.account_not_found, + bank->transaction_load_address_tables.invalid_account_owner, + bank->transaction_load_address_tables.invalid_account_data, + bank->transaction_load_address_tables.invalid_index, + bank->transaction_result.success, + bank->transaction_result.account_in_use, + bank->transaction_result.account_loaded_twice, + bank->transaction_result.account_not_found, + bank->transaction_result.program_account_not_found, + bank->transaction_result.insufficient_funds_for_fee, + bank->transaction_result.invalid_account_for_fee, + bank->transaction_result.already_processed, + bank->transaction_result.blockhash_not_found, + bank->transaction_result.instruction_error, + bank->transaction_result.call_chain_too_deep, + bank->transaction_result.missing_signature_for_fee, + bank->transaction_result.invalid_account_index, + bank->transaction_result.signature_failure, + bank->transaction_result.invalid_program_for_execution, + bank->transaction_result.sanitize_failure, + bank->transaction_result.cluster_maintenance, + 
bank->transaction_result.account_borrow_outstanding, + bank->transaction_result.would_exceed_max_block_cost_limit, + bank->transaction_result.unsupported_version, + bank->transaction_result.invalid_writable_account, + bank->transaction_result.would_exceed_max_account_cost_limit, + bank->transaction_result.would_exceed_account_data_block_limit, + bank->transaction_result.too_many_account_locks, + bank->transaction_result.address_lookup_table_not_found, + bank->transaction_result.invalid_address_lookup_table_owner, + bank->transaction_result.invalid_address_lookup_table_data, + bank->transaction_result.invalid_address_lookup_table_index, + bank->transaction_result.invalid_rent_paying_account, + bank->transaction_result.would_exceed_max_vote_cost_limit, + bank->transaction_result.would_exceed_account_data_total_limit, + bank->transaction_result.duplicate_instruction, + bank->transaction_result.insufficient_funds_for_rent, + bank->transaction_result.max_loaded_accounts_data_size_exceeded, + bank->transaction_result.invalid_loaded_accounts_data_size_limit, + bank->transaction_result.resanitization_needed, + bank->transaction_result.program_execution_temporarily_restricted, + bank->transaction_result.unbalanced_transaction, + bank->transaction_result.program_cache_hit_max_limit, + bank->processing_failed, + bank->fee_only_transactions, + bank->executed_failed_transactions, + bank->successful_transactions, + bank->cost_model_undercount ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->bank_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, 
"\"shred\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->shred_off+event->shred_len*sizeof(fd_event_metrics_sample_shred_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; ishred_len; i++ ) { + fd_event_metrics_sample_shred_t const * shred = ((fd_event_metrics_sample_shred_t const *)(((char*)event)+event->shred_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"microblocks_abandoned\":%lu," + "\"shred_processed\":{" + "\"bad_slot\":%lu," + "\"parse_failed\":%lu," + "\"rejected\":%lu," + "\"ignored\":%lu," + "\"okay\":%lu," + "\"completes\":%lu" + "}," + "\"fec_set_spilled\":%lu," + "\"shred_rejected_initial\":%lu," + "\"fec_rejected_fatal\":%lu" + "}", + shred->microblocks_abandoned, + shred->shred_processed.bad_slot, + shred->shred_processed.parse_failed, + shred->shred_processed.rejected, + shred->shred_processed.ignored, + shred->shred_processed.okay, + shred->shred_processed.completes, + shred->fec_set_spilled, + shred->shred_rejected_initial, + shred->fec_rejected_fatal ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->shred_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "],"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "\"store\":[" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_UNLIKELY( event->store_off+event->store_len*sizeof(fd_event_metrics_sample_store_t)>event_len ) ) return FD_EVENT_FORMAT_INVALID; + for( ulong i=0UL; istore_len; i++ ) { + fd_event_metrics_sample_store_t const * store = 
((fd_event_metrics_sample_store_t const *)(((char*)event)+event->store_off))+i; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, + "{" + "\"transactions_inserted\":%lu" + "}", + store->transactions_inserted ); + + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + if( FD_LIKELY( i!=event->store_len-1UL ) ) { + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ","); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + } + } + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "]"); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + + return (long)off; +} + +long +fd_event_format( fd_event_common_t const * common, + ulong event_type, + fd_event_t const * event, + ulong event_len, + char * buffer, + ulong buffer_len ) { + ulong off = 0UL; + ulong printed; + int success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "{\"kind\":\"%s\",\"common\":", fd_event_type_str( event_type ) ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + long printed2 = format_common( common, buffer+off, buffer_len-off ); + if( FD_UNLIKELY( printed2<0 ) ) return printed2; + off += (ulong)printed2; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, ",\"event\":{" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + switch( event_type ) { + case FD_EVENT_METRICS_SAMPLE: + printed2 = format_metrics_sample( &event->metrics_sample, event_len, buffer+off, buffer_len-off ); + break; + default: + return FD_EVENT_FORMAT_INVALID; + } + + if( FD_UNLIKELY( printed2<0 ) ) return printed2; + off += (ulong)printed2; + + success = fd_cstr_printf_check( buffer+off, buffer_len-off, &printed, "}}" ); + if( FD_UNLIKELY( !success ) ) return FD_EVENT_FORMAT_OVERFLOW; + off += printed; + + return (long)off; +} diff --git 
a/src/disco/metrics/generated/fd_event.h b/src/disco/metrics/generated/fd_event.h new file mode 100644 index 0000000000..88f927eaad --- /dev/null +++ b/src/disco/metrics/generated/fd_event.h @@ -0,0 +1,768 @@ +#ifndef HEADER_fd_src_disco_events_generated_fd_event_h +#define HEADER_fd_src_disco_events_generated_fd_event_h + +#include "../../fd_disco_base.h" + +#define FD_EVENT_METRICS_SAMPLE (1UL) + +static inline char const * +fd_event_type_str( ulong event_type ) { + switch( event_type ) { + case FD_EVENT_METRICS_SAMPLE: return "metrics_sample"; + default: return "unknown"; + } +} + +#define FD_EVENT_COMMON_CLUSTER_MAINNET (1) +#define FD_EVENT_COMMON_CLUSTER_DEVNET (2) +#define FD_EVENT_COMMON_CLUSTER_TESTNET (3) +#define FD_EVENT_COMMON_CLUSTER_DEVELOPMENT (4) +#define FD_EVENT_COMMON_CLUSTER_PYTHNET (5) +#define FD_EVENT_COMMON_CLUSTER_PYTHTEST (6) + +static inline char const * +fd_event_common_cluster_str( uchar value ) { + switch( value ) { + case FD_EVENT_COMMON_CLUSTER_MAINNET: return "mainnet"; + case FD_EVENT_COMMON_CLUSTER_DEVNET: return "devnet"; + case FD_EVENT_COMMON_CLUSTER_TESTNET: return "testnet"; + case FD_EVENT_COMMON_CLUSTER_DEVELOPMENT: return "development"; + case FD_EVENT_COMMON_CLUSTER_PYTHNET: return "pythnet"; + case FD_EVENT_COMMON_CLUSTER_PYTHTEST: return "pythtest"; + default: return "unknown"; + } +} + +#define FD_EVENT_COMMON_CLIENT_AGAVE (1) +#define FD_EVENT_COMMON_CLIENT_FRANKENDANCER (2) +#define FD_EVENT_COMMON_CLIENT_FIREDANCER (3) + +static inline char const * +fd_event_common_client_str( uchar value ) { + switch( value ) { + case FD_EVENT_COMMON_CLIENT_AGAVE: return "agave"; + case FD_EVENT_COMMON_CLIENT_FRANKENDANCER: return "frankendancer"; + case FD_EVENT_COMMON_CLIENT_FIREDANCER: return "firedancer"; + default: return "unknown"; + } +} + +#define FD_EVENT_COMMON_OS_LINUX (1) + +static inline char const * +fd_event_common_os_str( uchar value ) { + switch( value ) { + case FD_EVENT_COMMON_OS_LINUX: return "linux"; + 
default: return "unknown"; + } +} + +#define FD_EVENT_METRICS_SAMPLE_REASON_PERIODIC (1) +#define FD_EVENT_METRICS_SAMPLE_REASON_LEADER_START (2) +#define FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END_START (3) +#define FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END (4) + +static inline char const * +fd_event_metrics_sample_reason_str( uchar value ) { + switch( value ) { + case FD_EVENT_METRICS_SAMPLE_REASON_PERIODIC: return "periodic"; + case FD_EVENT_METRICS_SAMPLE_REASON_LEADER_START: return "leader_start"; + case FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END_START: return "leader_end_start"; + case FD_EVENT_METRICS_SAMPLE_REASON_LEADER_END: return "leader_end"; + default: return "unknown"; + } +} + +/* Fields that are common to and reported by all events. */ +struct fd_event_common { + /* The timestamp that the event was generated on the validator client, with + nanosecond precision. Timestamp is provided by the validator, and + might be skewed. */ + long timestamp; + + /* The base58 encoded validator identity public key. The validator must + prove posession of the identity and metrics reported by identity + are authenticated. */ + char identity[45]; + + /* The cluster that the validator is running on. One of "mainnet", + "devnet", "testnet", "development", or "unknown". Must be one of + FD_EVENT_COMMON_CLUSTER_* */ + uchar cluster; + + /* The version of the validator software that is running. */ + char version[12]; + + /* The client that the validator is running. Currently always + "frankendancer". Must be one of FD_EVENT_COMMON_CLIENT_* */ + uchar client; + + /* The operating system that the validator is running. Currently always + "linux". Must be one of FD_EVENT_COMMON_OS_* */ + uchar os; + + /* A unique identifier for this instance of the validator. Randomly + generated when the validator is booted. */ + ulong instance_id; + + /* A unique identifier for the host running this validator. Will remain the + same between validator and machine restarts. 
*/ + ulong machine_id; + + /* A unique identifier for the boot identifier of the host running this + validator. Will remain the same between validator restarts, but + reset on machine restarts. */ + ulong boot_id; + +}; + +typedef struct fd_event_common fd_event_common_t; + +struct fd_event_metrics_sample_tile { + /* Tile type. */ + char kind[21]; + + /* ID of the tile within the type. */ + ushort kind_id; + + /* The number of involuntary context switches. */ + ulong context_switch_involuntary_count; + + /* The number of voluntary context switches. */ + ulong context_switch_voluntary_count; + + /* The current status of the tile. 0 is booting, 1 is running. */ + ulong status; + + /* The last UNIX timestamp in nanoseconds that the tile heartbeated. */ + ulong heartbeat; + + /* Whether the tile is currently backpressured or not, either 1 or 0. */ + ulong in_backpressure; + + /* Number of times the times the tile has had to wait for one of more + consumers to catch up to resume publishing. */ + ulong backpressure_count; + + /* Mutually exclusive and exhaustive duration of time the tile spent in + each of the regimes. */ + struct { + ulong caught_up_housekeeping; /* Caught up + Housekeeping */ + ulong processing_housekeeping; /* Processing + Housekeeping */ + ulong backpressure_housekeeping; /* Backpressure + Housekeeping */ + ulong caught_up_prefrag; /* Caught up + Prefrag */ + ulong processing_prefrag; /* Processing + Prefrag */ + ulong backpressure_prefrag; /* Backpressure + Prefrag */ + ulong caught_up_postfrag; /* Caught up + Postfrag */ + ulong processing_postfrag; /* Processing + Postfrag */ + } regime_duration_nanos; + +}; + +typedef struct fd_event_metrics_sample_tile fd_event_metrics_sample_tile_t; + +struct fd_event_metrics_sample_link { + /* Tile type. */ + char kind[21]; + + /* ID of the tile within the type. */ + ushort kind_id; + + /* Link type. */ + char link_kind[21]; + + /* ID of the link within the link kind. 
*/ + ushort link_kind_id; + + /* The number of times the link reader has consumed a fragment. */ + ulong consumed_count; + + /* The total number of bytes read by the link consumer. */ + ulong consumed_size_bytes; + + /* The number of fragments that were filtered and not consumed. */ + ulong filtered_count; + + /* The total number of bytes read by the link consumer that were filtered. */ + ulong filtered_size_bytes; + + /* The number of times the link has been overrun while polling. */ + ulong overrun_polling_count; + + /* The number of fragments the link has not processed because it was + overrun while polling. */ + ulong overrun_polling_frag_count; + + /* The number of input overruns detected while reading metadata by the + consumer. */ + ulong overrun_reading_count; + + /* The number of fragments the link has not processed because it was + overrun while reading. */ + ulong overrun_reading_frag_count; + + /* The number of times the consumer was detected as rate limiting consumer + by the producer. */ + ulong slow_count; + +}; + +typedef struct fd_event_metrics_sample_link fd_event_metrics_sample_link_t; + +struct fd_event_metrics_sample_net { + /* Number of IP packets received. */ + ulong received_packets; + + /* Total bytes received (including IP, UDP headers). */ + ulong received_bytes; + + /* Number of IP packets sent. */ + ulong sent_packets; + + /* Total bytes sent (including IP, UDP headers). */ + ulong sent_bytes; + + /* Number of packets dropped because the RX completion queue was empty. + This is only reported for net tile 0, since the measurement is + across all RX queues. */ + ulong xdp_rx_dropped_ring_full; + + /* Number of packets dropped for other reasons. This is only reported for + net tile 0, since the measurement is across all RX queues. */ + ulong xdp_rx_dropped_other; + + /* Number of packets dropped because the TX submission queue was empty. + This is reported for all net tiles. 
*/ + ulong tx_dropped; + +}; + +typedef struct fd_event_metrics_sample_net fd_event_metrics_sample_net_t; + +struct fd_event_metrics_sample_quic { + /* Count of txns overrun before reassembled (too small + txn_reassembly_count). */ + ulong txns_overrun; + + /* Count of fragmented txn receive ops started. */ + ulong txn_reasms_started; + + /* Number of fragmented txn receive ops currently active. */ + ulong txn_reasms_active; + + /* Count of txn frags received */ + ulong frags_ok; + + /* Count of txn frags dropped due to data gap */ + ulong frags_gap; + + /* Count of txn frags dropped due to dup (stream already completed) */ + ulong frags_dup; + + /* Count of txns received via TPU. */ + struct { + ulong udp; /* TPU/UDP */ + ulong quic_fast; /* TPU/QUIC unfragmented */ + ulong quic_frag; /* TPU/QUIC fragmented */ + } txns_received; + + /* Count of txns abandoned because a conn was lost. */ + ulong txns_abandoned; + + /* Count of packets received on the QUIC port that were too small to be a + valid IP packet. */ + ulong quic_packet_too_small; + + /* Count of txns received via QUIC dropped because they were too small. */ + ulong quic_txn_too_small; + + /* Count of txns received via QUIC dropped because they were too large. */ + ulong quic_txn_too_large; + + /* Count of packets received on the non-QUIC port that were too small to be + a valid IP packet. */ + ulong non_quic_packet_too_small; + + /* Count of packets received on the non-QUIC port that were too large to be + a valid transaction. */ + ulong non_quic_packet_too_large; + + /* Number of IP packets received. */ + ulong received_packets; + + /* Total bytes received (including IP, UDP, QUIC headers). */ + ulong received_bytes; + + /* Number of IP packets sent. */ + ulong sent_packets; + + /* Total bytes sent (including IP, UDP, QUIC headers). */ + ulong sent_bytes; + + /* The number of currently active QUIC connections. */ + ulong connections_active; + + /* The total number of connections that have been created. 
*/ + ulong connections_created; + + /* Number of connections gracefully closed. */ + ulong connections_closed; + + /* Number of connections aborted. */ + ulong connections_aborted; + + /* Number of connections timed out. */ + ulong connections_timed_out; + + /* Number of connections established with retry. */ + ulong connections_retried; + + /* Number of connections that failed to create due to lack of slots. */ + ulong connection_error_no_slots; + + /* Number of connections that failed during retry (e.g. invalid token). */ + ulong connection_error_retry_fail; + + /* Number of packets that failed decryption. */ + ulong pkt_crypto_failed; + + /* Number of packets with an unknown connection ID. */ + ulong pkt_no_conn; + + /* Number of packets failed to send because of metadata alloc fail. */ + ulong pkt_tx_alloc_fail; + + /* Number of handshake flows created. */ + ulong handshakes_created; + + /* Number of handshakes dropped due to alloc fail. */ + ulong handshake_error_alloc_fail; + + /* Number of stream RX events. */ + ulong stream_received_events; + + /* Total stream payload bytes received. */ + ulong stream_received_bytes; + + /* Number of QUIC frames received. 
*/ + struct { + ulong unknown; /* Unknown frame type */ + ulong ack; /* ACK frame */ + ulong reset_stream; /* RESET_STREAM frame */ + ulong stop_sending; /* STOP_SENDING frame */ + ulong crypto; /* CRYPTO frame */ + ulong new_token; /* NEW_TOKEN frame */ + ulong stream; /* STREAM frame */ + ulong max_data; /* MAX_DATA frame */ + ulong max_stream_data; /* MAX_STREAM_DATA frame */ + ulong max_streams; /* MAX_STREAMS frame */ + ulong data_blocked; /* DATA_BLOCKED frame */ + ulong stream_data_blocked; /* STREAM_DATA_BLOCKED frame */ + ulong streams_blocked; /* STREAMS_BLOCKED(bidi) frame */ + ulong new_conn_id; /* NEW_CONN_ID frame */ + ulong retire_conn_id; /* RETIRE_CONN_ID frame */ + ulong path_challenge; /* PATH_CHALLENGE frame */ + ulong path_response; /* PATH_RESPONSE frame */ + ulong conn_close_quic; /* CONN_CLOSE(transport) frame */ + ulong conn_close_app; /* CONN_CLOSE(app) frame */ + ulong handshake_done; /* HANDSHAKE_DONE frame */ + ulong ping; /* PING frame */ + ulong padding; /* PADDING frame */ + } received_frames; + + /* ACK events */ + struct { + ulong noop; /* non-ACK-eliciting packet */ + ulong new; /* new ACK range */ + ulong merged; /* merged into existing ACK range */ + ulong drop; /* out of buffers */ + ulong cancel; /* ACK suppressed by handler */ + } ack_tx; + + /* Number of QUIC frames failed to parse. 
*/ + ulong frame_fail_parse; + +}; + +typedef struct fd_event_metrics_sample_quic fd_event_metrics_sample_quic_t; + +struct fd_event_metrics_sample_verify { + /* Count of transactions that failed to parse */ + ulong transaction_parse_failure; + + /* Count of transactions that failed to deduplicate in the verify stage */ + ulong transaction_dedup_failure; + + /* Count of transactions that failed to deduplicate in the verify stage */ + ulong transaction_verify_failure; + +}; + +typedef struct fd_event_metrics_sample_verify fd_event_metrics_sample_verify_t; + +struct fd_event_metrics_sample_dedup { + /* Count of transactions that failed to deduplicate in the dedup stage */ + ulong transaction_dedup_failure; + + /* Count of simple vote transactions received over gossip instead of via + the normal TPU path */ + ulong gossiped_votes_received; + +}; + +typedef struct fd_event_metrics_sample_dedup fd_event_metrics_sample_dedup_t; + +struct fd_event_metrics_sample_resolv { + /* Count of transactions dropped because the bank was not available */ + ulong no_bank_drop; + + /* Count of address lookup tables resolved */ + struct { + ulong invalid_lookup_index; /* The transaction referenced an index in a LUT that didn't exist */ + ulong account_uninitialized; /* The account referenced as a LUT hasn't been initialized */ + ulong invalid_account_data; /* The account referenced as a LUT couldn't be parsed */ + ulong invalid_account_owner; /* The account referenced as a LUT wasn't owned by the ALUT program ID */ + ulong account_not_found; /* The account referenced as a LUT couldn't be found */ + ulong success; /* Resolved successfully */ + } lut_resolved; + + /* Count of transactions that failed to resolve because the blockhash was + expired */ + ulong blockhash_expired; + + /* Count of transactions with an unknown blockhash. These may be very + recent, very old, nonces, or bogus. 
*/ + ulong blockhash_unknown; + +}; + +typedef struct fd_event_metrics_sample_resolv fd_event_metrics_sample_resolv_t; + +struct fd_event_metrics_sample_pack { + /* Count of transactions received via the normal TPU path */ + ulong normal_transaction_received; + + /* Result of inserting a transaction into the pack object */ + struct { + ulong bundle_blacklist; /* Transaction uses an account on the bundle blacklist */ + ulong write_sysvar; /* Transaction tries to write to a sysvar */ + ulong estimation_fail; /* Estimating compute cost and/or fee failed */ + ulong duplicate_account; /* Transaction included an account address twice */ + ulong too_many_accounts; /* Transaction tried to load too many accounts */ + ulong too_large; /* Transaction requests too many CUs */ + ulong expired; /* Transaction already expired */ + ulong addr_lut; /* Transaction loaded accounts from a lookup table */ + ulong unaffordable; /* Fee payer's balance below transaction fee */ + ulong duplicate; /* Pack aware of transaction with same signature */ + ulong priority; /* Transaction's fee was too low given its compute unit requirement and other competing transactions */ + ulong nonvote_add; /* Transaction that was not a simple vote added to pending transactions */ + ulong vote_add; /* Simple vote transaction was added to pending transactions */ + ulong nonvote_replace; /* Transaction that was not a simple vote replaced a lower priority transaction */ + ulong vote_replace; /* Simple vote transaction replaced a lower priority transaction */ + } transaction_inserted; + + /* Time in nanos spent in each state */ + struct { + ulong no_txn_no_bank_no_leader_no_microblock; /* Pack had no transactions available, and wasn't leader */ + ulong txn_no_bank_no_leader_no_microblock; /* Pack had transactions available, but wasn't leader or had hit a limit */ + ulong no_txn_bank_no_leader_no_microblock; /* Pack had no transactions available, had banks but wasn't leader */ + ulong 
txn_bank_no_leader_no_microblock; /* Pack had transactions available, had banks but wasn't leader */ + ulong no_txn_no_bank_leader_no_microblock; /* Pack had no transactions available, and was leader but had no available banks */ + ulong txn_no_bank_leader_no_microblock; /* Pack had transactions available, was leader, but had no available banks */ + ulong no_txn_bank_leader_no_microblock; /* Pack had available banks but no transactions */ + ulong txn_bank_leader_no_microblock; /* Pack had banks and transactions available but couldn't schedule anything non-conflicting */ + ulong no_txn_no_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong txn_no_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong no_txn_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong txn_bank_no_leader_microblock; /* Pack scheduled a non-empty microblock while not leader */ + ulong no_txn_no_bank_leader_microblock; /* Pack scheduled a non-empty microblock but all banks were busy */ + ulong txn_no_bank_leader_microblock; /* Pack scheduled a non-empty microblock but all banks were busy */ + ulong no_txn_bank_leader_microblock; /* Pack scheduled a non-empty microblock and now has no transactions */ + ulong txn_bank_leader_microblock; /* Pack scheduled a non-empty microblock */ + } metric_timing; + + /* Transactions dropped from the extra transaction storage because it was + full */ + ulong transaction_dropped_from_extra; + + /* Transactions inserted into the extra transaction storage because pack's + primary storage was full */ + ulong transaction_inserted_to_extra; + + /* Transactions pulled from the extra transaction storage and inserted into + pack's primary storage */ + ulong transaction_inserted_from_extra; + + /* Transactions deleted from pack because their TTL expired */ + ulong transaction_expired; + + /* The total number of pending transactions in pack's pool 
that are + available to be scheduled */ + ulong available_transactions; + + /* The number of pending simple vote transactions in pack's pool that are + available to be scheduled */ + ulong available_vote_transactions; + + /* The maximum number of pending transactions that pack can consider. This + value is fixed at Firedancer startup but is a useful reference for + AvailableTransactions and AvailableVoteTransactions. */ + ulong pending_transactions_heap_size; + + /* The number of available transactions that are temporarily not being + considered due to account lock conflicts with many higher paying + transactions */ + ulong conflicting_transactions; + + /* A lower bound on the smallest non-vote transaction (in cost units) that + is immediately available for scheduling */ + ulong smallest_pending_transaction; + + /* The number of times pack did not pack a microblock because the limit on + microblocks/block had been reached */ + ulong microblock_per_block_limit; + + /* The number of times pack did not pack a microblock because it reached + reached the data per block limit at the start of trying to schedule + a microblock */ + ulong data_per_block_limit; + + /* Result of trying to consider a transaction for scheduling */ + struct { + ulong taken; /* Pack included the transaction in the microblock */ + ulong cu_limit; /* Pack skipped the transaction because it would have exceeded the block CU limit */ + ulong fast_path; /* Pack skipped the transaction because of account conflicts using the fast bitvector check */ + ulong byte_limit; /* Pack skipped the transaction because it would have exceeded the block data size limit */ + ulong write_cost; /* Pack skipped the transaction because it would have caused a writable account to exceed the per-account block write cost limit */ + ulong slow_path; /* Pack skipped the transaction because of account conflicts using the full slow check */ + } transaction_schedule; + + /* The number of cost units consumed in the current block, or 
0 if pack is + not currently packing a block */ + ulong cus_consumed_in_block; + + /* Count of attempts to delete a transaction that wasn't found */ + ulong delete_missed; + + /* Count of attempts to delete a transaction that was found and deleted */ + ulong delete_hit; + +}; + +typedef struct fd_event_metrics_sample_pack fd_event_metrics_sample_pack_t; + +struct fd_event_metrics_sample_bank { + /* Number of transactions that failed to sanitize. */ + ulong transaction_sanitize_failure; + + /* Number of transactions that did not execute. This is different than + transactions which fail to execute, which make it onto the chain. */ + ulong transaction_not_executed_failure; + + /* Number of transactions that failed precompile verification and thus will + not execute. */ + ulong precompile_verify_failure; + + /* Result of acquiring a slot. */ + struct { + ulong success; /* Success */ + ulong too_high; /* Too high */ + ulong too_low; /* Too low */ + } slot_acquire; + + /* Result of loading address lookup tables for a transaction. If there are + multiple errors for the transaction, only the first one is + reported. */ + struct { + ulong success; /* Success */ + ulong slot_hashes_sysvar_not_found; /* The slot hashes syvar could not be found. */ + ulong account_not_found; /* The account storing the address lookup table was deactivated or could not be found. */ + ulong invalid_account_owner; /* The account that owns the referenced lookup table is not the address lookup table program. */ + ulong invalid_account_data; /* The data for the referenced address lookup table is malformed. */ + ulong invalid_index; /* The referenced index in the address lookup table does not exist. */ + } transaction_load_address_tables; + + /* Result of loading and executing a transaction. */ + struct { + ulong success; /* Success */ + ulong account_in_use; /* An account is already being processed in another transaction in a way that does not support parallelism. 
*/ + ulong account_loaded_twice; /* A `Pubkey` appears twice in the transaction's `account_keys`. Instructions can reference `Pubkey`s more than once but the message must contain a list with no duplicate keys. */ + ulong account_not_found; /* Attempt to debit an account but found no record of a prior credit. */ + ulong program_account_not_found; /* Attempt to load a program that does not exist. */ + ulong insufficient_funds_for_fee; /* The fee payer `Pubkey` does not have sufficient balance to pay the fee to schedule the transaction. */ + ulong invalid_account_for_fee; /* This account may not be used to pay transaction fees. */ + ulong already_processed; /* The bank has seen this transaction before. This can occur under normal operation when a UDP packet is duplicated, as a user error from a client not updating its `recent_blockhash`, or as a double-spend attack. */ + ulong blockhash_not_found; /* The bank has not seen the given `recent_blockhash` or the transaction is too old and the `recent_blockhash` has been discarded. */ + ulong instruction_error; /* An error occurred while processing an instruction. */ + ulong call_chain_too_deep; /* Loader call chain is too deep. */ + ulong missing_signature_for_fee; /* Transaction requires a fee but has no signature present. */ + ulong invalid_account_index; /* Transaction contains an invalid account reference. */ + ulong signature_failure; /* Transaction did not pass signature verification. */ + ulong invalid_program_for_execution; /* This program may not be used for executing instructions. */ + ulong sanitize_failure; /* Transaction failed to sanitize accounts offsets correctly implies that account locks are not taken for this TX, and should not be unlocked. */ + ulong cluster_maintenance; /* Transactions are currently disabled due to cluster maintenance. */ + ulong account_borrow_outstanding; /* Transaction processing left an account with an outstanding borrowed reference. 
*/ + ulong would_exceed_max_block_cost_limit; /* Transaction would exceed max Block Cost Limit. */ + ulong unsupported_version; /* Transaction version is unsupported. */ + ulong invalid_writable_account; /* Transaction loads a writable account that cannot be written. */ + ulong would_exceed_max_account_cost_limit; /* Transaction would exceed max account limit within the block. */ + ulong would_exceed_account_data_block_limit; /* Transaction would exceed account data limit within the block. */ + ulong too_many_account_locks; /* Transaction locked too many accounts. */ + ulong address_lookup_table_not_found; /* Address lookup table not found. */ + ulong invalid_address_lookup_table_owner; /* Attempted to lookup addresses from an account owned by the wrong program. */ + ulong invalid_address_lookup_table_data; /* Attempted to lookup addresses from an invalid account. */ + ulong invalid_address_lookup_table_index; /* Address table lookup uses an invalid index. */ + ulong invalid_rent_paying_account; /* Transaction leaves an account with a lower balance than rent-exempt minimum. */ + ulong would_exceed_max_vote_cost_limit; /* Transaction would exceed max Vote Cost Limit. */ + ulong would_exceed_account_data_total_limit; /* Transaction would exceed total account data limit. */ + ulong duplicate_instruction; /* Transaction contains a duplicate instruction that is not allowed. */ + ulong insufficient_funds_for_rent; /* Transaction results in an account with insufficient funds for rent. */ + ulong max_loaded_accounts_data_size_exceeded; /* Transaction exceeded max loaded accounts data size cap. */ + ulong invalid_loaded_accounts_data_size_limit; /* LoadedAccountsDataSizeLimit set for transaction must be greater than 0. */ + ulong resanitization_needed; /* Sanitized transaction differed before/after feature activiation. Needs to be resanitized. */ + ulong program_execution_temporarily_restricted; /* Program execution is temporarily restricted on an account. 
*/ + ulong unbalanced_transaction; /* The total balance before the transaction does not equal the total balance after the transaction. */ + ulong program_cache_hit_max_limit; /* The total program cache size hit the maximum allowed limit. */ + } transaction_result; + + /* Count of transactions for which the processing stage failed and won't + land on chain */ + ulong processing_failed; + + /* Count of transactions that will land on chain but without executing */ + ulong fee_only_transactions; + + /* Count of transactions that execute on chain but failed */ + ulong executed_failed_transactions; + + /* Count of transactions that execute on chain and succeed */ + ulong successful_transactions; + + /* Count of transactions that used more CUs than the cost model should have + permitted them to */ + ulong cost_model_undercount; + +}; + +typedef struct fd_event_metrics_sample_bank fd_event_metrics_sample_bank_t; + +struct fd_event_metrics_sample_shred { + /* The number of microblocks that were abandoned because we switched slots + without finishing the current slot */ + ulong microblocks_abandoned; + + /* The result of processing a thread from the network */ + struct { + ulong bad_slot; /* Shred was for a slot for which we don't know the leader */ + ulong parse_failed; /* Shred parsing failed */ + ulong rejected; /* Shred was invalid for one of many reasons */ + ulong ignored; /* Shred was ignored because we had already received or reconstructed it */ + ulong okay; /* Shred accepted to an incomplete FEC set */ + ulong completes; /* Shred accepted and resulted in a valid, complete FEC set */ + } shred_processed; + + /* The number of FEC sets that were spilled because they didn't complete in + time and we needed space */ + ulong fec_set_spilled; + + /* The number shreds that were rejected before any resources were allocated + for the FEC set */ + ulong shred_rejected_initial; + + /* The number of FEC sets that were rejected for reasons that cause the + whole FEC set to 
become invalid */ + ulong fec_rejected_fatal; + +}; + +typedef struct fd_event_metrics_sample_shred fd_event_metrics_sample_shred_t; + +struct fd_event_metrics_sample_store { + /* Count of transactions produced while we were leader in the shreds that + have been inserted so far */ + ulong transactions_inserted; + +}; + +typedef struct fd_event_metrics_sample_store fd_event_metrics_sample_store_t; + +/* Metric data periodically sampled by the application. */ +struct fd_event_metrics_sample { + /* Reason the metrics snapshot was sampled. Must be one of + FD_EVENT_METRICS_SAMPLE_REASON_* */ + uchar reason; + + /* If the reason the sample was taken is because a leader was starting or + ending, this is the slot that was starting (or ending). If a leader + slot is both ending and starting (leader_end_start), this is the + slot which is starting. */ + ulong slot; + + /* Common metrics shared by all tiles */ + ulong tile_off; + ulong tile_len; + + /* Metrics for links between tiles. */ + ulong link_off; + ulong link_len; + + /* Metrics for net tiles. */ + ulong net_off; + ulong net_len; + + /* Metrics for quic tiles. */ + ulong quic_off; + ulong quic_len; + + /* Metrics for verify tiles. */ + ulong verify_off; + ulong verify_len; + + /* Metrics for dedup tiles. */ + ulong dedup_off; + ulong dedup_len; + + /* Metrics for resolv tiles. */ + ulong resolv_off; + ulong resolv_len; + + /* Metrics for pack tiles. */ + ulong pack_off; + ulong pack_len; + + /* Metrics for bank tiles. */ + ulong bank_off; + ulong bank_len; + + /* Metrics for shred tiles. */ + ulong shred_off; + ulong shred_len; + + /* Metrics for store tiles. 
*/ + ulong store_off; + ulong store_len; + +}; + +typedef struct fd_event_metrics_sample fd_event_metrics_sample_t; + +struct fd_event { + union { + fd_event_metrics_sample_t metrics_sample; + }; +}; + +typedef struct fd_event fd_event_t; + +#define FD_EVENT_FORMAT_OVERFLOW (-1) +#define FD_EVENT_FORMAT_INVALID (-2) + +long +fd_event_format( fd_event_common_t const * common, + ulong event_type, + fd_event_t const * event, + ulong event_len, + char * buffer, + ulong buffer_len ); + +#endif /* HEADER_fd_src_disco_events_generated_fd_event_h */ diff --git a/src/disco/metrics/generated/fd_event_metrics.h b/src/disco/metrics/generated/fd_event_metrics.h new file mode 100644 index 0000000000..d63d147b1f --- /dev/null +++ b/src/disco/metrics/generated/fd_event_metrics.h @@ -0,0 +1,108 @@ +#ifndef HEADER_fd_src_disco_events_generated_fd_event_metrics_h +#define HEADER_fd_src_disco_events_generated_fd_event_metrics_h + +#include "fd_event.h" +#include "../../metrics/fd_metrics.h" + +#include "../../topo/fd_topo.h" + +ulong +fd_event_metrics_footprint( fd_topo_t const * topo ) { + ulong l = FD_LAYOUT_INIT; l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_t ), sizeof( fd_event_metrics_sample_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_tile_t ), topo->tile_cnt*sizeof( fd_event_metrics_sample_tile_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_link_t ), fd_topo_polled_in_cnt( topo )*sizeof( fd_event_metrics_sample_link_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_tile_t ), fd_topo_tile_name_cnt( topo, "tile" )*sizeof( fd_event_metrics_sample_tile_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_net_t ), fd_topo_tile_name_cnt( topo, "net" )*sizeof( fd_event_metrics_sample_net_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_quic_t ), fd_topo_tile_name_cnt( topo, "quic" )*sizeof( fd_event_metrics_sample_quic_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( 
fd_event_metrics_sample_verify_t ), fd_topo_tile_name_cnt( topo, "verify" )*sizeof( fd_event_metrics_sample_verify_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_dedup_t ), fd_topo_tile_name_cnt( topo, "dedup" )*sizeof( fd_event_metrics_sample_dedup_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_resolv_t ), fd_topo_tile_name_cnt( topo, "resolv" )*sizeof( fd_event_metrics_sample_resolv_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_pack_t ), fd_topo_tile_name_cnt( topo, "pack" )*sizeof( fd_event_metrics_sample_pack_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_bank_t ), fd_topo_tile_name_cnt( topo, "bank" )*sizeof( fd_event_metrics_sample_bank_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_shred_t ), fd_topo_tile_name_cnt( topo, "shred" )*sizeof( fd_event_metrics_sample_shred_t ) ); + l = FD_LAYOUT_APPEND( l, alignof( fd_event_metrics_sample_store_t ), fd_topo_tile_name_cnt( topo, "store" )*sizeof( fd_event_metrics_sample_store_t ) ); + return l; +} + +void +fd_event_metrics_layout( fd_topo_t const * topo, + uchar * buffer ) { + ulong off = 0UL; + + fd_event_metrics_sample_t * metrics = (fd_event_metrics_sample_t *)(buffer+off); + off += sizeof( fd_event_metrics_sample_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_tile_t ) ); + metrics->tile_off = off; + metrics->tile_len = fd_topo_tile_name_cnt( topo, "tile" ); + off += fd_topo_tile_name_cnt( topo, "tile" )*sizeof( fd_event_metrics_sample_tile_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_link_t ) ); + metrics->link_off = off; + metrics->link_len = fd_topo_polled_in_cnt( topo ); + off += fd_topo_polled_in_cnt( topo )*sizeof( fd_event_metrics_sample_link_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_net_t ) ); + metrics->net_off = off; + metrics->net_len = fd_topo_tile_name_cnt( topo, "net" ); + off += fd_topo_tile_name_cnt( topo, "net" 
)*sizeof( fd_event_metrics_sample_net_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_quic_t ) ); + metrics->quic_off = off; + metrics->quic_len = fd_topo_tile_name_cnt( topo, "quic" ); + off += fd_topo_tile_name_cnt( topo, "quic" )*sizeof( fd_event_metrics_sample_quic_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_verify_t ) ); + metrics->verify_off = off; + metrics->verify_len = fd_topo_tile_name_cnt( topo, "verify" ); + off += fd_topo_tile_name_cnt( topo, "verify" )*sizeof( fd_event_metrics_sample_verify_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_dedup_t ) ); + metrics->dedup_off = off; + metrics->dedup_len = fd_topo_tile_name_cnt( topo, "dedup" ); + off += fd_topo_tile_name_cnt( topo, "dedup" )*sizeof( fd_event_metrics_sample_dedup_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_resolv_t ) ); + metrics->resolv_off = off; + metrics->resolv_len = fd_topo_tile_name_cnt( topo, "resolv" ); + off += fd_topo_tile_name_cnt( topo, "resolv" )*sizeof( fd_event_metrics_sample_resolv_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_pack_t ) ); + metrics->pack_off = off; + metrics->pack_len = fd_topo_tile_name_cnt( topo, "pack" ); + off += fd_topo_tile_name_cnt( topo, "pack" )*sizeof( fd_event_metrics_sample_pack_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_bank_t ) ); + metrics->bank_off = off; + metrics->bank_len = fd_topo_tile_name_cnt( topo, "bank" ); + off += fd_topo_tile_name_cnt( topo, "bank" )*sizeof( fd_event_metrics_sample_bank_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_shred_t ) ); + metrics->shred_off = off; + metrics->shred_len = fd_topo_tile_name_cnt( topo, "shred" ); + off += fd_topo_tile_name_cnt( topo, "shred" )*sizeof( fd_event_metrics_sample_shred_t ); + + off = fd_ulong_align_up( off, alignof( fd_event_metrics_sample_store_t ) ); + metrics->store_off = off; + metrics->store_len 
= fd_topo_tile_name_cnt( topo, "store" ); + off += fd_topo_tile_name_cnt( topo, "store" )*sizeof( fd_event_metrics_sample_store_t ); + + ulong link_idx = 0UL; + for( ulong i=0UL; itile_cnt; i++ ) { + fd_event_metrics_sample_tile_t * tile = (fd_event_metrics_sample_tile_t *)(buffer+((fd_event_metrics_sample_t*)buffer)->tile_off)+i; + strncpy( tile->kind, topo->tiles[ i ].name, sizeof( tile->kind ) ); + tile->kind_id = (ushort)topo->tiles[ i ].kind_id; + + for( ulong j=0UL; jtiles[ i ].in_cnt; j++ ) { + if( FD_UNLIKELY( !topo->tiles[ i ].in_link_poll[ j ] ) ) continue; + fd_event_metrics_sample_link_t * link = (fd_event_metrics_sample_link_t *)(buffer+((fd_event_metrics_sample_t*)buffer)->link_off)+link_idx; + strncpy( link->kind, topo->tiles[ i ].name, sizeof( link->kind ) ); + link->kind_id = (ushort)topo->tiles[ i ].kind_id; + strncpy( link->link_kind, topo->links[ topo->tiles[ i ].in_link_id[ j ] ].name, sizeof( link->link_kind ) ); + link->link_kind_id = (ushort)topo->links[ topo->tiles[ i ].in_link_id[ j ] ].kind_id; + link_idx++; + } + } +} + +#endif /* HEADER_fd_src_disco_events_generated_fd_event_metrics_h */ diff --git a/src/disco/metrics/generated/fd_metric_event_snap.c b/src/disco/metrics/generated/fd_metric_event_snap.c new file mode 100644 index 0000000000..d88e1ea358 --- /dev/null +++ b/src/disco/metrics/generated/fd_metric_event_snap.c @@ -0,0 +1,363 @@ +/* THIS FILE IS GENERATED BY gen_metrics.py. DO NOT HAND EDIT. 
*/ +#include "fd_metric_event_snap.h" + +#include "../fd_metrics.h" +static inline ulong +find_producer_out_idx( fd_topo_t * topo, + fd_topo_tile_t * producer, + fd_topo_tile_t * consumer, + ulong consumer_in_idx ) { + ulong reliable_cons_cnt = 0UL; + for( ulong i=0UL; itile_cnt; i++ ) { + fd_topo_tile_t * consumer_tile = &topo->tiles[ i ]; + for( ulong j=0UL; jin_cnt; j++ ) { + for( ulong k=0UL; kout_cnt; k++ ) { + if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==producer->out_link_id[ k ] && consumer_tile->in_link_reliable[ j ] ) ) { + if( FD_UNLIKELY( consumer==consumer_tile && consumer_in_idx==j ) ) return reliable_cons_cnt; + reliable_cons_cnt++; + } + } + } + } + + return ULONG_MAX; +} +void +fd_metric_event_snap( fd_topo_t * topo, + fd_event_metrics_sample_t * event ) { + uchar * buffer = (uchar *)event; + + for( ulong i=0UL; itile_len; i++ ) { + fd_event_metrics_sample_tile_t * event_tile = ((fd_event_metrics_sample_tile_t *)(buffer+event->tile_off))+i; + fd_topo_tile_t const * tile = &topo->tiles[ i ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + event_tile->context_switch_involuntary_count = metrics[ MIDX( COUNTER, TILE, CONTEXT_SWITCH_INVOLUNTARY_COUNT ) ]; + event_tile->context_switch_voluntary_count = metrics[ MIDX( COUNTER, TILE, CONTEXT_SWITCH_VOLUNTARY_COUNT ) ]; + event_tile->status = metrics[ MIDX( GAUGE, TILE, STATUS ) ]; + event_tile->heartbeat = metrics[ MIDX( GAUGE, TILE, HEARTBEAT ) ]; + event_tile->in_backpressure = metrics[ MIDX( GAUGE, TILE, IN_BACKPRESSURE ) ]; + event_tile->backpressure_count = metrics[ MIDX( COUNTER, TILE, BACKPRESSURE_COUNT ) ]; + event_tile->regime_duration_nanos.caught_up_housekeeping = metrics[ MIDX( COUNTER, TILE, REGIME_DURATION_NANOS_CAUGHT_UP_HOUSEKEEPING ) ]; + event_tile->regime_duration_nanos.processing_housekeeping = metrics[ MIDX( COUNTER, TILE, REGIME_DURATION_NANOS_PROCESSING_HOUSEKEEPING ) ]; + event_tile->regime_duration_nanos.backpressure_housekeeping = metrics[ MIDX( COUNTER, 
TILE, REGIME_DURATION_NANOS_BACKPRESSURE_HOUSEKEEPING ) ]; + event_tile->regime_duration_nanos.caught_up_prefrag = metrics[ MIDX( COUNTER, TILE, REGIME_DURATION_NANOS_CAUGHT_UP_PREFRAG ) ]; + event_tile->regime_duration_nanos.processing_prefrag = metrics[ MIDX( COUNTER, TILE, REGIME_DURATION_NANOS_PROCESSING_PREFRAG ) ]; + event_tile->regime_duration_nanos.backpressure_prefrag = metrics[ MIDX( COUNTER, TILE, REGIME_DURATION_NANOS_BACKPRESSURE_PREFRAG ) ]; + event_tile->regime_duration_nanos.caught_up_postfrag = metrics[ MIDX( COUNTER, TILE, REGIME_DURATION_NANOS_CAUGHT_UP_POSTFRAG ) ]; + event_tile->regime_duration_nanos.processing_postfrag = metrics[ MIDX( COUNTER, TILE, REGIME_DURATION_NANOS_PROCESSING_POSTFRAG ) ]; + } + + ulong link_idx = 0UL; + for( ulong i=0UL; itile_cnt; i++ ) { + fd_topo_tile_t * tile = &topo->tiles[ i ]; + ulong in_idx = 0UL; + for( ulong j=0UL; jin_cnt; j++ ) { + if( FD_UNLIKELY( !tile->in_link_poll[ j ] ) ) continue; + + fd_event_metrics_sample_link_t * link = ((fd_event_metrics_sample_link_t *)(buffer+event->link_off))+link_idx; + volatile ulong const * metrics = fd_metrics_link_in( tile->metrics, in_idx ); + + link->consumed_count = metrics[ MIDX( COUNTER, LINK, CONSUMED_COUNT ) ]; + link->consumed_size_bytes = metrics[ MIDX( COUNTER, LINK, CONSUMED_SIZE_BYTES ) ]; + link->filtered_count = metrics[ MIDX( COUNTER, LINK, FILTERED_COUNT ) ]; + link->filtered_size_bytes = metrics[ MIDX( COUNTER, LINK, FILTERED_SIZE_BYTES ) ]; + link->overrun_polling_count = metrics[ MIDX( COUNTER, LINK, OVERRUN_POLLING_COUNT ) ]; + link->overrun_polling_frag_count = metrics[ MIDX( COUNTER, LINK, OVERRUN_POLLING_FRAG_COUNT ) ]; + link->overrun_reading_count = metrics[ MIDX( COUNTER, LINK, OVERRUN_READING_COUNT ) ]; + link->overrun_reading_frag_count = metrics[ MIDX( COUNTER, LINK, OVERRUN_READING_FRAG_COUNT ) ]; + + fd_topo_tile_t * producer = &topo->tiles[ fd_topo_find_link_producer( topo, &topo->links[ tile->in_link_id[ j ] ] ) ]; + ulong producer_out_idx 
= find_producer_out_idx( topo, producer, tile, j ); + metrics = fd_metrics_link_out( producer->metrics, producer_out_idx ); + link->slow_count = metrics[ MIDX( COUNTER, LINK, SLOW_COUNT ) ]; + + in_idx++; + link_idx++; + } + } + + for( ulong i=0UL; inet_len; i++ ) { + fd_event_metrics_sample_net_t * net = ((fd_event_metrics_sample_net_t *)(buffer+event->net_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "net", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)net; (void)metrics; + + net->received_packets = metrics[ MIDX( COUNTER, NET, RECEIVED_PACKETS ) ]; + net->received_bytes = metrics[ MIDX( COUNTER, NET, RECEIVED_BYTES ) ]; + net->sent_packets = metrics[ MIDX( COUNTER, NET, SENT_PACKETS ) ]; + net->sent_bytes = metrics[ MIDX( COUNTER, NET, SENT_BYTES ) ]; + net->xdp_rx_dropped_ring_full = metrics[ MIDX( COUNTER, NET, XDP_RX_DROPPED_RING_FULL ) ]; + net->xdp_rx_dropped_other = metrics[ MIDX( COUNTER, NET, XDP_RX_DROPPED_OTHER ) ]; + net->tx_dropped = metrics[ MIDX( COUNTER, NET, TX_DROPPED ) ]; + } + + for( ulong i=0UL; iquic_len; i++ ) { + fd_event_metrics_sample_quic_t * quic = ((fd_event_metrics_sample_quic_t *)(buffer+event->quic_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "quic", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)quic; (void)metrics; + + quic->txns_overrun = metrics[ MIDX( COUNTER, QUIC, TXNS_OVERRUN ) ]; + quic->txn_reasms_started = metrics[ MIDX( COUNTER, QUIC, TXN_REASMS_STARTED ) ]; + quic->txn_reasms_active = metrics[ MIDX( GAUGE, QUIC, TXN_REASMS_ACTIVE ) ]; + quic->frags_ok = metrics[ MIDX( COUNTER, QUIC, FRAGS_OK ) ]; + quic->frags_gap = metrics[ MIDX( COUNTER, QUIC, FRAGS_GAP ) ]; + quic->frags_dup = metrics[ MIDX( COUNTER, QUIC, FRAGS_DUP ) ]; + quic->txns_received.udp = metrics[ MIDX( COUNTER, QUIC, TXNS_RECEIVED_UDP ) ]; + quic->txns_received.quic_fast = metrics[ MIDX( COUNTER, QUIC, 
TXNS_RECEIVED_QUIC_FAST ) ]; + quic->txns_received.quic_frag = metrics[ MIDX( COUNTER, QUIC, TXNS_RECEIVED_QUIC_FRAG ) ]; + quic->txns_abandoned = metrics[ MIDX( COUNTER, QUIC, TXNS_ABANDONED ) ]; + quic->quic_packet_too_small = metrics[ MIDX( COUNTER, QUIC, QUIC_PACKET_TOO_SMALL ) ]; + quic->quic_txn_too_small = metrics[ MIDX( COUNTER, QUIC, QUIC_TXN_TOO_SMALL ) ]; + quic->quic_txn_too_large = metrics[ MIDX( COUNTER, QUIC, QUIC_TXN_TOO_LARGE ) ]; + quic->non_quic_packet_too_small = metrics[ MIDX( COUNTER, QUIC, NON_QUIC_PACKET_TOO_SMALL ) ]; + quic->non_quic_packet_too_large = metrics[ MIDX( COUNTER, QUIC, NON_QUIC_PACKET_TOO_LARGE ) ]; + quic->received_packets = metrics[ MIDX( COUNTER, QUIC, RECEIVED_PACKETS ) ]; + quic->received_bytes = metrics[ MIDX( COUNTER, QUIC, RECEIVED_BYTES ) ]; + quic->sent_packets = metrics[ MIDX( COUNTER, QUIC, SENT_PACKETS ) ]; + quic->sent_bytes = metrics[ MIDX( COUNTER, QUIC, SENT_BYTES ) ]; + quic->connections_active = metrics[ MIDX( GAUGE, QUIC, CONNECTIONS_ACTIVE ) ]; + quic->connections_created = metrics[ MIDX( COUNTER, QUIC, CONNECTIONS_CREATED ) ]; + quic->connections_closed = metrics[ MIDX( COUNTER, QUIC, CONNECTIONS_CLOSED ) ]; + quic->connections_aborted = metrics[ MIDX( COUNTER, QUIC, CONNECTIONS_ABORTED ) ]; + quic->connections_timed_out = metrics[ MIDX( COUNTER, QUIC, CONNECTIONS_TIMED_OUT ) ]; + quic->connections_retried = metrics[ MIDX( COUNTER, QUIC, CONNECTIONS_RETRIED ) ]; + quic->connection_error_no_slots = metrics[ MIDX( COUNTER, QUIC, CONNECTION_ERROR_NO_SLOTS ) ]; + quic->connection_error_retry_fail = metrics[ MIDX( COUNTER, QUIC, CONNECTION_ERROR_RETRY_FAIL ) ]; + quic->pkt_crypto_failed = metrics[ MIDX( COUNTER, QUIC, PKT_CRYPTO_FAILED ) ]; + quic->pkt_no_conn = metrics[ MIDX( COUNTER, QUIC, PKT_NO_CONN ) ]; + quic->pkt_tx_alloc_fail = metrics[ MIDX( COUNTER, QUIC, PKT_TX_ALLOC_FAIL ) ]; + quic->handshakes_created = metrics[ MIDX( COUNTER, QUIC, HANDSHAKES_CREATED ) ]; + quic->handshake_error_alloc_fail = 
metrics[ MIDX( COUNTER, QUIC, HANDSHAKE_ERROR_ALLOC_FAIL ) ]; + quic->stream_received_events = metrics[ MIDX( COUNTER, QUIC, STREAM_RECEIVED_EVENTS ) ]; + quic->stream_received_bytes = metrics[ MIDX( COUNTER, QUIC, STREAM_RECEIVED_BYTES ) ]; + quic->received_frames.unknown = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_UNKNOWN ) ]; + quic->received_frames.ack = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_ACK ) ]; + quic->received_frames.reset_stream = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_RESET_STREAM ) ]; + quic->received_frames.stop_sending = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_STOP_SENDING ) ]; + quic->received_frames.crypto = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_CRYPTO ) ]; + quic->received_frames.new_token = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_NEW_TOKEN ) ]; + quic->received_frames.stream = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_STREAM ) ]; + quic->received_frames.max_data = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_MAX_DATA ) ]; + quic->received_frames.max_stream_data = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_MAX_STREAM_DATA ) ]; + quic->received_frames.max_streams = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_MAX_STREAMS ) ]; + quic->received_frames.data_blocked = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_DATA_BLOCKED ) ]; + quic->received_frames.stream_data_blocked = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_STREAM_DATA_BLOCKED ) ]; + quic->received_frames.streams_blocked = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_STREAMS_BLOCKED ) ]; + quic->received_frames.new_conn_id = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_NEW_CONN_ID ) ]; + quic->received_frames.retire_conn_id = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_RETIRE_CONN_ID ) ]; + quic->received_frames.path_challenge = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_PATH_CHALLENGE ) ]; + quic->received_frames.path_response = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_PATH_RESPONSE ) ]; + quic->received_frames.conn_close_quic 
= metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_CONN_CLOSE_QUIC ) ]; + quic->received_frames.conn_close_app = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_CONN_CLOSE_APP ) ]; + quic->received_frames.handshake_done = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_HANDSHAKE_DONE ) ]; + quic->received_frames.ping = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_PING ) ]; + quic->received_frames.padding = metrics[ MIDX( COUNTER, QUIC, RECEIVED_FRAMES_PADDING ) ]; + quic->ack_tx.noop = metrics[ MIDX( COUNTER, QUIC, ACK_TX_NOOP ) ]; + quic->ack_tx.new = metrics[ MIDX( COUNTER, QUIC, ACK_TX_NEW ) ]; + quic->ack_tx.merged = metrics[ MIDX( COUNTER, QUIC, ACK_TX_MERGED ) ]; + quic->ack_tx.drop = metrics[ MIDX( COUNTER, QUIC, ACK_TX_DROP ) ]; + quic->ack_tx.cancel = metrics[ MIDX( COUNTER, QUIC, ACK_TX_CANCEL ) ]; + quic->frame_fail_parse = metrics[ MIDX( COUNTER, QUIC, FRAME_FAIL_PARSE ) ]; + } + + for( ulong i=0UL; iverify_len; i++ ) { + fd_event_metrics_sample_verify_t * verify = ((fd_event_metrics_sample_verify_t *)(buffer+event->verify_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "verify", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)verify; (void)metrics; + + verify->transaction_parse_failure = metrics[ MIDX( COUNTER, VERIFY, TRANSACTION_PARSE_FAILURE ) ]; + verify->transaction_dedup_failure = metrics[ MIDX( COUNTER, VERIFY, TRANSACTION_DEDUP_FAILURE ) ]; + verify->transaction_verify_failure = metrics[ MIDX( COUNTER, VERIFY, TRANSACTION_VERIFY_FAILURE ) ]; + } + + for( ulong i=0UL; idedup_len; i++ ) { + fd_event_metrics_sample_dedup_t * dedup = ((fd_event_metrics_sample_dedup_t *)(buffer+event->dedup_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "dedup", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)dedup; (void)metrics; + + dedup->transaction_dedup_failure = metrics[ MIDX( COUNTER, DEDUP, TRANSACTION_DEDUP_FAILURE ) ]; + 
dedup->gossiped_votes_received = metrics[ MIDX( COUNTER, DEDUP, GOSSIPED_VOTES_RECEIVED ) ]; + } + + for( ulong i=0UL; iresolv_len; i++ ) { + fd_event_metrics_sample_resolv_t * resolv = ((fd_event_metrics_sample_resolv_t *)(buffer+event->resolv_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "resolv", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)resolv; (void)metrics; + + resolv->no_bank_drop = metrics[ MIDX( COUNTER, RESOLV, NO_BANK_DROP ) ]; + resolv->lut_resolved.invalid_lookup_index = metrics[ MIDX( COUNTER, RESOLV, LUT_RESOLVED_INVALID_LOOKUP_INDEX ) ]; + resolv->lut_resolved.account_uninitialized = metrics[ MIDX( COUNTER, RESOLV, LUT_RESOLVED_ACCOUNT_UNINITIALIZED ) ]; + resolv->lut_resolved.invalid_account_data = metrics[ MIDX( COUNTER, RESOLV, LUT_RESOLVED_INVALID_ACCOUNT_DATA ) ]; + resolv->lut_resolved.invalid_account_owner = metrics[ MIDX( COUNTER, RESOLV, LUT_RESOLVED_INVALID_ACCOUNT_OWNER ) ]; + resolv->lut_resolved.account_not_found = metrics[ MIDX( COUNTER, RESOLV, LUT_RESOLVED_ACCOUNT_NOT_FOUND ) ]; + resolv->lut_resolved.success = metrics[ MIDX( COUNTER, RESOLV, LUT_RESOLVED_SUCCESS ) ]; + resolv->blockhash_expired = metrics[ MIDX( COUNTER, RESOLV, BLOCKHASH_EXPIRED ) ]; + resolv->blockhash_unknown = metrics[ MIDX( COUNTER, RESOLV, BLOCKHASH_UNKNOWN ) ]; + } + + for( ulong i=0UL; ipack_len; i++ ) { + fd_event_metrics_sample_pack_t * pack = ((fd_event_metrics_sample_pack_t *)(buffer+event->pack_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "pack", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)pack; (void)metrics; + + pack->normal_transaction_received = metrics[ MIDX( COUNTER, PACK, NORMAL_TRANSACTION_RECEIVED ) ]; + pack->transaction_inserted.bundle_blacklist = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_BUNDLE_BLACKLIST ) ]; + pack->transaction_inserted.write_sysvar = metrics[ MIDX( COUNTER, PACK, 
TRANSACTION_INSERTED_WRITE_SYSVAR ) ]; + pack->transaction_inserted.estimation_fail = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_ESTIMATION_FAIL ) ]; + pack->transaction_inserted.duplicate_account = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_DUPLICATE_ACCOUNT ) ]; + pack->transaction_inserted.too_many_accounts = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_TOO_MANY_ACCOUNTS ) ]; + pack->transaction_inserted.too_large = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_TOO_LARGE ) ]; + pack->transaction_inserted.expired = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_EXPIRED ) ]; + pack->transaction_inserted.addr_lut = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_ADDR_LUT ) ]; + pack->transaction_inserted.unaffordable = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_UNAFFORDABLE ) ]; + pack->transaction_inserted.duplicate = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_DUPLICATE ) ]; + pack->transaction_inserted.priority = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_PRIORITY ) ]; + pack->transaction_inserted.nonvote_add = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_NONVOTE_ADD ) ]; + pack->transaction_inserted.vote_add = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_VOTE_ADD ) ]; + pack->transaction_inserted.nonvote_replace = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_NONVOTE_REPLACE ) ]; + pack->transaction_inserted.vote_replace = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_VOTE_REPLACE ) ]; + pack->metric_timing.no_txn_no_bank_no_leader_no_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_NO_BANK_NO_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.txn_no_bank_no_leader_no_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_TXN_NO_BANK_NO_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.no_txn_bank_no_leader_no_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_BANK_NO_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.txn_bank_no_leader_no_microblock = metrics[ 
MIDX( COUNTER, PACK, METRIC_TIMING_TXN_BANK_NO_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.no_txn_no_bank_leader_no_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_NO_BANK_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.txn_no_bank_leader_no_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_TXN_NO_BANK_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.no_txn_bank_leader_no_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_BANK_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.txn_bank_leader_no_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_TXN_BANK_LEADER_NO_MICROBLOCK ) ]; + pack->metric_timing.no_txn_no_bank_no_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_NO_BANK_NO_LEADER_MICROBLOCK ) ]; + pack->metric_timing.txn_no_bank_no_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_TXN_NO_BANK_NO_LEADER_MICROBLOCK ) ]; + pack->metric_timing.no_txn_bank_no_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_BANK_NO_LEADER_MICROBLOCK ) ]; + pack->metric_timing.txn_bank_no_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_TXN_BANK_NO_LEADER_MICROBLOCK ) ]; + pack->metric_timing.no_txn_no_bank_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_NO_BANK_LEADER_MICROBLOCK ) ]; + pack->metric_timing.txn_no_bank_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_TXN_NO_BANK_LEADER_MICROBLOCK ) ]; + pack->metric_timing.no_txn_bank_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_NO_TXN_BANK_LEADER_MICROBLOCK ) ]; + pack->metric_timing.txn_bank_leader_microblock = metrics[ MIDX( COUNTER, PACK, METRIC_TIMING_TXN_BANK_LEADER_MICROBLOCK ) ]; + pack->transaction_dropped_from_extra = metrics[ MIDX( COUNTER, PACK, TRANSACTION_DROPPED_FROM_EXTRA ) ]; + pack->transaction_inserted_to_extra = metrics[ MIDX( COUNTER, PACK, TRANSACTION_INSERTED_TO_EXTRA ) ]; + pack->transaction_inserted_from_extra = metrics[ MIDX( COUNTER, 
PACK, TRANSACTION_INSERTED_FROM_EXTRA ) ]; + pack->transaction_expired = metrics[ MIDX( COUNTER, PACK, TRANSACTION_EXPIRED ) ]; + pack->available_transactions = metrics[ MIDX( GAUGE, PACK, AVAILABLE_TRANSACTIONS ) ]; + pack->available_vote_transactions = metrics[ MIDX( GAUGE, PACK, AVAILABLE_VOTE_TRANSACTIONS ) ]; + pack->pending_transactions_heap_size = metrics[ MIDX( GAUGE, PACK, PENDING_TRANSACTIONS_HEAP_SIZE ) ]; + pack->conflicting_transactions = metrics[ MIDX( GAUGE, PACK, CONFLICTING_TRANSACTIONS ) ]; + pack->smallest_pending_transaction = metrics[ MIDX( GAUGE, PACK, SMALLEST_PENDING_TRANSACTION ) ]; + pack->microblock_per_block_limit = metrics[ MIDX( COUNTER, PACK, MICROBLOCK_PER_BLOCK_LIMIT ) ]; + pack->data_per_block_limit = metrics[ MIDX( COUNTER, PACK, DATA_PER_BLOCK_LIMIT ) ]; + pack->transaction_schedule.taken = metrics[ MIDX( COUNTER, PACK, TRANSACTION_SCHEDULE_TAKEN ) ]; + pack->transaction_schedule.cu_limit = metrics[ MIDX( COUNTER, PACK, TRANSACTION_SCHEDULE_CU_LIMIT ) ]; + pack->transaction_schedule.fast_path = metrics[ MIDX( COUNTER, PACK, TRANSACTION_SCHEDULE_FAST_PATH ) ]; + pack->transaction_schedule.byte_limit = metrics[ MIDX( COUNTER, PACK, TRANSACTION_SCHEDULE_BYTE_LIMIT ) ]; + pack->transaction_schedule.write_cost = metrics[ MIDX( COUNTER, PACK, TRANSACTION_SCHEDULE_WRITE_COST ) ]; + pack->transaction_schedule.slow_path = metrics[ MIDX( COUNTER, PACK, TRANSACTION_SCHEDULE_SLOW_PATH ) ]; + pack->cus_consumed_in_block = metrics[ MIDX( GAUGE, PACK, CUS_CONSUMED_IN_BLOCK ) ]; + pack->delete_missed = metrics[ MIDX( COUNTER, PACK, DELETE_MISSED ) ]; + pack->delete_hit = metrics[ MIDX( COUNTER, PACK, DELETE_HIT ) ]; + } + + for( ulong i=0UL; ibank_len; i++ ) { + fd_event_metrics_sample_bank_t * bank = ((fd_event_metrics_sample_bank_t *)(buffer+event->bank_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "bank", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)bank; 
(void)metrics; + + bank->transaction_sanitize_failure = metrics[ MIDX( COUNTER, BANK, TRANSACTION_SANITIZE_FAILURE ) ]; + bank->transaction_not_executed_failure = metrics[ MIDX( COUNTER, BANK, TRANSACTION_NOT_EXECUTED_FAILURE ) ]; + bank->precompile_verify_failure = metrics[ MIDX( COUNTER, BANK, PRECOMPILE_VERIFY_FAILURE ) ]; + bank->slot_acquire.success = metrics[ MIDX( COUNTER, BANK, SLOT_ACQUIRE_SUCCESS ) ]; + bank->slot_acquire.too_high = metrics[ MIDX( COUNTER, BANK, SLOT_ACQUIRE_TOO_HIGH ) ]; + bank->slot_acquire.too_low = metrics[ MIDX( COUNTER, BANK, SLOT_ACQUIRE_TOO_LOW ) ]; + bank->transaction_load_address_tables.success = metrics[ MIDX( COUNTER, BANK, TRANSACTION_LOAD_ADDRESS_TABLES_SUCCESS ) ]; + bank->transaction_load_address_tables.slot_hashes_sysvar_not_found = metrics[ MIDX( COUNTER, BANK, TRANSACTION_LOAD_ADDRESS_TABLES_SLOT_HASHES_SYSVAR_NOT_FOUND ) ]; + bank->transaction_load_address_tables.account_not_found = metrics[ MIDX( COUNTER, BANK, TRANSACTION_LOAD_ADDRESS_TABLES_ACCOUNT_NOT_FOUND ) ]; + bank->transaction_load_address_tables.invalid_account_owner = metrics[ MIDX( COUNTER, BANK, TRANSACTION_LOAD_ADDRESS_TABLES_INVALID_ACCOUNT_OWNER ) ]; + bank->transaction_load_address_tables.invalid_account_data = metrics[ MIDX( COUNTER, BANK, TRANSACTION_LOAD_ADDRESS_TABLES_INVALID_ACCOUNT_DATA ) ]; + bank->transaction_load_address_tables.invalid_index = metrics[ MIDX( COUNTER, BANK, TRANSACTION_LOAD_ADDRESS_TABLES_INVALID_INDEX ) ]; + bank->transaction_result.success = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_SUCCESS ) ]; + bank->transaction_result.account_in_use = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_ACCOUNT_IN_USE ) ]; + bank->transaction_result.account_loaded_twice = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_ACCOUNT_LOADED_TWICE ) ]; + bank->transaction_result.account_not_found = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_ACCOUNT_NOT_FOUND ) ]; + bank->transaction_result.program_account_not_found = metrics[ MIDX( 
COUNTER, BANK, TRANSACTION_RESULT_PROGRAM_ACCOUNT_NOT_FOUND ) ]; + bank->transaction_result.insufficient_funds_for_fee = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INSUFFICIENT_FUNDS_FOR_FEE ) ]; + bank->transaction_result.invalid_account_for_fee = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_ACCOUNT_FOR_FEE ) ]; + bank->transaction_result.already_processed = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_ALREADY_PROCESSED ) ]; + bank->transaction_result.blockhash_not_found = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_BLOCKHASH_NOT_FOUND ) ]; + bank->transaction_result.instruction_error = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INSTRUCTION_ERROR ) ]; + bank->transaction_result.call_chain_too_deep = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_CALL_CHAIN_TOO_DEEP ) ]; + bank->transaction_result.missing_signature_for_fee = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_MISSING_SIGNATURE_FOR_FEE ) ]; + bank->transaction_result.invalid_account_index = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_ACCOUNT_INDEX ) ]; + bank->transaction_result.signature_failure = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_SIGNATURE_FAILURE ) ]; + bank->transaction_result.invalid_program_for_execution = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_PROGRAM_FOR_EXECUTION ) ]; + bank->transaction_result.sanitize_failure = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_SANITIZE_FAILURE ) ]; + bank->transaction_result.cluster_maintenance = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_CLUSTER_MAINTENANCE ) ]; + bank->transaction_result.account_borrow_outstanding = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_ACCOUNT_BORROW_OUTSTANDING ) ]; + bank->transaction_result.would_exceed_max_block_cost_limit = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_WOULD_EXCEED_MAX_BLOCK_COST_LIMIT ) ]; + bank->transaction_result.unsupported_version = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_UNSUPPORTED_VERSION ) ]; + 
bank->transaction_result.invalid_writable_account = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_WRITABLE_ACCOUNT ) ]; + bank->transaction_result.would_exceed_max_account_cost_limit = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_WOULD_EXCEED_MAX_ACCOUNT_COST_LIMIT ) ]; + bank->transaction_result.would_exceed_account_data_block_limit = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_WOULD_EXCEED_ACCOUNT_DATA_BLOCK_LIMIT ) ]; + bank->transaction_result.too_many_account_locks = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_TOO_MANY_ACCOUNT_LOCKS ) ]; + bank->transaction_result.address_lookup_table_not_found = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_ADDRESS_LOOKUP_TABLE_NOT_FOUND ) ]; + bank->transaction_result.invalid_address_lookup_table_owner = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_ADDRESS_LOOKUP_TABLE_OWNER ) ]; + bank->transaction_result.invalid_address_lookup_table_data = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_ADDRESS_LOOKUP_TABLE_DATA ) ]; + bank->transaction_result.invalid_address_lookup_table_index = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_ADDRESS_LOOKUP_TABLE_INDEX ) ]; + bank->transaction_result.invalid_rent_paying_account = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_RENT_PAYING_ACCOUNT ) ]; + bank->transaction_result.would_exceed_max_vote_cost_limit = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_WOULD_EXCEED_MAX_VOTE_COST_LIMIT ) ]; + bank->transaction_result.would_exceed_account_data_total_limit = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_WOULD_EXCEED_ACCOUNT_DATA_TOTAL_LIMIT ) ]; + bank->transaction_result.duplicate_instruction = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_DUPLICATE_INSTRUCTION ) ]; + bank->transaction_result.insufficient_funds_for_rent = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INSUFFICIENT_FUNDS_FOR_RENT ) ]; + bank->transaction_result.max_loaded_accounts_data_size_exceeded = metrics[ MIDX( COUNTER, BANK, 
TRANSACTION_RESULT_MAX_LOADED_ACCOUNTS_DATA_SIZE_EXCEEDED ) ]; + bank->transaction_result.invalid_loaded_accounts_data_size_limit = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_INVALID_LOADED_ACCOUNTS_DATA_SIZE_LIMIT ) ]; + bank->transaction_result.resanitization_needed = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_RESANITIZATION_NEEDED ) ]; + bank->transaction_result.program_execution_temporarily_restricted = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_PROGRAM_EXECUTION_TEMPORARILY_RESTRICTED ) ]; + bank->transaction_result.unbalanced_transaction = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_UNBALANCED_TRANSACTION ) ]; + bank->transaction_result.program_cache_hit_max_limit = metrics[ MIDX( COUNTER, BANK, TRANSACTION_RESULT_PROGRAM_CACHE_HIT_MAX_LIMIT ) ]; + bank->processing_failed = metrics[ MIDX( COUNTER, BANK, PROCESSING_FAILED ) ]; + bank->fee_only_transactions = metrics[ MIDX( COUNTER, BANK, FEE_ONLY_TRANSACTIONS ) ]; + bank->executed_failed_transactions = metrics[ MIDX( COUNTER, BANK, EXECUTED_FAILED_TRANSACTIONS ) ]; + bank->successful_transactions = metrics[ MIDX( COUNTER, BANK, SUCCESSFUL_TRANSACTIONS ) ]; + bank->cost_model_undercount = metrics[ MIDX( COUNTER, BANK, COST_MODEL_UNDERCOUNT ) ]; + } + + for( ulong i=0UL; ishred_len; i++ ) { + fd_event_metrics_sample_shred_t * shred = ((fd_event_metrics_sample_shred_t *)(buffer+event->shred_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "shred", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)shred; (void)metrics; + + shred->microblocks_abandoned = metrics[ MIDX( COUNTER, SHRED, MICROBLOCKS_ABANDONED ) ]; + shred->shred_processed.bad_slot = metrics[ MIDX( COUNTER, SHRED, SHRED_PROCESSED_BAD_SLOT ) ]; + shred->shred_processed.parse_failed = metrics[ MIDX( COUNTER, SHRED, SHRED_PROCESSED_PARSE_FAILED ) ]; + shred->shred_processed.rejected = metrics[ MIDX( COUNTER, SHRED, SHRED_PROCESSED_REJECTED ) ]; + 
shred->shred_processed.ignored = metrics[ MIDX( COUNTER, SHRED, SHRED_PROCESSED_IGNORED ) ]; + shred->shred_processed.okay = metrics[ MIDX( COUNTER, SHRED, SHRED_PROCESSED_OKAY ) ]; + shred->shred_processed.completes = metrics[ MIDX( COUNTER, SHRED, SHRED_PROCESSED_COMPLETES ) ]; + shred->fec_set_spilled = metrics[ MIDX( COUNTER, SHRED, FEC_SET_SPILLED ) ]; + shred->shred_rejected_initial = metrics[ MIDX( COUNTER, SHRED, SHRED_REJECTED_INITIAL ) ]; + shred->fec_rejected_fatal = metrics[ MIDX( COUNTER, SHRED, FEC_REJECTED_FATAL ) ]; + } + + for( ulong i=0UL; istore_len; i++ ) { + fd_event_metrics_sample_store_t * store = ((fd_event_metrics_sample_store_t *)(buffer+event->store_off))+i; + + fd_topo_tile_t const * tile = &topo->tiles[ fd_topo_find_tile( topo, "store", i ) ]; + volatile ulong const * metrics = fd_metrics_tile( tile->metrics ); + (void)store; (void)metrics; + + store->transactions_inserted = metrics[ MIDX( COUNTER, STORE, TRANSACTIONS_INSERTED ) ]; + } + +} + diff --git a/src/disco/metrics/generated/fd_metric_event_snap.h b/src/disco/metrics/generated/fd_metric_event_snap.h new file mode 100644 index 0000000000..5e3753c6b2 --- /dev/null +++ b/src/disco/metrics/generated/fd_metric_event_snap.h @@ -0,0 +1,13 @@ +/* THIS FILE IS GENERATED BY gen_metrics.py. DO NOT HAND EDIT. 
*/ + +#ifndef HEADER_fd_src_disco_events_generated_fd_metric_event_snap_h +#define HEADER_fd_src_disco_events_generated_fd_metric_event_snap_h + +#include "../../topo/fd_topo.h" +#include "fd_event.h" + +void +fd_metric_event_snap( fd_topo_t * topo, + fd_event_metrics_sample_t * metrics ); + +#endif /* HEADER_fd_src_disco_events_generated_fd_metric_event_snap_h */ diff --git a/src/disco/metrics/schema/common.json b/src/disco/metrics/schema/common.json new file mode 100644 index 0000000000..9337047bbd --- /dev/null +++ b/src/disco/metrics/schema/common.json @@ -0,0 +1,77 @@ +{ + "name": "common", + "id": -1, + "common": false, + "description": "Fields that are common to and reported by all events.", + "fields": { + "timestamp": { + "type": "DateTime64(9)", + "description": "The timestamp that the event was generated on the validator client, with nanosecond precision. Timestamp is provided by the validator, and might be skewed." + }, + "server_timestamp": { + "type": "DateTime64(9)", + "server_only": true, + "description": "The timestamp that the event was received by the server, with nanosecond precision. Timestamp is provided by the server, and might be skewed." + }, + "identity": { + "type": "String", + "max_length": 44, + "description": "The base58 encoded validator identity public key. The validator must prove possession of the identity and metrics reported by identity are authenticated." + }, + "client_ip": { + "type": "IPv6", + "server_only": true, + "description": "The IP address of the client that reported the event, as determined by the edge reverse proxy." + }, + "cluster": { + "type": "Enum8", + "description": "The cluster that the validator is running on. 
One of \"mainnet\", \"devnet\", \"testnet\", \"development\", or \"unknown\".", + "variants": { + "mainnet": 1, + "devnet": 2, + "testnet": 3, + "development": 4, + "pythnet": 5, + "pythtest": 6 + } + }, + "version" : { + "type": "LowCardinality(String)", + "max_length": 11, + "description": "The version of the validator software that is running." + }, + "client": { + "type": "Enum8", + "description": "The client that the validator is running. Currently always \"frankendancer\".", + "variants": { + "agave": 1, + "frankendancer": 2, + "firedancer": 3 + } + }, + "os": { + "type": "Enum8", + "description": "The operating system that the validator is running. Currently always \"linux\".", + "variants": { + "linux": 1 + } + }, + "instance_id": { + "type": "UInt64", + "description": "A unique identifier for this instance of the validator. Randomly generated when the validator is booted." + }, + "machine_id": { + "type": "UInt64", + "description": "A unique identifier for the host running this validator. Will remain the same between validator and machine restarts." + }, + "boot_id": { + "type": "UInt64", + "description": "A unique identifier for the boot identifier of the host running this validator. Will remain the same between validator restarts, but reset on machine restarts." + }, + "unparsed": { + "type": "String", + "server_only": true, + "description": "Any JSON leftover in the event after a parse of known fields is dumped out here." 
+ } + } +} diff --git a/src/disco/metrics/schema/metrics_sample.json b/src/disco/metrics/schema/metrics_sample.json new file mode 100644 index 0000000000..f29c2aca66 --- /dev/null +++ b/src/disco/metrics/schema/metrics_sample.json @@ -0,0 +1,387 @@ +{ + "name": "metrics_sample", + "id": 1, + "description": "Metric data periodically sampled by the application.", + "fields": { + "reason": { + "type": "Enum8", + "description": "Reason the metrics snapshot was sampled.", + "variants": { + "periodic": 1, + "leader_start": 2, + "leader_end_start": 3, + "leader_end": 4 + } + }, + "slot": { "type": "UInt64", "description": "If the reason the sample was taken is because a leader was starting or ending, this is the slot that was starting (or ending). If a leader slot is both ending and starting (leader_end_start), this is the slot which is starting." }, + "tile": { + "type": "Nested", + "description": "Common metrics shared by all tiles", + "fields": { + "kind": { "type": "LowCardinality(String)", "max_length": 20, "description": "Tile type." }, + "kind_id": { "type": "UInt16", "description": "ID of the tile within the type." }, + "context_switch_involuntary_count": { "type": "UInt64", "description": "The number of involuntary context switches." }, + "context_switch_voluntary_count": { "type": "UInt64", "description": "The number of voluntary context switches." }, + "status": { "type": "UInt64", "description": "The current status of the tile. 0 is booting, 1 is running." }, + "heartbeat": { "type": "UInt64", "description": "The last UNIX timestamp in nanoseconds that the tile heartbeated." }, + "in_backpressure": { "type": "UInt64", "description": "Whether the tile is currently backpressured or not, either 1 or 0." }, + "backpressure_count": { "type": "UInt64", "description": "Number of times the tile has had to wait for one or more consumers to catch up to resume publishing." 
}, + "regime_duration_nanos": { + "type": "Tuple", + "description": "Mutually exclusive and exhaustive duration of time the tile spent in each of the regimes.", + "fields": { + "caught_up_housekeeping": { "type": "UInt64", "description": "Caught up + Housekeeping" }, + "processing_housekeeping": { "type": "UInt64", "description": "Processing + Housekeeping" }, + "backpressure_housekeeping": { "type": "UInt64", "description": "Backpressure + Housekeeping" }, + "caught_up_prefrag": { "type": "UInt64", "description": "Caught up + Prefrag" }, + "processing_prefrag": { "type": "UInt64", "description": "Processing + Prefrag" }, + "backpressure_prefrag": { "type": "UInt64", "description": "Backpressure + Prefrag" }, + "caught_up_postfrag": { "type": "UInt64", "description": "Caught up + Postfrag" }, + "processing_postfrag": { "type": "UInt64", "description": "Processing + Postfrag" } + } + } + } + }, + "link": { + "type": "Nested", + "description": "Metrics for links between tiles.", + "fields": { + "kind": { "type": "LowCardinality(String)", "max_length": 20, "description": "Tile type." }, + "kind_id": { "type": "UInt16", "description": "ID of the tile within the type." }, + "link_kind": { "type": "LowCardinality(String)", "max_length": 20, "description": "Link type." }, + "link_kind_id": { "type": "UInt16", "description": "ID of the link within the link kind." }, + "consumed_count": { "type": "UInt64", "description": "The number of times the link reader has consumed a fragment." }, + "consumed_size_bytes": { "type": "UInt64", "description": "The total number of bytes read by the link consumer." }, + "filtered_count": { "type": "UInt64", "description": "The number of fragments that were filtered and not consumed." }, + "filtered_size_bytes": { "type": "UInt64", "description": "The total number of bytes read by the link consumer that were filtered." 
}, + "overrun_polling_count": { "type": "UInt64", "description": "The number of times the link has been overrun while polling." }, + "overrun_polling_frag_count": { "type": "UInt64", "description": "The number of fragments the link has not processed because it was overrun while polling." }, + "overrun_reading_count": { "type": "UInt64", "description": "The number of input overruns detected while reading metadata by the consumer." }, + "overrun_reading_frag_count": { "type": "UInt64", "description": "The number of fragments the link has not processed because it was overrun while reading." }, + "slow_count": { "type": "UInt64", "description": "The number of times the consumer was detected as rate limiting consumer by the producer." } + } + }, + "net": { + "type": "Nested", + "description": "Metrics for net tiles.", + "fields": { + "received_packets": { "type": "UInt64", "description": "Number of IP packets received." }, + "received_bytes": { "type": "UInt64", "description": "Total bytes received (including IP, UDP headers)." }, + "sent_packets": { "type": "UInt64", "description": "Number of IP packets sent." }, + "sent_bytes": { "type": "UInt64", "description": "Total bytes sent (including IP, UDP headers)." }, + "xdp_rx_dropped_ring_full": { "type": "UInt64", "description": "Number of packets dropped because the RX completion queue was empty. This is only reported for net tile 0, since the measurement is across all RX queues." }, + "xdp_rx_dropped_other": { "type": "UInt64", "description": "Number of packets dropped for other reasons. This is only reported for net tile 0, since the measurement is across all RX queues." }, + "tx_dropped": { "type": "UInt64", "description": "Number of packets dropped because the TX submission queue was empty. This is reported for all net tiles." 
} + } + }, + "quic": { + "type": "Nested", + "description": "Metrics for quic tiles.", + "fields": { + "txns_overrun": { "type": "UInt64", "description": "Count of txns overrun before reassembled (too small txn_reassembly_count)." }, + "txn_reasms_started": { "type": "UInt64", "description": "Count of fragmented txn receive ops started." }, + "txn_reasms_active": { "type": "UInt64", "description": "Number of fragmented txn receive ops currently active." }, + "frags_ok": { "type": "UInt64", "description": "Count of txn frags received" }, + "frags_gap": { "type": "UInt64", "description": "Count of txn frags dropped due to data gap" }, + "frags_dup": { "type": "UInt64", "description": "Count of txn frags dropped due to dup (stream already completed)" }, + "txns_received": { + "type": "Tuple", + "description": "Count of txns received via TPU.", + "fields": { + "udp": { "type": "UInt64", "description": "TPU/UDP" }, + "quic_fast": { "type": "UInt64", "description": "TPU/QUIC unfragmented" }, + "quic_frag": { "type": "UInt64", "description": "TPU/QUIC fragmented" } + } + }, + "txns_abandoned": { "type": "UInt64", "description": "Count of txns abandoned because a conn was lost." }, + "quic_packet_too_small": { "type": "UInt64", "description": "Count of packets received on the QUIC port that were too small to be a valid IP packet." }, + "quic_txn_too_small": { "type": "UInt64", "description": "Count of txns received via QUIC dropped because they were too small." }, + "quic_txn_too_large": { "type": "UInt64", "description": "Count of txns received via QUIC dropped because they were too large." }, + "non_quic_packet_too_small": { "type": "UInt64", "description": "Count of packets received on the non-QUIC port that were too small to be a valid IP packet." }, + "non_quic_packet_too_large": { "type": "UInt64", "description": "Count of packets received on the non-QUIC port that were too large to be a valid transaction." 
}, + "received_packets": { "type": "UInt64", "description": "Number of IP packets received." }, + "received_bytes": { "type": "UInt64", "description": "Total bytes received (including IP, UDP, QUIC headers)." }, + "sent_packets": { "type": "UInt64", "description": "Number of IP packets sent." }, + "sent_bytes": { "type": "UInt64", "description": "Total bytes sent (including IP, UDP, QUIC headers)." }, + "connections_active": { "type": "UInt64", "description": "The number of currently active QUIC connections." }, + "connections_created": { "type": "UInt64", "description": "The total number of connections that have been created." }, + "connections_closed": { "type": "UInt64", "description": "Number of connections gracefully closed." }, + "connections_aborted": { "type": "UInt64", "description": "Number of connections aborted." }, + "connections_timed_out": { "type": "UInt64", "description": "Number of connections timed out." }, + "connections_retried": { "type": "UInt64", "description": "Number of connections established with retry." }, + "connection_error_no_slots": { "type": "UInt64", "description": "Number of connections that failed to create due to lack of slots." }, + "connection_error_retry_fail": { "type": "UInt64", "description": "Number of connections that failed during retry (e.g. invalid token)." }, + "pkt_crypto_failed": { "type": "UInt64", "description": "Number of packets that failed decryption." }, + "pkt_no_conn": { "type": "UInt64", "description": "Number of packets with an unknown connection ID." }, + "pkt_tx_alloc_fail": { "type": "UInt64", "description": "Number of packets failed to send because of metadata alloc fail." }, + "handshakes_created": { "type": "UInt64", "description": "Number of handshake flows created." }, + "handshake_error_alloc_fail": { "type": "UInt64", "description": "Number of handshakes dropped due to alloc fail." }, + "stream_received_events": { "type": "UInt64", "description": "Number of stream RX events." 
}, + "stream_received_bytes": { "type": "UInt64", "description": "Total stream payload bytes received." }, + "received_frames": { + "type": "Tuple", + "description": "Number of QUIC frames received.", + "fields": { + "unknown": { "type": "UInt64", "description": "Unknown frame type" }, + "ack": { "type": "UInt64", "description": "ACK frame" }, + "reset_stream": { "type": "UInt64", "description": "RESET_STREAM frame" }, + "stop_sending": { "type": "UInt64", "description": "STOP_SENDING frame" }, + "crypto": { "type": "UInt64", "description": "CRYPTO frame" }, + "new_token": { "type": "UInt64", "description": "NEW_TOKEN frame" }, + "stream": { "type": "UInt64", "description": "STREAM frame" }, + "max_data": { "type": "UInt64", "description": "MAX_DATA frame" }, + "max_stream_data": { "type": "UInt64", "description": "MAX_STREAM_DATA frame" }, + "max_streams": { "type": "UInt64", "description": "MAX_STREAMS frame" }, + "data_blocked": { "type": "UInt64", "description": "DATA_BLOCKED frame" }, + "stream_data_blocked": { "type": "UInt64", "description": "STREAM_DATA_BLOCKED frame" }, + "streams_blocked": { "type": "UInt64", "description": "STREAMS_BLOCKED(bidi) frame" }, + "new_conn_id": { "type": "UInt64", "description": "NEW_CONN_ID frame" }, + "retire_conn_id": { "type": "UInt64", "description": "RETIRE_CONN_ID frame" }, + "path_challenge": { "type": "UInt64", "description": "PATH_CHALLENGE frame" }, + "path_response": { "type": "UInt64", "description": "PATH_RESPONSE frame" }, + "conn_close_quic": { "type": "UInt64", "description": "CONN_CLOSE(transport) frame" }, + "conn_close_app": { "type": "UInt64", "description": "CONN_CLOSE(app) frame" }, + "handshake_done": { "type": "UInt64", "description": "HANDSHAKE_DONE frame" }, + "ping": { "type": "UInt64", "description": "PING frame" }, + "padding": { "type": "UInt64", "description": "PADDING frame" } + } + }, + "ack_tx": { + "type": "Tuple", + "description": "ACK events", + "fields": { + "noop": { "type": "UInt64", 
"description": "non-ACK-eliciting packet" }, + "new": { "type": "UInt64", "description": "new ACK range" }, + "merged": { "type": "UInt64", "description": "merged into existing ACK range" }, + "drop": { "type": "UInt64", "description": "out of buffers" }, + "cancel": { "type": "UInt64", "description": "ACK suppressed by handler" } + } + }, + "frame_fail_parse": { "type": "UInt64", "description": "Number of QUIC frames failed to parse." } + } + }, + "verify": { + "type": "Nested", + "description": "Metrics for verify tiles.", + "fields": { + "transaction_parse_failure": { "type": "UInt64", "description": "Count of transactions that failed to parse" }, + "transaction_dedup_failure": { "type": "UInt64", "description": "Count of transactions that failed to deduplicate in the verify stage" }, + "transaction_verify_failure": { "type": "UInt64", "description": "Count of transactions that failed to deduplicate in the verify stage" } + } + }, + "dedup": { + "type": "Nested", + "description": "Metrics for dedup tiles.", + "fields": { + "transaction_dedup_failure": { "type": "UInt64", "description": "Count of transactions that failed to deduplicate in the dedup stage" }, + "gossiped_votes_received": { "type": "UInt64", "description": "Count of simple vote transactions received over gossip instead of via the normal TPU path" } + } + }, + "resolv": { + "type": "Nested", + "description": "Metrics for resolv tiles.", + "fields": { + "no_bank_drop": { "type": "UInt64", "description": "Count of transactions dropped because the bank was not available" }, + "lut_resolved": { + "type": "Tuple", + "description": "Count of address lookup tables resolved", + "fields": { + "invalid_lookup_index": { "type": "UInt64", "description": "The transaction referenced an index in a LUT that didn't exist" }, + "account_uninitialized": { "type": "UInt64", "description": "The account referenced as a LUT hasn't been initialized" }, + "invalid_account_data": { "type": "UInt64", "description": "The 
account referenced as a LUT couldn't be parsed" }, + "invalid_account_owner": { "type": "UInt64", "description": "The account referenced as a LUT wasn't owned by the ALUT program ID" }, + "account_not_found": { "type": "UInt64", "description": "The account referenced as a LUT couldn't be found" }, + "success": { "type": "UInt64", "description": "Resolved successfully" } + } + }, + "blockhash_expired": { "type": "UInt64", "description": "Count of transactions that failed to resolve because the blockhash was expired" }, + "blockhash_unknown": { "type": "UInt64", "description": "Count of transactions with an unknown blockhash. These may be very recent, very old, nonces, or bogus." } + } + }, + "pack": { + "type": "Nested", + "description": "Metrics for pack tiles.", + "fields": { + "normal_transaction_received": { "type": "UInt64", "description": "Count of transactions received via the normal TPU path" }, + "transaction_inserted": { + "type": "Tuple", + "description": "Result of inserting a transaction into the pack object", + "fields": { + "bundle_blacklist": { "type": "UInt64", "description": "Transaction uses an account on the bundle blacklist" }, + "write_sysvar": { "type": "UInt64", "description": "Transaction tries to write to a sysvar" }, + "estimation_fail": { "type": "UInt64", "description": "Estimating compute cost and/or fee failed" }, + "duplicate_account": { "type": "UInt64", "description": "Transaction included an account address twice" }, + "too_many_accounts": { "type": "UInt64", "description": "Transaction tried to load too many accounts" }, + "too_large": { "type": "UInt64", "description": "Transaction requests too many CUs" }, + "expired": { "type": "UInt64", "description": "Transaction already expired" }, + "addr_lut": { "type": "UInt64", "description": "Transaction loaded accounts from a lookup table" }, + "unaffordable": { "type": "UInt64", "description": "Fee payer's balance below transaction fee" }, + "duplicate": { "type": "UInt64", 
"description": "Pack aware of transaction with same signature" }, + "priority": { "type": "UInt64", "description": "Transaction's fee was too low given its compute unit requirement and other competing transactions" }, + "nonvote_add": { "type": "UInt64", "description": "Transaction that was not a simple vote added to pending transactions" }, + "vote_add": { "type": "UInt64", "description": "Simple vote transaction was added to pending transactions" }, + "nonvote_replace": { "type": "UInt64", "description": "Transaction that was not a simple vote replaced a lower priority transaction" }, + "vote_replace": { "type": "UInt64", "description": "Simple vote transaction replaced a lower priority transaction" } + } + }, + "metric_timing": { + "type": "Tuple", + "description": "Time in nanos spent in each state", + "fields": { + "no_txn_no_bank_no_leader_no_microblock": { "type": "UInt64", "description": "Pack had no transactions available, and wasn't leader" }, + "txn_no_bank_no_leader_no_microblock": { "type": "UInt64", "description": "Pack had transactions available, but wasn't leader or had hit a limit" }, + "no_txn_bank_no_leader_no_microblock": { "type": "UInt64", "description": "Pack had no transactions available, had banks but wasn't leader" }, + "txn_bank_no_leader_no_microblock": { "type": "UInt64", "description": "Pack had transactions available, had banks but wasn't leader" }, + "no_txn_no_bank_leader_no_microblock": { "type": "UInt64", "description": "Pack had no transactions available, and was leader but had no available banks" }, + "txn_no_bank_leader_no_microblock": { "type": "UInt64", "description": "Pack had transactions available, was leader, but had no available banks" }, + "no_txn_bank_leader_no_microblock": { "type": "UInt64", "description": "Pack had available banks but no transactions" }, + "txn_bank_leader_no_microblock": { "type": "UInt64", "description": "Pack had banks and transactions available but couldn't schedule anything non-conflicting" }, 
+ "no_txn_no_bank_no_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock while not leader" }, + "txn_no_bank_no_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock while not leader" }, + "no_txn_bank_no_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock while not leader" }, + "txn_bank_no_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock while not leader" }, + "no_txn_no_bank_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock but all banks were busy" }, + "txn_no_bank_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock but all banks were busy" }, + "no_txn_bank_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock and now has no transactions" }, + "txn_bank_leader_microblock": { "type": "UInt64", "description": "Pack scheduled a non-empty microblock" } + } + }, + "transaction_dropped_from_extra": { "type": "UInt64", "description": "Transactions dropped from the extra transaction storage because it was full" }, + "transaction_inserted_to_extra": { "type": "UInt64", "description": "Transactions inserted into the extra transaction storage because pack's primary storage was full" }, + "transaction_inserted_from_extra": { "type": "UInt64", "description": "Transactions pulled from the extra transaction storage and inserted into pack's primary storage" }, + "transaction_expired": { "type": "UInt64", "description": "Transactions deleted from pack because their TTL expired" }, + "available_transactions": { "type": "UInt64", "description": "The total number of pending transactions in pack's pool that are available to be scheduled" }, + "available_vote_transactions": { "type": "UInt64", "description": "The number of pending simple vote transactions in pack's pool that are available to 
be scheduled" }, + "pending_transactions_heap_size": { "type": "UInt64", "description": "The maximum number of pending transactions that pack can consider. This value is fixed at Firedancer startup but is a useful reference for AvailableTransactions and AvailableVoteTransactions." }, + "conflicting_transactions": { "type": "UInt64", "description": "The number of available transactions that are temporarily not being considered due to account lock conflicts with many higher paying transactions" }, + "smallest_pending_transaction": { "type": "UInt64", "description": "A lower bound on the smallest non-vote transaction (in cost units) that is immediately available for scheduling" }, + "microblock_per_block_limit": { "type": "UInt64", "description": "The number of times pack did not pack a microblock because the limit on microblocks/block had been reached" }, + "data_per_block_limit": { "type": "UInt64", "description": "The number of times pack did not pack a microblock because it reached reached the data per block limit at the start of trying to schedule a microblock" }, + "transaction_schedule": { + "type": "Tuple", + "description": "Result of trying to consider a transaction for scheduling", + "fields": { + "taken": { "type": "UInt64", "description": "Pack included the transaction in the microblock" }, + "cu_limit": { "type": "UInt64", "description": "Pack skipped the transaction because it would have exceeded the block CU limit" }, + "fast_path": { "type": "UInt64", "description": "Pack skipped the transaction because of account conflicts using the fast bitvector check" }, + "byte_limit": { "type": "UInt64", "description": "Pack skipped the transaction because it would have exceeded the block data size limit" }, + "write_cost": { "type": "UInt64", "description": "Pack skipped the transaction because it would have caused a writable account to exceed the per-account block write cost limit" }, + "slow_path": { "type": "UInt64", "description": "Pack skipped the 
transaction because of account conflicts using the full slow check" } + } + }, + "cus_consumed_in_block": { "type": "UInt64", "description": "The number of cost units consumed in the current block, or 0 if pack is not currently packing a block" }, + "delete_missed": { "type": "UInt64", "description": "Count of attempts to delete a transaction that wasn't found" }, + "delete_hit": { "type": "UInt64", "description": "Count of attempts to delete a transaction that was found and deleted" } + } + }, + "bank": { + "type": "Nested", + "description": "Metrics for bank tiles.", + "fields": { + "transaction_sanitize_failure": { "type": "UInt64", "description": "Number of transactions that failed to sanitize." }, + "transaction_not_executed_failure": { "type": "UInt64", "description": "Number of transactions that did not execute. This is different than transactions which fail to execute, which make it onto the chain." }, + "precompile_verify_failure": { "type": "UInt64", "description": "Number of transactions that failed precompile verification and thus will not execute." }, + "slot_acquire": { + "type": "Tuple", + "description": "Result of acquiring a slot.", + "fields": { + "success": { "type": "UInt64", "description": "Success" }, + "too_high": { "type": "UInt64", "description": "Too high" }, + "too_low": { "type": "UInt64", "description": "Too low" } + } + }, + "transaction_load_address_tables": { + "type": "Tuple", + "description": "Result of loading address lookup tables for a transaction. If there are multiple errors for the transaction, only the first one is reported.", + "fields": { + "success": { "type": "UInt64", "description": "Success" }, + "slot_hashes_sysvar_not_found": { "type": "UInt64", "description": "The slot hashes sysvar could not be found." }, + "account_not_found": { "type": "UInt64", "description": "The account storing the address lookup table was deactivated or could not be found." 
}, + "invalid_account_owner": { "type": "UInt64", "description": "The account that owns the referenced lookup table is not the address lookup table program." }, + "invalid_account_data": { "type": "UInt64", "description": "The data for the referenced address lookup table is malformed." }, + "invalid_index": { "type": "UInt64", "description": "The referenced index in the address lookup table does not exist." } + } + }, + "transaction_result": { + "type": "Tuple", + "description": "Result of loading and executing a transaction.", + "fields": { + "success": { "type": "UInt64", "description": "Success" }, + "account_in_use": { "type": "UInt64", "description": "An account is already being processed in another transaction in a way that does not support parallelism." }, + "account_loaded_twice": { "type": "UInt64", "description": "A `Pubkey` appears twice in the transaction's `account_keys`. Instructions can reference `Pubkey`s more than once but the message must contain a list with no duplicate keys." }, + "account_not_found": { "type": "UInt64", "description": "Attempt to debit an account but found no record of a prior credit." }, + "program_account_not_found": { "type": "UInt64", "description": "Attempt to load a program that does not exist." }, + "insufficient_funds_for_fee": { "type": "UInt64", "description": "The fee payer `Pubkey` does not have sufficient balance to pay the fee to schedule the transaction." }, + "invalid_account_for_fee": { "type": "UInt64", "description": "This account may not be used to pay transaction fees." }, + "already_processed": { "type": "UInt64", "description": "The bank has seen this transaction before. This can occur under normal operation when a UDP packet is duplicated, as a user error from a client not updating its `recent_blockhash`, or as a double-spend attack." 
}, + "blockhash_not_found": { "type": "UInt64", "description": "The bank has not seen the given `recent_blockhash` or the transaction is too old and the `recent_blockhash` has been discarded." }, + "instruction_error": { "type": "UInt64", "description": "An error occurred while processing an instruction." }, + "call_chain_too_deep": { "type": "UInt64", "description": "Loader call chain is too deep." }, + "missing_signature_for_fee": { "type": "UInt64", "description": "Transaction requires a fee but has no signature present." }, + "invalid_account_index": { "type": "UInt64", "description": "Transaction contains an invalid account reference." }, + "signature_failure": { "type": "UInt64", "description": "Transaction did not pass signature verification." }, + "invalid_program_for_execution": { "type": "UInt64", "description": "This program may not be used for executing instructions." }, + "sanitize_failure": { "type": "UInt64", "description": "Transaction failed to sanitize accounts offsets correctly implies that account locks are not taken for this TX, and should not be unlocked." }, + "cluster_maintenance": { "type": "UInt64", "description": "Transactions are currently disabled due to cluster maintenance." }, + "account_borrow_outstanding": { "type": "UInt64", "description": "Transaction processing left an account with an outstanding borrowed reference." }, + "would_exceed_max_block_cost_limit": { "type": "UInt64", "description": "Transaction would exceed max Block Cost Limit." }, + "unsupported_version": { "type": "UInt64", "description": "Transaction version is unsupported." }, + "invalid_writable_account": { "type": "UInt64", "description": "Transaction loads a writable account that cannot be written." }, + "would_exceed_max_account_cost_limit": { "type": "UInt64", "description": "Transaction would exceed max account limit within the block." 
}, + "would_exceed_account_data_block_limit": { "type": "UInt64", "description": "Transaction would exceed account data limit within the block." }, + "too_many_account_locks": { "type": "UInt64", "description": "Transaction locked too many accounts." }, + "address_lookup_table_not_found": { "type": "UInt64", "description": "Address lookup table not found." }, + "invalid_address_lookup_table_owner": { "type": "UInt64", "description": "Attempted to lookup addresses from an account owned by the wrong program." }, + "invalid_address_lookup_table_data": { "type": "UInt64", "description": "Attempted to lookup addresses from an invalid account." }, + "invalid_address_lookup_table_index": { "type": "UInt64", "description": "Address table lookup uses an invalid index." }, + "invalid_rent_paying_account": { "type": "UInt64", "description": "Transaction leaves an account with a lower balance than rent-exempt minimum." }, + "would_exceed_max_vote_cost_limit": { "type": "UInt64", "description": "Transaction would exceed max Vote Cost Limit." }, + "would_exceed_account_data_total_limit": { "type": "UInt64", "description": "Transaction would exceed total account data limit." }, + "duplicate_instruction": { "type": "UInt64", "description": "Transaction contains a duplicate instruction that is not allowed." }, + "insufficient_funds_for_rent": { "type": "UInt64", "description": "Transaction results in an account with insufficient funds for rent." }, + "max_loaded_accounts_data_size_exceeded": { "type": "UInt64", "description": "Transaction exceeded max loaded accounts data size cap." }, + "invalid_loaded_accounts_data_size_limit": { "type": "UInt64", "description": "LoadedAccountsDataSizeLimit set for transaction must be greater than 0." }, + "resanitization_needed": { "type": "UInt64", "description": "Sanitized transaction differed before/after feature activiation. Needs to be resanitized." 
}, + "program_execution_temporarily_restricted": { "type": "UInt64", "description": "Program execution is temporarily restricted on an account." }, + "unbalanced_transaction": { "type": "UInt64", "description": "The total balance before the transaction does not equal the total balance after the transaction." }, + "program_cache_hit_max_limit": { "type": "UInt64", "description": "The total program cache size hit the maximum allowed limit." } + } + }, + "processing_failed": { "type": "UInt64", "description": "Count of transactions for which the processing stage failed and won't land on chain" }, + "fee_only_transactions": { "type": "UInt64", "description": "Count of transactions that will land on chain but without executing" }, + "executed_failed_transactions": { "type": "UInt64", "description": "Count of transactions that execute on chain but failed" }, + "successful_transactions": { "type": "UInt64", "description": "Count of transactions that execute on chain and succeed" }, + "cost_model_undercount": { "type": "UInt64", "description": "Count of transactions that used more CUs than the cost model should have permitted them to" } + } + }, + "shred": { + "type": "Nested", + "description": "Metrics for shred tiles.", + "fields": { + "microblocks_abandoned": { "type": "UInt64", "description": "The number of microblocks that were abandoned because we switched slots without finishing the current slot" }, + "shred_processed": { + "type": "Tuple", + "description": "The result of processing a thread from the network", + "fields": { + "bad_slot": { "type": "UInt64", "description": "Shred was for a slot for which we don't know the leader" }, + "parse_failed": { "type": "UInt64", "description": "Shred parsing failed" }, + "rejected": { "type": "UInt64", "description": "Shred was invalid for one of many reasons" }, + "ignored": { "type": "UInt64", "description": "Shred was ignored because we had already received or reconstructed it" }, + "okay": { "type": "UInt64", 
"description": "Shred accepted to an incomplete FEC set" }, + "completes": { "type": "UInt64", "description": "Shred accepted and resulted in a valid, complete FEC set" } + } + }, + "fec_set_spilled": { "type": "UInt64", "description": "The number of FEC sets that were spilled because they didn't complete in time and we needed space" }, + "shred_rejected_initial": { "type": "UInt64", "description": "The number shreds that were rejected before any resources were allocated for the FEC set" }, + "fec_rejected_fatal": { "type": "UInt64", "description": "The number of FEC sets that were rejected for reasons that cause the whole FEC set to become invalid" } + } + }, + "store": { + "type": "Nested", + "description": "Metrics for store tiles.", + "fields": { + "transactions_inserted": { "type": "UInt64", "description": "Count of transactions produced while we were leader in the shreds that have been inserted so far" } + } + } + } +} diff --git a/src/disco/topo/fd_topo.h b/src/disco/topo/fd_topo.h index e8d2e81f03..a99a77d8fa 100644 --- a/src/disco/topo/fd_topo.h +++ b/src/disco/topo/fd_topo.h @@ -419,6 +419,17 @@ fd_topo_tile_name_cnt( fd_topo_t const * topo, return cnt; } +FD_FN_PURE static inline ulong +fd_topo_polled_in_cnt( fd_topo_t const * topo ) { + ulong cnt = 0; + for( ulong i=0; itile_cnt; i++ ) { + for( ulong j=0; jtiles[ i ].in_cnt; j++ ) { + if( FD_UNLIKELY( topo->tiles[ i ].in_link_poll[ j ] ) ) cnt++; + } + } + return cnt; +} + /* Finds the workspace of a given name in the topology. Returns ULONG_MAX if there is no such workspace. There can be at most one workspace of a given name. */