Mirror of https://github.com/XRPLF/rippled.git, synced 2026-04-28 23:17:59 +00:00

Compare commits: pratik/ote...copilot/re (14 commits)

2e0ea38d7d
aa1f84e226
ae7076c054
9a221d1291
5e6d8a4692
11c7d912f6
b7d6cdf713
193ddcbfac
3a70d9dfba
03e8a68670
28143d74af
ff4c538a9f
9fe94c47c3
3f307f8128

@@ -36,8 +36,3 @@ ignore:
- "src/tests/"
- "include/xrpl/beast/test/"
- "include/xrpl/beast/unit_test/"
# Telemetry modules — conditionally compiled behind XRPL_ENABLE_TELEMETRY,
# which is not enabled in coverage builds.
- "src/xrpld/telemetry/"
- "src/libxrpl/beast/insight/OTelCollector.cpp"
- "include/xrpl/beast/insight/OTelCollector.h"

@@ -4,9 +4,6 @@ Loop: test.jtx test.toplevel
Loop: test.jtx test.unit_test
test.unit_test ~= test.jtx

Loop: xrpl.telemetry xrpld.rpc
xrpld.rpc ~= xrpl.telemetry

Loop: xrpld.app xrpld.overlay
xrpld.app > xrpld.overlay

@@ -41,8 +41,6 @@ libxrpl.shamap > xrpl.basics
libxrpl.shamap > xrpl.nodestore
libxrpl.shamap > xrpl.protocol
libxrpl.shamap > xrpl.shamap
libxrpl.telemetry > xrpl.basics
libxrpl.telemetry > xrpl.telemetry
libxrpl.tx > xrpl.basics
libxrpl.tx > xrpl.conditions
libxrpl.tx > xrpl.core
@@ -101,7 +99,6 @@ test.core > xrpl.server
test.csf > xrpl.basics
test.csf > xrpld.consensus
test.csf > xrpl.json
test.csf > xrpl.telemetry
test.csf > xrpl.ledger
test.csf > xrpl.protocol
test.json > test.jtx
@@ -195,8 +192,6 @@ tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
tests.libxrpl > xrpl.protocol
tests.libxrpl > xrpl.protocol_autogen
tests.libxrpl > xrpl.telemetry
tests.libxrpl > xrpld.telemetry
xrpl.conditions > xrpl.basics
xrpl.conditions > xrpl.protocol
xrpl.core > xrpl.basics
@@ -230,7 +225,6 @@ xrpl.server > xrpl.shamap
xrpl.shamap > xrpl.basics
xrpl.shamap > xrpl.nodestore
xrpl.shamap > xrpl.protocol
xrpl.telemetry > xrpl.basics
xrpl.tx > xrpl.basics
xrpl.tx > xrpl.core
xrpl.tx > xrpl.ledger
@@ -240,7 +234,6 @@ xrpld.app > xrpl.basics
xrpld.app > xrpl.core
xrpld.app > xrpld.consensus
xrpld.app > xrpld.core
xrpld.app > xrpld.telemetry
xrpld.app > xrpl.json
xrpld.app > xrpl.ledger
xrpld.app > xrpl.net
@@ -250,14 +243,11 @@ xrpld.app > xrpl.rdb
xrpld.app > xrpl.resource
xrpld.app > xrpl.server
xrpld.app > xrpl.shamap
xrpld.app > xrpl.telemetry
xrpld.app > xrpl.tx
xrpld.consensus > xrpl.basics
xrpld.consensus > xrpl.json
xrpld.consensus > xrpl.ledger
xrpld.consensus > xrpl.protocol
xrpld.consensus > xrpl.telemetry
xrpld.consensus > xrpld.telemetry
xrpld.core > xrpl.basics
xrpld.core > xrpl.core
xrpld.core > xrpl.net
@@ -268,7 +258,6 @@ xrpld.overlay > xrpl.core
xrpld.overlay > xrpld.consensus
xrpld.overlay > xrpld.core
xrpld.overlay > xrpld.peerfinder
xrpld.overlay > xrpld.telemetry
xrpld.overlay > xrpl.json
xrpld.overlay > xrpl.ledger
xrpld.overlay > xrpl.protocol

.github/scripts/strategy-matrix/generate.py (vendored, 51 changed lines)
@@ -51,21 +51,20 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
    # Only generate a subset of configurations in PRs.
    if not all:
        # Debian:
        # - Bookworm using GCC 13: Debug on linux/amd64, set the reference
        #   fee to 500 and enable code coverage (which will be done below).
        # - Bookworm using GCC 15: Debug on linux/amd64, enable Address and
        #   UB sanitizers (which will be done below).
        # - Bookworm using GCC 13: Release on linux/amd64, set the reference
        #   fee to 500.
        # - Bookworm using GCC 15: Debug on linux/amd64, enable code
        #   coverage (which will be done below).
        # - Bookworm using Clang 16: Debug on linux/amd64, enable voidstar.
        # - Bookworm using Clang 17: Release on linux/amd64, set the
        #   reference fee to 1000.
        # - Bookworm using Clang 20: Debug on linux/amd64, enable Address
        #   and UB sanitizers (which will be done below).
        # - Bookworm using Clang 20: Debug on linux/amd64.
        if os["distro_name"] == "debian":
            skip = True
            if os["distro_version"] == "bookworm":
                if (
                    f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
                    and build_type == "Debug"
                    and build_type == "Release"
                    and architecture["platform"] == "linux/amd64"
                ):
                    cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}"
@@ -194,11 +193,11 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
        ):
            continue

        # Enable code coverage for Debian Bookworm using GCC 13 in Debug on
        # linux/amd64.
        # Enable code coverage for Debian Bookworm using GCC 15 in Debug on
        # linux/amd64
        if (
            f"{os['distro_name']}-{os['distro_version']}" == "debian-bookworm"
            and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
            and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
            and build_type == "Debug"
            and architecture["platform"] == "linux/amd64"
        ):
@@ -235,39 +234,23 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
        # Add the configuration to the list, with the most unique fields first,
        # so that they are easier to identify in the GitHub Actions UI, as long
        # names get truncated.
        # Add Address and UB sanitizers as separate configurations for specific
        # bookworm distros. Thread sanitizer is currently disabled (see below).
        # Add Address and Thread (both coupled with UB) sanitizers for specific bookworm distros.
        # GCC-Asan xrpld-embedded tests are failing because of https://github.com/google/sanitizers/issues/856
        if os[
            "distro_version"
        ] == "bookworm" and f"{os['compiler_name']}-{os['compiler_version']}" in [
            "gcc-15",
            "clang-20",
        ]:
        # Add ASAN configuration.
        if (
            os["distro_version"] == "bookworm"
            and f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
        ):
            # Add ASAN + UBSAN configuration.
            configurations.append(
                {
                    "config_name": config_name + "-asan",
                    "config_name": config_name + "-asan-ubsan",
                    "cmake_args": cmake_args,
                    "cmake_target": cmake_target,
                    "build_only": build_only,
                    "build_type": build_type,
                    "os": os,
                    "architecture": architecture,
                    "sanitizers": "address",
                }
            )
            # Add UBSAN configuration.
            configurations.append(
                {
                    "config_name": config_name + "-ubsan",
                    "cmake_args": cmake_args,
                    "cmake_target": cmake_target,
                    "build_only": build_only,
                    "build_type": build_type,
                    "os": os,
                    "architecture": architecture,
                    "sanitizers": "undefinedbehavior",
                    "sanitizers": "address,undefinedbehavior",
                }
            )
            # TSAN is deactivated due to seg faults with latest compilers.

@@ -117,18 +117,6 @@ if(rocksdb)
  target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
endif()

# OpenTelemetry distributed tracing (optional).
# When ON, links against opentelemetry-cpp and defines XRPL_ENABLE_TELEMETRY
# so that SpanGuard factory methods produce real OTel spans.
# When OFF (default), all tracing code compiles to no-ops with zero overhead.
# Enable via: conan install -o telemetry=True, or cmake -Dtelemetry=ON.
option(telemetry "Enable OpenTelemetry tracing" OFF)
if(telemetry)
  find_package(opentelemetry-cpp CONFIG REQUIRED)
  add_compile_definitions(XRPL_ENABLE_TELEMETRY)
  message(STATUS "OpenTelemetry tracing enabled")
endif()

# Work around changes to Conan recipe for now.
if(TARGET nudb::core)
  set(nudb nudb::core)

@@ -1,567 +0,0 @@
# Distributed Tracing Fundamentals

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Next**: [Architecture Analysis](./01-architecture-analysis.md)

---

## What is Distributed Tracing?

Distributed tracing is a method for tracking a request or data object as it flows through a distributed system. In a network like the XRP Ledger, a single transaction touches multiple independent nodes, each with its own logs and no shared memory. Distributed tracing connects these dots.

**Without tracing:** You see isolated logs on each node with no way to correlate them.

**With tracing:** You see the complete journey of a transaction or an event across all nodes it touched.

---

## Actors and Actions at a Glance

### Actors

| Who (Plain English) | Technical Term |
| --- | --- |
| A single unit of work being tracked | Span |
| The complete journey of a request | Trace |
| Data that links spans across services | Trace Context |
| Code that creates spans and propagates context | Instrumentation |
| Service that receives and processes traces | Collector |
| Storage and visualization system | Backend (Tempo) |
| Decision logic for which traces to keep | Sampler |

### Actions

| What Happens (Plain English) | Technical Term |
| --- | --- |
| Start tracking a new operation | Create a Span |
| Connect a child operation to its parent | Set `parent_span_id` |
| Group all related operations together | Share a `trace_id` |
| Pass tracking data between services | Context Propagation |
| Decide whether to record a trace | Sampling (Head or Tail) |
| Send completed traces to storage | Export (OTLP) |

---

## Core Concepts

### 1. Trace

A **trace** represents the entire journey of a request through the system. It has a unique `trace_id` that stays constant across all nodes.

```
Trace ID: abc123
├── Node A: received transaction
├── Node B: relayed transaction
├── Node C: included in consensus
└── Node D: applied to ledger
```

### 2. Span

A **span** represents a single unit of work within a trace. Each span has:

| Attribute | Description | Example |
| --- | --- | --- |
| `trace_id` | Identifies the trace | `event123` |
| `span_id` | Unique identifier | `span456` |
| `parent_span_id` | Parent span (if any) | `p_span123` |
| `name` | Operation name | `rpc.submit` |
| `start_time` | When work began (local time) | `2024-01-15T10:30:00Z` |
| `end_time` | When work completed (local time) | `2024-01-15T10:30:00.050Z` |
| `attributes` | Key-value metadata | `tx.hash=ABC...` |
| `status` | `OK`, `ERROR` (with message), or `UNSET` | `OK` |

### 3. Trace Context

**Trace context** is the data that propagates between services to link spans together. It contains:

- `trace_id` - The trace this span belongs to
- `span_id` - The current span (becomes parent for child spans)
- `trace_flags` - Sampling decisions
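
These pieces map onto a small API surface. A minimal sketch using the opentelemetry-cpp API (the tracer name `xrpld` and the attribute key are illustrative, not a settled convention):

```cpp
#include <opentelemetry/trace/provider.h>

// Minimal sketch: one span carrying the attributes described above.
void submitExample()
{
    namespace trace = opentelemetry::trace;

    auto tracer = trace::Provider::GetTracerProvider()->GetTracer("xrpld");

    auto span = tracer->StartSpan("rpc.submit");  // name; start_time recorded here
    span->SetAttribute("tx.hash", "ABC...");      // key-value metadata
    span->SetStatus(trace::StatusCode::kOk);      // status
    span->End();                                  // end_time recorded here
}
```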

---

## How Spans Form a Trace

Spans have parent-child relationships forming a tree structure:

```mermaid
flowchart TB
    subgraph trace["Trace: abc123"]
        A["tx.submit<br/>span_id: 001<br/>50ms"] --> B["tx.validate<br/>span_id: 002<br/>5ms"]
        A --> C["tx.relay<br/>span_id: 003<br/>10ms"]
        A --> D["tx.apply<br/>span_id: 004<br/>30ms"]
        D --> E["ledger.update<br/>span_id: 005<br/>20ms"]
    end

    style A fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style B fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style C fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style D fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style E fill:#bf360c,stroke:#8c2809,color:#ffffff
```

**Reading the diagram:**

- **tx.submit (blue, root)**: The top-level span representing the entire transaction submission; all other spans are its descendants.
- **tx.validate, tx.relay, tx.apply (green)**: Direct children of tx.submit, representing the three main stages -- validation, relay to peers, and application to the ledger.
- **ledger.update (red)**: A grandchild span nested under tx.apply, representing the actual ledger state mutation triggered by applying the transaction.
- **Arrows (parent to child)**: Each arrow indicates a parent-child span relationship where the parent's completion depends on the child finishing.

The same trace visualized as a **timeline (Gantt chart)**:

```
Time →    0ms       10ms      20ms      30ms      40ms      50ms
          ├───────────────────────────────────────────┤
tx.submit │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
          ├─────┤
tx.valid  │▓▓▓▓▓│
                ├──────────┤
tx.relay        │▓▓▓▓▓▓▓▓▓▓│
                ├────────────────────────────┤
tx.apply        │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
                          ├──────────────────┤
ledger                    │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
```

---

## Span Relationships

Spans don't always form simple parent-child trees. Distributed tracing defines several relationship types to capture different causal patterns:

### 1. Parent-Child (ChildOf)

The default relationship. The parent span **depends on** or **contains** the child span. The child runs within the scope of the parent.

```
tx.submit (parent)
├── tx.validate (child)  ← parent waits for this
├── tx.relay (child)     ← parent waits for this
└── tx.apply (child)     ← parent waits for this
```

**When to use:** Synchronous calls, nested operations, any case where the parent's completion depends on the child.
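
In opentelemetry-cpp this nesting is usually expressed through the active-span scope: while a span is active on the current thread, `StartSpan` picks it up as the parent. A sketch, reusing the span names above (not actual xrpld code):

```cpp
#include <opentelemetry/trace/provider.h>
#include <opentelemetry/trace/scope.h>

// Sketch: tx.validate becomes a child of tx.submit because tx.submit is the
// active span on this thread when tx.validate is started.
void handleSubmit()
{
    namespace trace = opentelemetry::trace;
    auto tracer = trace::Provider::GetTracerProvider()->GetTracer("xrpld");

    auto parent = tracer->StartSpan("tx.submit");
    trace::Scope scope{parent};  // make tx.submit the active span

    auto child = tracer->StartSpan("tx.validate");  // parent_span_id = tx.submit
    // ... validation work ...
    child->End();

    parent->End();
}
```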

### 2. Follows-From

A causal relationship where the first span **triggers** the second, but does **not wait** for it. The originator fires and moves on.

```
Time →

tx.receive  [=======]
                    ↓ triggers (follows-from)
tx.relay             [===========]   ← runs independently
```

**When to use:** Asynchronous jobs, queued work, fire-and-forget patterns. For example, a node receives a transaction and queues it for relay — the relay span _follows from_ the receive span but the receiver doesn't wait for relaying to complete.

> **OpenTracing** defined `FollowsFrom` as a first-class reference type alongside `ChildOf`.
> **OpenTelemetry** represents this using **Span Links** with descriptive attributes instead (see below).

### 3. Span Links (Cross-Trace and Non-Hierarchical)

Links connect spans that are **causally related but not in a parent-child hierarchy**. Unlike parent-child, links can cross trace boundaries.

```
Trace A                       Trace B
──────                        ──────
batch.schedule                batch.execute
├─ item.enqueue (span X)  ┌──►  process.item
├─ item.enqueue (span Y) ─┤     (links to X, Y, Z)
├─ item.enqueue (span Z)  └──►
```

**Use cases:**

| Pattern | Description |
| --- | --- |
| **Batch processing** | A batch span links back to all individual spans that contributed to it |
| **Fan-in** | An aggregation span links to the multiple producer spans it merges |
| **Fan-out** | Multiple downstream spans link back to the single span that triggered them |
| **Async handoff** | A deferred job links back to the request that queued it (follows-from) |
| **Cross-trace** | Correlating spans across independent traces (e.g., retries, related events) |

**Link structure:** Each link carries the target span's context plus optional attributes:

```
Link {
  trace_id: <target trace>
  span_id: <target span>
  attributes: { "link.description": "triggered by batch scheduler" }
}
```
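
Links are attached when a span is started. A sketch using opentelemetry-cpp's initializer-list overload of `StartSpan`; the span names mirror the batch example above and the `link.description` attribute is illustrative:

```cpp
#include <opentelemetry/trace/provider.h>

// Sketch: batch.execute links back to the enqueue spans X and Y, even though
// they belong to a different trace. Links are passed alongside attributes.
void executeBatch(opentelemetry::trace::SpanContext const& enqueueX,
                  opentelemetry::trace::SpanContext const& enqueueY)
{
    auto tracer =
        opentelemetry::trace::Provider::GetTracerProvider()->GetTracer("xrpld");

    auto span = tracer->StartSpan(
        "batch.execute",
        {{"batch.size", 2}},                                 // span attributes
        {{enqueueX, {{"link.description", "queued item"}}},  // span links
         {enqueueY, {{"link.description", "queued item"}}}});
    // ... process the batch ...
    span->End();
}
```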

### Relationship Summary

```mermaid
flowchart LR
    subgraph parent_child["Parent-Child"]
        direction TB
        P["Parent"] --> C["Child"]
    end

    subgraph follows_from["Follows-From"]
        direction TB
        A["Span A"] -.->|triggers| B["Span B"]
    end

    subgraph links["Span Links"]
        direction TB
        X["Span X\n(Trace 1)"] -.-|link| Y["Span Y\n(Trace 2)"]
    end

    parent_child ~~~ follows_from ~~~ links

    style P fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style C fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style A fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style B fill:#bf360c,stroke:#8c2809,color:#ffffff
    style X fill:#4a148c,stroke:#38006b,color:#ffffff
    style Y fill:#4a148c,stroke:#38006b,color:#ffffff
```

| Relationship | Same Trace? | Dependency? | OTel Mechanism |
| --- | --- | --- | --- |
| **Parent-Child** | Yes | Parent depends on child | `parent_span_id` |
| **Follows-From** | Usually | Causal but no dependency | Link + attributes |
| **Span Link** | Either | Correlation, no dependency | Link + attributes |

---

## Trace ID Generation

A `trace_id` is a 128-bit (16-byte) identifier that groups all spans belonging to one logical operation. How it's generated determines how easily you can find and correlate traces later.

### General Approaches

#### 1. Random (W3C Default)

Generate a random 128-bit ID when a trace starts. This is the standard approach for most services.

```
trace_id = random_128_bits()
```

| Pros | Cons |
| --- | --- |
| Simple, standard | No natural correlation to domain events |
| Guaranteed unique per trace | If propagation is lost, trace is broken |
| Works with all OTel tooling | "Find trace for TX abc" requires index lookup |

#### 2. Deterministic (Derived from Domain Data)

Compute the trace_id from a hash of a natural identifier. Every node independently derives the **same** trace_id for the same event.

```
trace_id = SHA-256(domain_identifier)[0:16]   // truncate to 128 bits
```

| Pros | Cons |
| --- | --- |
| Propagation-resilient — same ID computed everywhere | Same event processed twice (retry) shares trace_id |
| Natural search — domain ID maps directly to trace | Non-standard (tooling assumes random) |
| No coordination needed between nodes | 256→128 bit truncation (collision risk negligible at ~2⁶⁴) |

#### 3. Hybrid (Deterministic Prefix + Random Suffix)

First 8 bytes derived from domain data, last 8 bytes random.

```
trace_id = SHA-256(domain_identifier)[0:8] || random_64_bits()
```

| Pros | Cons |
| --- | --- |
| Prefix search: "find all traces for TX abc" | Must propagate to maintain full trace_id |
| Unique per processing instance | More complex generation logic |
| Retries get distinct trace_ids | Partial correlation only (prefix match) |

### XRPL Workflow Analysis

XRPL has a unique advantage: its core workflows produce **globally unique 256-bit hashes** that are known on every node. This makes deterministic trace_id generation practical in ways most systems can't achieve.

#### Natural Identifiers by Workflow

| Workflow | Natural Identifier | Size | Known at Start? | Same on All Nodes? |
| --- | --- | --- | --- | --- |
| **Transaction** | Transaction hash (`tid_`) | 256-bit | Yes — computed before signing | Yes — hash of canonical tx data |
| **Consensus round** | Previous ledger hash + ledger seq | 256+32 bit | Yes — known when round opens | Yes — all validators agree |
| **Validation** | Ledger hash being validated | 256-bit | Yes — from consensus result | Yes — same closed ledger |
| **Ledger catch-up** | Target ledger hash | 256-bit | Yes — we know what to fetch | Yes — identifies ledger globally |

#### Where These Identifiers Live in Code

```
Transaction:   STTx::getTransactionID()        → uint256 tid_
               TMTransaction::rawTransaction   → recompute hash from bytes

Consensus:     ConsensusProposal::prevLedger_  → uint256 (previous ledger hash)
               ConsensusProposal::position_    → uint256 (TxSet hash)
               LedgerHeader::seq               → uint32_t (ledger sequence)

Validation:    STValidation::getLedgerHash()   → uint256
               STValidation::getNodeID()       → NodeID (160-bit)

Ledger fetch:  InboundLedger constructor       → uint256 hash, uint32_t seq
               TMGetLedger::ledgerHash         → bytes (uint256)
```

### Recommended Strategy: Workflow-Scoped Deterministic

Each workflow type derives its trace_id from its natural domain identifier:

```
Transaction trace:  trace_id = SHA-256("tx"    || tx_hash)[0:16]
Consensus trace:    trace_id = SHA-256("cons"  || prev_ledger_hash || ledger_seq)[0:16]
Ledger catch-up:    trace_id = SHA-256("fetch" || target_ledger_hash)[0:16]
```

The string prefix (`"tx"`, `"cons"`, `"fetch"`) prevents collisions between workflows that might share underlying hashes.
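
A sketch of the derivation itself, using OpenSSL's SHA-256 for illustration (rippled could equally reuse its own digest helpers; the exact hash function is an open design detail):

```cpp
#include <openssl/sha.h>

#include <array>
#include <cstdint>
#include <cstring>
#include <string>

// Sketch: derive a 128-bit trace_id from a workflow prefix plus a 256-bit
// domain hash, truncating the SHA-256 digest to 16 bytes as described above.
std::array<std::uint8_t, 16>
deriveTraceId(std::string const& prefix, std::array<std::uint8_t, 32> const& domainHash)
{
    std::string input = prefix;
    input.append(reinterpret_cast<char const*>(domainHash.data()), domainHash.size());

    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256(reinterpret_cast<unsigned char const*>(input.data()), input.size(), digest);

    std::array<std::uint8_t, 16> traceId{};
    std::memcpy(traceId.data(), digest, traceId.size());  // truncate 256 -> 128 bits
    return traceId;
}

// Usage: auto traceId = deriveTraceId("tx", txHashBytes);
```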

**Why this works for XRPL:**

1. **Propagation-resilient** — Even if a P2P message drops trace context, every node independently computes the same trace_id from the same tx_hash or ledger_hash. Spans still correlate.

2. **Zero-cost search** — "Show me the trace for transaction ABC" becomes a direct lookup: compute `SHA-256("tx" || ABC)[0:16]` and query. No secondary index needed.

3. **Cross-workflow linking via Span Links** — A consensus trace links to individual transaction traces. A validation span links to the consensus trace. This connects the full picture without forcing everything into one giant trace.

### Cross-Workflow Correlation

Each workflow gets its own trace. Span Links tie them together:

```mermaid
flowchart TB
    subgraph tx_trace["Transaction Trace"]
        direction LR
        Tn["trace_id = f(tx_hash)"]:::note --> T1["tx.receive"] --> T2["tx.validate"] --> T3["tx.relay"]
    end

    subgraph cons_trace["Consensus Trace"]
        direction LR
        Cn["trace_id = f(prev_ledger, seq)"]:::note --> C1["cons.open"] --> C2["cons.propose"] --> C3["cons.accept"]
    end

    subgraph val_trace["Validation"]
        direction LR
        Vn["spans within consensus trace"]:::note --> V1["val.create"] --> V2["val.broadcast"]
    end

    subgraph fetch_trace["Catch-Up Trace"]
        direction LR
        Fn["trace_id = f(ledger_hash)"]:::note --> F1["fetch.request"] --> F2["fetch.receive"] --> F3["fetch.apply"]
    end

    C1 -.-|"span link\n(tx traces)"| T3
    C3 --> V1
    F1 -.-|"span link\n(target ledger)"| C3

    classDef note fill:none,stroke:#888,stroke-dasharray:5 5,color:#333,font-style:italic
    style T1 fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style T2 fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style T3 fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style C1 fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style C2 fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style C3 fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style V1 fill:#bf360c,stroke:#8c2809,color:#ffffff
    style V2 fill:#bf360c,stroke:#8c2809,color:#ffffff
    style F1 fill:#4a148c,stroke:#38006b,color:#ffffff
    style F2 fill:#4a148c,stroke:#38006b,color:#ffffff
    style F3 fill:#4a148c,stroke:#38006b,color:#ffffff
```

**Reading the diagram:**

- **Transaction Trace (blue)**: An independent trace whose `trace_id` is deterministically derived from the transaction hash. Contains receive, validate, and relay spans.
- **Consensus Trace (green)**: An independent trace whose `trace_id` is derived from the previous ledger hash and sequence number. Covers the open, propose, and accept phases.
- **Validation (red)**: Validation spans live within the consensus trace (not a separate trace). They are created after the accept phase completes.
- **Catch-Up Trace (purple)**: An independent trace for ledger acquisition, derived from the target ledger hash. Used when a node is behind and fetching missing ledgers.
- **Dotted arrows (span links)**: Cross-trace correlations. Consensus links to transaction traces it included; catch-up links to the consensus trace that produced the target ledger.
- **Solid arrow (C3 to V1)**: A parent-child relationship -- validation spans are direct children of the consensus accept span within the same trace.

**How a query flows:**

```
"Why was TX abc slow?"
  1. Compute trace_id = SHA-256("tx" || abc)[0:16]
  2. Find transaction trace → see it was included in consensus round N
  3. Follow span link → consensus trace for round N
  4. See which phase was slow (propose? accept?)
  5. If a node was catching up, follow link → catch-up trace
```

### Trade-offs to Consider

| Concern | Mitigation |
| --- | --- |
| **Retries get same trace_id** | Add `attempt` attribute to root span; spans have unique span_ids and timestamps |
| **256→128 bit truncation** | Birthday-bound collision at ~2⁶⁴ operations — negligible for XRPL's throughput |
| **Non-standard generation** | OTel spec allows any 16-byte non-zero value; tooling works on the hex string |
| **Hash computation cost** | SHA-256 is ~0.3μs per call; XRPL already computes these hashes for other purposes |
| **Late-binding identifiers** | Ledger hash isn't known until after consensus — validation spans use ledger_seq as fallback, then link to the consensus trace |

---

## Distributed Traces Across Nodes

In distributed systems like xrpld, traces span **multiple independent nodes**. The trace context must be propagated in network messages:

```mermaid
sequenceDiagram
    participant Client
    participant NodeA as Node A
    participant NodeB as Node B
    participant NodeC as Node C

    Client->>NodeA: Submit TX<br/>(no trace context)

    Note over NodeA: Creates new trace<br/>trace_id: abc123<br/>span: tx.receive

    NodeA->>NodeB: Relay TX<br/>(trace_id: abc123, parent: 001)

    Note over NodeB: Creates child span<br/>span: tx.relay<br/>parent_span_id: 001

    NodeA->>NodeC: Relay TX<br/>(trace_id: abc123, parent: 001)

    Note over NodeC: Creates child span<br/>span: tx.relay<br/>parent_span_id: 001

    Note over NodeA,NodeC: All spans share trace_id: abc123<br/>enabling correlation across nodes
```

**Reading the diagram:**

- **Client**: The external entity that submits a transaction. It does not carry trace context -- the trace originates at the first node.
- **Node A**: The entry point that creates a new trace (trace_id: abc123) and the root span `tx.receive`. It relays the transaction to peers with trace context attached.
- **Node B and Node C**: Peer nodes that receive the relayed transaction along with the propagated trace context. Each creates a child span under Node A's span, preserving the same `trace_id`.
- **Arrows with trace context**: The relay messages carry `trace_id` and `parent_span_id`, allowing each downstream node to link its spans back to the originating span on Node A.

---

## Context Propagation

For traces to work across nodes, **trace context must be propagated** in messages.

### What's in the Context (~26 bytes)

| Field | Size | Description |
| --- | --- | --- |
| `trace_id` | 16 bytes | Identifies the entire trace (constant across all nodes) |
| `span_id` | 8 bytes | The sender's current span (becomes parent on receiver) |
| `trace_flags` | 1 byte | Sampling decision (bit 0 = sampled; bits 1-7 reserved) |
| `trace_state` | variable | Optional vendor-specific data (typically omitted) |

### How span_id Changes at Each Hop

Only **one** `span_id` travels in the context: the sender's current span. Each node:

1. Extracts the received `span_id` and uses it as the `parent_span_id`
2. Creates a **new** `span_id` for its own span
3. Sends its own `span_id` as the parent when forwarding

```
Node A                       Node B                       Node C
──────                       ──────                       ──────

Span AAA                     Span BBB                     Span CCC
   │                            │                            │
   ▼                            ▼                            ▼
Context out:                 Context out:                 Context out:
├─ trace_id: abc123          ├─ trace_id: abc123          ├─ trace_id: abc123
├─ span_id: AAA ──────────►  ├─ span_id: BBB ──────────►  ├─ span_id: CCC ──────►
└─ flags: 01                 └─ flags: 01                 └─ flags: 01
                                │                            │
                             parent = AAA                 parent = BBB
```

The `trace_id` stays constant, but `span_id` **changes at every hop** to maintain the parent-child chain.

### Propagation Formats

There are two patterns:

#### HTTP/RPC Headers (W3C Trace Context)

```
traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
             │  │                                │                │
             │  │                                │                └── Flags (sampled)
             │  │                                └── Parent span ID (16 hex)
             │  └── Trace ID (32 hex)
             └── Version
```

#### Protocol Buffers (xrpld P2P messages)

```protobuf
message TMTransaction {
    bytes rawTransaction = 1;
    // ... existing fields ...

    // Trace context extension
    bytes trace_parent = 100;  // W3C traceparent
    bytes trace_state = 101;   // W3C tracestate
}
```
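
A sketch of the sending side, assuming the `trace_parent` field above is added to `TMTransaction`: the W3C propagator from opentelemetry-cpp serializes the active span's context into a carrier, and the resulting `traceparent` string is what would be copied into the outgoing message.

```cpp
#include <opentelemetry/context/propagation/text_map_propagator.h>
#include <opentelemetry/context/runtime_context.h>
#include <opentelemetry/trace/propagation/http_trace_context.h>

#include <map>
#include <string>

// Sketch: a simple string-map carrier for the W3C propagator.
class MapCarrier : public opentelemetry::context::propagation::TextMapCarrier
{
public:
    opentelemetry::nostd::string_view Get(
        opentelemetry::nostd::string_view key) const noexcept override
    {
        auto it = values.find(std::string{key});
        return it == values.end() ? "" : it->second;
    }

    void Set(opentelemetry::nostd::string_view key,
             opentelemetry::nostd::string_view value) noexcept override
    {
        values[std::string{key}] = std::string{value};
    }

    std::map<std::string, std::string> values;
};

// Returns the serialized context of the currently active span, e.g.
// "00-<trace_id>-<span_id>-01", ready to place into trace_parent.
std::string currentTraceParent()
{
    MapCarrier carrier;
    opentelemetry::trace::propagation::HttpTraceContext propagator;
    auto context = opentelemetry::context::RuntimeContext::GetCurrent();
    propagator.Inject(carrier, context);
    return carrier.values["traceparent"];
}
```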

---

## Sampling

Not every trace needs to be recorded. **Sampling** reduces overhead:

### Head Sampling (at trace start)

```
Request arrives → Random 10% chance → Record or skip entire trace
```

- ✅ Low overhead
- ❌ May miss interesting traces
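
The decision itself is simple. A sketch of ratio-based head sampling keyed off the `trace_id`, similar in spirit to the OTel SDK's `TraceIdRatioBased` sampler (the threshold math is illustrative, not the SDK's exact algorithm):

```cpp
#include <array>
#include <cstdint>

// Sketch: keep ~10% of traces, with the decision derived from the trace_id
// so every node that sees the same trace makes the same keep/drop choice.
bool shouldSample(std::array<std::uint8_t, 16> const& traceId, double ratio = 0.10)
{
    std::uint64_t prefix = 0;
    for (int i = 0; i < 8; ++i)
        prefix = (prefix << 8) | traceId[i];  // first 8 bytes as an integer

    auto const threshold =
        static_cast<std::uint64_t>(ratio * static_cast<double>(UINT64_MAX));
    return prefix <= threshold;
}
```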

### Tail Sampling (after trace completes)

```
Trace completes → Collector evaluates:
  - Error?  → KEEP
  - Slow?   → KEEP
  - Normal? → Sample 10%
```

- ✅ Never loses important traces
- ❌ Higher memory usage at collector

---

## Key Benefits for xrpld

| Challenge | How Tracing Helps |
| --- | --- |
| "Where is my transaction?" | Follow trace across all nodes it touched |
| "Why was consensus slow?" | See timing breakdown of each phase |
| "Which node is the bottleneck?" | Compare span durations across nodes |
| "What happened during the outage?" | Correlate errors across the network |

---

## Glossary

| Term | Definition |
| --- | --- |
| **Trace** | Complete journey of a request, identified by `trace_id` |
| **Span** | Single operation within a trace |
| **Parent-Child** | Span relationship where the parent depends on the child |
| **Follows-From** | Causal relationship where originator doesn't wait for the result |
| **Span Link** | Non-hierarchical connection between spans, possibly across traces |
| **Deterministic ID** | Trace ID derived from domain data (e.g., tx_hash) instead of random |
| **Context** | Data propagated between services (`trace_id`, `span_id`, flags) |
| **Instrumentation** | Code that creates spans and propagates context |
| **Collector** | Service that receives, processes, and exports traces |
| **Backend** | Storage/visualization system (Tempo) |
| **Head Sampling** | Sampling decision at trace start |
| **Tail Sampling** | Sampling decision after trace completes |

---

_Next: [Architecture Analysis](./01-architecture-analysis.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_

@@ -1,467 +0,0 @@
# Architecture Analysis

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Design Decisions](./02-design-decisions.md) | [Implementation Strategy](./03-implementation-strategy.md)

---

## 1.1 Current xrpld Architecture Overview

> **WS** = WebSocket | **UNL** = Unique Node List | **TxQ** = Transaction Queue | **StatsD** = Statistics Daemon

The xrpld node software consists of several interconnected components that need instrumentation for distributed tracing:

```mermaid
flowchart TB
    subgraph xrpld["xrpld Node"]
        subgraph services["Core Services"]
            RPC["RPC Server<br/>(HTTP/WS/gRPC)"]
            Overlay["Overlay<br/>(P2P Network)"]
            Consensus["Consensus<br/>(RCLConsensus)"]
            ValidatorList["ValidatorList<br/>(UNL Mgmt)"]
        end

        JobQueue["JobQueue<br/>(Thread Pool)"]

        subgraph processing["Processing Layer"]
            NetworkOPs["NetworkOPs<br/>(Tx Processing)"]
            LedgerMaster["LedgerMaster<br/>(Ledger Mgmt)"]
            NodeStore["NodeStore<br/>(Database)"]
            InboundLedgers["InboundLedgers<br/>(Ledger Sync)"]
        end

        subgraph appservices["Application Services"]
            PathFind["PathFinding<br/>(Payment Paths)"]
            TxQ["TxQ<br/>(Fee Escalation)"]
            LoadMgr["LoadManager<br/>(Fee/Load)"]
        end

        subgraph observability["Existing Observability"]
            PerfLog["PerfLog<br/>(JSON)"]
            Insight["Insight<br/>(StatsD)"]
            Logging["Logging<br/>(Journal)"]
        end

        services --> JobQueue
        JobQueue --> processing
        JobQueue --> appservices
    end

    style xrpld fill:#424242,stroke:#212121,color:#ffffff
    style services fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style processing fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style appservices fill:#6a1b9a,stroke:#4a148c,color:#ffffff
    style observability fill:#e65100,stroke:#bf360c,color:#ffffff
```

**Reading the diagram:**

- **Core Services (blue)**: The entry points into xrpld -- RPC Server handles client requests, Overlay manages peer-to-peer networking, Consensus drives agreement, and ValidatorList manages trusted validators.
- **JobQueue (center)**: The asynchronous thread pool that decouples Core Services from the Processing and Application layers. All work flows through it.
- **Processing Layer (green)**: Core business logic -- NetworkOPs processes transactions, LedgerMaster manages ledger state, NodeStore handles persistence, and InboundLedgers synchronizes missing data.
- **Application Services (purple)**: Higher-level features -- PathFinding computes payment routes, TxQ manages fee-based queuing, and LoadManager tracks server load.
- **Existing Observability (orange)**: The current monitoring stack (PerfLog, Insight, Journal logging) that OpenTelemetry will complement, not replace.
- **Arrows (Services to JobQueue to layers)**: Work originates at Core Services, is enqueued onto the JobQueue, and dispatched to Processing or Application layers for execution.

---

## 1.1.1 Actors and Actions

### Actors

| Who (Plain English) | Technical Term |
| --- | --- |
| Network node running XRPL software | xrpld node |
| External client submitting requests | RPC Client |
| Network neighbor sharing data | Peer (PeerImp) |
| Request handler for client queries | RPC Server (ServerHandler) |
| Command executor for specific RPC methods | RPCHandler |
| Agreement process between nodes | Consensus (RCLConsensus) |
| Transaction processing coordinator | NetworkOPs |
| Background task scheduler | JobQueue |
| Ledger state manager | LedgerMaster |
| Payment route calculator | PathFinding (Pathfinder) |
| Transaction waiting room | TxQ (Transaction Queue) |
| Fee adjustment system | LoadManager |
| Trusted validator list manager | ValidatorList |
| Protocol upgrade tracker | AmendmentTable |
| Ledger state hash tree | SHAMap |
| Persistent key-value storage | NodeStore |

### Actions

| What Happens (Plain English) | Technical Term |
| --- | --- |
| Client sends a request to a node | `rpc.request` |
| Node executes a specific RPC command | `rpc.command.*` |
| Node receives a transaction from a peer | `tx.receive` |
| Node checks if a transaction is valid | `tx.validate` |
| Node forwards a transaction to neighbors | `tx.relay` |
| Nodes agree on which transactions to include | `consensus.round` |
| Consensus progresses through phases | `consensus.phase.*` |
| Node builds a new confirmed ledger | `ledger.build` |
| Node fetches missing ledger data from peers | `ledger.acquire` |
| Node computes payment routes | `pathfind.compute` |
| Node queues a transaction for later processing | `txq.enqueue` |
| Node increases fees due to high load | `fee.escalate` |
| Node fetches the latest trusted validator list | `validator.list.fetch` |
| Node votes on a protocol amendment | `amendment.vote` |
| Node synchronizes state tree data | `shamap.sync` |

---

## 1.2 Key Components for Instrumentation

> **TxQ** = Transaction Queue | **UNL** = Unique Node List

| Component | Location | Purpose | Trace Value |
| --- | --- | --- | --- |
| **Overlay** | `src/xrpld/overlay/` | P2P communication | Message propagation timing |
| **PeerImp** | `src/xrpld/overlay/detail/PeerImp.cpp` | Individual peer handling | Per-peer latency |
| **RCLConsensus** | `src/xrpld/app/consensus/RCLConsensus.cpp` | Consensus algorithm | Round timing, phase analysis |
| **NetworkOPs** | `src/xrpld/app/misc/NetworkOPs.cpp` | Transaction processing | Tx lifecycle tracking |
| **ServerHandler** | `src/xrpld/rpc/detail/ServerHandler.cpp` | RPC entry point | Request latency |
| **RPCHandler** | `src/xrpld/rpc/detail/RPCHandler.cpp` | Command execution | Per-command timing |
| **JobQueue** | `src/xrpl/core/JobQueue.h` | Async task execution | Queue wait times |
| **PathFinding** | `src/xrpld/app/paths/` | Payment path computation | Path latency, cache hits |
| **TxQ** | `src/xrpld/app/misc/TxQ.cpp` | Transaction queue/fees | Queue depth, eviction rates |
| **LoadManager** | `src/xrpld/app/main/LoadManager.cpp` | Fee escalation/load | Fee levels, load factors |
| **InboundLedgers** | `src/xrpld/app/ledger/InboundLedgers.cpp` | Ledger acquisition | Sync time, peer reliability |
| **ValidatorList** | `src/xrpld/app/misc/ValidatorList.cpp` | UNL management | List freshness, fetch failures |
| **AmendmentTable** | `src/xrpld/app/misc/AmendmentTable.cpp` | Protocol amendments | Voting status, activation events |
| **SHAMap** | `src/xrpld/shamap/` | State hash tree | Sync speed, missing nodes |

---

## 1.3 Transaction Flow Diagram

Transaction flow spans multiple nodes in the network. Each node creates linked spans to form a distributed trace:

```mermaid
sequenceDiagram
    participant Client
    participant PeerA as Peer A (Receive)
    participant PeerB as Peer B (Relay)
    participant PeerC as Peer C (Validate)

    Client->>PeerA: 1. Submit TX

    rect rgb(230, 245, 255)
        Note over PeerA: tx.receive SPAN START
        PeerA->>PeerA: HashRouter Deduplication
        PeerA->>PeerA: tx.validate (child span)
    end

    PeerA->>PeerB: 2. Relay TX (with trace ctx)

    rect rgb(230, 245, 255)
        Note over PeerB: tx.receive (linked span)
    end

    PeerB->>PeerC: 3. Relay TX

    rect rgb(230, 245, 255)
        Note over PeerC: tx.receive (linked span)
        PeerC->>PeerC: tx.process
    end

    Note over Client,PeerC: DISTRIBUTED TRACE (same trace_id: abc123)
```

**Reading the diagram:**

- **Client**: The external entity that submits a transaction to Peer A. It has no trace context -- the trace starts at the first node.
- **Peer A (Receive)**: The entry node that creates the root span `tx.receive`, runs HashRouter deduplication to avoid processing duplicates, and creates a child `tx.validate` span.
- **Peer A to Peer B arrow**: The relay message carries trace context (trace_id + parent span_id), enabling Peer B to create a linked span under the same trace.
- **Peer B (Relay)**: Receives the transaction and trace context, creates a `tx.receive` span linked to Peer A's trace, then relays onward.
- **Peer C (Validate)**: Final hop in this example. Creates a linked `tx.receive` span and runs `tx.process` to fully process the transaction.
- **Blue rectangles**: Highlight the span boundaries on each node, showing where instrumentation creates and closes spans.

### Trace Structure

```
trace_id: abc123
├── span: tx.receive (Peer A)
│   ├── span: tx.validate
│   └── span: tx.relay
├── span: tx.receive (Peer B) [parent: Peer A]
│   └── span: tx.relay
└── span: tx.receive (Peer C) [parent: Peer B]
    └── span: tx.process
```

---

## 1.4 Consensus Round Flow

Consensus rounds are multi-phase operations that benefit significantly from tracing:

```mermaid
flowchart TB
    subgraph round["consensus.round (root span)"]
        attrs["Attributes:<br/>xrpl.consensus.ledger.seq = 12345678<br/>xrpl.consensus.mode = proposing<br/>xrpl.consensus.proposers = 35"]

        subgraph open["consensus.phase.open"]
            open_desc["Duration: ~3s<br/>Waiting for transactions"]
        end

        subgraph establish["consensus.phase.establish"]
            est_attrs["proposals_received = 28<br/>disputes_resolved = 3"]
            est_children["├── consensus.proposal.receive (×28)<br/>├── consensus.proposal.send (×1)<br/>└── consensus.dispute.resolve (×3)"]
        end

        subgraph accept["consensus.phase.accept"]
            acc_attrs["transactions_applied = 150<br/>ledger.hash = DEF456..."]
            acc_children["├── ledger.build<br/>└── ledger.validate"]
        end

        attrs --> open
        open --> establish
        establish --> accept
    end

    style round fill:#f57f17,stroke:#e65100,color:#ffffff
    style open fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style establish fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style accept fill:#c2185b,stroke:#880e4f,color:#ffffff
```

**Reading the diagram:**

- **consensus.round (orange, root span)**: The top-level span encompassing the entire consensus round, with attributes like ledger sequence, mode, and proposer count.
- **consensus.phase.open (blue)**: The first phase where the node waits (~3s) to collect incoming transactions before proposing.
- **consensus.phase.establish (green)**: The negotiation phase where validators exchange proposals, resolve disputes, and converge on a transaction set. Child spans track each proposal received/sent and each dispute resolved.
- **consensus.phase.accept (pink)**: The final phase where the agreed transaction set is applied, a new ledger is built, and the ledger is validated. Child spans cover `ledger.build` and `ledger.validate`.
- **Arrows (open to establish to accept)**: The sequential flow through the three consensus phases. Each phase must complete before the next begins.

---

## 1.5 RPC Request Flow

> **WS** = WebSocket

RPC requests support W3C Trace Context headers for distributed tracing across services:

```mermaid
flowchart TB
    subgraph request["rpc.request (root span)"]
        http["HTTP Request — POST /<br/>traceparent:<br/>00-abc123...-def456...-01"]

        attrs["Attributes:<br/>http.method = POST<br/>net.peer.ip = 192.168.1.100<br/>xrpl.rpc.command = submit"]

        subgraph enqueue["jobqueue.enqueue"]
            job_attr["xrpl.job.type = jtCLIENT_RPC"]
        end

        subgraph command["rpc.command.submit"]
            cmd_attrs["xrpl.rpc.version = 2<br/>xrpl.rpc.role = user"]
            cmd_children["├── tx.deserialize<br/>├── tx.validate_local<br/>└── tx.submit_to_network"]
        end

        response["Response: 200 OK<br/>Duration: 45ms"]

        http --> attrs
        attrs --> enqueue
        enqueue --> command
        command --> response
    end

    style request fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style enqueue fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style command fill:#e65100,stroke:#bf360c,color:#ffffff
```

**Reading the diagram:**

- **rpc.request (green, root span)**: The outermost span representing the full RPC request lifecycle, from HTTP receipt to response. Carries the W3C `traceparent` header for distributed tracing.
- **HTTP Request node**: Shows the incoming POST request with its `traceparent` header and extracted attributes (method, peer IP, command name).
- **jobqueue.enqueue (blue)**: The span covering the asynchronous handoff from the RPC thread to the JobQueue worker thread. The trace context is preserved across this async boundary.
- **rpc.command.submit (orange)**: The span for the actual command execution, with child spans for deserialization, local validation, and network submission.
- **Response node**: The final output with HTTP status and total duration, marking the end of the root span.
- **Arrows (top to bottom)**: The sequential processing pipeline -- receive request, extract attributes, enqueue job, execute command, return response.
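
A sketch of the extraction step at the RPC entry point, assuming the incoming headers are available as a string map (the real container used by ServerHandler may differ). The returned context can then be used as the parent when starting the `rpc.request` root span.

```cpp
#include <opentelemetry/context/propagation/text_map_propagator.h>
#include <opentelemetry/context/runtime_context.h>
#include <opentelemetry/trace/propagation/http_trace_context.h>

#include <map>
#include <string>

// Sketch: read the W3C traceparent header of an incoming RPC request so the
// rpc.request root span joins the caller's trace.
class HeaderCarrier : public opentelemetry::context::propagation::TextMapCarrier
{
public:
    explicit HeaderCarrier(std::map<std::string, std::string> const& h) : headers(h) {}

    opentelemetry::nostd::string_view Get(
        opentelemetry::nostd::string_view key) const noexcept override
    {
        auto it = headers.find(std::string{key});
        return it == headers.end() ? "" : it->second;
    }

    void Set(opentelemetry::nostd::string_view,
             opentelemetry::nostd::string_view) noexcept override
    {
        // Not needed on the extract path.
    }

    std::map<std::string, std::string> const& headers;
};

// Returns a context containing the remote span; use it as the parent of the
// rpc.request span started by the handler.
opentelemetry::context::Context
extractTraceContext(std::map<std::string, std::string> const& headers)
{
    HeaderCarrier carrier{headers};
    opentelemetry::trace::propagation::HttpTraceContext propagator;
    auto current = opentelemetry::context::RuntimeContext::GetCurrent();
    return propagator.Extract(carrier, current);
}
```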

---

## 1.6 Key Trace Points

> **TxQ** = Transaction Queue

The following table identifies priority instrumentation points across the codebase:

| Category | Span Name | File | Method | Priority |
| --- | --- | --- | --- | --- |
| **Transaction** | `tx.receive` | `PeerImp.cpp` | `handleTransaction()` | High |
| **Transaction** | `tx.validate` | `NetworkOPs.cpp` | `processTransaction()` | High |
| **Transaction** | `tx.process` | `NetworkOPs.cpp` | `doTransactionSync()` | High |
| **Transaction** | `tx.relay` | `OverlayImpl.cpp` | `relay()` | Medium |
| **Consensus** | `consensus.round` | `RCLConsensus.cpp` | `startRound()` | High |
| **Consensus** | `consensus.phase.*` | `Consensus.h` | `timerEntry()` | High |
| **Consensus** | `consensus.proposal.*` | `RCLConsensus.cpp` | `peerProposal()` | Medium |
| **RPC** | `rpc.request` | `ServerHandler.cpp` | `onRequest()` | High |
| **RPC** | `rpc.command.*` | `RPCHandler.cpp` | `doCommand()` | High |
| **Peer** | `peer.connect` | `OverlayImpl.cpp` | `onHandoff()` | Low |
| **Peer** | `peer.message.*` | `PeerImp.cpp` | `onMessage()` | Low |
| **Ledger** | `ledger.acquire` | `InboundLedgers.cpp` | `acquire()` | Medium |
| **Ledger** | `ledger.build` | `RCLConsensus.cpp` | `buildLCL()` | High |
| **PathFinding** | `pathfind.request` | `PathRequest.cpp` | `doUpdate()` | High |
| **PathFinding** | `pathfind.compute` | `Pathfinder.cpp` | `findPaths()` | High |
| **TxQ** | `txq.enqueue` | `TxQ.cpp` | `apply()` | High |
| **TxQ** | `txq.apply` | `TxQ.cpp` | `processClosedLedger()` | High |
| **Fee** | `fee.escalate` | `LoadManager.cpp` | `raiseLocalFee()` | Medium |
| **Ledger** | `ledger.replay` | `LedgerReplayer.h` | `replay()` | Medium |
| **Ledger** | `ledger.delta` | `LedgerDeltaAcquire.h` | `processData()` | Medium |
| **Validator** | `validator.list.fetch` | `ValidatorList.cpp` | `verify()` | Medium |
| **Validator** | `validator.manifest` | `Manifest.cpp` | `applyManifest()` | Low |
| **Amendment** | `amendment.vote` | `AmendmentTable.cpp` | `doVoting()` | Low |
| **SHAMap** | `shamap.sync` | `SHAMap.cpp` | `fetchRoot()` | Medium |
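
The CMake change earlier in this diff mentions `SpanGuard` factory methods, but their interface is not shown anywhere here. A hypothetical sketch of what a compile-time-switchable RAII guard for the trace points above could look like:

```cpp
#ifdef XRPL_ENABLE_TELEMETRY
#include <opentelemetry/trace/provider.h>
#endif

#include <string>

// Hypothetical SpanGuard-style RAII helper (the real interface is not shown
// in this diff). With telemetry off the guard is an empty object, matching
// the "no-ops with zero overhead" behaviour described in the CMake comment.
class SpanGuard
{
public:
    explicit SpanGuard(std::string const& name)
    {
#ifdef XRPL_ENABLE_TELEMETRY
        span_ = opentelemetry::trace::Provider::GetTracerProvider()
                    ->GetTracer("xrpld")
                    ->StartSpan(name);
#else
        (void)name;
#endif
    }

    ~SpanGuard()
    {
#ifdef XRPL_ENABLE_TELEMETRY
        if (span_)
            span_->End();
#endif
    }

    SpanGuard(SpanGuard const&) = delete;
    SpanGuard& operator=(SpanGuard const&) = delete;

private:
#ifdef XRPL_ENABLE_TELEMETRY
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span_;
#endif
};

// Hypothetical usage at one of the trace points above:
//   void PeerImp::handleTransaction(...) { SpanGuard guard{"tx.receive"}; ... }
```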
|
||||
|
||||
---
|
||||
|
||||
## 1.7 Instrumentation Priority
|
||||
|
||||
> **TxQ** = Transaction Queue
|
||||
|
||||
```mermaid
|
||||
quadrantChart
|
||||
title Instrumentation Priority Matrix
|
||||
x-axis Low Complexity --> High Complexity
|
||||
y-axis Low Value --> High Value
|
||||
quadrant-1 Implement First
|
||||
quadrant-2 Plan Carefully
|
||||
quadrant-3 Quick Wins
|
||||
quadrant-4 Consider Later
|
||||
|
||||
RPC Tracing: [0.2, 0.92]
|
||||
Transaction Tracing: [0.55, 0.88]
|
||||
Consensus Tracing: [0.78, 0.82]
|
||||
PathFinding: [0.38, 0.75]
|
||||
TxQ and Fees: [0.25, 0.65]
|
||||
Ledger Sync: [0.62, 0.58]
|
||||
Peer Message Tracing: [0.35, 0.25]
|
||||
JobQueue Tracing: [0.2, 0.48]
|
||||
Validator Mgmt: [0.48, 0.42]
|
||||
Amendment Tracking: [0.15, 0.32]
|
||||
SHAMap Operations: [0.72, 0.45]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 1.8 Observable Outcomes
|
||||
|
||||
> **TxQ** = Transaction Queue | **UNL** = Unique Node List
|
||||
|
||||
After implementing OpenTelemetry, operators and developers will gain visibility into the following:
|
||||
|
||||
### 1.8.1 What You Will See: Traces
|
||||
|
||||
| Trace Type | Description | Example Query in Grafana/Tempo |
|
||||
| -------------------------- | ------------------------------------------------------------------------------------------- | ---------------------------------------------------- |
|
||||
| **Transaction Lifecycle** | Full journey from RPC submission through validation, relay, consensus, and ledger inclusion | `{service.name="xrpld" && xrpl.tx.hash="ABC123..."}` |
|
||||
| **Cross-Node Propagation** | Transaction path across multiple xrpld nodes with timing | `{xrpl.tx.relay_count > 0}` |
|
||||
| **Consensus Rounds** | Complete round with all phases (open, establish, accept) | `{span.name=~"consensus.round.*"}` |
|
||||
| **RPC Request Processing** | Individual command execution with timing breakdown | `{xrpl.rpc.command="account_info"}` |
|
||||
| **Ledger Acquisition** | Peer-to-peer ledger data requests and responses | `{span.name="ledger.acquire"}` |
|
||||
| **PathFinding Latency** | Path computation time and cache effectiveness for payment RPCs | `{span.name="pathfind.compute"}` |
|
||||
| **TxQ Behavior** | Queue depth, eviction patterns, fee escalation during congestion | `{span.name=~"txq.*"}` |
|
||||
| **Ledger Sync** | Full acquisition timeline including delta and transaction fetches | `{span.name=~"ledger.acquire.*"}` |
|
||||
| **Validator Health** | UNL fetch success, manifest updates, stale list detection | `{span.name=~"validator.*"}` |
|
||||
|
||||
### 1.8.2 What You Will See: Metrics (Derived from Traces)
|
||||
|
||||
| Metric | Description | Dashboard Panel |
|
||||
| ----------------------------- | --------------------------------------- | --------------------------- |
|
||||
| **RPC Latency (p50/p95/p99)** | Response time distribution per command | Heatmap by command |
|
||||
| **Transaction Throughput** | Transactions processed per second | Time series graph |
|
||||
| **Consensus Round Duration** | Time to complete consensus phases | Histogram |
|
||||
| **Cross-Node Latency** | Time for transaction to reach N nodes | Line chart with percentiles |
|
||||
| **Error Rate** | Failed transactions/RPC calls by type | Stacked bar chart |
|
||||
| **PathFinding Latency** | Path computation time per currency pair | Heatmap by currency |
|
||||
| **TxQ Depth** | Queued transactions over time | Time series with thresholds |
|
||||
| **Fee Escalation Level** | Current fee multiplier | Gauge with alert thresholds |
|
||||
| **Ledger Sync Duration** | Time to acquire missing ledgers | Histogram |
|
||||
|
||||
### 1.8.3 Concrete Dashboard Examples

**Transaction Trace View (Tempo):**

```
┌────────────────────────────────────────────────────────────────────────────────┐
│ Trace: abc123... (Transaction Submission) Duration: 847ms │
├────────────────────────────────────────────────────────────────────────────────┤
│ ├── rpc.request [ServerHandler] ████░░░░░░ 45ms │
│ │ └── rpc.command.submit [RPCHandler] ████░░░░░░ 42ms │
│ │ └── tx.receive [NetworkOPs] ███░░░░░░░ 35ms │
│ │ ├── tx.validate [TxQ] █░░░░░░░░░ 8ms │
│ │ └── tx.relay [Overlay] ██░░░░░░░░ 15ms │
│ │ ├── tx.receive [Node-B] █████░░░░░ 52ms │
│ │ │ └── tx.relay [Node-B] ██░░░░░░░░ 18ms │
│ │ └── tx.receive [Node-C] ██████░░░░ 65ms │
│ └── consensus.round [RCLConsensus] ████████░░ 720ms │
│ ├── consensus.phase.open ██░░░░░░░░ 180ms │
│ ├── consensus.phase.establish █████░░░░░ 480ms │
│ └── consensus.phase.accept █░░░░░░░░░ 60ms │
└────────────────────────────────────────────────────────────────────────────────┘
```

**RPC Performance Dashboard Panel:**

```
┌─────────────────────────────────────────────────────────────┐
│ RPC Command Latency (Last 1 Hour) │
├─────────────────────────────────────────────────────────────┤
│ Command │ p50 │ p95 │ p99 │ Errors │ Rate │
│──────────────────┼────────┼────────┼────────┼────────┼──────│
│ account_info │ 12ms │ 45ms │ 89ms │ 0.1% │ 150/s│
│ submit │ 35ms │ 120ms │ 250ms │ 2.3% │ 45/s│
│ ledger │ 8ms │ 25ms │ 55ms │ 0.0% │ 80/s│
│ tx │ 15ms │ 50ms │ 100ms │ 0.5% │ 60/s│
│ server_info │ 5ms │ 12ms │ 20ms │ 0.0% │ 200/s│
└─────────────────────────────────────────────────────────────┘
```

**Consensus Health Dashboard Panel:**

```mermaid
---
config:
  xyChart:
    width: 1200
    height: 400
    plotReservedSpacePercent: 50
    chartOrientation: vertical
  themeVariables:
    xyChart:
      plotColorPalette: "#3498db"
---
xychart-beta
  title "Consensus Round Duration (Last 24 Hours)"
  x-axis "Time of Day (Hours)" [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
  y-axis "Duration (seconds)" 1 --> 5
  line [2.1, 2.4, 2.8, 3.2, 3.8, 4.3, 4.5, 5.0, 4.7, 4.0, 3.2, 2.6, 2.0]
```

### 1.8.4 Operator Actionable Insights

| Scenario | What You'll See | Action |
| ------------------------- | ---------------------------------------------------------------------------- | ------------------------------------------------ |
| **Slow RPC** | Span showing which phase is slow (parsing, execution, serialization) | Optimize specific code path |
| **Transaction Stuck** | Trace stops at validation; error attribute shows reason | Fix transaction parameters |
| **Consensus Delay** | Phase.establish taking too long; proposer attribute shows missing validators | Investigate network connectivity |
| **Memory Spike** | Large batch of spans correlating with memory increase | Tune batch_size or sampling |
| **Network Partition** | Traces missing cross-node links for specific peer | Check peer connectivity |
| **Path Computation Slow** | pathfind.compute span shows high latency; cache miss rate in attributes | Warm the RippleLineCache, check order book depth |
| **TxQ Full** | txq.enqueue spans show evictions; fee.escalate spans increasing | Monitor fee levels, alert operators |
| **Ledger Sync Stalled** | ledger.acquire spans timing out; peer reliability attributes show issues | Check peer connectivity, add trusted peers |
| **UNL Stale** | validator.list.fetch spans failing; last_update attribute aging | Verify validator site URLs, check DNS |

### 1.8.5 Developer Debugging Workflow

1. **Find Transaction**: Query by `xrpl.tx.hash` to get full trace
2. **Identify Bottleneck**: Look at span durations to find slowest component
3. **Check Attributes**: Review `xrpl.tx.validity`, `xrpl.rpc.status` for errors
4. **Correlate Logs**: Use `trace_id` to find related PerfLog entries
5. **Compare Nodes**: Filter by `service.instance.id` to compare behavior across nodes

---
|
||||
|
||||
_Next: [Design Decisions](./02-design-decisions.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
|
||||
@@ -1,706 +0,0 @@
|
||||
# Design Decisions
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Architecture Analysis](./01-architecture-analysis.md) | [Code Samples](./04-code-samples.md)
|
||||
|
||||
---
|
||||
|
||||
## 2.1 OpenTelemetry Components
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
### 2.1.1 SDK Selection
|
||||
|
||||
**Primary Choice**: OpenTelemetry C++ SDK (`opentelemetry-cpp`)
|
||||
|
||||
| Component                                | Purpose                | Required    |
| ---------------------------------------- | ---------------------- | ----------- |
| `opentelemetry-cpp::api`                 | Tracing API headers    | Yes         |
| `opentelemetry-cpp::sdk`                 | SDK implementation     | Yes         |
| `opentelemetry-cpp::ext`                 | Extensions (exporters) | Yes         |
| `opentelemetry-cpp::otlp_grpc_exporter`  | OTLP/gRPC export       | Recommended |
| `opentelemetry-cpp::otlp_http_exporter`  | OTLP/HTTP export       | Alternative |

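To make the wiring concrete, the sketch below shows how these components typically fit together at startup, using the opentelemetry-cpp factory APIs. It is a minimal illustration rather than the xrpld integration itself; the function name and endpoint value are assumptions.

```cpp
#include <opentelemetry/exporters/otlp/otlp_grpc_exporter_factory.h>
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

#include <memory>
#include <utility>

namespace otlp = opentelemetry::exporter::otlp;
namespace trace_sdk = opentelemetry::sdk::trace;

// Hypothetical startup helper: exporter -> batch processor -> tracer provider.
void initTracing()
{
    otlp::OtlpGrpcExporterOptions exporterOpts;
    exporterOpts.endpoint = "localhost:4317";  // assumption: local collector

    auto exporter = otlp::OtlpGrpcExporterFactory::Create(exporterOpts);

    trace_sdk::BatchSpanProcessorOptions processorOpts;  // defaults: queue 2048, batch 512
    auto processor =
        trace_sdk::BatchSpanProcessorFactory::Create(std::move(exporter), processorOpts);

    std::shared_ptr<opentelemetry::trace::TracerProvider> provider =
        trace_sdk::TracerProviderFactory::Create(std::move(processor));

    // Make the provider available to all instrumentation points.
    opentelemetry::trace::Provider::SetTracerProvider(provider);
}
```
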
### 2.1.2 Instrumentation Strategy
|
||||
|
||||
**Manual Instrumentation** (recommended):
|
||||
|
||||
| Approach | Pros | Cons |
|
||||
| ---------- | --------------------------------------------------------------- | ------------------------------------------------------- |
|
||||
| **Manual** | Precise control, optimized placement, xrpld-specific attributes | More development effort |
|
||||
| **Auto** | Less code, automatic coverage | Less control, potential overhead, limited customization |
|
||||
|
||||
---
|
||||
|
||||
## 2.2 Exporter Configuration
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph nodes["xrpld Nodes"]
|
||||
node1["xrpld<br/>Node 1"]
|
||||
node2["xrpld<br/>Node 2"]
|
||||
node3["xrpld<br/>Node 3"]
|
||||
end
|
||||
|
||||
collector["OpenTelemetry<br/>Collector<br/>(sidecar or standalone)"]
|
||||
|
||||
subgraph backends["Observability Backends"]
|
||||
tempo["Tempo"]
|
||||
elastic["Elastic<br/>APM"]
|
||||
end
|
||||
|
||||
node1 -->|"OTLP/gRPC<br/>:4317"| collector
|
||||
node2 -->|"OTLP/gRPC<br/>:4317"| collector
|
||||
node3 -->|"OTLP/gRPC<br/>:4317"| collector
|
||||
|
||||
collector --> tempo
|
||||
collector --> elastic
|
||||
|
||||
style nodes fill:#0d47a1,stroke:#082f6a,color:#ffffff
|
||||
style backends fill:#1b5e20,stroke:#0d3d14,color:#ffffff
|
||||
style collector fill:#bf360c,stroke:#8c2809,color:#ffffff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **xrpld Nodes (blue)**: The source of telemetry data. Each xrpld node exports spans via OTLP/gRPC on port 4317.
|
||||
- **OpenTelemetry Collector (red)**: The central aggregation point that receives spans from all nodes. Can run as a sidecar (per-node) or standalone (shared). Handles batching, filtering, and routing.
|
||||
- **Observability Backends (green)**: The storage and visualization destinations. Tempo is the recommended backend for both development and production, and Elastic APM is an alternative. The Collector routes to one or more backends.
|
||||
- **Arrows (nodes to collector to backends)**: The data pipeline -- spans flow from nodes to the Collector over gRPC, then the Collector fans out to the configured backends.
|
||||
|
||||
### 2.2.1 OTLP/gRPC (Recommended)
|
||||
|
||||
```cpp
// Configuration for OTLP over gRPC
namespace otlp = opentelemetry::exporter::otlp;

otlp::OtlpGrpcExporterOptions opts;
opts.endpoint = "localhost:4317";
opts.use_ssl_credentials = true;                       // enable TLS on the channel
opts.ssl_credentials_cacert_path = "/path/to/ca.crt";  // CA certificate for TLS
```
|
||||
|
||||
### 2.2.2 OTLP/HTTP (Alternative)
|
||||
|
||||
```cpp
// Configuration for OTLP over HTTP
namespace otlp = opentelemetry::exporter::otlp;

otlp::OtlpHttpExporterOptions opts;
opts.url = "http://localhost:4318/v1/traces";
opts.content_type = otlp::HttpRequestContentType::kJson; // or kBinary
```
|
||||
|
||||
---
|
||||
|
||||
## 2.3 Span Naming Conventions
|
||||
|
||||
> **TxQ** = Transaction Queue | **UNL** = Unique Node List | **WS** = WebSocket
|
||||
|
||||
### 2.3.1 Naming Schema
|
||||
|
||||
```
|
||||
<component>.<operation>[.<sub-operation>]
|
||||
```
|
||||
|
||||
**Examples** (a small helper sketch follows the list):
|
||||
|
||||
- `tx.receive` - Transaction received from peer
|
||||
- `consensus.phase.establish` - Consensus establish phase
|
||||
- `rpc.command.server_info` - server_info RPC command
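
A helper along the following lines (the name and placement are hypothetical) can build the dynamic command names:

```cpp
#include <string>

// Hypothetical helper for the dynamic RPC spans, following the
// <component>.<operation>[.<sub-operation>] schema above.
inline std::string
rpcCommandSpanName(std::string const& method)
{
    return "rpc.command." + method;  // e.g. "rpc.command.server_info"
}
```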
|
||||
|
||||
### 2.3.2 Complete Span Catalog
|
||||
|
||||
```yaml
|
||||
# Transaction Spans
|
||||
tx:
|
||||
receive: "Transaction received from network"
|
||||
validate: "Transaction signature/format validation"
|
||||
process: "Full transaction processing"
|
||||
relay: "Transaction relay to peers"
|
||||
apply: "Apply transaction to ledger"
|
||||
|
||||
# Consensus Spans
|
||||
consensus:
|
||||
round: "Complete consensus round"
|
||||
phase:
|
||||
open: "Open phase - collecting transactions"
|
||||
establish: "Establish phase - reaching agreement"
|
||||
accept: "Accept phase - applying consensus"
|
||||
proposal:
|
||||
receive: "Receive peer proposal"
|
||||
send: "Send our proposal"
|
||||
validation:
|
||||
receive: "Receive peer validation"
|
||||
send: "Send our validation"
|
||||
|
||||
# RPC Spans
|
||||
rpc:
|
||||
request: "HTTP/WebSocket request handling"
|
||||
command:
|
||||
"*": "Specific RPC command (dynamic)"
|
||||
|
||||
# Peer Spans
|
||||
peer:
|
||||
connect: "Peer connection establishment"
|
||||
disconnect: "Peer disconnection"
|
||||
message:
|
||||
send: "Send protocol message"
|
||||
receive: "Receive protocol message"
|
||||
|
||||
# Ledger Spans
|
||||
ledger:
|
||||
acquire: "Ledger acquisition from network"
|
||||
build: "Build new ledger"
|
||||
validate: "Ledger validation"
|
||||
close: "Close ledger"
|
||||
replay: "Ledger replay executed"
|
||||
delta: "Delta-based ledger acquired"
|
||||
|
||||
# PathFinding Spans
|
||||
pathfind:
|
||||
request: "Path request initiated"
|
||||
compute: "Path computation executed"
|
||||
|
||||
# TxQ Spans
|
||||
txq:
|
||||
enqueue: "Transaction queued"
|
||||
apply: "Queued transaction applied"
|
||||
|
||||
# Fee/Load Spans
|
||||
fee:
|
||||
escalate: "Fee escalation triggered"
|
||||
|
||||
# Validator Spans
|
||||
validator:
|
||||
list:
|
||||
fetch: "UNL list fetched"
|
||||
manifest: "Manifest update processed"
|
||||
|
||||
# Amendment Spans
|
||||
amendment:
|
||||
vote: "Amendment voting executed"
|
||||
|
||||
# SHAMap Spans
|
||||
shamap:
|
||||
sync: "State tree synchronization"
|
||||
|
||||
# Job Spans
|
||||
job:
|
||||
enqueue: "Job added to queue"
|
||||
execute: "Job execution"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2.4 Attribute Schema
|
||||
|
||||
> **TxQ** = Transaction Queue | **UNL** = Unique Node List | **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
### 2.4.1 Resource Attributes (Set Once at Startup)
|
||||
|
||||
```cpp
// Standard OpenTelemetry semantic conventions
resource::SemanticConventions::SERVICE_NAME        = "xrpld"
resource::SemanticConventions::SERVICE_VERSION     = BuildInfo::getVersionString()
resource::SemanticConventions::SERVICE_INSTANCE_ID = <node_public_key_base58>

// Custom xrpld attributes
"xrpl.network.id"   = <network_id>    // e.g., 0 for mainnet
"xrpl.network.type" = "mainnet" | "testnet" | "devnet" | "standalone"
"xrpl.node.type"    = "validator" | "stock" | "reporting"
"xrpl.node.cluster" = <cluster_name>  // If clustered
```
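
For illustration, the resource above could be assembled with the SDK roughly as follows (a minimal sketch; the helper name and the way the values are obtained are assumptions about the surrounding code):

```cpp
#include <opentelemetry/sdk/resource/resource.h>

#include <cstdint>
#include <string>

namespace resource_sdk = opentelemetry::sdk::resource;

// Sketch: build the resource once at startup and attach it to the TracerProvider.
resource_sdk::Resource makeXrpldResource(
    std::string const& version,           // e.g. BuildInfo::getVersionString()
    std::string const& nodePublicKeyB58,  // node identity
    std::int64_t networkId,               // e.g. 0 for mainnet
    std::string const& networkType)       // "mainnet" | "testnet" | ...
{
    return resource_sdk::Resource::Create({
        {"service.name", "xrpld"},
        {"service.version", version},
        {"service.instance.id", nodePublicKeyB58},
        {"xrpl.network.id", networkId},
        {"xrpl.network.type", networkType},
    });
}
```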
|
||||
|
||||
### 2.4.2 Span Attributes by Category
|
||||
|
||||
#### Transaction Attributes
|
||||
|
||||
```cpp
"xrpl.tx.hash"         = string   // Transaction hash (hex)
"xrpl.tx.type"         = string   // "Payment", "OfferCreate", etc.
"xrpl.tx.account"      = string   // Source account (redacted in prod)
"xrpl.tx.sequence"     = int64    // Account sequence number
"xrpl.tx.fee"          = int64    // Fee in drops
"xrpl.tx.result"       = string   // "tesSUCCESS", "tecPATH_DRY", etc.
"xrpl.tx.ledger_index" = int64    // Ledger containing transaction
```
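
Attaching these attributes to an active span might look like the following sketch (the helper name and the way the values are obtained are assumptions):

```cpp
#include <opentelemetry/trace/span.h>

#include <cstdint>
#include <string>

// Sketch: record the transaction attributes listed above on a span.
void annotateTxSpan(
    opentelemetry::trace::Span& span,
    std::string const& txHash,   // hex transaction hash
    std::string const& txType,   // e.g. "Payment"
    std::int64_t sequence,
    std::int64_t feeDrops,
    std::string const& result)   // e.g. "tesSUCCESS"
{
    span.SetAttribute("xrpl.tx.hash", txHash);
    span.SetAttribute("xrpl.tx.type", txType);
    span.SetAttribute("xrpl.tx.sequence", sequence);
    span.SetAttribute("xrpl.tx.fee", feeDrops);
    span.SetAttribute("xrpl.tx.result", result);
}
```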
|
||||
|
||||
#### Consensus Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.consensus.round" = int64 // Round number
|
||||
"xrpl.consensus.phase" = string // "open", "establish", "accept"
|
||||
"xrpl.consensus.mode" = string // "proposing", "observing", etc.
|
||||
"xrpl.consensus.proposers" = int64 // Number of proposers
|
||||
"xrpl.consensus.ledger.prev" = string // Previous ledger hash
|
||||
"xrpl.consensus.ledger.seq" = int64 // Ledger sequence
|
||||
"xrpl.consensus.tx_count" = int64 // Transactions in consensus set
|
||||
"xrpl.consensus.duration_ms" = float64 // Round duration
|
||||
|
||||
// Phase 4a: Establish-phase gap fill & cross-node correlation
|
||||
"xrpl.consensus.round_id" = int64 // Consensus round number
|
||||
"xrpl.consensus.ledger_id" = string // previousLedger.id() — shared across nodes
|
||||
"xrpl.consensus.trace_strategy" = string // "deterministic" or "attribute"
|
||||
"xrpl.consensus.converge_percent" = int64 // Convergence % (0-100+)
|
||||
"xrpl.consensus.establish_count" = int64 // Number of establish iterations
|
||||
"xrpl.consensus.disputes_count" = int64 // Active disputed transactions
|
||||
"xrpl.consensus.proposers_agreed" = int64 // Peers agreeing with our position
|
||||
"xrpl.consensus.proposers_total" = int64 // Total peer positions
|
||||
"xrpl.consensus.agree_count" = int64 // Peers that agree (haveConsensus)
|
||||
"xrpl.consensus.disagree_count" = int64 // Peers that disagree
|
||||
"xrpl.consensus.threshold_percent" = int64 // Current threshold (50/65/70/95)
|
||||
"xrpl.consensus.result" = string // "yes", "no", "moved_on"
|
||||
"xrpl.consensus.mode.old" = string // Previous consensus mode
|
||||
"xrpl.consensus.mode.new" = string // New consensus mode
|
||||
```
|
||||
|
||||
#### RPC Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.rpc.command" = string // Command name
|
||||
"xrpl.rpc.version" = int64 // API version
|
||||
"xrpl.rpc.role" = string // "admin" or "user"
|
||||
"xrpl.rpc.params" = string // Sanitized parameters (optional)
|
||||
```
|
||||
|
||||
#### Peer & Message Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.peer.id" = string // Peer public key (base58)
|
||||
"xrpl.peer.address" = string // IP:port
|
||||
"xrpl.peer.latency_ms" = float64 // Measured latency
|
||||
"xrpl.peer.cluster" = string // Cluster name if clustered
|
||||
"xrpl.message.type" = string // Protocol message type name
|
||||
"xrpl.message.size_bytes" = int64 // Message size
|
||||
"xrpl.message.compressed" = bool // Whether compressed
|
||||
```
|
||||
|
||||
#### Ledger & Job Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.ledger.hash" = string // Ledger hash
|
||||
"xrpl.ledger.index" = int64 // Ledger sequence/index
|
||||
"xrpl.ledger.close_time" = int64 // Close time (epoch)
|
||||
"xrpl.ledger.tx_count" = int64 // Transaction count
|
||||
"xrpl.job.type" = string // Job type name
|
||||
"xrpl.job.queue_ms" = float64 // Time spent in queue
|
||||
"xrpl.job.worker" = int64 // Worker thread ID
|
||||
```
|
||||
|
||||
#### PathFinding Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.pathfind.source_currency" = string // Source currency code
|
||||
"xrpl.pathfind.dest_currency" = string // Destination currency code
|
||||
"xrpl.pathfind.path_count" = int64 // Number of paths found
|
||||
"xrpl.pathfind.cache_hit" = bool // RippleLineCache hit
|
||||
```
|
||||
|
||||
#### TxQ Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.txq.queue_depth" = int64 // Current queue depth
|
||||
"xrpl.txq.fee_level" = int64 // Fee level of transaction
|
||||
"xrpl.txq.eviction_reason" = string // Why transaction was evicted
|
||||
```
|
||||
|
||||
#### Fee Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.fee.load_factor" = int64 // Current load factor
|
||||
"xrpl.fee.escalation_level" = int64 // Fee escalation multiplier
|
||||
```
|
||||
|
||||
#### Validator Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.validator.list_size" = int64 // UNL size
|
||||
"xrpl.validator.list_age_sec" = int64 // Seconds since last update
|
||||
```
|
||||
|
||||
#### Amendment Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.amendment.name" = string // Amendment name
|
||||
"xrpl.amendment.status" = string // "enabled", "vetoed", "supported"
|
||||
```
|
||||
|
||||
#### SHAMap Attributes
|
||||
|
||||
```cpp
|
||||
"xrpl.shamap.type" = string // "transaction", "state", "account_state"
|
||||
"xrpl.shamap.missing_nodes" = int64 // Number of missing nodes during sync
|
||||
"xrpl.shamap.duration_ms" = float64 // Sync duration
|
||||
```
|
||||
|
||||
### 2.4.3 Data Collection Summary
|
||||
|
||||
The following table summarizes what data is collected by category:
|
||||
|
||||
| Category | Attributes Collected | Purpose |
|
||||
| --------------- | ---------------------------------------------------------------------- | ---------------------------- |
|
||||
| **Transaction** | `tx.hash`, `tx.type`, `tx.result`, `tx.fee`, `ledger_index` | Trace transaction lifecycle |
|
||||
| **Consensus** | `round`, `phase`, `mode`, `proposers` (public keys), `duration_ms` | Analyze consensus timing |
|
||||
| **RPC** | `command`, `version`, `status`, `duration_ms` | Monitor RPC performance |
|
||||
| **Peer** | `peer.id` (public key), `latency_ms`, `message.type`, `message.size` | Network topology analysis |
|
||||
| **Ledger** | `ledger.hash`, `ledger.index`, `close_time`, `tx_count` | Ledger progression tracking |
|
||||
| **Job** | `job.type`, `queue_ms`, `worker` | JobQueue performance |
|
||||
| **PathFinding** | `pathfind.source_currency`, `dest_currency`, `path_count`, `cache_hit` | Payment path analysis |
|
||||
| **TxQ** | `txq.queue_depth`, `fee_level`, `eviction_reason` | Queue depth and fee tracking |
|
||||
| **Fee** | `fee.load_factor`, `escalation_level` | Fee escalation monitoring |
|
||||
| **Validator** | `validator.list_size`, `list_age_sec` | UNL health monitoring |
|
||||
| **Amendment** | `amendment.name`, `status` | Protocol upgrade tracking |
|
||||
| **SHAMap** | `shamap.type`, `missing_nodes`, `duration_ms` | State tree sync performance |
|
||||
|
||||
### 2.4.4 Privacy & Sensitive Data Policy
|
||||
|
||||
> **PII** = Personally Identifiable Information
|
||||
|
||||
OpenTelemetry instrumentation is designed to collect **operational metadata only**, never sensitive content.
|
||||
|
||||
#### Data NOT Collected
|
||||
|
||||
The following data is explicitly **excluded** from telemetry collection:
|
||||
|
||||
| Excluded Data | Reason |
|
||||
| ----------------------- | ----------------------------------------- |
|
||||
| **Private Keys** | Never exposed; not relevant to tracing |
|
||||
| **Account Balances** | Financial data; privacy sensitive |
|
||||
| **Transaction Amounts** | Financial data; privacy sensitive |
|
||||
| **Raw TX Payloads** | May contain sensitive memo/data fields |
|
||||
| **Personal Data** | No PII collected |
|
||||
| **IP Addresses** | Configurable; excluded by default in prod |
|
||||
|
||||
#### Privacy Protection Mechanisms
|
||||
|
||||
| Mechanism | Description |
|
||||
| ----------------------------- | ------------------------------------------------------------------------- |
|
||||
| **Account Hashing** | `xrpl.tx.account` is hashed at collector level before storage |
|
||||
| **Configurable Redaction** | Sensitive fields can be excluded via `[telemetry]` config section |
|
||||
| **Sampling** | Only 10% of traces recorded by default, reducing data exposure |
|
||||
| **Local Control** | Node operators have full control over what gets exported |
|
||||
| **No Raw Payloads** | Transaction content is never recorded, only metadata (hash, type, result) |
|
||||
| **Collector-Level Filtering** | Additional redaction/hashing can be configured at OTel Collector |
|
||||
|
||||
#### Collector-Level Data Protection
|
||||
|
||||
The OpenTelemetry Collector can be configured to hash or redact sensitive attributes before export:
|
||||
|
||||
```yaml
|
||||
processors:
|
||||
attributes:
|
||||
actions:
|
||||
# Hash account addresses before storage
|
||||
- key: xrpl.tx.account
|
||||
action: hash
|
||||
# Remove IP addresses entirely
|
||||
- key: xrpl.peer.address
|
||||
action: delete
|
||||
# Redact specific fields
|
||||
- key: xrpl.rpc.params
|
||||
action: delete
|
||||
```
|
||||
|
||||
#### Configuration Options for Privacy
|
||||
|
||||
In `xrpld.cfg`, operators can control data collection granularity:
|
||||
|
||||
```ini
|
||||
[telemetry]
|
||||
enabled=1
|
||||
|
||||
# Disable collection of specific components
|
||||
trace_transactions=1
|
||||
trace_consensus=1
|
||||
trace_rpc=1
|
||||
trace_peer=0 # Disable peer tracing (high volume, includes addresses)
|
||||
|
||||
# Redact specific attributes
|
||||
redact_account=1 # Hash account addresses before export
|
||||
redact_peer_address=1 # Remove peer IP addresses
|
||||
```
|
||||
|
||||
> **Note**: The `redact_account` configuration in `xrpld.cfg` controls SDK-level redaction before export, while collector-level filtering (see [Collector-Level Data Protection](#collector-level-data-protection) above) provides an additional defense-in-depth layer. Both can operate independently.
|
||||
|
||||
> **Key Principle**: Telemetry collects **operational metadata** (timing, counts, hashes) — never **sensitive content** (keys, balances, amounts, raw payloads).
|
||||
|
||||
---
|
||||
|
||||
## 2.5 Context Propagation Design
|
||||
|
||||
> **WS** = WebSocket
|
||||
|
||||
### 2.5.0 Deterministic Trace ID Strategy
|
||||
|
||||
Both transaction and consensus tracing use **deterministic trace IDs** derived from
|
||||
a globally known hash, so all nodes handling the same workflow independently produce
|
||||
spans under the same `trace_id`. This is combined with protobuf `span_id` propagation
|
||||
for parent-child relay ordering when available.
|
||||
|
||||
#### Transactions — `trace_id = txHash[0:16]`
|
||||
|
||||
Every node that handles a transaction knows its `txID` (the `uint256` transaction
|
||||
hash). The first 16 bytes of this hash are used as the OTel `trace_id`:
|
||||
|
||||
```
uint256 txHash: A1B2C3D4 E5F6A7B8 C9D0E1F2 A3B4C5D6 E7F8A9B0 C1D2E3F4 A5B6C7D8 E9F0A1B2
                |---------- trace_id (16 bytes) ---------|  (remaining 16 bytes unused)
```
|
||||
|
||||
Each node generates a **random 8-byte `span_id`** so its span is unique within the
|
||||
shared trace. When protobuf `TraceContext` is present in the incoming `TMTransaction`,
|
||||
the sender's `span_id` is extracted and used as the parent — preserving the relay
|
||||
chain as a parent-child tree. When absent (older peers, first hop from client), the
|
||||
span appears as a root in the same trace — correlation is preserved, only the tree
|
||||
structure degrades.
|
||||
|
||||
```
|
||||
Node A (submitter) Node B (relay) Node C (relay)
|
||||
trace_id: A1B2... trace_id: A1B2... trace_id: A1B2...
|
||||
span_id: 1234 (random) span_id: 5678 (random) span_id: 9ABC (random)
|
||||
parent: (none) parent: 1234 (proto) parent: 5678 (proto)
|
||||
↑ ↑
|
||||
protobuf propagation protobuf propagation
|
||||
```
|
||||
|
||||
If protobuf propagation fails at Node B (old peer):
|
||||
|
||||
```
|
||||
Node A Node B (old peer) Node C
|
||||
trace_id: A1B2... trace_id: A1B2... trace_id: A1B2...
|
||||
span_id: 1234 span_id: 5678 span_id: 9ABC
|
||||
parent: (none) parent: (none) parent: 5678 (proto)
|
||||
↑ no parent, but same trace_id — still grouped
|
||||
```
|
||||
|
||||
#### Consensus — `trace_id = prevLedgerHash[0:16]`
|
||||
|
||||
All validators in the same consensus round share the same `previousLedger.id()`.
|
||||
The first 16 bytes are used as trace_id. See [Phase 4a implementation status](./06-implementation-phases.md)
|
||||
and `createDeterministicContext()` in `RCLConsensus.cpp` for the implementation.
|
||||
|
||||
Switchable via `consensus_trace_strategy` config:
|
||||
`"deterministic"` (default) or `"attribute"` (random trace_id, correlation via attribute queries).
|
||||
|
||||
#### Why Not Random IDs with Propagation Only?
|
||||
|
||||
Random trace IDs require **unbroken context propagation** across every hop. In a
|
||||
mixed-version network (common during upgrades), older peers silently drop the
|
||||
`trace_context` protobuf field. The trace splits and downstream spans become
|
||||
impossible to find. Deterministic IDs make correlation **propagation-resilient** — the trace
|
||||
backend groups all spans for the same transaction/round regardless of whether
|
||||
propagation succeeded.
|
||||
|
||||
#### Why Keep Protobuf Propagation?
|
||||
|
||||
Deterministic trace IDs alone provide correlation (all spans grouped) but not
|
||||
**causality** (which node relayed to which). Protobuf `span_id` propagation adds
|
||||
parent-child ordering that shows the exact relay path. The two mechanisms complement
|
||||
each other:
|
||||
|
||||
| Mechanism | Provides | Fails when |
|
||||
| ---------------------------- | --------------------------- | -------------------------------------- |
|
||||
| Deterministic trace_id | Cross-node correlation | Never (hash is always known) |
|
||||
| Protobuf span_id propagation | Parent-child relay ordering | Older peer drops `trace_context` field |
|
||||
|
||||
#### Implementation Reference
|
||||
|
||||
The utility function `createDeterministicTxContext(uint256 const& txHash)` follows
|
||||
the same pattern as `createDeterministicContext(uint256 const& ledgerId)` in
|
||||
`RCLConsensus.cpp`. See [Phase 3 Task 3.9](./Phase3_taskList.md) for the full spec.
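
A minimal sketch of the transaction variant follows. It assumes the helper name above and only illustrates the ID derivation; span creation, sampling decisions, and error handling are omitted, and a real implementation would use the SDK's ID generator for the random span_id.

```cpp
#include <opentelemetry/nostd/span.h>
#include <opentelemetry/trace/span_context.h>
#include <xrpl/basics/base_uint.h>

#include <cstdint>
#include <random>

// Sketch: trace_id = first 16 bytes of the transaction hash, so every node
// handling the same transaction produces spans in the same trace. The span_id
// stays random so each node's span is unique within that trace.
opentelemetry::trace::SpanContext
createDeterministicTxContext(ripple::uint256 const& txHash)
{
    namespace trace = opentelemetry::trace;

    trace::TraceId const traceId{
        opentelemetry::nostd::span<std::uint8_t const, trace::TraceId::kSize>{
            txHash.data(), trace::TraceId::kSize}};

    std::uint8_t buf[trace::SpanId::kSize];
    std::random_device rd;
    for (auto& b : buf)
        b = static_cast<std::uint8_t>(rd());
    trace::SpanId const spanId{
        opentelemetry::nostd::span<std::uint8_t const, trace::SpanId::kSize>{
            buf, trace::SpanId::kSize}};

    return trace::SpanContext{
        traceId,
        spanId,
        trace::TraceFlags{trace::TraceFlags::kIsSampled},
        /*is_remote=*/false};
}
```

The consensus variant has the same shape, keyed on `previousLedger.id()` instead of the transaction hash.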
|
||||
|
||||
### 2.5.1 Propagation Boundaries
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph http["HTTP/WebSocket (RPC)"]
|
||||
w3c["W3C Trace Context Headers:<br/>traceparent:<br/>00-trace_id-span_id-flags<br/>tracestate: xrpld=..."]
|
||||
end
|
||||
|
||||
subgraph protobuf["Protocol Buffers (P2P)"]
|
||||
proto["message TraceContext {<br/> bytes trace_id = 1; // 16 bytes<br/> bytes span_id = 2; // 8 bytes<br/> uint32 trace_flags = 3;<br/> string trace_state = 4;<br/>}"]
|
||||
end
|
||||
|
||||
subgraph jobqueue["JobQueue (Internal Async)"]
|
||||
job["Context captured at job creation,<br/>restored at execution<br/><br/>class Job {<br/> otel::context::Context<br/> traceContext_;<br/>};"]
|
||||
end
|
||||
|
||||
style http fill:#0d47a1,stroke:#082f6a,color:#ffffff
|
||||
style protobuf fill:#1b5e20,stroke:#0d3d14,color:#ffffff
|
||||
style jobqueue fill:#bf360c,stroke:#8c2809,color:#ffffff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **HTTP/WebSocket - RPC (blue)**: For client-facing RPC requests, trace context is propagated using the W3C `traceparent` header. This is the standard approach and works with any OTel-compatible client.
|
||||
- **Protocol Buffers - P2P (green)**: For peer-to-peer messages between xrpld nodes, trace context is embedded as a protobuf `TraceContext` message carrying trace_id, span_id, flags, and optional trace_state.
|
||||
- **JobQueue - Internal Async (red)**: For asynchronous work within a single node, the OTel context is captured when a job is created and restored when the job executes on a worker thread. This bridges the async gap so spans remain linked. A minimal capture/restore sketch follows this list.
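
The capture/restore pattern for the JobQueue boundary is sketched below (type and member names are assumptions, not the actual xrpld `Job` interface):

```cpp
#include <opentelemetry/context/context.h>
#include <opentelemetry/context/runtime_context.h>

#include <functional>
#include <utility>

// Sketch: capture the current OTel context when work is queued and re-attach
// it on the worker thread, so spans started inside the job keep their parent.
class TracedJob
{
public:
    explicit TracedJob(std::function<void()> work)
        : work_(std::move(work))
        , traceContext_(opentelemetry::context::RuntimeContext::GetCurrent())
    {
    }

    void run()
    {
        // The token detaches the context again when it goes out of scope.
        auto token = opentelemetry::context::RuntimeContext::Attach(traceContext_);
        work_();
    }

private:
    std::function<void()> work_;
    opentelemetry::context::Context traceContext_;
};
```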
|
||||
|
||||
---
|
||||
|
||||
## 2.6 Integration with Existing Observability
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket
|
||||
|
||||
### 2.6.1 Existing Frameworks Comparison
|
||||
|
||||
xrpld already has two observability mechanisms. OpenTelemetry complements (not replaces) them:
|
||||
|
||||
| Aspect | PerfLog | Beast Insight (StatsD) | OpenTelemetry |
|
||||
| --------------------- | ----------------------------- | ---------------------------- | ------------------------- |
|
||||
| **Type** | Logging | Metrics | Distributed Tracing |
|
||||
| **Data** | JSON log entries | Counters, gauges, histograms | Spans with context |
|
||||
| **Scope** | Single node | Single node | **Cross-node** |
|
||||
| **Output** | `perf.log` file | StatsD server | OTLP Collector |
|
||||
| **Question answered** | "What happened on this node?" | "How many? How fast?" | "What was the journey?" |
|
||||
| **Correlation** | By timestamp | By metric name | By `trace_id` |
|
||||
| **Overhead** | Low (file I/O) | Low (UDP packets) | Low-Medium (configurable) |
|
||||
|
||||
### 2.6.2 What Each Framework Does Best
|
||||
|
||||
#### PerfLog
|
||||
|
||||
- **Purpose**: Detailed local event logging for RPC and job execution
|
||||
- **Strengths**:
|
||||
- Rich JSON output with timing data
|
||||
- Already integrated in RPC handlers
|
||||
- File-based, no external dependencies
|
||||
- **Limitations**:
|
||||
- Single-node only (no cross-node correlation)
|
||||
- No parent-child relationships between events
|
||||
- Manual log parsing required
|
||||
|
||||
```json
|
||||
// Example PerfLog entry
|
||||
{
|
||||
"time": "2024-01-15T10:30:00.123Z",
|
||||
"method": "submit",
|
||||
"duration_us": 1523,
|
||||
"result": "tesSUCCESS"
|
||||
}
|
||||
```
|
||||
|
||||
#### Beast Insight (StatsD)
|
||||
|
||||
- **Purpose**: Real-time metrics for monitoring dashboards
|
||||
- **Strengths**:
|
||||
- Aggregated metrics (counters, gauges, histograms)
|
||||
- Low overhead (UDP, fire-and-forget)
|
||||
- Good for alerting thresholds
|
||||
- **Limitations**:
|
||||
- No request-level detail
|
||||
- No causal relationships
|
||||
- Single-node perspective
|
||||
|
||||
```cpp
|
||||
// Example StatsD usage in xrpld
|
||||
insight.increment("rpc.submit.count");
|
||||
insight.gauge("ledger.age", age);
|
||||
insight.timing("consensus.round", duration);
|
||||
```
|
||||
|
||||
#### OpenTelemetry (NEW)
|
||||
|
||||
- **Purpose**: Distributed request tracing across nodes
|
||||
- **Strengths**:
|
||||
- **Cross-node correlation** via `trace_id`
|
||||
- Parent-child span relationships
|
||||
- Rich attributes per span
|
||||
- Industry standard (CNCF)
|
||||
- **Limitations**:
|
||||
- Requires collector infrastructure
|
||||
- Higher complexity than logging
|
||||
|
||||
```cpp
|
||||
// Example OpenTelemetry span
|
||||
auto span = telemetry.startSpan("tx.relay");
|
||||
span->SetAttribute("tx.hash", hash);
|
||||
span->SetAttribute("peer.id", peerId);
|
||||
// Span automatically linked to parent via context
|
||||
```
|
||||
|
||||
### 2.6.3 When to Use Each
|
||||
|
||||
| Scenario | PerfLog | StatsD | OpenTelemetry |
|
||||
| --------------------------------------- | ---------- | ------ | ------------- |
|
||||
| "How many TXs per second?" | ❌ | ✅ | ✅ |
|
||||
| "What's the p99 RPC latency?" | ❌ | ✅ | ✅ |
|
||||
| "Why was this specific TX slow?" | ⚠️ partial | ❌ | ✅ |
|
||||
| "Which node delayed consensus?" | ❌ | ❌ | ✅ |
|
||||
| "What happened on node X at time T?" | ✅ | ❌ | ✅ |
|
||||
| "Show me the TX journey across 5 nodes" | ❌ | ❌ | ✅ |
|
||||
|
||||
### 2.6.4 Coexistence Strategy
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph xrpld["xrpld Process"]
|
||||
perflog["PerfLog<br/>(JSON to file)"]
|
||||
insight["Beast Insight<br/>(StatsD)"]
|
||||
otel["OpenTelemetry<br/>(Tracing)"]
|
||||
end
|
||||
|
||||
perflog --> perffile["perf.log"]
|
||||
insight --> statsd["StatsD Server"]
|
||||
otel --> collector["OTLP Collector"]
|
||||
|
||||
perffile --> grafana["Grafana<br/>(Unified UI)"]
|
||||
statsd --> grafana
|
||||
collector --> grafana
|
||||
|
||||
style xrpld fill:#212121,stroke:#0a0a0a,color:#ffffff
|
||||
style grafana fill:#bf360c,stroke:#8c2809,color:#ffffff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **xrpld Process (dark gray)**: The single xrpld node running all three observability frameworks side by side. Each framework operates independently with no interference.
|
||||
- **PerfLog to perf.log**: PerfLog writes JSON-formatted event logs to a local file. Grafana can ingest these via Loki or a file-based datasource.
|
||||
- **Beast Insight to StatsD Server**: Insight sends aggregated metrics (counters, gauges) over UDP to a StatsD server. Grafana reads from StatsD-compatible backends like Graphite or Prometheus (via StatsD exporter).
|
||||
- **OpenTelemetry to OTLP Collector**: OTel exports spans over OTLP/gRPC to a Collector, which then forwards to a trace backend (Tempo).
|
||||
- **Grafana (red, unified UI)**: All three data streams converge in Grafana, enabling operators to correlate logs, metrics, and traces in a single dashboard.
|
||||
|
||||
### 2.6.5 Correlation with PerfLog
|
||||
|
||||
Trace IDs can be correlated with existing PerfLog entries for comprehensive debugging:
|
||||
|
||||
```cpp
|
||||
// In RPCHandler.cpp - correlate trace with PerfLog
|
||||
Status doCommand(RPC::JsonContext& context, Json::Value& result)
|
||||
{
|
||||
// Start OpenTelemetry span
|
||||
auto span = context.app.getTelemetry().startSpan(
|
||||
"rpc.command." + context.method);
|
||||
|
||||
// Get trace ID for correlation
|
||||
auto traceId = span->GetContext().trace_id().IsValid()
|
||||
? toHex(span->GetContext().trace_id())
|
||||
: "";
|
||||
|
||||
// Use existing PerfLog with trace correlation
|
||||
auto const curId = context.app.getPerfLog().currentId();
|
||||
context.app.getPerfLog().rpcStart(context.method, curId);
|
||||
|
||||
// Future: Add trace ID to PerfLog entry
|
||||
// context.app.getPerfLog().setTraceId(curId, traceId);
|
||||
|
||||
try {
|
||||
auto ret = handler(context, result);
|
||||
context.app.getPerfLog().rpcFinish(context.method, curId);
|
||||
span->SetStatus(opentelemetry::trace::StatusCode::kOk);
|
||||
return ret;
|
||||
} catch (std::exception const& e) {
|
||||
context.app.getPerfLog().rpcError(context.method, curId);
|
||||
span->RecordException(e);
|
||||
span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what());
|
||||
throw;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
_Previous: [Architecture Analysis](./01-architecture-analysis.md)_ | _Next: [Implementation Strategy](./03-implementation-strategy.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
|
||||
@@ -1,530 +0,0 @@
|
||||
# Implementation Strategy
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Code Samples](./04-code-samples.md) | [Configuration Reference](./05-configuration-reference.md)
|
||||
|
||||
---
|
||||
|
||||
## 3.1 Directory Structure
|
||||
|
||||
The telemetry implementation follows xrpld's existing code organization pattern:
|
||||
|
||||
```
|
||||
include/xrpl/
|
||||
├── telemetry/
|
||||
│ ├── Telemetry.h # Main telemetry interface (global singleton)
|
||||
│ ├── TelemetryConfig.h # Configuration structures
|
||||
│ ├── TraceContext.h # Context propagation utilities
|
||||
│ ├── SpanGuard.h # RAII span management with factory methods + discard()
|
||||
│ ├── DiscardFlag.h # Thread-local discard flag
|
||||
│ └── SpanAttributes.h # Attribute helper functions
|
||||
|
||||
src/libxrpl/
|
||||
├── telemetry/
|
||||
│ ├── Telemetry.cpp # Implementation + FilteringSpanProcessor
|
||||
│ ├── TelemetryConfig.cpp # Config parsing
|
||||
│ ├── TraceContext.cpp # Context serialization
|
||||
│ └── NullTelemetry.cpp # No-op implementation
|
||||
```
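
As a rough shape for the main interface, the outline below conveys the singleton-plus-component-filter responsibilities described in this plan (hypothetical declarations only; the actual header defines the real API):

```cpp
// Hypothetical outline of include/xrpl/telemetry/Telemetry.h.
namespace telemetry {

class Telemetry
{
public:
    static Telemetry& getInstance();   // global singleton

    bool enabled() const noexcept;     // [telemetry] enabled=1
    bool shouldTraceRpc() const noexcept;
    bool shouldTracePeer() const noexcept;

    // Span creation goes through SpanGuard (SpanGuard.h), which consults the
    // filters above and becomes a no-op when telemetry is disabled.
};

}  // namespace telemetry
```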
|
||||
|
||||
---
|
||||
|
||||
## 3.2 Implementation Approach
|
||||
|
||||
<div align="center">
|
||||
|
||||
```mermaid
|
||||
%%{init: {'flowchart': {'nodeSpacing': 20, 'rankSpacing': 30}}}%%
|
||||
flowchart TB
|
||||
subgraph phase1["Phase 1: Core"]
|
||||
direction LR
|
||||
sdk["SDK Integration"] ~~~ interface["Telemetry Interface"] ~~~ config["Configuration"]
|
||||
end
|
||||
|
||||
subgraph phase2["Phase 2: RPC"]
|
||||
direction LR
|
||||
http["HTTP Context"] ~~~ rpc["RPC Handlers"]
|
||||
end
|
||||
|
||||
subgraph phase3["Phase 3: P2P"]
|
||||
direction LR
|
||||
proto["Protobuf Context"] ~~~ tx["Transaction Relay"]
|
||||
end
|
||||
|
||||
subgraph phase4["Phase 4: Consensus"]
|
||||
direction LR
|
||||
consensus["Consensus Rounds"] ~~~ proposals["Proposals"]
|
||||
end
|
||||
|
||||
phase1 --> phase2 --> phase3 --> phase4
|
||||
|
||||
style phase1 fill:#1565c0,stroke:#0d47a1,color:#ffffff
|
||||
style phase2 fill:#2e7d32,stroke:#1b5e20,color:#ffffff
|
||||
style phase3 fill:#e65100,stroke:#bf360c,color:#ffffff
|
||||
style phase4 fill:#c2185b,stroke:#880e4f,color:#ffffff
|
||||
```
|
||||
|
||||
</div>
|
||||
|
||||
### Key Principles
|
||||
|
||||
1. **Minimal Intrusion**: Instrumentation should not alter existing control flow
|
||||
2. **Zero-Cost When Disabled**: Use compile-time flags and no-op implementations
|
||||
3. **Backward Compatibility**: Protocol Buffer extensions use high field numbers
|
||||
4. **Graceful Degradation**: Tracing failures must not affect node operation
|
||||
|
||||
---
|
||||
|
||||
## 3.3 Performance Overhead Summary
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
| Metric | Overhead | Notes |
|
||||
| ------------- | ---------- | ------------------------------------------------ |
|
||||
| CPU | 1-3% | Of per-transaction CPU cost (~200μs baseline) |
|
||||
| Memory | ~10 MB | SDK statics + batch buffer + worker thread stack |
|
||||
| Network | 10-50 KB/s | Compressed OTLP export to collector |
|
||||
| Latency (p99) | <2% | With proper sampling configuration |
|
||||
|
||||
---
|
||||
|
||||
## 3.4 Detailed CPU Overhead Analysis
|
||||
|
||||
### 3.4.1 Per-Operation Costs
|
||||
|
||||
> **Note on hardware assumptions**: The costs below are based on the official OTel C++ SDK CI benchmarks
|
||||
> (969 runs on GitHub Actions 2-core shared runners). On production server hardware (3+ GHz Xeon),
|
||||
> expect costs at the **lower end** of each range (~30-50% improvement over CI hardware).
|
||||
|
||||
| Operation | Time (ns) | Frequency | Impact |
|
||||
| --------------------- | --------- | ---------------------- | ---------- |
|
||||
| Span creation | 500-1000 | Every traced operation | Low |
|
||||
| Span end | 100-200 | Every traced operation | Low |
|
||||
| SetAttribute (string) | 80-120 | 3-5 per span | Low |
|
||||
| SetAttribute (int) | 40-60 | 2-3 per span | Negligible |
|
||||
| AddEvent | 100-200 | 0-2 per span | Low |
|
||||
| Context injection | 150-250 | Per outgoing message | Low |
|
||||
| Context extraction | 100-180 | Per incoming message | Low |
|
||||
| GetCurrent context | 10-20 | Thread-local access | Negligible |
|
||||
|
||||
**Source**: Span creation based on OTel C++ SDK `BM_SpanCreation` benchmark (AlwaysOnSampler +
|
||||
SimpleSpanProcessor + InMemoryExporter), median ~1,000 ns on CI hardware. AddEvent includes
|
||||
timestamp read + string copy + vector push + mutex acquisition. Context injection/extraction
|
||||
confirmed by `BM_SpanCreationWithScope` benchmark delta (~160 ns).
|
||||
|
||||
### 3.4.2 Transaction Processing Overhead
|
||||
|
||||
<div align="center">
|
||||
|
||||
```mermaid
|
||||
%%{init: {'pie': {'textPosition': 0.75}}}%%
|
||||
pie showData
|
||||
"tx.receive (1400ns)" : 1400
|
||||
"tx.validate (1200ns)" : 1200
|
||||
"tx.relay (1200ns)" : 1200
|
||||
"Context inject (200ns)" : 200
|
||||
```
|
||||
|
||||
**Transaction Tracing Overhead (~4.0μs total)**
|
||||
|
||||
</div>
|
||||
|
||||
**Overhead percentage**: 4.0 μs / 200 μs (avg tx processing) = **~2.0%**
|
||||
|
||||
> **Breakdown**: Each span (tx.receive, tx.validate, tx.relay) costs ~1,000 ns for creation plus
|
||||
> ~200-400 ns for 3-5 attribute sets. Context injection is ~200 ns (confirmed by benchmarks).
|
||||
> On production hardware, expect ~2.6 μs total (~1.3% overhead) due to faster span creation (~500-600 ns).
|
||||
|
||||
### 3.4.3 Consensus Round Overhead
|
||||
|
||||
| Operation | Count | Cost (ns) | Total |
|
||||
| ---------------------- | ----- | --------- | ---------- |
|
||||
| consensus.round span | 1 | ~1200 | ~1.2 μs |
|
||||
| consensus.phase spans | 3 | ~1100 | ~3.3 μs |
|
||||
| proposal.receive spans | ~20 | ~1100 | ~22 μs |
|
||||
| proposal.send spans | ~3 | ~1100 | ~3.3 μs |
|
||||
| Context operations | ~30 | ~200 | ~6 μs |
|
||||
| **TOTAL** | | | **~36 μs** |
|
||||
|
||||
> **Why higher**: Each span costs ~1,000 ns creation + ~100-200 ns for 1-2 attributes, totaling ~1,100-1,200 ns.
|
||||
> Context operations remain ~200 ns (confirmed by benchmarks). On production hardware, expect ~24 μs total.
|
||||
|
||||
**Overhead percentage**: 36 μs / 3s (typical round) = **~0.001%** (negligible)
|
||||
|
||||
### 3.4.4 RPC Request Overhead
|
||||
|
||||
| Operation | Cost (ns) |
|
||||
| ---------------- | ------------ |
|
||||
| rpc.request span | ~1200 |
|
||||
| rpc.command span | ~1100 |
|
||||
| Context extract | ~250 |
|
||||
| Context inject | ~200 |
|
||||
| **TOTAL** | **~2.75 μs** |
|
||||
|
||||
> **Why higher**: Each span costs ~1,000 ns creation + ~100-200 ns for attributes (command name,
|
||||
> version, role). Context extract/inject costs are confirmed by OTel C++ benchmarks.
|
||||
|
||||
- Fast RPC (1ms): 2.75 μs / 1ms = **~0.275%**
|
||||
- Slow RPC (100ms): 2.75 μs / 100ms = **~0.003%**
|
||||
|
||||
---
|
||||
|
||||
## 3.5 Memory Overhead Analysis
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
### 3.5.1 Static Memory
|
||||
|
||||
| Component | Size | Allocated |
|
||||
| ------------------------------------ | ----------- | ---------- |
|
||||
| TracerProvider singleton | ~64 KB | At startup |
|
||||
| BatchSpanProcessor (circular buffer) | ~16 KB | At startup |
|
||||
| BatchSpanProcessor (worker thread) | ~8 MB | At startup |
|
||||
| OTLP exporter (gRPC channel init) | ~256 KB | At startup |
|
||||
| Propagator registry | ~8 KB | At startup |
|
||||
| **Total static** | **~8.3 MB** | |
|
||||
|
||||
> **Why higher than earlier estimate**: The BatchSpanProcessor's circular buffer itself is only ~16 KB
|
||||
> (2049 x 8-byte `AtomicUniquePtr` entries), but it spawns a dedicated worker thread whose default
|
||||
> stack size on Linux is ~8 MB. The OTLP gRPC exporter allocates memory for channel stubs and TLS
|
||||
> initialization. The worker thread stack dominates the static footprint.
|
||||
|
||||
### 3.5.2 Dynamic Memory
|
||||
|
||||
| Component | Size per unit | Max units | Peak |
|
||||
| -------------------- | -------------- | ---------- | --------------- |
|
||||
| Active span | ~500-800 bytes | 1000 | ~500-800 KB |
|
||||
| Queued span (export) | ~500 bytes | 2048 | ~1 MB |
|
||||
| Attribute storage | ~80 bytes | 5 per span | Included |
|
||||
| Context storage | ~64 bytes | Per thread | ~6.4 KB |
|
||||
| **Total dynamic** | | | **~1.5-1.8 MB** |
|
||||
|
||||
> **Why active spans are larger**: An active `Span` object includes the wrapper (~88 bytes: shared_ptr,
|
||||
> mutex, unique_ptr to Recordable) plus `SpanData` (~250 bytes: SpanContext, timestamps, name, status,
|
||||
> empty containers) plus attribute storage (~200-500 bytes for 3-5 string attributes in a `std::map`).
|
||||
> Source: `sdk/src/trace/span.h` and `sdk/include/opentelemetry/sdk/trace/span_data.h`.
|
||||
> Queued spans release the wrapper, keeping only `SpanData` + attributes (~500 bytes).
|
||||
|
||||
### 3.5.3 Memory Growth Characteristics
|
||||
|
||||
```mermaid
|
||||
---
|
||||
config:
|
||||
xyChart:
|
||||
width: 700
|
||||
height: 400
|
||||
---
|
||||
xychart-beta
|
||||
title "Memory Usage vs Span Rate (bounded by queue limit)"
|
||||
x-axis "Spans/second" [0, 200, 400, 600, 800, 1000]
|
||||
y-axis "Memory (MB)" 0 --> 12
|
||||
line [8.5, 9.2, 9.6, 9.9, 10.0, 10.0]
|
||||
```
|
||||
|
||||
**Notes**:
|
||||
|
||||
- Memory increases with span rate but **plateaus at queue capacity** (default 2048 spans)
|
||||
- Batch export prevents unbounded growth
|
||||
- At queue limit, oldest spans are dropped (not blocked)
|
||||
- Maximum memory is bounded: ~8.3 MB static (dominated by worker thread stack) + 2048 queued spans x ~500 bytes (~1 MB) + active spans (~0.8 MB) ≈ **~10 MB ceiling**
|
||||
- The worker thread stack (~8 MB) is virtual memory; actual RSS depends on stack usage (typically much less)
|
||||
|
||||
### 3.5.4 Performance Data Sources
|
||||
|
||||
The overhead estimates in Sections 3.3-3.5 are derived from the following sources:
|
||||
|
||||
| Source | What it covers | URL |
|
||||
| ------------------------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| OTel C++ SDK CI benchmarks (969 runs) | Span creation, context activation, sampler overhead | [Benchmark Dashboard](https://open-telemetry.github.io/opentelemetry-cpp/benchmarks/) |
|
||||
| `api/test/trace/span_benchmark.cc` | API-level span creation (~22 ns no-op) | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/api/test/trace/span_benchmark.cc) |
|
||||
| `sdk/test/trace/sampler_benchmark.cc` | SDK span creation with samplers (~1,000 ns AlwaysOn) | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/test/trace/sampler_benchmark.cc) |
|
||||
| `sdk/include/.../span_data.h` | SpanData memory layout (~250 bytes base) | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/include/opentelemetry/sdk/trace/span_data.h) |
|
||||
| `sdk/src/trace/span.h` | Span wrapper memory layout (~88 bytes) | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/src/trace/span.h) |
|
||||
| `sdk/include/.../batch_span_processor_options.h` | Default queue size (2048), batch size (512) | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/include/opentelemetry/sdk/trace/batch_span_processor_options.h) |
|
||||
| `sdk/include/.../circular_buffer.h` | CircularBuffer implementation (AtomicUniquePtr array) | [Source](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/include/opentelemetry/sdk/common/circular_buffer.h) |
|
||||
| OTLP proto definition | Serialized span size estimation | [Proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto) |
|
||||
|
||||
---
|
||||
|
||||
## 3.6 Network Overhead Analysis
|
||||
|
||||
### 3.6.1 Export Bandwidth
|
||||
|
||||
> **Bytes per span**: Estimates use ~500 bytes/span (conservative upper bound). OTLP protobuf analysis
|
||||
> shows a typical span with 3-5 string attributes serializes to ~200-300 bytes raw; with gzip
|
||||
> compression (~60-70% of raw) and batching (amortized headers), ~350 bytes/span is more realistic.
|
||||
> The table uses the conservative estimate for capacity planning.
|
||||
|
||||
| Sampling Rate | Spans/sec | Bandwidth | Notes |
|
||||
| ------------- | --------- | --------- | ---------------- |
|
||||
| 100% | ~500 | ~250 KB/s | Development only |
|
||||
| 10% | ~50 | ~25 KB/s | Staging |
|
||||
| 1% | ~5 | ~2.5 KB/s | Production |
|
||||
| Error-only | ~1 | ~0.5 KB/s | Minimal overhead |
|
||||
|
||||
### 3.6.2 Trace Context Propagation
|
||||
|
||||
| Message Type | Context Size | Messages/sec | Overhead |
|
||||
| ---------------------- | ------------ | ------------ | ----------- |
|
||||
| TMTransaction | 25 bytes | ~100 | ~2.5 KB/s |
|
||||
| TMProposeSet | 25 bytes | ~10 | ~250 B/s |
|
||||
| TMValidation | 25 bytes | ~50 | ~1.25 KB/s |
|
||||
| **Total P2P overhead** | | | **~4 KB/s** |
|
||||
|
||||
---
|
||||
|
||||
## 3.7 Optimization Strategies
|
||||
|
||||
### 3.7.1 Sampling Strategies
|
||||
|
||||
#### Tail Sampling
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
trace["New Trace"]
|
||||
|
||||
trace --> errors{"Is Error?"}
|
||||
errors -->|Yes| sample["SAMPLE"]
|
||||
errors -->|No| consensus{"Is Consensus?"}
|
||||
|
||||
consensus -->|Yes| sample
|
||||
consensus -->|No| slow{"Is Slow?"}
|
||||
|
||||
slow -->|Yes| sample
|
||||
slow -->|No| prob{"Random < 10%?"}
|
||||
|
||||
prob -->|Yes| sample
|
||||
prob -->|No| drop["DROP"]
|
||||
|
||||
style sample fill:#4caf50,stroke:#388e3c,color:#fff
|
||||
style drop fill:#f44336,stroke:#c62828,color:#fff
|
||||
```
|
||||
|
||||
### 3.7.2 Batch Tuning Recommendations
|
||||
|
||||
| Environment        | Batch Size | Batch Delay | Max Queue |
| ------------------ | ---------- | ----------- | --------- |
| Low-latency        | 128        | 1000ms      | 512       |
| High-throughput    | 1024       | 10000ms     | 8192      |
| Memory-constrained | 256        | 2000ms      | 512       |
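
For reference, the sketch below maps the "High-throughput" row onto the SDK's batch processor options (illustrative only; in xrpld these values come from the `[telemetry]` config keys `batch_size`, `batch_delay_ms`, and `max_queue_size`):

```cpp
#include <opentelemetry/sdk/trace/batch_span_processor_options.h>

#include <chrono>

// Sketch: the "High-throughput" profile from the table above.
opentelemetry::sdk::trace::BatchSpanProcessorOptions highThroughputOptions()
{
    opentelemetry::sdk::trace::BatchSpanProcessorOptions opts;
    opts.max_queue_size = 8192;                                      // Max Queue
    opts.schedule_delay_millis = std::chrono::milliseconds{10000};   // Batch Delay
    opts.max_export_batch_size = 1024;                               // Batch Size
    return opts;
}
```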
|
||||
### 3.7.3 Conditional Instrumentation
|
||||
|
||||
SpanGuard's static factory methods handle both compile-time and runtime
|
||||
checks internally. When `XRPL_ENABLE_TELEMETRY` is not defined, the
|
||||
entire SpanGuard class compiles to a no-op stub with empty method bodies.
|
||||
When it is defined, the factory methods check the global Telemetry
|
||||
instance and the relevant component filter before creating a span:
|
||||
|
||||
```cpp
// SpanGuard factory methods handle all conditional logic internally.
// When XRPL_ENABLE_TELEMETRY is not defined, these are no-ops.
// When defined, they check Telemetry::getInstance() and the
// component filter (e.g. shouldTracePeer()) at runtime.
auto span = telemetry::SpanGuard::peerSpan("peer.message.receive");
span.setAttribute("xrpl.peer.id", peerId);
// No overhead when telemetry is disabled at compile time or runtime
```
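
To illustrate how the disabled path compiles down to nothing, a stripped-down stub might look like this (a sketch of the mechanism described above, not the actual header):

```cpp
// Sketch: when XRPL_ENABLE_TELEMETRY is not defined, SpanGuard collapses to an
// empty value type whose calls the optimizer removes entirely.
#ifndef XRPL_ENABLE_TELEMETRY

namespace telemetry {

class SpanGuard
{
public:
    static SpanGuard peerSpan(char const*) noexcept { return {}; }
    // ... rpcSpan, txSpan, consensusSpan, etc.

    template <class T>
    void setAttribute(char const*, T&&) noexcept {}

    void discard() noexcept {}
};

}  // namespace telemetry

#endif  // !XRPL_ENABLE_TELEMETRY
```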
|
||||
|
||||
---
|
||||
|
||||
## 3.8 Links to Detailed Documentation
|
||||
|
||||
- **[Code Samples](./04-code-samples.md)**: Complete implementation code for all components
|
||||
- **[Configuration Reference](./05-configuration-reference.md)**: Configuration options and collector setup
|
||||
- **[Implementation Phases](./06-implementation-phases.md)**: Detailed timeline and milestones
|
||||
|
||||
---
|
||||
|
||||
## 3.9 Code Intrusiveness Assessment
|
||||
|
||||
> **TxQ** = Transaction Queue
|
||||
|
||||
This section provides a detailed assessment of how intrusive the OpenTelemetry integration is to the existing xrpld codebase.
|
||||
|
||||
### 3.9.1 Files Modified Summary
|
||||
|
||||
| Component | Files Modified | Lines Added | Lines Changed | Architectural Impact |
|
||||
| --------------------- | -------------- | ----------- | ------------- | -------------------- |
|
||||
| **Core Telemetry** | 7 new files | ~800 | 0 | None (new module) |
|
||||
| **Application Init** | 2 files | ~30 | ~5 | Minimal |
|
||||
| **RPC Layer** | 3 files | ~80 | ~20 | Minimal |
|
||||
| **Transaction Relay** | 4 files | ~120 | ~40 | Low |
|
||||
| **Consensus** | 3 files | ~100 | ~30 | Low-Medium |
|
||||
| **Protocol Buffers** | 1 file | ~25 | 0 | Low |
|
||||
| **CMake/Build** | 3 files | ~50 | ~10 | Minimal |
|
||||
| **PathFinding** | 2 | ~80 | ~5 | Minimal |
|
||||
| **TxQ/Fee** | 2 | ~60 | ~5 | Minimal |
|
||||
| **Validator/Amend** | 3 | ~40 | ~5 | Minimal |
|
||||
| **Total** | **~27 files** | **~1,490** | **~120** | **Low** |
|
||||
|
||||
### 3.9.2 Detailed File Impact
|
||||
|
||||
```mermaid
|
||||
pie title Code Changes by Component
|
||||
"New Telemetry Module" : 800
|
||||
"Transaction Relay" : 160
|
||||
"Consensus" : 130
|
||||
"RPC Layer" : 100
|
||||
"PathFinding" : 80
|
||||
"TxQ/Fee" : 60
|
||||
"Validator/Amendment" : 40
|
||||
"Application Init" : 35
|
||||
"Protocol Buffers" : 25
|
||||
"Build System" : 60
|
||||
```
|
||||
|
||||
#### New Files (No Impact on Existing Code)
|
||||
|
||||
| File | Lines | Purpose |
|
||||
| ------------------------------------------- | ----- | ----------------------------------------------------- |
|
||||
| `include/xrpl/telemetry/Telemetry.h` | ~160 | Main interface (global singleton) |
|
||||
| `include/xrpl/telemetry/SpanGuard.h` | ~250 | RAII wrapper + factory methods + discard + no-op stub |
|
||||
| `include/xrpl/telemetry/DiscardFlag.h` | ~28 | Thread-local discard flag |
|
||||
| `include/xrpl/telemetry/TraceContext.h` | ~80 | Context propagation |
|
||||
| `src/libxrpl/telemetry/Telemetry.cpp` | ~400 | Implementation + FilteringSpanProcessor |
|
||||
| `src/libxrpl/telemetry/TelemetryConfig.cpp` | ~60 | Config parsing |
|
||||
| `src/libxrpl/telemetry/NullTelemetry.cpp` | ~40 | No-op implementation |
|
||||
|
||||
#### Modified Files (Existing Xrpld Code)
|
||||
|
||||
| File | Lines Added | Lines Changed | Risk Level |
|
||||
| ------------------------------------------------- | ----------- | ------------- | ---------- |
|
||||
| `src/xrpld/app/main/Application.cpp` | ~15 | ~3 | Low |
|
||||
| `include/xrpl/core/ServiceRegistry.h` | ~5 | ~2 | Low |
|
||||
| `src/xrpld/rpc/detail/ServerHandler.cpp` | ~40 | ~10 | Low |
|
||||
| `src/xrpld/rpc/handlers/*.cpp` | ~30 | ~8 | Low |
|
||||
| `src/xrpld/overlay/detail/PeerImp.cpp` | ~60 | ~15 | Medium |
|
||||
| `src/xrpld/overlay/detail/OverlayImpl.cpp` | ~30 | ~10 | Medium |
|
||||
| `src/xrpld/app/consensus/RCLConsensus.cpp` | ~50 | ~15 | Medium |
|
||||
| `src/xrpld/app/consensus/RCLConsensusAdaptor.cpp` | ~40 | ~12 | Medium |
|
||||
| `src/xrpld/core/JobQueue.cpp` | ~20 | ~5 | Low |
|
||||
| `src/xrpld/app/paths/PathRequest.cpp` | ~40 | ~3 | Low |
|
||||
| `src/xrpld/app/paths/Pathfinder.cpp` | ~40 | ~2 | Low |
|
||||
| `src/xrpld/app/misc/TxQ.cpp` | ~40 | ~3 | Low |
|
||||
| `src/xrpld/app/main/LoadManager.cpp` | ~20 | ~2 | Low |
|
||||
| `src/xrpld/app/misc/ValidatorList.cpp` | ~20 | ~2 | Low |
|
||||
| `src/xrpld/app/misc/AmendmentTable.cpp` | ~10 | ~2 | Low |
|
||||
| `src/xrpld/app/misc/Manifest.cpp` | ~10 | ~1 | Low |
|
||||
| `src/xrpld/shamap/SHAMap.cpp` | ~20 | ~3 | Low |
|
||||
| `src/xrpld/overlay/detail/ripple.proto` | ~25 | 0 | Low |
|
||||
| `CMakeLists.txt` | ~40 | ~8 | Low |
|
||||
| `cmake/FindOpenTelemetry.cmake` | ~50 | 0 | None (new) |
|
||||
|
||||
### 3.9.3 Risk Assessment by Component
|
||||
|
||||
<div align="center">
|
||||
|
||||
**Do First** ↖ ↗ **Plan Carefully**
|
||||
|
||||
```mermaid
|
||||
quadrantChart
|
||||
title Code Intrusiveness Risk Matrix
|
||||
x-axis Low Risk --> High Risk
|
||||
y-axis Low Value --> High Value
|
||||
|
||||
RPC Tracing: [0.2, 0.55]
|
||||
Transaction Relay: [0.55, 0.85]
|
||||
Consensus Tracing: [0.75, 0.92]
|
||||
Peer Message Tracing: [0.85, 0.35]
|
||||
JobQueue Context: [0.3, 0.42]
|
||||
Ledger Acquisition: [0.48, 0.65]
|
||||
PathFinding: [0.38, 0.72]
|
||||
TxQ and Fees: [0.25, 0.62]
|
||||
Validator Mgmt: [0.15, 0.35]
|
||||
```
|
||||
|
||||
**Optional** ↙ ↘ **Avoid**
|
||||
|
||||
</div>
|
||||
|
||||
#### Risk Level Definitions
|
||||
|
||||
| Risk Level | Definition | Mitigation |
|
||||
| ---------- | ---------------------------------------------------------------- | ---------------------------------- |
|
||||
| **Low** | Additive changes only; no modification to existing logic | Standard code review |
|
||||
| **Medium** | Minor modifications to existing functions; clear boundaries | Comprehensive unit tests |
|
||||
| **High** | Changes to core logic or data structures; potential side effects | Integration tests + staged rollout |
|
||||
|
||||
### 3.9.4 Architectural Impact Assessment
|
||||
|
||||
| Aspect | Impact | Justification |
|
||||
| -------------------- | ------- | -------------------------------------------------------------------------------- |
|
||||
| **Data Flow** | Minimal | Read-only instrumentation; no modification to consensus or transaction data flow |
|
||||
| **Threading Model** | Minimal | Context propagation uses thread-local storage (standard OTel pattern) |
|
||||
| **Memory Model** | Low | Bounded queues prevent unbounded growth; RAII ensures cleanup |
|
||||
| **Network Protocol** | Low | Optional fields in protobuf (high field numbers); backward compatible |
|
||||
| **Configuration** | None | New config section; existing configs unaffected |
|
||||
| **Build System** | Low | Optional CMake flag; builds work without OpenTelemetry |
|
||||
| **Dependencies** | Low | OpenTelemetry SDK is optional; null implementation when disabled |
|
||||
|
||||
### 3.9.5 Backward Compatibility
|
||||
|
||||
| Compatibility | Status | Notes |
|
||||
| --------------- | ------- | ----------------------------------------------------- |
|
||||
| **Config File** | ✅ Full | New `[telemetry]` section is optional |
|
||||
| **Protocol** | ✅ Full | Optional protobuf fields with high field numbers |
|
||||
| **Build** | ✅ Full | `XRPL_ENABLE_TELEMETRY=OFF` produces identical binary |
|
||||
| **Runtime** | ✅ Full | `enabled=0` produces zero overhead |
|
||||
| **API** | ✅ Full | No changes to public RPC or P2P APIs |
|
||||
|
||||
### 3.9.6 Rollback Strategy
|
||||
|
||||
If issues are discovered after deployment:
|
||||
|
||||
1. **Immediate**: Set `enabled=0` in config and restart (zero code change)
|
||||
2. **Quick**: Rebuild with `XRPL_ENABLE_TELEMETRY=OFF`
|
||||
3. **Complete**: Revert telemetry commits (clean separation makes this easy)
|
||||
|
||||
### 3.9.7 Code Change Examples
|
||||
|
||||
**Minimal RPC Instrumentation (Low Intrusiveness):**
|
||||
|
||||
```cpp
|
||||
// Before
|
||||
void ServerHandler::onRequest(...) {
|
||||
auto result = processRequest(req);
|
||||
send(result);
|
||||
}
|
||||
|
||||
// After (only ~4 lines added)
|
||||
void ServerHandler::onRequest(...) {
|
||||
auto span = telemetry::SpanGuard::rpcSpan("rpc.request"); // +1 line
|
||||
span.setAttribute("xrpl.rpc.command", command); // +1 line
|
||||
|
||||
auto result = processRequest(req);
|
||||
|
||||
span.setAttribute("xrpl.rpc.status", status); // +1 line
|
||||
send(result);
|
||||
}
|
||||
```
|
||||
|
||||
SpanGuard factory methods (`rpcSpan`, `txSpan`, `consensusSpan`, etc.)
|
||||
access the global `Telemetry` instance internally and check the relevant
|
||||
component filter (`shouldTraceRpc()`, etc.) before creating a span. The
|
||||
public SpanGuard header has zero `opentelemetry/` includes -- all OTel
|
||||
types are hidden behind the pimpl idiom.
|
||||
|
||||
**Consensus Instrumentation (Medium Intrusiveness):**
|
||||
|
||||
```cpp
|
||||
// Before
|
||||
void RCLConsensusAdaptor::startRound(...) {
|
||||
// ... existing logic
|
||||
}
|
||||
|
||||
// After (context storage required)
|
||||
void RCLConsensusAdaptor::startRound(...) {
|
||||
auto span = telemetry::SpanGuard::consensusSpan("consensus.round");
|
||||
span.setAttribute("xrpl.consensus.ledger.seq", seq);
|
||||
|
||||
// Store context for child spans in phase transitions
|
||||
currentRoundContext_ = span.context(); // New member variable
|
||||
|
||||
// ... existing logic unchanged
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
_Previous: [Design Decisions](./02-design-decisions.md)_ | _Next: [Code Samples](./04-code-samples.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
|
||||
@@ -1,974 +0,0 @@
|
||||
# Configuration Reference
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Code Samples](./04-code-samples.md) | [Implementation Phases](./06-implementation-phases.md)
|
||||
|
||||
---
|
||||
|
||||
## 5.1 xrpld Configuration
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **TxQ** = Transaction Queue
|
||||
|
||||
### 5.1.1 Configuration File Section
|
||||
|
||||
Add to `cfg/xrpld-example.cfg`:
|
||||
|
||||
```ini
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# TELEMETRY (OpenTelemetry Distributed Tracing)
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
#
|
||||
# Enables distributed tracing for transaction flow, consensus, and RPC calls.
|
||||
# Traces are exported to an OpenTelemetry Collector using OTLP protocol.
|
||||
#
|
||||
# [telemetry]
|
||||
#
|
||||
# # Enable/disable telemetry (default: 0 = disabled)
|
||||
# enabled=1
|
||||
#
|
||||
# # Exporter type: "otlp_grpc" (default), "otlp_http", or "none"
|
||||
# exporter=otlp_grpc
|
||||
#
|
||||
# # OTLP endpoint (default: localhost:4317 for gRPC, localhost:4318 for HTTP)
|
||||
# endpoint=localhost:4317
|
||||
#
|
||||
# # Use TLS for exporter connection (default: 0)
|
||||
# use_tls=0
|
||||
#
|
||||
# # Path to CA certificate for TLS (optional)
|
||||
# # tls_ca_cert=/path/to/ca.crt
|
||||
#
|
||||
# # Sampling ratio: 0.0-1.0 (default: 1.0 = all traces sampled). For
# # high-throughput production deployments, 0.1 (10%) is recommended to
# # reduce overhead. See Section 7.4.2 for sampling strategy details.
|
||||
# sampling_ratio=0.1
|
||||
#
|
||||
# # Batch processor settings
|
||||
# batch_size=512 # Spans per batch (default: 512)
|
||||
# batch_delay_ms=5000 # Max delay before sending batch (default: 5000)
|
||||
# max_queue_size=2048 # Max queued spans (default: 2048)
|
||||
#
|
||||
# # Component-specific tracing (default: all enabled except peer)
|
||||
# trace_transactions=1 # Transaction relay and processing
|
||||
# trace_consensus=1 # Consensus rounds and proposals
|
||||
# trace_rpc=1 # RPC request handling
|
||||
# trace_peer=0 # Peer messages (high volume, disabled by default)
|
||||
# trace_ledger=1 # Ledger acquisition and building
|
||||
# trace_pathfind=1 # Path computation (can be expensive)
|
||||
# trace_txq=1 # Transaction queue and fee escalation
|
||||
# trace_validator=0 # Validator list and manifest updates (low volume)
|
||||
# trace_amendment=0 # Amendment voting (very low volume)
|
||||
#
|
||||
# # Trace ID strategies for cross-node correlation
|
||||
# # "deterministic" (default) derives trace_id from a workflow hash
|
||||
# # (txHash for transactions, prevLedgerHash for consensus) so all nodes
|
||||
# # produce spans under the same trace_id for the same workflow.
|
||||
# # "attribute" uses random trace_id; correlation via attribute queries.
|
||||
# tx_trace_strategy=deterministic
|
||||
# consensus_trace_strategy=deterministic
|
||||
#
|
||||
# # Service identification (automatically detected if not specified)
|
||||
# # service_name=xrpld
|
||||
# # service_instance_id=<node_public_key>
|
||||
|
||||
[telemetry]
|
||||
enabled=0
|
||||
```
|
||||
|
||||
### 5.1.2 Configuration Options Summary
|
||||
|
||||
| Option | Type | Default | Description |
|
||||
| -------------------------- | ------ | ----------------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| `enabled` | bool | `false` | Enable/disable telemetry |
|
||||
| `exporter` | string | `"otlp_grpc"` | Exporter type: otlp_grpc, otlp_http, none |
|
||||
| `endpoint` | string | `localhost:4317` | OTLP collector endpoint |
|
||||
| `use_tls` | bool | `false` | Enable TLS for exporter connection |
|
||||
| `tls_ca_cert` | string | `""` | Path to CA certificate file |
|
||||
| `sampling_ratio` | float | `1.0` | Sampling ratio (0.0-1.0) |
|
||||
| `batch_size` | uint | `512` | Spans per export batch |
|
||||
| `batch_delay_ms` | uint | `5000` | Max delay before sending batch (ms) |
|
||||
| `max_queue_size` | uint | `2048` | Maximum queued spans |
|
||||
| `trace_transactions` | bool | `true` | Enable transaction tracing |
|
||||
| `trace_consensus` | bool | `true` | Enable consensus tracing |
|
||||
| `trace_rpc` | bool | `true` | Enable RPC tracing |
|
||||
| `trace_peer` | bool | `false` | Enable peer message tracing (high volume) |
|
||||
| `trace_ledger` | bool | `true` | Enable ledger tracing |
|
||||
| `trace_pathfind` | bool | `true` | Enable path computation tracing |
|
||||
| `trace_txq` | bool | `true` | Enable transaction queue tracing |
|
||||
| `trace_validator` | bool | `false` | Enable validator list/manifest tracing |
|
||||
| `trace_amendment` | bool | `false` | Enable amendment voting tracing |
|
||||
| `tx_trace_strategy` | string | `"deterministic"` | TX trace ID strategy: `"deterministic"` (trace_id = txHash[0:16]) or `"attribute"` (random) |
|
||||
| `consensus_trace_strategy` | string | `"deterministic"` | Consensus trace ID strategy: `"deterministic"` (trace_id = prevLedgerHash[0:16]) or `"attribute"` (random) |
|
||||
| `service_name` | string | `"xrpld"` | Service name for traces |
|
||||
| `service_instance_id` | string | `<node_pubkey>` | Instance identifier |
|
||||
|
||||
---
|
||||
|
||||
## 5.2 Configuration Parser
|
||||
|
||||
> **TxQ** = Transaction Queue
|
||||
|
||||
```cpp
|
||||
// src/libxrpl/telemetry/TelemetryConfig.cpp
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
Telemetry::Setup
|
||||
setup_Telemetry(
|
||||
Section const& section,
|
||||
std::string const& nodePublicKey,
|
||||
std::string const& version)
|
||||
{
|
||||
Telemetry::Setup setup;
|
||||
|
||||
// Basic settings
|
||||
setup.enabled = section.value_or("enabled", false);
|
||||
setup.serviceName = section.value_or("service_name", "xrpld");
|
||||
setup.serviceVersion = version;
|
||||
setup.serviceInstanceId = section.value_or(
|
||||
"service_instance_id", nodePublicKey);
|
||||
|
||||
// Exporter settings
|
||||
setup.exporterType = section.value_or("exporter", "otlp_grpc");
|
||||
|
||||
if (setup.exporterType == "otlp_grpc")
|
||||
setup.exporterEndpoint = section.value_or("endpoint", "localhost:4317");
|
||||
else if (setup.exporterType == "otlp_http")
|
||||
setup.exporterEndpoint = section.value_or("endpoint", "localhost:4318");
|
||||
|
||||
setup.useTls = section.value_or("use_tls", false);
|
||||
setup.tlsCertPath = section.value_or("tls_ca_cert", "");
|
||||
|
||||
// Sampling
|
||||
setup.samplingRatio = section.value_or("sampling_ratio", 1.0);
|
||||
if (setup.samplingRatio < 0.0 || setup.samplingRatio > 1.0)
|
||||
{
|
||||
Throw<std::runtime_error>(
|
||||
"telemetry.sampling_ratio must be between 0.0 and 1.0");
|
||||
}
|
||||
|
||||
// Batch processor
|
||||
setup.batchSize = section.value_or("batch_size", 512u);
|
||||
setup.batchDelay = std::chrono::milliseconds{
|
||||
section.value_or("batch_delay_ms", 5000u)};
|
||||
setup.maxQueueSize = section.value_or("max_queue_size", 2048u);
|
||||
|
||||
// Component filtering
|
||||
setup.traceTransactions = section.value_or("trace_transactions", true);
|
||||
setup.traceConsensus = section.value_or("trace_consensus", true);
|
||||
setup.traceRpc = section.value_or("trace_rpc", true);
|
||||
setup.tracePeer = section.value_or("trace_peer", false);
|
||||
setup.traceLedger = section.value_or("trace_ledger", true);
|
||||
setup.tracePathfind = section.value_or("trace_pathfind", true);
|
||||
setup.traceTxQ = section.value_or("trace_txq", true);
|
||||
setup.traceValidator = section.value_or("trace_validator", false);
|
||||
setup.traceAmendment = section.value_or("trace_amendment", false);
|
||||
|
||||
return setup;
|
||||
}
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5.3 Application Integration
|
||||
|
||||
### 5.3.1 ApplicationImp Changes
|
||||
|
||||
> **Deferred identity**: The node public key (`nodeIdentity_`) is not
|
||||
> available during `ApplicationImp`'s member initializer list — it is
|
||||
> resolved later in `setup()`. The `Telemetry` object is therefore
|
||||
> constructed with an empty `serviceInstanceId` and patched via
|
||||
> `setServiceInstanceId()` once `setup()` has called `getNodeIdentity()`.
|
||||
|
||||
```cpp
|
||||
// src/xrpld/app/main/Application.cpp (modified)
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
|
||||
class ApplicationImp : public Application, public BasicApp
|
||||
{
|
||||
// ... existing members (perfLog_, etc.) ...
|
||||
|
||||
// Telemetry — constructed in the member initializer list with
|
||||
// an empty serviceInstanceId, patched in setup().
|
||||
std::unique_ptr<telemetry::Telemetry> telemetry_;
|
||||
|
||||
// Member initializer list (excerpt):
|
||||
// ...
|
||||
// , telemetry_(
|
||||
// telemetry::make_Telemetry(
|
||||
// telemetry::setup_Telemetry(
|
||||
// config_->section("telemetry"),
|
||||
// "", // Updated later via setServiceInstanceId()
|
||||
// BuildInfo::getVersionString()),
|
||||
// logs_->journal("Telemetry")))
|
||||
// ...
|
||||
|
||||
bool setup(...) override
|
||||
{
|
||||
// ... existing setup code ...
|
||||
|
||||
nodeIdentity_ = getNodeIdentity(*this, cmdline);
|
||||
|
||||
// Inject node identity into telemetry resource attributes,
|
||||
// unless the user already set a custom service_instance_id.
|
||||
if (!config_->section("telemetry").exists("service_instance_id"))
|
||||
telemetry_->setServiceInstanceId(
|
||||
toBase58(TokenType::NodePublic, nodeIdentity_->first));
|
||||
|
||||
// ... rest of setup ...
|
||||
}
|
||||
|
||||
void start(bool withTimers) override
|
||||
{
|
||||
// ... existing start code ...
|
||||
telemetry_->start();
|
||||
}
|
||||
|
||||
void run() override
|
||||
{
|
||||
// ... existing run/shutdown code ...
|
||||
telemetry_->stop();
|
||||
}
|
||||
|
||||
telemetry::Telemetry&
|
||||
getTelemetry() override
|
||||
{
|
||||
return *telemetry_;
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### 5.3.2 ServiceRegistry Interface Addition
|
||||
|
||||
```cpp
|
||||
// include/xrpl/core/ServiceRegistry.h (modified)
|
||||
|
||||
namespace telemetry {
|
||||
class Telemetry;
|
||||
} // namespace telemetry
|
||||
|
||||
class ServiceRegistry
|
||||
{
|
||||
public:
|
||||
// ... existing virtual methods ...
|
||||
|
||||
/** Get the telemetry system for distributed tracing. */
|
||||
virtual telemetry::Telemetry&
|
||||
getTelemetry() = 0;
|
||||
};
|
||||
```
|
||||
|
||||
> **Note:** `Application` extends `ServiceRegistry`, so `getTelemetry()` is
|
||||
> available on both. Components that hold a `ServiceRegistry&` (e.g.
|
||||
> `NetworkOPsImp`) call `registry_.get().getTelemetry()`. Components that
|
||||
> still hold an `Application&` (e.g. `ServerHandler`, `PeerImp`,
|
||||
> `RCLConsensusAdaptor`) call `app_.getTelemetry()` directly.
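
A minimal illustration of the two access patterns described in the note (the member names `registry_` and `app_` are assumed, mirroring the examples elsewhere in this plan):

```cpp
// Illustrative access patterns only; not actual rippled code.

// Component constructed with a ServiceRegistry& (e.g. NetworkOPsImp),
// where registry_ is a std::reference_wrapper<ServiceRegistry>:
telemetry::Telemetry& viaRegistry = registry_.get().getTelemetry();

// Component still holding an Application& (Application extends ServiceRegistry):
telemetry::Telemetry& viaApp = app_.getTelemetry();
```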
|
||||
|
||||
---
|
||||
|
||||
## 5.4 CMake Integration
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
### 5.4.1 Find OpenTelemetry Module
|
||||
|
||||
```cmake
|
||||
# cmake/FindOpenTelemetry.cmake
|
||||
|
||||
# Find OpenTelemetry C++ SDK
|
||||
#
|
||||
# This module defines:
|
||||
# OpenTelemetry_FOUND - System has OpenTelemetry
|
||||
# OpenTelemetry::api - API library target
|
||||
# OpenTelemetry::sdk - SDK library target
|
||||
# OpenTelemetry::otlp_grpc_exporter - OTLP gRPC exporter target
|
||||
# OpenTelemetry::otlp_http_exporter - OTLP HTTP exporter target
|
||||
|
||||
find_package(opentelemetry-cpp CONFIG QUIET)
|
||||
|
||||
if(opentelemetry-cpp_FOUND)
|
||||
set(OpenTelemetry_FOUND TRUE)
|
||||
|
||||
# Create imported targets if not already created by config
|
||||
if(NOT TARGET OpenTelemetry::api)
|
||||
add_library(OpenTelemetry::api ALIAS opentelemetry-cpp::api)
|
||||
endif()
|
||||
if(NOT TARGET OpenTelemetry::sdk)
|
||||
add_library(OpenTelemetry::sdk ALIAS opentelemetry-cpp::sdk)
|
||||
endif()
|
||||
if(NOT TARGET OpenTelemetry::otlp_grpc_exporter)
|
||||
add_library(OpenTelemetry::otlp_grpc_exporter ALIAS
|
||||
opentelemetry-cpp::otlp_grpc_exporter)
|
||||
endif()
|
||||
else()
|
||||
# Try pkg-config fallback
|
||||
find_package(PkgConfig QUIET)
|
||||
if(PKG_CONFIG_FOUND)
|
||||
pkg_check_modules(OTEL opentelemetry-cpp QUIET)
|
||||
if(OTEL_FOUND)
|
||||
set(OpenTelemetry_FOUND TRUE)
|
||||
# Create imported targets from pkg-config
|
||||
add_library(OpenTelemetry::api INTERFACE IMPORTED)
|
||||
target_include_directories(OpenTelemetry::api INTERFACE
|
||||
${OTEL_INCLUDE_DIRS})
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(OpenTelemetry
|
||||
REQUIRED_VARS OpenTelemetry_FOUND)
|
||||
```
|
||||
|
||||
### 5.4.2 CMakeLists.txt Changes
|
||||
|
||||
```cmake
|
||||
# CMakeLists.txt (additions)
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# TELEMETRY OPTIONS
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
option(XRPL_ENABLE_TELEMETRY
|
||||
"Enable OpenTelemetry distributed tracing support" OFF)
|
||||
|
||||
if(XRPL_ENABLE_TELEMETRY)
|
||||
find_package(OpenTelemetry REQUIRED)
|
||||
|
||||
# Define compile-time flag
|
||||
add_compile_definitions(XRPL_ENABLE_TELEMETRY)
|
||||
|
||||
message(STATUS "OpenTelemetry tracing: ENABLED")
|
||||
else()
|
||||
message(STATUS "OpenTelemetry tracing: DISABLED")
|
||||
endif()
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# TELEMETRY LIBRARY
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
if(XRPL_ENABLE_TELEMETRY)
|
||||
add_library(xrpl_telemetry
|
||||
src/libxrpl/telemetry/Telemetry.cpp
|
||||
src/libxrpl/telemetry/TelemetryConfig.cpp
|
||||
src/libxrpl/telemetry/TraceContext.cpp
|
||||
)
|
||||
|
||||
target_include_directories(xrpl_telemetry
|
||||
PUBLIC
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/include
|
||||
)
|
||||
|
||||
target_link_libraries(xrpl_telemetry
|
||||
PUBLIC
|
||||
OpenTelemetry::api
|
||||
OpenTelemetry::sdk
|
||||
OpenTelemetry::otlp_grpc_exporter
|
||||
PRIVATE
|
||||
xrpl_basics
|
||||
)
|
||||
|
||||
# Add to main library dependencies
|
||||
target_link_libraries(xrpld PRIVATE xrpl_telemetry)
|
||||
else()
|
||||
# Create null implementation library
|
||||
add_library(xrpl_telemetry
|
||||
src/libxrpl/telemetry/NullTelemetry.cpp
|
||||
)
|
||||
target_include_directories(xrpl_telemetry
|
||||
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include
|
||||
)
|
||||
endif()
|
||||
```
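
For reference, a sketch of what the no-op `NullTelemetry.cpp` referenced above could contain, assuming the `Telemetry` interface exposes `start()`, `stop()`, and the component filters used elsewhere in this plan (exact signatures are set by the interface, not by this sketch; parameters are shown by value for brevity):

```cpp
// Sketch of the disabled-build implementation selected by the CMake branch above.
// Every call collapses to an empty override, so instrumented call sites compile
// unchanged and cost effectively nothing when XRPL_ENABLE_TELEMETRY is OFF.
#include <xrpl/telemetry/Telemetry.h>

#include <memory>

namespace xrpl::telemetry {

class NullTelemetry final : public Telemetry
{
public:
    void start() override {}                            // nothing to initialize
    void stop() override {}                             // nothing to flush
    bool isEnabled() const override { return false; }
    bool shouldTraceRpc() const override { return false; }
    // ... remaining component filters likewise return false ...
};

std::unique_ptr<Telemetry>
make_Telemetry(Telemetry::Setup, beast::Journal)
{
    return std::make_unique<NullTelemetry>();
}

}  // namespace xrpl::telemetry
```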
|
||||
|
||||
---
|
||||
|
||||
## 5.5 OpenTelemetry Collector Configuration
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **APM** = Application Performance Monitoring
|
||||
|
||||
### 5.5.1 Development Configuration
|
||||
|
||||
```yaml
|
||||
# otel-collector-dev.yaml
|
||||
# Minimal configuration for local development
|
||||
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
|
||||
processors:
|
||||
batch:
|
||||
timeout: 1s
|
||||
send_batch_size: 100
|
||||
|
||||
exporters:
|
||||
# Console output for debugging
|
||||
logging:
|
||||
verbosity: detailed
|
||||
sampling_initial: 5
|
||||
sampling_thereafter: 200
|
||||
|
||||
# Tempo for trace storage
|
||||
otlp/tempo:
|
||||
endpoint: tempo:4317
|
||||
tls:
|
||||
insecure: true
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [logging, otlp/tempo]
|
||||
```
|
||||
|
||||
### 5.5.2 Production Configuration
|
||||
|
||||
```yaml
|
||||
# otel-collector-prod.yaml
|
||||
# Production configuration with filtering, sampling, and multiple backends
|
||||
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
tls:
|
||||
cert_file: /etc/otel/server.crt
|
||||
key_file: /etc/otel/server.key
|
||||
ca_file: /etc/otel/ca.crt
|
||||
|
||||
processors:
|
||||
# Memory limiter to prevent OOM
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_mib: 1000
|
||||
spike_limit_mib: 200
|
||||
|
||||
# Batch processing for efficiency
|
||||
batch:
|
||||
timeout: 5s
|
||||
send_batch_size: 512
|
||||
send_batch_max_size: 1024
|
||||
|
||||
# Tail-based sampling (keep errors and slow traces)
|
||||
tail_sampling:
|
||||
decision_wait: 10s
|
||||
num_traces: 100000
|
||||
expected_new_traces_per_sec: 1000
|
||||
policies:
|
||||
# Always keep error traces
|
||||
- name: errors
|
||||
type: status_code
|
||||
status_code:
|
||||
status_codes: [ERROR]
|
||||
# Keep slow consensus rounds (>5s)
|
||||
- name: slow-consensus
|
||||
type: latency
|
||||
latency:
|
||||
threshold_ms: 5000
|
||||
# Keep slow RPC requests (>1s)
|
||||
- name: slow-rpc
|
||||
type: and
|
||||
and:
|
||||
and_sub_policy:
|
||||
- name: rpc-spans
|
||||
type: string_attribute
|
||||
string_attribute:
|
||||
key: xrpl.rpc.command
|
||||
values: [".*"]
|
||||
enabled_regex_matching: true
|
||||
- name: latency
|
||||
type: latency
|
||||
latency:
|
||||
threshold_ms: 1000
|
||||
# Probabilistic sampling for the rest
|
||||
- name: probabilistic
|
||||
type: probabilistic
|
||||
probabilistic:
|
||||
sampling_percentage: 10
|
||||
|
||||
# Attribute processing
|
||||
attributes:
|
||||
actions:
|
||||
# Hash sensitive data
|
||||
- key: xrpl.tx.account
|
||||
action: hash
|
||||
# Add deployment info
|
||||
- key: deployment.environment
|
||||
value: production
|
||||
action: upsert
|
||||
|
||||
exporters:
|
||||
# Grafana Tempo for long-term storage
|
||||
otlp/tempo:
|
||||
endpoint: tempo.monitoring:4317
|
||||
tls:
|
||||
insecure: false
|
||||
ca_file: /etc/otel/tempo-ca.crt
|
||||
|
||||
# Elastic APM for correlation with logs
|
||||
otlp/elastic:
|
||||
endpoint: apm.elastic:8200
|
||||
headers:
|
||||
Authorization: "Bearer ${ELASTIC_APM_TOKEN}"
|
||||
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
|
||||
service:
|
||||
extensions: [health_check, zpages]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [memory_limiter, tail_sampling, attributes, batch]
|
||||
exporters: [otlp/tempo, otlp/elastic]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5.6 Docker Compose Development Environment
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
```yaml
|
||||
# docker-compose-telemetry.yaml
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
# OpenTelemetry Collector
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector-contrib:0.92.0
|
||||
container_name: otel-collector
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-dev.yaml:/etc/otel-collector-config.yaml:ro
|
||||
ports:
|
||||
- "4317:4317" # OTLP gRPC
|
||||
- "4318:4318" # OTLP HTTP
|
||||
- "13133:13133" # Health check
|
||||
depends_on:
|
||||
- tempo
|
||||
|
||||
# Tempo for trace storage
|
||||
tempo:
|
||||
image: grafana/tempo:2.6.1
|
||||
container_name: tempo
|
||||
ports:
|
||||
- "3200:3200" # Tempo HTTP API
|
||||
- "4317" # OTLP gRPC (internal)
|
||||
|
||||
# Grafana for dashboards
|
||||
grafana:
|
||||
image: grafana/grafana:10.2.3
|
||||
container_name: grafana
|
||||
environment:
|
||||
- GF_AUTH_ANONYMOUS_ENABLED=true
|
||||
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
|
||||
volumes:
|
||||
- ./grafana/provisioning:/etc/grafana/provisioning:ro
|
||||
- ./grafana/dashboards:/var/lib/grafana/dashboards:ro
|
||||
ports:
|
||||
- "3000:3000"
|
||||
depends_on:
|
||||
- tempo
|
||||
|
||||
# Prometheus for metrics (optional, for correlation)
|
||||
prometheus:
|
||||
image: prom/prometheus:v2.48.1
|
||||
container_name: prometheus
|
||||
volumes:
|
||||
- ./prometheus.yaml:/etc/prometheus/prometheus.yml:ro
|
||||
ports:
|
||||
- "9090:9090"
|
||||
|
||||
networks:
|
||||
default:
|
||||
name: xrpld-telemetry
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5.7 Configuration Architecture
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph config["Configuration Sources"]
|
||||
cfgFile["xrpld.cfg<br/>[telemetry] section"]
|
||||
cmake["CMake<br/>XRPL_ENABLE_TELEMETRY"]
|
||||
end
|
||||
|
||||
subgraph init["Initialization"]
|
||||
parse["setup_Telemetry()"]
|
||||
factory["make_Telemetry()"]
|
||||
end
|
||||
|
||||
subgraph runtime["Runtime Components"]
|
||||
tracer["TracerProvider"]
|
||||
exporter["OTLP Exporter"]
|
||||
processor["BatchProcessor"]
|
||||
end
|
||||
|
||||
subgraph collector["Collector Pipeline"]
|
||||
recv["Receivers"]
|
||||
proc["Processors"]
|
||||
exp["Exporters"]
|
||||
end
|
||||
|
||||
cfgFile --> parse
|
||||
cmake -->|"compile flag"| parse
|
||||
parse --> factory
|
||||
factory --> tracer
|
||||
tracer --> processor
|
||||
processor --> exporter
|
||||
exporter -->|"OTLP"| recv
|
||||
recv --> proc
|
||||
proc --> exp
|
||||
|
||||
style config fill:#e3f2fd,stroke:#1976d2
|
||||
style runtime fill:#e8f5e9,stroke:#388e3c
|
||||
style collector fill:#fff3e0,stroke:#ff9800
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **Configuration Sources**: `xrpld.cfg` provides runtime settings (endpoint, sampling) while the CMake flag controls whether telemetry is compiled in at all.
|
||||
- **Initialization**: `setup_Telemetry()` parses config values, then `make_Telemetry()` constructs the provider, processor, and exporter objects.
|
||||
- **Runtime Components**: The `TracerProvider` creates spans, the `BatchProcessor` buffers them, and the `OTLP Exporter` serializes and sends them over the wire.
|
||||
- **OTLP arrow to Collector**: Trace data leaves the xrpld process via OTLP (gRPC or HTTP) and enters the external Collector pipeline.
|
||||
- **Collector Pipeline**: `Receivers` ingest OTLP data, `Processors` apply sampling/filtering/enrichment, and `Exporters` forward traces to storage backends (Tempo, etc.).
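
The same flow, compressed into code using the names from sections 5.2 and 5.3 (an illustrative fragment; error handling and the surrounding `ApplicationImp` context are omitted, and with telemetry disabled the identical calls resolve to the null implementation):

```cpp
// Configuration Sources -> Initialization -> Runtime Components, in code.
auto setup = telemetry::setup_Telemetry(    // parse the [telemetry] config section
    config.section("telemetry"),
    /* nodePublicKey = */ "",               // patched later via setServiceInstanceId()
    BuildInfo::getVersionString());

auto tel = telemetry::make_Telemetry(       // builds TracerProvider, BatchProcessor,
    std::move(setup),                       // and the configured OTLP exporter
    logs.journal("Telemetry"));

tel->start();  // spans now flow: TracerProvider -> BatchProcessor -> OTLP exporter
```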
|
||||
|
||||
---
|
||||
|
||||
## 5.8 Grafana Integration
|
||||
|
||||
> **APM** = Application Performance Monitoring
|
||||
|
||||
Step-by-step instructions for integrating xrpld traces with Grafana.
|
||||
|
||||
### 5.8.1 Data Source Configuration
|
||||
|
||||
#### Tempo (Recommended)
|
||||
|
||||
```yaml
|
||||
# grafana/provisioning/datasources/tempo.yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Tempo
|
||||
type: tempo
|
||||
access: proxy
|
||||
url: http://tempo:3200
|
||||
jsonData:
|
||||
httpMethod: GET
|
||||
tracesToLogs:
|
||||
datasourceUid: loki
|
||||
tags: ["service.name", "xrpl.tx.hash"]
|
||||
mappedTags: [{ key: "trace_id", value: "traceID" }]
|
||||
mapTagNamesEnabled: true
|
||||
filterByTraceID: true
|
||||
serviceMap:
|
||||
datasourceUid: prometheus
|
||||
nodeGraph:
|
||||
enabled: true
|
||||
search:
|
||||
hide: false
|
||||
lokiSearch:
|
||||
datasourceUid: loki
|
||||
```
|
||||
|
||||
#### Elastic APM
|
||||
|
||||
```yaml
|
||||
# grafana/provisioning/datasources/elastic-apm.yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Elasticsearch-APM
|
||||
type: elasticsearch
|
||||
access: proxy
|
||||
url: http://elasticsearch:9200
|
||||
database: "apm-*"
|
||||
jsonData:
|
||||
esVersion: "8.0.0"
|
||||
timeField: "@timestamp"
|
||||
logMessageField: message
|
||||
logLevelField: log.level
|
||||
```
|
||||
|
||||
### 5.8.2 Dashboard Provisioning
|
||||
|
||||
```yaml
|
||||
# grafana/provisioning/dashboards/dashboards.yaml
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: "xrpld-dashboards"
|
||||
orgId: 1
|
||||
folder: "xrpld"
|
||||
folderUid: "xrpld"
|
||||
type: file
|
||||
disableDeletion: false
|
||||
updateIntervalSeconds: 30
|
||||
options:
|
||||
path: /var/lib/grafana/dashboards/rippled
|
||||
```
|
||||
|
||||
### 5.8.3 Example Dashboard: RPC Performance
|
||||
|
||||
```json
|
||||
{
|
||||
"title": "xrpld RPC Performance",
|
||||
"uid": "xrpld-rpc-performance",
|
||||
"panels": [
|
||||
{
|
||||
"title": "RPC Latency by Command",
|
||||
"type": "heatmap",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && span.xrpl.rpc.command != \"\"} | histogram_over_time(duration) by (span.xrpl.rpc.command)"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "RPC Error Rate",
|
||||
"type": "timeseries",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && status.code=error} | rate() by (span.xrpl.rpc.command)"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Top 10 Slowest RPC Commands",
|
||||
"type": "table",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && span.xrpl.rpc.command != \"\"} | avg(duration) by (span.xrpl.rpc.command) | topk(10)"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 8 }
|
||||
},
|
||||
{
|
||||
"title": "Recent Traces",
|
||||
"type": "table",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\"}"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 16 }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 5.8.4 Example Dashboard: Transaction Tracing
|
||||
|
||||
```json
|
||||
{
|
||||
"title": "xrpld Transaction Tracing",
|
||||
"uid": "xrpld-tx-tracing",
|
||||
"panels": [
|
||||
{
|
||||
"title": "Transaction Throughput",
|
||||
"type": "stat",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"tx.receive\"} | rate()"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 4, "w": 6, "x": 0, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Cross-Node Relay Count",
|
||||
"type": "timeseries",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"tx.relay\"} | avg(span.xrpl.tx.relay_count)"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 }
|
||||
},
|
||||
{
|
||||
"title": "Transaction Validation Errors",
|
||||
"type": "table",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"tx.validate\" && status.code=error}"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 5.8.5 TraceQL Query Examples
|
||||
|
||||
Common queries for xrpld traces:
|
||||
|
||||
```
|
||||
# Find all traces for a specific transaction hash
|
||||
{resource.service.name="xrpld" && span.xrpl.tx.hash="ABC123..."}
|
||||
|
||||
# Find slow RPC commands (>100ms)
|
||||
{resource.service.name="xrpld" && name=~"rpc.command.*"} | duration > 100ms
|
||||
|
||||
# Find consensus rounds taking >5 seconds
|
||||
{resource.service.name="xrpld" && name="consensus.round"} | duration > 5s
|
||||
|
||||
# Find failed transactions with error details
|
||||
{resource.service.name="xrpld" && name="tx.validate" && status.code=error}
|
||||
|
||||
# Find transactions relayed to many peers
|
||||
{resource.service.name="xrpld" && name="tx.relay"} | span.xrpl.tx.relay_count > 10
|
||||
|
||||
# Compare latency across nodes
|
||||
{resource.service.name="xrpld" && name="rpc.command.account_info"} | avg(duration) by (resource.service.instance.id)
|
||||
```
|
||||
|
||||
### 5.8.6 Correlation with PerfLog
|
||||
|
||||
To correlate OpenTelemetry traces with existing PerfLog data:
|
||||
|
||||
**Step 1: Configure Loki to ingest PerfLog**
|
||||
|
||||
```yaml
|
||||
# promtail-config.yaml
|
||||
scrape_configs:
|
||||
- job_name: xrpld-perflog
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost
|
||||
labels:
|
||||
job: xrpld
|
||||
__path__: /var/log/rippled/perf*.log
|
||||
pipeline_stages:
|
||||
- json:
|
||||
expressions:
|
||||
trace_id: trace_id
|
||||
ledger_seq: ledger_seq
|
||||
tx_hash: tx_hash
|
||||
- labels:
|
||||
trace_id:
|
||||
ledger_seq:
|
||||
tx_hash:
|
||||
```
|
||||
|
||||
**Step 2: Add trace_id to PerfLog entries**
|
||||
|
||||
Modify PerfLog to include trace_id when available:
|
||||
|
||||
```cpp
|
||||
// In PerfLog output, add trace_id from current span context
|
||||
void logPerf(Json::Value& entry) {
|
||||
auto span = opentelemetry::trace::GetSpan(
|
||||
opentelemetry::context::RuntimeContext::GetCurrent());
|
||||
if (span && span->GetContext().IsValid()) {
|
||||
        char traceIdHex[32];  // ToLowerBase16 fills exactly 32 hex chars (no terminator)
|
||||
span->GetContext().trace_id().ToLowerBase16(traceIdHex);
|
||||
entry["trace_id"] = std::string(traceIdHex, 32);
|
||||
}
|
||||
// ... existing logging
|
||||
}
|
||||
```
|
||||
|
||||
**Step 3: Configure Grafana trace-to-logs link**
|
||||
|
||||
In Tempo data source configuration, set up the derived field:
|
||||
|
||||
```yaml
|
||||
jsonData:
|
||||
tracesToLogs:
|
||||
datasourceUid: loki
|
||||
tags: ["trace_id", "xrpl.tx.hash"]
|
||||
filterByTraceID: true
|
||||
filterBySpanID: false
|
||||
```
|
||||
|
||||
### 5.8.7 Correlation with Insight/StatsD Metrics
|
||||
|
||||
To correlate traces with existing Beast Insight metrics:
|
||||
|
||||
**Step 1: Export Insight metrics to Prometheus**
|
||||
|
||||
```yaml
|
||||
# prometheus.yaml
|
||||
scrape_configs:
|
||||
- job_name: "xrpld-statsd"
|
||||
static_configs:
|
||||
- targets: ["statsd-exporter:9102"]
|
||||
```
|
||||
|
||||
**Step 2: Add exemplars to metrics**
|
||||
|
||||
OpenTelemetry SDK automatically adds exemplars (trace IDs) to metrics when using the Prometheus exporter. This links metrics spikes to specific traces.
|
||||
|
||||
**Step 3: Configure Grafana metric-to-trace link**
|
||||
|
||||
```yaml
|
||||
# In Prometheus data source
|
||||
jsonData:
|
||||
exemplarTraceIdDestinations:
|
||||
- name: trace_id
|
||||
datasourceUid: tempo
|
||||
```
|
||||
|
||||
**Step 4: Dashboard panel with exemplars**
|
||||
|
||||
```json
|
||||
{
|
||||
"title": "RPC Latency with Trace Links",
|
||||
"type": "timeseries",
|
||||
"datasource": "Prometheus",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "histogram_quantile(0.99, rate(xrpld_rpc_duration_seconds_bucket[5m]))",
|
||||
"exemplar": true
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This allows clicking on metric data points to jump directly to the related trace.
|
||||
|
||||
---
|
||||
|
||||
_Previous: [Code Samples](./04-code-samples.md)_ | _Next: [Implementation Phases](./06-implementation-phases.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
|
||||
@@ -1,735 +0,0 @@
|
||||
# Implementation Phases
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Configuration Reference](./05-configuration-reference.md) | [Observability Backends](./07-observability-backends.md)
|
||||
|
||||
---
|
||||
|
||||
## 6.1 Phase Overview
|
||||
|
||||
> **TxQ** = Transaction Queue
|
||||
|
||||
```mermaid
|
||||
gantt
|
||||
title OpenTelemetry Implementation Timeline
|
||||
dateFormat YYYY-MM-DD
|
||||
axisFormat Week %W
|
||||
|
||||
section Phase 1
|
||||
Core Infrastructure :p1, 2024-01-01, 2w
|
||||
SDK Integration :p1a, 2024-01-01, 4d
|
||||
Telemetry Interface :p1b, after p1a, 3d
|
||||
Configuration & CMake :p1c, after p1b, 3d
|
||||
Unit Tests :p1d, after p1c, 2d
|
||||
Buffer & Integration :p1e, after p1d, 2d
|
||||
|
||||
section Phase 2
|
||||
RPC Tracing :p2, after p1, 2w
|
||||
HTTP Context Extraction :p2a, after p1, 2d
|
||||
RPC Handler Instrumentation :p2b, after p2a, 4d
|
||||
PathFinding Instrumentation :p2f, after p2b, 2d
|
||||
TxQ Instrumentation :p2g, after p2f, 2d
|
||||
WebSocket Support :p2c, after p2g, 2d
|
||||
Integration Tests :p2d, after p2c, 2d
|
||||
Buffer & Review :p2e, after p2d, 4d
|
||||
|
||||
section Phase 3
|
||||
Transaction Tracing :p3, after p2, 2w
|
||||
Protocol Buffer Extension :p3a, after p2, 2d
|
||||
PeerImp Instrumentation :p3b, after p3a, 3d
|
||||
Fee Escalation Instrumentation :p3f, after p3b, 2d
|
||||
Relay Context Propagation :p3c, after p3f, 3d
|
||||
Multi-node Tests :p3d, after p3c, 2d
|
||||
Buffer & Review :p3e, after p3d, 4d
|
||||
|
||||
section Phase 4
|
||||
Consensus Tracing :p4, after p3, 2w
|
||||
Consensus Round Spans :p4a, after p3, 3d
|
||||
Proposal Handling :p4b, after p4a, 3d
|
||||
Validator List & Manifest Tracing :p4f, after p4b, 2d
|
||||
Amendment Voting Tracing :p4g, after p4f, 2d
|
||||
SHAMap Sync Tracing :p4h, after p4g, 2d
|
||||
Validation Tests :p4c, after p4h, 4d
|
||||
Buffer & Review :p4e, after p4c, 4d
|
||||
|
||||
section Phase 5
|
||||
Documentation & Deploy :p5, after p4, 1w
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 6.2 Phase 1: Core Infrastructure (Weeks 1-2)
|
||||
|
||||
**Objective**: Establish foundational telemetry infrastructure
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description |
|
||||
| ---- | ----------------------------------------------------- |
|
||||
| 1.1 | Add OpenTelemetry C++ SDK to Conan/CMake |
|
||||
| 1.2 | Implement `Telemetry` interface and factory |
|
||||
| 1.3 | Implement `SpanGuard` RAII wrapper |
|
||||
| 1.4 | Implement configuration parser |
|
||||
| 1.5 | Integrate into `ApplicationImp` |
|
||||
| 1.6 | Add conditional compilation (`XRPL_ENABLE_TELEMETRY`) |
|
||||
| 1.7 | Create `NullTelemetry` no-op implementation |
|
||||
| 1.8 | Unit tests for core infrastructure |
|
||||
|
||||
### Exit Criteria
|
||||
|
||||
- [ ] OpenTelemetry SDK compiles and links
|
||||
- [ ] Telemetry can be enabled/disabled via config
|
||||
- [ ] Basic span creation works
|
||||
- [ ] No performance regression when disabled
|
||||
- [ ] Unit tests passing
|
||||
|
||||
---
|
||||
|
||||
## 6.3 Phase 2: RPC Tracing (Weeks 3-4)
|
||||
|
||||
> **TxQ** = Transaction Queue
|
||||
|
||||
**Objective**: Complete tracing for all RPC operations
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description |
|
||||
| ---- | -------------------------------------------------------------------------- |
|
||||
| 2.1 | Implement W3C Trace Context HTTP header extraction (see the header-layout sketch after this table) |
|
||||
| 2.2 | Instrument `ServerHandler::onRequest()` |
|
||||
| 2.3 | Instrument `RPCHandler::doCommand()` |
|
||||
| 2.4 | Add RPC-specific attributes |
|
||||
| 2.5 | Instrument WebSocket handler |
|
||||
| 2.6 | PathFinding instrumentation (`pathfind.request`, `pathfind.compute` spans) |
|
||||
| 2.7 | TxQ instrumentation (`txq.enqueue`, `txq.apply` spans) |
|
||||
| 2.8 | Integration tests for RPC tracing |
|
||||
| 2.9 | Performance benchmarks |
|
||||
| 2.10 | Documentation |
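
For Task 2.1, the sketch below illustrates the `traceparent` header layout that gets extracted. A real implementation would more likely delegate to the OpenTelemetry propagator API rather than parse by hand, so treat this as a format illustration only:

```cpp
// Illustrative parser for the W3C traceparent header:
//   "00-<32 hex trace-id>-<16 hex parent-id>-<2 hex flags>"
#include <charconv>
#include <optional>
#include <string>
#include <string_view>
#include <system_error>

struct TraceParent
{
    std::string traceId;  // 32 lowercase hex characters
    std::string spanId;   // 16 lowercase hex characters
    bool sampled = false;
};

inline std::optional<TraceParent>
parseTraceparent(std::string_view header)
{
    // version(2) '-' trace-id(32) '-' parent-id(16) '-' flags(2) == 55 chars
    if (header.size() != 55 || header[2] != '-' || header[35] != '-' ||
        header[52] != '-')
        return std::nullopt;

    int flags = 0;
    auto [ptr, ec] =
        std::from_chars(header.data() + 53, header.data() + 55, flags, 16);
    if (ec != std::errc{})
        return std::nullopt;

    TraceParent tp;
    tp.traceId = std::string(header.substr(3, 32));
    tp.spanId = std::string(header.substr(36, 16));
    tp.sampled = (flags & 0x01) != 0;  // low bit of the flags byte = "sampled"
    return tp;
}
```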
|
||||
|
||||
### Exit Criteria
|
||||
|
||||
- [ ] All RPC commands traced
|
||||
- [ ] Trace context propagates from HTTP headers
|
||||
- [ ] WebSocket and HTTP both instrumented
|
||||
- [ ] <1ms overhead per RPC call
|
||||
- [ ] Integration tests passing
|
||||
|
||||
---
|
||||
|
||||
## 6.4 Phase 3: Transaction Tracing (Weeks 5-6)
|
||||
|
||||
**Objective**: Trace transaction lifecycle across network with deterministic cross-node correlation
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description |
|
||||
| ---- | -------------------------------------------------------------- |
|
||||
| 3.1 | Define `TraceContext` Protocol Buffer message |
|
||||
| 3.2 | Implement protobuf context serialization |
|
||||
| 3.3 | Instrument `PeerImp::handleTransaction()` |
|
||||
| 3.4 | Instrument `NetworkOPs::submitTransaction()` |
|
||||
| 3.5 | Instrument HashRouter integration |
|
||||
| 3.6 | Fee escalation instrumentation (`fee.escalate` span) |
|
||||
| 3.7 | Implement relay context propagation |
|
||||
| 3.8 | Integration tests (multi-node) |
|
||||
| 3.9 | Deterministic transaction trace ID (`trace_id = txHash[0:16]`) |
|
||||
| 3.10 | Performance benchmarks |
|
||||
|
||||
### Deterministic Trace ID (Task 3.9)
|
||||
|
||||
Transaction spans use **deterministic trace IDs** derived from the transaction hash:
|
||||
`trace_id = txHash[0:16]`. All nodes handling the same transaction independently
|
||||
produce spans under the same trace_id. Protobuf `span_id` propagation (Task 3.7)
|
||||
additionally provides parent-child relay ordering when available. See
|
||||
[02-design-decisions.md §2.5.0](./02-design-decisions.md) for the design rationale
|
||||
and [Phase3_taskList.md Task 3.9](./Phase3_taskList.md) for the full implementation spec.
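
A minimal sketch of the derivation (plain byte arrays stand in for rippled's hash and trace-id types, which are not shown here):

```cpp
// The first 16 bytes of the 256-bit transaction hash become the 128-bit
// trace_id, so every node independently computes the same id for the same
// transaction without any coordination.
#include <array>
#include <cstdint>
#include <cstring>

std::array<std::uint8_t, 16>
deterministicTraceId(std::array<std::uint8_t, 32> const& txHash)
{
    std::array<std::uint8_t, 16> traceId{};
    std::memcpy(traceId.data(), txHash.data(), traceId.size());  // txHash[0:16]
    return traceId;
}
```

The consensus variant described in Phase 4a applies the same truncation to `prevLedgerHash`, as reflected by the `consensus_trace_strategy` option in section 5.1.2.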
|
||||
|
||||
### Exit Criteria
|
||||
|
||||
- [ ] Transaction traces span across nodes
|
||||
- [ ] Trace context in Protocol Buffer messages
|
||||
- [ ] HashRouter deduplication visible in traces
|
||||
- [ ] Multi-node integration tests passing
|
||||
- [ ] <5% overhead on transaction throughput
|
||||
- [ ] Deterministic trace_id: all nodes produce same trace_id for same transaction
|
||||
- [ ] Protobuf span_id propagation preserves parent-child ordering when available
|
||||
|
||||
---
|
||||
|
||||
## 6.5 Phase 4: Consensus Tracing (Weeks 7-8)
|
||||
|
||||
**Objective**: Full observability into consensus rounds
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description |
|
||||
| ---- | ---------------------------------------------- |
|
||||
| 4.1 | Instrument `RCLConsensusAdaptor::startRound()` |
|
||||
| 4.2 | Instrument phase transitions |
|
||||
| 4.3 | Instrument proposal handling |
|
||||
| 4.4 | Instrument validation handling |
|
||||
| 4.5 | Add consensus-specific attributes |
|
||||
| 4.6 | Correlate with transaction traces |
|
||||
| 4.7 | Validator list and manifest tracing |
|
||||
| 4.8 | Amendment voting tracing |
|
||||
| 4.9 | SHAMap sync tracing |
|
||||
| 4.10 | Multi-validator integration tests |
|
||||
| 4.11 | Performance validation |
|
||||
|
||||
### Spans Produced
|
||||
|
||||
| Span Name | Location | Attributes |
|
||||
| --------------------------- | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `consensus.proposal.send` | `RCLConsensus.cpp:177` | `xrpl.consensus.round` |
|
||||
| `consensus.ledger_close` | `RCLConsensus.cpp:282` | `xrpl.consensus.ledger.seq`, `xrpl.consensus.mode` |
|
||||
| `consensus.accept` | `RCLConsensus.cpp:395` | `xrpl.consensus.proposers`, `xrpl.consensus.round_time_ms` |
|
||||
| `consensus.accept.apply` | `RCLConsensus.cpp:521` | `xrpl.consensus.close_time`, `close_time_correct`, `close_resolution_ms`, `state`, `proposing`, `round_time_ms`, `ledger.seq`, `parent_close_time`, `close_time_self`, `close_time_vote_bins`, `resolution_direction` |
|
||||
| `consensus.validation.send` | `RCLConsensus.cpp:753` | `xrpl.consensus.proposing` |
|
||||
|
||||
### Exit Criteria
|
||||
|
||||
- [x] Complete consensus round traces
|
||||
- [x] Phase transitions visible
|
||||
- [x] Proposals and validations traced
|
||||
- [x] Close time agreement tracked (per `avCT_CONSENSUS_PCT`)
|
||||
- [x] No impact on consensus timing
|
||||
- [ ] Multi-validator test network validated
|
||||
|
||||
### Implementation Status — Phase 4a Complete
|
||||
|
||||
Phase 4a (establish-phase gap fill & cross-node correlation) adds:
|
||||
|
||||
- **Deterministic trace ID** derived from `previousLedger.id()` so all validators
|
||||
in the same round share the same `trace_id` (switchable via
|
||||
`consensus_trace_strategy` config: `"deterministic"` or `"attribute"`).
|
||||
See [Configuration Reference](./05-configuration-reference.md) § 5.1.2 for the
full configuration options, including `consensus_trace_strategy`.
|
||||
- **Round lifecycle spans**: `consensus.round` with round-to-round span links.
|
||||
- **Establish phase**: `consensus.establish`, `consensus.update_positions` (with
|
||||
`dispute.resolve` events), `consensus.check` (with threshold tracking).
|
||||
- **Mode changes**: `consensus.mode_change` spans.
|
||||
- **Validation**: `consensus.validation.send` with span link to round span
|
||||
(thread-safe cross-thread access via `roundSpanContext_` snapshot).
|
||||
- **Separation of concerns**: telemetry extracted to private helpers
|
||||
(`startRoundTracing`, `createValidationSpan`, `startEstablishTracing`,
|
||||
`updateEstablishTracing`, `endEstablishTracing`).
|
||||
|
||||
See [Phase4_taskList.md](./Phase4_taskList.md) for the full spec and implementation notes.
|
||||
|
||||
---
|
||||
|
||||
## 6.5a Phase 4a: Establish-Phase Gap Fill & Cross-Node Correlation
|
||||
|
||||
**Objective**: Fill tracing gaps in the establish phase and establish cross-node
|
||||
correlation using deterministic trace IDs derived from `previousLedger.id()`.
|
||||
|
||||
**Approach**: Direct instrumentation in `Consensus.h` and `RCLConsensus.cpp`.
|
||||
All spans use `SpanGuard` factory methods with `TraceCategory::Consensus` gating.
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description | Effort | Risk |
|
||||
| ---- | ------------------------------------------------ | ------ | ------ |
|
||||
| 4a.0 | Prerequisites: extend SpanGuard & Telemetry APIs | 1d | Medium |
|
||||
| 4a.1 | Adaptor `getTelemetry()` method | 0.5d | Low |
|
||||
| 4a.2 | Switchable round span with deterministic traceID | 2d | High |
|
||||
| 4a.3 | Span members in `Consensus.h` | 0.5d | Medium |
|
||||
| 4a.4 | Instrument `phaseEstablish()` | 1d | Medium |
|
||||
| 4a.5 | Instrument `updateOurPositions()` | 1d | Medium |
|
||||
| 4a.6 | Instrument `haveConsensus()` (thresholds) | 1d | Medium |
|
||||
| 4a.7 | Instrument mode changes | 0.5d | Low |
|
||||
| 4a.8 | Reparent existing spans under round | 0.5d | Low |
|
||||
| 4a.9 | Build verification and testing | 1d | Low |
|
||||
|
||||
**Total Effort**: 9 days
|
||||
|
||||
### Spans Produced
|
||||
|
||||
| Span Name | Location | Key Attributes |
|
||||
| ---------------------------- | ------------------ | ---------------------------------------------------------------- |
|
||||
| `consensus.round` | `RCLConsensus.cpp` | `round_id`, `ledger_id`, `ledger.seq`, `mode`; link → prev round |
|
||||
| `consensus.establish` | `Consensus.h` | `converge_percent`, `establish_count`, `proposers` |
|
||||
| `consensus.update_positions` | `Consensus.h` | `disputes_count`, `converge_percent`, `proposers_agreed/total` |
|
||||
| `consensus.check` | `Consensus.h` | `agree/disagree_count`, `threshold_percent`, `result` |
|
||||
| `consensus.mode_change` | `RCLConsensus.cpp` | `mode.old`, `mode.new` |
|
||||
|
||||
### Exit Criteria
|
||||
|
||||
- [ ] Establish phase internals fully traced (disputes, convergence, thresholds)
|
||||
- [ ] Cross-node correlation works via deterministic trace_id
|
||||
- [ ] Strategy switchable via config (`deterministic` / `attribute`)
|
||||
- [ ] Consecutive rounds linked via follows-from spans
|
||||
- [ ] Build passes with telemetry ON and OFF
|
||||
- [ ] No impact on consensus timing
|
||||
|
||||
See [Phase4_taskList.md](./Phase4_taskList.md) for full task details.
|
||||
|
||||
---
|
||||
|
||||
## 6.5b Phase 4b: Cross-Node Propagation (Future)
|
||||
|
||||
**Objective**: Wire `TraceContextPropagator` for P2P messages (proposals,
|
||||
validations) to enable true distributed tracing between nodes.
|
||||
|
||||
**Status**: Design documented, NOT implemented. Protobuf fields (field 1001)
|
||||
and `TraceContextPropagator` class exist. Wiring deferred until Phase 4a is
|
||||
validated in a multi-node environment.
|
||||
|
||||
**Prerequisites**: Phase 4a complete and validated.
|
||||
|
||||
See [Phase4_taskList.md § Phase 4b](./Phase4_taskList.md) for full design.
|
||||
|
||||
---
|
||||
|
||||
## 6.6 Phase 5: Documentation & Deployment (Week 9)
|
||||
|
||||
**Objective**: Production readiness
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description | Status |
|
||||
| ---- | ----------------------------- | ------------------- |
|
||||
| 5.1 | Operator runbook | Complete |
|
||||
| 5.2 | Grafana dashboards | Complete |
|
||||
| 5.3 | Alert definitions | Deferred — post-MVP |
|
||||
| 5.4 | Collector deployment examples | Complete |
|
||||
| 5.5 | Developer documentation | Complete |
|
||||
| 5.6 | Training materials | Deferred — post-MVP |
|
||||
| 5.7 | Final integration testing | Complete |
|
||||
|
||||
---
|
||||
|
||||
## 6.7 Phase 6: StatsD Metrics Integration (Week 10)
|
||||
|
||||
**Objective**: Bridge rippled's existing `beast::insight` StatsD metrics into the OpenTelemetry collection pipeline, exposing 300+ pre-existing metrics alongside span-derived RED metrics in Prometheus/Grafana.
|
||||
|
||||
### Background
|
||||
|
||||
rippled has a mature metrics framework (`beast::insight`) that emits StatsD-format metrics over UDP. These metrics cover node health, peer networking, RPC performance, job queue, and overlay traffic — data that **does not** overlap with the span-based instrumentation from Phases 1-5. By adding a StatsD receiver to the OTel Collector, both metric sources converge in Prometheus.
|
||||
|
||||
### Metric Inventory
|
||||
|
||||
| Category | Group | Type | Count | Key Metrics |
|
||||
| --------------- | ------------------ | ------------- | ---------- | ------------------------------------------------------ |
|
||||
| Node State | `State_Accounting` | Gauge | 10 | `*_duration`, `*_transitions` per operating mode |
|
||||
| Ledger | `LedgerMaster` | Gauge | 2 | `Validated_Ledger_Age`, `Published_Ledger_Age` |
|
||||
| Ledger Fetch | — | Counter | 1 | `ledger_fetches` |
|
||||
| Ledger History | `ledger.history` | Counter | 1 | `mismatch` |
|
||||
| RPC | `rpc` | Counter+Event | 3 | `requests`, `time` (histogram), `size` (histogram) |
|
||||
| Job Queue | — | Gauge+Event | 1 + 2×N | `job_count`, per-job `{name}` and `{name}_q` |
|
||||
| Peer Finder | `Peer_Finder` | Gauge | 2 | `Active_Inbound_Peers`, `Active_Outbound_Peers` |
|
||||
| Overlay | `Overlay` | Gauge | 1 | `Peer_Disconnects` |
|
||||
| Overlay Traffic | per-category | Gauge | 4×57 = 228 | `Bytes_In/Out`, `Messages_In/Out` per traffic category |
|
||||
| Pathfinding | — | Event | 2 | `pathfind_fast`, `pathfind_full` (histograms) |
|
||||
| I/O | — | Event | 1 | `ios_latency` (histogram) |
|
||||
| Resource Mgr | — | Meter | 2 | `warn`, `drop` (rate counters) |
|
||||
| Caches | per-cache | Gauge | 2×N | `{cache}.size`, `{cache}.hit_rate` |
|
||||
|
||||
**Total**: ~255+ unique metrics (plus dynamic job-type and cache metrics)
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description |
|
||||
| ---- | --------------------------------------------------------------------------------------------------------------- |
|
||||
| 6.1 | **DEFERRED** Fix Meter wire format (`\|m` → `\|c`) in StatsDCollector.cpp — breaking change, tracked separately |
|
||||
| 6.2 | Add `statsd` receiver to OTel Collector config |
|
||||
| 6.3 | Expose UDP port 8125 in docker-compose.yml |
|
||||
| 6.4 | Add `[insight]` config to integration test node configs |
|
||||
| 6.5 | Create "Node Health" Grafana dashboard (8 panels) |
|
||||
| 6.6 | Create "Network Traffic" Grafana dashboard (8 panels) |
|
||||
| 6.7 | Create "RPC & Pathfinding (StatsD)" Grafana dashboard (8 panels) |
|
||||
| 6.8 | Update integration test to verify StatsD metrics in Prometheus |
|
||||
| 6.9 | Update TESTING.md and telemetry-runbook.md |
|
||||
|
||||
### Wire Format Fix (Task 6.1) — DEFERRED
|
||||
|
||||
The `StatsDMeterImpl` in `StatsDCollector.cpp:706` sends metrics with `|m` suffix, which is non-standard StatsD. The OTel StatsD receiver silently drops these. Fix: change `|m` to `|c` (counter), which is semantically correct since meters are increment-only counters. Only 2 metrics are affected (`warn`, `drop` in Resource Manager).
|
||||
|
||||
**Status**: Deferred as a separate change — this is a breaking change for any StatsD backend that previously consumed the custom `|m` type. The Resource Warnings and Resource Drops dashboard panels will show no data until this fix is applied.
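
For illustration only (the real change is a one-character edit in `StatsDCollector.cpp`, not reproduced here), the standard StatsD counter line that the OTel receiver expects looks like this:

```cpp
// A standard StatsD counter line is "name:value|c". The current meter
// implementation emits the non-standard "|m" suffix, which the OTel statsd
// receiver silently drops.
#include <cstdint>
#include <string>

std::string
formatMeterIncrement(std::string const& name, std::uint64_t amount)
{
    // Deferred fix: "|m" (custom meter type) -> "|c" (standard counter)
    return name + ":" + std::to_string(amount) + "|c\n";
}
```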
|
||||
|
||||
### New Grafana Dashboards
|
||||
|
||||
**Node Health** (`statsd-node-health.json`, uid: `rippled-statsd-node-health`):
|
||||
|
||||
- Validated/Published Ledger Age, Operating Mode Duration/Transitions, I/O Latency, Job Queue Depth, Ledger Fetch Rate, Ledger History Mismatches
|
||||
|
||||
**Network Traffic** (`statsd-network-traffic.json`, uid: `rippled-statsd-network`):
|
||||
|
||||
- Active Inbound/Outbound Peers, Peer Disconnects, Total Bytes/Messages In/Out, Transaction/Proposal/Validation Traffic, Top Traffic Categories
|
||||
|
||||
**RPC & Pathfinding (StatsD)** (`statsd-rpc-pathfinding.json`, uid: `rippled-statsd-rpc`):
|
||||
|
||||
- RPC Request Rate, Response Time p95/p50, Response Size p95/p50, Pathfinding Fast/Full Duration, Resource Warnings/Drops, Response Time Heatmap
|
||||
|
||||
### Exit Criteria
|
||||
|
||||
- [ ] StatsD metrics visible in Prometheus (`curl localhost:9090/api/v1/query?query=rippled_LedgerMaster_Validated_Ledger_Age`)
|
||||
- [ ] All 3 new Grafana dashboards load without errors
|
||||
- [ ] Integration test verifies at least core StatsD metrics (ledger age, peer counts, RPC requests)
|
||||
- [ ] ~~Meter metrics (`warn`, `drop`) flow correctly after `|m` → `|c` fix~~ — DEFERRED (breaking change, tracked separately)
|
||||
|
||||
---
|
||||
|
||||
## 6.9 Risk Assessment
|
||||
|
||||
```mermaid
|
||||
quadrantChart
|
||||
title Risk Assessment Matrix
|
||||
x-axis Low Impact --> High Impact
|
||||
y-axis Low Likelihood --> High Likelihood
|
||||
quadrant-1 Mitigate Immediately
|
||||
quadrant-2 Plan Mitigation
|
||||
quadrant-3 Accept Risk
|
||||
quadrant-4 Monitor Closely
|
||||
|
||||
SDK Compat: [0.2, 0.18]
|
||||
Protocol Chg: [0.75, 0.72]
|
||||
Perf Overhead: [0.58, 0.42]
|
||||
Context Prop: [0.4, 0.55]
|
||||
Memory Leaks: [0.85, 0.25]
|
||||
```
|
||||
|
||||
### Risk Details
|
||||
|
||||
| Risk | Likelihood | Impact | Mitigation |
|
||||
| ------------------------------------ | ---------- | ------ | --------------------------------------- |
|
||||
| Protocol changes break compatibility | Medium | High | Use high field numbers, optional fields |
|
||||
| Performance overhead unacceptable | Medium | Medium | Sampling, conditional compilation |
|
||||
| Context propagation complexity | Medium | Medium | Phased rollout, extensive testing |
|
||||
| SDK compatibility issues | Low | Medium | Pin SDK version, fallback to no-op |
|
||||
| Memory leaks in long-running nodes | Low | High | Memory profiling, bounded queues |
|
||||
|
||||
---
|
||||
|
||||
## 6.10 Success Metrics
|
||||
|
||||
| Metric | Target | Measurement |
|
||||
| ------------------------ | -------------------------------------------------------------- | --------------------- |
|
||||
| Trace coverage | >95% of transaction code paths (independent of sampling ratio) | Sampling verification |
|
||||
| CPU overhead | <3% | Benchmark tests |
|
||||
| Memory overhead | <10 MB | Memory profiling |
|
||||
| Latency impact (p99) | <2% | Performance tests |
|
||||
| Trace completeness | >99% spans with required attrs | Validation script |
|
||||
| Cross-node trace linkage | >90% of multi-hop transactions | Integration tests |
|
||||
|
||||
---
|
||||
|
||||
## 6.11 Quick Wins and Crawl-Walk-Run Strategy
|
||||
|
||||
> **TxQ** = Transaction Queue
|
||||
|
||||
This section outlines a prioritized approach to maximize ROI with minimal initial investment.
|
||||
|
||||
### 6.11.1 Crawl-Walk-Run Overview
|
||||
|
||||
<div align="center">
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph crawl["🐢 CRAWL (Week 1-2)"]
|
||||
direction LR
|
||||
c1[Core SDK Setup] ~~~ c2[RPC Tracing Only] ~~~ c3[PathFinding + TxQ Tracing] ~~~ c4[Single Node]
|
||||
end
|
||||
|
||||
subgraph walk["🚶 WALK (Week 3-5)"]
|
||||
direction LR
|
||||
w1[Transaction Tracing] ~~~ w2[Fee Escalation Tracing] ~~~ w3[Cross-Node Context] ~~~ w4[Basic Dashboards]
|
||||
end
|
||||
|
||||
subgraph run["🏃 RUN (Week 6-9)"]
|
||||
direction LR
|
||||
r1[Consensus Tracing] ~~~ r2[Validator, Amendment,<br/>SHAMap Tracing] ~~~ r3[Full Correlation] ~~~ r4[Production Deploy]
|
||||
end
|
||||
|
||||
crawl --> walk --> run
|
||||
|
||||
style crawl fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style walk fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style run fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style c1 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style c2 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style c3 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style c4 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style w1 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style w2 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style w3 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style w4 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style r1 fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style r2 fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style r3 fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style r4 fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
```
|
||||
|
||||
</div>
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **CRAWL (Weeks 1-2)**: Minimal investment -- set up the SDK, instrument RPC and PathFinding/TxQ handlers, and verify on a single node. Delivers immediate latency visibility.
|
||||
- **WALK (Weeks 3-5)**: Expand to transaction lifecycle tracing, fee escalation, cross-node context propagation, and basic Grafana dashboards. This is where distributed tracing starts working.
|
||||
- **RUN (Weeks 6-9)**: Full consensus instrumentation, validator/amendment/SHAMap tracing, end-to-end correlation, and production deployment with sampling and alerting.
|
||||
- **Arrows (crawl → walk → run)**: Each phase builds on the prior one; you cannot skip ahead because later phases depend on infrastructure established earlier.
|
||||
|
||||
### 6.11.2 Quick Wins (Immediate Value)
|
||||
|
||||
| Quick Win | Value | When to Deploy |
|
||||
| ------------------------------ | ------ | -------------- |
|
||||
| **RPC Command Tracing** | High | Week 2 |
|
||||
| **RPC Latency Histograms** | High | Week 2 |
|
||||
| **Error Rate Dashboard** | Medium | Week 2 |
|
||||
| **Transaction Submit Tracing** | High | Week 3 |
|
||||
| **Consensus Round Duration** | Medium | Week 6 |
|
||||
|
||||
### 6.11.3 CRAWL Phase (Weeks 1-2)
|
||||
|
||||
**Goal**: Get basic tracing working with minimal code changes.
|
||||
|
||||
**What You Get**:
|
||||
|
||||
- RPC request/response traces for all commands
|
||||
- Latency breakdown per RPC command
|
||||
- PathFinding and TxQ tracing (directly impacts RPC latency)
|
||||
- Error visibility with stack traces
|
||||
- Basic Grafana dashboard
|
||||
|
||||
**Code Changes**: ~15 lines in `ServerHandler.cpp`, ~40 lines in new telemetry module
|
||||
|
||||
**Why Start Here**:
|
||||
|
||||
- RPC is the lowest-risk, highest-visibility component
|
||||
- PathFinding and TxQ are RPC-adjacent and directly affect latency
|
||||
- Immediate value for debugging client issues
|
||||
- No cross-node complexity
|
||||
- Single file modification to existing code
|
||||
|
||||
### 6.11.4 WALK Phase (Weeks 3-5)
|
||||
|
||||
**Goal**: Add transaction lifecycle tracing across nodes.
|
||||
|
||||
**What You Get**:
|
||||
|
||||
- End-to-end transaction traces from submit to relay
|
||||
- Fee escalation tracing within the transaction pipeline
|
||||
- Cross-node correlation (see transaction path)
|
||||
- HashRouter deduplication visibility
|
||||
- Relay latency metrics
|
||||
|
||||
**Code Changes**: ~120 lines across 4 files, plus protobuf extension
|
||||
|
||||
**Why Do This Second**:
|
||||
|
||||
- Builds on RPC tracing (transactions submitted via RPC)
|
||||
- Fee escalation is integral to the transaction processing pipeline
|
||||
- Moderate complexity (requires context propagation)
|
||||
- High value for debugging transaction issues
|
||||
|
||||
### 6.11.5 RUN Phase (Weeks 6-9)
|
||||
|
||||
**Goal**: Full observability including consensus.
|
||||
|
||||
**What You Get**:
|
||||
|
||||
- Complete consensus round visibility
|
||||
- Phase transition timing
|
||||
- Validator proposal tracking
|
||||
- Validator list and manifest tracing
|
||||
- Amendment voting tracing
|
||||
- SHAMap sync tracing
|
||||
- Full end-to-end traces (client → RPC → TX → consensus → ledger)
|
||||
|
||||
**Code Changes**: ~100 lines across 3 consensus files, plus validator/amendment/SHAMap modules
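
As with the earlier phases, the per-file hook is small. A hedged sketch (class and method names are placeholders, not the real `RCLConsensus` code) of the round span carrying the attributes the dashboards later in this plan rely on:

```cpp
#include <opentelemetry/nostd/shared_ptr.h>
#include <opentelemetry/trace/provider.h>
#include <cstdint>

// Placeholder wrapper showing where the round span opens and closes.
class ConsensusRoundTracer
{
    opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> roundSpan_;

public:
    void onStartRound(std::uint32_t ledgerSeq)
    {
        auto tracer = opentelemetry::trace::Provider::GetTracerProvider()
                          ->GetTracer("xrpld.consensus");
        roundSpan_ = tracer->StartSpan("consensus.round");
        roundSpan_->SetAttribute(
            "xrpl.consensus.ledger.seq", static_cast<std::int64_t>(ledgerSeq));
    }

    void onAccept(std::uint32_t proposers)
    {
        if (!roundSpan_)
            return;
        roundSpan_->SetAttribute(
            "xrpl.consensus.proposers", static_cast<std::int64_t>(proposers));
        roundSpan_->End();
    }
};
```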
|
||||
|
||||
**Why Do This Last**:
|
||||
|
||||
- Highest complexity (consensus is critical path)
|
||||
- Validator, amendment, and SHAMap components are lower priority
|
||||
- Requires thorough testing
|
||||
- Lower relative value (consensus issues are rarer)
|
||||
|
||||
### 6.9.6 ROI Prioritization Matrix
|
||||
|
||||
```mermaid
|
||||
quadrantChart
|
||||
title Implementation ROI Matrix
|
||||
x-axis Low Effort --> High Effort
|
||||
y-axis Low Value --> High Value
|
||||
quadrant-1 Quick Wins - Do First
|
||||
quadrant-2 Major Projects - Plan Carefully
|
||||
quadrant-3 Nice to Have - Optional
|
||||
quadrant-4 Time Sinks - Avoid
|
||||
|
||||
RPC Tracing: [0.15, 0.92]
|
||||
TX Submit Trace: [0.3, 0.78]
|
||||
TX Relay Trace: [0.5, 0.88]
|
||||
Consensus Trace: [0.72, 0.72]
|
||||
Peer Msg Trace: [0.85, 0.3]
|
||||
Ledger Acquire: [0.55, 0.52]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 6.13 Definition of Done
|
||||
|
||||
> **TxQ** = Transaction Queue | **HA** = High Availability
|
||||
|
||||
Clear, measurable criteria for each phase.
|
||||
|
||||
### 6.13.1 Phase 1: Core Infrastructure
|
||||
|
||||
| Criterion | Measurement | Target |
|
||||
| --------------- | ---------------------------------------------------------- | ---------------------------- |
|
||||
| SDK Integration | Configure with `-DXRPL_ENABLE_TELEMETRY=ON`, then `cmake --build` succeeds | ✅ Compiles |
|
||||
| Runtime Toggle | `enabled=0` produces zero overhead | <0.1% CPU difference |
|
||||
| Span Creation | Unit test creates and exports span | Span appears in Tempo |
|
||||
| Configuration | All config options parsed correctly | Config validation tests pass |
|
||||
| Documentation | Developer guide exists | PR approved |
|
||||
|
||||
**Definition of Done**: All criteria met, PR merged, no regressions in CI.
|
||||
|
||||
### 6.13.2 Phase 2: RPC Tracing
|
||||
|
||||
| Criterion | Measurement | Target |
|
||||
| ------------------ | ---------------------------------- | -------------------------- |
|
||||
| Coverage | All RPC commands instrumented | 100% of commands |
|
||||
| Context Extraction | traceparent header propagates | Integration test passes |
|
||||
| Attributes | Command, status, duration recorded | Validation script confirms |
|
||||
| Performance | RPC latency overhead | <1ms p99 |
|
||||
| Dashboard | Grafana dashboard deployed | Screenshot in docs |
|
||||
|
||||
**Definition of Done**: RPC traces visible in Tempo for all commands, dashboard shows latency distribution.
|
||||
|
||||
### 6.13.3 Phase 3: Transaction Tracing
|
||||
|
||||
| Criterion | Measurement | Target |
|
||||
| --------------------- | ------------------------------------------------- | -------------------------------------------------------- |
|
||||
| Local Trace | Submit → validate → TxQ traced | Single-node test passes |
|
||||
| Cross-Node | Context propagates via protobuf | Multi-node test passes |
|
||||
| Deterministic TraceID | Same trace_id on all nodes for same tx | Multi-node test: query by txHash[0:16] returns all spans |
|
||||
| Relay Ordering | Protobuf span_id propagation creates parent-child | Tempo trace tree shows relay chain |
|
||||
| Graceful Degradation | Old peer drops trace_context | Spans still grouped by deterministic trace_id |
|
||||
| Relay Visibility | relay_count attribute correct | Spot check 100 txs |
|
||||
| HashRouter | Deduplication visible in trace | Duplicate txs show suppressed=true |
|
||||
| Performance | TX throughput overhead | <5% degradation |
|
||||
|
||||
**Definition of Done**: Transaction traces span 3+ nodes in test network with deterministic trace_id correlation, parent-child ordering via protobuf propagation, and performance within bounds.
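
The deterministic trace-ID criterion can be illustrated with a short sketch: the 128-bit trace ID is taken from the first 16 bytes of the transaction hash, so every node derives the same ID independently. How the ID is fed into span creation (for example via a custom `IdGenerator`) is left to the telemetry module; the function below shows only the derivation, and its name is illustrative:

```cpp
#include <opentelemetry/nostd/span.h>
#include <opentelemetry/trace/trace_id.h>
#include <array>
#include <cstdint>

// txHash is assumed to be the full 32-byte transaction hash; the trace ID
// uses txHash[0:16], exactly as the multi-node test queries it.
opentelemetry::trace::TraceId
traceIdFromTxHash(std::array<std::uint8_t, 32> const& txHash)
{
    return opentelemetry::trace::TraceId{
        opentelemetry::nostd::span<const std::uint8_t, 16>{txHash.data(), 16}};
}
```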
|
||||
|
||||
### 6.13.4 Phase 4: Consensus Tracing
|
||||
|
||||
| Criterion | Measurement | Target |
|
||||
| -------------------- | ----------------------------- | ------------------------- |
|
||||
| Round Tracing | startRound creates root span | Unit test passes |
|
||||
| Phase Visibility | All phases have child spans | Integration test confirms |
|
||||
| Proposer Attribution | Proposer ID in attributes | Spot check 50 rounds |
|
||||
| Timing Accuracy | Phase durations match PerfLog | <5% variance |
|
||||
| No Consensus Impact | Round timing unchanged | Performance test passes |
|
||||
|
||||
**Definition of Done**: Consensus rounds fully traceable, no impact on consensus timing.
|
||||
|
||||
### 6.13.5 Phase 5: Production Deployment
|
||||
|
||||
| Criterion | Measurement | Target |
|
||||
| ------------ | ---------------------------- | -------------------------- |
|
||||
| Collector HA | Multiple collectors deployed | No single point of failure |
|
||||
| Sampling | Tail sampling configured | 10% base + errors + slow |
|
||||
| Retention | Data retained per policy | 7 days hot, 30 days warm |
|
||||
| Alerting | Alerts configured | Error spike, high latency |
|
||||
| Runbook | Operator documentation | Approved by ops team |
|
||||
| Training | Team trained | Session completed |
|
||||
|
||||
**Definition of Done**: Telemetry running in production, operators trained, alerts active.
|
||||
|
||||
### 6.13.6 Success Metrics Summary
|
||||
|
||||
| Phase | Primary Metric | Secondary Metric | Deadline |
|
||||
| ------- | ---------------------- | --------------------------- | ------------- |
|
||||
| Phase 1 | SDK compiles and runs | Zero overhead when disabled | End of Week 2 |
|
||||
| Phase 2 | 100% RPC coverage | <1ms latency overhead | End of Week 4 |
|
||||
| Phase 3 | Cross-node traces work | <5% throughput impact | End of Week 6 |
|
||||
| Phase 4 | Consensus fully traced | No consensus timing impact | End of Week 8 |
|
||||
| Phase 5 | Production deployment | Operators trained | End of Week 9 |
|
||||
|
||||
---
|
||||
|
||||
## 6.14 Recommended Implementation Order
|
||||
|
||||
Based on ROI analysis, implement in this exact order:
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph week1["Week 1"]
|
||||
t1[1. OpenTelemetry SDK<br/>Conan/CMake integration]
|
||||
t2[2. Telemetry interface<br/>SpanGuard, config]
|
||||
end
|
||||
|
||||
subgraph week2["Week 2"]
|
||||
t3[3. RPC ServerHandler<br/>instrumentation]
|
||||
t4[4. Basic Tempo setup<br/>for testing]
|
||||
end
|
||||
|
||||
subgraph week3["Week 3"]
|
||||
t5[5. Transaction submit<br/>tracing]
|
||||
t6[6. Grafana dashboard<br/>v1]
|
||||
end
|
||||
|
||||
subgraph week4["Week 4"]
|
||||
t7[7. Protobuf context<br/>extension]
|
||||
t8[8. PeerImp tx.relay<br/>instrumentation]
|
||||
end
|
||||
|
||||
subgraph week5["Week 5"]
|
||||
t9[9. Multi-node<br/>integration tests]
|
||||
t10[10. Performance<br/>benchmarks]
|
||||
end
|
||||
|
||||
subgraph week6_8["Weeks 6-8"]
|
||||
t11[11. Consensus<br/>instrumentation]
|
||||
t12[12. Full integration<br/>testing]
|
||||
end
|
||||
|
||||
subgraph week9["Week 9"]
|
||||
t13[13. Production<br/>deployment]
|
||||
t14[14. Documentation<br/>& training]
|
||||
end
|
||||
|
||||
t1 --> t2 --> t3 --> t4
|
||||
t4 --> t5 --> t6
|
||||
t6 --> t7 --> t8
|
||||
t8 --> t9 --> t10
|
||||
t10 --> t11 --> t12
|
||||
t12 --> t13 --> t14
|
||||
|
||||
style week1 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style week2 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style week3 fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style week4 fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style week5 fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style week6_8 fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style week9 fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style t1 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style t2 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style t3 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style t4 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style t5 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style t6 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style t7 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style t8 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style t9 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style t10 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
|
||||
style t11 fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style t12 fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style t13 fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style t14 fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **Week 1 (tasks 1-2)**: Foundation work -- integrate the OpenTelemetry SDK via Conan/CMake and build the `Telemetry` interface with `SpanGuard` and config parsing.
|
||||
- **Week 2 (tasks 3-4)**: First observable output -- instrument `ServerHandler` for RPC tracing and stand up Tempo so developers can see traces immediately.
|
||||
- **Weeks 3-5 (tasks 5-10)**: Transaction lifecycle -- add submit tracing, build the first Grafana dashboard, extend protobuf for cross-node context, instrument `PeerImp` relay, then validate with multi-node integration tests and performance benchmarks.
|
||||
- **Weeks 6-8 (tasks 11-12)**: Consensus deep-dive -- instrument consensus rounds and phases, then run full integration testing across all instrumented paths.
|
||||
- **Week 9 (tasks 13-14)**: Go-live -- deploy to production with sampling/alerting configured, and deliver documentation and operator training.
|
||||
- **Arrow chain (t1 → ... → t14)**: Strict sequential dependency; each task's output is a prerequisite for the next.
|
||||
|
||||
---
|
||||
|
||||
_Previous: [Configuration Reference](./05-configuration-reference.md)_ | _Next: [Observability Backends](./07-observability-backends.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
|
||||
@@ -1,641 +0,0 @@
|
||||
# Observability Backend Recommendations
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Implementation Phases](./06-implementation-phases.md) | [Appendix](./08-appendix.md)
|
||||
|
||||
---
|
||||
|
||||
## 7.1 Development/Testing Backends
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
| Backend | Pros | Cons | Use Case |
|
||||
| ---------- | ----------------------------------- | ---------------------- | ------------------- |
|
||||
| **Tempo** | Cost-effective, Grafana integration | Requires Grafana stack | Local dev, CI, Prod |
|
||||
| **Zipkin** | Simple, lightweight | Limited feature set | Quick prototyping |
|
||||
|
||||
### Quick Start with Tempo
|
||||
|
||||
```bash
|
||||
# Start Tempo with OTLP support
|
||||
docker run -d --name tempo \
|
||||
-p 3200:3200 \
|
||||
-p 4317:4317 \
|
||||
-p 4318:4318 \
|
||||
grafana/tempo:2.6.1
|
||||
```
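
With the container running, an application can be pointed at it with a few lines of opentelemetry-cpp setup. This is a minimal sketch for local testing (endpoint, function name, and the simple processor are examples, not the production configuration):

```cpp
#include <opentelemetry/exporters/otlp/otlp_http_exporter_factory.h>
#include <opentelemetry/exporters/otlp/otlp_http_exporter_options.h>
#include <opentelemetry/nostd/shared_ptr.h>
#include <opentelemetry/sdk/trace/simple_processor_factory.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

void initLocalTempoTracing()
{
    opentelemetry::exporter::otlp::OtlpHttpExporterOptions opts;
    opts.url = "http://localhost:4318/v1/traces";  // Tempo's OTLP/HTTP port above

    auto exporter =
        opentelemetry::exporter::otlp::OtlpHttpExporterFactory::Create(opts);
    auto processor =
        opentelemetry::sdk::trace::SimpleSpanProcessorFactory::Create(
            std::move(exporter));
    auto provider = opentelemetry::sdk::trace::TracerProviderFactory::Create(
        std::move(processor));

    // Install as the global provider used by Provider::GetTracerProvider().
    opentelemetry::trace::Provider::SetTracerProvider(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::TracerProvider>(
            provider.release()));
}
```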
|
||||
|
||||
---
|
||||
|
||||
## 7.2 Production Backends
|
||||
|
||||
> **APM** = Application Performance Monitoring
|
||||
|
||||
| Backend | Pros | Cons | Use Case |
|
||||
| ----------------- | ----------------------------------------- | ---------------------- | --------------------------- |
|
||||
| **Grafana Tempo** | Cost-effective, Grafana integration | Requires Grafana stack | Most production deployments |
|
||||
| **Elastic APM** | Full observability stack, log correlation | Resource intensive | Existing Elastic users |
|
||||
| **Honeycomb** | Excellent query, high cardinality | SaaS cost | Deep debugging needs |
|
||||
| **Datadog APM** | Full platform, easy setup | SaaS cost | Enterprise with budget |
|
||||
|
||||
### Backend Selection Flowchart
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
start[Select Backend] --> budget{Budget<br/>Constraints?}
|
||||
|
||||
budget -->|Yes| oss[Open Source]
|
||||
budget -->|No| saas{Prefer<br/>SaaS?}
|
||||
|
||||
oss --> existing{Existing<br/>Stack?}
|
||||
existing -->|Grafana| tempo[Grafana Tempo]
|
||||
existing -->|Elastic| elastic[Elastic APM]
|
||||
existing -->|None| tempo
|
||||
|
||||
saas -->|Yes| enterprise{Enterprise<br/>Support?}
|
||||
saas -->|No| oss
|
||||
|
||||
enterprise -->|Yes| datadog[Datadog APM]
|
||||
enterprise -->|No| honeycomb[Honeycomb]
|
||||
|
||||
tempo --> final[Configure Collector]
|
||||
elastic --> final
|
||||
honeycomb --> final
|
||||
datadog --> final
|
||||
|
||||
style start fill:#0f172a,stroke:#020617,color:#fff
|
||||
style budget fill:#334155,stroke:#1e293b,color:#fff
|
||||
style oss fill:#1e293b,stroke:#0f172a,color:#fff
|
||||
style existing fill:#334155,stroke:#1e293b,color:#fff
|
||||
style saas fill:#334155,stroke:#1e293b,color:#fff
|
||||
style enterprise fill:#334155,stroke:#1e293b,color:#fff
|
||||
style final fill:#0f172a,stroke:#020617,color:#fff
|
||||
style tempo fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style elastic fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style honeycomb fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style datadog fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **Budget Constraints? (Yes)**: Leads to open-source options. If you already run Grafana or Elastic, pick the matching backend; otherwise default to Grafana Tempo.
|
||||
- **Budget Constraints? (No) → Prefer SaaS?**: If you want a managed service, choose between Datadog (enterprise support) and Honeycomb (developer-focused). If not, fall back to open-source.
|
||||
- **Terminal nodes (Tempo / Elastic / Honeycomb / Datadog)**: Each represents a concrete backend choice, all of which feed into the same final step.
|
||||
- **Configure Collector**: Regardless of backend, you always finish by configuring the OTel Collector to export to your chosen destination.
|
||||
|
||||
---
|
||||
|
||||
## 7.3 Recommended Production Architecture
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **APM** = Application Performance Monitoring | **HA** = High Availability
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph validators["Validator Nodes"]
|
||||
v1[xrpld<br/>Validator 1]
|
||||
v2[xrpld<br/>Validator 2]
|
||||
end
|
||||
|
||||
subgraph stock["Stock Nodes"]
|
||||
s1[xrpld<br/>Stock 1]
|
||||
s2[xrpld<br/>Stock 2]
|
||||
end
|
||||
|
||||
subgraph collector["OTel Collector Cluster"]
|
||||
c1[Collector<br/>DC1]
|
||||
c2[Collector<br/>DC2]
|
||||
end
|
||||
|
||||
subgraph backends["Storage Backends"]
|
||||
tempo[(Grafana<br/>Tempo)]
|
||||
elastic[(Elastic<br/>APM)]
|
||||
archive[(S3/GCS<br/>Archive)]
|
||||
end
|
||||
|
||||
subgraph ui["Visualization"]
|
||||
grafana[Grafana<br/>Dashboards]
|
||||
end
|
||||
|
||||
v1 -->|OTLP| c1
|
||||
v2 -->|OTLP| c1
|
||||
s1 -->|OTLP| c2
|
||||
s2 -->|OTLP| c2
|
||||
|
||||
c1 --> tempo
|
||||
c1 --> elastic
|
||||
c2 --> tempo
|
||||
c2 --> archive
|
||||
|
||||
tempo --> grafana
|
||||
elastic --> grafana
|
||||
|
||||
%% Note: simplified single-collector-per-DC topology shown for clarity
|
||||
|
||||
style validators fill:#b71c1c,stroke:#7f1d1d,color:#ffffff
|
||||
style stock fill:#0d47a1,stroke:#082f6a,color:#ffffff
|
||||
style collector fill:#bf360c,stroke:#8c2809,color:#ffffff
|
||||
style backends fill:#1b5e20,stroke:#0d3d14,color:#ffffff
|
||||
style ui fill:#4a148c,stroke:#2e0d57,color:#ffffff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **Validator / Stock Nodes**: All xrpld nodes emit trace data via OTLP. Validators and stock nodes are grouped separately because they may reside in different network zones.
|
||||
- **Collector Cluster (DC1, DC2)**: Regional collectors receive OTLP from nodes in their datacenter, apply processing (sampling, enrichment), and fan out to multiple backends.
|
||||
- **Storage Backends**: Tempo and Elastic provide queryable trace storage; S3/GCS Archive provides long-term cold storage for compliance or post-incident analysis.
|
||||
- **Grafana Dashboards**: The single visualization layer that queries both Tempo and Elastic, giving operators a unified view of all traces.
|
||||
- **Data flow direction**: Nodes → Collectors → Storage → Grafana. Each arrow represents a network hop; minimizing collector-to-backend hops reduces latency.
|
||||
|
||||
> **Note**: Production deployments should use multiple collector instances behind a load balancer for high availability. The diagram shows a simplified single-collector topology for clarity.
|
||||
|
||||
---
|
||||
|
||||
## 7.4 Architecture Considerations
|
||||
|
||||
### 7.4.1 Collector Placement
|
||||
|
||||
| Strategy | Description | Pros | Cons |
|
||||
| ------------- | -------------------- | ------------------------ | ----------------------- |
|
||||
| **Sidecar** | Collector per node | Isolation, simple config | Resource overhead |
|
||||
| **DaemonSet** | Collector per host | Shared resources | Complexity |
|
||||
| **Gateway** | Central collector(s) | Centralized processing | Single point of failure |
|
||||
|
||||
**Recommendation**: Use **Gateway** pattern with regional collectors for xrpld networks:
|
||||
|
||||
- One collector cluster per datacenter/region
|
||||
- Tail-based sampling at collector level
|
||||
- Multiple export destinations for redundancy
|
||||
|
||||
### 7.4.2 Sampling Strategy
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph head["Head Sampling (Node)"]
|
||||
hs[Node-level head sampling<br/>configurable, default: 100%<br/>recommended production: 10%]
|
||||
end
|
||||
|
||||
subgraph tail["Tail Sampling (Collector)"]
|
||||
ts1[Keep all errors]
|
||||
ts2[Keep slow >5s]
|
||||
ts3[Keep 10% rest]
|
||||
end
|
||||
|
||||
head --> tail
|
||||
|
||||
ts1 --> final[Final Traces]
|
||||
ts2 --> final
|
||||
ts3 --> final
|
||||
|
||||
style head fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style tail fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style hs fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style ts1 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style ts2 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style ts3 fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style final fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **Head Sampling (Node)**: The first filter -- each xrpld node decides whether to sample a trace at creation time (default 100%, recommended 10% in production). This controls the volume leaving the node.
|
||||
- **Tail Sampling (Collector)**: The second filter -- the collector inspects completed traces and applies rules: keep all errors, keep anything slower than 5 seconds, and keep 10% of the remainder.
|
||||
- **Arrow head → tail**: All head-sampled traces flow to the collector, where tail sampling further reduces volume while preserving the most valuable data.
|
||||
- **Final Traces**: The output after both sampling stages; this is what gets stored and queried. The two-stage approach balances cost with debuggability.
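
For the head-sampling stage, a minimal node-side sketch using the opentelemetry-cpp samplers (the ratio would come from the `[telemetry]` config; wiring the sampler into the tracer provider is assumed, not shown):

```cpp
#include <opentelemetry/sdk/trace/samplers/parent.h>
#include <opentelemetry/sdk/trace/samplers/trace_id_ratio.h>
#include <memory>

// ratio = 1.0 keeps everything (the default); 0.10 matches the recommended
// production setting. Parent-based wrapping respects upstream decisions, so a
// trace sampled on one node stays sampled when its context arrives here.
std::unique_ptr<opentelemetry::sdk::trace::Sampler> makeHeadSampler(double ratio)
{
    auto delegate =
        std::make_shared<opentelemetry::sdk::trace::TraceIdRatioBasedSampler>(ratio);
    return std::make_unique<opentelemetry::sdk::trace::ParentBasedSampler>(delegate);
}
```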
|
||||
|
||||
### 7.4.3 Data Retention
|
||||
|
||||
| Environment | Hot Storage | Warm Storage | Cold Archive |
|
||||
| ----------- | ----------- | ------------ | ------------ |
|
||||
| Development | 24 hours | N/A | N/A |
|
||||
| Staging | 7 days | N/A | N/A |
|
||||
| Production  | 7 days      | 30 days      | multi-year (S3/GCS archive) |
|
||||
|
||||
---
|
||||
|
||||
## 7.5 Integration Checklist
|
||||
|
||||
- [ ] Choose primary backend (Tempo recommended for cost/features)
|
||||
- [ ] Deploy collector cluster with high availability
|
||||
- [ ] Configure tail-based sampling for error/latency traces
|
||||
- [ ] Set up Grafana dashboards for trace visualization
|
||||
- [ ] Configure alerts for trace anomalies
|
||||
- [ ] Establish data retention policies
|
||||
- [ ] Test trace correlation with logs and metrics
|
||||
|
||||
---
|
||||
|
||||
## 7.6 Grafana Dashboard Examples
|
||||
|
||||
Pre-built dashboards for xrpld observability.
|
||||
|
||||
### 7.6.1 Consensus Health Dashboard
|
||||
|
||||
```json
|
||||
{
|
||||
"title": "xrpld Consensus Health",
|
||||
"uid": "xrpld-consensus-health",
|
||||
"tags": ["xrpld", "consensus", "tracing"],
|
||||
"panels": [
|
||||
{
|
||||
"title": "Consensus Round Duration",
|
||||
"type": "timeseries",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"consensus.round\"} | avg(duration) by (resource.service.instance.id)"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"thresholds": {
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 4000 },
|
||||
{ "color": "red", "value": 5000 }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Phase Duration Breakdown",
|
||||
"type": "barchart",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=~\"consensus.phase.*\"} | avg(duration) by (name)"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Proposers per Round",
|
||||
"type": "stat",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"consensus.round\"} | avg(span.xrpl.consensus.proposers)"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 4, "w": 6, "x": 0, "y": 8 }
|
||||
},
|
||||
{
|
||||
"title": "Recent Slow Rounds (>5s)",
|
||||
"type": "table",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"consensus.round\"} | duration > 5s"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 12 }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 7.6.2 Node Overview Dashboard
|
||||
|
||||
```json
|
||||
{
|
||||
"title": "xrpld Node Overview",
|
||||
"uid": "xrpld-node-overview",
|
||||
"panels": [
|
||||
{
|
||||
"title": "Active Nodes",
|
||||
"type": "stat",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\"} | count_over_time() by (resource.service.instance.id) | count()"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 4, "w": 4, "x": 0, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Total Transactions (1h)",
|
||||
"type": "stat",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"tx.receive\"} | count()"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 4, "w": 4, "x": 4, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Error Rate",
|
||||
"type": "gauge",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && status.code=error} | rate() / {resource.service.name=\"xrpld\"} | rate() * 100"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "percent",
|
||||
"max": 10,
|
||||
"thresholds": {
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 1 },
|
||||
{ "color": "red", "value": 5 }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 4, "w": 4, "x": 8, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Service Map",
|
||||
"type": "nodeGraph",
|
||||
"datasource": "Tempo",
|
||||
"gridPos": { "h": 12, "w": 12, "x": 12, "y": 0 }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 7.6.3 Alert Rules
|
||||
|
||||
```yaml
|
||||
# grafana/provisioning/alerting/rippled-alerts.yaml
|
||||
apiVersion: 1
|
||||
|
||||
groups:
|
||||
- name: xrpld-tracing-alerts
|
||||
folder: xrpld
|
||||
interval: 1m
|
||||
rules:
|
||||
- uid: consensus-slow
|
||||
title: Consensus Round Slow
|
||||
condition: A
|
||||
data:
|
||||
- refId: A
|
||||
datasourceUid: tempo
|
||||
model:
|
||||
queryType: traceql
|
||||
query: '{resource.service.name="xrpld" && name="consensus.round"} | avg(duration) > 5s'
|
||||
# Note: Verify TraceQL aggregate queries are supported by your
|
||||
# Tempo version. Aggregate alerting (e.g., avg(duration)) requires
|
||||
# Tempo 2.3+ with TraceQL metrics enabled.
|
||||
for: 5m
|
||||
annotations:
|
||||
summary: Consensus rounds taking >5 seconds
|
||||
description: "Consensus duration: {{ $value }}ms"
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
- uid: rpc-error-spike
|
||||
title: RPC Error Rate Spike
|
||||
condition: B
|
||||
data:
|
||||
- refId: B
|
||||
datasourceUid: tempo
|
||||
model:
|
||||
queryType: traceql
|
||||
query: '{resource.service.name="xrpld" && name=~"rpc.command.*" && status=error} | rate() > 0.05'
|
||||
# Note: Verify TraceQL aggregate queries are supported by your
|
||||
# Tempo version. Aggregate alerting (e.g., rate()) requires
|
||||
# Tempo 2.3+ with TraceQL metrics enabled.
|
||||
for: 2m
|
||||
annotations:
|
||||
summary: RPC error rate >5%
|
||||
labels:
|
||||
severity: critical
|
||||
|
||||
- uid: tx-throughput-drop
|
||||
title: Transaction Throughput Drop
|
||||
condition: C
|
||||
data:
|
||||
- refId: C
|
||||
datasourceUid: tempo
|
||||
model:
|
||||
queryType: traceql
|
||||
query: '{resource.service.name="xrpld" && name="tx.receive"} | rate() < 10'
|
||||
for: 10m
|
||||
annotations:
|
||||
summary: Transaction throughput below threshold
|
||||
labels:
|
||||
severity: warning
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7.7 PerfLog and Insight Correlation
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
How to correlate OpenTelemetry traces with existing xrpld observability.
|
||||
|
||||
### 7.7.1 Correlation Architecture
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph xrpld["xrpld Node"]
|
||||
otel[OpenTelemetry<br/>Spans]
|
||||
perflog[PerfLog<br/>JSON Logs]
|
||||
insight[Beast Insight<br/>StatsD Metrics]
|
||||
end
|
||||
|
||||
subgraph collectors["Data Collection"]
|
||||
otelc[OTel Collector]
|
||||
promtail[Promtail/Fluentd]
|
||||
statsd[StatsD Exporter]
|
||||
end
|
||||
|
||||
subgraph storage["Storage"]
|
||||
tempo[(Tempo)]
|
||||
loki[(Loki)]
|
||||
prom[(Prometheus)]
|
||||
end
|
||||
|
||||
subgraph grafana["Grafana"]
|
||||
traces[Trace View]
|
||||
logs[Log View]
|
||||
metrics[Metrics View]
|
||||
corr[Correlation<br/>Panel]
|
||||
end
|
||||
|
||||
otel -->|OTLP| otelc --> tempo
|
||||
perflog -->|JSON| promtail --> loki
|
||||
insight -->|StatsD| statsd --> prom
|
||||
|
||||
tempo --> traces
|
||||
loki --> logs
|
||||
prom --> metrics
|
||||
|
||||
traces --> corr
|
||||
logs --> corr
|
||||
metrics --> corr
|
||||
|
||||
style xrpld fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style collectors fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style storage fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style grafana fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style otel fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style perflog fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style insight fill:#0d47a1,stroke:#082f6a,color:#fff
|
||||
style otelc fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style promtail fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style statsd fill:#bf360c,stroke:#8c2809,color:#fff
|
||||
style tempo fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style loki fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style prom fill:#1b5e20,stroke:#0d3d14,color:#fff
|
||||
style traces fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style logs fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style metrics fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
style corr fill:#4a148c,stroke:#2e0d57,color:#fff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **xrpld Node (three sources)**: A single node emits three independent data streams -- OpenTelemetry spans, PerfLog JSON logs, and Beast Insight StatsD metrics.
|
||||
- **Data Collection layer**: Each stream has its own collector -- OTel Collector for spans, Promtail/Fluentd for logs, and a StatsD exporter for metrics. They operate independently.
|
||||
- **Storage layer (Tempo, Loki, Prometheus)**: Each data type lands in a purpose-built store optimized for its query patterns (trace search, log grep, metric aggregation).
|
||||
- **Grafana Correlation Panel**: The key integration point -- Grafana queries all three stores and links them via shared fields (`trace_id`, `xrpl.tx.hash`, `ledger_seq`), enabling a single-pane debugging experience.
|
||||
|
||||
### 7.7.2 Correlation Fields
|
||||
|
||||
| Source | Field | Link To | Purpose |
|
||||
| ----------- | --------------------------- | ------------- | -------------------------- |
|
||||
| **Trace** | `trace_id` | Logs | Find log entries for trace |
|
||||
| **Trace** | `xrpl.tx.hash` | Logs, Metrics | Find TX-related data |
|
||||
| **Trace** | `xrpl.consensus.ledger.seq` | Logs | Find ledger-related logs |
|
||||
| **PerfLog** | `trace_id` (new) | Traces | Jump to trace from log |
|
||||
| **PerfLog** | `ledger_seq` | Traces | Find consensus trace |
|
||||
| **Insight** | `exemplar.trace_id` | Traces | Jump from metric spike |
|
||||
|
||||
### 7.7.3 Example: Debugging a Slow Transaction
|
||||
|
||||
**Step 1: Find the trace**
|
||||
|
||||
```
|
||||
# In Grafana Explore with Tempo
|
||||
{resource.service.name="xrpld" && span.xrpl.tx.hash="ABC123..."}
|
||||
```
|
||||
|
||||
**Step 2: Get the trace_id from the trace view**
|
||||
|
||||
```
|
||||
Trace ID: 4bf92f3577b34da6a3ce929d0e0e4736
|
||||
```
|
||||
|
||||
**Step 3: Find related PerfLog entries**
|
||||
|
||||
```
|
||||
# In Grafana Explore with Loki
|
||||
{job="xrpld"} |= "4bf92f3577b34da6a3ce929d0e0e4736"
|
||||
```
|
||||
|
||||
**Step 4: Check Insight metrics for the time window**
|
||||
|
||||
```
|
||||
# In Grafana with Prometheus
|
||||
# @ pins the query to the trace's start time (substitute the Unix timestamp)
rate(xrpld_tx_applied_total[1m] @ <unix_timestamp_from_trace>)
|
||||
```
|
||||
|
||||
### 7.7.4 Unified Dashboard Example
|
||||
|
||||
```json
|
||||
{
|
||||
"title": "xrpld Unified Observability",
|
||||
"uid": "xrpld-unified",
|
||||
"panels": [
|
||||
{
|
||||
"title": "Transaction Latency (Traces)",
|
||||
"type": "timeseries",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\" && name=\"tx.receive\"} | histogram_over_time(duration)"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 6, "w": 8, "x": 0, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Transaction Rate (Metrics)",
|
||||
"type": "timeseries",
|
||||
"datasource": "Prometheus",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(xrpld_tx_received_total[5m])",
|
||||
"legendFormat": "{{ instance }}"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"links": [
|
||||
{
|
||||
"title": "View traces",
|
||||
"url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"{resource.service.name=\\\"xrpld\\\" && name=\\\"tx.receive\\\"}\"}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 6, "w": 8, "x": 8, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Recent Logs",
|
||||
"type": "logs",
|
||||
"datasource": "Loki",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "{job=\"xrpld\"} | json"
|
||||
}
|
||||
],
|
||||
"gridPos": { "h": 6, "w": 8, "x": 16, "y": 0 }
|
||||
},
|
||||
{
|
||||
"title": "Trace Search",
|
||||
"type": "table",
|
||||
"datasource": "Tempo",
|
||||
"targets": [
|
||||
{
|
||||
"queryType": "traceql",
|
||||
"query": "{resource.service.name=\"xrpld\"}"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": { "id": "byName", "options": "traceID" },
|
||||
"properties": [
|
||||
{
|
||||
"id": "links",
|
||||
"value": [
|
||||
{
|
||||
"title": "View trace",
|
||||
"url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"${__value.raw}\"}"
|
||||
},
|
||||
{
|
||||
"title": "View logs",
|
||||
"url": "/explore?left={\"datasource\":\"Loki\",\"query\":\"{job=\\\"xrpld\\\"} |= \\\"${__value.raw}\\\"\"}"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": { "h": 12, "w": 24, "x": 0, "y": 6 }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
_Previous: [Implementation Phases](./06-implementation-phases.md)_ | _Next: [Appendix](./08-appendix.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
|
||||
@@ -1,202 +0,0 @@
|
||||
# Appendix
|
||||
|
||||
> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
|
||||
> **Related**: [Observability Backends](./07-observability-backends.md)
|
||||
|
||||
---
|
||||
|
||||
## 8.1 Glossary
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **TxQ** = Transaction Queue
|
||||
|
||||
| Term | Definition |
|
||||
| --------------------- | ---------------------------------------------------------- |
|
||||
| **Span** | A unit of work with start/end time, name, and attributes |
|
||||
| **Trace** | A collection of spans representing a complete request flow |
|
||||
| **Trace ID** | 128-bit unique identifier for a trace |
|
||||
| **Span ID** | 64-bit unique identifier for a span within a trace |
|
||||
| **Context** | Carrier for trace/span IDs across boundaries |
|
||||
| **Propagator** | Component that injects/extracts context |
|
||||
| **Sampler** | Decides which traces to record |
|
||||
| **Exporter** | Sends spans to backend |
|
||||
| **Collector** | Receives, processes, and forwards telemetry |
|
||||
| **OTLP** | OpenTelemetry Protocol (wire format) |
|
||||
| **W3C Trace Context** | Standard HTTP headers for trace propagation |
|
||||
| **Baggage** | Key-value pairs propagated across service boundaries |
|
||||
| **Resource** | Entity producing telemetry (service, host, etc.) |
|
||||
| **Instrumentation** | Code that creates telemetry data |
|
||||
|
||||
### xrpld-Specific Terms
|
||||
|
||||
| Term | Definition |
|
||||
| ----------------- | ------------------------------------------------------------- |
|
||||
| **Overlay** | P2P network layer managing peer connections |
|
||||
| **Consensus** | XRP Ledger consensus algorithm (RCL) |
|
||||
| **Proposal** | Validator's suggested transaction set for a ledger |
|
||||
| **Validation** | Validator's signature on a closed ledger |
|
||||
| **HashRouter** | Component for transaction deduplication |
|
||||
| **JobQueue** | Thread pool for asynchronous task execution |
|
||||
| **PerfLog** | Existing performance logging system in xrpld |
|
||||
| **Beast Insight** | Existing metrics framework in xrpld |
|
||||
| **PathFinding** | Payment path computation engine for cross-currency payments |
|
||||
| **TxQ** | Transaction queue managing fee-based prioritization |
|
||||
| **LoadManager** | Dynamic fee escalation based on network load |
|
||||
| **SHAMap** | SHA-256 hash-based map (Merkle trie variant) for ledger state |
|
||||
|
||||
---
|
||||
|
||||
## 8.2 Span Hierarchy Visualization
|
||||
|
||||
> **TxQ** = Transaction Queue
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph trace["Trace: Transaction Lifecycle"]
|
||||
rpc["rpc.request<br/>(entry point)"]
|
||||
validate["tx.validate"]
|
||||
relay["tx.relay<br/>(parent span)"]
|
||||
|
||||
subgraph peers["Peer Spans"]
|
||||
p1["peer.send<br/>Peer A"]
|
||||
p2["peer.send<br/>Peer B"]
|
||||
p3["peer.send<br/>Peer C"]
|
||||
end
|
||||
|
||||
subgraph pathfinding["PathFinding Spans"]
|
||||
pathfind["pathfind.request"]
|
||||
pathcomp["pathfind.compute"]
|
||||
end
|
||||
|
||||
consensus["consensus.round"]
|
||||
apply["tx.apply"]
|
||||
|
||||
subgraph txqueue["TxQ Spans"]
|
||||
txq["txq.enqueue"]
|
||||
txqApply["txq.apply"]
|
||||
end
|
||||
|
||||
feeCalc["fee.escalate"]
|
||||
end
|
||||
|
||||
subgraph validators["Validator Spans"]
|
||||
valFetch["validator.list.fetch"]
|
||||
valManifest["validator.manifest"]
|
||||
end
|
||||
|
||||
rpc --> validate
|
||||
rpc --> pathfind
|
||||
pathfind --> pathcomp
|
||||
validate --> relay
|
||||
relay --> p1
|
||||
relay --> p2
|
||||
relay --> p3
|
||||
p1 -.->|"context propagation"| consensus
|
||||
consensus --> apply
|
||||
apply --> txq
|
||||
txq --> txqApply
|
||||
txq --> feeCalc
|
||||
|
||||
style trace fill:#0f172a,stroke:#020617,color:#fff
|
||||
style peers fill:#1e3a8a,stroke:#172554,color:#fff
|
||||
style pathfinding fill:#134e4a,stroke:#0f766e,color:#fff
|
||||
style txqueue fill:#064e3b,stroke:#047857,color:#fff
|
||||
style validators fill:#4c1d95,stroke:#6d28d9,color:#fff
|
||||
style rpc fill:#1d4ed8,stroke:#1e40af,color:#fff
|
||||
style validate fill:#047857,stroke:#064e3b,color:#fff
|
||||
style relay fill:#047857,stroke:#064e3b,color:#fff
|
||||
style p1 fill:#0e7490,stroke:#155e75,color:#fff
|
||||
style p2 fill:#0e7490,stroke:#155e75,color:#fff
|
||||
style p3 fill:#0e7490,stroke:#155e75,color:#fff
|
||||
style consensus fill:#fef3c7,stroke:#fde68a,color:#1e293b
|
||||
style apply fill:#047857,stroke:#064e3b,color:#fff
|
||||
style pathfind fill:#0e7490,stroke:#155e75,color:#fff
|
||||
style pathcomp fill:#0e7490,stroke:#155e75,color:#fff
|
||||
style txq fill:#047857,stroke:#064e3b,color:#fff
|
||||
style txqApply fill:#047857,stroke:#064e3b,color:#fff
|
||||
style feeCalc fill:#047857,stroke:#064e3b,color:#fff
|
||||
style valFetch fill:#6d28d9,stroke:#4c1d95,color:#fff
|
||||
style valManifest fill:#6d28d9,stroke:#4c1d95,color:#fff
|
||||
```
|
||||
|
||||
**Reading the diagram:**
|
||||
|
||||
- **rpc.request (blue, top)**: The entry point — every traced transaction starts as an RPC call; this root span is the parent of all downstream work.
|
||||
- **tx.validate and pathfind.request (green/teal, first fork)**: The RPC request fans out into transaction validation and, for cross-currency payments, a PathFinding branch (`pathfind.request` -> `pathfind.compute`).
|
||||
- **tx.relay -> Peer Spans (teal, middle)**: After validation, the transaction is relayed to peers A, B, and C in parallel; each `peer.send` is a sibling child span showing fan-out across the network.
|
||||
- **context propagation (dashed arrow)**: The dotted line from `peer.send Peer A` to `consensus.round` represents the trace context crossing a node boundary — the receiving validator picks up the same `trace_id` and continues the trace.
|
||||
- **consensus.round -> tx.apply -> TxQ Spans (green, lower)**: Once consensus accepts the transaction, it is applied to the ledger; the TxQ spans (`txq.enqueue`, `txq.apply`, `fee.escalate`) capture queue depth and fee escalation behavior.
|
||||
- **Validator Spans (purple, detached)**: `validator.list.fetch` and `validator.manifest` are independent workflows for UNL management — they run on their own traces and are linked to consensus via Span Links, not parent-child relationships.
|
||||
|
||||
---
|
||||
|
||||
## 8.3 References
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
### OpenTelemetry Resources
|
||||
|
||||
1. [OpenTelemetry C++ SDK](https://github.com/open-telemetry/opentelemetry-cpp)
|
||||
2. [OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel/)
|
||||
3. [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/)
|
||||
4. [OTLP Protocol Specification](https://opentelemetry.io/docs/specs/otlp/)
|
||||
|
||||
### Standards
|
||||
|
||||
5. [W3C Trace Context](https://www.w3.org/TR/trace-context/)
|
||||
6. [W3C Baggage](https://www.w3.org/TR/baggage/)
|
||||
7. [Protocol Buffers](https://protobuf.dev/)
|
||||
|
||||
### xrpld Resources
|
||||
|
||||
8. [xrpld Source Code](https://github.com/XRPLF/rippled)
|
||||
9. [XRP Ledger Documentation](https://xrpl.org/docs/)
|
||||
10. [xrpld Overlay README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/overlay/README.md)
|
||||
11. [xrpld RPC README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/rpc/README.md)
|
||||
12. [xrpld Consensus README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/app/consensus/README.md)
|
||||
|
||||
---
|
||||
|
||||
## 8.4 Version History
|
||||
|
||||
| Version | Date | Author | Changes |
|
||||
| ------- | ---------- | ------ | -------------------------------------------------------------- |
|
||||
| 1.0 | 2026-02-12 | - | Initial implementation plan |
|
||||
| 1.1 | 2026-02-13 | - | Refactored into modular documents |
|
||||
| 1.2 | 2026-03-24 | - | Review fixes: accuracy corrections, cross-document consistency |
|
||||
|
||||
---
|
||||
|
||||
## 8.5 Document Index
|
||||
|
||||
### Plan Documents
|
||||
|
||||
| Document | Description |
|
||||
| -------------------------------------------------------------------- | -------------------------------------------- |
|
||||
| [OpenTelemetryPlan.md](./OpenTelemetryPlan.md) | Master overview and executive summary |
|
||||
| [00-tracing-fundamentals.md](./00-tracing-fundamentals.md) | Distributed tracing concepts and OTel primer |
|
||||
| [01-architecture-analysis.md](./01-architecture-analysis.md) | xrpld architecture and trace points |
|
||||
| [02-design-decisions.md](./02-design-decisions.md) | SDK selection, exporters, span conventions |
|
||||
| [03-implementation-strategy.md](./03-implementation-strategy.md) | Directory structure, performance analysis |
|
||||
| [04-code-samples.md](./04-code-samples.md) | C++ code examples for all components |
|
||||
| [05-configuration-reference.md](./05-configuration-reference.md) | xrpld config, CMake, Collector configs |
|
||||
| [06-implementation-phases.md](./06-implementation-phases.md) | Timeline, tasks, risks, success metrics |
|
||||
| [07-observability-backends.md](./07-observability-backends.md) | Backend selection and architecture |
|
||||
| [08-appendix.md](./08-appendix.md) | Glossary, references, version history |
|
||||
| [09-data-collection-reference.md](./09-data-collection-reference.md) | Span/metric/dashboard inventory |
|
||||
| [presentation.md](./presentation.md) | Slide deck for OTel plan overview |
|
||||
|
||||
### Task Lists
|
||||
|
||||
| Document | Description |
|
||||
| -------------------------------------------------------------------------- | --------------------------------------------------- |
|
||||
| [POC_taskList.md](./POC_taskList.md) | Proof-of-concept telemetry integration |
|
||||
| [Phase2_taskList.md](./Phase2_taskList.md) | RPC layer trace instrumentation |
|
||||
| [Phase3_taskList.md](./Phase3_taskList.md) | Peer overlay & consensus tracing |
|
||||
| [Phase4_taskList.md](./Phase4_taskList.md) | Transaction lifecycle tracing |
|
||||
| [Phase5_taskList.md](./Phase5_taskList.md) | Ledger processing & advanced tracing |
|
||||
| [Phase5_IntegrationTest_taskList.md](./Phase5_IntegrationTest_taskList.md) | Observability stack integration tests |
|
||||
| [presentation.md](./presentation.md) | Presentation slides for OpenTelemetry plan overview |
|
||||
|
||||
---
|
||||
|
||||
_Previous: [Observability Backends](./07-observability-backends.md)_ | _Back to: [Overview](./OpenTelemetryPlan.md)_
|
||||
@@ -1,719 +0,0 @@
|
||||
# Observability Data Collection Reference
|
||||
|
||||
> **Audience**: Developers and operators. This is the single source of truth for all telemetry data collected by rippled's observability stack.
|
||||
>
|
||||
> **Related docs**: [docs/telemetry-runbook.md](../docs/telemetry-runbook.md) (operator runbook with alerting and troubleshooting) | [03-implementation-strategy.md](./03-implementation-strategy.md) (code structure and performance optimization) | [04-code-samples.md](./04-code-samples.md) (C++ instrumentation examples)
|
||||
|
||||
## Data Flow Overview
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph rippledNode["rippled Node"]
|
||||
A["Trace Macros<br/>XRPL_TRACE_SPAN<br/>(OTLP/HTTP exporter)"]
|
||||
B["beast::insight<br/>StatsD metrics<br/>(UDP sender)"]
|
||||
end
|
||||
|
||||
subgraph collector["OTel Collector :4317 / :4318 / :8125"]
|
||||
direction TB
|
||||
R1["OTLP Receiver<br/>:4317 gRPC | :4318 HTTP"]
|
||||
R2["StatsD Receiver<br/>:8125 UDP"]
|
||||
BP["Batch Processor<br/>timeout 1s, batch 100"]
|
||||
SM["SpanMetrics Connector<br/>derives RED metrics<br/>from trace spans"]
|
||||
|
||||
R1 --> BP
|
||||
BP --> SM
|
||||
end
|
||||
|
||||
subgraph backends["Trace Backend"]
|
||||
D["Grafana Tempo :3200<br/>TraceQL search &<br/>S3/GCS long-term storage"]
|
||||
end
|
||||
|
||||
subgraph metrics["Metrics Stack"]
|
||||
E["Prometheus :9090<br/>scrapes :8889<br/>span-derived + StatsD metrics"]
|
||||
end
|
||||
|
||||
subgraph viz["Visualization"]
|
||||
F["Grafana :3000<br/>10 dashboards"]
|
||||
end
|
||||
|
||||
A -->|"OTLP/HTTP :4318<br/>(traces + attributes)"| R1
|
||||
B -->|"UDP :8125<br/>(gauges, counters, timers)"| R2
|
||||
|
||||
BP -->|"OTLP/gRPC :4317"| D
|
||||
|
||||
SM -->|"span_calls_total<br/>span_duration_ms<br/>(6 dimension labels)"| E
|
||||
R2 -->|"rippled_* gauges<br/>rippled_* counters<br/>rippled_* summaries"| E
|
||||
|
||||
E -->|"Prometheus<br/>data source"| F
|
||||
D -->|"Tempo<br/>data source"| F
|
||||
|
||||
style A fill:#4a90d9,color:#fff,stroke:#2a6db5
|
||||
style B fill:#d9534f,color:#fff,stroke:#b52d2d
|
||||
style R1 fill:#5cb85c,color:#fff,stroke:#3d8b3d
|
||||
style R2 fill:#5cb85c,color:#fff,stroke:#3d8b3d
|
||||
style BP fill:#449d44,color:#fff,stroke:#2d6e2d
|
||||
style SM fill:#449d44,color:#fff,stroke:#2d6e2d
|
||||
style D fill:#f0ad4e,color:#000,stroke:#c78c2e
|
||||
style E fill:#f0ad4e,color:#000,stroke:#c78c2e
|
||||
style F fill:#5bc0de,color:#000,stroke:#3aa8c1
|
||||
style rippledNode fill:#1a2633,color:#ccc,stroke:#4a90d9
|
||||
style collector fill:#1a3320,color:#ccc,stroke:#5cb85c
|
||||
style backends fill:#332a1a,color:#ccc,stroke:#f0ad4e
|
||||
style metrics fill:#332a1a,color:#ccc,stroke:#f0ad4e
|
||||
style viz fill:#1a2d33,color:#ccc,stroke:#5bc0de
|
||||
```
|
||||
|
||||
There are two independent telemetry pipelines entering a single **OTel Collector**:
|
||||
|
||||
1. **OpenTelemetry Traces** — Distributed spans with attributes, exported via OTLP/HTTP (:4318) to the collector's **OTLP Receiver**. The **Batch Processor** groups spans (1s timeout, batch size 100) before forwarding to trace backends. The **SpanMetrics Connector** derives RED metrics (rate, errors, duration) from every span and feeds them into the metrics pipeline.
|
||||
2. **beast::insight StatsD** — System-level gauges, counters, and timers emitted as StatsD UDP packets to port :8125, ingested by the collector's **StatsD Receiver**, and exported alongside span-derived metrics to Prometheus.
|
||||
|
||||
**Trace backend** — The collector exports traces via OTLP/gRPC to:
|
||||
|
||||
- **Grafana Tempo** — Preferred trace backend. Supports TraceQL queries at `:3200`, S3/GCS object storage for cost-effective long-term trace retention, and integrates natively with Grafana.
|
||||
|
||||
> **Further reading**: [00-tracing-fundamentals.md](./00-tracing-fundamentals.md) for core OpenTelemetry concepts (traces, spans, context propagation, sampling). [07-observability-backends.md](./07-observability-backends.md) for production backend selection, collector placement, and sampling strategies.
|
||||
|
||||
---
|
||||
|
||||
## 1. OpenTelemetry Spans
|
||||
|
||||
### 1.1 Complete Span Inventory (35 spans)
|
||||
|
||||
> **See also**: [02-design-decisions.md §2.3](./02-design-decisions.md#23-span-naming-conventions) for naming conventions and the full span catalog with rationale. [04-code-samples.md §4.6](./04-code-samples.md#46-span-flow-visualization) for span flow diagrams.
|
||||
|
||||
#### RPC Spans
|
||||
|
||||
Controlled by `trace_rpc=1` in `[telemetry]` config.
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| -------------------- | ------------------ | ----------------- | ------------------------------------------------------------------------ |
|
||||
| `rpc.http_request` | — | ServerHandler.cpp | Top-level HTTP RPC request entry point |
|
||||
| `rpc.process` | `rpc.http_request` | ServerHandler.cpp | RPC processing pipeline |
|
||||
| `rpc.ws_message` | — | ServerHandler.cpp | WebSocket message handling |
|
||||
| `rpc.ws_upgrade` | — | ServerHandler.cpp | WebSocket upgrade handshake (error path) |
|
||||
| `rpc.command.<name>` | `rpc.process` | RPCHandler.cpp | Per-command span (e.g., `rpc.command.server_info`, `rpc.command.ledger`) |
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name=~"rpc.http_request|rpc.command.*"}`
|
||||
|
||||
**Grafana dashboard**: _RPC Performance_ (`rippled-rpc-perf`)
|
||||
|
||||
#### Transaction Spans
|
||||
|
||||
Controlled by `trace_transactions=1` in `[telemetry]` config.
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| ------------ | -------------- | --------------- | ----------------------------------------------------------------- |
|
||||
| `tx.process` | — | NetworkOPs.cpp | Transaction submission entry point (local or peer-relayed) |
|
||||
| `tx.receive` | — | PeerImp.cpp | Raw transaction received from peer overlay (before deduplication) |
|
||||
| `tx.apply` | `ledger.build` | BuildLedger.cpp | Transaction set applied to new ledger during consensus |
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name=~"tx.process|tx.receive"}`
|
||||
|
||||
**Grafana dashboard**: _Transaction Overview_ (`rippled-transactions`)
|
||||
|
||||
#### PathFind Spans
|
||||
|
||||
Controlled by `trace_rpc=1` in `[telemetry]` config (pathfinding spans fire within RPC request handling).
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| --------------------- | ------------------ | ---------------- | -------------------------------------------------------- |
|
||||
| `pathfind.request` | `rpc.command.*` | PathRequests.cpp | RPC entry for path_find / ripple_path_find |
|
||||
| `pathfind.compute` | `pathfind.request` | PathRequest.cpp | Single path computation (doUpdate) |
|
||||
| `pathfind.update_all` | — | PathRequests.cpp | Async recomputation of all active path requests on close |
|
||||
| `pathfind.discover` | `pathfind.compute` | Pathfinder.cpp | Graph exploration phase (Pathfinder::find) |
|
||||
| `pathfind.rank` | `pathfind.compute` | Pathfinder.cpp | Path ranking and selection phase |
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name=~"pathfind.*"}`
|
||||
|
||||
**Grafana dashboard**: _RPC & Pathfinding (StatsD)_ (`rippled-statsd-rpc`) for StatsD timers; span-derived metrics via _RPC Performance_ (`rippled-rpc-perf`)
|
||||
|
||||
#### TxQ Spans
|
||||
|
||||
Controlled by `trace_transactions=1` in `[telemetry]` config.
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| ------------------ | ------------- | ----------- | ---------------------------------------------------- |
|
||||
| `txq.enqueue` | `tx.process` | TxQ.cpp | Queue admission decision (apply/queue/reject) |
|
||||
| `txq.apply_direct` | `txq.enqueue` | TxQ.cpp | Direct application attempt (bypassing queue) |
|
||||
| `txq.batch_clear` | `txq.enqueue` | TxQ.cpp | Batch clear of account's queued transactions |
|
||||
| `txq.accept` | — | TxQ.cpp | Ledger-close accept loop (drain queued transactions) |
|
||||
| `txq.accept.tx` | `txq.accept` | TxQ.cpp | Per-transaction apply within accept loop |
|
||||
| `txq.cleanup` | — | TxQ.cpp | Post-close cleanup (expire old transactions) |
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name=~"txq.*"}`
|
||||
|
||||
**Grafana dashboard**: _Transaction Overview_ (`rippled-transactions`)
|
||||
|
||||
#### gRPC Spans
|
||||
|
||||
Controlled by `trace_rpc=1` in `[telemetry]` config.
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| -------------- | ------ | -------------- | ----------------------------------------------------------------------------- |
|
||||
| `grpc.request` | — | GRPCServer.cpp | Single gRPC request (GetLedger, GetLedgerData, GetLedgerDiff, GetLedgerEntry) |
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name="grpc.request"}`
|
||||
|
||||
#### Consensus Spans
|
||||
|
||||
Controlled by `trace_consensus=1` in `[telemetry]` config.
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| ---------------------------- | ----------------- | ---------------- | ----------------------------------------------------- |
|
||||
| `consensus.round` | — | RCLConsensus.cpp | Top-level round span (deterministic trace ID) |
|
||||
| `consensus.proposal.send` | `consensus.round` | RCLConsensus.cpp | Node broadcasts its transaction set proposal |
|
||||
| `consensus.ledger_close` | `consensus.round` | RCLConsensus.cpp | Ledger close event triggered by consensus |
|
||||
| `consensus.establish` | `consensus.round` | Consensus.h | Establish phase — convergence loop |
|
||||
| `consensus.update_positions` | `consensus.round` | Consensus.h | Update positions during establish phase |
|
||||
| `consensus.check` | `consensus.round` | Consensus.h | Check for consensus agreement |
|
||||
| `consensus.accept` | `consensus.round` | RCLConsensus.cpp | Consensus accepts a ledger (round complete) |
|
||||
| `consensus.accept.apply` | `consensus.round` | RCLConsensus.cpp | Ledger application with close time details |
|
||||
| `consensus.validation.send` | `consensus.round` | RCLConsensus.cpp | Validation message sent after ledger accepted |
|
||||
| `consensus.mode_change` | `consensus.round` | RCLConsensus.cpp | Consensus mode transition (e.g., tracking->proposing) |
|
||||
|
||||
> **Note**: `toDisplayString(ConsensusMode)` (in `ConsensusTypes.h`) provides Title Case display names for mode attribute values: `"Proposing"`, `"Observing"`, `"Wrong Ledger"`, `"Switched Ledger"`. This is separate from `to_string()` which returns stable log-format strings.
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name=~"consensus.*"}`
|
||||
|
||||
**Grafana dashboard**: _Consensus Health_ (`rippled-consensus`)
|
||||
|
||||
#### Ledger Spans
|
||||
|
||||
Controlled by `trace_ledger=1` in `[telemetry]` config.
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| ----------------- | ------ | ---------------- | ---------------------------------------------- |
|
||||
| `ledger.build` | — | BuildLedger.cpp | Build new ledger from accepted transaction set |
|
||||
| `ledger.validate` | — | LedgerMaster.cpp | Ledger promoted to validated status |
|
||||
| `ledger.store` | — | LedgerMaster.cpp | Ledger stored to database/history |
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name=~"ledger.*"}`
|
||||
|
||||
**Grafana dashboard**: _Ledger Operations_ (`rippled-ledger-ops`)
|
||||
|
||||
#### Peer Spans
|
||||
|
||||
Controlled by `trace_peer=1` in `[telemetry]` config. **Disabled by default** (high volume).
|
||||
|
||||
| Span Name | Parent | Source File | Description |
|
||||
| ------------------------- | ------ | ----------- | ------------------------------------- |
|
||||
| `peer.proposal.receive` | — | PeerImp.cpp | Consensus proposal received from peer |
|
||||
| `peer.validation.receive` | — | PeerImp.cpp | Validation message received from peer |
|
||||
|
||||
**Where to find**: Tempo → TraceQL: `{resource.service.name="rippled" && name=~"peer.*"}`
|
||||
|
||||
**Grafana dashboard**: _Peer Network_ (`rippled-peer-net`)
|
||||
|
||||
---
|
||||
|
||||
### 1.2 Complete Attribute Inventory (81 attributes)
|
||||
|
||||
> **See also**: [02-design-decisions.md §2.4.2](./02-design-decisions.md#242-span-attributes-by-category) for attribute design rationale and privacy considerations.
|
||||
|
||||
Every span can carry key-value attributes that provide context for filtering and aggregation.
|
||||
|
||||
#### RPC Attributes
|
||||
|
||||
| Attribute | Type | Set On | Description |
|
||||
| ----------------------- | ------ | --------------- | ------------------------------------------------ |
|
||||
| `xrpl.rpc.command` | string | `rpc.command.*` | RPC command name (e.g., `server_info`, `ledger`) |
|
||||
| `xrpl.rpc.version` | int64 | `rpc.command.*` | API version number |
|
||||
| `xrpl.rpc.role` | string | `rpc.command.*` | Caller role: `"admin"` or `"user"` |
|
||||
| `xrpl.rpc.status` | string | `rpc.command.*` | Result: `"success"` or `"error"` |
|
||||
| `xrpl.rpc.payload_size` | int64 | `rpc.command.*` | Request payload size in bytes |
|
||||
|
||||
**Tempo query**: `{span.xrpl.rpc.command="server_info"}` to find all `server_info` calls.
|
||||
|
||||
**Prometheus label**: `xrpl_rpc_command` (dots converted to underscores by SpanMetrics).
|
||||
|
||||
#### Transaction Attributes

| Attribute | Type | Set On | Description |
| -------------------- | ------- | -------------------------- | ---------------------------------------------------- |
| `xrpl.tx.hash` | string | `tx.process`, `tx.receive` | Transaction hash (hex-encoded) |
| `xrpl.tx.local` | boolean | `tx.process` | `true` if locally submitted, `false` if peer-relayed |
| `xrpl.tx.path` | string | `tx.process` | Submission path: `"sync"` or `"async"` |
| `xrpl.tx.suppressed` | boolean | `tx.receive` | `true` if transaction was suppressed (duplicate) |
| `xrpl.tx.status` | string | `tx.receive` | Transaction status (e.g., `"known_bad"`) |
| `xrpl.peer.id` | int64 | `tx.receive` | Peer identifier (also set on peer spans) |
| `xrpl.peer.version` | string | `tx.receive` | Peer protocol version string |

**Tempo query**: `{span.xrpl.tx.hash="<hash>"}` to trace a specific transaction across nodes.

**Prometheus label**: `xrpl_tx_local` (used as SpanMetrics dimension).
#### PathFind Attributes

| Attribute | Type | Set On | Description |
| ---------------------------------- | ------- | --------------------- | ----------------------------------------------- |
| `xrpl.pathfind.source_account` | string | `pathfind.request` | Source account address |
| `xrpl.pathfind.dest_account` | string | `pathfind.request` | Destination account address |
| `xrpl.pathfind.fast` | boolean | `pathfind.compute` | Whether this is a fast (non-full) pathfind |
| `xrpl.pathfind.search_level` | int64 | `pathfind.compute` | Search depth level |
| `xrpl.pathfind.num_complete_paths` | int64 | `pathfind.compute` | Number of complete paths found |
| `xrpl.pathfind.num_paths` | int64 | `pathfind.compute` | Total number of paths explored |
| `xrpl.pathfind.num_requests` | int64 | `pathfind.update_all` | Number of active path requests being recomputed |
| `xrpl.pathfind.ledger_index` | int64 | `pathfind.update_all` | Ledger index used for recomputation |

**Tempo query**: `{span.xrpl.pathfind.source_account="rHb9..."}` to find pathfind requests from a specific account.
#### TxQ Attributes

| Attribute | Type | Set On | Description |
| ----------------------------- | ------- | ------------------------------ | ---------------------------------------------------------- |
| `xrpl.txq.tx_hash` | string | `txq.enqueue`, `txq.accept.tx` | Transaction hash in the queue |
| `xrpl.txq.status` | string | `txq.enqueue` | Queue result: `"queued"`, `"applied_direct"`, `"rejected"` |
| `xrpl.txq.fee_level_paid` | int64 | `txq.enqueue` | Fee level paid by the transaction |
| `xrpl.txq.required_fee_level` | int64 | `txq.enqueue` | Minimum fee level required for queue admission |
| `xrpl.txq.queue_size` | int64 | `txq.accept` | Queue depth at start of accept |
| `xrpl.txq.ledger_changed` | boolean | `txq.accept` | Whether the open ledger changed since last accept |
| `xrpl.txq.ledger_seq` | int64 | `txq.cleanup` | Ledger sequence for cleanup |
| `xrpl.txq.expired_count` | int64 | `txq.cleanup` | Number of expired transactions removed |
| `xrpl.txq.ter_code` | string | `txq.accept.tx` | Transaction engine result code |
| `xrpl.txq.retries_remaining` | int64 | `txq.accept.tx` | Remaining retry attempts for this transaction |
| `xrpl.txq.num_cleared` | int64 | `txq.batch_clear` | Number of transactions cleared in batch |

**Tempo query**: `{span.xrpl.txq.status="rejected"}` to find rejected queue attempts.
#### gRPC Attributes

| Attribute | Type | Set On | Description |
| ------------------ | ------ | -------------- | ------------------------------------------------------------ |
| `xrpl.grpc.method` | string | `grpc.request` | gRPC method name (e.g., `GetLedger`, `GetLedgerData`) |
| `xrpl.grpc.role` | string | `grpc.request` | Caller role: `"admin"` or `"user"` |
| `xrpl.grpc.status` | string | `grpc.request` | Result: `"success"`, `"error"`, `"resource_exhausted"`, etc. |

**Tempo query**: `{span.xrpl.grpc.method="GetLedger"}` to find gRPC ledger requests.
#### Consensus Attributes

| Attribute | Type | Set On | Description |
| ------------------------------------------ | ------- | ------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- |
| `xrpl.consensus.ledger_id` | string | `consensus.round` | Previous ledger hash (used for deterministic trace ID) |
| `xrpl.consensus.ledger.seq` | int64 | `consensus.round`, `consensus.ledger_close`, `consensus.accept`, `consensus.validation.send`, `consensus.accept.apply` | Ledger sequence number |
| `xrpl.consensus.mode` | string | `consensus.round`, `consensus.proposal.send`, `consensus.ledger_close` | Node mode via `toDisplayString()`: `"Proposing"`, `"Observing"`, etc. |
| `xrpl.consensus.round` | int64 | `consensus.proposal.send` | Consensus round number |
| `xrpl.consensus.proposers` | int64 | `consensus.proposal.send`, `consensus.accept` | Number of proposers in the round |
| `xrpl.consensus.round_time_ms` | int64 | `consensus.accept`, `consensus.accept.apply` | Total consensus round duration in milliseconds |
| `xrpl.consensus.proposing` | boolean | `consensus.validation.send` | Whether this node was a proposer |
| `xrpl.consensus.state` | string | `consensus.accept.apply` | Consensus outcome: `"finished"` or `"moved_on"` |
| `xrpl.consensus.close_time` | int64 | `consensus.accept.apply` | Agreed-upon ledger close time (epoch seconds) |
| `xrpl.consensus.close_time_correct` | boolean | `consensus.accept.apply` | Whether validators reached agreement on close time |
| `xrpl.consensus.close_resolution_ms` | int64 | `consensus.accept.apply` | Close time rounding granularity in milliseconds |
| `xrpl.consensus.parent_close_time` | int64 | `consensus.accept.apply` | Parent ledger's close time (epoch seconds) |
| `xrpl.consensus.close_time_self` | int64 | `consensus.accept.apply` | This node's proposed close time |
| `xrpl.consensus.close_time_vote_bins` | string | `consensus.accept.apply` | Histogram of close time votes from validators |
| `xrpl.consensus.resolution_direction` | string | `consensus.accept.apply` | Resolution change: `"increased"`, `"decreased"`, or `"unchanged"` |
| `xrpl.consensus.converge_percent` | int64 | `consensus.establish` | Convergence percentage threshold |
| `xrpl.consensus.establish_count` | int64 | `consensus.establish` | Number of establish iterations completed |
| `xrpl.consensus.proposers_agreed` | int64 | `consensus.establish` | Number of proposers that agreed on this round |
| `xrpl.consensus.avalanche_threshold` | int64 | `consensus.update_positions` | Avalanche threshold for dispute resolution |
| `xrpl.consensus.close_time_threshold` | int64 | `consensus.update_positions` | Close time agreement threshold |
| `xrpl.consensus.have_close_time_consensus` | boolean | `consensus.update_positions` | Whether close time consensus has been reached |
| `xrpl.consensus.agree_count` | int64 | `consensus.check` | Number of proposers that agree with our position |
| `xrpl.consensus.disagree_count` | int64 | `consensus.check` | Number of proposers that disagree with our position |
| `xrpl.consensus.threshold_percent` | int64 | `consensus.check` | Required agreement threshold percentage |
| `xrpl.consensus.result` | string | `consensus.check` | Check result: `"yes"`, `"no"`, or `"expired"` |
| `xrpl.consensus.quorum` | int64 | `consensus.check` | Required quorum for validation |
| `xrpl.consensus.validation_count` | int64 | `consensus.check` | Number of validations received |
| `xrpl.consensus.trace_strategy` | string | `consensus.round` | Trace sampling strategy used for this round |
| `xrpl.consensus.round_id` | string | `consensus.round` | Deterministic round identifier |
| `xrpl.consensus.mode.old` | string | `consensus.mode_change` | Previous consensus mode |
| `xrpl.consensus.mode.new` | string | `consensus.mode_change` | New consensus mode |
| `xrpl.tx.id` | string | `consensus.update_positions` | Disputed transaction ID |
| `xrpl.dispute.our_vote` | boolean | `consensus.update_positions` | Our vote on the disputed transaction |
| `xrpl.dispute.yays` | int64 | `consensus.update_positions` | Number of proposers voting to include |
| `xrpl.dispute.nays` | int64 | `consensus.update_positions` | Number of proposers voting to exclude |

**Tempo query**: `{span.xrpl.consensus.mode="Proposing"}` to find rounds where the node was proposing.

**Prometheus label**: `xrpl_consensus_mode` (used as SpanMetrics dimension).
#### Ledger Attributes

| Attribute | Type | Set On | Description |
| --------------------------------- | ------- | ------------------------------------------------------------- | ------------------------------------------------ |
| `xrpl.ledger.seq` | int64 | `ledger.build`, `ledger.validate`, `ledger.store`, `tx.apply` | Ledger sequence number |
| `xrpl.ledger.close_time` | int64 | `ledger.build` | Ledger close time (epoch seconds) |
| `xrpl.ledger.close_time_correct` | boolean | `ledger.build` | Whether close time was agreed upon by validators |
| `xrpl.ledger.close_resolution_ms` | int64 | `ledger.build` | Close time rounding granularity in milliseconds |
| `xrpl.ledger.tx_count` | int64 | `ledger.build`, `tx.apply` | Transactions in the ledger |
| `xrpl.ledger.tx_failed` | int64 | `ledger.build`, `tx.apply` | Failed transactions in the ledger |
| `xrpl.ledger.validations` | int64 | `ledger.validate` | Number of validations received for this ledger |

**Tempo query**: `{span.xrpl.ledger.seq=12345}` to find all spans for a specific ledger.
#### Peer Attributes

| Attribute | Type | Set On | Description |
| ---------------------------------- | ------- | ---------------------------------------------------------------- | ---------------------------------------------------- |
| `xrpl.peer.id` | int64 | `tx.receive`, `peer.proposal.receive`, `peer.validation.receive` | Peer identifier |
| `xrpl.peer.proposal.trusted` | boolean | `peer.proposal.receive` | Whether the proposal came from a trusted validator |
| `xrpl.peer.validation.ledger_hash` | string | `peer.validation.receive` | Ledger hash the validation refers to |
| `xrpl.peer.validation.full` | boolean | `peer.validation.receive` | Whether this is a full (not partial) validation |
| `xrpl.peer.validation.trusted` | boolean | `peer.validation.receive` | Whether the validation came from a trusted validator |

**Prometheus labels**: `xrpl_peer_proposal_trusted`, `xrpl_peer_validation_trusted` (SpanMetrics dimensions).

---
### 1.3 SpanMetrics — Derived Prometheus Metrics

> **See also**: [01-architecture-analysis.md](./01-architecture-analysis.md) §1.8.2 for how span-derived metrics map to operational insights.

The OTel Collector's SpanMetrics connector automatically generates RED (Rate, Errors, Duration) metrics from every span. No custom metrics code in rippled is needed.

| Prometheus Metric | Type | Description |
| -------------------------------------------------- | --------- | ------------------------------------------------------------------------------ |
| `traces_span_metrics_calls_total` | Counter | Total span invocations |
| `traces_span_metrics_duration_milliseconds_bucket` | Histogram | Latency distribution (buckets: 1, 5, 10, 25, 50, 100, 250, 500, 1000, 5000 ms) |
| `traces_span_metrics_duration_milliseconds_count` | Histogram | Observation count |
| `traces_span_metrics_duration_milliseconds_sum` | Histogram | Cumulative latency |

**Standard labels on every metric**: `span_name`, `status_code`, `service_name`, `span_kind`

**Additional dimension labels** (configured in `otel-collector-config.yaml`):

| Span Attribute | Prometheus Label | Applies To |
| ------------------------------ | ------------------------------ | ------------------------- |
| `xrpl.rpc.command` | `xrpl_rpc_command` | `rpc.command.*` |
| `xrpl.rpc.status` | `xrpl_rpc_status` | `rpc.command.*` |
| `xrpl.consensus.mode` | `xrpl_consensus_mode` | `consensus.ledger_close` |
| `xrpl.tx.local` | `xrpl_tx_local` | `tx.process` |
| `xrpl.peer.proposal.trusted` | `xrpl_peer_proposal_trusted` | `peer.proposal.receive` |
| `xrpl.peer.validation.trusted` | `xrpl_peer_validation_trusted` | `peer.validation.receive` |

**Where to query**: Prometheus → `traces_span_metrics_calls_total{span_name="rpc.command.server_info"}`

---
## 2. StatsD Metrics (beast::insight)

> **See also**: [02-design-decisions.md](./02-design-decisions.md) for the beast::insight coexistence design. [06-implementation-phases.md](./06-implementation-phases.md) for the Phase 6 metric inventory.

These are system-level metrics emitted by rippled's `beast::insight` framework via StatsD UDP. They cover operational data that doesn't map to individual trace spans.

### Configuration

```ini
[insight]
server=statsd
address=127.0.0.1:8125
prefix=rippled
```
### 2.1 Gauges

| Prometheus Metric | Source File | Description | Typical Range |
| --------------------------------------------------- | --------------------- | ----------------------------------------- | ------------------------------- |
| `rippled_LedgerMaster_Validated_Ledger_Age` | LedgerMaster.h | Seconds since last validated ledger | 0–10 (healthy), >30 (stale) |
| `rippled_LedgerMaster_Published_Ledger_Age` | LedgerMaster.h | Seconds since last published ledger | 0–10 (healthy) |
| `rippled_State_Accounting_Disconnected_duration` | NetworkOPs.cpp | Cumulative seconds in Disconnected state | Monotonic |
| `rippled_State_Accounting_Connected_duration` | NetworkOPs.cpp | Cumulative seconds in Connected state | Monotonic |
| `rippled_State_Accounting_Syncing_duration` | NetworkOPs.cpp | Cumulative seconds in Syncing state | Monotonic |
| `rippled_State_Accounting_Tracking_duration` | NetworkOPs.cpp | Cumulative seconds in Tracking state | Monotonic |
| `rippled_State_Accounting_Full_duration` | NetworkOPs.cpp | Cumulative seconds in Full state | Monotonic (should dominate) |
| `rippled_State_Accounting_Disconnected_transitions` | NetworkOPs.cpp | Count of transitions to Disconnected | Low |
| `rippled_State_Accounting_Connected_transitions` | NetworkOPs.cpp | Count of transitions to Connected | Low |
| `rippled_State_Accounting_Syncing_transitions` | NetworkOPs.cpp | Count of transitions to Syncing | Low |
| `rippled_State_Accounting_Tracking_transitions` | NetworkOPs.cpp | Count of transitions to Tracking | Low |
| `rippled_State_Accounting_Full_transitions` | NetworkOPs.cpp | Count of transitions to Full | Low (should be 1 after startup) |
| `rippled_Peer_Finder_Active_Inbound_Peers` | PeerfinderManager.cpp | Active inbound peer connections | 0–85 |
| `rippled_Peer_Finder_Active_Outbound_Peers` | PeerfinderManager.cpp | Active outbound peer connections | 10–21 |
| `rippled_Overlay_Peer_Disconnects` | OverlayImpl.cpp | Cumulative peer disconnection count | Low growth |
| `rippled_Overlay_Peer_Disconnects_Charges` | OverlayImpl.cpp | Disconnects due to resource limit charges | Low growth (subset of above) |
| `rippled_job_count` | JobQueue.cpp | Current job queue depth | 0–100 (healthy) |

**Grafana dashboard**: _Node Health (StatsD)_ (`rippled-statsd-node-health`)
### 2.2 Counters

| Prometheus Metric | Source File | Description |
| --------------------------------- | ------------------ | --------------------------------------------- |
| `rippled_rpc_requests` | ServerHandler.cpp | Total RPC requests received |
| `rippled_ledger_fetches` | InboundLedgers.cpp | Inbound ledger fetch attempts |
| `rippled_ledger_history_mismatch` | LedgerHistory.cpp | Ledger hash mismatches detected |
| `rippled_warn` | Logic.h | Resource manager warnings issued |
| `rippled_drop` | Logic.h | Resource manager drops (connections rejected) |

**Note**: `rippled_warn` and `rippled_drop` use the non-standard StatsD meter type (`|m`) — for example, `rippled.warn:1|m` on the wire, where a standard counter would be `rippled.warn:1|c`. The OTel StatsD receiver only recognizes `|c`, `|g`, `|ms`, `|h`, and `|s`, so these metrics may be silently dropped. See Known Issues below.

**Grafana dashboard**: _RPC & Pathfinding (StatsD)_ (`rippled-statsd-rpc`)
### 2.3 Histograms (from StatsD timers)

| Prometheus Metric | Source File | Unit | Description |
| ----------------------- | ----------------- | ----- | ------------------------------ |
| `rippled_rpc_time` | ServerHandler.cpp | ms | RPC response time distribution |
| `rippled_rpc_size` | ServerHandler.cpp | bytes | RPC response size distribution |
| `rippled_ios_latency` | Application.cpp | ms | I/O service loop latency |
| `rippled_pathfind_fast` | PathRequests.h | ms | Fast pathfinding duration |
| `rippled_pathfind_full` | PathRequests.h | ms | Full pathfinding duration |

Quantiles collected: 0th, 50th, 90th, 95th, 99th, and 100th percentiles.

**Grafana dashboards**: _Node Health_ (`ios_latency`), _RPC & Pathfinding_ (`rpc_time`, `rpc_size`, `pathfind_*`)
### 2.4 Overlay Traffic Metrics

For each of the 45+ overlay traffic categories (defined in `TrafficCount.h`), four gauges are emitted:

- `rippled_{category}_Bytes_In`
- `rippled_{category}_Bytes_Out`
- `rippled_{category}_Messages_In`
- `rippled_{category}_Messages_Out`

**Key categories**:

| Category | Description |
| ----------------------------------------------------------------- | -------------------------- |
| `total` | All traffic aggregated |
| `overhead` / `overhead_overlay` | Protocol overhead |
| `transactions` / `transactions_duplicate` | Transaction relay |
| `proposals` / `proposals_untrusted` / `proposals_duplicate` | Consensus proposals |
| `validations` / `validations_untrusted` / `validations_duplicate` | Consensus validations |
| `ledger_data_get` / `ledger_data_share` | Ledger data exchange |
| `ledger_data_Transaction_Node_get/share` | Transaction node data |
| `ledger_data_Account_State_Node_get/share` | Account state node data |
| `ledger_data_Transaction_Set_candidate_get/share` | Transaction set candidates |
| `getObject` / `haveTxSet` / `ledgerData` | Object requests |
| `ping` / `status` | Keepalive and status |
| `set_get` | Set requests |

**Grafana dashboards**: _Network Traffic_ (`rippled-statsd-network`), _Overlay Traffic Detail_ (`rippled-statsd-overlay-detail`), _Ledger Data & Sync_ (`rippled-statsd-ledger-sync`)

---
## 3. Grafana Dashboard Reference

> **See also**: [05-configuration-reference.md](./05-configuration-reference.md) §5.8 for Grafana data source provisioning (Tempo, Prometheus) and TraceQL query examples.

### 3.1 Span-Derived Dashboards (5)

| Dashboard | UID | Data Source | Key Panels |
| -------------------- | ---------------------- | ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| RPC Performance | `rippled-rpc-perf` | Prometheus (SpanMetrics) | Request rate by command, p95 latency by command, error rate, heatmap, top commands |
| Transaction Overview | `rippled-transactions` | Prometheus (SpanMetrics) | Processing rate, latency p95/p50, local vs relay split, apply duration, heatmap |
| Consensus Health | `rippled-consensus` | Prometheus (SpanMetrics) | Round duration p95/p50, proposals rate, close duration, mode timeline, heatmap, close time correctness, resolution direction, close time drift, resolution change timeline, close time vote distribution |
| Ledger Operations | `rippled-ledger-ops` | Prometheus (SpanMetrics) | Build rate, build duration, validation rate, store rate, build vs close comparison |
| Peer Network | `rippled-peer-net` | Prometheus (SpanMetrics) | Proposal receive rate, validation receive rate, trusted vs untrusted breakdown |

### 3.2 StatsD Dashboards (5)

| Dashboard | UID | Data Source | Key Panels |
| ---------------------- | ------------------------------- | ------------------- | --------------------------------------------------------------------------------- |
| Node Health | `rippled-statsd-node-health` | Prometheus (StatsD) | Ledger age, operating mode, I/O latency, job queue, fetch rate |
| Network Traffic | `rippled-statsd-network` | Prometheus (StatsD) | Active peers, disconnects, bytes in/out, messages in/out, traffic by category |
| RPC & Pathfinding | `rippled-statsd-rpc` | Prometheus (StatsD) | RPC rate, response time/size, pathfinding duration, resource warnings/drops |
| Overlay Traffic Detail | `rippled-statsd-overlay-detail` | Prometheus (StatsD) | Squelch, overhead, validator lists, set get/share, have/requested tx, proof paths |
| Ledger Data & Sync | `rippled-statsd-ledger-sync` | Prometheus (StatsD) | Ledger data exchange, legacy ledger share/get, getobject by type, traffic heatmap |
### 3.3 Consensus Close-Time Panels

The Consensus Health dashboard includes 5 close-time panels added in Phase 4:

| Panel | Metric / Attribute | Description |
| ---------------------------- | --------------------------------------------------------------- | ------------------------------------------------------------------------ |
| Close Time Correctness | `xrpl.consensus.close_time_correct` | Percentage of rounds with agreed-upon close time |
| Resolution Direction | `xrpl.consensus.resolution_direction` | Rate of resolution increases, decreases, and unchanged per time interval |
| Close Time Drift | `xrpl.consensus.close_time` vs `xrpl.consensus.close_time_self` | Difference between agreed close time and node's own proposed close time |
| Resolution Change Timeline | `xrpl.consensus.close_resolution_ms` | Close time resolution granularity over time |
| Close Time Vote Distribution | `xrpl.consensus.close_time_vote_bins` | Histogram of validator close time votes per round |

**Template variables** (Consensus Health dashboard):

| Variable | Source Attribute | Description |
| ----------------------- | ------------------------------------- | ------------------------------------------------------------------------ |
| `$node` | `exported_instance` | Filter by rippled node instance |
| `$close_time_correct` | `xrpl_consensus_close_time_correct` | Filter by close time correctness (`true` / `false`) |
| `$resolution_direction` | `xrpl_consensus_resolution_direction` | Filter by resolution direction (`increased` / `decreased` / `unchanged`) |
### 3.4 Accessing the Dashboards

1. Open Grafana at **http://localhost:3000**
2. Navigate to the **Dashboards → rippled** folder
3. All 10 dashboards are auto-provisioned from `docker/telemetry/grafana/dashboards/`

---
## 4. Tempo Trace Search Guide

> **See also**: [08-appendix.md](./08-appendix.md) §8.2 for span hierarchy visualizations. [05-configuration-reference.md](./05-configuration-reference.md) §5.8.5 for TraceQL query examples.

### Finding Traces by Type

| What to Find | Tempo TraceQL Query |
| ------------------------ | ----------------------------------------------------------------------------------- |
| All RPC calls | `{resource.service.name="rippled" && name="rpc.http_request"}` |
| Specific RPC command | `{resource.service.name="rippled" && name="rpc.command.server_info"}` |
| Slow RPC calls | `{resource.service.name="rippled" && name=~"rpc.command.*" && duration > 100ms}` |
| Failed RPC calls | `{span.xrpl.rpc.status="error"}` |
| Specific transaction | `{span.xrpl.tx.hash="<hex_hash>"}` |
| Local transactions only | `{span.xrpl.tx.local=true}` |
| Consensus rounds | `{resource.service.name="rippled" && name="consensus.accept"}` |
| Rounds by mode | `{span.xrpl.consensus.mode="Proposing"}` |
| Specific ledger | `{span.xrpl.ledger.seq=12345}` |
| Peer proposals (trusted) | `{span.xrpl.peer.proposal.trusted=true}` |
### Trace Structure

A typical RPC trace shows the span hierarchy:

```
rpc.http_request (ServerHandler)
└── rpc.process (ServerHandler)
    └── rpc.command.server_info (RPCHandler)
```

A consensus round groups child spans under a deterministic trace ID:

```
consensus.round (top-level, deterministic trace ID from ledger hash)
├── consensus.ledger_close (close event)
├── consensus.proposal.send (broadcast proposal)
├── consensus.establish (convergence loop)
│   ├── consensus.update_positions (update disputes)
│   └── consensus.check (check agreement)
├── consensus.accept (accept result)
├── consensus.accept.apply (apply with close time details)
├── consensus.validation.send (send validation)
└── consensus.mode_change (mode transition, if any)

ledger.build (build new ledger)
└── tx.apply (apply transaction set)

ledger.validate (promote to validated)

ledger.store (persist to DB)
```
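The deterministic trace ID idea can be illustrated with a small sketch: every node derives the same 128-bit trace ID from the shared previous ledger hash, so their consensus spans land in the same trace. The helper below is hypothetical; the exact derivation in rippled may differ.

```cpp
// Illustrative only: derive a 16-byte OTel trace ID from the previous
// ledger hash so all nodes emit consensus spans under the same trace.
#include <opentelemetry/nostd/span.h>
#include <opentelemetry/trace/trace_id.h>

#include <array>
#include <cstdint>
#include <cstring>

opentelemetry::trace::TraceId
traceIdForRound(std::array<std::uint8_t, 32> const& prevLedgerHash)
{
    // A trace ID holds 128 bits, so truncate the 256-bit ledger hash.
    std::array<std::uint8_t, 16> bytes{};
    std::memcpy(bytes.data(), prevLedgerHash.data(), bytes.size());
    return opentelemetry::trace::TraceId(
        opentelemetry::nostd::span<const std::uint8_t, 16>(
            bytes.data(), bytes.size()));
}
```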
---
## 5. Prometheus Query Examples

> **See also**: [05-configuration-reference.md](./05-configuration-reference.md) §5.8.7 for correlating Prometheus StatsD metrics with trace-derived metrics.

### Span-Derived Metrics

```promql
# RPC request rate by command (last 5 minutes)
sum by (xrpl_rpc_command) (rate(traces_span_metrics_calls_total{span_name=~"rpc.command.*"}[5m]))

# RPC p95 latency by command
histogram_quantile(0.95, sum by (le, xrpl_rpc_command) (rate(traces_span_metrics_duration_milliseconds_bucket{span_name=~"rpc.command.*"}[5m])))

# Consensus round duration p95
histogram_quantile(0.95, sum by (le) (rate(traces_span_metrics_duration_milliseconds_bucket{span_name="consensus.accept"}[5m])))

# Transaction processing rate (local vs relay)
sum by (xrpl_tx_local) (rate(traces_span_metrics_calls_total{span_name="tx.process"}[5m]))

# Trusted vs untrusted proposal rate
sum by (xrpl_peer_proposal_trusted) (rate(traces_span_metrics_calls_total{span_name="peer.proposal.receive"}[5m]))
```
### StatsD Metrics

```promql
# Validated ledger age (should be < 10s)
rippled_LedgerMaster_Validated_Ledger_Age

# Active peer count
rippled_Peer_Finder_Active_Inbound_Peers + rippled_Peer_Finder_Active_Outbound_Peers

# RPC response time p95
histogram_quantile(0.95, rippled_rpc_time_bucket)

# Total network bytes in (rate)
rate(rippled_total_Bytes_In[5m])

# Cumulative time in the Full operating state (should dominate after startup)
rippled_State_Accounting_Full_duration
```

---
## 6. SpanNames Header File Inventory

All span names and attributes are defined as compile-time constants in colocated `SpanNames.h` headers. Each header lives next to its subsystem's implementation.

| Header File | Subsystem | Span Count | Attribute Count | Notes |
| ----------------------------------------------- | ------------- | ---------- | --------------- | ------------------------------------------- |
| `src/xrpld/rpc/detail/RpcSpanNames.h` | RPC (HTTP/WS) | 5 | 5 | Includes `rpc.ws_upgrade` error path |
| `src/xrpld/rpc/detail/PathFindSpanNames.h` | PathFind | 5 | 8 | Covers one-shot and subscription paths |
| `src/xrpld/app/main/GrpcSpanNames.h` | gRPC | 1 | 3 | Flat single-span structure per request |
| `src/xrpld/app/misc/TxSpanNames.h` | Transaction | 2 | 7 | Includes peer context attributes |
| `src/xrpld/app/misc/detail/TxQSpanNames.h` | TxQ | 6 | 11 | Queue lifecycle: enqueue through cleanup |
| `src/xrpld/app/consensus/ConsensusSpanNames.h` | Consensus | 10 | 35 | Deterministic trace IDs, close-time details |
| `src/xrpld/app/ledger/detail/LedgerSpanNames.h` | Ledger | 4 | 7 | Build, store, validate, tx.apply |
| `src/xrpld/overlay/detail/PeerSpanNames.h` | Peer Overlay | 2 | 5 | Proposal and validation receive |

> **Design convention**: SpanNames headers are colocated with their subsystem classes rather than centralized in `telemetry/`. See [memory/feedback_span-names-colocation.md](../.claude/memory/feedback_span-names-colocation.md) for rationale.
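For illustration only, a header following this convention might look like the sketch below. The names are chosen to match the inventory in §1.1 and §1.2; the actual headers may differ.

```cpp
// Hypothetical excerpt in the style of a colocated SpanNames.h header.
// Span names and attribute keys are compile-time constants, so call sites
// cannot drift from the documented inventory.
#pragma once

#include <string_view>

namespace xrpl::telemetry::ledger {

// Span names (see §1.1, "Ledger Spans")
inline constexpr std::string_view spanBuild    = "ledger.build";
inline constexpr std::string_view spanValidate = "ledger.validate";
inline constexpr std::string_view spanStore    = "ledger.store";

// Attribute keys (see §1.2, "Ledger Attributes")
inline constexpr std::string_view attrSeq         = "xrpl.ledger.seq";
inline constexpr std::string_view attrTxCount     = "xrpl.ledger.tx_count";
inline constexpr std::string_view attrValidations = "xrpl.ledger.validations";

}  // namespace xrpl::telemetry::ledger
```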
---
## 7. Known Issues

| Issue | Impact | Status |
| ------------------------------------------------------------------ | ------------------------------------------------ | -------------------------------------------------------------------- |
| `warn` and `drop` metrics use non-standard StatsD `\|m` meter type | Metrics silently dropped by OTel StatsD receiver | Phase 6 Task 6.1 — needs `\|m` → `\|c` change in StatsDCollector.cpp |
| `rippled_job_count` may not emit in standalone mode | Missing from Prometheus in some test configs | Requires active job queue activity |
| `rippled_rpc_requests` depends on `[insight]` config | Zero series if StatsD not configured | Requires `[insight] server=statsd` in xrpld.cfg |
| Peer tracing disabled by default | No `peer.*` spans unless `trace_peer=1` | Intentional — high volume on mainnet |

---
## 8. Privacy and Data Collection

The telemetry system is designed with privacy in mind:

- **No private keys** are ever included in spans or metrics
- **No account balances** or financial data is traced
- **Transaction hashes** are included (public on-ledger data) but not transaction contents
- **Peer IDs** are internal identifiers, not IP addresses
- **All telemetry is opt-in** — disabled by default at build time (`-Dtelemetry=OFF`)
- **Sampling** reduces data volume — `sampling_ratio=0.01` recommended for production
- **Data stays local** — the default stack sends data to `localhost` only

---
## 9. Configuration Quick Reference

> **Full reference**: [05-configuration-reference.md](./05-configuration-reference.md) §5.1 for all `[telemetry]` options with defaults, the config parser implementation, and collector YAML configurations (dev and production).

### Minimal Setup (development)

```ini
[telemetry]
enabled=1

[insight]
server=statsd
address=127.0.0.1:8125
prefix=rippled
```

### Production Setup

```ini
[telemetry]
enabled=1
endpoint=http://otel-collector:4318/v1/traces
sampling_ratio=0.01
trace_peer=0
batch_size=1024
max_queue_size=4096

[insight]
server=statsd
address=otel-collector:8125
prefix=rippled
```

### Trace Category Toggles

| Config Key | Default | Controls |
| -------------------- | ------- | ---------------------------- |
| `trace_rpc` | `1` | `rpc.*` spans |
| `trace_transactions` | `1` | `tx.*` spans |
| `trace_consensus` | `1` | `consensus.*` spans |
| `trace_ledger` | `1` | `ledger.*` spans |
| `trace_peer` | `0` | `peer.*` spans (high volume) |
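A hypothetical call-site pattern for these toggles (the real accessors are the `shouldTrace*()` methods on the plan's `Telemetry` interface; the function name here is illustrative):

```cpp
// Illustrative only: guard span creation on the per-category toggles above.
// shouldTraceRpc() reflects trace_rpc from the [telemetry] config section.
void onHttpRequest(xrpl::telemetry::Telemetry& telemetry)
{
    if (!telemetry.shouldTraceRpc())
        return;  // tracing for this category is switched off

    auto span = telemetry.startSpan("rpc.http_request");
    // ... handle the request under the span ...
}
```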
@@ -1,243 +0,0 @@
# [OpenTelemetry](00-tracing-fundamentals.md) Distributed Tracing Implementation Plan for xrpld

## Executive Summary

> **OTLP** = OpenTelemetry Protocol

This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the xrpld XRP Ledger node software. The plan addresses the unique challenges of a decentralized peer-to-peer system where trace context must propagate across network boundaries between independent nodes.

### Key Benefits

- **End-to-end transaction visibility**: Track transactions from submission through consensus to ledger inclusion
- **Consensus round analysis**: Understand timing and behavior of consensus phases across validators
- **RPC performance insights**: Identify slow handlers and optimize response times
- **Network topology understanding**: Visualize message propagation patterns between peers
- **Incident debugging**: Correlate events across distributed nodes during issues

### Estimated Performance Overhead

| Metric | Overhead | Notes |
| ------------- | ---------- | ----------------------------------- |
| CPU | 1-3% | Span creation and attribute setting |
| Memory | 2-5 MB | Batch buffer for pending spans |
| Network | 10-50 KB/s | Compressed OTLP export to collector |
| Latency (p99) | <2% | With proper sampling configuration |

---
## Document Structure

This implementation plan is organized into modular documents for easier navigation:

<div align="center">

```mermaid
flowchart TB
    overview["📋 OpenTelemetryPlan.md<br/>(This Document)"]

    subgraph fundamentals["Fundamentals"]
        fund["00-tracing-fundamentals.md"]
    end

    subgraph analysis["Analysis & Design"]
        arch["01-architecture-analysis.md"]
        design["02-design-decisions.md"]
    end

    subgraph impl["Implementation"]
        strategy["03-implementation-strategy.md"]
        code["04-code-samples.md"]
        config["05-configuration-reference.md"]
    end

    subgraph deploy["Deployment & Planning"]
        phases["06-implementation-phases.md"]
        backends["07-observability-backends.md"]
        appendix["08-appendix.md"]
        poc["POC_taskList.md"]
        dataref["09-data-collection-reference.md"]
    end

    overview --> fundamentals
    overview --> analysis
    overview --> impl
    overview --> deploy

    fund --> arch
    arch --> design
    design --> strategy
    strategy --> code
    code --> config
    config --> phases
    phases --> backends
    backends --> appendix
    phases --> poc
    appendix --> dataref

    style overview fill:#1b5e20,stroke:#0d3d14,color:#fff,stroke-width:2px
    style fundamentals fill:#00695c,stroke:#004d40,color:#fff
    style fund fill:#00695c,stroke:#004d40,color:#fff
    style analysis fill:#0d47a1,stroke:#082f6a,color:#fff
    style impl fill:#bf360c,stroke:#8c2809,color:#fff
    style deploy fill:#4a148c,stroke:#2e0d57,color:#fff
    style arch fill:#0d47a1,stroke:#082f6a,color:#fff
    style design fill:#0d47a1,stroke:#082f6a,color:#fff
    style strategy fill:#bf360c,stroke:#8c2809,color:#fff
    style code fill:#bf360c,stroke:#8c2809,color:#fff
    style config fill:#bf360c,stroke:#8c2809,color:#fff
    style phases fill:#4a148c,stroke:#2e0d57,color:#fff
    style backends fill:#4a148c,stroke:#2e0d57,color:#fff
    style appendix fill:#4a148c,stroke:#2e0d57,color:#fff
    style poc fill:#4a148c,stroke:#2e0d57,color:#fff
    style dataref fill:#4a148c,stroke:#2e0d57,color:#fff
```

</div>

---
## Table of Contents

| Section | Document | Description |
| ------- | -------------------------------------------------------------- | ---------------------------------------------------------------------- |
| **0** | [Tracing Fundamentals](./00-tracing-fundamentals.md) | Distributed tracing concepts, span relationships, context propagation |
| **1** | [Architecture Analysis](./01-architecture-analysis.md) | xrpld component analysis, trace points, instrumentation priorities |
| **2** | [Design Decisions](./02-design-decisions.md) | SDK selection, exporters, span naming, attributes, context propagation |
| **3** | [Implementation Strategy](./03-implementation-strategy.md) | Directory structure, key principles, performance optimization |
| **4** | [Code Samples](./04-code-samples.md) | C++ implementation examples for core infrastructure and key modules |
| **5** | [Configuration Reference](./05-configuration-reference.md) | xrpld config, CMake integration, Collector configurations |
| **6** | [Implementation Phases](./06-implementation-phases.md) | 5-phase timeline, tasks, risks, success metrics |
| **7** | [Observability Backends](./07-observability-backends.md) | Backend selection guide and production architecture |
| **8** | [Appendix](./08-appendix.md) | Glossary, references, version history |
| **9** | [Data Collection Reference](./09-data-collection-reference.md) | Complete inventory of spans, attributes, metrics, and dashboards |
| **POC** | [POC Task List](./POC_taskList.md) | Proof of concept tasks for RPC tracing end-to-end demo |

---
## 0. Tracing Fundamentals

This document introduces distributed tracing concepts for readers unfamiliar with the domain. It covers what traces and spans are, how parent-child and follows-from relationships model causality, how context propagates across service boundaries, and how sampling controls data volume. It also maps these concepts to xrpld-specific scenarios like transaction relay and consensus.

➡️ **[Read Tracing Fundamentals](./00-tracing-fundamentals.md)**

---
## 1. Architecture Analysis

> **WS** = WebSocket | **TxQ** = Transaction Queue

The xrpld node consists of several key components that require instrumentation for comprehensive distributed tracing. The main areas include the RPC server (HTTP/WebSocket), Overlay P2P network, Consensus mechanism (RCLConsensus), JobQueue for async task execution, PathFinding, Transaction Queue (TxQ), fee escalation (LoadManager), ledger acquisition, validator management, and existing observability infrastructure (PerfLog, Insight/StatsD, Journal logging).

Key trace points span transaction submission via RPC, peer-to-peer message propagation, consensus round execution, ledger building, path computation, transaction queue behavior, fee escalation, and validator health. The implementation prioritizes high-value, low-risk components first: RPC handlers provide immediate value with minimal risk, while consensus tracing requires careful implementation to avoid timing impacts.

➡️ **[Read full Architecture Analysis](./01-architecture-analysis.md)**

---
## 2. Design Decisions

> **OTLP** = OpenTelemetry Protocol | **CNCF** = Cloud Native Computing Foundation

The OpenTelemetry C++ SDK is selected for its CNCF backing, active development, and native performance characteristics. Traces are exported via OTLP/gRPC (primary) or OTLP/HTTP (fallback) to an OpenTelemetry Collector, which provides flexible routing and sampling.

Span naming follows a hierarchical `<component>.<operation>` convention (e.g., `rpc.submit`, `tx.relay`, `consensus.round`). Context propagation uses W3C Trace Context headers for HTTP (a `traceparent` header of the form `00-<trace-id>-<span-id>-<trace-flags>`) and embedded Protocol Buffer fields for P2P messages. The implementation coexists with the existing PerfLog and Insight observability systems through correlation IDs.

**Data Collection & Privacy**: Telemetry collects only operational metadata (timing, counts, hashes) — never sensitive content (private keys, balances, amounts, raw payloads). Privacy protection includes account hashing, configurable redaction, sampling, and collector-level filtering. Node operators retain full control over telemetry configuration.

➡️ **[Read full Design Decisions](./02-design-decisions.md)**

---
## 3. Implementation Strategy

The telemetry code is organized under `include/xrpl/telemetry/` for headers and `src/libxrpl/telemetry/` for implementation. Key principles include RAII-based span management via `SpanGuard` (with `discard()` for dropping unwanted spans), a `FilteringSpanProcessor` that intercepts `OnEnd()` to prevent discarded spans from entering the export pipeline, conditional compilation with `XRPL_ENABLE_TELEMETRY`, and minimal runtime overhead through batch processing and efficient sampling.

Performance optimization strategies include probabilistic head sampling (10% default), tail-based sampling at the collector for errors and slow traces, batch export to reduce network overhead, and conditional instrumentation that compiles to no-ops when disabled.
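A minimal sketch of the `FilteringSpanProcessor` idea mentioned above, assuming opentelemetry-cpp's `SpanProcessor` interface. The thread-local flag stands in for the `DiscardFlag.h` mechanism described in §4; the real implementation may differ.

```cpp
// Sketch only: wraps the downstream (batch) processor and swallows spans
// whose guard called discard(), so they never reach the exporter.
#include <opentelemetry/sdk/trace/processor.h>
#include <opentelemetry/sdk/trace/recordable.h>
#include <opentelemetry/trace/span_context.h>

#include <chrono>
#include <memory>
#include <utility>

namespace xrpl::telemetry {

inline thread_local bool discardCurrentSpan = false;  // set by SpanGuard::discard()

class FilteringSpanProcessor : public opentelemetry::sdk::trace::SpanProcessor
{
    using Recordable = opentelemetry::sdk::trace::Recordable;

public:
    explicit FilteringSpanProcessor(
        std::unique_ptr<opentelemetry::sdk::trace::SpanProcessor> next)
        : next_(std::move(next))
    {
    }

    std::unique_ptr<Recordable> MakeRecordable() noexcept override
    {
        return next_->MakeRecordable();
    }

    void OnStart(Recordable& span,
                 opentelemetry::trace::SpanContext const& parent) noexcept override
    {
        next_->OnStart(span, parent);
    }

    void OnEnd(std::unique_ptr<Recordable>&& span) noexcept override
    {
        if (discardCurrentSpan)
        {
            discardCurrentSpan = false;
            return;  // drop the span instead of forwarding it for export
        }
        next_->OnEnd(std::move(span));
    }

    bool ForceFlush(std::chrono::microseconds timeout) noexcept override
    {
        return next_->ForceFlush(timeout);
    }

    bool Shutdown(std::chrono::microseconds timeout) noexcept override
    {
        return next_->Shutdown(timeout);
    }

private:
    std::unique_ptr<opentelemetry::sdk::trace::SpanProcessor> next_;
};

}  // namespace xrpl::telemetry
```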
➡️ **[Read full Implementation Strategy](./03-implementation-strategy.md)**

---
## 4. Code Samples

C++ implementation examples are provided for the core telemetry infrastructure and key modules:

- `Telemetry.h` - Core interface for tracer access and span creation
- `SpanGuard.h` - RAII wrapper for automatic span lifecycle management with `discard()` support
- `DiscardFlag.h` - Thread-local flag for span discard signaling between SpanGuard and FilteringSpanProcessor
- `SpanGuard.cpp` - Pimpl implementation confining all OTel SDK types
- Protocol Buffer extensions for trace context propagation
- Module-specific instrumentation (RPC, Consensus, P2P, JobQueue)
- Remaining modules (PathFinding, TxQ, Validator, etc.) follow the same patterns

➡️ **[View all Code Samples](./04-code-samples.md)**

---
## 5. Configuration Reference

> **OTLP** = OpenTelemetry Protocol | **APM** = Application Performance Monitoring

Configuration is handled through the `[telemetry]` section in `xrpld.cfg` with options for enabling/disabling, exporter selection, endpoint configuration, sampling ratios, and component-level filtering. CMake integration includes an `XRPL_ENABLE_TELEMETRY` option for compile-time control.

OpenTelemetry Collector configurations are provided for development and production (with tail-based sampling, Tempo, and Elastic APM). Docker Compose examples enable quick local development environment setup.

➡️ **[View full Configuration Reference](./05-configuration-reference.md)**

---
## 6. Implementation Phases

The implementation spans 9 weeks across 5 phases:

| Phase | Duration | Focus | Key Deliverables |
| ----- | --------- | ------------------- | --------------------------------------------------- |
| 1 | Weeks 1-2 | Core Infrastructure | SDK integration, Telemetry interface, Configuration |
| 2 | Weeks 3-4 | RPC Tracing | HTTP context extraction, Handler instrumentation |
| 3 | Weeks 5-6 | Transaction Tracing | Protocol Buffer context, Relay propagation |
| 4 | Weeks 7-8 | Consensus Tracing | Round spans, Proposal/validation tracing |
| 5 | Week 9 | Documentation | Runbook, Dashboards, Training |

**Total Effort**: 47 person-days (2 developers working in parallel)

➡️ **[View full Implementation Phases](./06-implementation-phases.md)**

---
## 7. Observability Backends

> **APM** = Application Performance Monitoring | **GCS** = Google Cloud Storage

Grafana Tempo is recommended for all environments due to its cost-effectiveness and Grafana integration, while Elastic APM is ideal for organizations with existing Elastic infrastructure.

The recommended production architecture uses a gateway collector pattern with regional collectors performing tail-based sampling, routing traces to multiple backends (Tempo for primary storage, Elastic for log correlation, S3/GCS for long-term archive).

➡️ **[View Observability Backend Recommendations](./07-observability-backends.md)**

---
## 8. Appendix

The appendix contains a glossary of OpenTelemetry and xrpld-specific terms, references to external documentation and specifications, version history for this implementation plan, and a complete document index.

➡️ **[View Appendix](./08-appendix.md)**

---
## 9. Data Collection Reference

A single-source-of-truth reference documenting every piece of telemetry data collected by rippled. Covers every OpenTelemetry span with its attributes, all StatsD metrics (gauges, counters, histograms, overlay traffic), the SpanMetrics-derived Prometheus metrics, and all Grafana dashboards. Includes Tempo search guides and Prometheus query examples.

➡️ **[View Data Collection Reference](./09-data-collection-reference.md)**

---
## POC Task List

A step-by-step task list for building a minimal end-to-end proof of concept that demonstrates distributed tracing in xrpld. The POC scope is limited to RPC tracing — showing request traces flowing from xrpld through an OpenTelemetry Collector into Tempo, viewable in Grafana.

➡️ **[View POC Task List](./POC_taskList.md)**

---
_This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the xrpld XRP Ledger node software. For detailed information on any section, follow the links to the corresponding sub-documents._
@@ -1,628 +0,0 @@
# OpenTelemetry POC Task List

> **Goal**: Build a minimal end-to-end proof of concept that demonstrates distributed tracing in xrpld. A successful POC will show RPC request traces flowing from xrpld through an OTel Collector into Tempo, viewable in Grafana.
>
> **Scope**: RPC tracing only (highest value, lowest risk per the [CRAWL phase](./06-implementation-phases.md#6102-quick-wins-immediate-value) in the implementation phases). No cross-node P2P context propagation or consensus tracing in the POC.

### Related Plan Documents

| Document | Relevance to POC |
| ---------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [00-tracing-fundamentals.md](./00-tracing-fundamentals.md) | Core concepts: traces, spans, context propagation, sampling |
| [01-architecture-analysis.md](./01-architecture-analysis.md) | RPC request flow (§1.5), key trace points (§1.6), instrumentation priority (§1.7) |
| [02-design-decisions.md](./02-design-decisions.md) | SDK selection (§2.1), exporter config (§2.2), span naming (§2.3), attribute schema (§2.4), coexistence with PerfLog/Insight (§2.6) |
| [03-implementation-strategy.md](./03-implementation-strategy.md) | Directory structure (§3.1), key principles (§3.2), performance overhead (§3.3-3.6), conditional compilation (§3.7.3), code intrusiveness (§3.9) |
| [04-code-samples.md](./04-code-samples.md) | Telemetry interface (§4.1), SpanGuard factory methods (§4.2-4.3), RPC instrumentation (§4.5.3) |
| [05-configuration-reference.md](./05-configuration-reference.md) | xrpld config (§5.1), config parser (§5.2), Application integration (§5.3), CMake (§5.4), Collector config (§5.5), Docker Compose (§5.6), Grafana (§5.8) |
| [06-implementation-phases.md](./06-implementation-phases.md) | Phase 1 core tasks (§6.2), Phase 2 RPC tasks (§6.3), quick wins (§6.10), definition of done (§6.11) |
| [07-observability-backends.md](./07-observability-backends.md) | Tempo dev setup (§7.1), Grafana dashboards (§7.6), alert rules (§7.6.3) |

---
## Task 0: Docker Observability Stack Setup

> **OTLP** = OpenTelemetry Protocol

**Objective**: Stand up the backend infrastructure to receive, store, and display traces.

**What to do**:

- Create `docker/telemetry/docker-compose.yml` in the repo with three services:

  1. **OpenTelemetry Collector** (`otel/opentelemetry-collector-contrib:0.92.0`)
     - Expose ports `4317` (OTLP gRPC) and `4318` (OTLP HTTP)
     - Expose port `13133` (health check)
     - Mount a config file `docker/telemetry/otel-collector-config.yaml`
  2. **Tempo** (`grafana/tempo:2.6.1`)
     - Expose port `3200` (HTTP API) and `4317` (OTLP gRPC, internal)
  3. **Grafana** (`grafana/grafana:latest`) — optional but useful
     - Expose port `3000`
     - Enable anonymous admin access for local dev (`GF_AUTH_ANONYMOUS_ENABLED=true`, `GF_AUTH_ANONYMOUS_ORG_ROLE=Admin`)
     - Provision Tempo as a data source via `docker/telemetry/grafana/provisioning/datasources/tempo.yaml`

- Create `docker/telemetry/otel-collector-config.yaml`:

  ```yaml
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
        http:
          endpoint: 0.0.0.0:4318

  processors:
    batch:
      timeout: 1s
      send_batch_size: 100

  exporters:
    logging:
      verbosity: detailed
    otlp/tempo:
      endpoint: tempo:4317
      tls:
        insecure: true

  service:
    pipelines:
      traces:
        receivers: [otlp]
        processors: [batch]
        exporters: [logging, otlp/tempo]
  ```

- Create the Grafana Tempo datasource provisioning file at `docker/telemetry/grafana/provisioning/datasources/tempo.yaml`:

  ```yaml
  apiVersion: 1
  datasources:
    - name: Tempo
      type: tempo
      access: proxy
      url: http://tempo:3200
  ```
**Verification**: Run `docker compose -f docker/telemetry/docker-compose.yml up -d`, then:

- `curl http://localhost:13133` returns healthy (Collector)
- `http://localhost:3000` opens Grafana (Tempo datasource available, no traces yet)

**Reference**:

- [05-configuration-reference.md §5.5](./05-configuration-reference.md) — Collector config (dev YAML with Tempo exporter)
- [05-configuration-reference.md §5.6](./05-configuration-reference.md) — Docker Compose development environment
- [07-observability-backends.md §7.1](./07-observability-backends.md) — Tempo quick start and backend selection
- [05-configuration-reference.md §5.8](./05-configuration-reference.md) — Grafana datasource provisioning and dashboards

---
## Task 1: Add OpenTelemetry C++ SDK Dependency

**Objective**: Make `opentelemetry-cpp` available to the build system.

**What to do**:

- Edit `conanfile.py` to add `opentelemetry-cpp` as an **optional** dependency. The gRPC otel plugin flag (`"grpc/*:otel_plugin": False`) in the existing conanfile may need to remain false — we pull the OTel SDK separately.
  - Add a Conan option: `with_telemetry = [True, False]`, defaulting to `False`
  - When `with_telemetry` is `True`, add `opentelemetry-cpp` to `self.requires()`
  - Required OTel Conan components: `opentelemetry-cpp` (which bundles api, sdk, and exporters). If the package isn't in Conan Center, consider using `FetchContent` in CMake or building from source as a fallback.
- Edit `CMakeLists.txt`:
  - Add option: `option(XRPL_ENABLE_TELEMETRY "Enable OpenTelemetry tracing" OFF)`
  - When ON, `find_package(opentelemetry-cpp CONFIG REQUIRED)` and add the compile definition `XRPL_ENABLE_TELEMETRY`
  - When OFF, do nothing (zero build impact)
- Verify the build succeeds with `-DXRPL_ENABLE_TELEMETRY=OFF` (no regressions) and with `-DXRPL_ENABLE_TELEMETRY=ON` (SDK links successfully).

**Key files**:

- `conanfile.py`
- `CMakeLists.txt`

**Reference**:

- [05-configuration-reference.md §5.4](./05-configuration-reference.md) — CMake integration, `FindOpenTelemetry.cmake`, `XRPL_ENABLE_TELEMETRY` option
- [03-implementation-strategy.md §3.2](./03-implementation-strategy.md) — Key principle: zero-cost when disabled via compile-time flags
- [02-design-decisions.md §2.1](./02-design-decisions.md) — SDK selection rationale and required OTel components

---
## Task 2: Create Core Telemetry Interface and NullTelemetry

**Objective**: Define the `Telemetry` abstract interface and a no-op implementation so the rest of the codebase can reference telemetry without hard-depending on the OTel SDK.

**What to do**:

- Create `include/xrpl/telemetry/Telemetry.h`:
  - Define `namespace xrpl::telemetry`
  - Define `struct Telemetry::Setup` holding: `enabled`, `exporterEndpoint`, `samplingRatio`, `serviceName`, `serviceVersion`, `serviceInstanceId`, `traceRpc`, `traceTransactions`, `traceConsensus`, `tracePeer`
  - Define abstract `class Telemetry` with:
    - `virtual void start() = 0;`
    - `virtual void stop() = 0;`
    - `virtual bool isEnabled() const = 0;`
    - `virtual nostd::shared_ptr<Tracer> getTracer(string_view name = "xrpld") = 0;`
    - `virtual nostd::shared_ptr<Span> startSpan(string_view name, SpanKind kind = kInternal) = 0;`
    - `virtual nostd::shared_ptr<Span> startSpan(string_view name, Context const& parentContext, SpanKind kind = kInternal) = 0;`
    - `virtual bool shouldTraceRpc() const = 0;`
    - `virtual bool shouldTraceTransactions() const = 0;`
    - `virtual bool shouldTraceConsensus() const = 0;`
  - Factory: `std::unique_ptr<Telemetry> make_Telemetry(Setup const&, beast::Journal);`
  - Config parser: `Telemetry::Setup setup_Telemetry(Section const&, std::string const& nodePublicKey, std::string const& version);`

- Create `include/xrpl/telemetry/SpanGuard.h`:
  - RAII guard with static factory methods (`rpcSpan()`, `txSpan()`, `consensusSpan()`, etc.) that access the global `Telemetry::getInstance()` singleton internally.
  - Uses the pimpl idiom to hide all OTel types — the public header has zero `opentelemetry/` includes.
  - Convenience instance methods: `setAttribute()`, `setOk()`, `setStatus()`, `addEvent()`, `recordException()`, `context()`, `discard()`
  - When `XRPL_ENABLE_TELEMETRY` is not defined, the entire class compiles to a no-op stub (sketched after this list).
  - See [04-code-samples.md](./04-code-samples.md) §4.2-4.3 for the full API reference.

- Create `src/libxrpl/telemetry/NullTelemetry.cpp`:
  - Implements `Telemetry` with all no-ops.
  - `isEnabled()` returns `false`; `startSpan()` returns a no-op span.
  - This is used when `XRPL_ENABLE_TELEMETRY` is OFF or `enabled=0` in config.

- Guard all OTel SDK headers behind `#ifdef XRPL_ENABLE_TELEMETRY`. The `NullTelemetry` implementation should compile without the OTel SDK present.
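To make the zero-cost requirement concrete, the disabled-build stub might take the following shape. This is hypothetical; the actual header is specified in [04-code-samples.md](./04-code-samples.md) §4.2-4.3.

```cpp
// Hypothetical shape of the zero-cost stub: when XRPL_ENABLE_TELEMETRY is
// not defined, every SpanGuard method compiles to a no-op the optimizer
// removes entirely, so call sites need no #ifdef of their own.
#ifndef XRPL_ENABLE_TELEMETRY

#include <string_view>

namespace xrpl::telemetry {

class SpanGuard
{
public:
    static SpanGuard rpcSpan(std::string_view) noexcept { return {}; }
    static SpanGuard txSpan(std::string_view) noexcept { return {}; }
    static SpanGuard consensusSpan(std::string_view) noexcept { return {}; }

    template <class T>
    void setAttribute(std::string_view, T const&) noexcept {}
    void setOk() noexcept {}
    void discard() noexcept {}
};

}  // namespace xrpl::telemetry

#endif  // !XRPL_ENABLE_TELEMETRY
```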
**Key new files**:

- `include/xrpl/telemetry/Telemetry.h`
- `include/xrpl/telemetry/SpanGuard.h`
- `src/libxrpl/telemetry/NullTelemetry.cpp`

**Reference**:

- [04-code-samples.md §4.1](./04-code-samples.md) — Full `Telemetry` interface with `Setup` struct, lifecycle, tracer access, span creation, and component filtering methods
- [04-code-samples.md §4.2-4.3](./04-code-samples.md) — SpanGuard with factory methods, pimpl design, no-op stub, and discard support
- [03-implementation-strategy.md §3.1](./03-implementation-strategy.md) — Directory structure: `include/xrpl/telemetry/` for headers, `src/libxrpl/telemetry/` for implementation
- [03-implementation-strategy.md §3.7.3](./03-implementation-strategy.md) — Conditional instrumentation and zero-cost compile-time disabled pattern

---
## Task 3: Implement OTel-Backed Telemetry
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
**Objective**: Implement the real `Telemetry` class that initializes the OTel SDK, configures the OTLP exporter and batch processor, and creates tracers/spans.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Create `src/libxrpl/telemetry/Telemetry.cpp` (compiled only when `XRPL_ENABLE_TELEMETRY=ON`):
|
||||
- `class TelemetryImpl : public Telemetry` that:
|
||||
- In `start()`: creates a `TracerProvider` with:
|
||||
- Resource attributes: `service.name`, `service.version`, `service.instance.id`
|
||||
- An `OtlpHttpExporter` pointed at `setup.exporterEndpoint` (default `localhost:4318`)
|
||||
- A `BatchSpanProcessor` with configurable batch size and delay
|
||||
- A `TraceIdRatioBasedSampler` using `setup.samplingRatio`
|
||||
- Sets the global `TracerProvider`
|
||||
- In `stop()`: calls `ForceFlush()` then shuts down the provider
|
||||
- In `startSpan()`: delegates to `getTracer()->StartSpan(name, ...)`
|
||||
- `shouldTraceRpc()` etc. read from `Setup` fields
|
||||
|
||||
- Create `src/libxrpl/telemetry/TelemetryConfig.cpp`:
|
||||
- `setup_Telemetry()` parses the `[telemetry]` config section from `xrpld.cfg`
|
||||
- Maps config keys: `enabled`, `exporter`, `endpoint`, `sampling_ratio`, `trace_rpc`, `trace_transactions`, `trace_consensus`, `trace_peer`
|
||||
|
||||
- Wire `make_Telemetry()` factory:
|
||||
- If `setup.enabled` is true AND `XRPL_ENABLE_TELEMETRY` is defined: return `TelemetryImpl`
|
||||
- Otherwise: return `NullTelemetry`
|
||||
|
||||
- Add telemetry source files to CMake. When `XRPL_ENABLE_TELEMETRY=ON`, compile `Telemetry.cpp` and `TelemetryConfig.cpp` and link against the OpenTelemetry SDK (per the POC lessons learned below, the Conan package exposes the umbrella target `opentelemetry-cpp::opentelemetry-cpp` rather than per-component targets, and builds the OTLP HTTP exporter). When OFF, compile only `NullTelemetry.cpp`.
|
||||
|
||||
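
To make the provider assembly concrete, here is a sketch of what `TelemetryImpl::start()` could look like, assuming the HTTP exporter noted in the POC lessons learned and the member names `setup_` / `sdkProvider_` used elsewhere in this plan; exact SDK calls should be checked against the pinned opentelemetry-cpp version.

```cpp
// Sketch only; setup_ and sdkProvider_ are assumed members of TelemetryImpl.
#include <opentelemetry/exporters/otlp/otlp_http_exporter_factory.h>
#include <opentelemetry/sdk/resource/resource.h>
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
#include <opentelemetry/sdk/trace/samplers/trace_id_ratio.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

#include <chrono>
#include <memory>

void
TelemetryImpl::start()
{
    namespace otlp = opentelemetry::exporter::otlp;
    namespace sdktrace = opentelemetry::sdk::trace;

    otlp::OtlpHttpExporterOptions exporterOpts;
    exporterOpts.url = setup_.exporterEndpoint;  // e.g. http://localhost:4318/v1/traces

    sdktrace::BatchSpanProcessorOptions batchOpts;
    batchOpts.max_queue_size = 2048;
    batchOpts.schedule_delay_millis = std::chrono::milliseconds(5000);

    auto resource = opentelemetry::sdk::resource::Resource::Create(
        {{"service.name", setup_.serviceName},
         {"service.version", setup_.serviceVersion},
         {"service.instance.id", setup_.serviceInstanceId}});

    // The factory returns unique_ptr<sdk::TracerProvider>; hold it in a
    // std::shared_ptr member and wrap it for the global API provider
    // (see the POC lessons learned table).
    sdkProvider_ = sdktrace::TracerProviderFactory::Create(
        sdktrace::BatchSpanProcessorFactory::Create(
            otlp::OtlpHttpExporterFactory::Create(exporterOpts), batchOpts),
        resource,
        std::make_unique<sdktrace::TraceIdRatioBasedSampler>(
            setup_.samplingRatio));

    opentelemetry::trace::Provider::SetTracerProvider(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::TracerProvider>(
            std::shared_ptr<opentelemetry::trace::TracerProvider>(sdkProvider_)));
}
```
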
**Key new files**:
|
||||
|
||||
- `src/libxrpl/telemetry/Telemetry.cpp`
|
||||
- `src/libxrpl/telemetry/TelemetryConfig.cpp`
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `CMakeLists.txt` (add telemetry library target)
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [04-code-samples.md §4.1](./04-code-samples.md) — `Telemetry` interface that `TelemetryImpl` must implement
|
||||
- [05-configuration-reference.md §5.2](./05-configuration-reference.md) — `setup_Telemetry()` config parser implementation
|
||||
- [02-design-decisions.md §2.2](./02-design-decisions.md) — OTLP/gRPC exporter config (endpoint, TLS options)
|
||||
- [02-design-decisions.md §2.4.1](./02-design-decisions.md) — Resource attributes: `service.name`, `service.version`, `service.instance.id`, `xrpl.network.id`
|
||||
- [03-implementation-strategy.md §3.4](./03-implementation-strategy.md) — Per-operation CPU costs and overhead budget for span creation
|
||||
- [03-implementation-strategy.md §3.5](./03-implementation-strategy.md) — Memory overhead: static (~456 KB) and dynamic (~1.2 MB) budgets
|
||||
|
||||
---
|
||||
|
||||
## Task 4: Integrate Telemetry into Application Lifecycle
|
||||
|
||||
**Objective**: Wire the `Telemetry` object into the `ServiceRegistry` / `Application` so all components can access it.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `include/xrpl/core/ServiceRegistry.h`:
|
||||
- Forward-declare `namespace telemetry { class Telemetry; }` inside `namespace xrpl`
|
||||
- Add pure virtual method: `virtual telemetry::Telemetry& getTelemetry() = 0;`
|
||||
- (`Application` extends `ServiceRegistry`, so this is automatically available on `Application` too)
|
||||
|
||||
- Edit `src/xrpld/app/main/Application.cpp` (the `ApplicationImp` class):
|
||||
- Add member: `std::unique_ptr<telemetry::Telemetry> telemetry_;`
|
||||
- In the member initializer list, construct telemetry with an empty
|
||||
`serviceInstanceId` (node identity is not yet known):
|
||||
```cpp
, telemetry_(
      telemetry::make_Telemetry(
          telemetry::setup_Telemetry(
              config_->section("telemetry"),
              "", // Updated later via setServiceInstanceId()
              BuildInfo::getVersionString()),
          logs_->journal("Telemetry")))
```
|
||||
- In `setup()`, after `nodeIdentity_` is resolved, inject the node
|
||||
public key as the service instance ID:
|
||||
```cpp
if (!config_->section("telemetry").exists("service_instance_id"))
    telemetry_->setServiceInstanceId(
        toBase58(TokenType::NodePublic, nodeIdentity_->first));
```
|
||||
- In `start()`: call `telemetry_->start()`
|
||||
- In `run()` (shutdown path): call `telemetry_->stop()` (to flush pending spans)
|
||||
- Implement `getTelemetry()` override: return `*telemetry_`
|
||||
|
||||
- Add `[telemetry]` section to the example config `cfg/xrpld-example.cfg`:
|
||||
```ini
# [telemetry]
# enabled=1
# endpoint=http://localhost:4318/v1/traces
# sampling_ratio=1.0
# trace_rpc=1
```
|
||||
|
||||
> **Access patterns**: Components holding `ServiceRegistry&` (e.g.
|
||||
> `NetworkOPsImp`) call `registry_.get().getTelemetry()`. Components
|
||||
> holding `Application&` (e.g. `ServerHandler`, `PeerImp`,
|
||||
> `RCLConsensusAdaptor`) call `app_.getTelemetry()` directly. Both
|
||||
> resolve to the same `Telemetry` instance.
|
||||
|
||||
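
A condensed sketch of the `ServiceRegistry.h` change from the first bullet above (illustrative; the existing accessors on `ServiceRegistry` are elided):

```cpp
// include/xrpl/core/ServiceRegistry.h (excerpt, illustrative)
namespace xrpl {

namespace telemetry {
class Telemetry;  // forward declaration keeps OTel types out of this header
}

class ServiceRegistry
{
public:
    virtual ~ServiceRegistry() = default;

    // Implemented by ApplicationImp; returns the NullTelemetry instance when
    // telemetry is compiled out or disabled in config.
    virtual telemetry::Telemetry&
    getTelemetry() = 0;

    // ... existing pure-virtual service accessors ...
};

}  // namespace xrpl
```
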
**Key modified files**:
|
||||
|
||||
- `include/xrpl/core/ServiceRegistry.h`
|
||||
- `src/xrpld/app/main/Application.cpp`
|
||||
- `cfg/xrpld-example.cfg` (example config)
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [05-configuration-reference.md §5.3](./05-configuration-reference.md) — `ApplicationImp` changes: member declaration, constructor init, `start()`/`stop()` wiring, `getTelemetry()` override
|
||||
- [05-configuration-reference.md §5.1](./05-configuration-reference.md) — `[telemetry]` config section format and all option defaults
|
||||
- [03-implementation-strategy.md §3.9.2](./03-implementation-strategy.md) — File impact assessment: `Application.cpp` ~15 lines added, ~3 changed (Low risk)
|
||||
|
||||
---
|
||||
|
||||
## Task 5: Add SpanGuard Factory Methods
|
||||
|
||||
**Objective**: Add static factory methods to SpanGuard that provide type-safe, one-liner instrumentation and compile to zero-cost no-ops when telemetry is disabled. This replaces the earlier macro-based approach (`TracingInstrumentation.h` has been removed).
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Update `include/xrpl/telemetry/SpanGuard.h`:
|
||||
- Add static factory methods that access the global `Telemetry::getInstance()` singleton and check the relevant component filter before creating a span:
|
||||
|
||||
```cpp
// Each factory checks the global Telemetry instance internally.
// No Telemetry& reference needed at the call site.
auto span = telemetry::SpanGuard::rpcSpan("rpc.request");
span.setAttribute("xrpl.rpc.command", command);
span.setAttribute("xrpl.rpc.status", status);
```
|
||||
|
||||
- Factory methods: `rpcSpan()`, `txSpan()`, `consensusSpan()`, `peerSpan()`, `ledgerSpan()`, `span()`
|
||||
- Use the pimpl idiom to hide all OTel types from the public header (zero `opentelemetry/` includes)
|
||||
- When `XRPL_ENABLE_TELEMETRY` is NOT defined, the entire class compiles to a no-op stub with empty inline method bodies
|
||||
|
||||
- No separate `TracingInstrumentation.h` file is needed. All instrumentation call sites use `#include <xrpl/telemetry/SpanGuard.h>` directly.
|
||||
|
||||
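
To make the zero-cost claim concrete, a sketch of one possible disabled-build stub follows (illustrative; [04-code-samples.md §4.3](./04-code-samples.md) has the full method set):

```cpp
// Sketch of the XRPL_ENABLE_TELEMETRY-off path: every member is an empty
// inline no-op, so instrumented call sites optimize away entirely.
#ifndef XRPL_ENABLE_TELEMETRY

#include <exception>
#include <string_view>

namespace xrpl::telemetry {

class SpanGuard
{
public:
    static SpanGuard rpcSpan(std::string_view) { return {}; }
    static SpanGuard txSpan(std::string_view) { return {}; }
    static SpanGuard consensusSpan(std::string_view) { return {}; }
    static SpanGuard peerSpan(std::string_view) { return {}; }
    static SpanGuard ledgerSpan(std::string_view) { return {}; }

    template <class T>
    void setAttribute(std::string_view, T const&) {}
    void setOk() {}
    void addEvent(std::string_view) {}
    void recordException(std::exception const&) {}
    void discard() {}
};

}  // namespace xrpl::telemetry

#endif  // XRPL_ENABLE_TELEMETRY
```
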
**Key modified file**:
|
||||
|
||||
- `include/xrpl/telemetry/SpanGuard.h`
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [04-code-samples.md §4.3](./04-code-samples.md) — SpanGuard API reference: factory methods, usage patterns, compile-time disabled behavior, and discard support
|
||||
- [03-implementation-strategy.md §3.7.3](./03-implementation-strategy.md) — Conditional instrumentation pattern: factory methods handle compile-time and runtime checks internally
|
||||
- [03-implementation-strategy.md §3.9.7](./03-implementation-strategy.md) — Before/after code examples showing minimal intrusiveness (~1-3 lines per instrumentation point)
|
||||
|
||||
---
|
||||
|
||||
## Task 6: Instrument RPC ServerHandler
|
||||
|
||||
> **WS** = WebSocket
|
||||
|
||||
**Objective**: Add tracing to the HTTP RPC entry point so every incoming RPC request creates a span.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/rpc/detail/ServerHandler.cpp`:
|
||||
- `#include <xrpl/telemetry/SpanGuard.h>`
|
||||
- In `ServerHandler::onRequest(Session& session)`:
|
||||
- At the top of the method, add: `auto span = telemetry::SpanGuard::rpcSpan("rpc.request");`
|
||||
- After the RPC command name is extracted, set attribute: `span.setAttribute("xrpl.rpc.command", command);`
|
||||
- After the response status is known, set: `span.setAttribute("http.status_code", static_cast<int64_t>(statusCode));`
|
||||
- Wrap error paths with: `span.recordException(e);`
|
||||
- In `ServerHandler::processRequest(...)`:
|
||||
- Add a child span: `auto span = telemetry::SpanGuard::rpcSpan("rpc.process");`
|
||||
- Set method attribute: `span.setAttribute("xrpl.rpc.method", request_method);`
|
||||
- In `ServerHandler::onWSMessage(...)` (WebSocket path):
|
||||
- Add: `auto span = telemetry::SpanGuard::rpcSpan("rpc.ws.message");`
|
||||
|
||||
- The goal is to see spans like:
|
||||
```
rpc.request
└── rpc.process
```
|
||||
in Tempo/Grafana for every HTTP RPC call.
|
||||
|
||||
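
Putting the bullets above together, the instrumented entry point could look roughly like this (a sketch; `command` and `statusCode` stand in for whatever `onRequest()` already has in scope, and the existing request handling is elided):

```cpp
// Sketch only — see 04-code-samples.md §4.5.3 for the authoritative sample.
#include <xrpl/telemetry/SpanGuard.h>

void
ServerHandler::onRequest(Session& session)
{
    auto span = telemetry::SpanGuard::rpcSpan("rpc.request");
    try
    {
        // ... existing parsing; `command` extracted from the request body ...
        span.setAttribute("xrpl.rpc.command", command);

        // ... existing dispatch into processRequest() ...
        span.setAttribute(
            "http.status_code", static_cast<int64_t>(statusCode));
    }
    catch (std::exception const& e)
    {
        span.recordException(e);
        throw;
    }
}
```
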
**Key modified file**:
|
||||
|
||||
- `src/xrpld/rpc/detail/ServerHandler.cpp` (~15-25 lines added)
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [04-code-samples.md §4.5.3](./04-code-samples.md) — Complete `ServerHandler::onRequest()` instrumented code sample using SpanGuard factory methods
|
||||
- [01-architecture-analysis.md §1.5](./01-architecture-analysis.md) — RPC request flow diagram: HTTP request -> attributes -> jobqueue.enqueue -> rpc.command -> response
|
||||
- [01-architecture-analysis.md §1.6](./01-architecture-analysis.md) — Key trace points table: `rpc.request` in `ServerHandler.cpp::onRequest()` (Priority: High)
|
||||
- [02-design-decisions.md §2.3](./02-design-decisions.md) — Span naming convention: `rpc.request`, `rpc.command.*`
|
||||
- [02-design-decisions.md §2.4.2](./02-design-decisions.md) — RPC span attributes: `xrpl.rpc.command`, `xrpl.rpc.version`, `xrpl.rpc.role`, `xrpl.rpc.params`
|
||||
- [03-implementation-strategy.md §3.9.2](./03-implementation-strategy.md) — File impact: `ServerHandler.cpp` ~40 lines added, ~10 changed (Low risk)
|
||||
|
||||
---
|
||||
|
||||
## Task 7: Instrument RPC Command Execution
|
||||
|
||||
**Objective**: Add per-command tracing inside the RPC handler so each command (e.g., `submit`, `account_info`, `server_info`) gets its own child span.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/rpc/detail/RPCHandler.cpp`:
|
||||
- `#include <xrpl/telemetry/SpanGuard.h>`
|
||||
- In `doCommand(RPC::JsonContext& context, Json::Value& result)`:
|
||||
- At the top: `auto span = telemetry::SpanGuard::rpcSpan("rpc.command." + context.method);`
|
||||
- Set attributes:
|
||||
- `span.setAttribute("xrpl.rpc.command", context.method);`
|
||||
- `span.setAttribute("xrpl.rpc.version", static_cast<int64_t>(context.apiVersion));`
|
||||
- `span.setAttribute("xrpl.rpc.role", (context.role == Role::ADMIN) ? "admin" : "user");`
|
||||
- On success: `span.setAttribute("xrpl.rpc.status", "success");`
|
||||
- On error: `span.setAttribute("xrpl.rpc.status", "error");` and set the error message
|
||||
|
||||
- After this, traces in Tempo/Grafana should look like:
|
||||
```
rpc.request (xrpl.rpc.command=account_info)
└── rpc.process
    └── rpc.command.account_info (xrpl.rpc.version=2, xrpl.rpc.role=user, xrpl.rpc.status=success)
```
|
||||
|
||||
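
A small helper makes the attribute pattern explicit; the names below follow the bullets above and are otherwise hypothetical (the real change is a few lines inside `doCommand()` itself):

```cpp
// Hypothetical helper illustrating the per-command span; doCommand() would
// call this at the top and set xrpl.rpc.status just before returning.
#include <xrpl/telemetry/SpanGuard.h>

static telemetry::SpanGuard
makeCommandSpan(RPC::JsonContext const& context)
{
    auto span = telemetry::SpanGuard::rpcSpan("rpc.command." + context.method);
    span.setAttribute("xrpl.rpc.command", context.method);
    span.setAttribute(
        "xrpl.rpc.version", static_cast<int64_t>(context.apiVersion));
    span.setAttribute(
        "xrpl.rpc.role", (context.role == Role::ADMIN) ? "admin" : "user");
    return span;
}
```
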
**Key modified file**:
|
||||
|
||||
- `src/xrpld/rpc/detail/RPCHandler.cpp` (~15-20 lines added)
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [04-code-samples.md §4.5.3](./04-code-samples.md) — `ServerHandler::onRequest()` code sample (includes child span pattern for `rpc.command.*`)
|
||||
- [02-design-decisions.md §2.3](./02-design-decisions.md) — Span naming: `rpc.command.*` pattern with dynamic command name (e.g., `rpc.command.server_info`)
|
||||
- [02-design-decisions.md §2.4.2](./02-design-decisions.md) — RPC attribute schema: `xrpl.rpc.command`, `xrpl.rpc.version`, `xrpl.rpc.role`, `xrpl.rpc.status`
|
||||
- [01-architecture-analysis.md §1.6](./01-architecture-analysis.md) — Key trace points table: `rpc.command.*` in `RPCHandler.cpp::doCommand()` (Priority: High)
|
||||
- [02-design-decisions.md §2.6.5](./02-design-decisions.md) — Correlation with PerfLog: how `doCommand()` can link trace_id with existing PerfLog entries
|
||||
- [03-implementation-strategy.md §3.4.4](./03-implementation-strategy.md) — RPC request overhead budget: ~1.75 μs total per request
|
||||
|
||||
---
|
||||
|
||||
## Task 8: Build, Run, and Verify End-to-End
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol
|
||||
|
||||
**Objective**: Prove the full pipeline works: xrpld emits traces -> OTel Collector receives them -> Tempo stores them for Grafana visualization.
|
||||
|
||||
**What to do**:
|
||||
|
||||
1. **Start the Docker stack**:
|
||||
|
||||
```bash
docker compose -f docker/telemetry/docker-compose.yml up -d
```
|
||||
|
||||
Verify Collector health: `curl http://localhost:13133`
|
||||
|
||||
2. **Build xrpld with telemetry**:
|
||||
|
||||
```bash
# Adjust for your actual build workflow
conan install . --build=missing -o with_telemetry=True
cmake --preset default -DXRPL_ENABLE_TELEMETRY=ON
cmake --build --preset default
```
|
||||
|
||||
3. **Configure xrpld**:
|
||||
Add to `xrpld.cfg` (or your local test config):
|
||||
|
||||
```ini
[telemetry]
enabled=1
endpoint=http://localhost:4318/v1/traces
sampling_ratio=1.0
trace_rpc=1
```
|
||||
|
||||
4. **Start xrpld** in standalone mode:
|
||||
|
||||
```bash
./rippled --conf xrpld.cfg -a --start
```
|
||||
|
||||
5. **Generate RPC traffic**:
|
||||
|
||||
```bash
# server_info
curl -s -X POST http://localhost:5005 \
  -H "Content-Type: application/json" \
  -d '{"method":"server_info","params":[{}]}'

# ledger
curl -s -X POST http://localhost:5005 \
  -H "Content-Type: application/json" \
  -d '{"method":"ledger","params":[{"ledger_index":"current"}]}'

# account_info (will error in standalone, that's fine — we trace errors too)
curl -s -X POST http://localhost:5005 \
  -H "Content-Type: application/json" \
  -d '{"method":"account_info","params":[{"account":"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"}]}'
```
|
||||
|
||||
6. **Verify in Grafana (Tempo)**:
|
||||
- Open `http://localhost:3000`
|
||||
- Navigate to Explore → select Tempo datasource
|
||||
- Search for service `xrpld`
|
||||
- Confirm you see traces with spans: `rpc.request` -> `rpc.process` -> `rpc.command.server_info`
|
||||
- Click into a trace and verify attributes: `xrpl.rpc.command`, `xrpl.rpc.status`, `xrpl.rpc.version`
|
||||
|
||||
7. **Verify zero-overhead when disabled**:
|
||||
- Rebuild with `XRPL_ENABLE_TELEMETRY=OFF`, or set `enabled=0` in config
|
||||
- Run the same RPC calls
|
||||
- Confirm no new traces appear and no errors in xrpld logs
|
||||
|
||||
**Verification Checklist**:
|
||||
|
||||
- [ ] Docker stack starts without errors
|
||||
- [ ] xrpld builds with `-DXRPL_ENABLE_TELEMETRY=ON`
|
||||
- [ ] xrpld starts and connects to OTel Collector (check xrpld logs for telemetry messages)
|
||||
- [ ] Traces appear in Grafana/Tempo under service "xrpld"
|
||||
- [ ] Span hierarchy is correct (parent-child relationships)
|
||||
- [ ] Span attributes are populated (`xrpl.rpc.command`, `xrpl.rpc.status`, etc.)
|
||||
- [ ] Error spans show error status and message
|
||||
- [ ] Building with `XRPL_ENABLE_TELEMETRY=OFF` produces no regressions
|
||||
- [ ] Setting `enabled=0` at runtime produces no traces and no errors
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [06-implementation-phases.md §6.11.1](./06-implementation-phases.md) — Phase 1 definition of done: SDK compiles, runtime toggle works, span creation verified in Tempo, config validation passes
|
||||
- [06-implementation-phases.md §6.11.2](./06-implementation-phases.md#6112-phase-2-rpc-tracing) — Phase 2 definition of done: 100% RPC coverage, traceparent propagation, <1ms p99 overhead, dashboard deployed
|
||||
- [06-implementation-phases.md §6.8](./06-implementation-phases.md) — Success metrics: trace coverage >95%, CPU overhead <3%, memory <5 MB, latency impact <2%
|
||||
- [03-implementation-strategy.md §3.9.5](./03-implementation-strategy.md) — Backward compatibility: config optional, protocol unchanged, `XRPL_ENABLE_TELEMETRY=OFF` produces identical binary
|
||||
- [01-architecture-analysis.md §1.8](./01-architecture-analysis.md) — Observable outcomes: what traces, metrics, and dashboards to expect
|
||||
|
||||
---
|
||||
|
||||
## Task 9: Document POC Results and Next Steps
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket
|
||||
|
||||
**Objective**: Capture findings, screenshots, and remaining work for the team.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Take screenshots of Grafana/Tempo showing:
|
||||
- The service list with "xrpld"
|
||||
- A trace with the full span tree
|
||||
- Span detail view showing attributes
|
||||
- Document any issues encountered (build issues, SDK quirks, missing attributes)
|
||||
- Note performance observations (build time impact, any noticeable runtime overhead)
|
||||
- Write a short summary of what the POC proves and what it doesn't cover yet:
|
||||
- **Proves**: OTel SDK integrates with xrpld, OTLP export works, RPC traces visible
|
||||
- **Doesn't cover**: Cross-node P2P context propagation, consensus tracing, protobuf trace context, W3C traceparent header extraction, tail-based sampling, production deployment
|
||||
- Outline next steps (mapping to the full plan phases):
|
||||
- [Phase 2](./06-implementation-phases.md) completion: [W3C header extraction](./02-design-decisions.md) (§2.5), WebSocket tracing, all [RPC handlers](./01-architecture-analysis.md) (§1.6)
|
||||
- [Phase 3](./06-implementation-phases.md): [Protobuf `TraceContext` message](./04-code-samples.md) (§4.4), [transaction relay tracing](./04-code-samples.md) (§4.5.1) across nodes
|
||||
- [Phase 4](./06-implementation-phases.md): [Consensus round and phase tracing](./04-code-samples.md) (§4.5.2)
|
||||
- [Phase 5](./06-implementation-phases.md): [Production collector config](./05-configuration-reference.md) (§5.5.2), [Grafana dashboards](./07-observability-backends.md) (§7.6), [alerting](./07-observability-backends.md) (§7.6.3)
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [06-implementation-phases.md §6.1](./06-implementation-phases.md) — Full 5-phase timeline overview and Gantt chart
|
||||
- [06-implementation-phases.md §6.10](./06-implementation-phases.md) — Crawl-Walk-Run strategy: POC is the CRAWL phase, next steps are WALK and RUN
|
||||
- [06-implementation-phases.md §6.12](./06-implementation-phases.md) — Recommended implementation order (14 steps across 9 weeks)
|
||||
- [03-implementation-strategy.md §3.9](./03-implementation-strategy.md) — Code intrusiveness assessment and risk matrix for each remaining component
|
||||
- [07-observability-backends.md §7.2](./07-observability-backends.md) — Production backend selection (Tempo, Elastic APM, Honeycomb, Datadog)
|
||||
- [02-design-decisions.md §2.5](./02-design-decisions.md) — Context propagation design: W3C HTTP headers, protobuf P2P, JobQueue internal
|
||||
- [00-tracing-fundamentals.md](./00-tracing-fundamentals.md) — Reference for team onboarding on distributed tracing concepts
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| Task | Description | New Files | Modified Files | Depends On |
|
||||
| ---- | ------------------------------------ | --------- | -------------- | ---------- |
|
||||
| 0 | Docker observability stack | 4 | 0 | — |
|
||||
| 1 | OTel C++ SDK dependency | 0 | 2 | — |
|
||||
| 2 | Core Telemetry interface + NullImpl | 3 | 0 | 1 |
|
||||
| 3 | OTel-backed Telemetry implementation | 2 | 1 | 1, 2 |
|
||||
| 4 | Application lifecycle integration | 0 | 3 | 2, 3 |
|
||||
| 5 | SpanGuard factory methods | 0 | 1 | 2 |
|
||||
| 6 | Instrument RPC ServerHandler | 0 | 1 | 4, 5 |
|
||||
| 7 | Instrument RPC command execution | 0 | 1 | 4, 5 |
|
||||
| 8 | End-to-end verification | 0 | 0 | 0-7 |
|
||||
| 9 | Document results and next steps | 1 | 0 | 8 |
|
||||
|
||||
**Parallel work**: Tasks 0 and 1 can run in parallel. Tasks 2 and 5 have no dependency on each other. Tasks 6 and 7 can be done in parallel once Tasks 4 and 5 are complete.
|
||||
|
||||
---
|
||||
|
||||
## Next Steps (Post-POC)
|
||||
|
||||
> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket
|
||||
|
||||
### Metrics Pipeline for Grafana Dashboards
|
||||
|
||||
The current POC exports **traces only**. Grafana's Explore view can query Tempo for individual traces, but time-series charts (latency histograms, request throughput, error rates) require a **metrics pipeline**. To enable this:
|
||||
|
||||
1. **Add a `spanmetrics` connector** to the OTel Collector config that derives RED metrics (Rate, Errors, Duration) from trace spans automatically:
|
||||
|
||||
```yaml
connectors:
  spanmetrics:
    histogram:
      explicit:
        buckets: [1ms, 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 5s]
    dimensions:
      - name: xrpl.rpc.command
      - name: xrpl.rpc.status

exporters:
  prometheus:
    endpoint: 0.0.0.0:8889

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [debug, otlp/tempo, spanmetrics]
    metrics:
      receivers: [spanmetrics]
      exporters: [prometheus]
```
|
||||
|
||||
2. **Add Prometheus** to the Docker Compose stack to scrape the collector's metrics endpoint.
|
||||
|
||||
3. **Add Prometheus as a Grafana datasource** and build dashboards for:
|
||||
- RPC request latency (p50/p95/p99) by command
|
||||
- RPC throughput (requests/sec) by command
|
||||
- Error rate by command
|
||||
- Span duration distribution
|
||||
|
||||
### Additional Instrumentation
|
||||
|
||||
- **W3C `traceparent` header extraction** in `ServerHandler` to support cross-service context propagation from external callers
|
||||
- **WebSocket RPC tracing** in `ServerHandler::onWSMessage()`
|
||||
- **Transaction relay tracing** across nodes using protobuf `TraceContext` messages
|
||||
- **Consensus round and phase tracing** for validator coordination visibility
|
||||
- **Ledger close tracing** to measure close-to-validated latency
|
||||
|
||||
### Production Hardening
|
||||
|
||||
- **Tail-based sampling** in the OTel Collector to reduce volume while retaining error/slow traces
|
||||
- **TLS configuration** for the OTLP exporter in production deployments
|
||||
- **Resource limits** on the batch processor queue to prevent unbounded memory growth
|
||||
- **Health monitoring** for the telemetry pipeline itself (collector lag, export failures)
|
||||
|
||||
### POC Lessons Learned
|
||||
|
||||
Issues encountered during POC implementation that inform future work:
|
||||
|
||||
| Issue | Resolution | Impact on Future Work |
|
||||
| -------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ---------------------------------------------------------------- |
|
||||
| Conan lockfile rejected `opentelemetry-cpp/1.18.0` | Used `--lockfile=""` to bypass | Lockfile must be regenerated when adding new dependencies |
|
||||
| Conan package only builds OTLP HTTP exporter, not gRPC | Switched from gRPC to HTTP exporter (`localhost:4318/v1/traces`) | HTTP exporter is the default; gRPC requires custom Conan profile |
|
||||
| CMake target `opentelemetry-cpp::api` etc. don't exist in Conan package | Use umbrella target `opentelemetry-cpp::opentelemetry-cpp` | Conan targets differ from upstream CMake targets |
|
||||
| OTel Collector `logging` exporter deprecated | Renamed to `debug` exporter | Use `debug` in all collector configs going forward |
|
||||
| Macro parameter `telemetry` collided with `::xrpl::telemetry::` namespace | Replaced macros with SpanGuard factory methods (no macros needed) | Factory methods avoid macro hygiene issues entirely |
|
||||
| `opentelemetry::trace::Scope` creates new context on move | Store scope as member, create once in constructor | SpanGuard move semantics need care with Scope lifecycle |
|
||||
| `TracerProviderFactory::Create` returns `unique_ptr<sdk::TracerProvider>`, not `nostd::shared_ptr` | Use `std::shared_ptr` member, wrap in `nostd::shared_ptr` for global provider | OTel SDK factory return types don't match API provider types |
|
||||
@@ -1,239 +0,0 @@
|
||||
# Phase 2: RPC Tracing Completion Task List
|
||||
|
||||
> **Goal**: Complete RPC tracing coverage with unit tests, Grafana search filters, node health attributes, and config hardening. Build on the Phase 1c SpanGuard factory foundation to achieve production-quality RPC observability.
|
||||
>
|
||||
> **Scope**: Unit tests for core telemetry, Grafana Tempo search filters, node health span attributes, config validation (`std::clamp`).
|
||||
>
|
||||
> **Branch**: `pratik/otel-phase2-rpc-tracing` (from `pratik/otel-phase1c-rpc-integration`)
|
||||
|
||||
### Related Plan Documents
|
||||
|
||||
| Document | Relevance |
|
||||
| ------------------------------------------------------------ | ------------------------------------------------------------- |
|
||||
| [04-code-samples.md](./04-code-samples.md) | TraceContextPropagator (§4.4.2), RPC instrumentation (§4.5.3) |
|
||||
| [02-design-decisions.md](./02-design-decisions.md) | W3C Trace Context (§2.5), span attributes (§2.4.2) |
|
||||
| [06-implementation-phases.md](./06-implementation-phases.md) | Phase 2 tasks (§6.3), definition of done (§6.11.2) |
|
||||
|
||||
---
|
||||
|
||||
## Task 2.1: W3C Trace Context HTTP Header Extraction
|
||||
|
||||
**Status**: DEFERRED → Phase 3
|
||||
|
||||
**Reason**: W3C context propagation (`traceparent`/`tracestate` headers) requires a consumer — in Phase 2, RPC spans are entirely local to the node. Phase 3 introduces cross-node transaction tracing via protobuf context propagation, which is the first use case for extracted trace context. Implementing it here without a consumer would be dead code.
|
||||
|
||||
**Implemented in**: `pratik/otel-phase3-tx-tracing` — `TraceContextPropagator.h/.cpp`
|
||||
|
||||
---
|
||||
|
||||
## Task 2.2: Per-Category Span Creation
|
||||
|
||||
**Status**: COMPLETE (superseded by Phase 1c design)
|
||||
|
||||
**Original plan**: Add `XRPL_TRACE_PEER` and `XRPL_TRACE_LEDGER` macros.
|
||||
|
||||
**Actual implementation**: Phase 1c replaced all tracing macros with the `SpanGuard::span(TraceCategory, prefix, name)` factory pattern. The `TraceCategory` enum (`Rpc`, `Transactions`, `Consensus`, `Peer`, `Ledger`) serves the same conditional-creation purpose without macros. No separate task needed — the factory already supports all categories.
|
||||
|
||||
---
|
||||
|
||||
## Task 2.3: Add shouldTraceLedger() to Telemetry Interface
|
||||
|
||||
**Objective**: The `Setup` struct has a `traceLedger` field but there's no corresponding virtual method. Add it for interface completeness.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `include/xrpl/telemetry/Telemetry.h`:
|
||||
- Add `virtual bool shouldTraceLedger() const = 0;`
|
||||
|
||||
- Update all implementations:
|
||||
- `src/libxrpl/telemetry/Telemetry.cpp` (TelemetryImpl, NullTelemetryOtel)
|
||||
- `src/libxrpl/telemetry/NullTelemetry.cpp` (NullTelemetry)
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `include/xrpl/telemetry/Telemetry.h`
|
||||
- `src/libxrpl/telemetry/Telemetry.cpp`
|
||||
- `src/libxrpl/telemetry/NullTelemetry.cpp`
|
||||
|
||||
---
|
||||
|
||||
## Task 2.4: Unit Tests for Core Telemetry Infrastructure
|
||||
|
||||
**Status**: COMPLETE
|
||||
|
||||
**Objective**: Add unit tests for the core telemetry abstractions to validate correctness and catch regressions.
|
||||
|
||||
**Implemented**:
|
||||
|
||||
- `src/tests/libxrpl/telemetry/TelemetryConfig.cpp`:
|
||||
- Test Setup defaults (all fields have correct initial values)
|
||||
- Test `setup_Telemetry` config parser (empty section, full section, edge cases)
|
||||
- Test `samplingRatio` clamping (values outside 0.0-1.0)
|
||||
|
||||
- `src/tests/libxrpl/telemetry/SpanGuardFactory.cpp`:
|
||||
- Test null guard methods are safe (setAttribute, setOk, setError, addEvent on null)
|
||||
- Test category span returns null when telemetry disabled
|
||||
- Test child/linked span null when no parent context
|
||||
- Test move construction transfers ownership
|
||||
- Test recordException safe on null guard
|
||||
- Test discard() safe on null guard
|
||||
|
||||
- `src/tests/libxrpl/telemetry/main.cpp` — GTest runner
|
||||
- `src/tests/libxrpl/CMakeLists.txt` — test target with optional OTel linking
|
||||
|
||||
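
As an illustration of the clamping coverage, a GTest case along these lines was the intent (a sketch; the exact `Section` API and `setup_Telemetry()` signature follow this plan rather than verified code):

```cpp
// Illustrative test for sampling_ratio clamping (names per this plan).
#include <gtest/gtest.h>
#include <xrpl/telemetry/Telemetry.h>

TEST(TelemetryConfig, SamplingRatioClampedToUnitInterval)
{
    Section section{"telemetry"};
    section.set("enabled", "1");
    section.set("sampling_ratio", "7.5");  // out of range; expect clamp to 1.0

    auto const setup =
        xrpl::telemetry::setup_Telemetry(section, "test-node-key", "0.0.0");

    EXPECT_TRUE(setup.enabled);
    EXPECT_DOUBLE_EQ(1.0, setup.samplingRatio);
}
```
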
---
|
||||
|
||||
## Task 2.5: Enhance RPC Span Attributes
|
||||
|
||||
**Status**: DEFERRED (low priority)
|
||||
|
||||
**Reason**: The high-value attributes (`command`, `version`, `role`, `status`) are already set by Phase 1c. The remaining HTTP transport-level attributes (`http.method`, `net.peer.ip`, `http.status_code`) provide limited additional insight since:
|
||||
|
||||
- `http.method` is always POST for JSON-RPC
|
||||
- `net.peer.ip` is debug-level info available in logs
|
||||
- `xrpl.rpc.duration_ms` is redundant with span duration (OTel captures start/end time natively)
|
||||
|
||||
These can be added later if dashboard queries specifically need them. The node health attributes (Task 2.8) provide far more operational value and were prioritized instead.
|
||||
|
||||
---
|
||||
|
||||
## Task 2.6: Build Verification and Performance Baseline
|
||||
|
||||
**Objective**: Verify the build succeeds with and without telemetry, and establish a performance baseline.
|
||||
|
||||
**What to do**:
|
||||
|
||||
1. Build with `telemetry=ON` and verify no compilation errors
|
||||
2. Build with `telemetry=OFF` and verify no regressions
|
||||
3. Run existing unit tests to verify no breakage
|
||||
4. Document any build issues in lessons.md
|
||||
|
||||
**Verification Checklist**:
|
||||
|
||||
- [ ] `conan install . --build=missing -o telemetry=True` succeeds
|
||||
- [ ] `cmake --preset default -Dtelemetry=ON` configures correctly
|
||||
- [ ] Build succeeds with telemetry ON
|
||||
- [ ] Build succeeds with telemetry OFF
|
||||
- [ ] Existing tests pass with telemetry ON
|
||||
- [ ] Existing tests pass with telemetry OFF
|
||||
|
||||
---
|
||||
|
||||
## Task 2.8: RPC Span Attribute Enrichment — Node Health Context
|
||||
|
||||
> **Source**: [External Dashboard Parity](../docs/superpowers/specs/2026-03-30-external-dashboard-parity-design.md) — adds node-level health context inspired by the community [xrpl-validator-dashboard](https://github.com/realgrapedrop/xrpl-validator-dashboard).
|
||||
>
|
||||
> **Downstream**: Phase 7 (MetricsRegistry uses these attributes for alerting context), Phase 10 (validation checks for these attributes).
|
||||
|
||||
**Objective**: Add node-level health state to every `rpc.command.*` span so operators can correlate RPC behavior with node state in Tempo.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/rpc/detail/RPCHandler.cpp`:
|
||||
- In the `rpc.command.*` span creation block (after existing `setAttribute` calls for `xrpl.rpc.command`, `xrpl.rpc.version`, etc.):
|
||||
- Add `xrpl.node.amendment_blocked` (bool) — from `context.app.getOPs().isAmendmentBlocked()`
|
||||
- Add `xrpl.node.server_state` (string) — from `context.app.getOPs().strOperatingMode()`
|
||||
|
||||
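
The change itself is two attribute calls next to the existing ones; a hypothetical excerpt (`span` is the existing `rpc.command.*` guard):

```cpp
// Excerpt for RPCHandler.cpp, placed after the existing RPC attributes.
span.setAttribute(
    "xrpl.node.amendment_blocked",
    context.app.getOPs().isAmendmentBlocked());
span.setAttribute(
    "xrpl.node.server_state", context.app.getOPs().strOperatingMode());
```
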
**New span attributes**:
|
||||
|
||||
| Attribute | Type | Source | Example |
|
||||
| ----------------------------- | ------ | ------------------------------------------- | -------- |
|
||||
| `xrpl.node.amendment_blocked` | bool | `context.app.getOPs().isAmendmentBlocked()` | `true` |
|
||||
| `xrpl.node.server_state` | string | `context.app.getOPs().strOperatingMode()` | `"full"` |
|
||||
|
||||
**Rationale**: When a node is amendment-blocked or in a degraded state, every RPC response is suspect. Tagging spans with this state enables Tempo TraceQL queries like:
|
||||
|
||||
```
{ name =~ "rpc.command.*" && span.xrpl.node.amendment_blocked = true }
```
|
||||
|
||||
This surfaces all RPCs served during a blocked period — critical for post-incident analysis.
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `src/xrpld/rpc/detail/RPCHandler.cpp`
|
||||
|
||||
**Exit Criteria**:
|
||||
|
||||
- [ ] `rpc.command.server_info` spans carry `xrpl.node.amendment_blocked` and `xrpl.node.server_state` attributes
|
||||
- [ ] No measurable latency impact (attribute values are cached atomics, not computed per-call)
|
||||
- [ ] Attributes appear in Tempo trace detail view
|
||||
|
||||
---
|
||||
|
||||
## Task 2.9: PathFind RPC Instrumentation
|
||||
|
||||
**Status**: COMPLETE
|
||||
|
||||
**Objective**: Trace the path_find and ripple_path_find RPC handlers to capture request latency and computation cost.
|
||||
|
||||
**Spans added**:
|
||||
|
||||
- `pathfind.request` — wraps `doPathFind()` and `doRipplePathFind()` RPC handlers
|
||||
- `pathfind.compute` — wraps `PathRequest::doUpdate()` (fast/normal attr)
|
||||
- `pathfind.update_all` — wraps `PathRequestManager::updateAll()` on ledger close (ledger_index attr)
|
||||
- `pathfind.discover` — wraps `Pathfinder::findPaths()` graph exploration (search_level attr)
|
||||
- `pathfind.rank` — wraps `Pathfinder::computePathRanks()` liquidity validation (num_paths attr)
|
||||
|
||||
**New file**: `src/xrpld/rpc/detail/PathFindSpanNames.h`
|
||||
|
||||
**Modified files**:
|
||||
|
||||
- `src/xrpld/rpc/handlers/orderbook/PathFind.cpp`
|
||||
- `src/xrpld/rpc/handlers/orderbook/RipplePathFind.cpp`
|
||||
- `src/xrpld/rpc/detail/PathRequest.cpp`
|
||||
- `src/xrpld/rpc/detail/PathRequestManager.cpp`
|
||||
- `src/xrpld/rpc/detail/Pathfinder.cpp`
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| Task | Description | Status | Notes |
|
||||
| ---- | ------------------------------------------- | ------------------- | ------------------------------------------------ |
|
||||
| 2.1 | W3C Trace Context header extraction | Deferred → Phase 3 | No consumer in Phase 2; needs cross-node tracing |
|
||||
| 2.2 | Per-category span creation | Complete (Phase 1c) | Superseded by TraceCategory enum + SpanGuard |
|
||||
| 2.3 | Add shouldTraceLedger() interface method | Complete (Phase 1c) | Delivered in Phase 1c base branch |
|
||||
| 2.4 | Unit tests for core telemetry | Complete | TelemetryConfig + SpanGuardFactory tests |
|
||||
| 2.5 | Enhanced RPC span attributes (HTTP-level) | Deferred | Low value; span duration covers timing natively |
|
||||
| 2.6 | Build verification and performance baseline | Complete | Verified in CI on Phase 1c |
|
||||
| 2.7 | Grafana Tempo search filters | Complete | rpc-command, rpc-status, rpc-role filters |
|
||||
| 2.8 | RPC span attribute enrichment (node health) | Complete | amendment_blocked + server_state |
|
||||
| 2.9 | PathFind RPC instrumentation (5 spans) | Complete | request, compute, update_all, discover, rank |
|
||||
|
||||
**Delivered in this branch**: Tasks 2.4, 2.7, 2.8, 2.9.
|
||||
**Deferred with rationale**: Tasks 2.1 (→Phase 3), 2.5 (low priority).
|
||||
**Superseded**: Task 2.2 (Phase 1c SpanGuard factory covers this).
|
||||
|
||||
---
|
||||
|
||||
## Known Issues / Future Work
|
||||
|
||||
### Thread safety of TelemetryImpl::stop() vs startSpan()
|
||||
|
||||
`TelemetryImpl::stop()` resets `sdkProvider_` (a `std::shared_ptr`) without
|
||||
synchronization. `getTracer()` reads the same member from RPC handler threads.
|
||||
This is a data race if any thread calls `startSpan()` concurrently with `stop()`.
|
||||
|
||||
**Current mitigation**: `Application::stop()` shuts down `serverHandler_`,
|
||||
`overlay_`, and `jobQueue_` before calling `telemetry_->stop()`, so no callers
|
||||
remain. See comments in `Telemetry.cpp:stop()` and `Application.cpp`.
|
||||
|
||||
**TODO**: Add an `std::atomic<bool> stopped_` flag checked in `getTracer()` to
|
||||
make this robust against future shutdown order changes.
|
||||
|
||||
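
A minimal sketch of the proposed guard, assuming the member names used in this document (`sdkProvider_`) plus the new `stopped_` flag; the existing flush/shutdown sequence in `stop()` is unchanged:

```cpp
// Sketch: once stop() begins, getTracer() stops touching sdkProvider_ and
// hands out a no-op tracer instead, so late spans are dropped, not raced.
#include <opentelemetry/trace/noop.h>

#include <atomic>

std::atomic<bool> stopped_{false};  // new TelemetryImpl member

opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer>
TelemetryImpl::getTracer(opentelemetry::nostd::string_view name)
{
    if (stopped_.load(std::memory_order_acquire) || !sdkProvider_)
        return opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer>(
            new opentelemetry::trace::NoopTracer());
    return sdkProvider_->GetTracer(name);
}

void
TelemetryImpl::stop()
{
    stopped_.store(true, std::memory_order_release);
    // Existing behaviour follows: ForceFlush(), Shutdown(), sdkProvider_.reset().
}
```
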
### Macro incompatibility: XRPL_TRACE_SPAN vs XRPL_TRACE_SET_ATTR
|
||||
|
||||
`XRPL_TRACE_SPAN` and `XRPL_TRACE_SPAN_KIND` declare `_xrpl_guard_` as a bare
|
||||
`SpanGuard`, but `XRPL_TRACE_SET_ATTR` and `XRPL_TRACE_EXCEPTION` call
|
||||
`_xrpl_guard_.has_value()` which requires `std::optional<SpanGuard>`. Using
|
||||
`XRPL_TRACE_SPAN` followed by `XRPL_TRACE_SET_ATTR` in the same scope would
|
||||
fail to compile.
|
||||
|
||||
**Current mitigation**: No call site currently uses `XRPL_TRACE_SPAN` — all
|
||||
production code uses the conditional macros (`XRPL_TRACE_RPC`, `XRPL_TRACE_TX`,
|
||||
etc.) which correctly wrap the guard in `std::optional`.
|
||||
|
||||
**TODO**: Either make `XRPL_TRACE_SPAN`/`XRPL_TRACE_SPAN_KIND` also wrap in
|
||||
`std::optional`, or document that `XRPL_TRACE_SET_ATTR` is only compatible with
|
||||
the conditional macros.
|
||||
@@ -1,491 +0,0 @@
|
||||
# Phase 3: Transaction Tracing Task List
|
||||
|
||||
> **Goal**: Trace the full transaction lifecycle from RPC submission through peer relay, including cross-node context propagation via Protocol Buffer extensions. This is the WALK phase that demonstrates true distributed tracing.
|
||||
>
|
||||
> **Scope**: Protocol Buffer `TraceContext` message, context serialization, PeerImp transaction instrumentation, NetworkOPs processing instrumentation, HashRouter visibility, and multi-node relay context propagation.
|
||||
>
|
||||
> **Branch**: `pratik/otel-phase3-tx-tracing` (from `pratik/otel-phase2-rpc-tracing`)
|
||||
|
||||
### Related Plan Documents
|
||||
|
||||
| Document | Relevance |
|
||||
| ------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ |
|
||||
| [04-code-samples.md](./04-code-samples.md) | TraceContext protobuf (§4.4.1), PeerImp instrumentation (§4.5.1), context serialization (§4.4.2) |
|
||||
| [01-architecture-analysis.md](./01-architecture-analysis.md) | Transaction flow (§1.3), key trace points (§1.6) |
|
||||
| [06-implementation-phases.md](./06-implementation-phases.md) | Phase 3 tasks (§6.4), definition of done (§6.11.3) |
|
||||
| [02-design-decisions.md](./02-design-decisions.md) | Context propagation design (§2.5), attribute schema (§2.4.3) |
|
||||
|
||||
---
|
||||
|
||||
## Task 3.1: Define TraceContext Protocol Buffer Message
|
||||
|
||||
**Objective**: Add trace context fields to the P2P protocol messages so trace IDs can propagate across nodes.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `include/xrpl/proto/xrpl.proto` (or `src/ripple/proto/ripple.proto`, wherever the proto is):
|
||||
- Add `TraceContext` message definition:
|
||||
```protobuf
message TraceContext {
    bytes trace_id = 1;      // 16-byte trace identifier
    bytes span_id = 2;       // 8-byte span identifier
    uint32 trace_flags = 3;  // bit 0 = sampled
    string trace_state = 4;  // W3C tracestate value
}
```
|
||||
- Add `optional TraceContext trace_context = 1001;` to:
|
||||
- `TMTransaction`
|
||||
- `TMProposeSet` (for Phase 4 use)
|
||||
- `TMValidation` (for Phase 4 use)
|
||||
- Use high field numbers (1001+) to avoid conflicts with existing fields
|
||||
|
||||
- Regenerate protobuf C++ code
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `include/xrpl/proto/xrpl.proto` (or equivalent)
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [04-code-samples.md §4.4.1](./04-code-samples.md) — TraceContext message definition
|
||||
- [02-design-decisions.md §2.5.2](./02-design-decisions.md) — Protocol buffer context propagation design
|
||||
|
||||
---
|
||||
|
||||
## Task 3.2: Implement Protobuf Context Serialization
|
||||
|
||||
**Objective**: Create utilities to serialize/deserialize OTel trace context to/from protobuf `TraceContext` messages.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Create `include/xrpl/telemetry/TraceContextPropagator.h` (extend from Phase 2 if exists, or add protobuf methods):
|
||||
- Add protobuf-specific methods:
|
||||
- `static Context extractFromProtobuf(protocol::TraceContext const& proto)` — reconstruct OTel context from protobuf fields
|
||||
- `static void injectToProtobuf(Context const& ctx, protocol::TraceContext& proto)` — serialize current span context into protobuf fields
|
||||
- Both methods guard behind `#ifdef XRPL_ENABLE_TELEMETRY`
|
||||
|
||||
- Create/extend `src/libxrpl/telemetry/TraceContextPropagator.cpp`:
|
||||
- Implement extraction: read trace_id (16 bytes), span_id (8 bytes), trace_flags from protobuf, construct `SpanContext`, wrap in `Context`
|
||||
- Implement injection: get current span from context, serialize its TraceId, SpanId, and TraceFlags into protobuf fields
|
||||
|
||||
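
A sketch of the extraction side, under the assumption that the protobuf fields are exactly those defined in Task 3.1 (`protocol::TraceContext`); the injection side mirrors it in reverse:

```cpp
// Sketch only; see 04-code-samples.md §4.4.2 for the full implementation.
#ifdef XRPL_ENABLE_TELEMETRY
opentelemetry::context::Context
TraceContextPropagator::extractFromProtobuf(protocol::TraceContext const& proto)
{
    namespace trace = opentelemetry::trace;

    // Malformed or missing context: return an empty context so the caller
    // starts a new root span instead of failing.
    if (proto.trace_id().size() != trace::TraceId::kSize ||
        proto.span_id().size() != trace::SpanId::kSize)
        return opentelemetry::context::Context{};

    trace::TraceId traceId(opentelemetry::nostd::span<uint8_t const, 16>(
        reinterpret_cast<uint8_t const*>(proto.trace_id().data()), 16));
    trace::SpanId spanId(opentelemetry::nostd::span<uint8_t const, 8>(
        reinterpret_cast<uint8_t const*>(proto.span_id().data()), 8));

    trace::SpanContext remoteCtx(
        traceId,
        spanId,
        trace::TraceFlags(static_cast<uint8_t>(proto.trace_flags())),
        /* is_remote = */ true);

    return opentelemetry::context::Context{}.SetValue(
        trace::kSpanKey,
        opentelemetry::nostd::shared_ptr<trace::Span>(
            new trace::DefaultSpan(remoteCtx)));
}
#endif  // XRPL_ENABLE_TELEMETRY
```
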
**Key new/modified files**:
|
||||
|
||||
- `include/xrpl/telemetry/TraceContextPropagator.h`
|
||||
- `src/libxrpl/telemetry/TraceContextPropagator.cpp`
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [04-code-samples.md §4.4.2](./04-code-samples.md) — Full extract/inject implementation
|
||||
|
||||
---
|
||||
|
||||
## Task 3.3: Instrument PeerImp Transaction Handling
|
||||
|
||||
**Objective**: Add trace spans to the peer-level transaction receive and relay path.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/overlay/detail/PeerImp.cpp`:
|
||||
- In `onMessage(TMTransaction)` / `handleTransaction()`:
|
||||
- Extract parent trace context from incoming `TMTransaction::trace_context` field (if present)
|
||||
- Create `tx.receive` span as child of extracted context (or new root if none)
|
||||
- Set attributes: `xrpl.tx.hash`, `xrpl.peer.id`, `xrpl.tx.status`
|
||||
- On HashRouter suppression (duplicate): set `xrpl.tx.suppressed=true`, add `tx.duplicate` event
|
||||
- Wrap validation call with child span `tx.validate`
|
||||
- Wrap relay with `tx.relay` span
|
||||
- When relaying to peers:
|
||||
- Inject current trace context into outgoing `TMTransaction::trace_context`
|
||||
- Set `xrpl.tx.relay_count` attribute
|
||||
|
||||
- Use `SpanGuard::span(TraceCategory::Transactions, "tx", "receive")` factory
|
||||
(Phase 1c replaced macros with the SpanGuard factory pattern)
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `src/xrpld/overlay/detail/PeerImp.cpp`
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [04-code-samples.md §4.5.1](./04-code-samples.md) — Full PeerImp instrumentation example
|
||||
- [01-architecture-analysis.md §1.3](./01-architecture-analysis.md) — Transaction flow diagram
|
||||
- [01-architecture-analysis.md §1.6](./01-architecture-analysis.md) — tx.receive trace point
|
||||
|
||||
---
|
||||
|
||||
## Task 3.4: Instrument NetworkOPs Transaction Processing
|
||||
|
||||
**Objective**: Trace the transaction processing pipeline in NetworkOPs, covering both sync and async paths.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/app/misc/NetworkOPs.cpp`:
|
||||
- In `processTransaction()`:
|
||||
- Create `tx.process` span
|
||||
- Set attributes: `xrpl.tx.hash`, `xrpl.tx.type`, `xrpl.tx.local` (whether from RPC or peer)
|
||||
- Record whether sync or async path is taken
|
||||
|
||||
- In `doTransactionAsync()`:
|
||||
- Capture parent context before queuing
|
||||
- Create `tx.queue` span with queue depth attribute
|
||||
- Add event when transaction is dequeued for processing
|
||||
|
||||
- In `doTransactionSync()`:
|
||||
- Create `tx.process_sync` span
|
||||
- Record result (applied, queued, rejected)
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `src/xrpld/app/misc/NetworkOPs.cpp`
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [01-architecture-analysis.md §1.6](./01-architecture-analysis.md) — tx.validate and tx.process trace points
|
||||
- [02-design-decisions.md §2.4.3](./02-design-decisions.md) — Transaction attribute schema
|
||||
|
||||
---
|
||||
|
||||
## Task 3.5: Instrument HashRouter for Dedup Visibility
|
||||
|
||||
**Objective**: Make transaction deduplication visible in traces by recording HashRouter decisions as span attributes/events.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/overlay/detail/PeerImp.cpp` (in handleTransaction):
|
||||
- After calling `HashRouter::shouldProcess()` or `addSuppressionPeer()`:
|
||||
- Record `xrpl.tx.suppressed` attribute (true/false)
|
||||
- Record `xrpl.tx.flags` showing current HashRouter state (SAVED, TRUSTED, etc.)
|
||||
- Add `tx.first_seen` or `tx.duplicate` event
|
||||
|
||||
- This is NOT a modification to HashRouter itself — just recording its decisions as span attributes in the existing PeerImp instrumentation from Task 3.3.
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `src/xrpld/overlay/detail/PeerImp.cpp` (same changes as 3.3, logically grouped)
|
||||
|
||||
---
|
||||
|
||||
## Task 3.6: Context Propagation in Transaction Relay
|
||||
|
||||
**Objective**: Ensure trace context flows correctly when transactions are relayed between peers, creating linked spans across nodes.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Verify the relay path injects trace context:
|
||||
- When `PeerImp` relays a transaction, the `TMTransaction` message should carry `trace_context`
|
||||
- When a remote peer receives it, the context is extracted and used as parent
|
||||
|
||||
- Test context propagation:
|
||||
- Manually verify with 2+ node setup that trace IDs match across nodes
|
||||
- Confirm parent-child span relationships are correct in Tempo
|
||||
|
||||
- Handle edge cases:
|
||||
- Missing trace context (older peers): create new root span
|
||||
- Corrupted trace context: log warning, create new root span
|
||||
- Sampled-out traces: respect trace flags
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `src/xrpld/overlay/detail/PeerImp.cpp`
|
||||
- `src/xrpld/overlay/detail/OverlayImpl.cpp` (if relay method needs context param)
|
||||
|
||||
**Reference**:
|
||||
|
||||
- [02-design-decisions.md §2.5](./02-design-decisions.md) — Context propagation design
|
||||
- [04-code-samples.md §4.5.1](./04-code-samples.md) — Relay context injection pattern
|
||||
|
||||
---
|
||||
|
||||
## Task 3.7: Build Verification and Testing
|
||||
|
||||
**Objective**: Verify all Phase 3 changes compile and work correctly.
|
||||
|
||||
**What to do**:
|
||||
|
||||
1. Build with `telemetry=ON` — verify no compilation errors
|
||||
2. Build with `telemetry=OFF` — verify no regressions
|
||||
3. Run existing unit tests
|
||||
4. Verify protobuf regeneration produces correct C++ code
|
||||
5. Document any issues encountered
|
||||
|
||||
**Verification Checklist**:
|
||||
|
||||
- [ ] Protobuf changes generate valid C++
|
||||
- [ ] Build succeeds with telemetry ON
|
||||
- [ ] Build succeeds with telemetry OFF
|
||||
- [ ] Existing tests pass
|
||||
- [ ] No undefined symbols from new telemetry calls
|
||||
|
||||
---
|
||||
|
||||
## Task 3.8: Transaction Span Peer Version Attribute
|
||||
|
||||
> **Source**: [External Dashboard Parity](../docs/superpowers/specs/2026-03-30-external-dashboard-parity-design.md) — adds peer version context inspired by the community [xrpl-validator-dashboard](https://github.com/realgrapedrop/xrpl-validator-dashboard).
|
||||
>
|
||||
> **Upstream**: Phase 2 (RPC span infrastructure must exist).
|
||||
> **Downstream**: Phase 10 (validation checks for this attribute).
|
||||
|
||||
**Objective**: Add the relaying peer's rippled version to `tx.receive` spans so operators can correlate transaction issues with peer version mismatches during network upgrades.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Edit `src/xrpld/overlay/detail/PeerImp.cpp`:
|
||||
- In the `tx.receive` span block (after existing `xrpl.peer.id` setAttribute call):
|
||||
- Add `xrpl.peer.version` (string) — from `this->getVersion()`
|
||||
- Only set if `getVersion()` returns a non-empty string (avoid empty-string attributes)
|
||||
|
||||
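
The guard against empty strings is a one-liner; a hypothetical excerpt from the `tx.receive` span block in `PeerImp.cpp`:

```cpp
// Only attach the attribute when a version string is actually known.
if (auto const version = getVersion(); !version.empty())
    span.setAttribute("xrpl.peer.version", version);
```
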
**New span attribute**:
|
||||
|
||||
| Attribute | Type | Source | Example |
|
||||
| ------------------- | ------ | -------------------- | ----------------- |
|
||||
| `xrpl.peer.version` | string | `peer->getVersion()` | `"rippled-2.4.0"` |
|
||||
|
||||
**Rationale**: Transaction relay is where version mismatches cause subtle serialization or validation bugs. Tracing "this tx came from a v2.3.0 peer" helps diagnose compatibility issues. The community dashboard tracks peer versions externally; this brings version awareness into the trace itself.
|
||||
|
||||
**Key modified files**:
|
||||
|
||||
- `src/xrpld/overlay/detail/PeerImp.cpp`
|
||||
|
||||
**Exit Criteria**:
|
||||
|
||||
- [ ] `tx.receive` spans carry `xrpl.peer.version` attribute with a non-empty version string
|
||||
- [ ] Attribute is omitted (not set to empty string) when `getVersion()` returns empty
|
||||
- [ ] Attribute visible in Jaeger span detail view
|
||||
|
||||
---
|
||||
|
||||
## Task 3.9: Deterministic Transaction Trace ID
|
||||
|
||||
> **Upstream**: Task 3.2 (protobuf serialization), Task 3.3 (PeerImp span exists).
|
||||
> **Downstream**: Phase 10 (workload validation can query by tx hash directly).
|
||||
> **Pattern**: Mirrors the consensus deterministic trace ID in Phase 4a
|
||||
> (`createDeterministicContext` in `RCLConsensus.cpp`), adapted for transactions.
|
||||
|
||||
**Objective**: Derive the trace_id for transaction spans deterministically from the
|
||||
transaction hash so that all nodes handling the same transaction independently produce
|
||||
spans under the same trace_id — regardless of whether protobuf context propagation
|
||||
succeeds.
|
||||
|
||||
**Why**: The current approach creates spans with random trace_ids and relies entirely
|
||||
on protobuf `TraceContext` propagation to link them. If any hop in the relay chain
|
||||
drops the context (older peers, message corruption, mixed-version networks), the trace
|
||||
splits and downstream spans become impossible to find. With deterministic trace_ids,
|
||||
correlation is guaranteed because every node derives the same trace_id from the same
|
||||
`txID`.
|
||||
|
||||
**Approach — deterministic trace_id + protobuf span_id propagation**:
|
||||
|
||||
1. Derive `trace_id = txHash[0:16]` (first 16 bytes of the 32-byte transaction hash).
|
||||
2. Generate a random 8-byte `span_id` per node (each node's span is unique within
|
||||
the shared trace).
|
||||
3. Create the span under this deterministic context as parent.
|
||||
4. **Additionally**, if protobuf `TraceContext` is present in the incoming
|
||||
`TMTransaction` message, extract the sender's `span_id` and use it as the span's
|
||||
parent — this preserves parent-child ordering in the trace tree.
|
||||
5. If protobuf context is absent (older peer, first hop), the span still has the
|
||||
correct deterministic `trace_id` — it appears as a sibling root in the same trace
|
||||
rather than being lost.
|
||||
|
||||
This gives the best of both worlds: guaranteed cross-node correlation via deterministic
|
||||
`trace_id`, plus parent-child relay ordering via protobuf `span_id` when available.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Create `createDeterministicTxContext(uint256 const& txHash)` utility function:
|
||||
- Location: shared header or file-local in `PeerImp.cpp` and `NetworkOPs.cpp`
|
||||
(or a shared telemetry utility if both need it).
|
||||
- Pattern: identical to `createDeterministicContext(uint256 const& ledgerId)` in
|
||||
`RCLConsensus.cpp` — take `txHash[0:16]` as trace_id, random span_id via
|
||||
`default_prng()`, sampled flag set, `remote=false`.
|
||||
- Guard behind `#ifdef XRPL_ENABLE_TELEMETRY`.
|
||||
|
||||
```cpp
opentelemetry::context::Context
createDeterministicTxContext(uint256 const& txHash)
{
    namespace trace = opentelemetry::trace;

    // First 16 bytes of the 32-byte tx hash as trace ID.
    trace::TraceId traceId(
        opentelemetry::nostd::span<uint8_t const, 16>(txHash.data(), 16));

    // Random span_id so each node's span is unique within the trace.
    uint8_t spanIdBytes[8];
    auto const rval = default_prng()();
    std::memcpy(spanIdBytes, &rval, sizeof(spanIdBytes));
    trace::SpanId spanId(
        opentelemetry::nostd::span<uint8_t const, 8>(spanIdBytes, 8));

    trace::SpanContext syntheticCtx(
        traceId, spanId, trace::TraceFlags(1), /* remote = */ false);

    return opentelemetry::context::Context{}.SetValue(
        trace::kSpanKey,
        opentelemetry::nostd::shared_ptr<trace::Span>(
            new trace::DefaultSpan(syntheticCtx)));
}
```
|
||||
|
||||
- Edit `src/xrpld/overlay/detail/PeerImp.cpp` — restructure `handleTransaction()`:
|
||||
- **Move span creation after deserialization** (txID must be known first):
|
||||
1. Deserialize `STTx` and get `txID` (existing code at line ~1382).
|
||||
2. Create deterministic parent context: `auto detCtx = createDeterministicTxContext(txID)`.
|
||||
3. If `m->has_trace_context()`: extract protobuf context via `extractFromProtobuf()`,
|
||||
**combine** with deterministic trace_id — use the protobuf span_id as parent
|
||||
to preserve relay ordering, but override trace_id with the deterministic one.
|
||||
4. If no protobuf context: create span under `detCtx` directly.
|
||||
5. Set all existing attributes (`hash`, `peerId`, `peerVersion`, `suppressed`, etc.).
|
||||
|
||||
- **Combining deterministic trace_id with protobuf parent span_id**:
|
||||
When both are available, construct a synthetic `SpanContext` with:
|
||||
- `trace_id` = `txHash[0:16]` (deterministic)
|
||||
- `span_id` = extracted from protobuf (sender's span_id → becomes parent)
|
||||
- `trace_flags` = from protobuf
|
||||
- `remote` = true (came from another node)
|
||||
|
||||
```cpp
// Pseudo-code for the combined context:
auto detTraceId = trace::TraceId(txHash.data(), 16);
auto remoteSpanId = /* from extractFromProtobuf */;
auto remoteFlags = /* from extractFromProtobuf */;

trace::SpanContext combinedCtx(
    detTraceId, remoteSpanId, remoteFlags, /* remote = */ true);
// Use as parent context for the new span.
```
|
||||
|
||||
- Edit `src/xrpld/app/misc/NetworkOPs.cpp` — update `processTransaction()`:
|
||||
- `transaction->getID()` is already available at the top of the function.
|
||||
- Create deterministic parent context from `txID`.
|
||||
- Create `tx.process` span under this context.
|
||||
- No protobuf context to extract here (NetworkOPs is intra-node), so
|
||||
deterministic context alone is sufficient.

- Add `tx_trace_strategy` attribute to spans:
  - Add `inline constexpr auto traceStrategy = join(xrplTx, makeStr("trace_strategy"));`
    to `TxSpanNames.h`.
  - Set on each tx span: `span.setAttribute(tx_span::attr::traceStrategy, "deterministic")`.

**Key new/modified files**:

- `src/xrpld/overlay/detail/PeerImp.cpp` — restructured span creation
- `src/xrpld/app/misc/NetworkOPs.cpp` — deterministic context for tx.process
- `src/xrpld/app/misc/TxSpanNames.h` — new `traceStrategy` attribute constant
- New or shared utility for `createDeterministicTxContext()` (location TBD: could be
  a shared header like `include/xrpl/telemetry/DeterministicContext.h`, or file-local
  if only used in two places)

**Interaction with existing tasks**:

- **Task 3.3 (PeerImp instrumentation)**: The span creation in `handleTransaction()`
  must be restructured — the span currently starts before `txID` is known. This task
  moves it after deserialization.
- **Task 3.6 (Relay context propagation)**: Protobuf injection at the relay site
  remains the same — `injectToProtobuf()` serializes the current span's `span_id`.
  The receiver extracts it and combines with the deterministic `trace_id`.
- **Phase 4a (Consensus deterministic trace ID)**: This task follows the same pattern.
  Consider extracting a shared utility (e.g., `createDeterministicContext(uint256)`)
  that both consensus and transaction tracing use.

**Exit Criteria**:

- [ ] `tx.receive` and `tx.process` spans have deterministic trace_id = `txHash[0:16]`
- [ ] All nodes handling the same transaction produce spans under the same trace_id
- [ ] Protobuf `span_id` propagation still works when available (parent-child ordering)
- [ ] Missing protobuf context (old peer) degrades gracefully to sibling spans, not lost traces
- [ ] `xrpl.tx.trace_strategy` attribute set to `"deterministic"` on all tx spans
- [ ] Trace queryable by tx hash (truncate hash → trace_id → direct lookup in Tempo)

**Deliverables implemented (not in original plan)**:

- **`SpanGuard::txSpan()` factory method** (`include/xrpl/telemetry/SpanGuard.h`):
  Two overloads for creating transaction spans with deterministic trace IDs:
  - `txSpan(category, group, name, txHash)` — standalone span (deterministic
    trace_id from `txHash[0:16]`, no parent span_id).
  - `txSpan(category, group, name, txHash, parentCtx)` — child span (deterministic
    trace_id combined with protobuf-extracted parent span_id for relay ordering).

- **`TxTracing.h` helper functions** (`src/xrpld/overlay/detail/TxTracing.h`):
  File-local helpers that wrap `SpanGuard::txSpan()` for the two main PeerImp call
  sites (a declaration sketch follows this list):
  - `txReceiveSpan(txHash, parentCtx)` — creates `tx.receive` span with
    deterministic trace_id and optional protobuf parent context.
  - `txProcessSpan(txHash)` — creates `tx.process` span with deterministic
    trace_id only (no protobuf parent, used intra-node).
  - **Note**: `TxTracing.h` includes `xrpl.pb.h` unconditionally (outside
    `#ifdef XRPL_ENABLE_TELEMETRY`) because `protocol::TMTransaction` appears in
    the function signatures regardless of telemetry build mode.
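
A declaration-level sketch of these helpers; the namespaces and parameter types are assumptions for illustration:

```cpp
// Illustrative declarations only — real types/namespaces may differ.
namespace ripple::detail {

// tx.receive: deterministic trace_id = txHash[0:16]; parentCtx carries the
// protobuf-extracted sender span_id when the peer supplied one.
xrpl::telemetry::SpanGuard
txReceiveSpan(
    uint256 const& txHash,
    opentelemetry::context::Context const& parentCtx);

// tx.process: deterministic trace_id only — intra-node, so no protobuf parent.
xrpl::telemetry::SpanGuard
txProcessSpan(uint256 const& txHash);

}  // namespace ripple::detail
```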

---

## Task 3.10: TxQ Instrumentation

**Status**: COMPLETE

**Objective**: Trace the transaction queue lifecycle — enqueue decisions, direct apply, batch clear, ledger-close accept loop, per-tx apply, and cleanup.

**Spans added** (a usage sketch follows the file list):

- `txq.enqueue` — wraps `TxQ::apply()` with tx_hash attribute
- `txq.apply_direct` — wraps `TxQ::tryDirectApply()` fast-path
- `txq.batch_clear` — wraps `TxQ::tryClearAccountQueueUpThruTx()`
- `txq.accept` — wraps `TxQ::accept()` ledger-close dequeue with queue_size attr
- `txq.accept_tx` — per-tx span inside accept loop with tx_hash, ter_code,
  retries_remaining attributes
- `txq.cleanup` — wraps `TxQ::processClosedLedger()` with ledger_seq attribute

**New file**: `src/xrpld/app/misc/detail/TxQSpanNames.h`

**Modified file**: `src/xrpld/app/misc/detail/TxQ.cpp`
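
A hedged sketch of the `txq.accept` wrapping — the `XRPL_TRACE_TX` macro name follows the `XRPL_TRACE_CONSENSUS` pattern but is an assumption here, as are the attribute key and the exact `accept()` signature:

```cpp
// Hypothetical sketch — macro name, attribute key, and signature details
// are illustrative.
bool
TxQ::accept(Application& app, OpenView& view)
{
    XRPL_TRACE_TX(app.getTelemetry(), "txq.accept");
    XRPL_TRACE_SET_ATTR(
        "xrpl.txq.queue_size", static_cast<int64_t>(byFee_.size()));

    // ... existing dequeue loop; each iteration would open a txq.accept_tx
    // child span carrying tx_hash, ter_code, and retries_remaining ...
}
```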

---

## Summary

| Task | Description                         | New Files | Modified Files | Depends On |
| ---- | ----------------------------------- | --------- | -------------- | ---------- |
| 3.1  | TraceContext protobuf message       | 0         | 1              | Phase 2    |
| 3.2  | Protobuf context serialization      | 1-2       | 0              | 3.1        |
| 3.3  | PeerImp transaction instrumentation | 0         | 1              | 3.2        |
| 3.4  | NetworkOPs transaction processing   | 0         | 1              | Phase 2    |
| 3.5  | HashRouter dedup visibility         | 0         | 1              | 3.3        |
| 3.6  | Relay context propagation           | 0         | 1-2            | 3.3, 3.5   |
| 3.7  | Build verification and testing      | 0         | 0              | 3.1-3.6    |
| 3.8  | TX span peer version attribute      | 0         | 1              | 3.3        |
| 3.9  | Deterministic transaction trace ID  | 0-1       | 3              | 3.2, 3.3   |
| 3.10 | TxQ instrumentation (6 spans)       | 1         | 1              | 3.4        |

**Parallel work**: Tasks 3.1 and 3.4 can start in parallel. Task 3.2 depends on 3.1. Tasks 3.3 and 3.5 depend on 3.2. Task 3.6 depends on 3.3 and 3.5. Task 3.8 depends on 3.3 (span must exist). Task 3.9 depends on 3.2 and 3.3. Task 3.10 depends on 3.4 (tx.process span must exist).

**Exit Criteria** (from [06-implementation-phases.md §6.11.3](./06-implementation-phases.md)):

- [ ] Transaction traces span across nodes
- [ ] Trace context in Protocol Buffer messages
- [ ] HashRouter deduplication visible in traces
- [ ] <5% overhead on transaction throughput
- [ ] Deterministic trace_id: same trace_id for same tx across all nodes
- [ ] Protobuf span_id propagation preserves parent-child ordering when available

---

## Known Issues / Future Work

### Propagation utilities not yet wired into P2P flow

`extractFromProtobuf()` and `injectToProtobuf()` in `TraceContextPropagator.h`
are implemented and tested but not called from production code. To enable
cross-node distributed traces (a wiring sketch follows):

- Call `injectToProtobuf()` in `PeerImp` when sending `TMTransaction` /
  `TMProposeSet` messages
- Call `extractFromProtobuf()` in the corresponding message handlers to
  reconstruct the parent span context, then pass it to `startSpan()` as the
  parent

This was deferred to validate single-node tracing performance first.
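
A minimal sketch of the send side; the method shape and the propagator call signature are assumptions (the real signatures live in `TraceContextPropagator.h`):

```cpp
// Hypothetical send-side wiring — illustrative only.
void
PeerImp::relayTransaction(std::shared_ptr<protocol::TMTransaction> const& m)
{
#ifdef XRPL_ENABLE_TELEMETRY
    // Serialize the active span's trace_id/span_id into the message so the
    // receiver can reconstruct the parent context.
    xrpl::telemetry::TraceContextPropagator::injectToProtobuf(
        opentelemetry::context::RuntimeContext::GetCurrent(),
        *m->mutable_trace_context());
#endif
    send(std::make_shared<Message>(*m, protocol::mtTRANSACTION));
}
```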

### Unused trace_state proto field

The `TraceContext.trace_state` field (field 4) in `xrpl.proto` is reserved for
W3C `tracestate` vendor-specific key-value pairs but is not read or written by
`TraceContextPropagator`. Wire it when cross-vendor trace propagation is needed.
No wire cost since proto `optional` fields are zero-cost when absent.

@@ -1,929 +0,0 @@

# Phase 4: Consensus Tracing Task List

> **Goal**: Full observability into consensus rounds — track round lifecycle, phase transitions, proposal handling, and validation. This is the RUN phase that completes the distributed tracing story.
>
> **Scope**: RCLConsensus instrumentation for round starts, phase transitions (open/establish/accept), proposal send/receive, validation handling, and correlation with transaction traces from Phase 3.
>
> **Branch**: `pratik/otel-phase4-consensus-tracing` (from `pratik/otel-phase3-tx-tracing`)

### Related Plan Documents

| Document                                                     | Relevance                                                   |
| ------------------------------------------------------------ | ----------------------------------------------------------- |
| [04-code-samples.md](./04-code-samples.md)                   | Consensus instrumentation (§4.5.2), consensus span patterns |
| [01-architecture-analysis.md](./01-architecture-analysis.md) | Consensus round flow (§1.4), key trace points (§1.6)        |
| [06-implementation-phases.md](./06-implementation-phases.md) | Phase 4 tasks (§6.5), definition of done (§6.11.4)          |
| [02-design-decisions.md](./02-design-decisions.md)           | Consensus attribute schema (§2.4.4)                         |

---

## Task 4.1: Instrument Consensus Round Start

**Objective**: Create a root span for each consensus round that captures the round's key parameters.

**What to do**:

- Edit `src/xrpld/app/consensus/RCLConsensus.cpp` (see the sketch after this list):
  - In `RCLConsensus::startRound()` (or the Adaptor's startRound):
    - Create `consensus.round` span using `XRPL_TRACE_CONSENSUS` macro
    - Set attributes:
      - `xrpl.consensus.ledger.prev` — previous ledger hash
      - `xrpl.consensus.ledger.seq` — target ledger sequence
      - `xrpl.consensus.proposers` — number of trusted proposers
      - `xrpl.consensus.mode` — "proposing" or "observing"
    - Store the span context for use by child spans in phase transitions

- Add a member to hold current round trace context:
  - `opentelemetry::context::Context currentRoundContext_` (guarded by `#ifdef`)
  - Updated at round start, used by phase transition spans
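
A hedged sketch of the attribute wiring, using the macros this plan already defines; variable names are illustrative. Note that Phase 4a below revises the round span to a stored `SpanGuard` member, since the macro's local guard would end the span when `startRound()` returns:

```cpp
// Hypothetical sketch — variable names are illustrative assumptions.
void
RCLConsensus::Adaptor::startRound(/* ... */)
{
    XRPL_TRACE_CONSENSUS(app_.getTelemetry(), "consensus.round");
    XRPL_TRACE_SET_ATTR(
        "xrpl.consensus.ledger.prev", to_string(prevLedgerHash).c_str());
    XRPL_TRACE_SET_ATTR(
        "xrpl.consensus.ledger.seq", static_cast<int64_t>(ledgerSeq));
    XRPL_TRACE_SET_ATTR(
        "xrpl.consensus.proposers", static_cast<int64_t>(proposerCount));
    XRPL_TRACE_SET_ATTR(
        "xrpl.consensus.mode", proposing ? "proposing" : "observing");

    // ... existing round start logic ...
}
```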

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`
- `src/xrpld/app/consensus/RCLConsensus.h` (add context member)

**Reference**:

- [04-code-samples.md §4.5.2](./04-code-samples.md) — startRound instrumentation example
- [01-architecture-analysis.md §1.4](./01-architecture-analysis.md) — Consensus round flow

---

## Task 4.2: Instrument Phase Transitions

**Objective**: Create child spans for each consensus phase (open, establish, accept) to show timing breakdown.

**What to do**:

- Edit `src/xrpld/app/consensus/RCLConsensus.cpp`:
  - Identify where phase transitions occur (the `Consensus<Adaptor>` template drives this)
  - For each phase entry:
    - Create span as child of `currentRoundContext_`: `consensus.phase.open`, `consensus.phase.establish`, `consensus.phase.accept`
    - Set `xrpl.consensus.phase` attribute
    - Add `phase.enter` event at start, `phase.exit` event at end
    - Record phase duration in milliseconds

- In the `onClose` adaptor method:
  - Create `consensus.ledger_close` span
  - Set attributes: close_time, mode, transaction count in initial position

- Note: The Consensus template class in `src/xrpld/consensus/Consensus.h` drives phase transitions — Phase 4a instruments directly in the template

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`
- Possibly `include/xrpl/consensus/Consensus.h` (for template-level phase tracking)

**Reference**:

- [04-code-samples.md §4.5.2](./04-code-samples.md) — phaseTransition instrumentation

---

## Task 4.3: Instrument Proposal Handling

**Objective**: Trace proposal send and receive to show validator coordination.

**What to do**:

- Edit `src/xrpld/app/consensus/RCLConsensus.cpp`:
  - In `Adaptor::propose()`:
    - Create `consensus.proposal.send` span
    - Set attributes: `xrpl.consensus.round` (proposal sequence), proposal hash
    - Inject trace context into outgoing `TMProposeSet::trace_context` (from Phase 3 protobuf)

  - In `Adaptor::peerProposal()` (or wherever peer proposals are received):
    - Extract trace context from incoming `TMProposeSet::trace_context`
    - Create `consensus.proposal.receive` span as child of extracted context
    - Set attributes: `xrpl.consensus.proposer` (node ID), `xrpl.consensus.round`

  - In `Adaptor::share(RCLCxPeerPos)`:
    - Create `consensus.proposal.relay` span for relaying peer proposals

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`

**Reference**:

- [04-code-samples.md §4.5.2](./04-code-samples.md) — peerProposal instrumentation
- [02-design-decisions.md §2.4.4](./02-design-decisions.md) — Consensus attribute schema

---

## Task 4.4: Instrument Validation Handling

**Objective**: Trace validation send and receive to show ledger validation flow.

**What to do**:

- Edit `src/xrpld/app/consensus/RCLConsensus.cpp` (or the validation handler):
  - When sending our validation:
    - Create `consensus.validation.send` span
    - Set attributes: validated ledger hash, sequence, signing time

  - When receiving a peer validation:
    - Extract trace context from `TMValidation::trace_context` (if present)
    - Create `consensus.validation.receive` span
    - Set attributes: `xrpl.consensus.validator` (node ID), ledger hash

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`
- `src/xrpld/app/misc/NetworkOPs.cpp` (if validation handling is here)

---

## Task 4.5: Add Consensus-Specific Attributes

**Objective**: Enrich consensus spans with detailed attributes for debugging and analysis.

**What to do**:

- Review all consensus spans and ensure they include:
  - `xrpl.consensus.ledger.seq` — target ledger sequence number
  - `xrpl.consensus.round` — consensus round number
  - `xrpl.consensus.mode` — proposing/observing/wrongLedger
  - `xrpl.consensus.phase` — current phase name
  - `xrpl.consensus.phase_duration_ms` — time spent in phase
  - `xrpl.consensus.proposers` — number of trusted proposers
  - `xrpl.consensus.tx_count` — transactions in proposed set
  - `xrpl.consensus.disputes` — number of disputed transactions
  - `xrpl.consensus.converge_percent` — convergence percentage

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`

---

## Task 4.6: Correlate Transaction and Consensus Traces

**Objective**: Link transaction traces from Phase 3 with consensus traces so you can follow a transaction from submission through consensus into the ledger.

**What to do**:

- In `onClose()` or `onAccept()`:
  - When building the consensus position, link the round span to individual transaction spans using span links (if OTel SDK supports it) or events
  - At minimum, record the transaction hashes included in the consensus set as span events: `tx.included` with `xrpl.tx.hash` attribute (see the sketch below)

- In `processTransactionSet()` (NetworkOPs):
  - If the consensus round span context is available, create child spans for each transaction applied to the ledger
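
A minimal sketch of the event-based fallback, assuming the `addEvent(name, attrs)` overload that Task 4a.0 later introduces and a stored `roundSpan_` guard; both are assumptions at this point in the plan:

```cpp
// Hedged sketch — roundSpan_, acceptedTxHashes, and the addEvent overload
// are assumptions here.
for (auto const& txHash : acceptedTxHashes)
{
    if (roundSpan_)
        roundSpan_->addEvent(
            "tx.included", {{"xrpl.tx.hash", to_string(txHash)}});
}
```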

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`
- `src/xrpld/app/misc/NetworkOPs.cpp`

---

## Task 4.7: Build Verification and Testing

**Objective**: Verify all Phase 4 changes compile and don't affect consensus timing.

**What to do**:

1. Build with `telemetry=ON` — verify no compilation errors
2. Build with `telemetry=OFF` — verify no regressions (critical for consensus code)
3. Run existing consensus-related unit tests
4. Verify that all macros expand to no-ops when disabled
5. Check that no consensus-critical code paths are affected by instrumentation overhead

**Verification Checklist**:

- [ ] Build succeeds with telemetry ON
- [ ] Build succeeds with telemetry OFF
- [ ] Existing consensus tests pass
- [ ] No new includes in consensus headers when telemetry is OFF
- [ ] Phase timing instrumentation doesn't use blocking operations

---

## Task 4.8: Consensus Validation Span Enrichment — External Dashboard Parity

> **Source**: [External Dashboard Parity](../docs/superpowers/specs/2026-03-30-external-dashboard-parity-design.md) — adds validation agreement context inspired by the community [xrpl-validator-dashboard](https://github.com/realgrapedrop/xrpl-validator-dashboard).
>
> **Upstream**: Phase 4 tasks 4.1-4.4 (span creation must exist).
> **Downstream**: Phase 7 (ValidationTracker reads these attributes), Phase 10 (validation checks).

**Objective**: Add ledger hash, validation type, and quorum data to consensus validation spans on both send and receive paths. This enables trace-level validation agreement analysis — filter by ledger hash to see which validators agreed for a given ledger.

**What to do**:

- Edit `src/xrpld/app/consensus/RCLConsensus.cpp`:
  - On the `consensus.validation.send` span (in `validate()` / `doAccept()`):
    - Add `xrpl.validation.ledger_hash` (string) — the ledger hash being validated
    - Add `xrpl.validation.full` (bool) — whether this is a full validation (not partial)
  - On the `consensus.accept` span (in `onAccept()`):
    - Add `xrpl.consensus.validation_quorum` (int64) — from `app_.validators().quorum()`
    - Add `xrpl.consensus.proposers_validated` (int64) — from `result.proposers`

- Edit `src/xrpld/overlay/detail/PeerImp.cpp`:
  - On the `peer.validation.receive` span:
    - Add `xrpl.peer.validation.ledger_hash` (string) — from deserialized `STValidation` object
    - Add `xrpl.peer.validation.full` (bool) — from `STValidation` flags

**New span attributes**:

| Span                        | Attribute                            | Type   | Source                            |
| --------------------------- | ------------------------------------ | ------ | --------------------------------- |
| `consensus.validation.send` | `xrpl.validation.ledger_hash`        | string | Ledger hash from validate() args  |
| `consensus.validation.send` | `xrpl.validation.full`               | bool   | Full vs partial validation        |
| `peer.validation.receive`   | `xrpl.peer.validation.ledger_hash`   | string | From STValidation deserialization |
| `peer.validation.receive`   | `xrpl.peer.validation.full`          | bool   | From STValidation flags           |
| `consensus.accept`          | `xrpl.consensus.validation_quorum`   | int64  | `app_.validators().quorum()`      |
| `consensus.accept`          | `xrpl.consensus.proposers_validated` | int64  | `result.proposers`                |

**Rationale**: The external dashboard's most valuable feature is validation agreement tracking. By recording the ledger hash on both outgoing and incoming validation spans, we create the raw data for agreement analysis at the trace level. Example Tempo query:

```
{ name = "consensus.validation.send" && span.xrpl.validation.ledger_hash = "A1B2C3..." }
```

Phase 7's `ValidationTracker` builds metric-level aggregation (1h/24h agreement %) on top of this data.

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`
- `src/xrpld/overlay/detail/PeerImp.cpp`

**Exit Criteria**:

- [ ] `consensus.validation.send` spans carry `xrpl.validation.ledger_hash` and `xrpl.validation.full`
- [ ] `peer.validation.receive` spans carry `xrpl.peer.validation.ledger_hash` and `xrpl.peer.validation.full`
- [ ] `consensus.accept` spans carry `xrpl.consensus.validation_quorum` and `xrpl.consensus.proposers_validated`
- [ ] Ledger hash attributes match between send and receive for the same ledger
- [ ] No impact on consensus performance

---

## Summary

| Task | Description                                 | New Files | Modified Files | Depends On    |
| ---- | ------------------------------------------- | --------- | -------------- | ------------- |
| 4.1  | Consensus round start instrumentation       | 0         | 2              | Phase 3       |
| 4.2  | Phase transition instrumentation            | 0         | 1-2            | 4.1           |
| 4.3  | Proposal handling instrumentation           | 0         | 1              | 4.1           |
| 4.4  | Validation handling instrumentation         | 0         | 1-2            | 4.1           |
| 4.5  | Consensus-specific attributes               | 0         | 1              | 4.2, 4.3, 4.4 |
| 4.6  | Transaction-consensus correlation           | 0         | 2              | 4.2, Phase 3  |
| 4.7  | Build verification and testing              | 0         | 0              | 4.1-4.6       |
| 4.8  | Validation span enrichment (ext. dashboard) | 0         | 2              | 4.4           |

**Parallel work**: Tasks 4.2, 4.3, and 4.4 can run in parallel after 4.1 is complete. Task 4.5 depends on all three. Task 4.6 depends on 4.2 and Phase 3. Task 4.8 depends on 4.4 (validation spans must exist).

### Implemented Spans

| Span Name                   | Method                             | Key Attributes                                                                                                                                                                                                        |
| --------------------------- | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `consensus.proposal.send`   | `Adaptor::propose`                 | `xrpl.consensus.round`                                                                                                                                                                                                  |
| `consensus.ledger_close`    | `Adaptor::onClose`                 | `xrpl.consensus.ledger.seq`, `xrpl.consensus.mode`                                                                                                                                                                      |
| `consensus.accept`          | `Adaptor::onAccept`                | `xrpl.consensus.proposers`, `xrpl.consensus.round_time_ms`                                                                                                                                                              |
| `consensus.accept.apply`    | `Adaptor::doAccept`                | `xrpl.consensus.close_time`, `close_time_correct`, `close_resolution_ms`, `state`, `proposing`, `round_time_ms`, `ledger.seq`, `parent_close_time`, `close_time_self`, `close_time_vote_bins`, `resolution_direction` |
| `consensus.validation.send` | `Adaptor::onAccept` (via validate) | `xrpl.consensus.proposing`                                                                                                                                                                                              |

#### Close Time Attributes (consensus.accept.apply)

The `consensus.accept.apply` span captures ledger close time agreement details
driven by `avCT_CONSENSUS_PCT` (75% validator agreement threshold):

- **`xrpl.consensus.close_time`** — Agreed-upon ledger close time (epoch seconds). When validators disagree (`consensusCloseTime == epoch`), this is synthetically set to `prevCloseTime + 1s`.
- **`xrpl.consensus.close_time_correct`** — `true` if validators reached agreement, `false` if they "agreed to disagree" (close time forced to prev+1s).
- **`xrpl.consensus.close_resolution_ms`** — Rounding granularity for close time (starts at 30s, decreases as ledger interval stabilizes).
- **`xrpl.consensus.state`** — `"finished"` (normal) or `"moved_on"` (consensus failed, adopted best available).
- **`xrpl.consensus.proposing`** — Whether this node was proposing.
- **`xrpl.consensus.round_time_ms`** — Total consensus round duration.
- **`xrpl.consensus.parent_close_time`** — Previous ledger's close time (epoch seconds). Enables computing close-time deltas across consecutive rounds without correlating separate spans.
- **`xrpl.consensus.close_time_self`** — This node's own proposed close time before consensus voting.
- **`xrpl.consensus.close_time_vote_bins`** — Number of distinct close-time vote bins from peer proposals. Higher values indicate less agreement among validators.
- **`xrpl.consensus.resolution_direction`** — Whether close-time resolution `"increased"` (coarser), `"decreased"` (finer), or stayed `"unchanged"` relative to the previous ledger.

**Exit Criteria** (from [06-implementation-phases.md §6.11.4](./06-implementation-phases.md)):

- [x] Complete consensus round traces
- [x] Phase transitions visible
- [x] Proposals and validations traced
- [x] Close time agreement tracked (per `avCT_CONSENSUS_PCT`)
- [x] No impact on consensus timing

---

# Phase 4a: Establish-Phase Gap Fill & Cross-Node Correlation

> **Goal**: Fill tracing gaps in the consensus establish phase (disputes, convergence,
> threshold escalation, mode changes) and establish cross-node correlation using a
> deterministic shared trace ID derived from `previousLedger.id()`.
>
> **Approach**: Direct instrumentation in `Consensus.h` — the generic consensus
> template has full access to internal state (`convergePercent_`, `result_->disputes`,
> `mode_`, threshold logic). Telemetry access comes via a single new adaptor
> method `getTelemetry()`. Long-lived spans (round, establish) are stored as
> class members using `SpanGuard` directly — NOT the `XRPL_TRACE_*` convenience
> macros (which create local variables named `_xrpl_guard_`). Short-lived
> scoped spans (update_positions, check) can use the macros. All code compiles
> to no-ops when `XRPL_ENABLE_TELEMETRY` is not defined.
>
> **Branch**: `pratik/otel-phase4-consensus-tracing`

## Design: Switchable Correlation Strategy

Two strategies for cross-node trace correlation, switchable via config:

### Strategy A — Deterministic Trace ID (Default)

Derive `trace_id = SHA256(previousLedger.id())[0:16]` so all nodes in the same
consensus round share the same trace_id without P2P context propagation.

- **Pros**: All nodes appear in the same trace in Tempo/Jaeger automatically.
  No collector-side post-processing needed.
- **Cons**: Overrides OTel's random trace_id generation; requires custom
  `IdGenerator` or manual span context construction.

### Strategy B — Attribute-Based Correlation

Use normal random trace_id but attach `xrpl.consensus.ledger_id` as an attribute
on every consensus span. Correlation happens at query time via Tempo/Grafana
`by attribute` queries.

- **Pros**: Standard OTel trace_id semantics; no SDK customization.
- **Cons**: Cross-node correlation requires query-time joins, not automatic.

### Config

```ini
[telemetry]
# "deterministic" (default) or "attribute"
consensus_trace_strategy=deterministic
```

The C++ API to query this at runtime is `Telemetry::getConsensusTraceStrategy()`,
which returns a `std::string const&` (`"deterministic"` or `"attribute"`).

### Implementation

In `RCLConsensus::Adaptor::startRound()`:

- If `deterministic`:
  1. Compute `trace_id_bytes = SHA256(prevLedgerID)[0:16]`
  2. Construct `opentelemetry::trace::TraceId(trace_id_bytes)`
  3. Create a synthetic `SpanContext` with this trace_id and a random span_id:
     ```cpp
     auto traceId = opentelemetry::trace::TraceId(trace_id_bytes);
     auto spanId = opentelemetry::trace::SpanId(random_8_bytes);
     auto syntheticCtx = opentelemetry::trace::SpanContext(
         traceId, spanId, opentelemetry::trace::TraceFlags(1), false);
     ```
  4. Wrap in `opentelemetry::context::Context` via
     `opentelemetry::trace::SetSpan(context, syntheticSpan)`
  5. Call `startSpan("consensus.round", parentContext)` so the new span
     inherits the deterministic trace_id.
- If `attribute`: start a normal `consensus.round` span, set
  `xrpl.consensus.ledger_id = previousLedger.id()` as attribute.

Both strategies always set `xrpl.consensus.round_id` (round number) and
`xrpl.consensus.ledger_id` (previous ledger hash) as attributes.

---

## Design: Span Hierarchy

```
consensus.round (root — created in RCLConsensus::startRound, closed at accept)
│ link → previous round's SpanContext (follows-from)
│
├── consensus.establish (phaseEstablish → acceptance, in Consensus.h)
│   ├── consensus.update_positions (each updateOurPositions call)
│   │   └── consensus.dispute.resolve (per-tx dispute resolution event)
│   ├── consensus.check (each haveConsensus call)
│   └── consensus.mode_change (short-lived span in adaptor on mode transition)
│
├── consensus.accept (existing onAccept span — reparented under round)
│
└── consensus.validation.send (existing — reparented, follows-from link to round)
```

### Span Links (follows-from relationships)

| Link Source                               | Link Target                | Rationale                                                                       |
| ----------------------------------------- | -------------------------- | ------------------------------------------------------------------------------- |
| `consensus.round` (N+1)                   | `consensus.round` (N)      | Causal chain: round N+1 exists because round N accepted                         |
| `consensus.validation.send`               | `consensus.round`          | Validation follows from the round that produced it; may outlive the round span  |
| _(Phase 4b)_ Received proposal processing | Sender's `consensus.round` | Cross-node causal link via P2P context propagation                              |

---

## Task 4a.0: Prerequisites — Extend SpanGuard and Telemetry APIs

**Objective**: Add missing API surface needed by later tasks.

**What to do**:

1. **Add `SpanGuard::addEvent()` with attributes** (needed by Task 4a.5):
   The current `addEvent(string_view name)` only accepts a name. Add an
   overload that accepts key-value attributes:

   ```cpp
   using EventAttribute = std::pair<std::string_view, std::string_view>;

   void addEvent(std::string_view name,
                 std::initializer_list<EventAttribute> attrs);
   ```

   The `EventAttribute` type alias (defined in `SpanGuard.h`) keeps the
   public API free of OTel SDK types — callers pass plain `string_view`
   pairs and the implementation converts internally (a conversion sketch
   follows the usage example below).

   ```cpp
   // Example usage:
   guard.addEvent("dispute.resolve", {
       {"xrpl.tx.id", txIdStr},
       {"xrpl.dispute.our_vote", voteStr}
   });
   ```
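
   A sketch of how that internal conversion might look — the `span_` member name and the container-based `AddEvent` overload choice are assumptions:

   ```cpp
   // Hypothetical conversion sketch — member names are assumptions.
   void
   SpanGuard::addEvent(
       std::string_view name,
       std::initializer_list<EventAttribute> attrs)
   {
       namespace nostd = opentelemetry::nostd;
       // Convert the public string_view pairs into OTel attribute pairs;
       // the span copies them, so temporaries are safe here.
       std::vector<std::pair<nostd::string_view,
                             opentelemetry::common::AttributeValue>>
           converted;
       converted.reserve(attrs.size());
       for (auto const& [key, value] : attrs)
           converted.emplace_back(
               nostd::string_view(key.data(), key.size()),
               nostd::string_view(value.data(), value.size()));

       span_->AddEvent(
           nostd::string_view(name.data(), name.size()), converted);
   }
   ```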

2. **Add a `Telemetry::startSpan()` overload that accepts span links** (needed by Tasks 4a.2, 4a.8):
   The current `startSpan()` has no span link support. Add an overload that
   accepts a vector of `SpanContext` links for follows-from relationships:

   ```cpp
   virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
   startSpan(
       std::string_view name,
       opentelemetry::context::Context const& parentContext,
       std::vector<opentelemetry::trace::SpanContext> const& links,
       opentelemetry::trace::SpanKind kind = opentelemetry::trace::SpanKind::kInternal) = 0;
   ```

3. **Add `XRPL_TRACE_ADD_EVENT` macro** (needed by Task 4a.5):
   Add to `TracingInstrumentation.h` to expose `addEvent(name, attrs)` through
   the macro interface (consistent with `XRPL_TRACE_SET_ATTR` pattern). Wrap
   the body in `do { } while (0)` so the macro is dangling-else safe (see the
   Phase 4a implementation notes):

   ```cpp
   #ifdef XRPL_ENABLE_TELEMETRY
   #define XRPL_TRACE_ADD_EVENT(name, ...)                \
       do                                                 \
       {                                                  \
           if (_xrpl_guard_.has_value())                  \
           {                                              \
               _xrpl_guard_->addEvent(name, __VA_ARGS__); \
           }                                              \
       } while (0)
   #else
   #define XRPL_TRACE_ADD_EVENT(name, ...) ((void)0)
   #endif
   ```

**Key modified files**:

- `include/xrpl/telemetry/SpanGuard.h` — add `addEvent()` overload
- `include/xrpl/telemetry/Telemetry.h` — add `startSpan()` with links
- `src/xrpld/telemetry/Telemetry.cpp` — implement new overload
- `src/xrpld/telemetry/NullTelemetry.cpp` — no-op implementation
- `src/xrpld/telemetry/TracingInstrumentation.h` — add `XRPL_TRACE_ADD_EVENT` macro

---

## Task 4a.1: Adaptor `getTelemetry()` Method

**Objective**: Give `Consensus.h` access to the telemetry subsystem without
coupling the generic template to OTel headers.

**What to do**:

- Add `getTelemetry()` method to the Adaptor concept (returns
  `xrpl::telemetry::Telemetry&`). The return type is already forward-declared
  behind `#ifdef XRPL_ENABLE_TELEMETRY`.
- Implement in `RCLConsensus::Adaptor` — delegates to `app_.getTelemetry()`.
- In `Consensus.h`, the `XRPL_TRACE_*` macros call
  `adaptor_.getTelemetry()` — when telemetry is disabled, the macros expand to
  `((void)0)` and the method is never called.

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.h` — declare `getTelemetry()`
- `src/xrpld/app/consensus/RCLConsensus.cpp` — implement `getTelemetry()`

---

## Task 4a.2: Switchable Round Span with Deterministic Trace ID

**Objective**: Create a `consensus.round` root span in `startRound()` that uses
the switchable correlation strategy. Store span context as a member for child
spans in `Consensus.h`.

**What to do**:

- In `RCLConsensus::Adaptor::startRound()` (or a new helper):
  - Read `consensus_trace_strategy` from config.
  - **Deterministic**: compute `trace_id = SHA256(prevLedgerID)[0:16]`.
    Construct a `SpanContext` with this trace_id, then start
    `consensus.round` span as child of that context.
  - **Attribute**: start normal `consensus.round` span.
  - Set attributes on both: `xrpl.consensus.round_id`,
    `xrpl.consensus.ledger_id`, `xrpl.consensus.ledger.seq`,
    `xrpl.consensus.mode`.
  - Store the round span in `Consensus` as a member (see Task 4a.3).
  - If a previous round's span context is available, add a **span link**
    (follows-from) to establish the round chain.

- **`SpanGuard::hashSpan()` factory**: The deterministic trace ID logic is
  encapsulated in a static factory method on `SpanGuard`:

  ```cpp
  static SpanGuard hashSpan(
      TraceCategory cat, std::string_view name,
      std::uint8_t const* hashData, std::size_t hashSize);
  ```

  `hashSpan()` derives `trace_id = hashData[0:16]` and creates a span whose
  trace ID matches on every node that shares the same hash input (e.g.
  `previousLedger.id()`). It is the consensus equivalent of `txSpan()` (which
  derives trace IDs from transaction hashes). Both factories live in
  `SpanGuard.h` and compile to no-ops when telemetry is disabled.

- Add `createDeterministicTraceId(hash)` utility to
  `include/xrpl/telemetry/Telemetry.h` (returns 16-byte trace ID from a
  256-bit hash by truncation); a sketch follows this list.

- Add `consensus_trace_strategy` to `Telemetry::Setup` and
  `TelemetryConfig.cpp` parser:

  ```cpp
  /** Cross-node correlation strategy: "deterministic" or "attribute". */
  std::string consensusTraceStrategy = "deterministic";
  ```
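
A possible shape for the `createDeterministicTraceId()` helper mentioned above — the return type and exact signature are assumptions:

```cpp
// Hypothetical sketch — return type and placement are assumptions.
inline std::array<std::uint8_t, 16>
createDeterministicTraceId(uint256 const& hash)
{
    // Truncate the 256-bit hash to its first 16 bytes: every node derives
    // the same trace ID from the same input hash.
    std::array<std::uint8_t, 16> traceId;
    std::memcpy(traceId.data(), hash.data(), traceId.size());
    return traceId;
}
```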

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp`
- `src/xrpld/app/consensus/ConsensusSpanNames.h` — **(new)** span name constants for consensus spans, following the `*SpanNames.h` colocation pattern (header lives next to its class, not in `telemetry/`)
- `include/xrpl/telemetry/Telemetry.h` — `createDeterministicTraceId()`
- `src/xrpld/telemetry/TelemetryConfig.cpp` — parse new config option

---

## Task 4a.3: Span Members in `Consensus.h`

**Objective**: Add span storage to the `Consensus` class so that spans created
in `startRound()` (adaptor) are accessible from `phaseEstablish()`,
`updateOurPositions()`, and `haveConsensus()` (template methods).

**What to do**:

- Add to `Consensus` private members (guarded by `#ifdef XRPL_ENABLE_TELEMETRY`):

  ```cpp
  #ifdef XRPL_ENABLE_TELEMETRY
  std::optional<xrpl::telemetry::SpanGuard> roundSpan_;
  std::optional<xrpl::telemetry::SpanGuard> establishSpan_;
  opentelemetry::context::Context prevRoundContext_;
  #endif
  ```

  - `roundSpan_` is created in `startRound()` via the adaptor and stored.
    Its `SpanGuard::Scope` member keeps the span active on the thread context
    for the entire round lifetime.
  - `establishSpan_` is created when entering phaseEstablish and cleared on accept.
    It becomes a child of `roundSpan_` via OTel's thread-local context propagation.
  - `prevRoundContext_` stores the previous round's context for follows-from links.

**Threading assumption**: `startRound()`, `phaseEstablish()`, `updateOurPositions()`,
and `haveConsensus()` all run on the same thread (the consensus job queue thread).
This is required for the `SpanGuard::Scope`-based parent-child hierarchy to work.
The `Consensus` class documentation confirms it is NOT thread-safe and calls are
serialized by the application.

- Add conditional include at top of `Consensus.h`:

  ```cpp
  #ifdef XRPL_ENABLE_TELEMETRY
  #include <xrpl/telemetry/SpanGuard.h>
  #include <xrpld/telemetry/TracingInstrumentation.h>
  #endif
  ```

**Key modified files**:

- `src/xrpld/consensus/Consensus.h`

---

## Task 4a.4: Instrument `phaseEstablish()`

**Objective**: Create `consensus.establish` span wrapping the establish phase,
with attributes for convergence progress.

**What to do**:

- At the start of `phaseEstablish()` (line 1298), if `establishSpan_` is not
  yet created, create it as child of `roundSpan_` using the **direct API**
  (NOT the `XRPL_TRACE_CONSENSUS` macro, which creates a local variable):

  ```cpp
  #ifdef XRPL_ENABLE_TELEMETRY
  if (!establishSpan_ && adaptor_.getTelemetry().shouldTraceConsensus())
  {
      establishSpan_.emplace(
          adaptor_.getTelemetry().startSpan("consensus.establish"));
  }
  #endif
  ```

- Set attributes on each call:
  - `xrpl.consensus.converge_percent` — `convergePercent_`
  - `xrpl.consensus.establish_count` — `establishCounter_`
  - `xrpl.consensus.proposers` — `currPeerPositions_.size()`

- On phase exit (transition to accept), close the establish span and record
  final duration.

**Key modified files**:

- `src/xrpld/consensus/Consensus.h` — `phaseEstablish()` method

---

## Task 4a.5: Instrument `updateOurPositions()`

**Objective**: Trace each position update cycle including dispute resolution
details.

**What to do**:

- At the start of `updateOurPositions()` (line 1418), create a scoped child
  span. This method is called and returns within a single `phaseEstablish()`
  call, so the `XRPL_TRACE_CONSENSUS` macro works here (scoped local):

  ```cpp
  XRPL_TRACE_CONSENSUS(adaptor_.getTelemetry(), "consensus.update_positions");
  ```

- Set attributes:
  - `xrpl.consensus.disputes_count` — `result_->disputes.size()`
  - `xrpl.consensus.converge_percent` — current convergence
  - `xrpl.consensus.proposers_agreed` — count of peers with same position
  - `xrpl.consensus.proposers_total` — total peer positions

- Inside the dispute resolution loop, for each dispute that changes our vote,
  add an **event** with attributes using `XRPL_TRACE_ADD_EVENT` (from Task 4a.0).
  Since `EventAttribute` carries `string_view` pairs, numeric values are
  stringified first:

  ```cpp
  // EventAttribute is a pair of string_views, so numbers are stringified.
  XRPL_TRACE_ADD_EVENT("dispute.resolve", {
      {"xrpl.tx.id", std::string(tx_id)},
      {"xrpl.dispute.our_vote", our_vote},
      {"xrpl.dispute.yays", std::to_string(yays)},
      {"xrpl.dispute.nays", std::to_string(nays)}
  });
  ```

**Key modified files**:

- `src/xrpld/consensus/Consensus.h` — `updateOurPositions()` method

---

## Task 4a.6: Instrument `haveConsensus()` (Threshold & Convergence)

**Objective**: Trace consensus checking including threshold escalation
(`ConsensusParms::AvalancheState::{init, mid, late, stuck}`).

**What to do**:

- At the start of `haveConsensus()` (line 1598), create a scoped child span:

  ```cpp
  XRPL_TRACE_CONSENSUS(adaptor_.getTelemetry(), "consensus.check");
  ```

- Set attributes:
  - `xrpl.consensus.agree_count` — peers that agree with our position
  - `xrpl.consensus.disagree_count` — peers that disagree
  - `xrpl.consensus.converge_percent` — convergence percentage
  - `xrpl.consensus.result` — ConsensusState result (Yes/No/MovedOn)

- The free function `checkConsensus()` in `Consensus.cpp` (line 151) determines
  thresholds based on `currentAgreeTime`. Threshold values come from
  `ConsensusParms::avalancheCutoffs` (defined in `ConsensusParms.h`).
  The escalation states are `ConsensusParms::AvalancheState::{init, mid, late, stuck}`.
  Record the effective threshold and close time consensus state:
  - `xrpl.consensus.threshold_percent` — effective consensus threshold (50/65/70/95 by escalation state)
  - `xrpl.consensus.close_time_threshold` — close time voting threshold (avCT_CONSENSUS_PCT)
  - `xrpl.consensus.have_close_time_consensus` — whether close time consensus was reached
  - `xrpl.consensus.avalanche_threshold` — the avalanche-escalated weight from `getNeededWeight()`

  These are recorded on both `consensus.update_positions` and `consensus.check` spans.

**Key modified files**:

- `src/xrpld/consensus/Consensus.h` — `haveConsensus()` and `updateOurPositions()` methods

---

## Task 4a.7: Instrument Mode Changes

**Objective**: Trace consensus mode transitions (proposing ↔ observing,
wrongLedger, switchedLedger).

**What to do**:

Mode changes are rare (typically 0-1 per round), so a **standalone short-lived
span** is appropriate (not an event). This captures timing of the mode change
itself.

- In `RCLConsensus::Adaptor::onModeChange()`, create a scoped span:

  ```cpp
  XRPL_TRACE_CONSENSUS(app_.getTelemetry(), "consensus.mode_change");
  XRPL_TRACE_SET_ATTR("xrpl.consensus.mode.old", to_string(before).c_str());
  XRPL_TRACE_SET_ATTR("xrpl.consensus.mode.new", to_string(after).c_str());
  ```

- Note: `MonitoredMode::set()` (line 304 in `Consensus.h`) calls
  `adaptor_.onModeChange(before, after)` — so the span is created in the
  adaptor, which already has telemetry access. No instrumentation needed
  in `Consensus.h` for this task.

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp` — `onModeChange()`

---

## Task 4a.8: Reparent Existing Spans Under Round

**Objective**: Make existing consensus spans (`consensus.accept`,
`consensus.accept.apply`, `consensus.validation.send`) children of the
`consensus.round` root span instead of being standalone.

**What to do**:

- The existing spans in `onAccept()`, `doAccept()`, and `validate()` use
  `XRPL_TRACE_CONSENSUS(app_.getTelemetry(), ...)` which creates standalone
  spans on the current thread's context.
- After Task 4a.2 creates the round span and stores it, these methods run on
  the same thread within the round span's scope, so they automatically become
  children. Verify this works correctly.
- For `consensus.validation.send`: add a **span link** (follows-from) to the
  round span context, since the validation may be processed after the round
  completes (see the sketch below).
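
A minimal sketch of the linked validation span, assuming the links-aware `startSpan()` overload from Task 4a.0 and the `roundSpanContext_` snapshot described in the implementation notes below:

```cpp
// Hedged sketch — roundSpanContext_ and the overload shape are assumptions.
#ifdef XRPL_ENABLE_TELEMETRY
auto span = app_.getTelemetry().startSpan(
    "consensus.validation.send",
    opentelemetry::context::RuntimeContext::GetCurrent(),
    /* links = */ {roundSpanContext_});
#endif
```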

**Key modified files**:

- `src/xrpld/app/consensus/RCLConsensus.cpp` — verify parent-child hierarchy

---

## Task 4a.9: Build Verification and Testing

**Objective**: Verify all Phase 4a changes compile cleanly with telemetry ON
and OFF, and don't affect consensus timing.

**What to do**:

1. Build with `telemetry=ON` — verify no compilation errors
2. Build with `telemetry=OFF` — verify macros expand to no-ops, no new includes
   leak into `Consensus.h` when disabled
3. Run existing consensus unit tests
4. Verify `#ifdef XRPL_ENABLE_TELEMETRY` guards on all new members in
   `Consensus.h`
5. Run `pccl` pre-commit checks

**Verification Checklist**:

- [x] Build succeeds with telemetry ON
- [x] Build succeeds with telemetry OFF
- [x] Existing consensus tests pass
- [x] `Consensus.h` has zero OTel includes when telemetry is OFF
- [x] No new virtual calls in hot consensus paths
- [x] `pccl` passes

---

## Phase 4a Summary

| Task | Description                                      | New Files | Modified Files | Depends On |
| ---- | ------------------------------------------------ | --------- | -------------- | ---------- |
| 4a.0 | Prerequisites: extend SpanGuard & Telemetry APIs | 0         | 4              | Phase 4    |
| 4a.1 | Adaptor `getTelemetry()` method                  | 0         | 2              | Phase 4    |
| 4a.2 | Switchable round span with deterministic traceID | 1         | 3              | 4a.0, 4a.1 |
| 4a.3 | Span members in `Consensus.h`                    | 0         | 1              | 4a.1       |
| 4a.4 | Instrument `phaseEstablish()`                    | 0         | 1              | 4a.3       |
| 4a.5 | Instrument `updateOurPositions()`                | 0         | 1              | 4a.0, 4a.3 |
| 4a.6 | Instrument `haveConsensus()` (thresholds)        | 0         | 1              | 4a.3       |
| 4a.7 | Instrument mode changes                          | 0         | 1              | 4a.1       |
| 4a.8 | Reparent existing spans under round              | 0         | 1              | 4a.0, 4a.2 |
| 4a.9 | Build verification and testing                   | 0         | 0              | 4a.0-4a.8  |

**Parallel work**: Tasks 4a.0 and 4a.1 can run in parallel. Tasks 4a.4, 4a.5, 4a.6, and 4a.7 can run in parallel after 4a.3 (and 4a.0 for 4a.5).

### New Spans (Phase 4a)

| Span Name                    | Location           | Key Attributes                                                                     |
| ---------------------------- | ------------------ | ---------------------------------------------------------------------------------- |
| `consensus.round`            | `RCLConsensus.cpp` | `round_id`, `ledger_id`, `ledger.seq`, `mode`; link → prev round                   |
| `consensus.establish`        | `Consensus.h`      | `converge_percent`, `establish_count`, `proposers`                                  |
| `consensus.update_positions` | `Consensus.h`      | `disputes_count`, `converge_percent`, `proposers_agreed`, `proposers_total`         |
| `consensus.check`            | `Consensus.h`      | `agree_count`, `disagree_count`, `converge_percent`, `result`, `threshold_percent`  |
| `consensus.mode_change`      | `RCLConsensus.cpp` | `mode.old`, `mode.new`                                                              |

### New Events (Phase 4a)

| Event Name        | Parent Span                  | Attributes                          |
| ----------------- | ---------------------------- | ----------------------------------- |
| `dispute.resolve` | `consensus.update_positions` | `tx_id`, `our_vote`, `yays`, `nays` |

### New Attributes (Phase 4a)

```cpp
// Round-level (on consensus.round)
"xrpl.consensus.round_id" = int64        // Consensus round number
"xrpl.consensus.ledger_id" = string      // previousLedger.id() hash
"xrpl.consensus.trace_strategy" = string // "deterministic" or "attribute"

// Establish-level
"xrpl.consensus.converge_percent" = int64  // Convergence % (0-100+)
"xrpl.consensus.establish_count" = int64   // Number of establish iterations
"xrpl.consensus.disputes_count" = int64    // Active disputes
"xrpl.consensus.proposers_agreed" = int64  // Peers agreeing with us
"xrpl.consensus.proposers_total" = int64   // Total peer positions
"xrpl.consensus.agree_count" = int64       // Peers that agree (haveConsensus)
"xrpl.consensus.disagree_count" = int64    // Peers that disagree
"xrpl.consensus.threshold_percent" = int64 // Current threshold (50/65/70/95)
"xrpl.consensus.result" = string           // "yes", "no", "moved_on"

// Mode change
"xrpl.consensus.mode.old" = string // Previous mode
"xrpl.consensus.mode.new" = string // New mode
```

### Implementation Notes

- **Separation of concerns**: All non-trivial telemetry code extracted to private
  helpers (`startRoundTracing`, `createValidationSpan`, `startEstablishTracing`,
  `updateEstablishTracing`, `endEstablishTracing`). Business logic methods contain
  only single-line `#ifdef` blocks calling these helpers.
- **Thread safety**: `createValidationSpan()` runs on the jtACCEPT worker thread.
  Instead of accessing `roundSpan_` across threads, a `roundSpanContext_` snapshot
  (lightweight `SpanContext` value type) is captured on the consensus thread in
  `startRoundTracing()` and read by `createValidationSpan()`. The job queue
  provides the happens-before guarantee.
- **Macro safety**: `XRPL_TRACE_ADD_EVENT` uses `do { } while (0)` to prevent
  dangling-else issues.
- **Config validation**: `consensus_trace_strategy` is validated to be either
  `"deterministic"` or `"attribute"`, falling back to `"deterministic"` for
  unrecognised values.
- **Plan deviation**: `roundSpan_` is stored in `RCLConsensus::Adaptor` (not
  `Consensus.h`) because the adaptor has access to telemetry config and can
  implement the deterministic trace ID strategy. `establishSpan_` is correctly
  in `Consensus.h` as planned.

---

# Phase 4b: Cross-Node Propagation (Future — Documentation Only)

> **Goal**: Wire `TraceContextPropagator` for P2P messages so that proposals
> and validations carry trace context between nodes. This enables true
> distributed tracing where a proposal sent by Node A creates a child span
> on Node B.
>
> **Status**: NOT IMPLEMENTED. The protobuf fields and propagator class exist
> but are not wired. This section documents the design for future work.

## Architecture

```
Node A (proposing)                          Node B (receiving)
─────────────────                           ──────────────────
consensus.round                             consensus.round
├── propose()                               ├── peerProposal()
│     └── TraceContextPropagator            │     └── TraceContextPropagator
│           ::injectToProtobuf(             │           ::extractFromProtobuf(
│             TMProposeSet.trace_context)   │             TMProposeSet.trace_context)
│                                           │     └── span link → Node A's context
└── validate()                              └── onValidation()
      └── inject into TMValidation                └── extract from TMValidation
```

## Wiring Points

| Message         | Inject Location                    | Extract Location                    | Protobuf Field             |
| --------------- | ---------------------------------- | ----------------------------------- | -------------------------- |
| `TMProposeSet`  | `Adaptor::propose()`               | `PeerImp::onMessage(TMProposeSet)`  | field 1001: `TraceContext` |
| `TMValidation`  | `Adaptor::validate()`              | `PeerImp::onMessage(TMValidation)`  | field 1001: `TraceContext` |
| `TMTransaction` | `NetworkOPs::processTransaction()` | `PeerImp::onMessage(TMTransaction)` | field 1001: `TraceContext` |

## Span Link Semantics

Received messages use **span links** (follows-from), NOT parent-child:

- The receiver's processing span links to the sender's context (see the sketch below)
- This preserves each node's independent trace tree
- Cross-node correlation visible via linked traces in Tempo/Jaeger
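
A receive-side sketch under the same assumptions as the Phase 3 known-issues section — `extractFromProtobuf()` reconstructs the sender's `SpanContext`, which is attached as a link rather than a parent:

```cpp
// Hypothetical receive-side sketch — signatures are assumptions.
void
PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
{
#ifdef XRPL_ENABLE_TELEMETRY
    std::vector<opentelemetry::trace::SpanContext> links;
    if (m->has_trace_context())
    {
        // Rebuild the sender's SpanContext and link to it (follows-from),
        // keeping this node's trace tree independent.
        links.push_back(
            xrpl::telemetry::TraceContextPropagator::extractFromProtobuf(
                m->trace_context()));
    }
    auto span = app_.getTelemetry().startSpan(
        "consensus.proposal.receive",
        opentelemetry::context::RuntimeContext::GetCurrent(),
        links);
#endif
    // ... existing proposal handling ...
}
```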

## Interaction with Deterministic Trace ID (Strategy A)

When using deterministic trace_id (Phase 4a default), cross-node spans already
share the same trace_id. P2P propagation adds **span-level** linking:

- Without propagation: spans from different nodes appear in the same trace
  (same trace_id) but without parent-child or follows-from relationships.
- With propagation: spans have explicit links showing which proposal/validation
  from Node A caused processing on Node B.

## Prerequisites

- Phase 4a (this task list) — establish phase tracing must be in place
- `TraceContextPropagator` class (already exists in
  `include/xrpl/telemetry/TraceContextPropagator.h`)
- Protobuf `TraceContext` message (already exists, field 1001)

@@ -1,221 +0,0 @@
|
||||
# Phase 5: Integration Test Task List
|
||||
|
||||
> **Goal**: End-to-end verification of the complete telemetry pipeline using a
|
||||
> 6-node consensus network. Proves that RPC, transaction, and consensus spans
|
||||
> flow through the observability stack (otel-collector, Tempo, Prometheus,
|
||||
> Grafana) under realistic conditions.
|
||||
>
|
||||
> **Scope**: Integration test script, manual testing plan, 6-node local network
|
||||
> setup, Tempo/Prometheus/Grafana verification.
|
||||
>
|
||||
> **Branch**: `pratik/otel-phase5-docs-deployment`
|
||||
|
||||
### Related Plan Documents
|
||||
|
||||
| Document | Relevance |
|
||||
| ---------------------------------------------------------------- | ------------------------------------------ |
|
||||
| [07-observability-backends.md](./07-observability-backends.md) | Tempo, Grafana, Prometheus setup |
|
||||
| [05-configuration-reference.md](./05-configuration-reference.md) | Collector config, Docker Compose |
|
||||
| [06-implementation-phases.md](./06-implementation-phases.md) | Phase 5 tasks, definition of done |
|
||||
| [Phase5_taskList.md](./Phase5_taskList.md) | Phase 5 main task list (5.6 = integration) |
|
||||
|
||||
---
|
||||
|
||||
## Task IT.1: Create Integration Test Script
|
||||
|
||||
**Objective**: Automated bash script that stands up a 6-node xrpld network
|
||||
with telemetry, exercises all span categories, and verifies data in
|
||||
Tempo/Prometheus.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Create `docker/telemetry/integration-test.sh`:
|
||||
- Prerequisites check (docker, xrpld binary, curl, jq)
|
||||
- Start observability stack via `docker compose`
|
||||
- Generate 6 validator key pairs via temp standalone xrpld
|
||||
- Generate 6 node configs + shared `validators.txt`
|
||||
- Start 6 xrpld nodes in consensus mode (`--start`, no `-a`)
|
||||
- Wait for all nodes to reach `"proposing"` state (120s timeout)
|
||||
|
||||
**Key new file**: `docker/telemetry/integration-test.sh`
|
||||
|
||||
**Verification**:
|
||||
|
||||
- [ ] Script starts without errors
|
||||
- [ ] All 6 nodes reach "proposing" state
|
||||
- [ ] Observability stack is healthy (otel-collector, Tempo, Prometheus, Grafana)
|
||||
|
||||
---
|
||||
|
||||
## Task IT.2: RPC Span Verification (Phase 2)
|
||||
|
||||
**Objective**: Verify RPC spans flow through the telemetry pipeline.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Send `server_info`, `server_state`, `ledger` RPCs to node1 (port 5005)
|
||||
- Wait for batch export (5s)
|
||||
- Query Tempo API for:
|
||||
- `rpc.request` spans (ServerHandler::onRequest)
|
||||
- `rpc.process` spans (ServerHandler::processRequest)
|
||||
- `rpc.command.server_info` spans (callMethod)
|
||||
- `rpc.command.server_state` spans (callMethod)
|
||||
- `rpc.command.ledger` spans (callMethod)
|
||||
- Verify `xrpl.rpc.command` attribute present on `rpc.command.*` spans
|
||||
|
||||
**Verification**:
|
||||
|
||||
- [ ] Tempo shows `rpc.request` traces
|
||||
- [ ] Tempo shows `rpc.process` traces
|
||||
- [ ] Tempo shows `rpc.command.*` traces with correct attributes
|
||||
|
||||
---
|
||||
|
||||
## Task IT.3: Transaction Span Verification (Phase 3)
|
||||
|
||||
**Objective**: Verify transaction spans flow through the telemetry pipeline.
|
||||
|
||||
**What to do**:
|
||||
|
||||
- Get genesis account sequence via `account_info` RPC
|
||||
- Submit Payment transaction using genesis seed (`snoPBrXtMeMyMHUVTgbuqAfg1SUTb`)
|
||||
- Wait for consensus inclusion (10s)
|
||||
- Query Tempo API for:
|
||||
- `tx.process` spans (NetworkOPsImp::processTransaction) on submitting node
|
||||
- `tx.receive` spans (PeerImp::handleTransaction) on peer nodes
|
||||
- Verify `xrpl.tx.hash` attribute on `tx.process` spans
|
||||
- Verify `xrpl.peer.id` attribute on `tx.receive` spans

**Verification**:

- [ ] Tempo shows `tx.process` traces with `xrpl.tx.hash`
- [ ] Tempo shows `tx.receive` traces with `xrpl.peer.id`

---

## Task IT.4: Consensus Span Verification (Phase 4)

**Objective**: Verify that consensus spans flow through the telemetry pipeline.

**What to do**:

- Consensus runs automatically in the 6-node network
- Query the Tempo API for:
  - `consensus.proposal.send` (Adaptor::propose)
  - `consensus.ledger_close` (Adaptor::onClose)
  - `consensus.accept` (Adaptor::onAccept)
  - `consensus.validation.send` (Adaptor::validate)
- Verify attributes (spot-check sketch below):
  - `xrpl.consensus.mode` on `consensus.ledger_close`
  - `xrpl.consensus.proposers` on `consensus.accept`
  - `xrpl.consensus.ledger.seq` on `consensus.validation.send`
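
One way to spot-check the attributes, assuming Tempo's TraceQL search and trace-by-ID APIs; `<traceID>` is a placeholder for an ID returned by the first query:

```bash
# List a few trace IDs whose spans include consensus.accept.
curl -sG "http://localhost:3200/api/search" \
  --data-urlencode 'q={ name = "consensus.accept" }' \
  --data-urlencode 'limit=5' | jq -r '.traces[].traceID'

# Fetch one trace in full and look for the xrpl.consensus.* attributes.
curl -s "http://localhost:3200/api/traces/<traceID>" |
  jq . | grep -A1 'xrpl.consensus'
```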

**Verification**:

- [ ] Tempo shows `consensus.ledger_close` traces with `xrpl.consensus.mode`
- [ ] Tempo shows `consensus.accept` traces with `xrpl.consensus.proposers`
- [ ] Tempo shows `consensus.proposal.send` traces
- [ ] Tempo shows `consensus.validation.send` traces

---

## Task IT.5: Spanmetrics Verification (Phase 5)

**Objective**: Verify that the spanmetrics connector derives RED metrics from spans.

**What to do**:

- Query Prometheus for `traces_span_metrics_calls_total` (query sketch below)
- Query Prometheus for `traces_span_metrics_duration_milliseconds_count`
- Verify Grafana loads at `http://localhost:3000`
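
For example, via the standard Prometheus HTTP API; `span_name` is the label the spanmetrics connector emits for the span name dimension:

```bash
# Per-span-name call counts derived from traces.
curl -sG "http://localhost:9090/api/v1/query" \
  --data-urlencode 'query=sum(traces_span_metrics_calls_total) by (span_name)' |
  jq '.data.result'
```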

**Verification**:

- [ ] Prometheus returns non-empty results for `traces_span_metrics_calls_total`
- [ ] Prometheus returns non-empty results for the duration histogram
- [ ] Grafana UI is accessible with dashboards visible

---

## Task IT.6: Manual Testing Plan

**Objective**: Document how to run the tests manually for future reference.

**What to do**:

- Create `docker/telemetry/TESTING.md` with:
  - Prerequisites section
  - Single-node standalone test (quick verification)
  - 6-node consensus test (full verification)
  - Expected span catalog (all 12 span names with attributes)
  - Verification queries (Tempo API, Prometheus API)
  - Troubleshooting guide

**Key new file**: `docker/telemetry/TESTING.md`

**Verification**:

- [ ] Document covers both single-node and multi-node testing
- [ ] All 12 span names documented with source file and attributes
- [ ] Troubleshooting section covers common failure modes

---

## Task IT.7: Run and Verify

**Objective**: Execute the integration test and validate the results.

**What to do**:

- Run `docker/telemetry/integration-test.sh` locally
- Debug any failures
- Leave the stack running for manual verification
- Share the URLs (smoke-check sketch below):
  - Tempo: `http://localhost:3200`
  - Grafana: `http://localhost:3000`
  - Prometheus: `http://localhost:9090`
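
A quick smoke check that all three UIs answer, using the stock Tempo, Prometheus, and Grafana health endpoints:

```bash
# Print the HTTP status of each backend's readiness endpoint.
for url in http://localhost:3200/ready \
           http://localhost:9090/-/ready \
           http://localhost:3000/api/health; do
  code=$(curl -s -o /dev/null -w '%{http_code}' "$url")
  echo "${url} -> ${code}"
done
```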

**Verification**:

- [ ] Script completes with all checks passing
- [ ] Tempo UI shows the rippled service with all expected span names
- [ ] Grafana dashboards load and show data

---

## Task IT.8: Commit

**Objective**: Commit all new files to the Phase 5 branch.

**What to do**:

- Run `pcc` (pre-commit checks)
- Commit the 3 new files to `pratik/otel-phase5-docs-deployment`

**Verification**:

- [ ] `pcc` passes
- [ ] Commit created on the Phase 5 branch

---

## Summary

| Task | Description                   | New Files | Depends On |
| ---- | ----------------------------- | --------- | ---------- |
| IT.1 | Integration test script       | 1         | Phase 5    |
| IT.2 | RPC span verification         | 0         | IT.1       |
| IT.3 | Transaction span verification | 0         | IT.1       |
| IT.4 | Consensus span verification   | 0         | IT.1       |
| IT.5 | Spanmetrics verification      | 0         | IT.1       |
| IT.6 | Manual testing plan           | 1         | --         |
| IT.7 | Run and verify                | 0         | IT.1-IT.6  |
| IT.8 | Commit                        | 0         | IT.7       |

**Exit Criteria**:

- [ ] All 6 xrpld nodes reach "proposing" state
- [ ] All 11 expected span names visible in Tempo
- [ ] Spanmetrics available in Prometheus
- [ ] Grafana dashboards show data
- [ ] Manual testing plan document complete

@@ -1,241 +0,0 @@
# Phase 5: Documentation & Deployment Task List

> **Goal**: Production readiness — Grafana dashboards, spanmetrics pipeline, operator runbook, alert definitions, and final integration testing. This phase ensures the telemetry system is useful and maintainable in production.
>
> **Scope**: Grafana dashboard definitions, OTel Collector spanmetrics connector, Prometheus integration, alert rules, operator documentation, and a production-ready Docker Compose stack.
>
> **Branch**: `pratik/otel-phase5-docs-deployment` (from `pratik/otel-phase4-consensus-tracing`)

### Related Plan Documents

| Document | Relevance |
| -------- | --------- |
| [07-observability-backends.md](./07-observability-backends.md) | Tempo setup (§7.1), Grafana dashboards (§7.6), alerts (§7.6.3) |
| [05-configuration-reference.md](./05-configuration-reference.md) | Collector config (§5.5), production config (§5.5.2), Docker Compose (§5.6) |
| [06-implementation-phases.md](./06-implementation-phases.md) | Phase 5 tasks (§6.6), definition of done (§6.11.5) |

---

## Task 5.1: Add Spanmetrics Connector to OTel Collector

**Objective**: Derive RED metrics (Rate, Errors, Duration) from trace spans automatically, enabling Grafana time-series dashboards.

**What to do**:

- Edit `docker/telemetry/otel-collector-config.yaml`:
  - Add the `spanmetrics` connector:
    ```yaml
    connectors:
      spanmetrics:
        histogram:
          explicit:
            buckets: [1ms, 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 5s]
        dimensions:
          - name: xrpl.rpc.command
          - name: xrpl.rpc.status
          - name: xrpl.consensus.phase
          - name: xrpl.tx.type
    ```
  - Add a `prometheus` exporter:
    ```yaml
    exporters:
      prometheus:
        endpoint: 0.0.0.0:8889
    ```
  - Wire the pipeline:
    ```yaml
    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: [batch]
          exporters: [debug, otlp/tempo, spanmetrics]
        metrics:
          receivers: [spanmetrics]
          exporters: [prometheus]
    ```

- Edit `docker/telemetry/docker-compose.yml` (scrape-config sketch below):
  - Expose port `8889` on the collector for Prometheus scraping
  - Add a Prometheus service
  - Add Prometheus as a Grafana datasource
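
A sketch of the scrape config, written from bash for illustration; the job name and the 5s interval are assumptions, and `otel-collector:8889` assumes the Compose service name used elsewhere in this stack:

```bash
# Write a minimal Prometheus config that scrapes the collector's
# spanmetrics endpoint exposed on port 8889.
cat > docker/telemetry/prometheus.yml <<'EOF'
global:
  scrape_interval: 5s
scrape_configs:
  - job_name: otel-collector-spanmetrics
    static_configs:
      - targets: ["otel-collector:8889"]
EOF
```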

**Key modified files**:

- `docker/telemetry/otel-collector-config.yaml`
- `docker/telemetry/docker-compose.yml`

**Key new files**:

- `docker/telemetry/prometheus.yml` (Prometheus scrape config)
- `docker/telemetry/grafana/provisioning/datasources/prometheus.yaml`

**Reference**:

- [POC_taskList.md §Next Steps](./POC_taskList.md) — Metrics pipeline for Grafana dashboards

---

## Task 5.2: Create Grafana Dashboards

**Objective**: Provide pre-built Grafana dashboards for RPC performance, transaction lifecycle, and consensus health.

**What to do**:

- Create `docker/telemetry/grafana/provisioning/dashboards/dashboards.yaml` (provisioning config)
- Create dashboard JSON files:

  1. **RPC Performance Dashboard** (`rpc-performance.json`):
     - RPC request latency (p50/p95/p99) by command — histogram panel
     - RPC throughput (requests/sec) by command — time series
     - RPC error rate by command — bar gauge
     - Top slowest RPC commands — table

  2. **Transaction Overview Dashboard** (`transaction-overview.json`):
     - Transaction processing rate — time series
     - Transaction latency distribution — histogram
     - Suppression rate (duplicates) — stat panel
     - Transaction processing path (sync vs async) — pie chart

  3. **Consensus Health Dashboard** (`consensus-health.json`):
     - Consensus round duration — time series
     - Phase duration breakdown (open/establish/accept) — stacked bar
     - Proposals sent/received per round — stat panel
     - Consensus mode distribution (proposing/observing) — pie chart

- Store the dashboards in `docker/telemetry/grafana/dashboards/`

**Key new files**:

- `docker/telemetry/grafana/provisioning/dashboards/dashboards.yaml`
- `docker/telemetry/grafana/dashboards/rpc-performance.json`
- `docker/telemetry/grafana/dashboards/transaction-overview.json`
- `docker/telemetry/grafana/dashboards/consensus-health.json`

**Reference**:

- [07-observability-backends.md §7.6](./07-observability-backends.md) — Grafana dashboard specifications
- [01-architecture-analysis.md §1.8.3](./01-architecture-analysis.md) — Dashboard panel examples

---

## Task 5.3: Define Alert Rules

**Objective**: Create alert definitions for key telemetry anomalies.

**What to do**:

- Create `docker/telemetry/grafana/provisioning/alerting/alerts.yaml` (expression check below):
  - **RPC Latency Alert**: p99 latency > 1s for any command over 5 minutes
  - **RPC Error Rate Alert**: error rate > 5% for any command over 5 minutes
  - **Consensus Duration Alert**: round duration > 10s (warn), > 30s (critical)
  - **Transaction Processing Alert**: processing rate drops below a threshold
  - **Telemetry Pipeline Health**: no spans received for > 2 minutes
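
The p99 expression behind the latency rule can be exercised directly against Prometheus before wiring it into Grafana. The metric and label names are those emitted by the spanmetrics connector, and 1000 ms mirrors the 1s threshold above:

```bash
# Commands whose p99 latency over the last 5 minutes exceeds 1s.
curl -sG "http://localhost:9090/api/v1/query" \
  --data-urlencode 'query=histogram_quantile(0.99, sum(rate(traces_span_metrics_duration_milliseconds_bucket[5m])) by (le, span_name)) > 1000' |
  jq '.data.result'
```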

**Key new files**:

- `docker/telemetry/grafana/provisioning/alerting/alerts.yaml`

**Reference**:

- [07-observability-backends.md §7.6.3](./07-observability-backends.md) — Alert rule definitions

---

## Task 5.4: Production Collector Configuration

**Objective**: Create a production-ready OTel Collector configuration with tail-based sampling and resource limits.

**What to do**:

- Create `docker/telemetry/otel-collector-config-production.yaml` (health-check sketch below):
  - Tail-based sampling policy:
    - Always sample errors and slow traces
    - 10% base sampling rate for normal traces
    - Always sample the first trace for each unique RPC command
  - Resource limits:
    - Memory limiter processor (80% of available memory)
    - Queued retry for export failures
  - TLS configuration for production endpoints
  - Health check endpoint
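
With the health check extension enabled, a deployment probe can be as simple as this; 13133 is the extension's stock default port, and production deployments may override it:

```bash
# Probe the collector's health check extension.
curl -s http://localhost:13133/ | jq .
```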

**Key new files**:

- `docker/telemetry/otel-collector-config-production.yaml`

**Reference**:

- [05-configuration-reference.md §5.5.2](./05-configuration-reference.md) — Production collector config

---

## Task 5.5: Operator Runbook

**Objective**: Create operator documentation for managing the telemetry system in production.

**What to do**:

- Create `docs/telemetry-runbook.md`:
  - **Setup**: how to enable telemetry in rippled
  - **Configuration**: all config options with descriptions
  - **Collector Deployment**: Docker Compose vs. Kubernetes vs. bare metal
  - **Troubleshooting**: common issues and resolutions
    - No traces appearing
    - High memory usage from telemetry
    - Collector connection failures
    - Sampling configuration tuning
  - **Performance Tuning**: batch size, queue size, sampling ratio guidelines
  - **Upgrading**: how to upgrade OTel SDK and Collector versions

**Key new files**:

- `docs/telemetry-runbook.md`

---

## Task 5.6: Final Integration Testing

**Objective**: Validate the complete telemetry stack end-to-end.

**What to do**:

1. Start the full Docker stack (Collector, Tempo, Grafana, Prometheus)
2. Build rippled with `telemetry=ON`
3. Run in standalone mode with telemetry enabled
4. Generate RPC traffic and verify traces in Tempo
5. Verify dashboards populate in Grafana
6. Verify alerts trigger correctly
7. Test the telemetry OFF path (no regressions)
8. Run the full test suite

**Verification Checklist**:

- [ ] Docker stack starts without errors
- [ ] Traces appear in Tempo with the correct hierarchy
- [ ] Grafana dashboards show metrics derived from spans
- [ ] Prometheus scrapes spanmetrics successfully
- [ ] Alerts can be triggered by simulated conditions
- [ ] Build succeeds with telemetry ON and OFF
- [ ] Full test suite passes

---

## Summary

| Task | Description                        | New Files | Modified Files | Depends On |
| ---- | ---------------------------------- | --------- | -------------- | ---------- |
| 5.1  | Spanmetrics connector + Prometheus | 2         | 2              | Phase 4    |
| 5.2  | Grafana dashboards                 | 4         | 0              | 5.1        |
| 5.3  | Alert definitions                  | 1         | 0              | 5.1        |
| 5.4  | Production collector config        | 1         | 0              | Phase 4    |
| 5.5  | Operator runbook                   | 1         | 0              | Phase 4    |
| 5.6  | Final integration testing          | 0         | 0              | 5.1-5.5    |

**Parallel work**: Tasks 5.1, 5.4, and 5.5 can run in parallel. Tasks 5.2 and 5.3 depend on 5.1. Task 5.6 depends on all others.

**Exit Criteria** (from [06-implementation-phases.md §6.11.5](./06-implementation-phases.md)):

- [ ] Dashboards deployed and showing data
- [ ] Alerts configured and tested
- [ ] Operator documentation complete
- [ ] Production collector config ready
- [ ] Full test suite passes

@@ -1,673 +0,0 @@
# OpenTelemetry Distributed Tracing for xrpld

---

## Slide 1: Introduction

> **CNCF** = Cloud Native Computing Foundation

### What is OpenTelemetry?

OpenTelemetry is an open-source, CNCF-backed observability framework for distributed tracing, metrics, and logs.

### Why OpenTelemetry for xrpld?

- **End-to-End Transaction Visibility**: Track transactions from submission → consensus → ledger inclusion
- **Cross-Node Correlation**: Follow requests across multiple independent nodes using a unique `trace_id`
- **Consensus Round Analysis**: Understand timing and behavior across validators
- **Incident Debugging**: Correlate events across distributed nodes during incidents

```mermaid
flowchart LR
    A["Node A<br/>tx.receive<br/>trace_id: abc123"] --> B["Node B<br/>tx.relay<br/>trace_id: abc123"] --> C["Node C<br/>tx.validate<br/>trace_id: abc123"] --> D["Node D<br/>ledger.apply<br/>trace_id: abc123"]

    style A fill:#1565c0,stroke:#0d47a1,color:#fff
    style B fill:#2e7d32,stroke:#1b5e20,color:#fff
    style C fill:#2e7d32,stroke:#1b5e20,color:#fff
    style D fill:#e65100,stroke:#bf360c,color:#fff
```

**Reading the diagram:**

- **Node A (blue, leftmost)**: The originating node that first receives the transaction and assigns a new `trace_id: abc123`; this ID becomes the correlation key for the entire distributed trace.
- **Node B and Node C (green, middle)**: Relay and validation nodes — each creates its own span but carries the same `trace_id`, so their work is linked to the original submission without any central coordinator.
- **Node D (orange, rightmost)**: The final node that applies the transaction to the ledger; the trace now spans the full lifecycle from submission to ledger inclusion.
- **Left-to-right flow**: The horizontal progression shows the real-world message path — a transaction hops from node to node, and the shared `trace_id` stitches all hops into a single queryable trace.

> **Trace ID: abc123** — All nodes share the same trace, enabling cross-node correlation.

---

## Slide 2: OpenTelemetry vs Open Source Alternatives

> **CNCF** = Cloud Native Computing Foundation

| Feature | OpenTelemetry | Jaeger | Zipkin | SkyWalking | Pinpoint | Prometheus |
| ------- | ------------- | ------ | ------ | ---------- | -------- | ---------- |
| **Tracing** | YES | YES | YES | YES | YES | NO |
| **Metrics** | YES | NO | NO | YES | YES | YES |
| **Logs** | YES | NO | NO | YES | NO | NO |
| **C++ SDK** | YES (Official) | YES (Deprecated) | YES (Unmaintained) | NO | NO | YES |
| **Vendor Neutral** | YES (Primary goal) | NO | NO | NO | NO | NO |
| **Instrumentation** | Manual + Auto | Manual | Manual | Auto-first | Auto-first | Manual |
| **Backend** | Any (exporters) | Self | Self | Self | Self | Self |
| **CNCF Status** | Incubating | Graduated | NO | Incubating | NO | Graduated |

> **Why OpenTelemetry?** It's the only actively maintained, full-featured C++ option with vendor neutrality — allowing export to Tempo, Prometheus, Grafana, or any commercial backend without changing instrumentation.

---

## Slide 3: Adoption Scope — Traces Only (Current Plan)

OpenTelemetry supports three signal types: **Traces**, **Metrics**, and **Logs**. xrpld already captures metrics (StatsD via Beast Insight) and logs (Journal/PerfLog). The question is: how much of OTel do we adopt?

> **Scenario A**: Add distributed tracing. Keep StatsD for metrics and Journal for logs.

```mermaid
flowchart LR
    subgraph xrpld["xrpld Process"]
        direction TB
        OTel["OTel SDK<br/>(Traces)"]
        Insight["Beast Insight<br/>(StatsD Metrics)"]
        Journal["Journal + PerfLog<br/>(Logging)"]
    end

    OTel -->|"OTLP"| Collector["OTel Collector"]
    Insight -->|"UDP"| StatsD["StatsD Server"]
    Journal -->|"File I/O"| LogFile["perf.log / debug.log"]

    Collector --> Tempo["Tempo"]
    StatsD --> Graphite["Graphite / Grafana"]
    LogFile --> Loki["Loki (optional)"]

    style xrpld fill:#424242,stroke:#212121,color:#fff
    style OTel fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Insight fill:#1565c0,stroke:#0d47a1,color:#fff
    style Journal fill:#e65100,stroke:#bf360c,color:#fff
    style Collector fill:#2e7d32,stroke:#1b5e20,color:#fff
```

| Aspect | Details |
| ------ | ------- |
| **What changes for operators** | Deploy OTel Collector + trace backend. Existing StatsD and log pipelines stay as-is. |
| **Codebase impact** | New `Telemetry` module (~1500 LOC). Beast Insight and Journal untouched. |
| **New capabilities** | Cross-node trace correlation, span-based debugging, request lifecycle visibility. |
| **What we still can't do** | Correlate metrics with specific traces natively. StatsD metrics remain fire-and-forget with no trace exemplars. |
| **Maintenance burden** | Three separate observability systems to maintain (OTel + StatsD + Journal). |
| **Risk** | Lowest — additive change, no existing systems disturbed. |

---

## Slide 4: Future Adoption — Metrics & Logs via OTel

### Scenario B: + OTel Metrics (Replace StatsD)

> Migrate StatsD to the OTel Metrics API, exposing Prometheus-compatible metrics. Remove Beast Insight.

```mermaid
flowchart LR
    subgraph xrpld["xrpld Process"]
        direction TB
        OTel["OTel SDK<br/>(Traces + Metrics)"]
        Journal["Journal + PerfLog<br/>(Logging)"]
    end

    OTel -->|"OTLP"| Collector["OTel Collector"]
    Journal -->|"File I/O"| LogFile["perf.log / debug.log"]

    Collector --> Tempo["Tempo<br/>(Traces)"]
    Collector --> Prom["Prometheus<br/>(Metrics)"]
    LogFile --> Loki["Loki (optional)"]

    style xrpld fill:#424242,stroke:#212121,color:#fff
    style OTel fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Journal fill:#e65100,stroke:#bf360c,color:#fff
    style Collector fill:#2e7d32,stroke:#1b5e20,color:#fff
```

- **Better metrics?** Yes — Prometheus gives native histograms (p50/p95/p99), multi-dimensional labels, and exemplars linking metric spikes to traces.
- **Codebase**: Remove `Beast::Insight` + `StatsDCollector` (~2000 LOC). Single SDK for traces and metrics.
- **Operator effort**: Rewrite dashboards from StatsD/Graphite queries to PromQL. Run both in parallel during the transition.
- **Risk**: Medium — operators must migrate monitoring infrastructure.

### Scenario C: + OTel Logs (Full Stack)

> Also replace Journal logging with the OTel Logs API. Single SDK for everything.

```mermaid
flowchart LR
    subgraph xrpld["xrpld Process"]
        OTel["OTel SDK<br/>(Traces + Metrics + Logs)"]
    end

    OTel -->|"OTLP"| Collector["OTel Collector"]

    Collector --> Tempo["Tempo<br/>(Traces)"]
    Collector --> Prom["Prometheus<br/>(Metrics)"]
    Collector --> Loki["Loki / Elastic<br/>(Logs)"]

    style xrpld fill:#424242,stroke:#212121,color:#fff
    style OTel fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Collector fill:#2e7d32,stroke:#1b5e20,color:#fff
```

- **Structured logging**: The OTel Logs API outputs structured records with `trace_id`, `span_id`, severity, and attributes by design.
- **Full correlation**: Every log line carries `trace_id`. Click trace → see logs. Click metric spike → see trace → see logs.
- **Codebase**: Remove Beast Insight (~2000 LOC) + simplify Journal/PerfLog (~3000 LOC). One dependency instead of three.
- **Risk**: Highest — `beast::Journal` is deeply embedded in every component. Large refactor. The OTel C++ Logs API is newer (stable since v1.11, less battle-tested).

### Recommendation

```mermaid
flowchart LR
    A["Phase 1<br/><b>Traces Only</b><br/>(Current Plan)"] --> B["Phase 2<br/><b>+ Metrics</b><br/>(Replace StatsD)"] --> C["Phase 3<br/><b>+ Logs</b><br/>(Full OTel)"]

    style A fill:#2e7d32,stroke:#1b5e20,color:#fff
    style B fill:#1565c0,stroke:#0d47a1,color:#fff
    style C fill:#e65100,stroke:#bf360c,color:#fff
```

| Phase | Signal | Strategy | Risk |
| ----- | ------ | -------- | ---- |
| **Phase 1** (now) | Traces | Add OTel traces. Keep StatsD and Journal. Prove value. | Low |
| **Phase 2** (future) | + Metrics | Migrate StatsD → Prometheus via OTel. Remove Beast Insight. | Medium |
| **Phase 3** (future) | + Logs | Adopt the OTel Logs API. Align with the structured logging initiative. | High |

> **Key Takeaway**: Start with traces (unique value, lowest risk), then incrementally adopt metrics and logs as the OTel infrastructure proves itself.

---

## Slide 5: Comparison with xrpld's Existing Solutions

### Current Observability Stack

| Aspect | PerfLog (JSON) | StatsD (Metrics) | OpenTelemetry (NEW) |
| ------ | -------------- | ---------------- | ------------------- |
| **Type** | Logging | Metrics | Distributed Tracing |
| **Scope** | Single node | Single node | **Cross-node** |
| **Data** | JSON log entries | Counters, gauges | Spans with context |
| **Correlation** | By timestamp | By metric name | By `trace_id` |
| **Overhead** | Low (file I/O) | Low (UDP) | Low-Medium (configurable) |
| **Question Answered** | "What happened here?" | "How many? How fast?" | **"What was the journey?"** |

### Use Case Matrix

| Scenario | PerfLog | StatsD | OpenTelemetry |
| -------- | ------- | ------ | ------------- |
| "How many TXs per second?" | ❌ | ✅ | ❌ |
| "Why was this specific TX slow?" | ⚠️ | ❌ | ✅ |
| "Which node delayed consensus?" | ❌ | ❌ | ✅ |
| "Show TX journey across 5 nodes" | ❌ | ❌ | ✅ |

> **Key Insight**: In the **traces-only** approach (Phase 1), OpenTelemetry **complements** existing systems. In future phases, OTel metrics and logs could **replace** StatsD and Journal respectively — see Slides 3-4 for the full adoption roadmap.

---

## Slide 6: Architecture

> **OTLP** = OpenTelemetry Protocol | **WS** = WebSocket

### High-Level Integration Architecture

```mermaid
flowchart TB
    subgraph xrpld["xrpld Node"]
        subgraph services["Core Services"]
            direction LR
            RPC["RPC Server<br/>(HTTP/WS)"] ~~~ Overlay["Overlay<br/>(P2P Network)"] ~~~ Consensus["Consensus<br/>(RCLConsensus)"]
        end

        Telemetry["Telemetry Module<br/>(OpenTelemetry SDK)"]

        services --> Telemetry
    end

    Telemetry -->|OTLP/gRPC| Collector["OTel Collector"]

    Collector --> Tempo["Grafana Tempo"]
    Collector --> Elastic["Elastic APM"]

    style xrpld fill:#424242,stroke:#212121,color:#fff
    style services fill:#1565c0,stroke:#0d47a1,color:#fff
    style Telemetry fill:#2e7d32,stroke:#1b5e20,color:#fff
    style Collector fill:#e65100,stroke:#bf360c,color:#fff
```

**Reading the diagram:**

- **Core Services (blue, top)**: RPC Server, Overlay, and Consensus are the three primary components that generate trace data — they represent the entry points for client requests, peer messages, and consensus rounds respectively.
- **Telemetry Module (green, middle)**: The OpenTelemetry SDK sits below the core services and receives span data from all three; it acts as a single collection point within the xrpld process.
- **OTel Collector (orange, center)**: An external process that receives spans over OTLP/gRPC from the Telemetry Module; it decouples xrpld from backend choices and handles batching, sampling, and routing.
- **Backends (bottom row)**: Tempo and Elastic APM are interchangeable — the Collector fans out to any combination, so operators can switch backends without modifying xrpld code.
- **Top-to-bottom flow**: Data flows from instrumented code down through the SDK, out over the network to the Collector, and finally into storage/visualization backends.

### Context Propagation

```mermaid
sequenceDiagram
    participant Client
    participant NodeA as Node A
    participant NodeB as Node B

    Client->>NodeA: Submit TX (no context)
    Note over NodeA: Creates trace_id: abc123<br/>span: tx.receive
    NodeA->>NodeB: Relay TX<br/>(traceparent: abc123)
    Note over NodeB: Links to trace_id: abc123<br/>span: tx.relay
```

- **HTTP/RPC**: W3C Trace Context headers (`traceparent`) — see the curl example below
- **P2P Messages**: Protocol Buffer extension fields
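
As a concrete example of the HTTP path, a client can hand xrpld a W3C `traceparent` header and the resulting `rpc.request` span should continue that trace. The header format is `00-<trace-id>-<parent-span-id>-<flags>` per the W3C spec; the IDs below are made up:

```bash
# Submit an RPC with an externally supplied trace context.
curl -s http://localhost:5005 \
  -H 'traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01' \
  -d '{"method":"server_info","params":[{}]}' | jq '.result.status'
```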

---

## Slide 7: Implementation Plan

### 5-Phase Rollout (9 Weeks)

> **Note**: Dates shown are relative to project start, not calendar dates.

```mermaid
gantt
    title Implementation Timeline
    dateFormat YYYY-MM-DD
    axisFormat Week %W

    section Phase 1
    Core Infrastructure :p1, 2024-01-01, 2w

    section Phase 2
    RPC Tracing :p2, after p1, 2w

    section Phase 3
    Transaction Tracing :p3, after p2, 2w

    section Phase 4
    Consensus Tracing :p4, after p3, 2w

    section Phase 5
    Documentation :p5, after p4, 1w
```

### Phase Details

| Phase | Focus | Key Deliverables | Effort |
| ----- | ----- | ---------------- | ------ |
| 1 | Core Infrastructure | SDK integration, Telemetry interface, Config | 10 days |
| 2 | RPC Tracing | HTTP context extraction, Handler spans | 10 days |
| 3 | Transaction Tracing | Protobuf context, P2P relay propagation | 10 days |
| 4 | Consensus Tracing | Round spans, Proposal/validation tracing | 10 days |
| 5 | Documentation | Runbook, Dashboards, Training | 7 days |

**Total Effort**: ~47 developer-days (2 developers)

> **Future Phases** (not in current scope): After traces are stable, OTel metrics can replace StatsD (~3 weeks), and OTel logs can replace Journal (~4 weeks, aligned with the structured logging initiative). See Slides 3-4 for the full adoption roadmap.

---

## Slide 8: Performance Overhead

> **OTLP** = OpenTelemetry Protocol

### Estimated System Impact

| Metric | Overhead | Notes |
| ------ | -------- | ----- |
| **CPU** | 1-3% | Span creation and attribute setting |
| **Memory** | ~10 MB | SDK statics + batch buffer + worker thread stack |
| **Network** | 10-50 KB/s | Compressed OTLP export to the collector |
| **Latency (p99)** | <2% | With proper sampling configuration |

#### How We Arrived at These Numbers

**Assumptions (XRPL mainnet baseline)**:

| Parameter | Value | Source |
| --------- | ----- | ------ |
| Transaction throughput | ~25 TPS (peaks to ~50) | Mainnet average |
| Default peers per node | 21 | `peerfinder/detail/Tuning.h` (`defaultMaxPeers`) |
| Consensus round frequency | ~1 round / 3-4 seconds | `ConsensusParms.h` (`ledgerMIN_CONSENSUS=1950ms`) |
| Proposers per round | ~20-35 | Mainnet UNL size |
| P2P message rate | ~160 msgs/sec | See the message breakdown below |
| Avg TX processing time | ~200 μs | Profiled baseline |
| Single span creation cost | 500-1000 ns | OTel C++ SDK benchmarks (see [3.5.4](./03-implementation-strategy.md#354-performance-data-sources)) |

**P2P message breakdown** (per node, mainnet):

| Message Type | Rate | Derivation |
| ------------ | ---- | ---------- |
| TMTransaction | ~100/sec | ~25 TPS × ~4 relay hops per TX, deduplicated by HashRouter |
| TMValidation | ~50/sec | ~35 validators × ~1 validation per ~3s round ≈ ~12/sec, plus relay fan-out |
| TMProposeSet | ~10/sec | ~35 proposers per ~3s round ≈ ~12/sec, clustered in the establish phase |
| **Total** | **~160/sec** | **Only traced message types counted** |

**CPU (1-3%) — Calculation**:

Per-transaction tracing cost breakdown:

| Operation | Cost | Notes |
| --------- | ---- | ----- |
| `tx.receive` span (create + end + 4 attributes) | ~1400 ns | ~1000ns create + ~200ns end + 4×50ns attrs |
| `tx.validate` span | ~1200 ns | ~1000ns create + ~200ns for 2 attributes |
| `tx.relay` span | ~1200 ns | ~1000ns create + ~200ns for 2 attributes |
| Context injection into P2P message | ~200 ns | Serialize trace_id + span_id into protobuf |
| **Total per TX** | **~4.0 μs** | |

> **CPU overhead**: 4.0 μs / 200 μs baseline = **~2.0% per transaction**. Under high load with consensus + RPC spans overlapping, it reaches ~3%. Consensus itself adds only ~36 μs per 3-second round (~0.001%), so the TX path dominates. On production server hardware (3+ GHz Xeon), span creation drops to ~500-600 ns, bringing the per-TX cost to ~2.6 μs (~1.3%). See [Section 3.5.4](./03-implementation-strategy.md#354-performance-data-sources) for benchmark sources.

**Memory (~10 MB) — Calculation**:

| Component | Size | Notes |
| --------- | ---- | ----- |
| TracerProvider + Exporter (gRPC channel init) | ~320 KB | Allocated once at startup |
| BatchSpanProcessor (circular buffer) | ~16 KB | 2049 × 8-byte AtomicUniquePtr entries |
| BatchSpanProcessor (worker thread stack) | ~8 MB | Default Linux thread stack size |
| Active spans (in-flight, max ~1000) | ~500-800 KB | ~500-800 bytes/span × 1000 concurrent |
| Export queue (batch buffer, max 2048 spans) | ~1 MB | ~500 bytes/span × 2048 queue depth |
| Thread-local context storage (~100 threads) | ~6.4 KB | ~64 bytes/thread |
| **Total** | **~10 MB ceiling** | |

> Memory plateaus once the export queue fills — the `max_queue_size=2048` config bounds growth.
> The worker thread stack (~8 MB) dominates the static footprint but is virtual memory; actual RSS
> depends on stack usage (typically much less). Active spans are larger than originally estimated
> (~500-800 bytes) because the OTel SDK `Span` object includes a mutex (~40 bytes), a `SpanData`
> recordable (~250 bytes base), and `std::map`-based attribute storage (~200-500 bytes for 3-5
> string attributes). See [Section 3.5.4](./03-implementation-strategy.md#354-performance-data-sources) for source references.

**Network (10-50 KB/s) — Calculation**:

Two sources of network overhead:

**(A) OTLP span export to the Collector:**

| Sampling Rate | Effective Spans/sec | Avg Span Size (compressed) | Bandwidth |
| ------------- | ------------------- | -------------------------- | --------- |
| 100% (dev only) | ~500 | ~500 bytes | ~250 KB/s |
| **10% (recommended prod)** | **~50** | **~500 bytes** | **~25 KB/s** |
| 1% (minimal) | ~5 | ~500 bytes | ~2.5 KB/s |

> The ~500 spans/sec at 100% comes from: ~100 TX spans + ~160 P2P context spans + ~23 consensus spans/round + ~50 RPC spans ≈ ~500/sec. OTLP protobuf with gzip compression yields ~500 bytes/span on average.

**(B) P2P trace context overhead** (added to existing messages, always-on regardless of sampling):

| Message Type | Rate | Context Size | Bandwidth |
| ------------ | ---- | ------------ | --------- |
| TMTransaction | ~100/sec | 29 bytes | ~2.9 KB/s |
| TMValidation | ~50/sec | 29 bytes | ~1.5 KB/s |
| TMProposeSet | ~10/sec | 29 bytes | ~0.3 KB/s |
| **Total P2P** | | | **~4.7 KB/s** |

> **Combined**: 25 KB/s (OTLP export at 10%) + 5 KB/s (P2P context) ≈ **~30 KB/s typical**. The 10-50 KB/s range covers 10-20% sampling under normal to peak mainnet load.

**Latency (<2%) — Calculation**:

| Path | Tracing Cost | Baseline | Overhead |
| ---- | ------------ | -------- | -------- |
| Fast RPC (e.g., `server_info`) | 2.75 μs | ~1 ms | 0.275% |
| Slow RPC (e.g., `path_find`) | 2.75 μs | ~100 ms | 0.003% |
| Transaction processing | 4.0 μs | ~200 μs | 2.0% |
| Consensus round | 36 μs | ~3 sec | 0.001% |

> At p99, even the worst case (TX processing at 2.0%) is within the 1-3% range. RPC and consensus overhead are negligible. On production hardware, TX overhead drops to ~1.3%.

### Per-Message Overhead (Context Propagation)

Each P2P message carries trace context with the following overhead:

| Field | Size | Description |
| ----- | ---- | ----------- |
| `trace_id` | 16 bytes | Unique identifier for the entire trace |
| `span_id` | 8 bytes | Current span (becomes the parent on the receiver) |
| `trace_flags` | 1 byte | Sampling decision flags |
| `trace_state` | 0-4 bytes | Optional vendor-specific data |
| **Total** | **~29 bytes** | **Added per traced P2P message** |

```mermaid
flowchart LR
    subgraph msg["P2P Message with Trace Context"]
        A["Original Message<br/>(variable size)"] --> B["+ TraceContext<br/>(~29 bytes)"]
    end

    subgraph breakdown["Context Breakdown"]
        C["trace_id<br/>16 bytes"]
        D["span_id<br/>8 bytes"]
        E["flags<br/>1 byte"]
        F["state<br/>0-4 bytes"]
    end

    B --> breakdown

    style A fill:#424242,stroke:#212121,color:#fff
    style B fill:#2e7d32,stroke:#1b5e20,color:#fff
    style C fill:#1565c0,stroke:#0d47a1,color:#fff
    style D fill:#1565c0,stroke:#0d47a1,color:#fff
    style E fill:#e65100,stroke:#bf360c,color:#fff
    style F fill:#4a148c,stroke:#2e0d57,color:#fff
```

**Reading the diagram:**

- **Original Message (gray, left)**: The existing P2P message payload of variable size — this is unchanged; trace context is appended, never modifying the original data.
- **+ TraceContext (green, right of message)**: The additional 29-byte context block attached to each traced message; the arrow from the original message shows it is a pure addition.
- **Context Breakdown (right subgraph)**: The four fields — `trace_id` (16 bytes), `span_id` (8 bytes), `flags` (1 byte), and `state` (0-4 bytes) — show exactly what is added and their individual sizes.
- **Color coding**: Blue fields (`trace_id`, `span_id`) are the core identifiers required for trace correlation; orange (`flags`) controls sampling decisions; purple (`state`) is optional vendor data typically omitted.

> **Note**: 29 bytes represents ~1-6% overhead depending on message size (500B simple TX to 5KB proposal), which is acceptable for the observability benefits provided.

### Mitigation Strategies

```mermaid
flowchart LR
    A["Head Sampling<br/>10% default"] --> B["Tail Sampling<br/>Keep errors/slow"] --> C["Batch Export<br/>Reduce I/O"] --> D["Conditional Compile<br/>XRPL_ENABLE_TELEMETRY"]

    style A fill:#1565c0,stroke:#0d47a1,color:#fff
    style B fill:#2e7d32,stroke:#1b5e20,color:#fff
    style C fill:#e65100,stroke:#bf360c,color:#fff
    style D fill:#4a148c,stroke:#2e0d57,color:#fff
```

> For a detailed explanation of head vs. tail sampling, see Slide 9.

### Kill Switches (Rollback Options)

1. **Config Disable**: Set `enabled=0` in the config → instant disable; sampling changes likewise take effect without a restart
2. **Rebuild**: Compile with `XRPL_ENABLE_TELEMETRY=OFF` → zero overhead (no-op)
3. **Full Revert**: Clean separation allows easy commit reversion

---

## Slide 9: Sampling Strategies — Head vs. Tail

> Sampling controls **which traces are recorded and exported**. Without sampling, every operation generates a trace — at 500+ spans/sec, this overwhelms storage and network. Sampling lets you keep the signal and discard the noise.

### Head Sampling (Decision at Start)

The sampling decision is made **when a trace begins**, before any work is done. The sampler derives a pseudo-random decision from the trace ID; if it falls within the configured ratio, the entire trace is recorded. Otherwise, the trace is silently dropped.

```mermaid
flowchart LR
    A["New Request<br/>Arrives"] --> B{"Random < 10%?"}
    B -->|"Yes (1 in 10)"| C["Record Entire Trace<br/>(all spans)"]
    B -->|"No (9 in 10)"| D["Drop Entire Trace<br/>(zero overhead)"]

    style C fill:#2e7d32,stroke:#1b5e20,color:#fff
    style D fill:#c62828,stroke:#8c2809,color:#fff
    style B fill:#1565c0,stroke:#0d47a1,color:#fff
```

| Aspect | Details |
| ------ | ------- |
| **Where it runs** | Inside xrpld (SDK-level). Configured via `sampling_ratio` in `xrpld.cfg`. |
| **When the decision happens** | At trace creation time — before the first span is even populated. |
| **How it works** | `sampling_ratio=0.1` means each trace has a 10% probability of being recorded. Dropped traces incur near-zero overhead (no spans created, no attributes set, no export). |
| **Propagation** | Once a trace is sampled, the `trace_flags` field (1 byte in the context header) tells downstream nodes to also sample it. Unsampled traces propagate `trace_flags=0`, so downstream nodes skip them too. |
| **Pros** | Lowest overhead. Simple to configure. Predictable resource usage. |
| **Cons** | **Blind** — it doesn't know whether the trace will be interesting. A rare error or slow consensus round has only a 10% chance of being captured. |
| **Best for** | High-volume, steady-state traffic where most traces look similar (e.g., routine RPC requests). |

**xrpld configuration**:

```ini
[telemetry]
# Record 10% of traces (recommended for production)
sampling_ratio=0.1
```

### Tail Sampling (Decision at End)

The sampling decision is made **after the trace completes**, based on its actual content — was it slow? Did it error? Was it a consensus round? This requires buffering complete traces before deciding.

```mermaid
flowchart TB
    A["All Traces<br/>Buffered (100%)"] --> B["OTel Collector<br/>Evaluates Rules"]

    B --> C{"Error?"}
    C -->|Yes| K["KEEP"]

    C -->|No| D{"Slow?<br/>(>5s consensus,<br/>>1s RPC)"}
    D -->|Yes| K

    D -->|No| E{"Random < 10%?"}
    E -->|Yes| K
    E -->|No| F["DROP"]

    style K fill:#2e7d32,stroke:#1b5e20,color:#fff
    style F fill:#c62828,stroke:#8c2809,color:#fff
    style B fill:#1565c0,stroke:#0d47a1,color:#fff
    style C fill:#e65100,stroke:#bf360c,color:#fff
    style D fill:#e65100,stroke:#bf360c,color:#fff
    style E fill:#4a148c,stroke:#2e0d57,color:#fff
```

| Aspect | Details |
| ------ | ------- |
| **Where it runs** | In the **OTel Collector** (external process), not inside xrpld. xrpld exports 100% of traces; the Collector decides what to keep. |
| **When the decision happens** | After the Collector has received all spans for a trace (it waits `decision_wait=10s` for stragglers). |
| **How it works** | Policy rules evaluate the completed trace: keep all errors, keep slow operations above a threshold, keep all consensus rounds, then probabilistically sample the rest at 10%. |
| **Pros** | **Never misses important traces**. Errors, slow requests, and consensus anomalies are always captured regardless of probability. |
| **Cons** | Higher resource usage — xrpld must export 100% of spans to the Collector, which buffers them in memory before deciding. The Collector needs more RAM (configured via `num_traces` and `decision_wait`). |
| **Best for** | Production troubleshooting where you can't afford to miss errors or anomalies. |

**Collector configuration** (tail sampling rules for xrpld):

```yaml
processors:
  tail_sampling:
    decision_wait: 10s # Wait for all spans in a trace
    num_traces: 100000 # Buffer up to 100K concurrent traces
    policies:
      - name: errors # Always keep error traces
        type: status_code
        status_code: { status_codes: [ERROR] }

      - name: slow-consensus # Keep consensus rounds >5s
        type: latency
        latency: { threshold_ms: 5000 }

      - name: slow-rpc # Keep slow RPC requests >1s
        type: latency
        latency: { threshold_ms: 1000 }

      - name: probabilistic # Sample 10% of everything else
        type: probabilistic
        probabilistic: { sampling_percentage: 10 }
```

### Head vs. Tail — Side-by-Side

| | Head Sampling | Tail Sampling |
| - | ------------- | ------------- |
| **Decision point** | Trace start (inside xrpld) | Trace end (in the OTel Collector) |
| **Knows trace content?** | No (random coin flip) | Yes (evaluates the completed trace) |
| **Overhead on xrpld** | Lowest (dropped traces = no-op) | Higher (must export 100% to the Collector) |
| **Collector resource usage** | Low (receives only sampled traces) | Higher (buffers all traces before deciding) |
| **Captures all errors?** | No (only if the trace was randomly selected) | **Yes** (the error policy catches them) |
| **Captures slow operations?** | No (random) | **Yes** (the latency policy catches them) |
| **Configuration** | `xrpld.cfg`: `sampling_ratio=0.1` | `otel-collector.yaml`: `tail_sampling` processor |
| **Best for** | High-throughput steady state | Troubleshooting & anomaly detection |

### Recommended Strategy for xrpld

Use **both** in a layered approach:

```mermaid
flowchart LR
    subgraph xrpld["xrpld (Head Sampling)"]
        HS["sampling_ratio=1.0<br/>(export everything)"]
    end

    subgraph collector["OTel Collector (Tail Sampling)"]
        TS["Keep: errors + slow + 10% random<br/>Drop: routine traces"]
    end

    subgraph storage["Backend Storage"]
        ST["Only interesting traces<br/>stored long-term"]
    end

    xrpld -->|"100% of spans"| collector -->|"~15-20% kept"| storage

    style xrpld fill:#424242,stroke:#212121,color:#fff
    style collector fill:#1565c0,stroke:#0d47a1,color:#fff
    style storage fill:#2e7d32,stroke:#1b5e20,color:#fff
```

> **Why this works**: xrpld exports everything (no blind drops), the Collector applies intelligent filtering (keep errors/slow/anomalies, sample the rest), and only ~15-20% of traces reach storage. If Collector resource usage becomes a concern, add head sampling at `sampling_ratio=0.5` to halve the export volume while still giving the Collector enough data for good tail-sampling decisions.

---

## Slide 10: Data Collection & Privacy

### What Data is Collected

| Category | Attributes Collected | Purpose |
| -------- | -------------------- | ------- |
| **Transaction** | `tx.hash`, `tx.type`, `tx.result`, `tx.fee`, `ledger_index` | Trace transaction lifecycle |
| **Consensus** | `round`, `phase`, `mode`, `proposers` (count of proposing validators), `duration_ms` | Analyze consensus timing |
| **RPC** | `command`, `version`, `status`, `duration_ms` | Monitor RPC performance |
| **Peer** | `peer.id` (public key), `latency_ms`, `message.type`, `message.size` | Network topology analysis |
| **Ledger** | `ledger.hash`, `ledger.index`, `close_time`, `tx_count` | Ledger progression tracking |
| **Job** | `job.type`, `queue_ms`, `worker` | JobQueue performance |

### What is NOT Collected (Privacy Guarantees)

```mermaid
flowchart LR
    subgraph notCollected["❌ NOT Collected"]
        direction LR
        A["Private Keys"] ~~~ B["Account Balances"] ~~~ C["Transaction Amounts"]
    end

    subgraph alsoNot["❌ Also Excluded"]
        direction LR
        D["IP Addresses<br/>(configurable)"] ~~~ E["Personal Data"] ~~~ F["Raw TX Payloads"]
    end

    style A fill:#c62828,stroke:#8c2809,color:#fff
    style B fill:#c62828,stroke:#8c2809,color:#fff
    style C fill:#c62828,stroke:#8c2809,color:#fff
    style D fill:#c62828,stroke:#8c2809,color:#fff
    style E fill:#c62828,stroke:#8c2809,color:#fff
    style F fill:#c62828,stroke:#8c2809,color:#fff
```

**Reading the diagram:**

- **NOT Collected (top row, red)**: Private Keys, Account Balances, and Transaction Amounts are explicitly excluded — these are financial/security-sensitive fields that telemetry never touches.
- **Also Excluded (bottom row, red)**: IP Addresses (configurable per deployment), Personal Data, and Raw TX Payloads are also excluded — these protect operator and user privacy.
- **All-red styling**: Every box is styled in red to visually reinforce that these are hard exclusions, not options — the telemetry system has no code path that collects any of these fields.
- **Two-row layout**: The split between "NOT Collected" and "Also Excluded" distinguishes financial data (top) from operational/personal data (bottom), making the privacy boundaries clear to auditors.

### Privacy Protection Mechanisms

| Mechanism | Description |
| --------- | ----------- |
| **Account Hashing** | `xrpl.tx.account` is hashed at the collector level before storage |
| **Configurable Redaction** | Sensitive fields can be excluded via config |
| **Sampling** | Only 10% of traces are recorded by default (reduces exposure) |
| **Local Control** | Node operators control what gets exported |
| **No Raw Payloads** | Transaction content is never recorded, only metadata |

> **Key Principle**: Telemetry collects **operational metadata** (timing, counts, hashes) — never **sensitive content** (keys, balances, amounts).

---

_End of Presentation_

@@ -1598,65 +1598,3 @@ validators.txt
# set to ssl_verify to 0.
[ssl_verify]
1

#-------------------------------------------------------------------------------
#
# 11. Telemetry (OpenTelemetry Tracing)
#
#-------------------------------------------------------------------------------
#
# Enables distributed tracing via OpenTelemetry. Requires building with
# -DXRPL_ENABLE_TELEMETRY=ON (telemetry Conan option).
#
# [telemetry]
#
#   enabled=0
#
#   Enable or disable telemetry at runtime. Default: 0 (disabled).
#
#   service_name=xrpld
#
#   OTel resource attribute `service.name`. Default: xrpld.
#   The node's network ID (from [network_id]) is automatically added
#   as the `xrpl.network.id` and `xrpl.network.type` resource attributes.
#
#   endpoint=http://localhost:4318/v1/traces
#
#   The OTLP/HTTP exporter endpoint. The server sends trace data as
#   protobuf-encoded HTTP POST requests to this URL.
#   Default: http://localhost:4318/v1/traces.
#
#   sampling_ratio=1.0
#
#   Head-based sampling ratio using TraceIdRatioBasedSampler. The decision
#   to record or drop a trace is made at span creation time, before the
#   span starts, based on the trace ID. Values in [0.0, 1.0].
#   1.0 = trace everything, 0.1 = sample ~10% of traces. Default: 1.0.
#   For tail-based (post-hoc) filtering — where you decide to drop a span
#   after inspecting its content — use SpanGuard::discard() in code.
#
#   trace_rpc=1
#
#   Enable tracing for JSON-RPC and WebSocket API request handling —
#   command parsing, execution, and response serialization. Default: 1.
#
#   trace_transactions=1
#
#   Enable tracing for the transaction lifecycle — submission, validation,
#   application to ledgers, and final disposition. Default: 1.
#
#   trace_consensus=1
#
#   Enable tracing for the consensus round lifecycle — proposals,
#   validations, mode changes, and ledger acceptance. Default: 1.
#
#   trace_peer=0
#
#   Enable tracing for peer-to-peer protocol messages — overlay message
#   send/receive, peer handshakes, and routing. High volume; disabled
#   by default. Default: 0.
#
#   trace_ledger=1
#
#   Enable tracing for ledger close and accept operations — ledger
#   building, state hashing, and write-back to the node store. Default: 1.
#

@@ -192,23 +192,6 @@ target_link_libraries(
add_module(xrpl tx)
target_link_libraries(xrpl.libxrpl.tx PUBLIC xrpl.libxrpl.ledger)

# Telemetry module — OpenTelemetry distributed tracing support.
# Sources: include/xrpl/telemetry/ (headers), src/libxrpl/telemetry/ (impl).
# When telemetry=ON, links the Conan-provided umbrella target
# opentelemetry-cpp::opentelemetry-cpp (individual component targets like
# ::api, ::sdk are not available in the Conan package).
add_module(xrpl telemetry)
target_link_libraries(
  xrpl.libxrpl.telemetry
  PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.beast
)
if(telemetry)
  target_link_libraries(
    xrpl.libxrpl.telemetry
    PUBLIC opentelemetry-cpp::opentelemetry-cpp
  )
endif()

add_library(xrpl.libxrpl)
set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl)

@@ -240,7 +223,6 @@ target_link_modules(
  resource
  server
  shamap
  telemetry
  tx
)

@@ -10,13 +10,10 @@
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1765850186.86",
"re2/20251105#8579cfd0bda4daf0683f9e3898f964b4%1774398111.888",
"protobuf/6.33.5#d96d52ba5baaaa532f47bda866ad87a5%1774467363.12",
"opentelemetry-cpp/1.18.0#efd9851e173f8a13b9c7d35232de8cf1%1750409186.472",
"openssl/3.6.1#e6399de266349245a4542fc5f6c71552%1774458290.139",
"nudb/2.0.9#11149c73f8f2baff9a0198fe25971fc7%1774883011.384",
"nlohmann_json/3.11.3#45828be26eb619a2e04ca517bb7b828d%1701220705.259",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492",
"libcurl/8.18.0#364bc3755cb9ef84ed9a7ae9c7efc1c1%1770984390.024",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03",
"libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
@@ -33,15 +30,9 @@
"zlib/1.3.1#cac0f6daea041b0ccf42934163defb20%1774439233.809",
"strawberryperl/5.32.1.1#8d114504d172cfea8ea1662d09b6333e%1774447376.964",
"protobuf/6.33.5#d96d52ba5baaaa532f47bda866ad87a5%1774467363.12",
"pkgconf/2.5.1#93c2051284cba1279494a43a4fcfeae2%1757684701.089",
"opentelemetry-proto/1.4.0#4096a3b05916675ef9628f3ffd571f51%1732731336.11",
"ninja/1.13.2#c8c5dc2a52ed6e4e42a66d75b4717ceb%1764096931.974",
"nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1765850144.707",
"msys2/cci.latest#d22fe7b2808f5fd34d0a7923ace9c54f%1770657326.649",
"meson/1.10.0#60786758ea978964c24525de19603cf4%1768294926.103",
"m4/1.4.19#5d7a4994e5875d76faf7acf3ed056036%1774365463.87",
"libtool/2.4.7#14e7739cc128bc1623d2ed318008e47e%1755679003.847",
"gnu-config/cci.20210814#466e9d4d7779e1c142443f7ea44b4284%1762363589.329",
"cmake/4.3.0#b939a42e98f593fb34d3a8c5cc860359%1774439249.183",
"b2/5.4.2#ffd6084a119587e70f11cd45d1a386e2%1774439233.447",
"automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56",

@@ -22,7 +22,6 @@ class Xrpl(ConanFile):
        "rocksdb": [True, False],
        "shared": [True, False],
        "static": [True, False],
        "telemetry": [True, False],
        "tests": [True, False],
        "unity": [True, False],
        "xrpld": [True, False],
@@ -55,7 +54,6 @@ class Xrpl(ConanFile):
        "rocksdb": True,
        "shared": False,
        "static": True,
        "telemetry": False,
        "tests": False,
        "unity": False,
        "xrpld": False,
@@ -147,10 +145,6 @@ class Xrpl(ConanFile):
            self.requires("jemalloc/5.3.0")
        if self.options.rocksdb:
            self.requires("rocksdb/10.5.1")
        # OpenTelemetry C++ SDK for distributed tracing (optional).
        # Provides OTLP/HTTP exporter, batch span processor, and trace API.
        if self.options.telemetry:
            self.requires("opentelemetry-cpp/1.18.0")
        self.requires("xxhash/0.8.3", transitive_headers=True)

    exports_sources = (
@@ -179,7 +173,6 @@ class Xrpl(ConanFile):
        tc.variables["rocksdb"] = self.options.rocksdb
        tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
        tc.variables["static"] = self.options.static
        tc.variables["telemetry"] = self.options.telemetry
        tc.variables["unity"] = self.options.unity
        tc.variables["xrpld"] = self.options.xrpld
        tc.generate()
@@ -232,5 +225,3 @@ class Xrpl(ConanFile):
        ]
        if self.options.rocksdb:
            libxrpl.requires.append("rocksdb::librocksdb")
        if self.options.telemetry:
            libxrpl.requires.append("opentelemetry-cpp::opentelemetry-cpp")

@@ -102,7 +102,6 @@ words:
|
||||
- dxrpl
|
||||
- enabled
|
||||
- endmacro
|
||||
- EOCFG
|
||||
- exceptioned
|
||||
- Falco
|
||||
- fcontext
|
||||
@@ -191,7 +190,6 @@ words:
|
||||
- NOLINTNEXTLINE
|
||||
- nonxrp
|
||||
- noripple
|
||||
- nostd
|
||||
- nudb
|
||||
- nullptr
|
||||
- nunl
|
||||
@@ -204,9 +202,6 @@ words:
|
||||
- permdex
|
||||
- perminute
|
||||
- permissioned
|
||||
- pgrep
|
||||
- pkill
|
||||
- pimpl
|
||||
- pointee
|
||||
- populator
|
||||
- preauth
|
||||
@@ -223,9 +218,7 @@ words:
|
||||
- qalloc
|
||||
- queuable
|
||||
- Raphson
|
||||
- reparent
|
||||
- replayer
|
||||
- reqps
|
||||
- rerere
|
||||
- retriable
|
||||
- RIPD
|
||||
@@ -282,7 +275,6 @@ words:
|
||||
- txjson
|
||||
- txn
|
||||
- txns
|
||||
- txqueue
|
||||
- txs
|
||||
- UBSAN
|
||||
- ubsan
|
||||
@@ -323,10 +315,6 @@ words:
|
||||
- xchain
|
||||
- ximinez
|
||||
- EXPECT_STREQ
|
||||
- Gantt
|
||||
- gantt
|
||||
- otelc
|
||||
- traceql
|
||||
- XMACRO
|
||||
- xrpkuwait
|
||||
- xrpl
|
||||
@@ -334,7 +322,3 @@ words:
|
||||
- xrplf
|
||||
- xxhash
|
||||
- xxhasher
|
||||
- xychart
|
||||
- zpages
|
||||
- pratik
|
||||
- dedup
|
||||
|
||||
docker/telemetry/.gitignore
@@ -1,2 +0,0 @@
# Runtime data generated by xrpld and telemetry stack
data/
@@ -1,522 +0,0 @@

# OpenTelemetry Integration Testing Guide

This document describes how to verify the xrpld OpenTelemetry telemetry
pipeline end-to-end, from span generation through the observability stack
(otel-collector, Tempo, Prometheus, Grafana).

---

## Prerequisites

### Build xrpld with telemetry

```bash
conan install . --build=missing -o telemetry=True
cmake --preset default -Dtelemetry=ON
cmake --build --preset default --target xrpld
```

The binary is at `.build/xrpld`.

### Required tools

- **Docker** with `docker compose` (v2)
- **curl**
- **jq** (JSON processor)

### Verify binary

```bash
.build/xrpld --version
```

---

## Test 1: Single-Node Standalone (Quick Verification)

This test verifies RPC and transaction spans in standalone mode. Consensus
spans will not fire because standalone mode does not run consensus.

### Step 1: Start the observability stack

```bash
docker compose -f docker/telemetry/docker-compose.yml up -d
```

Wait for services to be ready:

```bash
# otel-collector health
curl -sf http://localhost:13133/ && echo "collector ready"

# Tempo readiness
curl -sf http://localhost:3200/ready > /dev/null && echo "tempo ready"
```

### Step 2: Start xrpld in standalone mode

```bash
.build/xrpld --conf docker/telemetry/xrpld-telemetry.cfg -a --start
```

Wait a few seconds for the node to initialize.
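
If you prefer a deterministic wait over a fixed sleep, a small poll against `server_info` works. This is a convenience sketch, not part of the shipped tooling; it assumes the standalone RPC port 5005 used throughout this test:

```bash
# Poll until the RPC port answers and reports a server_state.
until curl -sf http://localhost:5005 -d '{"method":"server_info"}' \
    | jq -e '.result.info.server_state' > /dev/null; do
  sleep 1
done
```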

### Step 3: Exercise RPC spans

```bash
# server_info
curl -s http://localhost:5005 \
  -d '{"method":"server_info"}' | jq .result.info.server_state

# server_state
curl -s http://localhost:5005 \
  -d '{"method":"server_state"}' | jq .result.state.server_state

# ledger
curl -s http://localhost:5005 \
  -d '{"method":"ledger","params":[{"ledger_index":"current"}]}' \
  | jq .result.ledger_current_index
```

### Step 4: Submit a transaction

Close the ledger first (required in standalone mode):

```bash
curl -s http://localhost:5005 -d '{"method":"ledger_accept"}'
```

Submit a Payment from the genesis account:

```bash
curl -s http://localhost:5005 -d '{
  "method": "submit",
  "params": [{
    "secret": "snoPBrXtMeMyMHUVTgbuqAfg1SUTb",
    "tx_json": {
      "TransactionType": "Payment",
      "Account": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh",
      "Destination": "rPMh7Pi9ct699iZUTWzJaUMR1o42VEfGqF",
      "Amount": "10000000"
    }
  }]
}' | jq .result.engine_result
```

Expected result: `"tesSUCCESS"`.

Close the ledger again to finalize:

```bash
curl -s http://localhost:5005 -d '{"method":"ledger_accept"}'
```

### Step 5: Verify traces in Tempo

Wait 5 seconds for the batch export, then:

```bash
TEMPO="http://localhost:3200"

# Check xrpld service is registered
curl -s "$TEMPO/api/v2/search/tag/resource.service.name/values" | jq '.tagValues[].value'

# Check RPC spans
curl -s "$TEMPO/api/search" \
  --data-urlencode 'q={resource.service.name="xrpld" && name="rpc.request"}' \
  --data-urlencode 'limit=5' | jq '.traces | length'

curl -s "$TEMPO/api/search" \
  --data-urlencode 'q={resource.service.name="xrpld" && name="rpc.process"}' \
  --data-urlencode 'limit=5' | jq '.traces | length'

curl -s "$TEMPO/api/search" \
  --data-urlencode 'q={resource.service.name="xrpld" && name="rpc.command.server_info"}' \
  --data-urlencode 'limit=5' | jq '.traces | length'

# Check transaction spans
curl -s "$TEMPO/api/search" \
  --data-urlencode 'q={resource.service.name="xrpld" && name="tx.process"}' \
  --data-urlencode 'limit=5' | jq '.traces | length'
```

Or open Grafana Explore with Tempo datasource: http://localhost:3000
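
To inspect the attributes on an individual span rather than just counting traces, pull a trace ID out of a search result and fetch the full trace. This is a sketch against Tempo's `/api/traces/<id>` endpoint; the search response field names (`.traces[].traceID`) may vary slightly between Tempo versions:

```bash
# Grab one rpc.request trace ID from the search results...
TRACE_ID=$(curl -s "$TEMPO/api/search" \
  --data-urlencode 'q={resource.service.name="xrpld" && name="rpc.request"}' \
  --data-urlencode 'limit=1' | jq -r '.traces[0].traceID')

# ...and fetch the full trace to inspect its spans and attributes.
curl -s "$TEMPO/api/traces/$TRACE_ID" | jq .
```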

### Step 6: Teardown

```bash
# Kill xrpld (Ctrl+C or)
kill $(pgrep -f 'xrpld.*xrpld-telemetry')

# Stop observability stack
docker compose -f docker/telemetry/docker-compose.yml down

# Clean xrpld data
rm -rf data/
```

### Expected spans (standalone mode)

| Span Name                   | Expected | Notes                         |
| --------------------------- | -------- | ----------------------------- |
| `rpc.request`               | Yes      | Every HTTP RPC call           |
| `rpc.process`               | Yes      | Every RPC processing          |
| `rpc.command.server_info`   | Yes      | server_info RPC               |
| `rpc.command.server_state`  | Yes      | server_state RPC              |
| `rpc.command.ledger`        | Yes      | ledger RPC                    |
| `rpc.command.submit`        | Yes      | submit RPC                    |
| `rpc.command.ledger_accept` | Yes      | ledger_accept RPC             |
| `tx.process`                | Yes      | Transaction submission        |
| `tx.receive`                | No       | No peers in standalone        |
| `consensus.*`               | No       | Consensus disabled standalone |

---

## Test 2: 6-Node Consensus Network (Full Verification)

This test verifies ALL span categories including consensus and peer
transaction relay, using a 6-node validator network.

### Automated

Run the integration test script:

```bash
bash docker/telemetry/integration-test.sh
```

The script will:

1. Start the observability stack
2. Generate 6 validator key pairs
3. Create config files for each node
4. Start all 6 nodes
5. Wait for consensus ("proposing" state)
6. Exercise RPC, submit transactions
7. Verify all span categories in Tempo
8. Verify spanmetrics in Prometheus
9. Print results and leave the stack running

### Manual

If you prefer to run the steps manually:

#### Step 1: Start observability stack

```bash
docker compose -f docker/telemetry/docker-compose.yml up -d
```

#### Step 2: Generate validator keys

Start a temporary standalone xrpld:

```bash
.build/xrpld --conf docker/telemetry/xrpld-telemetry.cfg -a --start &
TEMP_PID=$!
sleep 5
```

Generate 6 key pairs:

```bash
for i in $(seq 1 6); do
  curl -s http://localhost:5005 \
    -d '{"method":"validation_create"}' | jq '.result'
done
```

Record the `validation_seed` and `validation_public_key` for each.
Kill the temporary node:

```bash
kill $TEMP_PID
rm -rf data/
```

#### Step 3: Create node configs

For each node (1-6), create a config file. Template:

```ini
[server]
port_rpc
port_peer

[port_rpc]
port = {5004 + node_number}
ip = 127.0.0.1
admin = 127.0.0.1
protocol = http

[port_peer]
port = {51234 + node_number}
ip = 0.0.0.0
protocol = peer

[node_db]
type=NuDB
path=/tmp/xrpld-integration/node{N}/nudb
online_delete=256

[database_path]
/tmp/xrpld-integration/node{N}/db

[debug_logfile]
/tmp/xrpld-integration/node{N}/debug.log

[validation_seed]
{seed from step 2}

[validators_file]
/tmp/xrpld-integration/validators.txt

[ips_fixed]
127.0.0.1 51235
127.0.0.1 51236
127.0.0.1 51237
127.0.0.1 51238
127.0.0.1 51239
127.0.0.1 51240

[peer_private]
1

[telemetry]
enabled=1
endpoint=http://localhost:4318/v1/traces
exporter=otlp_http
sampling_ratio=1.0
batch_size=512
batch_delay_ms=2000
max_queue_size=2048
trace_rpc=1
trace_transactions=1
trace_consensus=1
trace_peer=0
trace_ledger=1

[rpc_startup]
{ "command": "log_level", "severity": "warning" }

[ssl_verify]
0
```
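
Writing the six files by hand is error prone. The following is a sketch (not part of the repository) that expands the template per node; it assumes the template was saved to `/tmp/xrpld-integration/node-template.cfg` and the six seeds from step 2 were saved one per line in `/tmp/xrpld-integration/seeds.txt` — both conventions are invented here:

```bash
for i in $(seq 1 6); do
  mkdir -p /tmp/xrpld-integration/node$i
  seed=$(sed -n "${i}p" /tmp/xrpld-integration/seeds.txt)
  # Substitute the per-node ports, paths, and validation seed into the template.
  sed -e "s/{5004 + node_number}/$((5004 + i))/" \
      -e "s/{51234 + node_number}/$((51234 + i))/" \
      -e "s/{N}/$i/g" \
      -e "s/{seed from step 2}/$seed/" \
      /tmp/xrpld-integration/node-template.cfg \
      > /tmp/xrpld-integration/node$i/xrpld.cfg
done
```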

#### Step 4: Create validators.txt

```ini
[validators]
{public_key_1}
{public_key_2}
{public_key_3}
{public_key_4}
{public_key_5}
{public_key_6}
```

#### Step 5: Start all 6 nodes

```bash
for i in $(seq 1 6); do
  .build/xrpld --conf /tmp/xrpld-integration/node$i/xrpld.cfg --start &
  echo $! > /tmp/xrpld-integration/node$i/xrpld.pid
done
```

#### Step 6: Wait for consensus

Poll each node until `server_state` = `"proposing"`:

```bash
for port in 5005 5006 5007 5008 5009 5010; do
  while true; do
    state=$(curl -s http://localhost:$port \
      -d '{"method":"server_info"}' \
      | jq -r '.result.info.server_state')
    echo "Port $port: $state"
    [ "$state" = "proposing" ] && break
    sleep 5
  done
done
```

#### Step 7: Exercise RPC and submit transaction

```bash
# RPC calls
curl -s http://localhost:5005 -d '{"method":"server_info"}'
curl -s http://localhost:5005 -d '{"method":"server_state"}'
curl -s http://localhost:5005 -d '{"method":"ledger","params":[{"ledger_index":"current"}]}'

# Submit transaction
curl -s http://localhost:5005 -d '{
  "method": "submit",
  "params": [{
    "secret": "snoPBrXtMeMyMHUVTgbuqAfg1SUTb",
    "tx_json": {
      "TransactionType": "Payment",
      "Account": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh",
      "Destination": "rPMh7Pi9ct699iZUTWzJaUMR1o42VEfGqF",
      "Amount": "10000000"
    }
  }]
}'
```

Wait 15 seconds for consensus and batch export.

#### Step 8: Verify in Tempo

See the "Verification Queries" section below.

---

## Expected Span Catalog

All 17 production span names instrumented across Phases 2-5:

| Span Name | Source File | Phase | Key Attributes | How to Trigger |
| --- | --- | --- | --- | --- |
| `rpc.request` | ServerHandler.cpp:271 | 2 | -- | Any HTTP RPC call |
| `rpc.process` | ServerHandler.cpp:573 | 2 | -- | Any HTTP RPC call |
| `rpc.ws_message` | ServerHandler.cpp:384 | 2 | -- | WebSocket RPC message |
| `rpc.command.<name>` | RPCHandler.cpp:161 | 2 | `xrpl.rpc.command`, `xrpl.rpc.version`, `xrpl.rpc.role` | Any RPC command |
| `tx.process` | NetworkOPs.cpp:1227 | 3 | `xrpl.tx.hash`, `xrpl.tx.local`, `xrpl.tx.path` | Submit transaction |
| `tx.receive` | PeerImp.cpp:1273 | 3 | `xrpl.peer.id` | Peer relays transaction |
| `consensus.proposal.send` | RCLConsensus.cpp:177 | 4 | `xrpl.consensus.round` | Consensus proposing phase |
| `consensus.ledger_close` | RCLConsensus.cpp:282 | 4 | `xrpl.consensus.ledger.seq`, `xrpl.consensus.mode` | Ledger close event |
| `consensus.accept` | RCLConsensus.cpp:395 | 4 | `xrpl.consensus.proposers`, `xrpl.consensus.round_time_ms` | Ledger accepted |
| `consensus.validation.send` | RCLConsensus.cpp:753 | 4 | `xrpl.consensus.ledger.seq`, `xrpl.consensus.proposing` | Validation sent |
| `consensus.accept.apply` | RCLConsensus.cpp:453 | 4 | `xrpl.consensus.close_time`, `close_time_correct`, `close_resolution_ms`, `state` | Ledger apply + close time |
| `tx.apply` | BuildLedger.cpp:88 | 5 | `xrpl.ledger.tx_count`, `xrpl.ledger.tx_failed` | Ledger close (tx set) |
| `ledger.build` | BuildLedger.cpp:31 | 5 | `xrpl.ledger.seq`, `xrpl.ledger.close_time`, `close_time_correct`, `close_resolution_ms` | Ledger build |
| `ledger.validate` | LedgerMaster.cpp:915 | 5 | `xrpl.ledger.seq`, `xrpl.ledger.validations` | Ledger validated |
| `ledger.store` | LedgerMaster.cpp:409 | 5 | `xrpl.ledger.seq` | Ledger stored |
| `peer.proposal.receive` | PeerImp.cpp:1667 | 5 | `xrpl.peer.id`, `xrpl.peer.proposal.trusted` | Peer sends proposal |
| `peer.validation.receive` | PeerImp.cpp:2264 | 5 | `xrpl.peer.id`, `xrpl.peer.validation.trusted` | Peer sends validation |
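
The catalog attributes can be used directly in TraceQL filters. A couple of hedged examples (the duration threshold is arbitrary, `<TX_HASH>` is a placeholder for a hash taken from a submit response, and `$TEMPO` is the variable defined in the Tempo API section below):

```bash
# Consensus accept rounds that took longer than one second.
curl -s "$TEMPO/api/search" \
  --data-urlencode 'q={resource.service.name="xrpld" && name="consensus.accept" && duration > 1s}' \
  --data-urlencode 'limit=10' | jq '.traces'

# The tx.process span for a specific transaction hash.
curl -s "$TEMPO/api/search" \
  --data-urlencode 'q={name="tx.process" && span.xrpl.tx.hash="<TX_HASH>"}' \
  --data-urlencode 'limit=1' | jq '.traces'
```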

---

## Verification Queries

### Tempo API

Base URL: `http://localhost:3200`

```bash
TEMPO="http://localhost:3200"

# List all services
curl -s "$TEMPO/api/v2/search/tag/resource.service.name/values" | jq '.tagValues[].value'

# Query traces by operation
for op in "rpc.request" "rpc.process" \
  "rpc.command.server_info" "rpc.command.server_state" "rpc.command.ledger" \
  "tx.process" "tx.receive" "tx.apply" \
  "consensus.proposal.send" "consensus.ledger_close" \
  "consensus.accept" "consensus.accept.apply" \
  "consensus.validation.send" \
  "ledger.build" "ledger.validate" "ledger.store" \
  "peer.proposal.receive" "peer.validation.receive"; do
  count=$(curl -s "$TEMPO/api/search" \
    --data-urlencode "q={resource.service.name=\"xrpld\" && name=\"$op\"}" \
    --data-urlencode "limit=5" \
    | jq '.traces | length')
  printf "%-35s %s traces\n" "$op" "$count"
done
```

### Prometheus API

Base URL: `http://localhost:9090`

```bash
PROM="http://localhost:9090"

# Span call counts (from spanmetrics connector)
curl -s "$PROM/api/v1/query?query=traces_span_metrics_calls_total" \
  | jq '.data.result[] | {span: .metric.span_name, count: .value[1]}'

# Latency histogram
curl -s "$PROM/api/v1/query?query=traces_span_metrics_duration_milliseconds_count" \
  | jq '.data.result[] | {span: .metric.span_name, count: .value[1]}'

# RPC calls by command
curl -s "$PROM/api/v1/query?query=traces_span_metrics_calls_total{span_name=~\"rpc.command.*\"}" \
  | jq '.data.result[] | {command: .metric["xrpl.rpc.command"], count: .value[1]}'
```

### Grafana

Open http://localhost:3000 (anonymous admin access enabled).

Pre-configured dashboards:

- **RPC Performance**: Request rates, latency percentiles by command, top commands, WebSocket rate
- **Transaction Overview**: Transaction processing rates, apply duration, peer relay, failed tx rate
- **Consensus Health**: Consensus round duration, proposer counts, mode tracking, accept heatmap
- **Ledger Operations**: Build/validate/store rates and durations, TX apply metrics
- **Peer Network**: Proposal/validation receive rates, trusted vs untrusted breakdown (requires `trace_peer=1`)

Pre-configured datasources:

- **Tempo**: Trace data at `http://tempo:3200`
- **Prometheus**: Metrics at `http://prometheus:9090`
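
To confirm the dashboards and datasources were actually provisioned without opening the UI, Grafana's HTTP API can be queried. A sketch, relying on the anonymous admin access described above:

```bash
# List provisioned dashboards by title.
curl -s "http://localhost:3000/api/search?type=dash-db" | jq '.[].title'

# Confirm the Tempo and Prometheus datasources exist.
curl -s "http://localhost:3000/api/datasources" | jq '.[].name'
```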

---

## Troubleshooting

### No traces in Tempo

1. Check otel-collector logs:
   ```bash
   docker compose -f docker/telemetry/docker-compose.yml logs otel-collector
   ```
2. Verify the xrpld telemetry config has `enabled=1` and the correct endpoint (a quick check is sketched after this list)
3. Check that otel-collector port 4318 is accessible:
   ```bash
   curl -sf http://localhost:4318 && echo "reachable"
   ```
4. Increase `batch_delay_ms` or decrease `batch_size` in xrpld config
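
A quick way to do the check in item 2 is to print the `[telemetry]` stanza of the config file actually passed to xrpld (adjust the path if you used a different config):

```bash
# Print the [telemetry] section from the Test 1 config
# (stops at the first blank line after the section header).
sed -n '/^\[telemetry\]/,/^$/p' docker/telemetry/xrpld-telemetry.cfg
```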

### Nodes not reaching "proposing" state

1. Check that all peer ports (51235-51240) are not in use:
   ```bash
   for p in 51235 51236 51237 51238 51239 51240; do
     ss -tlnp | grep ":$p " && echo "port $p in use"
   done
   ```
2. Verify `[ips_fixed]` lists all 6 peer ports
3. Verify `validators.txt` has all 6 public keys
4. Check node debug logs: `tail -50 /tmp/xrpld-integration/node1/debug.log`
5. Ensure `[peer_private]` is set to `1` (prevents reaching out to public network)

### Transaction not processing

1. Verify genesis account exists:
   ```bash
   curl -s http://localhost:5005 \
     -d '{"method":"account_info","params":[{"account":"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"}]}' \
     | jq .result.account_data.Balance
   ```
2. Check submit response for error codes
3. In standalone mode, remember to call `ledger_accept` after submitting

### Spanmetrics not appearing in Prometheus

1. Verify otel-collector config has `spanmetrics` connector
2. Check that the metrics pipeline is configured:
   ```yaml
   service:
     pipelines:
       metrics:
         receivers: [spanmetrics]
         exporters: [prometheus]
   ```
3. Verify Prometheus can reach collector:
   ```bash
   curl -s http://localhost:9090/api/v1/targets | jq '.data.activeTargets'
   ```
@@ -1,95 +0,0 @@
|
||||
# Docker Compose stack for xrpld OpenTelemetry observability.
|
||||
#
|
||||
# Provides services for local development:
|
||||
# - otel-collector: receives OTLP traces from xrpld, batches and
|
||||
# forwards them to Tempo. Listens on ports 4317 (gRPC)
|
||||
# and 4318 (HTTP).
|
||||
# - tempo: Grafana Tempo tracing backend, queryable via Grafana Explore
|
||||
# on port 3000. Recommended for production (S3/GCS storage, TraceQL).
|
||||
# - grafana: dashboards on port 3000, pre-configured with Tempo
|
||||
# and Prometheus datasources.
|
||||
#
|
||||
# Usage:
|
||||
# docker compose -f docker/telemetry/docker-compose.yml up -d
|
||||
#
|
||||
# Configure xrpld to export traces by adding to xrpld.cfg:
|
||||
# [telemetry]
|
||||
# enabled=1
|
||||
# endpoint=http://localhost:4318/v1/traces
|
||||
|
||||
services:
|
||||
# OpenTelemetry Collector: receives spans from xrpld via OTLP protocol,
|
||||
# batches them for efficiency, and forwards to Tempo for storage.
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector-contrib:0.121.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
ports:
|
||||
- "4317:4317" # OTLP gRPC
|
||||
- "4318:4318" # OTLP HTTP
|
||||
- "8125:8125/udp" # StatsD UDP (beast::insight metrics)
|
||||
- "8889:8889" # Prometheus metrics (spanmetrics + statsd)
|
||||
- "13133:13133" # Health check
|
||||
volumes:
|
||||
# Mount collector pipeline config (receivers → processors → exporters)
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml:ro
|
||||
depends_on:
|
||||
- tempo
|
||||
networks:
|
||||
- xrpld-telemetry
|
||||
|
||||
# Grafana Tempo: distributed tracing backend that stores and indexes
|
||||
# spans. Queryable via TraceQL in Grafana Explore.
|
||||
tempo:
|
||||
image: grafana/tempo:2.7.2
|
||||
command: ["-config.file=/etc/tempo.yaml"]
|
||||
ports:
|
||||
- "3200:3200" # Tempo HTTP API (health check, query)
|
||||
volumes:
|
||||
# Mount Tempo storage and ingestion config
|
||||
- ./tempo.yaml:/etc/tempo.yaml:ro
|
||||
# Persistent volume for trace data (WAL + blocks)
|
||||
- tempo-data:/var/tempo
|
||||
networks:
|
||||
- xrpld-telemetry
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
ports:
|
||||
- "9090:9090"
|
||||
volumes:
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
||||
depends_on:
|
||||
- otel-collector
|
||||
networks:
|
||||
- xrpld-telemetry
|
||||
|
||||
# Grafana: visualization UI with Tempo pre-configured as a datasource.
|
||||
# Anonymous admin access enabled for local development convenience.
|
||||
grafana:
|
||||
image: grafana/grafana:11.5.2
|
||||
environment:
|
||||
- GF_AUTH_ANONYMOUS_ENABLED=true # No login required for local dev
|
||||
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin # Full access without auth
|
||||
ports:
|
||||
- "3000:3000" # Grafana web UI
|
||||
volumes:
|
||||
# Auto-provision Tempo datasource and search filters on startup
|
||||
- ./grafana/provisioning:/etc/grafana/provisioning:ro
|
||||
- ./grafana/dashboards:/var/lib/grafana/dashboards:ro
|
||||
depends_on:
|
||||
- tempo
|
||||
- prometheus
|
||||
networks:
|
||||
- xrpld-telemetry
|
||||
|
||||
# Named volume for Tempo trace storage (WAL and compacted blocks).
|
||||
# Data persists across container restarts. Remove with:
|
||||
# docker compose -f docker/telemetry/docker-compose.yml down -v
|
||||
volumes:
|
||||
tempo-data:
|
||||
|
||||
# Isolated bridge network so services communicate by container name
|
||||
# (e.g., the collector reaches Tempo at http://tempo:4317).
|
||||
networks:
|
||||
xrpld-telemetry:
|
||||
driver: bridge
|
||||
@@ -1,776 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": []
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"title": "Consensus Round Duration",
|
||||
"description": "p95 and p50 duration of consensus accept rounds. The consensus.accept span (RCLConsensus.cpp:395) measures the time to process an accepted ledger including transaction application and state finalization. The span carries xrpl.consensus.proposers and xrpl.consensus.round_time_ms attributes. Normal range is 3-6 seconds on mainnet.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"consensus.accept\"}[5m])))",
|
||||
"legendFormat": "P95 Round Duration [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.50, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"consensus.accept\"}[5m])))",
|
||||
"legendFormat": "P50 Round Duration [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Consensus Proposals Sent Rate",
|
||||
"description": "Rate at which this node sends consensus proposals to the network. Sourced from the consensus.proposal.send span (RCLConsensus.cpp:177) which fires each time the node proposes a transaction set. The span carries xrpl.consensus.round identifying the consensus round number. A healthy proposing node should show steady proposal output.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"consensus.proposal.send\"}[5m]))",
|
||||
"legendFormat": "Proposals / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Proposals / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Ledger Close Duration",
|
||||
"description": "p95 duration of the ledger close event. The consensus.ledger_close span (RCLConsensus.cpp:282) measures the time from when consensus triggers a ledger close to completion. Carries xrpl.consensus.ledger.seq and xrpl.consensus.mode attributes. Compare with Consensus Round Duration to understand how close timing relates to overall round time.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"consensus.ledger_close\"}[5m])))",
|
||||
"legendFormat": "P95 Close Duration [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Validation Send Rate",
|
||||
"description": "Rate at which this node sends ledger validations to the network. Sourced from the consensus.validation.send span (RCLConsensus.cpp:753). Each validation confirms the node has fully validated a ledger. The span carries xrpl.consensus.ledger.seq and xrpl.consensus.proposing. Should closely track the ledger close rate when the node is healthy.",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"consensus.validation.send\"}[5m]))",
|
||||
"legendFormat": "Validations / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Ledger Apply Duration (doAccept)",
|
||||
"description": "Time spent applying the consensus result to build a new ledger. Measured by the consensus.accept.apply span in doAccept().",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"consensus.accept.apply\"}[5m])))",
|
||||
"legendFormat": "P95 Apply Duration [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.50, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"consensus.accept.apply\"}[5m])))",
|
||||
"legendFormat": "P50 Apply Duration [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Close Time Agreement",
|
||||
"description": "Rate of close time agreement vs disagreement across consensus rounds. Based on xrpl.consensus.close_time_correct attribute (true = validators agreed, false = agreed to disagree per avCT_CONSENSUS_PCT).",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"consensus.accept.apply\"}[5m]))",
|
||||
"legendFormat": "Total Rounds / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Rounds / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Consensus Mode Over Time",
|
||||
"description": "Breakdown of consensus ledger close events by the node's consensus mode (Proposing, Observing, Wrong Ledger, Switched Ledger). Grouped by the xrpl.consensus.mode span attribute from consensus.ledger_close. A healthy validator should be predominantly in Proposing mode. Frequent Wrong Ledger or Switched Ledger indicates sync issues.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (xrpl_consensus_mode, exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_consensus_mode=~\"$consensus_mode\", span_name=\"consensus.ledger_close\"}[5m]))",
|
||||
"legendFormat": "{{xrpl_consensus_mode}} [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Events / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Accept vs Close Rate",
|
||||
"description": "Compares the rate of consensus.accept (ledger accepted after consensus) vs consensus.ledger_close (ledger close initiated). These should track closely in a healthy network. A divergence means some close events are not completing the accept phase, potentially indicating consensus failures or timeouts.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"consensus.accept\"}[5m]))",
|
||||
"legendFormat": "Accepts / Sec [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"consensus.ledger_close\"}[5m]))",
|
||||
"legendFormat": "Closes / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Events / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Validation vs Close Rate",
|
||||
"description": "Compares the rate of consensus.validation.send vs consensus.ledger_close. Each validated ledger should produce one validation message. If validations lag behind closes, the node may be falling behind on validation or experiencing issues with the validation pipeline.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 32
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"consensus.validation.send\"}[5m]))",
|
||||
"legendFormat": "Validations / Sec [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"consensus.ledger_close\"}[5m]))",
|
||||
"legendFormat": "Closes / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Events / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Consensus Accept Duration Heatmap",
|
||||
"description": "Heatmap showing the distribution of consensus.accept span durations across histogram buckets over time. Each cell represents how many accept events fell into that duration bucket in a 5m window. Useful for detecting outlier consensus rounds that take abnormally long.",
|
||||
"type": "heatmap",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 32
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"yAxis": {
|
||||
"axisLabel": "Duration (ms)"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum(increase(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"consensus.accept\"}[5m])) by (le)",
|
||||
"legendFormat": "{{le}}",
|
||||
"format": "heatmap"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Close Time: Raw Proposals (Per Node)",
|
||||
"description": "Each node's raw proposed close time (xrpl.consensus.close_time_self) \u2014 the unrounded wall clock value at the moment the node closed its ledger. Compare across nodes to see clock drift.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 40
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "dateTimeFromNow",
|
||||
"custom": {
|
||||
"drawStyle": "points",
|
||||
"pointSize": 6,
|
||||
"showPoints": "always"
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"legend": {
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"calcs": ["lastNotNull"]
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "tempo"
|
||||
},
|
||||
"queryType": "traceql",
|
||||
"query": "{name=\"consensus.accept.apply\" && resource.service.instance.id=~\"$node\" && span.xrpl.consensus.close_time_correct=~\"$close_time_correct\"} | select(span.xrpl.consensus.close_time_self)",
|
||||
"refId": "A"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Close Time: Effective / Quantized",
|
||||
"description": "The consensus-agreed close time after rounding to the current resolution bin (xrpl.consensus.close_time). This is the value written to the ledger header. All nodes in agreement produce the same value.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 40
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "dateTimeFromNow",
|
||||
"custom": {
|
||||
"drawStyle": "points",
|
||||
"pointSize": 6,
|
||||
"showPoints": "always"
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"legend": {
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"calcs": ["lastNotNull"]
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "tempo"
|
||||
},
|
||||
"queryType": "traceql",
|
||||
"query": "{name=\"consensus.accept.apply\" && resource.service.instance.id=~\"$node\" && span.xrpl.consensus.close_time_correct=~\"$close_time_correct\"} | select(span.xrpl.consensus.close_time)",
|
||||
"refId": "A"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Close Time Vote Bins & Resolution",
|
||||
"description": "Number of distinct close time vote bins (xrpl.consensus.close_time_vote_bins) and the bin size / resolution in ms (xrpl.consensus.close_resolution_ms). More bins = more clock disagreement. Resolution adapts: finer (10s) when validators agree, coarser (120s) when they disagree.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 48
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "stepAfter",
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto"
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Vote Bins"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "short"
|
||||
},
|
||||
{
|
||||
"id": "custom.axisPlacement",
|
||||
"value": "left"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Resolution"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "ms"
|
||||
},
|
||||
{
|
||||
"id": "custom.axisPlacement",
|
||||
"value": "right"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"legend": {
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"calcs": ["mean", "max"]
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "tempo"
|
||||
},
|
||||
"queryType": "traceql",
|
||||
"query": "{name=\"consensus.accept.apply\" && resource.service.instance.id=~\"$node\" && span.xrpl.consensus.close_time_correct=~\"$close_time_correct\"} | select(span.xrpl.consensus.close_time_vote_bins)",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "tempo"
|
||||
},
|
||||
"queryType": "traceql",
|
||||
"query": "{name=\"consensus.accept.apply\" && resource.service.instance.id=~\"$node\" && span.xrpl.consensus.close_time_correct=~\"$close_time_correct\"} | select(span.xrpl.consensus.close_resolution_ms)",
|
||||
"refId": "B"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Close Time Resolution Direction",
|
||||
"description": "Whether close time resolution increased (coarser bins, more disagreement), decreased (finer bins, better agreement), or stayed unchanged relative to the previous ledger. Based on xrpl.consensus.resolution_direction attribute.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 48
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {
|
||||
"drawStyle": "bars",
|
||||
"fillOpacity": 40,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto"
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"legend": {
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"calcs": ["lastNotNull"]
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "tempo"
|
||||
},
|
||||
"queryType": "traceql",
|
||||
"query": "{name=\"consensus.accept.apply\" && resource.service.instance.id=~\"$node\" && span.xrpl.consensus.close_time_correct=~\"$close_time_correct\" && span.xrpl.consensus.resolution_direction=~\"$resolution_direction\"} | select(span.xrpl.consensus.resolution_direction)",
|
||||
"refId": "A"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Close Time Bin Distribution",
|
||||
"description": "Distribution of raw proposed close times across quantized bins. Shows how many nodes' proposals landed in each resolution bin per consensus round. A single dominant bin indicates good clock agreement; spread across bins indicates drift or network latency.",
|
||||
"type": "barchart",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 56
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "short",
|
||||
"custom": {
|
||||
"fillOpacity": 60
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"legend": {
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"calcs": ["sum"]
|
||||
},
|
||||
"xTickLabelRotation": -45,
|
||||
"barWidth": 0.8,
|
||||
"stacking": "normal"
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "tempo"
|
||||
},
|
||||
"queryType": "traceql",
|
||||
"query": "{name=\"consensus.accept.apply\" && resource.service.instance.id=~\"$node\" && span.xrpl.consensus.close_time_correct=~\"$close_time_correct\"} | select(span.xrpl.consensus.close_time, span.xrpl.consensus.close_time_vote_bins)",
|
||||
"refId": "A"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"schemaVersion": 39,
|
||||
"tags": ["rippled", "consensus", "telemetry"],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"name": "node",
|
||||
"label": "Node",
|
||||
"description": "Filter by rippled node (service.instance.id \u2014 e.g. Node-1)",
|
||||
"type": "query",
|
||||
"query": "label_values(traces_span_metrics_calls_total, exported_instance)",
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"includeAll": true,
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"multi": true,
|
||||
"refresh": 2,
|
||||
"sort": 1
|
||||
},
|
||||
{
|
||||
"name": "consensus_mode",
|
||||
"label": "Consensus Mode",
|
||||
"description": "Filter by consensus mode (Proposing, Observing, Wrong Ledger, Switched Ledger)",
|
||||
"type": "query",
|
||||
"query": "label_values(traces_span_metrics_calls_total{span_name=\"consensus.ledger_close\"}, xrpl_consensus_mode)",
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"includeAll": true,
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"multi": true,
|
||||
"refresh": 2,
|
||||
"sort": 1
|
||||
},
|
||||
{
|
||||
"name": "close_time_correct",
|
||||
"label": "Close Time Agreed",
|
||||
"type": "custom",
|
||||
"query": "true,false",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"includeAll": true,
|
||||
"allValue": ".*",
|
||||
"multi": true,
|
||||
"options": [
|
||||
{
|
||||
"text": "All",
|
||||
"value": "$__all",
|
||||
"selected": true
|
||||
},
|
||||
{
|
||||
"text": "true",
|
||||
"value": "true",
|
||||
"selected": false
|
||||
},
|
||||
{
|
||||
"text": "false",
|
||||
"value": "false",
|
||||
"selected": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "resolution_direction",
|
||||
"label": "Resolution Direction",
|
||||
"type": "custom",
|
||||
"query": "increased,decreased,unchanged",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"includeAll": true,
|
||||
"allValue": ".*",
|
||||
"multi": true,
|
||||
"options": [
|
||||
{
|
||||
"text": "All",
|
||||
"value": "$__all",
|
||||
"selected": true
|
||||
},
|
||||
{
|
||||
"text": "increased",
|
||||
"value": "increased",
|
||||
"selected": false
|
||||
},
|
||||
{
|
||||
"text": "decreased",
|
||||
"value": "decreased",
|
||||
"selected": false
|
||||
},
|
||||
{
|
||||
"text": "unchanged",
|
||||
"value": "unchanged",
|
||||
"selected": false
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"title": "Consensus Health",
|
||||
"uid": "rippled-consensus"
|
||||
}
|
||||
@@ -1,353 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": []
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"title": "Ledger Build Rate",
|
||||
"description": "Rate at which new ledgers are being built. The ledger.build span (BuildLedger.cpp:31) wraps the entire buildLedgerImpl() function which creates a new ledger from a parent, applies transactions, flushes SHAMap nodes, and sets the accepted state. Should match the consensus close rate (~0.25/sec on mainnet with ~4s rounds).",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"ledger.build\"}[5m]))",
|
||||
"legendFormat": "Builds / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Ledger Build Duration",
|
||||
"description": "p95 and p50 duration of ledger builds. Measures the full buildLedgerImpl() call including transaction application, SHAMap flushing, and ledger acceptance. The span records xrpl.ledger.seq as an attribute. Long build times indicate expensive transaction sets or I/O pressure from SHAMap flushes.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"ledger.build\"}[5m])))",
|
||||
"legendFormat": "P95 Build Duration [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.50, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"ledger.build\"}[5m])))",
|
||||
"legendFormat": "P50 Build Duration [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Ledger Validation Rate",
|
||||
"description": "Rate at which ledgers pass the validation threshold and are accepted as fully validated. The ledger.validate span (LedgerMaster.cpp:915) fires in checkAccept() only after the ledger receives sufficient trusted validations (>= quorum). Records xrpl.ledger.seq and xrpl.ledger.validations (the number of validations received).",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"ledger.validate\"}[5m]))",
|
||||
"legendFormat": "Validations / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Ledger Build Duration Heatmap",
|
||||
"description": "Heatmap showing the distribution of ledger.build durations across histogram buckets over time. Each cell represents the count of ledger builds that fell into that duration bucket in a 5m window. Useful for spotting occasional slow ledger builds that may not appear in percentile charts.",
|
||||
"type": "heatmap",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"yAxis": {
|
||||
"axisLabel": "Duration (ms)"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum(increase(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"ledger.build\"}[5m])) by (le)",
|
||||
"legendFormat": "{{le}}",
|
||||
"format": "heatmap"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms"
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Transaction Apply Duration",
|
||||
"description": "p95 and p50 duration of applying the consensus transaction set during ledger building. The tx.apply span (BuildLedger.cpp:88) wraps applyTransactions() which iterates through the CanonicalTXSet with multiple retry passes. Records xrpl.ledger.tx_count (successful) and xrpl.ledger.tx_failed (failed) as attributes.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"tx.apply\"}[5m])))",
|
||||
"legendFormat": "P95 tx.apply [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.50, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"tx.apply\"}[5m])))",
|
||||
"legendFormat": "P50 tx.apply [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Transaction Apply Rate",
|
||||
"description": "Rate of tx.apply span invocations, reflecting how frequently the transaction application phase runs during ledger building. Each ledger build triggers one tx.apply call. Should closely match the ledger build rate.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"tx.apply\"}[5m]))",
|
||||
"legendFormat": "tx.apply / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Operations / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Ledger Store Rate",
|
||||
"description": "Rate at which ledgers are stored into the ledger history. The ledger.store span (LedgerMaster.cpp:409) wraps storeLedger() which inserts the ledger into the LedgerHistory cache. Records xrpl.ledger.seq. Should match the ledger build rate under normal operation.",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"ledger.store\"}[5m]))",
|
||||
"legendFormat": "Stores / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Build vs Close Duration",
|
||||
"description": "Compares p95 durations of ledger.build (the actual ledger construction in BuildLedger.cpp) vs consensus.ledger_close (the consensus close event in RCLConsensus.cpp). Build time is a subset of close time. A large gap between them indicates overhead in the consensus pipeline outside of ledger construction itself.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"ledger.build\"}[5m])))",
|
||||
"legendFormat": "P95 ledger.build [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"consensus.ledger_close\"}[5m])))",
|
||||
"legendFormat": "P95 consensus.ledger_close [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 39,
|
||||
"tags": ["rippled", "ledger", "telemetry"],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"name": "node",
|
||||
"label": "Node",
|
||||
"description": "Filter by rippled node (service.instance.id \u2014 e.g. Node-1)",
|
||||
"type": "query",
|
||||
"query": "label_values(traces_span_metrics_calls_total, exported_instance)",
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"includeAll": true,
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"multi": true,
|
||||
"refresh": 2,
|
||||
"sort": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"title": "Ledger Operations",
|
||||
"uid": "rippled-ledger-ops"
|
||||
}
|
||||
@@ -1,227 +0,0 @@
{
"annotations": { "list": [] },
"description": "Requires trace_peer=1 in the [telemetry] config section.",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"panels": [
{
"title": "Peer Proposal Receive Rate",
"description": "Rate of consensus proposals received from network peers. The peer.proposal.receive span (PeerImp.cpp:1667) fires in onMessage(TMProposeSet) for each incoming proposal. Records xrpl.peer.id (sending peer) and xrpl.peer.proposal.trusted (whether the proposer is in our UNL). Requires trace_peer=1 in the telemetry config.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"peer.proposal.receive\"}[5m]))", "legendFormat": "Proposals Received / Sec [{{exported_instance}}]" }
],
"fieldConfig": {
"defaults": { "unit": "ops", "custom": { "axisLabel": "Proposals / Sec", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Peer Validation Receive Rate",
"description": "Rate of ledger validations received from network peers. The peer.validation.receive span (PeerImp.cpp:2264) fires in onMessage(TMValidation) for each incoming validation message. Records xrpl.peer.id (sending peer) and xrpl.peer.validation.trusted (whether the validator is trusted). Requires trace_peer=1 in the telemetry config.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"peer.validation.receive\"}[5m]))", "legendFormat": "Validations Received / Sec [{{exported_instance}}]" }
],
"fieldConfig": {
"defaults": { "unit": "ops", "custom": { "axisLabel": "Validations / Sec", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Proposals Trusted vs Untrusted",
"description": "Pie chart showing the ratio of proposals received from trusted validators (in our UNL) vs untrusted validators. Grouped by the xrpl.peer.proposal.trusted span attribute (true/false). A healthy node connected to a well-configured UNL should see a significant portion of trusted proposals. Note: proposals that fail early validation may not have the trusted attribute set.",
"type": "piechart",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (xrpl_peer_proposal_trusted, exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_peer_proposal_trusted=~\"$proposal_trusted\", span_name=\"peer.proposal.receive\"}[5m]))", "legendFormat": "Trusted = {{xrpl_peer_proposal_trusted}} [{{exported_instance}}]" }
],
"fieldConfig": { "defaults": { "unit": "ops" }, "overrides": [] }
},
{
"title": "Validations Trusted vs Untrusted",
"description": "Pie chart showing the ratio of validations received from trusted validators (in our UNL) vs untrusted validators. Grouped by the xrpl.peer.validation.trusted span attribute (true/false). Monitoring this helps detect if the node is receiving validations from the expected set of trusted validators. Note: validations that fail early checks may not have the trusted attribute set.",
"type": "piechart",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (xrpl_peer_validation_trusted, exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_peer_validation_trusted=~\"$validation_trusted\", span_name=\"peer.validation.receive\"}[5m]))", "legendFormat": "Trusted = {{xrpl_peer_validation_trusted}} [{{exported_instance}}]" }
],
"fieldConfig": { "defaults": { "unit": "ops" }, "overrides": [] }
}
],
"schemaVersion": 39,
"tags": ["rippled", "peer", "telemetry"],
"templating": {
"list": [
{ "name": "node", "label": "Node", "description": "Filter by rippled node (service.instance.id \u2014 e.g. Node-1)", "type": "query", "query": "label_values(traces_span_metrics_calls_total, exported_instance)", "datasource": { "type": "prometheus", "uid": "prometheus" }, "includeAll": true, "allValue": ".*", "current": { "text": "All", "value": "$__all" }, "multi": true, "refresh": 2, "sort": 1 },
{ "name": "proposal_trusted", "label": "Proposal Trusted", "description": "Filter by proposal trust status (true = from trusted validator)", "type": "query", "query": "label_values(traces_span_metrics_calls_total{span_name=\"peer.proposal.receive\"}, xrpl_peer_proposal_trusted)", "datasource": { "type": "prometheus", "uid": "prometheus" }, "includeAll": true, "allValue": ".*", "current": { "text": "All", "value": "$__all" }, "multi": true, "refresh": 2, "sort": 1 },
{ "name": "validation_trusted", "label": "Validation Trusted", "description": "Filter by validation trust status (true = from trusted validator)", "type": "query", "query": "label_values(traces_span_metrics_calls_total{span_name=\"peer.validation.receive\"}, xrpl_peer_validation_trusted)", "datasource": { "type": "prometheus", "uid": "prometheus" }, "includeAll": true, "allValue": ".*", "current": { "text": "All", "value": "$__all" }, "multi": true, "refresh": 2, "sort": 1 }
]
},
"time": { "from": "now-1h", "to": "now" },
"title": "Peer Network",
"uid": "rippled-peer-net"
}
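Note: the two pie-chart panels above group peer.proposal.receive and peer.validation.receive calls by their trusted attribute. A minimal standalone PromQL sketch of the trusted share of incoming proposals, assuming the same spanmetrics metric and label names used in the panels, would be:

sum(rate(traces_span_metrics_calls_total{span_name="peer.proposal.receive", xrpl_peer_proposal_trusted="true"}[5m]))
  / sum(rate(traces_span_metrics_calls_total{span_name="peer.proposal.receive"}[5m]))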
@@ -1,376 +0,0 @@
{
"annotations": { "list": [] },
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"panels": [
{
"title": "RPC Request Rate by Command",
"description": "Per-second rate of RPC command executions, broken down by command name (e.g. server_info, submit). Calculated as rate(traces_span_metrics_calls_total{span_name=~\"rpc.command.*\"}) over a 5m window, grouped by the xrpl.rpc.command span attribute.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (xrpl_rpc_command, exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\"}[5m]))", "legendFormat": "{{xrpl_rpc_command}} [{{exported_instance}}]" }
],
"fieldConfig": {
"defaults": { "unit": "reqps", "custom": { "axisLabel": "Requests / Sec", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "RPC Latency P95 by Command",
"description": "95th percentile response time for each RPC command. Computed from the spanmetrics duration histogram using histogram_quantile(0.95) over rpc.command.* spans, grouped by xrpl.rpc.command. High values indicate slow commands that may need optimization.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "histogram_quantile(0.95, sum by (le, xrpl_rpc_command, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\"}[5m])))", "legendFormat": "P95 {{xrpl_rpc_command}} [{{exported_instance}}]" }
],
"fieldConfig": {
"defaults": { "unit": "ms", "custom": { "axisLabel": "Latency (ms)", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "RPC Error Rate",
"description": "Percentage of RPC commands that completed with an error status, per command. Calculated as (error calls / total calls) * 100, where errors have status_code=STATUS_CODE_ERROR. Thresholds: green < 1%, yellow 1-5%, red > 5%.",
"type": "bargauge",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (xrpl_rpc_command, exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\", status_code=\"STATUS_CODE_ERROR\"}[5m])) / sum by (xrpl_rpc_command, exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\"}[5m])) * 100", "legendFormat": "{{xrpl_rpc_command}} [{{exported_instance}}]" }
],
"fieldConfig": {
"defaults": { "unit": "percent", "thresholds": { "steps": [ { "color": "green", "value": null }, { "color": "yellow", "value": 1 }, { "color": "red", "value": 5 } ] } },
"overrides": []
}
},
{
"title": "RPC Latency Heatmap",
"description": "Distribution of RPC command response times across histogram buckets. Shows the density of requests at each latency level over time. Each cell represents the count of requests that fell into that duration bucket in a 5m window. Useful for spotting bimodal latency patterns.",
"type": "heatmap",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" }, "yAxis": { "axisLabel": "Duration (ms)" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum(increase(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\"}[5m])) by (le)", "legendFormat": "{{le}}", "format": "heatmap" }
]
},
{
"title": "Overall RPC Throughput",
"description": "Aggregate RPC throughput showing two layers of the request pipeline. rpc.request is the outer HTTP handler (ServerHandler.cpp:271) that accepts incoming connections. rpc.process is the inner processing layer (ServerHandler.cpp:573) that parses and dispatches. A gap between the two indicates requests being queued or rejected before processing.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=\"rpc.request\"}[5m]))", "legendFormat": "rpc.request / Sec [{{exported_instance}}]" },
{ "datasource": { "type": "prometheus" }, "expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=\"rpc.process\"}[5m]))", "legendFormat": "rpc.process / Sec [{{exported_instance}}]" }
],
"fieldConfig": {
"defaults": { "unit": "reqps", "custom": { "axisLabel": "Requests / Sec", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "RPC Success vs Error",
"description": "Aggregate rate of successful vs failed RPC commands across all command types. Success = status_code UNSET (OpenTelemetry default for OK spans). Error = status_code STATUS_CODE_ERROR. A sustained error rate warrants investigation via per-command breakdown above.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\", status_code=\"STATUS_CODE_UNSET\"}[5m]))", "legendFormat": "Success [{{exported_instance}}]" },
{ "datasource": { "type": "prometheus" }, "expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\", status_code=\"STATUS_CODE_ERROR\"}[5m]))", "legendFormat": "Error [{{exported_instance}}]" }
],
"fieldConfig": {
"defaults": { "unit": "ops", "custom": { "axisLabel": "Commands / Sec", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Top Commands by Volume",
"description": "Top 10 most frequently called RPC commands by total invocation count over the last 5 minutes. Uses topk(10, increase(calls_total)) to rank commands. Helps identify the hottest API endpoints driving load on the node.",
"type": "bargauge",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 24 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "topk(10, sum by (xrpl_rpc_command, exported_instance) (increase(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=~\"rpc.command.*\"}[5m])))", "legendFormat": "{{xrpl_rpc_command}} [{{exported_instance}}]" }
],
"fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }
},
{
"title": "WebSocket Message Rate",
"description": "Rate of incoming WebSocket RPC messages processed by the server. Sourced from the rpc.ws_message span (ServerHandler.cpp:384). Only active when clients connect via WebSocket instead of HTTP. Zero is normal if only HTTP RPC is in use.",
"type": "stat",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 24 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_rpc_command=~\"$command\", span_name=\"rpc.ws_message\"}[5m]))", "legendFormat": "WS Messages / Sec [{{exported_instance}}]" }
],
"fieldConfig": { "defaults": { "unit": "ops" }, "overrides": [] }
}
],
"schemaVersion": 39,
"tags": ["rippled", "rpc", "telemetry"],
"templating": {
"list": [
{ "name": "node", "label": "Node", "description": "Filter by rippled node (service.instance.id \u2014 e.g. Node-1)", "type": "query", "query": "label_values(traces_span_metrics_calls_total, exported_instance)", "datasource": { "type": "prometheus", "uid": "prometheus" }, "includeAll": true, "allValue": ".*", "current": { "text": "All", "value": "$__all" }, "multi": true, "refresh": 2, "sort": 1 },
{ "name": "command", "label": "RPC Command", "description": "Filter by RPC command name (e.g., server_info, submit)", "type": "query", "query": "label_values(traces_span_metrics_calls_total{span_name=~\"rpc.command.*\"}, xrpl_rpc_command)", "datasource": { "type": "prometheus", "uid": "prometheus" }, "includeAll": true, "allValue": ".*", "current": { "text": "All", "value": "$__all" }, "multi": true, "refresh": 2, "sort": 1 }
]
},
"time": { "from": "now-1h", "to": "now" },
"title": "RPC Performance",
"uid": "rippled-rpc-perf"
}
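Note: the RPC Error Rate panel above computes error percentage as error calls divided by total calls. A simplified aggregate form of the same PromQL, dropping the per-command and per-node grouping used in the panel, would be:

100 * sum(rate(traces_span_metrics_calls_total{span_name=~"rpc.command.*", status_code="STATUS_CODE_ERROR"}[5m]))
  / sum(rate(traces_span_metrics_calls_total{span_name=~"rpc.command.*"}[5m]))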
@@ -1,506 +0,0 @@
{
"annotations": { "list": [] },
"description": "Ledger data exchange and object fetch traffic from beast::insight StatsD. Covers ledger sync, node data retrieval, and transaction set exchange. Requires [insight] server=statsd in rippled config.",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"panels": [
{
"title": "Ledger Data Exchange (Bytes In)",
"description": "Inbound bytes for ledger data sub-categories. 'ledger_data' = aggregated ledger data, sub-types include Transaction_Set_candidate (proposed tx sets), Transaction_Node (tx tree nodes), and Account_State_Node (state tree nodes). High Account_State_Node traffic indicates state sync; high Transaction_Set_candidate indicates consensus catch-up. Sourced from TrafficCount.h ledger_data_* categories.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_get_Bytes_In", "legendFormat": "Ledger Data Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_share_Bytes_In", "legendFormat": "Ledger Data Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_Transaction_Set_candidate_get_Bytes_In", "legendFormat": "TX Set Candidate Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_Transaction_Set_candidate_share_Bytes_In", "legendFormat": "TX Set Candidate Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_Transaction_Node_get_Bytes_In", "legendFormat": "TX Node Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_Transaction_Node_share_Bytes_In", "legendFormat": "TX Node Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_Account_State_Node_get_Bytes_In", "legendFormat": "Account State Node Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_data_Account_State_Node_share_Bytes_In", "legendFormat": "Account State Node Share" }
],
"fieldConfig": {
"defaults": { "unit": "decbytes", "custom": { "axisLabel": "Bytes In", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Ledger Share/Get Traffic (Bytes)",
"description": "Legacy ledger share and get traffic by sub-type. These are the older ledger fetch protocol categories (as opposed to ledger_data_* which is the newer protocol). Sub-types: Transaction_Set_candidate, Transaction_node, Account_State_node, plus aggregate ledger_share and ledger_get. Sourced from TrafficCount.h ledger_* categories.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_share_Bytes_In", "legendFormat": "Ledger Share In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_get_Bytes_In", "legendFormat": "Ledger Get In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_Transaction_Set_candidate_share_Bytes_In", "legendFormat": "TX Set Candidate Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_Transaction_Set_candidate_get_Bytes_In", "legendFormat": "TX Set Candidate Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_Transaction_node_share_Bytes_In", "legendFormat": "TX Node Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_Transaction_node_get_Bytes_In", "legendFormat": "TX Node Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_Account_State_node_share_Bytes_In", "legendFormat": "Account State Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ledger_Account_State_node_get_Bytes_In", "legendFormat": "Account State Get" }
],
"fieldConfig": {
"defaults": { "unit": "decbytes", "custom": { "axisLabel": "Bytes In", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "GetObject Traffic by Type (Bytes In)",
"description": "Object fetch traffic by object type. GetObject is the protocol for fetching specific SHAMap nodes. Types: Ledger (full ledger headers), Transaction (individual txs), Transaction_node (tx tree nodes), Account_State_node (state tree nodes), CAS (Content Addressable Storage objects), Fetch_Pack (batch fetch during catch-up), Transactions (bulk tx fetch). High Fetch_Pack traffic indicates a node is catching up. Sourced from TrafficCount.h getobject_* categories.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Ledger_get_Bytes_In", "legendFormat": "Ledger Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Ledger_share_Bytes_In", "legendFormat": "Ledger Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transaction_get_Bytes_In", "legendFormat": "Transaction Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transaction_share_Bytes_In", "legendFormat": "Transaction Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transaction_node_get_Bytes_In", "legendFormat": "TX Node Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transaction_node_share_Bytes_In", "legendFormat": "TX Node Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Account_State_node_get_Bytes_In", "legendFormat": "Account State Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Account_State_node_share_Bytes_In", "legendFormat": "Account State Share" }
],
"fieldConfig": {
"defaults": { "unit": "decbytes", "custom": { "axisLabel": "Bytes In", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "GetObject Aggregate & Special Types (Bytes In)",
"description": "Aggregate getobject traffic plus special categories: CAS (Content Addressable Storage) for SHAMap node fetch, Fetch_Pack for bulk batch downloads during catch-up, Transactions for bulk tx fetch, and the aggregate getobject_get/getobject_share totals. Sourced from TrafficCount.h getobject_* categories.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_CAS_get_Bytes_In", "legendFormat": "CAS Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_CAS_share_Bytes_In", "legendFormat": "CAS Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Fetch_Pack_share_Bytes_In", "legendFormat": "Fetch Pack Share" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Fetch_Pack_get_Bytes_In", "legendFormat": "Fetch Pack Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transactions_get_Bytes_In", "legendFormat": "Transactions Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_get_Bytes_In", "legendFormat": "Aggregate Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_share_Bytes_In", "legendFormat": "Aggregate Share" }
],
"fieldConfig": {
"defaults": { "unit": "decbytes", "custom": { "axisLabel": "Bytes In", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "GetObject Messages by Type",
"description": "Message counts for object fetch operations. Shows how many individual fetch requests and responses are exchanged per type. High message counts with low byte counts indicate small object fetches; the inverse indicates large batch transfers. Sourced from TrafficCount.h getobject_* categories.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Ledger_get_Messages_In", "legendFormat": "Ledger Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transaction_get_Messages_In", "legendFormat": "Transaction Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transaction_node_get_Messages_In", "legendFormat": "TX Node Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Account_State_node_get_Messages_In", "legendFormat": "Account State Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_CAS_get_Messages_In", "legendFormat": "CAS Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Fetch_Pack_get_Messages_In", "legendFormat": "Fetch Pack Get" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_getobject_Transactions_get_Messages_In", "legendFormat": "Transactions Get" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Messages In", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Overlay Traffic Heatmap (All Categories, Bytes In)",
"description": "Bar gauge showing all overlay traffic categories ranked by inbound bytes. Provides a complete at-a-glance view of which protocol message types consume the most bandwidth across all 57+ traffic categories. Sourced from all TrafficCount.h categories via wildcard match.",
"type": "bargauge",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" }, "displayMode": "gradient", "orientation": "horizontal", "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "topk(20, {__name__=~\"rippled_.*_Bytes_In\", __name__!~\"rippled_total_.*\"})", "legendFormat": "{{__name__}}" }
],
"fieldConfig": {
"defaults": { "unit": "decbytes", "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "yellow", "value": 1048576 }, { "color": "red", "value": 104857600 } ] } },
"overrides": []
}
}
],
"schemaVersion": 39,
"tags": ["rippled", "statsd", "ledger", "sync", "telemetry"],
"templating": { "list": [] },
"time": { "from": "now-1h", "to": "now" },
"title": "Ledger Data & Sync (StatsD)",
"uid": "rippled-statsd-ledger-sync"
}
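Note: the bar-gauge panel above ranks overlay traffic categories with a wildcard metric selector. The same selector can be run directly against Prometheus to preview the ranking:

topk(20, {__name__=~"rippled_.*_Bytes_In", __name__!~"rippled_total_.*"})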
@@ -1,671 +0,0 @@
{
"annotations": { "list": [] },
"description": "Network traffic and peer metrics from beast::insight StatsD. Requires [insight] server=statsd in rippled config.",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"panels": [
{
"title": "Active Peers",
"description": "Number of active inbound and outbound peer connections. Sourced from Peer_Finder.Active_Inbound_Peers and Peer_Finder.Active_Outbound_Peers gauges (PeerfinderManager.cpp:214-215). A healthy mainnet node typically has 10-21 outbound and 0-85 inbound peers depending on configuration.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_Peer_Finder_Active_Inbound_Peers", "legendFormat": "Inbound Peers" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_Peer_Finder_Active_Outbound_Peers", "legendFormat": "Outbound Peers" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Peers", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Peer Disconnects",
"description": "Cumulative count of peer disconnections. Sourced from the Overlay.Peer_Disconnects gauge (OverlayImpl.h:557). A rising trend indicates network instability, aggressive peer management, or resource exhaustion causing connection drops.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_Overlay_Peer_Disconnects", "legendFormat": "Disconnects" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Disconnects", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Total Network Bytes",
"description": "Total bytes sent and received across all peer connections. Sourced from the total.Bytes_In and total.Bytes_Out traffic category gauges (OverlayImpl.h:535-548). Provides a high-level view of network bandwidth consumption.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_total_Bytes_In", "legendFormat": "Bytes In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_total_Bytes_Out", "legendFormat": "Bytes Out" }
],
"fieldConfig": {
"defaults": { "unit": "decbytes", "custom": { "axisLabel": "Bytes", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Total Network Messages",
"description": "Total messages sent and received across all peer connections. Sourced from the total.Messages_In and total.Messages_Out traffic category gauges (OverlayImpl.h:535-548). Shows the overall message throughput of the overlay network.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_total_Messages_In", "legendFormat": "Messages In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_total_Messages_Out", "legendFormat": "Messages Out" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Messages", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Transaction Traffic",
"description": "Bytes and messages for transaction-related overlay traffic. Includes the transactions traffic category (OverlayImpl/TrafficCount.h). Spikes indicate high transaction volume on the network or transaction flooding.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_transactions_Messages_In", "legendFormat": "TX Messages In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_transactions_Messages_Out", "legendFormat": "TX Messages Out" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_transactions_duplicate_Messages_In", "legendFormat": "TX Duplicate In" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Messages", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Proposal Traffic",
"description": "Messages for consensus proposal overlay traffic. Includes proposals, proposals_untrusted, and proposals_duplicate categories (TrafficCount.h). High untrusted or duplicate counts may indicate UNL misconfiguration or network spam.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_proposals_Messages_In", "legendFormat": "Proposals In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_proposals_Messages_Out", "legendFormat": "Proposals Out" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_proposals_untrusted_Messages_In", "legendFormat": "Untrusted In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_proposals_duplicate_Messages_In", "legendFormat": "Duplicate In" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Messages", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Validation Traffic",
"description": "Messages for validation overlay traffic. Includes validations, validations_untrusted, and validations_duplicate categories (TrafficCount.h). Monitoring trusted vs untrusted validation traffic helps detect UNL health issues.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 24 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_validations_Messages_In", "legendFormat": "Validations In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_validations_Messages_Out", "legendFormat": "Validations Out" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_validations_untrusted_Messages_In", "legendFormat": "Untrusted In" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_validations_duplicate_Messages_In", "legendFormat": "Duplicate In" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Messages", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Overlay Traffic by Category (Bytes In)",
"description": "Top traffic categories by inbound bytes. Includes all 57 overlay traffic categories from TrafficCount.h. Shows which protocol message types consume the most bandwidth. Categories include transactions, proposals, validations, ledger data, getobject, and overlay overhead.",
"type": "bargauge",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 24 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "topk(10, {__name__=~\"rippled_.*_Bytes_In\", __name__!~\"rippled_total_.*\"})", "legendFormat": "{{__name__}}" }
],
"fieldConfig": {
"defaults": { "unit": "decbytes" },
"overrides": [
{ "matcher": { "id": "byName", "options": "rippled_transactions_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Transactions" } ] },
{ "matcher": { "id": "byName", "options": "rippled_proposals_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Proposals" } ] },
{ "matcher": { "id": "byName", "options": "rippled_validations_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Validations" } ] },
{ "matcher": { "id": "byName", "options": "rippled_overhead_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Overhead" } ] },
{ "matcher": { "id": "byName", "options": "rippled_overhead_overlay_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Overhead Overlay" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ping_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Ping" } ] },
{ "matcher": { "id": "byName", "options": "rippled_status_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Status" } ] },
{ "matcher": { "id": "byName", "options": "rippled_getObject_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Get Object" } ] },
{ "matcher": { "id": "byName", "options": "rippled_haveTxSet_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Have Tx Set" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledgerData_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Ledger Data" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_share_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Ledger Share" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_data_get_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Ledger Data Get" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_data_share_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Ledger Data Share" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_data_Account_State_Node_get_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Account State Node Get" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_data_Account_State_Node_share_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Account State Node Share" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_data_Transaction_Node_get_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Transaction Node Get" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_data_Transaction_Node_share_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Transaction Node Share" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_data_Transaction_Set_candidate_get_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Tx Set Candidate Get" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_Account_State_node_share_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Account State Node Share (Legacy)" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_Transaction_Set_candidate_share_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Tx Set Candidate Share" } ] },
{ "matcher": { "id": "byName", "options": "rippled_ledger_Transaction_node_share_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Transaction Node Share (Legacy)" } ] },
{ "matcher": { "id": "byName", "options": "rippled_set_get_Bytes_In" }, "properties": [ { "id": "displayName", "value": "Set Get" } ] }
]
}
}
],
"schemaVersion": 39,
"tags": ["rippled", "statsd", "network", "telemetry"],
"templating": { "list": [] },
"time": { "from": "now-1h", "to": "now" },
"title": "Network Traffic (StatsD)",
"uid": "rippled-statsd-network"
}
@@ -1,415 +0,0 @@
{
"annotations": { "list": [] },
"description": "Node health metrics from beast::insight StatsD. Requires [insight] server=statsd in rippled config.",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"panels": [
{
"title": "Validated Ledger Age",
"description": "Age of the most recently validated ledger in seconds. Sourced from the LedgerMaster.Validated_Ledger_Age gauge (LedgerMaster.h:373) which is updated every collection interval via the insight hook. Values above 20s indicate the node is falling behind the network.",
"type": "stat",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_LedgerMaster_Validated_Ledger_Age", "legendFormat": "Validated Age" }
],
"fieldConfig": {
"defaults": { "unit": "s", "thresholds": { "steps": [ { "color": "green", "value": null }, { "color": "yellow", "value": 10 }, { "color": "red", "value": 20 } ] } },
"overrides": []
}
},
{
"title": "Published Ledger Age",
"description": "Age of the most recently published ledger in seconds. Sourced from the LedgerMaster.Published_Ledger_Age gauge (LedgerMaster.h:374). Published ledger age should track close to validated ledger age. A growing gap indicates publish pipeline backlog.",
"type": "stat",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_LedgerMaster_Published_Ledger_Age", "legendFormat": "Published Age" }
],
"fieldConfig": {
"defaults": { "unit": "s", "thresholds": { "steps": [ { "color": "green", "value": null }, { "color": "yellow", "value": 10 }, { "color": "red", "value": 20 } ] } },
"overrides": []
}
},
{
"title": "Operating Mode Duration",
"description": "Cumulative time spent in each operating mode (Disconnected, Connected, Syncing, Tracking, Full). Sourced from State_Accounting.*_duration gauges (NetworkOPs.cpp:774-778). A healthy node should spend the vast majority of time in Full mode.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Full_duration", "legendFormat": "Full" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Tracking_duration", "legendFormat": "Tracking" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Syncing_duration", "legendFormat": "Syncing" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Connected_duration", "legendFormat": "Connected" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Disconnected_duration", "legendFormat": "Disconnected" }
],
"fieldConfig": {
"defaults": { "unit": "s", "custom": { "axisLabel": "Duration (Sec)", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Operating Mode Transitions",
"description": "Count of transitions into each operating mode. Sourced from State_Accounting.*_transitions gauges (NetworkOPs.cpp:780-786). Frequent transitions out of Full mode indicate instability. Transitions to Disconnected or Syncing warrant investigation.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Full_transitions", "legendFormat": "Full" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Tracking_transitions", "legendFormat": "Tracking" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Syncing_transitions", "legendFormat": "Syncing" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Connected_transitions", "legendFormat": "Connected" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_State_Accounting_Disconnected_transitions", "legendFormat": "Disconnected" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Transitions", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "I/O Latency",
"description": "P95 and P50 of the I/O service loop latency in milliseconds. Sourced from the ios_latency event (Application.cpp:438) which measures how long it takes for the io_context to process a timer callback. Values above 10ms are logged; above 500ms trigger warnings. High values indicate thread pool saturation or blocking operations.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ios_latency{quantile=\"0.95\"}", "legendFormat": "P95 I/O Latency" },
{ "datasource": { "type": "prometheus" }, "expr": "rippled_ios_latency{quantile=\"0.5\"}", "legendFormat": "P50 I/O Latency" }
],
"fieldConfig": {
"defaults": { "unit": "ms", "custom": { "axisLabel": "Latency (ms)", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Job Queue Depth",
"description": "Current number of jobs waiting in the job queue. Sourced from the job_count gauge (JobQueue.cpp:26). A sustained high value indicates the node cannot process work fast enough \u2014 common during ledger replay or heavy RPC load.",
"type": "timeseries",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 16 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rippled_job_count", "legendFormat": "Job Queue Depth" }
],
"fieldConfig": {
"defaults": { "unit": "short", "custom": { "axisLabel": "Jobs", "spanNulls": true, "insertNulls": false, "showPoints": "auto", "pointSize": 3 } },
"overrides": []
}
},
{
"title": "Ledger Fetch Rate",
"description": "Rate of ledger fetch requests initiated by the node. Sourced from the ledger_fetches counter (InboundLedgers.cpp:44) which increments each time the node requests a ledger from a peer. High rates indicate the node is catching up or missing ledgers.",
"type": "stat",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 24 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rate(rippled_ledger_fetches_total[5m])", "legendFormat": "Fetches / Sec" }
],
"fieldConfig": { "defaults": { "unit": "ops" }, "overrides": [] }
},
{
"title": "Ledger History Mismatches",
"description": "Rate of ledger history hash mismatches. Sourced from the ledger.history.mismatch counter (LedgerHistory.cpp:16) which increments when a built ledger hash does not match the expected validated hash. Non-zero values indicate consensus divergence or database corruption.",
"type": "stat",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 24 },
"options": { "tooltip": { "mode": "multi", "sort": "desc" } },
"targets": [
{ "datasource": { "type": "prometheus" }, "expr": "rate(rippled_ledger_history_mismatch_total[5m])", "legendFormat": "Mismatches / Sec" }
],
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"thresholds": {
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 0.01
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 39,
|
||||
"tags": ["rippled", "statsd", "node-health", "telemetry"],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"title": "Node Health (StatsD)",
|
||||
"uid": "rippled-statsd-node-health"
|
||||
}
|
||||
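As a quick sanity check of the Node Health panels above, the same series can be queried directly from the Prometheus HTTP API. A minimal sketch, assuming a Prometheus instance scraping these StatsD-derived metrics is reachable at localhost:9090 (the same endpoint the integration test below uses) and that the metric names match the dashboard expressions:

#!/usr/bin/env bash
# Spot-check Node Health metrics outside Grafana.
PROM="http://localhost:9090"
# Current job queue depth (gauge).
curl -sf "$PROM/api/v1/query" --data-urlencode 'query=rippled_job_count' | jq '.data.result'
# P95 I/O service loop latency in milliseconds (summary quantile).
curl -sf "$PROM/api/v1/query" --data-urlencode 'query=rippled_ios_latency{quantile="0.95"}' | jq '.data.result'
# Ledger fetch rate over the last 5 minutes.
curl -sf "$PROM/api/v1/query" --data-urlencode 'query=rate(rippled_ledger_fetches_total[5m])' | jq '.data.result'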
@@ -1,566 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": []
|
||||
},
|
||||
"description": "Detailed overlay traffic breakdown for categories not covered by the main Network Traffic dashboard. Includes squelch, overhead, validator lists, object fetch, ledger sync, and protocol negotiation traffic. Requires [insight] server=statsd in rippled config.",
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"title": "Squelch Traffic (Messages)",
|
||||
"description": "Squelch-related overlay messages. Squelch is the peer traffic management protocol that suppresses redundant message forwarding. 'squelch' = squelch control messages, 'squelch_suppressed' = messages suppressed by squelch, 'squelch_ignored' = squelch directives that were ignored. High suppressed counts indicate effective bandwidth savings; high ignored counts may indicate misconfigured peers. Sourced from TrafficCount.h squelch categories.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_squelch_Messages_In",
|
||||
"legendFormat": "Squelch In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_squelch_Messages_Out",
|
||||
"legendFormat": "Squelch Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_squelch_suppressed_Messages_In",
|
||||
"legendFormat": "Suppressed In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_squelch_suppressed_Messages_Out",
|
||||
"legendFormat": "Suppressed Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_squelch_ignored_Messages_In",
|
||||
"legendFormat": "Ignored In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_squelch_ignored_Messages_Out",
|
||||
"legendFormat": "Ignored Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "short",
|
||||
"custom": {
|
||||
"axisLabel": "Messages",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Overhead Traffic Breakdown (Bytes)",
|
||||
"description": "Overlay protocol overhead by sub-category. 'overhead' = base protocol overhead (ping, status, etc.), 'overhead_cluster' = intra-cluster communication overhead, 'overhead_manifest' = validator manifest distribution overhead. High cluster overhead may indicate frequent cluster state syncs; high manifest overhead occurs during UNL changes. Sourced from TrafficCount.h overhead categories.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_overhead_Bytes_In",
|
||||
"legendFormat": "Base Overhead In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_overhead_Bytes_Out",
|
||||
"legendFormat": "Base Overhead Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_overhead_cluster_Bytes_In",
|
||||
"legendFormat": "Cluster In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_overhead_cluster_Bytes_Out",
|
||||
"legendFormat": "Cluster Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_overhead_manifest_Bytes_In",
|
||||
"legendFormat": "Manifest In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_overhead_manifest_Bytes_Out",
|
||||
"legendFormat": "Manifest Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "decbytes",
|
||||
"custom": {
|
||||
"axisLabel": "Bytes",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Validator List Traffic",
|
||||
"description": "Validator list (UNL) distribution traffic. Validator lists are exchanged when peers share their trusted validator configurations. Spikes occur during UNL updates or when new peers connect. Sourced from TrafficCount.h validator_lists category.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_validator_lists_Bytes_In",
|
||||
"legendFormat": "Bytes In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_validator_lists_Bytes_Out",
|
||||
"legendFormat": "Bytes Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_validator_lists_Messages_In",
|
||||
"legendFormat": "Messages In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_validator_lists_Messages_Out",
|
||||
"legendFormat": "Messages Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "short",
|
||||
"custom": {
|
||||
"axisLabel": "Count",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/Bytes/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.axisPlacement",
|
||||
"value": "right"
|
||||
},
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "decbytes"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Set Get/Share Traffic (Bytes)",
|
||||
"description": "Transaction set get and share traffic. 'set_get' = requests to fetch transaction sets (sent during ledger close), 'set_share' = responses sharing transaction sets. High set_get traffic indicates peers frequently requesting missing transaction sets, which may signal sync delays. Sourced from TrafficCount.h set_get/set_share categories.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_set_get_Bytes_In",
|
||||
"legendFormat": "Set Get In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_set_get_Bytes_Out",
|
||||
"legendFormat": "Set Get Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_set_share_Bytes_In",
|
||||
"legendFormat": "Set Share In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_set_share_Bytes_Out",
|
||||
"legendFormat": "Set Share Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "decbytes",
|
||||
"custom": {
|
||||
"axisLabel": "Bytes",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Have/Requested Transactions (Messages)",
|
||||
"description": "Transaction availability protocol messages. 'have_transactions' = advertisements that a peer has specific transactions available, 'requested_transactions' = explicit requests for transaction data. A high ratio of requested to have may indicate peers are behind on transaction propagation. Sourced from TrafficCount.h have_transactions/requested_transactions categories.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_have_transactions_Messages_In",
|
||||
"legendFormat": "Have TX In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_have_transactions_Messages_Out",
|
||||
"legendFormat": "Have TX Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_requested_transactions_Messages_In",
|
||||
"legendFormat": "Requested TX In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_requested_transactions_Messages_Out",
|
||||
"legendFormat": "Requested TX Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "short",
|
||||
"custom": {
|
||||
"axisLabel": "Messages",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Unknown / Unclassified Traffic",
|
||||
"description": "Traffic that does not match any known overlay message category. Non-zero values may indicate protocol version mismatches, corrupted messages, or new message types not yet classified. Sourced from TrafficCount.h unknown category.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_unknown_Bytes_In",
|
||||
"legendFormat": "Unknown Bytes In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_unknown_Bytes_Out",
|
||||
"legendFormat": "Unknown Bytes Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_unknown_Messages_In",
|
||||
"legendFormat": "Unknown Messages In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_unknown_Messages_Out",
|
||||
"legendFormat": "Unknown Messages Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "short",
|
||||
"custom": {
|
||||
"axisLabel": "Count",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/Bytes/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.axisPlacement",
|
||||
"value": "right"
|
||||
},
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "decbytes"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Proof Path Traffic",
|
||||
"description": "Proof path request/response traffic for ledger state proof exchange. Used by peers to verify specific ledger entries without downloading the full ledger. High request volume may indicate peers validating state during catch-up. Sourced from TrafficCount.h proof_path_request/proof_path_response categories.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_proof_path_request_Bytes_In",
|
||||
"legendFormat": "Request Bytes In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_proof_path_request_Bytes_Out",
|
||||
"legendFormat": "Request Bytes Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_proof_path_response_Bytes_In",
|
||||
"legendFormat": "Response Bytes In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_proof_path_response_Bytes_Out",
|
||||
"legendFormat": "Response Bytes Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "decbytes",
|
||||
"custom": {
|
||||
"axisLabel": "Bytes",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Replay Delta Traffic",
|
||||
"description": "Replay delta request/response traffic for ledger replay protocol. Used during catch-up to efficiently replay ledger state changes. Sourced from TrafficCount.h replay_delta_request/replay_delta_response categories.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_replay_delta_request_Bytes_In",
|
||||
"legendFormat": "Request Bytes In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_replay_delta_request_Bytes_Out",
|
||||
"legendFormat": "Request Bytes Out"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_replay_delta_response_Bytes_In",
|
||||
"legendFormat": "Response Bytes In"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_replay_delta_response_Bytes_Out",
|
||||
"legendFormat": "Response Bytes Out"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "decbytes",
|
||||
"custom": {
|
||||
"axisLabel": "Bytes",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 39,
|
||||
"tags": ["rippled", "statsd", "overlay", "network", "telemetry"],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"title": "Overlay Traffic Detail (StatsD)",
|
||||
"uid": "rippled-statsd-overlay-detail"
|
||||
}
|
||||
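The squelch panels above can be reduced to a single suppression ratio when eyeballing bandwidth savings. A hedged sketch, assuming the same Prometheus endpoint and the gauge names used by the dashboard (label sets must line up for the vector division to match):

#!/usr/bin/env bash
# Fraction of incoming squelch-related messages that were suppressed.
PROM="http://localhost:9090"
QUERY='rippled_squelch_suppressed_Messages_In / (rippled_squelch_Messages_In + rippled_squelch_suppressed_Messages_In)'
curl -sf "$PROM/api/v1/query" --data-urlencode "query=$QUERY" | jq '.data.result'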
@@ -1,396 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": []
|
||||
},
|
||||
"description": "RPC and pathfinding metrics from beast::insight StatsD. Requires [insight] server=statsd in rippled config.",
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"title": "RPC Request Rate (StatsD)",
|
||||
"description": "Rate of RPC requests as counted by the beast::insight counter. Sourced from rpc.requests (ServerHandler.cpp:108) which increments on every HTTP and WebSocket RPC request. Compare with the span-based rpc.request rate in the RPC Performance dashboard for cross-validation.",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rate(rippled_rpc_requests_total[5m])",
|
||||
"legendFormat": "Requests / Sec"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "reqps"
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "RPC Response Time (StatsD)",
|
||||
"description": "P95 and P50 of RPC response time from the beast::insight timer. Sourced from the rpc.time event (ServerHandler.cpp:110) which records elapsed milliseconds for each RPC response. This measures the full HTTP handler time, not just command execution. Compare with span-based rpc.request duration.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_time{quantile=\"0.95\"}",
|
||||
"legendFormat": "P95 Response Time"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_time{quantile=\"0.5\"}",
|
||||
"legendFormat": "P50 Response Time"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Latency (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "RPC Response Size",
|
||||
"description": "P95 and P50 of RPC response payload size in bytes. Sourced from the rpc.size event (ServerHandler.cpp:109) which records the byte length of each RPC JSON response. Large responses may indicate expensive queries (e.g. account_tx with many results) or API misuse.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_size{quantile=\"0.95\"}",
|
||||
"legendFormat": "P95 Response Size"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_size{quantile=\"0.5\"}",
|
||||
"legendFormat": "P50 Response Size"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "decbytes",
|
||||
"custom": {
|
||||
"axisLabel": "Size (Bytes)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "RPC Response Time Distribution",
|
||||
"description": "Distribution of RPC response times from the beast::insight timer showing P50, P90, P95, and P99 quantiles. Sourced from the rpc.time event (ServerHandler.cpp:110). Useful for detecting bimodal latency or long-tail requests.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_time{quantile=\"0.5\"}",
|
||||
"legendFormat": "P50"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_time{quantile=\"0.9\"}",
|
||||
"legendFormat": "P90"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_time{quantile=\"0.95\"}",
|
||||
"legendFormat": "P95"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_rpc_time{quantile=\"0.99\"}",
|
||||
"legendFormat": "P99"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Latency (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Pathfinding Fast Duration",
|
||||
"description": "P95 and P50 of fast pathfinding execution time. Sourced from the pathfind_fast event (PathRequests.h:23) which records the duration of the fast pathfinding algorithm. Fast pathfinding uses a simplified search that trades accuracy for speed.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_pathfind_fast{quantile=\"0.95\"}",
|
||||
"legendFormat": "P95 Fast Pathfind"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_pathfind_fast{quantile=\"0.5\"}",
|
||||
"legendFormat": "P50 Fast Pathfind"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Pathfinding Full Duration",
|
||||
"description": "P95 and P50 of full pathfinding execution time. Sourced from the pathfind_full event (PathRequests.h:24) which records the duration of the exhaustive pathfinding search. Full pathfinding is more expensive and can take significantly longer than fast mode.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_pathfind_full{quantile=\"0.95\"}",
|
||||
"legendFormat": "P95 Full Pathfind"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rippled_pathfind_full{quantile=\"0.5\"}",
|
||||
"legendFormat": "P50 Full Pathfind"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Duration (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Resource Warnings Rate",
|
||||
"description": "Rate of resource warning events from the Resource Manager. Sourced from the warn meter (Logic.h:33) which increments when a consumer (peer or RPC client) exceeds the warning threshold for resource usage. A rising rate indicates aggressive clients that may need throttling. NOTE: This panel will show no data until the |m -> |c fix is applied in StatsDCollector.cpp:706 (Phase 6 Task 6.1).",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rate(rippled_warn_total[5m])",
|
||||
"legendFormat": "Warnings / Sec"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"thresholds": {
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "yellow",
|
||||
"value": 0.1
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Resource Drops Rate",
|
||||
"description": "Rate of resource drop events from the Resource Manager. Sourced from the drop meter (Logic.h:34) which increments when a consumer is disconnected or blocked due to excessive resource usage. Non-zero values mean the node is actively rejecting abusive connections. NOTE: This panel will show no data until the |m -> |c fix is applied in StatsDCollector.cpp:706 (Phase 6 Task 6.1).",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "rate(rippled_drop_total[5m])",
|
||||
"legendFormat": "Drops / Sec"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"thresholds": {
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "yellow",
|
||||
"value": 0.01
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 0.1
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 39,
|
||||
"tags": ["rippled", "statsd", "rpc", "pathfinding", "telemetry"],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"title": "RPC & Pathfinding (StatsD)",
|
||||
"uid": "rippled-statsd-rpc"
|
||||
}
|
||||
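The rpc.time quantiles above can also be pulled ad hoc, for example when cross-checking the StatsD timer against the span-based rpc.request duration. A sketch, assuming the metric and label names shown in the panels:

#!/usr/bin/env bash
# Dump the RPC response-time quantiles reported by the StatsD timer.
PROM="http://localhost:9090"
for q in 0.5 0.9 0.95 0.99; do
  echo "quantile=$q"
  curl -sf "$PROM/api/v1/query" \
    --data-urlencode "query=rippled_rpc_time{quantile=\"$q\"}" | jq '.data.result'
done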
@@ -1,384 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": []
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"title": "Transaction Processing Rate",
|
||||
"description": "Rate of transactions entering the processing pipeline. tx.process (NetworkOPs.cpp:1227) fires when a transaction is submitted locally or received from a peer and enters processTransaction(). tx.receive (PeerImp.cpp:1273) fires when a raw transaction message arrives from a peer before deduplication.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"tx.process\"}[5m]))",
|
||||
"legendFormat": "tx.process / Sec [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"tx.receive\"}[5m]))",
|
||||
"legendFormat": "tx.receive / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Transactions / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Transaction Processing Latency",
|
||||
"description": "p95 and p50 latency of transaction processing (tx.process span). Measures the time from when a transaction enters processTransaction() to completion. Computed via histogram_quantile() over the spanmetrics duration histogram with a 5m rate window.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"tx.process\"}[5m])))",
|
||||
"legendFormat": "P95 [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.50, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"tx.process\"}[5m])))",
|
||||
"legendFormat": "P50 [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Latency (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Transaction Path Distribution",
|
||||
"description": "Breakdown of transactions by origin path. The xrpl.tx.local attribute indicates whether the transaction was submitted locally (true) or received from a peer (false). Helps understand the ratio of locally-originated vs relayed transactions.",
|
||||
"type": "piechart",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (xrpl_tx_local, exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", xrpl_tx_local=~\"$tx_origin\", span_name=\"tx.process\"}[5m]))",
|
||||
"legendFormat": "Local = {{xrpl_tx_local}} [{{exported_instance}}]"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Transaction Receive vs Suppressed",
|
||||
"description": "Total rate of raw transaction messages received from peers (tx.receive span from PeerImp.cpp:1273). This fires before deduplication via the HashRouter, so the difference between tx.receive and tx.process reflects suppressed duplicate transactions.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"tx.receive\"}[5m]))",
|
||||
"legendFormat": "Total Received [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Transactions / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Transaction Processing Duration Heatmap",
|
||||
"description": "Heatmap showing the distribution of tx.process span durations across histogram buckets over time. Each cell represents the count of transactions that completed within that latency bucket in a 5m window. Reveals whether processing times are consistent or exhibit multi-modal patterns.",
|
||||
"type": "heatmap",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
},
|
||||
"yAxis": {
|
||||
"axisLabel": "Duration (ms)"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum(increase(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"tx.process\"}[5m])) by (le)",
|
||||
"legendFormat": "{{le}}",
|
||||
"format": "heatmap"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Transaction Apply Duration per Ledger",
|
||||
"description": "p95 and p50 latency of applying the consensus transaction set to a new ledger. The tx.apply span (BuildLedger.cpp:88) wraps the applyTransactions() function that iterates through the CanonicalTXSet and applies each transaction to the OpenView. Long durations indicate heavy transaction sets or expensive transaction processing.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"tx.apply\"}[5m])))",
|
||||
"legendFormat": "P95 tx.apply [{{exported_instance}}]"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "histogram_quantile(0.50, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{exported_instance=~\"$node\", span_name=\"tx.apply\"}[5m])))",
|
||||
"legendFormat": "P50 tx.apply [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"axisLabel": "Latency (ms)",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Peer Transaction Receive Rate",
|
||||
"description": "Rate of transaction messages received from network peers. Sourced from the tx.receive span (PeerImp.cpp:1273) which fires in the onMessage(TMTransaction) handler. High rates may indicate network-wide transaction volume spikes or peer flooding.",
|
||||
"type": "timeseries",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"tx.receive\"}[5m]))",
|
||||
"legendFormat": "tx.receive / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"custom": {
|
||||
"axisLabel": "Transactions / Sec",
|
||||
"spanNulls": true,
|
||||
"insertNulls": false,
|
||||
"showPoints": "auto",
|
||||
"pointSize": 3
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"title": "Transaction Apply Failed Rate",
|
||||
"description": "Rate of tx.apply spans completing with error status, indicating transaction application failures during ledger building. The span records xrpl.ledger.tx_failed as an attribute. Thresholds: green < 0.1/sec, yellow 0.1-1/sec, red > 1/sec. Some failures are normal (e.g. conflicting offers) but sustained high rates may indicate issues.",
|
||||
"type": "stat",
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 24
|
||||
},
|
||||
"options": {
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus"
|
||||
},
|
||||
"expr": "sum by (exported_instance) (rate(traces_span_metrics_calls_total{exported_instance=~\"$node\", span_name=\"tx.apply\", status_code=\"STATUS_CODE_ERROR\"}[5m]))",
|
||||
"legendFormat": "Failed / Sec [{{exported_instance}}]"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ops",
|
||||
"thresholds": {
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "yellow",
|
||||
"value": 0.1
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 39,
|
||||
"tags": ["rippled", "transactions", "telemetry"],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"name": "node",
|
||||
"label": "Node",
|
||||
"description": "Filter by rippled node (service.instance.id — e.g. Node-1)",
|
||||
"type": "query",
|
||||
"query": "label_values(traces_span_metrics_calls_total, exported_instance)",
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"includeAll": true,
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"multi": true,
|
||||
"refresh": 2,
|
||||
"sort": 1
|
||||
},
|
||||
{
|
||||
"name": "tx_origin",
|
||||
"label": "TX Origin",
|
||||
"description": "Filter by transaction origin (true = local submit, false = peer relay)",
|
||||
"type": "query",
|
||||
"query": "label_values(traces_span_metrics_calls_total{span_name=\"tx.process\"}, xrpl_tx_local)",
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"includeAll": true,
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"multi": true,
|
||||
"refresh": 2,
|
||||
"sort": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"title": "Transaction Overview",
|
||||
"uid": "rippled-transactions"
|
||||
}
|
||||
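Every latency panel in the Transaction Overview dashboard is a variation of histogram_quantile() over the spanmetrics duration histogram. A minimal sketch of the same query issued directly, assuming the span-metrics pipeline exports traces_span_metrics_duration_milliseconds_bucket as referenced by the dashboard:

#!/usr/bin/env bash
# p95 of tx.process duration over a 5m window, broken out per node.
PROM="http://localhost:9090"
QUERY='histogram_quantile(0.95, sum by (le, exported_instance) (rate(traces_span_metrics_duration_milliseconds_bucket{span_name="tx.process"}[5m])))'
curl -sf "$PROM/api/v1/query" --data-urlencode "query=$QUERY" | jq '.data.result'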
@@ -1,12 +0,0 @@
apiVersion: 1

providers:
  - name: rippled-telemetry
    orgId: 1
    folder: rippled
    type: file
    disableDeletion: false
    editable: true
    options:
      path: /var/lib/grafana/dashboards
      foldersFromFilesStructure: false
@@ -1,10 +0,0 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    uid: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    editable: true
@@ -1,168 +0,0 @@
# Grafana datasource provisioning for Grafana Tempo.
# Auto-configures Tempo as a trace data source on Grafana startup.
# Access Grafana at http://localhost:3000, then use Explore -> Tempo
# to browse xrpld traces using TraceQL.
#
# Search filters provide pre-configured dropdowns in the Explore UI.
# Each phase adds filters for the span attributes it introduces.
# Phase 1b (infra): Base filters — node identity, service, span name, status.
# Phase 2 (RPC): RPC command, status, role filters.
# Phase 3 (TX): Transaction hash, local/peer origin, status.
# Phase 4 (Cons): Consensus mode, round, ledger sequence, close time.

apiVersion: 1

datasources:
  - name: Tempo
    type: tempo
    access: proxy
    url: http://tempo:3200
    uid: tempo
    jsonData:
      nodeGraph:
        enabled: true
      # Service map and traces-to-metrics require a Prometheus datasource
      # (not included in this stack). These features are inactive until a
      # Prometheus service is added to docker-compose.yml.
      serviceMap:
        datasourceUid: prometheus
      tracesToMetrics:
        datasourceUid: prometheus
        spanStartTimeShift: "-1h"
        spanEndTimeShift: "1h"
      search:
        filters:
          # --- Node identification filters ---
          # service.name: logical service name (default: "xrpld").
          # Useful when running multiple service types in the same collector.
          - id: service-name
            tag: service.name
            operator: "="
            scope: resource
            type: static
          # service.instance.id: unique node identifier — configurable via
          # the service_instance_id setting in [telemetry], defaults to the
          # node's public key. E.g. "Node-1" or "nHB1X37...".
          - id: node-id
            tag: service.instance.id
            operator: "="
            scope: resource
            type: static
          # service.version: xrpld build version (e.g., "2.4.0-b1").
          # Filter traces from specific software releases.
          - id: node-version
            tag: service.version
            operator: "="
            scope: resource
            type: dynamic
          # xrpl.network.id: numeric network identifier
          # (0 = mainnet, 1 = testnet, 2 = devnet, etc.).
          # Derived from the [network_id] config section.
          - id: network-id
            tag: xrpl.network.id
            operator: "="
            scope: resource
            type: dynamic
          # xrpl.network.type: human-readable network name derived from
          # network ID ("mainnet", "testnet", "devnet", "unknown").
          - id: network-type
            tag: xrpl.network.type
            operator: "="
            scope: resource
            type: static
          # --- Span intrinsic filters ---
          # name: the span operation name (e.g., "rpc.command.server_info").
          # Use to find traces for a specific RPC command or subsystem.
          - id: span-name
            tag: name
            operator: "="
            scope: intrinsic
            type: static
          # status: span completion status ("ok", "error", "unset").
          # Filter for failed operations to diagnose errors.
          - id: span-status
            tag: status
            operator: "="
            scope: intrinsic
            type: static
          # duration: span wall-clock duration. Use with ">" operator
          # to find slow operations (e.g., duration > 500ms).
          - id: span-duration
            tag: duration
            operator: ">"
            scope: intrinsic
            type: static
          # Phase 2: RPC tracing filters
          - id: rpc-command
            tag: xrpl.rpc.command
            operator: "="
            scope: span
            type: static
          - id: rpc-status
            tag: xrpl.rpc.status
            operator: "="
            scope: span
            type: dynamic
          - id: rpc-role
            tag: xrpl.rpc.role
            operator: "="
            scope: span
            type: dynamic
          # Phase 2: Node health filters (Task 2.8)
          - id: node-amendment-blocked
            tag: xrpl.node.amendment_blocked
            operator: "="
            scope: span
            type: static
          - id: node-server-state
            tag: xrpl.node.server_state
            operator: "="
            scope: span
            type: dynamic
          # Phase 3: Transaction tracing filters
          - id: tx-hash
            tag: xrpl.tx.hash
            operator: "="
            scope: span
            type: static
          - id: tx-origin
            tag: xrpl.tx.local
            operator: "="
            scope: span
            type: dynamic
          - id: tx-status
            tag: xrpl.tx.status
            operator: "="
            scope: span
            type: dynamic
          # Phase 4: Consensus tracing filters
          - id: consensus-mode
            tag: xrpl.consensus.mode
            operator: "="
            scope: span
            type: static
          - id: consensus-round
            tag: xrpl.consensus.round
            operator: "="
            scope: span
            type: dynamic
          - id: consensus-ledger-seq
            tag: xrpl.consensus.ledger.seq
            operator: "="
            scope: span
            type: static
          - id: consensus-close-time-correct
            tag: xrpl.consensus.close_time_correct
            operator: "="
            scope: span
            type: dynamic
          - id: consensus-state
            tag: xrpl.consensus.state
            operator: "="
            scope: span
            type: dynamic
          - id: consensus-close-resolution
            tag: xrpl.consensus.close_resolution_ms
            operator: "="
            scope: span
            type: dynamic
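The filter tags above map directly onto TraceQL attributes, so the same searches can be scripted against the Tempo search API rather than the Explore UI. A sketch, assuming the Tempo instance from this stack at localhost:3200 and the attribute names listed above:

#!/usr/bin/env bash
# Find recent slow server_info RPC spans via TraceQL.
TEMPO="http://localhost:3200"
curl -sf "$TEMPO/api/search" \
  --data-urlencode 'q={span.xrpl.rpc.command="server_info" && duration > 100ms}' \
  --data-urlencode 'limit=10' | jq '.traces | length'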
@@ -1,613 +0,0 @@
#!/usr/bin/env bash
# Integration test for rippled OpenTelemetry instrumentation.
#
# Launches a 6-node xrpld consensus network with telemetry enabled,
# exercises RPC / transaction / consensus code paths, then verifies
# that the expected spans and metrics appear in Tempo and Prometheus.
#
# Usage:
#   bash docker/telemetry/integration-test.sh
#
# Prerequisites:
#   - .build/xrpld built with telemetry=ON
#   - docker compose (v2)
#   - curl, jq
#
# The script leaves the observability stack and xrpld nodes running
# so you can manually inspect Tempo (localhost:3200) and Grafana
# (localhost:3000). Run with --cleanup to tear down instead.

set -euo pipefail

# ---------------------------------------------------------------------------
|
||||
# Configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
XRPLD="$REPO_ROOT/.build/xrpld"
|
||||
COMPOSE_FILE="$SCRIPT_DIR/docker-compose.yml"
|
||||
STANDALONE_CFG="$SCRIPT_DIR/xrpld-telemetry.cfg"
|
||||
WORKDIR="/tmp/xrpld-integration"
|
||||
NUM_NODES=6
|
||||
PEER_PORT_BASE=51235
|
||||
RPC_PORT_BASE=5005
|
||||
CONSENSUS_TIMEOUT=120
|
||||
GENESIS_ACCOUNT="rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"
|
||||
GENESIS_SEED="snoPBrXtMeMyMHUVTgbuqAfg1SUTb"
|
||||
DEST_ACCOUNT="" # Generated dynamically via wallet_propose
|
||||
TEMPO="http://localhost:3200"
|
||||
PROM="http://localhost:9090"
|
||||
|
||||
# Counters for pass/fail
|
||||
PASS=0
|
||||
FAIL=0
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
log() { printf "\033[1;34m[INFO]\033[0m %s\n" "$*"; }
|
||||
ok() { printf "\033[1;32m[PASS]\033[0m %s\n" "$*"; PASS=$((PASS + 1)); }
|
||||
fail() { printf "\033[1;31m[FAIL]\033[0m %s\n" "$*"; FAIL=$((FAIL + 1)); }
|
||||
die() { printf "\033[1;31m[ERROR]\033[0m %s\n" "$*" >&2; exit 1; }
|
||||
|
||||
check_span() {
|
||||
local op="$1"
|
||||
local count
|
||||
count=$(curl -sf "$TEMPO/api/search" \
|
||||
--data-urlencode "q={resource.service.name=\"rippled\" && name=\"$op\"}" \
|
||||
--data-urlencode "limit=5" \
|
||||
| jq '.traces | length' 2>/dev/null || echo 0)
|
||||
if [ "$count" -gt 0 ]; then
|
||||
ok "$op ($count traces)"
|
||||
else
|
||||
fail "$op (0 traces)"
|
||||
fi
|
||||
}
|
||||
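# Example (illustrative): typical calls once the network has produced traffic.
# Span names follow the conventions noted in the Tempo datasource comments and
# dashboards above; adjust to whatever the build actually emits.
#   check_span "rpc.command.server_info"
#   check_span "tx.process"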
|
||||
cleanup() {
|
||||
log "Cleaning up..."
|
||||
# Kill xrpld nodes
|
||||
for i in $(seq 1 "$NUM_NODES"); do
|
||||
local pidfile="$WORKDIR/node$i/xrpld.pid"
|
||||
if [ -f "$pidfile" ]; then
|
||||
kill "$(cat "$pidfile")" 2>/dev/null || true
|
||||
rm -f "$pidfile"
|
||||
fi
|
||||
done
|
||||
# Also kill any straggling xrpld processes from our workdir
|
||||
pkill -f "$WORKDIR" 2>/dev/null || true
|
||||
# Stop docker stack
|
||||
docker compose -f "$COMPOSE_FILE" down 2>/dev/null || true
|
||||
# Remove workdir
|
||||
rm -rf "$WORKDIR"
|
||||
log "Cleanup complete."
|
||||
}
|
||||
|
||||
# Handle --cleanup flag
|
||||
if [ "${1:-}" = "--cleanup" ]; then
|
||||
cleanup
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step 0: Prerequisites
|
||||
# ---------------------------------------------------------------------------
|
||||
log "Checking prerequisites..."
|
||||
|
||||
command -v docker >/dev/null 2>&1 || die "docker not found"
|
||||
docker compose version >/dev/null 2>&1 || die "docker compose (v2) not found"
|
||||
command -v curl >/dev/null 2>&1 || die "curl not found"
|
||||
command -v jq >/dev/null 2>&1 || die "jq not found"
|
||||
[ -x "$XRPLD" ] || die "xrpld binary not found at $XRPLD (build with telemetry=ON)"
|
||||
[ -f "$COMPOSE_FILE" ] || die "docker-compose.yml not found at $COMPOSE_FILE"
|
||||
[ -f "$STANDALONE_CFG" ] || die "xrpld-telemetry.cfg not found at $STANDALONE_CFG"
|
||||
|
||||
log "All prerequisites met."
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step 1: Clean previous run
|
||||
# ---------------------------------------------------------------------------
|
||||
log "Cleaning previous run data..."
|
||||
for i in $(seq 1 "$NUM_NODES"); do
|
||||
pidfile="$WORKDIR/node$i/xrpld.pid"
|
||||
if [ -f "$pidfile" ]; then
|
||||
kill "$(cat "$pidfile")" 2>/dev/null || true
|
||||
fi
|
||||
done
|
||||
pkill -f "$WORKDIR" 2>/dev/null || true
|
||||
# Kill any xrpld using the standalone config (from key generation)
|
||||
pkill -f "xrpld-telemetry.cfg" 2>/dev/null || true
|
||||
sleep 2
|
||||
rm -rf "$WORKDIR"
|
||||
mkdir -p "$WORKDIR"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step 2: Start observability stack
|
||||
# ---------------------------------------------------------------------------
|
||||
log "Starting observability stack..."
|
||||
docker compose -f "$COMPOSE_FILE" up -d
|
||||
|
||||
log "Waiting for otel-collector to be ready..."
|
||||
for attempt in $(seq 1 30); do
|
||||
# The OTLP HTTP endpoint returns 405 for GET (expects POST), which
|
||||
# means it is listening. curl -sf would fail on 405, so we check
|
||||
# the HTTP status code explicitly.
|
||||
status=$(curl -so /dev/null -w '%{http_code}' http://localhost:4318/ 2>/dev/null || echo 000)
|
||||
if [ "$status" != "000" ]; then
|
||||
log "otel-collector ready (attempt $attempt, HTTP $status)."
|
||||
break
|
||||
fi
|
||||
if [ "$attempt" -eq 30 ]; then
|
||||
die "otel-collector not ready after 30s"
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
log "Waiting for Tempo to be ready..."
|
||||
for attempt in $(seq 1 30); do
|
||||
if curl -sf "$TEMPO/ready" >/dev/null 2>&1; then
|
||||
log "Tempo ready (attempt $attempt)."
|
||||
break
|
||||
fi
|
||||
if [ "$attempt" -eq 30 ]; then
|
||||
die "Tempo not ready after 30s"
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step 3: Generate validator keys
|
||||
# ---------------------------------------------------------------------------
|
||||
log "Generating $NUM_NODES validator key pairs..."
|
||||
|
||||
# Start a temporary standalone xrpld for key generation
|
||||
TEMP_DATA="$WORKDIR/temp-keygen"
|
||||
mkdir -p "$TEMP_DATA"
|
||||
|
||||
# Create a minimal temp config for key generation
|
||||
TEMP_CFG="$TEMP_DATA/xrpld.cfg"
|
||||
cat > "$TEMP_CFG" <<EOCFG
|
||||
[server]
|
||||
port_rpc_temp
|
||||
|
||||
[port_rpc_temp]
|
||||
port = 5099
|
||||
ip = 127.0.0.1
|
||||
admin = 127.0.0.1
|
||||
protocol = http
|
||||
|
||||
[node_db]
|
||||
type=NuDB
|
||||
path=$TEMP_DATA/nudb
|
||||
online_delete=256
|
||||
|
||||
[database_path]
|
||||
$TEMP_DATA/db
|
||||
|
||||
[debug_logfile]
|
||||
$TEMP_DATA/debug.log
|
||||
|
||||
[ssl_verify]
|
||||
0
|
||||
EOCFG
|
||||
|
||||
"$XRPLD" --conf "$TEMP_CFG" -a --start > "$TEMP_DATA/stdout.log" 2>&1 &
|
||||
TEMP_PID=$!
|
||||
log "Temporary xrpld started (PID $TEMP_PID), waiting for RPC..."
|
||||
|
||||
for attempt in $(seq 1 30); do
|
||||
if curl -sf http://localhost:5099 -d '{"method":"server_info"}' >/dev/null 2>&1; then
|
||||
log "Temporary xrpld RPC ready (attempt $attempt)."
|
||||
break
|
||||
fi
|
||||
if [ "$attempt" -eq 30 ]; then
|
||||
kill "$TEMP_PID" 2>/dev/null || true
|
||||
die "Temporary xrpld RPC not ready after 30s"
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
declare -a SEEDS
|
||||
declare -a PUBKEYS
|
||||
|
||||
for i in $(seq 1 "$NUM_NODES"); do
|
||||
result=$(curl -sf http://localhost:5099 -d '{"method":"validation_create"}')
|
||||
seed=$(echo "$result" | jq -r '.result.validation_seed')
|
||||
pubkey=$(echo "$result" | jq -r '.result.validation_public_key')
|
||||
if [ -z "$seed" ] || [ "$seed" = "null" ]; then
|
||||
kill "$TEMP_PID" 2>/dev/null || true
|
||||
die "Failed to generate key pair $i"
|
||||
fi
|
||||
SEEDS+=("$seed")
|
||||
PUBKEYS+=("$pubkey")
|
||||
log " Node $i: $pubkey"
|
||||
done
|
||||
|
||||
kill "$TEMP_PID" 2>/dev/null || true
|
||||
wait "$TEMP_PID" 2>/dev/null || true
|
||||
rm -rf "$TEMP_DATA"
|
||||
log "Key generation complete."

# ---------------------------------------------------------------------------
# Step 4: Generate node configs and validators.txt
# ---------------------------------------------------------------------------
log "Generating node configs..."

# Create shared validators.txt
VALIDATORS_FILE="$WORKDIR/validators.txt"
{
    echo "[validators]"
    for i in $(seq 0 $((NUM_NODES - 1))); do
        echo "${PUBKEYS[$i]}"
    done
} > "$VALIDATORS_FILE"

# Create per-node configs
for i in $(seq 1 "$NUM_NODES"); do
    NODE_DIR="$WORKDIR/node$i"
    mkdir -p "$NODE_DIR/nudb" "$NODE_DIR/db"

    RPC_PORT=$((RPC_PORT_BASE + i - 1))
    PEER_PORT=$((PEER_PORT_BASE + i - 1))
    SEED="${SEEDS[$((i - 1))]}"

    # Build ips_fixed list (all peers except self)
    IPS_FIXED=""
    for j in $(seq 1 "$NUM_NODES"); do
        if [ "$j" -ne "$i" ]; then
            IPS_FIXED="${IPS_FIXED}127.0.0.1 $((PEER_PORT_BASE + j - 1))
"
        fi
    done

    cat > "$NODE_DIR/xrpld.cfg" <<EOCFG
[server]
port_rpc
port_peer

[port_rpc]
port = $RPC_PORT
ip = 127.0.0.1
admin = 127.0.0.1
protocol = http

[port_peer]
port = $PEER_PORT
ip = 0.0.0.0
protocol = peer

[node_db]
type=NuDB
path=$NODE_DIR/nudb
online_delete=256

[database_path]
$NODE_DIR/db

[debug_logfile]
$NODE_DIR/debug.log

[validation_seed]
$SEED

[validators_file]
$VALIDATORS_FILE

[ips_fixed]
${IPS_FIXED}
[peer_private]
1

[telemetry]
enabled=1
service_instance_id=Node-${i}
endpoint=http://localhost:4318/v1/traces
exporter=otlp_http
sampling_ratio=1.0
batch_size=512
batch_delay_ms=2000
max_queue_size=2048
trace_rpc=1
trace_transactions=1
trace_consensus=1
trace_peer=1
trace_ledger=1

[insight]
server=statsd
address=127.0.0.1:8125
prefix=rippled

[rpc_startup]
{ "command": "log_level", "severity": "warning" }

[ssl_verify]
0
EOCFG

    log " Node $i config: RPC=$RPC_PORT, Peer=$PEER_PORT"
done
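
# Example of what the generated [ips_fixed] stanza looks like for node 1,
# assuming PEER_PORT_BASE=51235 purely for illustration (the real base comes
# from the variables defined at the top of this script):
#
#   [ips_fixed]
#   127.0.0.1 51236
#   127.0.0.1 51237
#   127.0.0.1 51238
#   127.0.0.1 51239
#   127.0.0.1 51240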

# ---------------------------------------------------------------------------
# Step 5: Start all $NUM_NODES nodes
# ---------------------------------------------------------------------------
log "Starting $NUM_NODES xrpld nodes..."

for i in $(seq 1 "$NUM_NODES"); do
    NODE_DIR="$WORKDIR/node$i"
    "$XRPLD" --conf "$NODE_DIR/xrpld.cfg" --start > "$NODE_DIR/stdout.log" 2>&1 &
    echo $! > "$NODE_DIR/xrpld.pid"
    log " Node $i started (PID $(cat "$NODE_DIR/xrpld.pid"))"
done

# Give the nodes a moment to initialize
sleep 5

# ---------------------------------------------------------------------------
# Step 6: Wait for consensus
# ---------------------------------------------------------------------------
log "Waiting for nodes to reach 'proposing' state (timeout: ${CONSENSUS_TIMEOUT}s)..."

start_time=$(date +%s)
nodes_ready=0

while [ "$nodes_ready" -lt "$NUM_NODES" ]; do
    elapsed=$(( $(date +%s) - start_time ))
    if [ "$elapsed" -ge "$CONSENSUS_TIMEOUT" ]; then
        fail "Consensus timeout after ${CONSENSUS_TIMEOUT}s ($nodes_ready/$NUM_NODES nodes ready)"
        log "Continuing with partial consensus..."
        break
    fi

    nodes_ready=0
    for i in $(seq 1 "$NUM_NODES"); do
        RPC_PORT=$((RPC_PORT_BASE + i - 1))
        state=$(curl -sf "http://localhost:$RPC_PORT" \
            -d '{"method":"server_info"}' 2>/dev/null \
            | jq -r '.result.info.server_state' 2>/dev/null || echo "unreachable")
        if [ "$state" = "proposing" ]; then
            nodes_ready=$((nodes_ready + 1))
        fi
    done
    printf "\r %d/%d nodes proposing (%ds elapsed)..." "$nodes_ready" "$NUM_NODES" "$elapsed"
    if [ "$nodes_ready" -lt "$NUM_NODES" ]; then
        sleep 3
    fi
done
echo ""

if [ "$nodes_ready" -eq "$NUM_NODES" ]; then
    ok "All $NUM_NODES nodes reached 'proposing' state"
else
    fail "Only $nodes_ready/$NUM_NODES nodes reached 'proposing' state"
fi

# ---------------------------------------------------------------------------
# Step 6b: Wait for a validated ledger
# ---------------------------------------------------------------------------
log "Waiting for first validated ledger..."
for attempt in $(seq 1 60); do
    val_seq=$(curl -sf "http://localhost:$RPC_PORT_BASE" \
        -d '{"method":"server_info"}' 2>/dev/null \
        | jq -r '.result.info.validated_ledger.seq // 0' 2>/dev/null || echo 0)
    if [ "$val_seq" -gt 2 ] 2>/dev/null; then
        ok "First validated ledger: seq $val_seq"
        break
    fi
    if [ "$attempt" -eq 60 ]; then
        fail "No validated ledger after 60s"
    fi
    sleep 1
done

# ---------------------------------------------------------------------------
# Step 7: Exercise RPC spans (Phase 2)
# ---------------------------------------------------------------------------
log "Exercising RPC spans..."

curl -sf "http://localhost:$RPC_PORT_BASE" \
    -d '{"method":"server_info"}' > /dev/null
curl -sf "http://localhost:$RPC_PORT_BASE" \
    -d '{"method":"server_state"}' > /dev/null
curl -sf "http://localhost:$RPC_PORT_BASE" \
    -d '{"method":"ledger","params":[{"ledger_index":"current"}]}' > /dev/null

log "RPC commands sent. Waiting 5s for batch export..."
sleep 5

# ---------------------------------------------------------------------------
# Step 8: Submit transaction (Phase 3)
# ---------------------------------------------------------------------------
log "Submitting Payment transaction..."

# Generate a destination wallet
log " Generating destination wallet..."
wallet_result=$(curl -sf "http://localhost:$RPC_PORT_BASE" \
    -d '{"method":"wallet_propose"}')
DEST_ACCOUNT=$(echo "$wallet_result" | jq -r '.result.account_id' 2>/dev/null)
if [ -z "$DEST_ACCOUNT" ] || [ "$DEST_ACCOUNT" = "null" ]; then
    fail "Could not generate destination wallet"
    DEST_ACCOUNT="rrrrrrrrrrrrrrrrrrrrrhoLvTp" # ACCOUNT_ZERO fallback
fi
log " Destination: $DEST_ACCOUNT"

# Get genesis account info
acct_result=$(curl -sf "http://localhost:$RPC_PORT_BASE" \
    -d "{\"method\":\"account_info\",\"params\":[{\"account\":\"$GENESIS_ACCOUNT\"}]}")
seq_num=$(echo "$acct_result" | jq -r '.result.account_data.Sequence' 2>/dev/null || echo "unknown")
log " Genesis account sequence: $seq_num"

# Submit payment
submit_result=$(curl -sf "http://localhost:$RPC_PORT_BASE" -d "{
    \"method\": \"submit\",
    \"params\": [{
        \"secret\": \"$GENESIS_SEED\",
        \"tx_json\": {
            \"TransactionType\": \"Payment\",
            \"Account\": \"$GENESIS_ACCOUNT\",
            \"Destination\": \"$DEST_ACCOUNT\",
            \"Amount\": \"10000000\"
        }
    }]
}")

engine_result=$(echo "$submit_result" | jq -r '.result.engine_result' 2>/dev/null || echo "unknown")
tx_hash=$(echo "$submit_result" | jq -r '.result.tx_json.hash' 2>/dev/null || echo "unknown")

if [ "$engine_result" = "tesSUCCESS" ] || [ "$engine_result" = "terQUEUED" ]; then
    ok "Transaction submitted: $engine_result (hash: ${tx_hash:0:16}...)"
else
    fail "Transaction submission: $engine_result"
    log " Full response: $(echo "$submit_result" | jq -c .result 2>/dev/null)"
fi

log "Waiting 15s for consensus round + batch export..."
sleep 15
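
# To inspect the submitted transaction by hand (a hypothetical follow-up,
# not part of the automated checks), query it with the tx method once
# the ledger containing it has been validated:
#
#   curl -s "http://localhost:$RPC_PORT_BASE" \
#       -d "{\"method\":\"tx\",\"params\":[{\"transaction\":\"$tx_hash\"}]}" \
#       | jq '.result.validated'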

# ---------------------------------------------------------------------------
# Step 9: Verify Tempo traces
# ---------------------------------------------------------------------------
log "Verifying spans in Tempo..."

# Check service registration
services=$(curl -sf "$TEMPO/api/v2/search/tag/resource.service.name/values" \
    | jq -r '.tagValues[].value' 2>/dev/null || echo "")
if echo "$services" | grep -q "rippled"; then
    ok "Service 'rippled' registered in Tempo"
else
    fail "Service 'rippled' NOT found in Tempo (found: $services)"
fi

log ""
log "--- Phase 2: RPC Spans ---"
check_span "rpc.request"
check_span "rpc.process"
check_span "rpc.command.server_info"
check_span "rpc.command.server_state"
check_span "rpc.command.ledger"

log ""
log "--- Phase 3: Transaction Spans ---"
check_span "tx.process"
check_span "tx.receive"
check_span "tx.apply"

log ""
log "--- Phase 4: Consensus Spans ---"
check_span "consensus.proposal.send"
check_span "consensus.ledger_close"
check_span "consensus.accept"
check_span "consensus.validation.send"

log ""
log "--- Phase 5: Ledger Spans ---"
check_span "ledger.build"
check_span "ledger.validate"
check_span "ledger.store"

log ""
log "--- Phase 5: Peer Spans (trace_peer=1) ---"
check_span "peer.proposal.receive"
check_span "peer.validation.receive"

# ---------------------------------------------------------------------------
# Step 10: Verify Prometheus spanmetrics
# ---------------------------------------------------------------------------
log ""
log "--- Phase 5: Spanmetrics ---"
log "Waiting 20s for Prometheus scrape cycle..."
sleep 20

calls_count=$(curl -sf "$PROM/api/v1/query?query=traces_span_metrics_calls_total" \
    | jq '.data.result | length' 2>/dev/null || echo 0)
if [ "$calls_count" -gt 0 ]; then
    ok "Prometheus: traces_span_metrics_calls_total ($calls_count series)"
else
    fail "Prometheus: traces_span_metrics_calls_total (0 series)"
fi

duration_count=$(curl -sf "$PROM/api/v1/query?query=traces_span_metrics_duration_milliseconds_count" \
    | jq '.data.result | length' 2>/dev/null || echo 0)
if [ "$duration_count" -gt 0 ]; then
    ok "Prometheus: duration histogram ($duration_count series)"
else
    fail "Prometheus: duration histogram (0 series)"
fi

# Check Grafana
if curl -sf http://localhost:3000/api/health > /dev/null 2>&1; then
    ok "Grafana: healthy at localhost:3000"
else
    fail "Grafana: not reachable at localhost:3000"
fi

# ---------------------------------------------------------------------------
# Step 10b: Verify StatsD metrics in Prometheus
# ---------------------------------------------------------------------------
log ""
log "--- Phase 6: StatsD Metrics (beast::insight) ---"
log "Waiting 20s for StatsD aggregation + Prometheus scrape..."
sleep 20

check_statsd_metric() {
    local metric_name="$1"
    local result
    result=$(curl -sf "$PROM/api/v1/query?query=$metric_name" \
        | jq '.data.result | length' 2>/dev/null || echo 0)
    if [ "$result" -gt 0 ]; then
        ok "StatsD: $metric_name ($result series)"
    else
        fail "StatsD: $metric_name (0 series)"
    fi
}

# Node health gauges
check_statsd_metric "rippled_LedgerMaster_Validated_Ledger_Age"
check_statsd_metric "rippled_LedgerMaster_Published_Ledger_Age"
check_statsd_metric "rippled_job_count"

# State accounting
check_statsd_metric "rippled_State_Accounting_Full_duration"

# Peer finder
check_statsd_metric "rippled_Peer_Finder_Active_Inbound_Peers"
check_statsd_metric "rippled_Peer_Finder_Active_Outbound_Peers"

# RPC counters (only populated if RPC was exercised, which Steps 6-8 guarantee)
check_statsd_metric "rippled_rpc_requests"

# Overlay traffic
check_statsd_metric "rippled_total_Bytes_In"

# ---------------------------------------------------------------------------
# Step 11: Summary
# ---------------------------------------------------------------------------
echo ""
echo "==========================================================="
echo " INTEGRATION TEST RESULTS"
echo "==========================================================="
printf " \033[1;32mPASSED: %d\033[0m\n" "$PASS"
printf " \033[1;31mFAILED: %d\033[0m\n" "$FAIL"
echo "==========================================================="
echo ""
echo " Observability stack is running:"
echo ""
echo " Tempo: http://localhost:3200"
echo " Grafana: http://localhost:3000"
echo " Prometheus: http://localhost:9090"
echo ""
echo " xrpld nodes ($NUM_NODES) are running:"
for i in $(seq 1 "$NUM_NODES"); do
    RPC_PORT=$((RPC_PORT_BASE + i - 1))
    PEER_PORT=$((PEER_PORT_BASE + i - 1))
    echo " Node $i: RPC=localhost:$RPC_PORT Peer=:$PEER_PORT PID=$(cat "$WORKDIR/node$i/xrpld.pid" 2>/dev/null || echo 'unknown')"
done
echo ""
echo " To tear down:"
echo " bash docker/telemetry/integration-test.sh --cleanup"
echo ""
echo "==========================================================="

if [ "$FAIL" -gt 0 ]; then
    exit 1
fi

@@ -1,64 +0,0 @@
# OpenTelemetry Collector configuration for xrpld development.
#
# Pipelines:
#   traces:  OTLP receiver -> batch processor -> debug + Tempo + spanmetrics
#   metrics: spanmetrics connector -> Prometheus exporter
#
# xrpld sends traces via OTLP/HTTP to port 4318. The collector batches
# them, forwards to Tempo, and derives RED metrics via the spanmetrics
# connector, which Prometheus scrapes on port 8889.

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318

processors:
  batch:
    timeout: 1s
    send_batch_size: 100

connectors:
  spanmetrics:
    # Expose service.instance.id (node public key) as a Prometheus label so
    # Grafana dashboards can filter metrics by individual node.
    resource_metrics_key_attributes:
      - service.instance.id
    histogram:
      explicit:
        buckets: [1ms, 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 5s]
    dimensions:
      - name: xrpl.rpc.command
      - name: xrpl.rpc.status
      - name: xrpl.consensus.mode
      - name: xrpl.tx.local
      - name: xrpl.peer.proposal.trusted
      - name: xrpl.peer.validation.trusted

exporters:
  debug:
    verbosity: detailed
  otlp/tempo:
    endpoint: tempo:4317
    tls:
      insecure: true
  prometheus:
    endpoint: 0.0.0.0:8889

extensions:
  health_check:
    endpoint: 0.0.0.0:13133

service:
  extensions: [health_check]
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [debug, otlp/tempo, spanmetrics]
    metrics:
      receivers: [spanmetrics]
      exporters: [prometheus]

@@ -1,9 +0,0 @@
# Prometheus configuration for scraping spanmetrics from the OTel Collector.
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: otel-collector
    static_configs:
      - targets: ["otel-collector:8889"]

@@ -1,61 +0,0 @@
# Grafana Tempo configuration for the xrpld telemetry stack.
#
# Runs in single-binary mode for local development.
# Receives traces via OTLP/gRPC from the OTel Collector and stores
# them locally. Queryable via Grafana Explore using the Tempo datasource.
#
# Search filters are configured on the Grafana datasource side
# (grafana/provisioning/datasources/tempo.yaml). Tempo auto-indexes
# all span attributes for search in single-binary mode.
#
# For production, replace local storage with an S3/GCS backend and adjust
# retention via the compactor settings. See:
# https://grafana.com/docs/tempo/latest/configuration/

stream_over_http_enabled: true

server:
  http_listen_port: 3200

distributor:
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317

ingester:
  max_block_duration: 5m

compactor:
  compaction:
    block_retention: 1h

# Enable the metrics generator for service graph and span metrics.
# Produces RED metrics (rate, errors, duration) per service/span,
# feeding Grafana's service map visualization.
metrics_generator:
  registry:
    external_labels:
      source: tempo
  storage:
    path: /var/tempo/generator/wal
    # Uncomment and add a Prometheus service to docker-compose.yml
    # to enable remote_write for service graph metrics:
    # remote_write:
    #   - url: http://prometheus:9090/api/v1/write

overrides:
  defaults:
    metrics_generator:
      processors:
        - service-graphs
        - span-metrics

storage:
  trace:
    backend: local
    wal:
      path: /var/tempo/wal
    local:
      path: /var/tempo/blocks

@@ -1,60 +0,0 @@
# Standalone xrpld configuration with OpenTelemetry enabled.
#
# Usage:
#   1. Start the observability stack:
#        docker compose -f docker/telemetry/docker-compose.yml up -d
#   2. Run xrpld in standalone mode:
#        ./xrpld --conf docker/telemetry/xrpld-telemetry.cfg -a --start
#   3. Send RPC commands to exercise tracing:
#        curl -s http://localhost:5005 -d '{"method":"server_info"}'
#   4. View traces in Grafana Explore: http://localhost:3000

[server]
port_rpc_admin_local
port_ws_admin_local

[port_rpc_admin_local]
port = 5005
ip = 127.0.0.1
admin = 127.0.0.1
protocol = http

[port_ws_admin_local]
port = 6006
ip = 127.0.0.1
admin = 127.0.0.1
protocol = ws

[node_db]
type=NuDB
path=docker/telemetry/data/nudb
online_delete=256
advisory_delete=0

[database_path]
docker/telemetry/data

[debug_logfile]
docker/telemetry/data/debug.log

[rpc_startup]
{ "command": "log_level", "severity": "debug" }

[ssl_verify]
0

# --- OpenTelemetry tracing ---
[telemetry]
enabled=1
service_instance_id=xrpld-standalone
endpoint=http://localhost:4318/v1/traces
exporter=otlp_http
sampling_ratio=1.0
batch_size=512
batch_delay_ms=5000
max_queue_size=2048
trace_rpc=1
trace_transactions=1
trace_consensus=1
trace_peer=0
trace_ledger=1

298
docs/build/telemetry.md
vendored
@@ -1,298 +0,0 @@
# OpenTelemetry Tracing for xrpld

This document explains how to build xrpld with OpenTelemetry distributed tracing support, configure the runtime telemetry options, and set up the observability backend to view traces.

- [OpenTelemetry Tracing for xrpld](#opentelemetry-tracing-for-xrpld)
  - [Overview](#overview)
  - [Building with Telemetry](#building-with-telemetry)
    - [Summary](#summary)
    - [Build steps](#build-steps)
      - [Install dependencies](#install-dependencies)
      - [Call CMake](#call-cmake)
      - [Build](#build)
    - [Building without telemetry](#building-without-telemetry)
  - [Runtime Configuration](#runtime-configuration)
    - [Configuration options](#configuration-options)
  - [Observability Stack](#observability-stack)
    - [Start the stack](#start-the-stack)
    - [Verify the stack](#verify-the-stack)
    - [View traces in Grafana Explore](#view-traces-in-grafana-explore)
  - [Running Tests](#running-tests)
  - [Troubleshooting](#troubleshooting)
    - [No traces appear in Grafana](#no-traces-appear-in-grafana)
    - [Conan lockfile error](#conan-lockfile-error)
    - [CMake target not found](#cmake-target-not-found)
  - [Architecture](#architecture)
    - [Key files](#key-files)
    - [Span discard mechanism](#span-discard-mechanism)
    - [Conditional compilation](#conditional-compilation)

## Overview

xrpld supports optional [OpenTelemetry](https://opentelemetry.io/) distributed tracing.
When enabled, it instruments RPC requests with trace spans that are exported via
OTLP/HTTP to an OpenTelemetry Collector, which forwards them to a tracing backend
such as Grafana Tempo.

Telemetry is **off by default** at both compile time and runtime:

- **Compile time**: The Conan option `telemetry` and CMake option `telemetry` must be set to `True`/`ON`.
  When disabled, all tracing macros compile to `((void)0)` with zero overhead.
- **Runtime**: The `[telemetry]` config section must set `enabled=1`.
  When disabled at runtime, a no-op implementation is used.

## Building with Telemetry

### Summary

Follow the same instructions as in [BUILD.md](../../BUILD.md), with the following changes:

1. Pass `-o telemetry=True` to `conan install` to pull the `opentelemetry-cpp` dependency.
2. CMake will automatically pick up `telemetry=ON` from the Conan-generated toolchain.
3. Build as usual.

---

### Build steps

```bash
cd /path/to/xrpld
rm -rf .build
mkdir .build
cd .build
```

#### Install dependencies

The `telemetry` option adds `opentelemetry-cpp/1.18.0` as a dependency.
If the Conan lockfile does not yet include this package, bypass it with `--lockfile=""`.

```bash
conan install .. \
  --output-folder . \
  --build missing \
  --settings build_type=Debug \
  -o telemetry=True \
  -o tests=True \
  -o xrpld=True \
  --lockfile=""
```

> **Note**: The first build with telemetry may take longer as `opentelemetry-cpp`
> and its transitive dependencies are compiled from source.

#### Call CMake

The Conan-generated toolchain file sets `telemetry=ON` automatically.
No additional CMake flags are needed beyond the standard ones.

```bash
cmake .. -G Ninja \
  -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
  -DCMAKE_BUILD_TYPE=Debug \
  -Dtests=ON -Dxrpld=ON
```

You should see in the CMake output:

```
-- OpenTelemetry tracing enabled
```

#### Build

```bash
cmake --build . --parallel $(nproc)
```

### Building without telemetry

Omit the `-o telemetry=True` option (or pass `-o telemetry=False`).
The `opentelemetry-cpp` dependency will not be downloaded,
the `XRPL_ENABLE_TELEMETRY` preprocessor define will not be set,
and all tracing macros will compile to no-ops.
The resulting binary is identical to one built before telemetry support was added.

## Runtime Configuration

Add a `[telemetry]` section to your `xrpld.cfg` file:

```ini
[telemetry]
enabled=1
endpoint=http://localhost:4318/v1/traces
sampling_ratio=1.0
trace_rpc=1
trace_transactions=1
trace_consensus=1
trace_peer=0
trace_ledger=1
```

### Configuration options

| Option | Type | Default | Description |
| --- | --- | --- | --- |
| `enabled` | int | `0` | Enable (`1`) or disable (`0`) telemetry at runtime |
| `service_name` | string | `xrpld` | Service name reported in traces |
| `service_instance_id` | string | node public key | Unique instance identifier |
| `endpoint` | string | `http://localhost:4318/v1/traces` | OTLP/HTTP collector endpoint |
| `use_tls` | int | `0` | Enable TLS for the exporter connection |
| `tls_ca_cert` | string | (empty) | Path to CA certificate for TLS |
| `sampling_ratio` | double | `1.0` | Head-based sampling ratio (`0.0` to `1.0`) |
| `batch_size` | uint32 | `512` | Maximum spans per export batch |
| `batch_delay_ms` | uint32 | `5000` | Maximum delay (ms) before flushing a batch |
| `max_queue_size` | uint32 | `2048` | Maximum spans queued in memory |
| `trace_rpc` | int | `1` | Enable RPC request tracing |
| `trace_transactions` | int | `1` | Enable transaction lifecycle tracing |
| `trace_consensus` | int | `1` | Enable consensus round tracing |
| `trace_peer` | int | `0` | Enable peer message tracing (high volume) |
| `trace_ledger` | int | `1` | Enable ledger close tracing |
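
Before enabling telemetry on a node, it can be worth confirming that the configured `endpoint` is actually reachable. A minimal sketch, assuming the collector's OTLP/HTTP receiver is on its default port 4318 (an empty OTLP POST should get an HTTP response rather than a connection error):

```bash
# Expect an HTTP status code if the collector is listening;
# a connection error means the endpoint= value needs fixing first.
curl -s -o /dev/null -w '%{http_code}\n' \
  -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{}'
```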

## Observability Stack

A Docker Compose stack is provided in `docker/telemetry/` with three services:

| Service | Port | Purpose |
| --- | --- | --- |
| **OTel Collector** | `4317` (gRPC), `4318` (HTTP), `13133` (health) | Receives OTLP spans, batches, and forwards to Tempo |
| **Tempo** | `3200` (HTTP API) | Trace storage backend |
| **Grafana** | `3000` | Dashboards (Tempo pre-configured as datasource) |

### Start the stack

```bash
docker compose -f docker/telemetry/docker-compose.yml up -d
```

### Verify the stack

```bash
# Collector health
curl http://localhost:13133

# Grafana (Explore -> Tempo for traces)
open http://localhost:3000
```

### View traces in Grafana Explore

1. Open `http://localhost:3000` in a browser.
2. Navigate to **Explore** and select the **Tempo** datasource.
3. Use **Search** or **TraceQL** to find traces by service name (e.g. `xrpld`).
4. Click into any trace to see the span tree and attributes.

Traced RPC operations produce a span hierarchy like:

```
rpc.request
└── rpc.command.server_info (xrpl.rpc.command=server_info, xrpl.rpc.status=success)
```

Each span includes attributes:

- `xrpl.rpc.command` — the RPC method name
- `xrpl.rpc.version` — API version
- `xrpl.rpc.role` — `admin` or `user`
- `xrpl.rpc.status` — `success` or `error`
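
These attributes can also be queried from the command line rather than Grafana. A sketch, assuming the stack's default Tempo port (3200) and the TraceQL search endpoint (`/api/search?q=`) shipped in recent Tempo releases:

```bash
# Find recent traces whose RPC command span reported an error,
# and print their trace IDs.
curl -sG 'http://localhost:3200/api/search' \
  --data-urlencode 'q={ span.xrpl.rpc.status = "error" }' \
  | jq -r '.traces[].traceID'
```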

## Running Tests

Unit tests run with the telemetry-enabled build regardless of whether the
observability stack is running. When no collector is available, the exporter
silently drops spans with no impact on test results.

```bash
# Run all RPC tests
./xrpld --unittest=RPCCall,ServerInfo,AccountTx,LedgerRPC,Transaction --unittest-jobs $(nproc)

# Run the full test suite
./xrpld --unittest --unittest-jobs $(nproc)
```

To generate traces during manual testing, start xrpld in standalone mode:

```bash
./xrpld --conf /path/to/xrpld.cfg --standalone --start
```

Then send RPC requests:

```bash
curl -s -X POST http://127.0.0.1:5005/ \
  -H "Content-Type: application/json" \
  -d '{"method":"server_info","params":[{}]}'
```

## Troubleshooting

### No traces appear in Grafana

1. Confirm the OTel Collector is running: `docker compose -f docker/telemetry/docker-compose.yml ps`
2. Check collector logs for errors: `docker compose -f docker/telemetry/docker-compose.yml logs otel-collector`
3. Confirm `[telemetry] enabled=1` is set in the xrpld config.
4. Confirm `endpoint` points to the correct collector address (`http://localhost:4318/v1/traces`).
5. Wait for the batch delay to elapse (default `5000` ms) before checking Grafana Explore.

### Conan lockfile error

If you see `ERROR: Requirement 'opentelemetry-cpp/1.18.0' not in lockfile 'requires'`,
the lockfile was generated without the telemetry dependency.
Pass `--lockfile=""` to bypass the lockfile, or regenerate it with telemetry enabled.

### CMake target not found

If CMake reports that `opentelemetry-cpp` targets are not found,
ensure you ran `conan install` with `-o telemetry=True` and that the
Conan-generated toolchain file is being used.
The Conan package provides a single umbrella target,
`opentelemetry-cpp::opentelemetry-cpp` (not individual component targets).

## Architecture

### Key files

| File | Purpose |
| --- | --- |
| `include/xrpl/telemetry/Telemetry.h` | Abstract telemetry interface and `Setup` struct |
| `include/xrpl/telemetry/SpanGuard.h` | RAII span guard with `discard()` for dropping unwanted spans |
| `include/xrpl/telemetry/DiscardFlag.h` | Thread-local discard flag (zero-dependency header) |
| `src/libxrpl/telemetry/Telemetry.cpp` | OTel SDK setup, `FilteringSpanProcessor`, provider lifecycle |
| `src/libxrpl/telemetry/TelemetryConfig.cpp` | Config parser (`setup_Telemetry()`) |
| `src/libxrpl/telemetry/NullTelemetry.cpp` | No-op implementation (used when disabled) |
| `src/libxrpl/telemetry/SpanGuard.cpp` | Pimpl implementation for SpanGuard (all OTel types confined) |
| `src/xrpld/rpc/detail/ServerHandler.cpp` | RPC entry point instrumentation |
| `src/xrpld/rpc/detail/RPCHandler.cpp` | Per-command instrumentation |
| `docker/telemetry/docker-compose.yml` | Observability stack (Collector + Tempo + Grafana) |
| `docker/telemetry/otel-collector-config.yaml` | OTel Collector pipeline configuration |

### Span discard mechanism

`SpanGuard::discard()` allows callers to silently drop spans that turn out to be
uninteresting (e.g., failed preflight transactions). This saves both network bandwidth
and storage by preventing the span from being exported.

The mechanism uses a thread-local flag (`tl_discardCurrentSpan` in `DiscardFlag.h`) as a
side channel to the `FilteringSpanProcessor` (in `Telemetry.cpp`):

1. `SpanGuard::discard()` sets the thread-local flag and calls `Span::End()`.
2. The OTel SDK calls `FilteringSpanProcessor::OnEnd()` synchronously on the same thread.
3. The processor checks the flag, clears it, and drops the span before it enters the batch queue.

```cpp
SpanGuard guard(telemetry.startSpan("tx.process"));
auto result = preflight(tx);
if (result != tesSUCCESS)
{
    guard.discard(); // span is dropped, never exported
    return result;
}
```

### Conditional compilation

All OpenTelemetry SDK types are hidden behind the pimpl idiom in `SpanGuard.cpp`.
When `XRPL_ENABLE_TELEMETRY` is not defined, `SpanGuard.h` provides an all-inline
no-op stub class with zero overhead and zero OTel dependencies.
At runtime, if `enabled=0` is set in the config (or the section is omitted), a
`NullTelemetry` implementation is used that returns no-op spans.
This two-layer approach ensures zero overhead when telemetry is not wanted.

@@ -1,386 +0,0 @@
# xrpld Telemetry Operator Runbook

## Overview

xrpld supports OpenTelemetry distributed tracing to provide visibility into RPC requests, transaction processing, and consensus rounds.

## Quick Start

### 1. Start the observability stack

```bash
docker compose -f docker/telemetry/docker-compose.yml up -d
```

This starts:

- **OTel Collector** on ports 4317 (gRPC) and 4318 (HTTP)
- **Tempo** on http://localhost:3200
- **Prometheus** on http://localhost:9090
- **Grafana** on http://localhost:3000

### 2. Enable telemetry in xrpld

Add to your `xrpld.cfg`:

```ini
[telemetry]
enabled=1
endpoint=http://localhost:4318/v1/traces
```

### 3. Build with telemetry support

```bash
conan install . --build=missing -o telemetry=True
cmake --preset default -Dtelemetry=ON
cmake --build --preset default
```

## Configuration Reference

| Option | Default | Description |
| --- | --- | --- |
| `enabled` | `0` | Master switch for telemetry |
| `endpoint` | `http://localhost:4318/v1/traces` | OTLP/HTTP endpoint |
| `exporter` | `otlp_http` | Exporter type |
| `sampling_ratio` | `1.0` | Head-based sampling ratio (0.0–1.0) |
| `trace_rpc` | `1` | Enable RPC request tracing |
| `trace_transactions` | `1` | Enable transaction tracing |
| `trace_consensus` | `1` | Enable consensus tracing |
| `trace_peer` | `0` | Enable peer message tracing (high volume) |
| `trace_ledger` | `1` | Enable ledger tracing |
| `batch_size` | `512` | Max spans per batch export |
| `batch_delay_ms` | `5000` | Delay between batch exports |
| `max_queue_size` | `2048` | Max spans queued before dropping |
| `use_tls` | `0` | Use TLS for the exporter connection |
| `tls_ca_cert` | (empty) | Path to CA certificate bundle |

## Span Reference

All spans instrumented in xrpld, grouped by subsystem:

### RPC Spans (Phase 2)

| Span Name | Source File | Attributes | Description |
| --- | --- | --- | --- |
| `rpc.request` | ServerHandler.cpp:271 | — | Top-level HTTP RPC request |
| `rpc.process` | ServerHandler.cpp:573 | — | RPC processing (child of rpc.request) |
| `rpc.ws_message` | ServerHandler.cpp:384 | — | WebSocket RPC message |
| `rpc.command.<name>` | RPCHandler.cpp:161 | `xrpl.rpc.command`, `xrpl.rpc.version`, `xrpl.rpc.role`, `xrpl.rpc.status`, `xrpl.rpc.duration_ms`, `xrpl.rpc.error_message` | Per-command span (e.g., `rpc.command.server_info`) |

### Transaction Spans (Phase 3)

| Span Name | Source File | Attributes | Description |
| --- | --- | --- | --- |
| `tx.process` | NetworkOPs.cpp:1227 | `xrpl.tx.hash`, `xrpl.tx.local`, `xrpl.tx.path` | Transaction submission and processing |
| `tx.receive` | PeerImp.cpp:1273 | `xrpl.peer.id`, `xrpl.tx.hash`, `xrpl.tx.suppressed`, `xrpl.tx.status` | Transaction received from peer relay |
| `tx.apply` | BuildLedger.cpp:88 | `xrpl.ledger.seq`, `xrpl.ledger.tx_count`, `xrpl.ledger.tx_failed` | Transaction set applied per ledger |

### Consensus Spans (Phase 4)

| Span Name | Source File | Attributes | Description |
| --- | --- | --- | --- |
| `consensus.proposal.send` | RCLConsensus.cpp:177 | `xrpl.consensus.round` | Consensus proposal broadcast |
| `consensus.ledger_close` | RCLConsensus.cpp:282 | `xrpl.consensus.ledger.seq`, `xrpl.consensus.mode` | Ledger close event |
| `consensus.accept` | RCLConsensus.cpp:395 | `xrpl.consensus.proposers`, `xrpl.consensus.round_time_ms` | Ledger accepted by consensus |
| `consensus.validation.send` | RCLConsensus.cpp:753 | `xrpl.consensus.ledger.seq`, `xrpl.consensus.proposing` | Validation sent after accept |
| `consensus.accept.apply` | RCLConsensus.cpp:453 | `xrpl.consensus.close_time`, `close_time_correct`, `close_resolution_ms`, `state`, `proposing`, `round_time_ms`, `ledger.seq` | Ledger application with close time details |

#### Close Time Queries (Tempo TraceQL)

```
# Find rounds where validators disagreed on close time
{ name = "consensus.accept.apply" && span.xrpl.consensus.close_time_correct = false }

# Find consensus failures (moved_on)
{ name = "consensus.accept.apply" && span.xrpl.consensus.state = "moved_on" }

# Find slow ledger applications (>5s)
{ name = "consensus.accept.apply" && duration > 5s }

# Find a specific ledger's consensus details
{ name = "consensus.accept.apply" && span.xrpl.consensus.ledger.seq = 92345678 }
```
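
These queries do not require Grafana; they can be run directly against Tempo's HTTP API. A sketch, assuming Tempo's default port 3200 and its TraceQL search endpoint (`/api/search?q=`):

```bash
# Ask Tempo for traces where consensus moved on, and print their IDs.
curl -sG 'http://localhost:3200/api/search' \
  --data-urlencode 'q={ name = "consensus.accept.apply" && span.xrpl.consensus.state = "moved_on" }' \
  | jq -r '.traces[].traceID'
```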

### Ledger Spans (Phase 5)

| Span Name | Source File | Attributes | Description |
| --- | --- | --- | --- |
| `ledger.build` | BuildLedger.cpp:31 | `xrpl.ledger.seq`, `xrpl.ledger.tx_count`, `xrpl.ledger.tx_failed` | Ledger build during consensus |
| `ledger.validate` | LedgerMaster.cpp:915 | `xrpl.ledger.seq`, `xrpl.ledger.validations` | Ledger promoted to validated |
| `ledger.store` | LedgerMaster.cpp:409 | `xrpl.ledger.seq` | Ledger stored in history |

### Peer Spans (Phase 5)

| Span Name | Source File | Attributes | Description |
| --- | --- | --- | --- |
| `peer.proposal.receive` | PeerImp.cpp:1667 | `xrpl.peer.id`, `xrpl.peer.proposal.trusted` | Proposal received from peer |
| `peer.validation.receive` | PeerImp.cpp:2264 | `xrpl.peer.id`, `xrpl.peer.validation.trusted` | Validation received from peer |

## Prometheus Metrics (Spanmetrics)

The OTel Collector's spanmetrics connector automatically derives RED (Rate, Errors, Duration) metrics from every span. No custom metrics code is needed in xrpld.

### Generated Metric Names

| Prometheus Metric | Type | Description |
| --- | --- | --- |
| `traces_span_metrics_calls_total` | Counter | Total span invocations |
| `traces_span_metrics_duration_milliseconds_bucket` | Histogram | Latency distribution buckets |
| `traces_span_metrics_duration_milliseconds_count` | Histogram | Latency observation count |
| `traces_span_metrics_duration_milliseconds_sum` | Histogram | Cumulative latency |

### Metric Labels

Every metric carries these standard labels:

| Label | Source | Example |
| --- | --- | --- |
| `span_name` | Span name | `rpc.command.server_info` |
| `status_code` | Span status | `STATUS_CODE_UNSET`, `STATUS_CODE_ERROR` |
| `service_name` | Resource attribute | `xrpld` |
| `span_kind` | Span kind | `SPAN_KIND_INTERNAL` |

Additionally, span attributes configured as dimensions in the collector become metric labels (dots → underscores):

| Span Attribute | Metric Label | Applies To |
| --- | --- | --- |
| `xrpl.rpc.command` | `xrpl_rpc_command` | `rpc.command.*` spans |
| `xrpl.rpc.status` | `xrpl_rpc_status` | `rpc.command.*` spans |
| `xrpl.consensus.mode` | `xrpl_consensus_mode` | `consensus.ledger_close` spans |
| `xrpl.tx.local` | `xrpl_tx_local` | `tx.process` spans |
| `xrpl.peer.proposal.trusted` | `xrpl_peer_proposal_trusted` | `peer.proposal.receive` spans |
| `xrpl.peer.validation.trusted` | `xrpl_peer_validation_trusted` | `peer.validation.receive` spans |

### Histogram Buckets

Configured in `otel-collector-config.yaml`:

```
1ms, 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 5s
```
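
Putting the metric names, labels, and buckets together, a percentile query can be issued directly against Prometheus's HTTP API. A sketch using the metric and label names above (the span name is just an example):

```bash
# p95 latency of server_info RPC handling over the last 5 minutes.
curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=histogram_quantile(0.95, sum by (le) (rate(traces_span_metrics_duration_milliseconds_bucket{span_name="rpc.command.server_info"}[5m])))' \
  | jq '.data.result'
```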

## StatsD Metrics (beast::insight)

rippled has a built-in metrics framework (`beast::insight`) that emits StatsD-format metrics over UDP. These complement the span-derived RED metrics by providing system-level gauges, counters, and timers that don't map to individual trace spans.

### Configuration

Add to `xrpld.cfg`:

```ini
[insight]
server=statsd
address=127.0.0.1:8125
prefix=rippled
```

The OTel Collector receives these via a `statsd` receiver on UDP port 8125 and exports them to Prometheus alongside spanmetrics.
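
If metrics are not showing up, the raw StatsD stream can be inspected before it ever reaches the collector. A sketch (stop the collector first so the UDP port is free; `nc` flags vary slightly between BSD and GNU netcat):

```bash
# Listen on the StatsD port and print whatever rippled sends.
# Expect lines in StatsD wire format, e.g.: rippled.job_count:3|g
nc -u -l 8125
```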

### Metric Reference

#### Gauges

| Prometheus Metric | Source | Description |
| --- | --- | --- |
| `rippled_LedgerMaster_Validated_Ledger_Age` | LedgerMaster.h:373 | Age of the validated ledger (seconds) |
| `rippled_LedgerMaster_Published_Ledger_Age` | LedgerMaster.h:374 | Age of the published ledger (seconds) |
| `rippled_State_Accounting_{Mode}_duration` | NetworkOPs.cpp:774 | Time in each operating mode (Disconnected/Connected/Syncing/Tracking/Full) |
| `rippled_State_Accounting_{Mode}_transitions` | NetworkOPs.cpp:780 | Transition count per mode |
| `rippled_Peer_Finder_Active_Inbound_Peers` | PeerfinderManager.cpp:214 | Active inbound peer connections |
| `rippled_Peer_Finder_Active_Outbound_Peers` | PeerfinderManager.cpp:215 | Active outbound peer connections |
| `rippled_Overlay_Peer_Disconnects` | OverlayImpl.h:557 | Peer disconnect count |
| `rippled_job_count` | JobQueue.cpp:26 | Current job queue depth |
| `rippled_{category}_Bytes_In/Out` | OverlayImpl.h:535 | Overlay traffic bytes per category (57 categories) |
| `rippled_{category}_Messages_In/Out` | OverlayImpl.h:535 | Overlay traffic messages per category |

#### Counters

| Prometheus Metric | Source | Description |
| --- | --- | --- |
| `rippled_rpc_requests` | ServerHandler.cpp:108 | Total RPC request count |
| `rippled_ledger_fetches` | InboundLedgers.cpp:44 | Ledger fetch request count |
| `rippled_ledger_history_mismatch` | LedgerHistory.cpp:16 | Ledger hash mismatch count |
| `rippled_warn` | Logic.h:33 | Resource manager warning count |
| `rippled_drop` | Logic.h:34 | Resource manager drop count |

#### Histograms (from StatsD timers)

| Prometheus Metric | Source | Description |
| --- | --- | --- |
| `rippled_rpc_time` | ServerHandler.cpp:110 | RPC response time (ms) |
| `rippled_rpc_size` | ServerHandler.cpp:109 | RPC response size (bytes) |
| `rippled_ios_latency` | Application.cpp:438 | I/O service loop latency (ms) |
| `rippled_pathfind_fast` | PathRequests.h:23 | Fast pathfinding duration (ms) |
| `rippled_pathfind_full` | PathRequests.h:24 | Full pathfinding duration (ms) |

## Grafana Dashboards

Eight dashboards are pre-provisioned in `docker/telemetry/grafana/dashboards/`:

### RPC Performance (`xrpld-rpc-perf`)

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| RPC Request Rate by Command | timeseries | `sum by (xrpl_rpc_command) (rate(traces_span_metrics_calls_total{span_name=~"rpc.command.*"}[5m]))` | `xrpl_rpc_command` |
| RPC Latency p95 by Command | timeseries | `histogram_quantile(0.95, sum by (le, xrpl_rpc_command) (rate(traces_span_metrics_duration_milliseconds_bucket{span_name=~"rpc.command.*"}[5m])))` | `xrpl_rpc_command` |
| RPC Error Rate | bargauge | Error spans / total spans × 100, grouped by `xrpl_rpc_command` | `xrpl_rpc_command`, `status_code` |
| RPC Latency Heatmap | heatmap | `sum(increase(traces_span_metrics_duration_milliseconds_bucket{span_name=~"rpc.command.*"}[5m])) by (le)` | `le` (bucket boundaries) |
| Overall RPC Throughput | timeseries | `rpc.request` + `rpc.process` rate | — |
| RPC Success vs Error | timeseries | by `status_code` (UNSET vs ERROR) | `status_code` |
| Top Commands by Volume | bargauge | `topk(10, ...)` by `xrpl_rpc_command` | `xrpl_rpc_command` |
| WebSocket Message Rate | stat | `rpc.ws_message` rate | — |

### Transaction Overview (`xrpld-transactions`)

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| Transaction Processing Rate | timeseries | `rate(traces_span_metrics_calls_total{span_name="tx.process"}[5m])` and `tx.receive` | `span_name` |
| Transaction Processing Latency | timeseries | `histogram_quantile(0.95 / 0.50, ... {span_name="tx.process"})` | — |
| Transaction Path Distribution | piechart | `sum by (xrpl_tx_local) (rate(traces_span_metrics_calls_total{span_name="tx.process"}[5m]))` | `xrpl_tx_local` |
| Transaction Receive vs Suppressed | timeseries | `rate(traces_span_metrics_calls_total{span_name="tx.receive"}[5m])` | — |
| TX Processing Duration Heatmap | heatmap | `tx.process` histogram buckets | `le` |
| TX Apply Duration per Ledger | timeseries | p95/p50 of `tx.apply` | — |
| Peer TX Receive Rate | timeseries | `tx.receive` rate | — |
| TX Apply Failed Rate | stat | `tx.apply` with `STATUS_CODE_ERROR` | `status_code` |

### Consensus Health (`xrpld-consensus`)

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| Consensus Round Duration | timeseries | `histogram_quantile(0.95 / 0.50, ... {span_name="consensus.accept"})` | — |
| Consensus Proposals Sent Rate | timeseries | `rate(traces_span_metrics_calls_total{span_name="consensus.proposal.send"}[5m])` | — |
| Ledger Close Duration | timeseries | `histogram_quantile(0.95, ... {span_name="consensus.ledger_close"})` | — |
| Validation Send Rate | stat | `rate(traces_span_metrics_calls_total{span_name="consensus.validation.send"}[5m])` | — |
| Ledger Apply Duration | timeseries | `histogram_quantile(0.95 / 0.50, ... {span_name="consensus.accept.apply"})` | — |
| Close Time Agreement | timeseries | `rate(traces_span_metrics_calls_total{span_name="consensus.accept.apply"}[5m])` | — |
| Consensus Mode Over Time | timeseries | `consensus.ledger_close` by `xrpl_consensus_mode` | `xrpl_consensus_mode` |
| Accept vs Close Rate | timeseries | `consensus.accept` vs `consensus.ledger_close` rate | — |
| Validation vs Close Rate | timeseries | `consensus.validation.send` vs `consensus.ledger_close` | — |
| Accept Duration Heatmap | heatmap | `consensus.accept` histogram buckets | `le` |

### Ledger Operations (`rippled-ledger-ops`)

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| Ledger Build Rate | stat | `ledger.build` call rate | — |
| Ledger Build Duration | timeseries | p95/p50 of `ledger.build` | — |
| Ledger Validation Rate | stat | `ledger.validate` call rate | — |
| Build Duration Heatmap | heatmap | `ledger.build` histogram buckets | `le` |
| TX Apply Duration | timeseries | p95/p50 of `tx.apply` | — |
| TX Apply Rate | timeseries | `tx.apply` call rate | — |
| Ledger Store Rate | stat | `ledger.store` call rate | — |
| Build vs Close Duration | timeseries | p95 `ledger.build` vs `consensus.ledger_close` | — |

### Peer Network (`rippled-peer-net`)

Requires `trace_peer=1` in the `[telemetry]` config section.

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| Proposal Receive Rate | timeseries | `peer.proposal.receive` rate | — |
| Validation Receive Rate | timeseries | `peer.validation.receive` rate | — |
| Proposals Trusted vs Untrusted | piechart | by `xrpl_peer_proposal_trusted` | `xrpl_peer_proposal_trusted` |
| Validations Trusted vs Untrusted | piechart | by `xrpl_peer_validation_trusted` | `xrpl_peer_validation_trusted` |

### Node Health — StatsD (`rippled-statsd-node-health`)

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| Validated Ledger Age | stat | `rippled_LedgerMaster_Validated_Ledger_Age` | — |
| Published Ledger Age | stat | `rippled_LedgerMaster_Published_Ledger_Age` | — |
| Operating Mode Duration | timeseries | `rippled_State_Accounting_*_duration` | — |
| Operating Mode Transitions | timeseries | `rippled_State_Accounting_*_transitions` | — |
| I/O Latency | timeseries | `histogram_quantile(0.95, rippled_ios_latency_bucket)` | — |
| Job Queue Depth | timeseries | `rippled_job_count` | — |
| Ledger Fetch Rate | stat | `rate(rippled_ledger_fetches[5m])` | — |
| Ledger History Mismatches | stat | `rate(rippled_ledger_history_mismatch[5m])` | — |

### Network Traffic — StatsD (`rippled-statsd-network`)

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| Active Peers | timeseries | `rippled_Peer_Finder_Active_*_Peers` | — |
| Peer Disconnects | timeseries | `rippled_Overlay_Peer_Disconnects` | — |
| Total Network Bytes | timeseries | `rippled_total_Bytes_In/Out` | — |
| Total Network Messages | timeseries | `rippled_total_Messages_In/Out` | — |
| Transaction Traffic | timeseries | `rippled_transactions_Messages_In/Out` | — |
| Proposal Traffic | timeseries | `rippled_proposals_Messages_In/Out` | — |
| Validation Traffic | timeseries | `rippled_validations_Messages_In/Out` | — |
| Traffic by Category | bargauge | `topk(10, rippled_*_Bytes_In)` | — |

### RPC & Pathfinding — StatsD (`rippled-statsd-rpc`)

| Panel | Type | PromQL | Labels Used |
| --- | --- | --- | --- |
| RPC Request Rate | stat | `rate(rippled_rpc_requests[5m])` | — |
| RPC Response Time | timeseries | `histogram_quantile(0.95, rippled_rpc_time_bucket)` | — |
| RPC Response Size | timeseries | `histogram_quantile(0.95, rippled_rpc_size_bucket)` | — |
| RPC Response Time Heatmap | heatmap | `rippled_rpc_time_bucket` | — |
| Pathfinding Fast Duration | timeseries | `histogram_quantile(0.95, rippled_pathfind_fast_bucket)` | — |
| Pathfinding Full Duration | timeseries | `histogram_quantile(0.95, rippled_pathfind_full_bucket)` | — |
| Resource Warnings Rate | stat | `rate(rippled_warn[5m])` | — |
| Resource Drops Rate | stat | `rate(rippled_drop[5m])` | — |

### Span → Metric → Dashboard Summary

| Span Name | Prometheus Metric Filter | Grafana Dashboard |
| --- | --- | --- |
| `rpc.request` | `{span_name="rpc.request"}` | RPC Performance (Overall Throughput) |
| `rpc.process` | `{span_name="rpc.process"}` | RPC Performance (Overall Throughput) |
| `rpc.ws_message` | `{span_name="rpc.ws_message"}` | RPC Performance (WebSocket Rate) |
| `rpc.command.*` | `{span_name=~"rpc.command.*"}` | RPC Performance (Rate, Latency, Error, Top) |
| `tx.process` | `{span_name="tx.process"}` | Transaction Overview (Rate, Latency, Heatmap) |
| `tx.receive` | `{span_name="tx.receive"}` | Transaction Overview (Rate, Receive) |
| `tx.apply` | `{span_name="tx.apply"}` | Transaction Overview + Ledger Ops (Apply) |
| `consensus.accept` | `{span_name="consensus.accept"}` | Consensus Health (Duration, Rate, Heatmap) |
| `consensus.proposal.send` | `{span_name="consensus.proposal.send"}` | Consensus Health (Proposals Rate) |
| `consensus.ledger_close` | `{span_name="consensus.ledger_close"}` | Consensus Health (Close, Mode) |
| `consensus.validation.send` | `{span_name="consensus.validation.send"}` | Consensus Health (Validation Rate) |
| `consensus.accept.apply` | `{span_name="consensus.accept.apply"}` | Consensus Health (Apply Duration, Close Time) |
| `ledger.build` | `{span_name="ledger.build"}` | Ledger Ops (Build Rate, Duration, Heatmap) |
| `ledger.validate` | `{span_name="ledger.validate"}` | Ledger Ops (Validation Rate) |
| `ledger.store` | `{span_name="ledger.store"}` | Ledger Ops (Store Rate) |
| `peer.proposal.receive` | `{span_name="peer.proposal.receive"}` | Peer Network (Rate, Trusted/Untrusted) |
| `peer.validation.receive` | `{span_name="peer.validation.receive"}` | Peer Network (Rate, Trusted/Untrusted) |

## Troubleshooting

### No traces appearing in Grafana

1. Check the xrpld logs for a `Telemetry starting` message.
2. Verify `enabled=1` in the `[telemetry]` config section.
3. Test collector connectivity: `curl -v http://localhost:4318/v1/traces`
4. Check the collector logs: `docker compose logs otel-collector`

### High memory usage

- Reduce `sampling_ratio` (e.g., `0.1` for 10% sampling)
- Reduce `max_queue_size` and `batch_size`
- Disable high-volume trace categories: `trace_peer=0`

### Collector connection failures

- Verify the endpoint URL matches the collector address
- Check firewall rules for ports 4317/4318
- If using TLS, verify the certificate path in `tls_ca_cert`

## Performance Tuning

| Scenario | Recommendation |
| --- | --- |
| Production mainnet | `sampling_ratio=0.01`, `trace_peer=0` |
| Testnet/devnet | `sampling_ratio=1.0` (full tracing) |
| Debugging a specific issue | `sampling_ratio=1.0` temporarily |
| High-throughput node | Increase `batch_size=1024`, `max_queue_size=4096` |
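
As a concrete starting point, the "Production mainnet" row above might translate into a config fragment like the following. A sketch, assuming the stock `/etc/opt/ripple/rippled.cfg` config path; tune the sampling ratio to your trace volume:

```bash
# Append a production-leaning [telemetry] section to the node config.
cat >> /etc/opt/ripple/rippled.cfg <<'EOF'

[telemetry]
enabled=1
endpoint=http://localhost:4318/v1/traces
sampling_ratio=0.01
trace_peer=0
batch_size=1024
max_queue_size=4096
EOF
```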

## Disabling Telemetry

Set `enabled=0` in the config (runtime disable) or build without the flag:

```bash
cmake --preset default -Dtelemetry=OFF
```

When telemetry is compiled out, all trace macros expand to no-ops with zero overhead.

@@ -1,6 +1,6 @@
 #pragma once

-#include <boost/filesystem.hpp>
+#include <filesystem>

 namespace xrpl {

@@ -12,6 +12,6 @@ namespace xrpl {
     @throws runtime_error
 */
 void
-extractTarLz4(boost::filesystem::path const& src, boost::filesystem::path const& dst);
+extractTarLz4(std::filesystem::path const& src, std::filesystem::path const& dst);

 } // namespace xrpl

@@ -67,10 +67,8 @@ private:
}
else
{
for (; elapsed > 0; --elapsed)
{
while ((elapsed--) != 0u)
m_value -= (m_value + Window - 1) / Window;
}
}
}

@@ -1,22 +1,22 @@
#pragma once

#include <boost/filesystem.hpp>
#include <boost/system/error_code.hpp>

#include <filesystem>
#include <optional>
#include <string>
#include <system_error>

namespace xrpl {

std::string
getFileContents(
boost::system::error_code& ec,
boost::filesystem::path const& sourcePath,
std::error_code& ec,
std::filesystem::path const& sourcePath,
std::optional<std::size_t> maxSize = std::nullopt);

void
writeFileContents(
boost::system::error_code& ec,
boost::filesystem::path const& destPath,
std::error_code& ec,
std::filesystem::path const& destPath,
std::string const& contents);

} // namespace xrpl
@@ -4,8 +4,8 @@
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
|
||||
#include <boost/beast/core/string.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
@@ -88,7 +88,7 @@ private:
|
||||
@return `true` if the file was opened.
|
||||
*/
|
||||
bool
|
||||
open(boost::filesystem::path const& path);
|
||||
open(std::filesystem::path const& path);
|
||||
|
||||
/** Close and re-open the system file associated with the log
|
||||
This assists in interoperating with external log management tools.
|
||||
@@ -130,7 +130,7 @@ private:
|
||||
|
||||
private:
|
||||
std::unique_ptr<std::ofstream> m_stream;
|
||||
boost::filesystem::path m_path;
|
||||
std::filesystem::path m_path;
|
||||
};
|
||||
|
||||
std::mutex mutable mutex_;
|
||||
@@ -149,7 +149,7 @@ public:
|
||||
virtual ~Logs() = default;
|
||||
|
||||
bool
|
||||
open(boost::filesystem::path const& pathToLogFile);
|
||||
open(std::filesystem::path const& pathToLogFile);
|
||||
|
||||
beast::Journal::Sink&
|
||||
get(std::string const& name);
|
||||
|
||||
@@ -43,10 +43,8 @@ public:
|
||||
: work_(boost::asio::make_work_guard(ios_))
|
||||
{
|
||||
threads_.reserve(concurrency);
|
||||
for (std::size_t i = 0; i < concurrency; ++i)
|
||||
{
|
||||
while ((concurrency--) != 0u)
|
||||
threads_.emplace_back([&] { ios_.run(); });
|
||||
}
|
||||
}
|
||||
|
||||
~enable_yield_to()
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
|
||||
#include <xrpl/beast/unit_test/runner.h>
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <boost/throw_exception.hpp>
|
||||
|
||||
#include <filesystem>
|
||||
#include <ostream>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
@@ -25,7 +25,7 @@ make_reason(String const& reason, char const* file, int line)
|
||||
std::string s(reason);
|
||||
if (!s.empty())
|
||||
s.append(": ");
|
||||
namespace fs = boost::filesystem;
|
||||
namespace fs = std::filesystem;
|
||||
s.append(fs::path{file}.filename().string());
|
||||
s.append("(");
|
||||
s.append(boost::lexical_cast<std::string>(line));
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
#pragma once
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
#include <filesystem>
|
||||
#include <iomanip>
|
||||
#include <random>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <system_error>
|
||||
|
||||
namespace beast {
|
||||
|
||||
@@ -13,7 +16,7 @@ namespace beast {
|
||||
*/
|
||||
class temp_dir
|
||||
{
|
||||
boost::filesystem::path path_;
|
||||
std::filesystem::path path_;
|
||||
|
||||
public:
|
||||
#if !GENERATING_DOCS
|
||||
@@ -25,20 +28,30 @@ public:
|
||||
/// Construct a temporary directory.
|
||||
temp_dir()
|
||||
{
|
||||
auto const dir = boost::filesystem::temp_directory_path();
|
||||
do
|
||||
auto const dir = std::filesystem::temp_directory_path();
|
||||
std::random_device rd;
|
||||
constexpr std::size_t maxAttempts = 100;
|
||||
for (std::size_t attempt = 0; attempt < maxAttempts; ++attempt)
|
||||
{
|
||||
path_ = dir / boost::filesystem::unique_path();
|
||||
} while (boost::filesystem::exists(path_));
|
||||
boost::filesystem::create_directory(path_);
|
||||
std::error_code ec;
|
||||
std::ostringstream oss;
|
||||
oss << std::hex << std::setfill('0') << std::setw(8) << rd() << std::setw(8) << rd();
|
||||
path_ = dir / oss.str();
|
||||
if (!std::filesystem::exists(path_, ec) && !ec)
|
||||
break;
|
||||
path_.clear();
|
||||
}
|
||||
if (path_.empty())
|
||||
throw std::runtime_error("Unable to generate a unique temporary directory path");
|
||||
std::filesystem::create_directory(path_);
|
||||
}
|
||||
|
||||
/// Destroy a temporary directory.
|
||||
~temp_dir()
|
||||
{
|
||||
// use non-throwing calls in the destructor
|
||||
boost::system::error_code ec;
|
||||
boost::filesystem::remove_all(path_, ec);
|
||||
std::error_code ec;
|
||||
std::filesystem::remove_all(path_, ec);
|
||||
// TODO: warn/notify if ec set ?
|
||||
}
|
||||
|
||||
|
||||
@@ -4,10 +4,9 @@
|
||||
#include <xrpl/core/JobTypes.h>
|
||||
#include <xrpl/json/json_value.h>
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
@@ -43,7 +42,7 @@ public:
|
||||
*/
|
||||
struct Setup
|
||||
{
|
||||
boost::filesystem::path perfLog;
|
||||
std::filesystem::path perfLog;
|
||||
// log_interval is in milliseconds to support faster testing.
|
||||
milliseconds logInterval{seconds(1)};
|
||||
};
|
||||
@@ -148,7 +147,7 @@ public:
|
||||
};
|
||||
|
||||
PerfLog::Setup
|
||||
setup_PerfLog(Section const& section, boost::filesystem::path const& configDir);
|
||||
setup_PerfLog(Section const& section, std::filesystem::path const& configDir);
|
||||
|
||||
std::unique_ptr<PerfLog>
|
||||
make_PerfLog(
|
||||
|
||||
@@ -18,9 +18,6 @@ class Manager;
|
||||
namespace perf {
|
||||
class PerfLog;
|
||||
} // namespace perf
|
||||
namespace telemetry {
|
||||
class Telemetry;
|
||||
} // namespace telemetry
|
||||
|
||||
// This is temporary until we migrate all code to use ServiceRegistry.
|
||||
class Application;
|
||||
@@ -221,9 +218,6 @@ public:
|
||||
virtual perf::PerfLog&
|
||||
getPerfLog() = 0;
|
||||
|
||||
virtual telemetry::Telemetry&
|
||||
getTelemetry() = 0;
|
||||
|
||||
// Configuration and state
|
||||
[[nodiscard]] virtual bool
|
||||
isStopping() const = 0;
|
||||
|
||||
@@ -54,9 +54,8 @@ read_varint(void const* buf, std::size_t buflen, std::size_t& t)
|
||||
return 1;
|
||||
}
|
||||
auto const used = n;
|
||||
while (n > 0)
|
||||
while (n--)
|
||||
{
|
||||
--n;
|
||||
auto const d = p[n];
|
||||
auto const t0 = t;
|
||||
t *= 127;
|
||||
|
||||
@@ -85,19 +85,6 @@ message TMPublicKey {
|
||||
// If you want to send an amount that is greater than any single address of yours
|
||||
// you must first combine coins from one address to another.
|
||||
|
||||
// Trace context for OpenTelemetry distributed tracing across nodes.
|
||||
// Uses W3C Trace Context format internally.
|
||||
message TraceContext {
|
||||
optional bytes trace_id = 1; // 16-byte trace identifier
|
||||
optional bytes span_id = 2; // 8-byte parent span identifier
|
||||
optional uint32 trace_flags = 3; // bit 0 = sampled
|
||||
// TODO: trace_state is reserved for W3C tracestate vendor-specific
|
||||
// key-value pairs but is not yet read or written by
|
||||
// TraceContextPropagator. Wire it when cross-vendor trace
|
||||
// propagation is needed.
|
||||
optional string trace_state = 4; // W3C tracestate header value
|
||||
}
|
||||
|
||||
enum TransactionStatus {
|
||||
tsNEW = 1; // origin node did/could not validate
|
||||
tsCURRENT = 2; // scheduled to go in this ledger
|
||||
@@ -114,9 +101,6 @@ message TMTransaction {
|
||||
required TransactionStatus status = 2;
|
||||
optional uint64 receiveTimestamp = 3;
|
||||
optional bool deferred = 4; // not applied to open ledger
|
||||
|
||||
// Optional trace context for OpenTelemetry distributed tracing
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
|
||||
message TMTransactions {
|
||||
@@ -165,9 +149,6 @@ message TMProposeSet {
|
||||
|
||||
// Number of hops traveled
|
||||
optional uint32 hops = 12 [deprecated = true];
|
||||
|
||||
// Optional trace context for OpenTelemetry distributed tracing
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
|
||||
enum TxSetStatus {
|
||||
@@ -213,9 +194,6 @@ message TMValidation {
|
||||
|
||||
// Number of hops traveled
|
||||
optional uint32 hops = 3 [deprecated = true];
|
||||
|
||||
// Optional trace context for OpenTelemetry distributed tracing
|
||||
optional TraceContext trace_context = 1001;
|
||||
}
|
||||
|
||||
// An array of Endpoint messages
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
// Add new amendments to the top of this list.
|
||||
// Keep it sorted in reverse chronological order.
|
||||
|
||||
XRPL_FIX (Cleanup3_2_0, Supported::no, VoteBehavior::DefaultNo)
|
||||
XRPL_FEATURE(MPTokensV2, Supported::no, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (Security3_1_3, Supported::no, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (PermissionedDomainInvariant, Supported::yes, VoteBehavior::DefaultNo)
|
||||
|
||||
@@ -6,8 +6,7 @@
|
||||
#include <xrpl/rdb/DBInit.h>
|
||||
#include <xrpl/rdb/SociDB.h>
|
||||
|
||||
#include <boost/filesystem/path.hpp>
|
||||
|
||||
#include <filesystem>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
@@ -72,7 +71,7 @@ public:
|
||||
|
||||
StartUpType startUp = StartUpType::Normal;
|
||||
bool standAlone = false;
|
||||
boost::filesystem::path dataDir;
|
||||
std::filesystem::path dataDir;
|
||||
// Indicates whether or not to return the `globalPragma`
|
||||
// from commonPragma()
|
||||
bool useGlobalPragma = false;
|
||||
@@ -135,7 +134,7 @@ public:
|
||||
|
||||
template <std::size_t N, std::size_t M>
|
||||
DatabaseCon(
|
||||
boost::filesystem::path const& dataDir,
|
||||
std::filesystem::path const& dataDir,
|
||||
std::string const& dbName,
|
||||
std::array<std::string, N> const& pragma,
|
||||
std::array<char const*, M> const& initSQL,
|
||||
@@ -147,7 +146,7 @@ public:
|
||||
// Use this constructor to setup checkpointing
|
||||
template <std::size_t N, std::size_t M>
|
||||
DatabaseCon(
|
||||
boost::filesystem::path const& dataDir,
|
||||
std::filesystem::path const& dataDir,
|
||||
std::string const& dbName,
|
||||
std::array<std::string, N> const& pragma,
|
||||
std::array<char const*, M> const& initSQL,
|
||||
@@ -182,7 +181,7 @@ private:
|
||||
|
||||
template <std::size_t N, std::size_t M>
|
||||
DatabaseCon(
|
||||
boost::filesystem::path const& pPath,
|
||||
std::filesystem::path const& pPath,
|
||||
std::vector<std::string> const* commonPragma,
|
||||
std::array<std::string, N> const& pragma,
|
||||
std::array<char const*, M> const& initSQL,
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
#include <xrpl/protocol/TxSearched.h>
|
||||
#include <xrpl/rdb/DatabaseCon.h>
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <boost/variant.hpp>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
@@ -4,8 +4,6 @@
|
||||
#include <xrpl/rdb/DatabaseCon.h>
|
||||
#include <xrpl/server/Manifest.h>
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
struct SavedState
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/** Thread-local flag for span discard signaling.
|
||||
|
||||
SpanGuard::discard() sets tl_discardCurrentSpan to true before calling
|
||||
Span::End(). The OTel SDK calls SpanProcessor::OnEnd() synchronously on
|
||||
the same thread, so FilteringSpanProcessor checks and clears this flag
|
||||
in OnEnd() to drop the span before it enters the batch export queue.
|
||||
|
||||
This side-channel avoids inspecting the Recordable's internals (which
|
||||
vary by exporter type — SpanData vs OtlpRecordable).
|
||||
|
||||
Kept in a separate header to avoid transitive include bloat: SpanGuard.h
|
||||
only needs this flag, not the full Telemetry.h with BasicConfig/Journal.
|
||||
|
||||
@see SpanGuard::discard(), FilteringSpanProcessor (Telemetry.cpp)
|
||||
*/
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
/** When true, the FilteringSpanProcessor drops the current span in
|
||||
OnEnd(). Set by SpanGuard::discard(), cleared by OnEnd(). */
|
||||
inline thread_local bool tl_discardCurrentSpan = false;
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
@@ -1,491 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/** RAII guard for OpenTelemetry trace spans.
|
||||
|
||||
Wraps an OTel Span and Scope behind the pimpl idiom so that no
|
||||
opentelemetry headers are exposed in this public header. When
|
||||
XRPL_ENABLE_TELEMETRY is not defined, SpanGuard is an empty class
|
||||
with all-inline no-op methods — zero overhead, zero dependencies.
|
||||
|
||||
Dependency diagram:
|
||||
|
||||
+------------------------------------------------+
|
||||
| SpanGuard |
|
||||
+------------------------------------------------+
|
||||
| - impl_ : unique_ptr<Impl> (pimpl) |
|
||||
+------------------------------------------------+
|
||||
| + span(cat, prefix, name) [static] |
|
||||
| + childSpan(name) : SpanGuard |
|
||||
| + linkedSpan(name) : SpanGuard |
|
||||
| + hashSpan(cat, name, hash) [static] |
|
||||
| + hashSpan(cat, name, hash, parent) [static] |
|
||||
| + captureContext() : SpanContext |
|
||||
| + setAttribute(key, value) |
|
||||
| + setOk() / setError(desc) |
|
||||
| + addEvent(name) |
|
||||
| + recordException(e) |
|
||||
| + discard() |
|
||||
| + operator bool() |
|
||||
+------------------------------------------------+
|
||||
| hides (pimpl)
|
||||
+-------+-------+
|
||||
| |
|
||||
+--------+ +-------------+
|
||||
| Span | | Scope |
|
||||
| (OTel) | | (OTel, non- |
|
||||
| | | movable) |
|
||||
+--------+ +-------------+
|
||||
|
||||
Static factory methods access the global Telemetry instance
|
||||
internally (via Telemetry::getInstance()), check whether tracing
|
||||
is enabled for the requested subsystem, and return either an
|
||||
active guard or a null (no-op) guard. Callers never need a
|
||||
Telemetry reference.
|
||||
|
||||
Usage examples:
|
||||
|
||||
1. Basic RPC tracing (factory method with category):
|
||||
@code
|
||||
#include <xrpld/rpc/detail/RpcSpanNames.h>
|
||||
|
||||
// At the call site (constants from RpcSpanNames.h):
|
||||
auto span = SpanGuard::span(
|
||||
TraceCategory::Rpc, rpc_span::prefix::command, "submit");
|
||||
span.setAttribute(rpc_span::attr::command, "submit");
|
||||
span.setAttribute(rpc_span::attr::status, rpc_span::val::success);
|
||||
// span ended automatically on scope exit
|
||||
@endcode
|
||||
|
||||
2. Error recording:
|
||||
@code
|
||||
auto span = SpanGuard::span(
|
||||
TraceCategory::Rpc, rpc_span::prefix::command, "submit");
|
||||
try {
|
||||
doWork();
|
||||
span.setOk();
|
||||
} catch (std::exception const& e) {
|
||||
span.recordException(e);
|
||||
}
|
||||
@endcode
|
||||
|
||||
3. Cross-thread context propagation:
|
||||
@code
|
||||
// Thread A: create span and capture context
|
||||
auto span = SpanGuard::span(
|
||||
TraceCategory::Consensus, seg::consensus, "round");
|
||||
auto ctx = span.captureContext();
|
||||
|
||||
// Thread B: create child with captured context
|
||||
auto child = SpanGuard::childSpan("consensus.accept", ctx);
|
||||
@endcode
|
||||
|
||||
4. Conditional check (rarely needed — methods are no-ops on null):
|
||||
@code
|
||||
auto span = SpanGuard::span(
|
||||
TraceCategory::Rpc, rpc_span::prefix::rpc, "request");
|
||||
if (span) {
|
||||
// expensive attribute computation only when active
|
||||
span.setAttribute(rpc_span::attr::payloadSize, computeSize());
|
||||
}
|
||||
@endcode
|
||||
|
||||
5. Tail-based filtering via discard():
|
||||
@code
|
||||
auto span = SpanGuard::span(
|
||||
TraceCategory::Transactions, seg::tx, "process");
|
||||
auto result = preflight(tx);
|
||||
if (result != tesSUCCESS) {
|
||||
span.discard(); // drop span, never exported
|
||||
return result;
|
||||
}
|
||||
@endcode
|
||||
|
||||
@note Thread safety: A SpanGuard must only be used on the thread
|
||||
where it was constructed (the internal Scope binds to the
|
||||
thread-local context stack). Use captureContext() to propagate
|
||||
the trace to other threads.
|
||||
|
||||
@note Move semantics: Move construction transfers ownership of
|
||||
the pimpl pointer — no double-Scope issues. Move assignment is
|
||||
deleted to prevent re-scoping mid-flight.
|
||||
|
||||
@note Known limitations:
|
||||
- Attributes cannot be removed per the OTel spec; use
|
||||
setAttribute with an empty value as a convention.
|
||||
- SpanGuard::span() (raw Span access) is intentionally not
|
||||
exposed — all interaction goes through the public methods.
|
||||
*/
|
||||
|
||||
#include <cstdint>
|
||||
#include <exception>
|
||||
#include <initializer_list>
|
||||
#include <memory>
|
||||
#include <string_view>
|
||||
#include <utility>
|
||||
|
||||
namespace xrpl::telemetry {
|
||||
|
||||
/** Trace subsystem categories for conditional span creation.
|
||||
|
||||
Each value maps to a runtime config flag (e.g. `trace_rpc=1`).
|
||||
Used by SpanGuard::span(TraceCategory, prefix, name) to decide
|
||||
whether to create a real span or return a null guard.
|
||||
*/
|
||||
enum class TraceCategory { Rpc, Transactions, Consensus, Peer, Ledger };
|
||||
|
||||
/** Key-value pair for span event attributes.
|
||||
Used by addEvent(name, attrs) to attach structured metadata to events.
|
||||
*/
|
||||
using EventAttribute = std::pair<std::string_view, std::string_view>;
|
||||
|
||||
/** Opaque wrapper for an OTel context snapshot.
|
||||
|
||||
Used to propagate trace context across threads. Created by
|
||||
SpanGuard::captureContext(), consumed by SpanGuard::childSpan()
|
||||
or SpanGuard::linkedSpan() with an explicit parent/link context.
|
||||
*/
|
||||
class SpanContext
|
||||
{
|
||||
friend class SpanGuard;
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
struct Impl;
|
||||
std::shared_ptr<Impl> impl_;
|
||||
explicit SpanContext(std::shared_ptr<Impl> impl);
|
||||
#endif
|
||||
|
||||
public:
|
||||
SpanContext() = default;
|
||||
|
||||
/** @return true if this context holds a valid trace context. */
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
[[nodiscard]] bool
|
||||
isValid() const;
|
||||
#else
|
||||
// NOLINTBEGIN(readability-convert-member-functions-to-static)
|
||||
[[nodiscard]] bool
|
||||
isValid() const
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// NOLINTEND(readability-convert-member-functions-to-static)
|
||||
#endif
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Real implementation (pimpl, compiled in SpanGuard.cpp)
|
||||
// ---------------------------------------------------------------------------
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
|
||||
/** RAII wrapper that activates a span on construction and ends it on
|
||||
destruction. All OTel types are hidden behind the Impl pointer.
|
||||
Non-copyable, move-constructible.
|
||||
*/
|
||||
class SpanGuard
|
||||
{
|
||||
struct Impl;
|
||||
std::unique_ptr<Impl> impl_;
|
||||
|
||||
explicit SpanGuard(std::unique_ptr<Impl> impl);
|
||||
|
||||
public:
|
||||
/** Construct a null (no-op) guard. All methods are safe to call. */
|
||||
SpanGuard();
|
||||
~SpanGuard();
|
||||
|
||||
SpanGuard(SpanGuard&& other) noexcept;
|
||||
SpanGuard&
|
||||
operator=(SpanGuard&&) = delete;
|
||||
SpanGuard(SpanGuard const&) = delete;
|
||||
SpanGuard&
|
||||
operator=(SpanGuard const&) = delete;
|
||||
|
||||
// --- Static factory methods ----------------------------------------
|
||||
|
||||
/** Create a span guarded by a TraceCategory flag.
|
||||
The span name is built as "prefix.name". Returns a null guard
|
||||
if the category is disabled in config.
|
||||
@param cat Trace subsystem category.
|
||||
@param prefix Span name prefix (e.g. "rpc.command").
|
||||
@param name Span name suffix (e.g. "submit").
|
||||
*/
|
||||
[[nodiscard]] static SpanGuard
|
||||
span(TraceCategory cat, std::string_view prefix, std::string_view name);
|
||||
|
||||
// --- Child / linked span creation ----------------------------------
|
||||
|
||||
/** Create a child span parented to this guard's active context.
|
||||
@param name Span name for the child.
|
||||
@return A new guard, or null if this guard is inactive.
|
||||
*/
|
||||
[[nodiscard]] SpanGuard
|
||||
childSpan(std::string_view name) const;
|
||||
|
||||
/** Create a child span parented to an explicit captured context.
|
||||
@param name Span name for the child.
|
||||
@param parentCtx Context captured via captureContext().
|
||||
@return A new guard, or null if parentCtx is invalid.
|
||||
*/
|
||||
[[nodiscard]] static SpanGuard
|
||||
childSpan(std::string_view name, SpanContext const& parentCtx);
|
||||
|
||||
/** Create a span linked (follows-from) to this guard's span.
|
||||
The new span is NOT a child — it starts a new sub-tree but
|
||||
carries a causal link to this span.
|
||||
@param name Span name for the linked span.
|
||||
@return A new guard, or null if this guard is inactive.
|
||||
*/
|
||||
[[nodiscard]] SpanGuard
|
||||
linkedSpan(std::string_view name) const;
|
||||
|
||||
/** Create a span linked to an explicit captured context.
|
||||
@param name Span name for the linked span.
|
||||
@param linkCtx Context to link from.
|
||||
@return A new guard, or null if linkCtx is invalid.
|
||||
*/
|
||||
[[nodiscard]] static SpanGuard
|
||||
linkedSpan(std::string_view name, SpanContext const& linkCtx);
|
||||
|
||||
// --- Hash-derived span (category-gated) -----------------------------
|
||||
|
||||
/** Create a span whose trace_id is derived from arbitrary hash data.
|
||||
trace_id = hashData[0:16], span_id = random. Gated by the given
|
||||
TraceCategory. All nodes using the same hash independently produce
|
||||
spans under the same trace_id, enabling cross-node correlation
|
||||
without context propagation.
|
||||
@param cat Trace subsystem category.
|
||||
@param name Full span name (e.g. "tx.receive").
|
||||
@param hashData Pointer to at least 16 bytes of hash data.
|
||||
@param hashSize Size of the hash buffer (must be >= 16).
|
||||
*/
|
||||
static SpanGuard
|
||||
hashSpan(
|
||||
TraceCategory cat,
|
||||
std::string_view name,
|
||||
std::uint8_t const* hashData,
|
||||
std::size_t hashSize);
|
||||
|
||||
/** Create a hash-derived span with a remote parent.
|
||||
trace_id = hashData[0:16], parent span_id from protobuf context
|
||||
propagation. Produces a child span of the sender's span while
|
||||
sharing the deterministic trace_id.
|
||||
@param cat Trace subsystem category.
|
||||
@param name Full span name.
|
||||
@param hashData Pointer to at least 16 bytes of hash data.
|
||||
@param hashSize Size of the hash buffer (must be >= 16).
|
||||
@param parentSpanId Pointer to 8 bytes of parent span ID.
|
||||
@param parentSpanSize Size of parent span ID buffer (must be 8).
|
||||
@param traceFlags Trace flags from remote context.
|
||||
*/
|
||||
static SpanGuard
|
||||
hashSpan(
|
||||
TraceCategory cat,
|
||||
std::string_view name,
|
||||
std::uint8_t const* hashData,
|
||||
std::size_t hashSize,
|
||||
std::uint8_t const* parentSpanId,
|
||||
std::size_t parentSpanSize,
|
||||
std::uint8_t traceFlags);
|
||||
|
||||
// --- Context capture -----------------------------------------------
|
||||
|
||||
/** Snapshot the current thread's OTel context for cross-thread use.
|
||||
@return An opaque SpanContext, or an invalid one if null guard.
|
||||
*/
|
||||
[[nodiscard]] SpanContext
|
||||
captureContext() const;
|
||||
|
||||
// --- Attribute setters (explicit overloads, no OTel types) ---------
|
||||
|
||||
/** Set a string attribute. No-op on a null guard. */
|
||||
void
|
||||
setAttribute(std::string_view key, std::string_view value);
|
||||
|
||||
/** Set a string attribute (C-string overload). No-op on a null guard. */
|
||||
void
|
||||
setAttribute(std::string_view key, char const* value);
|
||||
|
||||
/** Set an integer attribute. No-op on a null guard. */
|
||||
void
|
||||
setAttribute(std::string_view key, std::int64_t value);
|
||||
|
||||
/** Set a floating-point attribute. No-op on a null guard. */
|
||||
void
|
||||
setAttribute(std::string_view key, double value);
|
||||
|
||||
/** Set a boolean attribute. No-op on a null guard. */
|
||||
void
|
||||
setAttribute(std::string_view key, bool value);
|
||||
|
||||
// --- Status / events -----------------------------------------------
|
||||
|
||||
/** Mark the span status as OK. No-op on a null guard. */
|
||||
void
|
||||
setOk();
|
||||
|
||||
/** Mark the span status as error. No-op on a null guard.
|
||||
@param description Optional human-readable error description.
|
||||
*/
|
||||
void
|
||||
setError(std::string_view description = "");
|
||||
|
||||
/** Add a named event to the span's timeline. No-op on a null guard.
|
||||
@param name Event name.
|
||||
*/
|
||||
void
|
||||
addEvent(std::string_view name);
|
||||
|
||||
/** Add a named event with key-value attributes to the span's timeline.
|
||||
No-op on a null guard.
|
||||
@param name Event name.
|
||||
@param attrs Attribute pairs (all string_view for simplicity).
|
||||
*/
|
||||
void
|
||||
addEvent(std::string_view name, std::initializer_list<EventAttribute> attrs);
|
||||
|
||||
/** Record an exception as a span event following OTel semantic
|
||||
conventions, and mark the span status as error.
|
||||
No-op on a null guard.
|
||||
@param e The exception to record.
|
||||
*/
|
||||
void
|
||||
recordException(std::exception const& e);
|
||||
|
||||
/** Mark this span for discard and end it immediately.
|
||||
The FilteringSpanProcessor drops the span before it enters the
|
||||
batch export queue. After discard(), the guard is inert.
|
||||
*/
|
||||
void
|
||||
discard();
|
||||
|
||||
/** @return true if this guard holds an active span. */
|
||||
explicit
|
||||
operator bool() const;
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// No-op stub (all inline, zero overhead, no OTel dependency)
|
||||
// ---------------------------------------------------------------------------
|
||||
#else // XRPL_ENABLE_TELEMETRY not defined
|
||||
|
||||
class SpanGuard
|
||||
{
|
||||
public:
|
||||
SpanGuard() = default;
|
||||
~SpanGuard() = default;
|
||||
SpanGuard(SpanGuard&&) noexcept = default;
|
||||
SpanGuard&
|
||||
operator=(SpanGuard&&) = delete;
|
||||
SpanGuard(SpanGuard const&) = delete;
|
||||
SpanGuard&
|
||||
operator=(SpanGuard const&) = delete;
|
||||
|
||||
[[nodiscard]] static SpanGuard
|
||||
span(TraceCategory, std::string_view, std::string_view)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
// NOLINTBEGIN(readability-convert-member-functions-to-static)
|
||||
[[nodiscard]] SpanGuard
|
||||
childSpan(std::string_view) const
|
||||
{
|
||||
return {};
|
||||
}
|
||||
[[nodiscard]] static SpanGuard
|
||||
childSpan(std::string_view, SpanContext const&)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
[[nodiscard]] SpanGuard
|
||||
linkedSpan(std::string_view) const
|
||||
{
|
||||
return {};
|
||||
}
|
||||
[[nodiscard]] static SpanGuard
|
||||
linkedSpan(std::string_view, SpanContext const&)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
[[nodiscard]] static SpanGuard
|
||||
hashSpan(TraceCategory, std::string_view, std::uint8_t const*, std::size_t)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
[[nodiscard]] static SpanGuard
|
||||
hashSpan(
|
||||
TraceCategory,
|
||||
std::string_view,
|
||||
std::uint8_t const*,
|
||||
std::size_t,
|
||||
std::uint8_t const*,
|
||||
std::size_t,
|
||||
std::uint8_t)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
[[nodiscard]] SpanContext
|
||||
captureContext() const
|
||||
{
|
||||
return {};
|
||||
}
|
||||
// NOLINTEND(readability-convert-member-functions-to-static)
|
||||
|
||||
void
|
||||
setAttribute(std::string_view, std::string_view)
|
||||
{
|
||||
}
|
||||
void
|
||||
setAttribute(std::string_view, char const*)
|
||||
{
|
||||
}
|
||||
void
|
||||
setAttribute(std::string_view, std::int64_t)
|
||||
{
|
||||
}
|
||||
void
|
||||
setAttribute(std::string_view, double)
|
||||
{
|
||||
}
|
||||
void
|
||||
setAttribute(std::string_view, bool)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
setOk()
|
||||
{
|
||||
}
|
||||
void
|
||||
setError(std::string_view = "")
|
||||
{
|
||||
}
|
||||
void
|
||||
addEvent(std::string_view)
|
||||
{
|
||||
}
|
||||
void
|
||||
addEvent(std::string_view, std::initializer_list<EventAttribute>)
|
||||
{
|
||||
}
|
||||
void
|
||||
recordException(std::exception const&)
|
||||
{
|
||||
}
|
||||
void
|
||||
discard()
|
||||
{
|
||||
}
|
||||
|
||||
explicit
|
||||
operator bool() const
|
||||
{
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // XRPL_ENABLE_TELEMETRY
|
||||
|
||||
} // namespace xrpl::telemetry
|
||||
@@ -1,121 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/** Compile-time string concatenation utility and shared telemetry constants.
|
||||
*
|
||||
* Provides StaticStr<N> — a compile-time string buffer that implicitly
|
||||
* converts to std::string_view — and join() for dot-separated concatenation.
|
||||
* Module-specific span names (e.g. RPC, consensus) live in their respective
|
||||
* modules and build upon these shared primitives.
|
||||
*
|
||||
* @note These constants are NOT guarded by XRPL_ENABLE_TELEMETRY because
|
||||
* call sites reference them even when SpanGuard methods are no-ops
|
||||
* (the no-op stubs still accept string_view parameters). The compiler
|
||||
* elides all inline constexpr values whose only uses are in dead code.
|
||||
*
|
||||
* @note Json::StaticString (jss.h) is a pointer wrapper without
|
||||
* concatenation support. boost::static_string is not constexpr.
|
||||
* StaticStr<N> exists specifically for compile-time dot-join composition.
|
||||
*
|
||||
* Naming conventions follow OpenTelemetry semantic conventions:
|
||||
* - Attribute keys: "xrpl.<subsystem>.<field>"
|
||||
* - Span prefixes: "<subsystem>[.<component>]"
|
||||
*/
|
||||
|
||||
#include <cstddef>
|
||||
#include <string_view>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
// ===== Compile-time string utility =========================================
|
||||
|
||||
/// Fixed-size character buffer for compile-time string operations.
|
||||
/// Implicitly converts to std::string_view at zero cost.
|
||||
template <std::size_t N>
|
||||
struct StaticStr
|
||||
{
|
||||
char data[N + 1]{};
|
||||
static constexpr std::size_t size = N;
|
||||
|
||||
constexpr StaticStr() = default;
|
||||
|
||||
constexpr explicit StaticStr(char const (&str)[N + 1])
|
||||
{
|
||||
for (std::size_t i = 0; i <= N; ++i)
|
||||
data[i] = str[i];
|
||||
}
|
||||
|
||||
constexpr
|
||||
operator std::string_view() const noexcept
|
||||
{
|
||||
return {data, N};
|
||||
}
|
||||
};
|
||||
|
||||
/// Deduction guide: StaticStr from string literal.
|
||||
template <std::size_t N>
|
||||
StaticStr(char const (&)[N]) -> StaticStr<N - 1>;
|
||||
|
||||
/// Create a StaticStr from a string literal.
|
||||
template <std::size_t N>
|
||||
constexpr auto
|
||||
makeStr(char const (&str)[N])
|
||||
{
|
||||
return StaticStr<N - 1>(str);
|
||||
}
|
||||
|
||||
/// Concatenate two StaticStr values with a dot separator.
|
||||
template <std::size_t A, std::size_t B>
|
||||
constexpr auto
|
||||
join(StaticStr<A> const& lhs, StaticStr<B> const& rhs)
|
||||
{
|
||||
constexpr std::size_t len = A + 1 + B; // lhs + '.' + rhs
|
||||
StaticStr<len> result;
|
||||
std::size_t pos = 0;
|
||||
for (std::size_t i = 0; i < A; ++i)
|
||||
result.data[pos++] = lhs.data[i];
|
||||
result.data[pos++] = '.';
|
||||
for (std::size_t i = 0; i < B; ++i)
|
||||
result.data[pos++] = rhs.data[i];
|
||||
result.data[pos] = '\0';
|
||||
return result;
|
||||
}
|
||||
|
||||
// ===== Shared root segments ================================================
|
||||
|
||||
namespace seg {
|
||||
inline constexpr auto xrpl = makeStr("xrpl");
|
||||
inline constexpr auto rpc = makeStr("rpc");
|
||||
inline constexpr auto tx = makeStr("tx");
|
||||
inline constexpr auto consensus = makeStr("consensus");
|
||||
inline constexpr auto peer = makeStr("peer");
|
||||
inline constexpr auto ledger = makeStr("ledger");
|
||||
inline constexpr auto network = makeStr("network");
|
||||
inline constexpr auto link = makeStr("link");
|
||||
} // namespace seg
|
||||
|
||||
// ===== Shared attribute keys (used across modules) =========================
|
||||
|
||||
namespace attr {
|
||||
inline constexpr auto networkId = join(join(seg::xrpl, seg::network), makeStr("id"));
|
||||
inline constexpr auto networkType = join(join(seg::xrpl, seg::network), makeStr("type"));
|
||||
inline constexpr auto linkType = join(join(seg::xrpl, seg::link), makeStr("type"));
|
||||
|
||||
/// Node health attributes (cross-cutting, used by RPC/consensus/tx spans).
|
||||
inline constexpr auto xrplNode = join(seg::xrpl, makeStr("node"));
|
||||
/// "xrpl.node.amendment_blocked"
|
||||
inline constexpr auto nodeAmendmentBlocked = join(xrplNode, makeStr("amendment_blocked"));
|
||||
/// "xrpl.node.server_state"
|
||||
inline constexpr auto nodeServerState = join(xrplNode, makeStr("server_state"));
|
||||
} // namespace attr
|
||||
|
||||
// ===== Shared attribute values =============================================
|
||||
|
||||
namespace attr_val {
|
||||
inline constexpr auto success = makeStr("success");
|
||||
inline constexpr auto error = makeStr("error");
|
||||
inline constexpr auto followsFrom = makeStr("follows_from");
|
||||
} // namespace attr_val
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
@@ -1,333 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/** Abstract interface for OpenTelemetry distributed tracing.
|
||||
|
||||
Provides the Telemetry base class that all components use to create trace
|
||||
spans. Two concrete implementations exist, selected at construction time
|
||||
by make_Telemetry():
|
||||
|
||||
- TelemetryImpl (Telemetry.cpp): real OTel SDK integration, compiled
|
||||
only when XRPL_ENABLE_TELEMETRY is defined and enabled at runtime.
|
||||
- NullTelemetry (NullTelemetry.cpp): no-op stub used when telemetry is
|
||||
disabled at compile time or runtime.
|
||||
|
||||
Inheritance / dependency diagram:
|
||||
|
||||
+--------------------+
|
||||
| Telemetry | (abstract, this file)
|
||||
| <<interface>> |
|
||||
+---------+----------+
|
||||
|
|
||||
+---------+-----------+-------------------+
|
||||
| | |
|
||||
+---+------------+ +-----+---------+ +------+----------+
|
||||
| TelemetryImpl | | NullTelemetry | | NullTelemetryOtel|
|
||||
| (Telemetry.cpp)| |(NullTelemetry | | (Telemetry.cpp) |
|
||||
| OTel SDK | | .cpp) | | noop w/ OTel API |
|
||||
+----------------+ +---------------+ +------------------+
|
||||
|
||||
The Setup struct holds all configuration parsed from the [telemetry]
|
||||
section of xrpld.cfg. See TelemetryConfig.cpp for the parser and
|
||||
cfg/xrpld-example.cfg for the available options.
|
||||
|
||||
OTel SDK headers are conditionally included behind XRPL_ENABLE_TELEMETRY
|
||||
so that builds without telemetry have zero dependency on opentelemetry-cpp.
|
||||
|
||||
Usage examples:
|
||||
|
||||
1. Check before tracing (typical guard pattern):
|
||||
@code
|
||||
auto& telemetry = registry.getTelemetry();
|
||||
if (telemetry.isEnabled() && telemetry.shouldTraceRpc())
|
||||
{
|
||||
auto span = telemetry.startSpan("rpc.command.server_info");
|
||||
// ... do work, span ends when shared_ptr refcount drops to 0
|
||||
}
|
||||
@endcode
|
||||
|
||||
2. RAII tracing with SpanGuard (preferred):
|
||||
@code
|
||||
if (telemetry.isEnabled() && telemetry.shouldTraceRpc())
|
||||
{
|
||||
SpanGuard guard(telemetry.startSpan("rpc.command.submit"));
|
||||
guard.setAttribute("xrpl.rpc.command", "submit");
|
||||
// ... guard ends span automatically on scope exit
|
||||
}
|
||||
@endcode
|
||||
|
||||
3. Cross-thread context propagation:
|
||||
@code
|
||||
// On thread A: capture context
|
||||
auto ctx = guard.context();
|
||||
// On thread B: create child span with explicit parent
|
||||
auto child = telemetry.startSpan("async.work", ctx);
|
||||
@endcode
|
||||
|
||||
@note Thread safety: The Telemetry interface is safe for concurrent reads
|
||||
(isEnabled, shouldTrace*, getTracer, startSpan) after start() completes.
|
||||
setServiceInstanceId() must be called before start() and is not thread-safe.
|
||||
The OTel SDK's TracerProvider and Tracer are internally thread-safe.
|
||||
*/
|
||||
|
||||
#include <xrpl/basics/BasicConfig.h>
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
#include <opentelemetry/context/context.h>
|
||||
#include <opentelemetry/nostd/shared_ptr.h>
|
||||
#include <opentelemetry/trace/span.h>
|
||||
#include <opentelemetry/trace/tracer.h>
|
||||
#endif
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
class Telemetry
|
||||
{
|
||||
/** Global singleton pointer, set by start()/stop() in the active
|
||||
implementation. Allows SpanGuard factory methods to access the
|
||||
Telemetry instance without callers passing it explicitly.
|
||||
|
||||
Atomic with acquire/release ordering: start()/stop() store on
|
||||
the initialization thread, factory methods load on worker threads.
|
||||
@see setInstance(), getInstance()
|
||||
*/
|
||||
inline static std::atomic<Telemetry*> instance_{nullptr};
|
||||
|
||||
public:
|
||||
/** Get the global Telemetry instance.
|
||||
@return Pointer to the active instance, or nullptr if not started.
|
||||
*/
|
||||
static Telemetry*
|
||||
getInstance()
|
||||
{
|
||||
return instance_.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
/** Set the global Telemetry instance.
|
||||
Called by start()/stop() in concrete implementations.
|
||||
Tests can call this with a mock to override the global instance.
|
||||
@param t Pointer to the Telemetry instance, or nullptr to clear.
|
||||
*/
|
||||
static void
|
||||
setInstance(Telemetry* t)
|
||||
{
|
||||
instance_.store(t, std::memory_order_release);
|
||||
}
|
||||
|
||||
/** Configuration parsed from the [telemetry] section of xrpld.cfg.
|
||||
|
||||
All fields have sensible defaults so the section can be minimal
|
||||
or omitted entirely. See TelemetryConfig.cpp for the parser.
|
||||
*/
|
||||
struct Setup
|
||||
{
|
||||
/** Master switch: true to enable tracing at runtime. */
|
||||
bool enabled = false;
|
||||
|
||||
/** OTel resource attribute `service.name`. */
|
||||
std::string serviceName = "xrpld";
|
||||
|
||||
/** OTel resource attribute `service.version` (set from BuildInfo). */
|
||||
std::string serviceVersion;
|
||||
|
||||
/** OTel resource attribute `service.instance.id` (defaults to node
|
||||
public key). */
|
||||
std::string serviceInstanceId;
|
||||
|
||||
/** OTLP/HTTP endpoint URL where spans are sent. */
|
||||
std::string exporterEndpoint = "http://localhost:4318/v1/traces";
|
||||
|
||||
/** Whether to use TLS for the exporter connection. */
|
||||
bool useTls = false;
|
||||
|
||||
/** Path to a CA certificate bundle for TLS verification. */
|
||||
std::string tlsCertPath;
|
||||
|
||||
/** Head-based sampling ratio in [0.0, 1.0]. 1.0 = trace everything.
|
||||
This is a head-based (pre-decision) sampler using
|
||||
TraceIdRatioBasedSampler — the decision to record or drop a
|
||||
trace is made before the root span starts. For post-hoc
|
||||
(tail-based) filtering, see SpanGuard::discard().
|
||||
*/
|
||||
double samplingRatio = 1.0;
|
||||
|
||||
/** Maximum number of spans per batch export. */
|
||||
std::uint32_t batchSize = 512;
|
||||
|
||||
/** Delay between batch exports. */
|
||||
std::chrono::milliseconds batchDelay{5000};
|
||||
|
||||
/** Maximum number of spans queued before dropping. */
|
||||
std::uint32_t maxQueueSize = 2048;
|
||||
|
||||
/** Network identifier, added as an OTel resource attribute. */
|
||||
std::uint32_t networkId = 0;
|
||||
|
||||
/** Network type label (e.g. "mainnet", "testnet", "devnet"). */
|
||||
std::string networkType = "mainnet";
|
||||
|
||||
/** Enable tracing for transaction processing. */
|
||||
bool traceTransactions = true;
|
||||
|
||||
/** Enable tracing for consensus rounds. */
|
||||
bool traceConsensus = true;
|
||||
|
||||
/** Enable tracing for RPC request handling. */
|
||||
bool traceRpc = true;
|
||||
|
||||
/** Enable tracing for peer-to-peer messages (disabled by default
|
||||
due to high volume). */
|
||||
bool tracePeer = false;
|
||||
|
||||
/** Enable tracing for ledger close/accept. */
|
||||
bool traceLedger = true;
|
||||
|
||||
/** Strategy for cross-node consensus trace correlation.
|
||||
"deterministic" — derive trace_id from ledger hash so all
|
||||
validators in the same round share the same trace_id.
|
||||
"attribute" — random trace_id, correlate via ledger_id attribute.
|
||||
*/
|
||||
std::string consensusTraceStrategy = "deterministic";
|
||||
};
|
||||
|
||||
virtual ~Telemetry() = default;
|
||||
|
||||
/** Update the service instance ID (OTel resource attribute
|
||||
`service.instance.id`).
|
||||
|
||||
Must be called before start(). The node public key is not available
|
||||
when Telemetry is constructed (during the ApplicationImp member
|
||||
initializer list), so this setter allows Application::setup() to
|
||||
inject the identity once nodeIdentity_ is known.
|
||||
|
||||
@param id The node's base58-encoded public key or custom identifier.
|
||||
*/
|
||||
virtual void
|
||||
setServiceInstanceId(std::string const& id)
|
||||
{
|
||||
// Default no-op for NullTelemetry implementations.
|
||||
(void)id;
|
||||
}
|
||||
|
||||
/** Initialize the tracing pipeline (exporter, processor, provider).
|
||||
Call after construction.
|
||||
*/
|
||||
virtual void
|
||||
start() = 0;
|
||||
|
||||
/** Flush pending spans and shut down the tracing pipeline.
|
||||
Call before destruction.
|
||||
*/
|
||||
virtual void
|
||||
stop() = 0;
|
||||
|
||||
/** @return true if this instance is actively exporting spans. */
|
||||
virtual bool
|
||||
isEnabled() const = 0;
|
||||
|
||||
/** @return true if transaction processing should be traced. */
|
||||
virtual bool
|
||||
shouldTraceTransactions() const = 0;
|
||||
|
||||
/** @return true if consensus rounds should be traced. */
|
||||
virtual bool
|
||||
shouldTraceConsensus() const = 0;
|
||||
|
||||
/** @return true if RPC request handling should be traced. */
|
||||
virtual bool
|
||||
shouldTraceRpc() const = 0;
|
||||
|
||||
/** @return true if peer-to-peer messages should be traced. */
|
||||
virtual bool
|
||||
shouldTracePeer() const = 0;
|
||||
|
||||
/** @return true if ledger close/accept should be traced. */
|
||||
virtual bool
|
||||
shouldTraceLedger() const = 0;
|
||||
|
||||
/** @return The configured consensus trace correlation strategy. */
|
||||
virtual std::string const&
|
||||
getConsensusTraceStrategy() const = 0;
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
/** Get or create a named tracer instance.
|
||||
|
||||
@param name Tracer name used to identify the instrumentation library.
|
||||
@return A shared pointer to the Tracer.
|
||||
*/
|
||||
virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer>
|
||||
getTracer(std::string_view name = "xrpld") = 0;
|
||||
|
||||
/** Start a new span on the current thread's context.
|
||||
|
||||
The span becomes a child of the current active span (if any) via
|
||||
OpenTelemetry's context propagation.
|
||||
|
||||
@param name Span name (typically "rpc.command.<cmd>").
|
||||
@param kind The span kind (defaults to kInternal). Possible values:
|
||||
- kInternal: default, in-process operation
|
||||
- kServer: incoming synchronous request (e.g. RPC)
|
||||
- kClient: outgoing synchronous request
|
||||
- kProducer: async message send (e.g. peer broadcast)
|
||||
- kConsumer: async message receive
|
||||
@return A shared pointer to the new Span.
|
||||
*/
|
||||
virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
|
||||
startSpan(
|
||||
std::string_view name,
|
||||
opentelemetry::trace::SpanKind kind = opentelemetry::trace::SpanKind::kInternal) = 0;
|
||||
|
||||
/** Start a new span with an explicit parent context.
|
||||
|
||||
Use this overload when the parent span is not on the current
|
||||
thread's context stack (e.g. cross-thread trace propagation).
|
||||
|
||||
@param name Span name.
|
||||
@param parentContext The parent span's context.
|
||||
@param kind The span kind (defaults to kInternal).
|
||||
@return A shared pointer to the new Span.
|
||||
*/
|
||||
virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
|
||||
startSpan(
|
||||
std::string_view name,
|
||||
opentelemetry::context::Context const& parentContext,
|
||||
opentelemetry::trace::SpanKind kind = opentelemetry::trace::SpanKind::kInternal) = 0;
|
||||
#endif
|
||||
};
|
||||
|
||||
/** Create a Telemetry instance.
|
||||
|
||||
Returns a TelemetryImpl when setup.enabled is true, or a
|
||||
NullTelemetry no-op stub otherwise.
|
||||
|
||||
@param setup Configuration from the [telemetry] config section.
|
||||
@param journal Journal for log output during initialization.
|
||||
*/
|
||||
std::unique_ptr<Telemetry>
|
||||
make_Telemetry(Telemetry::Setup const& setup, beast::Journal journal);
|
||||
|
||||
/** Parse the [telemetry] config section into a Setup struct.
|
||||
|
||||
@param section The [telemetry] config section.
|
||||
@param nodePublicKey Node public key, used as default instance ID.
|
||||
@param version Build version string.
|
||||
@param networkId Network identifier from [network_id] config
|
||||
(0 = mainnet, 1 = testnet, 2 = devnet).
|
||||
@return A populated Setup struct with defaults for missing values.
|
||||
*/
|
||||
Telemetry::Setup
|
||||
setup_Telemetry(
|
||||
Section const& section,
|
||||
std::string const& nodePublicKey,
|
||||
std::string const& version,
|
||||
std::uint32_t networkId);
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
@@ -1,100 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/** Utilities for trace context propagation across nodes.
|
||||
|
||||
Provides serialization/deserialization of OTel trace context to/from
|
||||
Protocol Buffer TraceContext messages (P2P cross-node propagation).
|
||||
|
||||
Only compiled when XRPL_ENABLE_TELEMETRY is defined.
|
||||
|
||||
TODO: These utilities are not yet wired into the P2P message flow.
|
||||
To enable cross-node distributed traces, call injectToProtobuf() in
|
||||
PeerImp when sending TMTransaction/TMProposeSet messages, and call
|
||||
extractFromProtobuf() in the corresponding message handlers to
|
||||
reconstruct the parent span context before starting a child span.
|
||||
This was deferred to validate single-node tracing performance first.
|
||||
*/
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
|
||||
#include <xrpl/proto/xrpl.pb.h>
|
||||
|
||||
#include <opentelemetry/context/context.h>
|
||||
#include <opentelemetry/trace/context.h>
|
||||
#include <opentelemetry/trace/default_span.h>
|
||||
#include <opentelemetry/trace/span_context.h>
|
||||
#include <opentelemetry/trace/trace_flags.h>
|
||||
#include <opentelemetry/trace/trace_id.h>
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
/** Extract OTel context from a protobuf TraceContext message.
|
||||
|
||||
@param proto The protobuf TraceContext received from a peer.
|
||||
@return An OTel Context with the extracted parent span, or an empty
|
||||
context if the protobuf fields are missing or invalid.
|
||||
*/
|
||||
inline opentelemetry::context::Context
|
||||
extractFromProtobuf(protocol::TraceContext const& proto)
|
||||
{
|
||||
namespace trace = opentelemetry::trace;
|
||||
|
||||
if (!proto.has_trace_id() || proto.trace_id().size() != 16 || !proto.has_span_id() ||
|
||||
proto.span_id().size() != 8)
|
||||
{
|
||||
return opentelemetry::context::Context{};
|
||||
}
|
||||
|
||||
auto const* rawTraceId = reinterpret_cast<std::uint8_t const*>(proto.trace_id().data());
|
||||
auto const* rawSpanId = reinterpret_cast<std::uint8_t const*>(proto.span_id().data());
|
||||
trace::TraceId const traceId(
|
||||
opentelemetry::nostd::span<std::uint8_t const, 16>(rawTraceId, 16));
|
||||
trace::SpanId const spanId(opentelemetry::nostd::span<std::uint8_t const, 8>(rawSpanId, 8));
|
||||
trace::TraceFlags const flags(
|
||||
proto.has_trace_flags() ? static_cast<std::uint8_t>(proto.trace_flags())
|
||||
: static_cast<std::uint8_t>(0));
|
||||
|
||||
trace::SpanContext const spanCtx(traceId, spanId, flags, /* remote = */ true);
|
||||
|
||||
return opentelemetry::context::Context{}.SetValue(
|
||||
trace::kSpanKey,
|
||||
opentelemetry::nostd::shared_ptr<trace::Span>(new trace::DefaultSpan(spanCtx)));
|
||||
}
|
||||
|
||||
/** Inject the current span's trace context into a protobuf TraceContext.
|
||||
|
||||
@param ctx The OTel context containing the span to propagate.
|
||||
@param proto The protobuf TraceContext to populate.
|
||||
*/
|
||||
inline void
|
||||
injectToProtobuf(opentelemetry::context::Context const& ctx, protocol::TraceContext& proto)
|
||||
{
|
||||
namespace trace = opentelemetry::trace;
|
||||
|
||||
auto const span = trace::GetSpan(ctx);
|
||||
if (!span)
|
||||
return;
|
||||
|
||||
auto const& spanCtx = span->GetContext();
|
||||
if (!spanCtx.IsValid())
|
||||
return;
|
||||
|
||||
// Serialize trace_id (16 bytes)
|
||||
auto const& traceId = spanCtx.trace_id();
|
||||
proto.set_trace_id(traceId.Id().data(), trace::TraceId::kSize);
|
||||
|
||||
// Serialize span_id (8 bytes)
|
||||
auto const& spanId = spanCtx.span_id();
|
||||
proto.set_span_id(spanId.Id().data(), trace::SpanId::kSize);
|
||||
|
||||
// Serialize flags
|
||||
proto.set_trace_flags(spanCtx.trace_flags().flags());
|
||||
}
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
|
||||
#endif // XRPL_ENABLE_TELEMETRY
|
||||
@@ -11,6 +11,7 @@ float-cast-overflow:external
|
||||
float-divide-by-zero:external
|
||||
function:external
|
||||
implicit-integer-sign-change:external
|
||||
implicit-signed-integer-truncation::external
|
||||
implicit-signed-integer-truncation:external
|
||||
implicit-unsigned-integer-truncation:external
|
||||
integer-divide-by-zero:external
|
||||
@@ -70,15 +71,145 @@ vla-bound:boost
|
||||
vptr_check:boost
|
||||
vptr:boost
|
||||
|
||||
# Google protobuf - intentional overflows in hash functions
|
||||
# Google protobuf
|
||||
undefined:protobuf
|
||||
|
||||
# Suppress UBSan errors in xrpld code by source file path
|
||||
undefined:src/libxrpl/basics/base64.cpp
|
||||
undefined:src/libxrpl/basics/Number.cpp
|
||||
undefined:src/libxrpl/beast/utility/beast_Journal.cpp
|
||||
undefined:src/libxrpl/crypto/RFC1751.cpp
|
||||
undefined:src/libxrpl/ledger/ApplyView.cpp
|
||||
undefined:src/libxrpl/ledger/View.cpp
|
||||
undefined:src/libxrpl/protocol/Permissions.cpp
|
||||
undefined:src/libxrpl/protocol/STAmount.cpp
|
||||
undefined:src/libxrpl/protocol/STPathSet.cpp
|
||||
undefined:src/libxrpl/protocol/tokens.cpp
|
||||
undefined:src/libxrpl/shamap/SHAMap.cpp
|
||||
undefined:src/test/app/Batch_test.cpp
|
||||
undefined:src/test/app/Invariants_test.cpp
|
||||
undefined:src/test/app/NFToken_test.cpp
|
||||
undefined:src/test/app/Offer_test.cpp
|
||||
undefined:src/test/app/Path_test.cpp
|
||||
undefined:src/test/basics/XRPAmount_test.cpp
|
||||
undefined:src/test/beast/LexicalCast_test.cpp
|
||||
undefined:src/test/jtx/impl/acctdelete.cpp
|
||||
undefined:src/test/ledger/SkipList_test.cpp
|
||||
undefined:src/test/rpc/Subscribe_test.cpp
|
||||
undefined:src/tests/libxrpl/basics/RangeSet.cpp
|
||||
undefined:src/xrpld/app/main/BasicApp.cpp
|
||||
undefined:src/xrpld/app/main/BasicApp.cpp
|
||||
undefined:src/xrpld/app/misc/detail/AmendmentTable.cpp
|
||||
undefined:src/xrpld/app/misc/NetworkOPs.cpp
|
||||
undefined:src/libxrpl/json/json_value.cpp
|
||||
undefined:src/xrpld/app/paths/detail/StrandFlow.h
|
||||
undefined:src/xrpld/app/tx/detail/NFTokenMint.cpp
|
||||
undefined:src/xrpld/app/tx/detail/OracleSet.cpp
|
||||
undefined:src/xrpld/core/detail/JobQueue.cpp
|
||||
undefined:src/xrpld/core/detail/Workers.cpp
|
||||
undefined:src/xrpld/rpc/detail/Role.cpp
|
||||
undefined:src/xrpld/rpc/handlers/GetAggregatePrice.cpp
|
||||
undefined:xrpl/basics/base_uint.h
|
||||
undefined:xrpl/basics/DecayingSample.h
|
||||
undefined:xrpl/beast/test/yield_to.h
|
||||
undefined:xrpl/beast/xor_shift_engine.h
|
||||
undefined:xrpl/nodestore/detail/varint.h
|
||||
undefined:xrpl/peerfinder/detail/Counts.h
|
||||
undefined:xrpl/protocol/nft.h
|
||||
|
||||
# basic_string.h:483:51: runtime error: unsigned integer overflow
|
||||
unsigned-integer-overflow:basic_string.h
|
||||
unsigned-integer-overflow:bits/chrono.h
|
||||
unsigned-integer-overflow:bits/random.h
|
||||
unsigned-integer-overflow:bits/random.tcc
|
||||
unsigned-integer-overflow:bits/stl_algobase.h
|
||||
unsigned-integer-overflow:bits/uniform_int_dist.h
|
||||
unsigned-integer-overflow:string_view
|
||||
|
||||
# runtime error: unsigned integer overflow: 0 - 1 cannot be represented in type 'std::size_t' (aka 'unsigned long')
|
||||
unsigned-integer-overflow:src/libxrpl/basics/base64.cpp
|
||||
unsigned-integer-overflow:src/libxrpl/basics/Number.cpp
|
||||
unsigned-integer-overflow:src/libxrpl/crypto/RFC1751.cpp
|
||||
unsigned-integer-overflow:src/libxrpl/json/json_value.cpp
|
||||
unsigned-integer-overflow:src/libxrpl/ledger/ApplyView.cpp
unsigned-integer-overflow:src/libxrpl/ledger/View.cpp
unsigned-integer-overflow:src/libxrpl/protocol/Permissions.cpp
unsigned-integer-overflow:src/libxrpl/protocol/STAmount.cpp
unsigned-integer-overflow:src/libxrpl/protocol/STPathSet.cpp
unsigned-integer-overflow:src/libxrpl/protocol/tokens.cpp
unsigned-integer-overflow:src/libxrpl/shamap/SHAMap.cpp
unsigned-integer-overflow:src/test/app/Batch_test.cpp
unsigned-integer-overflow:src/test/app/Invariants_test.cpp
unsigned-integer-overflow:src/test/app/NFToken_test.cpp
unsigned-integer-overflow:src/test/app/Offer_test.cpp
unsigned-integer-overflow:src/test/app/Path_test.cpp
unsigned-integer-overflow:src/test/basics/XRPAmount_test.cpp
unsigned-integer-overflow:src/test/beast/LexicalCast_test.cpp
unsigned-integer-overflow:src/test/jtx/impl/acctdelete.cpp
unsigned-integer-overflow:src/test/ledger/SkipList_test.cpp
unsigned-integer-overflow:src/test/rpc/Subscribe_test.cpp
unsigned-integer-overflow:src/tests/libxrpl/basics/RangeSet.cpp
unsigned-integer-overflow:src/xrpld/app/main/BasicApp.cpp
unsigned-integer-overflow:src/xrpld/app/misc/detail/AmendmentTable.cpp
unsigned-integer-overflow:src/xrpld/app/misc/NetworkOPs.cpp
unsigned-integer-overflow:src/xrpld/app/paths/detail/StrandFlow.h
unsigned-integer-overflow:src/xrpld/app/tx/detail/NFTokenMint.cpp
unsigned-integer-overflow:src/xrpld/app/tx/detail/OracleSet.cpp
unsigned-integer-overflow:src/xrpld/rpc/detail/Role.cpp
unsigned-integer-overflow:src/xrpld/rpc/handlers/GetAggregatePrice.cpp
unsigned-integer-overflow:xrpl/basics/base_uint.h
unsigned-integer-overflow:xrpl/basics/DecayingSample.h
unsigned-integer-overflow:xrpl/beast/test/yield_to.h
unsigned-integer-overflow:xrpl/beast/xor_shift_engine.h
unsigned-integer-overflow:xrpl/nodestore/detail/varint.h
unsigned-integer-overflow:xrpl/peerfinder/detail/Counts.h
unsigned-integer-overflow:xrpl/protocol/nft.h

# Xrpld intentional overflows and operations
# STAmount uses intentional negation of INT64_MIN and overflow in arithmetic
signed-integer-overflow:src/libxrpl/protocol/STAmount.cpp
unsigned-integer-overflow:src/libxrpl/protocol/STAmount.cpp

# XRPAmount test intentional overflows
signed-integer-overflow:src/test/basics/XRPAmount_test.cpp

# Peerfinder intentional overflow in counter arithmetic
unsigned-integer-overflow:src/xrpld/peerfinder/detail/Counts.h

# Signed integer overflow suppressions
signed-integer-overflow:src/test/beast/LexicalCast_test.cpp

# External library suppressions
unsigned-integer-overflow:nudb/detail/xxhash.hpp

# Loan_test.cpp intentional underflow in test arithmetic
unsigned-integer-overflow:src/test/app/Loan_test.cpp
undefined:src/test/app/Loan_test.cpp

# Source tree restructured paths (libxrpl/tx/transactors/)
# These duplicate the xrpld/app/tx/detail entries above for the new layout
unsigned-integer-overflow:src/libxrpl/tx/transactors/oracle/OracleSet.cpp
undefined:src/libxrpl/tx/transactors/oracle/OracleSet.cpp
unsigned-integer-overflow:src/libxrpl/tx/transactors/nft/NFTokenMint.cpp
undefined:src/libxrpl/tx/transactors/nft/NFTokenMint.cpp

# Protobuf intentional overflows in hash functions
# Protobuf uses intentional unsigned overflow for hash computation (stringpiece.h:393)
unsigned-integer-overflow:google/protobuf/stubs/stringpiece.h

# gRPC intentional overflows in timer calculations
# gRPC intentional overflows
# gRPC uses intentional overflow in timer calculations
unsigned-integer-overflow:grpc
unsigned-integer-overflow:timer_manager.cc

# RocksDB intentional unsigned integer overflows in hash functions and CRC calculations
# Standard library intentional overflows
# These are intentional overflows in random number generation and character conversion
unsigned-integer-overflow:__random/seed_seq.h
unsigned-integer-overflow:__charconv/traits.h


# Suppress errors in RocksDB
# RocksDB uses intentional unsigned integer overflows in hash functions and CRC calculations
unsigned-integer-overflow:rocks*/*/util/xxhash.h
unsigned-integer-overflow:rocks*/*/util/xxph3.h
unsigned-integer-overflow:rocks*/*/util/hash.cc
@@ -90,14 +221,13 @@ unsigned-integer-overflow:rocks*/*/table/format.cc
unsigned-integer-overflow:rocks*/*/table/block_based/block_based_table_builder.cc
unsigned-integer-overflow:rocks*/*/table/block_based/reader_common.cc
unsigned-integer-overflow:rocks*/*/db/version_set.cc

# RocksDB misaligned loads (intentional for performance on ARM64)
alignment:rocks*/*/util/crc32c_arm64.cc
undefined:rocks*/*/util/crc32c_arm64.cc
undefined:rocks*/*/util/xxhash.h

# nudb intentional overflows in hash functions
unsigned-integer-overflow:nudb/detail/xxhash.hpp
alignment:nudb/detail/xxhash.hpp
undefined:nudb

# Snappy compression library intentional overflows
unsigned-integer-overflow:snappy.cc
@@ -109,40 +239,10 @@ unsigned-integer-overflow:absl/base/internal/low_level_alloc.cc
unsigned-integer-overflow:absl/hash/internal/hash.h
unsigned-integer-overflow:absl/container/internal/raw_hash_set.h

# Standard library intentional overflows
unsigned-integer-overflow:basic_string.h
unsigned-integer-overflow:bits/chrono.h
unsigned-integer-overflow:bits/random.h
unsigned-integer-overflow:bits/random.tcc
unsigned-integer-overflow:bits/stl_algobase.h
unsigned-integer-overflow:bits/uniform_int_dist.h
unsigned-integer-overflow:string_view
unsigned-integer-overflow:__random/seed_seq.h
unsigned-integer-overflow:__charconv/traits.h
# Standard library intentional overflows in chrono duration arithmetic
unsigned-integer-overflow:__chrono/duration.h

# =============================================================================
# Rippled code suppressions
# =============================================================================

# Signed integer negation (-value) in amount types.
# INT64_MIN cannot occur in practice due to domain invariants (mantissa ranges
# are well within int64_t bounds), but UBSan flags the pattern as potential
# signed overflow. Narrowed to operator- to avoid suppressing unrelated
# overflows anywhere in a stack trace containing these type names.
signed-integer-overflow:operator-*IOUAmount*
signed-integer-overflow:operator-*XRPAmount*
signed-integer-overflow:operator-*MPTAmount*
signed-integer-overflow:operator-*STAmount*

# STAmount::operator+ signed addition — operands are bounded by total supply
# (~10^17 for XRP, ~10^18 for MPT) so overflow cannot occur in practice.
signed-integer-overflow:operator+*STAmount*

# STAmount::getRate uses unsigned shift and addition
unsigned-integer-overflow:*STAmount*getRate*
# STAmount::serialize uses unsigned bitwise operations
unsigned-integer-overflow:*STAmount*serialize*

# nft::cipheredTaxon uses intentional uint32 wraparound (LCG permutation)
unsigned-integer-overflow:cipheredTaxon
# Suppress undefined errors in RocksDB and nudb
undefined:rocks.*/*/util/crc32c_arm64.cc
undefined:rocks.*/*/util/xxhash.h
undefined:nudb
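# Usage note (not part of the suppression list above): entries in this
# category:pattern format are read by the UBSan runtime when UBSAN_OPTIONS
# points at the file. The file name in this example is illustrative; the
# repository's actual file name and invocation may differ.
#
#   UBSAN_OPTIONS=suppressions=sanitizer-ubsan-suppressions.txt:print_stacktrace=1 ./rippled --unittest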
@@ -2,20 +2,18 @@

#include <xrpl/basics/contract.h>

#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>

#include <archive.h>
#include <archive_entry.h>

#include <cstddef>
#include <filesystem>
#include <memory>
#include <stdexcept>

namespace xrpl {

void
extractTarLz4(boost::filesystem::path const& src, boost::filesystem::path const& dst)
extractTarLz4(std::filesystem::path const& src, std::filesystem::path const& dst)
{
    if (!is_regular_file(src))
        Throw<std::runtime_error>("Invalid source file");

@@ -1,29 +1,24 @@
|
||||
#include <xrpl/basics/FileUtilities.h>
|
||||
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
#include <boost/system/detail/errc.hpp>
|
||||
#include <boost/system/detail/error_code.hpp>
|
||||
#include <boost/system/errc.hpp>
|
||||
|
||||
#include <cerrno>
|
||||
#include <cstddef>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <ios>
|
||||
#include <iterator>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <system_error>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
std::string
|
||||
getFileContents(
|
||||
boost::system::error_code& ec,
|
||||
boost::filesystem::path const& sourcePath,
|
||||
std::error_code& ec,
|
||||
std::filesystem::path const& sourcePath,
|
||||
std::optional<std::size_t> maxSize)
|
||||
{
|
||||
using namespace boost::filesystem;
|
||||
using namespace boost::system::errc;
|
||||
using namespace std::filesystem;
|
||||
|
||||
path const fullPath{canonical(sourcePath, ec)};
|
||||
if (ec)
|
||||
@@ -32,15 +27,15 @@ getFileContents(
|
||||
if (maxSize && (file_size(fullPath, ec) > *maxSize || ec))
|
||||
{
|
||||
if (!ec)
|
||||
ec = make_error_code(file_too_large);
|
||||
ec = make_error_code(std::errc::file_too_large);
|
||||
return {};
|
||||
}
|
||||
|
||||
std::ifstream fileStream(fullPath.string(), std::ios::in);
|
||||
std::ifstream fileStream(fullPath, std::ios::in);
|
||||
|
||||
if (!fileStream)
|
||||
{
|
||||
ec = make_error_code(static_cast<errc_t>(errno));
|
||||
ec.assign(errno, std::generic_category());
|
||||
return {};
|
||||
}
|
||||
|
||||
@@ -49,7 +44,7 @@ getFileContents(
|
||||
|
||||
if (fileStream.bad())
|
||||
{
|
||||
ec = make_error_code(static_cast<errc_t>(errno));
|
||||
ec.assign(errno, std::generic_category());
|
||||
return {};
|
||||
}
|
||||
|
||||
@@ -58,18 +53,15 @@ getFileContents(
|
||||
|
||||
void
|
||||
writeFileContents(
|
||||
boost::system::error_code& ec,
|
||||
boost::filesystem::path const& destPath,
|
||||
std::error_code& ec,
|
||||
std::filesystem::path const& destPath,
|
||||
std::string const& contents)
|
||||
{
|
||||
using namespace boost::filesystem;
|
||||
using namespace boost::system::errc;
|
||||
|
||||
std::ofstream fileStream(destPath.string(), std::ios::out | std::ios::trunc);
|
||||
std::ofstream fileStream(destPath, std::ios::out | std::ios::trunc);
|
||||
|
||||
if (!fileStream)
|
||||
{
|
||||
ec = make_error_code(static_cast<errc_t>(errno));
|
||||
ec.assign(errno, std::generic_category());
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -77,7 +69,7 @@ writeFileContents(
|
||||
|
||||
if (fileStream.bad())
|
||||
{
|
||||
ec = make_error_code(static_cast<errc_t>(errno));
|
||||
ec.assign(errno, std::generic_category());
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
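A minimal caller sketch for the migrated FileUtilities interface above. This is illustrative only: the function and file names below are made up, but the std::error_code parameter, the std::filesystem::path argument, and the std::errc::file_too_large comparison follow the new signatures shown in the diff.

    #include <xrpl/basics/FileUtilities.h>

    #include <string>
    #include <system_error>

    void loadConfigText()
    {
        std::error_code ec;
        // getFileContents now reports failures through std::error_code and takes
        // a std::filesystem::path; the 1 MiB cap is an arbitrary example value.
        std::string const text = xrpl::getFileContents(ec, "example.cfg", 1024 * 1024);
        if (ec == std::errc::file_too_large)
            return;  // the file exceeded the caller-supplied maxSize
        if (ec)
            return;  // any other filesystem or stream error
        // ... use text ...
    }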
@@ -5,10 +5,10 @@
|
||||
#include <xrpl/beast/utility/instrumentation.h>
|
||||
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstring>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
#include <iostream>
|
||||
@@ -53,7 +53,7 @@ Logs::File::isOpen() const noexcept
|
||||
}
|
||||
|
||||
bool
|
||||
Logs::File::open(boost::filesystem::path const& path)
|
||||
Logs::File::open(std::filesystem::path const& path)
|
||||
{
|
||||
close();
|
||||
|
||||
@@ -112,7 +112,7 @@ Logs::Logs(beast::severities::Severity thresh) : thresh_(thresh) // default sev
|
||||
}
|
||||
|
||||
bool
|
||||
Logs::open(boost::filesystem::path const& pathToLogFile)
|
||||
Logs::open(std::filesystem::path const& pathToLogFile)
|
||||
{
|
||||
return file_.open(pathToLogFile);
|
||||
}
|
||||
|
||||
@@ -107,7 +107,7 @@ encode(void* dest, void const* src, std::size_t len)
|
||||
char const* in = static_cast<char const*>(src);
|
||||
auto const tab = base64::get_alphabet();
|
||||
|
||||
for (auto n = len / 3; n > 0; --n)
|
||||
for (auto n = len / 3; n != 0u; --n)
|
||||
{
|
||||
*out++ = tab[(in[0] & 0xfc) >> 2];
|
||||
*out++ = tab[((in[0] & 0x03) << 4) + ((in[1] & 0xf0) >> 4)];
|
||||
|
||||
@@ -15,8 +15,6 @@
|
||||
#include <xrpl/nodestore/detail/EncodedBlob.h>
|
||||
#include <xrpl/nodestore/detail/codec.h>
|
||||
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
#include <boost/system/detail/errc.hpp>
|
||||
|
||||
#include <nudb/context.hpp>
|
||||
@@ -35,12 +33,14 @@
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <exception>
|
||||
#include <filesystem>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <sstream>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <system_error>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
@@ -131,7 +131,7 @@ public:
|
||||
void
|
||||
open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt) override
|
||||
{
|
||||
using namespace boost::filesystem;
|
||||
using namespace std::filesystem;
|
||||
if (db_.is_open())
|
||||
{
|
||||
// LCOV_EXCL_START
|
||||
@@ -194,11 +194,12 @@ public:
|
||||
|
||||
if (deletePath_)
|
||||
{
|
||||
boost::filesystem::remove_all(name_, ec);
|
||||
if (ec)
|
||||
std::error_code fsec;
|
||||
std::filesystem::remove_all(name_, fsec);
|
||||
if (fsec)
|
||||
{
|
||||
JLOG(j_.fatal())
|
||||
<< "Filesystem remove_all of " << name_ << " failed with: " << ec.message();
|
||||
JLOG(j_.fatal()) << "Filesystem remove_all of " << name_
|
||||
<< " failed with: " << fsec.message();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -374,7 +375,7 @@ private:
|
||||
static std::size_t
|
||||
parseBlockSize(std::string const& name, Section const& keyValues, beast::Journal journal)
|
||||
{
|
||||
using namespace boost::filesystem;
|
||||
using namespace std::filesystem;
|
||||
auto const folder = path(name);
|
||||
auto const kp = (folder / "nudb.key").string();
|
||||
|
||||
|
||||
@@ -8,9 +8,6 @@
|
||||
#include <xrpl/nodestore/Scheduler.h>
|
||||
#include <xrpl/nodestore/Types.h>
|
||||
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
|
||||
#include <rocksdb/advanced_options.h>
|
||||
#include <rocksdb/cache.h>
|
||||
#include <rocksdb/compression_type.h>
|
||||
@@ -26,6 +23,7 @@
|
||||
|
||||
#include <bit>
|
||||
#include <cstddef>
|
||||
#include <filesystem>
|
||||
#include <functional>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
@@ -265,8 +263,8 @@ public:
|
||||
m_db.reset();
|
||||
if (m_deletePath)
|
||||
{
|
||||
boost::filesystem::path const dir = m_name;
|
||||
boost::filesystem::remove_all(dir);
|
||||
std::filesystem::path const dir = m_name;
|
||||
std::filesystem::remove_all(dir);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,13 +4,11 @@
|
||||
#include <xrpl/core/JobQueue.h>
|
||||
#include <xrpl/core/ServiceRegistry.h>
|
||||
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
|
||||
#include <soci/blob.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <mutex>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
@@ -44,8 +42,8 @@ getSociSqliteInit(std::string const& name, std::string const& dir, std::string c
|
||||
Throw<std::runtime_error>(
|
||||
"Sqlite databases must specify a dir and a name. Name: " + name + " Dir: " + dir);
|
||||
}
|
||||
boost::filesystem::path file(dir);
|
||||
if (is_directory(file))
|
||||
std::filesystem::path file(dir);
|
||||
if (std::filesystem::is_directory(file))
|
||||
file /= name + ext;
|
||||
return file.string();
|
||||
}
|
||||
|
||||
@@ -5,13 +5,12 @@
|
||||
#include <xrpl/rdb/DBInit.h>
|
||||
#include <xrpl/rdb/DatabaseCon.h>
|
||||
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
#include <boost/format.hpp> // IWYU pragma: keep
|
||||
|
||||
#include <soci/into.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
|
||||
@@ -20,7 +19,7 @@ namespace xrpl {
|
||||
bool
|
||||
doVacuumDB(DatabaseCon::Setup const& setup, beast::Journal j)
|
||||
{
|
||||
boost::filesystem::path const dbPath = setup.dataDir / TxDBName;
|
||||
std::filesystem::path const dbPath = setup.dataDir / TxDBName;
|
||||
|
||||
uintmax_t const dbSize = file_size(dbPath);
|
||||
XRPL_ASSERT(dbSize != static_cast<uintmax_t>(-1), "xrpl::doVacuumDB : file_size succeeded");
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
/** No-op implementation of the Telemetry interface.
|
||||
|
||||
Always compiled (regardless of XRPL_ENABLE_TELEMETRY). Provides the
|
||||
make_Telemetry() factory when telemetry is compiled out (#ifndef), which
|
||||
unconditionally returns a NullTelemetry that does nothing.
|
||||
|
||||
When XRPL_ENABLE_TELEMETRY IS defined, the OTel virtual methods
|
||||
(getTracer, startSpan) return noop tracers/spans. The make_Telemetry()
|
||||
factory in this file is not used in that case -- Telemetry.cpp provides
|
||||
its own factory that can return the real TelemetryImpl.
|
||||
*/
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
#include <opentelemetry/trace/noop.h>
|
||||
#endif
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
namespace {
|
||||
|
||||
/** No-op Telemetry that returns immediately from every method.
|
||||
|
||||
Used as the sole implementation when XRPL_ENABLE_TELEMETRY is not
|
||||
defined, or as a fallback when it is defined but enabled=0.
|
||||
*/
|
||||
class NullTelemetry : public Telemetry
|
||||
{
|
||||
/** Retained configuration (unused, kept for diagnostic access). */
|
||||
Setup const setup_;
|
||||
|
||||
public:
|
||||
explicit NullTelemetry(Setup const& setup) : setup_(setup)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
start() override
|
||||
{
|
||||
Telemetry::setInstance(this);
|
||||
}
|
||||
|
||||
void
|
||||
stop() override
|
||||
{
|
||||
Telemetry::setInstance(nullptr);
|
||||
}
|
||||
|
||||
bool
|
||||
isEnabled() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceTransactions() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceConsensus() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceRpc() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTracePeer() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceLedger() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string const&
|
||||
getConsensusTraceStrategy() const override
|
||||
{
|
||||
return setup_.consensusTraceStrategy;
|
||||
}
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer>
|
||||
getTracer(std::string_view) override
|
||||
{
|
||||
static auto noopTracer = opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer>(
|
||||
new opentelemetry::trace::NoopTracer());
|
||||
return noopTracer;
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
|
||||
startSpan(std::string_view, opentelemetry::trace::SpanKind) override
|
||||
{
|
||||
return opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>(
|
||||
new opentelemetry::trace::NoopSpan(nullptr));
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
|
||||
startSpan(
|
||||
std::string_view,
|
||||
opentelemetry::context::Context const&,
|
||||
opentelemetry::trace::SpanKind) override
|
||||
{
|
||||
return opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>(
|
||||
new opentelemetry::trace::NoopSpan(nullptr));
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
/** Factory used when XRPL_ENABLE_TELEMETRY is not defined.
|
||||
Unconditionally returns a NullTelemetry instance.
|
||||
*/
|
||||
#ifndef XRPL_ENABLE_TELEMETRY
|
||||
std::unique_ptr<Telemetry>
|
||||
make_Telemetry(Telemetry::Setup const& setup, beast::Journal)
|
||||
{
|
||||
return std::make_unique<NullTelemetry>(setup);
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
@@ -1,413 +0,0 @@
|
||||
/** Pimpl implementation for SpanGuard and SpanContext.
|
||||
|
||||
All OpenTelemetry SDK types are confined to this translation unit.
|
||||
The public SpanGuard.h header contains only standard-library types
|
||||
and forward-declares the Impl struct.
|
||||
|
||||
Static factory methods access the global Telemetry instance via
|
||||
Telemetry::getInstance(), check whether the requested TraceCategory
|
||||
is enabled, and return either an active guard with a real Span+Scope
|
||||
or a null guard whose methods are all no-ops.
|
||||
|
||||
The Impl struct holds the OTel Span (shared_ptr) and Scope.
|
||||
Scope is non-movable, but since Impl lives behind a unique_ptr,
|
||||
SpanGuard's move constructor simply transfers the pointer — no
|
||||
double-Scope issues.
|
||||
|
||||
@see SpanGuard (SpanGuard.h), Telemetry (Telemetry.h),
|
||||
FilteringSpanProcessor (Telemetry.cpp)
|
||||
*/
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
|
||||
#include <xrpl/telemetry/SpanGuard.h>
|
||||
|
||||
#include <xrpl/basics/random.h>
|
||||
#include <xrpl/telemetry/DiscardFlag.h>
|
||||
#include <xrpl/telemetry/SpanNames.h>
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
|
||||
#include <opentelemetry/context/runtime_context.h>
|
||||
#include <opentelemetry/nostd/shared_ptr.h>
|
||||
#include <opentelemetry/trace/context.h>
|
||||
#include <opentelemetry/trace/default_span.h>
|
||||
#include <opentelemetry/trace/provider.h>
|
||||
#include <opentelemetry/trace/scope.h>
|
||||
#include <opentelemetry/trace/span.h>
|
||||
#include <opentelemetry/trace/span_context.h>
|
||||
#include <opentelemetry/trace/span_startoptions.h>
|
||||
#include <opentelemetry/trace/trace_flags.h>
|
||||
#include <opentelemetry/trace/trace_id.h>
|
||||
#include <opentelemetry/trace/tracer.h>
|
||||
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
namespace otel_trace = opentelemetry::trace;
|
||||
|
||||
// ===== SpanContext::Impl ===================================================
|
||||
|
||||
struct SpanContext::Impl
|
||||
{
|
||||
opentelemetry::context::Context ctx;
|
||||
|
||||
explicit Impl(opentelemetry::context::Context c) : ctx(std::move(c))
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
SpanContext::SpanContext(std::shared_ptr<Impl> impl) : impl_(std::move(impl))
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
SpanContext::isValid() const
|
||||
{
|
||||
return impl_ != nullptr;
|
||||
}
|
||||
|
||||
// ===== SpanGuard::Impl ====================================================
|
||||
|
||||
struct SpanGuard::Impl
|
||||
{
|
||||
/** The OTel span being guarded. Set to nullptr after discard(). */
|
||||
opentelemetry::nostd::shared_ptr<otel_trace::Span> span;
|
||||
|
||||
/** Scope that activates span on the current thread's context stack. */
|
||||
otel_trace::Scope scope;
|
||||
|
||||
explicit Impl(opentelemetry::nostd::shared_ptr<otel_trace::Span> s)
|
||||
: span(std::move(s)), scope(span)
|
||||
{
|
||||
}
|
||||
|
||||
~Impl()
|
||||
{
|
||||
if (span)
|
||||
span->End();
|
||||
}
|
||||
|
||||
Impl(Impl const&) = delete;
|
||||
Impl&
|
||||
operator=(Impl const&) = delete;
|
||||
Impl(Impl&&) = delete;
|
||||
Impl&
|
||||
operator=(Impl&&) = delete;
|
||||
};
|
||||
|
||||
// ===== SpanGuard core lifecycle ============================================
|
||||
|
||||
SpanGuard::SpanGuard() = default;
|
||||
SpanGuard::~SpanGuard() = default;
|
||||
SpanGuard::SpanGuard(SpanGuard&&) noexcept = default;
|
||||
|
||||
SpanGuard::SpanGuard(std::unique_ptr<Impl> impl) : impl_(std::move(impl))
|
||||
{
|
||||
}
|
||||
|
||||
SpanGuard::
|
||||
operator bool() const
|
||||
{
|
||||
return impl_ != nullptr;
|
||||
}
|
||||
|
||||
// ===== Static factory methods ==============================================
|
||||
|
||||
/** Check whether the given TraceCategory is enabled on the Telemetry instance.
|
||||
@return true if the category's shouldTrace*() flag is on.
|
||||
*/
|
||||
static bool
|
||||
isCategoryEnabled(Telemetry const& tel, TraceCategory cat)
|
||||
{
|
||||
switch (cat)
|
||||
{
|
||||
case TraceCategory::Rpc:
|
||||
return tel.shouldTraceRpc();
|
||||
case TraceCategory::Transactions:
|
||||
return tel.shouldTraceTransactions();
|
||||
case TraceCategory::Consensus:
|
||||
return tel.shouldTraceConsensus();
|
||||
case TraceCategory::Peer:
|
||||
return tel.shouldTracePeer();
|
||||
case TraceCategory::Ledger:
|
||||
return tel.shouldTraceLedger();
|
||||
}
|
||||
return false; // unreachable, silences compiler warning
|
||||
}
|
||||
|
||||
SpanGuard
|
||||
SpanGuard::span(TraceCategory cat, std::string_view prefix, std::string_view name)
|
||||
{
|
||||
auto* tel = Telemetry::getInstance();
|
||||
if (!tel || !tel->isEnabled() || !isCategoryEnabled(*tel, cat))
|
||||
return {};
|
||||
auto fullName = std::string(prefix) + "." + std::string(name);
|
||||
return SpanGuard(std::make_unique<Impl>(tel->startSpan(fullName)));
|
||||
}
|
||||
|
||||
// ===== Child / linked span creation ========================================
|
||||
|
||||
SpanGuard
|
||||
SpanGuard::childSpan(std::string_view name) const
|
||||
{
|
||||
if (!impl_)
|
||||
return {};
|
||||
auto* tel = Telemetry::getInstance();
|
||||
if (!tel || !tel->isEnabled())
|
||||
return {};
|
||||
auto ctx = opentelemetry::context::RuntimeContext::GetCurrent();
|
||||
return SpanGuard(std::make_unique<Impl>(tel->startSpan(name, ctx)));
|
||||
}
|
||||
|
||||
SpanGuard
|
||||
SpanGuard::childSpan(std::string_view name, SpanContext const& parentCtx)
|
||||
{
|
||||
if (!parentCtx.isValid())
|
||||
return {};
|
||||
auto* tel = Telemetry::getInstance();
|
||||
if (!tel || !tel->isEnabled())
|
||||
return {};
|
||||
return SpanGuard(std::make_unique<Impl>(tel->startSpan(name, parentCtx.impl_->ctx)));
|
||||
}
|
||||
|
||||
SpanGuard
|
||||
SpanGuard::linkedSpan(std::string_view name) const
|
||||
{
|
||||
if (!impl_)
|
||||
return {};
|
||||
auto* tel = Telemetry::getInstance();
|
||||
if (!tel || !tel->isEnabled())
|
||||
return {};
|
||||
|
||||
auto tracer = tel->getTracer("xrpld");
|
||||
auto spanCtx = impl_->span->GetContext();
|
||||
|
||||
// Mark as root span so it starts a new trace sub-tree rather than
|
||||
// inheriting the current thread's active span as parent.
|
||||
otel_trace::StartSpanOptions opts;
|
||||
opentelemetry::context::Context rootCtx;
|
||||
rootCtx = rootCtx.SetValue(otel_trace::kIsRootSpanKey, true);
|
||||
opts.parent = rootCtx;
|
||||
|
||||
return SpanGuard(
|
||||
std::make_unique<Impl>(tracer->StartSpan(
|
||||
std::string(name),
|
||||
{},
|
||||
{{spanCtx, {{std::string(attr::linkType), std::string(attr_val::followsFrom)}}}},
|
||||
opts)));
|
||||
}
|
||||
|
||||
SpanGuard
|
||||
SpanGuard::linkedSpan(std::string_view name, SpanContext const& linkCtx)
|
||||
{
|
||||
if (!linkCtx.isValid())
|
||||
return {};
|
||||
auto* tel = Telemetry::getInstance();
|
||||
if (!tel || !tel->isEnabled())
|
||||
return {};
|
||||
|
||||
auto tracer = tel->getTracer("xrpld");
|
||||
|
||||
// Extract the span from the captured context to get its SpanContext.
|
||||
auto linkSpan = otel_trace::GetSpan(linkCtx.impl_->ctx);
|
||||
if (!linkSpan || !linkSpan->GetContext().IsValid())
|
||||
return {};
|
||||
|
||||
// Mark as root span so it starts a new trace sub-tree rather than
|
||||
// inheriting the current thread's active span as parent.
|
||||
otel_trace::StartSpanOptions opts;
|
||||
opentelemetry::context::Context rootCtx;
|
||||
rootCtx = rootCtx.SetValue(otel_trace::kIsRootSpanKey, true);
|
||||
opts.parent = rootCtx;
|
||||
|
||||
return SpanGuard(
|
||||
std::make_unique<Impl>(tracer->StartSpan(
|
||||
std::string(name),
|
||||
{},
|
||||
{{linkSpan->GetContext(),
|
||||
{{std::string(attr::linkType), std::string(attr_val::followsFrom)}}}},
|
||||
opts)));
|
||||
}
|
||||
|
||||
// ===== Hash-derived span (category-gated) ==================================
|
||||
|
||||
SpanGuard
|
||||
SpanGuard::hashSpan(
|
||||
TraceCategory cat,
|
||||
std::string_view name,
|
||||
std::uint8_t const* hashData,
|
||||
std::size_t hashSize)
|
||||
{
|
||||
if (hashSize < 16)
|
||||
return {};
|
||||
auto* tel = Telemetry::getInstance();
|
||||
if (!tel || !tel->isEnabled() || !isCategoryEnabled(*tel, cat))
|
||||
return {};
|
||||
|
||||
otel_trace::TraceId traceId(opentelemetry::nostd::span<std::uint8_t const, 16>(hashData, 16));
|
||||
|
||||
auto const rval = default_prng()();
|
||||
std::uint8_t spanIdBytes[8];
|
||||
std::memcpy(spanIdBytes, &rval, sizeof(spanIdBytes));
|
||||
otel_trace::SpanId spanId(opentelemetry::nostd::span<std::uint8_t const, 8>(spanIdBytes, 8));
|
||||
|
||||
otel_trace::SpanContext syntheticCtx(
|
||||
traceId, spanId, otel_trace::TraceFlags(1), /* remote = */ false);
|
||||
|
||||
auto parentCtx = opentelemetry::context::Context{}.SetValue(
|
||||
otel_trace::kSpanKey,
|
||||
opentelemetry::nostd::shared_ptr<otel_trace::Span>(
|
||||
new otel_trace::DefaultSpan(syntheticCtx)));
|
||||
|
||||
return SpanGuard(std::make_unique<Impl>(tel->startSpan(std::string(name), parentCtx)));
|
||||
}
|
||||
|
||||
SpanGuard
|
||||
SpanGuard::hashSpan(
|
||||
TraceCategory cat,
|
||||
std::string_view name,
|
||||
std::uint8_t const* hashData,
|
||||
std::size_t hashSize,
|
||||
std::uint8_t const* parentSpanId,
|
||||
std::size_t parentSpanSize,
|
||||
std::uint8_t traceFlags)
|
||||
{
|
||||
if (hashSize < 16 || parentSpanSize != 8)
|
||||
return {};
|
||||
auto* tel = Telemetry::getInstance();
|
||||
if (!tel || !tel->isEnabled() || !isCategoryEnabled(*tel, cat))
|
||||
return {};
|
||||
|
||||
otel_trace::TraceId traceId(opentelemetry::nostd::span<std::uint8_t const, 16>(hashData, 16));
|
||||
|
||||
otel_trace::SpanId parentSpan(
|
||||
opentelemetry::nostd::span<std::uint8_t const, 8>(parentSpanId, 8));
|
||||
|
||||
otel_trace::SpanContext combinedCtx(
|
||||
traceId, parentSpan, otel_trace::TraceFlags(traceFlags), /* remote = */ true);
|
||||
|
||||
auto parentCtx = opentelemetry::context::Context{}.SetValue(
|
||||
otel_trace::kSpanKey,
|
||||
opentelemetry::nostd::shared_ptr<otel_trace::Span>(
|
||||
new otel_trace::DefaultSpan(combinedCtx)));
|
||||
|
||||
return SpanGuard(std::make_unique<Impl>(tel->startSpan(std::string(name), parentCtx)));
|
||||
}
|
||||
|
||||
// ===== Context capture =====================================================
|
||||
|
||||
SpanContext
|
||||
SpanGuard::captureContext() const
|
||||
{
|
||||
if (!impl_)
|
||||
return {};
|
||||
auto ctx = opentelemetry::context::RuntimeContext::GetCurrent();
|
||||
return SpanContext(std::make_shared<SpanContext::Impl>(ctx));
|
||||
}
|
||||
|
||||
// ===== Attribute setters ===================================================
|
||||
|
||||
void
|
||||
SpanGuard::setAttribute(std::string_view key, std::string_view value)
|
||||
{
|
||||
if (impl_)
|
||||
impl_->span->SetAttribute(
|
||||
opentelemetry::nostd::string_view(key.data(), key.size()),
|
||||
opentelemetry::nostd::string_view(value.data(), value.size()));
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::setAttribute(std::string_view key, char const* value)
|
||||
{
|
||||
setAttribute(key, std::string_view(value));
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::setAttribute(std::string_view key, std::int64_t value)
|
||||
{
|
||||
if (impl_)
|
||||
impl_->span->SetAttribute(opentelemetry::nostd::string_view(key.data(), key.size()), value);
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::setAttribute(std::string_view key, double value)
|
||||
{
|
||||
if (impl_)
|
||||
impl_->span->SetAttribute(opentelemetry::nostd::string_view(key.data(), key.size()), value);
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::setAttribute(std::string_view key, bool value)
|
||||
{
|
||||
if (impl_)
|
||||
impl_->span->SetAttribute(opentelemetry::nostd::string_view(key.data(), key.size()), value);
|
||||
}
|
||||
|
||||
// ===== Status / events =====================================================
|
||||
|
||||
void
|
||||
SpanGuard::setOk()
|
||||
{
|
||||
if (impl_)
|
||||
impl_->span->SetStatus(otel_trace::StatusCode::kOk);
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::setError(std::string_view description)
|
||||
{
|
||||
if (impl_)
|
||||
impl_->span->SetStatus(otel_trace::StatusCode::kError, std::string(description));
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::addEvent(std::string_view name)
|
||||
{
|
||||
if (impl_)
|
||||
impl_->span->AddEvent(std::string(name));
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::addEvent(std::string_view name, std::initializer_list<EventAttribute> attrs)
|
||||
{
|
||||
if (!impl_)
|
||||
return;
|
||||
// Own the strings to ensure lifetime safety through the AddEvent call.
|
||||
std::vector<std::pair<std::string, std::string>> owned;
|
||||
owned.reserve(attrs.size());
|
||||
for (auto const& [k, v] : attrs)
|
||||
owned.emplace_back(std::string(k), std::string(v));
|
||||
impl_->span->AddEvent(std::string(name), owned);
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::recordException(std::exception const& e)
|
||||
{
|
||||
if (!impl_)
|
||||
return;
|
||||
impl_->span->AddEvent(
|
||||
"exception",
|
||||
{{"exception.type", "std::exception"}, {"exception.message", std::string(e.what())}});
|
||||
impl_->span->SetStatus(otel_trace::StatusCode::kError, e.what());
|
||||
}
|
||||
|
||||
void
|
||||
SpanGuard::discard()
|
||||
{
|
||||
if (impl_)
|
||||
{
|
||||
tl_discardCurrentSpan = true;
|
||||
impl_->span->End();
|
||||
impl_->span = nullptr; // prevent ~Impl from calling End() again
|
||||
impl_.reset();
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
|
||||
#endif // XRPL_ENABLE_TELEMETRY
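For reference, a minimal usage sketch of the SpanGuard API removed above, based only on the factory methods and setters visible in this file. The function name, category, and attribute names are illustrative, not taken from the codebase.

    // Illustrative sketch; assumes the pre-removal <xrpl/telemetry/SpanGuard.h>.
    using namespace xrpl::telemetry;

    void handleAccountInfo()
    {
        // Returns an inactive guard when telemetry is disabled or the Rpc
        // category is not traced; every call below is then a no-op.
        auto guard = SpanGuard::span(TraceCategory::Rpc, "rpc", "account_info");
        guard.setAttribute("rpc.method", "account_info");
        try
        {
            // ... perform the RPC work ...
            guard.setOk();
        }
        catch (std::exception const& e)
        {
            guard.recordException(e);
            throw;
        }
        // ~SpanGuard ends the span (Impl's destructor calls span->End()).
    }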
|
||||
@@ -1,426 +0,0 @@
|
||||
/** OpenTelemetry SDK implementation of the Telemetry interface.
|
||||
|
||||
Compiled only when XRPL_ENABLE_TELEMETRY is defined (via CMake
|
||||
telemetry=ON). Contains:
|
||||
|
||||
- FilteringSpanProcessor: decorator that drops spans marked with
|
||||
kDiscardedAttr before they enter the batch export queue.
|
||||
- TelemetryImpl: configures the OTel SDK with an OTLP/HTTP exporter,
|
||||
FilteringSpanProcessor wrapping a batch span processor,
|
||||
trace-ID-ratio sampler, and resource attributes.
|
||||
- NullTelemetryOtel: no-op fallback used when telemetry is compiled in
|
||||
but disabled at runtime (enabled=0 in config).
|
||||
- make_Telemetry(): factory that selects the appropriate implementation.
|
||||
*/
|
||||
|
||||
#ifdef XRPL_ENABLE_TELEMETRY
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/telemetry/DiscardFlag.h>
|
||||
#include <xrpl/telemetry/SpanNames.h>
|
||||
|
||||
#include <opentelemetry/exporters/otlp/otlp_http_exporter_factory.h>
|
||||
#include <opentelemetry/exporters/otlp/otlp_http_exporter_options.h>
|
||||
#include <opentelemetry/sdk/resource/semantic_conventions.h>
|
||||
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
|
||||
#include <opentelemetry/sdk/trace/batch_span_processor_options.h>
|
||||
#include <opentelemetry/sdk/trace/processor.h>
|
||||
#include <opentelemetry/sdk/trace/sampler.h>
|
||||
#include <opentelemetry/sdk/trace/samplers/trace_id_ratio.h>
|
||||
#include <opentelemetry/sdk/trace/tracer_provider.h>
|
||||
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
|
||||
#include <opentelemetry/trace/noop.h>
|
||||
#include <opentelemetry/trace/provider.h>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
namespace {
|
||||
|
||||
namespace trace_api = opentelemetry::trace;
|
||||
namespace trace_sdk = opentelemetry::sdk::trace;
|
||||
namespace otlp_http = opentelemetry::exporter::otlp;
|
||||
namespace resource = opentelemetry::sdk::resource;
|
||||
|
||||
/** SpanProcessor decorator that drops discarded spans.
|
||||
|
||||
Wraps a delegate processor (typically BatchSpanProcessor). In OnEnd(),
|
||||
checks the tl_discardCurrentSpan thread-local flag. If set (by
|
||||
SpanGuard::discard()), the span is silently dropped — never entering
|
||||
the batch queue, never sent over the network, never stored.
|
||||
|
||||
Uses a thread-local flag rather than inspecting Recordable attributes
|
||||
because the Recordable type varies by exporter (SpanData for simple
|
||||
exporters, OtlpRecordable for OTLP) and none expose a uniform getter.
|
||||
The flag is safe because Span::End() calls OnEnd() synchronously on
|
||||
the same thread.
|
||||
|
||||
All other methods delegate directly to the wrapped processor.
|
||||
|
||||
Dependency diagram:
|
||||
|
||||
+---------------------------+
|
||||
| FilteringSpanProcessor |
|
||||
+---------------------------+
|
||||
| - delegate_ : unique_ptr |
|
||||
| <SpanProcessor> |
|
||||
+---------------------------+
|
||||
| wraps
|
||||
+---------+-----------+
|
||||
| BatchSpanProcessor |
|
||||
+---------------------+
|
||||
|
||||
@note Thread safety: OnEnd() may be called concurrently from multiple
|
||||
threads. The tl_discardCurrentSpan flag is thread-local, so each
|
||||
thread's discard state is independent — no synchronization needed.
|
||||
*/
|
||||
class FilteringSpanProcessor : public trace_sdk::SpanProcessor
|
||||
{
|
||||
std::unique_ptr<trace_sdk::SpanProcessor> delegate_;
|
||||
|
||||
public:
|
||||
explicit FilteringSpanProcessor(std::unique_ptr<trace_sdk::SpanProcessor> delegate)
|
||||
: delegate_(std::move(delegate))
|
||||
{
|
||||
}
|
||||
|
||||
std::unique_ptr<trace_sdk::Recordable>
|
||||
MakeRecordable() noexcept override
|
||||
{
|
||||
return delegate_->MakeRecordable();
|
||||
}
|
||||
|
||||
void
|
||||
OnStart(
|
||||
trace_sdk::Recordable& span,
|
||||
opentelemetry::trace::SpanContext const& parentContext) noexcept override
|
||||
{
|
||||
delegate_->OnStart(span, parentContext);
|
||||
}
|
||||
|
||||
void
|
||||
OnEnd(std::unique_ptr<trace_sdk::Recordable>&& span) noexcept override
|
||||
{
|
||||
if (tl_discardCurrentSpan)
|
||||
{
|
||||
// SpanGuard::discard() set the flag on this thread just before
|
||||
// calling Span::End(), which invokes OnEnd() synchronously.
|
||||
// Clear the flag and drop the span.
|
||||
tl_discardCurrentSpan = false;
|
||||
return;
|
||||
}
|
||||
delegate_->OnEnd(std::move(span));
|
||||
}
|
||||
|
||||
bool
|
||||
ForceFlush(
|
||||
std::chrono::microseconds timeout = (std::chrono::microseconds::max)()) noexcept override
|
||||
{
|
||||
return delegate_->ForceFlush(timeout);
|
||||
}
|
||||
|
||||
bool
|
||||
Shutdown(
|
||||
std::chrono::microseconds timeout = (std::chrono::microseconds::max)()) noexcept override
|
||||
{
|
||||
return delegate_->Shutdown(timeout);
|
||||
}
|
||||
};
|
||||
|
||||
/** No-op implementation used when XRPL_ENABLE_TELEMETRY is defined but
|
||||
setup.enabled is false at runtime.
|
||||
|
||||
Lives in the anonymous namespace so there is no ODR conflict with the
|
||||
NullTelemetry in NullTelemetry.cpp.
|
||||
*/
|
||||
class NullTelemetryOtel : public Telemetry
|
||||
{
|
||||
/** Retained configuration (unused, kept for diagnostic access). */
|
||||
Setup const setup_;
|
||||
|
||||
public:
|
||||
explicit NullTelemetryOtel(Setup const& setup) : setup_(setup)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
start() override
|
||||
{
|
||||
Telemetry::setInstance(this);
|
||||
}
|
||||
|
||||
void
|
||||
stop() override
|
||||
{
|
||||
Telemetry::setInstance(nullptr);
|
||||
}
|
||||
|
||||
bool
|
||||
isEnabled() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceTransactions() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceConsensus() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceRpc() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTracePeer() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceLedger() const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string const&
|
||||
getConsensusTraceStrategy() const override
|
||||
{
|
||||
return setup_.consensusTraceStrategy;
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<trace_api::Tracer>
|
||||
getTracer(std::string_view) override
|
||||
{
|
||||
static auto noopTracer =
|
||||
opentelemetry::nostd::shared_ptr<trace_api::Tracer>(new trace_api::NoopTracer());
|
||||
return noopTracer;
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<trace_api::Span>
|
||||
startSpan(std::string_view, trace_api::SpanKind) override
|
||||
{
|
||||
return opentelemetry::nostd::shared_ptr<trace_api::Span>(new trace_api::NoopSpan(nullptr));
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<trace_api::Span>
|
||||
startSpan(std::string_view, opentelemetry::context::Context const&, trace_api::SpanKind)
|
||||
override
|
||||
{
|
||||
return opentelemetry::nostd::shared_ptr<trace_api::Span>(new trace_api::NoopSpan(nullptr));
|
||||
}
|
||||
};
|
||||
|
||||
/** Full OTel SDK implementation that exports trace spans via OTLP/HTTP.
|
||||
|
||||
Configures an OTLP/HTTP exporter, batch span processor,
|
||||
TraceIdRatioBasedSampler, and resource attributes on start().
|
||||
*/
|
||||
class TelemetryImpl : public Telemetry
|
||||
{
|
||||
/** Configuration from the [telemetry] config section.
|
||||
Non-const so setServiceInstanceId() can update the instance ID
|
||||
before start() creates the OTel resource.
|
||||
*/
|
||||
Setup setup_;
|
||||
|
||||
/** Journal used for log output during start/stop. */
|
||||
beast::Journal const journal_;
|
||||
|
||||
/** The SDK TracerProvider that owns the export pipeline.
|
||||
|
||||
Held as std::shared_ptr so we can call ForceFlush() on shutdown.
|
||||
Wrapped in a nostd::shared_ptr when registered as the global provider.
|
||||
*/
|
||||
std::shared_ptr<trace_sdk::TracerProvider> sdkProvider_;
|
||||
|
||||
public:
|
||||
TelemetryImpl(Setup const& setup, beast::Journal journal) : setup_(setup), journal_(journal)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
setServiceInstanceId(std::string const& id) override
|
||||
{
|
||||
setup_.serviceInstanceId = id;
|
||||
}
|
||||
|
||||
void
|
||||
start() override
|
||||
{
|
||||
JLOG(journal_.info()) << "Telemetry starting: endpoint=" << setup_.exporterEndpoint
|
||||
<< " sampling=" << setup_.samplingRatio;
|
||||
|
||||
// Configure OTLP HTTP exporter
|
||||
otlp_http::OtlpHttpExporterOptions exporterOpts;
|
||||
exporterOpts.url = setup_.exporterEndpoint;
|
||||
if (setup_.useTls)
|
||||
exporterOpts.ssl_ca_cert_path = setup_.tlsCertPath;
|
||||
|
||||
auto exporter = otlp_http::OtlpHttpExporterFactory::Create(exporterOpts);
|
||||
|
||||
// Configure batch processor
|
||||
trace_sdk::BatchSpanProcessorOptions processorOpts;
|
||||
processorOpts.max_queue_size = setup_.maxQueueSize;
|
||||
processorOpts.schedule_delay_millis = std::chrono::milliseconds(setup_.batchDelay);
|
||||
processorOpts.max_export_batch_size = setup_.batchSize;
|
||||
|
||||
auto batchProcessor =
|
||||
trace_sdk::BatchSpanProcessorFactory::Create(std::move(exporter), processorOpts);
|
||||
|
||||
// Wrap batch processor with filtering processor that drops spans
|
||||
// marked with kDiscardedAttr (via SpanGuard::discard()).
|
||||
auto processor = std::make_unique<FilteringSpanProcessor>(std::move(batchProcessor));
|
||||
|
||||
// Configure resource attributes
|
||||
auto resourceAttrs = resource::Resource::Create({
|
||||
{resource::SemanticConventions::kServiceName, setup_.serviceName},
|
||||
{resource::SemanticConventions::kServiceVersion, setup_.serviceVersion},
|
||||
{resource::SemanticConventions::kServiceInstanceId, setup_.serviceInstanceId},
|
||||
{std::string(attr::networkId), static_cast<int64_t>(setup_.networkId)},
|
||||
{std::string(attr::networkType), setup_.networkType},
|
||||
});
|
||||
|
||||
// Configure sampler
|
||||
auto sampler = std::make_unique<trace_sdk::TraceIdRatioBasedSampler>(setup_.samplingRatio);
|
||||
|
||||
// Create TracerProvider
|
||||
sdkProvider_ = trace_sdk::TracerProviderFactory::Create(
|
||||
std::move(processor), resourceAttrs, std::move(sampler));
|
||||
|
||||
// Set as global provider
|
||||
trace_api::Provider::SetTracerProvider(
|
||||
opentelemetry::nostd::shared_ptr<trace_api::TracerProvider>(sdkProvider_));
|
||||
|
||||
// Register as the global Telemetry instance so SpanGuard factory
|
||||
// methods can access it without callers passing a reference.
|
||||
Telemetry::setInstance(this);
|
||||
|
||||
JLOG(journal_.info()) << "Telemetry started successfully";
|
||||
}
|
||||
|
||||
void
|
||||
stop() override
|
||||
{
|
||||
JLOG(journal_.info()) << "Telemetry stopping";
|
||||
|
||||
// Unregister global instance before tearing down the pipeline.
|
||||
Telemetry::setInstance(nullptr);
|
||||
|
||||
if (sdkProvider_)
|
||||
{
|
||||
// Force flush with timeout to avoid blocking indefinitely
|
||||
// when the OTLP endpoint is unreachable.
|
||||
sdkProvider_->ForceFlush(std::chrono::milliseconds(5000));
|
||||
// TODO: sdkProvider_ is not thread-safe. This reset() races with
|
||||
// getTracer() if any thread is still calling startSpan().
|
||||
// Currently safe because Application::stop() shuts down
|
||||
// serverHandler_, overlay_, and jobQueue_ before calling
|
||||
// telemetry_->stop() — so no callers should remain. If the
|
||||
// shutdown order ever changes, add an std::atomic<bool> stopped_
|
||||
// flag checked in getTracer() to make this robust.
|
||||
sdkProvider_.reset();
|
||||
trace_api::Provider::SetTracerProvider(
|
||||
opentelemetry::nostd::shared_ptr<trace_api::TracerProvider>(
|
||||
new trace_api::NoopTracerProvider()));
|
||||
}
|
||||
JLOG(journal_.info()) << "Telemetry stopped";
|
||||
}
|
||||
|
||||
bool
|
||||
isEnabled() const override
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceTransactions() const override
|
||||
{
|
||||
return setup_.traceTransactions;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceConsensus() const override
|
||||
{
|
||||
return setup_.traceConsensus;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceRpc() const override
|
||||
{
|
||||
return setup_.traceRpc;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTracePeer() const override
|
||||
{
|
||||
return setup_.tracePeer;
|
||||
}
|
||||
|
||||
bool
|
||||
shouldTraceLedger() const override
|
||||
{
|
||||
return setup_.traceLedger;
|
||||
}
|
||||
|
||||
std::string const&
|
||||
getConsensusTraceStrategy() const override
|
||||
{
|
||||
return setup_.consensusTraceStrategy;
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<trace_api::Tracer>
|
||||
getTracer(std::string_view name) override
|
||||
{
|
||||
if (!sdkProvider_)
|
||||
return trace_api::Provider::GetTracerProvider()->GetTracer(std::string(name));
|
||||
return sdkProvider_->GetTracer(std::string(name));
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<trace_api::Span>
|
||||
startSpan(std::string_view name, trace_api::SpanKind kind) override
|
||||
{
|
||||
auto tracer = getTracer("xrpld");
|
||||
trace_api::StartSpanOptions opts;
|
||||
opts.kind = kind;
|
||||
return tracer->StartSpan(std::string(name), opts);
|
||||
}
|
||||
|
||||
opentelemetry::nostd::shared_ptr<trace_api::Span>
|
||||
startSpan(
|
||||
std::string_view name,
|
||||
opentelemetry::context::Context const& parentContext,
|
||||
trace_api::SpanKind kind) override
|
||||
{
|
||||
auto tracer = getTracer("xrpld");
|
||||
trace_api::StartSpanOptions opts;
|
||||
opts.kind = kind;
|
||||
opts.parent = parentContext;
|
||||
return tracer->StartSpan(std::string(name), opts);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
std::unique_ptr<Telemetry>
|
||||
make_Telemetry(Telemetry::Setup const& setup, beast::Journal journal)
|
||||
{
|
||||
if (setup.enabled)
|
||||
return std::make_unique<TelemetryImpl>(setup, journal);
|
||||
return std::make_unique<NullTelemetryOtel>(setup);
|
||||
}
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
|
||||
#endif // XRPL_ENABLE_TELEMETRY
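One possible shape of the hardening suggested by the TODO in stop() above. This is a sketch only, not code from the diff, and it still relies on the documented shutdown ordering for the window between the flag check and the provider use.

    // Sketch: an additional TelemetryImpl member guarding getTracer() during stop().
    std::atomic<bool> stopped_{false};

    opentelemetry::nostd::shared_ptr<trace_api::Tracer>
    getTracer(std::string_view name) override
    {
        // Fall back to the global (noop) provider once stop() has begun.
        if (stopped_.load(std::memory_order_acquire) || !sdkProvider_)
            return trace_api::Provider::GetTracerProvider()->GetTracer(std::string(name));
        return sdkProvider_->GetTracer(std::string(name));
    }

    // In stop(), before resetting sdkProvider_:
    //     stopped_.store(true, std::memory_order_release);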
|
||||
@@ -1,85 +0,0 @@
|
||||
/** Parser for the [telemetry] section of xrpld.cfg.
|
||||
|
||||
Reads configuration values from the config file and populates a
|
||||
Telemetry::Setup struct. All options have sensible defaults so the
|
||||
section can be minimal or omitted entirely.
|
||||
|
||||
See cfg/xrpld-example.cfg for the full list of available options.
|
||||
*/
|
||||
|
||||
#include <xrpl/telemetry/Telemetry.h>
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
namespace {
|
||||
|
||||
/** Derive a human-readable network type label from the numeric network ID.
|
||||
@param networkId The network identifier from [network_id] config.
|
||||
@return "mainnet", "testnet", "devnet", or "unknown" for other values.
|
||||
*/
|
||||
std::string
|
||||
networkTypeFromId(std::uint32_t networkId)
|
||||
{
|
||||
switch (networkId)
|
||||
{
|
||||
case 0:
|
||||
return "mainnet";
|
||||
case 1:
|
||||
return "testnet";
|
||||
case 2:
|
||||
return "devnet";
|
||||
default:
|
||||
return "unknown";
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
Telemetry::Setup
|
||||
setup_Telemetry(
|
||||
Section const& section,
|
||||
std::string const& nodePublicKey,
|
||||
std::string const& version,
|
||||
std::uint32_t networkId)
|
||||
{
|
||||
Telemetry::Setup setup;
|
||||
|
||||
setup.enabled = section.value_or<int>("enabled", 0) != 0;
|
||||
setup.serviceName = section.value_or<std::string>("service_name", "xrpld");
|
||||
setup.serviceVersion = version;
|
||||
setup.serviceInstanceId = section.value_or<std::string>("service_instance_id", nodePublicKey);
|
||||
|
||||
setup.exporterEndpoint =
|
||||
section.value_or<std::string>("endpoint", "http://localhost:4318/v1/traces");
|
||||
|
||||
setup.useTls = section.value_or<int>("use_tls", 0) != 0;
|
||||
setup.tlsCertPath = section.value_or<std::string>("tls_ca_cert", "");
|
||||
|
||||
setup.samplingRatio = section.value_or<double>("sampling_ratio", 1.0);
|
||||
setup.samplingRatio = std::clamp(setup.samplingRatio, 0.0, 1.0);
|
||||
|
||||
setup.batchSize = section.value_or<std::uint32_t>("batch_size", 512u);
|
||||
setup.batchDelay =
|
||||
std::chrono::milliseconds{section.value_or<std::uint32_t>("batch_delay_ms", 5000u)};
|
||||
setup.maxQueueSize = section.value_or<std::uint32_t>("max_queue_size", 2048u);
|
||||
|
||||
setup.networkId = networkId;
|
||||
setup.networkType = networkTypeFromId(networkId);
|
||||
|
||||
setup.traceTransactions = section.value_or<int>("trace_transactions", 1) != 0;
|
||||
setup.traceConsensus = section.value_or<int>("trace_consensus", 1) != 0;
|
||||
setup.traceRpc = section.value_or<int>("trace_rpc", 1) != 0;
|
||||
setup.tracePeer = section.value_or<int>("trace_peer", 0) != 0;
|
||||
setup.traceLedger = section.value_or<int>("trace_ledger", 1) != 0;
|
||||
|
||||
setup.consensusTraceStrategy =
|
||||
section.value_or<std::string>("consensus_trace_strategy", "deterministic");
|
||||
|
||||
return setup;
|
||||
}
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
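For reference, a [telemetry] stanza exercising the options parsed by setup_Telemetry above might look like the following. The values are illustrative; any omitted key falls back to the default shown in the parser.

    [telemetry]
    enabled = 1
    service_name = xrpld
    endpoint = http://localhost:4318/v1/traces
    use_tls = 0
    sampling_ratio = 0.25
    batch_size = 512
    batch_delay_ms = 5000
    max_queue_size = 2048
    trace_transactions = 1
    trace_consensus = 1
    trace_rpc = 1
    trace_peer = 0
    trace_ledger = 1
    consensus_trace_strategy = deterministic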
|
||||
@@ -7,8 +7,6 @@
|
||||
#include <xrpl/proto/org/xrpl/rpc/v1/get_ledger.pb.h>
|
||||
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
|
||||
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
|
||||
#include <grpcpp/client_context.h>
|
||||
#include <grpcpp/create_channel.h>
|
||||
#include <grpcpp/grpcpp.h>
|
||||
@@ -16,9 +14,14 @@
|
||||
#include <grpcpp/support/status.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <iomanip>
|
||||
#include <ios>
|
||||
#include <memory>
|
||||
#include <random>
|
||||
#include <sstream>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
@@ -256,9 +259,23 @@ public:
|
||||
TemporaryTLSCertificates()
|
||||
{
|
||||
auto tmpDir = std::filesystem::temp_directory_path();
|
||||
auto uniqueDirName =
|
||||
boost::filesystem::unique_path(std::string(kCERTS_DIR_PREFIX) + "%%%%%%%%");
|
||||
tempDir_ = tmpDir / uniqueDirName.string();
|
||||
std::random_device rd;
|
||||
constexpr std::size_t maxAttempts = 100;
|
||||
for (std::size_t attempt = 0; attempt < maxAttempts; ++attempt)
|
||||
{
|
||||
std::error_code ec;
|
||||
std::ostringstream oss;
|
||||
oss << kCERTS_DIR_PREFIX << std::hex << std::setfill('0') << std::setw(8) << rd();
|
||||
tempDir_ = tmpDir / oss.str();
|
||||
if (!std::filesystem::exists(tempDir_, ec) && !ec)
|
||||
break;
|
||||
tempDir_.clear();
|
||||
}
|
||||
if (tempDir_.empty())
|
||||
{
|
||||
throw std::runtime_error(
|
||||
"Unable to generate a unique temporary TLS certificate directory");
|
||||
}
|
||||
std::filesystem::create_directories(tempDir_);
|
||||
|
||||
writeFile(tempDir_ / kCA_CERT_FILENAME, kCA_CERT_CONTENT);
|
||||
|
||||
@@ -18,16 +18,16 @@
|
||||
#include <xrpl/protocol/jss.h>
|
||||
|
||||
#include <boost/algorithm/string/erase.hpp>
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/system/detail/error_code.hpp>
|
||||
|
||||
#include <cassert>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <ios>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <system_error>
|
||||
|
||||
namespace xrpl {
|
||||
|
||||
@@ -139,7 +139,7 @@ class LedgerLoad_test : public beast::unit_test::suite
|
||||
{
|
||||
testcase("Load ledger: Bad Files");
|
||||
using namespace test::jtx;
|
||||
using namespace boost::filesystem;
|
||||
using namespace std::filesystem;
|
||||
|
||||
// empty path
|
||||
except([&] {
|
||||
@@ -161,8 +161,8 @@ class LedgerLoad_test : public beast::unit_test::suite
|
||||
});
|
||||
|
||||
// make a corrupted version of the ledger file (last 10 bytes removed).
|
||||
boost::system::error_code ec;
|
||||
auto ledgerFileCorrupt = boost::filesystem::path{sd.dbPath} / "ledgerdata_bad.json";
|
||||
std::error_code ec;
|
||||
auto ledgerFileCorrupt = std::filesystem::path{sd.dbPath} / "ledgerdata_bad.json";
|
||||
copy_file(sd.ledgerFile, ledgerFileCorrupt, copy_options::overwrite_existing, ec);
|
||||
if (!BEAST_EXPECTS(!ec, ec.message()))
|
||||
return;
|
||||
|
||||
@@ -21,14 +21,12 @@
|
||||
#include <xrpl/server/Manifest.h>
|
||||
#include <xrpl/server/Wallet.h>
|
||||
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <exception>
|
||||
#include <filesystem>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
@@ -55,18 +53,18 @@ private:
|
||||
}
|
||||
|
||||
static void
|
||||
cleanupDatabaseDir(boost::filesystem::path const& dbPath)
|
||||
cleanupDatabaseDir(std::filesystem::path const& dbPath)
|
||||
{
|
||||
using namespace boost::filesystem;
|
||||
using namespace std::filesystem;
|
||||
if (!exists(dbPath) || !is_directory(dbPath) || !is_empty(dbPath))
|
||||
return;
remove(dbPath);
}

static void
setupDatabaseDir(boost::filesystem::path const& dbPath)
setupDatabaseDir(std::filesystem::path const& dbPath)
{
using namespace boost::filesystem;
using namespace std::filesystem;
if (!exists(dbPath))
{
create_directory(dbPath);
@@ -79,10 +77,10 @@ private:
Throw<std::runtime_error>("Cannot create directory: " + dbPath.string());
}
}
static boost::filesystem::path
static std::filesystem::path
getDatabasePath()
{
return boost::filesystem::current_path() / "manifest_test_databases";
return std::filesystem::current_path() / "manifest_test_databases";
}

public:
@@ -351,7 +349,7 @@ public:
BEAST_EXPECT(loaded.revoked(pk));
}
}
boost::filesystem::remove(getDatabasePath() / boost::filesystem::path(dbName));
std::filesystem::remove(getDatabasePath() / std::filesystem::path(dbName));
}

void

@@ -21,10 +21,9 @@
#include <xrpl/protocol/XRPAmount.h>
#include <xrpl/protocol/jss.h>

#include <boost/filesystem/path.hpp>

#include <atomic>
#include <cstdint>
#include <filesystem>
#include <limits>
#include <map>
#include <memory>
@@ -491,7 +490,7 @@ public:
makeBackendRotating(jtx::Env& env, NodeStoreScheduler& scheduler, std::string path)
{
Section section{env.app().config().section(ConfigSection::nodeDatabase())};
boost::filesystem::path newPath;
std::filesystem::path newPath;

if (!BEAST_EXPECT(path.size()))
return {};

@@ -15,13 +15,12 @@
#include <xrpl/protocol/jss.h>

#include <boost/algorithm/string/join.hpp>
#include <boost/filesystem/directory.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/range/adaptor/transformed.hpp>

#include <date/date.h>

#include <chrono>
#include <filesystem>
#include <fstream>
#include <memory>
#include <ostream>
@@ -601,7 +600,7 @@ public:
detail::default_effective_overlap,
60 * 24}}); // max of 24 hours
}
using namespace boost::filesystem;
using namespace std::filesystem;
for (auto const& file : directory_iterator(good.subdir()))
{
remove_all(file);

@@ -4,8 +4,7 @@
#include <xrpl/basics/FileUtilities.h>
#include <xrpl/beast/unit_test/suite.h>

#include <boost/system/detail/errc.hpp>
#include <boost/system/detail/error_code.hpp>
#include <system_error>

namespace xrpl {

@@ -16,14 +15,13 @@ public:
testGetFileContents()
{
using namespace xrpl::detail;
using namespace boost::system;

constexpr char const* expectedContents = "This file is very short. That's all we need.";

FileDirGuard const file(
*this, "test_file", "test.txt", "This is temporary text that should get overwritten");

error_code ec;
std::error_code ec;
auto const path = file.file();

writeFileContents(ec, path, expectedContents);
@@ -46,7 +44,7 @@ public:
{
// Test with small max
auto const bad = getFileContents(ec, path, 16);
BEAST_EXPECT(ec && ec.value() == boost::system::errc::file_too_large);
BEAST_EXPECT(ec && ec == std::errc::file_too_large);
BEAST_EXPECT(bad.empty());
}
}

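The FileUtilities hunk above swaps boost::system::error_code for std::error_code and compares the code directly against a std::errc enumerator. A minimal standalone sketch of the comparison semantics that check relies on (illustrative only, not part of this commit):

#include <cassert>
#include <system_error>

int main()
{
    // Simulate the failure the test expects from getFileContents.
    std::error_code ec = std::make_error_code(std::errc::file_too_large);

    assert(ec);                                    // a non-zero code is truthy
    assert(ec == std::errc::file_too_large);       // condition-based, portable comparison
    assert(ec.category() == std::generic_category());
    return 0;
}

Comparing against std::errc goes through the error_condition mapping, so it stays correct whichever error category actually produced the code, unlike comparing raw ec.value() integers.
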
@@ -15,14 +15,10 @@
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/jss.h>

#include <boost/filesystem/file_status.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/system/detail/error_code.hpp>

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <filesystem>
#include <fstream>
#include <ios>
#include <iterator>
@@ -31,6 +27,7 @@
#include <ostream>
#include <random>
#include <string>
#include <system_error>
#include <thread>
#include <utility>
#include <vector>
@@ -43,7 +40,7 @@ class PerfLog_test : public beast::unit_test::suite
{
enum class WithFile : bool { no = false, yes = true };

using path = boost::filesystem::path;
using path = std::filesystem::path;

// We're only using Env for its Journal. That Journal gives better
// coverage in unit tests.
@@ -66,14 +63,14 @@ class PerfLog_test : public beast::unit_test::suite
// The error code is intentionally ignored: if the path doesn't
// exist (the common case on a clean runner) remove_all returns
// an error, and that's fine — there's nothing to clean up.
using namespace boost::filesystem;
boost::system::error_code ec;
using namespace std::filesystem;
std::error_code ec;
remove_all(logDir(), ec);
}

~Fixture()
{
using namespace boost::filesystem;
using namespace std::filesystem;

auto const dir{logDir()};
auto const file{logFile()};
@@ -96,7 +93,7 @@ class PerfLog_test : public beast::unit_test::suite
static path
logDir()
{
using namespace boost::filesystem;
using namespace std::filesystem;
return temp_directory_path() / "perf_log_test_dir";
}

@@ -129,7 +126,7 @@ class PerfLog_test : public beast::unit_test::suite
static void
wait()
{
using namespace boost::filesystem;
using namespace std::filesystem;

auto const path = logFile();
if (!exists(path))
@@ -201,7 +198,7 @@ public:
void
testFileCreation()
{
using namespace boost::filesystem;
using namespace std::filesystem;

{
// Verify a PerfLog creates its file when constructed.
@@ -256,22 +253,23 @@ public:

// Construct and write protect a file to prevent PerfLog
// from creating its file.
boost::system::error_code ec;
boost::filesystem::create_directories(fixture.logDir(), ec);
std::error_code ec;
std::filesystem::create_directories(fixture.logDir(), ec);
if (!BEAST_EXPECT(!ec))
return;

auto fileWriteable = [](boost::filesystem::path const& p) -> bool {
auto fileWriteable = [](std::filesystem::path const& p) -> bool {
return std::ofstream{p.c_str(), std::ios::out | std::ios::app}.is_open();
};

if (!BEAST_EXPECT(fileWriteable(fixture.logFile())))
return;

boost::filesystem::permissions(
std::filesystem::permissions(
fixture.logFile(),
perms::remove_perms | perms::owner_write | perms::others_write |
perms::group_write);
std::filesystem::perms::owner_write | std::filesystem::perms::others_write |
std::filesystem::perms::group_write,
std::filesystem::perm_options::remove);

// If the test is running as root, then the write protect may have
// no effect. Make sure write protect worked before proceeding.
@@ -295,9 +293,11 @@ public:
perfLog->stop();

// Fix file permissions so the file can be cleaned up.
boost::filesystem::permissions(
std::filesystem::permissions(
fixture.logFile(),
perms::add_perms | perms::owner_write | perms::others_write | perms::group_write);
std::filesystem::perms::owner_write | std::filesystem::perms::others_write |
std::filesystem::perms::group_write,
std::filesystem::perm_options::add);
}
}

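The write-protect hunks above drop boost's perms::remove_perms / perms::add_perms flags in favour of std::filesystem's separate perm_options argument. A minimal standalone sketch of that call shape (illustrative, not taken from the commit):

#include <filesystem>
#include <fstream>

int main()
{
    namespace fs = std::filesystem;
    fs::path const p = fs::temp_directory_path() / "perm_demo.txt";
    std::ofstream{p} << "demo";  // create a small file to protect

    // Drop all write bits; the third argument says how the mask is applied.
    fs::permissions(
        p,
        fs::perms::owner_write | fs::perms::group_write | fs::perms::others_write,
        fs::perm_options::remove);

    // Restore them so the file can always be deleted afterwards.
    fs::permissions(
        p,
        fs::perms::owner_write | fs::perms::group_write | fs::perms::others_write,
        fs::perm_options::add);

    fs::remove(p);
    return 0;
}
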
@@ -962,7 +962,7 @@ public:
// We can't fully test rotate because unit tests must run on Windows,
// and Windows doesn't (may not?) support rotate. But at least call
// the interface and see that it doesn't crash.
using namespace boost::filesystem;
using namespace std::filesystem;

Fixture fixture{env_.app(), j_};
BEAST_EXPECT(!exists(fixture.logDir()));

@@ -24,7 +24,7 @@ public:
testInteger(IntType in)
{
std::string s;
IntType out = static_cast<IntType>(~in); // Ensure out != in
IntType out(in + 1);

expect(lexicalCastChecked(s, in));
expect(lexicalCastChecked(out, s));

@@ -10,7 +10,6 @@
#include <xrpl/protocol/SystemParameters.h> // IWYU pragma: keep
#include <xrpl/server/Port.h>

#include <boost/filesystem/operations.hpp>
#include <boost/format.hpp> // IWYU pragma: keep
#include <boost/format/free_funcs.hpp>
#include <boost/lexical_cast/bad_lexical_cast.hpp>
@@ -20,6 +19,7 @@
#include <cstdint>
#include <cstdlib>
#include <exception>
#include <filesystem>
#include <fstream>
#include <optional>
#include <ostream>
@@ -179,7 +179,7 @@ public:
[[nodiscard]] bool
dataDirExists() const
{
return boost::filesystem::is_directory(dataDir_);
return std::filesystem::is_directory(dataDir_);
}

[[nodiscard]] bool
@@ -192,7 +192,7 @@ public:
{
try
{
using namespace boost::filesystem;
using namespace std::filesystem;
if (rmDataDir_)
rmDir(dataDir_);
}
@@ -273,7 +273,7 @@ public:
class Config_test final : public TestSuite
{
private:
using path = boost::filesystem::path;
using path = std::filesystem::path;

public:
void
@@ -309,7 +309,7 @@ port_wss_admin
{
testcase("config_file");

using namespace boost::filesystem;
using namespace std::filesystem;
auto const cwd = current_path();

// Test both config file names.
@@ -425,7 +425,7 @@ port_wss_admin
{
testcase("database_path");

using namespace boost::filesystem;
using namespace std::filesystem;
{
boost::format cc("[database_path]\n%1%\n");

@@ -601,7 +601,7 @@ main
{
testcase("validators_file");

using namespace boost::filesystem;
using namespace std::filesystem;
{
// load should throw for missing specified validators file
boost::format cc("[validators_file]\n%1%\n");

@@ -6,8 +6,6 @@
#include <xrpl/rdb/SociDB.h>

#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/optional/optional.hpp>

#include <soci/into.h>
@@ -18,6 +16,7 @@
#include <cstdint>
#include <cstring>
#include <exception>
#include <filesystem>
#include <iterator>
#include <limits>
#include <stdexcept>
@@ -30,7 +29,7 @@ class SociDB_test final : public TestSuite
{
private:
static void
setupSQLiteConfig(BasicConfig& config, boost::filesystem::path const& dbPath)
setupSQLiteConfig(BasicConfig& config, std::filesystem::path const& dbPath)
{
config.overwrite("sqdb", "backend", "sqlite");
auto value = dbPath.string();
@@ -39,18 +38,18 @@ private:
}

static void
cleanupDatabaseDir(boost::filesystem::path const& dbPath)
cleanupDatabaseDir(std::filesystem::path const& dbPath)
{
using namespace boost::filesystem;
using namespace std::filesystem;
if (!exists(dbPath) || !is_directory(dbPath) || !is_empty(dbPath))
return;
remove(dbPath);
}

static void
setupDatabaseDir(boost::filesystem::path const& dbPath)
setupDatabaseDir(std::filesystem::path const& dbPath)
{
using namespace boost::filesystem;
using namespace std::filesystem;
if (!exists(dbPath))
{
create_directory(dbPath);
@@ -63,10 +62,10 @@ private:
Throw<std::runtime_error>("Cannot create directory: " + dbPath.string());
}
}
static boost::filesystem::path
static std::filesystem::path
getDatabasePath()
{
return boost::filesystem::current_path() / "socidb_test_databases";
return std::filesystem::current_path() / "socidb_test_databases";
}

public:
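The SociDB helpers above create a scratch database directory up front and remove it afterwards only if it is empty. A self-contained sketch of that create-then-clean-up pattern using std::filesystem, with hypothetical names (setupDir, cleanupDir) and no test-harness integration, illustrative only:

#include <filesystem>
#include <stdexcept>

// Create the directory if it is missing; throw if creation fails.
static void
setupDir(std::filesystem::path const& p)
{
    namespace fs = std::filesystem;
    if (!fs::exists(p) && !fs::create_directory(p))
        throw std::runtime_error("Cannot create directory: " + p.string());
}

// Remove the directory only when it exists, is a directory, and is empty.
static void
cleanupDir(std::filesystem::path const& p)
{
    namespace fs = std::filesystem;
    if (fs::exists(p) && fs::is_directory(p) && fs::is_empty(p))
        fs::remove(p);
}

int main()
{
    auto const dir = std::filesystem::current_path() / "demo_databases";
    setupDir(dir);
    cleanupDir(dir);
    return 0;
}
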
@@ -156,7 +155,7 @@ public:
checkValues(s);
}
{
namespace bfs = boost::filesystem;
namespace bfs = std::filesystem;
// Remove the database
bfs::path const dbPath(sc.connectionString());
if (bfs::is_regular_file(dbPath))
@@ -286,7 +285,7 @@ public:
#endif
}
{
namespace bfs = boost::filesystem;
namespace bfs = std::filesystem;
// Remove the database
bfs::path const dbPath(sc.connectionString());
if (bfs::is_regular_file(dbPath))
@@ -338,7 +337,7 @@ public:
s << "SELECT LedgerSeq FROM Ledgers;", soci::into(ledgersLS);
BEAST_EXPECT(ledgersLS.size() == numRows);
}
namespace bfs = boost::filesystem;
namespace bfs = std::filesystem;
// Remove the database
bfs::path const dbPath(sc.connectionString());
if (bfs::is_regular_file(dbPath))

@@ -4,8 +4,7 @@

#include <xrpl/basics/contract.h>

#include <boost/filesystem.hpp>

#include <filesystem>
#include <fstream>

namespace xrpl::detail {

@@ -16,7 +15,7 @@ namespace xrpl::detail {
class DirGuard
{
protected:
using path = boost::filesystem::path;
using path = std::filesystem::path;

private:
path subDir_;
@@ -43,7 +42,7 @@ public:
DirGuard(beast::unit_test::suite& test, path subDir, bool useCounter = true)
: subDir_(std::move(subDir)), test_(test)
{
using namespace boost::filesystem;
using namespace std::filesystem;

static auto subDirCounter = 0;
if (useCounter)
@@ -69,7 +68,7 @@ public:
{
try
{
using namespace boost::filesystem;
using namespace std::filesystem;

if (rmSubDir_)
rmDir(subDir_);
@@ -126,7 +125,7 @@ public:
{
try
{
using namespace boost::filesystem;
using namespace std::filesystem;
if (exists(file_))
{
remove(file_);
@@ -156,7 +155,7 @@ public:
[[nodiscard]] bool
fileExists() const
{
return boost::filesystem::exists(file_);
return std::filesystem::exists(file_);
}
};

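DirGuard and FileDirGuard are RAII helpers: the constructor creates a scratch directory or file and the destructor removes it even if a test throws. A stripped-down sketch of that idea using only std::filesystem, with a hypothetical ScratchDir class and none of the unit_test::suite integration (illustrative only):

#include <filesystem>
#include <fstream>
#include <string>
#include <system_error>

// Minimal RAII scratch-directory guard: create on construction,
// best-effort removal on destruction.
class ScratchDir
{
    std::filesystem::path dir_;

public:
    explicit ScratchDir(std::string const& name)
        : dir_(std::filesystem::temp_directory_path() / name)
    {
        std::filesystem::create_directories(dir_);
    }

    ~ScratchDir()
    {
        std::error_code ec;  // failures in a destructor are deliberately ignored
        std::filesystem::remove_all(dir_, ec);
    }

    std::filesystem::path const& path() const { return dir_; }
};

int main()
{
    ScratchDir guard("scratch_dir_demo");
    std::ofstream{guard.path() / "note.txt"} << "cleaned up automatically";
    return 0;  // directory and its contents are removed when guard leaves scope
}
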
@@ -42,14 +42,3 @@ if(NOT WIN32)
target_link_libraries(xrpl.test.net PRIVATE xrpl.imports.test)
add_dependencies(xrpl.tests xrpl.test.net)
endif()

xrpl_add_test(telemetry)
target_link_libraries(xrpl.test.telemetry PRIVATE xrpl.imports.test)
target_include_directories(xrpl.test.telemetry PRIVATE ${CMAKE_SOURCE_DIR}/src)
if(telemetry)
target_link_libraries(
xrpl.test.telemetry
PRIVATE opentelemetry-cpp::opentelemetry-cpp
)
endif()
add_dependencies(xrpl.tests xrpl.test.telemetry)

@@ -1,101 +0,0 @@
#include <xrpl/telemetry/SpanGuard.h>
#include <xrpl/telemetry/SpanNames.h>

#include <gtest/gtest.h>

using namespace xrpl;
using namespace xrpl::telemetry;

TEST(SpanGuardFactory, null_guard_methods_are_safe)
{
auto span = SpanGuard::span("nonexistent.span");
EXPECT_FALSE(span);

span.setAttribute("key", "value");
span.setAttribute("int_key", static_cast<int64_t>(42));
span.setAttribute("bool_key", true);
span.setOk();
span.setError("test");
span.addEvent("event");
}

TEST(SpanGuardFactory, category_span_returns_null_when_disabled)
{
auto span = SpanGuard::span(TraceCategory::Rpc, "rpc", "test");
EXPECT_FALSE(span);

span.setAttribute("xrpl.rpc.command", "test");
span.setAttribute("xrpl.rpc.status", "success");
}

TEST(SpanGuardFactory, child_span_null_when_no_parent)
{
auto span = SpanGuard::span("parent.test");
auto child = span.childSpan("child.test");
EXPECT_FALSE(child);
}

TEST(SpanGuardFactory, linked_span_null_when_no_context)
{
auto span = SpanGuard::span("source.test");
auto linked = span.linkedSpan("linked.test");
EXPECT_FALSE(linked);
}

TEST(SpanGuardFactory, capture_context_returns_invalid_on_null)
{
auto span = SpanGuard::span("ctx.test");
auto ctx = span.captureContext();
EXPECT_FALSE(ctx.isValid());
}

TEST(SpanGuardFactory, move_construction_transfers_ownership)
{
auto span = SpanGuard::span("move.test");
auto moved = std::move(span);
EXPECT_FALSE(span);
moved.setAttribute("key", "value");
}

TEST(SpanGuardFactory, record_exception_safe_on_null)
{
auto span = SpanGuard::span(TraceCategory::Rpc, "rpc.command", "test");
try
{
throw std::runtime_error("test error");
}
catch (std::exception const& e)
{
span.recordException(e);
}
}

TEST(SpanGuardFactory, discard_safe_on_null)
{
auto span = SpanGuard::span(TraceCategory::Transactions, "tx", "process");
span.discard();
EXPECT_FALSE(span);
}

TEST(SpanGuardFactory, consensus_close_time_attributes)
{
// Verify the consensus attribute pattern compiles and
// doesn't crash with null SpanGuard.
{
auto span = telemetry::SpanGuard::span(
telemetry::TraceCategory::Consensus, telemetry::seg::consensus, "accept.apply");
span.setAttribute("xrpl.consensus.ledger.seq", static_cast<int64_t>(42));
span.setAttribute("xrpl.consensus.close_time", static_cast<int64_t>(780000000));
span.setAttribute("xrpl.consensus.close_time_correct", true);
span.setAttribute("xrpl.consensus.close_resolution_ms", static_cast<int64_t>(30000));
span.setAttribute("xrpl.consensus.state", std::string("finished"));
span.setAttribute("xrpl.consensus.proposing", true);
span.setAttribute("xrpl.consensus.round_time_ms", static_cast<int64_t>(3500));
}
{
auto span = telemetry::SpanGuard::span(
telemetry::TraceCategory::Consensus, telemetry::seg::consensus, "accept.apply");
span.setAttribute("xrpl.consensus.close_time_correct", false);
span.setAttribute("xrpl.consensus.state", std::string("moved_on"));
}
}
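The deleted tests above exercise SpanGuard as a null object: with telemetry disabled (or no parent span) the factory hands back an empty guard, and every member call is a safe no-op. A generic sketch of that pattern, using a hypothetical Guard class rather than the telemetry API being removed here (illustrative only):

#include <cstdint>
#include <memory>
#include <string>

// Hypothetical null-object guard: a disabled guard holds no state and every
// member function degrades to a harmless no-op, so call sites never branch.
class Guard
{
    struct State
    {
        std::string name;
    };
    std::unique_ptr<State> state_;

    explicit Guard(std::unique_ptr<State> s) : state_(std::move(s)) {}

public:
    // Factory: returns an "empty" guard when the feature is disabled.
    static Guard make(bool enabled, std::string name)
    {
        if (!enabled)
            return Guard{nullptr};
        return Guard{std::make_unique<State>(State{std::move(name)})};
    }

    explicit operator bool() const { return static_cast<bool>(state_); }

    void setAttribute(std::string const&, std::int64_t) const
    {
        if (!state_)
            return;  // no-op on the null guard
        // ... a real guard would record the attribute here ...
    }
};

int main()
{
    auto g = Guard::make(false, "rpc.command");  // feature disabled
    g.setAttribute("demo.duration_ms", 12);      // safe: silently ignored
    return g ? 1 : 0;                            // returns 0: the guard is empty
}
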
@@ -1,121 +0,0 @@
#include <xrpl/basics/BasicConfig.h>
#include <xrpl/telemetry/Telemetry.h>

#include <gtest/gtest.h>

#include <chrono>

using namespace xrpl;

TEST(TelemetryConfig, setup_defaults)
{
telemetry::Telemetry::Setup s;
EXPECT_FALSE(s.enabled);
EXPECT_EQ(s.serviceName, "xrpld");
EXPECT_TRUE(s.serviceVersion.empty());
EXPECT_TRUE(s.serviceInstanceId.empty());
EXPECT_EQ(s.exporterEndpoint, "http://localhost:4318/v1/traces");
EXPECT_FALSE(s.useTls);
EXPECT_TRUE(s.tlsCertPath.empty());
EXPECT_DOUBLE_EQ(s.samplingRatio, 1.0);
EXPECT_EQ(s.batchSize, 512u);
EXPECT_EQ(s.batchDelay, std::chrono::milliseconds{5000});
EXPECT_EQ(s.maxQueueSize, 2048u);
EXPECT_EQ(s.networkId, 0u);
EXPECT_EQ(s.networkType, "mainnet");
EXPECT_TRUE(s.traceTransactions);
EXPECT_TRUE(s.traceConsensus);
EXPECT_TRUE(s.traceRpc);
EXPECT_FALSE(s.tracePeer);
EXPECT_TRUE(s.traceLedger);
}

TEST(TelemetryConfig, parse_empty_section)
{
Section section;
auto setup = telemetry::setup_Telemetry(section, "nHUtest123", "2.0.0", 0);

EXPECT_FALSE(setup.enabled);
EXPECT_EQ(setup.serviceName, "xrpld");
EXPECT_EQ(setup.serviceVersion, "2.0.0");
EXPECT_EQ(setup.serviceInstanceId, "nHUtest123");
EXPECT_DOUBLE_EQ(setup.samplingRatio, 1.0);
EXPECT_TRUE(setup.traceRpc);
EXPECT_TRUE(setup.traceTransactions);
EXPECT_TRUE(setup.traceConsensus);
EXPECT_FALSE(setup.tracePeer);
EXPECT_TRUE(setup.traceLedger);
}

TEST(TelemetryConfig, parse_full_section)
{
Section section;
section.set("enabled", "1");
section.set("service_name", "my-rippled");
section.set("service_instance_id", "custom-id");
section.set("exporter", "otlp_http");
section.set("endpoint", "http://collector:4318/v1/traces");
section.set("use_tls", "1");
section.set("tls_ca_cert", "/etc/ssl/ca.pem");
section.set("sampling_ratio", "0.5");
section.set("batch_size", "256");
section.set("batch_delay_ms", "3000");
section.set("max_queue_size", "4096");
section.set("trace_transactions", "0");
section.set("trace_consensus", "0");
section.set("trace_rpc", "1");
section.set("trace_peer", "1");
section.set("trace_ledger", "0");

auto setup = telemetry::setup_Telemetry(section, "nHUtest123", "2.0.0", 1);

EXPECT_TRUE(setup.enabled);
EXPECT_EQ(setup.serviceName, "my-rippled");
EXPECT_EQ(setup.serviceInstanceId, "custom-id");
EXPECT_EQ(setup.exporterEndpoint, "http://collector:4318/v1/traces");
EXPECT_TRUE(setup.useTls);
EXPECT_EQ(setup.tlsCertPath, "/etc/ssl/ca.pem");
EXPECT_DOUBLE_EQ(setup.samplingRatio, 0.5);
EXPECT_EQ(setup.batchSize, 256u);
EXPECT_EQ(setup.batchDelay, std::chrono::milliseconds{3000});
EXPECT_EQ(setup.maxQueueSize, 4096u);
EXPECT_FALSE(setup.traceTransactions);
EXPECT_FALSE(setup.traceConsensus);
EXPECT_TRUE(setup.traceRpc);
EXPECT_TRUE(setup.tracePeer);
EXPECT_FALSE(setup.traceLedger);
}

TEST(TelemetryConfig, null_telemetry_factory)
{
telemetry::Telemetry::Setup setup;
setup.enabled = false;

beast::Journal::Sink& sink = beast::Journal::getNullSink();
beast::Journal j(sink);
auto tel = telemetry::make_Telemetry(setup, j);
EXPECT_TRUE(tel != nullptr);
EXPECT_FALSE(tel->isEnabled());
EXPECT_FALSE(tel->shouldTraceRpc());
EXPECT_FALSE(tel->shouldTraceTransactions());
EXPECT_FALSE(tel->shouldTraceConsensus());
EXPECT_FALSE(tel->shouldTracePeer());
EXPECT_FALSE(tel->shouldTraceLedger());

// start/stop should be no-ops without crashing
tel->start();
tel->stop();
}

TEST(TelemetryConfig, sampling_ratio_clamped)
{
Section section;
section.set("sampling_ratio", "2.5");
auto setup = telemetry::setup_Telemetry(section, "nHUtest123", "2.0.0", 0);
EXPECT_DOUBLE_EQ(setup.samplingRatio, 1.0);

Section section2;
section2.set("sampling_ratio", "-0.5");
auto setup2 = telemetry::setup_Telemetry(section2, "nHUtest123", "2.0.0", 0);
EXPECT_DOUBLE_EQ(setup2.samplingRatio, 0.0);
}
@@ -1,155 +0,0 @@
#include <gtest/gtest.h>

#ifdef XRPL_ENABLE_TELEMETRY

#include <xrpl/telemetry/TraceContextPropagator.h>

#include <opentelemetry/context/context.h>
#include <opentelemetry/nostd/span.h>
#include <opentelemetry/trace/context.h>
#include <opentelemetry/trace/default_span.h>
#include <opentelemetry/trace/span_context.h>
#include <opentelemetry/trace/trace_flags.h>
#include <opentelemetry/trace/trace_id.h>

#include <cstring>

namespace trace = opentelemetry::trace;

TEST(TraceContextPropagator, round_trip)
{
std::uint8_t traceIdBuf[16] = {
0x01,
0x02,
0x03,
0x04,
0x05,
0x06,
0x07,
0x08,
0x09,
0x0a,
0x0b,
0x0c,
0x0d,
0x0e,
0x0f,
0x10};
std::uint8_t spanIdBuf[8] = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x11, 0x22};

trace::TraceId traceId(opentelemetry::nostd::span<uint8_t const, 16>(traceIdBuf, 16));
trace::SpanId spanId(opentelemetry::nostd::span<uint8_t const, 8>(spanIdBuf, 8));
trace::TraceFlags flags(trace::TraceFlags::kIsSampled);
trace::SpanContext spanCtx(traceId, spanId, flags, true);

auto ctx = opentelemetry::context::Context{}.SetValue(
trace::kSpanKey,
opentelemetry::nostd::shared_ptr<trace::Span>(new trace::DefaultSpan(spanCtx)));

protocol::TraceContext proto;
xrpl::telemetry::injectToProtobuf(ctx, proto);

EXPECT_TRUE(proto.has_trace_id());
EXPECT_EQ(proto.trace_id().size(), 16u);
EXPECT_TRUE(proto.has_span_id());
EXPECT_EQ(proto.span_id().size(), 8u);
EXPECT_EQ(proto.trace_flags(), static_cast<uint32_t>(trace::TraceFlags::kIsSampled));
EXPECT_EQ(std::memcmp(proto.trace_id().data(), traceIdBuf, 16), 0);
EXPECT_EQ(std::memcmp(proto.span_id().data(), spanIdBuf, 8), 0);

auto extractedCtx = xrpl::telemetry::extractFromProtobuf(proto);
auto extractedSpan = trace::GetSpan(extractedCtx);
ASSERT_NE(extractedSpan, nullptr);

auto const& extracted = extractedSpan->GetContext();
EXPECT_TRUE(extracted.IsValid());
EXPECT_TRUE(extracted.IsRemote());
EXPECT_EQ(extracted.trace_id(), traceId);
EXPECT_EQ(extracted.span_id(), spanId);
EXPECT_TRUE(extracted.trace_flags().IsSampled());
}

TEST(TraceContextPropagator, extract_empty_protobuf)
{
protocol::TraceContext proto;
auto ctx = xrpl::telemetry::extractFromProtobuf(proto);
auto span = trace::GetSpan(ctx);
if (span)
{
EXPECT_FALSE(span->GetContext().IsValid());
}
}

TEST(TraceContextPropagator, extract_wrong_size_trace_id)
{
protocol::TraceContext proto;
proto.set_trace_id(std::string(8, '\x01'));
proto.set_span_id(std::string(8, '\xaa'));

auto ctx = xrpl::telemetry::extractFromProtobuf(proto);
auto span = trace::GetSpan(ctx);
if (span)
{
EXPECT_FALSE(span->GetContext().IsValid());
}
}

TEST(TraceContextPropagator, extract_wrong_size_span_id)
{
protocol::TraceContext proto;
proto.set_trace_id(std::string(16, '\x01'));
proto.set_span_id(std::string(4, '\xaa'));

auto ctx = xrpl::telemetry::extractFromProtobuf(proto);
auto span = trace::GetSpan(ctx);
if (span)
{
EXPECT_FALSE(span->GetContext().IsValid());
}
}

TEST(TraceContextPropagator, inject_invalid_span)
{
auto ctx = opentelemetry::context::Context{};
protocol::TraceContext proto;
xrpl::telemetry::injectToProtobuf(ctx, proto);

EXPECT_FALSE(proto.has_trace_id());
EXPECT_FALSE(proto.has_span_id());
}

TEST(TraceContextPropagator, flags_preservation)
{
std::uint8_t traceIdBuf[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
std::uint8_t spanIdBuf[8] = {1, 2, 3, 4, 5, 6, 7, 8};

// Test with flags NOT sampled (flags = 0)
trace::TraceFlags flags(0);
trace::SpanContext spanCtx(
trace::TraceId(opentelemetry::nostd::span<uint8_t const, 16>(traceIdBuf, 16)),
trace::SpanId(opentelemetry::nostd::span<uint8_t const, 8>(spanIdBuf, 8)),
flags,
true);

auto ctx = opentelemetry::context::Context{}.SetValue(
trace::kSpanKey,
opentelemetry::nostd::shared_ptr<trace::Span>(new trace::DefaultSpan(spanCtx)));

protocol::TraceContext proto;
xrpl::telemetry::injectToProtobuf(ctx, proto);
EXPECT_EQ(proto.trace_flags(), 0u);

auto extracted = xrpl::telemetry::extractFromProtobuf(proto);
auto span = trace::GetSpan(extracted);
ASSERT_NE(span, nullptr);
EXPECT_FALSE(span->GetContext().trace_flags().IsSampled());
}

#else // XRPL_ENABLE_TELEMETRY not defined

TEST(TraceContextPropagator, compiles_without_telemetry)
{
SUCCEED();
}

#endif // XRPL_ENABLE_TELEMETRY
@@ -1,8 +0,0 @@
#include <gtest/gtest.h>

int
main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
Some files were not shown because too many files have changed in this diff.