style: Add prettier pre-commit hook (#2031)

There are 2 things to know about prettier:
- it's quite pretty most of the time
- it's not configurable
This commit is contained in:
Ayaz Salikhov
2025-04-25 16:24:45 +01:00
committed by GitHub
parent 593d7298b1
commit 1e0a2f5162
45 changed files with 632 additions and 548 deletions

View File

@@ -23,7 +23,7 @@ BreakBeforeBraces: WebKit
BreakBeforeTernaryOperators: true BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120 ColumnLimit: 120
CommentPragmas: '^ IWYU pragma:' CommentPragmas: "^ IWYU pragma:"
ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4 ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4 ContinuationIndentWidth: 4
@@ -39,11 +39,11 @@ IncludeCategories:
Priority: 1 Priority: 1
- Regex: '^<.*\.(h|hpp)>$' - Regex: '^<.*\.(h|hpp)>$'
Priority: 2 Priority: 2
- Regex: '^<.*>$' - Regex: "^<.*>$"
Priority: 3 Priority: 3
- Regex: '.*' - Regex: ".*"
Priority: 4 Priority: 4
IncludeIsMainRegex: '$' IncludeIsMainRegex: "$"
IndentCaseLabels: true IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false IndentFunctionDeclarationAfterType: false
IndentWidth: 4 IndentWidth: 4

View File

@@ -1,5 +1,5 @@
--- ---
Checks: '-*, Checks: "-*,
bugprone-argument-comment, bugprone-argument-comment,
bugprone-assert-side-effect, bugprone-assert-side-effect,
bugprone-bad-signal-to-kill-thread, bugprone-bad-signal-to-kill-thread,
@@ -146,7 +146,7 @@ Checks: '-*,
readability-static-definition-in-anonymous-namespace, readability-static-definition-in-anonymous-namespace,
readability-suspicious-call-argument, readability-suspicious-call-argument,
readability-use-std-min-max readability-use-std-min-max
' "
CheckOptions: CheckOptions:
readability-braces-around-statements.ShortStatementLines: 2 readability-braces-around-statements.ShortStatementLines: 2
@@ -158,21 +158,21 @@ CheckOptions:
readability-identifier-naming.EnumConstantCase: CamelCase readability-identifier-naming.EnumConstantCase: CamelCase
readability-identifier-naming.ScopedEnumConstantCase: CamelCase readability-identifier-naming.ScopedEnumConstantCase: CamelCase
readability-identifier-naming.GlobalConstantCase: UPPER_CASE readability-identifier-naming.GlobalConstantCase: UPPER_CASE
readability-identifier-naming.GlobalConstantPrefix: 'k' readability-identifier-naming.GlobalConstantPrefix: "k"
readability-identifier-naming.GlobalVariableCase: CamelCase readability-identifier-naming.GlobalVariableCase: CamelCase
readability-identifier-naming.GlobalVariablePrefix: 'g' readability-identifier-naming.GlobalVariablePrefix: "g"
readability-identifier-naming.ConstexprFunctionCase: camelBack readability-identifier-naming.ConstexprFunctionCase: camelBack
readability-identifier-naming.ConstexprMethodCase: camelBack readability-identifier-naming.ConstexprMethodCase: camelBack
readability-identifier-naming.ClassMethodCase: camelBack readability-identifier-naming.ClassMethodCase: camelBack
readability-identifier-naming.ClassMemberCase: camelBack readability-identifier-naming.ClassMemberCase: camelBack
readability-identifier-naming.ClassConstantCase: UPPER_CASE readability-identifier-naming.ClassConstantCase: UPPER_CASE
readability-identifier-naming.ClassConstantPrefix: 'k' readability-identifier-naming.ClassConstantPrefix: "k"
readability-identifier-naming.StaticConstantCase: UPPER_CASE readability-identifier-naming.StaticConstantCase: UPPER_CASE
readability-identifier-naming.StaticConstantPrefix: 'k' readability-identifier-naming.StaticConstantPrefix: "k"
readability-identifier-naming.StaticVariableCase: UPPER_CASE readability-identifier-naming.StaticVariableCase: UPPER_CASE
readability-identifier-naming.StaticVariablePrefix: 'k' readability-identifier-naming.StaticVariablePrefix: "k"
readability-identifier-naming.ConstexprVariableCase: UPPER_CASE readability-identifier-naming.ConstexprVariableCase: UPPER_CASE
readability-identifier-naming.ConstexprVariablePrefix: 'k' readability-identifier-naming.ConstexprVariablePrefix: "k"
readability-identifier-naming.LocalConstantCase: camelBack readability-identifier-naming.LocalConstantCase: camelBack
readability-identifier-naming.LocalVariableCase: camelBack readability-identifier-naming.LocalVariableCase: camelBack
readability-identifier-naming.TemplateParameterCase: CamelCase readability-identifier-naming.TemplateParameterCase: CamelCase
@@ -181,11 +181,11 @@ CheckOptions:
readability-identifier-naming.MemberCase: camelBack readability-identifier-naming.MemberCase: camelBack
readability-identifier-naming.PrivateMemberSuffix: _ readability-identifier-naming.PrivateMemberSuffix: _
readability-identifier-naming.ProtectedMemberSuffix: _ readability-identifier-naming.ProtectedMemberSuffix: _
readability-identifier-naming.PublicMemberSuffix: '' readability-identifier-naming.PublicMemberSuffix: ""
readability-identifier-naming.FunctionIgnoredRegexp: '.*tag_invoke.*' readability-identifier-naming.FunctionIgnoredRegexp: ".*tag_invoke.*"
bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc
misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*;.*(expected|unexpected).*;.*ranges_lower_bound\.h;time.h;stdlib.h;__chrono/.*;fmt/chrono.h;boost/uuid/uuid_hash.hpp' misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*;.*(expected|unexpected).*;.*ranges_lower_bound\.h;time.h;stdlib.h;__chrono/.*;fmt/chrono.h;boost/uuid/uuid_hash.hpp'
HeaderFilterRegex: '^.*/(src|tests)/.*\.(h|hpp)$' HeaderFilterRegex: '^.*/(src|tests)/.*\.(h|hpp)$'
WarningsAsErrors: '*' WarningsAsErrors: "*"

View File

@@ -8,9 +8,9 @@ parse:
- BAR - BAR
- BAZ - BAZ
kwargs: kwargs:
HEADERS: '*' HEADERS: "*"
SOURCES: '*' SOURCES: "*"
DEPENDS: '*' DEPENDS: "*"
_help_override_spec: _help_override_spec:
- Override configurations per-command where available - Override configurations per-command where available
override_spec: {} override_spec: {}
@@ -43,7 +43,7 @@ format:
- indicates how fractional indentions are handled during - indicates how fractional indentions are handled during
- whitespace replacement. If set to 'use-space', fractional - whitespace replacement. If set to 'use-space', fractional
- indentation is left as spaces (utf-8 0x20). If set to - indentation is left as spaces (utf-8 0x20). If set to
- '`round-up` fractional indentation is replaced with a single' - "`round-up` fractional indentation is replaced with a single"
- tab character (utf-8 0x09) effectively shifting the column - tab character (utf-8 0x09) effectively shifting the column
- to the next tabstop - to the next tabstop
fractional_tab_policy: use-space fractional_tab_policy: use-space
@@ -73,9 +73,9 @@ format:
dangle_parens: true dangle_parens: true
_help_dangle_align: _help_dangle_align:
- If the trailing parenthesis must be 'dangled' on its own - If the trailing parenthesis must be 'dangled' on its own
- 'line, then align it to this reference: `prefix`: the start' - "line, then align it to this reference: `prefix`: the start"
- 'of the statement, `prefix-indent`: the start of the' - "of the statement, `prefix-indent`: the start of the"
- 'statement, plus one indentation level, `child`: align to' - "statement, plus one indentation level, `child`: align to"
- the column of the arguments - the column of the arguments
dangle_align: prefix dangle_align: prefix
_help_min_prefix_chars: _help_min_prefix_chars:
@@ -127,7 +127,7 @@ _help_markup: Options affecting comment reflow and formatting.
markup: markup:
_help_bullet_char: _help_bullet_char:
- What character to use for bulleted lists - What character to use for bulleted lists
bullet_char: '*' bullet_char: "*"
_help_enum_char: _help_enum_char:
- What character to use as punctuation after numerals in an - What character to use as punctuation after numerals in an
- enumerated list - enumerated list
@@ -154,7 +154,7 @@ markup:
- If a comment line starts with this pattern then it - If a comment line starts with this pattern then it
- is explicitly a trailing comment for the preceding - is explicitly a trailing comment for the preceding
- argument. Default is '#<' - argument. Default is '#<'
explicit_trailing_pattern: '#<' explicit_trailing_pattern: "#<"
_help_hashruler_min_length: _help_hashruler_min_length:
- If a comment line starts with at least this many consecutive - If a comment line starts with at least this many consecutive
- hash characters, then don't lstrip() them off. This allows - hash characters, then don't lstrip() them off. This allows
@@ -176,14 +176,14 @@ lint:
disabled_codes: [] disabled_codes: []
_help_function_pattern: _help_function_pattern:
- regular expression pattern describing valid function names - regular expression pattern describing valid function names
function_pattern: '[0-9a-z_]+' function_pattern: "[0-9a-z_]+"
_help_macro_pattern: _help_macro_pattern:
- regular expression pattern describing valid macro names - regular expression pattern describing valid macro names
macro_pattern: '[0-9A-Z_]+' macro_pattern: "[0-9A-Z_]+"
_help_global_var_pattern: _help_global_var_pattern:
- regular expression pattern describing valid names for - regular expression pattern describing valid names for
- variables with global (cache) scope - variables with global (cache) scope
global_var_pattern: '[A-Z][0-9A-Z_]+' global_var_pattern: "[A-Z][0-9A-Z_]+"
_help_internal_var_pattern: _help_internal_var_pattern:
- regular expression pattern describing valid names for - regular expression pattern describing valid names for
- variables with global scope (but internal semantic) - variables with global scope (but internal semantic)
@@ -191,7 +191,7 @@ lint:
_help_local_var_pattern: _help_local_var_pattern:
- regular expression pattern describing valid names for - regular expression pattern describing valid names for
- variables with local scope - variables with local scope
local_var_pattern: '[a-z][a-z0-9_]+' local_var_pattern: "[a-z][a-z0-9_]+"
_help_private_var_pattern: _help_private_var_pattern:
- regular expression pattern describing valid names for - regular expression pattern describing valid names for
- private directory variables - private directory variables
@@ -199,15 +199,15 @@ lint:
_help_public_var_pattern: _help_public_var_pattern:
- regular expression pattern describing valid names for public - regular expression pattern describing valid names for public
- directory variables - directory variables
public_var_pattern: '[A-Z][0-9A-Z_]+' public_var_pattern: "[A-Z][0-9A-Z_]+"
_help_argument_var_pattern: _help_argument_var_pattern:
- regular expression pattern describing valid names for - regular expression pattern describing valid names for
- function/macro arguments and loop variables. - function/macro arguments and loop variables.
argument_var_pattern: '[a-z][a-z0-9_]+' argument_var_pattern: "[a-z][a-z0-9_]+"
_help_keyword_pattern: _help_keyword_pattern:
- regular expression pattern describing valid names for - regular expression pattern describing valid names for
- keywords used in functions or macros - keywords used in functions or macros
keyword_pattern: '[A-Z][0-9A-Z_]+' keyword_pattern: "[A-Z][0-9A-Z_]+"
_help_max_conditionals_custom_parser: _help_max_conditionals_custom_parser:
- In the heuristic for C0201, how many conditionals to match - In the heuristic for C0201, how many conditionals to match
- within a loop before considering the loop a parser. - within a loop before considering the loop a parser.

View File

@@ -3,29 +3,34 @@ name: Bug report
about: Create a report to help us improve about: Create a report to help us improve
title: "[Title with short description] (Version: [Clio version])" title: "[Title with short description] (Version: [Clio version])"
labels: bug labels: bug
assignees: '' assignees: ""
--- ---
<!-- Please search existing issues to avoid creating duplicates. --> <!-- Please search existing issues to avoid creating duplicates. -->
<!-- Kindly refrain from posting any credentials or sensitive information in this issue --> <!-- Kindly refrain from posting any credentials or sensitive information in this issue -->
## Issue Description ## Issue Description
<!-- Provide a summary for your issue/bug. --> <!-- Provide a summary for your issue/bug. -->
## Steps to Reproduce ## Steps to Reproduce
<!-- List in detail the exact steps to reproduce the unexpected behavior of the software. --> <!-- List in detail the exact steps to reproduce the unexpected behavior of the software. -->
## Expected Result ## Expected Result
<!-- Explain in detail what behavior you expected to happen. --> <!-- Explain in detail what behavior you expected to happen. -->
## Actual Result ## Actual Result
<!-- Explain in detail what behavior actually happened. --> <!-- Explain in detail what behavior actually happened. -->
## Environment ## Environment
<!-- Please describe your environment setup (such as Ubuntu 20.04.2 with Boost 1.82). --> <!-- Please describe your environment setup (such as Ubuntu 20.04.2 with Boost 1.82). -->
<!-- Please use the version returned by './clio_server --version' as the version number --> <!-- Please use the version returned by './clio_server --version' as the version number -->
## Supporting Files ## Supporting Files
<!-- If you have supporting files such as a log, feel free to post a link here using Github Gist. --> <!-- If you have supporting files such as a log, feel free to post a link here using Github Gist. -->
<!-- Consider adding configuration files with private information removed via Github Gist. --> <!-- Consider adding configuration files with private information removed via Github Gist. -->

View File

@@ -3,21 +3,24 @@ name: Feature request
about: Suggest an idea for this project about: Suggest an idea for this project
title: "[Title with short description] (Version: [Clio version])" title: "[Title with short description] (Version: [Clio version])"
labels: enhancement labels: enhancement
assignees: '' assignees: ""
--- ---
<!-- Please search existing issues to avoid creating duplicates. --> <!-- Please search existing issues to avoid creating duplicates. -->
<!-- Kindly refrain from posting any credentials or sensitive information in this issue --> <!-- Kindly refrain from posting any credentials or sensitive information in this issue -->
## Summary ## Summary
<!-- Provide a summary to the feature request --> <!-- Provide a summary to the feature request -->
## Motivation ## Motivation
<!-- Why do we need this feature? --> <!-- Why do we need this feature? -->
## Solution ## Solution
<!-- What is the solution? --> <!-- What is the solution? -->
## Paths Not Taken ## Paths Not Taken
<!-- What other alternatives have been considered? --> <!-- What other alternatives have been considered? -->

View File

@@ -3,8 +3,7 @@ name: Question
about: A question in form of an issue about: A question in form of an issue
title: "[Title with short description] (Version: Clio version)" title: "[Title with short description] (Version: Clio version)"
labels: question labels: question
assignees: '' assignees: ""
--- ---
<!-- Please search existing issues to avoid creating duplicates. --> <!-- Please search existing issues to avoid creating duplicates. -->
@@ -12,7 +11,9 @@ assignees: ''
<!-- Kindly refrain from posting any credentials or sensitive information in this issue --> <!-- Kindly refrain from posting any credentials or sensitive information in this issue -->
## Question ## Question
<!-- Your question --> <!-- Your question -->
## Paths Not Taken ## Paths Not Taken
<!-- If applicable, what other alternatives have been considered? --> <!-- If applicable, what other alternatives have been considered? -->

View File

@@ -7,7 +7,7 @@ inputs:
substract_threads: substract_threads:
description: An option for the action get_number_of_threads. See get_number_of_threads description: An option for the action get_number_of_threads. See get_number_of_threads
required: true required: true
default: '0' default: "0"
runs: runs:
using: composite using: composite
steps: steps:

View File

@@ -10,11 +10,11 @@ inputs:
labels: labels:
description: Comma-separated list of labels description: Comma-separated list of labels
required: true required: true
default: 'bug' default: "bug"
assignees: assignees:
description: Comma-separated list of assignees description: Comma-separated list of assignees
required: true required: true
default: 'godexsoft,kuznetsss,PeterChen13579' default: "godexsoft,kuznetsss,PeterChen13579"
outputs: outputs:
created_issue_id: created_issue_id:
description: Created issue id description: Created issue id

View File

@@ -7,27 +7,27 @@ inputs:
conan_cache_hit: conan_cache_hit:
description: Whether conan cache has been downloaded description: Whether conan cache has been downloaded
required: true required: true
default: 'false' default: "false"
build_type: build_type:
description: Build type for third-party libraries and clio. Could be 'Release', 'Debug' description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
required: true required: true
default: 'Release' default: "Release"
build_integration_tests: build_integration_tests:
description: Whether to build integration tests description: Whether to build integration tests
required: true required: true
default: 'true' default: "true"
code_coverage: code_coverage:
description: Whether conan's coverage option should be on or not description: Whether conan's coverage option should be on or not
required: true required: true
default: 'false' default: "false"
static: static:
description: Whether Clio is to be statically linked description: Whether Clio is to be statically linked
required: true required: true
default: 'false' default: "false"
sanitizer: sanitizer:
description: Sanitizer to use description: Sanitizer to use
required: true required: true
default: 'false' # false, tsan, asan or ubsan default: "false" # false, tsan, asan or ubsan
runs: runs:
using: composite using: composite
steps: steps:

View File

@@ -4,7 +4,7 @@ inputs:
substract_threads: substract_threads:
description: How many threads to subtract from the calculated number description: How many threads to subtract from the calculated number
required: true required: true
default: '0' default: "0"
outputs: outputs:
threads_number: threads_number:
description: Number of threads to use description: Number of threads to use

View File

@@ -17,7 +17,7 @@ inputs:
code_coverage: code_coverage:
description: Whether code coverage is on description: Whether code coverage is on
required: true required: true
default: 'false' default: "false"
outputs: outputs:
conan_hash: conan_hash:
description: Hash to use as a part of conan cache key description: Hash to use as a part of conan cache key

View File

@@ -28,7 +28,7 @@ inputs:
code_coverage: code_coverage:
description: Whether code coverage is on description: Whether code coverage is on
required: true required: true
default: 'false' default: "false"
runs: runs:
using: composite using: composite
steps: steps:

View File

@@ -70,7 +70,7 @@ on:
description: Sanitizer to use description: Sanitizer to use
required: false required: false
type: string type: string
default: 'false' default: "false"
jobs: jobs:
build: build:

View File

@@ -83,8 +83,8 @@ jobs:
env: env:
GH_TOKEN: ${{ github.token }} GH_TOKEN: ${{ github.token }}
with: with:
labels: 'compatibility,bug' labels: "compatibility,bug"
title: 'Proposed libXRPL check failed' title: "Proposed libXRPL check failed"
body: > body: >
Clio build or tests failed against `libXRPL ${{ github.event.client_payload.version }}`. Clio build or tests failed against `libXRPL ${{ github.event.client_payload.version }}`.

View File

@@ -83,7 +83,7 @@ jobs:
env: env:
GH_TOKEN: ${{ github.token }} GH_TOKEN: ${{ github.token }}
with: with:
title: 'Clang-tidy found bugs in code 🐛' title: "Clang-tidy found bugs in code 🐛"
body: > body: >
Clang-tidy found issues in the code: Clang-tidy found issues in the code:

View File

@@ -1,12 +1,12 @@
name: Nightly release name: Nightly release
on: on:
schedule: schedule:
- cron: '0 8 * * 1-5' - cron: "0 8 * * 1-5"
workflow_dispatch: workflow_dispatch:
pull_request: pull_request:
paths: paths:
- '.github/workflows/nightly.yml' - ".github/workflows/nightly.yml"
- '.github/workflows/build_clio_docker_image.yml' - ".github/workflows/build_clio_docker_image.yml"
jobs: jobs:
build: build:
@@ -121,6 +121,8 @@ jobs:
run: | run: |
cp ${{ github.workspace }}/.github/workflows/nightly_notes.md "${RUNNER_TEMP}/nightly_notes.md" cp ${{ github.workspace }}/.github/workflows/nightly_notes.md "${RUNNER_TEMP}/nightly_notes.md"
cd nightly_release cd nightly_release
echo '' >> "${RUNNER_TEMP}/nightly_notes.md"
echo '```' >> "${RUNNER_TEMP}/nightly_notes.md"
for d in $(ls); do for d in $(ls); do
archive_name=$(ls $d) archive_name=$(ls $d)
mv ${d}/${archive_name} ./ mv ${d}/${archive_name} ./
@@ -170,7 +172,7 @@ jobs:
env: env:
GH_TOKEN: ${{ github.token }} GH_TOKEN: ${{ github.token }}
with: with:
title: 'Nightly release failed 🌙' title: "Nightly release failed 🌙"
body: > body: >
Nightly release failed: Nightly release failed:

View File

@@ -3,4 +3,3 @@
Changelog (including previous releases): https://github.com/XRPLF/clio/commits/nightly Changelog (including previous releases): https://github.com/XRPLF/clio/commits/nightly
## SHA256 checksums ## SHA256 checksums
```

View File

@@ -5,7 +5,7 @@ on:
workflow_dispatch: workflow_dispatch:
pull_request: pull_request:
paths: paths:
- '.github/workflows/sanitizers.yml' - ".github/workflows/sanitizers.yml"
jobs: jobs:
build: build:

View File

@@ -2,14 +2,16 @@ name: Update CI docker image
on: on:
pull_request: pull_request:
paths: paths:
- 'docker/ci/**' - "docker/ci/**"
- 'docker/compilers/**' - "docker/compilers/**"
- .github/workflows/update_docker_ci.yml - .github/workflows/update_docker_ci.yml
push: push:
branches: [develop] branches: [develop]
paths: paths:
- 'docker/ci/**' # CI image must update when either its dockerfile changes # CI image must update when either its dockerfile changes
- 'docker/compilers/**' # or any compilers changed and were pushed by hand # or any compilers changed and were pushed by hand
- "docker/ci/**"
- "docker/compilers/**"
- .github/workflows/update_docker_ci.yml - .github/workflows/update_docker_ci.yml
workflow_dispatch: workflow_dispatch:

View File

@@ -23,3 +23,10 @@ repos:
exclude: ^docs/doxygen-awesome-theme/ exclude: ^docs/doxygen-awesome-theme/
- id: trailing-whitespace - id: trailing-whitespace
exclude: ^docs/doxygen-awesome-theme/ exclude: ^docs/doxygen-awesome-theme/
# Autoformat: YAML, JSON, Markdown, etc.
- repo: https://github.com/rbubley/mirrors-prettier
rev: v3.5.3
hooks:
- id: prettier
exclude: ^docs/doxygen-awesome-theme/

View File

@@ -1,7 +1,9 @@
# Contributing # Contributing
Thank you for your interest in contributing to the `clio` project 🙏 Thank you for your interest in contributing to the `clio` project 🙏
To contribute, please: To contribute, please:
1. Fork the repository under your own user. 1. Fork the repository under your own user.
2. Create a new branch on which to commit/push your changes. 2. Create a new branch on which to commit/push your changes.
3. Write and test your code. 3. Write and test your code.
@@ -14,6 +16,7 @@ To contribute, please:
> **Note:** Please read the [Style guide](#style-guide). > **Note:** Please read the [Style guide](#style-guide).
## Install git hooks ## Install git hooks
Please run the following command in order to use git hooks that are helpful for `clio` development. Please run the following command in order to use git hooks that are helpful for `clio` development.
```bash ```bash
@@ -21,12 +24,14 @@ git config --local core.hooksPath .githooks
``` ```
## Git hooks dependencies ## Git hooks dependencies
The pre-commit hook requires `clang-format >= 19.0.0` and `cmake-format` to be installed on your machine. The pre-commit hook requires `clang-format >= 19.0.0` and `cmake-format` to be installed on your machine.
`clang-format` can be installed using `brew` on macOS and default package manager on Linux. `clang-format` can be installed using `brew` on macOS and default package manager on Linux.
`cmake-format` can be installed using `pip`. `cmake-format` can be installed using `pip`.
The hook will also attempt to automatically use `doxygen` to verify that everything public in the codebase is covered by doc comments. If `doxygen` is not installed, the hook will raise a warning suggesting to install `doxygen` for future commits. The hook will also attempt to automatically use `doxygen` to verify that everything public in the codebase is covered by doc comments. If `doxygen` is not installed, the hook will raise a warning suggesting to install `doxygen` for future commits.
## Git commands ## Git commands
This section offers a detailed look at the git commands you will need to use to get your PR submitted. This section offers a detailed look at the git commands you will need to use to get your PR submitted.
Please note that there is more than one way to do this and these commands are provided for your convenience. Please note that there is more than one way to do this and these commands are provided for your convenience.
At this point it's assumed that you have already finished working on your feature/bug. At this point it's assumed that you have already finished working on your feature/bug.
@@ -43,6 +48,7 @@ git pull origin develop
git checkout <your feature branch> git checkout <your feature branch>
git rebase -i develop git rebase -i develop
``` ```
For each commit in the list other than the first one, enter `s` to squash. For each commit in the list other than the first one, enter `s` to squash.
After this is done, you will have the opportunity to write a message for the squashed commit. After this is done, you will have the opportunity to write a message for the squashed commit.
@@ -52,6 +58,7 @@ After this is done, you will have the opportunity to write a message for the squ
# You should now have a single commit on top of a commit in `develop` # You should now have a single commit on top of a commit in `develop`
git log git log
``` ```
> **Note:** If there are merge conflicts, please resolve them now. > **Note:** If there are merge conflicts, please resolve them now.
```bash ```bash
@@ -69,14 +76,17 @@ git push --force
``` ```
## Use ccache (optional) ## Use ccache (optional)
Clio uses `ccache` to speed up compilation. If you want to use it, please make sure it is installed on your machine. Clio uses `ccache` to speed up compilation. If you want to use it, please make sure it is installed on your machine.
CMake will automatically detect it and use it if it is available. CMake will automatically detect it and use it if it is available.
## Opening a pull request ## Opening a pull request
When a pull request is open CI will perform checks on the new code. When a pull request is open CI will perform checks on the new code.
Title of the pull request and squashed commit should follow [conventional commits specification](https://www.conventionalcommits.org/en/v1.0.0/). Title of the pull request and squashed commit should follow [conventional commits specification](https://www.conventionalcommits.org/en/v1.0.0/).
## Fixing issues found during code review ## Fixing issues found during code review
While your code is in review, it's possible that some changes will be requested by reviewer(s). While your code is in review, it's possible that some changes will be requested by reviewer(s).
This section describes the process of adding your fixes. This section describes the process of adding your fixes.
@@ -95,6 +105,7 @@ git push
``` ```
## After code review ## After code review
When your PR is approved and ready to merge, use `Squash and merge`. When your PR is approved and ready to merge, use `Squash and merge`.
The button for that is near the bottom of the PR's page on GitHub. The button for that is near the bottom of the PR's page on GitHub.
@@ -102,55 +113,63 @@ The button for that is near the bottom of the PR's page on GitHub.
> **Note:** See [issues](https://github.com/XRPLF/clio/issues) to find the `ISSUE_ID` for the feature/bug you were working on. > **Note:** See [issues](https://github.com/XRPLF/clio/issues) to find the `ISSUE_ID` for the feature/bug you were working on.
# Style guide # Style guide
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent. This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.
## Formatting ## Formatting
Code must conform to `clang-format` version 19, unless the result would be unreasonably difficult to read or maintain. Code must conform to `clang-format` version 19, unless the result would be unreasonably difficult to read or maintain.
In most cases the pre-commit hook will take care of formatting and will fix any issues automatically. In most cases the pre-commit hook will take care of formatting and will fix any issues automatically.
To manually format your code, use `clang-format -i <your changed files>` for C++ files and `cmake-format -i <your changed files>` for CMake files. To manually format your code, use `clang-format -i <your changed files>` for C++ files and `cmake-format -i <your changed files>` for CMake files.
## Documentation ## Documentation
All public namespaces, classes and functions must be covered by doc (`doxygen`) comments. Everything that is not within a nested `impl` namespace is considered public. All public namespaces, classes and functions must be covered by doc (`doxygen`) comments. Everything that is not within a nested `impl` namespace is considered public.
> **Note:** Keep in mind that this is enforced by Clio's CI and your build will fail if newly added public code lacks documentation. > **Note:** Keep in mind that this is enforced by Clio's CI and your build will fail if newly added public code lacks documentation.
## Avoid ## Avoid
* Proliferation of nearly identical code.
* Proliferation of new files and classes unless it improves readability or/and compilation time. - Proliferation of nearly identical code.
* Unmanaged memory allocation and raw pointers. - Proliferation of new files and classes unless it improves readability or/and compilation time.
* Macros (unless they add significant value.) - Unmanaged memory allocation and raw pointers.
* Lambda patterns (unless these add significant value.) - Macros (unless they add significant value.)
* CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments. - Lambda patterns (unless these add significant value.)
* Importing new libraries unless there is a very good reason to do so. - CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments.
- Importing new libraries unless there is a very good reason to do so.
## Seek to ## Seek to
* Extend functionality of existing code rather than creating new code.
* Prefer readability over terseness where important logic is concerned. - Extend functionality of existing code rather than creating new code.
* Inline functions that are not used or are not likely to be used elsewhere in the codebase. - Prefer readability over terseness where important logic is concerned.
* Use clear and self-explanatory names for functions, variables, structs and classes. - Inline functions that are not used or are not likely to be used elsewhere in the codebase.
* Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders. - Use clear and self-explanatory names for functions, variables, structs and classes.
* Provide as many comments as you feel that a competent programmer would need to understand what your code does. - Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders.
- Provide as many comments as you feel that a competent programmer would need to understand what your code does.
# Maintainers # Maintainers
Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc. Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc.
## Code Review ## Code Review
A PR must be reviewed and approved by at least one of the maintainers before it can be merged. A PR must be reviewed and approved by at least one of the maintainers before it can be merged.
## Adding and Removing ## Adding and Removing
New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected. New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected.
Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote. Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote.
## Existing Maintainers ## Existing Maintainers
* [godexsoft](https://github.com/godexsoft) (Ripple) - [godexsoft](https://github.com/godexsoft) (Ripple)
* [kuznetsss](https://github.com/kuznetsss) (Ripple) - [kuznetsss](https://github.com/kuznetsss) (Ripple)
* [legleux](https://github.com/legleux) (Ripple) - [legleux](https://github.com/legleux) (Ripple)
* [PeterChen13579](https://github.com/PeterChen13579) (Ripple) - [PeterChen13579](https://github.com/PeterChen13579) (Ripple)
## Honorable ex-Maintainers ## Honorable ex-Maintainers
* [cindyyan317](https://github.com/cindyyan317) (ex-Ripple) - [cindyyan317](https://github.com/cindyyan317) (ex-Ripple)
* [cjcobb23](https://github.com/cjcobb23) (ex-Ripple) - [cjcobb23](https://github.com/cjcobb23) (ex-Ripple)
* [natenichols](https://github.com/natenichols) (ex-Ripple) - [natenichols](https://github.com/natenichols) (ex-Ripple)

View File

@@ -16,7 +16,7 @@ Multiple Clio nodes can share access to the same dataset, which allows for a hig
Clio offers the full `rippled` API, with the caveat that Clio by default only returns validated data. This means that `ledger_index` defaults to `validated` instead of `current` for all requests. Other non-validated data, such as information about queued transactions, is also not returned. Clio offers the full `rippled` API, with the caveat that Clio by default only returns validated data. This means that `ledger_index` defaults to `validated` instead of `current` for all requests. Other non-validated data, such as information about queued transactions, is also not returned.
Clio retrieves data from a designated group of `rippled` nodes instead of connecting to the peer-to-peer network. Clio retrieves data from a designated group of `rippled` nodes instead of connecting to the peer-to-peer network.
For requests that require access to the peer-to-peer network, such as `fee` or `submit`, Clio automatically forwards the request to a `rippled` node and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to `rippled`. For requests that require access to the peer-to-peer network, such as `fee` or `submit`, Clio automatically forwards the request to a `rippled` node and propagates the response back to the client. To access non-validated data for _any_ request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to `rippled`.
> [!NOTE] > [!NOTE]
> Clio requires access to at least one `rippled` node, which can run on the same machine as Clio or separately. > Clio requires access to at least one `rippled` node, which can run on the same machine as Clio or separately.

View File

@@ -4,6 +4,7 @@ This image contains an environment to build [Clio](https://github.com/XRPLF/clio
It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but can also be used to compile Clio locally. It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but can also be used to compile Clio locally.
The image is based on Ubuntu 20.04 and contains: The image is based on Ubuntu 20.04 and contains:
- clang 16.0.6 - clang 16.0.6
- gcc 12.3 - gcc 12.3
- doxygen 1.12 - doxygen 1.12

View File

@@ -12,12 +12,14 @@ Your configuration file should be mounted under the path `/opt/clio/etc/config.j
Clio repository provides an [example](https://github.com/XRPLF/clio/blob/develop/docs/examples/config/example-config.json) of the configuration file. Clio repository provides an [example](https://github.com/XRPLF/clio/blob/develop/docs/examples/config/example-config.json) of the configuration file.
Config file recommendations: Config file recommendations:
- Set `log_to_console` to `false` if you want to avoid logs being written to `stdout`. - Set `log_to_console` to `false` if you want to avoid logs being written to `stdout`.
- Set `log_directory` to `/opt/clio/log` to store logs in a volume. - Set `log_directory` to `/opt/clio/log` to store logs in a volume.
## Usage ## Usage
The following command can be used to run Clio in docker (assuming server's port is `51233` in your config): The following command can be used to run Clio in docker (assuming server's port is `51233` in your config):
```bash ```bash
docker run -d -v <path to your config.json>:/opt/clio/etc/config.json -v <path to store logs>:/opt/clio/log -p 51233:51233 rippleci/clio docker run -d -v <path to your config.json>:/opt/clio/etc/config.json -v <path to store logs>:/opt/clio/log -p 51233:51233 rippleci/clio
``` ```

View File

@@ -11,7 +11,7 @@ Clio is built with [CMake](https://cmake.org/) and uses [Conan](https://conan.io
- [**Optional**] [CCache](https://ccache.dev/): speeds up compilation if you are going to compile Clio often - [**Optional**] [CCache](https://ccache.dev/): speeds up compilation if you are going to compile Clio often
| Compiler | Version | | Compiler | Version |
|-------------|---------| | ----------- | ------- |
| GCC | 12.3 | | GCC | 12.3 |
| Clang | 16 | | Clang | 16 |
| Apple Clang | 15 | | Apple Clang | 15 |

View File

@@ -36,7 +36,7 @@
} }
], ],
"forwarding": { "forwarding": {
"cache_timeout": 0.250, // in seconds, could be 0, which means no cache "cache_timeout": 0.25, // in seconds, could be 0, which means no cache
"request_timeout": 10.0 // time for Clio to wait for rippled to reply on a forwarded request (default is 10 seconds) "request_timeout": 10.0 // time for Clio to wait for rippled to reply on a forwarded request (default is 10 seconds)
}, },
"rpc": { "rpc": {
@@ -44,9 +44,7 @@
}, },
"dos_guard": { "dos_guard": {
// Comma-separated list of IPs to exclude from rate limiting // Comma-separated list of IPs to exclude from rate limiting
"whitelist": [ "whitelist": ["127.0.0.1"],
"127.0.0.1"
],
// //
// The below values are the default values and are only specified here // The below values are the default values and are only specified here
// for documentation purposes. The rate limiter currently limits // for documentation purposes. The rate limiter currently limits

View File

@@ -7,6 +7,7 @@
This directory contains an example of docker based infrastructure to collect and visualise metrics from clio. This directory contains an example of docker based infrastructure to collect and visualise metrics from clio.
The structure of the directory: The structure of the directory:
- `compose.yaml` - `compose.yaml`
Docker-compose file with Prometheus and Grafana set up. Docker-compose file with Prometheus and Grafana set up.
- `prometheus.yaml` - `prometheus.yaml`

View File

@@ -6,7 +6,7 @@ services:
volumes: volumes:
- ./prometheus.yaml:/etc/prometheus/prometheus.yml - ./prometheus.yaml:/etc/prometheus/prometheus.yml
command: command:
- '--config.file=/etc/prometheus/prometheus.yml' - "--config.file=/etc/prometheus/prometheus.yml"
grafana: grafana:
image: grafana/grafana image: grafana/grafana
ports: ports:

View File

@@ -80,9 +80,7 @@
"orientation": "auto", "orientation": "auto",
"percentChangeColorMode": "standard", "percentChangeColorMode": "standard",
"reduceOptions": { "reduceOptions": {
"calcs": [ "calcs": ["lastNotNull"],
"lastNotNull"
],
"fields": "", "fields": "",
"values": false "values": false
}, },
@@ -161,9 +159,7 @@
"orientation": "auto", "orientation": "auto",
"percentChangeColorMode": "standard", "percentChangeColorMode": "standard",
"reduceOptions": { "reduceOptions": {
"calcs": [ "calcs": ["lastNotNull"],
"lastNotNull"
],
"fields": "", "fields": "",
"values": false "values": false
}, },
@@ -246,9 +242,7 @@
"orientation": "auto", "orientation": "auto",
"percentChangeColorMode": "standard", "percentChangeColorMode": "standard",
"reduceOptions": { "reduceOptions": {
"calcs": [ "calcs": ["lastNotNull"],
"lastNotNull"
],
"fields": "", "fields": "",
"values": false "values": false
}, },
@@ -331,9 +325,7 @@
"orientation": "auto", "orientation": "auto",
"percentChangeColorMode": "standard", "percentChangeColorMode": "standard",
"reduceOptions": { "reduceOptions": {
"calcs": [ "calcs": ["lastNotNull"],
"lastNotNull"
],
"fields": "", "fields": "",
"values": false "values": false
}, },

View File

@@ -1,13 +1,13 @@
apiVersion: 1 apiVersion: 1
providers: providers:
- name: 'Clio dashboard' - name: "Clio dashboard"
# <int> Org id. Default to 1 # <int> Org id. Default to 1
orgId: 1 orgId: 1
# <string> name of the dashboard folder. # <string> name of the dashboard folder.
folder: '' folder: ""
# <string> folder UID. will be automatically generated if not specified # <string> folder UID. will be automatically generated if not specified
folderUid: '' folderUid: ""
# <string> provider type. Default to 'file' # <string> provider type. Default to 'file'
type: file type: file
# <bool> disable dashboard deletion # <bool> disable dashboard deletion

View File

@@ -3,6 +3,7 @@
## Prerequisites ## Prerequisites
- Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote. - Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote.
> [!IMPORTANT] > [!IMPORTANT]
> There are some key considerations when using **ScyllaDB**. By default, Scylla reserves all free RAM on a machine for itself. If you are running `rippled` or other services on the same machine, restrict its memory usage using the `--memory` argument. > There are some key considerations when using **ScyllaDB**. By default, Scylla reserves all free RAM on a machine for itself. If you are running `rippled` or other services on the same machine, restrict its memory usage using the `--memory` argument.
> >

View File

@@ -1,44 +1,60 @@
# Troubleshooting Guide # Troubleshooting Guide
This guide will help you troubleshoot common issues of Clio. This guide will help you troubleshoot common issues of Clio.
## Can't connect to DB ## Can't connect to DB
If you see the error log message `Could not connect to Cassandra: No hosts available`, this means that Clio can't connect to the database. Check the following: If you see the error log message `Could not connect to Cassandra: No hosts available`, this means that Clio can't connect to the database. Check the following:
- Make sure the database is running at the specified address and port. - Make sure the database is running at the specified address and port.
- Make sure the database is accessible from the machine where Clio is running. - Make sure the database is accessible from the machine where Clio is running.
You can use [cqlsh](https://pypi.org/project/cqlsh/) to check the connection to the database. You can use [cqlsh](https://pypi.org/project/cqlsh/) to check the connection to the database.
If you would like to run a local ScyllaDB, you can call: If you would like to run a local ScyllaDB, you can call:
```sh ```sh
docker run --rm -p 9042:9042 --name clio-scylla -d scylladb/scylla docker run --rm -p 9042:9042 --name clio-scylla -d scylladb/scylla
``` ```
## Check the server status of Clio ## Check the server status of Clio
To check if Clio is syncing with rippled: To check if Clio is syncing with rippled:
```sh ```sh
curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep seq curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep seq
``` ```
If Clio is syncing with rippled, the `seq` value will be increasing. If Clio is syncing with rippled, the `seq` value will be increasing.
## Clio fails to start ## Clio fails to start
If you see the error log message `Failed to fetch ETL state from...`, this means the configured rippled node is not reachable. Check the following: If you see the error log message `Failed to fetch ETL state from...`, this means the configured rippled node is not reachable. Check the following:
- Make sure the rippled node is running at the specified address and port. - Make sure the rippled node is running at the specified address and port.
- Make sure the rippled node is accessible from the machine where Clio is running. - Make sure the rippled node is accessible from the machine where Clio is running.
If you would like to run Clio without an available rippled node, you can add the below setting to Clio's configuration file: If you would like to run Clio without an available rippled node, you can add the below setting to Clio's configuration file:
``` ```
"allow_no_etl": true "allow_no_etl": true
``` ```
## Clio is not added to secure_gateway in rippled's config ## Clio is not added to secure_gateway in rippled's config
If you see the warning message `AsyncCallData is_unlimited is false.`, this means that Clio is not added to the `secure_gateway` of `port_grpc` session in the rippled configuration file. It will slow down the sync process. Please add Clio's IP to the `secure_gateway` in the rippled configuration file for both grpc and ws port. If you see the warning message `AsyncCallData is_unlimited is false.`, this means that Clio is not added to the `secure_gateway` of `port_grpc` session in the rippled configuration file. It will slow down the sync process. Please add Clio's IP to the `secure_gateway` in the rippled configuration file for both grpc and ws port.
## Clio is slow ## Clio is slow
To speed up the response time, Clio has a cache inside. However, cache can take time to warm up. If you see slow response time, you can firstly check if cache is still loading. To speed up the response time, Clio has a cache inside. However, cache can take time to warm up. If you see slow response time, you can firstly check if cache is still loading.
You can check the cache status by calling: You can check the cache status by calling:
```sh ```sh
curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep is_full curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep is_full
curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep is_enabled curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep is_enabled
``` ```
If `is_full` is false, it means the cache is still loading. Normally, Clio can respond quicker once the cache finishes loading. If `is_enabled` is false, it means the cache is disabled in the configuration file or there is data corruption in the database. If `is_full` is false, it means the cache is still loading. Normally, Clio can respond quicker once the cache finishes loading. If `is_enabled` is false, it means the cache is disabled in the configuration file or there is data corruption in the database.
## Receive error message `Too many requests` ## Receive error message `Too many requests`
If a client sees the error message `Too many requests`, this means that the client is blocked by Clio's DosGuard protection. You may want to add the client's IP to the whitelist in the configuration file, or update your other DosGuard settings. If a client sees the error message `Too many requests`, this means that the client is blocked by Clio's DosGuard protection. You may want to add the client's IP to the whitelist in the configuration file, or update your other DosGuard settings.

View File

@@ -6,7 +6,7 @@ To support additional database types, you can create new classes that implement
## Data Model ## Data Model
The data model used by Clio to read and write ledger data is different from what `rippled` uses. `rippled` uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. The data model used by Clio to read and write ledger data is different from what `rippled` uses. `rippled` uses a novel data structure named [_SHAMap_](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record.
`rippled` nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other `rippled` nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized `rippled` nodes. `rippled` nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other `rippled` nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized `rippled` nodes.
@@ -170,7 +170,7 @@ CREATE TABLE clio.successor (
This table is the important backbone of how histories of ledger objects are stored in Cassandra. The `successor` table stores the object index of all ledger objects that were validated on the XRP network along with the ledger sequence that the object was updated on. This table is the important backbone of how histories of ledger objects are stored in Cassandra. The `successor` table stores the object index of all ledger objects that were validated on the XRP network along with the ledger sequence that the object was updated on.
As each key is ordered by the sequence, which is achieved by tracing through the table with a specific sequence number, Clio can recreate a Linked List data structure that represents all the existing ledger objects at that ledger sequence. The special values of `0x00...00` and `0xFF...FF` are used to label the *head* and *tail* of the Linked List in the successor table. As each key is ordered by the sequence, which is achieved by tracing through the table with a specific sequence number, Clio can recreate a Linked List data structure that represents all the existing ledger objects at that ledger sequence. The special values of `0x00...00` and `0xFF...FF` are used to label the _head_ and _tail_ of the Linked List in the successor table.
The diagram below showcases how tracing through the same table, but with different sequence parameter filtering, can result in different Linked List data representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` can effectively trace through the successor table and get the Linked List of a specific sequence `n`. The diagram below showcases how tracing through the same table, but with different sequence parameter filtering, can result in different Linked List data representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` can effectively trace through the successor table and get the Linked List of a specific sequence `n`.

View File

@@ -23,8 +23,8 @@ For example, if segment **0x08581464C55B0B2C8C4FA27FA8DE0ED695D3BE019E7BE0969C92
Because of the nature of the Linked List, the cursors are crucial to balancing the workload of each coroutine. There are 3 types of cursor generation that can be used: Because of the nature of the Linked List, the cursors are crucial to balancing the workload of each coroutine. There are 3 types of cursor generation that can be used:
- **cache.num_diffs**: Cursors will be generated by the changed objects in the latest `cache.num_diffs` number of ledgers. The default value is 32. In *mainnet*, this type works well because the network is fairly busy and the number of changed objects in each ledger is relatively stable. Thus, we are able to get enough cursors after removing the deleted objects on *mainnet*. - **cache.num_diffs**: Cursors will be generated by the changed objects in the latest `cache.num_diffs` number of ledgers. The default value is 32. In _mainnet_, this type works well because the network is fairly busy and the number of changed objects in each ledger is relatively stable. Thus, we are able to get enough cursors after removing the deleted objects on _mainnet_.
For other networks, like the *devnet*, the number of changed objects in each ledger is not stable. When the network is silent, one coroutine may load a large number of objects while the other coroutines are idle. Below is a comparison of the number of cursors and loading time on *devnet*: For other networks, like the _devnet_, the number of changed objects in each ledger is not stable. When the network is silent, one coroutine may load a large number of objects while the other coroutines are idle. Below is a comparison of the number of cursors and loading time on _devnet_:
| Cursors | Loading time /seconds | | Cursors | Loading time /seconds |
| ------- | --------------------- | | ------- | --------------------- |

View File

@@ -1,35 +1,26 @@
# Clio Migration # Clio Migration
Clio maintains the off-chain data of XRPL and multiple index tables to power complex queries. To simplify the creation of index tables, this migration framework handles the process of database change and facilitates the migration of historical data seamlessly. Clio maintains the off-chain data of XRPL and multiple index tables to power complex queries. To simplify the creation of index tables, this migration framework handles the process of database change and facilitates the migration of historical data seamlessly.
## Command Line Usage ## Command Line Usage
Clio provides a migration command-line tool to migrate data in database. Clio provides a migration command-line tool to migrate data in database.
> Note: We need a **configuration file** to run the migration tool. This configuration file has the same format as the configuration file of the Clio server, ensuring consistency and ease of use. It reads the database configuration from the same session as the server's configuration, eliminating the need for separate setup or additional configuration files. Be aware that migration-specific configuration is under `.migration` session. > Note: We need a **configuration file** to run the migration tool. This configuration file has the same format as the configuration file of the Clio server, ensuring consistency and ease of use. It reads the database configuration from the same session as the server's configuration, eliminating the need for separate setup or additional configuration files. Be aware that migration-specific configuration is under `.migration` session.
### To query migration status: ### To query migration status:
./clio_server --migrate status ~/config/migrator.json ./clio_server --migrate status ~/config/migrator.json
This command returns the current migration status of each migrator. The example output: This command returns the current migration status of each migrator. The example output:
Current Migration Status: Current Migration Status:
Migrator: ExampleMigrator - Feature v1, Clio v3 - not migrated Migrator: ExampleMigrator - Feature v1, Clio v3 - not migrated
### To start a migration: ### To start a migration:
./clio_server --migrate ExampleMigrator ~/config/migrator.json ./clio_server --migrate ExampleMigrator ~/config/migrator.json
Migration will run if the migrator has not been migrated. The migrator will be marked as migrated after the migration is completed. Migration will run if the migrator has not been migrated. The migrator will be marked as migrated after the migration is completed.
## How to write a migrator ## How to write a migrator
@@ -54,8 +45,8 @@ It contains:
- Register your migrator in MigrationManager. Currently we only support Cassandra/ScyllaDB. Migrator needs to be registered in `CassandraSupportedMigrators`. - Register your migrator in MigrationManager. Currently we only support Cassandra/ScyllaDB. Migrator needs to be registered in `CassandraSupportedMigrators`.
## How to use full table scanner (Only for Cassandra/ScyllaDB) ## How to use full table scanner (Only for Cassandra/ScyllaDB)
Sometimes a migrator isn't able to query the historical data by the table's partition key. For example, a migrator of transactions needs the historical transaction data without knowing each transaction hash. The full table scanner can help to get all the rows in parallel. Sometimes a migrator isn't able to query the historical data by the table's partition key. For example, a migrator of transactions needs the historical transaction data without knowing each transaction hash. The full table scanner can help to get all the rows in parallel.
Most indexes are based on either ledger states or transactions. We provide the `objects` and `transactions` scanner. Developers only need to implement the callback function to receive the historical data. Please find the examples in `tests/integration/migration/cassandra/ExampleTransactionsMigrator.cpp` and `tests/integration/migration/cassandra/ExampleObjectsMigrator.cpp`. Most indexes are based on either ledger states or transactions. We provide the `objects` and `transactions` scanner. Developers only need to implement the callback function to receive the historical data. Please find the examples in `tests/integration/migration/cassandra/ExampleTransactionsMigrator.cpp` and `tests/integration/migration/cassandra/ExampleObjectsMigrator.cpp`.
@@ -65,7 +56,9 @@ Most indexes are based on either ledger states or transactions. We provide the `
## How to write a full table scan adapter (Only for Cassandra/ScyllaDB) ## How to write a full table scan adapter (Only for Cassandra/ScyllaDB)
If you need to do full scan against other table, you can follow below steps: If you need to do full scan against other table, you can follow below steps:
- Describe the table which needs full scan in a struct. It has to satisfy the `TableSpec`(cassandra/Spec.hpp) concept, containing static member: - Describe the table which needs full scan in a struct. It has to satisfy the `TableSpec`(cassandra/Spec.hpp) concept, containing static member:
- Tuple type `Row`, it's the type of each field in a row. The order of types should match what database will return in a row. Key types should come first, followed by other field types sorted in alphabetical order. - Tuple type `Row`, it's the type of each field in a row. The order of types should match what database will return in a row. Key types should come first, followed by other field types sorted in alphabetical order.
- `kPARTITION_KEY`, it's the name of the partition key of the table. - `kPARTITION_KEY`, it's the name of the partition key of the table.
- `kTABLE_NAME` - `kTABLE_NAME`
@@ -73,7 +66,6 @@ If you need to do full scan against other table, you can follow below steps:
- Inherit from `FullTableScannerAdapterBase`. - Inherit from `FullTableScannerAdapterBase`.
- Implement `onRowRead`, its parameter is the `Row` we defined. It's the callback function when a row is read. - Implement `onRowRead`, its parameter is the `Row` we defined. It's the callback function when a row is read.
Please take ObjectsAdapter/TransactionsAdapter as example. Please take ObjectsAdapter/TransactionsAdapter as example.
## Examples: ## Examples:
@@ -83,12 +75,15 @@ We have some example migrators under `tests/integration/migration/cassandra` fol
- ExampleDropTableMigrator - ExampleDropTableMigrator
This migrator drops `diff` table. This migrator drops `diff` table.
- ExampleLedgerMigrator - ExampleLedgerMigrator
This migrator shows how to migrate data when we don't need to do full table scan. This migrator creates an index table `ledger_example` which maintains the map of ledger sequence and its account hash. This migrator shows how to migrate data when we don't need to do full table scan. This migrator creates an index table `ledger_example` which maintains the map of ledger sequence and its account hash.
- ExampleObjectsMigrator - ExampleObjectsMigrator
This migrator shows how to migrate ledger-state-related data. It uses `ObjectsScanner` to perform the full scan in parallel. It counts the number of ACCOUNT_ROOT. This migrator shows how to migrate ledger-state-related data. It uses `ObjectsScanner` to perform the full scan in parallel. It counts the number of ACCOUNT_ROOT.
- ExampleTransactionsMigrator - ExampleTransactionsMigrator
This migrator shows how to migrate transaction-related data. It uses `TransactionsScanner` to perform the `transactions` table full scan in parallel. It creates an index table `tx_index_example` which tracks the transaction hash and its corresponding transaction type. This migrator shows how to migrate transaction-related data. It uses `TransactionsScanner` to perform the `transactions` table full scan in parallel. It creates an index table `tx_index_example` which tracks the transaction hash and its corresponding transaction type.

View File

@@ -7,6 +7,7 @@ Clio uses threads intensively. Multiple parts of Clio were/are implemented by ru
On the other hand, Clio also uses `Boost.Asio` for more complex tasks such as networking, scheduling RPC handlers, and even interacting with the database is done via Asio's coroutines. On the other hand, Clio also uses `Boost.Asio` for more complex tasks such as networking, scheduling RPC handlers, and even interacting with the database is done via Asio's coroutines.
There was a need for a simple yet powerful framework that will cover the following in a unified way: There was a need for a simple yet powerful framework that will cover the following in a unified way:
- Exception/error handling and propagation - Exception/error handling and propagation
- Ability to return a value of any type as a result of a successful operation - Ability to return a value of any type as a result of a successful operation
- Cancellation (cooperative) of inflight operations - Cancellation (cooperative) of inflight operations
@@ -28,6 +29,7 @@ At the core of async framework are the execution contexts. Each execution contex
There are multiple execution contexts to choose from, each with their own pros and cons. There are multiple execution contexts to choose from, each with their own pros and cons.
#### CoroExecutionContext #### CoroExecutionContext
This context wraps a thread pool and executes blocks of code by means of `boost::asio::spawn` which spawns coroutines. This context wraps a thread pool and executes blocks of code by means of `boost::asio::spawn` which spawns coroutines.
Deep inside the framework it hides `boost::asio::yield_context` and automatically switches coroutine contexts every time the user's code checks `isStopRequested()` on the `StopToken` given to the user-provided lambda. Deep inside the framework it hides `boost::asio::yield_context` and automatically switches coroutine contexts every time the user's code checks `isStopRequested()` on the `StopToken` given to the user-provided lambda.
@@ -37,71 +39,89 @@ The benefit is that both timers and async operations can work concurrently on a
Users of this execution context should take care to split their work in reasonably sized batches to avoid incurring a performance penalty caused by switching coroutine contexts too often. However, if the batches are too time consuming it may lead to slower cooperative cancellation.

#### PoolExecutionContext

This context wraps a thread pool but executes blocks of code without using coroutines.
Note: A downside of this execution context is that if there is only 1 thread in the thread pool, timers can not execute while the thread is busy executing user-provided code. It's up to the user of this execution context to decide how to deal with this and whether it's important for their use case.

#### SyncExecutionContext

This is a fully synchronous execution context. It runs the scheduled operations right on the caller thread. By the time `execute([]{ … })` returns, the Operation is guaranteed to be ready (i.e. value or error can be immediately queried with `.get()`).
In order to support scheduled operations and timeout-based cancellation, this context schedules all timers on the SystemExecutionContext instead.

#### SystemExecutionContext

This context of 1 thread is always readily available system-wide and can be used for

- fire and forget operations where it makes no sense to create an entirely new context for them
- as an external context for scheduling timers (used by SyncExecutionContext automatically)

### Strand

Any execution context provides a convenient `makeStrand` member function which will return a strand object for the execution context.
The strand can then be used with the same set of APIs that the execution context provides with the difference being that everything that is executed through a strand is guaranteed to be serially executed within the strand. This is a way to avoid the need for using a mutex or other explicit synchronization mechanisms.

### Outcome

An outcome is like a `std::promise` to the operations that execute on the execution context.
The framework will hold onto the outcome object internally and the user of the framework will only receive an operation object that is like the `std::future` to the outcome.
The framework will set the final value or error through the outcome object so that the user can receive it on the operation side as a `std::expected`.

### Operation

There are several different operation types available. The one used will depend on the signature of the executable lambda passed by the user of this framework.

#### Stoppable and non-stoppable operations

Stoppable operations can be cooperatively stopped via a stop token that is passed to the user-provided function/lambda. A stoppable operation is returned to the user if they specify a stop token as the first argument of the function/lambda for execution.
Regular, non-stoppable operations, can not be stopped. A non-stoppable operation is returned to the user if they did not request a stop token as the first argument of the function/lambda for execution.

#### Scheduled operations

Scheduled operations are wrappers on top of Stoppable and regular Operations and provide the functionality of a timer that needs to run out before the given block of code will finally be executed on the Execution Context.
Scheduled operations can be aborted by calling

- `cancel` - will only cancel the timer. If the timer already fired this will have no effect
- `requestStop` - will stop the operation if it's already running or as soon as the timer runs out
- `abort` - will call `cancel` immediately followed by `requestStop`

### Error handling

By default, exceptions that happen during the execution of user-provided code are caught and returned in the error channel of `std::expected` as an instance of the `ExecutionError` struct. The user can then extract the error message by calling `what()` or directly accessing the `message` member.

### Returned value

If the user-provided lambda returns anything but `void`, the type and value will propagate through the operation object and can be received by calling `get` which will block until a value or an error is available.
The `wait` member function can be used when the user just wants to wait for the value to become available but not necessarily getting at the value just yet.

### Type erasure

On top of the templated execution contexts, outcomes, operations, strands and stop tokens this framework provides the type-erased wrappers with (mostly) the same interface.

#### AnyExecutionContext

This provides the same interface as any other execution context in this framework.
Note: the original context is taken in by reference.
See examples of use below.

#### AnyOperation<T>

Wraps any type of operations including regular, stoppable and scheduled.
Since this wrapper does not know which operation type it's wrapping it only provides an `abort` member function that will call the correct underlying functions depending on the real type of the operation. If `abort` is called on a regular (non-stoppable and not scheduled) operation, the call will result in an assertion failure.

## Examples

This section provides some examples. For more examples take a look at `ExecutionContextBenchmarks`, `AsyncExecutionContextTests` and `AnyExecutionContextTests`.

### Regular operation

#### Awaiting and reading values

```cpp
auto res = ctx.execute([]() { return 42; });
EXPECT_EQ(res.get().value(), 42);
@@ -114,8 +134,11 @@ ASSERT_EQ(value, 42);
```

### Stoppable operation

#### Requesting stoppage

The stop token can be used via the `isStopRequested()` member function:

```cpp
auto res = ctx.execute([](auto stopToken) {
    while (not stopToken.isStopRequested())
@@ -128,6 +151,7 @@ res.requestStop();
```

Alternatively, the stop token is implicitly convertible to `bool` so you can also use it like so:

```cpp
auto res = ctx.execute([](auto stopRequested) {
    while (not stopRequested)
@@ -140,7 +164,9 @@ res.requestStop();
```

#### Automatic stoppage on timeout

By adding an optional timeout as the last arg to `execute` you can have the framework automatically call `requestStop()`:

```cpp
auto res = ctx.execute([](auto stopRequested) {
    while (not stopRequested)
@@ -153,7 +179,9 @@ auto res = ctx.execute([](auto stopRequested) {
```

### Scheduled operation

#### Cancelling an outstanding operation

```cpp
auto res = ctx.scheduleAfter(
    10ms, []([[maybe_unused]] auto stopRequested, auto cancelled) {
@@ -166,6 +194,7 @@ res.cancel(); // or .abort()
```

#### Get value after stopping

```cpp
auto res = ctx.scheduleAfter(1ms, [](auto stopRequested) {
    while (not stopRequested)
@@ -178,6 +207,7 @@ res.requestStop();
```

#### Handling an exception

```cpp
auto res =
    ctx.scheduleAfter(1s, []([[maybe_unused]] auto stopRequested, auto cancelled) {
@@ -192,9 +222,11 @@ EXPECT_TRUE(std::string{err}.ends_with("test"));
```

### Strand

The APIs are basically the same as with the parent `ExecutionContext`.

#### Computing a value on a strand

```cpp
auto strand = ctx.makeStrand();
auto res = strand.execute([] { return 42; });
@@ -203,7 +235,9 @@ EXPECT_EQ(res.get().value(), 42);
```

### Type erasure

#### Simple use

```cpp
auto ctx = CoroExecutionContext{4};
auto anyCtx = AnyExecutionContext{ctx};
@@ -215,6 +249,7 @@ auto op = anyCtx.execute([](auto stopToken) {
```

#### Aborting the operation

Erased operations only expose the `abort` member function that can be used to both cancel an outstanding and/or stop a running operation.

```cpp

View File

@@ -3,8 +3,11 @@
Tests that hit the real database are separate from the unit test suite found under `tests/unit`.

## Requirements

### Cassandra/ScyllaDB cluster

If you wish to test the backend component you will need to have access to a **local (127.0.0.1)** Cassandra cluster, opened at port **9042**. Please ensure that the cluster is successfully running before running these tests.

## Running

To run the DB tests, first build Clio as normal, then execute `./clio_integration_tests` to run all database tests.

View File

@@ -3,7 +3,9 @@
The correctness of new implementations can be verified via running unit tests. Below is the information on how to run unit tests.

## Running

To run the unit tests, first build Clio as normal, then execute `./clio_tests` to run all unit tests.

# Adding Unit Tests

To add unit tests, please create a separate file for the component you are trying to cover (unless it already exists) and use any other existing unit test file as an example.