Compare commits

...

55 Commits

Author SHA1 Message Date
Ayaz Salikhov
2b8a7b95e4 chore: Remove explicit accountNotFound message (#2978)
I searched the rippled code, and there is no `accountNotFound` anywhere,
so we should probably stick to the default; it looks reasonable.
2026-03-09 17:17:09 +00:00
Sergey Kuznetsov
fbdd6d6105 feat: Limit cache loading in cluster (#2985)
This PR adds an option to limit simultaneous cache loading in a cluster
to one node at a time.
Fixes #2707
2026-03-09 17:11:20 +00:00
Ayaz Salikhov
53d9617b26 chore: Update XRPLF/actions (#2996) 2026-03-09 16:28:09 +00:00
Ayaz Salikhov
80743406ba chore: Use check-pr-title from XRPLF/actions (#2984) 2026-03-09 15:21:16 +00:00
dependabot[bot]
f82196d9a2 ci: [DEPENDABOT] Bump docker/setup-qemu-action from 3.7.0 to 4.0.0 in /.github/actions/build-docker-image (#2991)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-03-09 13:02:12 +00:00
Ayaz Salikhov
9385b77483 chore: Remove cmake-format mentions (#2986) 2026-03-09 11:34:17 +00:00
dependabot[bot]
88e8118cce ci: [DEPENDABOT] Bump docker/setup-buildx-action from 3.12.0 to 4.0.0 in /.github/actions/build-docker-image (#2995) 2026-03-09 09:27:56 +00:00
dependabot[bot]
7f34bebad1 ci: [DEPENDABOT] Bump docker/build-push-action from 6.18.0 to 7.0.0 in /.github/actions/build-docker-image (#2994) 2026-03-09 09:27:18 +00:00
dependabot[bot]
675046f9f2 ci: [DEPENDABOT] Bump docker/login-action from 3.7.0 to 4.0.0 in /.github/actions/build-docker-image (#2993) 2026-03-09 09:26:59 +00:00
dependabot[bot]
b3c70a2f35 ci: [DEPENDABOT] Bump docker/metadata-action from 5.10.0 to 6.0.0 in /.github/actions/build-docker-image (#2992) 2026-03-09 09:26:42 +00:00
dependabot[bot]
1defdd7312 ci: [DEPENDABOT] Bump tj-actions/changed-files from 47.0.1 to 47.0.5 (#2990) 2026-03-09 09:26:00 +00:00
dependabot[bot]
45cc3aa57b ci: [DEPENDABOT] Bump crazy-max/ghaction-import-gpg from 6.3.0 to 7.0.0 (#2989) 2026-03-09 09:25:33 +00:00
dependabot[bot]
ef0c3847fd ci: [DEPENDABOT] Bump docker/login-action from 3.7.0 to 4.0.0 (#2988) 2026-03-09 09:24:41 +00:00
dependabot[bot]
b571e11f63 ci: [DEPENDABOT] Bump docker/setup-buildx-action from 3.12.0 to 4.0.0 (#2987) 2026-03-09 09:24:13 +00:00
Sergey Kuznetsov
05e52ee7a4 fix: Start without cache file (#2976)
#2830 introduced a bug where Clio couldn't start without a cache
file. This PR fixes the problem.
2026-03-06 15:59:27 +00:00
Ayaz Salikhov
3737459d09 style: Apply custom cmake definitions (#2983) 2026-03-06 14:28:13 +00:00
Ayaz Salikhov
43e8c8cddb style: Unify style for 'not expectedLgrInfo.has_value()' (#2977) 2026-03-06 12:27:54 +00:00
Ayaz Salikhov
9cbc99651c style: Unify style for 'not expectedNext.has_value()' (#2979) 2026-03-06 12:21:53 +00:00
Ayaz Salikhov
967590e639 style: Use gersemi instead of ancient cmake-format (#2980) 2026-03-06 12:21:01 +00:00
github-actions[bot]
395e87b9c5 style: clang-tidy auto fixes (#2982) 2026-03-06 10:10:16 +00:00
Ayaz Salikhov
6d56ed5ccf chore: Move sharedPtrBackend (#2974) 2026-03-05 19:01:57 +00:00
github-actions[bot]
a4d6caf8a7 style: Update pre-commit hooks (#2968)
Co-authored-by: mathbunnyru <12270691+mathbunnyru@users.noreply.github.com>
2026-03-03 15:58:30 +00:00
dependabot[bot]
b9ee5187b6 ci: [DEPENDABOT] Bump actions/upload-artifact from 6.0.0 to 7.0.0 (#2969)
Bumps
[actions/upload-artifact](https://github.com/actions/upload-artifact)
from 6.0.0 to 7.0.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a
href="https://github.com/actions/upload-artifact/releases">actions/upload-artifact's
releases</a>.</em></p>
<blockquote>
<h2>v7.0.0</h2>
<h2>v7 What's new</h2>
<h3>Direct Uploads</h3>
<p>Adds support for uploading single files directly (unzipped). Callers
can set the new <code>archive</code> parameter to <code>false</code> to
skip zipping the file during upload. Right now, we only support single
files. The action will fail if the glob passed resolves to multiple
files. The <code>name</code> parameter is also ignored with this
setting. Instead, the name of the artifact will be the name of the
uploaded file.</p>
<h3>ESM</h3>
<p>To support new versions of the <code>@actions/*</code> packages,
we've upgraded the package to ESM.</p>
<h2>What's Changed</h2>
<ul>
<li>Add proxy integration test by <a
href="https://github.com/Link"><code>@​Link</code></a>- in <a
href="https://redirect.github.com/actions/upload-artifact/pull/754">actions/upload-artifact#754</a></li>
<li>Upgrade the module to ESM and bump dependencies by <a
href="https://github.com/danwkennedy"><code>@​danwkennedy</code></a> in
<a
href="https://redirect.github.com/actions/upload-artifact/pull/762">actions/upload-artifact#762</a></li>
<li>Support direct file uploads by <a
href="https://github.com/danwkennedy"><code>@​danwkennedy</code></a> in
<a
href="https://redirect.github.com/actions/upload-artifact/pull/764">actions/upload-artifact#764</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/Link"><code>@​Link</code></a>- made
their first contribution in <a
href="https://redirect.github.com/actions/upload-artifact/pull/754">actions/upload-artifact#754</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a
href="https://github.com/actions/upload-artifact/compare/v6...v7.0.0">https://github.com/actions/upload-artifact/compare/v6...v7.0.0</a></p>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a
href="bbbca2ddaa"><code>bbbca2d</code></a>
Support direct file uploads (<a
href="https://redirect.github.com/actions/upload-artifact/issues/764">#764</a>)</li>
<li><a
href="589182c5a4"><code>589182c</code></a>
Upgrade the module to ESM and bump dependencies (<a
href="https://redirect.github.com/actions/upload-artifact/issues/762">#762</a>)</li>
<li><a
href="47309c993a"><code>47309c9</code></a>
Merge pull request <a
href="https://redirect.github.com/actions/upload-artifact/issues/754">#754</a>
from actions/Link-/add-proxy-integration-tests</li>
<li><a
href="02a8460834"><code>02a8460</code></a>
Add proxy integration test</li>
<li>See full diff in <a
href="b7c566a772...bbbca2ddaa">compare
view</a></li>
</ul>
</details>
<br />


[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/upload-artifact&package-manager=github_actions&previous-version=6.0.0&new-version=7.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---

<details>
<summary>Dependabot commands and options</summary>
<br />

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)


</details>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-03-02 12:55:05 +01:00
dependabot[bot]
e43b678872 ci: [DEPENDABOT] Bump actions/download-artifact from 7.0.0 to 8.0.0 (#2970)
Bumps
[actions/download-artifact](https://github.com/actions/download-artifact)
from 7.0.0 to 8.0.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a
href="https://github.com/actions/download-artifact/releases">actions/download-artifact's
releases</a>.</em></p>
<blockquote>
<h2>v8.0.0</h2>
<h2>v8 - What's new</h2>
<h3>Direct downloads</h3>
<p>To support direct uploads in <code>actions/upload-artifact</code>,
the action will no longer attempt to unzip all downloaded files.
Instead, the action checks the <code>Content-Type</code> header ahead of
unzipping and skips non-zipped files. Callers wishing to download a
zipped file as-is can also set the new <code>skip-decompress</code>
parameter to <code>false</code>.</p>
<h3>Enforced checks (breaking)</h3>
<p>A previous release introduced digest checks on the download. If a
download hash didn't match the expected hash from the server, the action
would log a warning. Callers can now configure the behavior on mismatch
with the <code>digest-mismatch</code> parameter. To be secure by
default, we are now defaulting the behavior to <code>error</code> which
will fail the workflow run.</p>
<h3>ESM</h3>
<p>To support new versions of the @actions/* packages, we've upgraded
the package to ESM.</p>
<h2>What's Changed</h2>
<ul>
<li>Don't attempt to un-zip non-zipped downloads by <a
href="https://github.com/danwkennedy"><code>@​danwkennedy</code></a> in
<a
href="https://redirect.github.com/actions/download-artifact/pull/460">actions/download-artifact#460</a></li>
<li>Add a setting to specify what to do on hash mismatch and default it
to <code>error</code> by <a
href="https://github.com/danwkennedy"><code>@​danwkennedy</code></a> in
<a
href="https://redirect.github.com/actions/download-artifact/pull/461">actions/download-artifact#461</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a
href="https://github.com/actions/download-artifact/compare/v7...v8.0.0">https://github.com/actions/download-artifact/compare/v7...v8.0.0</a></p>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a
href="70fc10c6e5"><code>70fc10c</code></a>
Merge pull request <a
href="https://redirect.github.com/actions/download-artifact/issues/461">#461</a>
from actions/danwkennedy/digest-mismatch-behavior</li>
<li><a
href="f258da9a50"><code>f258da9</code></a>
Add change docs</li>
<li><a
href="ccc058e5fb"><code>ccc058e</code></a>
Fix linting issues</li>
<li><a
href="bd7976ba57"><code>bd7976b</code></a>
Add a setting to specify what to do on hash mismatch and default it to
<code>error</code></li>
<li><a
href="ac21fcf45e"><code>ac21fcf</code></a>
Merge pull request <a
href="https://redirect.github.com/actions/download-artifact/issues/460">#460</a>
from actions/danwkennedy/download-no-unzip</li>
<li><a
href="15999bff51"><code>15999bf</code></a>
Add note about package bumps</li>
<li><a
href="974686ed50"><code>974686e</code></a>
Bump the version to <code>v8</code> and add release notes</li>
<li><a
href="fbe48b1d27"><code>fbe48b1</code></a>
Update test names to make it clearer what they do</li>
<li><a
href="96bf374a61"><code>96bf374</code></a>
One more test fix</li>
<li><a
href="b8c4819ef5"><code>b8c4819</code></a>
Fix skip decompress test</li>
<li>Additional commits viewable in <a
href="37930b1c2a...70fc10c6e5">compare
view</a></li>
</ul>
</details>
<br />


[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/download-artifact&package-manager=github_actions&previous-version=7.0.0&new-version=8.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---

<details>
<summary>Dependabot commands and options</summary>
<br />

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)


</details>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-03-02 12:54:56 +01:00
dependabot[bot]
ef0a765e07 ci: [DEPENDABOT] Bump actions/upload-artifact from 6.0.0 to 7.0.0 in /.github/actions/code-coverage (#2971)
Bumps
[actions/upload-artifact](https://github.com/actions/upload-artifact)
from 6.0.0 to 7.0.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a
href="https://github.com/actions/upload-artifact/releases">actions/upload-artifact's
releases</a>.</em></p>
<blockquote>
<h2>v7.0.0</h2>
<h2>v7 What's new</h2>
<h3>Direct Uploads</h3>
<p>Adds support for uploading single files directly (unzipped). Callers
can set the new <code>archive</code> parameter to <code>false</code> to
skip zipping the file during upload. Right now, we only support single
files. The action will fail if the glob passed resolves to multiple
files. The <code>name</code> parameter is also ignored with this
setting. Instead, the name of the artifact will be the name of the
uploaded file.</p>
<h3>ESM</h3>
<p>To support new versions of the <code>@actions/*</code> packages,
we've upgraded the package to ESM.</p>
<h2>What's Changed</h2>
<ul>
<li>Add proxy integration test by <a
href="https://github.com/Link"><code>@​Link</code></a>- in <a
href="https://redirect.github.com/actions/upload-artifact/pull/754">actions/upload-artifact#754</a></li>
<li>Upgrade the module to ESM and bump dependencies by <a
href="https://github.com/danwkennedy"><code>@​danwkennedy</code></a> in
<a
href="https://redirect.github.com/actions/upload-artifact/pull/762">actions/upload-artifact#762</a></li>
<li>Support direct file uploads by <a
href="https://github.com/danwkennedy"><code>@​danwkennedy</code></a> in
<a
href="https://redirect.github.com/actions/upload-artifact/pull/764">actions/upload-artifact#764</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/Link"><code>@​Link</code></a>- made
their first contribution in <a
href="https://redirect.github.com/actions/upload-artifact/pull/754">actions/upload-artifact#754</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a
href="https://github.com/actions/upload-artifact/compare/v6...v7.0.0">https://github.com/actions/upload-artifact/compare/v6...v7.0.0</a></p>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a
href="bbbca2ddaa"><code>bbbca2d</code></a>
Support direct file uploads (<a
href="https://redirect.github.com/actions/upload-artifact/issues/764">#764</a>)</li>
<li><a
href="589182c5a4"><code>589182c</code></a>
Upgrade the module to ESM and bump dependencies (<a
href="https://redirect.github.com/actions/upload-artifact/issues/762">#762</a>)</li>
<li><a
href="47309c993a"><code>47309c9</code></a>
Merge pull request <a
href="https://redirect.github.com/actions/upload-artifact/issues/754">#754</a>
from actions/Link-/add-proxy-integration-tests</li>
<li><a
href="02a8460834"><code>02a8460</code></a>
Add proxy integration test</li>
<li>See full diff in <a
href="b7c566a772...bbbca2ddaa">compare
view</a></li>
</ul>
</details>
<br />


[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/upload-artifact&package-manager=github_actions&previous-version=6.0.0&new-version=7.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---

<details>
<summary>Dependabot commands and options</summary>
<br />

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)


</details>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-03-02 12:54:46 +01:00
Ayaz Salikhov
f9c89264da chore: Enable debug symbols (#2967) 2026-02-25 16:42:23 +00:00
Ayaz Salikhov
16030d1d81 chore: Update cleanup-workspace to delete old .conan2 dir on macOS (#2964) 2026-02-24 02:40:06 +00:00
Ayaz Salikhov
1220d632b5 style: Fix lint comments due to style changes (#2963) 2026-02-23 16:24:27 +00:00
Ayaz Salikhov
e9052bcd80 style: Remove readability-identifier-naming where not needed (#2962) 2026-02-23 14:43:19 +00:00
Ayaz Salikhov
c1f6a6eb31 chore: Fix compilation due to use of std::ranges::mismatch (#2960) 2026-02-23 14:32:07 +00:00
github-actions[bot]
af736717fc style: clang-tidy auto fixes (#2958)
Co-authored-by: godexsoft <385326+godexsoft@users.noreply.github.com>
2026-02-23 13:54:07 +00:00
Sergey Kuznetsov
2d6f82c27f feat: Metrics for requested ledger age (#2947)
Adds metrics so that the distribution of requested ledger ages can be analysed.

---------

Co-authored-by: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-02-20 18:01:15 +00:00
Ayaz Salikhov
6ba58f42f0 style: Set clang-format width 100 (#2953) 2026-02-20 15:56:03 +00:00
Ayaz Salikhov
480264ff8f perf: Use string_view in AccountInfo (#2951) 2026-02-18 17:11:31 +00:00
Ayaz Salikhov
584d2bb5f2 feat: Support Lending Protocol (#2945) 2026-02-17 22:54:24 +00:00
emrearıyürek
9d3dbce73b fix: Remove RpcEntryNotFound from ClioError (#2661)
Since rippled has its own error codes, all occurrences have been
replaced except for [PR2549](https://github.com/XRPLF/clio/pull/2549),
and the entry has also been removed entirely from Clio’s error enum.
This modification follows the feedback provided in this comment:
https://github.com/XRPLF/clio/pull/2549#discussion_r2394840688

Pending compatible libxrpl — temporarily on hold

---------

Co-authored-by: Ayaz Salikhov <mathbunnyru@users.noreply.github.com>
Co-authored-by: Alex Kremer <akremer@ripple.com>
2026-02-06 15:33:18 +00:00
Ayaz Salikhov
8b6f65f0b7 chore: Update hashes of XRPLF/actions (#2944) 2026-02-02 20:48:50 +00:00
dependabot[bot]
91aba853af ci: [DEPENDABOT] Bump docker/login-action from 3.6.0 to 3.7.0 in /.github/actions/build-docker-image (#2942)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-02 14:50:37 +00:00
dependabot[bot]
af03a2fbe4 ci: [DEPENDABOT] Bump actions/cache from 5.0.2 to 5.0.3 (#2941)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-02 14:50:28 +00:00
dependabot[bot]
61c47c8efd ci: [DEPENDABOT] Bump docker/login-action from 3.6.0 to 3.7.0 (#2940)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-02 14:50:14 +00:00
github-actions[bot]
8f36b1d5ca style: Update pre-commit hooks (#2939)
Co-authored-by: mathbunnyru <12270691+mathbunnyru@users.noreply.github.com>
2026-02-02 14:49:33 +00:00
Alex Kremer
9fd15eb08b chore: Enable TSAN without ignoring errors (#2828) 2026-01-30 19:14:36 +00:00
Ayaz Salikhov
cf77a10555 style: Adopt cmake-format from xrpld (#2938) 2026-01-29 16:44:04 +00:00
Sergey Kuznetsov
437168aa13 docs: Add section about faster cache loading (#2932)
Co-authored-by: Maria Shodunke <maria-robobug@users.noreply.github.com>
2026-01-26 13:30:58 +00:00
dependabot[bot]
8a7c6b0aa4 ci: [DEPENDABOT] Bump actions/checkout from 6.0.1 to 6.0.2 (#2933) 2026-01-26 08:33:39 +00:00
dependabot[bot]
93a344c3fa ci: [DEPENDABOT] Bump peter-evans/create-pull-request from 8.0.0 to 8.1.0 (#2934) 2026-01-26 08:33:08 +00:00
Sergey Kuznetsov
59d07fab64 fix: Flush buffers before renaming cache file (#2927)
If Clio shuts itself down for exceeding the graceful period at a point
where the cache has been saved and renamed but the buffers have not yet
been flushed, we may end up with a corrupted cache file. Clio detects the
corruption and will not load a corrupted cache file, but we can avoid the
situation entirely by explicitly flushing the ofstream buffer.
2026-01-22 11:35:00 +00:00
github-actions[bot]
3bb3e0b9f9 style: clang-tidy auto fixes (#2930)
Fixes #2929.
2026-01-22 11:34:04 +00:00
Sergey Kuznetsov
a72e5a180f feat: Choose writer by cluster communication (#2830)
Fixes #1974
2026-01-21 11:41:26 +00:00
dependabot[bot]
0ebbaaadef ci: [DEPENDABOT] Bump actions/cache from 5.0.1 to 5.0.2 (#2925)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-01-19 14:09:14 +00:00
Ayaz Salikhov
b983aea15d chore: Delete duplicate etlng tests (#2920) 2026-01-14 15:28:31 +00:00
Ayaz Salikhov
63e7f9a72b ci: Run colima delete on macOS (#2915) 2026-01-14 15:21:02 +00:00
Alex Kremer
eebee4d671 chore: Fix linker warning in benchmark (#2918) 2026-01-14 14:10:21 +00:00
Alex Kremer
a6d5f94470 chore: Add .zed to .gitignore (#2919) 2026-01-14 13:38:00 +00:00
github-actions[bot]
2b473c8613 style: clang-tidy auto fixes (#2917) 2026-01-14 09:50:38 +00:00
660 changed files with 21548 additions and 8895 deletions

View File

@@ -22,7 +22,7 @@ BreakBeforeBinaryOperators: false
BreakBeforeBraces: WebKit
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
ColumnLimit: 100
CommentPragmas: "^ IWYU pragma:"
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4

View File

@@ -1,245 +0,0 @@
_help_parse: Options affecting listfile parsing
parse:
_help_additional_commands:
- Specify structure for custom cmake functions
additional_commands:
foo:
flags:
- BAR
- BAZ
kwargs:
HEADERS: "*"
SOURCES: "*"
DEPENDS: "*"
_help_override_spec:
- Override configurations per-command where available
override_spec: {}
_help_vartags:
- Specify variable tags.
vartags: []
_help_proptags:
- Specify property tags.
proptags: []
_help_format: Options affecting formatting.
format:
_help_disable:
- Disable formatting entirely, making cmake-format a no-op
disable: false
_help_line_width:
- How wide to allow formatted cmake files
line_width: 120
_help_tab_size:
- How many spaces to tab for indent
tab_size: 2
_help_use_tabchars:
- If true, lines are indented using tab characters (utf-8
- 0x09) instead of <tab_size> space characters (utf-8 0x20).
- In cases where the layout would require a fractional tab
- character, the behavior of the fractional indentation is
- governed by <fractional_tab_policy>
use_tabchars: false
_help_fractional_tab_policy:
- If <use_tabchars> is True, then the value of this variable
- indicates how fractional indentions are handled during
- whitespace replacement. If set to 'use-space', fractional
- indentation is left as spaces (utf-8 0x20). If set to
- "`round-up` fractional indentation is replaced with a single"
- tab character (utf-8 0x09) effectively shifting the column
- to the next tabstop
fractional_tab_policy: use-space
_help_max_subgroups_hwrap:
- If an argument group contains more than this many sub-groups
- (parg or kwarg groups) then force it to a vertical layout.
max_subgroups_hwrap: 4
_help_max_pargs_hwrap:
- If a positional argument group contains more than this many
- arguments, then force it to a vertical layout.
max_pargs_hwrap: 5
_help_max_rows_cmdline:
- If a cmdline positional group consumes more than this many
- lines without nesting, then invalidate the layout (and nest)
max_rows_cmdline: 2
_help_separate_ctrl_name_with_space:
- If true, separate flow control names from their parentheses
- with a space
separate_ctrl_name_with_space: true
_help_separate_fn_name_with_space:
- If true, separate function names from parentheses with a
- space
separate_fn_name_with_space: false
_help_dangle_parens:
- If a statement is wrapped to more than one line, than dangle
- the closing parenthesis on its own line.
dangle_parens: true
_help_dangle_align:
- If the trailing parenthesis must be 'dangled' on its on
- "line, then align it to this reference: `prefix`: the start"
- "of the statement, `prefix-indent`: the start of the"
- "statement, plus one indentation level, `child`: align to"
- the column of the arguments
dangle_align: prefix
_help_min_prefix_chars:
- If the statement spelling length (including space and
- parenthesis) is smaller than this amount, then force reject
- nested layouts.
min_prefix_chars: 4
_help_max_prefix_chars:
- If the statement spelling length (including space and
- parenthesis) is larger than the tab width by more than this
- amount, then force reject un-nested layouts.
max_prefix_chars: 10
_help_max_lines_hwrap:
- If a candidate layout is wrapped horizontally but it exceeds
- this many lines, then reject the layout.
max_lines_hwrap: 2
_help_line_ending:
- What style line endings to use in the output.
line_ending: unix
_help_command_case:
- Format command names consistently as 'lower' or 'upper' case
command_case: canonical
_help_keyword_case:
- Format keywords consistently as 'lower' or 'upper' case
keyword_case: unchanged
_help_always_wrap:
- A list of command names which should always be wrapped
always_wrap: []
_help_enable_sort:
- If true, the argument lists which are known to be sortable
- will be sorted lexicographicall
enable_sort: true
_help_autosort:
- If true, the parsers may infer whether or not an argument
- list is sortable (without annotation).
autosort: true
_help_require_valid_layout:
- By default, if cmake-format cannot successfully fit
- everything into the desired linewidth it will apply the
- last, most aggressive attempt that it made. If this flag is
- True, however, cmake-format will print error, exit with non-
- zero status code, and write-out nothing
require_valid_layout: false
_help_layout_passes:
- A dictionary mapping layout nodes to a list of wrap
- decisions. See the documentation for more information.
layout_passes: {}
_help_markup: Options affecting comment reflow and formatting.
markup:
_help_bullet_char:
- What character to use for bulleted lists
bullet_char: "*"
_help_enum_char:
- What character to use as punctuation after numerals in an
- enumerated list
enum_char: .
_help_first_comment_is_literal:
- If comment markup is enabled, don't reflow the first comment
- block in each listfile. Use this to preserve formatting of
- your copyright/license statements.
first_comment_is_literal: false
_help_literal_comment_pattern:
- If comment markup is enabled, don't reflow any comment block
- which matches this (regex) pattern. Default is `None`
- (disabled).
literal_comment_pattern: null
_help_fence_pattern:
- Regular expression to match preformat fences in comments
- default= ``r'^\s*([`~]{3}[`~]*)(.*)$'``
fence_pattern: ^\s*([`~]{3}[`~]*)(.*)$
_help_ruler_pattern:
- Regular expression to match rulers in comments default=
- '``r''^\s*[^\w\s]{3}.*[^\w\s]{3}$''``'
ruler_pattern: ^\s*[^\w\s]{3}.*[^\w\s]{3}$
_help_explicit_trailing_pattern:
- If a comment line matches starts with this pattern then it
- is explicitly a trailing comment for the preceding
- argument. Default is '#<'
explicit_trailing_pattern: "#<"
_help_hashruler_min_length:
- If a comment line starts with at least this many consecutive
- hash characters, then don't lstrip() them off. This allows
- for lazy hash rulers where the first hash char is not
- separated by space
hashruler_min_length: 10
_help_canonicalize_hashrulers:
- If true, then insert a space between the first hash char and
- remaining hash chars in a hash ruler, and normalize its
- length to fill the column
canonicalize_hashrulers: true
_help_enable_markup:
- enable comment markup parsing and reflow
enable_markup: true
_help_lint: Options affecting the linter
lint:
_help_disabled_codes:
- a list of lint codes to disable
disabled_codes: []
_help_function_pattern:
- regular expression pattern describing valid function names
function_pattern: "[0-9a-z_]+"
_help_macro_pattern:
- regular expression pattern describing valid macro names
macro_pattern: "[0-9A-Z_]+"
_help_global_var_pattern:
- regular expression pattern describing valid names for
- variables with global (cache) scope
global_var_pattern: "[A-Z][0-9A-Z_]+"
_help_internal_var_pattern:
- regular expression pattern describing valid names for
- variables with global scope (but internal semantic)
internal_var_pattern: _[A-Z][0-9A-Z_]+
_help_local_var_pattern:
- regular expression pattern describing valid names for
- variables with local scope
local_var_pattern: "[a-z][a-z0-9_]+"
_help_private_var_pattern:
- regular expression pattern describing valid names for
- privatedirectory variables
private_var_pattern: _[0-9a-z_]+
_help_public_var_pattern:
- regular expression pattern describing valid names for public
- directory variables
public_var_pattern: "[A-Z][0-9A-Z_]+"
_help_argument_var_pattern:
- regular expression pattern describing valid names for
- function/macro arguments and loop variables.
argument_var_pattern: "[a-z][a-z0-9_]+"
_help_keyword_pattern:
- regular expression pattern describing valid names for
- keywords used in functions or macros
keyword_pattern: "[A-Z][0-9A-Z_]+"
_help_max_conditionals_custom_parser:
- In the heuristic for C0201, how many conditionals to match
- within a loop in before considering the loop a parser.
max_conditionals_custom_parser: 2
_help_min_statement_spacing:
- Require at least this many newlines between statements
min_statement_spacing: 1
_help_max_statement_spacing:
- Require no more than this many newlines between statements
max_statement_spacing: 2
max_returns: 6
max_branches: 12
max_arguments: 5
max_localvars: 15
max_statements: 50
_help_encode: Options affecting file encoding
encode:
_help_emit_byteorder_mark:
- If true, emit the unicode byte-order mark (BOM) at the start
- of the file
emit_byteorder_mark: false
_help_input_encoding:
- Specify the encoding of the input file. Defaults to utf-8
input_encoding: utf-8
_help_output_encoding:
- Specify the encoding of the output file. Defaults to utf-8.
- Note that cmake only claims to support utf-8 so be careful
- when using anything else
output_encoding: utf-8
_help_misc: Miscellaneous configurations options.
misc:
_help_per_command:
- A dictionary containing any per-command configuration
- overrides. Currently only `command_case` is supported.
per_command: {}

View File

@@ -0,0 +1,19 @@
# Custom CMake command definitions for gersemi formatting.
# These stubs teach gersemi the signatures of project-specific commands
# so it can format their invocations correctly.
function(setup_target_for_coverage_gcovr)
set(options NONE)
set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
cmake_parse_arguments(
THIS_FUNCTION_PREFIX
"${options}"
"${oneValueArgs}"
"${multiValueArgs}"
${ARGN}
)
endfunction()
function(append_coverage_compiler_flags_to_target name mode)
endfunction()

1
.gersemirc Normal file
View File

@@ -0,0 +1 @@
definitions: [.gersemi]

View File

@@ -34,32 +34,32 @@ runs:
steps:
- name: Login to DockerHub
if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_PW }}
- name: Login to GitHub Container Registry
if: ${{ inputs.push_image == 'true' }}
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ env.GITHUB_TOKEN }}
- uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
with:
cache-image: false
- uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
- uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0
id: meta
with:
images: ${{ inputs.images }}
tags: ${{ inputs.tags }}
- name: Build and push
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
with:
context: ${{ inputs.directory }}
platforms: ${{ inputs.platforms }}

View File

@@ -24,7 +24,7 @@ runs:
-j8 --exclude-throw-branches
- name: Archive coverage report
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: coverage-report.xml
path: build/coverage_report.xml

View File

@@ -1,46 +0,0 @@
#!/bin/bash
set -o pipefail
# Note: This script is intended to be run from the root of the repository.
#
# This script runs each unit-test separately and generates reports from the currently active sanitizer.
# Output is saved in ./.sanitizer-report in the root of the repository
if [[ -z "$1" ]]; then
cat <<EOF
ERROR
-----------------------------------------------------------------------------
Path to clio_tests should be passed as first argument to the script.
-----------------------------------------------------------------------------
EOF
exit 1
fi
TEST_BINARY=$1
if [[ ! -f "$TEST_BINARY" ]]; then
echo "Test binary not found: $TEST_BINARY"
exit 1
fi
TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
OUTPUT_DIR="./.sanitizer-report"
mkdir -p "$OUTPUT_DIR"
export TSAN_OPTIONS="die_after_fork=0"
export MallocNanoZone='0' # for MacOSX
for TEST in $TESTS; do
OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}.log"
$TEST_BINARY --gtest_filter="$TEST" >"$OUTPUT_FILE" 2>&1
if [ $? -ne 0 ]; then
echo "'$TEST' failed a sanitizer check."
else
rm "$OUTPUT_FILE"
fi
done

View File

@@ -48,11 +48,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Download Clio binary from artifact
if: ${{ inputs.artifact_name != null }}
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
name: ${{ inputs.artifact_name }}
path: ./docker/clio/artifact/

View File

@@ -23,6 +23,7 @@ on:
- "cmake/**"
- "src/**"
- "tests/**"
- "benchmarks/**"
- docs/config-description.md
workflow_dispatch:
@@ -100,9 +101,9 @@ jobs:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
name: clio_server_Linux_Release_gcc

View File

@@ -24,12 +24,12 @@ jobs:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: false
@@ -59,7 +59,7 @@ jobs:
run: strip build/clio_tests
- name: Upload clio_tests
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: clio_tests_check_libxrpl
path: build/clio_tests
@@ -72,7 +72,7 @@ jobs:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
steps:
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
name: clio_tests_check_libxrpl
@@ -92,7 +92,7 @@ jobs:
issues: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Create an issue
uses: ./.github/actions/create-issue

View File

@@ -5,26 +5,6 @@ on:
types: [opened, edited, reopened, synchronize]
branches: [develop]
defaults:
run:
shell: bash
jobs:
check_title:
runs-on: ubuntu-latest
steps:
- uses: ytanikin/pr-conventional-commits@fda730cb152c05a849d6d84325e50c6182d9d1e9 # 1.5.1
with:
task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
add_label: false
custom_labels: '{"build":"build", "feat":"enhancement", "fix":"bug", "docs":"documentation", "test":"testability", "ci":"ci", "style":"refactoring", "refactor":"refactoring", "perf":"performance", "chore":"tooling"}'
- name: Check if message starts with upper-case letter
env:
PR_TITLE: ${{ github.event.pull_request.title }}
run: |
if [[ ! "${PR_TITLE}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
echo "Error: PR title must start with an upper-case letter."
exit 1
fi
uses: XRPLF/actions/.github/workflows/check-pr-title.yml@943eb8277e8f4b010fde0c826ce4154c36c39509

View File

@@ -39,12 +39,12 @@ jobs:
pull-requests: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: false
@@ -97,7 +97,7 @@ jobs:
List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
- uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
- uses: crazy-max/ghaction-import-gpg@2dc316deee8e90f13e1a351ab510b4d5bc0c82cd # v7.0.0
if: ${{ steps.files_changed.outcome != 'success' && github.event_name != 'pull_request' }}
with:
gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
@@ -107,7 +107,7 @@ jobs:
- name: Create PR with fixes
if: ${{ steps.files_changed.outcome != 'success' && github.event_name != 'pull_request' }}
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}

View File

@@ -22,12 +22,12 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
lfs: true
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: false

View File

@@ -169,7 +169,7 @@ jobs:
issues: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Create an issue
uses: ./.github/actions/create-issue

View File

@@ -12,7 +12,7 @@ on:
jobs:
auto-update:
uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@ad4ab1ae5a54a4bab0e87294c31fc0729f788b2b
uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@44856eb0d6ecb7d376370244324ab3dc8b863bad
with:
sign_commit: true
committer: "Clio CI <skuznetsov@ripple.com>"

View File

@@ -8,7 +8,7 @@ on:
jobs:
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@282890f46d6921249d5659dd38babcb0bd8aef48
uses: XRPLF/actions/.github/workflows/pre-commit.yml@44856eb0d6ecb7d376370244324ab3dc8b863bad
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:14342e087ceb8b593027198bf9ef06a43833c696" }'

View File

@@ -88,14 +88,14 @@ jobs:
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
uses: XRPLF/actions/cleanup-workspace@c7d9ce5ebb03c752a354889ecd870cadfc2b1cd4
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: ${{ inputs.download_ccache }}
@@ -113,7 +113,7 @@ jobs:
- name: Restore ccache cache
if: ${{ inputs.download_ccache && github.ref != 'refs/heads/develop' }}
uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ steps.cache_key.outputs.key }}
@@ -151,7 +151,7 @@ jobs:
- name: Upload build time analyze report
if: ${{ inputs.analyze_build_time }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: build_time_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build_time_report.txt
@@ -164,7 +164,7 @@ jobs:
- name: Save ccache cache
if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ steps.cache_key.outputs.key }}
@@ -179,28 +179,28 @@ jobs:
- name: Upload clio_server
if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_server
- name: Upload clio_tests
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_tests
- name: Upload clio_integration_tests
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_integration_tests
- name: Upload Clio Linux package
if: ${{ inputs.package }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/*.deb

View File

@@ -55,16 +55,16 @@ jobs:
contents: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: false
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
path: release_artifacts
pattern: clio_server_*
@@ -72,7 +72,7 @@ jobs:
- name: Prepare release artifacts
run: .github/scripts/prepare-release-artifacts.sh release_artifacts
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
path: release_artifacts
pattern: clio_deb_package_*
@@ -94,7 +94,7 @@ jobs:
git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc" >> "${RUNNER_TEMP}/release_notes.md"
- name: Upload release notes
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: release_notes_${{ inputs.version }}
path: "${RUNNER_TEMP}/release_notes.md"

View File

@@ -45,54 +45,29 @@ jobs:
if: ${{ inputs.run_unit_tests }}
env:
# TODO: remove completely when we have fixed all currently existing issues with sanitizers
SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') }}
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
uses: XRPLF/actions/cleanup-workspace@c7d9ce5ebb03c752a354889ecd870cadfc2b1cd4
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
- name: Make clio_tests executable
run: chmod +x ./clio_tests
- name: Run clio_tests (regular)
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'false' }}
- name: Run clio_tests
continue-on-error: true
id: run_clio_tests
run: ./clio_tests
- name: Run clio_tests (sanitizer errors ignored)
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests
- name: Check for sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
id: check_report
run: |
if ls .sanitizer-report/* 1> /dev/null 2>&1; then
echo "found_report=true" >> $GITHUB_OUTPUT
else
echo "found_report=false" >> $GITHUB_OUTPUT
fi
- name: Upload sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: .sanitizer-report/*
include-hidden-files: true
- name: Create an issue
if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
if: ${{ steps.run_clio_tests.outcome == 'failure' && endsWith(inputs.conan_profile, 'san') }}
uses: ./.github/actions/create-issue
env:
GH_TOKEN: ${{ github.token }}
@@ -100,10 +75,13 @@ jobs:
labels: "bug"
title: "[${{ inputs.conan_profile }}] reported issues"
body: >
Clio tests failed one or more sanitizer checks when built with ${{ inputs.conan_profile }}`.
Clio tests failed one or more sanitizer checks when built with `${{ inputs.conan_profile }}`.
Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
Reports are available as artifacts.
- name: Fail the job if clio_tests failed
if: ${{ steps.run_clio_tests.outcome == 'failure' }}
run: exit 1
integration_tests:
name: Integration testing
@@ -124,13 +102,19 @@ jobs:
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
uses: XRPLF/actions/cleanup-workspace@c7d9ce5ebb03c752a354889ecd870cadfc2b1cd4
- name: Spin up scylladb
- name: Delete and start colima (macOS)
# This is a temporary workaround for colima issues on macOS runners
if: ${{ runner.os == 'macOS' }}
timeout-minutes: 3
run: |
docker rm --force scylladb || true
colima delete --force
colima start
- name: Spin up scylladb (macOS)
if: ${{ runner.os == 'macOS' }}
timeout-minutes: 1
run: |
docker run \
--detach \
--name scylladb \
@@ -142,11 +126,15 @@ jobs:
--memory 16G \
scylladb/scylla
- name: Wait for scylladb container to be healthy (macOS)
if: ${{ runner.os == 'macOS' }}
timeout-minutes: 1
run: |
until [ "$(docker inspect -f '{{.State.Health.Status}}' scylladb)" == "healthy" ]; do
sleep 5
sleep 1
done
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}

View File

@@ -16,12 +16,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Download report artifact
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
name: coverage-report.xml
path: build

View File

@@ -15,7 +15,6 @@ on:
- ".github/actions/**"
- "!.github/actions/build-docker-image/**"
- "!.github/actions/create-issue/**"
- .github/scripts/execute-tests-under-sanitizer.sh
- CMakeLists.txt
- conanfile.py

View File

@@ -56,11 +56,11 @@ jobs:
needs: repo
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: "docker/compilers/gcc/**"
@@ -94,11 +94,11 @@ jobs:
needs: repo
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: "docker/compilers/gcc/**"
@@ -132,20 +132,20 @@ jobs:
needs: [repo, gcc-amd64, gcc-arm64]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: "docker/compilers/gcc/**"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -153,7 +153,7 @@ jobs:
- name: Login to DockerHub
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_PW }}
@@ -183,11 +183,11 @@ jobs:
needs: repo
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: "docker/compilers/clang/**"
@@ -219,11 +219,11 @@ jobs:
needs: [repo, gcc-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: "docker/tools/**"
@@ -250,11 +250,11 @@ jobs:
needs: [repo, gcc-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: "docker/tools/**"
@@ -281,20 +281,20 @@ jobs:
needs: [repo, tools-amd64, tools-arm64]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: "docker/tools/**"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -316,7 +316,7 @@ jobs:
needs: [repo, tools-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: ./.github/actions/build-docker-image
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -338,7 +338,7 @@ jobs:
needs: [repo, gcc-merge, clang, tools-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: ./.github/actions/build-docker-image
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -52,7 +52,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Calculate conan matrix
id: set-matrix
@@ -75,10 +75,10 @@ jobs:
CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: false

3
.gitignore vendored
View File

@@ -4,8 +4,11 @@
.build
.cache
.vscode
.zed
.python-version
.DS_Store
.sanitizer-report
CMakeUserPresets.json
config.json
CLAUDE.md
.claude/**

View File

@@ -29,7 +29,7 @@ repos:
# Autoformat: YAML, JSON, Markdown, etc.
- repo: https://github.com/rbubley/mirrors-prettier
rev: 14abee445aea04b39069c19b4bd54efff6775819 # frozen: v3.7.4
rev: c2bc67fe8f8f549cc489e00ba8b45aa18ee713b1 # frozen: v3.8.1
hooks:
- id: prettier
@@ -59,7 +59,7 @@ repos:
]
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: 25.12.0
rev: ea488cebbfd88a5f50b8bd95d5c829d0bb76feb8 # frozen: 26.1.0
hooks:
- id: black
@@ -94,17 +94,16 @@ repos:
language: script
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 75ca4ad908dc4a99f57921f29b7e6c1521e10b26 # frozen: v21.1.8
rev: cd481d7b0bfb5c7b3090c21846317f9a8262e891 # frozen: v22.1.0
hooks:
- id: clang-format
args: [--style=file]
types: [c++]
- repo: https://github.com/cheshirekow/cmake-format-precommit
rev: e2c2116d86a80e72e7146a06e68b7c228afc6319 # frozen: v0.6.13
- repo: https://github.com/BlankSpruce/gersemi
rev: 0.26.0
hooks:
- id: cmake-format
additional_dependencies: [PyYAML]
- id: gersemi
- repo: local
hooks:

View File

@@ -1,7 +1,10 @@
cmake_minimum_required(VERSION 3.20)
project(clio VERSION ${CLIO_VERSION} HOMEPAGE_URL "https://github.com/XRPLF/clio"
DESCRIPTION "An XRP Ledger API Server"
project(
clio
VERSION ${CLIO_VERSION}
HOMEPAGE_URL "https://github.com/XRPLF/clio"
DESCRIPTION "An XRP Ledger API Server"
)
# =========================== Options ====================================== #
@@ -15,7 +18,11 @@ option(package "Create distribution packages" FALSE)
option(lint "Run clang-tidy checks during compilation" FALSE)
option(static "Statically linked Clio" FALSE)
option(snapshot "Build snapshot tool" FALSE)
option(time_trace "Build using -ftime-trace to create compiler trace reports" FALSE)
option(
time_trace
"Build using -ftime-trace to create compiler trace reports"
FALSE
)
# ========================================================================== #
set(san "" CACHE STRING "Add sanitizer instrumentation")
@@ -35,9 +42,9 @@ add_library(clio_options INTERFACE)
target_compile_features(clio_options INTERFACE cxx_std_23) # Clio needs c++23 but deps can remain c++20 for now
target_include_directories(clio_options INTERFACE ${CMAKE_SOURCE_DIR}/src)
if (verbose)
set(CMAKE_VERBOSE_MAKEFILE TRUE)
endif ()
if(verbose)
set(CMAKE_VERBOSE_MAKEFILE TRUE)
endif()
# Clio tweaks and checks
include(CheckCompiler)
@@ -57,37 +64,43 @@ include(deps/spdlog)
add_subdirectory(src)
add_subdirectory(tests)
if (benchmark)
add_subdirectory(benchmarks)
endif ()
if(benchmark)
add_subdirectory(benchmarks)
endif()
# Enable selected sanitizer if enabled via `san`
if (san)
set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
if (NOT san IN_LIST SUPPORTED_SANITIZERS)
message(FATAL_ERROR "Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}.")
endif ()
if(san)
set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
if(NOT san IN_LIST SUPPORTED_SANITIZERS)
message(
FATAL_ERROR
"Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}."
)
endif()
# Sanitizers recommend minimum of -O1 for reasonable performance so we enable it for debug builds
set(SAN_OPTIMIZATION_FLAG "")
if (CMAKE_BUILD_TYPE STREQUAL "Debug")
set(SAN_OPTIMIZATION_FLAG -O1)
endif ()
target_compile_options(clio_options INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer)
# Sanitizers recommend minimum of -O1 for reasonable performance so we enable it for debug builds
set(SAN_OPTIMIZATION_FLAG "")
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
set(SAN_OPTIMIZATION_FLAG -O1)
endif()
target_compile_options(
clio_options
INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer
)
target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif ()
target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif()
# Generate `docs` target for doxygen documentation if enabled Note: use `make docs` to generate the documentation
if (docs)
add_subdirectory(docs)
endif ()
if(docs)
add_subdirectory(docs)
endif()
include(install/install)
if (package)
include(ClioPackage)
endif ()
if(package)
include(ClioPackage)
endif()
if (snapshot)
add_subdirectory(tools/snapshot)
endif ()
if(snapshot)
add_subdirectory(tools/snapshot)
endif()

View File

@@ -133,7 +133,7 @@ This is a non-exhaustive list of recommended style guidelines. These are not alw
Code must conform to `clang-format`, unless the result is unreasonably difficult to read or maintain.
In most cases the `pre-commit` hook takes care of formatting and fixes any issues automatically.
To manually format your code, run `pre-commit run clang-format --files <your changed files>` for C++ files, and `pre-commit run cmake-format --files <your changed files>` for CMake files.
To manually format your code, run `pre-commit run clang-format --files <your changed files>` for C++ files, and `pre-commit run gersemi --files <your changed files>` for CMake files.
### Documentation

View File

@@ -1,20 +1,27 @@
add_executable(clio_benchmark)
target_sources(
clio_benchmark
PRIVATE # Common
Main.cpp
Playground.cpp
# ExecutionContext
util/async/ExecutionContextBenchmarks.cpp
# Logger
util/log/LoggerBenchmark.cpp
# WorkQueue
rpc/WorkQueueBenchmarks.cpp
clio_benchmark
PRIVATE
# Common
Main.cpp
Playground.cpp
# ExecutionContext
util/async/ExecutionContextBenchmarks.cpp
# Logger
util/log/LoggerBenchmark.cpp
# WorkQueue
rpc/WorkQueueBenchmarks.cpp
)
include(deps/gbench)
target_include_directories(clio_benchmark PRIVATE .)
target_link_libraries(clio_benchmark PUBLIC clio_util clio_rpc benchmark::benchmark_main spdlog::spdlog)
set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
target_link_libraries(
clio_benchmark
PRIVATE clio_rpc clio_util benchmark::benchmark_main spdlog::spdlog
)
set_target_properties(
clio_benchmark
PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
)

View File

@@ -29,8 +29,6 @@
#include <benchmark/benchmark.h>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/json/object.hpp>
#include <algorithm>
#include <atomic>
@@ -53,12 +51,15 @@ auto const kCONFIG = ClioConfigDefinition{
{"log.channels.[].channel", Array{ConfigValue{ConfigType::String}}},
{"log.channels.[].level", Array{ConfigValue{ConfigType::String}}},
{"log.level", ConfigValue{ConfigType::String}.defaultValue("info")},
{"log.format", ConfigValue{ConfigType::String}.defaultValue(R"(%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v)")},
{"log.format",
ConfigValue{ConfigType::String}.defaultValue(R"(%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v)")},
{"log.is_async", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
{"log.enable_console", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
{"log.directory", ConfigValue{ConfigType::String}.optional()},
{"log.rotation_size", ConfigValue{ConfigType::Integer}.defaultValue(2048).withConstraint(gValidateUint32)},
{"log.directory_max_files", ConfigValue{ConfigType::Integer}.defaultValue(25).withConstraint(gValidateUint32)},
{"log.rotation_size",
ConfigValue{ConfigType::Integer}.defaultValue(2048).withConstraint(gValidateUint32)},
{"log.directory_max_files",
ConfigValue{ConfigType::Integer}.defaultValue(25).withConstraint(gValidateUint32)},
{"log.tag_style", ConfigValue{ConfigType::String}.defaultValue("none")},
};
@@ -126,9 +127,14 @@ benchmarkWorkQueue(benchmark::State& state)
ASSERT(totalQueued <= itemsPerClient * clientThreads, "Queued more than requested");
if (maxQueueSize == 0) {
ASSERT(totalQueued == itemsPerClient * clientThreads, "Queued exactly the expected amount");
ASSERT(
totalQueued == itemsPerClient * clientThreads, "Queued exactly the expected amount"
);
} else {
ASSERT(totalQueued >= std::min(maxQueueSize, itemsPerClient * clientThreads), "Queued less than expected");
ASSERT(
totalQueued >= std::min(maxQueueSize, itemsPerClient * clientThreads),
"Queued less than expected"
);
}
}
}

View File

@@ -62,8 +62,9 @@ uniqueLogDir()
{
auto const epochTime = std::chrono::high_resolution_clock::now().time_since_epoch();
auto const tmpDir = std::filesystem::temp_directory_path();
std::string const dirName =
fmt::format("logs_{}", std::chrono::duration_cast<std::chrono::microseconds>(epochTime).count());
std::string const dirName = fmt::format(
"logs_{}", std::chrono::duration_cast<std::chrono::microseconds>(epochTime).count()
);
return tmpDir / "clio_benchmark" / dirName;
}
@@ -108,7 +109,8 @@ benchmarkConcurrentFileLogging(benchmark::State& state)
channel, fileSink, spdlog::thread_pool(), spdlog::async_overflow_policy::block
);
spdlog::register_logger(logger);
Logger const threadLogger = BenchmarkLoggingInitializer::getLogger(std::move(logger));
Logger const threadLogger =
BenchmarkLoggingInitializer::getLogger(std::move(logger));
barrier.arrive_and_wait();
@@ -124,13 +126,16 @@ benchmarkConcurrentFileLogging(benchmark::State& state)
spdlog::shutdown();
auto const end = std::chrono::high_resolution_clock::now();
state.SetIterationTime(std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count());
state.SetIterationTime(
std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count()
);
std::filesystem::remove_all(logDir);
}
auto const totalMessages = numThreads * messagesPerThread;
state.counters["TotalMessagesRate"] = benchmark::Counter(totalMessages, benchmark::Counter::kIsRate);
state.counters["TotalMessagesRate"] =
benchmark::Counter(totalMessages, benchmark::Counter::kIsRate);
state.counters["Threads"] = numThreads;
state.counters["MessagesPerThread"] = messagesPerThread;
}

View File

@@ -1,5 +1,5 @@
find_program(CCACHE_PATH "ccache")
if (CCACHE_PATH)
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
message(STATUS "Using ccache: ${CCACHE_PATH}")
endif ()
if(CCACHE_PATH)
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
message(STATUS "Using ccache: ${CCACHE_PATH}")
endif()

View File

@@ -1,42 +1,48 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
message(FATAL_ERROR "Clang 16+ required for building clio")
endif ()
set(is_clang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15)
message(FATAL_ERROR "AppleClang 15+ required for building clio")
endif ()
set(is_appleclang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12)
message(FATAL_ERROR "GCC 12+ required for building clio")
endif ()
set(is_gcc TRUE)
else ()
message(FATAL_ERROR "Supported compilers: AppleClang 15+, Clang 16+, GCC 12+")
endif ()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
message(FATAL_ERROR "Clang 16+ required for building clio")
endif()
set(is_clang TRUE)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15)
message(FATAL_ERROR "AppleClang 15+ required for building clio")
endif()
set(is_appleclang TRUE)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12)
message(FATAL_ERROR "GCC 12+ required for building clio")
endif()
set(is_gcc TRUE)
else()
message(
FATAL_ERROR
"Supported compilers: AppleClang 15+, Clang 16+, GCC 12+"
)
endif()
if (san)
string(TOLOWER ${san} san)
set(SAN_FLAG "-fsanitize=${san}")
set(SAN_LIB "")
if (is_gcc)
if (san STREQUAL "address")
set(SAN_LIB "asan")
elseif (san STREQUAL "thread")
set(SAN_LIB "tsan")
elseif (san STREQUAL "memory")
set(SAN_LIB "msan")
elseif (san STREQUAL "undefined")
set(SAN_LIB "ubsan")
endif ()
endif ()
set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
if (NOT COMPILER_SUPPORTS_SAN)
message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
endif ()
endif ()
if(san)
string(TOLOWER ${san} san)
set(SAN_FLAG "-fsanitize=${san}")
set(SAN_LIB "")
if(is_gcc)
if(san STREQUAL "address")
set(SAN_LIB "asan")
elseif(san STREQUAL "thread")
set(SAN_LIB "tsan")
elseif(san STREQUAL "memory")
set(SAN_LIB "msan")
elseif(san STREQUAL "undefined")
set(SAN_LIB "ubsan")
endif()
endif()
set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
if(NOT COMPILER_SUPPORTS_SAN)
message(
FATAL_ERROR
"${san} sanitizer does not seem to be supported by your compiler"
)
endif()
endif()

View File

@@ -1,33 +1,41 @@
if (lint)
if(lint)
# Find clang-tidy binary
if(DEFINED ENV{CLIO_CLANG_TIDY_BIN})
set(_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
if((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
message(
FATAL_ERROR
"$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable"
)
endif()
message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
else()
find_program(
_CLANG_TIDY_BIN
NAMES "clang-tidy-20" "clang-tidy"
REQUIRED
)
endif()
# Find clang-tidy binary
if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
set(_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
message(FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable")
endif ()
message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
else ()
find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-20" "clang-tidy" REQUIRED)
endif ()
if(NOT _CLANG_TIDY_BIN)
message(
FATAL_ERROR
"clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy."
)
endif()
if (NOT _CLANG_TIDY_BIN)
message(
FATAL_ERROR
"clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy."
)
endif ()
# Support for https://github.com/matus-chochlik/ctcache
find_program(CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if(CLANG_TIDY_CACHE_PATH)
set(_CLANG_TIDY_CMD
"${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
CACHE STRING
"A combined command to run clang-tidy with caching wrapper"
)
else()
set(_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
endif()
# Support for https://github.com/matus-chochlik/ctcache
find_program(CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
set(_CLANG_TIDY_CMD "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
CACHE STRING "A combined command to run clang-tidy with caching wrapper"
)
else ()
set(_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
endif ()
set(CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
message(STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif ()
set(CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
message(STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif()

View File

@@ -1,47 +1,67 @@
find_package(Git REQUIRED)
if (DEFINED ENV{GITHUB_BRANCH_NAME})
set(GIT_BUILD_BRANCH $ENV{GITHUB_BRANCH_NAME})
set(GIT_COMMIT_HASH $ENV{GITHUB_HEAD_SHA})
else ()
set(GIT_COMMAND branch --show-current)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_BUILD_BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
if(DEFINED ENV{GITHUB_BRANCH_NAME})
set(GIT_BUILD_BRANCH $ENV{GITHUB_BRANCH_NAME})
set(GIT_COMMIT_HASH $ENV{GITHUB_HEAD_SHA})
else()
set(GIT_COMMAND branch --show-current)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE GIT_BUILD_BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE
COMMAND_ERROR_IS_FATAL ANY
)
set(GIT_COMMAND rev-parse HEAD)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
endif ()
set(GIT_COMMAND rev-parse HEAD)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE
COMMAND_ERROR_IS_FATAL ANY
)
endif()
execute_process(
COMMAND date +%Y%m%d%H%M%S WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BUILD_DATE
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
COMMAND date +%Y%m%d%H%M%S
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE BUILD_DATE
OUTPUT_STRIP_TRAILING_WHITESPACE
COMMAND_ERROR_IS_FATAL ANY
)
message(STATUS "Git branch: ${GIT_BUILD_BRANCH}")
message(STATUS "Git commit hash: ${GIT_COMMIT_HASH}")
message(STATUS "Build date: ${BUILD_DATE}")
if (DEFINED ENV{FORCE_CLIO_VERSION} AND NOT "$ENV{FORCE_CLIO_VERSION}" STREQUAL "")
message(STATUS "Using explicitly provided '${FORCE_CLIO_VERSION}' as Clio version")
if(
DEFINED ENV{FORCE_CLIO_VERSION}
AND NOT "$ENV{FORCE_CLIO_VERSION}" STREQUAL ""
)
message(
STATUS
"Using explicitly provided '${FORCE_CLIO_VERSION}' as Clio version"
)
set(CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
set(DOC_CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
else ()
message(STATUS "Using 'YYYYMMDDHMS-<branch>-<git short rev>' as Clio version")
set(CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
set(DOC_CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
else()
message(
STATUS
"Using 'YYYYMMDDHMS-<branch>-<git short rev>' as Clio version"
)
string(SUBSTRING ${GIT_COMMIT_HASH} 0 7 GIT_COMMIT_HASH_SHORT)
string(SUBSTRING ${GIT_COMMIT_HASH} 0 7 GIT_COMMIT_HASH_SHORT)
set(CLIO_VERSION "${BUILD_DATE}-${GIT_BUILD_BRANCH}-${GIT_COMMIT_HASH_SHORT}")
set(DOC_CLIO_VERSION "develop")
endif ()
set(CLIO_VERSION
"${BUILD_DATE}-${GIT_BUILD_BRANCH}-${GIT_COMMIT_HASH_SHORT}"
)
set(DOC_CLIO_VERSION "develop")
endif()
if (CMAKE_BUILD_TYPE MATCHES Debug)
set(CLIO_VERSION "${CLIO_VERSION}+DEBUG")
endif ()
if(CMAKE_BUILD_TYPE MATCHES Debug)
set(CLIO_VERSION "${CLIO_VERSION}+DEBUG")
endif()
message(STATUS "Build version: ${CLIO_VERSION}")

View File

@@ -106,78 +106,120 @@ option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
# Check prereqs
find_program(GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)
if (DEFINED CODE_COVERAGE_GCOV_TOOL)
set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
elseif (DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
if (APPLE)
execute_process(COMMAND xcrun -f llvm-cov OUTPUT_VARIABLE LLVMCOV_PATH OUTPUT_STRIP_TRAILING_WHITESPACE)
else ()
find_program(LLVMCOV_PATH llvm-cov)
endif ()
if (LLVMCOV_PATH)
set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
endif ()
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
find_program(GCOV_PATH gcov)
set(GCOV_TOOL "${GCOV_PATH}")
endif ()
if(DEFINED CODE_COVERAGE_GCOV_TOOL)
set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
elseif(DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
if(APPLE)
execute_process(
COMMAND xcrun -f llvm-cov
OUTPUT_VARIABLE LLVMCOV_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
else()
find_program(LLVMCOV_PATH llvm-cov)
endif()
if(LLVMCOV_PATH)
set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
endif()
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
find_program(GCOV_PATH gcov)
set(GCOV_TOOL "${GCOV_PATH}")
endif()
# Check supported compiler (Clang, GNU and Flang)
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
foreach (LANG ${LANGUAGES})
if ("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
if ("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
endif ()
elseif (NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU" AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES
"(LLVM)?[Ff]lang"
)
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
endif ()
endforeach ()
foreach(LANG ${LANGUAGES})
if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
message(
FATAL_ERROR
"Clang version must be 3.0.0 or greater! Aborting..."
)
endif()
elseif(
NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang"
)
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
endif()
endforeach()
set(COVERAGE_COMPILER_FLAGS "-g --coverage" CACHE INTERNAL "")
if (CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
if (HAVE_cxx_fprofile_abs_path)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif ()
include(CheckCCompilerFlag)
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
if (HAVE_c_fprofile_abs_path)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif ()
endif ()
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
if(HAVE_cxx_fprofile_abs_path)
set(COVERAGE_CXX_COMPILER_FLAGS
"${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path"
)
endif()
include(CheckCCompilerFlag)
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
if(HAVE_c_fprofile_abs_path)
set(COVERAGE_C_COMPILER_FLAGS
"${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path"
)
endif()
endif()
set(CMAKE_Fortran_FLAGS_COVERAGE ${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the Fortran compiler during coverage builds." FORCE
set(CMAKE_Fortran_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING
"Flags used by the Fortran compiler during coverage builds."
FORCE
)
set(CMAKE_CXX_FLAGS_COVERAGE ${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C++ compiler during coverage builds." FORCE
set(CMAKE_CXX_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING
"Flags used by the C++ compiler during coverage builds."
FORCE
)
set(CMAKE_C_FLAGS_COVERAGE ${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C compiler during coverage builds." FORCE
set(CMAKE_C_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING
"Flags used by the C compiler during coverage builds."
FORCE
)
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "" CACHE STRING "Flags used for linking binaries during coverage builds." FORCE)
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE ""
CACHE STRING "Flags used by the shared libraries linker during coverage builds." FORCE
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
""
CACHE STRING
"Flags used for linking binaries during coverage builds."
FORCE
)
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
""
CACHE STRING
"Flags used by the shared libraries linker during coverage builds."
FORCE
)
mark_as_advanced(
CMAKE_Fortran_FLAGS_COVERAGE CMAKE_CXX_FLAGS_COVERAGE CMAKE_C_FLAGS_COVERAGE CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE
CMAKE_Fortran_FLAGS_COVERAGE
CMAKE_CXX_FLAGS_COVERAGE
CMAKE_C_FLAGS_COVERAGE
CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE
)
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if (NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif () # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
get_property(
GENERATOR_IS_MULTI_CONFIG
GLOBAL
PROPERTY GENERATOR_IS_MULTI_CONFIG
)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
message(
WARNING
"Code coverage results with an optimised (non-Debug) build may be misleading"
)
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
link_libraries(gcov)
endif ()
if(
CMAKE_C_COMPILER_ID STREQUAL "GNU"
OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU"
)
link_libraries(gcov)
endif()
# Defines a target for running and collection code coverage information Builds dependencies, runs the given executable
# and outputs reports. NOTE! The executable should always have a ZERO as exit code otherwise the coverage generation
@@ -191,171 +233,236 @@ endif ()
# defaults to xml) EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative # to BASE_DIRECTORY,
# with CMake 3.4+) ) The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the GCVOR
# command.
function (setup_target_for_coverage_gcovr)
set(options NONE)
set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if (NOT GCOV_TOOL)
message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
endif ()
if (NOT GCOVR_PATH)
message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
endif ()
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
if (DEFINED Coverage_BASE_DIRECTORY)
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
else ()
set(BASEDIR ${PROJECT_SOURCE_DIR})
endif ()
if (NOT DEFINED Coverage_FORMAT)
set(Coverage_FORMAT xml)
endif ()
if ("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
else ()
if ((Coverage_FORMAT STREQUAL "html-details") OR (Coverage_FORMAT STREQUAL "html-nested"))
set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
elseif (Coverage_FORMAT STREQUAL "html-single")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
elseif ((Coverage_FORMAT STREQUAL "json-summary") OR (Coverage_FORMAT STREQUAL "json-details")
OR (Coverage_FORMAT STREQUAL "coveralls")
function(setup_target_for_coverage_gcovr)
set(options NONE)
set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
cmake_parse_arguments(
Coverage
"${options}"
"${oneValueArgs}"
"${multiValueArgs}"
${ARGN}
)
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
elseif (Coverage_FORMAT STREQUAL "txt")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
elseif (Coverage_FORMAT STREQUAL "csv")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
else ()
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
endif ()
endif ()
if ((Coverage_FORMAT STREQUAL "cobertura") OR (Coverage_FORMAT STREQUAL "xml"))
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty)
set(Coverage_FORMAT cobertura) # overwrite xml
elseif (Coverage_FORMAT STREQUAL "sonarqube")
list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}")
elseif (Coverage_FORMAT STREQUAL "json-summary")
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
elseif (Coverage_FORMAT STREQUAL "json-details")
list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
elseif (Coverage_FORMAT STREQUAL "coveralls")
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
elseif (Coverage_FORMAT STREQUAL "csv")
list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}")
elseif (Coverage_FORMAT STREQUAL "txt")
list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}")
elseif (Coverage_FORMAT STREQUAL "html-single")
list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
elseif (Coverage_FORMAT STREQUAL "html-nested")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}")
elseif (Coverage_FORMAT STREQUAL "html-details")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}")
else ()
message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
endif ()
if(NOT GCOV_TOOL)
message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
endif()
# Collect excludes (CMake 3.4+: Also compute absolute paths)
set(GCOVR_EXCLUDES "")
foreach (EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
if (CMAKE_VERSION VERSION_GREATER 3.4)
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
endif ()
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
endforeach ()
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
if(NOT GCOVR_PATH)
message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
endif()
# Combine excludes to several -e arguments
set(GCOVR_EXCLUDE_ARGS "")
foreach (EXCLUDE ${GCOVR_EXCLUDES})
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
endforeach ()
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
if(DEFINED Coverage_BASE_DIRECTORY)
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
else()
set(BASEDIR ${PROJECT_SOURCE_DIR})
endif()
# Set up commands which will be run to generate coverage data Run tests
set(GCOVR_EXEC_TESTS_CMD ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS})
if(NOT DEFINED Coverage_FORMAT)
set(Coverage_FORMAT xml)
endif()
# Create folder
if (DEFINED GCOVR_CREATE_FOLDER)
set(GCOVR_FOLDER_CMD ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
else ()
set(GCOVR_FOLDER_CMD echo) # dummy
endif ()
if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
message(
FATAL_ERROR
"Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting..."
)
else()
if(
(Coverage_FORMAT STREQUAL "html-details")
OR (Coverage_FORMAT STREQUAL "html-nested")
)
set(GCOVR_OUTPUT_FILE
${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html
)
set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
elseif(Coverage_FORMAT STREQUAL "html-single")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
elseif(
(Coverage_FORMAT STREQUAL "json-summary")
OR (Coverage_FORMAT STREQUAL "json-details")
OR (Coverage_FORMAT STREQUAL "coveralls")
)
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
elseif(Coverage_FORMAT STREQUAL "txt")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
elseif(Coverage_FORMAT STREQUAL "csv")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
else()
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
endif()
endif()
# Running gcovr
set(GCOVR_CMD
${GCOVR_PATH}
--gcov-executable
${GCOV_TOOL}
--gcov-ignore-parse-errors=negative_hits.warn_once_per_file
-r
${BASEDIR}
${GCOVR_ADDITIONAL_ARGS}
${GCOVR_EXCLUDE_ARGS}
--object-directory=${PROJECT_BINARY_DIR}
)
if(
(Coverage_FORMAT STREQUAL "cobertura")
OR (Coverage_FORMAT STREQUAL "xml")
)
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty)
set(Coverage_FORMAT cobertura) # overwrite xml
elseif(Coverage_FORMAT STREQUAL "sonarqube")
list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}")
elseif(Coverage_FORMAT STREQUAL "json-summary")
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
elseif(Coverage_FORMAT STREQUAL "json-details")
list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
elseif(Coverage_FORMAT STREQUAL "coveralls")
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
elseif(Coverage_FORMAT STREQUAL "csv")
list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}")
elseif(Coverage_FORMAT STREQUAL "txt")
list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}")
elseif(Coverage_FORMAT STREQUAL "html-single")
list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
elseif(Coverage_FORMAT STREQUAL "html-nested")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}")
elseif(Coverage_FORMAT STREQUAL "html-details")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}")
else()
message(
FATAL_ERROR
"Unsupported output style ${Coverage_FORMAT}! Aborting..."
)
endif()
if (CODE_COVERAGE_VERBOSE)
message(STATUS "Executed command report")
# Collect excludes (CMake 3.4+: Also compute absolute paths)
set(GCOVR_EXCLUDES "")
foreach(
EXCLUDE
${Coverage_EXCLUDE}
${COVERAGE_EXCLUDES}
${COVERAGE_GCOVR_EXCLUDES}
)
if(CMAKE_VERSION VERSION_GREATER 3.4)
get_filename_component(
EXCLUDE
${EXCLUDE}
ABSOLUTE
BASE_DIR ${BASEDIR}
)
endif()
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
endforeach()
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
message(STATUS "Command to run tests: ")
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
# Combine excludes to several -e arguments
set(GCOVR_EXCLUDE_ARGS "")
foreach(EXCLUDE ${GCOVR_EXCLUDES})
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
endforeach()
if (NOT GCOVR_FOLDER_CMD STREQUAL "echo")
message(STATUS "Command to create a folder: ")
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
endif ()
# Set up commands which will be run to generate coverage data Run tests
set(GCOVR_EXEC_TESTS_CMD ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS})
message(STATUS "Command to generate gcovr coverage data: ")
string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
message(STATUS "${GCOVR_CMD_SPACED}")
endif ()
# Create folder
if(DEFINED GCOVR_CREATE_FOLDER)
set(GCOVR_FOLDER_CMD
${CMAKE_COMMAND}
-E
make_directory
${GCOVR_CREATE_FOLDER}
)
else()
set(GCOVR_FOLDER_CMD echo) # dummy
endif()
add_custom_target(
${Coverage_NAME}
COMMAND ${GCOVR_EXEC_TESTS_CMD}
COMMAND ${GCOVR_FOLDER_CMD}
COMMAND ${GCOVR_CMD}
BYPRODUCTS ${GCOVR_OUTPUT_FILE}
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
DEPENDS ${Coverage_DEPENDENCIES}
VERBATIM # Protect arguments to commands
COMMENT "Running gcovr to produce code coverage report."
)
# Running gcovr
set(GCOVR_CMD
${GCOVR_PATH}
--gcov-executable
${GCOV_TOOL}
--gcov-ignore-parse-errors=negative_hits.warn_once_per_file
-r
${BASEDIR}
${GCOVR_ADDITIONAL_ARGS}
${GCOVR_EXCLUDE_ARGS}
--object-directory=${PROJECT_BINARY_DIR}
)
# Show info where to find the report
add_custom_command(
TARGET ${Coverage_NAME} POST_BUILD COMMAND ;
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
)
endfunction () # setup_target_for_coverage_gcovr
if(CODE_COVERAGE_VERBOSE)
message(STATUS "Executed command report")
function (append_coverage_compiler_flags)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction () # append_coverage_compiler_flags
message(STATUS "Command to run tests: ")
string(
REPLACE ";"
" "
GCOVR_EXEC_TESTS_CMD_SPACED
"${GCOVR_EXEC_TESTS_CMD}"
)
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
message(STATUS "Command to create a folder: ")
string(
REPLACE ";"
" "
GCOVR_FOLDER_CMD_SPACED
"${GCOVR_FOLDER_CMD}"
)
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
endif()
message(STATUS "Command to generate gcovr coverage data: ")
string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
message(STATUS "${GCOVR_CMD_SPACED}")
endif()
add_custom_target(
${Coverage_NAME}
COMMAND ${GCOVR_EXEC_TESTS_CMD}
COMMAND ${GCOVR_FOLDER_CMD}
COMMAND ${GCOVR_CMD}
BYPRODUCTS ${GCOVR_OUTPUT_FILE}
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
DEPENDS ${Coverage_DEPENDENCIES}
VERBATIM # Protect arguments to commands
COMMENT "Running gcovr to produce code coverage report."
)
# Show info where to find the report
add_custom_command(
TARGET ${Coverage_NAME}
POST_BUILD
COMMAND ;
COMMENT
"Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
)
endfunction() # setup_target_for_coverage_gcovr
function(append_coverage_compiler_flags)
set(CMAKE_C_FLAGS
"${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}"
PARENT_SCOPE
)
set(CMAKE_CXX_FLAGS
"${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}"
PARENT_SCOPE
)
set(CMAKE_Fortran_FLAGS
"${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}"
PARENT_SCOPE
)
message(
STATUS
"Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}"
)
endfunction() # append_coverage_compiler_flags
# Setup coverage for specific library
function (append_coverage_compiler_flags_to_target name mode)
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
target_compile_options(${name} ${mode} ${_flag_list})
if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
target_link_libraries(${name} ${mode} gcov)
endif ()
endfunction ()
function(append_coverage_compiler_flags_to_target name mode)
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
target_compile_options(${name} ${mode} ${_flag_list})
if(
CMAKE_C_COMPILER_ID STREQUAL "GNU"
OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU"
)
target_link_libraries(${name} ${mode} gcov)
endif()
endfunction()

View File

@@ -12,9 +12,9 @@ set(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT})
add_custom_target(
docs
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen"
VERBATIM
docs
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen"
VERBATIM
)

View File

@@ -1,11 +1,11 @@
if (DEFINED CMAKE_LINKER_TYPE)
message(STATUS "Custom linker is already set: ${CMAKE_LINKER_TYPE}")
return()
endif ()
if(DEFINED CMAKE_LINKER_TYPE)
message(STATUS "Custom linker is already set: ${CMAKE_LINKER_TYPE}")
return()
endif()
find_program(MOLD_PATH mold)
if (MOLD_PATH AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
message(STATUS "Using Mold linker: ${MOLD_PATH}")
set(CMAKE_LINKER_TYPE MOLD)
endif ()
if(MOLD_PATH AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
message(STATUS "Using Mold linker: ${MOLD_PATH}")
set(CMAKE_LINKER_TYPE MOLD)
endif()

View File

@@ -30,53 +30,58 @@ set(COMPILER_FLAGS
# TODO: re-enable when we change CI #884 if (is_gcc AND NOT lint) list(APPEND COMPILER_FLAGS -Wduplicated-branches
# -Wduplicated-cond -Wlogical-op -Wuseless-cast ) endif ()
if (is_clang)
list(APPEND COMPILER_FLAGS -Wshadow # gcc is to aggressive with shadowing
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
)
endif ()
if(is_clang)
# gcc is too aggressive with shadowing
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
list(APPEND COMPILER_FLAGS -Wshadow)
endif()
if (is_appleclang)
list(APPEND COMPILER_FLAGS -Wreorder-init-list)
endif ()
if(is_appleclang)
list(APPEND COMPILER_FLAGS -Wreorder-init-list)
endif()
if (san)
# When building with sanitizers some compilers will actually produce extra warnings/errors. We don't want this yet, at
# least not until we have fixed all runtime issues reported by the sanitizers. Once that is done we can start removing
# some of these and trying to fix it in our codebase. We can never remove all of below because most of them are
# reported from deep inside libraries like boost or libxrpl.
#
# TODO: Address in https://github.com/XRPLF/clio/issues/1885
list(
APPEND
COMPILER_FLAGS
-Wno-error=tsan # Disables treating TSAN warnings as errors
-Wno-tsan # Disables TSAN warnings (thread-safety analysis)
-Wno-uninitialized # Disables warnings about uninitialized variables (AddressSanitizer, UndefinedBehaviorSanitizer,
# etc.)
-Wno-stringop-overflow # Disables warnings about potential string operation overflows (AddressSanitizer)
-Wno-unsafe-buffer-usage # Disables warnings about unsafe memory operations (AddressSanitizer)
-Wno-frame-larger-than # Disables warnings about stack frame size being too large (AddressSanitizer)
-Wno-unused-function # Disables warnings about unused functions (LeakSanitizer, memory-related issues)
-Wno-unused-but-set-variable # Disables warnings about unused variables (MemorySanitizer)
-Wno-thread-safety-analysis # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-thread-safety # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-sign-compare # Disables warnings about signed/unsigned comparison (UndefinedBehaviorSanitizer)
-Wno-nonnull # Disables warnings related to null pointer dereferencing (UndefinedBehaviorSanitizer)
-Wno-address # Disables warnings about address-related issues (UndefinedBehaviorSanitizer)
-Wno-array-bounds # Disables array bounds checks (UndefinedBehaviorSanitizer)
)
endif ()
if(san)
# When building with sanitizers some compilers will actually produce extra warnings/errors. We don't want this yet,
# at least not until we have fixed all runtime issues reported by the sanitizers. Once that is done we can start
# removing some of these and trying to fix it in our codebase. We can never remove all of below because most of them
# are reported from deep inside libraries like boost or libxrpl.
#
# TODO: Address in https://github.com/XRPLF/clio/issues/1885
list(
APPEND COMPILER_FLAGS
-Wno-error=tsan # Disables treating TSAN warnings as errors
-Wno-tsan # Disables TSAN warnings (thread-safety analysis)
-Wno-uninitialized # Disables warnings about uninitialized variables (AddressSanitizer,
# UndefinedBehaviorSanitizer, etc.)
-Wno-stringop-overflow # Disables warnings about potential string operation overflows (AddressSanitizer)
-Wno-unsafe-buffer-usage # Disables warnings about unsafe memory operations (AddressSanitizer)
-Wno-frame-larger-than # Disables warnings about stack frame size being too large (AddressSanitizer)
-Wno-unused-function # Disables warnings about unused functions (LeakSanitizer, memory-related issues)
-Wno-unused-but-set-variable # Disables warnings about unused variables (MemorySanitizer)
-Wno-thread-safety-analysis # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-thread-safety # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-sign-compare # Disables warnings about signed/unsigned comparison (UndefinedBehaviorSanitizer)
-Wno-nonnull # Disables warnings related to null pointer dereferencing (UndefinedBehaviorSanitizer)
-Wno-address # Disables warnings about address-related issues (UndefinedBehaviorSanitizer)
-Wno-array-bounds # Disables array bounds checks (UndefinedBehaviorSanitizer)
)
endif()
# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for
# the flags description
if (time_trace)
if (is_clang OR is_appleclang)
list(APPEND COMPILER_FLAGS -ftime-trace)
else ()
message(FATAL_ERROR "Clang or AppleClang is required to use `-ftime-trace`")
endif ()
endif ()
if(time_trace)
if(is_clang OR is_appleclang)
list(APPEND COMPILER_FLAGS -ftime-trace)
else()
message(
FATAL_ERROR
"Clang or AppleClang is required to use `-ftime-trace`"
)
endif()
endif()
target_compile_options(clio_options INTERFACE ${COMPILER_FLAGS})
# Add debug symbols for all builds, including Release. This is needed to get useful stack traces in production.
target_compile_options(clio_options INTERFACE -g)

View File

@@ -1,11 +1,17 @@
include(CheckIncludeFileCXX)
check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
if (SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio_options INTERFACE "HAS_SOURCE_LOCATION")
endif ()
if(SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio_options INTERFACE "HAS_SOURCE_LOCATION")
endif()
check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if (EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio_options INTERFACE "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif ()
check_include_file_cxx(
"experimental/source_location"
EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE
)
if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(
clio_options
INTERFACE "HAS_EXPERIMENTAL_SOURCE_LOCATION"
)
endif()

View File

@@ -1,4 +1,10 @@
set(Boost_USE_STATIC_LIBS ON)
set(Boost_USE_STATIC_RUNTIME ON)
find_package(Boost 1.82 REQUIRED CONFIG COMPONENTS program_options coroutine system log log_setup)
find_package(
Boost
1.82
REQUIRED
CONFIG
COMPONENTS program_options coroutine system log log_setup
)

View File

@@ -1,3 +1,6 @@
find_package(OpenSSL 1.1.1 REQUIRED CONFIG)
set_target_properties(OpenSSL::SSL PROPERTIES INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2)
set_target_properties(
OpenSSL::SSL
PROPERTIES INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)

View File

@@ -1,11 +1,14 @@
if ("${san}" STREQUAL "")
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
find_package(libbacktrace REQUIRED CONFIG)
else ()
# Some sanitizers (TSAN and ASAN for sure) can't be used with libbacktrace because they have their own backtracing
# capabilities and there are conflicts. In any case, this makes sure Clio code knows that backtrace is not available.
# See relevant conan profiles for sanitizers where we disable stacktrace in Boost explicitly.
target_compile_definitions(clio_options INTERFACE CLIO_WITHOUT_STACKTRACE)
message(STATUS "Sanitizer enabled, disabling stacktrace")
endif ()
if("${san}" STREQUAL "")
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
target_compile_definitions(
clio_options
INTERFACE BOOST_STACKTRACE_USE_BACKTRACE
)
find_package(libbacktrace REQUIRED CONFIG)
else()
# Some sanitizers (TSAN and ASAN for sure) can't be used with libbacktrace because they have their own backtracing
# capabilities and there are conflicts. In any case, this makes sure Clio code knows that backtrace is not
# available. See relevant conan profiles for sanitizers where we disable stacktrace in Boost explicitly.
target_compile_definitions(clio_options INTERFACE CLIO_WITHOUT_STACKTRACE)
message(STATUS "Sanitizer enabled, disabling stacktrace")
endif()

View File

@@ -1,5 +1,5 @@
find_package(spdlog REQUIRED)
if (NOT TARGET spdlog::spdlog)
message(FATAL_ERROR "spdlog::spdlog target not found")
endif ()
if(NOT TARGET spdlog::spdlog)
message(FATAL_ERROR "spdlog::spdlog target not found")
endif()

View File

@@ -1,5 +1,10 @@
set(CLIO_INSTALL_DIR "/opt/clio")
set(CMAKE_INSTALL_PREFIX "${CLIO_INSTALL_DIR}" CACHE PATH "Install prefix" FORCE)
set(CMAKE_INSTALL_PREFIX
"${CLIO_INSTALL_DIR}"
CACHE PATH
"Install prefix"
FORCE
)
set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
@@ -10,7 +15,17 @@ install(TARGETS clio_server DESTINATION "${CMAKE_INSTALL_BINDIR}")
file(READ docs/examples/config/example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
install(
FILES ${CMAKE_BINARY_DIR}/install-config.json
DESTINATION etc
RENAME config.json
)
configure_file("${CMAKE_SOURCE_DIR}/cmake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
configure_file(
"${CMAKE_SOURCE_DIR}/cmake/install/clio.service.in"
"${CMAKE_BINARY_DIR}/clio.service"
)
install(
FILES "${CMAKE_BINARY_DIR}/clio.service"
DESTINATION /lib/systemd/system
)

View File

@@ -3,15 +3,15 @@
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987",
"xrpl/3.0.0#534d3f65a336109eee929b88962bae4e%1765375071.547",
"xrpl/3.1.0#3d408ab8c8020014fa7dd52bc7cc7ea8%1769706825.165",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926",
"spdlog/1.17.0#bcbaaf7147bda6ad24ffbd1ac3d7142c%1767636069.964",
"spdlog/1.17.0#bcbaaf7147bda6ad24ffbd1ac3d7142c%1768312128.781",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46",
"re2/20230301#ca3b241baec15bd31ea9187150e0b333%1765850148.103",
"rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656",
"protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456",
"openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
"nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1765850143.957",
"nudb/2.0.9#0432758a24204da08fee953ec9ea03cb%1769436073.32",
"minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914",
"libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592",
@@ -19,7 +19,7 @@
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03",
"libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
"http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1754325001.385",
"gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1755784855.585",
"gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1768312129.152",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
"fmt/12.1.0#50abab23274d56bb8f42c94b3b9a40c7%1763984116.926",
"doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
@@ -40,17 +40,20 @@
],
"python_requires": [],
"overrides": {
"boost/1.83.0": [
"protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88": [
null,
"protobuf/3.21.12"
],
"boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1": [
null,
"boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1"
],
"protobuf/3.21.12": [
null,
"protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88"
],
"lz4/1.9.4": [
"lz4/1.10.0"
],
"boost/1.90.0": [
"boost/1.83.0"
],
"sqlite3/3.44.2": [
"sqlite3/3.49.1"
]

View File

@@ -12,7 +12,6 @@ class ClioConan(ConanFile):
options = {}
requires = [
"boost/1.83.0",
"cassandra-cpp-driver/2.17.0",
"fmt/12.1.0",
"grpc/1.50.1",
@@ -20,7 +19,7 @@ class ClioConan(ConanFile):
"openssl/1.1.1w",
"protobuf/3.21.12",
"spdlog/1.17.0",
"xrpl/3.0.0",
"xrpl/3.1.0",
"zlib/1.3.1",
]
@@ -43,6 +42,7 @@ class ClioConan(ConanFile):
exports_sources = ("CMakeLists.txt", "cmake/*", "src/*")
def requirements(self):
self.requires("boost/1.83.0", force=True)
self.requires("gtest/1.17.0")
self.requires("benchmark/1.9.4")

View File

@@ -3,8 +3,6 @@ project(docs)
include(${CMAKE_CURRENT_SOURCE_DIR}/../cmake/ClioVersion.cmake)
# cmake-format: off
# Generate `docs` target for doxygen documentation
# Note: use `cmake --build . --target docs` from your `build` directory to generate the documentation
# cmake-format: on
include(${CMAKE_CURRENT_SOURCE_DIR}/../cmake/Docs.cmake)

View File

@@ -433,6 +433,14 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The minimum value is `1`. The maximum value is `65535`.
- **Description**: The number of ledger objects to fetch concurrently per marker.
### cache.limit_load_in_cluster
- **Required**: True
- **Type**: boolean
- **Default value**: `False`
- **Constraints**: None
- **Description**: If enabled, only one Clio node in a cluster (sharing the same database) will load the cache at a time.
### cache.load
- **Required**: True

View File

@@ -93,3 +93,42 @@ To completely disable Prometheus metrics add `"prometheus": { "enabled": false }
It is important to know that Clio responds to Prometheus requests only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.
You can find an example Docker Compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).
## Ledger cache file
Since version 2.7.0, Clio supports saving the ledger cache to a local file on shutdown and loading it on startup. This feature is disabled by default but can significantly improve restart times.
### Benefits
- **Faster startup**: Loading cache from a file takes less than a minute, compared to 40-90 minutes on Mainnet when loading from the database.
- **Reduced database load**: Clio doesn't put extra load on the database when starting with a cache file.
- **Improved availability**: Faster restart times mean less downtime during maintenance or updates.
> [!NOTE]
> This feature only works when Clio is restarted. When starting Clio for the first time, the cache must be loaded from `rippled` or the database as usual.
### Configuration
To enable the ledger cache file feature, specify the [`cache.file.path`](./config-description.md#cachefilepath) option in your `config.json`:
```json
"cache": {
"file": {
"path": "/path/to/cache/file"
}
}
```
You can optionally configure additional settings such as [`cache.file.max_sequence_age`](./config-description.md#cachefilemax_sequence_age) and [`cache.file.async_save`](./config-description.md#cachefileasync_save) to fine-tune the behavior. For a complete list of available options and their default values, see the [Configuration Description](./config-description.md#cachefilepath) documentation.
### How it works
1. **On shutdown**: Clio saves the current ledger cache to the specified file path. The file includes a hash for integrity verification.
2. **On startup**: Clio checks if a cache file exists at the configured path. If the file exists, Clio will:
- Verify the file's integrity to ensure it is complete and not corrupted.
- Compare the latest ledger sequence in the cache file with the latest sequence in the database.
- Use the cache file only if the difference is less than [`cache.file.max_sequence_age`](./config-description.md#cachefilemax_sequence_age).
- If validation fails or the cache is too old, Clio will fall back to loading from the database.
> [!IMPORTANT]
> The cache file path should point to a location with sufficient disk space. On typical deployments, the cache file size can be several gigabytes.

View File

@@ -1,13 +1,11 @@
add_library(clio_app)
target_sources(clio_app PRIVATE CliArgs.cpp ClioApplication.cpp Stopper.cpp WebHandlers.cpp)
target_sources(
clio_app
PRIVATE CliArgs.cpp ClioApplication.cpp Stopper.cpp WebHandlers.cpp
)
target_link_libraries(
clio_app
PUBLIC clio_cluster
clio_etl
clio_feed
clio_migration
clio_rpc
clio_web
PRIVATE Boost::program_options
clio_app
PUBLIC clio_cluster clio_etl clio_feed clio_migration clio_rpc clio_web
PRIVATE Boost::program_options
)

View File

@@ -58,12 +58,16 @@ CliArgs::parse(int argc, char const* argv[])
positional.add("conf", 1);
auto const printHelp = [&description]() {
std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n"
<< description;
};
po::variables_map parsed;
try {
po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
po::store(
po::command_line_parser(argc, argv).options(description).positional(positional).run(),
parsed
);
po::notify(parsed);
} catch (po::error const& e) {
std::cerr << "Error: " << e.what() << std::endl << std::endl;
@@ -87,7 +91,8 @@ CliArgs::parse(int argc, char const* argv[])
if (parsed.contains("config-description")) {
std::filesystem::path const filePath = parsed["config-description"].as<std::string>();
auto const res = util::config::ClioConfigDescription::generateConfigDescriptionToFile(filePath);
auto const res =
util::config::ClioConfigDescription::generateConfigDescriptionToFile(filePath);
if (res.has_value())
return Action{Action::Exit{EXIT_SUCCESS}};
@@ -99,15 +104,22 @@ CliArgs::parse(int argc, char const* argv[])
if (parsed.contains("migrate")) {
auto const opt = parsed["migrate"].as<std::string>();
if (opt == "status")
return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::status()}};
return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::migration(opt)}};
if (opt == "status") {
return Action{Action::Migrate{
.configPath = std::move(configPath), .subCmd = MigrateSubCmd::status()
}};
}
return Action{Action::Migrate{
.configPath = std::move(configPath), .subCmd = MigrateSubCmd::migration(opt)
}};
}
if (parsed.contains("verify"))
return Action{Action::VerifyConfig{.configPath = std::move(configPath)}};
return Action{Action::Run{.configPath = std::move(configPath), .useNgWebServer = parsed.contains("ng-web-server")}};
return Action{Action::Run{
.configPath = std::move(configPath), .useNgWebServer = parsed.contains("ng-web-server")
}};
}
} // namespace app

View File

@@ -79,7 +79,8 @@ public:
/**
* @brief Apply a function to the action.
*
* @tparam Processors Action processors types. Must be callable with the action type and return int.
* @tparam Processors Action processors types. Must be callable with the action type and
* return int.
* @param processors Action processors.
* @return Exit code.
*/

View File

@@ -29,6 +29,8 @@
#include "etl/ETLService.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgers.hpp"
#include "etl/SystemState.hpp"
#include "etl/WriterState.hpp"
#include "feed/SubscriptionManager.hpp"
#include "migration/MigrationInspectorFactory.hpp"
#include "rpc/Counters.hpp"
@@ -121,8 +123,11 @@ ClioApplication::run(bool const useNgWebServer)
// Interface to the database
auto backend = data::makeBackend(config_, cache);
cluster::ClusterCommunicationService clusterCommunicationService{backend};
clusterCommunicationService.run();
auto systemState = etl::SystemState::makeSystemState(config_);
auto [clusterCommunicationService, cacheLoadingState] =
cluster::ClusterCommunicationService::make(config_, backend, systemState);
clusterCommunicationService->run();
auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
@@ -130,14 +135,15 @@ ClioApplication::run(bool const useNgWebServer)
auto const migrationInspector = migration::makeMigrationInspector(config_, backend);
// Check if any migration is blocking Clio server starting.
if (migrationInspector->isBlockingClio() and backend->hardFetchLedgerRangeNoThrow()) {
LOG(util::LogService::error())
<< "Existing Migration is blocking Clio, Please complete the database migration first.";
LOG(util::LogService::error()) << "Existing Migration is blocking Clio, Please "
"complete the database migration first.";
return EXIT_FAILURE;
}
}
// Manages clients subscribed to streams
auto subscriptions = feed::SubscriptionManager::makeSubscriptionManager(config_, backend, amendmentCenter);
auto subscriptions =
feed::SubscriptionManager::makeSubscriptionManager(config_, backend, amendmentCenter);
// Tracks which ledgers have been validated by the network
auto ledgers = etl::NetworkValidatedLedgers::makeValidatedLedgers();
@@ -150,8 +156,18 @@ ClioApplication::run(bool const useNgWebServer)
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
// ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
auto etl = etl::ETLService::makeETLService(config_, ctx, backend, subscriptions, balancer, ledgers);
// ETL is responsible for writing and publishing to streams. In read-only mode, ETL only
// publishes
auto etl = etl::ETLService::makeETLService(
config_,
std::move(systemState),
std::move(cacheLoadingState),
ctx,
backend,
subscriptions,
balancer,
ledgers
);
auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
auto counters = rpc::Counters::makeCounters(workQueue);
@@ -161,15 +177,19 @@ ClioApplication::run(bool const useNgWebServer)
);
using RPCEngineType = rpc::RPCEngine<rpc::Counters>;
auto const rpcEngine =
RPCEngineType::makeRPCEngine(config_, backend, balancer, dosGuard, workQueue, counters, handlerProvider);
auto const rpcEngine = RPCEngineType::makeRPCEngine(
config_, backend, balancer, dosGuard, workQueue, counters, handlerProvider
);
if (useNgWebServer or config_.get<bool>("server.__ng_web_server")) {
web::ng::RPCServerHandler<RPCEngineType> handler{config_, backend, rpcEngine, etl, dosGuard};
web::ng::RPCServerHandler<RPCEngineType> handler{
config_, backend, rpcEngine, etl, dosGuard
};
auto expectedAdminVerifier = web::makeAdminVerificationStrategy(config_);
if (not expectedAdminVerifier.has_value()) {
LOG(util::LogService::error()) << "Error creating admin verifier: " << expectedAdminVerifier.error();
LOG(util::LogService::error())
<< "Error creating admin verifier: " << expectedAdminVerifier.error();
return EXIT_FAILURE;
}
auto const adminVerifier = std::move(expectedAdminVerifier).value();
@@ -197,7 +217,16 @@ ClioApplication::run(bool const useNgWebServer)
}
appStopper_.setOnStop(
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
Stopper::makeOnStopCallback(
httpServer.value(),
*balancer,
*etl,
*subscriptions,
*backend,
cacheSaver,
*clusterCommunicationService,
ioc
)
);
// Blocks until stopped.
@@ -209,11 +238,22 @@ ClioApplication::run(bool const useNgWebServer)
}
// Init the web server
auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard);
auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(
config_, backend, rpcEngine, etl, dosGuard
);
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
appStopper_.setOnStop(
Stopper::makeOnStopCallback(*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
Stopper::makeOnStopCallback(
*httpServer,
*balancer,
*etl,
*subscriptions,
*backend,
cacheSaver,
*clusterCommunicationService,
ioc
)
);
// Blocks until stopped.

View File

@@ -19,6 +19,7 @@
#pragma once
#include "cluster/Concepts.hpp"
#include "data/BackendInterface.hpp"
#include "data/LedgerCacheSaver.hpp"
#include "etl/ETLServiceInterface.hpp"
@@ -38,7 +39,8 @@
namespace app {
/**
* @brief Application stopper class. On stop it will create a new thread to run all the shutdown tasks.
* @brief Application stopper class. On stop it will create a new thread to run all the shutdown
* tasks.
*/
class Stopper {
boost::asio::io_context ctx_;
@@ -82,10 +84,14 @@ public:
* @param subscriptions The subscription manager to stop.
* @param backend The backend to stop.
* @param cacheSaver The ledger cache saver
* @param clusterCommunicationService The cluster communication service to stop.
* @param ioc The io_context to stop.
* @return The callback to be called on application stop.
*/
template <web::SomeServer ServerType, data::SomeLedgerCacheSaver LedgerCacheSaverType>
template <
web::SomeServer ServerType,
data::SomeLedgerCacheSaver LedgerCacheSaverType,
cluster::SomeClusterCommunicationService ClusterCommunicationServiceType>
static std::function<void(boost::asio::yield_context)>
makeOnStopCallback(
ServerType& server,
@@ -94,6 +100,7 @@ public:
feed::SubscriptionManagerInterface& subscriptions,
data::BackendInterface& backend,
LedgerCacheSaverType& cacheSaver,
ClusterCommunicationServiceType& clusterCommunicationService,
boost::asio::io_context& ioc
)
{
@@ -111,6 +118,8 @@ public:
});
coroutineGroup.asyncWait(yield);
clusterCommunicationService.stop();
etl.stop();
LOG(util::LogService::info()) << "ETL stopped";

View File

@@ -41,7 +41,8 @@ parseConfig(std::string_view configPath)
auto const json = ConfigFileJson::makeConfigFileJson(configPath);
if (!json.has_value()) {
std::cerr << "Error parsing json from config: " << configPath << "\n" << json.error().error << std::endl;
std::cerr << "Error parsing json from config: " << configPath << "\n"
<< json.error().error << std::endl;
return false;
}
auto const errors = getClioConfig().parse(json.value());

View File

@@ -51,9 +51,9 @@ OnConnectCheck::operator()(web::ng::Connection const& connection)
{
dosguard_.get().increment(connection.ip());
if (not dosguard_.get().isOk(connection.ip())) {
return std::unexpected{
web::ng::Response{boost::beast::http::status::too_many_requests, "Too many requests", connection}
};
return std::unexpected{web::ng::Response{
boost::beast::http::status::too_many_requests, "Too many requests", connection
}};
}
return {};
@@ -80,7 +80,10 @@ DisconnectHook::operator()(web::ng::Connection const& connection)
dosguard_.get().decrement(connection.ip());
}
MetricsHandler::MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, rpc::WorkQueue& workQueue)
MetricsHandler::MetricsHandler(
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier,
rpc::WorkQueue& workQueue
)
: adminVerifier_{std::move(adminVerifier)}, workQueue_{std::ref(workQueue)}
{
}
@@ -120,7 +123,9 @@ MetricsHandler::operator()(
if (!postSuccessful) {
return web::ng::Response{
boost::beast::http::status::too_many_requests, rpc::makeError(rpc::RippledError::rpcTOO_BUSY), request
boost::beast::http::status::too_many_requests,
rpc::makeError(rpc::RippledError::rpcTOO_BUSY),
request
};
}
@@ -177,7 +182,9 @@ CacheStateHandler::operator()(
if (cache_.get().isFull())
return web::ng::Response{boost::beast::http::status::ok, kCACHE_CHECK_LOADED_HTML, request};
return web::ng::Response{boost::beast::http::status::service_unavailable, kCACHE_CHECK_NOT_LOADED_HTML, request};
return web::ng::Response{
boost::beast::http::status::service_unavailable, kCACHE_CHECK_NOT_LOADED_HTML, request
};
}
} // namespace app

View File

@@ -68,8 +68,8 @@ public:
};
/**
* @brief A function object that is called when the IP of a connection changes (usually if proxy detected).
* This is used to update the DOS guard.
* @brief A function object that is called when the IP of a connection changes (usually if proxy
* detected). This is used to update the DOS guard.
*/
class IpChangeHook {
std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;
@@ -126,10 +126,14 @@ public:
/**
* @brief Construct a new MetricsHandler object
*
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for
* admin access.
* @param workQueue The WorkQueue to use for handling the request.
*/
MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, rpc::WorkQueue& workQueue);
MetricsHandler(
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier,
rpc::WorkQueue& workQueue
);
/**
* @brief The call of the function object.
@@ -214,10 +218,14 @@ public:
/**
* @brief Construct a new RequestHandler object
*
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for
* admin access.
* @param rpcHandler The RPC handler to use for handling the request.
*/
RequestHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, RpcHandlerType& rpcHandler)
RequestHandler(
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier,
RpcHandlerType& rpcHandler
)
: adminVerifier_(std::move(adminVerifier)), rpcHandler_(rpcHandler)
{
}

139
src/cluster/Backend.cpp Normal file
View File

@@ -0,0 +1,139 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "data/LedgerCacheLoadingState.hpp"
#include "etl/WriterState.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <fmt/format.h>
#include <chrono>
#include <memory>
#include <utility>
#include <vector>
namespace cluster {
// Constructs the cluster backend service.
//
// @param ctx Thread pool on which the periodic read/write tasks are scheduled.
// @param backend Database interface used to fetch other nodes' data and to
//        publish this node's own message.
// @param writerState Read-only view of this node's ETL writer state, embedded
//        into the data written for other nodes to see.
// @param cacheLoadingState Read-only view of this node's cache-loading state,
//        likewise embedded into the published data.
// @param readInterval How often cluster data is read from the database.
// @param writeInterval How often this node's own state is written back.
//
// A fresh random UUID is generated here to identify this node; it is held via
// shared_ptr so other components can keep a stable handle to it (see selfId()).
Backend::Backend(
    boost::asio::thread_pool& ctx,
    std::shared_ptr<data::BackendInterface> backend,
    std::unique_ptr<etl::WriterStateInterface const> writerState,
    std::unique_ptr<data::LedgerCacheLoadingStateInterface const> cacheLoadingState,
    std::chrono::steady_clock::duration readInterval,
    std::chrono::steady_clock::duration writeInterval
)
    : backend_(std::move(backend))
    , writerState_(std::move(writerState))
    , cacheLoadingState_(std::move(cacheLoadingState))
    , readerTask_(readInterval, ctx)
    , writerTask_(writeInterval, ctx)
    , selfUuid_(std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()))
{
}
// Starts both periodic tasks: the reader fetches the cluster-wide state and
// hands a fresh snapshot to the registered observer; the writer persists this
// node's own state to the database.
void
Backend::run()
{
    auto const publishClusterState = [this](boost::asio::yield_context yield) {
        onNewState_(selfUuid_, std::make_shared<ClusterData>(doRead(yield)));
    };
    readerTask_.run(publishClusterState);
    writerTask_.run([this]() { doWrite(); });
}
// Stops both periodic tasks before members are destroyed, so no task callback
// can run against a partially-destructed object.
Backend::~Backend()
{
    stop();
}
// Stops the periodic reader and writer tasks. Also invoked from the
// destructor, so it is expected to be safe to call more than once —
// presumably RepeatedTask::stop() tolerates repeated calls; confirm against
// RepeatedTask's contract.
void
Backend::stop()
{
    readerTask_.stop();
    writerTask_.stop();
}
// Returns the shared UUID identifying this node within the cluster.
// The UUID is generated once in the constructor and never changes.
ClioNode::CUuid
Backend::selfId() const
{
    return selfUuid_;
}
// Reads the state of every node in the cluster from the database.
//
// Each stored payload is parsed as JSON and converted to a ClioNode; the row
// belonging to this node is skipped and replaced by a locally-built snapshot,
// which is always the freshest view of our own state. Any fetch, parse, or
// conversion failure produces an error result instead of a node list.
//
// @param yield Coroutine context used for the asynchronous database fetch.
// @return The list of all nodes' states on success, or an error message.
Backend::ClusterData
Backend::doRead(boost::asio::yield_context yield)
{
    // Exceptions from the DB layer are folded into the same error channel.
    BackendInterface::ClioNodesDataFetchResult fetched;
    try {
        fetched = backend_->fetchClioNodesData(yield);
    } catch (...) {
        fetched = std::unexpected{"Failed to fetch Clio nodes data"};
    }
    if (not fetched.has_value())
        return std::unexpected{std::move(fetched).error()};

    std::vector<ClioNode> nodes;
    for (auto const& [uuid, payload] : fetched.value()) {
        // Our own row is ignored; the local snapshot is appended below.
        if (uuid == *selfUuid_)
            continue;

        boost::system::error_code parseError;
        auto const parsed = boost::json::parse(payload, parseError);
        if (parseError.failed())
            return std::unexpected{fmt::format("Error parsing json from DB: {}", payload)};

        auto node = boost::json::try_value_to<ClioNode>(parsed);
        if (node.has_error())
            return std::unexpected{
                fmt::format("Error converting json to ClioNode: {}", payload)
            };

        // The stored payload does not carry the key; stamp the row's UUID in.
        *node->uuid = uuid;
        nodes.push_back(std::move(node).value());
    }
    nodes.push_back(ClioNode::from(selfUuid_, *writerState_, *cacheLoadingState_));
    return nodes;
}
void
Backend::doWrite()
{
    // Snapshot this node's current state and serialize it as a JSON object.
    auto const node = ClioNode::from(selfUuid_, *writerState_, *cacheLoadingState_);
    auto const json = boost::json::value_from(node);

    // Publish the serialized state under this node's UUID.
    backend_->writeNodeMessage(*node.uuid, boost::json::serialize(json.as_object()));
}
} // namespace cluster

152
src/cluster/Backend.hpp Normal file
View File

@@ -0,0 +1,152 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "cluster/ClioNode.hpp"
#include "cluster/impl/RepeatedTask.hpp"
#include "data/BackendInterface.hpp"
#include "data/LedgerCacheLoadingState.hpp"
#include "etl/WriterState.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/execution_context.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/signals2/connection.hpp>
#include <boost/signals2/signal.hpp>
#include <boost/signals2/variadic_signal.hpp>
#include <boost/uuid/uuid.hpp>

#include <chrono>
#include <concepts>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace cluster {
/**
* @brief Backend communication handler for cluster state synchronization.
*
* This class manages reading and writing cluster state information to/from the backend database.
* It periodically reads the state of other nodes in the cluster and writes the current node's
* state, enabling cluster-wide coordination and awareness.
*/
class Backend {
public:
    /** @brief Type representing cluster data result - either a vector of nodes or an error message */
    using ClusterData = std::expected<std::vector<ClioNode>, std::string>;

private:
    util::Logger log_{"ClusterCommunication"};

    std::shared_ptr<data::BackendInterface> backend_;
    std::unique_ptr<etl::WriterStateInterface const> writerState_;
    std::unique_ptr<data::LedgerCacheLoadingStateInterface const> cacheLoadingState_;

    impl::RepeatedTask<boost::asio::thread_pool> readerTask_;  ///< Periodic cluster-state reads
    impl::RepeatedTask<boost::asio::thread_pool> writerTask_;  ///< Periodic self-state writes

    ClioNode::Uuid selfUuid_;  ///< UUID identifying this node within the cluster

    /// Fired after every read with the latest cluster data (which may hold an error).
    boost::signals2::signal<void(ClioNode::CUuid, std::shared_ptr<ClusterData const>)> onNewState_;

public:
    /**
     * @brief Construct a Backend communication handler.
     *
     * @param ctx The execution context for asynchronous operations
     * @param backend Interface to the backend database
     * @param writerState State indicating whether this node is writing to the database
     * @param cacheLoadingState State controlling whether this node is allowed to load the cache
     * @param readInterval How often to read cluster state from the backend
     * @param writeInterval How often to write this node's state to the backend
     */
    Backend(
        boost::asio::thread_pool& ctx,
        std::shared_ptr<data::BackendInterface> backend,
        std::unique_ptr<etl::WriterStateInterface const> writerState,
        std::unique_ptr<data::LedgerCacheLoadingStateInterface const> cacheLoadingState,
        std::chrono::steady_clock::duration readInterval,
        std::chrono::steady_clock::duration writeInterval
    );

    ~Backend();

    Backend(Backend&&) = delete;
    Backend&
    operator=(Backend&&) = delete;
    Backend(Backend const&) = delete;
    Backend&
    operator=(Backend const&) = delete;

    /**
     * @brief Start the backend read and write tasks.
     *
     * Begins periodic reading of cluster state from the backend and writing of this node's state.
     */
    void
    run();

    /**
     * @brief Stop the backend read and write tasks.
     *
     * Stops all periodic tasks and waits for them to complete.
     */
    void
    stop();

    /**
     * @brief Subscribe to new cluster state notifications.
     *
     * @tparam S Callable type accepting (ClioNode::CUuid, std::shared_ptr<ClusterData const>)
     * @param s Subscriber callback to be invoked when new cluster state is available
     * @return A connection object that can be used to unsubscribe
     */
    template <typename S>
        requires std::invocable<S, ClioNode::CUuid, std::shared_ptr<ClusterData const>>
    boost::signals2::connection
    subscribeToNewState(S&& s)
    {
        // Forward to preserve the subscriber's value category (moves rvalue callables
        // into the slot instead of copying them).
        return onNewState_.connect(std::forward<S>(s));
    }

    /**
     * @brief Get the UUID of this node in the cluster.
     *
     * @return The UUID of this node.
     */
    ClioNode::CUuid
    selfId() const;

private:
    /** @brief Fetch and parse the state of all nodes in the cluster (self entry appended last). */
    ClusterData
    doRead(boost::asio::yield_context yield);

    /** @brief Serialize and write this node's current state to the backend. */
    void
    doWrite();
};
} // namespace cluster

View File

@@ -1,5 +1,14 @@
add_library(clio_cluster)
target_sources(clio_cluster PRIVATE ClioNode.cpp ClusterCommunicationService.cpp)
target_sources(
clio_cluster
PRIVATE
Backend.cpp
CacheLoaderDecider.cpp
ClioNode.cpp
ClusterCommunicationService.cpp
Metrics.cpp
WriterDecider.cpp
)
target_link_libraries(clio_cluster PRIVATE clio_util clio_data)

View File

@@ -0,0 +1,93 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2026, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "cluster/CacheLoaderDecider.hpp"
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "data/LedgerCacheLoadingState.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include <boost/asio/thread_pool.hpp>
#include <algorithm>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>
namespace cluster {
// Stores the thread pool used to run decisions asynchronously and takes ownership of the
// cache loading state this decider may later permit to load.
CacheLoaderDecider::CacheLoaderDecider(
    boost::asio::thread_pool& ctx,
    std::unique_ptr<data::LedgerCacheLoadingStateInterface> cacheLoadingState
)
    : ctx_(ctx), cacheLoadingState_(std::move(cacheLoadingState))
{
}
void
CacheLoaderDecider::onNewState(
    ClioNode::CUuid selfId,
    std::shared_ptr<Backend::ClusterData const> clusterData
)
{
    // A failed cluster read carries no node list; skip this round and wait for the next one.
    if (not clusterData->has_value())
        return;

    // Run the decision asynchronously on the pool. All inputs are copied/cloned into the
    // closure so the task remains valid even if it outlives this call.
    util::spawn(
        ctx_,
        [cacheLoadingState = cacheLoadingState_->clone(),
         selfId = std::move(selfId),
         clusterData = clusterData->value()](auto&&) mutable {
            // NOTE: this compares shared_ptr identity, not UUID values. It finds the self
            // entry because Backend::doRead builds that entry from the same selfUuid_
            // pointer passed here as selfId, while remote nodes' uuids are freshly
            // allocated during parsing.
            auto const selfData = std::ranges::find_if(
                clusterData, [&selfId](ClioNode const& node) { return node.uuid == selfId; }
            );
            ASSERT(selfData != clusterData.end(), "Self data should always be in the cluster data");

            // Nothing to do once this node's cache is already fully loaded.
            if (selfData->cacheIsFull)
                return;

            // Candidates are all nodes (including self) whose cache is not yet full.
            std::vector<ClioNode> notFullNodes;
            std::ranges::copy_if(
                clusterData, std::back_inserter(notFullNodes), [](ClioNode const& node) {
                    return not node.cacheIsFull;
                }
            );

            // If any candidate is already loading, let it finish before permitting anyone else.
            auto const someNodeIsLoadingCache = std::ranges::any_of(
                notFullNodes, [](ClioNode const& node) { return node.cacheIsCurrentlyLoading; }
            );
            if (someNodeIsLoadingCache) {
                return;
            }

            // Deterministic tie-break: the not-full node with the smallest UUID (by value)
            // is the one allowed to start loading.
            std::ranges::sort(notFullNodes, [](ClioNode const& lhs, ClioNode const& rhs) {
                return *lhs.uuid < *rhs.uuid;
            });
            if (*notFullNodes.front().uuid == *selfId) {
                cacheLoadingState->allowLoading();
            }
        }
    );
}
} // namespace cluster

View File

@@ -0,0 +1,80 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2026, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "data/LedgerCacheLoadingState.hpp"
#include <boost/asio/thread_pool.hpp>
#include <memory>
namespace cluster {
/**
* @brief Decides which node in the cluster should load the ledger cache.
*
* This class monitors cluster state changes and determines whether the current node
* should begin loading the ledger cache from the backend. The decision is made by:
* 1. Doing nothing if this node's cache is already full
* 2. Doing nothing if any node in the cluster is currently loading the cache
* 3. Sorting all nodes whose cache is not yet full by UUID for deterministic ordering
* 4. Permitting loading on this node if it is the first in the sorted list
*
* This ensures at most one node in the cluster loads the cache at a time.
*/
class CacheLoaderDecider {
    /** @brief Thread pool for spawning asynchronous tasks */
    boost::asio::thread_pool& ctx_;

    /** @brief Interface for controlling cache loading permission of this node */
    std::unique_ptr<data::LedgerCacheLoadingStateInterface> cacheLoadingState_;

public:
    /**
     * @brief Constructs a CacheLoaderDecider.
     *
     * @param ctx Thread pool for executing asynchronous operations
     * @param cacheLoadingState Cache loading state interface for permitting cache load
     */
    CacheLoaderDecider(
        boost::asio::thread_pool& ctx,
        std::unique_ptr<data::LedgerCacheLoadingStateInterface> cacheLoadingState
    );

    /**
     * @brief Handles cluster state changes and decides whether this node should load the cache.
     *
     * This method is called when cluster state changes. The decision itself is executed
     * asynchronously on the thread pool; it:
     * - Does nothing if the cluster data holds an error
     * - Does nothing if this node's cache is already full
     * - Does nothing if any node in the cluster is currently loading the cache
     * - Sorts all not-yet-full nodes by UUID to establish a deterministic order
     * - Permits cache loading on this node if it is first in the sorted list
     *
     * @param selfId The UUID of the current node
     * @param clusterData Shared pointer to current cluster data; may hold an error if
     * communication failed
     */
    void
    onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData);
};
} // namespace cluster

View File

@@ -19,6 +19,8 @@
#include "cluster/ClioNode.hpp"
#include "data/LedgerCacheLoadingState.hpp"
#include "etl/WriterState.hpp"
#include "util/TimeUtils.hpp"
#include <boost/json/conversion.hpp>
@@ -26,39 +28,107 @@
#include <boost/json/value.hpp>
#include <boost/uuid/uuid.hpp>
#include <chrono>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <string>
#include <string_view>
#include <utility>
namespace cluster {
namespace {
struct Fields {
struct JsonFields {
static constexpr std::string_view const kUPDATE_TIME = "update_time";
static constexpr std::string_view const kDB_ROLE = "db_role";
static constexpr std::string_view const kETL_STARTED = "etl_started";
static constexpr std::string_view const kCACHE_IS_FULL = "cache_is_full";
static constexpr std::string_view const kCACHE_IS_CURRENTLY_LOADING =
"cache_is_currently_loading";
};
} // namespace
ClioNode
ClioNode::from(
    ClioNode::Uuid uuid,
    etl::WriterStateInterface const& writerState,
    data::LedgerCacheLoadingStateInterface const& cacheLoadingState
)
{
    // Derive the database role; precedence: ReadOnly > Fallback > Writer > NotWriter.
    auto dbRole = ClioNode::DbRole::NotWriter;
    if (writerState.isReadOnly()) {
        dbRole = ClioNode::DbRole::ReadOnly;
    } else if (writerState.isFallback()) {
        dbRole = ClioNode::DbRole::Fallback;
    } else if (writerState.isWriting()) {
        dbRole = ClioNode::DbRole::Writer;
    }

    // Snapshot the node's state at the current wall-clock time.
    return ClioNode{
        .uuid = std::move(uuid),
        .updateTime = std::chrono::system_clock::now(),
        .dbRole = dbRole,
        .etlStarted = writerState.isEtlStarted(),
        .cacheIsFull = writerState.isCacheFull(),
        .cacheIsCurrentlyLoading = cacheLoadingState.isCurrentlyLoading()
    };
}
void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, ClioNode const& node)
{
jv = {
{Fields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
{JsonFields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
{JsonFields::kDB_ROLE, static_cast<int64_t>(node.dbRole)},
{JsonFields::kETL_STARTED, node.etlStarted},
{JsonFields::kCACHE_IS_FULL, node.cacheIsFull},
{JsonFields::kCACHE_IS_CURRENTLY_LOADING, node.cacheIsCurrentlyLoading}
};
}
ClioNode
tag_invoke(boost::json::value_to_tag<ClioNode>, boost::json::value const& jv)
{
auto const& updateTimeStr = jv.as_object().at(Fields::kUPDATE_TIME).as_string();
auto const updateTime = util::systemTpFromUtcStr(std::string(updateTimeStr), ClioNode::kTIME_FORMAT);
auto const& obj = jv.as_object();
auto const& updateTimeStr = obj.at(JsonFields::kUPDATE_TIME).as_string();
auto const updateTime =
util::systemTpFromUtcStr(std::string(updateTimeStr), ClioNode::kTIME_FORMAT);
if (!updateTime.has_value()) {
throw std::runtime_error("Failed to parse update time");
}
return ClioNode{.uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = updateTime.value()};
// Each field has a default value for backward compatibility
auto dbRole = ClioNode::DbRole::Fallback;
if (auto const* v = obj.if_contains(JsonFields::kDB_ROLE)) {
auto const dbRoleValue = v->as_int64();
if (dbRoleValue > static_cast<int64_t>(ClioNode::DbRole::MAX))
throw std::runtime_error("Invalid db_role value");
dbRole = static_cast<ClioNode::DbRole>(dbRoleValue);
}
auto const etlStarted =
obj.contains(JsonFields::kETL_STARTED) ? obj.at(JsonFields::kETL_STARTED).as_bool() : true;
auto const cacheIsFull = obj.contains(JsonFields::kCACHE_IS_FULL)
? obj.at(JsonFields::kCACHE_IS_FULL).as_bool()
: true;
auto const cacheIsCurrentlyLoading = obj.contains(JsonFields::kCACHE_IS_CURRENTLY_LOADING)
? obj.at(JsonFields::kCACHE_IS_CURRENTLY_LOADING).as_bool()
: false;
return ClioNode{
// Json data doesn't contain uuid so leaving it empty here. It will be filled outside of
// this parsing
.uuid = std::make_shared<boost::uuids::uuid>(),
.updateTime = updateTime.value(),
.dbRole = dbRole,
.etlStarted = etlStarted,
.cacheIsFull = cacheIsFull,
.cacheIsCurrentlyLoading = cacheIsCurrentlyLoading
};
}
} // namespace cluster

View File

@@ -19,6 +19,9 @@
#pragma once
#include "data/LedgerCacheLoadingState.hpp"
#include "etl/WriterState.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <boost/uuid/uuid.hpp>
@@ -37,16 +40,46 @@ struct ClioNode {
*/
static constexpr char const* kTIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ";
// enum class WriterRole {
// ReadOnly,
// NotWriter,
// Writer
// };
/**
* @brief Database role of a node in the cluster.
*
* Roles are used to coordinate which node writes to the database:
* - ReadOnly: Node is configured to never write (strict read-only mode)
* - NotWriter: Node can write but is currently not the designated writer
* - Writer: Node is actively writing to the database
* - Fallback: Node is using the fallback writer decision mechanism
*
* When any node in the cluster is in Fallback mode, the entire cluster switches
* from the cluster communication mechanism to the slower but more reliable
* database-based conflict detection mechanism.
*/
enum class DbRole { ReadOnly = 0, NotWriter = 1, Writer = 2, Fallback = 3, MAX = 3 };
std::shared_ptr<boost::uuids::uuid> uuid; ///< The UUID of the node.
std::chrono::system_clock::time_point updateTime; ///< The time the data about the node was last updated.
using Uuid = std::shared_ptr<boost::uuids::uuid>;
using CUuid = std::shared_ptr<boost::uuids::uuid const>;
// WriterRole writerRole;
Uuid uuid; ///< The UUID of the node.
std::chrono::system_clock::time_point
updateTime; ///< The time the data about the node was last updated.
DbRole dbRole; ///< The database role of the node
bool etlStarted; ///< Whether the ETL monitor has started on this node
bool cacheIsFull; ///< Whether the ledger cache is fully loaded on this node
bool cacheIsCurrentlyLoading; ///< Whether this node is currently loading the ledger cache
/**
* @brief Create a ClioNode from writer state and cache loading state.
*
* @param uuid The UUID of the node
* @param writerState The writer state to determine the node's database role
* @param cacheLoadingState The cache loading state to determine if cache is being loaded
* @return A ClioNode with the current time and role derived from writerState
*/
static ClioNode
from(
Uuid uuid,
etl::WriterStateInterface const& writerState,
data::LedgerCacheLoadingStateInterface const& cacheLoadingState
);
};
void

View File

@@ -19,98 +19,52 @@
#include "cluster/ClusterCommunicationService.hpp"
#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_type.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/use_future.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include "data/LedgerCacheLoadingState.hpp"
#include "etl/SystemState.hpp"
#include "etl/WriterState.hpp"
#include "util/config/ConfigDefinition.hpp"
#include <chrono>
#include <ctime>
#include <latch>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace {
constexpr auto kTOTAL_WORKERS = 2uz; // 1 reading and 1 writing worker (coroutines)
} // namespace
namespace cluster {
ClusterCommunicationService::ClusterCommunicationService(
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface> writerState,
std::unique_ptr<data::LedgerCacheLoadingStateInterface> cacheLoadingState,
std::chrono::steady_clock::duration readInterval,
std::chrono::steady_clock::duration writeInterval
)
: backend_(std::move(backend))
, readInterval_(readInterval)
, writeInterval_(writeInterval)
, finishedCountdown_(kTOTAL_WORKERS)
, selfData_{ClioNode{
.uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()),
.updateTime = std::chrono::system_clock::time_point{}
}}
: backend_(
ctx_,
std::move(backend),
writerState->clone(),
cacheLoadingState->clone(),
readInterval,
writeInterval
)
, writerDecider_(ctx_, std::move(writerState))
, cacheLoaderDecider_(ctx_, std::move(cacheLoadingState))
{
nodesInClusterMetric_.set(1); // The node always sees itself
isHealthy_ = true;
}
void
ClusterCommunicationService::run()
{
ASSERT(not running_ and not stopped_, "Can only be ran once");
running_ = true;
util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;
while (running_) {
timer.expires_after(readInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
if (ec == boost::asio::error::operation_aborted or not running_)
break;
doRead(yield);
}
finishedCountdown_.count_down(1);
backend_.subscribeToNewState([this](auto&&... args) {
metrics_.onNewState(std::forward<decltype(args)>(args)...);
});
util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;
while (running_) {
doWrite();
timer.expires_after(writeInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
if (ec == boost::asio::error::operation_aborted or not running_)
break;
}
finishedCountdown_.count_down(1);
backend_.subscribeToNewState([this](auto&&... args) {
writerDecider_.onNewState(std::forward<decltype(args)>(args)...);
});
backend_.subscribeToNewState([this](auto&&... args) {
cacheLoaderDecider_.onNewState(std::forward<decltype(args)>(args)...);
});
backend_.run();
}
ClusterCommunicationService::~ClusterCommunicationService()
@@ -121,107 +75,30 @@ ClusterCommunicationService::~ClusterCommunicationService()
void
ClusterCommunicationService::stop()
{
if (stopped_)
return;
stopped_ = true;
// for ASAN to see through concurrency correctly we need to exit all coroutines before joining the ctx
running_ = false;
// cancelSignal_ is not thread safe so we execute emit on the same strand
boost::asio::spawn(
strand_, [this](auto&&) { cancelSignal_.emit(boost::asio::cancellation_type::all); }, boost::asio::use_future
)
.wait();
finishedCountdown_.wait();
ctx_.join();
backend_.stop();
}
std::shared_ptr<boost::uuids::uuid>
ClusterCommunicationService::selfUuid() const
ClusterCommunicationService::MakeResult
ClusterCommunicationService::make(
util::config::ClioConfigDefinition const& config,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<etl::SystemState> state
)
{
// Uuid never changes so it is safe to copy it without using strand_
return selfData_.uuid;
}
ClioNode
ClusterCommunicationService::selfData() const
{
ClioNode result{};
util::spawn(strand_, [this, &result](boost::asio::yield_context) { result = selfData_; });
return result;
}
std::expected<std::vector<ClioNode>, std::string>
ClusterCommunicationService::clusterData() const
{
if (not isHealthy_) {
return std::unexpected{"Service is not healthy"};
auto const& cache = backend->cache();
auto cacheLoadingState = std::make_unique<data::LedgerCacheLoadingState>(cache);
if (not config.get<bool>("cache.limit_load_in_cluster")) {
cacheLoadingState->allowLoading();
}
std::vector<ClioNode> result;
util::spawn(strand_, [this, &result](boost::asio::yield_context) {
result = otherNodesData_;
result.push_back(selfData_);
});
return result;
}
void
ClusterCommunicationService::doRead(boost::asio::yield_context yield)
{
otherNodesData_.clear();
BackendInterface::ClioNodesDataFetchResult expectedResult;
try {
expectedResult = backend_->fetchClioNodesData(yield);
} catch (...) {
expectedResult = std::unexpected{"Failed to fecth Clio nodes data"};
}
if (!expectedResult.has_value()) {
LOG(log_.error()) << "Failed to fetch nodes data";
isHealthy_ = false;
return;
}
// Create a new vector here to not have partially parsed data in otherNodesData_
std::vector<ClioNode> otherNodesData;
for (auto const& [uuid, nodeDataStr] : expectedResult.value()) {
if (uuid == *selfData_.uuid) {
continue;
}
boost::system::error_code errorCode;
auto const json = boost::json::parse(nodeDataStr, errorCode);
if (errorCode.failed()) {
LOG(log_.error()) << "Error parsing json from DB: " << nodeDataStr;
isHealthy_ = false;
return;
}
auto expectedNodeData = boost::json::try_value_to<ClioNode>(json);
if (expectedNodeData.has_error()) {
LOG(log_.error()) << "Error converting json to ClioNode: " << json;
isHealthy_ = false;
return;
}
*expectedNodeData->uuid = uuid;
otherNodesData.push_back(std::move(expectedNodeData).value());
}
otherNodesData_ = std::move(otherNodesData);
nodesInClusterMetric_.set(otherNodesData_.size() + 1);
isHealthy_ = true;
}
void
ClusterCommunicationService::doWrite()
{
selfData_.updateTime = std::chrono::system_clock::now();
boost::json::value jsonValue{};
boost::json::value_from(selfData_, jsonValue);
backend_->writeNodeMessage(*selfData_.uuid, boost::json::serialize(jsonValue.as_object()));
auto cacheLoadingStateClone = cacheLoadingState->clone();
return MakeResult{
.service = std::make_unique<ClusterCommunicationService>(
std::move(backend),
std::make_unique<etl::WriterState>(std::move(state), cache),
std::move(cacheLoadingState)
),
.cacheLoadingState = std::move(cacheLoadingStateClone)
};
}
} // namespace cluster

View File

@@ -19,13 +19,16 @@
#pragma once
#include "cluster/ClioNode.hpp"
#include "cluster/ClusterCommunicationServiceInterface.hpp"
#include "cluster/Backend.hpp"
#include "cluster/CacheLoaderDecider.hpp"
#include "cluster/Concepts.hpp"
#include "cluster/Metrics.hpp"
#include "cluster/WriterDecider.hpp"
#include "data/BackendInterface.hpp"
#include "util/log/Logger.hpp"
#include "util/prometheus/Bool.hpp"
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Prometheus.hpp"
#include "data/LedgerCacheLoadingState.hpp"
#include "etl/SystemState.hpp"
#include "etl/WriterState.hpp"
#include "util/config/ConfigDefinition.hpp"
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/spawn.hpp>
@@ -33,67 +36,54 @@
#include <boost/asio/thread_pool.hpp>
#include <boost/uuid/uuid.hpp>
#include <atomic>
#include <chrono>
#include <latch>
#include <memory>
#include <string>
#include <vector>
namespace cluster {
/**
* @brief Service to post and read messages to/from the cluster. It uses a backend to communicate with the cluster.
* @brief Service to post and read messages to/from the cluster. It uses a backend to communicate
* with the cluster.
*/
class ClusterCommunicationService : public ClusterCommunicationServiceInterface {
util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
"cluster_nodes_total_number",
{},
"Total number of nodes this node can detect in the cluster."
);
util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
"cluster_communication_is_healthy",
{},
"Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)"
);
// TODO: Use util::async::CoroExecutionContext after https://github.com/XRPLF/clio/issues/1973 is implemented
class ClusterCommunicationService : public ClusterCommunicationServiceTag {
// TODO: Use util::async::CoroExecutionContext after https://github.com/XRPLF/clio/issues/1973
// is implemented
boost::asio::thread_pool ctx_{1};
boost::asio::strand<boost::asio::thread_pool::executor_type> strand_ = boost::asio::make_strand(ctx_);
util::Logger log_{"ClusterCommunication"};
std::shared_ptr<data::BackendInterface> backend_;
std::chrono::steady_clock::duration readInterval_;
std::chrono::steady_clock::duration writeInterval_;
boost::asio::cancellation_signal cancelSignal_;
std::latch finishedCountdown_;
std::atomic_bool running_ = false;
bool stopped_ = false;
ClioNode selfData_;
std::vector<ClioNode> otherNodesData_;
Backend backend_;
Metrics metrics_;
WriterDecider writerDecider_;
CacheLoaderDecider cacheLoaderDecider_;
public:
static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{2100};
static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1200};
static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{1000};
static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1000};
/**
* @brief Construct a new Cluster Communication Service object.
*
* @param backend The backend to use for communication.
* @param writerState The state showing whether clio is writing to the database.
* @param cacheLoadingState State controlling cache loading permission for this node.
* @param readInterval The interval to read messages from the cluster.
* @param writeInterval The interval to write messages to the cluster.
*/
ClusterCommunicationService(
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface> writerState,
std::unique_ptr<data::LedgerCacheLoadingStateInterface> cacheLoadingState,
std::chrono::steady_clock::duration readInterval = kDEFAULT_READ_INTERVAL,
std::chrono::steady_clock::duration writeInterval = kDEFAULT_WRITE_INTERVAL
);
~ClusterCommunicationService() override;
ClusterCommunicationService(ClusterCommunicationService&&) = delete;
ClusterCommunicationService(ClusterCommunicationService const&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService&&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService const&) = delete;
/**
* @brief Start the service.
*/
@@ -106,43 +96,36 @@ public:
void
stop();
ClusterCommunicationService(ClusterCommunicationService&&) = delete;
ClusterCommunicationService(ClusterCommunicationService const&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService&&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService const&) = delete;
/**
* @brief Result of ClusterCommunicationService::make().
*
* The @c cacheLoadingState is a clone whose allowLoading() is connected to the state owned by
* the service, so the caller can pass it to the cache loader.
*/
struct MakeResult {
std::unique_ptr<ClusterCommunicationService> service; ///< The constructed service
std::unique_ptr<data::LedgerCacheLoadingStateInterface const>
cacheLoadingState; ///< Clone of cache loading state for use by the cache loader
};
/**
* @brief Get the UUID of the current node.
* @brief Factory method: construct the service and return a cache loading state for the caller.
*
* @return The UUID of the current node.
*/
std::shared_ptr<boost::uuids::uuid>
selfUuid() const;
/**
* @brief Get the data of the current node.
* Reads the @c cache.limit_load_in_cluster config flag: if true, loading is immediately
* allowed (single-node mode); if false, the cluster will gate permission via
* CacheLoaderDecider.
*
* @return The data of the current node.
* @param config The application configuration
* @param backend The data backend
* @param state The shared ETL system state
* @return A MakeResult containing the service and a cache loading state clone
*/
ClioNode
selfData() const override;
/**
* @brief Get the data of all nodes in the cluster (including self).
*
* @return The data of all nodes in the cluster or error if the service is not healthy.
*/
std::expected<std::vector<ClioNode>, std::string>
clusterData() const override;
private:
void
doRead(boost::asio::yield_context yield);
void
doWrite();
static MakeResult
make(
util::config::ClioConfigDefinition const& config,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<etl::SystemState> state
);
};
} // namespace cluster

39
src/cluster/Concepts.hpp Normal file
View File

@@ -0,0 +1,39 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <concepts>
namespace cluster {
/**
 * @brief Tag type for cluster communication service implementations.
 *
 * This tag is used to identify types that implement cluster communication functionality.
 * Types should inherit from this tag to be recognized as cluster communication services.
 */
struct ClusterCommunicationServiceTag {
    // Virtual destructor so implementations can be destroyed polymorphically
    // through a pointer to the tag type.
    virtual ~ClusterCommunicationServiceTag() = default;
};

/**
 * @brief Concept satisfied by any type that derives from ClusterCommunicationServiceTag.
 */
template <typename T>
concept SomeClusterCommunicationService = std::derived_from<T, ClusterCommunicationServiceTag>;
} // namespace cluster

View File

@@ -17,38 +17,31 @@
*/
//==============================================================================
#pragma once
#include "cluster/Metrics.hpp"
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include <expected>
#include <string>
#include <vector>
#include <memory>
namespace cluster {
/**
* @brief Interface for the cluster communication service.
*/
class ClusterCommunicationServiceInterface {
public:
virtual ~ClusterCommunicationServiceInterface() = default;
Metrics::Metrics()
{
    // Communication is considered healthy until a state update proves otherwise,
    // and the cluster always contains at least this node itself.
    isHealthy_ = true;
    nodesInClusterMetric_.set(1);
}
/**
* @brief Get the data of the current node.
*
* @return The data of the current node.
*/
[[nodiscard]] virtual ClioNode
selfData() const = 0;
/**
* @brief Get the data of all nodes in the cluster (including self).
*
* @return The data of all nodes in the cluster or error if the service is not healthy.
*/
[[nodiscard]] virtual std::expected<std::vector<ClioNode>, std::string>
clusterData() const = 0;
};
void
Metrics::onNewState(ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData)
{
    // Communication counts as healthy exactly when cluster data was delivered.
    bool const communicationHealthy = clusterData->has_value();
    isHealthy_ = communicationHealthy;

    if (not communicationHealthy) {
        // Without data we can only vouch for this node itself being present.
        nodesInClusterMetric_.set(1);
        return;
    }
    nodesInClusterMetric_.set(clusterData->value().size());
}
} // namespace cluster

78
src/cluster/Metrics.hpp Normal file
View File

@@ -0,0 +1,78 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "util/prometheus/Bool.hpp"
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <memory>
namespace cluster {
/**
 * @brief Manages Prometheus metrics for cluster communication and node tracking.
 *
 * This class tracks cluster-related metrics including:
 * - Total number of nodes detected in the cluster
 * - Health status of cluster communication
 *
 * NOTE(review): metric updates here are not synchronized; this assumes onNewState()
 * is invoked from a single thread at a time — confirm with the caller.
 */
class Metrics {
    /** @brief Gauge tracking the total number of nodes visible in the cluster */
    util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
        "cluster_nodes_total_number",
        {},
        "Total number of nodes this node can detect in the cluster."
    );

    /** @brief Boolean metric indicating whether cluster communication is healthy */
    util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
        "cluster_communication_is_healthy",
        {},
        "Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a "
        "problem)"
    );

public:
    /**
     * @brief Constructs a Metrics instance and initializes metrics.
     *
     * Sets the initial node count to 1 (self) and marks communication as healthy.
     */
    Metrics();

    /**
     * @brief Updates metrics based on new cluster state.
     *
     * This callback is invoked when cluster state changes. It updates:
     * - Health status based on whether cluster data is available
     * - Node count to reflect the current cluster size
     *
     * @param uuid The UUID of the node (unused in current implementation)
     * @param clusterData Shared pointer to the current cluster data; may be empty if communication
     * failed
     */
    void
    onNewState(ClioNode::CUuid uuid, std::shared_ptr<Backend::ClusterData const> clusterData);
};
} // namespace cluster

View File

@@ -0,0 +1,121 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "cluster/WriterDecider.hpp"
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "etl/WriterState.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include <boost/asio/thread_pool.hpp>
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
namespace cluster {
// Stores the execution context reference and takes ownership of the writer state handle;
// all decision making is deferred to onNewState().
WriterDecider::WriterDecider(
    boost::asio::thread_pool& ctx,
    std::unique_ptr<etl::WriterStateInterface> writerState
)
    : ctx_(ctx), writerState_(std::move(writerState))
{
}
// Asynchronously decides whether this node should act as the DB writer based on a
// snapshot of the cluster state. The election is deterministic: nodes are sorted by
// UUID and the first eligible candidate wins.
void
WriterDecider::onNewState(
    ClioNode::CUuid selfId,
    std::shared_ptr<Backend::ClusterData const> clusterData
)
{
    // No cluster data (communication failed) — keep the previous writer decision.
    if (not clusterData->has_value())
        return;

    // Work on copies: a clone of the writer state handle and a snapshot of the cluster
    // data, so the spawned task does not reference members of this object.
    util::spawn(
        ctx_,
        [writerState = writerState_->clone(),
         selfId = std::move(selfId),
         clusterData = clusterData->value()](auto&&) mutable {
            auto const selfData = std::ranges::find_if(
                clusterData, [&selfId](ClioNode const& node) { return node.uuid == selfId; }
            );
            ASSERT(selfData != clusterData.end(), "Self data should always be in the cluster data");

            // This node is already using the fallback decision mechanism — nothing to
            // decide here (presumably handled by the fallback path; confirm).
            if (selfData->dbRole == ClioNode::DbRole::Fallback) {
                return;
            }
            // A read-only node never writes, regardless of the rest of the cluster.
            if (selfData->dbRole == ClioNode::DbRole::ReadOnly) {
                writerState->giveUpWriting();
                return;
            }
            // If any node in the cluster is in Fallback mode, the entire cluster must switch
            // to the fallback writer decision mechanism for consistency
            if (std::ranges::any_of(clusterData, [](ClioNode const& node) {
                    return node.dbRole == ClioNode::DbRole::Fallback;
                })) {
                writerState->setWriterDecidingFallback();
                return;
            }
            // We are not ReadOnly and there is no Fallback in the cluster.
            // Sort by UUID so every node computes the same election order.
            std::ranges::sort(clusterData, [](ClioNode const& lhs, ClioNode const& rhs) {
                return *lhs.uuid < *rhs.uuid;
            });
            // Tier 1: prefer a fully-ready candidate (ETL started AND cache full).
            auto it = std::ranges::find_if(clusterData, [](ClioNode const& node) {
                return node.etlStarted and node.cacheIsFull and
                    (node.dbRole == ClioNode::DbRole::NotWriter or
                     node.dbRole == ClioNode::DbRole::Writer);
            });
            // Start writing if the elected node is us, otherwise stand down.
            auto electNode = [&selfId, &writerState](auto it) {
                if (*it->uuid == *selfId) {
                    writerState->startWriting();
                } else {
                    writerState->giveUpWriting();
                }
            };
            if (it != clusterData.end()) {
                electNode(it);
                return;
            }
            // Tier 2: try to find a node with at least started ETL
            it = std::ranges::find_if(clusterData, [](ClioNode const& node) {
                return node.etlStarted and
                    (node.dbRole == ClioNode::DbRole::NotWriter or
                     node.dbRole == ClioNode::DbRole::Writer);
            });
            if (it != clusterData.end()) {
                electNode(it);
                return;
            }
            // No eligible candidate anywhere — this node must not write.
            // NOTE(review): the header doc mentions logging a warning in this case, but no
            // log is emitted here — confirm intended behavior.
            writerState->giveUpWriting();
        }
    );
}
} // namespace cluster

View File

@@ -0,0 +1,79 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "etl/WriterState.hpp"
#include <boost/asio/thread_pool.hpp>
#include <memory>
namespace cluster {
/**
 * @brief Decides which node in the cluster should be the writer based on cluster state.
 *
 * This class monitors cluster state changes and determines whether the current node
 * should act as the writer to the database. The decision is made by:
 * 1. Sorting all nodes by UUID for deterministic ordering
 * 2. Selecting the first eligible candidate, preferring nodes that have started ETL and
 *    have a full cache, then falling back to nodes that have merely started ETL
 * 3. Activating writing on this node if it's the current node, otherwise deactivating
 *
 * ReadOnly nodes never write, and if any node in the cluster reports the Fallback role
 * the whole cluster switches to the fallback writer decision mechanism instead.
 *
 * This ensures only one node in the cluster actively writes to the database at a time.
 */
class WriterDecider {
    /** @brief Thread pool for spawning asynchronous tasks */
    boost::asio::thread_pool& ctx_;

    /** @brief Interface for controlling the writer state of this node */
    std::unique_ptr<etl::WriterStateInterface> writerState_;

public:
    /**
     * @brief Constructs a WriterDecider.
     *
     * @param ctx Thread pool for executing asynchronous operations
     * @param writerState Writer state interface for controlling write operations
     */
    WriterDecider(
        boost::asio::thread_pool& ctx,
        std::unique_ptr<etl::WriterStateInterface> writerState
    );

    /**
     * @brief Handles cluster state changes and decides whether this node should be the writer.
     *
     * This method is called when cluster state changes. It asynchronously:
     * - Sorts all nodes by UUID to establish a deterministic order
     * - Elects the first eligible candidate (preferring nodes with started ETL and full cache)
     * - Activates writing if this node is selected, otherwise deactivates writing
     * - Gives up writing if no node in the cluster is eligible to write
     *
     * @param selfId The UUID of the current node
     * @param clusterData Shared pointer to current cluster data; may be empty if communication
     * failed
     */
    void
    onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData);
};
} // namespace cluster

View File

@@ -0,0 +1,109 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/cancellation_type.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/strand.hpp>
#include <atomic>
#include <chrono>
#include <concepts>
#include <semaphore>
namespace cluster::impl {
// TODO: Try to replace util::Repeat by this. https://github.com/XRPLF/clio/issues/2926
/**
 * @brief Periodically invokes a user-supplied callable on a strand until stopped.
 *
 * The timer and the callable both run on a strand created from the given execution
 * context, so the callable never runs concurrently with the internal timer handling.
 * stop() blocks until the internal coroutine has fully exited and is also called from
 * the destructor.
 *
 * NOTE(review): calling stop() from inside the callable itself would deadlock, since
 * stop() waits on a semaphore that is released only after the coroutine finishes —
 * confirm callers never do this.
 */
template <typename Context>
class RepeatedTask {
    /** @brief Delay between consecutive invocations of the callable */
    std::chrono::steady_clock::duration interval_;

    /** @brief Strand serializing the timer operations and the callable */
    boost::asio::strand<typename Context::executor_type> strand_;

    enum class State { Running, Stopped };

    /** @brief Lifecycle flag checked by the loop and flipped by stop() */
    std::atomic<State> state_ = State::Stopped;

    /** @brief Released by the coroutine on exit; stop() blocks on it */
    std::binary_semaphore semaphore_{0};

    /** @brief Timer driving the repetition; bound to the strand */
    boost::asio::steady_timer timer_;

public:
    /**
     * @brief Constructs a stopped task; call run() to start it.
     *
     * @param interval Delay between invocations of the callable
     * @param ctx Execution context the internal strand is created from
     */
    RepeatedTask(std::chrono::steady_clock::duration interval, Context& ctx)
        : interval_(interval), strand_(boost::asio::make_strand(ctx)), timer_(strand_)
    {
    }

    ~RepeatedTask()
    {
        stop();
    }

    /**
     * @brief Starts the repeating loop. May only be called once per instance.
     *
     * @param f Callable invoked after each interval; may optionally accept a yield context
     */
    template <typename Fn>
        requires std::invocable<Fn, boost::asio::yield_context> or std::invocable<Fn>
    void
    run(Fn&& f)
    {
        ASSERT(state_ == State::Stopped, "Can only be ran once");
        state_ = State::Running;
        util::spawn(strand_, [this, f = std::forward<Fn>(f)](boost::asio::yield_context yield) {
            boost::system::error_code ec;
            while (state_ == State::Running) {
                timer_.expires_after(interval_);
                timer_.async_wait(yield[ec]);
                // Exit on cancellation (ec set when stop() cancels the timer) or on an
                // external stop request observed after the wait completed normally.
                if (ec or state_ != State::Running)
                    break;
                if constexpr (std::invocable<decltype(f), boost::asio::yield_context>) {
                    f(yield);
                } else {
                    f();
                }
            }
            // Signal stop() that the coroutine has fully finished.
            semaphore_.release();
        });
    }

    /**
     * @brief Stops the loop and blocks until the coroutine has exited.
     *
     * Safe to call multiple times; a no-op if the task was never started.
     */
    void
    stop()
    {
        // Atomically transition Running -> Stopped; bail out if not currently running.
        if (auto expected = State::Running;
            not state_.compare_exchange_strong(expected, State::Stopped))
            return; // Already stopped or not started
        // Cancel the timer from within the strand so the pending async_wait completes
        // promptly; wait until the cancellation has actually been posted and executed.
        std::binary_semaphore cancelSemaphore{0};
        boost::asio::post(strand_, [this, &cancelSemaphore]() {
            timer_.cancel();
            cancelSemaphore.release();
        });
        cancelSemaphore.acquire();
        // Wait for the coroutine to observe the stop and finish.
        semaphore_.acquire();
    }
};
} // namespace cluster::impl

View File

@@ -57,10 +57,15 @@ supportedAmendments()
}
bool
lookupAmendment(auto const& allAmendments, std::vector<ripple::uint256> const& ledgerAmendments, std::string_view name)
lookupAmendment(
auto const& allAmendments,
std::vector<ripple::uint256> const& ledgerAmendments,
std::string_view name
)
{
namespace rg = std::ranges;
if (auto const am = rg::find(allAmendments, name, &data::Amendment::name); am != rg::end(allAmendments))
if (auto const am = rg::find(allAmendments, name, &data::Amendment::name);
am != rg::end(allAmendments))
return rg::find(ledgerAmendments, am->feature) != rg::end(ledgerAmendments);
return false;
}
@@ -70,9 +75,12 @@ lookupAmendment(auto const& allAmendments, std::vector<ripple::uint256> const& l
namespace data {
namespace impl {
WritingAmendmentKey::WritingAmendmentKey(std::string amendmentName) : AmendmentKey{std::move(amendmentName)}
WritingAmendmentKey::WritingAmendmentKey(std::string amendmentName)
: AmendmentKey{std::move(amendmentName)}
{
ASSERT(not supportedAmendments().contains(name), "Attempt to register the same amendment twice");
ASSERT(
not supportedAmendments().contains(name), "Attempt to register the same amendment twice"
);
supportedAmendments().insert(name);
}
@@ -96,7 +104,8 @@ operator ripple::uint256() const
return Amendment::getAmendmentId(name);
}
AmendmentCenter::AmendmentCenter(std::shared_ptr<data::BackendInterface> const& backend) : backend_{backend}
AmendmentCenter::AmendmentCenter(std::shared_ptr<data::BackendInterface> const& backend)
: backend_{backend}
{
namespace rg = std::ranges;
namespace vs = std::views;
@@ -108,7 +117,8 @@ AmendmentCenter::AmendmentCenter(std::shared_ptr<data::BackendInterface> const&
.name = name,
.feature = Amendment::getAmendmentId(name),
.isSupportedByXRPL = support != ripple::AmendmentSupport::Unsupported,
.isSupportedByClio = rg::find(supportedAmendments(), name) != rg::end(supportedAmendments()),
.isSupportedByClio =
rg::find(supportedAmendments(), name) != rg::end(supportedAmendments()),
.isRetired = support == ripple::AmendmentSupport::Retired
};
}),
@@ -144,19 +154,28 @@ AmendmentCenter::isEnabled(AmendmentKey const& key, uint32_t seq) const
}
bool
AmendmentCenter::isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const
AmendmentCenter::isEnabled(
boost::asio::yield_context yield,
AmendmentKey const& key,
uint32_t seq
) const
{
try {
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments)
return lookupAmendment(all_, *listAmendments, key);
} catch (std::runtime_error const&) {
return false; // Some old ledger does not contain Amendments ledger object so do best we can for now
return false; // Some old ledger does not contain Amendments ledger object so do best we
// can for now
}
return false;
}
std::vector<bool>
AmendmentCenter::isEnabled(boost::asio::yield_context yield, std::vector<AmendmentKey> const& keys, uint32_t seq) const
AmendmentCenter::isEnabled(
boost::asio::yield_context yield,
std::vector<AmendmentKey> const& keys,
uint32_t seq
) const
{
namespace rg = std::ranges;
@@ -181,7 +200,11 @@ AmendmentCenter::isEnabled(boost::asio::yield_context yield, std::vector<Amendme
Amendment const&
AmendmentCenter::getAmendment(AmendmentKey const& key) const
{
ASSERT(supported_.contains(key), "The amendment '{}' must be present in supported amendments list", key.name);
ASSERT(
supported_.contains(key),
"The amendment '{}' must be present in supported amendments list",
key.name
);
return supported_.at(key);
}
@@ -201,7 +224,8 @@ std::optional<std::vector<ripple::uint256>>
AmendmentCenter::fetchAmendmentsList(boost::asio::yield_context yield, uint32_t seq) const
{
// the amendments should always be present on the ledger
auto const amendments = backend_->fetchLedgerObject(ripple::keylet::amendments().key, seq, yield);
auto const amendments =
backend_->fetchLedgerObject(ripple::keylet::amendments().key, seq, yield);
if (not amendments.has_value())
throw std::runtime_error("Amendments ledger object must be present in the database");

View File

@@ -62,9 +62,9 @@ struct WritingAmendmentKey : AmendmentKey {
*/
struct Amendments {
// NOTE: if Clio wants to report it supports an Amendment it should be listed here.
// Whether an amendment is obsolete and/or supported by libxrpl is extracted directly from libxrpl.
// If an amendment is in the list below it just means Clio did whatever changes needed to support it.
// Most of the time it's going to be no changes at all.
// Whether an amendment is obsolete and/or supported by libxrpl is extracted directly from
// libxrpl. If an amendment is in the list below it just means Clio did whatever changes needed
// to support it. Most of the time it's going to be no changes at all.
/** @cond */
// NOLINTBEGIN(readability-identifier-naming)
@@ -177,6 +177,7 @@ struct Amendments {
REGISTER(fix1512);
REGISTER(fix1523);
REGISTER(fix1528);
REGISTER(fixBatchInnerSigs);
// NOLINTEND(readability-identifier-naming)
/** @endcond */
};
@@ -255,7 +256,11 @@ public:
* @return A vector of bools representing enabled state for each of the given keys
*/
[[nodiscard]] std::vector<bool>
isEnabled(boost::asio::yield_context yield, std::vector<AmendmentKey> const& keys, uint32_t seq) const final;
isEnabled(
boost::asio::yield_context yield,
std::vector<AmendmentKey> const& keys,
uint32_t seq
) const final;
/**
* @brief Get an amendment

View File

@@ -92,7 +92,11 @@ public:
* @return A vector of bools representing enabled state for each of the given keys
*/
[[nodiscard]] virtual std::vector<bool>
isEnabled(boost::asio::yield_context yield, std::vector<AmendmentKey> const& keys, uint32_t seq) const = 0;
isEnabled(
boost::asio::yield_context yield,
std::vector<AmendmentKey> const& keys,
uint32_t seq
) const = 0;
/**
* @brief Get an amendment

View File

@@ -41,7 +41,10 @@ std::vector<std::int64_t> const kHISTOGRAM_BUCKETS{1, 2, 5, 10, 20, 50, 100, 200
std::int64_t
durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTime)
{
return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - startTime).count();
return std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - startTime
)
.count();
}
} // namespace
@@ -144,7 +147,10 @@ BackendCounters::registerReadStarted(std::uint64_t const count)
}
void
BackendCounters::registerReadFinished(std::chrono::steady_clock::time_point const startTime, std::uint64_t const count)
BackendCounters::registerReadFinished(
std::chrono::steady_clock::time_point const startTime,
std::uint64_t const count
)
{
asyncReadCounters_.registerFinished(count);
auto const duration = durationInMillisecondsSince(startTime);
@@ -238,7 +244,8 @@ void
BackendCounters::AsyncOperationCounters::registerError(std::uint64_t count)
{
ASSERT(
pendingCounter_.get().value() >= static_cast<std::int64_t>(count), "Error operations can't be more than pending"
pendingCounter_.get().value() >= static_cast<std::int64_t>(count),
"Error operations can't be more than pending"
);
pendingCounter_.get() -= count;
errorCounter_.get() += count;

View File

@@ -46,7 +46,9 @@ concept SomeBackendCounters = requires(T a) {
{ a.registerWriteFinished(std::chrono::steady_clock::time_point{}) } -> std::same_as<void>;
{ a.registerWriteRetry() } -> std::same_as<void>;
{ a.registerReadStarted(std::uint64_t{}) } -> std::same_as<void>;
{ a.registerReadFinished(std::chrono::steady_clock::time_point{}, std::uint64_t{}) } -> std::same_as<void>;
{
a.registerReadFinished(std::chrono::steady_clock::time_point{}, std::uint64_t{})
} -> std::same_as<void>;
{ a.registerReadRetry(std::uint64_t{}) } -> std::same_as<void>;
{ a.registerReadError(std::uint64_t{}) } -> std::same_as<void>;
{ a.report() } -> std::same_as<boost::json::object>;

View File

@@ -128,7 +128,8 @@ BackendInterface::fetchLedgerObjects(
misses.push_back(keys[i]);
}
}
LOG(log_.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
LOG(log_.trace()) << "Cache hits = " << keys.size() - misses.size()
<< " - cache misses = " << misses.size();
if (!misses.empty()) {
auto objs = doFetchLedgerObjects(misses, sequence, yield);
@@ -192,7 +193,9 @@ BackendInterface::fetchBookOffers(
ripple::uint256 const bookEnd = ripple::getQualityNext(book);
ripple::uint256 uTipIndex = book;
std::vector<ripple::uint256> keys;
auto getMillis = [](auto diff) { return std::chrono::duration_cast<std::chrono::milliseconds>(diff).count(); };
auto getMillis = [](auto diff) {
return std::chrono::duration_cast<std::chrono::milliseconds>(diff).count();
};
auto begin = std::chrono::system_clock::now();
std::uint32_t numSucc = 0;
std::uint32_t numPages = 0;
@@ -233,20 +236,23 @@ BackendInterface::fetchBookOffers(
auto mid = std::chrono::system_clock::now();
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i) {
LOG(log_.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
LOG(log_.trace()) << "Key = " << ripple::strHex(keys[i])
<< " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
ASSERT(!objs[i].empty(), "Ledger object can't be empty");
page.offers.push_back({keys[i], objs[i]});
}
auto end = std::chrono::system_clock::now();
LOG(log_.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseconds. Fetched next dir " << std::to_string(numSucc)
<< " times"
<< " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds"
<< ". num pages = " << std::to_string(numPages) << ". Fetching all objects took "
<< std::to_string(getMillis(end - mid))
<< " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds"
<< std::to_string(getMillis(mid - begin))
<< " milliseconds. Fetching next dir took " << std::to_string(succMillis)
<< " milliseconds. Fetched next dir " << std::to_string(numSucc) << " times"
<< " Fetching next page of dir took " << std::to_string(pageMillis)
<< " milliseconds"
<< ". num pages = " << std::to_string(numPages)
<< ". Fetching all objects took " << std::to_string(getMillis(end - mid))
<< " milliseconds. total time = " << std::to_string(getMillis(end - begin))
<< " milliseconds"
<< " book = " << ripple::strHex(book);
return page;
@@ -273,7 +279,8 @@ BackendInterface::updateRange(uint32_t newMax)
if (range_.has_value() and newMax < range_->maxSequence) {
ASSERT(
false,
"Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = {}, "
"Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = "
"{}, "
"range->maxSequence = {}",
newMax,
range_->maxSequence
@@ -339,8 +346,8 @@ BackendInterface::fetchLedgerPage(
if (!objects[i].empty()) {
page.objects.push_back({keys[i], std::move(objects[i])});
} else if (!outOfOrder) {
LOG(log_.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
<< " - seq = " << ledgerSequence;
LOG(log_.error()) << "Deleted or non-existent object in successor table. key = "
<< ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j) {
msg << " - " << ripple::strHex(keys[j]);

View File

@@ -109,18 +109,23 @@ synchronous(FnType&& func)
using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
if constexpr (!std::is_same_v<R, void>) {
R res;
util::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) { res = func(yield); });
util::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) {
res = func(yield);
});
ctx.run();
return res;
} else {
util::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
util::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) {
func(yield);
});
ctx.run();
}
}
/**
* @brief Synchronously execute the given function object and retry until no DatabaseTimeout is thrown.
* @brief Synchronously execute the given function object and retry until no DatabaseTimeout is
* thrown.
*
* @tparam FnType The type of function object to execute
* @param func The function object to execute
@@ -225,7 +230,8 @@ public:
fetchLedgerRange() const;
/**
* @brief Fetch the specified number of account root object indexes by page, the accounts need to exist for seq.
* @brief Fetch the specified number of account root object indexes by page, the accounts need
* to exist for seq.
*
* @param number The number of accounts to fetch
* @param pageSize The maximum number of accounts per page
@@ -296,7 +302,10 @@ public:
* @return A vector of TransactionAndMetadata matching the given hashes
*/
virtual std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const = 0;
fetchTransactions(
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches all transactions for a specific account.
@@ -325,7 +334,10 @@ public:
* @return Results as a vector of TransactionAndMetadata
*/
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
fetchAllTransactionsInLedger(
std::uint32_t ledgerSequence,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches all transaction hashes from a specific ledger.
@@ -335,7 +347,10 @@ public:
* @return Hashes as ripple::uint256 in a vector
*/
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
fetchAllTransactionHashesInLedger(
std::uint32_t ledgerSequence,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches a specific NFT.
@@ -346,7 +361,11 @@ public:
* @return NFT object on success; nullopt otherwise
*/
virtual std::optional<NFT>
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t ledgerSequence,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches all transactions for a specific NFT.
@@ -376,7 +395,8 @@ public:
* @param limit Paging limit.
* @param cursorIn Optional cursor to allow us to pick up from where we last left off.
* @param yield Currently executing coroutine.
* @return NFTs issued by this account, or this issuer/taxon combination if taxon is passed and an optional marker
* @return NFTs issued by this account, or this issuer/taxon combination if taxon is passed and
* an optional marker
*/
virtual NFTsAndCursor
fetchNFTsByIssuer(
@@ -410,8 +430,8 @@ public:
/**
* @brief Fetches a specific ledger object.
*
* Currently the real fetch happens in doFetchLedgerObject and fetchLedgerObject attempts to fetch from Cache first
* and only calls out to the real DB if a cache miss occurred.
* Currently the real fetch happens in doFetchLedgerObject and fetchLedgerObject attempts to
* fetch from Cache first and only calls out to the real DB if a cache miss occurred.
*
* @param key The key of the object
* @param sequence The ledger sequence to fetch for
@@ -419,7 +439,11 @@ public:
* @return The object as a Blob on success; nullopt otherwise
*/
std::optional<Blob>
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;
fetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t sequence,
boost::asio::yield_context yield
) const;
/**
* @brief Fetches a specific ledger object sequence.
@@ -432,13 +456,18 @@ public:
* @return The sequence in unit32_t on success; nullopt otherwise
*/
std::optional<std::uint32_t>
fetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;
fetchLedgerObjectSeq(
ripple::uint256 const& key,
std::uint32_t sequence,
boost::asio::yield_context yield
) const;
/**
* @brief Fetches all ledger objects by their keys.
*
* Currently the real fetch happens in doFetchLedgerObjects and fetchLedgerObjects attempts to fetch from Cache
* first and only calls out to the real DB for each of the keys that was not found in the cache.
* Currently the real fetch happens in doFetchLedgerObjects and fetchLedgerObjects attempts to
* fetch from Cache first and only calls out to the real DB for each of the keys that was not
* found in the cache.
*
* @param keys A vector with the keys of the objects to fetch
* @param sequence The ledger sequence to fetch for
@@ -461,7 +490,11 @@ public:
* @return The object as a Blob on success; nullopt otherwise
*/
virtual std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t sequence,
boost::asio::yield_context yield
) const = 0;
/**
* @brief The database-specific implementation for fetching a ledger object sequence.
@@ -531,13 +564,18 @@ public:
* @return The successor on success; nullopt otherwise
*/
std::optional<LedgerObject>
fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
fetchSuccessorObject(
ripple::uint256 key,
std::uint32_t ledgerSequence,
boost::asio::yield_context yield
) const;
/**
* @brief Fetches the successor key.
*
* Thea real fetch happens in doFetchSuccessorKey. This function will attempt to lookup the successor in the cache
* first and only if it's not found in the cache will it fetch from the actual DB.
* Thea real fetch happens in doFetchSuccessorKey. This function will attempt to lookup the
* successor in the cache first and only if it's not found in the cache will it fetch from the
* actual DB.
*
* @param key The key to fetch for
* @param ledgerSequence The ledger sequence to fetch for
@@ -545,7 +583,11 @@ public:
* @return The successor key on success; nullopt otherwise
*/
std::optional<ripple::uint256>
fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
fetchSuccessorKey(
ripple::uint256 key,
std::uint32_t ledgerSequence,
boost::asio::yield_context yield
) const;
/**
* @brief Database-specific implementation of fetching the successor key
@@ -556,7 +598,11 @@ public:
* @return The successor on success; nullopt otherwise
*/
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t ledgerSequence,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches book offers.
@@ -583,7 +629,10 @@ public:
* @return The status of the migrator if found; nullopt otherwise
*/
virtual std::optional<std::string>
fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const = 0;
fetchMigratorStatus(
std::string const& migratorName,
boost::asio::yield_context yield
) const = 0;
/** @brief Return type for fetchClioNodesData() method */
using ClioNodesDataFetchResult =
@@ -601,7 +650,8 @@ public:
/**
* @brief Synchronously fetches the ledger range from DB.
*
* This function just wraps hardFetchLedgerRange(boost::asio::yield_context) using synchronous(FnType&&).
* This function just wraps hardFetchLedgerRange(boost::asio::yield_context) using
* synchronous(FnType&&).
*
* @return The ledger range if available; nullopt otherwise
*/

View File

@@ -1,23 +1,28 @@
add_library(clio_data)
target_sources(
clio_data
PRIVATE AmendmentCenter.cpp
BackendCounters.cpp
BackendInterface.cpp
LedgerCache.cpp
LedgerCacheSaver.cpp
LedgerHeaderCache.cpp
cassandra/impl/Future.cpp
cassandra/impl/Cluster.cpp
cassandra/impl/Batch.cpp
cassandra/impl/Result.cpp
cassandra/impl/Tuple.cpp
cassandra/impl/SslContext.cpp
cassandra/Handle.cpp
cassandra/SettingsProvider.cpp
impl/InputFile.cpp
impl/LedgerCacheFile.cpp
impl/OutputFile.cpp
clio_data
PRIVATE
AmendmentCenter.cpp
BackendCounters.cpp
BackendInterface.cpp
LedgerCache.cpp
LedgerCacheLoadingState.cpp
LedgerCacheSaver.cpp
LedgerHeaderCache.cpp
cassandra/impl/Future.cpp
cassandra/impl/Cluster.cpp
cassandra/impl/Batch.cpp
cassandra/impl/Result.cpp
cassandra/impl/Tuple.cpp
cassandra/impl/SslContext.cpp
cassandra/Handle.cpp
cassandra/SettingsProvider.cpp
impl/InputFile.cpp
impl/LedgerCacheFile.cpp
impl/OutputFile.cpp
)
target_link_libraries(clio_data PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util)
target_link_libraries(
clio_data
PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util
)

View File

@@ -102,10 +102,14 @@ public:
this->waitForWritesToFinish();
if (!range_) {
executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
executor_.writeSync(
schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_
);
}
if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
if (not this->executeSyncUpdate(
schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1)
)) {
LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
return false;
}
@@ -139,7 +143,8 @@ public:
r.bindAt(
1,
std::make_tuple(
cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn))
: 0,
cursorIn.value_or(ripple::uint256(0))
)
);
@@ -170,9 +175,10 @@ public:
selectNFTStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
return schema_->selectNFT.bind(nftID, ledgerSequence);
}
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTStatements),
[&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
);
auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
@@ -181,9 +187,10 @@ public:
selectNFTURIStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
return schema_->selectNFTURI.bind(nftID, ledgerSequence);
}
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTURIStatements),
[&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
);
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
@@ -193,7 +200,8 @@ public:
maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>();
maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
@@ -213,8 +221,9 @@ public:
std::optional<ripple::AccountID> lastItem;
while (liveAccounts.size() < number) {
Statement const statement = lastItem ? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
: schema_->selectAccountFromBeginning.bind(Limit{pageSize});
Statement const statement = lastItem
? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
: schema_->selectAccountFromBeginning.bind(Limit{pageSize});
auto const res = executor_.read(yield, statement);
if (res) {

View File

@@ -83,8 +83,15 @@ struct NFTTransactionsData {
* @param meta The transaction metadata
* @param txHash The transaction hash
*/
NFTTransactionsData(ripple::uint256 const& tokenID, ripple::TxMeta const& meta, ripple::uint256 const& txHash)
: tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), txHash(txHash)
NFTTransactionsData(
ripple::uint256 const& tokenID,
ripple::TxMeta const& meta,
ripple::uint256 const& txHash
)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, txHash(txHash)
{
}
};
@@ -94,11 +101,13 @@ struct NFTTransactionsData {
*
* Gets written to nf_tokens table and the like.
*
* The transaction index is only stored because we want to store only the final state of an NFT per ledger.
* Since we pull this from transactions we keep track of which tx index created this so we can de-duplicate, as it is
* possible for one ledger to have multiple txs that change the state of the same NFT.
* The transaction index is only stored because we want to store only the final state of an NFT per
* ledger. Since we pull this from transactions we keep track of which tx index created this so we
* can de-duplicate, as it is possible for one ledger to have multiple txs that change the state of
* the same NFT.
*
* We only set the uri if this is a mint tx, or if we are loading initial state from NFTokenPage objects.
* We only set the uri if this is a mint tx, or if we are loading initial state from NFTokenPage
* objects.
*/
struct NFTsData {
ripple::uint256 tokenID;
@@ -113,8 +122,9 @@ struct NFTsData {
* @brief Construct a new NFTsData object
*
* @note This constructor is used when parsing an NFTokenMint tx
* Unfortunately because of the extreme edge case of being able to re-mint an NFT with the same ID, we must
* explicitly record a null URI. For this reason, we _always_ write this field as a result of this tx.
* Unfortunately because of the extreme edge case of being able to re-mint an NFT with the same
* ID, we must explicitly record a null URI. For this reason, we _always_ write this field as a
* result of this tx.
*
* @param tokenID The token ID
* @param owner The owner
@@ -127,7 +137,11 @@ struct NFTsData {
ripple::Blob const& uri,
ripple::TxMeta const& meta
)
: tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), owner(owner), uri(uri)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, owner(owner)
, uri(uri)
{
}
@@ -141,7 +155,12 @@ struct NFTsData {
* @param meta The transaction metadata
* @param isBurned Whether the NFT is burned
*/
NFTsData(ripple::uint256 const& tokenID, ripple::AccountID const& owner, ripple::TxMeta const& meta, bool isBurned)
NFTsData(
ripple::uint256 const& tokenID,
ripple::AccountID const& owner,
ripple::TxMeta const& meta,
bool isBurned
)
: tokenID(tokenID)
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
@@ -154,8 +173,9 @@ struct NFTsData {
* @brief Construct a new NFTsData object
*
* @note This constructor is used when parsing an NFTokenPage directly from ledger state.
* Unfortunately because of the extreme edge case of being able to re-mint an NFT with the same ID, we must
* explicitly record a null URI. For this reason, we _always_ write this field as a result of this tx.
* Unfortunately because of the extreme edge case of being able to re-mint an NFT with the same
* ID, we must explicitly record a null URI. For this reason, we _always_ write this field as a
* result of this tx.
*
* @param tokenID The token ID
* @param ledgerSequence The ledger sequence

View File

@@ -102,11 +102,17 @@ public:
// This would be the first write to the table.
// In this case, insert both min_sequence/max_sequence range into the table.
if (not range_.has_value()) {
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/true, ledgerSequence_);
executor_.writeSync(
schema_->insertLedgerRange, /* isLatestLedger =*/false, ledgerSequence_
);
executor_.writeSync(
schema_->insertLedgerRange, /* isLatestLedger =*/true, ledgerSequence_
);
}
if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
if (not this->executeSyncUpdate(
schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1)
)) {
log_.warn() << "Update failed for ledger " << ledgerSequence_;
return false;
}
@@ -131,7 +137,8 @@ public:
nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
} else {
// Amazon Keyspaces Workflow for non-taxon queries
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTaxon =
cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
Statement const firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
@@ -163,10 +170,10 @@ public:
/**
* @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
* @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
* no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
* function will throw an exception.
* @note Loading the cache by enumerating all accounts is currently unsupported by the AWS
* Keyspaces backend. This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces
* does not support, and there is no efficient alternative. This is acceptable as the cache is
* primarily loaded via diffs. Calling this function will throw an exception.
*
* @param number The total number of accounts to fetch.
* @param pageSize The maximum number of accounts per page.
@@ -220,7 +227,8 @@ private:
{
std::vector<ripple::uint256> nftIDs;
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTaxon =
cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
@@ -250,7 +258,8 @@ private:
}
/**
* @brief Takes a list of NFT IDs, fetches their full data, and assembles the final result with a cursor.
* @brief Takes a list of NFT IDs, fetches their full data, and assembles the final result with
* a cursor.
*/
NFTsAndCursor
populateNFTsAndCreateCursor(
@@ -273,17 +282,19 @@ private:
std::vector<Statement> selectNFTStatements;
selectNFTStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
return schema_->selectNFT.bind(nftID, ledgerSequence);
}
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTStatements),
[&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
);
std::vector<Statement> selectNFTURIStatements;
selectNFTURIStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
return schema_->selectNFTURI.bind(nftID, ledgerSequence);
}
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTURIStatements),
[&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
);
auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
@@ -295,7 +306,8 @@ private:
maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>();
maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}

View File

@@ -224,6 +224,7 @@ LedgerCache::setFull()
return;
full_ = true;
isCurrentlyLoading_ = false;
std::scoped_lock const lck{mtx_};
deletes_.clear();
}
@@ -254,7 +255,8 @@ LedgerCache::getSuccessorHitRate() const
{
if (successorReqCounter_.get().value() == 0u)
return 1;
return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
return static_cast<float>(successorHitCounter_.get().value()) /
successorReqCounter_.get().value();
}
std::expected<void, std::string>
@@ -266,7 +268,9 @@ LedgerCache::saveToFile(std::string const& path) const
impl::LedgerCacheFile file{path};
std::shared_lock const lock{mtx_};
impl::LedgerCacheFile::DataView const data{.latestSeq = latestSeq_, .map = map_, .deleted = deleted_};
impl::LedgerCacheFile::DataView const data{
.latestSeq = latestSeq_, .map = map_, .deleted = deleted_
};
return file.write(data);
}
@@ -287,4 +291,16 @@ LedgerCache::loadFromFile(std::string const& path, uint32_t minLatestSequence)
return {};
}
void
LedgerCache::startLoading()
{
isCurrentlyLoading_ = true;
}
bool
LedgerCache::isCurrentlyLoading() const
{
return isCurrentlyLoading_;
}
} // namespace data

View File

@@ -58,26 +58,34 @@ public:
private:
// counters for fetchLedgerObject(s) hit rate
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "ledger_objects"}}),
"LedgerCache statistics"
)};
std::reference_wrapper<util::prometheus::CounterInt> objectHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "ledger_objects"}})
)};
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{
PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "ledger_objects"}}),
"LedgerCache statistics"
)
};
std::reference_wrapper<util::prometheus::CounterInt> objectHitCounter_{
PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "ledger_objects"}})
)
};
// counters for fetchSuccessorKey hit rate
std::reference_wrapper<util::prometheus::CounterInt> successorReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "successor_key"}}),
"ledgerCache"
)};
std::reference_wrapper<util::prometheus::CounterInt> successorHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)};
std::reference_wrapper<util::prometheus::CounterInt> successorReqCounter_{
PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "successor_key"}}),
"ledgerCache"
)
};
std::reference_wrapper<util::prometheus::CounterInt> successorHitCounter_{
PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)
};
CacheMap map_;
CacheMap deleted_;
@@ -95,8 +103,17 @@ private:
util::prometheus::Labels{},
"Whether ledger cache is disabled or not"
)};
util::prometheus::Bool isCurrentlyLoading_{
PrometheusService::boolMetric(
"ledger_cache_is_currently_loading",
util::prometheus::Labels{},
"Whether ledger cache is currently loading or not"
)
// temporary set to prevent background thread from writing already deleted data. not used when cache is full
};
// temporary set to prevent background thread from writing already deleted data. not used when
// cache is full
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
public:
@@ -150,6 +167,12 @@ public:
std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) override;
void
startLoading() override;
[[nodiscard]] bool
isCurrentlyLoading() const override;
};
} // namespace data

View File

@@ -126,9 +126,9 @@ public:
/**
* @brief Sets the full flag to true.
*
* This is used when cache loaded in its entirety at startup of the application. This can be either loaded from DB,
* populated together with initial ledger download (on first run) or downloaded from a peer node (specified in
* config).
 * This is used when the cache is loaded in its entirety at startup of the application. This can
 * be either loaded from DB, populated together with the initial ledger download (on first run), or
 * downloaded from a peer node (specified in config).
*/
virtual void
setFull() = 0;
@@ -152,13 +152,15 @@ public:
size() const = 0;
/**
* @return A number representing the success rate of hitting an object in the cache versus missing it.
* @return A number representing the success rate of hitting an object in the cache versus
* missing it.
*/
virtual float
getObjectHitRate() const = 0;
/**
* @return A number representing the success rate of hitting a successor in the cache versus missing it.
* @return A number representing the success rate of hitting a successor in the cache versus
* missing it.
*/
virtual float
getSuccessorHitRate() const = 0;
@@ -191,6 +193,21 @@ public:
*/
[[nodiscard]] virtual std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) = 0;
/**
* @brief Mark the cache as currently loading from the backend.
* @note Should be called before initiating a backend-based cache load. The flag is
* automatically cleared when setFull() is called.
*/
virtual void
startLoading() = 0;
/**
* @brief Check whether the cache is currently being loaded from the backend.
* @return true if startLoading() has been called and setFull() has not yet been called
*/
[[nodiscard]] virtual bool
isCurrentlyLoading() const = 0;
};
} // namespace data

View File

@@ -0,0 +1,61 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2026, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/LedgerCacheLoadingState.hpp"
#include <memory>
namespace data {

LedgerCacheLoadingState::LedgerCacheLoadingState(LedgerCacheInterface const& cache) : cache_(cache)
{
}

void
LedgerCacheLoadingState::allowLoading()
{
    // Publish the permission first, then wake up any thread blocked in waitForLoadingAllowed().
    isLoadingAllowed_->store(true);
    isLoadingAllowed_->notify_all();
}

bool
LedgerCacheLoadingState::isLoadingAllowed() const
{
    return isLoadingAllowed_->load();
}

void
LedgerCacheLoadingState::waitForLoadingAllowed() const
{
    // Blocks while the flag still holds `false`; returns immediately once it is `true`.
    isLoadingAllowed_->wait(false);
}

bool
LedgerCacheLoadingState::isCurrentlyLoading() const
{
    // Delegates to the cache itself; this object does not track loading progress.
    return cache_.get().isCurrentlyLoading();
}

std::unique_ptr<LedgerCacheLoadingStateInterface>
LedgerCacheLoadingState::clone() const
{
    // Copy construction shares isLoadingAllowed_ (a shared_ptr), so all clones
    // observe the same loading-allowed flag.
    auto copy = std::make_unique<LedgerCacheLoadingState>(*this);
    return copy;
}

} // namespace data

View File

@@ -0,0 +1,116 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2026, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include <atomic>
#include <functional>
#include <memory>
namespace data {
/**
* @brief Interface for coordinating cache loading permissions across a cluster.
*
* Controls whether this node is allowed to load the ledger cache from the backend.
* In a cluster, at most one node should load the cache at a time; this state is used
* to gate loading until permission is granted.
*/
class LedgerCacheLoadingStateInterface {
public:
    virtual ~LedgerCacheLoadingStateInterface() = default;

    /**
     * @brief Allow this node to begin loading the cache from the backend.
     * @note Unblocks any thread currently waiting in waitForLoadingAllowed().
     */
    virtual void
    allowLoading() = 0;

    /**
     * @brief Check whether loading has been permitted.
     * @return true if allowLoading() has been called
     */
    [[nodiscard]] virtual bool
    isLoadingAllowed() = 0;

    /**
     * @brief Block until loading is permitted.
     * @note Returns immediately if allowLoading() was already called.
     */
    virtual void
    waitForLoadingAllowed() const = 0;

    /**
     * @brief Check whether the cache is currently being loaded from the backend.
     * @return true if the underlying cache has been marked as loading and is not yet full
     */
    [[nodiscard]] virtual bool
    isCurrentlyLoading() const = 0;

    /**
     * @brief Create a clone that shares the same loading-allowed flag.
     * @note Clones share the @c isLoadingAllowed_ atomic, so allowLoading() on any
     * copy is visible to all clones.
     * @return A new instance sharing the same loading permission state
     */
    [[nodiscard]] virtual std::unique_ptr<LedgerCacheLoadingStateInterface>
    clone() const = 0;
};
/**
* @brief Concrete implementation of @ref LedgerCacheLoadingStateInterface.
*
* Stores a reference to the ledger cache to delegate isCurrentlyLoading(), and a
* shared atomic flag for the loading-allowed coordination.
*/
class LedgerCacheLoadingState : public LedgerCacheLoadingStateInterface {
    // Non-owning reference to the cache; used only to delegate isCurrentlyLoading().
    std::reference_wrapper<LedgerCacheInterface const> cache_;
    // Shared across clones so that allowLoading() on any copy is visible to all of them.
    std::shared_ptr<std::atomic_bool> isLoadingAllowed_ = std::make_shared<std::atomic_bool>(false);

public:
    /**
     * @brief Construct a new LedgerCacheLoadingState.
     * @param cache The cache whose loading status will be monitored; must outlive this object
     */
    explicit LedgerCacheLoadingState(LedgerCacheInterface const& cache);

    /** @copydoc LedgerCacheLoadingStateInterface::allowLoading() */
    void
    allowLoading() override;

    /** @copydoc LedgerCacheLoadingStateInterface::isLoadingAllowed() */
    [[nodiscard]] bool
    isLoadingAllowed() const override;

    /** @copydoc LedgerCacheLoadingStateInterface::waitForLoadingAllowed() */
    void
    waitForLoadingAllowed() const override;

    /** @copydoc LedgerCacheLoadingStateInterface::isCurrentlyLoading() */
    [[nodiscard]] bool
    isCurrentlyLoading() const override;

    /** @copydoc LedgerCacheLoadingStateInterface::clone() */
    [[nodiscard]] std::unique_ptr<LedgerCacheLoadingStateInterface>
    clone() const override;
};
} // namespace data

View File

@@ -29,7 +29,10 @@
namespace data {
LedgerCacheSaver::LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache)
LedgerCacheSaver::LedgerCacheSaver(
util::config::ClioConfigDefinition const& config,
LedgerCacheInterface const& cache
)
: cacheFilePath_(config.maybeValue<std::string>("cache.file.path"))
, cache_(cache)
, isAsync_(config.get<bool>("cache.file.async_save"))
@@ -51,11 +54,14 @@ LedgerCacheSaver::save()
}
LOG(util::LogService::info()) << "Saving ledger cache to " << *cacheFilePath_;
if (auto const [success, durationMs] = util::timed([&]() { return cache_.get().saveToFile(*cacheFilePath_); });
if (auto const [success, durationMs] =
util::timed([&]() { return cache_.get().saveToFile(*cacheFilePath_); });
success.has_value()) {
LOG(util::LogService::info()) << "Successfully saved ledger cache in " << durationMs << " ms";
LOG(util::LogService::info())
<< "Successfully saved ledger cache in " << durationMs << " ms";
} else {
LOG(util::LogService::error()) << "Error saving LedgerCache to file: " << success.error();
LOG(util::LogService::error())
<< "Error saving LedgerCache to file: " << success.error();
}
});
if (not isAsync_) {

View File

@@ -62,7 +62,10 @@ public:
* @param config The configuration object containing the cache file path setting
* @param cache Reference to the ledger cache interface to be saved
*/
LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache);
LedgerCacheSaver(
util::config::ClioConfigDefinition const& config,
LedgerCacheInterface const& cache
);
/**
* @brief Destructor that ensures the saving thread is properly joined.

View File

@@ -81,8 +81,16 @@ struct TransactionAndMetadata {
* @param ledgerSequence The ledger sequence
* @param date The date
*/
TransactionAndMetadata(Blob transaction, Blob metadata, std::uint32_t ledgerSequence, std::uint32_t date)
: transaction{std::move(transaction)}, metadata{std::move(metadata)}, ledgerSequence{ledgerSequence}, date{date}
TransactionAndMetadata(
Blob transaction,
Blob metadata,
std::uint32_t ledgerSequence,
std::uint32_t date
)
: transaction{std::move(transaction)}
, metadata{std::move(metadata)}
, ledgerSequence{ledgerSequence}
, date{date}
{
}
@@ -192,7 +200,11 @@ struct NFT {
ripple::AccountID const& owner,
Blob uri,
bool isBurned)
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{std::move(uri)}, isBurned{isBurned}
: tokenID{tokenID}
, ledgerSequence{ledgerSequence}
, owner{owner}
, uri{std::move(uri)}
, isBurned{isBurned}
{
}
@@ -204,7 +216,10 @@ struct NFT {
* @param owner The owner
* @param isBurned Whether the token is burned
*/
NFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, ripple::AccountID const& owner, bool isBurned)
NFT(ripple::uint256 const& tokenID,
std::uint32_t ledgerSequence,
ripple::AccountID const& owner,
bool isBurned)
: NFT(tokenID, ledgerSequence, owner, {}, isBurned)
{
}
@@ -212,8 +227,8 @@ struct NFT {
/**
* @brief Check if the NFT is the same as another
*
* Clearly two tokens are the same if they have the same ID, but this struct stores the state of a given
* token at a given ledger sequence, so we also need to compare with ledgerSequence.
* Clearly two tokens are the same if they have the same ID, but this struct stores the state of
* a given token at a given ledger sequence, so we also need to compare with ledgerSequence.
*
* @param other The other NFT
* @return true if they are the same; false otherwise
@@ -293,7 +308,8 @@ struct AmendmentKey {
* @brief Construct a new AmendmentKey
* @param val Anything convertible to a string
*/
AmendmentKey(std::convertible_to<std::string> auto&& val) : name{std::forward<decltype(val)>(val)}
AmendmentKey(std::convertible_to<std::string> auto&& val)
: name{std::forward<decltype(val)>(val)}
{
}
@@ -315,8 +331,14 @@ struct AmendmentKey {
operator<=>(AmendmentKey const& other) const = default;
};
constexpr ripple::uint256 kFIRST_KEY{"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 kLAST_KEY{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 kHI192{"0000000000000000000000000000000000000000000000001111111111111111"};
constexpr ripple::uint256 kFIRST_KEY{
"0000000000000000000000000000000000000000000000000000000000000000"
};
constexpr ripple::uint256 kLAST_KEY{
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
};
constexpr ripple::uint256 kHI192{
"0000000000000000000000000000000000000000000000001111111111111111"
};
} // namespace data

View File

@@ -104,7 +104,11 @@ public:
* @param cache The ledger cache
* @param readOnly Whether the database should be in readonly mode
*/
CassandraBackendFamily(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
CassandraBackendFamily(
SettingsProviderType settingsProvider,
data::LedgerCacheInterface& cache,
bool readOnly
)
: BackendInterface(cache)
, settingsProvider_{std::move(settingsProvider)}
, schema_{settingsProvider_}
@@ -116,8 +120,8 @@ public:
if (not readOnly) {
if (auto const res = handle_.execute(schema_.createKeyspace); not res.has_value()) {
// on datastax, creation of keyspaces can be configured to only be done thru the admin
// interface. this does not mean that the keyspace does not already exist tho.
// On DataStax, creation of keyspaces can be configured to only be done through the
// admin interface. This does not mean that the keyspace does not already exist, though.
if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
throw std::runtime_error("Could not create keyspace: " + res.error());
}
@@ -130,7 +134,8 @@ public:
schema_.prepareStatements(handle_);
} catch (std::runtime_error const& ex) {
auto const error = fmt::format(
"Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
"Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off "
"or another Clio "
"node with write access to DB should be started first.",
ex.what(),
readOnly
@@ -169,8 +174,8 @@ public:
auto cursor = txnCursor;
if (cursor) {
statement.bindAt(1, cursor->asTuple());
LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
<< cursor->transactionIndex;
LOG(log_.debug()) << "account = " << ripple::strHex(account)
<< " tuple = " << cursor->ledgerSequence << cursor->transactionIndex;
} else {
auto const seq = forward ? rng->minSequence : rng->maxSequence;
auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();
@@ -195,7 +200,8 @@ public:
auto numRows = results.numRows();
LOG(log_.info()) << "num_rows = " << numRows;
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
for (auto [hash, data] :
extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
hashes.push_back(hash);
if (--numRows == 0) {
LOG(log_.debug()) << "Setting cursor";
@@ -251,7 +257,10 @@ public:
}
std::optional<ripple::LedgerHeader>
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
fetchLedgerBySequence(
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
if (auto const lock = ledgerCache_.get(); lock.has_value() && lock->seq == sequence)
return lock->ledger;
@@ -259,7 +268,8 @@ public:
auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
if (res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
if (auto const maybeValue = result.template get<std::vector<unsigned char>>();
maybeValue) {
auto const header = util::deserializeHeader(ripple::makeSlice(*maybeValue));
ledgerCache_.put(FetchLedgerCache::CacheEntry{header, sequence});
return header;
@@ -336,7 +346,10 @@ public:
}
std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
fetchAllTransactionsInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
return fetchTransactions(hashes, yield);
@@ -349,7 +362,8 @@ public:
) const override
{
auto start = std::chrono::system_clock::now();
auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
auto const res =
executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
if (not res) {
LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
@@ -368,9 +382,12 @@ public:
hashes.push_back(std::move(hash));
auto end = std::chrono::system_clock::now();
LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< " milliseconds";
LOG(
log_.debug()
) << "Fetched "
<< hashes.size() << " transaction hashes from database in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< " milliseconds";
return hashes;
}
@@ -386,7 +403,8 @@ public:
if (not res)
return std::nullopt;
if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>();
maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);
@@ -437,8 +455,8 @@ public:
auto cursor = cursorIn;
if (cursor) {
statement.bindAt(1, cursor->asTuple());
LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
<< cursor->transactionIndex;
LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID)
<< " tuple = " << cursor->ledgerSequence << cursor->transactionIndex;
} else {
auto const seq = forward ? rng->minSequence : rng->maxSequence;
auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();
@@ -461,7 +479,8 @@ public:
auto numRows = results.numRows();
LOG(log_.info()) << "num_rows = " << numRows;
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
for (auto [hash, data] :
extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
hashes.push_back(hash);
if (--numRows == 0) {
LOG(log_.debug()) << "Setting cursor";
@@ -495,7 +514,11 @@ public:
) const override
{
auto const holderEntries = executor_.read(
yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
yield,
schema_->selectMPTHolders,
mptID,
cursorIn.value_or(ripple::AccountID(0)),
Limit{limit}
);
auto const& holderResults = holderEntries.value();
@@ -513,7 +536,9 @@ public:
auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);
auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });
auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) {
return mpt.empty();
});
mptObjects.erase(it, mptObjects.end());
@@ -531,7 +556,8 @@ public:
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence
<< ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
if (auto const result = res->template get<Blob>(); result) {
if (result->size())
@@ -553,7 +579,8 @@ public:
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence
<< ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
auto [_, seq] = result.value();
@@ -571,7 +598,8 @@ public:
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>();
maybeValue) {
auto [transaction, meta, seq, date] = *maybeValue;
return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
}
@@ -591,7 +619,8 @@ public:
boost::asio::yield_context yield
) const override
{
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence);
res) {
if (auto const result = res->template get<ripple::uint256>(); result) {
if (*result == kLAST_KEY)
return std::nullopt;
@@ -607,7 +636,10 @@ public:
}
std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
fetchTransactions(
std::vector<ripple::uint256> const& hashes,
boost::asio::yield_context yield
) const override
{
if (hashes.empty())
return {};
@@ -622,9 +654,10 @@ public:
auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
// TODO: seems like a job for "hash IN (list of hashes)" instead?
std::transform(
std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
return schema_->selectTransaction.bind(hash);
}
std::cbegin(hashes),
std::cend(hashes),
std::back_inserter(statements),
[this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
);
auto const entries = executor_.readEach(yield, statements);
@@ -633,7 +666,8 @@ public:
std::cend(entries),
std::back_inserter(results),
[](auto const& res) -> TransactionAndMetadata {
if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>();
maybeRow)
return *maybeRow;
return {};
@@ -642,8 +676,8 @@ public:
});
ASSERT(numHashes == results.size(), "Number of hashes and results must match");
LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
<< " milliseconds";
LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in "
<< timeDiff << " milliseconds";
return results;
}
@@ -668,14 +702,18 @@ public:
// TODO: seems like a job for "key IN (list of keys)" instead?
std::transform(
std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
return schema_->selectObject.bind(key, sequence);
}
std::cbegin(keys),
std::cend(keys),
std::back_inserter(statements),
[this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
);
auto const entries = executor_.readEach(yield, statements);
std::transform(
std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
std::cbegin(entries),
std::cend(entries),
std::back_inserter(results),
[](auto const& res) -> Blob {
if (auto const maybeValue = res.template get<Blob>(); maybeValue)
return *maybeValue;
@@ -688,34 +726,40 @@ public:
}
std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
fetchLedgerDiff(
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
if (not res) {
LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
return {};
}
auto const [keys, timeDiff] =
util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
if (not res) {
LOG(log_.error()) << "Could not fetch ledger diff: " << res.error()
<< "; ledger = " << ledgerSequence;
return {};
}
auto const& results = res.value();
if (not results) {
LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
return {};
}
auto const& results = res.value();
if (not results) {
LOG(log_.error())
<< "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
return {};
}
std::vector<ripple::uint256> resultKeys;
for (auto [key] : extract<ripple::uint256>(results))
resultKeys.push_back(key);
std::vector<ripple::uint256> resultKeys;
for (auto [key] : extract<ripple::uint256>(results))
resultKeys.push_back(key);
return resultKeys;
});
return resultKeys;
});
// one of the above errors must have happened
if (keys.empty())
return {};
LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
<< " milliseconds";
LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in "
<< timeDiff << " milliseconds";
auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
std::vector<LedgerObject> results;
@@ -733,7 +777,10 @@ public:
}
std::optional<std::string>
fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
fetchMigratorStatus(
std::string const& migratorName,
boost::asio::yield_context yield
) const override
{
auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
if (not res) {
@@ -771,7 +818,8 @@ public:
void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
{
LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";
LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " ["
<< blob.size() << " bytes]";
if (range_)
executor_.write(schema_->insertDiff, seq, key);
@@ -783,7 +831,8 @@ public:
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
{
LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
<< " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
<< " seq = " << std::to_string(seq) << " successor = " << successor.size()
<< " bytes.";
ASSERT(!key.empty(), "Key must not be empty");
ASSERT(!successor.empty(), "Successor must not be empty");
@@ -797,13 +846,15 @@ public:
statements.reserve(data.size() * 10); // assume 10 transactions avg
for (auto& record : data) {
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
});
std::ranges::transform(
record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
}
);
}
executor_.write(std::move(statements));
@@ -815,13 +866,15 @@ public:
std::vector<Statement> statements;
statements.reserve(record.accounts.size());
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
});
std::ranges::transform(
record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
}
);
executor_.write(std::move(statements));
}
@@ -834,7 +887,9 @@ public:
std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
return schema_->insertNFTTx.bind(
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
record.tokenID,
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
});
@@ -854,7 +909,12 @@ public:
executor_.write(schema_->insertLedgerTransaction, seq, hash);
executor_.write(
schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
schema_->insertTransaction,
std::move(hash),
seq,
date,
std::move(transaction),
std::move(metadata)
);
}
@@ -866,9 +926,9 @@ public:
for (NFTsData const& record : data) {
if (!record.onlyUriChanged) {
statements.push_back(
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
);
statements.push_back(schema_->insertNFT.bind(
record.tokenID, record.ledgerSequence, record.owner, record.isBurned
));
// If `uri` is set (and it can be set to an empty uri), we know this
// is a net-new NFT. That is, this NFT has not been seen before by
@@ -881,15 +941,15 @@ public:
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
record.tokenID
));
statements.push_back(
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
);
statements.push_back(schema_->insertNFTURI.bind(
record.tokenID, record.ledgerSequence, record.uri.value()
));
}
} else {
// only uri changed, we update the uri table only
statements.push_back(
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
);
statements.push_back(schema_->insertNFTURI.bind(
record.tokenID, record.ledgerSequence, record.uri.value()
));
}
}
@@ -918,14 +978,18 @@ public:
writeMigratorStatus(std::string const& migratorName, std::string const& status) override
{
executor_.writeSync(
schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
schema_->insertMigratorStatus,
data::cassandra::Text{migratorName},
data::cassandra::Text(status)
);
}
void
writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
{
executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
executor_.writeSync(
schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid
);
}
bool

View File

@@ -78,12 +78,13 @@ concept SomeExecutionStrategy = requires(
* @brief The requirements of a retry policy.
*/
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
{ T(ioc) };
{ a.shouldRetry(err) } -> std::same_as<bool>;
{
a.retry([]() {})
} -> std::same_as<void>;
};
concept SomeRetryPolicy =
requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
{ T(ioc) };
{ a.shouldRetry(err) } -> std::same_as<bool>;
{
a.retry([]() {})
} -> std::same_as<void>;
};
} // namespace data::cassandra

View File

@@ -105,9 +105,9 @@ public:
bool
isTimeout() const
{
return code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or
code_ == CASS_ERROR_SERVER_READ_TIMEOUT;
return code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or
code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or code_ == CASS_ERROR_SERVER_UNAVAILABLE or
code_ == CASS_ERROR_SERVER_OVERLOADED or code_ == CASS_ERROR_SERVER_READ_TIMEOUT;
}
/**

View File

@@ -36,7 +36,8 @@ Handle::Handle(Settings clusterSettings) : cluster_{clusterSettings}
{
}
Handle::Handle(std::string_view contactPoints) : Handle{Settings::defaultSettings().withContactPoints(contactPoints)}
Handle::Handle(std::string_view contactPoints)
: Handle{Settings::defaultSettings().withContactPoints(contactPoints)}
{
}
@@ -84,8 +85,11 @@ Handle::disconnect() const
Handle::FutureType
Handle::asyncReconnect(std::string_view keyspace) const
{
if (auto rc = asyncDisconnect().await(); not rc) // sync
throw std::logic_error("Reconnect to keyspace '" + std::string{keyspace} + "' failed: " + rc.error());
if (auto rc = asyncDisconnect().await(); not rc) { // sync
throw std::logic_error(
"Reconnect to keyspace '" + std::string{keyspace} + "' failed: " + rc.error()
);
}
return asyncConnect(keyspace);
}
@@ -123,7 +127,10 @@ Handle::asyncExecute(StatementType const& statement) const
}
Handle::FutureWithCallbackType
Handle::asyncExecute(StatementType const& statement, std::function<void(ResultOrErrorType)>&& cb) const
Handle::asyncExecute(
StatementType const& statement,
std::function<void(ResultOrErrorType)>&& cb
) const
{
return Handle::FutureWithCallbackType{cass_session_execute(session_, statement), std::move(cb)};
}
@@ -147,9 +154,14 @@ Handle::execute(std::vector<StatementType> const& statements) const
}
Handle::FutureWithCallbackType
Handle::asyncExecute(std::vector<StatementType> const& statements, std::function<void(ResultOrErrorType)>&& cb) const
Handle::asyncExecute(
std::vector<StatementType> const& statements,
std::function<void(ResultOrErrorType)>&& cb
) const
{
return Handle::FutureWithCallbackType{cass_session_execute_batch(session_, Batch{statements}), std::move(cb)};
return Handle::FutureWithCallbackType{
cass_session_execute_batch(session_, Batch{statements}), std::move(cb)
};
}
Handle::PreparedStatementType

View File

@@ -293,14 +293,18 @@ public:
execute(std::vector<StatementType> const& statements) const;
/**
* @brief Execute a batch of (bound or simple) statements asynchronously with a completion callback.
* @brief Execute a batch of (bound or simple) statements asynchronously with a completion
* callback.
*
* @param statements The statements to execute
* @param cb The callback to execute when data is ready
* @return A future that holds onto the callback provided
*/
[[nodiscard]] FutureWithCallbackType
asyncExecute(std::vector<StatementType> const& statements, std::function<void(ResultOrErrorType)>&& cb) const;
asyncExecute(
std::vector<StatementType> const& statements,
std::function<void(ResultOrErrorType)>&& cb
) const;
/**
* @brief Prepare a statement.
@@ -314,8 +318,8 @@ public:
};
/**
* @brief Extracts the results into series of std::tuple<Types...> by creating a simple wrapper with an STL input
* iterator inside.
* @brief Extracts the results into series of std::tuple<Types...> by creating a simple wrapper with
* an STL input iterator inside.
*
* You can call .begin() and .end() in order to iterate as usual.
* This also means that you can use it in a range-based for or with some algorithms.

View File

@@ -43,9 +43,14 @@ namespace data::cassandra {
* @return The qualified table name
*/
template <SomeSettingsProvider SettingsProviderType>
[[nodiscard]] std::string inline qualifiedTableName(SettingsProviderType const& provider, std::string_view name)
[[nodiscard]] std::string inline qualifiedTableName(
SettingsProviderType const& provider,
std::string_view name
)
{
return fmt::format("{}.{}{}", provider.getKeyspace(), provider.getTablePrefix().value_or(""), name);
return fmt::format(
"{}.{}{}", provider.getKeyspace(), provider.getTablePrefix().value_or(""), name
);
}
/**
@@ -65,7 +70,8 @@ public:
*
* @param settingsProvider The settings provider
*/
explicit Schema(SettingsProviderType const& settingsProvider) : settingsProvider_{std::cref(settingsProvider)}
explicit Schema(SettingsProviderType const& settingsProvider)
: settingsProvider_{std::cref(settingsProvider)}
{
}

View File

@@ -61,12 +61,18 @@ SettingsProvider::parseOptionalCertificate() const
auto const path = std::filesystem::path(certPath.asString());
std::ifstream fileStream(path.string(), std::ios::in);
if (!fileStream) {
throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
throw std::system_error(
errno, std::generic_category(), "Opening certificate " + path.string()
);
}
std::string contents(std::istreambuf_iterator<char>{fileStream}, std::istreambuf_iterator<char>{});
std::string contents(
std::istreambuf_iterator<char>{fileStream}, std::istreambuf_iterator<char>{}
);
if (fileStream.bad()) {
throw std::system_error(errno, std::generic_category(), "Reading certificate " + path.string());
throw std::system_error(
errno, std::generic_category(), "Reading certificate " + path.string()
);
}
return contents;
@@ -82,7 +88,8 @@ SettingsProvider::parseSettings() const
// all config values used in settings is under "database.cassandra" prefix
if (config_.getValueView("secure_connect_bundle").hasValue()) {
auto const bundle = Settings::SecureConnectionBundle{(config_.get<std::string>("secure_connect_bundle"))};
auto const bundle =
Settings::SecureConnectionBundle{(config_.get<std::string>("secure_connect_bundle"))};
settings.connectionInfo = bundle;
} else {
Settings::ContactPoints out;
@@ -101,12 +108,14 @@ SettingsProvider::parseSettings() const
if (config_.getValueView("connect_timeout").hasValue()) {
auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");
settings.connectionTimeout = std::chrono::milliseconds{connectTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
settings.connectionTimeout =
std::chrono::milliseconds{connectTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
}
if (config_.getValueView("request_timeout").hasValue()) {
auto const requestTimeoutSecond = config_.get<uint32_t>("request_timeout");
settings.requestTimeout = std::chrono::milliseconds{requestTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
settings.requestTimeout =
std::chrono::milliseconds{requestTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
}
settings.certificate = parseOptionalCertificate();

View File

@@ -52,7 +52,8 @@ template <
typename StatementType,
typename HandleType = Handle,
SomeRetryPolicy RetryPolicyType = ExponentialBackoffRetryPolicy>
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>> {
class AsyncExecutor : public std::enable_shared_from_this<
AsyncExecutor<StatementType, HandleType, RetryPolicyType>> {
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;
using RetryCallbackType = std::function<void()>;
@@ -92,7 +93,9 @@ public:
}
};
auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete), std::move(onRetry));
auto ptr = std::make_shared<EnableMakeShared>(
ioc, std::move(data), std::move(onComplete), std::move(onRetry)
);
ptr->execute(handle);
}
@@ -103,7 +106,10 @@ private:
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}, onRetry_{std::move(onRetry)}
: data_{std::move(data)}
, retryPolicy_{ioc}
, onComplete_{std::move(onComplete)}
, onRetry_{std::move(onRetry)}
{
}

View File

@@ -44,7 +44,8 @@ namespace data::cassandra::impl {
* UNLOGGED: For performance. Sends many separate updates in one network trip to be fast.
* Use this for bulk-loading unrelated data, but know there's NO all-or-nothing guarantee.
*
* More info here: https://docs.datastax.com/en/developer/cpp-driver-dse/1.10/features/basics/batches/index.html
* More info here:
* https://docs.datastax.com/en/developer/cpp-driver-dse/1.10/features/basics/batches/index.html
*/
Batch::Batch(std::vector<Statement> const& statements)
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_UNLOGGED), kBATCH_DELETER}

Some files were not shown because too many files have changed in this diff Show More